code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
# -*- python -*-
# This file is part of Fusion-icon.
# Fusion-icon is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fusion-icon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Original copyright 2007 Christopher Williams <christopherw@verizon.net>
# Author(s): crdlb, kozec, raveit65
import os

# Known filesystem locations of vendor-diverted Mesa libGL builds; checked
# because the proprietary fglrx/nvidia drivers divert the stock libGL.
mesa_libgl_locations = (
    # ubuntu
    '/usr/lib/fglrx/libGL.so.1.2.xlibmesa',
    '/usr/lib/nvidia/libGL.so.1.2.xlibmesa',
    # gentoo
    '/usr/lib/opengl/xorg-x11/lib/libGL.so.1.2',
    # archlinux
    '/opt/mesa-xgl/lib/libGL.so.1.2',
    '/lib/mesa/libGL.so.1.2',
    # debian
    '/usr/lib/fglrx/diversions/libGL.so.1.2',
    '/usr/share/nvidia-glx/diversions/libGL.so.1.2',
)

# Command-line arguments used when launching compiz.
compiz_args = ['ccp', '--replace', '--sm-disable', '--ignore-desktop-hints']

# Per-user configuration lives under $XDG_CONFIG_HOME (default: ~/.config).
config_home = os.environ.get('XDG_CONFIG_HOME',
                             os.path.join(os.environ['HOME'], '.config'))
config_folder = os.path.join(config_home, 'compiz')
config_file = os.path.join(config_folder, 'fusion-icon')

# Companion applications.
#   key -> (base command, full command line, label)
apps = {
    'ccsm':
        ('ccsm', ['ccsm'],
         'Settings Manager'),
    'emerald theme manager':
        ('emerald-theme-manager', ['emerald-theme-manager'],
         'Emerald Theme Manager'),
}
# Window managers the icon can switch between.
#   key -> (base command, full command line,
#           label, desktop environment (or None),
#           special flags (or None), command to run before replacing (or None))
wms = {
    'marco':
        ('marco', ['marco', '--replace'],
         'Marco', 'mate', None, None,),
    'metacity':
        ('metacity', ['metacity', '--replace'],
         'Metacity', 'gnome', None, None,),
    'mutter':
        ('mutter', ['mutter', '--replace'],
         'Mutter', 'gnome', None, None,),
    'kwin':
        ('kwin_x11', ['kwin_x11', '--replace'],
         'KWin', 'kde', None, None),
    'kwin4':
        ('kwin', ['kwin', '--replace'],
         'KWin4', 'kde', None, None),
    # xfwm4 has no --replace option, hence the 'noreplace' flag and the
    # explicit killall beforehand.
    'xfwm4':
        ('xfwm4', ['xfwm4'],
         'Xfwm4', 'xfce', ['noreplace'], ['killall', 'xfwm4']),
    'openbox':
        ('openbox', ['openbox', '--replace'],
         'Openbox', None, None, None),
    'blackbox':
        ('blackbox', ['blackbox', '--replace'],
         'Blackbox', None, None, None),
    'fvwm':
        ('fvwm', ['fvwm', '--replace'],
         'FVWM', None, None, None),
    'icewm':
        ('icewm', ['icewm', '--replace'],
         'IceWM', None, None, None),
}
# Window decorators usable with compiz.
#   key -> (base command, full command line,
#           label, desktop environment (or None))
# NOTE(review): unlike `apps`/`wms`, the command line here is a single shell
# string rather than an argv list -- presumably consumed differently by the
# launcher; confirm against callers before normalizing.
decorators = {
    'emerald':
        ('emerald', 'emerald --replace',
         'Emerald', None),
    'gwd':
        ('gtk-window-decorator', 'gtk-window-decorator --replace',
         'GTK+ Window Decorator', 'mate'),
}

# Toggleable compiz launch options.
#   key -> ((unused), command-line switch, label)
options = {
    'indirect rendering':
        (None, '--indirect-rendering', 'Indirect Rendering'),
    'loose binding':
        (None, '--loose-binding', 'Loose Binding'),
}
# frozen_string_literal: true
module ActiveRecord
include ActiveSupport::Deprecation::DeprecatedConstantAccessor
# = Active Record Errors
#
# Generic Active Record exception class. Root of this file's exception
# hierarchy: rescuing ActiveRecordError catches every error Active Record
# raises itself.
class ActiveRecordError < StandardError
end

# Raised when the single-table inheritance mechanism fails to locate the subclass
# (for example due to improper usage of column that
# {ActiveRecord::Base.inheritance_column}[rdoc-ref:ModelSchema.inheritance_column]
# points to).
class SubclassNotFound < ActiveRecordError
end

# Raised when an object assigned to an association has an incorrect type.
#
#   class Ticket < ActiveRecord::Base
#     has_many :patches
#   end
#
#   class Patch < ActiveRecord::Base
#     belongs_to :ticket
#   end
#
#   # Comments are not patches, this assignment raises AssociationTypeMismatch.
#   @ticket.patches << Comment.new(content: "Please attach tests to your patch.")
class AssociationTypeMismatch < ActiveRecordError
end

# Raised when unserialized object's type mismatches one specified for serializable field.
class SerializationTypeMismatch < ActiveRecordError
end

# Raised when adapter not specified on connection (or configuration file
# +config/database.yml+ misses adapter field).
class AdapterNotSpecified < ActiveRecordError
end

# Raised when a model makes a query but it has not specified an associated table.
class TableNotSpecified < ActiveRecordError
end

# Raised when Active Record cannot find database adapter specified in
# +config/database.yml+ or programmatically.
class AdapterNotFound < ActiveRecordError
end
# Superclass for every error raised from an Active Record adapter.
# Carries the connection pool the failing connection belongs to, when known.
class AdapterError < ActiveRecordError
  attr_reader :connection_pool

  def initialize(message = nil, connection_pool: nil)
    super(message)
    @connection_pool = connection_pool
  end
end
# Raised when a connection to the database could not be established (for example when
# {ActiveRecord::Base.lease_connection=}[rdoc-ref:ConnectionHandling#lease_connection]
# is given a +nil+ object).
class ConnectionNotEstablished < AdapterError
  def initialize(message = nil, connection_pool: nil)
    super
  end

  # Late-binds the pool (first writer wins) and returns +self+ so the call
  # can be chained onto a raise/re-raise site.
  def set_pool(connection_pool)
    @connection_pool ||= connection_pool
    self
  end
end
# Raised when a connection could not be obtained within the connection
# acquisition timeout period: because max connections in pool
# are in use.
class ConnectionTimeoutError < ConnectionNotEstablished
end

# Raised when a database connection pool is requested but
# has not been defined.
class ConnectionNotDefined < ConnectionNotEstablished
  def initialize(message = nil, connection_name: nil, role: nil, shard: nil)
    super(message)
    @connection_name = connection_name
    @role = role
    @shard = shard
  end

  # Identifiers of the pool that was requested but never configured.
  attr_reader :connection_name, :role, :shard
end
# Raised when a connection to the database could not be established because it was not
# able to connect to the host or when the authorization failed.
class DatabaseConnectionError < ConnectionNotEstablished
  def initialize(message = nil)
    super(message || "Database connection error")
  end

  class << self
    # Convenience constructor for an unreachable/invalid +hostname+.
    def hostname_error(hostname)
      DatabaseConnectionError.new(<<~MSG)
        There is an issue connecting with your hostname: #{hostname}.\n
        Please check your database configuration and ensure there is a valid connection to your database.
      MSG
    end

    # Convenience constructor for failed authentication as +username+.
    def username_error(username)
      DatabaseConnectionError.new(<<~MSG)
        There is an issue connecting to your database with your username/password, username: #{username}.\n
        Please check your database configuration to ensure the username/password are valid.
      MSG
    end
  end
end
# Raised when a pool was unable to get ahold of all its connections
# to perform a "group" action such as
# {ActiveRecord::Base.connection_pool.disconnect!}[rdoc-ref:ConnectionAdapters::ConnectionPool#disconnect!]
# or {ActiveRecord::Base.connection_handler.clear_reloadable_connections!}[rdoc-ref:ConnectionAdapters::ConnectionHandler#clear_reloadable_connections!].
class ExclusiveConnectionTimeoutError < ConnectionTimeoutError
end

# Raised when a write to the database is attempted on a read only connection.
class ReadOnlyError < ActiveRecordError
end

# Raised when shard swapping is attempted on a connection that prohibits it.
# See {ActiveRecord::ConnectionHandling#prohibit_shard_swapping}[rdoc-ref:ConnectionHandling#prohibit_shard_swapping].
class ShardSwapProhibitedError < ArgumentError
  # This subclasses ArgumentError for backwards compatibility.
end
# Raised when Active Record cannot find a record by given id or set of ids.
class RecordNotFound < ActiveRecordError
  attr_reader :model, :primary_key, :id

  def initialize(message = nil, model = nil, primary_key = nil, id = nil)
    super(message)
    @model = model
    @primary_key = primary_key
    @id = id
  end
end
# Raised by {ActiveRecord::Base#save!}[rdoc-ref:Persistence#save!] and
# {ActiveRecord::Base.update_attribute!}[rdoc-ref:Persistence#update_attribute!]
# methods when a record failed to validate or cannot be saved due to any of the
# <tt>before_*</tt> callbacks throwing +:abort+. See
# ActiveRecord::Callbacks for further details.
#
#   class Product < ActiveRecord::Base
#     before_save do
#       throw :abort if price < 0
#     end
#   end
#
#   Product.create! # => raises an ActiveRecord::RecordNotSaved
class RecordNotSaved < ActiveRecordError
  # The record that failed to save, when available.
  attr_reader :record

  def initialize(message = nil, record = nil)
    @record = record
    super(message)
  end
end

# Raised by {ActiveRecord::Base#destroy!}[rdoc-ref:Persistence#destroy!]
# when a record cannot be destroyed due to any of the
# <tt>before_destroy</tt> callbacks throwing +:abort+. See
# ActiveRecord::Callbacks for further details.
#
#   class User < ActiveRecord::Base
#     before_destroy do
#       throw :abort if still_active?
#     end
#   end
#
#   User.first.destroy! # => raises an ActiveRecord::RecordNotDestroyed
class RecordNotDestroyed < ActiveRecordError
  # The record that could not be destroyed, when available.
  attr_reader :record

  def initialize(message = nil, record = nil)
    @record = record
    super(message)
  end
end

# Raised when Active Record finds multiple records but only expected one.
class SoleRecordExceeded < ActiveRecordError
  attr_reader :record

  def initialize(record = nil)
    @record = record
    super "Wanted only one #{record&.name || "record"}"
  end
end
# Superclass for all database execution errors.
#
# Wraps the underlying database error as +cause+.
class StatementInvalid < AdapterError
  def initialize(message = nil, sql: nil, binds: nil, connection_pool: nil)
    # Default the message to that of the exception currently being raised
    # ($!), if any.
    super(message || $!&.message, connection_pool: connection_pool)
    @sql = sql
    @binds = binds
  end

  attr_reader :sql, :binds

  # Late-binds the offending query (first writer wins); returns +self+.
  def set_query(sql, binds)
    unless @sql
      @sql = sql
      @binds = binds
    end
    self
  end
end

# Defunct wrapper class kept for compatibility.
# StatementInvalid wraps the original exception now.
class WrappedDatabaseException < StatementInvalid
end

# Raised when a record cannot be inserted or updated because it would violate a uniqueness constraint.
class RecordNotUnique < WrappedDatabaseException
end

# Raised when a record cannot be inserted or updated because it references a non-existent record,
# or when a record cannot be deleted because a parent record references it.
class InvalidForeignKey < WrappedDatabaseException
end
# Raised when a foreign key constraint cannot be added because the column type does not match the referenced column type.
class MismatchedForeignKey < StatementInvalid
  def initialize(
    message: nil,
    sql: nil,
    binds: nil,
    table: nil,
    foreign_key: nil,
    target_table: nil,
    primary_key: nil,
    primary_key_column: nil,
    query_parser: nil,
    connection_pool: nil
  )
    # Keep the raw message and parser so set_query can rebuild a richer
    # exception once the SQL is known.
    @original_message = message
    @query_parser = query_parser
    if table
      # Build an actionable message naming the exact column type to use.
      type = primary_key_column.bigint? ? :bigint : primary_key_column.type
      msg = <<~EOM.squish
        Column `#{foreign_key}` on table `#{table}` does not match column `#{primary_key}` on `#{target_table}`,
        which has type `#{primary_key_column.sql_type}`.
        To resolve this issue, change the type of the `#{foreign_key}` column on `#{table}` to be :#{type}.
        (For example `t.#{type} :#{foreign_key}`).
      EOM
    else
      # Without table details, fall back to generic guidance.
      msg = <<~EOM.squish
        There is a mismatch between the foreign key and primary key column types.
        Verify that the foreign key column type and the primary key of the associated table match types.
      EOM
    end
    if message
      msg << "\nOriginal message: #{message}"
    end
    super(msg, sql: sql, binds: binds, connection_pool: connection_pool)
  end

  def set_query(sql, binds)
    # If a query parser is available and no SQL was attached yet, rebuild the
    # exception with details parsed out of the SQL; otherwise defer to
    # StatementInvalid#set_query.
    if @query_parser && !@sql
      self.class.new(
        message: @original_message,
        sql: sql,
        binds: binds,
        connection_pool: @connection_pool,
        **@query_parser.call(sql)
      ).tap do |exception|
        exception.set_backtrace backtrace
      end
    else
      super
    end
  end
end
# Raised when a record cannot be inserted or updated because it would violate a not null constraint.
class NotNullViolation < StatementInvalid
end

# Raised when a record cannot be inserted or updated because it would violate a check constraint.
class CheckViolation < StatementInvalid
end

# Raised when a record cannot be inserted or updated because it would violate an exclusion constraint.
class ExclusionViolation < StatementInvalid
end

# Raised when a record cannot be inserted or updated because a value is too long for a column type.
class ValueTooLong < StatementInvalid
end

# Raised when a value in a statement is out of range for its column type.
class RangeError < StatementInvalid
end

# Raised when a statement produces an SQL warning.
class SQLWarning < AdapterError
  # Adapter-specific warning code and severity level.
  attr_reader :code, :level
  attr_accessor :sql

  def initialize(message = nil, code = nil, level = nil, sql = nil, connection_pool = nil)
    super(message, connection_pool: connection_pool)
    @code = code
    @level = level
    @sql = sql
  end
end

# Raised when the number of placeholders in an SQL fragment passed to
# {ActiveRecord::Base.where}[rdoc-ref:QueryMethods#where]
# does not match the number of values supplied.
#
# For example, when there are two placeholders with only one value supplied:
#
#   Location.where("lat = ? AND lng = ?", 53.7362)
class PreparedStatementInvalid < ActiveRecordError
end
# Raised when a given database does not exist.
class NoDatabaseError < StatementInvalid
  include ActiveSupport::ActionableError

  # Offers "Create database" as an actionable fix (e.g. from the error page).
  action "Create database" do
    ActiveRecord::Tasks::DatabaseTasks.create_current
  end

  def initialize(message = nil, connection_pool: nil)
    super(message || "Database not found", connection_pool: connection_pool)
  end

  class << self
    # Convenience constructor with step-by-step resolution guidance.
    def db_error(db_name)
      NoDatabaseError.new(<<~MSG)
        Database not found: #{db_name}. Available database configurations can be found in config/database.yml.
        To resolve this error:
        - Create the database by running:
        bin/rails db:create
        - Verify that config/database.yml contains the correct database name.
      MSG
    end
  end
end

# Raised when creating a database if it exists.
class DatabaseAlreadyExists < StatementInvalid
end

# Raised when PostgreSQL returns 'cached plan must not change result type' and
# we cannot retry gracefully (e.g. inside a transaction)
class PreparedStatementCacheExpired < StatementInvalid
end
# Raised on attempt to save stale record. Record is stale when it's being saved in another query after
# instantiation, for example, when two users edit the same wiki page and one starts editing and saves
# the page before the other.
#
# Read more about optimistic locking in ActiveRecord::Locking module
# documentation.
class StaleObjectError < ActiveRecordError
  attr_reader :record, :attempted_action

  def initialize(record = nil, attempted_action = nil)
    if record && attempted_action
      @record = record
      @attempted_action = attempted_action
      super("Attempted to #{attempted_action} a stale object: #{record.class.name}.")
    else
      # Argument-less form keeps the exception marshallable/re-raisable.
      super("Stale object error.")
    end
  end
end
# Raised when association is being configured improperly or user tries to use
# offset and limit together with
# {ActiveRecord::Base.has_many}[rdoc-ref:Associations::ClassMethods#has_many] or
# {ActiveRecord::Base.has_and_belongs_to_many}[rdoc-ref:Associations::ClassMethods#has_and_belongs_to_many]
# associations.
class ConfigurationError < ActiveRecordError
end

# Raised on attempt to update record that is instantiated as read only.
class ReadOnlyRecord < ActiveRecordError
end

# Raised on attempt to lazily load records that are marked as strict loading.
#
# You can resolve this error by eager loading marked records before accessing
# them. The
# {Eager Loading Associations}[https://guides.rubyonrails.org/active_record_querying.html#eager-loading-associations]
# guide covers solutions, such as using
# {ActiveRecord::Base.includes}[rdoc-ref:QueryMethods#includes].
class StrictLoadingViolationError < ActiveRecordError
end
# {ActiveRecord::Base.transaction}[rdoc-ref:Transactions::ClassMethods#transaction]
# uses this exception to distinguish a deliberate rollback from other exceptional situations.
# Normally, raising an exception will cause the
# {.transaction}[rdoc-ref:Transactions::ClassMethods#transaction] method to rollback
# the database transaction *and* pass on the exception. But if you raise an
# ActiveRecord::Rollback exception, then the database transaction will be rolled back,
# without passing on the exception.
#
# For example, you could do this in your controller to rollback a transaction:
#
#   class BooksController < ActionController::Base
#     def create
#       Book.transaction do
#         book = Book.new(params[:book])
#         book.save!
#         if today_is_friday?
#           # The system must fail on Friday so that our support department
#           # won't be out of job. We silently rollback this transaction
#           # without telling the user.
#           raise ActiveRecord::Rollback
#         end
#       end
#       # ActiveRecord::Rollback is the only exception that won't be passed on
#       # by ActiveRecord::Base.transaction, so this line will still be reached
#       # even on Friday.
#       redirect_to root_url
#     end
#   end
class Rollback < ActiveRecordError
end

# Raised when attribute has a name reserved by Active Record (when attribute
# has name of one of Active Record instance methods).
class DangerousAttributeError < ActiveRecordError
end

# Raised when unknown attributes are supplied via mass assignment.
UnknownAttributeError = ActiveModel::UnknownAttributeError

# Raised when an error occurred while doing a mass assignment to an attribute through the
# {ActiveRecord::Base#attributes=}[rdoc-ref:ActiveModel::AttributeAssignment#attributes=] method.
# The exception has an +attribute+ property that is the name of the offending attribute.
class AttributeAssignmentError < ActiveRecordError
  attr_reader :exception, :attribute

  def initialize(message = nil, exception = nil, attribute = nil)
    super(message)
    @exception = exception
    @attribute = attribute
  end
end

# Raised when there are multiple errors while doing a mass assignment through the
# {ActiveRecord::Base#attributes=}[rdoc-ref:ActiveModel::AttributeAssignment#attributes=]
# method. The exception has an +errors+ property that contains an array of AttributeAssignmentError
# objects, each corresponding to the error while assigning to an attribute.
class MultiparameterAssignmentErrors < ActiveRecordError
  attr_reader :errors

  # NOTE(review): unlike sibling errors this does not call +super+, so the
  # exception message stays at its default -- presumably intentional; confirm.
  def initialize(errors = nil)
    @errors = errors
  end
end
# Raised when a primary key is needed, but not specified in the schema or model.
class UnknownPrimaryKey < ActiveRecordError
  attr_reader :model

  def initialize(model = nil, description = nil)
    if model
      message = "Unknown primary key for table #{model.table_name} in model #{model}."
      message += "\n#{description}" if description
      @model = model
      super(message)
    else
      # Argument-less form keeps the exception marshallable/re-raisable.
      super("Unknown primary key.")
    end
  end
end

# Raised when a relation cannot be mutated because it's already loaded.
#
#   class Task < ActiveRecord::Base
#   end
#
#   relation = Task.all
#   relation.load
#   relation.loaded? # => true
#
#   # Methods which try to mutate a loaded relation fail.
#   relation.where!(title: 'TODO') # => ActiveRecord::UnmodifiableRelation
#   relation.limit!(5)             # => ActiveRecord::UnmodifiableRelation
class UnmodifiableRelation < ActiveRecordError
end
# TransactionIsolationError will be raised under the following conditions:
#
# * The adapter does not support setting the isolation level
# * You are joining an existing open transaction
# * You are creating a nested (savepoint) transaction
#
# The mysql2, trilogy, and postgresql adapters support setting the transaction isolation level.
class TransactionIsolationError < ActiveRecordError
end

# TransactionRollbackError will be raised when a transaction is rolled
# back by the database due to a serialization failure or a deadlock.
#
# These exceptions should not be generally rescued in nested transaction
# blocks, because they have side-effects in the actual enclosing transaction
# and internal Active Record state. They can be rescued if you are above the
# root transaction block, though.
#
# In that case, beware of transactional tests, however, because they run test
# cases in their own umbrella transaction. If you absolutely need to handle
# these exceptions in tests please consider disabling transactional tests in
# the affected test class (<tt>self.use_transactional_tests = false</tt>).
#
# Due to the aforementioned side-effects, this exception should not be raised
# manually by users.
#
# See the following:
#
# * https://www.postgresql.org/docs/current/static/transaction-iso.html
# * https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html#error_er_lock_deadlock
class TransactionRollbackError < StatementInvalid
end

# AsynchronousQueryInsideTransactionError will be raised when attempting
# to perform an asynchronous query from inside a transaction
class AsynchronousQueryInsideTransactionError < ActiveRecordError
end

# SerializationFailure will be raised when a transaction is rolled
# back by the database due to a serialization failure.
#
# This is a subclass of TransactionRollbackError, please make sure to check
# its documentation to be aware of its caveats.
class SerializationFailure < TransactionRollbackError
end

# Deadlocked will be raised when a transaction is rolled
# back by the database when a deadlock is encountered.
#
# This is a subclass of TransactionRollbackError, please make sure to check
# its documentation to be aware of its caveats.
class Deadlocked < TransactionRollbackError
end

# MissingRequiredOrderError is raised when a relation requires ordering but
# lacks any +order+ values in scope or any model order columns to use.
class MissingRequiredOrderError < ActiveRecordError
end

# IrreversibleOrderError is raised when a relation's order is too complex for
# +reverse_order+ to automatically reverse.
class IrreversibleOrderError < ActiveRecordError
end
# Superclass for errors that have been aborted (either by client or server).
class QueryAborted < StatementInvalid
end

# LockWaitTimeout will be raised when lock wait timeout exceeded.
class LockWaitTimeout < StatementInvalid
end

# StatementTimeout will be raised when statement timeout exceeded.
class StatementTimeout < QueryAborted
end

# QueryCanceled will be raised when canceling statement due to user request.
class QueryCanceled < QueryAborted
end

# AdapterTimeout will be raised when the database client times out while
# waiting for a response from the server.
class AdapterTimeout < QueryAborted
end

# ConnectionFailed will be raised when the network connection to the
# database fails while sending a query or waiting for its result.
class ConnectionFailed < QueryAborted
end

# UnknownAttributeReference is raised when an unknown and potentially unsafe
# value is passed to a query method. For example, passing a non column name
# value to a relation's #order method might cause this exception.
#
# When working around this exception, caution should be taken to avoid SQL
# injection vulnerabilities when passing user-provided values to query
# methods. Known-safe values can be passed to query methods by wrapping them
# in Arel.sql.
#
# For example, the following code would raise this exception:
#
#   Post.order("REPLACE(title, 'misc', 'zzzz') asc").pluck(:id)
#
# The desired result can be accomplished by wrapping the known-safe string
# in Arel.sql:
#
#   Post.order(Arel.sql("REPLACE(title, 'misc', 'zzzz') asc")).pluck(:id)
#
# Again, such a workaround should *not* be used when passing user-provided
# values, such as request parameters or model attributes to query methods.
class UnknownAttributeReference < ActiveRecordError
end

# DatabaseVersionError will be raised when the database version is not supported, or when
# the database version cannot be determined.
class DatabaseVersionError < ActiveRecordError
end

# Raised when interacting with an association that has been marked as deprecated.
class DeprecatedAssociationError < ActiveRecordError
end
end
require "active_record/associations/errors" | ruby | github | https://github.com/rails/rails | activerecord/lib/active_record/errors.rb |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#update:2014-09-25 by liufeily@163.com
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def SelfPaginator(request, List, Limit):
    """Paginate *List* and return the page requested by the client.

    Usage:
      1. Import in a view:
         ``from website.common.CommonPaginator import SelfPaginator``
      2. Call with three arguments:
         - ``request``: the incoming request (the ``page`` GET parameter
           selects the page),
         - ``List``: the items to paginate (typically a queryset such as
           ``Model.objects.all()``),
         - ``Limit``: number of items per page.
         e.g. ``lst = SelfPaginator(request, mList, 5)``
      3. Pass the returned page object to the template context,
         e.g. ``kwvars = {'lPage': lst}``.
      4. Iterate it in the template: ``{% for i in lPage %} ... {% endfor %}``.
      5. Include the pagination widget:
         ``{% include "common/paginator.html" %}``.
    """
    paginator = Paginator(List, int(Limit))
    requested_page = request.GET.get('page')
    try:
        page_obj = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing or non-numeric "page" parameter: fall back to page 1.
        page_obj = paginator.page(1)
    except EmptyPage:
        # Page number past the end: clamp to the last page.
        page_obj = paginator.page(paginator.num_pages)
    return page_obj
if __name__=='__main__':
    # NOTE(review): illustrative usage only -- neither `User` nor `request`
    # is defined in this module, so running this file directly would raise
    # NameError. Kept as documentation of the intended call pattern.
    rList = User.objects.all()
    lst = SelfPaginator(request,rList,20)
"""Integration tests for acquisition of COVID hospitalization."""
# standard library
import unittest
from unittest.mock import MagicMock
# first party
from delphi.epidata.acquisition.covid_hosp.common.database import Database
from delphi.epidata.acquisition.covid_hosp.common.test_utils import UnitTestUtils
from delphi.epidata.client.delphi_epidata import Epidata
from delphi.epidata.acquisition.covid_hosp.state_timeseries.update import Update
import delphi.operations.secrets as secrets
# third party
from freezegun import freeze_time
# py3tester coverage target (equivalent to `import *`)
__test_target__ = \
'delphi.epidata.acquisition.covid_hosp.state_timeseries.update'
class AcquisitionTests(unittest.TestCase):
    """End-to-end acquisition tests against the local epidata stack.

    Requires the dockerized `delphi_web_epidata` and `delphi_database_epidata`
    services; only calls to external hosts are mocked.
    """

    def setUp(self):
        """Perform per-test setup."""
        # configure test data
        self.test_utils = UnitTestUtils(__file__)
        # use the local instance of the Epidata API
        Epidata.BASE_URL = 'http://delphi_web_epidata/epidata/api.php'
        # use the local instance of the epidata database
        secrets.db.host = 'delphi_database_epidata'
        secrets.db.epi = ('user', 'pass')
        # clear relevant tables so each test starts from an empty state
        with Database.connect() as db:
            with db.new_cursor() as cur:
                cur.execute('truncate table covid_hosp_state_timeseries')
                cur.execute('truncate table covid_hosp_meta')

    @freeze_time("2021-03-17")
    def test_acquire_dataset(self):
        """Acquire a new dataset, then a revised issue, and verify `as_of`."""
        # only mock out network calls to external hosts
        mock_network = MagicMock()
        mock_network.fetch_metadata.return_value = \
            self.test_utils.load_sample_metadata()
        mock_network.fetch_dataset.return_value = \
            self.test_utils.load_sample_dataset()
        # make sure the data does not yet exist
        with self.subTest(name='no data yet'):
            response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))
            self.assertEqual(response['result'], -2)
        # acquire sample data into local database
        with self.subTest(name='first acquisition'):
            acquired = Update.run(network=mock_network)
            self.assertTrue(acquired)
        # make sure the data now exists
        with self.subTest(name='initial data checks'):
            response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))
            self.assertEqual(response['result'], 1)
            self.assertEqual(len(response['epidata']), 1)
            row = response['epidata'][0]
            self.assertEqual(row['state'], 'WY')
            self.assertEqual(row['date'], 20200826)
            self.assertEqual(row['issue'], 20210315)
            self.assertEqual(row['critical_staffing_shortage_today_yes'], 2)
            actual = row['inpatient_bed_covid_utilization']
            expected = 0.011946591707659873
            self.assertAlmostEqual(actual, expected)
            self.assertIsNone(row['critical_staffing_shortage_today_no'])
            # expect 61 fields per row (63 database columns, except `id` and `record_type`)
            self.assertEqual(len(row), 61)
        # re-acquisition of the same dataset should be a no-op
        with self.subTest(name='second acquisition'):
            acquired = Update.run(network=mock_network)
            self.assertFalse(acquired)
        # make sure the data still exists
        with self.subTest(name='final data checks'):
            response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))
            self.assertEqual(response['result'], 1)
            self.assertEqual(len(response['epidata']), 1)
        # acquire new data into local database
        # (fix: this subTest was previously also named 'first acquisition',
        # which made failure reports ambiguous)
        with self.subTest(name='third acquisition'):
            # acquire new data with 3/16 issue date
            mock_network.fetch_metadata.return_value = \
                self.test_utils.load_sample_metadata("metadata2.csv")
            mock_network.fetch_dataset.return_value = \
                self.test_utils.load_sample_dataset("dataset2.csv")
            acquired = Update.run(network=mock_network)
            self.assertTrue(acquired)
        with self.subTest(name='as_of checks'):
            response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))
            self.assertEqual(len(response['epidata']), 2)
            row = response['epidata'][1]
            self.assertEqual(row['date'], 20200827)
            # previous data should have 3/15 issue date
            response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), as_of=20210315)
            self.assertEqual(len(response['epidata']), 1)
            row = response['epidata'][0]
            self.assertEqual(row['date'], 20200826)
            # no data before 3/15
            response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101), as_of=20210314)
            self.assertEqual(response['result'], -2)
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibcheck plugin to add tex keys """
from invenio.bibrecord import record_add_field
from invenio.sequtils_texkey import TexkeySeq, TexkeyNoAuthorError
def check_record(record, texkey_field="035__a", extra_subfields=()):
    """
    Add a tex key to a record, checking that it doesn't have one already.

    @param record: bibcheck record, amended in place
    @param texkey_field: 6-character field spec (3-char tag + 2 indicators +
        subfield code) where the texkey is stored, e.g. "035__a"
    @param extra_subfields: iterable of (code, value) pairs added alongside
        the generated texkey subfield
    """
    tag = texkey_field[:3]
    ind1, ind2, subfield = texkey_field[3:]
    # A SPIRES/INSPIRE provenance ($9) plus a non-empty $z means a texkey
    # was already assigned by the legacy systems.
    provenances = list(record.iterfield(texkey_field[:5] + "9"))
    if len(provenances) and provenances[0][1] in ("SPIRESTeX", "INSPIRETeX"):
        for _, val in record.iterfield(texkey_field[:5] + "z"):
            if val:
                return  # Record already has a texkey
    if len(list(record.iterfield(texkey_field))) == 0:
        try:
            texkey = TexkeySeq().next_value(bibrecord=record)
        except TexkeyNoAuthorError:
            record.warn("No first author or collaboration")
            return
        # Wrap map() in list(): on Python 3 map() returns an iterator, and
        # list + iterator raises TypeError.
        subfields_to_add = [(subfield, texkey)] + list(map(tuple, extra_subfields))
        record_add_field(record, tag=tag, ind1=ind1, ind2=ind2,
                         subfields=subfields_to_add)
        record.set_amended("Added Tex key '%s' to field %s" % (texkey, texkey_field))
__author__ = 'jianxun'
import db_config
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
import json_wrapper

# NOTE: the engine (and thus a DB connection pool) is configured at import
# time from db_config; pool_recycle=7200 recycles connections before MySQL's
# default wait_timeout closes them, and charset=utf8 forces UTF-8 I/O.
engine = create_engine('mysql://%s:%s@%s:%s/%s?charset=utf8' % (db_config.USERNAME,
                                                                db_config.PASSWORD,
                                                                db_config.HOST,
                                                                db_config.PORT,
                                                                db_config.DB),
                       encoding="utf-8", pool_recycle=7200, echo=False)
Base = declarative_base()
# Session factory bound to the module-level engine; each request for a
# Session() yields an independent unit of work.
Session = sessionmaker()
Session.configure(bind=engine)
class DataInfo(Base):
    """ORM mapping for the `datainfo` table: one timestamped, typed record
    whose attributes are stored as a JSON-encoded string."""
    __tablename__ = 'datainfo'
    id = Column(Integer, primary_key=True)
    data_type_id = Column(Integer)
    attrs = Column(String)  # JSON-encoded attribute payload
    time = Column(DateTime)

    def to_dict(self):
        """Return a plain-dict view of this row.

        Fix: the original read ``self.attr`` (and exposed the key ``'attr'``),
        but the mapped column is ``attrs`` -- the method unconditionally
        raised AttributeError, so no caller could have relied on the old key.
        """
        return {'id': self.id, 'attrs': self.attrs,
                'data_type_id': self.data_type_id, 'time': self.time}
def insert_data(data):
    """Persist one record to the `datainfo` table.

    :param data: mapping with keys 'data_type_id' (int), 'attrs'
        (JSON-serializable object) and 'time' (datetime)
    :return: primary key of the newly inserted row
    """
    session = Session()
    try:
        info = DataInfo(data_type_id=data['data_type_id'],
                        attrs=json_wrapper.dumps(data['attrs']),
                        time=data['time'])
        session.add(info)
        session.commit()
        return info.id
    finally:
        # Fix: always return the connection to the pool, even when add/commit
        # raises (the original leaked the session on error).
        session.close()
def query_data(data_type_id, start_time, end_time):
    """Return all samples of `data_type_id` with start_time <= time <= end_time
    as a list of dicts with keys 'id', 'attrs' (decoded) and 'time'."""
    session = Session()
    query = session.query(DataInfo.id, DataInfo.attrs, DataInfo.time)\
        .filter(DataInfo.time >= start_time, DataInfo.time <= end_time, DataInfo.data_type_id == data_type_id)
    matches = []
    for row_id, raw_attrs, row_time in query:
        matches.append({'id': row_id, 'attrs': json_wrapper.loads(raw_attrs), 'time': row_time})
    session.close()
    return matches
# -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.10.1'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .module import Module
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True | unknown | codeparrot/codeparrot-clean | ||
# Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import getpass
import logging
import os
import platform
import re
import shlex
import subprocess
import sys
import webbrowser
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.platforminfo import PlatformInfo
_log = logging.getLogger(__name__)
try:
import readline
except ImportError:
if sys.platform != "win32":
# There is no readline module for win32, not much to do except cry.
_log.warn("Unable to import readline.")
class User(object):
    """Console interaction helper: prompting, list selection, editing,
    paging and opening URLs.  All input goes through raw_input/getpass
    arguments so tests can stub them out.  (Python 2 code.)"""
    DEFAULT_NO = 'n'
    DEFAULT_YES = 'y'
    def __init__(self, platforminfo=None):
        # We cannot get the PlatformInfo object from a SystemHost because
        # User is part of SystemHost itself.
        self._platforminfo = platforminfo or PlatformInfo(sys, platform, Executive())
    # FIXME: These are @classmethods because bugzilla.py doesn't have a Tool object (thus no User instance).
    @classmethod
    def prompt(cls, message, repeat=1, raw_input=raw_input):
        """Show `message` and return the first non-empty response, asking
        at most `repeat` times; may return an empty/None response."""
        response = None
        while (repeat and not response):
            repeat -= 1
            response = raw_input(message)
        return response
    @classmethod
    def prompt_password(cls, message, repeat=1):
        """Like prompt(), but with terminal echo disabled (getpass)."""
        return cls.prompt(message, repeat=repeat, raw_input=getpass.getpass)
    @classmethod
    def prompt_with_multiple_lists(cls, list_title, subtitles, lists, can_choose_multiple=False, raw_input=raw_input):
        """Print several captioned lists as one continuously numbered menu
        and return the user's chosen item(s)."""
        item_index = 0
        cumulated_list = []
        print list_title
        for i in range(len(subtitles)):
            print "\n" + subtitles[i]
            for item in lists[i]:
                item_index += 1
                print "%2d. %s" % (item_index, item)
            cumulated_list += lists[i]
        return cls._wait_on_list_response(cumulated_list, can_choose_multiple, raw_input)
    @classmethod
    def _wait_on_list_response(cls, list_items, can_choose_multiple, raw_input):
        """Loop until a valid selection is entered.  Returns one item, or a
        list of items when can_choose_multiple (supports comma-separated
        numbers, inclusive ranges like 3-7, and "all")."""
        while True:
            if can_choose_multiple:
                response = cls.prompt("Enter one or more numbers (comma-separated) or ranges (e.g. 3-7), or \"all\": ", raw_input=raw_input)
                if not response.strip() or response == "all":
                    return list_items
                try:
                    indices = []
                    for value in re.split("\s*,\s*", response):
                        parts = value.split('-')
                        if len(parts) == 2:
                            # "a-b" is 1-based and inclusive of both ends
                            indices += range(int(parts[0]) - 1, int(parts[1]))
                        else:
                            indices.append(int(value) - 1)
                except ValueError, err:
                    continue
                return [list_items[i] for i in indices]
            else:
                try:
                    result = int(cls.prompt("Enter a number: ", raw_input=raw_input)) - 1
                except ValueError, err:
                    continue
                return list_items[result]
    @classmethod
    def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
        """Print a single numbered list and return the chosen item(s)."""
        print list_title
        i = 0
        for item in list_items:
            i += 1
            print "%2d. %s" % (i, item)
        return cls._wait_on_list_response(list_items, can_choose_multiple, raw_input)
    def edit(self, files):
        """Open `files` in $EDITOR (default vi) and wait for it to exit."""
        editor = os.environ.get("EDITOR") or "vi"
        args = shlex.split(editor)
        # Note: Not thread safe: http://bugs.python.org/issue2320
        subprocess.call(args + files)
    def _warn_if_application_is_xcode(self, edit_application):
        # Xcode.app returns immediately instead of blocking until the edit
        # is finished, hence the suggestion below.
        if "Xcode" in edit_application:
            print "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\"."
    def edit_changelog(self, files):
        """Edit ChangeLog files, preferring $CHANGE_LOG_EDIT_APPLICATION on
        Mac; falls back to plain edit() everywhere else."""
        edit_application = os.environ.get("CHANGE_LOG_EDIT_APPLICATION")
        if edit_application and self._platforminfo.is_mac():
            # On Mac we support editing ChangeLogs using an application.
            args = shlex.split(edit_application)
            print "Using editor in the CHANGE_LOG_EDIT_APPLICATION environment variable."
            print "Please quit the editor application when done editing."
            self._warn_if_application_is_xcode(edit_application)
            subprocess.call(["open", "-W", "-n", "-a"] + args + files)
            return
        self.edit(files)
    def page(self, message):
        """Pipe `message` through $PAGER (default less); IOErrors ignored."""
        pager = os.environ.get("PAGER") or "less"
        try:
            # Note: Not thread safe: http://bugs.python.org/issue2320
            child_process = subprocess.Popen([pager], stdin=subprocess.PIPE)
            child_process.communicate(input=message)
        except IOError, e:
            pass
    def confirm(self, message=None, default=DEFAULT_YES, raw_input=raw_input):
        """Ask a yes/no question; an empty answer takes `default`.
        Returns True only for the answer 'y' (case-insensitive)."""
        if not message:
            message = "Continue?"
        choice = {'y': 'Y/n', 'n': 'y/N'}[default]
        response = raw_input("%s [%s]: " % (message, choice))
        if not response:
            response = default
        return response.lower() == 'y'
    def can_open_url(self):
        """True when a webbrowser controller is available."""
        try:
            webbrowser.get()
            return True
        except webbrowser.Error, e:
            return False
    def open_url(self, url):
        """Open `url` in the default browser; warns when no controller is
        available but still attempts the open."""
        if not self.can_open_url():
            _log.warn("Failed to open %s" % url)
        webbrowser.open(url)
---
- name: Include tasks that have a failure in a block
hosts: localhost
tasks:
- include_tasks: block_fail_tasks.yml | unknown | github | https://github.com/ansible/ansible | test/integration/targets/blocks/block_fail.yml |
% This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
**Example**
```esql
FROM airports
| EVAL geohex = ST_GEOHEX(location, 1)
| STATS
count = COUNT(geohex),
centroid = ST_CENTROID_AGG(location)
BY geohex
| WHERE count >= 10
| EVAL geohexString = TO_STRING(geohex)
| KEEP count, centroid, geohexString
| SORT count DESC, geohexString ASC
```
| count:long | centroid:geo_point | geohexString:keyword |
| --- | --- | --- |
| 22 | POINT (7.250850197689777 48.21363834643059) | 811fbffffffffff |
| 18 | POINT (-80.64959161449224 40.04119813675061) | 812abffffffffff |
| 17 | POINT (-0.7606179875266903 52.86413913565304) | 81197ffffffffff |
| 13 | POINT (22.53157936179867 41.98255742864254) | 811efffffffffff |
| 13 | POINT (78.30096947387435 26.073904778951636) | 813dbffffffffff |
| 12 | POINT (-76.39781514415517 45.16300531569868) | 812bbffffffffff |
| 12 | POINT (-100.30120467301458 20.114154297625646) | 8149bffffffffff |
| 11 | POINT (18.037187419831753 48.66540593306788) | 811e3ffffffffff |
| 11 | POINT (-83.42379064553164 33.18388901439241) | 8144fffffffffff |
| 11 | POINT (-99.4237939513881 27.100012352774765) | 8148bffffffffff |
| 10 | POINT (128.01009018346667 35.8699960866943) | 8130fffffffffff | | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/query-languages/esql/_snippets/functions/examples/st_geohex.md |
# -*- coding: utf-8 -*-
################################################################
### common functions for data structures, file name manipulation, etc.
################################################################
import os,os.path
import re
import numpy
import unicodedata
import sys
import warnings
import inspect
import glob
from numpy import *
from scipy.ndimage import morphology
import ligatures
import multiprocessing
import lstm
import pylab
from pylab import imshow
import morph
from toplevel import *
################################################################
### exceptions
################################################################
class OcropusException(Exception):
    """Base class for all OCRopus exceptions."""
    # Nonzero: a traceback should be shown when this exception escapes.
    trace = 1
    def __init__(self,*args,**kw):
        Exception.__init__(self,*args,**kw)
class Unimplemented(OcropusException):
    """Exception raised when a feature is unimplemented."""
    trace = 1
    # NOTE(review): the string below is a no-op statement (it is not first
    # in the class body, so it is not a docstring); kept verbatim.
    "Exception raised when a feature is unimplemented."
    def __init__(self,s):
        # Reports the name of the calling function, not `s`.
        Exception.__init__(self,inspect.stack()[1][3])
class Internal(OcropusException):
    """Internal error (message text copied from Unimplemented upstream)."""
    trace = 1
    # NOTE(review): no-op string statement, kept verbatim.
    "Exception raised when a feature is unimplemented."
    def __init__(self,s):
        # Reports the name of the calling function, not `s`.
        Exception.__init__(self,inspect.stack()[1][3])
class RecognitionError(OcropusException):
    """Some kind of error during recognition."""
    trace = 1
    # NOTE(review): no-op string statement, kept verbatim.
    "Some kind of error during recognition."
    def __init__(self,explanation,**kw):
        # Keep the raw context and fold a summary of each item into the message.
        self.context = kw
        s = [explanation]
        s += ["%s=%s"%(k,summary(kw[k])) for k in kw]
        message = " ".join(s)
        Exception.__init__(self,message)
class Warning(OcropusException):
    """Non-fatal condition; no traceback is shown (trace = 0).
    NOTE(review): shadows the builtin Warning within this module."""
    trace = 0
    def __init__(self,*args,**kw):
        OcropusException.__init__(self,*args,**kw)
class BadClassLabel(OcropusException):
    """Exception for bad class labels in a dataset or input."""
    trace = 0
    # NOTE(review): no-op string statement, kept verbatim.
    "Exception for bad class labels in a dataset or input."
    def __init__(self,s):
        Exception.__init__(self,s)
class BadImage(OcropusException):
    """An input image failed validation."""
    trace = 0
    def __init__(self,*args,**kw):
        # NOTE(review): **kw is accepted but silently dropped here.
        OcropusException.__init__(self,*args)
class BadInput(OcropusException):
    """Some other input (argument, spec, text) failed validation."""
    trace = 0
    def __init__(self,*args,**kw):
        OcropusException.__init__(self,*args,**kw)
class FileNotFound(OcropusException):
    """Some file-not-found error during OCRopus processing."""
    trace = 0
    # NOTE(review): this string is a no-op statement (not first in the
    # class body); the docstring above carries the documentation.
    """Some file-not-found error during OCRopus processing."""
    def __init__(self,fname):
        # remember the offending path for __str__
        self.fname = fname
    def __str__(self):
        return "file not found %s"%(self.fname,)
pickle_mode = 2
def deprecated(f):
    """Decorator that emits a one-time "DEPRECATED" notice for `f` on
    stderr and otherwise forwards all calls unchanged.

    BUG FIX: the original defined a nested `_wrap` but never returned a
    wrapper from `deprecated` itself (the decorator evaluated to None),
    returned `_wrap` instead of `_wrapper`, and rebound `warned` without
    a closure cell -- it could never have worked.
    NOTE(review): this definition is shadowed by a second `deprecated`
    further down in this module.
    """
    state = {"warned": 0}
    def _wrapper(*args, **kw):
        if not state["warned"]:
            sys.stderr.write("%s has been DEPRECATED\n" % (f,))
            state["warned"] = 1
        return f(*args, **kw)
    return _wrapper
################################################################
# text normalization
################################################################
import chars
replacements = chars.replacements
def normalize_text(s):
    """Apply standard Unicode normalizations for OCR.

    This eliminates common ambiguities and weird unicode characters:
    NFC-composes the string, collapses whitespace runs, strips newlines
    and leading/trailing blanks, then applies the replacement table from
    the `chars` module.  (Python 2 only: uses `unicode` and ur"" literals.)"""
    s = unicode(s)
    s = unicodedata.normalize('NFC',s)
    s = re.sub(ur'\s+(?u)',' ',s)    # collapse whitespace runs to one space
    s = re.sub(ur'\n(?u)','',s)      # drop any remaining newlines
    s = re.sub(ur'^\s+(?u)','',s)    # strip leading whitespace
    s = re.sub(ur'\s+$(?u)','',s)    # strip trailing whitespace
    for m,r in replacements:
        s = re.sub(unicode(m),unicode(r),s)
    return s
def project_text(s,kind="exact"):
    """Project text onto a smaller subset of characters
    for comparison.

    Supported kinds: exact, nospace, spletdig (space/letters/digits),
    letdig, letters, digits, lnc (upper-cased letters only).
    Raises BadInput for an unknown kind."""
    s = normalize_text(s)
    s = re.sub(ur'( *[.] *){4,}',u'....',s) # dot rows
    s = re.sub(ur'[~_]',u'',s) # drop tildes and underscores
    if kind=="exact":
        return s
    if kind=="nospace":
        return re.sub(ur'\s','',s)
    if kind=="spletdig":
        return re.sub(ur'[^A-Za-z0-9 ]','',s)
    if kind=="letdig":
        return re.sub(ur'[^A-Za-z0-9]','',s)
    if kind=="letters":
        return re.sub(ur'[^A-Za-z]','',s)
    if kind=="digits":
        return re.sub(ur'[^0-9]','',s)
    if kind=="lnc":
        s = s.upper()
        return re.sub(ur'[^A-Z]','',s)
    raise BadInput("unknown normalization: "+kind)
################################################################
### Text I/O
################################################################
import codecs
def read_text(fname,nonl=1,normalize=1):
    """Read a UTF-8 text file and return its contents.

    With nonl set (default) a single trailing newline is stripped; with
    normalize set (default) the result is passed through normalize_text()
    for OCR processing."""
    with codecs.open(fname,"r","utf-8") as stream:
        text = stream.read()
    if nonl and text.endswith('\n'):
        text = text[:-1]
    return normalize_text(text) if normalize else text
def write_text(fname,text,nonl=0,normalize=1):
    """Write text to a UTF-8 file.

    By default the text is normalized for OCR processing with
    `normalize_text` and a trailing newline is ensured; pass nonl=1 to
    suppress the newline, normalize=0 to write the text verbatim."""
    if normalize:
        text = normalize_text(text)
    with codecs.open(fname,"w","utf-8") as stream:
        stream.write(text)
        # BUG FIX: the original indexed text[-1] unconditionally, raising
        # IndexError for empty text; endswith handles "" safely.
        if not nonl and not text.endswith('\n'):
            stream.write('\n')
################################################################
### Image I/O
################################################################
import PIL
def pil2array(im,alpha=0):
    """Convert a PIL image into a numpy uint8 array.

    Mode "L" gives a rank-2 (h,w) array, "RGB" gives (h,w,3), "RGBA"
    gives (h,w,4) or (h,w,3) when `alpha` is 0; any other mode is first
    converted to grayscale.  (Uses the legacy PIL tostring() API.)"""
    if im.mode=="L":
        a = numpy.fromstring(im.tostring(),'B')
        a.shape = im.size[1],im.size[0]   # PIL size is (w,h); numpy wants (h,w)
        return a
    if im.mode=="RGB":
        a = numpy.fromstring(im.tostring(),'B')
        a.shape = im.size[1],im.size[0],3
        return a
    if im.mode=="RGBA":
        a = numpy.fromstring(im.tostring(),'B')
        a.shape = im.size[1],im.size[0],4
        if not alpha: a = a[:,:,:3]   # drop the alpha channel
        return a
    # fallback: convert anything else to grayscale first
    return pil2array(im.convert("L"))
def array2pil(a):
    """Convert a numpy array into a PIL image.

    uint8 rank-2 arrays become mode "L", uint8 rank-3 become "RGB",
    float32 rank-2 become mode "F"; anything else raises.
    NOTE(review): uses frombytes for uint8 but the legacy fromstring for
    float32 -- inconsistent, kept as-is."""
    if a.dtype==dtype("B"):
        if a.ndim==2:
            return PIL.Image.frombytes("L",(a.shape[1],a.shape[0]),a.tostring())
        elif a.ndim==3:
            return PIL.Image.frombytes("RGB",(a.shape[1],a.shape[0]),a.tostring())
        else:
            raise OcropusException("bad image rank")
    elif a.dtype==dtype('float32'):
        return PIL.Image.fromstring("F",(a.shape[1],a.shape[0]),a.tostring())
    else:
        raise OcropusException("unknown image type")
def isbytearray(a):
    """True exactly when `a` stores unsigned bytes (uint8)."""
    return a.dtype == dtype('uint8')
def isfloatarray(a):
    """True exactly when `a` holds 32- or 64-bit floats."""
    float_types = (dtype('f'), dtype('float32'), dtype('float64'))
    return a.dtype in float_types
def isintarray(a):
    """True exactly when `a` holds one of the supported integer dtypes
    (uint8 and the signed/unsigned 16/32/64-bit kinds)."""
    int_types = (dtype('B'), dtype('int16'), dtype('int32'), dtype('int64'),
                 dtype('uint16'), dtype('uint32'), dtype('uint64'))
    return a.dtype in int_types
def isintegerarray(a):
    """True exactly when `a` holds 32- or 64-bit integers
    (narrower integer types and uint8 do NOT qualify)."""
    wide_int_types = (dtype('int32'), dtype('int64'), dtype('uint32'), dtype('uint64'))
    return a.dtype in wide_int_types
@checks(str,pageno=int,_=GRAYSCALE)
def read_image_gray(fname,pageno=0):
    """Read an image and returns it as a floating point array.
    The optional page number allows images from files containing multiple
    images to be addressed. Byte and short arrays are rescaled to
    the range 0...1 (unsigned) or -1...1 (signed)."""
    if type(fname)==tuple: fname,pageno = fname
    assert pageno==0   # only single-image files are actually supported
    pil = PIL.Image.open(fname)
    a = pil2array(pil)
    if a.dtype==dtype('uint8'):
        a = a/255.0
    # NOTE(review): not `elif`; harmless for uint8 input (after the
    # division above the dtype is float), but int8 input is handled here.
    if a.dtype==dtype('int8'):
        a = a/127.0
    elif a.dtype==dtype('uint16'):
        a = a/65536.0   # NOTE(review): 65535.0 would map the max to exactly 1
    elif a.dtype==dtype('int16'):
        a = a/32767.0
    elif isfloatarray(a):
        pass
    else:
        # NOTE(review): str + numpy.dtype would itself raise TypeError;
        # confirm before relying on this message.
        raise OcropusException("unknown image type: "+a.dtype)
    if a.ndim==3:
        a = mean(a,2)   # average the channels down to grayscale
    return a
def write_image_gray(fname,image,normalize=0,verbose=0):
    """Write an image to disk. If the image is of floating point
    type, its values are clipped to the range [0,1],
    multiplied by 255 and converted to unsigned bytes. Otherwise,
    the image must be of type unsigned byte.

    NOTE(review): the `normalize` parameter is accepted but unused."""
    if verbose: print "# writing",fname
    if isfloatarray(image):
        image = array(255*clip(image,0.0,1.0),'B')
    assert image.dtype==dtype('B'),"array has wrong dtype: %s"%image.dtype
    im = array2pil(image)
    im.save(fname)
@checks(str,_=ABINARY2)
def read_image_binary(fname,dtype='i',pageno=0):
    """Read an image from disk and return it as a binary image
    of the given dtype.

    Thresholds at the midpoint between the image minimum and maximum;
    color images are first reduced with a per-pixel channel maximum.
    Only pageno 0 is supported."""
    if type(fname)==tuple: fname,pageno = fname
    assert pageno==0
    pil = PIL.Image.open(fname)
    a = pil2array(pil)
    if a.ndim==3: a = amax(a,axis=2)
    return array(a>0.5*(amin(a)+amax(a)),dtype)
@checks(str,ABINARY2)
def write_image_binary(fname,image,verbose=0):
    """Write a binary image to disk. This verifies first that the given image
    is, in fact, binary. The image may be of any type, but must consist of only
    two values."""
    if verbose: print "# writing",fname
    assert image.ndim==2
    # NOTE(review): `midrange` is not defined in this module's visible
    # code; presumably supplied by one of the star imports -- verify.
    image = array(255*(image>midrange(image)),'B')
    im = array2pil(image)
    im.save(fname)
@checks(AINT3,_=AINT2)
def rgb2int(a):
    """Converts a rank 3 array with RGB values stored in the
    last axis into a rank 2 array containing 32 bit RGB values."""
    assert a.ndim==3
    assert a.dtype==dtype('B')
    # pack channels as 0xRRGGBB
    return array(0xffffff&((0x10000*a[:,:,0])|(0x100*a[:,:,1])|a[:,:,2]),'i')
@checks(AINT2,_=AINT3)
def int2rgb(image):
    """Converts a rank 2 array of 32 bit packed RGB values into a rank 3
    uint8 array with the R, G, B channels in the last axis.
    (The original docstring was an inverted copy of rgb2int's.)"""
    assert image.ndim==2
    assert isintarray(image)
    a = zeros(list(image.shape)+[3],'B')
    a[:,:,0] = (image>>16)   # red
    a[:,:,1] = (image>>8)    # green
    a[:,:,2] = image         # blue
    return a
@checks(LIGHTSEG,_=DARKSEG)
def make_seg_black(image):
    """Return a copy of the segmentation with the white background value
    (0xffffff) relabeled as 0."""
    assert isintegerarray(image),"%s: wrong type for segmentation"%image.dtype
    image = image.copy()
    image[image==0xffffff] = 0
    return image
@checks(DARKSEG,_=LIGHTSEG)
def make_seg_white(image):
    """Return a copy of the segmentation with the 0 background relabeled
    as white (0xffffff); inverse of make_seg_black."""
    assert isintegerarray(image),"%s: wrong type for segmentation"%image.dtype
    image = image.copy()
    image[image==0] = 0xffffff
    return image
@checks(str,_=LINESEG)
def read_line_segmentation(fname):
    """Reads a line segmentation, that is an RGB image whose values
    encode the segmentation of a text line. Returns an int array."""
    pil = PIL.Image.open(fname)
    a = pil2array(pil)
    assert a.dtype==dtype('B')
    assert a.ndim==3
    image = rgb2int(a)
    # on disk the background is white; internally it is 0
    result = make_seg_black(image)
    return result
@checks(str,LINESEG)
def write_line_segmentation(fname,image):
    """Writes a line segmentation, that is an RGB image whose values
    encode the segmentation of a text line."""
    # internally the background is 0; on disk it is white
    a = int2rgb(make_seg_white(image))
    im = array2pil(a)
    im.save(fname)
@checks(str,_=PAGESEG)
def read_page_segmentation(fname):
    """Reads a page segmentation, that is an RGB image whose values
    encode the segmentation of a page. Returns an int array."""
    pil = PIL.Image.open(fname)
    a = pil2array(pil)
    assert a.dtype==dtype('B')
    assert a.ndim==3
    segmentation = rgb2int(a)
    # on disk the background is white; internally it is 0
    segmentation = make_seg_black(segmentation)
    return segmentation
@checks(str,PAGESEG)
def write_page_segmentation(fname,image):
    """Writes a page segmentation, that is an RGB image whose values
    encode the segmentation of a page."""
    assert image.ndim==2
    assert image.dtype in [dtype('int32'),dtype('int64')]
    # internally the background is 0; on disk it is white
    a = int2rgb(make_seg_white(image))
    im = array2pil(a)
    im.save(fname)
def iulib_page_iterator(files):
    """Yield (grayscale image, file name) pairs for the given file names."""
    for fname in files:
        image = read_image_gray(fname)
        yield image,fname
def norm_max(a):
    """Scale the array so that its maximum value becomes 1."""
    peak = amax(a)
    return a/peak
def pad_by(image,r,dtype=None):
    """Symmetrically pad a 2D image with `r` zero pixels on every side.

    The result has the requested dtype (default: the dtype of `image`).
    FIXME: replace by scipy version."""
    if dtype is None: dtype = image.dtype
    w,h = image.shape
    # BUG FIX: the original called zeros() without the dtype, so the
    # computed `dtype` was ignored and the result was always float64.
    result = zeros((w+2*r,h+2*r),dtype)
    result[r:(w+r),r:(h+r)] = image
    return result
class RegionExtractor:
    """A class facilitating iterating over the parts of a segmentation.

    Usage: call one of the setImage* methods with a segmentation image,
    then access components via the accessor methods; self.objects[0] is a
    None placeholder so component labels are 1-based."""
    def __init__(self):
        self.cache = {}
    def clear(self):
        # drop and recreate the cache
        del self.cache
        self.cache = {}
    def setImage(self,image):
        return self.setImageMasked(image)
    def setImageMasked(self,image,mask=None,lo=None,hi=None):
        """Set the image to be iterated over. This should be an RGB image,
        ndim==3, dtype=='B'. This picks a subset of the segmentation to iterate
        over, using a mask and lo and hi values.."""
        assert image.dtype==dtype('B') or image.dtype==dtype('i'),"image must be type B or i"
        if image.ndim==3: image = rgb2int(image)
        assert image.ndim==2,"wrong number of dimensions"
        self.image = image
        # NOTE(review): `labels` aliases `image`, so the lo/hi clipping
        # below mutates the array stored in self.image in place -- confirm
        # this is intended.
        labels = image
        if lo is not None: labels[labels<lo] = 0
        if hi is not None: labels[labels>hi] = 0
        if mask is not None: labels = bitwise_and(labels,mask)
        labels,correspondence = morph.renumber_labels_ordered(labels,correspondence=1)
        self.labels = labels
        self.correspondence = correspondence
        # objects[0] is a placeholder; component i lives at objects[i]
        self.objects = [None]+morph.find_objects(labels)
    def setPageColumns(self,image):
        """Set the image to be iterated over. This should be an RGB image,
        ndim==3, dtype=='B'. This iterates over the columns."""
        self.setImageMasked(image,0xff0000,hi=0x800000)
    def setPageParagraphs(self,image):
        """Set the image to be iterated over. This should be an RGB image,
        ndim==3, dtype=='B'. This iterates over the paragraphs (if present
        in the segmentation)."""
        self.setImageMasked(image,0xffff00,hi=0x800000)
    def setPageLines(self,image):
        """Set the image to be iterated over. This should be an RGB image,
        ndim==3, dtype=='B'. This iterates over the lines."""
        self.setImageMasked(image,0xffffff,hi=0x800000)
    def id(self,i):
        """Return the RGB pixel value for this segment."""
        return self.correspondence[i]
    def x0(self,i):
        """Return x0 (column) for the start of the box."""
        return self.bbox(i)[1]
    def x1(self,i):
        """Return x0 (column) for the end of the box."""
        return self.bbox(i)[3]
    def y0(self,i):
        """Return y0 (row) for the start of the box, flipped vertically
        (math coordinates: rows counted from the bottom)."""
        h = self.image.shape[0]
        return h-self.bbox(i)[2]-1
    def y1(self,i):
        """Return y0 (row) for the end of the box, flipped vertically."""
        h = self.image.shape[0]
        return h-self.bbox(i)[0]-1
    def bbox(self,i):
        """Return the bounding box in raster coordinates
        (row0,col0,row1,col1)."""
        r = self.objects[i]
        # print "@@@bbox",i,r
        return (r[0].start,r[1].start,r[0].stop,r[1].stop)
    def bboxMath(self,i):
        """Return the bounding box in math coordinates
        (row0,col0,row1,col1)."""
        h = self.image.shape[0]
        (y0,x0,y1,x1) = self.bbox(i)
        return (h-y1-1,x0,h-y0-1,x1)
    def length(self):
        """Return the number of components."""
        return len(self.objects)
    def mask(self,index,margin=0):
        """Return the mask for component index."""
        b = self.objects[index]
        #print "@@@mask",index,b
        # NOTE(review): `m` is a view into self.labels, so zeroing other
        # labels here mutates the stored label image -- confirm intended.
        m = self.labels[b]
        m[m!=index] = 0
        if margin>0: m = pad_by(m,margin)
        return array(m!=0,'B')
    def extract(self,image,index,margin=0):
        """Return the subimage for component index."""
        h,w = image.shape[:2]
        (r0,c0,r1,c1) = self.bbox(index)
        # mask = self.mask(index,margin=margin)
        return image[max(0,r0-margin):min(h,r1+margin),max(0,c0-margin):min(w,c1+margin),...]
    def extractMasked(self,image,index,grow=0,bg=None,margin=0,dtype=None):
        """Return the masked subimage for component index, elsewhere the bg value."""
        if bg is None: bg = amax(image)
        h,w = image.shape[:2]
        mask = self.mask(index,margin=margin)
        # FIXME ... not circular
        if grow>0: mask = morphology.binary_dilation(mask,iterations=grow)
        mh,mw = mask.shape
        box = self.bbox(index)
        r0,c0,r1,c1 = box
        # NOTE(review): `improc` is not imported anywhere in this module's
        # visible code -- this call would raise NameError; verify.
        subimage = improc.cut(image,(r0,c0,r0+mh-2*margin,c0+mw-2*margin),margin,bg=bg)
        return where(mask,subimage,bg)
################################################################
### Object reading and writing
### This handles reading and writing zipped files directly,
### and it also contains workarounds for changed module/class names.
################################################################
import cPickle
import gzip
def save_object(fname,obj,zip=0):
    """Pickle `obj` to `fname` using protocol 2; a ".gz" suffix (or
    zip>0) selects gzip compression.

    NOTE(review): compression shells out to `gzip -9` via os.popen with
    the file name interpolated into the command line -- not portable and
    unsafe for file names containing quotes; the commented-out
    gzip.GzipFile variant would avoid both issues."""
    if zip==0 and fname.endswith(".gz"):
        zip = 1
    if zip>0:
        # with gzip.GzipFile(fname,"wb") as stream:
        with os.popen("gzip -9 > '%s'"%fname,"wb") as stream:
            cPickle.dump(obj,stream,2)
    else:
        with open(fname,"wb") as stream:
            cPickle.dump(obj,stream,2)
def unpickle_find_global(mname,cname):
    """Resolve module/class names while unpickling.

    Maps the legacy "lstm.lstm" module onto the current `lstm` module
    and imports any module that is not yet loaded.  (Python 2 only:
    uses the exec statement.)"""
    if mname=="lstm.lstm":
        return getattr(lstm,cname)
    if not mname in sys.modules.keys():
        exec "import "+mname
    return getattr(sys.modules[mname],cname)
def load_object(fname,zip=0,nofind=0,verbose=0):
    """Loads an object from disk. By default, this handles zipped files
    and searches in the usual places for OCRopus. It also handles some
    class names that have changed.

    NOTE(review): decompression shells out to `gunzip` via os.popen with
    the file name interpolated into the command line (same caveats as
    save_object)."""
    if not nofind:
        fname = ocropus_find_file(fname)
    if verbose:
        print "# loading object",fname
    if zip==0 and fname.endswith(".gz"):
        zip = 1
    if zip>0:
        # with gzip.GzipFile(fname,"rb") as stream:
        with os.popen("gunzip < '%s'"%fname,"rb") as stream:
            unpickler = cPickle.Unpickler(stream)
            # remap renamed modules/classes during unpickling
            unpickler.find_global = unpickle_find_global
            return unpickler.load()
    else:
        with open(fname,"rb") as stream:
            unpickler = cPickle.Unpickler(stream)
            unpickler.find_global = unpickle_find_global
            return unpickler.load()
################################################################
### Simple record object.
################################################################
class Record:
    """A simple record datatype that allows initialization with
    keyword arguments, as in Record(x=3,y=9)"""
    def __init__(self,**kw):
        for key, value in kw.items():
            setattr(self, key, value)
    def like(self,obj):
        """Copy all attributes of `obj` onto this record; returns self."""
        for key, value in obj.__dict__.items():
            setattr(self, key, value)
        return self
################################################################
### Histograms
################################################################
def chist(l):
    """Simple counting histogram. Takes a list of items
    and returns a list of (count,object) tuples, most frequent first."""
    counts = {}
    for item in l:
        counts[item] = counts.get(item, 0) + 1
    hist = [(n, key) for key, n in counts.items()]
    hist.sort(reverse=True)
    return hist
################################################################
### multiprocessing
################################################################
def number_of_processors():
    """Estimates the number of processors."""
    return multiprocessing.cpu_count()
    # legacy /proc-based variant, kept for reference:
    # return int(os.popen("cat /proc/cpuinfo | grep 'processor.*:' | wc -l").read())
def parallel_map(fun,jobs,parallel=0,chunksize=1):
    """Apply `fun` to every item of `jobs`, yielding the results.

    With parallel<2 the work is done serially and results come in input
    order; otherwise a multiprocessing.Pool of `parallel` workers is used
    and results are yielded in completion order (unordered).  In the
    parallel case `fun` and the job items must be picklable."""
    if parallel<2:
        for e in jobs:
            result = fun(e)
            yield result
    else:
        # BUG FIX: create the pool before entering the try block; the
        # original had Pool() inside the try, so a failing Pool() made the
        # finally clause hit an unbound `pool` (NameError masking the real
        # error).
        pool = multiprocessing.Pool(parallel)
        try:
            for e in pool.imap_unordered(fun,jobs,chunksize):
                yield e
        finally:
            pool.close()
            pool.join()
            del pool
def check_valid_class_label(s):
    """Determines whether the given character is a valid class label.
    Control characters and spaces are not permitted.

    Raises BadClassLabel for invalid labels and for non-string input;
    returns None otherwise.  (Python 2 only: distinguishes `unicode`
    from byte `str`; byte strings must be printable ASCII.)"""
    if type(s)==unicode:
        if re.search(r'[\0-\x20]',s):
            raise BadClassLabel(s)
    elif type(s)==str:
        if re.search(r'[^\x21-\x7e]',s):
            raise BadClassLabel(s)
    else:
        raise BadClassLabel(s)
def summary(x):
    """Summarize a datatype as a string (for display and debugging)."""
    if type(x) == numpy.ndarray:
        return "<ndarray %s %s>" % (x.shape, x.dtype)
    long_string = type(x) == str and len(x) > 10
    if long_string:
        return '"%s..."' % x
    long_list = type(x) == list and len(x) > 10
    if long_list:
        return '%s...' % x
    return str(x)
################################################################
### file name manipulation
################################################################
from default import getlocal
@checks(str,_=str)
def findfile(name,error=1):
    """Resolve `name` to an existing path via ocropus_find_file.

    NOTE(review): the `error` parameter is accepted but unused; a failed
    lookup always raises FileNotFound."""
    result = ocropus_find_file(name)
    return result
@checks(str)
def finddir(name):
    """Find some OCRopus-related resource by looking in a bunch of standard places.
    (This needs to be integrated better with setup.py and the build system.)

    Tries, in order: the name itself, the name under the local prefix,
    then the basename alone and the basename under the local prefix;
    raises FileNotFound when no existing directory matches."""
    local = getlocal()
    path = name
    if os.path.exists(path) and os.path.isdir(path): return path
    path = local+name
    if os.path.exists(path) and os.path.isdir(path): return path
    # fall back to just the basename, locally and under the local prefix
    _,tail = os.path.split(name)
    path = tail
    if os.path.exists(path) and os.path.isdir(path): return path
    path = local+tail
    if os.path.exists(path) and os.path.isdir(path): return path
    raise FileNotFound("file '"+path+"' not found in . or /usr/local/share/ocropus/")
@checks(str)
def allsplitext(path):
    """Split all the pathname extensions, so that "a/b.c.d" -> "a/b", ".c.d" """
    # group(1): directories plus the basename up to its first ".";
    # group(3): the remainder (all extensions together).
    match = re.search(r'((.*/)*[^.]*)([^/]*)',path)
    if not match:
        return path,""
    else:
        return match.group(1),match.group(3)
@checks(str)
def base(path):
    """Return `path` with all extensions stripped (see allsplitext)."""
    return allsplitext(path)[0]
@checks(str,{str,unicode})
def write_text_simple(file,s):
    """Write the given string s to the output file.

    Unicode input is encoded as UTF-8 first.  (NOTE(review): the
    parameter `file` shadows the Python 2 builtin.)"""
    with open(file,"w") as stream:
        if type(s)==unicode: s = s.encode("utf-8")
        stream.write(s)
@checks([str])
def glob_all(args):
    """Given a list of command line arguments, expand all of them with glob.

    An argument of the form "@listfile" is replaced by the non-empty
    lines of that file instead of being globbed.  Raises FileNotFound
    when an argument expands to no files."""
    result = []
    for arg in args:
        # NOTE(review): arg[0] raises IndexError for an empty argument string.
        if arg[0]=="@":
            with open(arg[1:],"r") as stream:
                expanded = stream.read().split("\n")
            expanded = [s for s in expanded if s!=""]
        else:
            expanded = sorted(glob.glob(arg))
        if len(expanded)<1:
            raise FileNotFound("%s: expansion did not yield any files"%arg)
        result += expanded
    return result
@checks([str])
def expand_args(args):
    """Given a list of command line arguments, if the
    length is one, assume it's a book directory and expands it.
    Otherwise returns the arguments unchanged.

    A book directory is expected to contain line images laid out as
    ????/??????.png (page directory / line image)."""
    if len(args)==1 and os.path.isdir(args[0]):
        return sorted(glob.glob(args[0]+"/????/??????.png"))
    else:
        return args
data_paths = [
".",
"./models",
"./data",
"./gui",
"/usr/local/share/ocropus/models",
"/usr/local/share/ocropus/data",
"/usr/local/share/ocropus/gui",
"/usr/local/share/ocropus",
]
def ocropus_find_file(fname,gz=1):
    """Search for OCRopus-related files in common OCRopus install
    directories (as well as the current directory).

    Tries, in order: `fname` as given, `fname`.gz (when gz is set), then
    each entry of data_paths with and without the .gz suffix; returns the
    first existing path or raises FileNotFound."""
    candidates = [fname]
    if gz:
        candidates.append(fname+".gz")
    candidates += [path+"/"+fname for path in data_paths]
    if gz:
        candidates += [path+"/"+fname+".gz" for path in data_paths]
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    raise FileNotFound(fname)
def fvariant(fname,kind,gt=""):
    """Find the file variant corresponding to the given file name.
    Possible file variants are line (or png/bin), nrm, lattice, rseg,
    cseg, aligned, costs, and txt.
    Ground truth files have an extra suffix (usually something like "gt",
    as in 010001.gt.txt or 010001.rseg.gt.png). By default, the variant
    with the same ground truth suffix is produced. The non-ground-truth
    version can be produced with gt="", the ground truth version can
    be produced with gt="gt" (or some other desired suffix); a non-empty
    gt is only allowed for kind "txt"."""
    if gt!="": gt = "."+gt
    stem,_ext = allsplitext(fname)
    # text output is the only kind that may carry a gt suffix
    if kind=="txt":
        return stem+gt+".txt"
    assert gt=="","gt suffix may only be supplied for .txt files (%s,%s,%s)"%(fname,kind,gt)
    # every remaining kind maps onto a fixed suffix
    suffixes = {
        "line": ".bin.png", "png": ".bin.png", "bin": ".bin.png",  # binarized text line image
        "nrm": ".nrm.png",          # normalized line image
        "lattice": gt+".lattice",   # recognition lattice (gt is "" here)
        "rseg": ".rseg.png",        # raw segmentation
        "cseg": ".cseg.png",        # character segmentation
        "aligned": ".aligned",      # text aligned with cseg
        "costs": ".costs",          # per character costs
    }
    if kind in suffixes:
        return stem+suffixes[kind]
    raise BadInput("unknown kind: %s"%kind)
################################################################
### Utility for setting "parameters" on an object: a list of keywords for
### changing instance variables.
################################################################
def set_params(object,kw,warn=1):
    """Given an object and a dictionary of keyword arguments,
    set only those object properties that are already instance
    variables of the given object. Returns a new dictionary
    without the key,value pairs that have been used. If
    all keywords have been used, afterwards, len(kw)==0.

    (The `warn` flag is accepted for backward compatibility but unused,
    as in the original; the parameter name `object` shadows the builtin
    but is kept for interface compatibility.)"""
    # Build the leftover dict instead of deleting from a dict while
    # iterating its items() -- the original pattern raises RuntimeError
    # on Python 3.
    leftover = {}
    for k, v in kw.items():
        if hasattr(object, k):
            setattr(object, k, v)
        else:
            leftover[k] = v
    return leftover
################################################################
### warning and logging
################################################################
def caller():
    """Return "file:line (function)" describing the caller of our caller
    (frame depth 2), for use in error messages."""
    frame = sys._getframe(2)
    info = inspect.getframeinfo(frame)
    result = "%s:%d (%s)"%(info.filename,info.lineno,info.function)
    del frame   # drop the frame reference promptly to avoid reference cycles
    return result
def die(message,*args):
    """Die with an error message.

    `message` is %-formatted with `args`, prefixed with the calling
    location and "FATAL", written to stderr; then the process exits 1."""
    message = message%args
    message = caller()+" FATAL "+message+"\n"
    sys.stderr.write(message)
    sys.exit(1)
def warn(message,*args):
"""Give a warning message."""
message = message%args
message = caller()+" WARNING "+message+"\n"
sys.stderr.write(message)
already_warned = {}
def warn_once(message,*args):
"""Give a warning message, but just once."""
c = caller()
if c in already_warned: return
already_warned[c] = 1
message = message%args
message = c+" WARNING "+message+"\n"
sys.stderr.write(message)
def quick_check_page_components(page_bin,dpi):
"""Quickly check whether the components of page_bin are
reasonable. Returns a value between 0 and 1; <0.5 means that
there is probably something wrong."""
return 1.0
def quick_check_line_components(line_bin,dpi):
"""Quickly check whether the components of line_bin are
reasonable. Returns a value between 0 and 1; <0.5 means that
there is probably something wrong."""
return 1.0
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning,stacklevel=2)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
################################################################
### conversion functions
################################################################
def ustrg2unicode(u,lig=ligatures.lig):
"""Convert an iulib ustrg to a Python unicode string; the
C++ version iulib.ustrg2unicode does weird things for special
symbols like -3"""
result = ""
for i in range(u.length()):
value = u.at(i)
if value>=0:
c = lig.chr(value)
if c is not None:
result += c
else:
result += "<%d>"%value
return result
### code for instantiation native components
def pyconstruct(s):
"""Constructs a Python object from a constructor, an expression
of the form x.y.z.name(args). This ensures that x.y.z is imported.
In the future, more forms of syntax may be accepted."""
env = {}
if "(" not in s:
s += "()"
path = s[:s.find("(")]
if "." in path:
module = path[:path.rfind(".")]
print "import",module
exec "import "+module in env
return eval(s,env)
def mkpython(name):
"""Tries to instantiate a Python class. Gives an error if it looks
like a Python class but can't be instantiated. Returns None if it
doesn't look like a Python class."""
if name is None or len(name)==0:
return None
elif type(name) is not str:
return name()
elif name[0]=="=":
return pyconstruct(name[1:])
elif "(" in name or "." in name:
return pyconstruct(name)
else:
return None
################################################################
### loading and saving components
################################################################
# This code has to deal with a lot of special cases for all the
# different formats we have accrued.
def obinfo(ob):
"""A bit of information about the given object. Returns
the str representation of the object, and if it has a shape,
also includes the shape."""
result = str(ob)
if hasattr(ob,"shape"):
result += " "
result += str(ob.shape)
return result
def save_component(file,object,verbose=0,verify=0):
"""Save an object to disk in an appropriate format. If the object
is a wrapper for a native component (=inherits from
CommonComponent and has a comp attribute, or is in package
ocropus), write it using ocropus.save_component in native format.
Otherwise, write it using Python's pickle. We could use pickle
for everything (since the native components pickle), but that
would be slower and more confusing."""
if hasattr(object,"save_component"):
object.save_component(file)
return
if object.__class__.__name__=="CommonComponent" and hasattr(object,"comp"):
# FIXME -- get rid of this eventually
import ocropus
ocropus.save_component(file,object.comp)
return
if type(object).__module__=="ocropus":
import ocropus
ocropus.save_component(file,object)
return
if verbose:
print "[save_component]"
if verbose:
for k,v in object.__dict__.items():
print ":",k,obinfo(v)
with open(file,"wb") as stream:
pickle.dump(object,stream,pickle_mode)
if verify:
if verbose:
print "[trying to read it again]"
with open(file,"rb") as stream:
pickle.load(stream)
def load_component(file):
"""Load a component. This handles various special cases,
including old-style C++ recognizers (soon to be gotten rid of),
python expressions ("=package.ObjectName(arg1,arg2)"),
and simple pickled Python objects (default)."""
if file[0]=="=":
return pyconstruct(file[1:])
elif file[0]=="@":
file = file[1:]
with open(file,"r") as stream:
# FIXME -- get rid of this eventually
start = stream.read(128)
if start.startswith("<object>\nlinerec\n"):
# FIXME -- get rid of this eventually
warnings.warn("loading old-style linerec: %s"%file)
result = RecognizeLine()
import ocropus
result.comp = ocropus.load_IRecognizeLine(file)
return result
if start.startswith("<object>"):
# FIXME -- get rid of this eventually
warnings.warn("loading old-style cmodel: %s"%file)
import ocroold
result = ocroold.Model()
import ocropus
result.comp = ocropus.load_IModel(file)
return result
return load_object(file)
def binarize_range(image,dtype='B',threshold=0.5):
"""Binarize an image by its range."""
threshold = (amax(image)+amin(image))*threshold
scale = 1
if dtype=='B': scale = 255
return array(scale*(image>threshold),dtype=dtype)
def draw_pseg(pseg,axis=None):
if axis is None:
axis = subplot(111)
h = pseg.dim(1)
regions = ocropy.RegionExtractor()
regions.setPageLines(pseg)
for i in range(1,regions.length()):
x0,y0,x1,y1 = (regions.x0(i),regions.y0(i),regions.x1(i),regions.y1(i))
p = patches.Rectangle((x0,h-y1-1),x1-x0,y1-y0,edgecolor="red",fill=0)
axis.add_patch(p)
def draw_aligned(result,axis=None):
raise Unimplemented("FIXME draw_aligned")
if axis is None:
axis = subplot(111)
axis.imshow(NI(result.image),cmap=cm.gray)
cseg = result.cseg
if type(cseg)==numpy.ndarray: cseg = common.lseg2narray(cseg)
ocropy.make_line_segmentation_black(cseg)
ocropy.renumber_labels(cseg,1)
bboxes = ocropy.rectarray()
ocropy.bounding_boxes(bboxes,cseg)
s = re.sub(r'\s+','',result.output)
h = cseg.dim(1)
for i in range(1,bboxes.length()):
r = bboxes.at(i)
x0,y0,x1,y1 = (r.x0,r.y0,r.x1,r.y1)
p = patches.Rectangle((x0,h-y1-1),x1-x0,y1-y0,edgecolor=(0.0,0.0,1.0,0.5),fill=0)
axis.add_patch(p)
if i>0 and i-1<len(s):
axis.text(x0,h-y0-1,s[i-1],color="red",weight="bold",fontsize=14)
draw()
def plotgrid(data,d=10,shape=(30,30)):
"""Plot a list of images on a grid."""
ion()
gray()
clf()
for i in range(min(d*d,len(data))):
subplot(d,d,i+1)
row = data[i]
if shape is not None: row = row.reshape(shape)
imshow(row)
ginput(1,timeout=0.1)
def showrgb(r,g=None,b=None):
if g is None: g = r
if b is None: b = r
imshow(array([r,g,b]).transpose([1,2,0]))
def showgrid(l,cols=None,n=400,titles=None,xlabels=None,ylabels=None,**kw):
import pylab
if "cmap" not in kw: kw["cmap"] = pylab.cm.gray
if "interpolation" not in kw: kw["interpolation"] = "nearest"
n = minimum(n,len(l))
if cols is None: cols = int(sqrt(n))
rows = (n+cols-1)//cols
for i in range(n):
pylab.xticks([]); pylab.yticks([])
pylab.subplot(rows,cols,i+1)
pylab.imshow(l[i],**kw)
if titles is not None: pylab.title(str(titles[i]))
if xlabels is not None: pylab.xlabel(str(xlabels[i]))
if ylabels is not None: pylab.ylabel(str(ylabels[i]))
def gt_explode(s):
l = re.split(r'_(.{1,4})_',s)
result = []
for i,e in enumerate(l):
if i%2==0:
result += [c for c in e]
else:
result += [e]
result = [re.sub("\001","_",s) for s in result]
result = [re.sub("\002","\\\\",s) for s in result]
return result
def gt_implode(l):
result = []
for c in l:
if c=="_":
result.append("___")
elif len(c)<=1:
result.append(c)
elif len(c)<=4:
result.append("_"+c+"_")
else:
raise BadInput("cannot create ground truth transcription for: %s"%l)
return "".join(result)
@checks(int,sequence=int,frac=int,_=BOOL)
def testset(index,sequence=0,frac=10):
# this doesn't have to be good, just a fast, somewhat random function
return sequence==int(abs(sin(index))*1.23456789e6)%frac
def midrange(image,frac=0.5):
"""Computes the center of the range of image values
(for quick thresholding)."""
return frac*(amin(image)+amax(image))
from scipy.ndimage import measurements
def remove_noise(line,minsize=8):
"""Remove small pixels from an image."""
if minsize==0: return line
bin = (line>0.5*amax(line))
labels,n = morph.label(bin)
sums = measurements.sum(bin,labels,range(n+1))
sums = sums[labels]
good = minimum(bin,1-(sums>0)*(sums<minsize))
return good
class MovingStats:
def __init__(self,n=100):
self.data = []
self.n = n
self.count = 0
def add(self,x):
self.data += [x]
self.data = self.data[-self.n:]
self.count += 1
def mean(self):
if len(self.data)==0: return nan
return mean(self.data) | unknown | codeparrot/codeparrot-clean | ||
# kube-dns
`kube-dns` schedules DNS Pods and Service on the cluster, other pods in cluster
can use the DNS Service’s IP to resolve DNS names.
* [Administrators guide](http://kubernetes.io/docs/admin/dns/)
* [Code repository](http://www.github.com/kubernetes/dns)
## Manually scale kube-dns Deployment
kube-dns creates only one DNS Pod by default. If
[dns-horizontal-autoscaler](../../dns-horizontal-autoscaler/)
is not enabled, you may need to manually scale kube-dns Deployment.
Please use below `kubectl scale` command to scale:
```
kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUM_YOU_WANT>
```
Do not use `kubectl edit` to modify kube-dns Deployment object if it is
controlled by [Addon Manager](../../addon-manager/). Otherwise the modifications
will be clobbered, in addition the replicas count for kube-dns Deployment will
be reset to 1. See [Cluster add-ons README](../../README.md) and
[#36411](https://github.com/kubernetes/kubernetes/issues/36411) for reference.
## kube-dns addon templates
This directory contains the base UNDERSCORE templates that can be used to
generate the kube-dns.yaml.in needed in Salt format.
Due to a varied preference in templating language choices, the transform
Makefile in this directory should be enhanced to generate all required formats
from the base underscore templates.
**N.B.**: When you add a parameter you should also update the various scripts
that supply values for your new parameter. Here is one way you might find those
scripts:
```
cd kubernetes && git grep 'kube-dns.yaml'
```
### Base Template files
These are the authoritative base templates.
Run 'make' to generate the Salt and Sed yaml templates from these.
```
kube-dns.yaml.base
```
### Generated Salt files
```
kube-dns.yaml.in
```
### Generated Sed files
```
kube-dns.yaml.sed
``` | unknown | github | https://github.com/kubernetes/kubernetes | cluster/addons/dns/kube-dns/README.md |
from __future__ import absolute_import
from django import template
from sentry import features
register = template.Library()
@register.tag
def feature(parser, token):
bits = token.split_contents()
if len(bits) < 2:
raise template.TemplateSyntaxError("%r tag requires an argument" % token.contents.split()[0])
name = bits[1]
params = bits[2:]
nodelist_true = parser.parse(('else', 'endfeature'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endfeature',))
parser.delete_first_token()
else:
nodelist_false = template.NodeList()
return FeatureNode(nodelist_true, nodelist_false, name, params)
class FeatureNode(template.Node):
def __init__(self, nodelist_true, nodelist_false, name, params):
self.nodelist_true = nodelist_true
self.nodelist_false = nodelist_false
self.name = name
self.params = [template.Variable(i) for i in params]
def render(self, context):
params = [i.resolve(context) for i in self.params]
if 'request' in context:
user = context['request'].user
else:
user = None
if not features.has(self.name, actor=user, *params):
return self.nodelist_false.render(context)
return self.nodelist_true.render(context) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover Logger."""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import logging
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
import unittest
from adspygoogle.common import Utils
from tests.adspygoogle.dfa.v1_19 import client
from tests.adspygoogle.dfa.v1_19 import HTTP_PROXY
from tests.adspygoogle.dfa.v1_19 import SERVER_V1_19
from tests.adspygoogle.dfa.v1_19 import VERSION_V1_19
class DfaLoggerTestV1_19(unittest.TestCase):
"""Unittest suite for Logger using v1_19."""
SERVER = SERVER_V1_19
VERSION = VERSION_V1_19
TMP_LOG = os.path.join('..', '..', '..', '..', 'logs', 'logger_unittest.log')
DEBUG_MSG1 = 'Message before call to an API method.'
DEBUG_MSG2 = 'Message after call to an API method.'
client.debug = False
def setUp(self):
"""Prepare unittest."""
print self.id()
def testUpperStackLogging(self):
"""Tests whether we can define logger at client level and log before and
after the API request is made.
"""
logger = logging.getLogger(self.__class__.__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(self.__class__.TMP_LOG)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# Clean up temporary log file.
Utils.PurgeLog(self.__class__.TMP_LOG)
logger.debug(self.__class__.DEBUG_MSG1)
advertiser_service = client.GetAdvertiserService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
advertiser_service.GetAdvertisers({})
logger.debug(self.__class__.DEBUG_MSG2)
data = Utils.ReadFile(self.__class__.TMP_LOG)
self.assertEqual(data.find(self.__class__.DEBUG_MSG1), 0)
self.assertEqual(data.find(self.__class__.DEBUG_MSG2),
len(self.__class__.DEBUG_MSG1) + 1)
# Clean up and remove temporary log file.
Utils.PurgeLog(self.__class__.TMP_LOG)
os.remove(self.__class__.TMP_LOG)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
// RUN: clang-reorder-fields -record-name ::bar::Foo -fields-order z,y,x %s -- | FileCheck %s
namespace bar {
// The order of fields should not change.
struct Foo {
int x, y; // CHECK: {{^ int x, y;}}
double z; // CHECK-NEXT: {{^ double z;}}
};
} // end namespace bar | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/test/clang-reorder-fields/MultipleFieldDeclsInStatement.cpp |
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This is an internal header file intended to only be included as the
// front-matter in the implementation files of various reduction ops. It
// is a header file because we split the various reduction ops into their
// own compilation units to get more parallelism in compilation.
#ifndef TENSORFLOW_CORE_KERNELS_REDUCTION_OPS_COMMON_H_
#define TENSORFLOW_CORE_KERNELS_REDUCTION_OPS_COMMON_H_
#define EIGEN_USE_THREADS
#include "Eigen/Core" // from @eigen_archive
#include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/reduction_ops.h"
#include "tensorflow/core/kernels/transpose_functor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device>
struct Constants {
// Derive Index type. int (32-bit) or long (64-bit) depending on the
// compile-time configuration. "float" here is not relevant.
// TODO(zhifengc): Moves the definition to TTypes.
typedef TTypes<float>::Tensor::Index Index;
Eigen::array<Index, 1> kZero;
Eigen::array<Index, 1> kOne;
Eigen::array<Index, 2> kZeroTwo;
Constants() {
kZero[0] = 0;
kOne[0] = 1;
kZeroTwo[0] = 0;
kZeroTwo[1] = 2;
}
};
struct ConstantsBase {
const Eigen::IndexList<Eigen::type2index<0>> kZero;
const Eigen::IndexList<Eigen::type2index<1>> kOne;
const Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<2>> kZeroTwo;
};
template <>
struct Constants<CPUDevice> : ConstantsBase {};
class ReductionHelper {
public:
ReductionHelper() : reduce_first_axis_(false) {}
absl::Status Simplify(const Tensor& data, const Tensor& axis,
const bool keep_dims);
// We need to do roughly:
// tmp_out = allocate(out_reshape())
// tmp_out.reshape(out_reshape) = data.reshape(data_reshape).reduce(axes)
// out = tmp_out.reshape(out_shape)
// The reduction result must be allocated with this shape.
TensorShape out_reshape() const;
// The final output shape must be allocated with this shape.
TensorShape out_shape() const;
// The reduction is on a reshaped tensor of this rank.
int ndims() const { return data_reshape_.size(); }
// True if need to reduce the 0-th dimension.
bool reduce_first_axis() const { return reduce_first_axis_; }
// The output is reshaped.
template <typename T, int N>
typename TTypes<T, N>::Tensor out(Tensor* out) {
return out->shaped<T, N>(out_reshape_);
}
// The input is reshaped.
template <typename T, int N>
typename TTypes<T, N>::ConstTensor in(const Tensor& data) {
return data.shaped<T, N>(data_reshape_);
}
// Shape of shuffled input
TensorShape data_reshape() const {
TensorShape shape;
for (auto s : data_reshape_) shape.AddDim(s);
return shape;
}
// Shape with all reduction dimensions at the end
TensorShape shuffled_shape();
// Permutation of reduced dims needed to put reduction dimensions at the end
absl::InlinedVector<int32_t, 8> permutation();
private:
bool reduce_first_axis_; // True if need to reduce the 0-th dimension.
absl::InlinedVector<int64_t, 4>
data_reshape_; // Reshape data before reduction.
absl::InlinedVector<int64_t, 4> out_shape_; // The final output shape.
absl::InlinedVector<int64_t, 4>
out_reshape_; // Reshape output for reduction.
};
// For operations where the output is a reduction function along some
// dimensions of the input.
template <typename Device, class T, typename Tperm, typename Reducer>
class ReductionOp : public OpKernel {
public:
explicit ReductionOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
const DataType dt = DataTypeToEnum<T>::v();
const DataType pt = DataTypeToEnum<Tperm>::v();
OP_REQUIRES_OK(ctx, ctx->MatchSignature({dt, pt}, {dt}));
OP_REQUIRES_OK(ctx, ctx->GetAttr("keep_dims", &keep_dims_));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& data = ctx->input(0);
const Tensor& axes = ctx->input(1);
VLOG(1) << "data shape: " << data.shape().DebugString();
VLOG(1) << "axes : " << axes.SummarizeValue(10);
ReductionHelper helper;
OP_REQUIRES_OK(ctx, helper.Simplify(data, axes, keep_dims_));
CHECK_GE(helper.ndims(), 0);
bool is_scalar_identity = functor::ReducerTraits<Reducer>::IsScalarIdentity;
bool is_trivial = helper.ndims() == 0 ||
(helper.ndims() == 1 && !helper.reduce_first_axis());
if (is_scalar_identity && is_trivial) {
Tensor out;
// Special case. Reduces nothing and does not alter the input values.
if (!out.CopyFrom(data, helper.out_shape())) {
ctx->SetStatus(errors::Internal("Error during reduction copy."));
}
ctx->set_output(0, out);
return;
}
// We must allocate temp tensors using the same alloc attr as
// output(0) because it is returned as output(0) in the end.
const AllocatorAttributes alloc_attr = ctx->output_alloc_attr(0);
Tensor tmp_out;
typedef functor::ReduceFunctor<Device, Reducer> Functor;
Constants<Device> constants;
const Device& d = ctx->eigen_device<Device>();
Reducer reducer;
if (data.NumElements() > 0 && is_trivial && !is_scalar_identity) {
OP_REQUIRES_OK(ctx, ctx->allocate_temp(ctx->expected_output_dtype(0),
TensorShape({data.NumElements()}),
&tmp_out, alloc_attr));
Functor::Reduce(ctx, tmp_out.flat<T>(),
data.shaped<T, 2>({1, data.NumElements()}),
constants.kZero, reducer);
} else {
// A temporary tensor whose size matches the size of the reduced
// output.
OP_REQUIRES_OK(
ctx, ctx->allocate_temp(ctx->expected_output_dtype(0),
helper.out_reshape(), &tmp_out, alloc_attr));
if (tmp_out.NumElements() == 0) {
// Nothing to do, fall through to final reshaping.
} else if (data.NumElements() == 0) {
// Degenerate reduction where the input is empty but the output is
// nonempty (thus tmp_out.NumElements() > 0), and we must fill the
// output with identity elements. Example: tf.reduce_sum(tf.zeros((0,
// 3)), [0]). Eigen sometimes crashes in this case, so we do it
// manually.
Functor::FillIdentity(d, tmp_out.flat<T>(), reducer);
} else if ((helper.ndims() == 1) && helper.reduce_first_axis()) {
// Reduce to a scalar.
Functor::Reduce(ctx, helper.out<T, 0>(&tmp_out), helper.in<T, 1>(data),
constants.kZero, reducer);
} else if ((helper.ndims() == 2) && helper.reduce_first_axis()) {
// Can be viewed as a reduction of a matrix along 1st dimension.
Functor::Reduce(ctx, helper.out<T, 1>(&tmp_out), helper.in<T, 2>(data),
constants.kZero, reducer);
} else if ((helper.ndims() == 2) && !helper.reduce_first_axis()) {
// Can be viewed as a reduction of a matrix along 2nd dimension.
Functor::Reduce(ctx, helper.out<T, 1>(&tmp_out), helper.in<T, 2>(data),
constants.kOne, reducer);
} else if ((helper.ndims() == 3) && helper.reduce_first_axis()) {
// Can be viewed as a reduction of a 3D tensor along 1st and 3rd
// dimensions.
Functor::Reduce(ctx, helper.out<T, 1>(&tmp_out), helper.in<T, 3>(data),
constants.kZeroTwo, reducer);
} else if ((helper.ndims() == 3) && !helper.reduce_first_axis()) {
// Can be viewed as a reduction of a 3D tensor along 2nd dimension.
Functor::Reduce(ctx, helper.out<T, 2>(&tmp_out), helper.in<T, 3>(data),
constants.kOne, reducer);
} else {
// If we don't hit one of the cases above, transpose the data so that
// all reduced dimensions are last and reuse the 2-D -> 1-D case.
Tensor data_reshaped;
OP_REQUIRES(ctx, data_reshaped.CopyFrom(data, helper.data_reshape()),
errors::Internal("Error during reduction copy."));
Tensor shuffled;
OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
helper.shuffled_shape(),
&shuffled, alloc_attr));
OP_REQUIRES_OK(ctx, DoTranspose(d, data_reshaped, helper.permutation(),
&shuffled));
const int64_t unreduced = tmp_out.NumElements();
const int64_t reduced = shuffled.NumElements() / unreduced;
const Tensor& const_shuffled = shuffled;
Functor::Reduce(ctx, tmp_out.flat<T>(),
const_shuffled.shaped<T, 2>({unreduced, reduced}),
constants.kOne, reducer);
}
}
// Set the real output using the contents of the reduction but the
// real expected output shape. The number of elements should
// match between the two shapes.
Tensor out;
OP_REQUIRES(ctx, out.CopyFrom(tmp_out, helper.out_shape()),
errors::Internal("Error during reduction copy."));
ctx->set_output(0, out);
}
private:
// True if the number of dimensions should be maintained.
bool keep_dims_;
};
namespace functor {
template <typename Device, typename Reducer>
struct ReduceFunctorBase {
template <typename OUT_T, typename IN_T, typename ReductionAxes>
static void Reduce(OpKernelContext* ctx, OUT_T out, IN_T in,
const ReductionAxes& reduction_axes,
const Reducer& reducer) {
const Device& d = ctx->eigen_device<Device>();
ReduceEigenImpl<Device, OUT_T, IN_T, ReductionAxes, Reducer> reducer_impl;
reducer_impl(d, out, in, reduction_axes, reducer);
}
template <typename OUT_T>
static void FillIdentity(const Device& d, OUT_T out, const Reducer& reducer) {
FillIdentityEigenImpl(d, out, reducer);
}
};
template <typename Reducer>
struct ReduceFunctor<CPUDevice, Reducer>
: ReduceFunctorBase<CPUDevice, Reducer> {};
} // namespace functor
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_REDUCTION_OPS_COMMON_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/kernels/reduction_ops_common.h |
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/bus/palmbus.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ralink PalmBus
maintainers:
- Sergio Paracuellos <sergio.paracuellos@gmail.com>
description: |
The ralink palmbus controller can be found in all ralink MIPS
SoCs. It provides an external bus for connecting multiple
external devices to the SoC.
properties:
$nodename:
pattern: "^palmbus(@[0-9a-f]+)?$"
"#address-cells":
const: 1
"#size-cells":
const: 1
compatible:
const: palmbus
reg:
maxItems: 1
ranges: true
patternProperties:
# All other properties should be child nodes with unit-address and 'reg'
"@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
reg:
maxItems: 1
required:
- reg
required:
- compatible
- reg
- "#address-cells"
- "#size-cells"
- ranges
additionalProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/mips-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
palmbus@1e000000 {
compatible = "palmbus";
reg = <0x1e000000 0x100000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x1e000000 0x0fffff>;
gpio@600 {
#gpio-cells = <2>;
#interrupt-cells = <2>;
compatible = "mediatek,mt7621-gpio";
gpio-controller;
gpio-ranges = <&pinctrl 0 0 95>;
interrupt-controller;
reg = <0x600 0x100>;
interrupt-parent = <&gic>;
interrupts = <GIC_SHARED 12 IRQ_TYPE_LEVEL_HIGH>;
};
};
... | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/bus/palmbus.yaml |
test_kind: js_test
selector:
roots:
- src/mongo/db/modules/*/jstests/streams/aspio/iceberg/iceberg_benchmark_one_partition.js
- src/mongo/db/modules/*/jstests/streams/aspio/iceberg/iceberg_benchmark_many_partitions.js
- src/mongo/db/modules/*/jstests/streams/aspio/iceberg/iceberg_benchmark_tpcc_orders.js
- src/mongo/db/modules/*/jstests/streams/aspio/iceberg/iceberg_benchmark_tpcc_orders_mixedoperations.js
executor:
fixture:
class: ReplicaSetFixture
mongod_options:
bind_ip_all: ""
set_parameters:
enableTestCommands: 1
featureFlagStreams: true
diagnosticDataCollectionEnabled: false
num_nodes: 1 | unknown | github | https://github.com/mongodb/mongo | buildscripts/resmokeconfig/suites/streams_aspio_iceberg_4.yml |
# coding: utf-8
from unittest.mock import patch
from django.test import TestCase
from spectator.core.apps import Apps, spectator_apps
class SpectatorAppsTestCase(TestCase):
def test_all(self):
all_apps = spectator_apps.all()
self.assertEqual(2, len(all_apps))
self.assertEqual(all_apps[0], "events")
self.assertEqual(all_apps[1], "reading")
@patch.object(Apps, "all")
def test_installed(self, patched_all):
# all() will return an app that is not installed:
patched_all.return_value = ["events", "reading", "NOPE"]
# So 'NOPE' shouldn't be returned here:
installed_apps = spectator_apps.installed()
self.assertEqual(2, len(installed_apps))
self.assertEqual(installed_apps[0], "events")
self.assertEqual(installed_apps[1], "reading")
@patch.object(Apps, "all")
def test_enabled(self, patched_all):
# all() will return an app that is not installed:
patched_all.return_value = ["events", "reading", "NOPE"]
# So 'NOPE' shouldn't be returned here:
enabled_apps = spectator_apps.enabled()
self.assertEqual(2, len(enabled_apps))
self.assertEqual(enabled_apps[0], "events")
self.assertEqual(enabled_apps[1], "reading")
def test_is_installed(self):
self.assertTrue(spectator_apps.is_installed("events"))
self.assertFalse(spectator_apps.is_installed("NOPE"))
def test_is_enabled(self):
self.assertTrue(spectator_apps.is_enabled("events"))
self.assertFalse(spectator_apps.is_enabled("NOPE")) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
###############################################################################
#
# GetLocationElevation
# Obtain elevation information for a path generated by a set of geo-coordinates.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetLocationElevation(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetLocationElevation Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetLocationElevation, self).__init__(temboo_session, '/Library/Google/Elevation/GetLocationElevation')
def new_input_set(self):
return GetLocationElevationInputSet()
def _make_result_set(self, result, path):
return GetLocationElevationResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetLocationElevationChoreographyExecution(session, exec_id, path)
class GetLocationElevationInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetLocationElevation
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Locations(self, value):
"""
Set the value of the Locations input for this Choreo. ((required, string) Enter the location(s) for which elevation data will be obtained. Input formats: a single latitude/longitude coordinate pair; an array of coordinates separated by a |. A set of encoded coordinates.)
"""
super(GetLocationElevationInputSet, self)._set_input('Locations', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that response should be in. Can be set to xml or json. Defaults to json.)
"""
super(GetLocationElevationInputSet, self)._set_input('ResponseFormat', value)
def set_Sensor(self, value):
"""
Set the value of the Sensor input for this Choreo. ((optional, boolean) Indicates whether or not the directions request is from a device with a location sensor. Value must be either 1 or 0. Defaults to 0 (false).)
"""
super(GetLocationElevationInputSet, self)._set_input('Sensor', value)
class GetLocationElevationResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetLocationElevation Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Google.)
"""
return self._output.get('Response', None)
class GetLocationElevationChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetLocationElevationResultSet(response, path) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, api, _
from openerp.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class ResCurrency(models.Model):
_inherit = "res.currency"
    @api.multi
    def action_get_pyafipws_currencies(self):
        # UI button entry point; delegates to the model-level query below.
        return self.get_pyafipws_currencies()
    @api.model
    def get_pyafipws_currencies(self, afip_ws='wsfex', company=False):
        """Query the AFIP web service for its list of authorized currencies.

        NOTE: the result is *displayed* to the user by raising UserError with
        the formatted list — there is no non-exceptional return path.
        """
        # if not company, then we search one that uses argentinian localization
        if not company:
            company = self.env['res.company'].search(
                [('localization', '=', 'argentina')],
                limit=1)
            if not company:
                raise UserError(_(
                    'No company found using argentinian localization'))
        ws = company.get_connection(afip_ws).connect()
        # Each AFIP web service exposes the currency listing under a
        # different method name.
        if afip_ws == 'wsfex':
            ret = ws.GetParamMon(sep=" ")
        elif afip_ws == 'wsfe':
            ret = ws.ParamGetTiposMonedas(sep=" ")
        elif afip_ws == 'wsbfe':
            ret = ws.GetParamMon()
        else:
            raise UserError(_('AFIP WS %s not implemented') % (
                afip_ws))
        msg = (_("Authorized Currencies on AFIP%s\n. \nObservations: %s") % (
            '\n '.join(ret), ".\n".join([ws.Excepcion, ws.ErrMsg, ws.Obs])))
        raise UserError(msg)
    @api.multi
    def action_get_pyafipws_currency_rate(self):
        # get_pyafipws_currency_rate returns (rate, message); the message is
        # surfaced in the UI via UserError.
        raise UserError(self.get_pyafipws_currency_rate()[1])
@api.multi
# def get_pyafipws_currency_rate(self, afip_ws='wsfex', company=False):
def get_pyafipws_currency_rate(self, afip_ws='wsfe', company=False):
self.ensure_one()
# if not company, then we search one that uses argentinian localization
if not company:
company = self.env['res.company'].search(
[('localization', '=', 'argentina')],
limit=1)
if not company:
raise UserError(_(
'No company found using argentinian localization'))
if not self.afip_code:
raise UserError(_('No AFIP code for currency %s') % self.name)
ws = company.get_connection(afip_ws).connect()
# deberia implementarse igual para wsbfe pero nos da un error
# BFEGetPARAM_Ctz not found in WSDL
# if afip_ws in ["wsfex", 'wsbfe']:
if afip_ws == "wsfex":
rate = ws.GetParamCtz(self.afip_code)
elif afip_ws == "wsfe":
rate = ws.ParamGetCotizacion(self.afip_code)
else:
raise UserError(_('AFIP WS %s not implemented') % (
afip_ws))
msg = (_("Currency rate for %s: %s.\nObservations: %s") % (
self.name, rate, ".\n".join([ws.Excepcion, ws.ErrMsg, ws.Obs])))
return (float(rate), msg) | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=(models.Model,),
)
] | unknown | codeparrot/codeparrot-clean | ||
from __future__ import absolute_import, unicode_literals
import datetime
import re
from mopidy.models import TlTrack
from mopidy.mpd.protocol import tagtype_list
# TODO: special handling of local:// uri scheme
normalize_path_re = re.compile(r'[^/]+')
def normalize_path(path, relative=False):
    """Collapse repeated separators in *path* and strip trailing slashes.

    With ``relative=False`` (the default) the result is anchored with a
    leading ``/``; an empty or ``None`` path normalizes to ``''``.
    """
    segments = re.findall(r'[^/]+', path or '')
    if not relative:
        segments = [''] + segments
    return '/'.join(segments)
def track_to_mpd_format(track, position=None, stream_title=None):
    """
    Format track for output to MPD client.

    :param track: the track
    :type track: :class:`mopidy.models.Track` or :class:`mopidy.models.TlTrack`
    :param position: track's position in playlist
    :type position: integer
    :param stream_title: the current streams title
    :type stream_title: string
    :rtype: list of two-tuples
    """
    if isinstance(track, TlTrack):
        (tlid, track) = track
    else:
        (tlid, track) = (None, track)
    result = [
        ('file', track.uri or ''),
        # Track length is stored in milliseconds; MPD expects whole seconds.
        ('Time', track.length and (track.length // 1000) or 0),
        ('Artist', concat_multi_values(track.artists, 'name')),
        ('Album', track.album and track.album.name or ''),
    ]
    if stream_title is not None:
        result.append(('Title', stream_title))
        if track.name:
            result.append(('Name', track.name))
    else:
        result.append(('Title', track.name or ''))
    if track.date:
        result.append(('Date', track.date))
    if track.album is not None and track.album.num_tracks is not None:
        result.append(('Track', '%d/%d' % (
            track.track_no or 0, track.album.num_tracks)))
    else:
        result.append(('Track', track.track_no or 0))
    if position is not None and tlid is not None:
        result.append(('Pos', position))
        result.append(('Id', tlid))
    if track.album is not None and track.album.musicbrainz_id is not None:
        result.append(('MUSICBRAINZ_ALBUMID', track.album.musicbrainz_id))
    if track.album is not None and track.album.artists:
        result.append(
            ('AlbumArtist', concat_multi_values(track.album.artists, 'name')))
        musicbrainz_ids = concat_multi_values(
            track.album.artists, 'musicbrainz_id')
        if musicbrainz_ids:
            result.append(('MUSICBRAINZ_ALBUMARTISTID', musicbrainz_ids))
    if track.artists:
        musicbrainz_ids = concat_multi_values(track.artists, 'musicbrainz_id')
        if musicbrainz_ids:
            result.append(('MUSICBRAINZ_ARTISTID', musicbrainz_ids))
    if track.composers:
        result.append(
            ('Composer', concat_multi_values(track.composers, 'name')))
    if track.performers:
        result.append(
            ('Performer', concat_multi_values(track.performers, 'name')))
    if track.genre:
        result.append(('Genre', track.genre))
    if track.disc_no:
        result.append(('Disc', track.disc_no))
    if track.last_modified:
        datestring = datetime.datetime.utcfromtimestamp(
            track.last_modified // 1000).isoformat()
        result.append(('Last-Modified', datestring + 'Z'))
    if track.musicbrainz_id is not None:
        result.append(('MUSICBRAINZ_TRACKID', track.musicbrainz_id))
    if track.album and track.album.uri:
        result.append(('X-AlbumUri', track.album.uri))
    if track.album and track.album.images:
        # Fixed: the original used `i is not ''`, an identity comparison with
        # a string literal that only works by CPython interning accident (and
        # is a SyntaxWarning on modern CPython). Equality is intended.
        images = ';'.join(i for i in track.album.images if i != '')
        result.append(('X-AlbumImage', images))
    # Drop tags that MPD knows about but whose value is falsy.
    result = [element for element in result if _has_value(*element)]
    return result
def _has_value(tagtype, value):
    """
    Determine whether to add the tagtype to the output or not.

    Known MPD tag types are kept only when their value is truthy; unknown
    (extension) tag types are always kept.

    :param tagtype: the MPD tagtype
    :type tagtype: string
    :param value: the tag value
    :rtype: bool
    """
    if tagtype not in tagtype_list.TAGTYPE_LIST:
        return True
    return bool(value)
def concat_multi_values(models, attribute):
    """
    Format Mopidy model values for output to MPD client.

    :param models: the models
    :type models: array of :class:`mopidy.models.Artist`,
        :class:`mopidy.models.Album` or :class:`mopidy.models.Track`
    :param attribute: the attribute to use
    :type attribute: string
    :rtype: string
    """
    # Deliberately unsorted: MPD doesn't appear to sort these either, and
    # keeping input order keeps musicbrainz ids in sync with their names.
    values = []
    for model in models:
        value = getattr(model, attribute, None)
        if value is not None:
            values.append(value)
    return ';'.join(values)
def tracks_to_mpd_format(tracks, start=0, end=None):
    """
    Format list of tracks for output to MPD client.

    Optionally limit output to the slice ``[start:end]`` of the list.

    :param tracks: the tracks
    :type tracks: list of :class:`mopidy.models.Track` or
        :class:`mopidy.models.TlTrack`
    :param start: position of first track to include in output
    :type start: int (positive or negative)
    :param end: position after last track to include in output
    :type end: int (positive or negative) or :class:`None` for end of list
    :rtype: list of lists of two-tuples
    """
    if end is None:
        end = len(tracks)
    window = tracks[start:end]
    positions = range(start, end)
    # A slice shorter than the requested range indicates a caller bug.
    assert len(window) == len(positions)
    return [track_to_mpd_format(track, position)
            for track, position in zip(window, positions)]
def playlist_to_mpd_format(playlist, *args, **kwargs):
"""
Format playlist for output to MPD client.
Arguments as for :func:`tracks_to_mpd_format`, except the first one.
"""
return tracks_to_mpd_format(playlist.tracks, *args, **kwargs) | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
module ActiveModel
module Validations
class PresenceValidator < EachValidator # :nodoc:
def validate_each(record, attr_name, value)
record.errors.add(attr_name, :blank, **options) if value.blank?
end
end
module HelperMethods
# Validates that the specified attributes are not blank (as defined by
# Object#blank?).
#
# class Person < ActiveRecord::Base
# validates_presence_of :first_name
# end
#
# The first_name attribute must be in the object and it cannot be blank.
#
# If you want to validate the presence of a boolean field (where the real
# values are +true+ and +false+), you will want to use
# <tt>validates_inclusion_of :field_name, in: [true, false]</tt>.
#
# This is due to the way Object#blank? handles boolean values:
# <tt>false.blank? # => true</tt>.
#
# Configuration options:
# * <tt>:message</tt> - A custom error message (default is: "can't be blank").
#
# There is also a list of default options supported by every validator:
# +:if+, +:unless+, +:on+, +:allow_nil+, +:allow_blank+, and +:strict+.
# See ActiveModel::Validations::ClassMethods#validates for more information.
def validates_presence_of(*attr_names)
validates_with PresenceValidator, _merge_attributes(attr_names)
end
end
end
end | ruby | github | https://github.com/rails/rails | activemodel/lib/active_model/validations/presence.rb |
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.exceptions import Warning
from openerp import fields, models, api
from openerp.tools.translate import _
class l10n_eu_service(models.TransientModel):
"""Create fiscal positions for EU Service VAT"""
_name = "l10n_eu_service.wizard"
_description = __doc__
    def _get_eu_res_country_group(self):
        # base.europe is the stock country group listing all EU members; every
        # default below depends on it, so its absence is a hard error.
        eu_group = self.env.ref("base.europe", raise_if_not_found=False)
        if not eu_group:
            raise Warning(_('The Europe country group cannot be found. '
                            'Please update the base module.'))
        return eu_group
    def _default_chart_id(self):
        # Root account (no parent) of the current user's company.
        user = self.env.user
        return self.env['account.account'].search(
            [('company_id', '=', user.company_id.id), ('parent_id', '=', False)], limit=1)
    def _default_fiscal_position_id(self):
        # Default to the company's VAT-required fiscal position scoped to the
        # EU country group (i.e. the Intra-EU B2B position).
        user = self.env.user
        eu_id = self._get_eu_res_country_group()
        return self.env['account.fiscal.position'].search(
            [('company_id', '=', user.company_id.id), ('vat_required', '=', True),
             ('country_group_id.id', '=', eu_id.id)], limit=1)
    def _default_tax_id(self):
        # Highest-rate percentage sale tax that has both a collection account
        # and a tax code configured.
        user = self.env.user
        return self.env['account.tax'].search(
            [('company_id', '=', user.company_id.id), ('type_tax_use', '=', 'sale'),
             ('type', '=', 'percent'), ('account_collected_id', '!=', False),
             ('tax_code_id', '!=', False)], limit=1, order='amount desc')
    def _default_done_country_ids(self):
        # EU countries minus the still-to-do ones and the company's own country.
        user = self.env.user
        eu_country_group = self._get_eu_res_country_group()
        return eu_country_group.country_ids - self._default_todo_country_ids() - user.company_id.country_id
    def _default_todo_country_ids(self):
        # EU countries for which no auto-apply, non-VAT-required fiscal
        # position exists yet (excluding the company's own country).
        user = self.env.user
        eu_country_group = self._get_eu_res_country_group()
        eu_fiscal = self.env['account.fiscal.position'].search(
            [('country_id', 'in', eu_country_group.country_ids.ids),
             ('vat_required', '=', False), ('auto_apply', '=', True),
             ('company_id', '=', user.company_id.id)])
        return eu_country_group.country_ids - eu_fiscal.mapped('country_id') - user.company_id.country_id
chart_id = fields.Many2one(
"account.account", string="Chart of Accounts", required=True, default=_default_chart_id)
company_id = fields.Many2one(
'res.company', string='Company', required=True,
related='chart_id.company_id', readonly=True)
fiscal_position_id = fields.Many2one(
'account.fiscal.position', string='Fiscal Position', default=_default_fiscal_position_id,
help="Optional fiscal position to use as template for general account mapping. "
"Should usually be your current Intra-EU B2B fiscal position. "
"If not set, no general account mapping will be configured for EU fiscal positions.")
tax_id = fields.Many2one(
'account.tax', string='Service VAT', required=True, default=_default_tax_id,
help="Select your current VAT tax for services. This is the tax that will be mapped "
"to the corresponding VAT tax in each EU country selected below.")
account_collected_id = fields.Many2one(
"account.account", string="Tax Collection Account",
help="Optional account to use for collecting tax amounts when selling services in "
"each EU country selected below. If not set, the current collecting account of "
"your Service VAT will be used.")
done_country_ids = fields.Many2many(
'res.country', 'l10n_eu_service_country_rel_done', default=_default_done_country_ids,
string='Already Supported')
todo_country_ids = fields.Many2many(
'res.country', 'l10n_eu_service_country_rel_todo', default=_default_todo_country_ids,
string='EU Customers From', required=True)
@api.multi
def generate_eu_service(self):
imd = self.env['ir.model.data']
tax_code = self.env['account.tax.code']
tax_rate = self.env["l10n_eu_service.service_tax_rate"]
account_tax = self.env['account.tax']
fpos = self.env['account.fiscal.position']
chart_xid = 'l10n_eu_service.tax_chart_service_eu_company_%s' % self.company_id.name
chart = self.env.ref(chart_xid, raise_if_not_found=False)
if not chart:
vals = {
'name': _("EU MOSS VAT Chart - %(company)s") % {'company': self.company_id.name},
'company_id': self.company_id.id,
'parent_id': False
}
chart_id = tax_code.create(vals).id
vals_data = {
'name': 'tax_chart_service_eu_company_%s'%(self.company_id.name),
'model': 'account.tax.code',
'module': 'l10n_eu_service',
'res_id': chart_id,
'noupdate': True, # Don't drop it when module is updated
}
imd.create(vals_data)
else:
chart_id = chart.id
for country in self.todo_country_ids:
format_params = {'country_name': country.name}
tx_base_code_data = {
'name': _("Base - VAT for EU Services to %(country_name)s") % format_params,
'code': "BASE-EU-VAT-%s" % country.code,
'parent_id': chart_id,
}
tax_name = _("VAT for EU Services to %(country_name)s") % format_params
tx_code_data = {
'name': tax_name,
'code': "EU-VAT-%s" % country.code,
'parent_id': chart_id,
}
tx_base_code = tax_code.create(tx_base_code_data)
tx_code = tax_code.create(tx_code_data)
#create a new tax based on the selected service tax
data_tax = {
'name': tax_name,
'amount': tax_rate.search([('country_id', '=', country.id)]).rate,
'base_code_id': self.tax_id.base_code_id.id,
'account_collected_id': self.account_collected_id.id or self.tax_id.account_collected_id.id,
'account_paid_id': self.account_collected_id.id or self.tax_id.account_collected_id.id,
'type_tax_use': 'sale',
'base_code_id': tx_base_code.id,
'ref_base_code_id': tx_base_code.id,
'tax_code_id': tx_code.id,
'ref_tax_code_id': tx_code.id,
'ref_base_sign': -1,
'ref_tax_sign': -1,
'description': "EU-VAT-%s-S" % country.code,
'sequence': 1000,
}
tax = account_tax.create(data_tax)
if self.fiscal_position_id:
account_ids = [(6, 0, self.fiscal_position_id.account_ids.ids)]
else:
account_ids = False
#create a fiscal position for the country
fiscal_pos_name = _("Intra-EU B2C in %(country_name)s") % {'country_name': country.name}
fiscal_pos_name += " (EU-VAT-%s)" % country.code
data_fiscal = {
'name': fiscal_pos_name,
'company_id': self.chart_id.company_id.id,
'vat_required': False,
'auto_apply': True,
'country_id': country.id,
'account_ids': account_ids,
'tax_ids': [(0, 0, {'tax_src_id': self.tax_id.id, 'tax_dest_id': tax.id})],
}
fpos.create(data_fiscal)
return {'type': 'ir.actions.act_window_close'} | unknown | codeparrot/codeparrot-clean | ||
import logging
import asyncio
from aiohttp.web import Response
from api_hour.plugins.aiohttp import JSON
import aiohttp_jinja2
from ..services import queries_number
from ..services.world import get_random_record, get_random_records, update_random_records, get_fortunes
from ..services import mysql
LOG = logging.getLogger(__name__)
@asyncio.coroutine
def json(request):
    """Test type 1: JSON serialization"""
    # NOTE(review): handler named `json` would shadow the stdlib module if it
    # were ever imported at module scope here; kept for URL-handler symmetry.
    return JSON({'message': 'Hello, World!'})
@asyncio.coroutine
def db(request):
    """Test type 2: Single database query"""
    # ah_container carries the application's service connections (api_hour).
    container = request.app.ah_container
    return JSON((yield from get_random_record(container)))
@asyncio.coroutine
def db_mysql(request):
    """Test type 2: Single database query"""
    # MySQL variant of db(); same contract, different backend service.
    container = request.app.ah_container
    return JSON((yield from mysql.get_random_record(container)))
@asyncio.coroutine
def queries(request):
    """Test type 3: Multiple database queries"""
    container = request.app.ah_container
    # queries_number sanitizes the ?queries= parameter (presumably clamping it
    # to the benchmark's allowed range — confirm in the services module).
    limit = queries_number(request.GET.get('queries', 1))
    return JSON((yield from get_random_records(container, limit)))
@asyncio.coroutine
def queries_mysql(request):
    """Test type 3: Multiple database queries"""
    # MySQL variant of queries(); same contract, different backend service.
    container = request.app.ah_container
    limit = queries_number(request.GET.get('queries', 1))
    return JSON((yield from mysql.get_random_records(container, limit)))
@asyncio.coroutine
def fortunes(request):
    """Test type 4: Fortunes"""
    container = request.app.ah_container
    # Render the fortunes through the Jinja2 template registered on the app.
    return aiohttp_jinja2.render_template('fortunes.html.j2',
                                          request,
                                          {'fortunes': (yield from get_fortunes(container))})
@asyncio.coroutine
def fortunes_mysql(request):
    """Test type 4: Fortunes"""
    # MySQL variant of fortunes(); same template, different backend service.
    container = request.app.ah_container
    return aiohttp_jinja2.render_template('fortunes.html.j2',
                                          request,
                                          {'fortunes': (yield from mysql.get_fortunes(container))})
@asyncio.coroutine
def updates(request):
    """Test type 5: Database updates"""
    container = request.app.ah_container
    # Same ?queries= sanitization as the read-only endpoints.
    limit = queries_number(request.GET.get('queries', 1))
    return JSON((yield from update_random_records(container, limit)))
@asyncio.coroutine
def updates_mysql(request):
    """Test type 5: Database updates"""
    # MySQL variant of updates(); same contract, different backend service.
    container = request.app.ah_container
    limit = queries_number(request.GET.get('queries', 1))
    return JSON((yield from mysql.update_random_records(container, limit)))
@asyncio.coroutine
def plaintext(request):
"""Test type 6: Plaintext"""
return Response(text='Hello, World!') | unknown | codeparrot/codeparrot-clean | ||
'''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:: fantastico.sdk.test.test_command_argument
'''
from fantastico.sdk.sdk_core import SdkCommandArgument
from fantastico.tests.base_case import FantasticoUnitTestsCase
class SdkCommandArgumentTests(FantasticoUnitTestsCase):
'''This class provides the test cases which ensure command argument object provide the specified functionality.'''
def test_arg_instantiation(self):
'''This test case instantiate a command argument and ensures all attributes are correctly set. Moreover it ensures
command argument attributes are readonly.'''
expected_short_name = "t"
expected_name = "test_argument"
expected_type = int
expected_help = "Simple help message"
arg = SdkCommandArgument(arg_short_name=expected_short_name,
arg_name=expected_name,
arg_type=expected_type,
arg_help=expected_help)
self.assertEqual(expected_short_name, arg.short_name)
self.assertEqual(expected_name, arg.name)
self.assertEqual(expected_type, arg.type)
self.assertEqual(expected_help, arg.help)
for attr_name in ["short_name", "name", "type", "help"]:
with self.assertRaises(AttributeError):
setattr(arg, attr_name, "Simple test") | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
# :enddoc:
# :markup: markdown
require "rack/cache"
require "rack/cache/context"
require "active_support/cache"
module ActionDispatch
class RailsMetaStore < Rack::Cache::MetaStore
def self.resolve(uri)
new
end
def initialize(store = Rails.cache)
@store = store
end
def read(key)
if data = @store.read(key)
Marshal.load(data)
else
[]
end
end
def write(key, value)
@store.write(key, Marshal.dump(value))
end
::Rack::Cache::MetaStore::RAILS = self
end
class RailsEntityStore < Rack::Cache::EntityStore
def self.resolve(uri)
new
end
def initialize(store = Rails.cache)
@store = store
end
def exist?(key)
@store.exist?(key)
end
def open(key)
@store.read(key)
end
def read(key)
body = open(key)
body.join if body
end
def write(body)
buf = []
key, size = slurp(body) { |part| buf << part }
@store.write(key, buf)
[key, size]
end
::Rack::Cache::EntityStore::RAILS = self
end
end | ruby | github | https://github.com/rails/rails | actionpack/lib/action_dispatch/http/rack_cache.rb |
import re
from django.db.backends import BaseDatabaseIntrospection
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict:
    """Dictionary-like mapping from SQLite column types to Django field types.

    This "fakes" a dictionary interface because some SQLite data types embed
    variables -- e.g. "varchar(30)" -- and can't be matched by a plain lookup.
    """
    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }
    def __getitem__(self, key):
        """Look up *key* case-insensitively.

        Returns either a field-type name, or a ('CharField', {'max_length': N})
        tuple for char(N)/varchar(N) types. Raises KeyError for unknown types.
        """
        key = key.lower()
        try:
            return self.base_data_types_reverse[key]
        except KeyError:
            # Fixed: dropped the redundant function-local `import re` (the
            # module already imports re at the top) and made the pattern a raw
            # string so the \s/\d escapes are explicit.
            m = re.search(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$', key)
            if m:
                return ('CharField', {'max_length': int(m.group(1))})
            raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
    def get_table_list(self, cursor):
        "Returns a list of table names in the current database."
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation. Ordered by name for deterministic output.
        cursor.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND NOT name='sqlite_sequence'
            ORDER BY name""")
        return [row[0] for row in cursor.fetchall()]
    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # PRAGMA table_info only provides name/type/null_ok; the remaining
        # cursor.description slots are not available from SQLite, hence None.
        return [(info['name'], info['type'], None, None, None, None,
                 info['null_ok']) for info in self._table_info(cursor, table_name)]
    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        # Dictionary of relations to return
        relations = {}
        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        # Keep only the column-definition list between the outermost parens.
        results = results[results.index('(')+1:results.rindex(')')]
        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        # NOTE(review): splitting on ',' breaks on types that themselves
        # contain commas, e.g. decimal(10,2) — confirm against supported DDL.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue
            m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]
            # Fetch the referenced table's DDL to translate the referenced
            # column name into a 0-based column index.
            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchone()
            if not result:
                continue
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li+1:ri]
            for other_index, other_desc in enumerate(other_table_results.split(',')):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue
                # First token of a column definition is the (possibly quoted)
                # column name.
                name = other_desc.split(' ', 1)[0].strip('"')
                if name == column:
                    relations[field_index] = (other_index, table)
                    break
        return relations
    def get_indexes(self, cursor, table_name):
        """
        Returns a dictionary of fieldname -> infodict for the given table,
        where each infodict is in the format:
            {'primary_key': boolean representing whether it's the primary key,
             'unique': boolean representing whether it's a unique index}
        """
        indexes = {}
        for info in self._table_info(cursor, table_name):
            indexes[info['name']] = {'primary_key': info['pk'] != 0,
                                     'unique': False}
        cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            if not unique:
                continue
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            info = cursor.fetchall()
            # Skip indexes across multiple fields: only single-column unique
            # indexes map onto a per-field 'unique' flag.
            if len(info) != 1:
                continue
            name = info[0][2]  # seqno, cid, name
            indexes[name]['unique'] = True
        return indexes
def _table_info(self, cursor, name):
cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
# cid, name, type, notnull, dflt_value, pk
return [{'name': field[1],
'type': field[2],
'null_ok': not field[3],
'pk': field[5] # undocumented
} for field in cursor.fetchall()] | unknown | codeparrot/codeparrot-clean | ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
// Package depsfile contains the logic for reading and writing Terraform's
// dependency lock and development override configuration files.
//
// These files are separate from the main Terraform configuration files (.tf)
// for a number of reasons. The first is to help establish a distinction
// where .tf files configure a particular module while these configure
// a whole configuration tree. Another, more practical consideration is that
// we intend both of these files to be primarily maintained automatically by
// Terraform itself, rather than by human-originated edits, and so keeping
// them separate means that it's easier to distinguish the files that Terraform
// will change automatically during normal workflow from the files that
// Terraform only edits on direct request.
//
// Both files use HCL syntax, for consistency with other files in Terraform
// that we expect humans to (in this case, only occasionally) edit directly.
// A dependency lock file tracks the most recently selected upstream versions
// of each dependency, and is intended for checkin to version control.
// A development override file allows for temporarily overriding upstream
// dependencies with local files/directories on disk as an aid to testing
// a cross-codebase change during development, and should not be saved in
// version control.
package depsfile | go | github | https://github.com/hashicorp/terraform | internal/depsfile/doc.go |
class FeatureSet(object):
    """
    An implementation of features as loaded from an ISUPPORT server directive.

    Each feature is loaded into an attribute of the same name (but lowercased
    to match Python sensibilities).

    >>> f = FeatureSet()
    >>> f.load(['target', 'PREFIX=(abc)+-/', 'your message sir'])
    >>> f.prefix == {'+': 'a', '-': 'b', '/': 'c'}
    True
    >>> f.load_feature('CHANMODES=foo,bar,baz')
    >>> f.chanmodes
    ['foo', 'bar', 'baz']
    """
    def __init__(self):
        self._set_rfc1459_prefixes()
    def _set_rfc1459_prefixes(self):
        "install standard (RFC1459) prefixes"
        self.set('PREFIX', {
            '@': 'o',
            '+': 'v',
        })
    def set(self, name, value=True):
        "set a feature value"
        setattr(self, name.lower(), value)
    def remove(self, feature_name):
        "remove a feature if it was previously set"
        if feature_name in vars(self):
            delattr(self, feature_name)
    def load(self, arguments):
        """Load the features from ServerConnection arguments.

        The first argument is the reply target and the last is the trailing
        human-readable message; only the feature tokens between them are
        parsed.
        """
        # Fixed: the original unpacked unused `target`/`msg` locals and used
        # list(map(...)) purely for side effects; a plain loop is clearer.
        for feature in arguments[1:-1]:
            self.load_feature(feature)
    def load_feature(self, feature):
        """Parse a single ISUPPORT token such as ``PREFIX=(ov)@+``."""
        # A leading '-' negates (removes) a previously advertised feature.
        # Fixed: startswith avoids an IndexError on an empty token.
        if feature.startswith('-'):
            return self.remove(feature[1:].lower())
        name, sep, value = feature.partition('=')
        if not sep:
            # Bare token without '=' is ignored.
            return
        if not value:
            # "NAME=" advertises the feature with no value; record it as True.
            self.set(name)
            return
        # Dispatch to a _parse_<NAME> hook when one exists.
        parser = getattr(self, '_parse_' + name, self._parse_other)
        value = parser(value)
        self.set(name, value)
    @staticmethod
    def _parse_PREFIX(value):
        "channel user prefixes"
        channel_modes, channel_chars = value.split(')')
        channel_modes = channel_modes[1:]
        return dict(zip(channel_chars, channel_modes))
    @staticmethod
    def _parse_CHANMODES(value):
        "channel mode letters"
        return value.split(',')
    @staticmethod
    def _parse_TARGMAX(value):
        """
        >>> res = FeatureSet._parse_TARGMAX('a:3,c:,b:2')
        >>> res['a']
        3
        """
        return dict(string_int_pair(target, ':')
                    for target in value.split(','))
    @staticmethod
    def _parse_CHANLIMIT(value):
        """
        >>> res = FeatureSet._parse_CHANLIMIT('ibe:250,xyz:100')
        >>> len(res)
        6
        >>> res['x']
        100
        >>> res['i'] == res['b'] == res['e'] == 250
        True
        """
        # Each key group expands to one entry per character, sharing the limit.
        pairs = map(string_int_pair, value.split(','))
        return dict(
            (target, number)
            for target_keys, number in pairs
            for target in target_keys
        )
    _parse_MAXLIST = _parse_CHANLIMIT
    @staticmethod
    def _parse_other(value):
        # Fallback parser: integers become ints, everything else stays a str.
        if value.isdigit():
            return int(value)
        return value
def string_int_pair(target, sep=':'):
    """Split *target* on *sep* into a (name, count) pair.

    An empty count (e.g. 'c:') maps to None, meaning "no limit".
    """
    key, raw = target.split(sep)
    return key, (int(raw) if raw else None)
package kotlinx.coroutines.flow
import kotlinx.coroutines.testing.*
import kotlinx.coroutines.*
import kotlin.test.*
class CombineStressTest : TestBase() {

    /*
     * Rapid upstream emissions make flatMapLatest continuously cancel the
     * inner combine machinery; the CoroutineExceptionHandler asserts that
     * no exception ever escapes during those cancellations.
     */
    @Test
    fun testCancellation() = runTest {
        withContext(Dispatchers.Default + CoroutineExceptionHandler { _, _ -> expectUnreached() }) {
            flow {
                expect(1)
                repeat(1_000 * stressTestMultiplier) {
                    emit(it)
                }
            }.flatMapLatest {
                // each new upstream value cancels the previous combine
                combine(flowOf(it), flowOf(it)) { arr -> arr[0] }
            }.collect()
            finish(2)
            // reset the expect()/finish() counter so other tests start clean
            reset()
        }
    }

    /*
     * Same pipeline, but the collector throws once half of the inner
     * iterations have been observed; verifies the failure reaches the
     * catch block rather than the exception handler. The expect/finish
     * counter is reset after every outer iteration.
     */
    @Test
    fun testFailure() = runTest {
        val innerIterations = 100 * stressTestMultiplierSqrt
        val outerIterations = 10 * stressTestMultiplierSqrt
        withContext(Dispatchers.Default + CoroutineExceptionHandler { _, _ -> expectUnreached() }) {
            repeat(outerIterations) {
                try {
                    flow {
                        expect(1)
                        repeat(innerIterations) {
                            emit(it)
                        }
                    }.flatMapLatest {
                        combine(flowOf(it), flowOf(it)) { arr -> arr[0] }
                    }.onEach {
                        if (it >= innerIterations / 2) throw TestException()
                    }.collect()
                } catch (e: TestException) {
                    expect(2)
                }
                finish(3)
                reset()
            }
        }
    }
}
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import absolute_import, unicode_literals
from flask import render_template
from markupsafe import Markup
from indico.core.db import db
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.util.i18n import _
from indico.web.forms.fields import IndicoEnumRadioField
from indico.web.forms.widgets import JinjaWidget
class IndicoProtectionField(IndicoEnumRadioField):
    """Radio field for selecting an object's protection mode."""

    widget = JinjaWidget('forms/protection_widget.html', single_kwargs=True)
    radio_widget = JinjaWidget('forms/radio_buttons_widget.html', orientation='horizontal', single_kwargs=True)

    def __init__(self, *args, **kwargs):
        form = kwargs['_form']
        self.protected_object = kwargs.pop('protected_object')(form)
        acl_url_builder = kwargs.pop('acl_message_url', None)
        self.acl_message_url = acl_url_builder(form) if acl_url_builder else None
        # "inheriting" only makes sense when there is a protection parent
        self.can_inherit_protection = self.protected_object.protection_parent is not None
        if not self.can_inherit_protection:
            kwargs['skip'] = {ProtectionMode.inheriting}
        super(IndicoProtectionField, self).__init__(*args, enum=ProtectionMode, **kwargs)

    def render_protection_message(self):
        """Render the informational message describing the protection state."""
        protected_object = self.get_form().protected_object
        non_inheriting_objects = []
        if hasattr(protected_object, 'get_non_inheriting_objects'):
            non_inheriting_objects = protected_object.get_non_inheriting_objects()
        parent = protected_object.protection_parent
        if isinstance(parent, db.m.Event):
            parent_type = _('Event')
        elif isinstance(parent, db.m.Category):
            parent_type = _('Category')
        else:
            parent_type = _('Session')
        rendered = render_template('_protection_info.html', field=self, protected_object=protected_object,
                                   parent_type=parent_type, non_inheriting_objects=non_inheriting_objects)
        return Markup(rendered)
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from ..._models import BaseModel
__all__ = ["TextSimilarityGrader"]
class TextSimilarityGrader(BaseModel):
    """A TextSimilarityGrader object which grades text based on similarity metrics."""

    # NOTE: this module is auto-generated from the OpenAPI spec (see the
    # file header); keep this Literal in sync with the spec rather than
    # editing it by hand.
    evaluation_metric: Literal[
        "cosine",
        "fuzzy_match",
        "bleu",
        "gleu",
        "meteor",
        "rouge_1",
        "rouge_2",
        "rouge_3",
        "rouge_4",
        "rouge_5",
        "rouge_l",
    ]
    """The evaluation metric to use.

    One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`,
    `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
    """

    input: str
    """The text being graded."""

    name: str
    """The name of the grader."""

    reference: str
    """The text being graded against."""

    type: Literal["text_similarity"]
    """The type of grader."""
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Dummy Socks5 server for testing.
'''
import socket, threading, queue
import traceback, sys
### Protocol constants
class Command:
    """SOCKS5 request command codes (RFC 1928 CMD field); only CONNECT is handled."""
    CONNECT = 0x01
class AddressType:
    """SOCKS5 address-type codes (RFC 1928 ATYP field)."""
    IPV4 = 0x01
    DOMAINNAME = 0x03
    IPV6 = 0x04
### Utility functions
def recvall(s, n):
    '''Receive exactly n bytes from socket s, raising IOError on early EOF'''
    buf = bytearray()
    while len(buf) < n:
        chunk = s.recv(n - len(buf))
        if not chunk:
            raise IOError('Unexpected end of stream')
        buf.extend(chunk)
    return buf
### Implementation classes
class Socks5Configuration(object):
    '''Settings controlling how the dummy SOCKS5 server binds and authenticates'''
    def __init__(self):
        # the test must assign a bind address before starting the server
        self.addr = None
        self.af = socket.AF_INET   # bind address family
        self.unauth = False        # offer the "no authentication" method
        self.auth = False          # offer the username/password method
class Socks5Command(object):
    '''Parsed SOCKS5 request, as reported back to the test via the queue'''
    def __init__(self, cmd, atyp, addr, port, username, password):
        self.cmd = cmd          # command code (one of Command.*)
        self.atyp = atyp        # address type (one of AddressType.*)
        self.addr = addr        # destination address
        self.port = port        # destination port
        self.username = username
        self.password = password
    def __repr__(self):
        parts = (self.cmd, self.atyp, self.addr, self.port, self.username, self.password)
        return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % parts
class Socks5Connection(object):
    """Handles a single client connection to the dummy SOCKS5 server."""
    def __init__(self, serv, conn, peer):
        self.serv = serv  # owning Socks5Server (for config and result queue)
        self.conn = conn  # connected client socket
        self.peer = peer  # peer address
    def handle(self):
        '''
        Handle socks5 request according to RFC1928
        '''
        try:
            # Verify socks version
            ver = recvall(self.conn, 1)[0]
            if ver != 0x05:
                raise IOError('Invalid socks version %i' % ver)
            # Choose authentication method; prefer username/password when
            # the server is configured for it
            nmethods = recvall(self.conn, 1)[0]
            methods = bytearray(recvall(self.conn, nmethods))
            method = None
            if 0x02 in methods and self.serv.conf.auth:
                method = 0x02 # username/password
            elif 0x00 in methods and self.serv.conf.unauth:
                method = 0x00 # unauthenticated
            if method is None:
                raise IOError('No supported authentication method was offered')
            # Send response
            self.conn.sendall(bytearray([0x05, method]))
            # Read authentication (optional)
            username = None
            password = None
            if method == 0x02:
                ver = recvall(self.conn, 1)[0]
                if ver != 0x01:
                    raise IOError('Invalid auth packet version %i' % ver)
                ulen = recvall(self.conn, 1)[0]
                # NOTE(review): str() of a bytearray on Python 3 yields the
                # literal text "bytearray(b'...')" rather than the decoded
                # credentials -- presumably callers compare against that
                # form; confirm before changing to .decode().
                username = str(recvall(self.conn, ulen))
                plen = recvall(self.conn, 1)[0]
                password = str(recvall(self.conn, plen))
                # Send authentication response
                self.conn.sendall(bytearray([0x01, 0x00]))
            # Read connect request
            (ver,cmd,rsv,atyp) = recvall(self.conn, 4)
            if ver != 0x05:
                raise IOError('Invalid socks version %i in connect request' % ver)
            if cmd != Command.CONNECT:
                raise IOError('Unhandled command %i in connect request' % cmd)
            # destination address encoding depends on the address type
            if atyp == AddressType.IPV4:
                addr = recvall(self.conn, 4)
            elif atyp == AddressType.DOMAINNAME:
                n = recvall(self.conn, 1)[0]
                addr = recvall(self.conn, n)
            elif atyp == AddressType.IPV6:
                addr = recvall(self.conn, 16)
            else:
                raise IOError('Unknown address type %i' % atyp)
            port_hi,port_lo = recvall(self.conn, 2)
            port = (port_hi << 8) | port_lo
            # Send dummy response (success, bound to 0.0.0.0:0); no real
            # outbound connection is ever made
            self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
            cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
            self.serv.queue.put(cmdin)
            print('Proxy: ', cmdin)
            # Fall through to disconnect
        except Exception as e:
            # report the failure to the test through the same queue
            traceback.print_exc(file=sys.stderr)
            self.serv.queue.put(e)
        finally:
            self.conn.close()
class Socks5Server(object):
    """Threaded dummy SOCKS5 server; parsed commands appear on self.queue."""
    def __init__(self, conf):
        self.conf = conf
        self.s = socket.socket(conf.af)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind(conf.addr)
        self.s.listen(5)
        self.running = False
        self.thread = None
        self.queue = queue.Queue() # report connections and exceptions to client
    def run(self):
        """Accept loop; each connection is handled on its own daemon thread."""
        while self.running:
            (sockconn, peer) = self.s.accept()
            # re-check running: stop() wakes us with a dummy connection
            if self.running:
                conn = Socks5Connection(self, sockconn, peer)
                thread = threading.Thread(None, conn.handle)
                thread.daemon = True
                thread.start()
    def start(self):
        """Start the accept loop on a background daemon thread."""
        assert(not self.running)
        self.running = True
        self.thread = threading.Thread(None, self.run)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        """Stop the server and wait for the accept thread to exit."""
        self.running = False
        # connect to self to end run loop
        s = socket.socket(self.conf.af)
        s.connect(self.conf.addr)
        s.close()
        self.thread.join()
//// [tests/cases/conformance/classes/propertyMemberDeclarations/autoAccessor6.ts] ////
//// [autoAccessor6.ts]
class C1 {
accessor a: any;
}
class C2 extends C1 {
a = 1;
}
class C3 extends C1 {
get a() { return super.a; }
}
//// [autoAccessor6.js]
"use strict";
class C1 {
accessor a;
}
class C2 extends C1 {
constructor() {
super(...arguments);
this.a = 1;
}
}
class C3 extends C1 {
get a() { return super.a; }
} | javascript | github | https://github.com/microsoft/TypeScript | tests/baselines/reference/autoAccessor6(target=esnext,usedefineforclassfields=false).js |
from django.db import connection
from django.db.models import Max
from django.test import TestCase
from .models import Cash, CashModel
class FromDBValueTest(TestCase):
    """Verify that from_db_value conversion applies everywhere values are loaded."""

    @classmethod
    def setUpTestData(cls):
        CashModel.objects.create(cash="12.50")

    def test_simple_load(self):
        loaded = CashModel.objects.get()
        self.assertIsInstance(loaded.cash, Cash)

    def test_values_list(self):
        flat = CashModel.objects.values_list("cash", flat=True)
        self.assertIsInstance(flat[0], Cash)

    def test_values(self):
        rows = CashModel.objects.values("cash")
        self.assertIsInstance(rows[0]["cash"], Cash)

    def test_aggregation(self):
        highest = CashModel.objects.aggregate(m=Max("cash"))["m"]
        self.assertIsInstance(highest, Cash)

    def test_defer(self):
        deferred = CashModel.objects.defer("cash").get()
        self.assertIsInstance(deferred.cash, Cash)

    def test_connection(self):
        loaded = CashModel.objects.get()
        self.assertEqual(loaded.cash.vendor, connection.vendor)
#!/usr/bin/env python
"""
@package mi.instrument.teledyne.pd0_parser
@file marine-integrations/mi/instrument/teledyne/pd0_parser.py
@author Peter Cable
@brief Parser for ADCP PD0 data
Release notes:
"""
from collections import namedtuple
import pprint
import struct
import sys
# Module-wide caches: repeated records reuse namedtuple classes and
# previously-decoded bitmapped values instead of rebuilding them per record.
namedtuple_store = {}
bitmapped_namedtuple_store = {}
class PD0ParsingException(Exception):
    """Base class for all errors raised while parsing a PD0 record."""
    pass
class InsufficientDataException(PD0ParsingException):
    """Record is shorter than the length declared in its header."""
    pass
class UnhandledBlockException(PD0ParsingException):
    """Record contains a data-type id this parser does not recognize."""
    pass
class ChecksumException(PD0ParsingException):
    """Stored checksum does not match the computed byte sum."""
    pass
class BadHeaderException(PD0ParsingException):
    """Header failed a sanity check (e.g. implausible data-type count)."""
    pass
class BadOffsetException(PD0ParsingException):
    """A declared block offset points past the end of the record."""
    pass
class BlockId(object):
    # PD0 data-type ID words (little-endian ushort found at each block offset)
    FIXED_DATA = 0               # 0x0000
    VARIABLE_DATA = 128          # 0x0080
    VELOCITY_DATA = 256          # 0x0100
    CORRELATION_DATA = 512       # 0x0200
    ECHO_INTENSITY_DATA = 768    # 0x0300
    PERCENT_GOOD_DATA = 1024     # 0x0400
    STATUS_DATA_ID = 1280        # 0x0500
    BOTTOM_TRACK = 1536          # 0x0600
    AUV_NAV_DATA = 8192          # 0x2000
def count_zero_bits(bitmask):
    """Return the number of trailing zero bits in *bitmask*.

    Used to right-shift masked values down to bit position zero.
    Returns 0 when *bitmask* is falsy, matching the original loop-based
    implementation.
    """
    if not bitmask:
        return 0
    # (x & -x) isolates the lowest set bit; its bit_length is the 1-based
    # position of that bit, so subtracting one gives the shift count.
    # O(1) replacement for the original bit-by-bit probing loop.
    return (bitmask & -bitmask).bit_length() - 1
class AdcpPd0Record(object):
    """Parsed Teledyne ADCP PD0 ensemble.

    Validates the raw record (header, block offsets, checksum) and unpacks
    each data block into namedtuple attributes.

    :param data: raw PD0 record bytes; trimmed to header length + checksum
    :param glider: True selects the ExplorerDVL (glider) sensor bit layout
    :raises PD0ParsingException: (subclasses) on malformed input
    """
    def __init__(self, data, glider=False):
        self.data = data
        # parsed blocks, populated by _process; a bottom_track attribute is
        # additionally set only when a bottom-track block is present
        self.header = None
        self.offsets = None
        self.fixed_data = None
        self.variable_data = None
        self.echo_intensity = None
        self.velocities = None
        self.correlation_magnitudes = None
        self.percent_good = None
        self.sysconfig = None
        self.sensor_source = None
        self.sensor_avail = None
        self.bit_result = None
        self.error_word = None
        self.stored_checksum = None
        self._process(glider)
    def __str__(self):
        return repr(self)
    def __repr__(self):
        # dump every parsed attribute for debugging
        return pprint.pformat(self.__dict__)
    def _unpack_from_format(self, name, formatter, offset):
        """Unpack little-endian fields described by *formatter* into a namedtuple.

        :param name: namedtuple type name (also the module-level cache key)
        :param formatter: sequence of (field_name, struct_format_char) pairs
        :param offset: byte offset of the block within self.data
        """
        format_string = ''.join([item[1] for item in formatter])
        fields = [item[0] for item in formatter]
        data = struct.unpack_from('<' + format_string, self.data, offset)
        # namedtuple classes are cached module-wide to avoid re-creation
        if name not in namedtuple_store:
            namedtuple_store[name] = namedtuple(name, fields)
        _class = namedtuple_store[name]
        return _class(*data)
    def _unpack_cell_data(self, name, format_string, offset):
        """Unpack a per-cell block (ushort id + 4 beams x number_of_cells values).

        Returns a namedtuple whose beam1..beam4 fields are lists of values.
        """
        # NOTE(review): unlike _unpack_from_format, this namedtuple class is
        # re-created on every call rather than cached in namedtuple_store.
        _class = namedtuple(name, ('id', 'beam1', 'beam2', 'beam3', 'beam4'))
        data = struct.unpack_from('<H%d%s' % (self.fixed_data.number_of_cells * 4, format_string), self.data, offset)
        _object = _class(data[0], [], [], [], [])
        # values are interleaved beam1,beam2,beam3,beam4 for each cell;
        # slice assignment fills the (mutable) lists held by the tuple
        _object.beam1[:] = data[1::4]
        _object.beam2[:] = data[2::4]
        _object.beam3[:] = data[3::4]
        _object.beam4[:] = data[4::4]
        return _object
    @staticmethod
    def _unpack_bitmapped(name, formatter, source_data):
        """Decode bitfields of *source_data* into a (cached) namedtuple.

        :param formatter: sequence of (field_name, bitmask, lookup_or_None)
            tuples; masked bits are shifted down and optionally mapped
            through the lookup table.
        """
        # short circuit if we've seen this bitmap before
        short_circuit_key = (name, source_data)
        if short_circuit_key in bitmapped_namedtuple_store:
            return bitmapped_namedtuple_store[short_circuit_key]
        # create the namedtuple class if it doesn't already exist
        fields = [item[0] for item in formatter]
        if name not in namedtuple_store:
            namedtuple_store[name] = namedtuple(name, fields)
        _class = namedtuple_store[name]
        # create an instance of the namedtuple for this data
        data = []
        for _, bitmask, lookup_table in formatter:
            raw = (source_data & bitmask) >> count_zero_bits(bitmask)
            if lookup_table is not None:
                data.append(lookup_table[raw])
            else:
                data.append(raw)
        value = _class(*data)
        # store this value for future short circuit operations
        bitmapped_namedtuple_store[short_circuit_key] = value
        return value
    def _validate(self):
        """Run all structural checks before any block parsing begins."""
        self._process_header()
        self._validate_offset_data()
        self._validate_checksum()
def _validate_checksum(self):
calculated_checksum = sum(bytearray(self.data[:-2])) & 65535
self.stored_checksum = struct.unpack_from('<H', self.data, self.header.num_bytes)[0]
if calculated_checksum != self.stored_checksum:
raise ChecksumException('Checksum failure in PD0 data (expected %d, calculated %d' %
(self.stored_checksum, calculated_checksum))
    def _validate_offset_data(self):
        """Check every declared block offset and its data-type id word."""
        # offsets are num_data_types little-endian ushorts starting at byte 6
        self.offsets = struct.unpack_from('<%dH' % self.header.num_data_types, self.data, 6)
        # BlockId.__dict__.values() also contains class dunder entries, but
        # comparing an int block_id against them is harmless
        valid_block_ids = BlockId.__dict__.values()
        for offset in self.offsets:
            # an offset must leave room for at least a 2-byte block id
            if offset > len(self.data) - 2:
                raise BadOffsetException
            block_id = struct.unpack_from('<H', self.data, offset)[0]
            if block_id not in valid_block_ids:
                raise UnhandledBlockException('Found unhandled data type id: %d' % block_id)
    def _process(self, glider):
        """Validate the raw record, then parse every block and bitfield."""
        self._validate()
        self._parse_offset_data()
        # decode the bit-packed words extracted by the block parsers
        self._parse_sysconfig()
        self._parse_coord_transform()
        self._parse_sensor_source(glider)
        self._parse_sensor_avail(glider)
        self._parse_bit_result()
        self._parse_error_word()
    def _process_header(self):
        """Unpack the 6-byte record header and trim trailing bytes.

        self.data is truncated to the declared record length plus the
        2-byte checksum; raises if the record is short or the data-type
        count is outside the plausible 6..9 range.
        """
        header_format = (
            ('id', 'B'),
            ('data_source', 'B'),
            ('num_bytes', 'H'),
            ('spare', 'B'),
            ('num_data_types', 'B')
        )
        self.header = self._unpack_from_format('header', header_format, 0)
        # drop any bytes past the declared record length (+2 for checksum)
        self.data = self.data[:self.header.num_bytes + 2]
        if len(self.data) < self.header.num_bytes + 2:
            raise InsufficientDataException(
                'Insufficient data in PD0 record (expected %d bytes, found %d)' %
                (self.header.num_bytes + 2, len(self.data)))
        if not(5 < self.header.num_data_types < 10):
            raise BadHeaderException
def _parse_offset_data(self):
self.offsets = struct.unpack_from('<%dH' % self.header.num_data_types, self.data, 6)
for offset in self.offsets:
block_id = struct.unpack_from('<H', self.data, offset)[0]
if block_id == BlockId.FIXED_DATA:
self._parse_fixed(offset)
elif block_id == BlockId.VARIABLE_DATA:
self._parse_variable(offset)
elif block_id == BlockId.VELOCITY_DATA:
self._parse_velocity(offset)
elif block_id == BlockId.CORRELATION_DATA:
self._parse_correlation(offset)
elif block_id == BlockId.ECHO_INTENSITY_DATA:
self._parse_echo(offset)
elif block_id == BlockId.PERCENT_GOOD_DATA:
self._parse_percent_good(offset)
elif block_id == BlockId.BOTTOM_TRACK:
self._parse_bottom_track(offset)
elif block_id == BlockId.AUV_NAV_DATA:
pass
elif block_id == BlockId.STATUS_DATA_ID:
pass
else:
print >> sys.stderr, block_id
raise UnhandledBlockException('Found unhandled data type id: %d' % block_id)
    def _parse_fixed(self, offset):
        """Unpack the fixed leader block into self.fixed_data.

        Holds per-deployment configuration: firmware, beam/cell geometry,
        ping timing, coordinate transform and the sensor-source bitmaps
        decoded later by the _parse_* bitfield helpers.
        """
        fixed_format = (
            ('id', 'H'),
            ('cpu_firmware_version', 'B'),
            ('cpu_firmware_revision', 'B'),
            ('system_configuration', 'H'),
            ('simulation_data_flag', 'B'),
            ('lag_length', 'B'),
            ('number_of_beams', 'B'),
            ('number_of_cells', 'B'),
            ('pings_per_ensemble', 'H'),
            ('depth_cell_length', 'H'),
            ('blank_after_transmit', 'H'),
            ('signal_processing_mode', 'B'),
            ('low_corr_threshold', 'B'),
            ('num_code_reps', 'B'),
            ('minimum_percentage', 'B'),
            ('error_velocity_max', 'H'),
            ('tpp_minutes', 'B'),
            ('tpp_seconds', 'B'),
            ('tpp_hundredths', 'B'),
            ('coord_transform', 'B'),
            ('heading_alignment', 'H'),
            ('heading_bias', 'H'),
            ('sensor_source', 'B'),
            ('sensor_available', 'B'),
            ('bin_1_distance', 'H'),
            ('transmit_pulse_length', 'H'),
            ('starting_depth_cell', 'B'),
            ('ending_depth_cell', 'B'),
            ('false_target_threshold', 'B'),
            ('spare1', 'B'),
            ('transmit_lag_distance', 'H'),
            ('cpu_board_serial_number', 'Q'),
            ('system_bandwidth', 'H'),
            ('system_power', 'B'),
            ('spare2', 'B'),
            ('serial_number', 'I'),
            ('beam_angle', 'B')
        )
        self.fixed_data = self._unpack_from_format('fixed', fixed_format, offset)
    def _parse_variable(self, offset):
        """Unpack the variable leader block into self.variable_data.

        Holds per-ensemble state: ensemble counter, real-time clock,
        attitude, environment readings, health bits and the error status
        word decoded later by _parse_error_word.
        """
        variable_format = (
            ('id', 'H'),
            ('ensemble_number', 'H'),
            ('rtc_year', 'B'),
            ('rtc_month', 'B'),
            ('rtc_day', 'B'),
            ('rtc_hour', 'B'),
            ('rtc_minute', 'B'),
            ('rtc_second', 'B'),
            ('rtc_hundredths', 'B'),
            ('ensemble_roll_over', 'B'),
            ('bit_result', 'H'),
            ('speed_of_sound', 'H'),
            ('depth_of_transducer', 'H'),
            ('heading', 'H'),
            ('pitch', 'h'),
            ('roll', 'h'),
            ('salinity', 'H'),
            ('temperature', 'h'),
            ('mpt_minutes', 'B'),
            ('mpt_seconds', 'B'),
            ('mpt_hundredths', 'B'),
            ('heading_standard_deviation', 'B'),
            ('pitch_standard_deviation', 'B'),
            ('roll_standard_deviation', 'B'),
            ('transmit_current', 'B'),
            ('transmit_voltage', 'B'),
            ('ambient_temperature', 'B'),
            ('pressure_positive', 'B'),
            ('pressure_negative', 'B'),
            ('attitude_temperature', 'B'),
            ('attitude', 'B'),
            ('contamination_sensor', 'B'),
            ('error_status_word', 'I'),
            ('reserved', 'H'),
            ('pressure', 'I'),
            ('pressure_variance', 'I'),
            ('spare', 'B'),
            ('rtc_y2k_century', 'B'),
            ('rtc_y2k_year', 'B'),
            ('rtc_y2k_month', 'B'),
            ('rtc_y2k_day', 'B'),
            ('rtc_y2k_hour', 'B'),
            ('rtc_y2k_minute', 'B'),
            ('rtc_y2k_seconds', 'B'),
            ('rtc_y2k_hundredths', 'B')
        )
        self.variable_data = self._unpack_from_format('variable', variable_format, offset)
    def _parse_velocity(self, offset):
        # per-cell velocities: signed 16-bit value per beam
        self.velocities = self._unpack_cell_data('velocity', 'h', offset)
    def _parse_correlation(self, offset):
        # per-cell correlation magnitudes: unsigned byte per beam
        self.correlation_magnitudes = self._unpack_cell_data('correlation', 'B', offset)
    def _parse_echo(self, offset):
        # per-cell echo intensities: unsigned byte per beam
        self.echo_intensity = self._unpack_cell_data('echo_intensity', 'B', offset)
    def _parse_percent_good(self, offset):
        # per-cell percent-good values: unsigned byte per beam
        self.percent_good = self._unpack_cell_data('percent_good', 'B', offset)
    def _parse_bottom_track(self, offset):
        """Unpack the bottom-track block into self.bottom_track.

        Only present when the instrument reports bottom tracking; this is
        the one parsed attribute not pre-initialized in __init__.
        """
        bottom_track_format = (
            ('id', 'H'),
            ('pings_per_ensemble', 'H'),
            ('delay_before_reacquire', 'H'),
            ('correlation_mag_min', 'B'),
            ('eval_amplitude_min', 'B'),
            ('percent_good_minimum', 'B'),
            ('mode', 'B'),
            ('error_velocity_max', 'H'),
            ('reserved', 'I'),
            ('range_1', 'H'),
            ('range_2', 'H'),
            ('range_3', 'H'),
            ('range_4', 'H'),
            ('velocity_1', 'h'),
            ('velocity_2', 'h'),
            ('velocity_3', 'h'),
            ('velocity_4', 'h'),
            ('corr_1', 'B'),
            ('corr_2', 'B'),
            ('corr_3', 'B'),
            ('corr_4', 'B'),
            ('amp_1', 'B'),
            ('amp_2', 'B'),
            ('amp_3', 'B'),
            ('amp_4', 'B'),
            ('pcnt_1', 'B'),
            ('pcnt_2', 'B'),
            ('pcnt_3', 'B'),
            ('pcnt_4', 'B'),
            ('ref_layer_min', 'H'),
            ('ref_layer_near', 'H'),
            ('ref_layer_far', 'H'),
            ('ref_velocity_1', 'h'),
            ('ref_velocity_2', 'h'),
            ('ref_velocity_3', 'h'),
            ('ref_velocity_4', 'h'),
            ('ref_corr_1', 'B'),
            ('ref_corr_2', 'B'),
            ('ref_corr_3', 'B'),
            ('ref_corr_4', 'B'),
            ('ref_amp_1', 'B'),
            ('ref_amp_2', 'B'),
            ('ref_amp_3', 'B'),
            ('ref_amp_4', 'B'),
            ('ref_pcnt_1', 'B'),
            ('ref_pcnt_2', 'B'),
            ('ref_pcnt_3', 'B'),
            ('ref_pcnt_4', 'B'),
            ('max_depth', 'H'),
            ('rssi_1', 'B'),
            ('rssi_2', 'B'),
            ('rssi_3', 'B'),
            ('rssi_4', 'B'),
            ('gain', 'B'),
            ('range_msb_1', 'B'),
            ('range_msb_2', 'B'),
            ('range_msb_3', 'B'),
            ('range_msb_4', 'B'),
        )
        self.bottom_track = self._unpack_from_format('bottom_track', bottom_track_format, offset)
    def _parse_sysconfig(self):
        """
        Decode fixed_data.system_configuration per the bit layout below.

        LSB
        BITS 7 6 5 4 3 2 1 0
        - - - - - 0 0 0 75-kHz SYSTEM
        - - - - - 0 0 1 150-kHz SYSTEM
        - - - - - 0 1 0 300-kHz SYSTEM
        - - - - - 0 1 1 600-kHz SYSTEM
        - - - - - 1 0 0 1200-kHz SYSTEM
        - - - - - 1 0 1 2400-kHz SYSTEM
        - - - - 0 - - - CONCAVE BEAM PAT.
        - - - - 1 - - - CONVEX BEAM PAT.
        - - 0 0 - - - - SENSOR CONFIG #1
        - - 0 1 - - - - SENSOR CONFIG #2
        - - 1 0 - - - - SENSOR CONFIG #3
        - 0 - - - - - - XDCR HD NOT ATT.
        - 1 - - - - - - XDCR HD ATTACHED
        0 - - - - - - - DOWN FACING BEAM
        1 - - - - - - - UP-FACING BEAM
        MSB
        BITS 7 6 5 4 3 2 1 0
        - - - - - - 0 0 15E BEAM ANGLE
        - - - - - - 0 1 20E BEAM ANGLE
        - - - - - - 1 0 30E BEAM ANGLE
        - - - - - - 1 1 OTHER BEAM ANGLE
        0 1 0 0 - - - - 4-BEAM JANUS CONFIG
        0 1 0 1 - - - - 5-BM JANUS CFIG DEMOD)
        1 1 1 1 - - - - 5-BM JANUS CFIG.(2 DEMD)
        """
        # lookup indexed by the low three bits (values are kHz)
        frequencies = [75, 150, 300, 600, 1200, 2400]
        sysconfig_format = (
            ('frequency', 0b111, frequencies),
            ('beam_pattern', 0b1000, None),
            ('sensor_config', 0b110000, None),
            ('xdcr_head_attached', 0b1000000, None),
            ('beam_facing', 0b10000000, None),
            ('beam_angle', 0b11 << 8, None),
            ('janus_config', 0b11110000 << 8, None))
        self.sysconfig = self._unpack_bitmapped('sysconfig', sysconfig_format, self.fixed_data.system_configuration)
    def _parse_coord_transform(self):
        """
        Decode fixed_data.coord_transform per the bit layout below.

        xxx00xxx = NO TRANSFORMATION (BEAM COORDINATES)
        xxx01xxx = INSTRUMENT COORDINATES
        xxx10xxx = SHIP COORDINATES
        xxx11xxx = EARTH COORDINATES
        xxxxx1xx = TILTS (PITCH AND ROLL) USED IN SHIP OR EARTH TRANSFORMATION
        xxxxxx1x = 3-BEAM SOLUTION USED IF ONE BEAM IS BELOW THE CORRELATION THRESHOLD SET BY THE WC-COMMAND
        xxxxxxx1 = BIN MAPPING USED
        """
        coord_transform_format = (
            ('coord_transform', 0b11000, None),
            ('tilts_used', 0b100, None),
            ('three_beam_used', 0b10, None),
            ('bin_mapping_used', 0b1, None))
        self.coord_transform = self._unpack_bitmapped('coord_transform', coord_transform_format,
                                                      self.fixed_data.coord_transform)
    def _parse_sensor_source(self, glider):
        """
        Decode fixed_data.sensor_source; the glider (ExplorerDVL) layout is
        shifted one bit left and adds a temperature_eu field.

        FIELD DESCRIPTION
        x1xxxxxx = CALCULATES EC (SPEED OF SOUND) FROM ED, ES, AND ET
        xx1xxxxx = USES ED FROM DEPTH SENSOR
        xxx1xxxx = USES EH FROM TRANSDUCER HEADING SENSOR
        xxxx1xxx = USES EP FROM TRANSDUCER PITCH SENSOR
        xxxxx1xx = USES ER FROM TRANSDUCER ROLL SENSOR
        xxxxxx1x = USES ES (SALINITY) FROM CONDUCTIVITY SENSOR
        xxxxxxx1 = USES ET FROM TRANSDUCER TEMPERATURE SENSOR
        FIELD DESCRIPTION (ExplorerDVL)
        1xxxxxxx = CALCULATES EC (SPEED OF SOUND) FROM ED, ES, AND ET
        x1xxxxxx = USES ED FROM DEPTH SENSOR
        xx1xxxxx = USES EH FROM TRANSDUCER HEADING SENSOR
        xxx1xxxx = USES EP FROM TRANSDUCER PITCH SENSOR
        xxxx1xxx = USES ER FROM TRANSDUCER ROLL SENSOR
        xxxxx1xx = USES ES (SALINITY) FROM CONDUCTIVITY SENSOR
        xxxxxx1x = USES ET FROM TRANSDUCER TEMPERATURE SENSOR
        xxxxxxx1 = USES EU FROM TRANSDUCER TEMPERATURE SENSOR
        """
        if glider:
            sensor_source_format = (
                ('calculate_ec', 0b10000000, None),
                ('depth_used', 0b1000000, None),
                ('heading_used', 0b100000, None),
                ('pitch_used', 0b10000, None),
                ('roll_used', 0b1000, None),
                ('conductivity_used', 0b100, None),
                ('temperature_used', 0b10, None),
                ('temperature_eu_used', 0b1, None))
            self.sensor_source = self._unpack_bitmapped('sensor_source_glider', sensor_source_format,
                                                        self.fixed_data.sensor_source)
        else:
            sensor_source_format = (
                ('calculate_ec', 0b1000000, None),
                ('depth_used', 0b100000, None),
                ('heading_used', 0b10000, None),
                ('pitch_used', 0b1000, None),
                ('roll_used', 0b100, None),
                ('conductivity_used', 0b10, None),
                ('temperature_used', 0b1, None))
            self.sensor_source = self._unpack_bitmapped('sensor_source', sensor_source_format,
                                                        self.fixed_data.sensor_source)
    def _parse_sensor_avail(self, glider):
        """
        Decode fixed_data.sensor_available.

        Fields match sensor source above (glider layout again shifted one
        bit left with the extra temperature_eu field).
        """
        if glider:
            sensor_avail_format = (
                ('speed_avail', 0b10000000, None),
                ('depth_avail', 0b1000000, None),
                ('heading_avail', 0b100000, None),
                ('pitch_avail', 0b10000, None),
                ('roll_avail', 0b1000, None),
                ('conductivity_avail', 0b100, None),
                ('temperature_avail', 0b10, None),
                ('temperature_eu_avail', 0b1, None))
            self.sensor_avail = self._unpack_bitmapped('sensor_avail_glider', sensor_avail_format,
                                                       self.fixed_data.sensor_available)
        else:
            sensor_avail_format = (
                ('speed_avail', 0b1000000, None),
                ('depth_avail', 0b100000, None),
                ('heading_avail', 0b10000, None),
                ('pitch_avail', 0b1000, None),
                ('roll_avail', 0b100, None),
                ('conductivity_avail', 0b10, None),
                ('temperature_avail', 0b1, None))
            self.sensor_avail = self._unpack_bitmapped('sensor_avail', sensor_avail_format,
                                                       self.fixed_data.sensor_available)
    def _parse_bit_result(self):
        """
        Decode variable_data.bit_result (built-in test result word).

        BYTE 13 BYTE 14 (BYTE 14 RESERVED FOR FUTURE USE)
        1xxxxxxx xxxxxxxx = RESERVED
        x1xxxxxx xxxxxxxx = RESERVED
        xx1xxxxx xxxxxxxx = RESERVED
        xxx1xxxx xxxxxxxx = DEMOD 1 ERROR
        xxxx1xxx xxxxxxxx = DEMOD 0 ERROR
        xxxxx1xx xxxxxxxx = RESERVED
        xxxxxx1x xxxxxxxx = TIMING CARD ERROR
        xxxxxxx1 xxxxxxxx = RESERVED
        """
        # only the three meaningful (non-reserved) bits are extracted
        bit_result_format = (
            ('demod1_error', 0b10000, None),
            ('demod0_error', 0b1000, None),
            ('timing_card_error', 0b10, None))
        self.bit_result = self._unpack_bitmapped('bit_result', bit_result_format, self.variable_data.bit_result)
    def _parse_error_word(self):
        """
        Decode variable_data.error_status_word per the tables below.

        Low 16 BITS
        LSB
        BITS 07 06 05 04 03 02 01 00
        x x x x x x x 1 Bus Error exception
        x x x x x x 1 x Address Error exception
        x x x x x 1 x x Illegal Instruction exception
        x x x x 1 x x x Zero Divide exception
        x x x 1 x x x x Emulator exception
        x x 1 x x x x x Unassigned exception
        x 1 x x x x x x Watchdog restart occurred
        1 x x x x x x x Battery Saver power
        87-88 44 Low 16 BITS
        MSB
        BITS 15 14 13 12 11 10 09 08
        x x x x x x x 1 Pinging
        x x x x x x 1 x Not Used
        x x x x x 1 x x Not Used
        x x x x 1 x x x Not Used
        x x x 1 x x x x Not Used
        x x 1 x x x x x Not Used
        x 1 x x x x x x Cold Wakeup occurred
        1 x x x x x x x Unknown Wakeup occurred
        89-90 45 High 16 BITS
        LSB
        BITS 24 23 22 21 20 19 18 17
        x x x x x x x 1 Clock Read error occurred
        x x x x x x 1 x Unexpected alarm
        x x x x x 1 x x Clock jump forward
        x x x x 1 x x x Clock jump backward
        x x x 1 x x x x Not Used
        x x 1 x x x x x Not Used
        x 1 x x x x x x Not Used
        1 x x x x x x x Not Used
        High 16 BITS
        MSB
        BITS 32 31 30 29 28 27 26 25
        x x x x x x x 1 Not Used
        x x x x x x 1 x Not Used
        x x x x x 1 x x Not Used
        x x x x 1 x x x Power Fail (Unrecorded)
        x x x 1 x x x x Spurious level 4 intr (DSP)
        x x 1 x x x x x Spurious level 5 intr (UART)
        x 1 x x x x x x Spurious level 6 intr (CLOCK)
        1 x x x x x x x Level 7 interrupt occurred
        """
        # masks mirror the tables above; "Not Used" bits are omitted
        error_word_format = (
            ('bus_error', 0b1, None),
            ('address_error', 0b10, None),
            ('illegal_instruction', 0b100, None),
            ('zero_divide', 0b1000, None),
            ('emulator', 0b10000, None),
            ('unassigned', 0b100000, None),
            ('watchdog_restart', 0b1000000, None),
            ('battery_saver', 0b10000000, None),
            ('pinging', 0b1 << 8, None),
            ('cold_wakeup', 0b1000000 << 8, None),
            ('unknown_wakeup', 0b10000000 << 8, None),
            ('clock_read', 0b1 << 16, None),
            ('unexpected_alarm', 0b10 << 16, None),
            ('clock_jump_forward', 0b100 << 16, None),
            ('clock_jump_backward', 0b1000 << 16, None),
            ('power_fail', 0b1000 << 24, None),
            ('spurious_dsp', 0b10000 << 24, None),
            ('spurious_uart', 0b100000 << 24, None),
            ('spurious_clock', 0b1000000 << 24, None),
            ('level_7_interrupt', 0b10000000 << 24, None),
        )
        self.error_word = self._unpack_bitmapped('error_word', error_word_format, self.variable_data.error_status_word)
# import the necessary packages
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dense
class LeNet:
    """Builder for the classic LeNet CNN architecture (Keras Sequential)."""

    @staticmethod
    def build(width, height, depth, classes, weightsPath=None):
        """Construct the LeNet network.

        :param width: input image width in pixels
        :param height: input image height in pixels
        :param depth: number of input channels (e.g. 1 for grayscale)
        :param classes: number of output classes for the softmax layer
        :param weightsPath: optional path to pre-trained weights to load
        :return: the assembled (and optionally weight-loaded) model
        """
        model = Sequential()
        # first set of CONV => RELU => POOL; only the first layer needs
        # input_shape (channels-last ordering: height, width, depth)
        model.add(Conv2D(20, (5, 5), padding="same", input_shape=(height, width, depth)))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # second set of CONV => RELU => POOL
        # (fix: dropped the redundant input_shape argument -- Keras ignores
        # it on non-first layers and repeating it here was misleading)
        model.add(Conv2D(50, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # fully-connected head: FC => RELU
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))
        # softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        # if a weights path is supplied (indicating that the model was
        # pre-trained), then load the weights
        if weightsPath is not None:
            model.load_weights(weightsPath)
        # return the constructed network architecture
        return model
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
/*
 * It seems that Darwin 9.4 (Mac OS X 10.5) sendfile() has the same
* old bug as early FreeBSD sendfile() syscall:
* http://bugs.freebsd.org/33771
*
* Besides sendfile() has another bug: if one calls sendfile()
* with both a header and a trailer, then sendfile() ignores a file part
* at all and sends only the header and the trailer together.
* For this reason we send a trailer only if there is no a header.
*
 * Although sendfile() allows passing a header or a trailer,
* it may send the header or the trailer and a part of the file
* in different packets. And FreeBSD workaround (TCP_NOPUSH option)
* does not help.
*/
ngx_chain_t *
ngx_darwin_sendfile_chain(ngx_connection_t *c, ngx_chain_t *in, off_t limit)
{
int rc;
off_t send, prev_send, sent;
off_t file_size;
ssize_t n;
ngx_uint_t eintr;
ngx_err_t err;
ngx_buf_t *file;
ngx_event_t *wev;
ngx_chain_t *cl;
ngx_iovec_t header, trailer;
struct sf_hdtr hdtr;
struct iovec headers[NGX_IOVS_PREALLOCATE];
struct iovec trailers[NGX_IOVS_PREALLOCATE];
wev = c->write;
if (!wev->ready) {
return in;
}
#if (NGX_HAVE_KQUEUE)
if ((ngx_event_flags & NGX_USE_KQUEUE_EVENT) && wev->pending_eof) {
(void) ngx_connection_error(c, wev->kq_errno,
"kevent() reported about an closed connection");
wev->error = 1;
return NGX_CHAIN_ERROR;
}
#endif
/* the maximum limit size is the maximum size_t value - the page size */
if (limit == 0 || limit > (off_t) (NGX_MAX_SIZE_T_VALUE - ngx_pagesize)) {
limit = NGX_MAX_SIZE_T_VALUE - ngx_pagesize;
}
send = 0;
header.iovs = headers;
header.nalloc = NGX_IOVS_PREALLOCATE;
trailer.iovs = trailers;
trailer.nalloc = NGX_IOVS_PREALLOCATE;
for ( ;; ) {
eintr = 0;
prev_send = send;
/* create the header iovec and coalesce the neighbouring bufs */
cl = ngx_output_chain_to_iovec(&header, in, limit - send, c->log);
if (cl == NGX_CHAIN_ERROR) {
return NGX_CHAIN_ERROR;
}
send += header.size;
if (cl && cl->buf->in_file && send < limit) {
file = cl->buf;
/* coalesce the neighbouring file bufs */
file_size = ngx_chain_coalesce_file(&cl, limit - send);
send += file_size;
if (header.count == 0 && send < limit) {
/*
* create the trailer iovec and coalesce the neighbouring bufs
*/
cl = ngx_output_chain_to_iovec(&trailer, cl, limit - send,
c->log);
if (cl == NGX_CHAIN_ERROR) {
return NGX_CHAIN_ERROR;
}
send += trailer.size;
} else {
trailer.count = 0;
}
/*
* sendfile() returns EINVAL if sf_hdtr's count is 0,
* but corresponding pointer is not NULL
*/
hdtr.headers = header.count ? header.iovs : NULL;
hdtr.hdr_cnt = header.count;
hdtr.trailers = trailer.count ? trailer.iovs : NULL;
hdtr.trl_cnt = trailer.count;
sent = header.size + file_size;
ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
"sendfile: @%O %O h:%uz",
file->file_pos, sent, header.size);
rc = sendfile(file->file->fd, c->fd, file->file_pos,
&sent, &hdtr, 0);
if (rc == -1) {
err = ngx_errno;
switch (err) {
case NGX_EAGAIN:
break;
case NGX_EINTR:
eintr = 1;
break;
default:
wev->error = 1;
(void) ngx_connection_error(c, err, "sendfile() failed");
return NGX_CHAIN_ERROR;
}
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, err,
"sendfile() sent only %O bytes", sent);
}
if (rc == 0 && sent == 0) {
/*
* if rc and sent equal to zero, then someone
* has truncated the file, so the offset became beyond
* the end of the file
*/
ngx_log_error(NGX_LOG_ALERT, c->log, 0,
"sendfile() reported that \"%s\" was truncated",
file->file->name.data);
return NGX_CHAIN_ERROR;
}
ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0,
"sendfile: %d, @%O %O:%O",
rc, file->file_pos, sent, file_size + header.size);
} else {
n = ngx_writev(c, &header);
if (n == NGX_ERROR) {
return NGX_CHAIN_ERROR;
}
sent = (n == NGX_AGAIN) ? 0 : n;
}
c->sent += sent;
in = ngx_chain_update_sent(in, sent);
if (eintr) {
send = prev_send + sent;
continue;
}
if (send - prev_send != sent) {
wev->ready = 0;
return in;
}
if (send >= limit || in == NULL) {
return in;
}
}
} | c | github | https://github.com/nginx/nginx | src/os/unix/ngx_darwin_sendfile_chain.c |
#include "git-compat-util.h"
#include "fetch-negotiator.h"
#include "negotiator/default.h"
#include "negotiator/skipping.h"
#include "negotiator/noop.h"
#include "repository.h"
void fetch_negotiator_init(struct repository *r,
struct fetch_negotiator *negotiator)
{
prepare_repo_settings(r);
switch(r->settings.fetch_negotiation_algorithm) {
case FETCH_NEGOTIATION_SKIPPING:
skipping_negotiator_init(negotiator);
return;
case FETCH_NEGOTIATION_NOOP:
noop_negotiator_init(negotiator);
return;
case FETCH_NEGOTIATION_CONSECUTIVE:
default_negotiator_init(negotiator);
return;
}
}
void fetch_negotiator_init_noop(struct fetch_negotiator *negotiator)
{
noop_negotiator_init(negotiator);
} | c | github | https://github.com/git/git | fetch-negotiator.c |
'use strict';
const common = require('../common.js');
const networkInterfaces = require('os').networkInterfaces;
const bench = common.createBenchmark(main, {
n: [1e4],
});
function main({ n }) {
bench.start();
for (let i = 0; i < n; ++i)
networkInterfaces();
bench.end(n);
} | javascript | github | https://github.com/nodejs/node | benchmark/os/networkInterfaces.js |
/*
* Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.http
import io.ktor.util.*
/** Separator symbols listed in RFC https://tools.ietf.org/html/rfc2616#section-2.2 */
private val HeaderFieldValueSeparators =
setOf('(', ')', '<', '>', '@', ',', ';', ':', '\\', '\"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t', '\n', '\r')
/**
* Represents a header value that consist of [content] followed by [parameters].
* Useful for headers such as `Content-Type`, `Content-Disposition` and so on.
*
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.HeaderValueWithParameters)
*
* @property content header's content without parameters
* @property parameters
*/
public abstract class HeaderValueWithParameters(
protected val content: String,
public val parameters: List<HeaderValueParam> = emptyList()
) {
/**
* The first value for the parameter with [name] comparing case-insensitively or `null` if no such parameters found
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.HeaderValueWithParameters.parameter)
*/
public fun parameter(name: String): String? {
for (index in 0..parameters.lastIndex) {
val parameter = parameters[index]
if (parameter.name.equals(name, ignoreCase = true)) {
return parameter.value
}
}
return null
}
override fun toString(): String = when {
parameters.isEmpty() -> content
else -> {
val size = content.length + parameters.sumOf { it.name.length + it.value.length + 3 }
StringBuilder(size).apply {
append(content)
for (index in 0..parameters.lastIndex) {
val element = parameters[index]
append("; ")
append(element.name)
append("=")
element.value.escapeIfNeededTo(this)
}
}.toString()
}
}
public companion object {
/**
* Parse header with parameter and pass it to [init] function to instantiate particular type
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.HeaderValueWithParameters.Companion.parse)
*/
public inline fun <R> parse(value: String, init: (String, List<HeaderValueParam>) -> R): R {
val headerValue = parseHeaderValue(value).last()
return init(headerValue.value, headerValue.params)
}
}
}
/**
* Append formatted header value to the builder
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.append)
*/
public fun StringValuesBuilder.append(name: String, value: HeaderValueWithParameters) {
append(name, value.toString())
}
/**
* Escape using double quotes if needed or keep as is if no dangerous strings found
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.escapeIfNeeded)
*/
public fun String.escapeIfNeeded(): String = when {
needQuotes() -> quote()
else -> this
}
@Suppress("NOTHING_TO_INLINE")
private inline fun String.escapeIfNeededTo(out: StringBuilder) {
when {
needQuotes() -> out.append(quote())
else -> out.append(this)
}
}
private fun String.needQuotes(): Boolean {
if (isEmpty()) return true
if (isQuoted()) return false
for (element in this) {
if (HeaderFieldValueSeparators.contains(element)) return true
}
return false
}
private fun String.isQuoted(): Boolean {
if (length < 2) {
return false
}
if (first() != '"' || last() != '"') {
return false
}
var startIndex = 1
do {
val index = indexOf('"', startIndex)
if (index == lastIndex) {
break
}
var slashesCount = 0
var slashIndex = index - 1
while (this[slashIndex] == '\\') {
slashesCount++
slashIndex--
}
if (slashesCount % 2 == 0) {
return false
}
startIndex = index + 1
} while (startIndex < length)
return true
}
/**
* Escape string using double quotes
*
* [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.http.quote)
*/
public fun String.quote(): String = buildString { this@quote.quoteTo(this) }
private fun String.quoteTo(out: StringBuilder) {
out.append("\"")
for (element in this) {
when (val ch = element) {
'\\' -> out.append("\\\\")
'\n' -> out.append("\\n")
'\r' -> out.append("\\r")
'\t' -> out.append("\\t")
'\"' -> out.append("\\\"")
else -> out.append(ch)
}
}
out.append("\"")
} | kotlin | github | https://github.com/ktorio/ktor | ktor-http/common/src/io/ktor/http/HeaderValueWithParameters.kt |
package kotlinx.coroutines
import kotlinx.coroutines.testing.*
import org.junit.*
class CancelledAwaitStressTest : TestBase() {
private val n = 1000 * stressTestMultiplier
/**
* Tests that memory does not leak from cancelled [Deferred.await]
*/
@Test
fun testCancelledAwait() = runTest {
val d = async {
delay(Long.MAX_VALUE)
}
repeat(n) {
val waiter = launch(start = CoroutineStart.UNDISPATCHED) {
val a = ByteArray(10000000) // allocate 10M of memory here
d.await()
keepMe(a) // make sure it is kept in state machine
}
waiter.cancel() // cancel await
yield() // complete the waiter job, release its memory
}
d.cancel() // done test
}
/**
* Tests that memory does not leak from cancelled [Job.join]
*/
@Test
fun testCancelledJoin() = runTest {
val j = launch {
delay(Long.MAX_VALUE)
}
repeat(n) {
val joiner = launch(start = CoroutineStart.UNDISPATCHED) {
val a = ByteArray(10000000) // allocate 10M of memory here
j.join()
keepMe(a) // make sure it is kept in state machine
}
joiner.cancel() // cancel join
yield() // complete the joiner job, release its memory
}
j.cancel() // done test
}
private fun keepMe(a: ByteArray) {
// does nothing, makes sure the variable is kept in state-machine
}
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | kotlinx-coroutines-core/jvm/test/CancelledAwaitStressTest.kt |
import pickle
from django import forms
from django.core.exceptions import ValidationError
from django.db import models
from django.test import SimpleTestCase, TestCase
from django.utils.choices import CallableChoiceIterator
from django.utils.functional import lazy
from .models import (
Bar,
Choiceful,
Foo,
RenamedField,
VerboseNameField,
Whiz,
WhizDelayed,
WhizIter,
WhizIterEmpty,
)
class Nested:
class Field(models.Field):
pass
class BasicFieldTests(SimpleTestCase):
def test_show_hidden_initial(self):
"""
Fields with choices respect show_hidden_initial as a kwarg to
formfield().
"""
choices = [(0, 0), (1, 1)]
model_field = models.Field(choices=choices)
form_field = model_field.formfield(show_hidden_initial=True)
self.assertTrue(form_field.show_hidden_initial)
form_field = model_field.formfield(show_hidden_initial=False)
self.assertFalse(form_field.show_hidden_initial)
def test_field_repr(self):
"""
__repr__() of a field displays its name.
"""
f = Foo._meta.get_field("a")
self.assertEqual(repr(f), "<django.db.models.fields.CharField: a>")
f = models.fields.CharField()
self.assertEqual(repr(f), "<django.db.models.fields.CharField>")
def test_field_repr_nested(self):
"""__repr__() uses __qualname__ for nested class support."""
self.assertEqual(repr(Nested.Field()), "<model_fields.tests.Nested.Field>")
def test_field_name(self):
"""
A defined field name (name="fieldname") is used instead of the model
model's attribute name (modelname).
"""
instance = RenamedField()
self.assertTrue(hasattr(instance, "get_fieldname_display"))
self.assertFalse(hasattr(instance, "get_modelname_display"))
def test_field_verbose_name(self):
m = VerboseNameField
for i in range(1, 22):
self.assertEqual(
m._meta.get_field("field%d" % i).verbose_name, "verbose field%d" % i
)
self.assertEqual(m._meta.get_field("id").verbose_name, "verbose pk")
def test_choices_form_class(self):
"""Can supply a custom choices form class to Field.formfield()"""
choices = [("a", "a")]
field = models.CharField(choices=choices)
klass = forms.TypedMultipleChoiceField
self.assertIsInstance(field.formfield(choices_form_class=klass), klass)
def test_formfield_disabled(self):
"""Field.formfield() sets disabled for fields with choices."""
field = models.CharField(choices=[("a", "b")])
form_field = field.formfield(disabled=True)
self.assertIs(form_field.disabled, True)
def test_field_str(self):
f = models.Field()
self.assertEqual(str(f), "<django.db.models.fields.Field>")
f = Foo._meta.get_field("a")
self.assertEqual(str(f), "model_fields.Foo.a")
def test_field_ordering(self):
"""Fields are ordered based on their creation."""
f1 = models.Field()
f2 = models.Field(auto_created=True)
f3 = models.Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ""))
def test_field_instance_is_picklable(self):
"""Field instances can be pickled."""
field = models.Field(max_length=100, default="a string")
# Must be picklable with this cached property populated (#28188).
field._get_default
pickle.dumps(field)
def test_deconstruct_nested_field(self):
"""deconstruct() uses __qualname__ for nested class support."""
name, path, args, kwargs = Nested.Field().deconstruct()
self.assertEqual(path, "model_fields.tests.Nested.Field")
def test_abstract_inherited_fields(self):
"""Field instances from abstract models are not equal."""
class AbstractModel(models.Model):
field = models.IntegerField()
class Meta:
abstract = True
class InheritAbstractModel1(AbstractModel):
pass
class InheritAbstractModel2(AbstractModel):
pass
abstract_model_field = AbstractModel._meta.get_field("field")
inherit1_model_field = InheritAbstractModel1._meta.get_field("field")
inherit2_model_field = InheritAbstractModel2._meta.get_field("field")
self.assertNotEqual(abstract_model_field, inherit1_model_field)
self.assertNotEqual(abstract_model_field, inherit2_model_field)
self.assertNotEqual(inherit1_model_field, inherit2_model_field)
self.assertLess(abstract_model_field, inherit1_model_field)
self.assertLess(abstract_model_field, inherit2_model_field)
self.assertLess(inherit1_model_field, inherit2_model_field)
def test_hash_immutability(self):
field = models.IntegerField()
field_hash = hash(field)
class MyModel(models.Model):
rank = field
self.assertEqual(field_hash, hash(field))
class ChoicesTests(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.no_choices = Choiceful._meta.get_field("no_choices")
cls.empty_choices = Choiceful._meta.get_field("empty_choices")
cls.empty_choices_bool = Choiceful._meta.get_field("empty_choices_bool")
cls.empty_choices_text = Choiceful._meta.get_field("empty_choices_text")
cls.with_choices = Choiceful._meta.get_field("with_choices")
cls.with_choices_dict = Choiceful._meta.get_field("with_choices_dict")
cls.with_choices_nested_dict = Choiceful._meta.get_field(
"with_choices_nested_dict"
)
cls.choices_from_enum = Choiceful._meta.get_field("choices_from_enum")
cls.choices_from_iterator = Choiceful._meta.get_field("choices_from_iterator")
cls.choices_from_callable = Choiceful._meta.get_field("choices_from_callable")
def test_choices(self):
self.assertIsNone(self.no_choices.choices)
self.assertEqual(self.empty_choices.choices, [])
self.assertEqual(self.empty_choices_bool.choices, [])
self.assertEqual(self.empty_choices_text.choices, [])
self.assertEqual(self.with_choices.choices, [(1, "A")])
self.assertEqual(self.with_choices_dict.choices, [(1, "A")])
self.assertEqual(self.with_choices_nested_dict.choices, [("Thing", [(1, "A")])])
self.assertEqual(
self.choices_from_iterator.choices, [(0, "0"), (1, "1"), (2, "2")]
)
self.assertIsInstance(
self.choices_from_callable.choices, CallableChoiceIterator
)
self.assertEqual(
self.choices_from_callable.choices.func(), [(0, "0"), (1, "1"), (2, "2")]
)
def test_choices_slice(self):
for choices, expected_slice in [
(self.empty_choices.choices, []),
(self.empty_choices_bool.choices, []),
(self.empty_choices_text.choices, []),
(self.with_choices.choices, [(1, "A")]),
(self.with_choices_dict.choices, [(1, "A")]),
(self.with_choices_nested_dict.choices, [("Thing", [(1, "A")])]),
(self.choices_from_iterator.choices, [(0, "0"), (1, "1")]),
(self.choices_from_callable.choices.func(), [(0, "0"), (1, "1")]),
(self.choices_from_callable.choices, [(0, "0"), (1, "1")]),
]:
with self.subTest(choices=choices):
self.assertEqual(choices[:2], expected_slice)
def test_choices_negative_index(self):
for choices, expected_choice in [
(self.with_choices.choices, (1, "A")),
(self.with_choices_dict.choices, (1, "A")),
(self.with_choices_nested_dict.choices, ("Thing", [(1, "A")])),
(self.choices_from_iterator.choices, (2, "2")),
(self.choices_from_callable.choices.func(), (2, "2")),
(self.choices_from_callable.choices, (2, "2")),
]:
with self.subTest(choices=choices):
self.assertEqual(choices[-1], expected_choice)
def test_flatchoices(self):
self.assertEqual(self.no_choices.flatchoices, [])
self.assertEqual(self.empty_choices.flatchoices, [])
self.assertEqual(self.empty_choices_bool.flatchoices, [])
self.assertEqual(self.empty_choices_text.flatchoices, [])
self.assertEqual(self.with_choices.flatchoices, [(1, "A")])
self.assertEqual(self.with_choices_dict.flatchoices, [(1, "A")])
self.assertEqual(self.with_choices_nested_dict.flatchoices, [(1, "A")])
self.assertEqual(
self.choices_from_iterator.flatchoices, [(0, "0"), (1, "1"), (2, "2")]
)
self.assertEqual(
self.choices_from_callable.flatchoices, [(0, "0"), (1, "1"), (2, "2")]
)
def test_check(self):
self.assertEqual(Choiceful.check(), [])
def test_invalid_choice(self):
model_instance = None # Actual model instance not needed.
self.no_choices.validate(0, model_instance)
msg = "['Value 99 is not a valid choice.']"
with self.assertRaisesMessage(ValidationError, msg):
self.empty_choices.validate(99, model_instance)
with self.assertRaisesMessage(ValidationError, msg):
self.with_choices.validate(99, model_instance)
def test_formfield(self):
no_choices_formfield = self.no_choices.formfield()
self.assertIsInstance(no_choices_formfield, forms.IntegerField)
fields = (
self.empty_choices,
self.empty_choices_bool,
self.empty_choices_text,
self.with_choices,
self.with_choices_dict,
self.with_choices_nested_dict,
self.choices_from_enum,
self.choices_from_iterator,
self.choices_from_callable,
)
for field in fields:
with self.subTest(field=field):
self.assertIsInstance(field.formfield(), forms.ChoiceField)
def test_choices_from_enum(self):
# Choices class was transparently resolved when given as argument.
self.assertEqual(self.choices_from_enum.choices, Choiceful.Suit.choices)
self.assertEqual(self.choices_from_enum.flatchoices, Choiceful.Suit.choices)
class GetFieldDisplayTests(SimpleTestCase):
def test_choices_and_field_display(self):
"""
get_choices() interacts with get_FIELD_display() to return the expected
values.
"""
self.assertEqual(Whiz(c=1).get_c_display(), "First") # A nested value
self.assertEqual(Whiz(c=0).get_c_display(), "Other") # A top level value
self.assertEqual(Whiz(c=9).get_c_display(), 9) # Invalid value
self.assertIsNone(Whiz(c=None).get_c_display()) # Blank value
self.assertEqual(Whiz(c="").get_c_display(), "") # Empty value
self.assertEqual(WhizDelayed(c=0).get_c_display(), "Other") # Delayed choices
def test_get_FIELD_display_translated(self):
"""A translated display value is coerced to str."""
val = Whiz(c=5).get_c_display()
self.assertIsInstance(val, str)
self.assertEqual(val, "translated")
def test_overriding_FIELD_display(self):
class FooBar(models.Model):
foo_bar = models.IntegerField(choices=[(1, "foo"), (2, "bar")])
def get_foo_bar_display(self):
return "something"
f = FooBar(foo_bar=1)
self.assertEqual(f.get_foo_bar_display(), "something")
def test_overriding_inherited_FIELD_display(self):
class Base(models.Model):
foo = models.CharField(max_length=254, choices=[("A", "Base A")])
class Meta:
abstract = True
class Child(Base):
foo = models.CharField(
max_length=254, choices=[("A", "Child A"), ("B", "Child B")]
)
self.assertEqual(Child(foo="A").get_foo_display(), "Child A")
self.assertEqual(Child(foo="B").get_foo_display(), "Child B")
def test_iterator_choices(self):
"""
get_choices() works with Iterators.
"""
self.assertEqual(WhizIter(c=1).c, 1) # A nested value
self.assertEqual(WhizIter(c=9).c, 9) # Invalid value
self.assertIsNone(WhizIter(c=None).c) # Blank value
self.assertEqual(WhizIter(c="").c, "") # Empty value
def test_empty_iterator_choices(self):
"""
get_choices() works with empty iterators.
"""
self.assertEqual(WhizIterEmpty(c="a").c, "a") # A nested value
self.assertEqual(WhizIterEmpty(c="b").c, "b") # Invalid value
self.assertIsNone(WhizIterEmpty(c=None).c) # Blank value
self.assertEqual(WhizIterEmpty(c="").c, "") # Empty value
class GetChoicesTests(SimpleTestCase):
def test_empty_choices(self):
choices = []
f = models.CharField(choices=choices)
self.assertEqual(f.get_choices(include_blank=False), choices)
def test_blank_in_choices(self):
choices = [("", "<><>"), ("a", "A")]
f = models.CharField(choices=choices)
self.assertEqual(f.get_choices(include_blank=True), choices)
def test_blank_in_grouped_choices(self):
choices = [
("f", "Foo"),
("b", "Bar"),
(
"Group",
[
("", "No Preference"),
("fg", "Foo"),
("bg", "Bar"),
],
),
]
f = models.CharField(choices=choices)
self.assertEqual(f.get_choices(include_blank=True), choices)
def test_lazy_strings_not_evaluated(self):
lazy_func = lazy(lambda x: 0 / 0, int) # raises ZeroDivisionError if evaluated.
f = models.CharField(choices=[(lazy_func("group"), [("a", "A"), ("b", "B")])])
self.assertEqual(f.get_choices(include_blank=True)[0], ("", "---------"))
class GetChoicesOrderingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.foo1 = Foo.objects.create(a="a", d="12.35")
cls.foo2 = Foo.objects.create(a="b", d="12.34")
cls.bar1 = Bar.objects.create(a=cls.foo1, b="b")
cls.bar2 = Bar.objects.create(a=cls.foo2, b="a")
cls.field = Bar._meta.get_field("a")
def assertChoicesEqual(self, choices, objs):
self.assertEqual(choices, [(obj.pk, str(obj)) for obj in objs])
def test_get_choices(self):
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, ordering=("a",)),
[self.foo1, self.foo2],
)
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, ordering=("-a",)),
[self.foo2, self.foo1],
)
def test_get_choices_default_ordering(self):
self.addCleanup(setattr, Foo._meta, "ordering", Foo._meta.ordering)
Foo._meta.ordering = ("d",)
self.assertChoicesEqual(
self.field.get_choices(include_blank=False), [self.foo2, self.foo1]
)
def test_get_choices_reverse_related_field(self):
self.assertChoicesEqual(
self.field.remote_field.get_choices(include_blank=False, ordering=("a",)),
[self.bar1, self.bar2],
)
self.assertChoicesEqual(
self.field.remote_field.get_choices(include_blank=False, ordering=("-a",)),
[self.bar2, self.bar1],
)
def test_get_choices_reverse_related_field_default_ordering(self):
self.addCleanup(setattr, Bar._meta, "ordering", Bar._meta.ordering)
Bar._meta.ordering = ("b",)
self.assertChoicesEqual(
self.field.remote_field.get_choices(include_blank=False),
[self.bar2, self.bar1],
)
class GetChoicesLimitChoicesToTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.foo1 = Foo.objects.create(a="a", d="12.34")
cls.foo2 = Foo.objects.create(a="b", d="12.34")
cls.bar1 = Bar.objects.create(a=cls.foo1, b="b")
cls.bar2 = Bar.objects.create(a=cls.foo2, b="a")
cls.field = Bar._meta.get_field("a")
def assertChoicesEqual(self, choices, objs):
self.assertCountEqual(choices, [(obj.pk, str(obj)) for obj in objs])
def test_get_choices(self):
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, limit_choices_to={"a": "a"}),
[self.foo1],
)
self.assertChoicesEqual(
self.field.get_choices(include_blank=False, limit_choices_to={}),
[self.foo1, self.foo2],
)
def test_get_choices_reverse_related_field(self):
field = self.field.remote_field
self.assertChoicesEqual(
field.get_choices(include_blank=False, limit_choices_to={"b": "b"}),
[self.bar1],
)
self.assertChoicesEqual(
field.get_choices(include_blank=False, limit_choices_to={}),
[self.bar1, self.bar2],
) | python | github | https://github.com/django/django | tests/model_fields/tests.py |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<title><%= title %></title>
<link rel="stylesheet" href="/stylesheets/style.css">
</head>
<body> | html | github | https://github.com/expressjs/express | examples/ejs/views/header.html |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the inotify wrapper in L{twisted.internet.inotify}.
"""
from twisted.internet import defer, reactor
from twisted.python import filepath, runtime
from twisted.trial import unittest
try:
from twisted.python import _inotify
except ImportError:
inotify = None
else:
from twisted.internet import inotify
class TestINotify(unittest.TestCase):
"""
Define all the tests for the basic functionality exposed by
L{inotify.INotify}.
"""
if not runtime.platform.supportsINotify():
skip = "This platform doesn't support INotify."
def setUp(self):
self.dirname = filepath.FilePath(self.mktemp())
self.dirname.createDirectory()
self.inotify = inotify.INotify()
self.inotify.startReading()
self.addCleanup(self.inotify.loseConnection)
def test_initializationErrors(self):
"""
L{inotify.INotify} emits a C{RuntimeError} when initialized
in an environment that doesn't support inotify as we expect it.
We just try to raise an exception for every possible case in
the for loop in L{inotify.INotify._inotify__init__}.
"""
class FakeINotify:
def init(self):
raise inotify.INotifyError()
self.patch(inotify.INotify, '_inotify', FakeINotify())
self.assertRaises(inotify.INotifyError, inotify.INotify)
def _notificationTest(self, mask, operation, expectedPath=None):
"""
Test notification from some filesystem operation.
@param mask: The event mask to use when setting up the watch.
@param operation: A function which will be called with the
name of a file in the watched directory and which should
trigger the event.
@param expectedPath: Optionally, the name of the path which is
expected to come back in the notification event; this will
also be passed to C{operation} (primarily useful when the
operation is being done to the directory itself, not a
file in it).
@return: A L{Deferred} which fires successfully when the
expected event has been received or fails otherwise.
"""
if expectedPath is None:
expectedPath = self.dirname.child("foo.bar")
notified = defer.Deferred()
def cbNotified((watch, filename, events)):
self.assertEquals(filename, expectedPath)
self.assertTrue(events & mask)
notified.addCallback(cbNotified)
self.inotify.watch(
self.dirname, mask=mask,
callbacks=[lambda *args: notified.callback(args)])
operation(expectedPath)
return notified
def test_access(self):
"""
Reading from a file in a monitored directory sends an
C{inotify.IN_ACCESS} event to the callback.
"""
def operation(path):
path.setContent("foo")
path.getContent()
return self._notificationTest(inotify.IN_ACCESS, operation)
def test_modify(self):
"""
Writing to a file in a monitored directory sends an
C{inotify.IN_MODIFY} event to the callback.
"""
def operation(path):
fObj = path.open("w")
fObj.write('foo')
fObj.close()
return self._notificationTest(inotify.IN_MODIFY, operation)
def test_attrib(self):
"""
Changing the metadata of a a file in a monitored directory
sends an C{inotify.IN_ATTRIB} event to the callback.
"""
def operation(path):
path.touch()
path.touch()
return self._notificationTest(inotify.IN_ATTRIB, operation)
def test_closeWrite(self):
"""
Closing a file which was open for writing in a monitored
directory sends an C{inotify.IN_CLOSE_WRITE} event to the
callback.
"""
def operation(path):
fObj = path.open("w")
fObj.close()
return self._notificationTest(inotify.IN_CLOSE_WRITE, operation)
def test_closeNoWrite(self):
"""
Closing a file which was open for reading but not writing in a
monitored directory sends an C{inotify.IN_CLOSE_NOWRITE} event
to the callback.
"""
def operation(path):
path.touch()
fObj = path.open("r")
fObj.close()
return self._notificationTest(inotify.IN_CLOSE_NOWRITE, operation)
def test_open(self):
"""
Opening a file in a monitored directory sends an
C{inotify.IN_OPEN} event to the callback.
"""
def operation(path):
fObj = path.open("w")
fObj.close()
return self._notificationTest(inotify.IN_OPEN, operation)
def test_movedFrom(self):
"""
Moving a file out of a monitored directory sends an
C{inotify.IN_MOVED_FROM} event to the callback.
"""
def operation(path):
fObj = path.open("w")
fObj.close()
path.moveTo(filepath.FilePath(self.mktemp()))
return self._notificationTest(inotify.IN_MOVED_FROM, operation)
def test_movedTo(self):
"""
Moving a file into a monitored directory sends an
C{inotify.IN_MOVED_TO} event to the callback.
"""
def operation(path):
p = filepath.FilePath(self.mktemp())
p.touch()
p.moveTo(path)
return self._notificationTest(inotify.IN_MOVED_TO, operation)
def test_create(self):
"""
Creating a file in a monitored directory sends an
C{inotify.IN_CREATE} event to the callback.
"""
def operation(path):
fObj = path.open("w")
fObj.close()
return self._notificationTest(inotify.IN_CREATE, operation)
def test_delete(self):
"""
Deleting a file in a monitored directory sends an
C{inotify.IN_DELETE} event to the callback.
"""
def operation(path):
path.touch()
path.remove()
return self._notificationTest(inotify.IN_DELETE, operation)
def test_deleteSelf(self):
"""
Deleting the monitored directory itself sends an
C{inotify.IN_DELETE_SELF} event to the callback.
"""
def operation(path):
path.remove()
return self._notificationTest(
inotify.IN_DELETE_SELF, operation, expectedPath=self.dirname)
def test_moveSelf(self):
"""
Renaming the monitored directory itself sends an
C{inotify.IN_MOVE_SELF} event to the callback.
"""
def operation(path):
path.moveTo(filepath.FilePath(self.mktemp()))
return self._notificationTest(
inotify.IN_MOVE_SELF, operation, expectedPath=self.dirname)
def test_simpleSubdirectoryAutoAdd(self):
"""
L{inotify.INotify} when initialized with autoAdd==True adds
also adds the created subdirectories to the watchlist.
"""
def _callback(wp, filename, mask):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assertTrue(self.inotify._isWatched(subdir))
d.callback(None)
except Exception:
d.errback()
reactor.callLater(0, _)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=True,
callbacks=[_callback])
subdir = self.dirname.child('test')
d = defer.Deferred()
subdir.createDirectory()
return d
def test_simpleDeleteDirectory(self):
"""
L{inotify.INotify} removes a directory from the watchlist when
it's removed from the filesystem.
"""
calls = []
def _callback(wp, filename, mask):
# We are notified before we actually process new
# directories, so we need to defer this check.
def _():
try:
self.assertTrue(self.inotify._isWatched(subdir))
subdir.remove()
except Exception:
d.errback()
def _eb():
# second call, we have just removed the subdir
try:
self.assertTrue(not self.inotify._isWatched(subdir))
d.callback(None)
except Exception:
d.errback()
if not calls:
# first call, it's the create subdir
calls.append(filename)
reactor.callLater(0, _)
else:
reactor.callLater(0, _eb)
checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
self.inotify.watch(
self.dirname, mask=checkMask, autoAdd=True,
callbacks=[_callback])
subdir = self.dirname.child('test')
d = defer.Deferred()
subdir.createDirectory()
return d
def test_ignoreDirectory(self):
"""
L{inotify.INotify.ignore} removes a directory from the watchlist
"""
self.inotify.watch(self.dirname, autoAdd=True)
self.assertTrue(self.inotify._isWatched(self.dirname))
self.inotify.ignore(self.dirname)
self.assertFalse(self.inotify._isWatched(self.dirname))
def test_humanReadableMask(self):
"""
L{inotify.humaReadableMask} translates all the possible event
masks to a human readable string.
"""
for mask, value in inotify._FLAG_TO_HUMAN:
self.assertEquals(inotify.humanReadableMask(mask)[0], value)
checkMask = (
inotify.IN_CLOSE_WRITE | inotify.IN_ACCESS | inotify.IN_OPEN)
self.assertEquals(
set(inotify.humanReadableMask(checkMask)),
set(['close_write', 'access', 'open']))
def test_recursiveWatch(self):
"""
L{inotify.INotify.watch} with recursive==True will add all the
subdirectories under the given path to the watchlist.
"""
subdir = self.dirname.child('test')
subdir2 = subdir.child('test2')
subdir3 = subdir2.child('test3')
subdir3.makedirs()
dirs = [subdir, subdir2, subdir3]
self.inotify.watch(self.dirname, recursive=True)
# let's even call this twice so that we test that nothing breaks
self.inotify.watch(self.dirname, recursive=True)
for d in dirs:
self.assertTrue(self.inotify._isWatched(d))
def test_connectionLostError(self):
    """
    L{inotify.INotify.connectionLost} if there's a problem while closing
    the fd shouldn't raise the exception but should log the error
    """
    import os
    notifier = inotify.INotify()
    # Close the descriptor behind the notifier's back so that its own
    # close attempt during loseConnection fails.
    os.close(notifier._fd)
    notifier.loseConnection()
    # The failure must have been logged rather than raised.
    self.flushLoggedErrors()
def test_noAutoAddSubdirectory(self):
    """
    L{inotify.INotify.watch} with autoAdd==False will stop inotify
    from watching subdirectories created under the watched one.
    """
    def _verify():
        # The notification arrives before the new directory would be
        # processed, so the check runs from a reactor callback.
        try:
            self.assertFalse(self.inotify._isWatched(subdir.path))
            d.callback(None)
        except Exception:
            d.errback()

    def _callback(wp, fp, mask):
        reactor.callLater(0, _verify)

    mask = inotify.IN_ISDIR | inotify.IN_CREATE
    self.inotify.watch(
        self.dirname, mask=mask, autoAdd=False,
        callbacks=[_callback])
    d = defer.Deferred()
    subdir = self.dirname.child('test')
    subdir.createDirectory()
    return d
def test_seriesOfWatchAndIgnore(self):
    """
    L{inotify.INotify} will watch a filepath for events even if the same
    path is repeatedly added/removed/re-added to the watchpoints.
    """
    target = self.dirname.child("foo.bar2")
    target.touch()

    notified = defer.Deferred()

    def cbNotified(result):
        ignored, filename, events = result
        self.assertEquals(filename, target)
        self.assertTrue(events & inotify.IN_DELETE_SELF)

    def callIt(*args):
        notified.callback(args)

    # Watch, ignore, watch again to get into the state being tested.
    self.assertTrue(self.inotify.watch(target, callbacks=[callIt]))
    self.inotify.ignore(target)
    self.assertTrue(
        self.inotify.watch(
            target, mask=inotify.IN_DELETE_SELF, callbacks=[callIt]))

    notified.addCallback(cbNotified)

    # Apparently in kernel version < 2.6.25, inotify has a bug in the way
    # similar events are coalesced. So, be sure to generate a different
    # event here than the touch() at the top of this method might have
    # generated.
    target.remove()

    return notified
def test_ignoreFilePath(self):
    """
    L{inotify.INotify} will ignore a filepath after it has been removed from
    the watch list.
    """
    ignoredPath = self.dirname.child("foo.bar2")
    ignoredPath.touch()
    watchedPath = self.dirname.child("foo.bar3")
    watchedPath.touch()

    notified = defer.Deferred()

    def cbNotified(result):
        ignored, filename, events = result
        # Only the path that stayed on the watchlist may notify us.
        self.assertEquals(filename, watchedPath)
        self.assertTrue(events & inotify.IN_DELETE_SELF)

    def callIt(*args):
        notified.callback(args)

    self.assertTrue(
        self.inotify.watch(
            ignoredPath, inotify.IN_DELETE_SELF, callbacks=[callIt]))
    notified.addCallback(cbNotified)
    self.assertTrue(
        self.inotify.watch(
            watchedPath, inotify.IN_DELETE_SELF, callbacks=[callIt]))
    self.inotify.ignore(ignoredPath)

    # Remove both; the ignored one must stay silent.
    ignoredPath.remove()
    watchedPath.remove()

    return notified
def test_ignoreNonWatchedFile(self):
    """
    L{inotify.INotify} will raise KeyError if a non-watched filepath is
    ignored.
    """
    # The file exists on disk but was never added to the watchlist.
    unwatched = self.dirname.child("foo.ignored")
    unwatched.touch()
    self.assertRaises(KeyError, self.inotify.ignore, unwatched)
def test_complexSubdirectoryAutoAdd(self):
    """
    L{inotify.INotify} with autoAdd==True for a watched path
    generates events for every file or directory already present
    in a newly created subdirectory under the watched one.

    This tests that we solve a race condition in inotify even though
    we may generate duplicate events.
    """
    # Set of distinct paths reported by inotify; duplicates collapse, so
    # six entries means every created path (3 dirs + 3 files) was seen.
    calls = set()

    def _callback(wp, filename, mask):
        # NOTE: this closure reads subdir/subdir2/subdir3/someFiles/d,
        # which are bound later in the method — safe because events only
        # fire after the reactor runs, well after those assignments.
        calls.add(filename)
        if len(calls) == 6:
            try:
                self.assertTrue(self.inotify._isWatched(subdir))
                self.assertTrue(self.inotify._isWatched(subdir2))
                self.assertTrue(self.inotify._isWatched(subdir3))
                created = someFiles + [subdir, subdir2, subdir3]
                self.assertEquals(len(calls), len(created))
                self.assertEquals(calls, set(created))
            except Exception:
                d.errback()
            else:
                # Only fire success when every assertion passed.
                d.callback(None)

    checkMask = inotify.IN_ISDIR | inotify.IN_CREATE
    self.inotify.watch(
        self.dirname, mask=checkMask, autoAdd=True,
        callbacks=[_callback])
    subdir = self.dirname.child('test')
    subdir2 = subdir.child('test2')
    subdir3 = subdir2.child('test3')
    d = defer.Deferred()
    # Creating the whole tree at once exercises the race between the
    # IN_CREATE notification and the auto-add of the new directories.
    subdir3.makedirs()

    someFiles = [subdir.child('file1.dat'),
                 subdir2.child('file2.dat'),
                 subdir3.child('file3.dat')]
    # Add some files in pretty much all the directories so that we
    # see that we process all of them.
    for i, filename in enumerate(someFiles):
        filename.setContent(filename.path)
    return d
import json
from django.conf import settings
from django_statsd.clients import statsd
import commonware.log
import jwt
import requests
log = commonware.log.getLogger('z.crypto')
class SigningError(Exception):
    """Raised when the remote receipt signing service fails or misbehaves."""
def sign(receipt):
    """
    Send the receipt to the signing service and return the signed receipt.

    :param receipt: either an already-serialised JSON string or an object
        that can be serialised with ``json.dumps``.
    :raises ValueError: if ``settings.SIGNING_SERVER`` is not configured.
    :raises SigningError: if the signing service times out, is unreachable,
        or responds with a non-200 status.

    This could possibly be made async via celery.
    """
    # Bug fix: the original code did `return ValueError(...)`, silently
    # handing callers an exception *instance* instead of failing. A missing
    # SIGNING_SERVER is a configuration error, so raise.
    if not settings.SIGNING_SERVER:
        raise ValueError('Invalid config. SIGNING_SERVER empty.')

    destination = settings.SIGNING_SERVER + '/1.0/sign'
    timeout = settings.SIGNING_SERVER_TIMEOUT

    receipt_json = json.dumps(receipt)
    log.info('Calling service: %s' % destination)
    log.info('Receipt contents: %s' % receipt_json)
    headers = {'Content-Type': 'application/json'}
    # Pass a pre-serialised receipt through untouched (Python 2 basestring).
    data = receipt if isinstance(receipt, basestring) else receipt_json

    try:
        with statsd.timer('services.sign.receipt'):
            req = requests.post(destination, data=data, headers=headers,
                                timeout=timeout)
    except requests.Timeout:
        statsd.incr('services.sign.receipt.timeout')
        log.error('Posting to receipt signing timed out')
        raise SigningError('Posting to receipt signing timed out')
    except requests.RequestException:
        # Will occur when some other error occurs.
        statsd.incr('services.sign.receipt.error')
        log.error('Posting to receipt signing failed', exc_info=True)
        raise SigningError('Posting to receipt signing failed')

    if req.status_code != 200:
        statsd.incr('services.sign.receipt.error')
        log.error('Posting to signing failed: %s' % req.status_code)
        raise SigningError('Posting to signing failed: %s'
                           % req.status_code)

    return json.loads(req.content)['receipt']
def decode(receipt):
    """
    Decode and verify that the receipt is sound from a crypto point of view.

    Will raise errors if the receipt is not valid, returns receipt contents
    if it is valid.
    """
    # Cryptographic verification of receipts is not implemented yet.
    raise NotImplementedError
def crack(receipt):
    """
    Crack open the receipt, without checking that the crypto is valid.

    Returns a list of all the elements of a receipt, which by default is
    cert, receipt.
    """
    # Each '~'-separated segment is an unverified JWT.
    return [jwt.decode(segment.encode('ascii'), verify=False)
            for segment in receipt.split('~')]
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import re
import sys
import subprocess
# TODO(timurrrr): we may use it on POSIX too to avoid code duplication once we
# support layout_tests, remove Dr. Memory specific code and verify it works
# on a "clean" Mac.
# Parse the test case name out of an optional --gtest_filter=<name> argument
# so per-test log directories can be labelled with it.
testcase_name = None
for arg in sys.argv:
  m = re.match("\-\-gtest_filter=(.*)", arg)
  if m:
    # At most one --gtest_filter argument is expected on the command line.
    assert testcase_name is None
    testcase_name = m.groups()[0]

# arg #0 is the path to this python script
cmd_to_run = sys.argv[1:]

# TODO(timurrrr): this is Dr. Memory-specific
# Usually, we pass "-logdir" "foo\bar\spam path" args to Dr. Memory.
# To group reports per UI test, we want to put the reports for each test into a
# separate directory. This code can be simplified when we have
# http://code.google.com/p/drmemory/issues/detail?id=684 fixed.
logdir_idx = cmd_to_run.index("-logdir")
old_logdir = cmd_to_run[logdir_idx + 1]

wrapper_pid = str(os.getpid())

# On Windows, there is a chance of PID collision. We avoid it by appending the
# number of entries in the logdir at the end of wrapper_pid.
# This number is monotonic and we can't have two simultaneously running wrappers
# with the same PID.
wrapper_pid += "_%d" % len(glob.glob(old_logdir + "\\*"))

# Redirect this test's logs into its own subdirectory of the original logdir.
cmd_to_run[logdir_idx + 1] += "\\testcase.%s.logs" % wrapper_pid
os.makedirs(cmd_to_run[logdir_idx + 1])

if testcase_name:
  # Record the test case name next to its log directory (Python 2 print).
  f = open(old_logdir + "\\testcase.%s.name" % wrapper_pid, "w")
  print >>f, testcase_name
  f.close()

# Propagate the wrapped command's exit status to our caller.
exit(subprocess.call(cmd_to_run))
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to dynamically load objects from the Hub."""
import ast
import filecmp
import hashlib
import importlib
import importlib.metadata
import importlib.util
import keyword
import os
import re
import shutil
import signal
import sys
import threading
from pathlib import Path
from types import ModuleType
from typing import Any
from huggingface_hub import is_offline_mode, try_to_load_from_cache
from packaging import version
from .utils import (
HF_MODULES_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
cached_file,
extract_commit_hash,
logging,
)
from .utils.import_utils import VersionComparison, split_package_version
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def _sanitize_module_name(name: str) -> str:
r"""
Tries to sanitize a module name so that it can be used as a Python module.
The following transformations are applied:
1. Replace `.` in module names with `_dot_`.
2. Replace `-` in module names with `_hyphen_`.
3. If the module name starts with a digit, prepend it with `_`.
4. Warn if the sanitized name is a Python reserved keyword or not a valid identifier.
If the input name is already a valid identifier, it is returned unchanged.
"""
# We not replacing `\W` characters with `_` to avoid collisions. Because `_` is a very common
# separator used in module names, replacing `\W` with `_` would create too many collisions.
# Once a module is imported, it is cached in `sys.modules` and the second import would return
# the first module, which might not be the expected behavior if name collisions happen.
new_name = name.replace(".", "_dot_").replace("-", "_hyphen_")
if new_name and new_name[0].isdigit():
new_name = f"_{new_name}"
if keyword.iskeyword(new_name):
logger.warning(
f"The module name {new_name} (originally {name}) is a reserved keyword in Python. "
"Please rename the original module to avoid import issues."
)
elif not new_name.isidentifier():
logger.warning(
f"The module name {new_name} (originally {name}) is not a valid Python identifier. "
"Please rename the original module to avoid import issues."
)
return new_name
_HF_REMOTE_CODE_LOCK = threading.Lock()
def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # Presence on sys.path means this has already run.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_marker = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_marker.exists():
        init_marker.touch()
        # Make the freshly created package visible to the import system.
        importlib.invalidate_caches()
def create_dynamic_module(name: str | os.PathLike) -> None:
    """
    Creates a dynamic module in the cache directory for modules.

    Args:
        name (`str` or `os.PathLike`):
            The name of the dynamic module to create.
    """
    init_hf_modules()
    module_dir = (Path(HF_MODULES_CACHE) / name).resolve()
    # Recursively create any missing parent package first.
    if not module_dir.parent.exists():
        create_dynamic_module(module_dir.parent)
    os.makedirs(module_dir, exist_ok=True)
    init_marker = module_dir / "__init__.py"
    if not init_marker.exists():
        init_marker.touch()
        # Invalidating import caches here (and everywhere we touch these
        # modules) prevents "module does not exist" errors on later imports.
        importlib.invalidate_caches()
def get_relative_imports(module_file: str | os.PathLike) -> list[str]:
"""
Get the list of modules that are relatively imported in a module file.
Args:
module_file (`str` or `os.PathLike`): The module file to inspect.
Returns:
`list[str]`: The list of relative imports in the module.
"""
with open(module_file, encoding="utf-8") as f:
content = f.read()
# Imports of the form `import .xxx`
relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
# Unique-ify
return list(set(relative_imports))
def get_relative_import_files(module_file: str | os.PathLike) -> list[str]:
    """
    Get the list of all files that are needed for a given module, following
    relative imports transitively (if a imports b and b imports c, the files
    for both b and c are returned).

    Args:
        module_file (`str` or `os.PathLike`): The module file to inspect.

    Returns:
        `list[str]`: Files for every (recursive) relative import of the module.
    """
    # NOTE: all import names are resolved relative to the *original* module's
    # parent directory, matching the behaviour callers rely on.
    base_dir = Path(module_file).parent
    all_relative_imports = []
    pending = [module_file]

    # Fixed-point iteration: keep resolving imports until a pass adds nothing.
    while pending:
        discovered = []
        for current in pending:
            discovered.extend(get_relative_imports(current))
        candidate_files = [f"{str(base_dir / m)}.py" for m in discovered]
        pending = [f for f in candidate_files if f not in all_relative_imports]
        all_relative_imports.extend(pending)

    return all_relative_imports
def get_imports(filename: str | os.PathLike) -> list[str]:
    """
    Extracts all the libraries (not relative imports this time) that are imported in a file.

    Args:
        filename (`str` or `os.PathLike`): The module file to inspect.

    Returns:
        `list[str]`: The list of all packages required to use the input module.
    """
    with open(filename, encoding="utf-8") as f:
        content = f.read()

    imported_modules = set()
    import transformers.utils

    def recursive_look_for_imports(node):
        if isinstance(node, ast.Try):
            return  # Don't recurse into Try blocks and ignore imports in them
        elif isinstance(node, ast.If):
            test = node.test
            for condition_node in ast.walk(test):
                if isinstance(condition_node, ast.Call):
                    # Name of the function called in the `if` condition; empty
                    # string for non-Name callees (attributes, lambdas, ...).
                    check_function = getattr(condition_node.func, "id", "")
                    # NOTE: `and` binds tighter than `or`, so this reads as
                    # (endswith("available") and startswith("is_flash_attn"))
                    # or hasattr(import_utils, check_function).
                    if (
                        check_function.endswith("available")
                        and check_function.startswith("is_flash_attn")
                        or hasattr(transformers.utils.import_utils, check_function)
                    ):
                        # Don't recurse into "if flash_attn_available()" or any "if library_available" blocks
                        # that appears in `transformers.utils.import_utils` and ignore imports in them
                        return
        elif isinstance(node, ast.Import):
            # Handle 'import x' statements
            for alias in node.names:
                top_module = alias.name.split(".")[0]
                if top_module:
                    imported_modules.add(top_module)
        elif isinstance(node, ast.ImportFrom):
            # Handle 'from x import y' statements, ignoring relative imports
            if node.level == 0 and node.module:
                top_module = node.module.split(".")[0]
                if top_module:
                    imported_modules.add(top_module)

        # Recursively visit all children
        for child in ast.iter_child_nodes(node):
            recursive_look_for_imports(child)

    tree = ast.parse(content)
    recursive_look_for_imports(tree)

    return sorted(imported_modules)
def check_imports(filename: str | os.PathLike) -> list[str]:
    """
    Check if the current Python environment contains all the libraries that are imported in a file. Will raise if a
    library is missing.

    Args:
        filename (`str` or `os.PathLike`): The module file to check.

    Returns:
        `list[str]`: The list of relative imports in the file.
    """
    missing = []
    for package in get_imports(filename):
        try:
            importlib.import_module(package)
        except ImportError as exception:
            logger.warning(f"Encountered exception while importing {package}: {exception}")
            # Only treat a genuinely absent module as missing. Other
            # ImportErrors (dependency problems inside an installed package)
            # must not be hidden.
            # See https://github.com/huggingface/transformers/issues/33604
            if "No module named" in str(exception):
                missing.append(package)
            else:
                raise

    if missing:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing)}. Run `pip install {' '.join(missing)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(
    class_name: str,
    module_path: str | os.PathLike,
    *,
    force_reload: bool = False,
) -> type:
    """
    Import a module on the cache directory for modules and extract a class from it.

    Args:
        class_name (`str`): The name of the class to import.
        module_path (`str` or `os.PathLike`): The path to the module to import.
        force_reload (`bool`, *optional*, defaults to `False`):
            Whether to reload the dynamic module from file if it already exists in `sys.modules`.
            Otherwise, the module is only reloaded if the file has changed.

    Returns:
        `typing.Type`: The class looked for.
    """
    # Turn the relative module path into a dotted module name, e.g.
    # "a/b/mod.py" -> "a.b.mod".
    name = os.path.normpath(module_path)
    name = name.removesuffix(".py")
    name = name.replace(os.path.sep, ".")
    module_file: Path = Path(HF_MODULES_CACHE) / module_path
    # Serialize all remote-code loading: sys.modules mutation and exec_module
    # must not interleave across threads.
    with _HF_REMOTE_CODE_LOCK:
        if force_reload:
            sys.modules.pop(name, None)
            importlib.invalidate_caches()
        cached_module: ModuleType | None = sys.modules.get(name)
        module_spec = importlib.util.spec_from_file_location(name, location=module_file)

        # Hash the module file and all its relative imports to check if we need to reload it
        module_files: list[Path] = [module_file] + sorted(map(Path, get_relative_import_files(module_file)))
        module_hash: str = hashlib.sha256(b"".join(bytes(f) + f.read_bytes() for f in module_files)).hexdigest()

        module: ModuleType
        if cached_module is None:
            module = importlib.util.module_from_spec(module_spec)
            # insert it into sys.modules before any loading begins
            sys.modules[name] = module
        else:
            module = cached_module

        # reload in both cases, unless the module is already imported and the hash hits
        if getattr(module, "__transformers_module_hash__", "") != module_hash:
            module_spec.loader.exec_module(module)
            module.__transformers_module_hash__ = module_hash
    return getattr(module, class_name)
def get_cached_module_file(
    pretrained_model_name_or_path: str | os.PathLike,
    module_file: str,
    cache_dir: str | os.PathLike | None = None,
    force_download: bool = False,
    proxies: dict[str, str] | None = None,
    token: bool | str | None = None,
    revision: str | None = None,
    local_files_only: bool = False,
    repo_type: str | None = None,
    _commit_hash: str | None = None,
    **deprecated_kwargs,
) -> str:
    """
    Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached
    Transformers module.

    Args:
        pretrained_model_name_or_path (`str` or `os.PathLike`):
            This can be either:

            - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
              huggingface.co.
            - a path to a *directory* containing a configuration file saved using the
              [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.

        module_file (`str`):
            The name of the module file containing the class to look for.
        cache_dir (`str` or `os.PathLike`, *optional*):
            Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
            cache should not be used.
        force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force to (re-)download the configuration files and override the cached versions if they
            exist.
        proxies (`dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
        token (`str` or *bool*, *optional*):
            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
            when running `hf auth login` (stored in `~/.huggingface`).
        revision (`str`, *optional*, defaults to `"main"`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
            identifier allowed by git.
        local_files_only (`bool`, *optional*, defaults to `False`):
            If `True`, will only try to load the tokenizer configuration from local files.
        repo_type (`str`, *optional*):
            Specify the repo type (useful when downloading from a space for instance).

    <Tip>

    Passing `token=True` is required when you want to use a private model.

    </Tip>

    Returns:
        `str`: The path to the module inside the cache.
    """
    if is_offline_mode() and not local_files_only:
        logger.info("Offline mode: forcing local_files_only=True")
        local_files_only = True

    # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    is_local = os.path.isdir(pretrained_model_name_or_path)
    if is_local:
        submodule = _sanitize_module_name(os.path.basename(pretrained_model_name_or_path))
    else:
        # Remote repo: sanitize each path segment ("org/name") independently.
        submodule = os.path.sep.join(map(_sanitize_module_name, pretrained_model_name_or_path.split("/")))
        # Used below to detect whether the resolved file is new vs. the cache.
        cached_module = try_to_load_from_cache(
            pretrained_model_name_or_path, module_file, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type
        )

    new_files = []
    try:
        # Load from URL or cache if already cached
        resolved_module_file = cached_file(
            pretrained_model_name_or_path,
            module_file,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            local_files_only=local_files_only,
            token=token,
            revision=revision,
            repo_type=repo_type,
            _commit_hash=_commit_hash,
        )
        if not is_local and cached_module != resolved_module_file:
            new_files.append(module_file)

    except OSError:
        logger.info(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
        raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    # True exactly in the local-directory case (matches the `is_local` branch above).
    if submodule == _sanitize_module_name(os.path.basename(pretrained_model_name_or_path)):
        # We copy local files to avoid putting too many folders in sys.path. This copy is done when the file is new or
        # has changed since last copy.
        if not (submodule_path / module_file).exists() or not filecmp.cmp(
            resolved_module_file, str(submodule_path / module_file)
        ):
            (submodule_path / module_file).parent.mkdir(parents=True, exist_ok=True)
            shutil.copy(resolved_module_file, submodule_path / module_file)
            importlib.invalidate_caches()
        for module_needed in modules_needed:
            module_needed = Path(module_file).parent / f"{module_needed}.py"
            module_needed_file = os.path.join(pretrained_model_name_or_path, module_needed)
            if not (submodule_path / module_needed).exists() or not filecmp.cmp(
                module_needed_file, str(submodule_path / module_needed)
            ):
                shutil.copy(module_needed_file, submodule_path / module_needed)
                importlib.invalidate_caches()
    else:
        # Get the commit hash
        commit_hash = extract_commit_hash(resolved_module_file, _commit_hash)

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        full_submodule_module_file_path = os.path.join(full_submodule, module_file)
        create_dynamic_module(Path(full_submodule_module_file_path).parent)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
            importlib.invalidate_caches()
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not ((submodule_path / module_file).parent / f"{module_needed}.py").exists():
                # Recurse so each relative import is itself downloaded/cached.
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{Path(module_file).parent / module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    token=token,
                    revision=revision,
                    local_files_only=local_files_only,
                    _commit_hash=commit_hash,
                )
                new_files.append(f"{module_needed}.py")

    # Warn the user when unpinned remote code files were (re-)downloaded.
    if len(new_files) > 0 and revision is None:
        new_files = "\n".join([f"- {f}" for f in new_files])
        repo_type_str = "" if repo_type is None else f"{repo_type}s/"
        url = f"https://huggingface.co/{repo_type_str}{pretrained_model_name_or_path}"
        logger.warning(
            f"A new version of the following files was downloaded from {url}:\n{new_files}"
            "\n. Make sure to double-check they do not contain any added malicious code. To avoid downloading new "
            "versions of the code file, you can pin a revision."
        )

    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    class_reference: str,
    pretrained_model_name_or_path: str | os.PathLike,
    cache_dir: str | os.PathLike | None = None,
    force_download: bool = False,
    proxies: dict[str, str] | None = None,
    token: bool | str | None = None,
    revision: str | None = None,
    local_files_only: bool = False,
    repo_type: str | None = None,
    code_revision: str | None = None,
    **kwargs,
) -> type:
    """
    Extracts a class from a module file, present in the local folder or repository of a model.

    <Tip warning={true}>

    Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should
    therefore only be called on trusted repos.

    </Tip>

    Args:
        class_reference (`str`):
            The full name of the class to load, e.g. `"modeling.MyModel"`, optionally prefixed with the repo hosting
            the code, e.g. `"user/repo--modeling.MyModel"`.
        pretrained_model_name_or_path (`str` or `os.PathLike`):
            Model id on huggingface.co or path to a local directory. Used when `class_reference` does not specify
            another repo.
        cache_dir (`str` or `os.PathLike`, *optional*):
            Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
            cache should not be used.
        force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force to (re-)download the configuration files and override the cached versions if they
            exist.
        proxies (`dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint. The proxies are used on each request.
        token (`str` or `bool`, *optional*):
            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
            when running `hf auth login` (stored in `~/.huggingface`).
        revision (`str`, *optional*, defaults to `"main"`):
            The specific model version to use (branch name, tag name, or commit id).
        local_files_only (`bool`, *optional*, defaults to `False`):
            If `True`, will only try to load the tokenizer configuration from local files.
        repo_type (`str`, *optional*):
            Specify the repo type (useful when downloading from a space for instance).
        code_revision (`str`, *optional*, defaults to `"main"`):
            The specific revision to use for the code on the Hub, if the code lives in a different repository than the
            rest of the model.

    <Tip>

    Passing `token=True` is required when you want to use a private model.

    </Tip>

    Returns:
        `typing.Type`: The class, dynamically imported from the module.

    Examples:

    ```python
    # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this
    # module.
    cls = get_class_from_dynamic_module("modeling.MyBertModel", "sgugger/my-bert-model")

    # Download module `modeling.py` from a given repo and cache then extract the class `MyBertModel` from this
    # module.
    cls = get_class_from_dynamic_module("sgugger/my-bert-model--modeling.MyBertModel", "sgugger/another-bert-model")
    ```"""
    # The reference may embed the repo hosting the code as "<repo>--<module>.<class>".
    if "--" in class_reference:
        repo_id, class_reference = class_reference.split("--")
    else:
        repo_id = pretrained_model_name_or_path
    module_file, class_name = class_reference.split(".")

    # When the code lives in the same repo as the model, default the code
    # revision to the model revision.
    if code_revision is None and pretrained_model_name_or_path == repo_id:
        code_revision = revision

    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        repo_id,
        module_file + ".py",
        cache_dir=cache_dir,
        force_download=force_download,
        proxies=proxies,
        token=token,
        revision=code_revision,
        local_files_only=local_files_only,
        repo_type=repo_type,
    )
    return get_class_in_module(class_name, final_module, force_reload=force_download)
def custom_object_save(obj: Any, folder: str | os.PathLike, config: dict | None = None) -> list[str]:
    """
    Save the modeling files corresponding to a custom model/configuration/tokenizer etc. in a given folder. Optionally
    adds the proper fields in a config.

    Args:
        obj (`Any`): The object for which to save the module files.
        folder (`str` or `os.PathLike`): The folder where to save.
        config (`PreTrainedConfig` or dictionary, `optional`):
            A config in which to register the auto_map corresponding to this custom object.

    Returns:
        `list[str]`: The list of files saved.
    """
    # Code defined in __main__ has no module file we could copy.
    if obj.__module__ == "__main__":
        logger.warning(
            f"We can't save the code defining {obj} in {folder} as it's been defined in __main__. You should put "
            "this code in a separate module so we can include it in the saved folder and make it easier to share via "
            "the Hub."
        )
        return

    def _set_auto_map_in_config(_config):
        # Register "<module>.<ClassName>" under the object's auto class key.
        module_name = obj.__class__.__module__
        last_module = module_name.split(".")[-1]
        full_name = f"{last_module}.{obj.__class__.__name__}"
        # Special handling for tokenizers
        if "Tokenizer" in full_name:
            slow_tokenizer_class = None
            fast_tokenizer_class = None
            if obj.__class__.__name__.endswith("Fast"):
                # Fast tokenizer: we have the fast tokenizer class and we may have the slow one has an attribute.
                fast_tokenizer_class = f"{last_module}.{obj.__class__.__name__}"
                if getattr(obj, "slow_tokenizer_class", None) is not None:
                    slow_tokenizer = getattr(obj, "slow_tokenizer_class")
                    slow_tok_module_name = slow_tokenizer.__module__
                    last_slow_tok_module = slow_tok_module_name.split(".")[-1]
                    slow_tokenizer_class = f"{last_slow_tok_module}.{slow_tokenizer.__name__}"
            else:
                # Slow tokenizer: no way to have the fast class
                slow_tokenizer_class = f"{last_module}.{obj.__class__.__name__}"
            # Tokenizers are registered as a (slow, fast) pair rather than a single name.
            full_name = (slow_tokenizer_class, fast_tokenizer_class)

        if isinstance(_config, dict):
            auto_map = _config.get("auto_map", {})
            auto_map[obj._auto_class] = full_name
            _config["auto_map"] = auto_map
        elif getattr(_config, "auto_map", None) is not None:
            _config.auto_map[obj._auto_class] = full_name
        else:
            _config.auto_map = {obj._auto_class: full_name}

    # Add object class to the config auto_map
    if isinstance(config, (list, tuple)):
        for cfg in config:
            _set_auto_map_in_config(cfg)
    elif config is not None:
        _set_auto_map_in_config(config)

    result = []
    # Copy module file to the output folder.
    object_file = sys.modules[obj.__module__].__file__
    dest_file = Path(folder) / (Path(object_file).name)
    shutil.copy(object_file, dest_file)
    result.append(dest_file)

    # Gather all relative imports recursively and make sure they are copied as well.
    for needed_file in get_relative_import_files(object_file):
        dest_file = Path(folder) / (Path(needed_file).name)
        shutil.copy(needed_file, dest_file)
        result.append(dest_file)

    return result
def _raise_timeout_error(signum, frame):
raise ValueError(
"Loading this model requires you to execute custom code contained in the model repository on your local "
"machine. Please set the option `trust_remote_code=True` to permit loading of this model."
)
TIME_OUT_REMOTE_CODE = 15
def resolve_trust_remote_code(
    trust_remote_code, model_name, has_local_code, has_remote_code, error_message=None, upstream_repo=None
):
    """
    Resolves the `trust_remote_code` argument. If there is remote code to be loaded, the user must opt-in to loading
    it.

    Args:
        trust_remote_code (`bool` or `None`):
            User-defined `trust_remote_code` value.
        model_name (`str`):
            The name of the model repository in huggingface.co.
        has_local_code (`bool`):
            Whether the model has local code.
        has_remote_code (`bool`):
            Whether the model has remote code.
        error_message (`str`, *optional*):
            Custom error message to display if there is remote code to load and the user didn't opt-in. If unset, the error
            message will be regarding loading a model with custom code.
        upstream_repo (`str`, *optional*):
            Repository that actually hosts the custom code when `model_name` only references it; used to build the
            default error message.

    Returns:
        The resolved `trust_remote_code` value.
    """
    # Build a default error message pointing at wherever the custom code actually lives.
    if error_message is None:
        if upstream_repo is not None:
            error_message = (
                f"The repository {model_name} references custom code contained in {upstream_repo} which "
                f"must be executed to correctly load the model. You can inspect the repository "
                f"content at https://hf.co/{upstream_repo} .\n"
            )
        elif os.path.isdir(model_name):
            error_message = (
                f"The repository {model_name} contains custom code which must be executed "
                f"to correctly load the model. You can inspect the repository "
                f"content at {os.path.abspath(model_name)} .\n"
            )
        else:
            error_message = (
                f"The repository {model_name} contains custom code which must be executed "
                f"to correctly load the model. You can inspect the repository "
                f"content at https://hf.co/{model_name} .\n"
            )

    if trust_remote_code is None:
        if has_local_code:
            # Local code wins by default; no prompt needed.
            trust_remote_code = False
        elif has_remote_code and TIME_OUT_REMOTE_CODE > 0:
            # Interactively ask the user, aborting via SIGALRM if they don't answer in time.
            prev_sig_handler = None
            try:
                prev_sig_handler = signal.signal(signal.SIGALRM, _raise_timeout_error)
                signal.alarm(TIME_OUT_REMOTE_CODE)
                # Loop until a recognizable yes/no answer is given (or the alarm fires).
                while trust_remote_code is None:
                    answer = input(
                        f"{error_message} You can inspect the repository content at https://hf.co/{model_name}.\n"
                        f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n"
                        f"Do you wish to run the custom code? [y/N] "
                    )
                    if answer.lower() in ["yes", "y", "1"]:
                        trust_remote_code = True
                    elif answer.lower() in ["no", "n", "0", ""]:
                        trust_remote_code = False
                signal.alarm(0)
            except Exception:
                # OS which does not support signal.SIGALRM
                raise ValueError(
                    f"{error_message} You can inspect the repository content at https://hf.co/{model_name}.\n"
                    f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
                )
            finally:
                # Always restore the previous SIGALRM handler and cancel any pending alarm.
                if prev_sig_handler is not None:
                    signal.signal(signal.SIGALRM, prev_sig_handler)
                    signal.alarm(0)
        elif has_remote_code:
            # For the CI which puts the timeout at 0
            _raise_timeout_error(None, None)

    # Final gate: remote-only code may never load without explicit opt-in.
    if has_remote_code and not has_local_code and not trust_remote_code:
        raise ValueError(
            f"{error_message} You can inspect the repository content at https://hf.co/{model_name}.\n"
            f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
        )

    return trust_remote_code
def check_python_requirements(path_or_repo_id, requirements_file="requirements.txt", **kwargs):
    """
    Tries to locate `requirements_file` in a local folder or repo, and confirms that the environment has all the
    python dependencies installed.

    Args:
        path_or_repo_id (`str` or `os.PathLike`):
            This can be either:
            - a string, the *model id* of a model repo on huggingface.co.
            - a path to a *directory* potentially containing the file.
        requirements_file (`str`, *optional*, defaults to `"requirements.txt"`):
            Name of the requirements file to look for.
        kwargs (`dict[str, Any]`, *optional*):
            Additional arguments to pass to `cached_file`.
    """
    unsatisfied = []  # human-readable messages for each unmet requirement
    try:
        resolved_path = cached_file(path_or_repo_id=path_or_repo_id, filename=requirements_file, **kwargs)
        with open(resolved_path, "r") as requirements_handle:
            lines = requirements_handle.readlines()

        for line in lines:
            line = line.strip()
            # Empty lines and comments are not requirements.
            if not line or line.startswith("#"):
                continue

            # A requirement is either pinned (e.g. "torch>2.6.0") or bare (e.g. "torch").
            try:
                package_name, delimiter, version_number = split_package_version(line)
            except ValueError:
                package_name, delimiter, version_number = line, None, None

            try:
                local_package_version = importlib.metadata.version(package_name)
            except importlib.metadata.PackageNotFoundError:
                unsatisfied.append(f"{line} (installed: None)")
                continue

            if delimiter is None or version_number is None:
                # Bare requirement and the package is installed -> satisfied.
                continue

            comparison = VersionComparison.from_string(delimiter)
            if not comparison(version.parse(local_package_version), version.parse(version_number)):
                unsatisfied.append(f"{line} (installed: {local_package_version})")
    except OSError:
        # No requirements file found in the repo/folder: nothing to check.
        pass

    if unsatisfied:
        raise ImportError(
            f"Missing requirements in your local environment for `{path_or_repo_id}`:\n" + "\n".join(unsatisfied)
        )
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Command functions for managing Airflow pools."""
from __future__ import annotations
import json
from json import JSONDecodeError
from pathlib import Path
import rich
from airflowctl.api.client import NEW_API_CLIENT, Client, ClientKind, provide_api_client
from airflowctl.api.datamodels.generated import (
BulkActionOnExistence,
BulkBodyPoolBody,
BulkCreateActionPoolBody,
PoolBody,
)
@provide_api_client(kind=ClientKind.CLI)
def import_(args, api_client: Client = NEW_API_CLIENT) -> None:
    """Import pools from file."""
    pools_file = Path(args.file)
    if not pools_file.exists():
        raise SystemExit(f"Missing pools file {args.file}")
    imported, failures = _import_helper(api_client, pools_file)
    if failures:
        raise SystemExit(f"Failed to update pool(s): {failures}")
    rich.print(imported)
@provide_api_client(kind=ClientKind.CLI)
def export(args, api_client: Client = NEW_API_CLIENT) -> None:
    """
    Export all pools.

    If output is json, write to file. Otherwise, print to console.
    """
    try:
        response = api_client.pools.list()

        def _serialize(pool):
            # Flatten a pool datamodel into a plain dict for json/console output.
            return {
                "name": pool.name,
                "slots": pool.slots,
                "description": pool.description,
                "include_deferred": pool.include_deferred,
                "occupied_slots": pool.occupied_slots,
                "running_slots": pool.running_slots,
                "queued_slots": pool.queued_slots,
                "scheduled_slots": pool.scheduled_slots,
                "open_slots": pool.open_slots,
                "deferred_slots": pool.deferred_slots,
            }

        serialized_pools = [_serialize(pool) for pool in response.pools]

        if args.output != "json":
            # For non-json formats, print the pools directly to console
            rich.print(serialized_pools)
            return

        with open(Path(args.file), "w") as output_file:
            json.dump(serialized_pools, output_file, indent=4, sort_keys=True)
        rich.print(f"Exported {response.total_entries} pool(s) to {args.file}")
    except Exception as e:
        raise SystemExit(f"Failed to export pools: {e}")
def _import_helper(api_client: Client, filepath: Path):
    """Help import pools from the json file."""
    try:
        with open(filepath) as f:
            raw_pools = json.load(f)
    except JSONDecodeError as e:
        raise SystemExit(f"Invalid json file: {e}")

    if not isinstance(raw_pools, list):
        raise SystemExit("Invalid format: Expected a list of pool objects")

    def _to_pool_body(entry):
        # Each entry must be an object carrying at least "name" and "slots".
        if not isinstance(entry, dict) or "name" not in entry or "slots" not in entry:
            raise SystemExit(f"Invalid pool configuration: {entry}")
        return PoolBody(
            name=entry["name"],
            slots=entry["slots"],
            description=entry.get("description", ""),
            include_deferred=entry.get("include_deferred", False),
        )

    bulk_body = BulkBodyPoolBody(
        actions=[
            BulkCreateActionPoolBody(
                action="create",
                entities=[_to_pool_body(entry) for entry in raw_pools],
                action_on_existence=BulkActionOnExistence.FAIL,
            )
        ]
    )
    result = api_client.pools.bulk(pools=bulk_body)
    # Return the successful and failed entities directly from the response
    return result.create.success, result.create.errors
# -*- coding: utf-8 -*-
import logging
from box import CredentialsV2, BoxClient
from box.client import BoxClientException
from modularodm import fields
from framework.auth import Auth
from framework.exceptions import HTTPError
from website.addons.base import exceptions
from website.addons.base import AddonOAuthUserSettingsBase, AddonOAuthNodeSettingsBase
from website.addons.base import StorageAddonBase
from website.addons.box import settings
from website.addons.box.utils import BoxNodeLogger, refresh_oauth_key
from website.addons.box.serializer import BoxSerializer
from website.oauth.models import ExternalProvider
logger = logging.getLogger(__name__)
class Box(ExternalProvider):
    """OAuth2 provider definition for Box (box.com)."""

    name = 'Box'
    short_name = 'box'

    client_id = settings.BOX_KEY
    client_secret = settings.BOX_SECRET

    # OAuth2 endpoints come from the addon settings module; Box uses the same
    # token endpoint for the initial callback and for refreshes.
    auth_url_base = settings.BOX_OAUTH_AUTH_ENDPOINT
    callback_url = settings.BOX_OAUTH_TOKEN_ENDPOINT
    auto_refresh_url = settings.BOX_OAUTH_TOKEN_ENDPOINT
    default_scopes = ['root_readwrite']

    def handle_callback(self, response):
        """View called when the Oauth flow is completed. Adds a new BoxUserSettings
        record to the user and saves the user's access token and account info.

        :param dict response: token payload containing ``access_token`` and
            ``refresh_token``.
        :return: dict with the Box account id, display name, and profile URL.
        """
        client = BoxClient(CredentialsV2(
            response['access_token'],
            response['refresh_token'],
            settings.BOX_KEY,
            settings.BOX_SECRET,
        ))

        about = client.get_user_info()

        return {
            'provider_id': about['id'],
            'display_name': about['name'],
            'profile_url': 'https://app.box.com/profile/{0}'.format(about['id'])
        }
class BoxUserSettings(AddonOAuthUserSettingsBase):
    """Stores user-specific box information
    """
    # Wires the generic OAuth user-settings base to the Box provider/serializer.
    oauth_provider = Box
    serializer = BoxSerializer
class BoxNodeSettings(StorageAddonBase, AddonOAuthNodeSettingsBase):
    """Node-level settings for the Box addon.

    Tracks which Box folder is linked to the node and which user's OAuth
    credentials authorize access to it.
    """

    oauth_provider = Box
    serializer = BoxSerializer

    # The BoxUserSettings record whose OAuth credentials this node uses.
    foreign_user_settings = fields.ForeignField(
        'boxusersettings', backref='authorized'
    )
    # Box folder id linked to this node; None when no folder is configured.
    folder_id = fields.StringField(default=None)
    folder_name = fields.StringField()
    folder_path = fields.StringField()

    # Per-instance caches; never persisted.
    _folder_data = None
    _api = None

    @property
    def api(self):
        """authenticated ExternalProvider instance"""
        if self._api is None:
            self._api = Box(self.external_account)
        return self._api

    @property
    def display_name(self):
        # Shown in the UI, e.g. "Box: <folder id>".
        return '{0}: {1}'.format(self.config.full_name, self.folder_id)

    @property
    def has_auth(self):
        """Whether an access token is associated with this node."""
        return bool(self.user_settings and self.user_settings.has_auth)

    @property
    def complete(self):
        """Whether auth exists and access to this account has been verified."""
        return bool(self.has_auth and self.user_settings.verify_oauth_access(
            node=self.owner,
            external_account=self.external_account,
        ))

    def fetch_folder_name(self):
        """Return the linked folder's name, prettifying the Box root folder."""
        self._update_folder_data()
        return self.folder_name.replace('All Files', '/ (Full Box)')

    def fetch_full_folder_path(self):
        """Return the slash-joined path of the linked folder."""
        self._update_folder_data()
        return self.folder_path

    def _update_folder_data(self):
        """Refresh ``folder_name``/``folder_path`` from the Box API and save.

        No-op when no folder is linked or data is already cached; silently
        keeps stale values if the Box API call fails.
        """
        if self.folder_id is None:
            return None

        if not self._folder_data:
            try:
                refresh_oauth_key(self.external_account)
                client = BoxClient(self.external_account.oauth_key)
                self._folder_data = client.get_folder(self.folder_id)
            except BoxClientException:
                return

            self.folder_name = self._folder_data['name']
            self.folder_path = '/'.join(
                [x['name'] for x in self._folder_data['path_collection']['entries']]
                + [self._folder_data['name']]
            )
            self.save()

    def set_folder(self, folder_id, auth):
        """Link ``folder_id`` to this node, grant OAuth access, and log it.

        :param folder_id: Box folder id to link (stored as a string).
        :param Auth auth: authentication object of the acting user.
        """
        self.folder_id = str(folder_id)
        self._update_folder_data()
        self.save()

        if not self.complete:
            self.user_settings.grant_oauth_access(
                node=self.owner,
                external_account=self.external_account,
                metadata={'folder': self.folder_id}
            )
            self.user_settings.save()

        # Add log to node
        nodelogger = BoxNodeLogger(node=self.owner, auth=auth)
        nodelogger.log(action="folder_selected", save=True)

    def set_user_auth(self, user_settings):
        """Import a user's Box authentication and create a NodeLog.

        :param BoxUserSettings user_settings: The user settings to link.
        """
        self.user_settings = user_settings
        nodelogger = BoxNodeLogger(node=self.owner, auth=Auth(user_settings.owner))
        nodelogger.log(action="node_authorized", save=True)

    def deauthorize(self, auth=None, add_log=True):
        """Remove user authorization from this node and log the event."""
        node = self.owner

        if add_log:
            extra = {'folder_id': self.folder_id}
            nodelogger = BoxNodeLogger(node=node, auth=auth)
            nodelogger.log(action="node_deauthorized", extra=extra, save=True)

        self.folder_id = None
        self._update_folder_data()
        self.user_settings = None
        self.clear_auth()

        self.save()

    def serialize_waterbutler_credentials(self):
        """Return the OAuth token waterbutler uses to talk to Box.

        :raises AddonError: if the node is not authorized.
        :raises HTTPError: if refreshing the OAuth key fails.
        """
        if not self.has_auth:
            raise exceptions.AddonError('Addon is not authorized')
        try:
            refresh_oauth_key(self.external_account)
            return {'token': self.external_account.oauth_key}
        except BoxClientException as error:
            raise HTTPError(error.status_code, data={'message_long': error.message})

    def serialize_waterbutler_settings(self):
        """Return the waterbutler settings payload (the linked folder id)."""
        if self.folder_id is None:
            raise exceptions.AddonError('Folder is not configured')
        return {'folder': self.folder_id}

    def create_waterbutler_log(self, auth, action, metadata):
        """Record a waterbutler file action (e.g. upload/delete) in the node log."""
        self.owner.add_log(
            'box_{0}'.format(action),
            auth=auth,
            params={
                'path': metadata['materialized'],
                'project': self.owner.parent_id,
                'node': self.owner._id,
                'folder': self.folder_id,
                'urls': {
                    'view': self.owner.web_url_for('addon_view_or_download_file', provider='box', action='view', path=metadata['path']),
                    'download': self.owner.web_url_for('addon_view_or_download_file', provider='box', action='download', path=metadata['path']),
                },
            },
        )

    ##### Callback overrides #####

    def after_delete(self, node=None, user=None):
        # Node deletion by a user: deauthorize and record the event.
        self.deauthorize(Auth(user=user), add_log=True)
        self.save()

    def on_delete(self):
        # Addon removal without an acting user: deauthorize silently.
        self.deauthorize(add_log=False)
        self.clear_auth()
        self.save()
# frozen_string_literal: true
require "cases/helper"
require "models/contact"
require "models/helicopter"
# Covers the ActiveModel::Conversion default implementations: #to_model,
# #to_key, #to_param (including composite ids and per-class delimiters),
# and #to_partial_path.
class ConversionTest < ActiveModel::TestCase
  test "to_model default implementation returns self" do
    contact = Contact.new
    assert_equal contact, contact.to_model
  end

  test "to_key default implementation returns nil for new records" do
    assert_nil Contact.new.to_key
  end

  test "to_key default implementation returns the id in an array for persisted records" do
    assert_equal [1], Contact.new(id: 1).to_key
  end

  test "to_key doesn't double-wrap composite `id`s" do
    assert_equal ["abc", "xyz"], Contact.new(id: ["abc", "xyz"]).to_key
  end

  test "to_param default implementation returns nil for new records" do
    assert_nil Contact.new.to_param
  end

  test "to_param default implementation returns a string of ids for persisted records" do
    assert_equal "1", Contact.new(id: 1).to_param
  end

  test "to_param returns the string joined by '-'" do
    assert_equal "abc-xyz", Contact.new(id: ["abc", "xyz"]).to_param
  end

  test "to_param returns nil if composite id is incomplete" do
    assert_nil Contact.new(id: [1, nil]).to_param
  end

  test "to_param returns nil if to_key is nil" do
    # Persisted, but to_key still returns nil (no id set).
    klass = Class.new(Contact) do
      def persisted?
        true
      end
    end

    assert_nil klass.new.to_param
  end

  test "to_partial_path default implementation returns a string giving a relative path" do
    assert_equal "contacts/contact", Contact.new.to_partial_path
    assert_equal "helicopters/helicopter", Helicopter.new.to_partial_path,
      "ActiveModel::Conversion#to_partial_path caching should be class-specific"
  end

  test "to_partial_path handles namespaced models" do
    assert_equal "helicopter/comanches/comanche", Helicopter::Comanche.new.to_partial_path
  end

  test "to_partial_path handles non-standard model_name" do
    assert_equal "attack_helicopters/ah-64", Helicopter::Apache.new.to_partial_path
  end

  test "#to_param_delimiter allows redefining the delimiter used in #to_param" do
    old_delimiter = Contact.param_delimiter
    Contact.param_delimiter = "_"
    assert_equal("abc_xyz", Contact.new(id: ["abc", "xyz"]).to_param)
  ensure
    # Restore the class-level delimiter so other tests are unaffected.
    Contact.param_delimiter = old_delimiter
  end

  test "#to_param_delimiter is defined per class" do
    old_contact_delimiter = Contact.param_delimiter
    custom_contract = Class.new(Contact)

    Contact.param_delimiter = "_"
    custom_contract.param_delimiter = ";"

    assert_equal("abc_xyz", Contact.new(id: ["abc", "xyz"]).to_param)
    assert_equal("abc;xyz", custom_contract.new(id: ["abc", "xyz"]).to_param)
  ensure
    Contact.param_delimiter = old_contact_delimiter
  end
end
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A deep MNIST classifier using convolutional layers.
Sample usage:
python mnist.py --help
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import functools
import os
import sys
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
class MNISTModel(tfe.Network):
  """MNIST Network.

  Network structure is equivalent to:
  https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py
  and
  https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py

  But written using the tf.layers API.
  """

  def __init__(self, data_format):
    """Creates a model for classifying a hand-written digit.

    Args:
      data_format: Either 'channels_first' or 'channels_last'.
        'channels_first' is typically faster on GPUs while 'channels_last' is
        typically faster on CPUs. See
        https://www.tensorflow.org/performance/performance_guide#data_formats
    """
    super(MNISTModel, self).__init__(name='')
    # Target shape for reshaping flat 784-pixel inputs: NCHW or NHWC.
    if data_format == 'channels_first':
      self._input_shape = [-1, 1, 28, 28]
    else:
      assert data_format == 'channels_last'
      self._input_shape = [-1, 28, 28, 1]
    # track_layer registers each layer so its variables are owned by this Network.
    self.conv1 = self.track_layer(
        tf.layers.Conv2D(32, 5, data_format=data_format, activation=tf.nn.relu))
    self.conv2 = self.track_layer(
        tf.layers.Conv2D(64, 5, data_format=data_format, activation=tf.nn.relu))
    self.fc1 = self.track_layer(tf.layers.Dense(1024, activation=tf.nn.relu))
    self.fc2 = self.track_layer(tf.layers.Dense(10))
    self.dropout = self.track_layer(tf.layers.Dropout(0.5))
    # A single pooling layer object is reused after each conv layer.
    self.max_pool2d = self.track_layer(
        tf.layers.MaxPooling2D(
            (2, 2), (2, 2), padding='SAME', data_format=data_format))

  def call(self, inputs, training):
    """Computes labels from inputs.

    Users should invoke __call__ to run the network, which delegates to this
    method (and not call this method directly).

    Args:
      inputs: A batch of images as a Tensor with shape [batch_size, 784].
      training: True if invoked in the context of training (causing dropout to
        be applied).  False otherwise.

    Returns:
      A Tensor with shape [batch_size, 10] containing the predicted logits
      for each image in the batch, for each of the 10 classes.
    """
    x = tf.reshape(inputs, self._input_shape)
    x = self.conv1(x)
    x = self.max_pool2d(x)
    x = self.conv2(x)
    x = self.max_pool2d(x)
    x = tf.layers.flatten(x)
    x = self.fc1(x)
    # Dropout is only active during training.
    if training:
      x = self.dropout(x)
    x = self.fc2(x)
    return x
def loss(predictions, labels):
  """Mean softmax cross-entropy between logits and one-hot labels."""
  per_example = tf.nn.softmax_cross_entropy_with_logits(
      logits=predictions, labels=labels)
  return tf.reduce_mean(per_example)
def compute_accuracy(predictions, labels):
  """Fraction of examples whose argmax prediction matches the label."""
  predicted_class = tf.argmax(predictions, axis=1, output_type=tf.int64)
  actual_class = tf.argmax(labels, axis=1, output_type=tf.int64)
  correct = tf.cast(tf.equal(predicted_class, actual_class), dtype=tf.float32)
  # Divide the number of correct predictions by the (static) batch size.
  return tf.reduce_sum(correct) / float(predictions.shape[0].value)
def train_one_epoch(model, optimizer, dataset, log_interval=None):
  """Trains model on `dataset` using `optimizer`."""

  tf.train.get_or_create_global_step()

  def step_loss(labels, images):
    # Forward pass plus summary logging; returns the scalar loss tensor.
    logits = model(images, training=True)
    batch_loss = loss(logits, labels)
    tf.contrib.summary.scalar('loss', batch_loss)
    tf.contrib.summary.scalar('accuracy', compute_accuracy(logits, labels))
    return batch_loss

  for batch_index, (images, labels) in enumerate(tfe.Iterator(dataset)):
    with tf.contrib.summary.record_summaries_every_n_global_steps(10):
      loss_fn = functools.partial(step_loss, labels, images)
      optimizer.minimize(loss_fn, global_step=tf.train.get_global_step())
      if log_interval and batch_index % log_interval == 0:
        # NOTE: this re-runs the forward pass just to report the loss value.
        print('Batch #%d\tLoss: %.6f' % (batch_index, loss_fn()))
def test(model, dataset):
  """Perform an evaluation of `model` on the examples from `dataset`."""
  mean_loss = tfe.metrics.Mean('loss')
  accuracy = tfe.metrics.Accuracy('accuracy')

  for images, labels in tfe.Iterator(dataset):
    logits = model(images, training=False)
    mean_loss(loss(logits, labels))
    predicted = tf.argmax(logits, axis=1, output_type=tf.int64)
    actual = tf.argmax(labels, axis=1, output_type=tf.int64)
    accuracy(predicted, actual)

  print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
        (mean_loss.result(), 100 * accuracy.result()))
  # Record the final metrics unconditionally (not rate-limited by step).
  with tf.contrib.summary.always_record_summaries():
    tf.contrib.summary.scalar('loss', mean_loss.result())
    tf.contrib.summary.scalar('accuracy', accuracy.result())
def load_data(data_dir):
  """Returns training and test tf.data.Dataset objects."""
  mnist = input_data.read_data_sets(data_dir, one_hot=True)
  train_dataset = tf.data.Dataset.from_tensor_slices(
      (mnist.train.images, mnist.train.labels))
  # The test split is exposed as a single big batch.
  test_dataset = tf.data.Dataset.from_tensors(
      (mnist.test.images, mnist.test.labels))
  return (train_dataset, test_dataset)
def main(_):
  """Trains for 5 epochs with per-epoch evaluation and checkpointing."""
  tfe.enable_eager_execution()

  # Log Info
  print("-" * 64)
  print("TEST INFO - EAGER")
  print("-" * 64)
  print("TF version:\t {}".format(tf.__version__))
  print("Dataset:\t MNIST")
  print("Model:\t CNN")

  # Prefer GPU with channels_first; fall back to CPU with channels_last.
  (device, data_format) = ('/gpu:0', 'channels_first')
  if FLAGS.no_gpu or tfe.num_gpus() <= 0:
    (device, data_format) = ('/cpu:0', 'channels_last')
  print('Device:\t {}'.format(device))

  if data_format == 'channels_first':
    print("Data format:\t NCHW (channel first)")
  else:
    print("Data format:\t NHWC (channel last)")
  print("=" * 64)

  # Load the datasets
  (train_ds, test_ds) = load_data(FLAGS.data_dir)
  train_ds = train_ds.shuffle(60000).batch(FLAGS.batch_size)

  # Create the model and optimizer
  model = MNISTModel(data_format)
  optimizer = tf.train.MomentumOptimizer(FLAGS.lr, FLAGS.momentum)

  # With no --output_dir the writers get a None directory, which disables
  # writing summaries to disk.
  if FLAGS.output_dir:
    train_dir = os.path.join(FLAGS.output_dir, 'train')
    test_dir = os.path.join(FLAGS.output_dir, 'eval')
    tf.gfile.MakeDirs(FLAGS.output_dir)
  else:
    train_dir = None
    test_dir = None
  summary_writer = tf.contrib.summary.create_file_writer(
      train_dir, flush_millis=10000)
  test_summary_writer = tf.contrib.summary.create_file_writer(
      test_dir, flush_millis=10000, name='test')
  checkpoint_prefix = os.path.join(FLAGS.checkpoint_dir, 'ckpt')

  with tf.device(device):
    for epoch in range(1, 6):
      # Variables created inside this context are restored from the latest
      # checkpoint (if any) at creation time.
      with tfe.restore_variables_on_create(
          tf.train.latest_checkpoint(FLAGS.checkpoint_dir)):
        global_step = tf.train.get_or_create_global_step()
        start = time.time()
        with summary_writer.as_default():
          train_one_epoch(model, optimizer, train_ds, FLAGS.log_interval)
        end = time.time()
        print('\nTrain time for epoch #%d (global step %d): %f' % (
            epoch, global_step.numpy(), end - start))
      with test_summary_writer.as_default():
        test(model, test_ds)
      # Save model and optimizer state plus the step counter once per epoch.
      all_variables = (
          model.variables
          + optimizer.variables()
          + [global_step])
      tfe.Saver(all_variables).save(
          checkpoint_prefix, global_step=global_step)
if __name__ == '__main__':
  # Parse known flags into FLAGS; anything unrecognized is forwarded to
  # tf.app.run so absl/TF flags still work.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data-dir',
      type=str,
      default='/tmp/tensorflow/mnist/input_data',
      help='Directory for storing input data')
  parser.add_argument(
      '--batch-size',
      type=int,
      default=100,
      metavar='N',
      # Help text now matches the actual default (was incorrectly "64").
      help='input batch size for training (default: 100)')
  parser.add_argument(
      '--log-interval',
      type=int,
      default=10,
      metavar='N',
      help='how many batches to wait before logging training status')
  parser.add_argument(
      '--output_dir',
      type=str,
      default=None,
      metavar='N',
      help='Directory to write TensorBoard summaries')
  parser.add_argument(
      '--checkpoint_dir',
      type=str,
      default='/tmp/tensorflow/mnist/checkpoints/',
      metavar='N',
      help='Directory to save checkpoints in (once per epoch)')
  parser.add_argument(
      '--lr',
      type=float,
      default=0.01,
      metavar='LR',
      help='learning rate (default: 0.01)')
  parser.add_argument(
      '--momentum',
      type=float,
      default=0.5,
      metavar='M',
      help='SGD momentum (default: 0.5)')
  parser.add_argument(
      '--no-gpu',
      action='store_true',
      default=False,
      help='disables GPU usage even if a GPU is available')

  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
# -*- coding: utf-8 -*-
"""The file object file input/output (IO) object implementation."""
import abc
import os
from dfvfs.file_io import file_io
class FileObjectIO(file_io.FileIO):
  """Base class for file object-based file input/output (IO) object."""

  # pylint: disable=redundant-returns-doc

  def __init__(self, resolver_context, path_spec):
    """Initializes a file input/output (IO) object.

    Args:
      resolver_context (Context): resolver context.
      path_spec (PathSpec): a path specification.
    """
    super(FileObjectIO, self).__init__(resolver_context, path_spec)
    # Underlying file-like object created by _OpenFileObject.
    self._file_object = None
    # Cached size, computed lazily in get_size when the wrapped object does
    # not provide get_size itself.
    self._size = None

  def _Close(self):
    """Closes the file-like object."""
    self._file_object.close()
    self._file_object = None

  def _Open(self, mode='rb'):
    """Opens the file-like object defined by path specification.

    Args:
      mode (Optional[str]): file access mode.

    Raises:
      AccessError: if the access to open the file was denied.
      IOError: if the file-like object could not be opened.
      OSError: if the file-like object could not be opened.
      PathSpecError: if the path specification is incorrect.
    """
    self._file_object = self._OpenFileObject(self._path_spec)
    if not self._file_object:
      raise IOError('Unable to open missing file-like object.')

  # pylint: disable=redundant-returns-doc
  @abc.abstractmethod
  def _OpenFileObject(self, path_spec):
    """Opens the file-like object defined by path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      FileIO: a file-like object.

    Raises:
      PathSpecError: if the path specification is incorrect.
    """

  # Note: that the following functions do not follow the style guide
  # because they are part of the file-like object interface.
  # pylint: disable=invalid-name

  def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    # Do not pass the size argument as a keyword argument since it breaks
    # some file-like object implementations.
    return self._file_object.read(size)

  def seek(self, offset, whence=os.SEEK_SET):
    """Seeks to an offset within the file-like object.

    Args:
      offset (int): offset to seek to.
      whence (Optional(int)): value that indicates whether offset is an absolute
          or relative position within the file.

    Raises:
      IOError: if the seek failed.
      OSError: if the seek failed.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    self._file_object.seek(offset, whence)

  def get_offset(self):
    """Retrieves the current offset into the file-like object.

    Returns:
      int: current offset into the file-like object.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    # Fall back to the standard tell() when the wrapped object does not
    # implement the dfvfs get_offset interface.
    if not hasattr(self._file_object, 'get_offset'):
      return self._file_object.tell()
    return self._file_object.get_offset()

  def get_size(self):
    """Retrieves the size of the file-like object.

    Returns:
      int: size of the file-like object data.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._is_open:
      raise IOError('Not opened.')

    if not hasattr(self._file_object, 'get_size'):
      if not self._size:
        # Determine the size by seeking to the end, then restore the
        # original offset so the caller's position is unchanged.
        current_offset = self.get_offset()
        self.seek(0, os.SEEK_END)
        self._size = self.get_offset()
        self.seek(current_offset, os.SEEK_SET)
      return self._size

    return self._file_object.get_size()
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/media/mediatek,mdp3-tdshp.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Media Data Path 3 Two-Dimensional Sharpness
maintainers:
- Matthias Brugger <matthias.bgg@gmail.com>
- Moudy Ho <moudy.ho@mediatek.com>
description:
  Two-Dimensional Sharpness (TDSHP) is a Media Data Path 3 (MDP3) component
  used to perform image edge sharpening and enhance vividness and contrast.
properties:
compatible:
oneOf:
- enum:
- mediatek,mt8195-mdp3-tdshp
- items:
- const: mediatek,mt8188-mdp3-tdshp
- const: mediatek,mt8195-mdp3-tdshp
reg:
maxItems: 1
mediatek,gce-client-reg:
description:
The register of display function block to be set by gce. There are 4 arguments,
such as gce node, subsys id, offset and register size. The subsys id that is
mapping to the register of display function blocks is defined in the gce header
include/dt-bindings/gce/<chip>-gce.h of each chips.
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
items:
- description: phandle of GCE
- description: GCE subsys id
- description: register offset
- description: register size
maxItems: 1
clocks:
maxItems: 1
required:
- compatible
- reg
- mediatek,gce-client-reg
- clocks
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/mt8195-clk.h>
#include <dt-bindings/gce/mt8195-gce.h>
display@14007000 {
compatible = "mediatek,mt8195-mdp3-tdshp";
reg = <0x14007000 0x1000>;
mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0x7000 0x1000>;
clocks = <&vppsys0 CLK_VPP0_MDP_TDSHP>;
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/media/mediatek,mdp3-tdshp.yaml |
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package terraform
import "context"
// UIInput is the interface that must be implemented to ask for input
// from this user. This should forward the request to wherever the user
// inputs things to ask for values.
type UIInput interface {
	// Input prompts for and returns a single value described by opts.
	Input(context.Context, *InputOpts) (string, error)
}
// InputOpts are options for asking for input. Each value describes a
// single prompt shown to the user.
type InputOpts struct {
	// Id is a unique ID for the question being asked that might be
	// used for logging or to look up a prior answered question.
	Id string

	// Query is a human-friendly question for inputting this value.
	Query string

	// Description is a description about what this option is. Be wary
	// that this will probably be in a terminal so split lines as you see
	// necessary.
	Description string

	// Default will be the value returned if no data is entered.
	Default string

	// Secret should be true if we are asking for sensitive input.
	// If attached to a TTY, Terraform will disable echo.
	Secret bool
}
/*
* Copyright 2014-2025 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
@file:OptIn(ExperimentalKotlinGradlePluginApi::class)
import org.gradle.api.NamedDomainObjectContainer
import org.gradle.api.NamedDomainObjectProvider
import org.gradle.api.NamedDomainObjectSet
import org.jetbrains.kotlin.gradle.ExperimentalKotlinGradlePluginApi
import org.jetbrains.kotlin.gradle.dsl.KotlinSourceSetConvention
import org.jetbrains.kotlin.gradle.plugin.KotlinDependencyHandler
import org.jetbrains.kotlin.gradle.plugin.KotlinSourceSet
private typealias KotlinSourceSets = NamedDomainObjectContainer<KotlinSourceSet>
private typealias KotlinSourceSetProvider = NamedDomainObjectProvider<KotlinSourceSet>
private typealias OptionalKotlinSourceSetProvider = NamedDomainObjectSet<KotlinSourceSet>
// Additional accessors to the ones declared in KotlinMultiplatformSourceSetConventions
val KotlinSourceSets.posixMain: KotlinSourceSetProvider by KotlinSourceSetConvention
val KotlinSourceSets.darwinMain: KotlinSourceSetProvider by KotlinSourceSetConvention
val KotlinSourceSets.darwinTest: KotlinSourceSetProvider by KotlinSourceSetConvention
val KotlinSourceSets.desktopMain: KotlinSourceSetProvider by KotlinSourceSetConvention
val KotlinSourceSets.desktopTest: KotlinSourceSetProvider by KotlinSourceSetConvention
val KotlinSourceSets.windowsMain: KotlinSourceSetProvider by KotlinSourceSetConvention
val KotlinSourceSets.windowsTest: KotlinSourceSetProvider by KotlinSourceSetConvention
val KotlinSourceSets.optional: OptionalSourceSets get() = OptionalSourceSets(this)
@JvmInline
value class OptionalSourceSets(private val sourceSets: KotlinSourceSets) {
    // Each accessor yields a name-filtered set that may be empty, so callers can
    // configure source sets that are only present in some projects without failing
    // where they are absent.
    val androidMain: OptionalKotlinSourceSetProvider get() = optional("androidMain")
    val androidTest: OptionalKotlinSourceSetProvider get() = optional("androidTest")
    val androidDeviceTest: OptionalKotlinSourceSetProvider get() = optional("androidDeviceTest")

    // Filter by exact name match; named { } returns a live, possibly-empty view.
    private fun optional(name: String): OptionalKotlinSourceSetProvider = sourceSets.named { it == name }
}
/**
 * Applies [handler] to the dependencies block of every source set in this
 * optional set, if any are present. A no-op when the set is empty.
 */
fun OptionalKotlinSourceSetProvider.dependencies(handler: KotlinDependencyHandler.() -> Unit) {
    configureEach { dependencies(handler) }
}
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package addrs
import (
"fmt"
"strings"
"unicode"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/gocty"
)
// InstanceKey represents the key of an instance within an object that
// contains multiple instances due to using "count" or "for_each" arguments
// in configuration.
//
// IntKey and StringKey are the two implementations of this type. No other
// implementations are allowed. The single instance of an object that _isn't_
// using "count" or "for_each" is represented by NoKey, which is a nil
// InstanceKey.
type InstanceKey interface {
	// instanceKeySigil is an unexported marker method that limits which
	// types can implement this interface to those declared in this package.
	instanceKeySigil()

	// String returns the key rendered in the bracketed index syntax used
	// in instance addresses, e.g. [0] or ["foo"].
	String() string

	// Value returns the cty.Value of the appropriate type for the InstanceKey
	// value.
	Value() cty.Value
}
// ParseInstanceKey returns the instance key corresponding to the given value,
// which must be known and non-null.
//
// If an unknown or null value is provided then this function will panic. This
// function is intended to deal with the values that would naturally be found
// in a hcl.TraverseIndex, which (when parsed from source, at least) can never
// contain unknown or null values.
func ParseInstanceKey(key cty.Value) (InstanceKey, error) {
	// Only string and number values are valid instance keys; dispatch on
	// the value's type and reject anything else.
	ty := key.Type()
	if ty == cty.String {
		return StringKey(key.AsString()), nil
	}
	if ty == cty.Number {
		var idx int
		err := gocty.FromCtyValue(key, &idx)
		return IntKey(idx), err
	}
	return NoKey, fmt.Errorf("either a string or an integer is required")
}
// NoKey represents the absence of an InstanceKey, for the single instance
// of a configuration object that does not use "count" or "for_each" at all.
var NoKey InstanceKey
// WildcardKey represents the "unknown" value of an InstanceKey. This is used
// within the deferral logic to express absolute module and resource addresses
// that are not known at the time of planning.
var WildcardKey InstanceKey = &wildcardKey{}
// wildcardKey is a special kind of InstanceKey that represents the "unknown"
// value of an InstanceKey. This is used within the deferral logic to express
// absolute module and resource addresses that are not known at the time of
// planning.
type wildcardKey struct{}

// instanceKeySigil marks wildcardKey as an InstanceKey implementation.
func (w *wildcardKey) instanceKeySigil() {}

// String returns the placeholder index syntax used for an unknown key.
func (w *wildcardKey) String() string {
	return "[*]"
}

// Value returns cty.DynamicVal, since the concrete key value is not known.
func (w *wildcardKey) Value() cty.Value {
	return cty.DynamicVal
}
// IntKey is the InstanceKey representation representing integer indices, as
// used when the "count" argument is specified or if for_each is used with
// a sequence type.
type IntKey int

func (k IntKey) instanceKeySigil() {
}

// String renders the key in index syntax, e.g. "[3]".
func (k IntKey) String() string {
	return fmt.Sprintf("[%d]", int(k))
}

// Value returns the key as a cty number value.
func (k IntKey) Value() cty.Value {
	return cty.NumberIntVal(int64(k))
}
// StringKey is the InstanceKey representation representing string indices, as
// used when the "for_each" argument is specified with a map or object type.
type StringKey string

func (k StringKey) instanceKeySigil() {
}

// String renders the key in quoted index syntax, e.g. ["foo"].
func (k StringKey) String() string {
	// We use HCL's quoting syntax here so that we can in principle parse
	// an address constructed by this package as if it were an HCL
	// traversal, even if the string contains HCL's own metacharacters.
	return fmt.Sprintf("[%s]", toHCLQuotedString(string(k)))
}

// Value returns the key as a cty string value.
func (k StringKey) Value() cty.Value {
	return cty.StringVal(string(k))
}
// InstanceKeyLess returns true if the first given instance key i should sort
// before the second key j, and false otherwise.
func InstanceKeyLess(i, j InstanceKey) bool {
	iTy := instanceKeyType(i)
	jTy := instanceKeyType(j)

	switch {
	case i == j:
		return false
	case i == NoKey:
		// NoKey sorts before any real key.
		return true
	case j == NoKey:
		return false
	case iTy != jTy:
		// The ordering here is arbitrary except that we want NoKeyType
		// to sort before the others, so we'll just use the enum values
		// of InstanceKeyType here (where NoKey is zero, sorting before
		// any other).
		return uint32(iTy) < uint32(jTy)
	case iTy == IntKeyType:
		// Same type: compare numerically ...
		return int(i.(IntKey)) < int(j.(IntKey))
	case iTy == StringKeyType:
		// ... or lexically for strings.
		return string(i.(StringKey)) < string(j.(StringKey))
	default:
		// Shouldn't be possible to get down here in practice, since the
		// above is exhaustive.
		return false
	}
}
// instanceKeyType maps a concrete key to its InstanceKeyType enum value.
// Any key that is neither a StringKey nor an IntKey (including nil/NoKey)
// is reported as NoKeyType.
func instanceKeyType(k InstanceKey) InstanceKeyType {
	switch k.(type) {
	case StringKey:
		return StringKeyType
	case IntKey:
		return IntKeyType
	default:
		return NoKeyType
	}
}
// InstanceKeyType represents the different types of instance key that are
// supported. Usually it is sufficient to simply type-assert an InstanceKey
// value to either IntKey or StringKey, but this type and its values can be
// used to represent the types themselves, rather than specific values
// of those types.
type InstanceKeyType rune
const (
NoKeyType InstanceKeyType = 0
IntKeyType InstanceKeyType = 'I'
StringKeyType InstanceKeyType = 'S'
// UnknownKeyType is a placeholder key type for situations where Terraform
// doesn't yet know which key type to use. There are no [InstanceKey]
// values of this type.
UnknownKeyType InstanceKeyType = '?'
)
// toHCLQuotedString is a helper which formats the given string in a way that
// HCL's expression parser would treat as a quoted string template.
//
// This includes:
// - Adding quote marks at the start and the end.
// - Using backslash escapes as needed for characters that cannot be represented directly.
// - Escaping anything that would be treated as a template interpolation or control sequence.
func toHCLQuotedString(s string) string {
	// This is an adaptation of a similar function inside the hclwrite package,
	// inlined here because hclwrite's version generates HCL tokens but we
	// only need normal strings.
	if len(s) == 0 {
		return `""`
	}
	var buf strings.Builder
	buf.WriteByte('"')
	for i, r := range s {
		switch r {
		case '\n':
			buf.WriteString(`\n`)
		case '\r':
			buf.WriteString(`\r`)
		case '\t':
			buf.WriteString(`\t`)
		case '"':
			buf.WriteString(`\"`)
		case '\\':
			buf.WriteString(`\\`)
		// "${" and "%{" would start HCL template sequences, so the
		// introducer character must be escaped when followed by '{'.
		case '$', '%':
			buf.WriteRune(r)
			remain := s[i+1:]
			if len(remain) > 0 && remain[0] == '{' {
				// Double up our template introducer symbol to escape it.
				buf.WriteRune(r)
			}
		default:
			if !unicode.IsPrint(r) {
				// Non-printables: \uXXXX for code points below 0x10000,
				// \UXXXXXXXX for the rest.
				var fmted string
				if r < 65536 {
					fmted = fmt.Sprintf("\\u%04x", r)
				} else {
					fmted = fmt.Sprintf("\\U%08x", r)
				}
				buf.WriteString(fmted)
			} else {
				buf.WriteRune(r)
			}
		}
	}
	buf.WriteByte('"')
	return buf.String()
}
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_role
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Role Avi RESTful Object
description:
- This module is used to configure Role object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
name:
description:
- Name of the object.
required: true
privileges:
description:
- List of permission.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Role object
avi_role:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_role
"""
RETURN = '''
obj:
description: Role (api/role) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point: build the Ansible module for the Avi Role object and
    hand it to the shared Avi API workflow.

    Returns the result of avi_ansible_api (or fail_json when the Avi SDK
    is missing).
    """
    specs = {
        'state': dict(default='present', choices=['absent', 'present']),
        'avi_api_update_method': dict(default='put', choices=['put', 'patch']),
        'avi_api_patch_op': dict(choices=['add', 'replace', 'delete']),
        'name': dict(type='str', required=True),
        'privileges': dict(type='list',),
        'tenant_ref': dict(type='str',),
        'url': dict(type='str',),
        'uuid': dict(type='str',),
    }
    # Common controller/credential options shared by all Avi modules.
    specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'role', set())
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
{
"apiVersion": "dashboard.grafana.app/v1beta1",
"kind": "Dashboard",
"metadata": {
"name": "ad5vfcn",
"namespace": "default",
"uid": "dlMZZl6GndU8gJLUQSmgZxXBPCNXyXhNBeQJhHXl0r4X",
"resourceVersion": "2",
"generation": 2,
"creationTimestamp": "2025-11-28T10:14:21Z",
"labels": {
"grafana.app/deprecatedInternalID": "288"
},
"annotations": {
"grafana.app/createdBy": "user:eex2ofwuj0agwd",
"grafana.app/updatedBy": "user:eex2ofwuj0agwd",
"grafana.app/updatedTimestamp": "2025-11-28T10:15:06Z"
}
},
"spec": {
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 288,
"links": [],
"panels": [
{
"datasource": {
"type": "testdata",
"uid": "gdev-testdata"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"barWidthFactor": 0.6,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"showValues": false,
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": 0
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"hideZeros": false,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "12.4.0-pre",
"targets": [
{
"datasource": {},
"queryType": "randomWalk",
"refId": "A"
}
],
"title": "New panel",
"type": "timeseries"
}
],
"preload": false,
"schemaVersion": 42,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"title": "Panel ds inheritance ",
"uid": "ad5vfcn",
"version": 2
},
"status": {}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/input/v1beta1.panel-datasource-type-datasource.json |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# This file is part of Cockpit.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# Cockpit is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Cockpit is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Cockpit; If not, see <http://www.gnu.org/licenses/>.
# Shared GitHub code. When run as a script, we print out info about
# our GitHub interaction.
import errno
import http.client
import json
import os
import socket
import sys
import time
import urllib.parse
from . import cache
__all__ = (
'GitHub',
'Checklist',
'TESTING',
'NO_TESTING',
'NOT_TESTED'
)
TESTING = "Testing in progress"
NOT_TESTED = "Not yet tested"
NO_TESTING = "Manual testing required"
OUR_CONTEXTS = [
"verify/",
"avocado/",
"container/",
"selenium/",
# generic prefix for external repos
"cockpit/",
]
ISSUE_TITLE_IMAGE_REFRESH = "Image refresh for {0}"
BASE = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))
TOKEN = "~/.config/github-token"
TEAM_CONTRIBUTORS = "Contributors"
def known_context(context):
    """Return True if the given status context starts with one of the
    prefixes we manage (OUR_CONTEXTS)."""
    return any(context.startswith(prefix) for prefix in OUR_CONTEXTS)
class Logger(object):
    """Append-only log file, one per host and month, under a directory
    that is created on demand."""

    def __init__(self, directory):
        short_host = socket.gethostname().split(".")[0]
        stamp = time.strftime("%Y%m")
        self.path = os.path.join(directory, "{0}-{1}.log".format(short_host, stamp))
        if not os.path.exists(directory):
            os.makedirs(directory)

    def write(self, value):
        """Append value to the log file."""
        # Yes, we open the file each time
        with open(self.path, 'a') as f:
            f.write(value)
class GitHub(object):
    """Cached, logged access to the GitHub REST API for one repository.

    GET responses are cached on disk and revalidated with conditional
    requests (ETag / Last-Modified). Every request is appended to a
    monthly access log. The API token is read from ~/.config/github-token
    when present; without it, only unauthenticated access is available.
    """

    def __init__(self, base=None, cacher=None, repo=None):
        if base is None:
            if repo is None:
                repo = os.environ.get("GITHUB_BASE", "cockpit-project/cockpit")
            netloc = os.environ.get("GITHUB_API", "https://api.github.com")
            base = "{0}/repos/{1}/".format(netloc, repo)
        self.url = urllib.parse.urlparse(base)
        self.conn = None
        self.token = None
        self.debug = False
        try:
            gt = open(os.path.expanduser(TOKEN), "r")
            self.token = gt.read().strip()
            gt.close()
        except IOError as exc:
            # A missing token file just means unauthenticated access;
            # any other I/O error is a real problem.
            if exc.errno == errno.ENOENT:
                pass
            else:
                raise
        self.available = self.token and True or False

        # The cache directory is $TEST_DATA/github ~/.cache/github
        if not cacher:
            data = os.environ.get("TEST_DATA", os.path.expanduser("~/.cache"))
            cacher = cache.Cache(os.path.join(data, "github"))
        self.cache = cacher

        # Create a log for debugging our GitHub access
        self.log = Logger(self.cache.directory)
        self.log.write("")

    def qualify(self, resource):
        """Join a resource path onto the repository base path."""
        return urllib.parse.urljoin(self.url.path, resource)

    def request(self, method, resource, data="", headers=None):
        """Perform one HTTP request against the API.

        Reconnects and retries once when a kept-alive connection was
        dropped by the server. Returns a dict with keys status, reason,
        headers (lower-cased names) and data (decoded body).
        """
        if headers is None:
            headers = { }
        headers["User-Agent"] = "Cockpit Tests"
        if self.token:
            headers["Authorization"] = "token " + self.token
        connected = False
        while not connected:
            if not self.conn:
                if self.url.scheme == 'http':
                    self.conn = http.client.HTTPConnection(self.url.netloc)
                else:
                    self.conn = http.client.HTTPSConnection(self.url.netloc)
                # A freshly created connection gets no retry: if it fails
                # too, the exception propagates.
                connected = True
            self.conn.set_debuglevel(self.debug and 1 or 0)
            try:
                self.conn.request(method, self.qualify(resource), data, headers)
                response = self.conn.getresponse()
                break
            # This happens when GitHub disconnects in python3
            except ConnectionResetError:
                if connected:
                    raise
                self.conn = None
            # This happens when GitHub disconnects a keep-alive connection
            except http.client.BadStatusLine:
                if connected:
                    raise
                self.conn = None
            # This happens when TLS is the source of a disconnection
            except socket.error as ex:
                if connected or ex.errno != errno.EPIPE:
                    raise
                self.conn = None
        heads = { }
        for (header, value) in response.getheaders():
            heads[header.lower()] = value
        # Record the request in Apache common-log-like format.
        self.log.write('{0} - - [{1}] "{2} {3} HTTP/1.1" {4} -\n'.format(
            self.url.netloc,
            time.asctime(),
            method,
            resource,
            response.status
        ))
        return {
            "status": response.status,
            "reason": response.reason,
            "headers": heads,
            "data": response.read().decode('utf-8')
        }

    def get(self, resource):
        """GET a resource and return its parsed JSON body, or None on 404.

        Serves from the on-disk cache when current, otherwise issues a
        conditional request and refreshes the cache. Raises RuntimeError
        on any other non-2xx status.
        """
        headers = { }
        qualified = self.qualify(resource)
        cached = self.cache.read(qualified)
        if cached:
            if self.cache.current(qualified):
                return json.loads(cached['data'] or "null")
            etag = cached['headers'].get("etag", None)
            modified = cached['headers'].get("last-modified", None)
            if etag:
                headers['If-None-Match'] = etag
            elif modified:
                headers['If-Modified-Since'] = modified
        response = self.request("GET", resource, "", headers)
        if response['status'] == 404:
            return None
        elif cached and response['status'] == 304: # Not modified
            self.cache.write(qualified, cached)
            return json.loads(cached['data'] or "null")
        elif response['status'] < 200 or response['status'] >= 300:
            sys.stderr.write("{0}\n{1}\n".format(resource, response['data']))
            raise RuntimeError("GitHub API problem: {0}".format(response['reason'] or response['status']))
        else:
            self.cache.write(qualified, response)
            return json.loads(response['data'] or "null")

    def post(self, resource, data, accept=[]):
        """POST JSON data to a resource and return the parsed response.

        Status codes listed in accept are tolerated in addition to 2xx;
        anything else raises RuntimeError.
        """
        response = self.request("POST", resource, json.dumps(data), { "Content-Type": "application/json" })
        status = response['status']
        if (status < 200 or status >= 300) and status not in accept:
            sys.stderr.write("{0}\n{1}\n".format(resource, response['data']))
            raise RuntimeError("GitHub API problem: {0}".format(response['reason'] or status))
        # NOTE(review): cache.mark() presumably flags cached GETs as stale
        # after a write -- confirm against the cache module.
        self.cache.mark()
        return json.loads(response['data'])

    def patch(self, resource, data, accept=[]):
        """PATCH JSON data to a resource; same contract as post()."""
        response = self.request("PATCH", resource, json.dumps(data), { "Content-Type": "application/json" })
        status = response['status']
        if (status < 200 or status >= 300) and status not in accept:
            sys.stderr.write("{0}\n{1}\n".format(resource, response['data']))
            raise RuntimeError("GitHub API problem: {0}".format(response['reason'] or status))
        self.cache.mark()
        return json.loads(response['data'])

    def statuses(self, revision):
        """Return a map of context name to status for the given commit,
        keeping only contexts matched by known_context() and only the
        first status seen per context.
        """
        result = { }
        page = 1
        count = 100
        while count == 100:
            # NOTE(review): self.get() returns None on 404, which would
            # crash the "in" check below -- confirm the endpoint always
            # exists for valid revisions.
            data = self.get("commits/{0}/status?page={1}&per_page={2}".format(revision, page, count))
            count = 0
            page += 1
            if "statuses" in data:
                for status in data["statuses"]:
                    if known_context(status["context"]) and status["context"] not in result:
                        result[status["context"]] = status
                count = len(data["statuses"])
        return result

    def pulls(self, state='open', since=None):
        """List pull requests in the given state.

        When since (a UNIX timestamp) is given, closed pulls closed
        before it and open pulls created before it are skipped.
        """
        result = [ ]
        page = 1
        count = 100
        while count == 100:
            pulls = self.get("pulls?page={0}&per_page={1}&state={2}&sort=created&direction=desc".format(page, count, state))
            count = 0
            page += 1
            for pull in pulls or []:
                # Check that the pulls are past the expected date
                if since:
                    closed = pull.get("closed_at", None)
                    if closed and since > time.mktime(time.strptime(closed, "%Y-%m-%dT%H:%M:%SZ")):
                        continue
                    created = pull.get("created_at", None)
                    if not closed and created and since > time.mktime(time.strptime(created, "%Y-%m-%dT%H:%M:%SZ")):
                        continue
                # NOTE(review): count only counts *kept* pulls, so a page
                # with filtered-out entries ends pagination early --
                # possibly intended given the descending created sort;
                # confirm.
                result.append(pull)
                count += 1
        return result

    # The since argument is seconds since the issue was either
    # created (for open issues) or closed (for closed issues)
    def issues(self, labels=[ "bot" ], state="open", since=None):
        """List issues carrying all the given labels.

        state may be "open", "closed" or "all"; since is a UNIX timestamp
        interpreted as described above. (The mutable default for labels
        is never modified here, so it is safe.)
        """
        result = [ ]
        page = 1
        count = 100
        opened = True
        label = ",".join(labels)
        while count == 100 and opened:
            req = "issues?labels={0}&state=all&page={1}&per_page={2}".format(label, page, count)
            issues = self.get(req)
            count = 0
            page += 1
            opened = False
            for issue in issues:
                count += 1
                # On each loop of 100 issues we must encounter at least 1 open issue
                if issue["state"] == "open":
                    opened = True
                # Make sure the state matches
                if state != "all" and issue["state"] != state:
                    continue
                # Check that the issues are past the expected date
                if since:
                    closed = issue.get("closed_at", None)
                    if closed and since > time.mktime(time.strptime(closed, "%Y-%m-%dT%H:%M:%SZ")):
                        continue
                    created = issue.get("created_at", None)
                    if not closed and created and since > time.mktime(time.strptime(created, "%Y-%m-%dT%H:%M:%SZ")):
                        continue
                result.append(issue)
        return result

    def commits(self, branch='master', since=None):
        """Yield commits on the given branch, optionally limited to
        commits after the given UNIX timestamp."""
        page = 1
        count = 100
        if since:
            since = "&since={0}".format(time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(since)))
        else:
            since = ""
        while count == 100:
            commits = self.get("commits?page={0}&per_page={1}&sha={2}{3}".format(page, count, branch, since))
            count = 0
            page += 1
            for commit in commits or []:
                yield commit
                count += 1

    def whitelist(self):
        """Return the set of member logins of the Contributors team."""
        users = set()
        teamId = self.teamIdFromName(TEAM_CONTRIBUTORS)
        page = 1
        count = 100
        while count == 100:
            data = self.get("/teams/{0}/members?page={1}&per_page={2}".format(teamId, page, count)) or []
            users.update(user.get("login") for user in data)
            count = len(data)
            page += 1
        return users

    def teamIdFromName(self, name):
        """Return the id of the named team in the cockpit-project org.

        Raises KeyError when no such team exists.
        """
        for team in self.get("/orgs/cockpit-project/teams") or []:
            if team.get("name") == name:
                return team["id"]
        # for/else: runs only when the loop completes without returning.
        else:
            raise KeyError("Team {0} not found".format(name))
class Checklist(object):
    """A GitHub-style markdown task list embedded in an issue or PR body.

    Parsing fills ``self.items``, a mapping of item text to its state:
    True/False for checked/unchecked, or a string status (rendered as
    "status: item" on an unchecked line). ``self.body`` always holds the
    re-rendered markdown text.
    """

    def __init__(self, body=None):
        self.process(body or "")

    @staticmethod
    def format_line(item, check):
        """Render one checklist line.

        check may be a bool (checkbox state) or a string status, which is
        rendered unchecked with a "status: " prefix.
        """
        status = ""
        if isinstance(check, str):
            status = check + ": "
            check = False
        return " * [{0}] {1}{2}".format(check and "x" or " ", status, item)

    @staticmethod
    def parse_line(line):
        """Parse one line into (item, check).

        Returns (None, None) for lines that are not checklist entries.
        check is a bool, or the status string for "status: item" lines.
        """
        check = item = None
        stripped = line.strip()
        if stripped[:6] in ("* [ ] ", "- [ ] ", "* [x] ", "- [x] ", "* [X] ", "- [X] "):
            status, unused, item = stripped[6:].strip().partition(": ")
            if not item:
                # No "status: " prefix; the whole text is the item.
                item = status
                status = None
            if status:
                check = status
            else:
                # Index 3 is the character inside the [ ] checkbox.
                check = stripped[3] in ("x", "X")
        return (item, check)

    def process(self, body, items=None):
        """Re-render body, applying the given {item: check} overrides.

        Overrides for items not present in the body are appended as new
        lines. Rebuilds ``self.items`` and ``self.body``.
        """
        # Copy so entries can be consumed below; using None as the default
        # avoids the shared-mutable-default pitfall of ``items={}``.
        items = dict(items) if items else {}
        self.items = {}
        lines = []
        for line in body.splitlines():
            (item, check) = self.parse_line(line)
            if item:
                if item in items:
                    check = items[item]
                    del items[item]
                line = self.format_line(item, check)
                self.items[item] = check
            lines.append(line)
        # Whatever overrides were not matched become new entries.
        for item, check in items.items():
            lines.append(self.format_line(item, check))
            self.items[item] = check
        self.body = "\n".join(lines)

    def check(self, item, checked=True):
        """Set the state of one item and re-render the body."""
        self.process(self.body, {item: checked})

    def add(self, item):
        """Append a new unchecked item."""
        self.process(self.body, {item: False})
# -*- coding: utf-8 -*-
#
# hl_api_info.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Functions to get information on NEST.
"""
from .hl_api_helper import *
import sys
import os
import webbrowser
@check_stack
def sysinfo():
    """Print information on the platform on which NEST was compiled."""
    # Delegates to the SLI command "sysinfo", which does the printing.
    sr("sysinfo")
@check_stack
def version():
    """Return the NEST version.

    Returns
    -------
    str:
        The version of NEST, as "<kernelname> <version>".
    """
    # Fetch /kernelname and /version from the SLI statusdict and join
    # the two strings with a space.
    sr("statusdict [[ /kernelname /version ]] get")
    return " ".join(spp())
@check_stack
def authors():
    """Print the authors of NEST."""
    # Delegates to the SLI command "authors", which does the printing.
    sr("authors")
@check_stack
def helpdesk():
    """Open the NEST helpdesk in browser.

    Use the system default browser (Safari on macOS, see below).
    """
    if sys.version_info < (2, 7, 8):
        print("The NEST Helpdesk is only available with Python 2.7.8 or "
              "later. \n")
        return

    if 'NEST_DOC_DIR' not in os.environ:
        print(
            'NEST help needs to know where NEST is installed.'
            'Please source nest_vars.sh or define NEST_DOC_DIR manually.')
        return

    helpfile = os.path.join(os.environ['NEST_DOC_DIR'], 'help',
                            'helpindex.html')

    # Under Windows systems webbrowser.open is incomplete
    # See <https://bugs.python.org/issue8232>
    if sys.platform[:3] == "win":
        os.startfile(helpfile)
    # Under MacOs we need to ask for the browser explicitly.
    # See <https://bugs.python.org/issue30392>.
    # This must be elif/else: previously the darwin check was a separate
    # if/else, so on Windows the page was opened a second time via
    # webbrowser.open_new after os.startfile had already opened it.
    elif sys.platform[:3] == "dar":
        webbrowser.get('safari').open_new(helpfile)
    else:
        webbrowser.open_new(helpfile)
@check_stack
def help(obj=None, pager=None):
    """Show the help page for the given object using the given pager.

    The default pager is more.

    Parameters
    ----------
    obj : object, optional
        Object to display help for
    pager : str, optional
        Pager to use
    """
    # With an object, show its help page and return early; otherwise
    # print the generic usage overview below.
    if obj is not None:
        show_help_with_pager(obj, pager)
        return

    print("Type 'nest.helpdesk()' to access the online documentation "
          "in a browser.")
    print("Type 'nest.help(object)' to get help on a NEST object or "
          "command.\n")
    print("Type 'nest.Models()' to see a list of available models "
          "in NEST.")
    print("Type 'nest.authors()' for information about the makers "
          "of NEST.")
    print("Type 'nest.sysinfo()' to see details on the system "
          "configuration.")
    print("Type 'nest.version()' for information about the NEST "
          "version.\n")
    print("For more information visit http://www.nest-simulator.org.")
@check_stack
def get_argv():
    """Return argv as seen by NEST.

    This is similar to Python sys.argv but might have changed after
    MPI initialization.

    Returns
    -------
    tuple:
        Argv, as seen by NEST.
    """
    # Push the SLI statusdict, pop it back as a Python mapping, and read
    # its 'argv' entry.
    sr('statusdict')
    statusdict = spp()
    return statusdict['argv']
@check_stack
def message(level, sender, text):
    """Print a message using NEST's message system.

    Parameters
    ----------
    level :
        Severity level of the message
    sender :
        Message sender
    text : str
        Text to be sent in the message
    """
    # Push the three arguments onto the SLI stack, then run the SLI
    # 'message' command, which consumes them.
    sps(level)
    sps(sender)
    sps(text)
    sr('message')
@check_stack
def SetStatus(nodes, params, val=None):
    """Set the parameters of nodes or connections to params.

    If val is given, params has to be the name
    of an attribute, which is set to val on the nodes/connections. val
    can be a single value or a list of the same size as nodes.

    Parameters
    ----------
    nodes : list or tuple
        Either a list of global ids of nodes, or a tuple of connection
        handles as returned by GetConnections()
    params : str or dict or list
        Dictionary of parameters or list of dictionaries of parameters of
        same length as nodes. If val is given, this has to be the name of
        a model property as a str.
    val : str, optional
        If given, params has to be the name of a model property.

    Raises
    ------
    TypeError
        If nodes is not a list of nodes or synapses, or if the number of
        parameter dicts does not match the number of nodes.
    """
    if not is_coercible_to_sli_array(nodes):
        raise TypeError("nodes must be a list of nodes or synapses")

    # This was added to ensure that the function is a nop (instead of,
    # for instance, raising an exception) when applied to an empty list,
    # which is an artifact of the API operating on lists, rather than
    # relying on language idioms, such as comprehensions
    #
    if len(nodes) == 0:
        return

    # Expand the (params, val) shorthand into per-node {params: value}
    # dicts: an iterable val gives one dict per node, a scalar gives one
    # shared dict.
    if val is not None and is_literal(params):
        if is_iterable(val) and not isinstance(val, (uni_str, dict)):
            params = [{params: x} for x in val]
        else:
            params = {params: val}

    params = broadcast(params, len(nodes), (dict,), "params")
    if len(nodes) != len(params):
        raise TypeError(
            "status dict must be a dict, or list of dicts of length 1 "
            "or len(nodes)")

    # Push connections (pcd) or node ids (sps) onto the SLI stack.
    if is_sequence_of_connections(nodes):
        pcd(nodes)
    else:
        sps(nodes)

    # Pair each node with its parameter dict and apply SetStatus to each
    # pair inside the SLI interpreter.
    sps(params)
    sr('2 arraystore')
    sr('Transpose { arrayload pop SetStatus } forall')
@check_stack
def GetStatus(nodes, keys=None):
    """Return the parameter dictionaries of nodes or connections.

    If keys is given, a list of values is returned instead. keys may also be a
    list, in which case the returned list contains lists of values.

    Parameters
    ----------
    nodes : list or tuple
        Either a list of global ids of nodes, or a tuple of connection
        handles as returned by GetConnections()
    keys : str or list, optional
        String or a list of strings naming model properties. GetDefaults then
        returns a single value or a list of values belonging to the keys
        given.

    Returns
    -------
    dict:
        All parameters
    type:
        If keys is a string, the corresponding default parameter is returned
    list:
        If keys is a list of strings, a list of corresponding default
        parameters is returned

    Raises
    ------
    TypeError
        If nodes is not a list of nodes or synapses, or if keys is
        neither a string nor an iterable of strings.
    """
    if not is_coercible_to_sli_array(nodes):
        raise TypeError("nodes must be a list of nodes or synapses")

    # An empty input is a no-op; mirror it back to the caller.
    if len(nodes) == 0:
        return nodes

    # Build the SLI expression: map GetStatus over the nodes, and when
    # key(s) are requested, extract them from each status dict.
    if keys is None:
        cmd = '{ GetStatus } Map'
    elif is_literal(keys):
        cmd = '{{ GetStatus /{0} get }} Map'.format(keys)
    elif is_iterable(keys):
        keys_str = " ".join("/{0}".format(x) for x in keys)
        cmd = '{{ GetStatus }} Map {{ [ [ {0} ] ] get }} Map'.format(keys_str)
    else:
        raise TypeError("keys should be either a string or an iterable")

    # Push connections (pcd) or node ids (sps), run the command, and pop
    # the result back into Python.
    if is_sequence_of_connections(nodes):
        pcd(nodes)
    else:
        sps(nodes)

    sr(cmd)

    return spp()
# -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2007 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2007 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import zlib
import logging
import StringIO
import struct
from exceptions import ParseError
import core
# get logging object
log = logging.getLogger(__name__)
# http://developer.apple.com/documentation/QuickTime/QTFF/index.html
# http://developer.apple.com/documentation/QuickTime/QTFF/QTFFChap4/\
# chapter_5_section_2.html#//apple_ref/doc/uid/TP40000939-CH206-BBCBIICE
# Note: May need to define custom log level to work like ATOM_DEBUG did here
QTUDTA = {
'nam': 'title',
'aut': 'artist',
'cpy': 'copyright'
}
QTLANGUAGES = {
0: "en",
1: "fr",
2: "de",
3: "it",
4: "nl",
5: "sv",
6: "es",
7: "da",
8: "pt",
9: "no",
10: "he",
11: "ja",
12: "ar",
13: "fi",
14: "el",
15: "is",
16: "mt",
17: "tr",
18: "hr",
19: "Traditional Chinese",
20: "ur",
21: "hi",
22: "th",
23: "ko",
24: "lt",
25: "pl",
26: "hu",
27: "et",
28: "lv",
29: "Lappish",
30: "fo",
31: "Farsi",
32: "ru",
33: "Simplified Chinese",
34: "Flemish",
35: "ga",
36: "sq",
37: "ro",
38: "cs",
39: "sk",
40: "sl",
41: "yi",
42: "sr",
43: "mk",
44: "bg",
45: "uk",
46: "be",
47: "uz",
48: "kk",
49: "az",
50: "AzerbaijanAr",
51: "hy",
52: "ka",
53: "mo",
54: "ky",
55: "tg",
56: "tk",
57: "mn",
58: "MongolianCyr",
59: "ps",
60: "ku",
61: "ks",
62: "sd",
63: "bo",
64: "ne",
65: "sa",
66: "mr",
67: "bn",
68: "as",
69: "gu",
70: "pa",
71: "or",
72: "ml",
73: "kn",
74: "ta",
75: "te",
76: "si",
77: "my",
78: "Khmer",
79: "lo",
80: "vi",
81: "id",
82: "tl",
83: "MalayRoman",
84: "MalayArabic",
85: "am",
86: "ti",
87: "om",
88: "so",
89: "sw",
90: "Ruanda",
91: "Rundi",
92: "Chewa",
93: "mg",
94: "eo",
128: "cy",
129: "eu",
130: "ca",
131: "la",
132: "qu",
133: "gn",
134: "ay",
135: "tt",
136: "ug",
137: "Dzongkha",
138: "JavaneseRom",
}
class MPEG4(core.AVContainer):
"""
Parser for the MP4 container format. This format is mostly
identical to Apple Quicktime and 3GP files. It maps to mp4, mov,
qt and some other extensions.
"""
table_mapping = {'QTUDTA': QTUDTA}
def __init__(self, file):
core.AVContainer.__init__(self)
self._references = []
self.mime = 'video/quicktime'
self.type = 'Quicktime Video'
h = file.read(8)
try:
(size, type) = struct.unpack('>I4s', h)
except struct.error:
# EOF.
raise ParseError()
if type == 'ftyp':
# file type information
if size >= 12:
# this should always happen
if file.read(4) != 'qt ':
# not a quicktime movie, it is a mpeg4 container
self.mime = 'video/mp4'
self.type = 'MPEG-4 Video'
size -= 4
file.seek(size - 8, 1)
h = file.read(8)
(size, type) = struct.unpack('>I4s', h)
while type in ['mdat', 'skip']:
# movie data at the beginning, skip
file.seek(size - 8, 1)
h = file.read(8)
(size, type) = struct.unpack('>I4s', h)
if not type in ['moov', 'wide', 'free']:
log.debug(u'invalid header: %r' % type)
raise ParseError()
# Extended size
if size == 1:
size = struct.unpack('>Q', file.read(8))
# Back over the atom header we just read, since _readatom expects the
# file position to be at the start of an atom.
file.seek(-8, 1)
while self._readatom(file):
pass
if self._references:
self._set('references', self._references)
def _readatom(self, file):
s = file.read(8)
if len(s) < 8:
return 0
atomsize, atomtype = struct.unpack('>I4s', s)
if not str(atomtype).decode('latin1').isalnum():
# stop at nonsense data
return 0
log.debug(u'%r [%X]' % (atomtype, atomsize))
if atomtype == 'udta':
# Userdata (Metadata)
pos = 0
tabl = {}
i18ntabl = {}
atomdata = file.read(atomsize - 8)
while pos < atomsize - 12:
(datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
if ord(datatype[0]) == 169:
# i18n Metadata...
mypos = 8 + pos
while mypos + 4 < datasize + pos:
# first 4 Bytes are i18n header
(tlen, lang) = struct.unpack('>HH', atomdata[mypos:mypos + 4])
i18ntabl[lang] = i18ntabl.get(lang, {})
l = atomdata[mypos + 4:mypos + tlen + 4]
i18ntabl[lang][datatype[1:]] = l
mypos += tlen + 4
elif datatype == 'WLOC':
# Drop Window Location
pass
else:
if ord(atomdata[pos + 8:pos + datasize][0]) > 1:
tabl[datatype] = atomdata[pos + 8:pos + datasize]
pos += datasize
if len(i18ntabl.keys()) > 0:
for k in i18ntabl.keys():
if QTLANGUAGES.has_key(k) and QTLANGUAGES[k] == 'en':
self._appendtable('QTUDTA', i18ntabl[k])
self._appendtable('QTUDTA', tabl)
else:
log.debug(u'NO i18')
self._appendtable('QTUDTA', tabl)
elif atomtype == 'trak':
atomdata = file.read(atomsize - 8)
pos = 0
trackinfo = {}
tracktype = None
while pos < atomsize - 8:
(datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
if datatype == 'tkhd':
tkhd = struct.unpack('>6I8x4H36xII', atomdata[pos + 8:pos + datasize])
trackinfo['width'] = tkhd[10] >> 16
trackinfo['height'] = tkhd[11] >> 16
trackinfo['id'] = tkhd[3]
try:
# XXX Timestamp of Seconds is since January 1st 1904!
# XXX 2082844800 is the difference between Unix and
# XXX Apple time. FIXME to work on Apple, too
self.timestamp = int(tkhd[1]) - 2082844800
except Exception, e:
log.exception(u'There was trouble extracting timestamp')
elif datatype == 'mdia':
pos += 8
datasize -= 8
log.debug(u'--> mdia information')
while datasize:
mdia = struct.unpack('>I4s', atomdata[pos:pos + 8])
if mdia[1] == 'mdhd':
# Parse based on version of mdhd header. See
# http://wiki.multimedia.cx/index.php?title=QuickTime_container#mdhd
ver = ord(atomdata[pos + 8])
if ver == 0:
mdhd = struct.unpack('>IIIIIhh', atomdata[pos + 8:pos + 8 + 24])
elif ver == 1:
mdhd = struct.unpack('>IQQIQhh', atomdata[pos + 8:pos + 8 + 36])
else:
mdhd = None
if mdhd:
# duration / time scale
trackinfo['length'] = mdhd[4] / mdhd[3]
if mdhd[5] in QTLANGUAGES:
trackinfo['language'] = QTLANGUAGES[mdhd[5]]
# mdhd[6] == quality
self.length = max(self.length, mdhd[4] / mdhd[3])
elif mdia[1] == 'minf':
# minf has only atoms inside
pos -= (mdia[0] - 8)
datasize += (mdia[0] - 8)
elif mdia[1] == 'stbl':
# stbl has only atoms inside
pos -= (mdia[0] - 8)
datasize += (mdia[0] - 8)
elif mdia[1] == 'hdlr':
hdlr = struct.unpack('>I4s4s', atomdata[pos + 8:pos + 8 + 12])
if hdlr[1] == 'mhlr':
if hdlr[2] == 'vide':
tracktype = 'video'
if hdlr[2] == 'soun':
tracktype = 'audio'
elif mdia[1] == 'stsd':
stsd = struct.unpack('>2I', atomdata[pos + 8:pos + 8 + 8])
if stsd[1] > 0:
codec = atomdata[pos + 16:pos + 16 + 8]
codec = struct.unpack('>I4s', codec)
trackinfo['codec'] = codec[1]
if codec[1] == 'jpeg':
tracktype = 'image'
elif mdia[1] == 'dinf':
dref = struct.unpack('>I4s', atomdata[pos + 8:pos + 8 + 8])
log.debug(u' --> %r, %r (useless)' % mdia)
if dref[1] == 'dref':
num = struct.unpack('>I', atomdata[pos + 20:pos + 20 + 4])[0]
rpos = pos + 20 + 4
for ref in range(num):
# FIXME: do somthing if this references
ref = struct.unpack('>I3s', atomdata[rpos:rpos + 7])
data = atomdata[rpos + 7:rpos + ref[0]]
rpos += ref[0]
else:
if mdia[1].startswith('st'):
log.debug(u' --> %r, %r (sample)' % mdia)
elif mdia[1] == 'vmhd' and not tracktype:
# indicates that this track is video
tracktype = 'video'
elif mdia[1] in ['vmhd', 'smhd'] and not tracktype:
# indicates that this track is audio
tracktype = 'audio'
else:
log.debug(u' --> %r, %r (unknown)' % mdia)
pos += mdia[0]
datasize -= mdia[0]
elif datatype == 'udta':
log.debug(u'udta: %r' % struct.unpack('>I4s', atomdata[:8]))
else:
if datatype == 'edts':
log.debug(u'--> %r [%d] (edit list)' % \
(datatype, datasize))
else:
log.debug(u'--> %r [%d] (unknown)' % \
(datatype, datasize))
pos += datasize
info = None
if tracktype == 'video':
info = core.VideoStream()
self.video.append(info)
if tracktype == 'audio':
info = core.AudioStream()
self.audio.append(info)
if info:
for key, value in trackinfo.items():
setattr(info, key, value)
elif atomtype == 'mvhd':
# movie header
mvhd = struct.unpack('>6I2h', file.read(28))
self.length = max(self.length, mvhd[4] / mvhd[3])
self.volume = mvhd[6]
file.seek(atomsize - 8 - 28, 1)
elif atomtype == 'cmov':
# compressed movie
datasize, atomtype = struct.unpack('>I4s', file.read(8))
if not atomtype == 'dcom':
return atomsize
method = struct.unpack('>4s', file.read(datasize - 8))[0]
datasize, atomtype = struct.unpack('>I4s', file.read(8))
if not atomtype == 'cmvd':
return atomsize
if method == 'zlib':
data = file.read(datasize - 8)
try:
decompressed = zlib.decompress(data)
except Exception, e:
try:
decompressed = zlib.decompress(data[4:])
except Exception, e:
log.exception(u'There was a proble decompressiong atom')
return atomsize
decompressedIO = StringIO.StringIO(decompressed)
while self._readatom(decompressedIO):
pass
else:
log.info(u'unknown compression %r' % method)
# unknown compression method
file.seek(datasize - 8, 1)
elif atomtype == 'moov':
# decompressed movie info
while self._readatom(file):
pass
elif atomtype == 'mdat':
pos = file.tell() + atomsize - 8
# maybe there is data inside the mdat
log.info(u'parsing mdat')
while self._readatom(file):
pass
log.info(u'end of mdat')
file.seek(pos, 0)
elif atomtype == 'rmra':
# reference list
while self._readatom(file):
pass
elif atomtype == 'rmda':
# reference
atomdata = file.read(atomsize - 8)
pos = 0
url = ''
quality = 0
datarate = 0
while pos < atomsize - 8:
(datasize, datatype) = struct.unpack('>I4s', atomdata[pos:pos + 8])
if datatype == 'rdrf':
rflags, rtype, rlen = struct.unpack('>I4sI', atomdata[pos + 8:pos + 20])
if rtype == 'url ':
url = atomdata[pos + 20:pos + 20 + rlen]
if url.find('\0') > 0:
url = url[:url.find('\0')]
elif datatype == 'rmqu':
quality = struct.unpack('>I', atomdata[pos + 8:pos + 12])[0]
elif datatype == 'rmdr':
datarate = struct.unpack('>I', atomdata[pos + 12:pos + 16])[0]
pos += datasize
if url:
self._references.append((url, quality, datarate))
else:
if not atomtype in ['wide', 'free']:
log.info(u'unhandled base atom %r' % atomtype)
# Skip unknown atoms
try:
file.seek(atomsize - 8, 1)
except IOError:
return 0
return atomsize
# Public entry point expected by the enzyme parser machinery.
Parser = MPEG4
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_crossovered_analytic(osv.osv_memory):
    """Wizard that prints the crossovered analytic report for an analytic
    account and its children."""
    _name = "account.crossovered.analytic"
    _description = "Print Crossovered Analytic"
    _columns = {
        'date1': fields.date('Start Date', required=True),
        'date2': fields.date('End Date', required=True),
        'journal_ids': fields.many2many('account.analytic.journal', 'crossovered_journal_rel', 'crossover_id', 'journal_id', 'Analytic Journal'),
        'ref': fields.many2one('account.analytic.account', 'Analytic Account Reference', required=True),
        'empty_line': fields.boolean('Dont show empty lines'),
    }
    _defaults = {
        # Default period: January 1st of the current year through today.
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def print_report(self, cr, uid, ids, context=None):
        """Check that the selected analytic account (or one of its
        children) has analytic lines, then return the report action.

        Raises osv.except_osv when the whole hierarchy has no analytic
        lines.
        """
        cr.execute('SELECT account_id FROM account_analytic_line')
        res = cr.fetchall()
        acc_ids = [x[0] for x in res]

        data = self.read(cr, uid, ids, context=context)[0]
        # many2one fields read back as (id, name) tuples; keep only the id
        data['ref'] = data['ref'][0]
        obj_acc = self.pool.get('account.analytic.account').browse(cr, uid, data['ref'], context=context)
        name = obj_acc.name
        account_ids = self.pool.get('account.analytic.account').search(cr, uid, [('parent_id', 'child_of', [data['ref']])], context=context)

        flag = True
        for acc in account_ids:
            if acc in acc_ids:
                flag = False
                break
        if flag:
            # Interpolate *after* translating: _() must receive the literal
            # catalog string, otherwise the lookup can never succeed.
            raise osv.except_osv(_('User Error!'), _('There are no analytic lines related to account %s.') % name)

        datas = {
            'ids': [],
            'model': 'account.analytic.account',
            'form': data
        }
        return self.pool['report'].get_action(cr, uid, [], 'account_analytic_plans.report_crossoveredanalyticplans', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from shutil import copyfile, copytree, rmtree
from setuptools import setup
# Relative path (inside the package) where built artifacts are staged.
TEMP_PATH = "bigdl/share"
# Repository root: two levels above this setup.py (the pyspark directory).
bigdl_home = os.path.abspath(__file__ + "/../../")
try:
    # Defines `__version__`; only resolvable when run from the pyspark dir.
    exec(open('bigdl/version.py').read())
except IOError:
    print("Failed to load Bigdl version file for packaging. You must be in Bigdl's pyspark dir.")
    sys.exit(-1)
VERSION = __version__
# Shown when packaging from source without a prior ./make-dist.sh build.
building_error_msg = """
If you are packing python API from BigDL source, you must build BigDL first
and run sdist.
To build BigDL with maven you can run:
cd $BigDL_HOME
./make-dist.sh
Building the source dist is done in the Python directory:
cd pyspark
python setup.py sdist
pip install dist/*.tar.gz"""
def build_from_source():
    """Return True when running inside a BigDL source checkout.

    The presence of pyspark/bigdl/util/common.py under ``bigdl_home``
    distinguishes a source tree from an installed release.
    """
    code_path = bigdl_home + "/pyspark/bigdl/util/common.py"
    print("Checking: %s to see if build from source" % code_path)
    # Return the predicate directly instead of the if/return-True/return-False dance.
    return os.path.exists(code_path)
def init_env():
    """Stage the built BigDL distribution under TEMP_PATH when packaging
    from a source checkout; do nothing for a release install."""
    if not build_from_source():
        print("Do nothing for release installation")
        return

    print("Start to build distributed package")
    print("HOME OF BIGDL: " + bigdl_home)
    dist_source = bigdl_home + "/dist"
    if not os.path.exists(dist_source):
        # No ./make-dist.sh output to package; explain and bail out.
        print(building_error_msg)
        sys.exit(-1)

    if os.path.exists(TEMP_PATH):
        rmtree(TEMP_PATH)
    copytree(dist_source, TEMP_PATH)
    copyfile(bigdl_home + "/pyspark/bigdl/nn/__init__.py", TEMP_PATH + "/__init__.py")
def setup_package():
    """Run setuptools with the BigDL package metadata."""
    packages = [
        'bigdl',
        'bigdl.dataset',
        'bigdl.nn',
        'bigdl.optim',
        'bigdl.util',
        'bigdl.share',
    ]
    classifiers = [
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ]
    setup(
        name='BigDL',
        version=VERSION,
        description='Distributed Deep Learning Library for Apache Spark',
        author='BigDL Authors',
        author_email='bigdl-user-group@googlegroups.com',
        license='Apache License, Version 2.0',
        url='https://github.com/intel-analytics/Bigdl',
        packages=packages,
        install_requires=['numpy>=1.7'],
        dependency_links=[
            'https://d3kbcqa49mib13.cloudfront.net/spark-2.0.0-bin-hadoop2.7.tgz'
        ],
        include_package_data=True,
        package_data={
            "bigdl.share": ['bigdl/share/lib', 'bigdl/share/conf', 'bigdl/share/bin']
        },
        classifiers=classifiers,
        platforms=['mac', 'linux'],
    )
if __name__ == '__main__':
    # Stage artifacts (if building from source), run setuptools, then always
    # clean up the staged copy. The previous `except Exception as e: raise e`
    # clause was a no-op re-raise; a bare try/finally keeps the same behavior
    # and the original traceback.
    try:
        init_env()
        setup_package()
    finally:
        if build_from_source() and os.path.exists(TEMP_PATH):
            rmtree(TEMP_PATH)
# Entry point for ``python -m flask``: delegate to the Click-based CLI.
from .cli import main
main()
#!/usr/bin/python
# -*- coding: utf-8 -*-
#######################################################################
### Diálogo de definición de molinos, UI_grinder ###
#######################################################################
from PyQt4 import QtCore, QtGui
from equipment.solids import Grinder
from UI import UI_corriente
from equipment import parents
from lib import unidades
from tools import costIndex
from UI.widgets import Entrada_con_unidades
# Bond work index tabulated per material (keys are the Spanish material
# names shown in the UI combo box; values as published in the original
# source -- presumably kWh per short ton, TODO confirm against reference).
BondIndex={ 'Mineral de uranio': 17.93,
            'Escoria': 15.76,
            'Ferrocromo': 8.87,
            'Grafito': 45.03,
            'Magnesita': 16.8,
            'Mineral de plata': 17.3,
            'Molibdeno': 12.97,
            'Ferromanganeso': 7.77,
            'Arenisca': 11.53,
            'Arcilla': 7.1,
            'Mineral de níquel': 11.88,
            'Mineral de estaño': 10.81,
            'Mineral de titanio': 11.88,
            'Silicato sódico': 13.0,
            'Granito': 14.39,
            'Coque': 20.7,
            'Taconita': 14.87,
            'Hematita especular': 15.4,
            'Arena de silicato': 16.46,
            'Coque de petróleo': 73.8,
            'Gneiss': 20.13,
            'Carburo de silicio': 26.17,
            'Mineral de zinc': 12.42,
            'Granate': 12.37,
            'Caliza': 11.61,
            'Basalto': 20.41,
            'Carbón': 11.37,
            'Gabro': 18.45,
            'Dolomita': 11.31,
            'Coque de petróleo líquido': 38.6,
            'Mineral de plomo-zinc': 11.35,
            'Sal potásica': 8.23,
            'Andesita': 22.13,
            'Arcilla calcinada': 1.43,
            'Ilmenita': 13.11,
            'Mineral de hierro': 15.44,
            'Mica': 134.5,
            'Hematita': 12.68,
            'Fosfato fertilizante': 13.03,
            'Cemento en bruto': 10.57,
            'Bauxita': 9.45,
            'Mineral de plomo': 11.4,
            'Trapp': 21.1,
            'Cristal': 3.08,
            'Sienita': 14.9,
            'Coral': 10.16,
            'Roca fosfática': 10.13,
            'Caliza para cemento': 10.18,
            'Silicato': 13.53,
            'Aljez': 8.16,
            'Mineral de cromo': 9.6,
            'Feldespato': 11.67,
            'Mineral de cobre': 13.13,
            'Pizarra bituminosa': 18.1,
            'Cerámica': 15.53,
            'Pirita': 8.9,
            'Mineral de manganeso': 12.46,
            'Pirrotina': 9.57,
            'Cianita': 18.87,
            'Grava': 25.17,
            'Ferrosilicio': 12.83,
            'Sílex': 26.16,
            'Pizarra, mineral': 16.4,
            'Limanita': 8.45,
            'Barita': 6.24,
            'Esmeril': 58.18,
            'Escoria de hornos de hierro': 12.16,
            'Mineral de oro': 14.83,
            'Pumita': 11.93,
            'Rutilo': 12.12,
            'Espodumena': 13.7,
            'Fluorita': 9.76,
            'Clinker de cemento': 13.49,
            'Sinterizado': 8.77,
            'Galena': 10.19,
            'Magnetita': 10.21,
            'Cuarcita': 12.18,
            'Oolita': 11.33,
            'Pizarra, roca': 13.83,
            'Mineral de potasio': 8.88,
            'Diorita': 19.4,
            'Cuarzo': 12.77}
class UI_equipment(parents.UI_equip):
    """Definition dialog for solid-grinding mill equipment."""

    def __init__(self, entrada=None, parent=None):
        """entrada: optional Corriente instance with the inlet stream."""
        super(UI_equipment, self).__init__(Grinder, entrada=False, salida=False, parent=parent)
        self.entrada = entrada

        # Input tab
        self.Entrada = UI_corriente.Ui_corriente(entrada)
        self.Entrada.Changed.connect(self.cambiar_entrada)
        self.tabWidget.insertTab(0, self.Entrada, QtGui.QApplication.translate("equipment", "Entrada", None, QtGui.QApplication.UnicodeUTF8))

        # Calculation tab
        gridLayout_Calculo = QtGui.QGridLayout(self.tabCalculo)
        gridLayout_Calculo.addWidget(QtGui.QLabel(QtGui.QApplication.translate("equipment", "Índice de trabajo de bond:", None, QtGui.QApplication.UnicodeUTF8)), 1, 0, 1, 1)
        self.Material = QtGui.QComboBox()
        self.Material.addItem(QtGui.QApplication.translate("equipment", "Definido por el usuario", None, QtGui.QApplication.UnicodeUTF8))
        for key in sorted(BondIndex.keys()):
            self.Material.addItem(key)
        self.Material.currentIndexChanged[str].connect(self.cambiarBondWordIndex)
        gridLayout_Calculo.addWidget(self.Material, 1, 1, 1, 1)
        self.BondWorkIndex = Entrada_con_unidades(float)
        gridLayout_Calculo.addWidget(self.BondWorkIndex, 1, 2, 1, 1)
        gridLayout_Calculo.addItem(QtGui.QSpacerItem(10,10,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding),10,0,1,5)

        # Cost tab
        gridLayout_Costos = QtGui.QGridLayout(self.tabCostos)
        gridLayout_Costos.addWidget(QtGui.QLabel(QtGui.QApplication.translate("equipment", "Tipo:", None, QtGui.QApplication.UnicodeUTF8)), 1, 1, 1, 1)
        self.tipo = QtGui.QComboBox()
        self.tipo.addItem(QtGui.QApplication.translate("equipment", "De cono", None, QtGui.QApplication.UnicodeUTF8))
        self.tipo.addItem(QtGui.QApplication.translate("equipment", "Giratorio", None, QtGui.QApplication.UnicodeUTF8))
        self.tipo.addItem(QtGui.QApplication.translate("equipment", "Dentado", None, QtGui.QApplication.UnicodeUTF8))
        self.tipo.addItem(QtGui.QApplication.translate("equipment", "De martillo", None, QtGui.QApplication.UnicodeUTF8))
        self.tipo.addItem(QtGui.QApplication.translate("equipment", "De bolas", None, QtGui.QApplication.UnicodeUTF8))
        self.tipo.addItem(QtGui.QApplication.translate("equipment", "Pulverizador", None, QtGui.QApplication.UnicodeUTF8))
        self.tipo.currentIndexChanged.connect(self.calcularCostos)
        gridLayout_Costos.addWidget(self.tipo, 1, 2, 1, 1)
        gridLayout_Costos.addItem(QtGui.QSpacerItem(10,10,QtGui.QSizePolicy.Fixed,QtGui.QSizePolicy.Fixed),2,1,1,2)
        self.Costos = costIndex.CostData(1.3, 2)
        self.Costos.valueChanged.connect(self.calcularCostos)
        gridLayout_Costos.addWidget(self.Costos,4,1,2,5)
        gridLayout_Costos.addItem(QtGui.QSpacerItem(20,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding),6,1,1,6)
        gridLayout_Costos.addItem(QtGui.QSpacerItem(20,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding),10,1,1,6)
        self.groupBox_Costos = QtGui.QGroupBox(QtGui.QApplication.translate("equipment", "Costos calculados", None, QtGui.QApplication.UnicodeUTF8))
        gridLayout_Costos.addWidget(self.groupBox_Costos,7,1,1,6)
        gridLayout_5 = QtGui.QGridLayout(self.groupBox_Costos)
        gridLayout_5.addWidget(QtGui.QLabel(QtGui.QApplication.translate("equipment", "Coste Adquisición:", None, QtGui.QApplication.UnicodeUTF8)),0,1,1,1)
        self.C_adq = Entrada_con_unidades(unidades.Currency, retornar=False, readOnly=True)
        gridLayout_5.addWidget(self.C_adq,0,2,1,1)
        gridLayout_5.addWidget(QtGui.QLabel(QtGui.QApplication.translate("equipment", "Coste Instalación:", None, QtGui.QApplication.UnicodeUTF8)),1,1,1,1)
        self.C_inst = Entrada_con_unidades(unidades.Currency, retornar=False, readOnly=True)
        gridLayout_5.addWidget(self.C_inst,1,2,1,1)

        # Output tab
        self.Salida = UI_corriente.Ui_corriente(readOnly=True)
        self.tabWidget.insertTab(3, self.Salida,QtGui.QApplication.translate("equipment", "Salida", None, QtGui.QApplication.UnicodeUTF8))

        self.tabWidget.setCurrentIndex(0)

    def cambiarBondWordIndex(self, txt):
        """Fill the Bond work index field from the selected material; the
        "Definido por el usuario" entry unlocks manual input."""
        try:
            value = BondIndex[str(txt)]
        except KeyError:
            # user-defined material: allow typing a custom value
            self.BondWorkIndex.setReadOnly(False)
            self.BondWorkIndex.clear()
        else:
            self.BondWorkIndex.setValue(value)
            self.BondWorkIndex.setReadOnly(True)

    def cambiar_entrada(self, corriente):
        """Slot run when the input stream widget changes."""
        # Fixed: was `selfentrada = corriente`, which silently bound a local
        # variable instead of updating the instance attribute.
        self.entrada = corriente
        self.calculo()

    def calculo(self):
        """Recompute the equipment and refresh the output tab."""
        if self.todos_datos():
            self.rellenoSalida()

    def rellenoSalida(self):
        # Placeholder: output-stream population not implemented yet.
        pass

    def todos_datos(self):
        # Placeholder: returns None (falsy), so calculo()/calcularCostos()
        # are currently no-ops.
        pass

    def calcularCostos(self, factor=None, indiceBase=None, indiceActual=None):
        """Recompute purchase and installation costs from the cost indexes.

        NOTE(review): the body references self.FireHeater, self.tipobox,
        self.tipocilindrico and self.material, none of which exist on this
        class -- it appears copied from the fired-heater dialog and would
        raise AttributeError if todos_datos() ever returned a truthy value.
        Left unchanged pending the Grinder cost API; confirm before enabling.
        """
        if self.todos_datos():
            if not factor: factor = self.Costos.factor
            if not indiceBase: indiceBase = self.Costos.Base
            if not indiceActual: indiceActual = self.Costos.Actual
            if self.tipo.currentIndex() == 0:
                self.FireHeater.Coste(factor, indiceBase, indiceActual, 0, self.tipobox.currentIndex(), self.material.currentIndex())
            else:
                self.FireHeater.Coste(factor, indiceBase, indiceActual, 1, self.tipocilindrico.currentIndex(), self.material.currentIndex())
            self.C_adq.setValue(self.FireHeater.C_adq.config())
            self.C_inst.setValue(self.FireHeater.C_inst.config())
if __name__ == "__main__":
    # Manual smoke test: open the dialog with a sample water stream.
    import sys
    from lib.corriente import Corriente, Mezcla, Solid
    app = QtGui.QApplication(sys.argv)
    agua=Corriente(300, 1, 3600, Mezcla([62], [1]))
    dialogo = UI_equipment(agua)
    dialogo.show()
    sys.exit(app.exec_())
import sys
from collections import defaultdict
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import kpi.fields.kpi_uid
from kpi.model_utils import disable_auto_field_update
def migrate_collections_to_assets(apps, schema_editor):
    """Convert every legacy ``Collection`` row into an ``Asset`` of type
    'collection', carrying over permissions, tags, subscriptions and
    parent/child links. No-op when the database has no collections."""
    Asset = apps.get_model('kpi', 'Asset')
    UserAssetSubscription = apps.get_model('kpi', 'UserAssetSubscription')
    Collection = apps.get_model('kpi', 'Collection')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    ObjectPermission = apps.get_model('kpi', 'ObjectPermission')
    Permission = apps.get_model('auth', 'Permission')
    TaggedItem = apps.get_model('taggit', 'TaggedItem')
    UserCollectionSubscription = apps.get_model(
        'kpi', 'UserCollectionSubscription'
    )
    asset_ct = ContentType.objects.get(app_label='kpi', model='asset')
    try:
        collection_ct = ContentType.objects.get(app_label='kpi', model='collection')
    except ContentType.DoesNotExist:
        collection_ct = None
    if not Collection.objects.exists():
        # There's no work for us to do.
        return
    if not collection_ct:
        raise RuntimeError(
            'The database contains collections but no content type for them.'
        )

    def get_perm_pk(codename):
        # Resolve a permission codename to its primary key.
        return Permission.objects.get(codename=codename).pk

    # old collection permission pk -> equivalent asset permission pk
    perm_map = {
        get_perm_pk('view_collection'): get_perm_pk('view_asset'),
        get_perm_pk('change_collection'): get_perm_pk('change_asset'),
    }
    # django won't automatically make new permissions until after migrations
    # complete
    try:
        discover_asset_pk = get_perm_pk('discover_asset')
    except Permission.DoesNotExist:
        # this seems less awful than using django's private innards (i'm
        # looking at you, django.contrib.auth.management.create_permissions)
        discover_asset_pk = Permission.objects.create(
            codename='discover_asset',
            content_type_id=asset_ct.pk,
            name='Can discover asset in public lists',
        ).pk
    view_collection_pk = get_perm_pk('view_collection')
    # store the pk of the new asset created for each collection. we'll need
    # this when handling parents and subscriptions.
    collection_pks_to_asset_pks = {
        # collection pk: new asset pk
    }

    def create_asset_from_collection(collection):
        """
        migrate a collection to an asset, returning the new asset's pk. does
        NOT deal with parents or subscriptions
        """
        assert collection.pk not in collection_pks_to_asset_pks
        asset = Asset()
        asset.asset_type = 'collection'
        # easy copy operations
        for attr in [
            'name',
            'owner',
            'date_created',
            'date_modified',
        ]:
            setattr(asset, attr, getattr(collection, attr))
        # for nested collections--do any exist, given that there's no support
        # in the ui?--just copy the parent collection id for now. it'll be
        # replaced with the appropriate asset id later. note that
        # `asset.parent` must be used here instead of `asset.parent_id` because
        # we've temporarily altered parent to be a simple integer field instead
        # of a foreign key
        asset.parent = collection.parent_id
        # write to database now so we can create m2m relationships
        with disable_auto_field_update(Asset, 'date_created'):
            with disable_auto_field_update(Asset, 'date_modified'):
                Asset.objects.bulk_create([asset])  # avoid save() shenanigans
        collection_pks_to_asset_pks[collection.pk] = asset.pk
        # copy permissions
        old_perms = ObjectPermission.objects.filter(
            content_type=collection_ct, object_id=collection.pk
        )
        new_perms = []
        for collection_perm in old_perms:
            asset_perm = ObjectPermission()
            asset_perm.content_type = asset_ct
            asset_perm.object_id = asset.pk
            asset_perm.permission_id = perm_map[collection_perm.permission_id]
            for attr in ['user_id', 'deny', 'inherited']:
                setattr(asset_perm, attr, getattr(collection_perm, attr))
            new_perms.append(asset_perm)
            # "public" for a collection meant having both `view_collection`
            # assigned to the anonymous user *and* `discoverable_when_public`
            # set to `True`
            if (
                collection_perm.permission_id == view_collection_pk
                and collection_perm.user_id == settings.ANONYMOUS_USER_ID
                and not collection_perm.deny
            ):
                # `discover_asset` replaces `discoverable_when_public`
                asset_perm = ObjectPermission()
                asset_perm.content_type = asset_ct
                asset_perm.object_id = asset.pk
                asset_perm.permission_id = discover_asset_pk
                asset_perm.user_id = settings.ANONYMOUS_USER_ID
                asset_perm.inherited = collection_perm.inherited
                asset_perm.save()
        ObjectPermission.objects.bulk_create(new_perms)
        old_perms.delete()
        # update all tag assignments in place
        TaggedItem.objects.filter(
            content_type=collection_ct, object_id=collection.pk
        ).update(content_type=asset_ct, object_id=asset.pk)
        return asset.pk

    # create new assets for all collections
    done = 0
    for collection in Collection.objects.iterator():
        asset_pk = create_asset_from_collection(collection)
        done += 1
        if done % 100 == 0:
            # crude progress indicator for long-running migrations
            sys.stdout.write(f'{done} ')
            sys.stdout.flush()
    # create new asset subscriptions for all collection subscriptions, pointing
    # at the new assets we just created
    for (
        collection_subscription
    ) in UserCollectionSubscription.objects.iterator():
        asset_subscription = UserAssetSubscription()
        asset_subscription.user_id = collection_subscription.user_id
        asset_subscription.asset_id = collection_pks_to_asset_pks[
            collection_subscription.collection_id
        ]
        asset_subscription.save()
    # update all parent assignments to use the new assets we just created. at
    # this point, `parent` is just an integer field and cannot be accessed as
    # `parent_id`
    children_of_parent_asset = defaultdict(list)
    for pk, parent in Asset.objects.exclude(parent=None).values_list(
        'pk', 'parent'
    ):
        children_of_parent_asset[
            collection_pks_to_asset_pks[parent]
        ].append(pk)
    for parent, children in children_of_parent_asset.items():
        Asset.objects.filter(pk__in=children).update(parent=parent)
class Migration(migrations.Migration):
    # Schema half of the collection->asset merge: create the asset
    # subscription model, extend `asset_type`, temporarily flatten `parent`
    # to an integer column, widen the permission set, then run the data
    # migration defined above.

    dependencies = [
        ('kpi', '0028_assign_manage_asset_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserAssetSubscription',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name='ID',
                    ),
                ),
                ('uid', kpi.fields.kpi_uid.KpiUidField(uid_prefix='b')),
            ],
        ),
        migrations.AddField(
            model_name='userassetsubscription',
            name='asset',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE, to='kpi.Asset'
            ),
        ),
        migrations.AddField(
            model_name='userassetsubscription',
            name='user',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
        migrations.AlterUniqueTogether(
            name='userassetsubscription', unique_together={('asset', 'user')},
        ),
        migrations.AlterField(
            model_name='asset',
            name='asset_type',
            field=models.CharField(
                choices=[
                    ('text', 'text'),
                    ('empty', 'empty'),
                    ('question', 'question'),
                    ('block', 'block'),
                    ('survey', 'survey'),
                    ('template', 'template'),
                    ('collection', 'collection'),
                ],
                default='survey',
                max_length=20,
            ),
        ),
        # cope with constraint madness by *temporarily* changing the asset
        # parent field from a foreign key to a simple integer field. the next
        # migration resets it to a foreign key again (but pointing at the asset
        # model itself instead of the collection model)
        migrations.AlterField(
            model_name='asset',
            name='parent',
            field=models.IntegerField(
                blank=True,
                null=True,
                db_column='parent_id',
            ),
        ),
        migrations.AlterModelOptions(
            name='asset',
            options={
                'default_permissions': ('add', 'change', 'delete'),
                'ordering': ('-date_modified',),
                'permissions': (
                    ('view_asset', 'Can view asset'),
                    ('share_asset', "Can change asset's sharing settings"),
                    ('discover_asset', 'Can discover asset in public lists'),
                    ('add_submissions', 'Can submit data to asset'),
                    ('view_submissions', 'Can view submitted data for asset'),
                    (
                        'partial_submissions',
                        'Can make partial actions on submitted data for asset for specific users',
                    ),
                    (
                        'change_submissions',
                        'Can modify submitted data for asset',
                    ),
                    (
                        'delete_submissions',
                        'Can delete submitted data for asset',
                    ),
                    (
                        'share_submissions',
                        "Can change sharing settings for asset's submitted data",
                    ),
                    (
                        'validate_submissions',
                        'Can validate submitted data asset',
                    ),
                    ('from_kc_only', 'INTERNAL USE ONLY; DO NOT ASSIGN'),
                ),
            },
        ),
        migrations.RunPython(migrate_collections_to_assets),
    ]
"""
Support for the Locative platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.locative/
"""
import logging
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import HTTP_UNPROCESSABLE_ENTITY, STATE_NOT_HOME
from homeassistant.components.http import HomeAssistantView
_LOGGER = logging.getLogger(__name__)

# Locative needs the HTTP component to expose its webhook endpoint.
DEPENDENCIES = ['http']
def setup_scanner(hass, config, see):
    """Setup an endpoint for the Locative application."""
    # Register the HTTP view that receives Locative webhooks; `see` is the
    # device_tracker callback used to record location updates.
    hass.wsgi.register_view(LocativeView(hass, see))

    return True
class LocativeView(HomeAssistantView):
    """View to handle locative requests."""

    url = "/api/locative"
    # Fixed: was "api:bootstrap", a copy/paste leftover from another view;
    # the view name should identify this endpoint.
    name = "api:locative"

    def __init__(self, hass, see):
        """Initialize Locative url endpoints."""
        super().__init__(hass)
        self.see = see

    def get(self, request):
        """Locative message received as GET."""
        return self.post(request)

    def post(self, request):
        """Validate an incoming Locative message and update the tracker.

        Returns a plain success string, or a (message,
        HTTP_UNPROCESSABLE_ENTITY) tuple when a required field is missing
        or the trigger is unknown.
        """
        # pylint: disable=too-many-return-statements
        data = request.values

        if 'latitude' not in data or 'longitude' not in data:
            return ("Latitude and longitude not specified.",
                    HTTP_UNPROCESSABLE_ENTITY)

        if 'device' not in data:
            _LOGGER.error("Device id not specified.")
            return ("Device id not specified.",
                    HTTP_UNPROCESSABLE_ENTITY)

        if 'id' not in data:
            _LOGGER.error("Location id not specified.")
            return ("Location id not specified.",
                    HTTP_UNPROCESSABLE_ENTITY)

        if 'trigger' not in data:
            _LOGGER.error("Trigger is not specified.")
            return ("Trigger is not specified.",
                    HTTP_UNPROCESSABLE_ENTITY)

        # Dashes are stripped so the device id is usable in an entity id.
        device = data['device'].replace('-', '')
        location_name = data['id'].lower()
        direction = data['trigger']

        if direction == 'enter':
            self.see(dev_id=device, location_name=location_name)
            return "Setting location to {}".format(location_name)

        elif direction == 'exit':
            current_state = self.hass.states.get(
                "{}.{}".format(DOMAIN, device))

            if current_state is None or current_state.state == location_name:
                self.see(dev_id=device, location_name=STATE_NOT_HOME)
                return "Setting location to not home"
            else:
                # Ignore the message if it is telling us to exit a zone that we
                # aren't currently in. This occurs when a zone is entered
                # before the previous zone was exited. The enter message will
                # be sent first, then the exit message will be sent second.
                return 'Ignoring exit from {} (already in {})'.format(
                    location_name, current_state)

        elif direction == 'test':
            # In the app, a test message can be sent. Just return something to
            # the user to let them know that it works.
            return "Received test message."

        else:
            _LOGGER.error("Received unidentified message from Locative: %s",
                          direction)
            return ("Received unidentified message: {}".format(direction),
                    HTTP_UNPROCESSABLE_ENTITY)
## @file
# Generate PCD table for 'Patchable In Module' type PCD with given .map file.
# The Patch PCD table like:
#
# PCD Name Offset in binary
# ======== ================
#
# Copyright (c) 2008 - 2010, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#
#====================================== External Libraries ========================================
import optparse
import os
import re
import array
from Common.BuildToolError import *
import Common.EdkLogger as EdkLogger
from Common.Misc import PeImageClass
from Common.BuildVersion import gBUILD_VERSION
# Version and Copyright
__version_number__ = ("0.10" + " " + gBUILD_VERSION)
__version__ = "%prog Version " + __version_number__
__copyright__ = "Copyright (c) 2008 - 2010, Intel Corporation. All rights reserved."
#====================================== Internal Libraries ========================================
#============================================== Code ===============================================
secRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
def parsePcdInfoFromMapFile(mapfilepath, efifilepath):
""" Parse map file to get binary patch pcd information
@param path Map file absolution path
@return a list which element hold (PcdName, Offset, SectionName)
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
if lines[0].strip().find("Archive member included because of file (symbol)") != -1:
return _parseForGCC(lines)
return _parseGeneral(lines, efifilepath)
def _parseForGCC(lines):
""" Parse map file generated by GCC linker """
status = 0
imageBase = -1
lastSectionName = None
pcds = []
for line in lines:
line = line.strip()
# status machine transection
if status == 0 and line == "Linker script and memory map":
status = 1
continue
elif status == 1 and line == 'START GROUP':
status = 2
continue
# status handler:
if status == 1:
m = re.match('^[\da-fA-FxhH]+ +__image_base__ += +([\da-fA-FhxH]+)', line)
if m != None:
imageBase = int(m.groups(0)[0], 16)
if status == 2:
m = re.match('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)', line)
if m != None:
lastSectionName = m.groups(0)[0]
if status == 2:
m = re.match("^([\da-fA-Fx]+) +[_]+gPcd_BinaryPatch_([\w_\d]+)", line)
if m != None:
assert imageBase != -1, "Fail to get Binary PCD offsest for unknown image base address"
pcds.append((m.groups(0)[1], int(m.groups(0)[0], 16) - imageBase, lastSectionName))
return pcds
def _parseGeneral(lines, efifilepath):
""" For MSFT, ICC, EBC
@param lines line array for map file
@return a list which element hold (PcdName, Offset, SectionName)
"""
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
bPcds = []
for line in lines:
line = line.strip()
if re.match("^Start[' ']+Length[' ']+Name[' ']+Class", line):
status = 1
continue
if re.match("^Address[' ']+Publics by Value[' ']+Rva\+Base", line):
status = 2
continue
if re.match("^entry point at", line):
status = 3
continue
if status == 1 and len(line) != 0:
m = secRe.match(line)
assert m != None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
m = symRe.match(line)
assert m != None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
m2 = re.match('^[_]+gPcd_BinaryPatch_([\w]+)', sym_name)
if m2 != None:
# fond a binary pcd entry in map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
bPcds.append([m2.groups(0)[0], sec[3], sym_offset, vir_addr, sec_no])
if len(bPcds) == 0: return None
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs == None or len(efisecs) == 0:
return None
pcds = []
for pcd in bPcds:
index = 0
for efisec in efisecs:
index = index + 1
if pcd[1].strip() == efisec[0].strip():
pcds.append([pcd[0], efisec[2] + pcd[2], efisec[0]])
elif pcd[4] == index:
pcds.append([pcd[0], efisec[2] + pcd[2], efisec[0]])
return pcds
def generatePcdTable(list, pcdpath):
try:
f = open(pcdpath, 'w')
except:
pass
f.write('PCD Name Offset Section Name\r\n')
for pcditem in list:
f.write('%-30s 0x%-08X %-6s\r\n' % (pcditem[0], pcditem[1], pcditem[2]))
f.close()
#print 'Success to generate Binary Patch PCD table at %s!' % pcdpath
if __name__ == '__main__':
UsageString = "%prog -m <MapFile> -e <EfiFile> -o <OutFile>"
AdditionalNotes = "\nPCD table is generated in file name with .BinaryPcdTable.txt postfix"
parser = optparse.OptionParser(description=__copyright__, version=__version__, usage=UsageString)
parser.add_option('-m', '--mapfile', action='store', dest='mapfile',
help='Absolute path of module map file.')
parser.add_option('-e', '--efifile', action='store', dest='efifile',
help='Absolute path of EFI binary file.')
parser.add_option('-o', '--outputfile', action='store', dest='outfile',
help='Absolute path of output file to store the got patchable PCD table.')
(options, args) = parser.parse_args()
if options.mapfile == None or options.efifile == None:
print parser.get_usage()
elif os.path.exists(options.mapfile) and os.path.exists(options.efifile):
list = parsePcdInfoFromMapFile(options.mapfile, options.efifile)
if list != None:
if options.outfile != None:
generatePcdTable(list, options.outfile)
else:
generatePcdTable(list, options.mapfile.replace('.map', '.BinaryPcdTable.txt'))
else:
print 'Fail to generate Patch PCD Table based on map file and efi file'
else:
print 'Fail to generate Patch PCD Table for fail to find map file or efi file!' | unknown | codeparrot/codeparrot-clean | ||
import hashlib
import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from .forms import ProfileForm
from .models import Profile
from fundraising.models import CTSDonor
def user_profile(request, username):
user = get_object_or_404(User, username=username)
profile, created = Profile.objects.get_or_create(user=user)
donor = CTSDonor.objects.filter(profile=profile).first()
if donor:
donations = donor.donation_set.all()
else:
donations = None
return render(request, "accounts/user_profile.html", {
'user_obj': user,
'email_hash': hashlib.md5(user.email.encode('ascii', 'ignore')).hexdigest(),
'donor': donor,
'donations': donations,
})
@login_required
def edit_profile(request):
profile, created = Profile.objects.get_or_create(user=request.user)
form = ProfileForm(request.POST or None, instance=profile)
if form.is_valid():
form.save()
return redirect('user_profile', request.user.username)
return render(request, "accounts/edit_profile.html", {'form': form})
class JSONResponse(HttpResponse):
def __init__(self, obj):
super().__init__(
json.dumps(obj, indent=(2 if settings.DEBUG else None)),
content_type='application/json',
) | unknown | codeparrot/codeparrot-clean | ||
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Anatoly Shipitsin <norguhtar at gmail.com>'
"""
Convert .fb2 files to .lrf
"""
import os, re
from calibre.customize.conversion import InputFormatPlugin, OptionRecommendation
from calibre import guess_type
FB2NS = 'http://www.gribuser.ru/xml/fictionbook/2.0'
class FB2Input(InputFormatPlugin):
name = 'FB2 Input'
author = 'Anatoly Shipitsin'
description = 'Convert FB2 files to HTML'
file_types = set(['fb2'])
recommendations = set([
('level1_toc', '//h:h1', OptionRecommendation.MED),
('level2_toc', '//h:h2', OptionRecommendation.MED),
('level3_toc', '//h:h3', OptionRecommendation.MED),
])
options = set([
OptionRecommendation(name='no_inline_fb2_toc',
recommended_value=False, level=OptionRecommendation.LOW,
help=_('Do not insert a Table of Contents at the beginning of the book.'
)
),
])
def convert(self, stream, options, file_ext, log,
accelerators):
from lxml import etree
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.ebooks.metadata.meta import get_metadata
from calibre.ebooks.oeb.base import XLINK_NS, XHTML_NS, RECOVER_PARSER
from calibre.ebooks.chardet import xml_to_unicode
NAMESPACES = {'f':FB2NS, 'l':XLINK_NS}
self.log = log
log.debug('Parsing XML...')
raw = stream.read().replace('\0', '')
raw = xml_to_unicode(raw, strip_encoding_pats=True,
assume_utf8=True, resolve_entities=True)[0]
try:
doc = etree.fromstring(raw)
except etree.XMLSyntaxError:
try:
doc = etree.fromstring(raw, parser=RECOVER_PARSER)
if doc is None:
raise Exception('parse failed')
except:
doc = etree.fromstring(raw.replace('& ', '&'),
parser=RECOVER_PARSER)
if doc is None:
raise ValueError('The FB2 file is not valid XML')
stylesheets = doc.xpath('//*[local-name() = "stylesheet" and @type="text/css"]')
css = ''
for s in stylesheets:
css += etree.tostring(s, encoding=unicode, method='text',
with_tail=False) + '\n\n'
if css:
import cssutils, logging
parser = cssutils.CSSParser(fetcher=None,
log=logging.getLogger('calibre.css'))
XHTML_CSS_NAMESPACE = '@namespace "%s";\n' % XHTML_NS
text = XHTML_CSS_NAMESPACE + css
log.debug('Parsing stylesheet...')
stylesheet = parser.parseString(text)
stylesheet.namespaces['h'] = XHTML_NS
css = unicode(stylesheet.cssText).replace('h|style', 'h|span')
css = re.sub(r'name\s*=\s*', 'class=', css)
self.extract_embedded_content(doc)
log.debug('Converting XML to HTML...')
ss = open(P('templates/fb2.xsl'), 'rb').read()
if options.no_inline_fb2_toc:
log('Disabling generation of inline FB2 TOC')
ss = re.compile(r'<!-- BUILD TOC -->.*<!-- END BUILD TOC -->',
re.DOTALL).sub('', ss)
styledoc = etree.fromstring(ss)
transform = etree.XSLT(styledoc)
result = transform(doc)
for img in result.xpath('//img[@src]'):
src = img.get('src')
img.set('src', self.binary_map.get(src, src))
index = transform.tostring(result)
open(u'index.xhtml', 'wb').write(index)
open(u'inline-styles.css', 'wb').write(css)
stream.seek(0)
mi = get_metadata(stream, 'fb2')
if not mi.title:
mi.title = _('Unknown')
if not mi.authors:
mi.authors = [_('Unknown')]
cpath = None
if mi.cover_data and mi.cover_data[1]:
with open(u'fb2_cover_calibre_mi.jpg', 'wb') as f:
f.write(mi.cover_data[1])
cpath = os.path.abspath(u'fb2_cover_calibre_mi.jpg')
else:
for img in doc.xpath('//f:coverpage/f:image', namespaces=NAMESPACES):
href = img.get('{%s}href'%XLINK_NS, img.get('href', None))
if href is not None:
if href.startswith('#'):
href = href[1:]
cpath = os.path.abspath(href)
break
opf = OPFCreator(os.getcwdu(), mi)
entries = [(f2, guess_type(f2)[0]) for f2 in os.listdir(u'.')]
opf.create_manifest(entries)
opf.create_spine([u'index.xhtml'])
if cpath:
opf.guide.set_cover(cpath)
with open(u'metadata.opf', 'wb') as f:
opf.render(f)
return os.path.join(os.getcwdu(), u'metadata.opf')
def extract_embedded_content(self, doc):
from calibre.ebooks.fb2 import base64_decode
self.binary_map = {}
for elem in doc.xpath('./*'):
if elem.text and 'binary' in elem.tag and 'id' in elem.attrib:
ct = elem.get('content-type', '')
fname = elem.attrib['id']
ext = ct.rpartition('/')[-1].lower()
if ext in ('png', 'jpeg', 'jpg'):
if fname.lower().rpartition('.')[-1] not in {'jpg', 'jpeg',
'png'}:
fname += '.' + ext
self.binary_map[elem.get('id')] = fname
raw = elem.text.strip()
try:
data = base64_decode(raw)
except TypeError:
self.log.exception('Binary data with id=%s is corrupted, ignoring'%(
elem.get('id')))
else:
with open(fname, 'wb') as f:
f.write(data) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_igmp_snooping
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages IGMP snooping global configuration.
description:
- Manages IGMP snooping global configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- When C(state=default), params will be reset to a default state.
- C(group_timeout) also accepts I(never) as an input.
options:
snooping:
description:
- Enables/disables IGMP snooping on the switch.
required: false
default: null
choices: ['true', 'false']
group_timeout:
description:
- Group membership timeout value for all VLANs on the device.
Accepted values are integer in range 1-10080, I(never) and
I(default).
required: false
default: null
link_local_grp_supp:
description:
- Global link-local groups suppression.
required: false
default: null
choices: ['true', 'false']
report_supp:
description:
- Global IGMPv1/IGMPv2 Report Suppression.
required: false
default: null
v3_report_supp:
description:
- Global IGMPv3 Report Suppression and Proxy Reporting.
required: false
default: null
choices: ['true', 'false']
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','default']
'''
EXAMPLES = '''
# ensure igmp snooping params supported in this module are in there default state
- nxos_igmp_snooping:
state: default
# ensure following igmp snooping params are in the desired state
- nxos_igmp_snooping:
group_timeout: never
snooping: true
link_local_grp_supp: false
optimize_mcast_flood: false
report_supp: true
v3_report_supp: true
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: ["ip igmp snooping link-local-groups-suppression",
"ip igmp snooping group-timeout 50",
"no ip igmp snooping report-suppression",
"no ip igmp snooping v3-report-suppression",
"no ip igmp snooping"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, output='text'):
command = {
'command': command,
'output': output,
}
return run_commands(module, [command])
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_group_timeout(config):
match = re.search(r' Group timeout configured: (\S+)', config, re.M)
if match:
value = match.group(1)
else:
value = ''
return value
def get_snooping(config):
REGEX = re.compile(r'{0}$'.format('no ip igmp snooping'), re.M)
value = False
try:
if REGEX.search(config):
value = False
except TypeError:
value = True
return value
def get_igmp_snooping(module):
command = 'show ip igmp snooping'
existing = {}
try:
body = execute_show_command(command, module, output='json')[0]
except IndexError:
body = []
if body:
snooping = str(body.get('enabled')).lower()
if snooping == 'true' or snooping == 'enabled':
existing['snooping'] = True
else:
existing['snooping'] = False
report_supp = str(body.get('grepsup')).lower()
if report_supp == 'true' or report_supp == 'enabled':
existing['report_supp'] = True
else:
existing['report_supp'] = False
link_local_grp_supp = str(body.get('glinklocalgrpsup')).lower()
if link_local_grp_supp == 'true' or link_local_grp_supp == 'enabled':
existing['link_local_grp_supp'] = True
else:
existing['link_local_grp_supp'] = False
v3_report_supp = str(body.get('gv3repsup')).lower()
if v3_report_supp == 'true' or v3_report_supp == 'enabled':
existing['v3_report_supp'] = True
else:
existing['v3_report_supp'] = False
command = 'show ip igmp snooping'
body = execute_show_command(command, module)[0]
if body:
existing['group_timeout'] = get_group_timeout(body)
return existing
def config_igmp_snooping(delta, existing, default=False):
CMDS = {
'snooping': 'ip igmp snooping',
'group_timeout': 'ip igmp snooping group-timeout {}',
'link_local_grp_supp': 'ip igmp snooping link-local-groups-suppression',
'v3_report_supp': 'ip igmp snooping v3-report-suppression',
'report_supp': 'ip igmp snooping report-suppression'
}
commands = []
command = None
for key, value in delta.items():
if value:
if default and key == 'group_timeout':
if existing.get(key):
command = 'no ' + CMDS.get(key).format(existing.get(key))
else:
command = CMDS.get(key).format(value)
else:
command = 'no ' + CMDS.get(key).format(value)
if command:
commands.append(command)
command = None
return commands
def get_igmp_snooping_defaults():
group_timeout = 'dummy'
report_supp = True
link_local_grp_supp = True
v3_report_supp = False
snooping = True
args = dict(snooping=snooping, link_local_grp_supp=link_local_grp_supp,
report_supp=report_supp, v3_report_supp=v3_report_supp,
group_timeout=group_timeout)
default = dict((param, value) for (param, value) in args.items()
if value is not None)
return default
def main():
argument_spec = dict(
snooping=dict(required=False, type='bool'),
group_timeout=dict(required=False, type='str'),
link_local_grp_supp=dict(required=False, type='bool'),
report_supp=dict(required=False, type='bool'),
v3_report_supp=dict(required=False, type='bool'),
state=dict(choices=['present', 'default'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
results = {'changed': False, 'commands': [], 'warnings': warnings}
snooping = module.params['snooping']
link_local_grp_supp = module.params['link_local_grp_supp']
report_supp = module.params['report_supp']
v3_report_supp = module.params['v3_report_supp']
group_timeout = module.params['group_timeout']
state = module.params['state']
args = dict(snooping=snooping, link_local_grp_supp=link_local_grp_supp,
report_supp=report_supp, v3_report_supp=v3_report_supp,
group_timeout=group_timeout)
proposed = dict((param, value) for (param, value) in args.items()
if value is not None)
existing = get_igmp_snooping(module)
commands = []
if state == 'present':
delta = dict(
set(proposed.items()).difference(existing.items())
)
if delta:
command = config_igmp_snooping(delta, existing)
if command:
commands.append(command)
elif state == 'default':
proposed = get_igmp_snooping_defaults()
delta = dict(
set(proposed.items()).difference(existing.items())
)
if delta:
command = config_igmp_snooping(delta, existing, default=True)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
results['changed'] = True
if not module.check_mode:
load_config(module, cmds)
if 'configure' in cmds:
cmds.pop(0)
results['commands'] = cmds
module.exit_json(**results)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet import np, npx
from mxnet.test_utils import same, use_np, assert_almost_equal
import random
import pytest
@use_np
@pytest.mark.parametrize('shape',
[(3, 2), (9,17), (2, 7, 1, 8)] + [(i,) for i in range(1,65)])
def test_contrib_intgemm_maxabsolute(shape):
if "intgemm_maxabsolute" not in dir(mx.nd.contrib):
return
# mx.nd API
m = mx.nd.random_uniform(low=-100.0, high=100.0, shape=shape)
fast = mx.nd.contrib.intgemm_maxabsolute(m)
slow = mx.nd.max(mx.nd.abs(m))
assert same(fast, slow)
# np API
m = np.random.uniform(low=-100.0, high=100.0, size=shape)
fast = npx.intgemm_maxabsolute(m).reshape(())
slow = np.max(np.abs(m))
assert same(fast, slow)
@use_np
@pytest.mark.parametrize('shape', [(i,) for i in range(1, 67)] + [(2,3), (130, 12)])
@pytest.mark.parametrize('max_quant', [2.0])
def test_contrib_intgemm_prepare_data(shape, max_quant):
if "intgemm_prepare_data" not in dir(mx.nd.contrib):
return
m = mx.nd.random_uniform(low=-3.0, high=3.0, shape=shape)
scaled = m * 127.0 / max_quant
# Rounding 0.5 can go up or down. Move values away from 0.5.
too_close = mx.nd.abs(mx.nd.round(scaled) - scaled) > 0.45
# Add 0.2 in scaled space so (0.45, 0.55) maps to (0.65, 0.75) which will round consistently.
m += max_quant / 127.0 * 0.2 * too_close
# Reference: scale and round
ref = mx.nd.round(m * 127.0 / max_quant)
# Clip to [-127, 127]. Because otherwise e.g. -129 casts to +127.
ref = mx.nd.broadcast_maximum(ref, mx.nd.array([-127.0]))
ref = mx.nd.broadcast_minimum(ref, mx.nd.array([127.0]))
# Reference: cast to int8
ref = mx.nd.cast(ref, dtype='int8')
# Reference: ban -128
ref = mx.nd.broadcast_maximum(ref, mx.nd.array([-127], dtype = 'int8'))
test = mx.nd.contrib.intgemm_prepare_data(m, mx.nd.array([max_quant]))
assert same(test, ref)
test = npx.intgemm_prepare_data(m.as_np_ndarray(), np.array([max_quant]))
assert same(test, ref.as_np_ndarray())
@use_np
@pytest.mark.parametrize('shape', [(8, 64), (16, 64), (8, 128), (16, 128), (2, 4, 64)])
@pytest.mark.parametrize('max_quant', [0.2, 3.0])
@pytest.mark.parametrize('api', [(mx.nd.contrib, mx.nd), (npx, np)])
def test_contrib_intgemm_weight_consistent(shape, max_quant, api):
# The weight format is actually CPU-dependent so we don't directly test the
# output, but indirectly test that it works.
if "intgemm_prepare_weight" not in dir(mx.nd.contrib):
return
contrib, top = api
max_array = top.array([max_quant])
if top == mx.nd:
m = top.random_uniform(low=-3.0, high=3.0, shape=shape)
else:
m = np.random.uniform(size=shape)
direct = contrib.intgemm_prepare_weight(m, max_array)
quant = contrib.intgemm_prepare_data(m, max_array)
indirect = contrib.intgemm_prepare_weight(quant, already_quantized=True)
# Should get the same data from direct call and already_quantized version.
assert same(direct, indirect)
@use_np
@pytest.mark.parametrize('indices', [
[0,1,2,3,4,5,6,7],
[1,2,1,2,1,2,1,2],
[7,6,5,4,3,2,1,0],
[3,1,4,1,5,9,2,6],
# Since random_uniform doesn't support int8, use python
[random.randint(0,15) for i in range(8)],
[random.randint(0,15) for i in range(16)],
[random.randint(0,15) for i in range(24)]
])
@pytest.mark.parametrize('api', [(mx.nd.contrib, mx.nd), (npx, np)])
def test_contrib_intgemm_take_weight(indices, api):
if "intgemm_take_weight" not in dir(mx.nd.contrib):
return
contrib, top = api
m = top.array([random.randint(-127,127) for i in range(16 * 64)], dtype='int8')
m = m.reshape((16, 64))
indices = top.array(indices, dtype='int32')
# Prepare weight then take.
test = contrib.intgemm_prepare_weight(m, already_quantized=True)
test = contrib.intgemm_take_weight(test, indices)
# Take then prepare.
ref = m.take(indices, axis=0)
ref = contrib.intgemm_prepare_weight(ref, already_quantized=True)
assert same(test, ref)
@use_np
@pytest.mark.parametrize('data_rows', range(1, 5))
@pytest.mark.parametrize('inner', range(64, 256, 64))
@pytest.mark.parametrize('weight_cols', range(8, 24, 8))
@pytest.mark.parametrize('api', [
(mx.nd.contrib, mx.nd, mx.nd.FullyConnected, mx.nd.cast),
(npx, np, npx.fully_connected, npx.cast)])
def test_contrib_intgemm_multiply(data_rows, inner, weight_cols, api):
if "intgemm_fully_connected" not in dir(mx.nd.contrib):
return
contrib, top, fully_connected, cast = api
#The multiplication routine has approximations so everything is tested
#deterministically to ensure bounds are met.
random.seed(1)
# Don't use full range (-127, 127) to avoid saturation.
data = [random.randint(-64, 64) for i in range(data_rows * inner)]
data = top.array(data, dtype='int8').reshape((data_rows, inner))
weight = [random.randint(-64, 64) for i in range(inner * weight_cols)]
weight = top.array(weight, dtype='int8').reshape((weight_cols, inner))
weight_prepared = contrib.intgemm_prepare_weight(weight, already_quantized=True)
# int32 output, no bias
test = contrib.intgemm_fully_connected(data,
weight_prepared,
no_bias=True,
flatten=False,
out_type='int32',
num_hidden=weight_cols)
ref = fully_connected(cast(data, dtype='float32'),
cast(weight, dtype='float32'),
no_bias=True,
flatten=False,
num_hidden=weight_cols)
assert_almost_equal(cast(test, dtype='float32'), ref, rtol=0.01, atol=0.01)
# float32 output, no bias
scale = 3.0
test = contrib.intgemm_fully_connected(data,
weight_prepared,
top.array([scale]),
no_bias=True,
flatten=False,
out_type='float32',
num_hidden=weight_cols)
assert_almost_equal(test, ref * scale, rtol=0.01, atol=0.01)
# int32 output, bias
bias = top.array([random.randint(-60000, 60000) for i in range(weight_cols)], dtype = 'int32')
test = contrib.intgemm_fully_connected(data,
weight_prepared,
bias,
no_bias=False,
flatten=False,
out_type='int32',
num_hidden=weight_cols)
ref = fully_connected(cast(data, dtype='float32'),
cast(weight, dtype='float32'),
cast(bias, dtype='float32'),
no_bias=False,
flatten=False,
num_hidden=weight_cols)
assert_almost_equal(cast(test, dtype='float32'), ref, rtol=0.01, atol=0.01)
# float32 output, bias
# Scaling is applied before bias (and bias is not scaled). So to make the
# reference comparison easy, just scale the bias beforehand.
test = contrib.intgemm_fully_connected(data,
weight_prepared,
top.array([scale]),
cast(bias, dtype='float32') * scale,
no_bias=False,
flatten=False,
out_type='float32',
num_hidden=weight_cols)
assert_almost_equal(test, ref * scale, rtol=0.01, atol=0.01)
# float32 input should work the same as manually prepared int8 input.
data_float = top.array([random.uniform(-3.14, 3.14) for i in range(data_rows * inner)])
data_float = data_float.reshape(data_rows, inner)
direct = contrib.intgemm_fully_connected(data_float,
weight_prepared,
top.array([scale]),
cast(bias, dtype='float32'),
no_bias=False,
flatten=False,
out_type='float32',
num_hidden=weight_cols)
maxabs = contrib.intgemm_maxabsolute(data_float)
data_prepared = contrib.intgemm_prepare_data(data_float, maxabs)
cooked = contrib.intgemm_fully_connected(data_prepared,
weight_prepared,
top.array(scale * maxabs / 127.0),
cast(bias, dtype='float32'),
no_bias=False,
flatten=False,
out_type='float32',
num_hidden=weight_cols)
assert_almost_equal(direct, cooked, rtol=0.01, atol=0.01) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.io;
import static com.google.common.truth.Truth.assertThat;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableList;
import com.google.common.io.SourceSinkFactory.ByteSourceFactory;
import com.google.common.io.SourceSinkFactory.CharSourceFactory;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StringWriter;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import junit.framework.TestSuite;
import org.jspecify.annotations.NullUnmarked;
/**
* A generator of {@code TestSuite} instances for testing {@code CharSource} implementations.
* Generates tests of all methods on a {@code CharSource} given various inputs the source is
* expected to contain.
*
* @author Colin Decker
*/
@AndroidIncompatible // TODO(b/230620681): Make this available (even though we won't run it).
@NullUnmarked
public class CharSourceTester extends SourceSinkTester<CharSource, String, CharSourceFactory> {
// Test methods of this class, discovered reflectively via the superclass helper;
// suiteForString creates one CharSourceTester case per method for each input string.
private static final ImmutableList<Method> testMethods = getTestMethods(CharSourceTester.class);
/**
 * Builds a suite running every {@code CharSource} test against each entry of
 * {@code TEST_STRINGS}. When {@code testAsByteSource} is true, each string is
 * exercised through its UTF-8 bytes (which also covers the byte-source view);
 * otherwise the string content is tested directly.
 */
static TestSuite tests(String name, CharSourceFactory factory, boolean testAsByteSource) {
  TestSuite result = new TestSuite(name);
  for (Entry<String, String> testCase : TEST_STRINGS.entrySet()) {
    String desc = testCase.getKey();
    String content = testCase.getValue();
    result.addTest(
        testAsByteSource
            ? suiteForBytes(factory, content.getBytes(UTF_8), name, desc, true)
            : suiteForString(factory, content, name, desc));
  }
  return result;
}
static TestSuite suiteForBytes(
CharSourceFactory factory, byte[] bytes, String name, String desc, boolean slice) {
TestSuite suite = suiteForString(factory, new String(bytes, UTF_8), name, desc);
ByteSourceFactory byteSourceFactory = SourceSinkFactories.asByteSourceFactory(factory);
suite.addTest(
ByteSourceTester.suiteForBytes(
byteSourceFactory, bytes, name + ".asByteSource[Charset]", desc, slice));
return suite;
}
static TestSuite suiteForString(
CharSourceFactory factory, String string, String name, String desc) {
TestSuite suite = new TestSuite(name + " [" + desc + "]");
for (Method method : testMethods) {
suite.addTest(new CharSourceTester(factory, string, name, desc, method));
}
return suite;
}
private final ImmutableList<String> expectedLines;
private CharSource source;
public CharSourceTester(
CharSourceFactory factory, String string, String suiteName, String caseDesc, Method method) {
super(factory, string, suiteName, caseDesc, method);
this.expectedLines = getLines(expected);
}
@Override
protected void setUp() throws Exception {
this.source = factory.createSource(data);
}
public void testOpenStream() throws IOException {
Reader reader = source.openStream();
StringWriter writer = new StringWriter();
char[] buf = new char[64];
int read;
while ((read = reader.read(buf)) != -1) {
writer.write(buf, 0, read);
}
reader.close();
writer.close();
assertExpectedString(writer.toString());
}
public void testOpenBufferedStream() throws IOException {
BufferedReader reader = source.openBufferedStream();
StringWriter writer = new StringWriter();
char[] buf = new char[64];
int read;
while ((read = reader.read(buf)) != -1) {
writer.write(buf, 0, read);
}
reader.close();
writer.close();
assertExpectedString(writer.toString());
}
public void testCopyTo_appendable() throws IOException {
StringBuilder builder = new StringBuilder();
assertEquals(expected.length(), source.copyTo(builder));
assertExpectedString(builder.toString());
}
public void testCopyTo_charSink() throws IOException {
TestCharSink sink = new TestCharSink();
assertEquals(expected.length(), source.copyTo(sink));
assertExpectedString(sink.getString());
}
public void testRead_toString() throws IOException {
String string = source.read();
assertExpectedString(string);
}
public void testReadFirstLine() throws IOException {
if (expectedLines.isEmpty()) {
assertThat(source.readFirstLine()).isNull();
} else {
assertEquals(expectedLines.get(0), source.readFirstLine());
}
}
public void testReadLines_toList() throws IOException {
assertExpectedLines(source.readLines());
}
public void testIsEmpty() throws IOException {
assertEquals(expected.isEmpty(), source.isEmpty());
}
public void testLength() throws IOException {
assertEquals(expected.length(), source.length());
}
public void testLengthIfKnown() throws IOException {
Optional<Long> lengthIfKnown = source.lengthIfKnown();
if (lengthIfKnown.isPresent()) {
assertEquals(expected.length(), (long) lengthIfKnown.get());
}
}
public void testReadLines_withProcessor() throws IOException {
List<String> list =
source.readLines(
new LineProcessor<List<String>>() {
final List<String> list = new ArrayList<>();
@Override
public boolean processLine(String line) throws IOException {
list.add(line);
return true;
}
@Override
public List<String> getResult() {
return list;
}
});
assertExpectedLines(list);
}
public void testReadLines_withProcessor_stopsOnFalse() throws IOException {
List<String> list =
source.readLines(
new LineProcessor<List<String>>() {
final List<String> list = new ArrayList<>();
@Override
public boolean processLine(String line) throws IOException {
list.add(line);
return false;
}
@Override
public List<String> getResult() {
return list;
}
});
if (expectedLines.isEmpty()) {
assertTrue(list.isEmpty());
} else {
assertEquals(expectedLines.subList(0, 1), list);
}
}
private void assertExpectedString(String string) {
assertEquals(expected, string);
}
private void assertExpectedLines(List<String> list) {
assertEquals(expectedLines, list);
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/io/CharSourceTester.java |
"ArticlePage object for presseurope"
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.capabilities.messages.genericArticle import GenericNewsPage, try_drop_tree, \
clean_relativ_urls
class PresseuropPage(GenericNewsPage):
    """Article page parser for presseurop."""
    def on_loaded(self):
        # Configure the CSS selectors GenericNewsPage uses to locate the
        # title, author and body of an article.
        self.main_div = self.document.getroot()
        self.element_title_selector = "title"
        self.element_author_selector = "div[id=content-author]>a"
        self.element_body_selector = "div.block"
    def get_body(self):
        """Return the article body, stripped of social-sharing widgets."""
        body = self.get_element_body()
        # Drop share buttons and counters before serialising, then make
        # relative links absolute.
        for selector in ("li.button-social", "div.sharecount"):
            try_drop_tree(self.parser, body, selector)
        clean_relativ_urls(body, "http://presseurop.eu")
        return self.parser.tostring(body)
    def get_title(self):
        """Return the page title without the trailing site-name suffix."""
        full_title = GenericNewsPage.get_title(self)
        return full_title.split('|')[0]
class DailyTitlesPage(PresseuropPage):
    """Page listing the daily headlines; same layout as an article page
    except that the body lives in a <ul> rather than a block div."""
    def on_loaded(self):
        self.element_title_selector = "title"
        self.element_author_selector = "div[id=content-author]>a"
        self.element_body_selector = "ul.articlebody"
        self.main_div = self.document.getroot()
class CartoonPage(PresseuropPage):
    """Cartoon page parser for presseurop."""
    def on_loaded(self):
        self.main_div = self.document.getroot()
        # Cartoons carry their author in the profile box and the drawing
        # itself inside a panel div.
        for attr, css in (("element_title_selector", "title"),
                          ("element_author_selector", "div.profilecartoontext>p>a"),
                          ("element_body_selector", "div.panel")):
            setattr(self, attr, css)
__author__ = 'Cedric'
# each information will be used to sort the properties for the given policy
#import random
from monopyly import *
from .Memory import *
from .Policy import *
class VSSmartBuyerNeutral(PlayerAIBase):
    '''
    Monopoly AI for the monopyly framework.

    Buying, housing, selling/mortgaging, chance and jail decisions are
    delegated to the Policy helper classes configured in __init__;
    per-square tuning data lives in self.properties_information.
    '''
    def __init__(self):
        '''
        ctor: set up memories, decision policies and per-turn bookkeeping.
        '''
        # memory information
        self.needed_money = 0
        self.auction_memory = AuctionMemory()
        self.deal_memory = DealMemory()
        # Decision policies (see Policy.py).  The commented-out line keeps
        # an alternative selling strategy around for experimentation.
        self.property_policy = AcquiringPolicy(self)
        self.house_policy = HousePolicy_v2(self,HousePolicy_v2.HousePolicy.ONE_COMPLETE_SET, HousePolicy_v2.RepartitionPolicy.SAME_SIZE)
        self.selling_policy = SellingPolicy(self,self.deal_memory)
        #self.selling_policy = SellingPolicy_v2(self,SellingPolicy_v2.SellingPolicy.MORTGAGE_DEAL_STATION_UTILITY_FIRST, SellingPolicy_v2.HousePolicy.ONE_ON_EACH_SET)
        self.chance_policy = ChancePolicy(0.25)
        self.jail_policy = JailPolicy(0.8, 4, 20)
        # Per-turn bookkeeping, reset in start_of_turn().
        self.turn = 0
        self.money_to_be_taken = 0
        self.in_state_of_taking_money = False
        '''
        PropertyInformation
        # information used to know is a property will be bought (or unmortgaged)
        buying_property_cash_threshold # this threshold indicate the remaining cash wanted after buying the property (negative means that even if the cash if not available, we want to buy)
        buying_property_deal_proposal_factor # the factor will be applied to the price of the property during the deal_proposal pahse (the buyingThreshold should also be taken into acount as we wanted some cash left if needed)
        buying_property_deal_proposed_factor # the factor will be applied to the price of the property during the deal_proposed alert (the buyingThreshold should also be taken into acount as we wanted some cash left if needed)
        buying_property_auctionFactor # the factor will be applied to the price of the property during the auction alert (the buyingThreshold should also be taken into acount as we wanted some cash left if needed)
        # information used to know if house will be build
        buying_house_cash_threshold # similar to the property cash threshold, the remaining cash wanted after transaction occurs
        buying_house_sorter # value use to sort the property in terms of house building preferences (-1 means that housing is not available)
        buying_house_policy # ONE_COMPLETE_SET, ONE_AVAILABLE_PROPERTY, ALL_AVAIALABLE_PROPERTY, ALL_COMPLETE_SET
        buying_house_repartition_policy # MAXIMIZE_HOTEL, SAME_SIZE
        # information used to know how to retrieve money
        # - the inverted buying_house_sorter will be used to know which house's property/set need to sold first
        # - the inverted buying_house_cash_threshold will be used to know which property to mortgage first
        selling_policy # MORTGAGE_DEAL_STREET_FIRST, SELL_HOUSE_FIRST, MORTGAGE_DEAL_STATION_UTILTY_FIRST,
        selling_mortgage_deal_threshold # the threshold to know if we mortgage or try to deal a propery (tested against a random number)
        selling_house_policy # ONE_ON_EACH_SET, WHOLE_SET_LESS_HOUSE_FIRST, WHOLE_SET_MAX_HOUSE_FIRST, WHOLE_SET_SMOOTH_HOUSE_ON_BOARD
        '''
        # Per-square tuning rows, in the column order documented just above.
        self.properties_information = {
            #brown
            Square.Name.OLD_KENT_ROAD: [200, 1.1, 1, 1.25, 150, 5, 0.75],
            Square.Name.WHITECHAPEL_ROAD: [200, 1.1, 1, 1.25, 125, 6, 0.75],
            #light blue
            Square.Name.THE_ANGEL_ISLINGTON: [300, 1.1, 1, 1, 250, 5, 0.75],
            Square.Name.EUSTON_ROAD: [300, 1.1, 1, 1, 250, 5, 0.75],
            Square.Name.PENTONVILLE_ROAD: [300, 1.1, 1, 1, 225, 6, 0.75],
            #rose
            Square.Name.PALL_MALL: [150, 1.1, 1.1, 1.2, 250, 7, 0.75],
            Square.Name.WHITEHALL: [150, 1.1, 1.1, 1.2, 250, 7, 0.75],
            Square.Name.NORTHUMBERLAND_AVENUE: [150, 1.1, 1.1, 1.2, 225, 8, 0.75],
            #orange
            Square.Name.BOW_STREET: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.MARLBOROUGH_STREET: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.VINE_STREET: [300, 1, 1, 1, 250, 5, 0.75],
            #red
            Square.Name.STRAND: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.FLEET_STREET: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.TRAFALGAR_SQUARE: [300, 1, 1, 1, 250, 5, 0.75],
            #yellow
            Square.Name.LEICESTER_SQUARE: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.COVENTRY_STREET: [300, 1, 1, 1, 250, 4, 0.75],
            Square.Name.PICCADILLY: [300, 1, 1, 1, 250, 5, 0.75],
            #green
            Square.Name.REGENT_STREET: [300, 1, 1, 1, 250, 5, 0.75],
            Square.Name.OXFORD_STREET: [300, 1, 1, 1, 250, 5, 0.75],
            Square.Name.BOND_STREET: [300, 1, 1, 1, 250, 5, 0.75],
            #dark blue
            Square.Name.PARK_LANE: [100, 1.1, 1.1, 1.2, 250, 5, 0.75],
            Square.Name.MAYFAIR: [100, 1.1, 1.1, 1.2, 250, 5, 0.75],
            #station
            Square.Name.KINGS_CROSS_STATION: [0, 1.25, 1.25, 1.25, 0, -1, 0],
            Square.Name.MARYLEBONE_STATION: [0, 1.25, 1.25, 1.25, 0, -1, 0],
            Square.Name.FENCHURCH_STREET_STATION: [0, 1.25, 1.25, 1.25, 0, -1, 0],
            Square.Name.LIVERPOOL_STREET_STATION: [0, 1.25, 1.25, 1.25, 0, -1, 0],
            #company
            Square.Name.ELECTRIC_COMPANY: [50, 1, 1, 1.1, 0, -1, 0.5],
            Square.Name.WATER_WORKS: [50, 1, 1, 1.1, 0, -1, 0.5],
        }
    def get_name(self):
        '''Return the display name used by the game engine for this AI.'''
        return 'VSSmartBuyerNeutral'
    def start_of_game(self):
        '''
        Called at the start of the game.
        No response is required.
        '''
        # Reset all bookkeeping so state never leaks between games.
        self.needed_money = 0
        self.turn = 0
        self.money_to_be_taken = 0
        self.in_state_of_taking_money = False
    def start_of_turn(self, game_state, player):
        '''
        Called when an AI's turn starts. All AIs receive this notification.
        No response is required.
        '''
        self.needed_money = 0
        self.money_to_be_taken = 0
        self.turn += 1
        self.in_state_of_taking_money = False
    def landed_on_unowned_property(self, game_state, player, property):
        '''
        Called when the AI lands on an unowned property. Only the active
        player receives this notification.
        Must return either the BUY or DO_NOT_BUY action from the
        PlayerAIBase.Action enum.
        The default behaviour is DO_NOT_BUY.
        '''
        return self.property_policy.acquire_through_landing(game_state,player,property)
    def money_given(self, player, amount):
        '''
        Called when money has been given to the player.
        No response is required.
        '''
        self.in_state_of_taking_money = False
        #if self.needed_money > 0:
        #Logger.log("PLAYER::IN TURN> money given ("+format(self.turn) + ") : " + format(amount),Logger.WARNING)
        pass
    def money_will_be_taken(self, player, amount):
        '''
        Called shortly before money will be taken from the player.
        Before the money is taken, there will be an opportunity to
        make deals and/or mortgage properties. (This will be done via
        subsequent callbacks.)
        No response is required.
        '''
        self.in_state_of_taking_money = True
        self.money_to_be_taken = amount
        #Logger.log("PLAYER::BEGIN-money_will_be_taken> needed money turn ("+format(self.turn) + ") : " + format(amount) + " vs cash: " + format(player.state.cash),Logger.WARNING)
        # Record the shortfall; the selling policy uses it to decide what
        # to liquidate before the payment is actually taken.
        if amount > player.state.cash:
            self.needed_money = amount - player.state.cash
            #Logger.log(" money_will_be_taken> needed money turn ("+format(self.turn) + ") : " + format(self.needed_money),Logger.WARNING)
        pass
    def money_taken(self, player, amount):
        '''
        Called when money has been taken from the player.
        No response is required.
        '''
        #Logger.log("PLAYER::END-money_taken> turn ("+format(self.turn) + ") money_to_be_taken: " + format(self.money_to_be_taken) + " vs taken money: " + format(amount) + " vs cash: " + format(player.state.cash),Logger.WARNING)
        pass
    def players_birthday(self):
        '''
        Called when a player picks up the 'It is your birthday...'
        Community Chest card.
        You should return "Happy Birthday!" (with this casing and the
        exclamation mark). If not, you will have to pay £100 instead of
        the standard £10.
        '''
        return "Happy Birthday!"
    def pay_ten_pounds_or_take_a_chance(self, game_state, player):
        '''
        Called when the player picks up the "Pay a £10 fine or take a Chance" card.
        Return either:
        PlayerAIBase.Action.PAY_TEN_POUND_FINE
        or
        PlayerAIBase.Action.TAKE_A_CHANCE
        '''
        return self.chance_policy.compute()
    def property_offered_for_auction(self, game_state, player, property):
        '''
        Called when a property is put up for auction.
        Properties are auctioned when a player lands on an unowned square but does
        not want to buy it. All players take part in the auction, including the
        player who landed on the square.
        The property will be sold to the highest bidder using the eBay rule,
        ie, for £1 more than the second-highest bid.
        Return the amount you bid. To put in a bid this must be a positive integer.
        Zero means that you are not bidding (it does not mean that you are bidding
        zero).
        The default behaviour is not to bid.
        '''
        return self.property_policy.acquire_through_auction(game_state,player,property)
    def auction_result(self, status, property, player, amount_paid):
        '''
        Called with the result of an auction. All players receive
        this notification.
        status is either AUCTION_SUCCEEDED or AUCTION_FAILED.
        If the auction succeeded, the property, the player who won
        the auction and the amount they paid are passed to the AI.
        If the auction failed, the player will be None and the
        amount paid will be 0.
        No response is required.
        '''
        # Remember winning bids so later policies can price auctions better.
        if status == PlayerAIBase.Action.AUCTION_SUCCEEDED:
            self.auction_memory.add_auction(property,player,amount_paid)
        pass
    def build_houses(self, game_state, player):
        '''
        Called near the start of the player's turn to give the option of building houses.
        Return a list of tuples indicating which properties you want to build houses
        on and how many houses to build on each. For example:
        [(park_lane, 3), (mayfair, 4)]
        The properties should be Property objects.
        Return an empty list if you do not want to build.
        Notes:
        - You must own a whole set of unmortgaged properties before you can
        build houses on it.
        - You can build on multiple sets in one turn. Just specify all the streets
        and houses you want to build.
        - Build five houses on a property to have a "hotel".
        - You specify the _additional_ houses you will be building, not the
        total after building. For example, if Park Lane already has 3 houses
        and you specify (park_lane, 2) you will end up with 5
        houses (ie, a hotel).
        - Sets must end up with 'balanced' housing. No square in a set can
        have more than one more house than any other. If you request an
        unbalanced build, the whole transaction will be rolled back, even
        if it includes balanced building on other sets as well.
        - If you do not have (or cannot raise) enough money to build all the
        houses specified, the whole transaction will be rolled back. Between
        this function call and money being taken, you will have an opportunity
        to mortgage properties or make deals.
        The default behaviour is not to build.
        '''
        return self.house_policy.compute(game_state, player)
    def sell_houses(self, game_state, player):
        '''
        Gives the player the option to sell properties.
        This is called when any debt, fine or rent has to be paid. It is
        called just before mortgage_properties (below).
        Notes:
        - You cannot mortgage properties with houses on them, so if you
        plan to mortgage, make sure you sell all the houses first.
        - For each house sold you receive half the price that they were
        bought for.
        - Houses on a set must end up 'balanced', ie no property can have
        more than one more house than any other property in the set.
        Return a list of tuples of the streets and number of houses you
        want to sell. For example:
        [(old_kent_road, 1), (bow_street, 1)]
        The streets should be Property objects.
        The default is not to sell any houses.
        '''
        # Only liquidate when a payment shortfall was recorded by
        # money_will_be_taken().
        if self.needed_money > 0:
            return self.selling_policy.computeHouse(game_state,player)
        return []
    def mortgage_properties(self, game_state, player):
        '''
        Gives the player an option to mortgage properties.
        This is called before any debt is paid (house building, rent,
        tax, fines from cards etc).
        Notes:
        - You receive half the face value of each property mortgaged.
        - You cannot mortgage properties with houses on them.
        (The AI will have been given the option to sell houses before this
        function is called.)
        Return a list of properties to mortgage, for example:
        [bow_street, liverpool_street_station]
        The properties should be Property objects.
        Return an empty list if you do not want to mortgage anything.
        The default behaviour is not to mortgage anything.
        '''
        if self.needed_money > 0:
            return self.selling_policy.computeMortgage(game_state,player)
        return []
    def unmortgage_properties(self, game_state, player):
        '''
        Called near the start of the player's turn to give them the
        opportunity to unmortgage properties.
        Unmortgaging costs half the face value plus 10%. Between deciding
        to unmortgage and money being taken the player will be given the
        opportunity to make deals or sell other properties. If after this
        they do not have enough money, the whole transaction will be aborted,
        and no properties will be unmortgaged and no money taken.
        Return a list of property names to unmortgage, like:
        [old_kent_road, bow_street]
        The properties should be Property objects.
        The default is to return an empty list, ie to do nothing.
        '''
        return self.property_policy.acquire_through_unmortgage(game_state,player)
    def get_out_of_jail(self, game_state, player):
        '''
        Called in the player's turn, before the dice are rolled, if the player
        is in jail.
        There are three possible return values:
        PlayerAIBase.Action.BUY_WAY_OUT_OF_JAIL
        PlayerAIBase.Action.PLAY_GET_OUT_OF_JAIL_FREE_CARD
        PlayerAIBase.Action.STAY_IN_JAIL
        Buying your way out of jail will cost £50.
        The default action is STAY_IN_JAIL.
        '''
        return self.jail_policy.compute(self,game_state,player)
    def propose_deal(self, game_state, player):
        '''
        Called to allow the player to propose a deal.
        You return a DealProposal object.
        If you do not want to make a deal, return None.
        If you want to make a deal, you provide this information:
        - The player number of the player you are proposing the deal to
        - A list of properties offered
        - A list of properties wanted
        - Maximum cash offered as part of the deal
        - Minimum cash wanted as part of the deal.
        Properties offered and properties wanted are passed as lists of
        Property objects.
        If you offer money as part of the deal, set the cash wanted to zero
        and vice versa.
        Note that the cash limits will not be shown to the proposed-to player.
        When the deal is offered to them, they set their own limits for accepting
        the deal without seeing your limits. If the limits are acceptable to both
        players, the deal will be done at the halfway point.
        For example, Player1 proposes:
        Propose to: Player2
        Properties offered: Mayfair
        Properties wanted: (none)
        Maximum cash offered: 0
        Minimum cash wanted: 500
        Player2 accepts with these limits:
        Maximum cash offered: 1000
        Minimum cash wanted: 0
        The deal will be done with Player2 receiving Mayfair and paying £750
        to Player1.
        The only 'negotiation' is in the managing of cash along with the deal
        as discussed above. There is no negotiation about which properties are
        part of the deal. If a deal is rejected because it does not contain the
        right properties, another deal can be made at another time with different
        lists of properties.
        Example construction and return of a DealProposal object:
        return DealProposal(
        propose_to_player_number=2,
        properties_offered=[vine_street, bow_street],
        properties_wanted=[park_lane],
        maximum_cash_offered=200)
        The default is for no deal to be proposed.
        '''
        # Short on cash: deal to raise money.  Otherwise: deal to acquire,
        # budgeting only the cash left after the pending payment.
        if self.needed_money > 0:
            return self.selling_policy.propose_deal(game_state,player)
        return self.property_policy.acquire_through_deal_proposal(game_state,player,player.state.cash - self.money_to_be_taken)
    def deal_proposed(self, game_state, player, deal_proposal):
        '''
        Called when another player proposes a deal to you.
        See propose_deal (above) for details of the DealProposal object.
        Return a DealResponse object.
        To reject a deal:
        return DealResponse(DealResponse.Action.REJECT)
        To accept a deal:
        return DealResponse(DealResponse.Action.ACCEPT, maximum_cash_offered=300)
        or
        return DealResponse(DealResponse.Action.ACCEPT, minimum_cash_wanted=800)
        The default is to reject the deal.
        '''
        return self.property_policy.acquire_through_deal_being_proposed(game_state,player,deal_proposal)
    def deal_result(self, deal_info):
        '''
        Called when a proposed deal has finished. The players involved in
        the deal receive this notification.
        deal_info is a PlayerAIBase.DealInfo 'enum' giving indicating
        whether the deal succeeded, and if not why not.
        No response is required.
        '''
        pass
    def deal_completed(self, deal_result):
        '''
        Called when a deal has successfully completed to let all
        players know the details of the deal which took place.
        deal_result is a DealResult object.
        Note that the cash_transferred_from_proposer_to_proposee in
        the deal_result can be negative if cash was transferred from
        the proposee to the proposer.
        No response is required.
        '''
        # Remember completed deals to inform future pricing decisions.
        self.deal_memory.add_deal(deal_result)
        pass
    def player_went_bankrupt(self, player):
        '''
        Called when a player goes bankrupt.
        All non-bankrupt players receive this notification.
        player is a Player object.
        No response is required.
        '''
        # Debugging aid: if WE went bankrupt while still holding value,
        # dump what was left un-liquidated so the selling policy can be tuned.
        if player.name == self.get_name() and player.net_worth + player.state.cash > 0:
            property_net_worth = 0
            property_with_house = 0
            property_unmortgaged = 0
            houses = 0
            for property in player.state.properties:
                # We add the mortgage value of properties...
                if property.is_mortgaged == False:
                    property_unmortgaged += 1
                    property_net_worth += property.mortgage_value
                # We add the resale value of houses...
                if type(property) == Street:
                    if property.number_of_houses > 0:
                        property_with_house += 1
                        houses += property.number_of_houses
                        property_net_worth += int(property.house_price/2 * property.number_of_houses)
            if property_unmortgaged > 0 or property_with_house > 0 or houses > 0:
                Logger.log(player.name + " went bankrupt with a cash of " + format(player.state.cash) + " and a net of " + format(player.net_worth) + "/" + format(property_net_worth), Logger.ERROR)
                Logger.log(player.name + " went bankrupt with " + format(property_unmortgaged) + " properties unmortgaged", Logger.ERROR)
                Logger.log(player.name + " went bankrupt with " + format(property_with_house) + " properties with house", Logger.ERROR)
                Logger.log(player.name + " went bankrupt with " + format(houses) + " houses", Logger.ERROR)
                for property in player.state.properties:
                    # We add the mortgage value of properties...
                    if property.is_mortgaged == False:
                        Logger.log(player.name + " unmortgage property: " + property.name, Logger.ERROR)
                    # We add the resale value of houses...
                    if type(property) == Street:
                        if property.number_of_houses > 0:
                            Logger.log(player.name + " housed property: " + property.name + " / " + format(property.number_of_houses), Logger.ERROR)
        #exit(-1)
        pass
    def player_ran_out_of_time(self, player):
        '''
        Called when a player is removed from the game because
        they ran out of processing time.
        All non-bankrupt players receive this notification.
        player is a Player object.
        No response is required.
        '''
        pass
    def game_over(self, winner, maximum_rounds_played):
        '''
        Called when the game is over.
        All players receive this notification.
        winner is the winning player (a Player object) or None if the
        game was drawn.
        maximum_rounds_played is True if the game went to the round-limit.
        No response is required.
        '''
        pass
    def ai_error(self, message):
        '''
        Called if the return value from any of the Player AI functions
        was invalid. for example, if it was not of the expected type.
        No response is required.
        '''
        pass
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import value as value_module
class SkipValue(value_module.Value):
  """Result value recording that a page was skipped rather than measured."""
  def __init__(self, page, reason, description=None):
    """A value representing a skipped page.

    Args:
      page: The skipped page object.
      reason: The string reason the page was skipped.
    """
    super(SkipValue, self).__init__(page, 'skip', '', True, description, None)
    self._reason = reason
  def __repr__(self):
    return 'SkipValue(%s, %s)' % (self.page.display_name, self._reason)
  @property
  def reason(self):
    return self._reason
  # Skips carry no numeric payload, so every buildbot/chart hook reports None.
  def GetBuildbotDataType(self, output_context):
    return None
  def GetBuildbotValue(self):
    return None
  def GetChartAndTraceNameForPerPageResult(self):
    return None
  def GetRepresentativeNumber(self):
    return None
  def GetRepresentativeString(self):
    return None
  @staticmethod
  def GetJSONTypeName():
    return 'skip'
  def AsDict(self):
    result = super(SkipValue, self).AsDict()
    result['reason'] = self._reason
    return result
  @staticmethod
  def FromDict(value_dict, page_dict):
    kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
    # The generic kwargs describe an arbitrary value; strip what SkipValue's
    # constructor does not accept and inject the skip reason.
    del kwargs['name']
    del kwargs['units']
    kwargs.pop('important', None)
    kwargs.pop('tir_label', None)
    kwargs['reason'] = value_dict['reason']
    return SkipValue(**kwargs)
  @classmethod
  def MergeLikeValuesFromSamePage(cls, values):
    assert False, 'Should not be called.'
  @classmethod
  def MergeLikeValuesFromDifferentPages(cls, values):
    assert False, 'Should not be called.'
#!/usr/bin/env python
#
# Copyright 2004,2006,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import atsc # qa code needs to run without being installed
#from gnuradio import atsc
from atsc_utils import *
import sys
class memoize(object):
    """Wrap a zero-argument callable so it runs at most once.

    The wrapped callable's result is computed on the first call and
    served from the cache on every subsequent call.
    """
    def __init__(self, thunk):
        self.thunk = thunk
        self.cached = False
        self.value = None
    def __call__(self):
        if not self.cached:
            self.value = self.thunk()
            self.cached = True
        return self.value
"""
Make a fake transport stream that's big enough for our purposes.
We generate 8 full fields. This is relatively expensive. It
takes about 2 seconds to execute.
"""
make_transport_stream = \
memoize(lambda : tuple(make_fake_transport_stream_packet(8 * atsc.ATSC_DSEGS_PER_FIELD)))
def pad_transport_stream(src):
    """Expand each 188-byte MPEG transport stream packet to 256 bytes.

    Internally a 256-byte packet is used to help with buffer alignment,
    so the appropriate trailing padding is appended to every packet.
    """
    packet_len = atsc.sizeof_atsc_mpeg_packet
    pad_len = atsc.sizeof_atsc_mpeg_packet_pad
    return pad_stream(src, packet_len, pad_len)
def depad_transport_stream(src):
    """Shrink each 256-byte internal packet back to its 188 payload bytes.

    Inverse of pad_transport_stream: the trailing alignment padding added
    to each packet is removed.
    """
    packet_len = atsc.sizeof_atsc_mpeg_packet
    pad_len = atsc.sizeof_atsc_mpeg_packet_pad
    return depad_stream(src, packet_len, pad_len)
class vector_source_ts(gr.hier_block2):
    """Test source that emits an MPEG transport stream as packet vectors."""
    def __init__(self, ts):
        """Pad transport stream packets to 256 bytes and reformat appropriately.

        @param ts: MPEG transport stream.
        @type ts: sequence of ints in [0,255]; len(ts) % 188 == 0
        """
        byte_source = gr.vector_source_b(pad_transport_stream(ts))
        to_vectors = gr.stream_to_vector(gr.sizeof_char, atsc.sizeof_atsc_mpeg_packet)
        gr.hier_block2.__init__(self, "vector_source_ts",
                                gr.io_signature(0, 0, 0),
                                to_vectors.output_signature())
        self.connect(byte_source, to_vectors, self)
class vector_sink_ts(gr.hier_block2):
    """Test sink that collects packet vectors for later inspection."""
    def __init__(self):
        """Wire a vector-to-stream converter into a byte sink."""
        to_stream = gr.vector_to_stream(gr.sizeof_char, atsc.sizeof_atsc_mpeg_packet)
        self.sink = gr.vector_sink_b()
        gr.hier_block2.__init__(self, "vector_sink_ts",
                                to_stream.input_signature(),
                                gr.io_signature(0, 0, 0))
        self.connect(self, to_stream, self.sink)
    def data(self):
        """Extract the collected transport stream and return it to python.

        Depads transport stream packets from 256 back to 188 bytes.
        @rtype: tuple of ints in [0,255]; len(result) % 188 == 0
        """
        return tuple(depad_transport_stream(self.sink.data()))
class qa_atsc(gr_unittest.TestCase):
    """Loopback tests for successive stages of the ATSC encode/decode chain."""
    def setUp(self):
        self.tb = gr.top_block()
    def tearDown(self):
        self.tb = None
    # The tests are run in alphabetical order
    def test_loopback_000(self):
        """Randomizer feeding straight into the derandomizer."""
        stream = make_transport_stream()
        src = vector_source_ts(stream)
        dst = vector_sink_ts()
        self.tb.connect(src, atsc.randomizer(), atsc.derandomizer(), dst)
        self.tb.run()
        self.assertEqual(stream, dst.data())
    def test_loopback_001(self):
        """Randomizer/RS-encoder against RS-decoder/derandomizer."""
        stream = make_transport_stream()
        src = vector_source_ts(stream)
        dst = vector_sink_ts()
        self.tb.connect(src, atsc.randomizer(), atsc.rs_encoder(),
                        atsc.rs_decoder(), atsc.derandomizer(), dst)
        self.tb.run()
        self.assertEqual(stream, dst.data())
    def test_loopback_002(self):
        """Randomizer/RS/interleaver against deinterleaver/RS/derandomizer."""
        stream = make_transport_stream()
        # The interleaver/deinterleaver pair delays the stream by 52 packets,
        # so compare the shifted output against the matching input prefix.
        delay_bytes = 52 * atsc.ATSC_MPEG_PKT_LENGTH
        src = vector_source_ts(stream)
        dst = vector_sink_ts()
        self.tb.connect(src, atsc.randomizer(), atsc.rs_encoder(), atsc.interleaver(),
                        atsc.deinterleaver(), atsc.rs_decoder(), atsc.derandomizer(), dst)
        self.tb.run()
        received = dst.data()
        self.assertEqual(stream[:len(stream) - delay_bytes],
                         received[delay_bytes:len(received)])
    def test_loopback_003(self):
        """Full chain through the trellis encoder and Viterbi decoder."""
        stream = make_transport_stream()
        # Interleaver (52 packets) plus Viterbi decoder (12 packets) latency.
        delay_bytes = (52 + 12) * atsc.ATSC_MPEG_PKT_LENGTH
        src = vector_source_ts(stream)
        dst = vector_sink_ts()
        self.tb.connect(src, atsc.randomizer(), atsc.rs_encoder(), atsc.interleaver(),
                        atsc.trellis_encoder(), atsc.ds_to_softds(), atsc.viterbi_decoder(),
                        atsc.deinterleaver(), atsc.rs_decoder(), atsc.derandomizer(), dst)
        self.tb.run()
        received = dst.data()
        self.assertEqual(stream[:len(stream) - delay_bytes],
                         received[delay_bytes:len(received)])
if __name__ == '__main__':
    # Run the qa_atsc test case via GNU Radio's unittest wrapper.
    gr_unittest.main()
#******************************************************************************
# MAC path finder - Server part
# equivalent to traceroute but based on MAC address
# Version: 1.0
# Revision history:
# 1.0 - 30/06/2014 : Initial coding (Yannick Castano, Hewlett-Packard)
#
# Pre-requisite: LLDP enabled on all switch-to-switch interfaces
# Management IP address configured on the lowest VLAN interface
#
# Remarks: This script needs more testing
# You can launch server script through scheduler job command
# Any comment/feedback is welcome: castano@hp.com
#******************************************************************************
import socket
import os
import re
import sys
import comware
__author__ = 'Yannick Castano'
#******************************************************************************
# Global variables
#******************************************************************************
SERVERPORT = 50000
CLIENTPORT = 50001
#******************************************************************************
# Procedures
#******************************************************************************
def get_lldp_mgt_ip():
    """Return this switch's LLDP management IP address, or 'none'."""
    output = comware.CLI('display lldp local-information', False).get_output()
    for line in output:
        if 'Management address' in line:
            candidate = line.split()[3]
            # An IPv4 management address contains dots; stop at the
            # first one found.
            if '.' in candidate:
                return candidate
    return 'none'
def find_local_mac_next_if(mac):
    """Return the local interface where *mac* was learned, or 'none'."""
    output = comware.CLI('display mac-address ' + mac, False).get_output()
    # Line 0 echoes the command and line 1 is the table header, so two
    # lines or fewer means the MAC is unknown on this device.
    if len(output) <= 2:
        return 'none'
    fields = output[2].split()
    return fields[3]
def find_local_arp_ip(mac):
    """Return the IP address for *mac* from the local ARP table, or 'none'."""
    output = comware.CLI('display arp | include ' + mac, False).get_output()
    # Only the echoed command comes back when there is no matching entry.
    if len(output) == 1:
        return 'none'
    fields = output[1].split()
    return fields[0]
def find_lldp_neighbor(interface):
    """Return [name, mgmt_ip] of the LLDP peer on *interface*.

    Either entry is 'none' when not advertised. LLDP rule: the lowest
    interface ID with an IP address is used as the management interface
    (loopbacks are excluded).
    """
    command = 'display lldp neighbor-information interface ' + interface + ' verbose'
    output = comware.CLI(command, False).get_output()
    neighbor = ['none', 'none']
    # A single output line is just the echoed command: no neighbor at all.
    if len(output) == 1:
        return neighbor
    for line in output:
        if 'System name' in line:
            neighbor[0] = line.split()[3]
    for line in output:
        if 'Management address' in line:
            token = line.split()[3]
            # Keep only IPv4-looking (dotted) management addresses.
            if '.' in token:
                neighbor[1] = token
    return neighbor
def send_tracemac_request(originhost, neighbor_ip, mac):
    """Forward a trace request for *mac* to the next switch.

    Sends '<originhost> <mac>' as a single UDP datagram to *neighbor_ip*
    on SERVERPORT so that the neighbor continues the trace.
    """
    # sending on UDP port 50000
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect((neighbor_ip, SERVERPORT))
        sock.sendall(originhost + ' ' + mac)
    finally:
        # Always release the socket, even when connect/send raises
        # (the original leaked the descriptor on failure).
        sock.close()
    return
def send_tracemac_response(remotehost, message):
    """Send one result *message* back to the client as a UDP datagram.

    The client listens on CLIENTPORT; each call delivers one line of
    the trace output.
    """
    # sending on UDP port 50001
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect((remotehost, CLIENTPORT))
        sock.sendall(message)
    finally:
        # Always release the socket, even when connect/send raises
        # (the original leaked the descriptor on failure).
        sock.close()
    return
def convert_interface_name(intf):
    """Expand an abbreviated interface name to its full form.

    Currently supports 1G (GE), 10G (XGE) and 40G (FGE) interfaces.
    'XGE' must be tested before 'GE' because it contains it. Any other
    name is returned unchanged -- the original raised UnboundLocalError
    for names matching none of the prefixes.
    """
    if 'XGE' in intf:
        return intf.replace('XGE', 'Ten-GigabitEthernet')
    if 'FGE' in intf:
        return intf.replace('FGE', 'FortyGigE')
    if 'GE' in intf:
        return intf.replace('GE', 'GigabitEthernet')
    return intf
#******************************************************************************
# Main code
#******************************************************************************
# Server loop: bind a UDP socket on the LLDP management address and wait
# for trace requests of the form '<client-ip> <mac-address>'.
localhost = get_lldp_mgt_ip()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((localhost, SERVERPORT))
print 'Listening on ' + localhost
while True:
    # Each datagram carries the originating client and the MAC to trace.
    received_data = sock.recv(1024)
    command_data = received_data.split()
    remotehost = command_data[0]
    mac = command_data[1]
    print 'Sender is: ', remotehost
    print 'Mac to find is: ', mac
    # Try to find if this MAC has a local ARP entry
    ip = find_local_arp_ip(mac.lower())
    if ip != 'none':
        send_tracemac_response (remotehost,'[' + localhost + ']:\t MAC address {} has IP address {}'.format(mac,ip))
    # Find the interface where this MAC has been learned
    intf = find_local_mac_next_if(mac.lower())
    if intf == 'none':
        send_tracemac_response (remotehost,'[' + localhost + ']:\t No corresponding MAC address on this device')
    else:
        interface = convert_interface_name(intf)
        neighbor = find_lldp_neighbor(interface)
        if neighbor[0] == 'none':
            # No LLDP peer on that port: the MAC is directly attached here,
            # so report the final hop and terminate the trace.
            send_tracemac_response (remotehost,'[' + localhost + ']:\t MAC address {} is on {}'.format(mac,interface))
            send_tracemac_response (remotehost,'END')
        else:
            send_tracemac_response (remotehost,'[' + localhost + ']:\t MAC address {} is on {} via switch {} (IP: {})'.format(mac,interface,neighbor[0],neighbor[1]))
            #sending request to the next switch
            send_tracemac_request(remotehost,neighbor[1],mac)
import os
import argparse
from unittest import mock
from OpenSSL import SSL
import pytest
from mitmproxy.tools import cmdline
from mitmproxy import options
from mitmproxy.proxy import ProxyConfig
from mitmproxy.proxy.server import DummyServer, ProxyServer, ConnectionHandler
from mitmproxy.proxy import config
from mitmproxy.test import tutils
from ..conftest import skip_windows
class MockParser(argparse.ArgumentParser):
    """An ArgumentParser whose errors raise instead of exiting.

    argparse.ArgumentParser calls sys.exit() on a parse error by
    default; raising an exception keeps the failure observable and
    assertable from within tests.
    """

    def error(self, message):
        raise Exception(message)
class TestProcessProxyOptions:
    """Exercise command-line parsing into a ProxyConfig.

    Each test feeds raw argv fragments through the mitmproxy option
    parser and checks that valid combinations build a config while
    invalid ones raise with a helpful message.
    """

    def p(self, *args):
        # Build a (parser, ProxyConfig) pair from raw command-line args.
        parser = MockParser()
        cmdline.common_options(parser)
        args = parser.parse_args(args=args)
        opts = options.Options()
        opts.merge(cmdline.get_common_options(args))
        pconf = config.ProxyConfig(opts)
        return parser, pconf

    def assert_noerr(self, *args):
        # Parse args and assert a truthy config came back.
        m, p = self.p(*args)
        assert p
        return p

    def test_simple(self):
        assert self.p()

    def test_cadir(self):
        with tutils.tmpdir() as cadir:
            self.assert_noerr("--cadir", cadir)

    @mock.patch("mitmproxy.platform.original_addr", None)
    def test_no_transparent(self):
        # Without a platform address resolver, transparent mode is rejected.
        with pytest.raises(Exception, match="Transparent mode not supported"):
            self.p("-T")

    @mock.patch("mitmproxy.platform.original_addr")
    def test_modes(self, _):
        self.assert_noerr("-R", "http://localhost")
        with pytest.raises(Exception, match="expected one argument"):
            self.p("-R")
        with pytest.raises(Exception, match="Invalid server specification"):
            self.p("-R", "reverse")
        self.assert_noerr("-T")
        self.assert_noerr("-U", "http://localhost")
        with pytest.raises(Exception, match="Invalid server specification"):
            self.p("-U", "upstream")
        self.assert_noerr("--upstream-auth", "test:test")
        with pytest.raises(Exception, match="expected one argument"):
            self.p("--upstream-auth")
        # Reverse (-R) and transparent (-T) modes cannot be combined.
        with pytest.raises(Exception, match="mutually exclusive"):
            self.p("-R", "http://localhost", "-T")

    def test_client_certs(self):
        with tutils.tmpdir() as cadir:
            self.assert_noerr("--client-certs", cadir)
        self.assert_noerr(
            "--client-certs",
            os.path.join(tutils.test_data.path("mitmproxy/data/clientcert"), "client.pem"))
        with pytest.raises(Exception, match="path does not exist"):
            self.p("--client-certs", "nonexistent")

    def test_certs(self):
        self.assert_noerr(
            "--cert",
            tutils.test_data.path("mitmproxy/data/testkey.pem"))
        with pytest.raises(Exception, match="does not exist"):
            self.p("--cert", "nonexistent")

    def test_insecure(self):
        p = self.assert_noerr("--insecure")
        assert p.openssl_verification_mode_server == SSL.VERIFY_NONE

    def test_upstream_trusted_cadir(self):
        expected_dir = "/path/to/a/ca/dir"
        p = self.assert_noerr("--upstream-trusted-cadir", expected_dir)
        assert p.options.ssl_verify_upstream_trusted_cadir == expected_dir

    def test_upstream_trusted_ca(self):
        expected_file = "/path/to/a/cert/file"
        p = self.assert_noerr("--upstream-trusted-ca", expected_file)
        assert p.options.ssl_verify_upstream_trusted_ca == expected_file
class TestProxyServer:
    """Start-up failures of ProxyServer must surface as clear errors."""

    @skip_windows
    def test_err(self):
        # Binding to port 1 requires privileges on POSIX, but works
        # without special permissions on Windows, hence the skip.
        conf = ProxyConfig(options.Options(listen_port=1))
        with pytest.raises(Exception, match="Error starting proxy server"):
            ProxyServer(conf)

    def test_err_2(self):
        bad_host = "256.256.256.256"
        conf = ProxyConfig(options.Options(listen_host=bad_host))
        with pytest.raises(Exception, match="Error starting proxy server"):
            ProxyServer(conf)
class TestDummyServer:
    """DummyServer must accept the whole server interface as no-ops."""

    def test_simple(self):
        server = DummyServer(None)
        server.set_channel(None)
        server.shutdown()
class TestConnectionHandler:
    """A crash inside the root layer must be reported, not swallowed."""

    def test_fatal_error(self, capsys):
        config = mock.Mock()
        # Make the root layer blow up as soon as it is invoked.
        root_layer = mock.Mock(side_effect=RuntimeError)
        config.options.mode.return_value = root_layer
        channel = mock.Mock()
        # channel.ask(prompt, value) normally consults addons; just echo
        # the value straight back instead.
        channel.ask = lambda _, x: x
        handler = ConnectionHandler(
            mock.MagicMock(),
            ("127.0.0.1", 8080),
            config,
            channel
        )
        handler.handle()
        _, err = capsys.readouterr()
        assert "mitmproxy has crashed" in err
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import time
from datetime import datetime
from openerp.tools.translate import _
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class stock_move(osv.osv):
    """Extend stock.move with a reverse link to its dependent moves."""
    _inherit = 'stock.move'

    _columns = {
        # Inverse of move_dest_id: all moves that list this one as their
        # destination, used to walk the procurement chain downstream.
        'move_dest_id_lines': fields.one2many('stock.move','move_dest_id', 'Children Moves')
    }
class mrp_production_workcenter_line(osv.osv):
    """Work order line: one operation on one work center of a production order.

    Adds scheduling dates, a state machine driven by workflow signals, and
    propagation of state changes back to the parent production order.
    """

    def _get_date_end(self, cr, uid, ids, field_name, arg, context=None):
        """ Finds ending date.
        @return: Dictionary of values.
        """
        ops = self.browse(cr, uid, ids, context=context)
        # Resolve all (start, duration, calendar) triples in one batch call.
        date_and_hours_by_cal = [(op.date_planned, op.hour, op.workcenter_id.calendar_id.id) for op in ops if op.date_planned]
        intervals = self.pool.get('resource.calendar').interval_get_multi(cr, uid, date_and_hours_by_cal)
        res = {}
        for op in ops:
            res[op.id] = False
            if op.date_planned:
                i = intervals.get((op.date_planned, op.hour, op.workcenter_id.calendar_id.id))
                if i:
                    res[op.id] = i[-1][1].strftime('%Y-%m-%d %H:%M:%S')
                else:
                    # No working interval found: fall back to the planned start.
                    res[op.id] = op.date_planned
        return res

    def onchange_production_id(self, cr, uid, ids, production_id, context=None):
        # Mirror product/qty/uom from the selected production order onto
        # the work order line.
        if not production_id:
            return {}
        production = self.pool.get('mrp.production').browse(cr, uid, production_id, context=None)
        result = {
            'product': production.product_id.id,
            'qty': production.product_qty,
            'uom': production.product_uom.id,
        }
        return {'value': result}

    _inherit = 'mrp.production.workcenter.line'
    _order = "sequence, date_planned"

    _columns = {
        'state': fields.selection([('draft','Draft'),('cancel','Cancelled'),('pause','Pending'),('startworking', 'In Progress'),('done','Finished')],'Status', readonly=True, copy=False,
                                  help="* When a work order is created it is set in 'Draft' status.\n" \
                                       "* When user sets work order in start mode that time it will be set in 'In Progress' status.\n" \
                                       "* When work order is in running mode, during that time if user wants to stop or to make changes in order then can set in 'Pending' status.\n" \
                                       "* When the user cancels the work order it will be set in 'Canceled' status.\n" \
                                       "* When order is completely processed that time it is set in 'Finished' status."),
        'date_planned': fields.datetime('Scheduled Date', select=True),
        # Computed from date_planned + hour through the work center calendar.
        'date_planned_end': fields.function(_get_date_end, string='End Date', type='datetime'),
        'date_start': fields.datetime('Start Date'),
        'date_finished': fields.datetime('End Date'),
        'delay': fields.float('Working Hours',help="The elapsed time between operation start and stop in this Work Center",readonly=True),
        'production_state':fields.related('production_id','state',
                type='selection',
                selection=[('draft','Draft'),('confirmed','Waiting Goods'),('ready','Ready to Produce'),('in_production','In Production'),('cancel','Canceled'),('done','Done')],
                string='Production Status', readonly=True),
        'product':fields.related('production_id','product_id',type='many2one',relation='product.product',string='Product',
            readonly=True),
        'qty':fields.related('production_id','product_qty',type='float',string='Qty',readonly=True, store=True),
        'uom':fields.related('production_id','product_uom',type='many2one',relation='product.uom',string='Unit of Measure',readonly=True),
    }

    _defaults = {
        'state': 'draft',
        'delay': 0.0,
        'production_state': 'draft'
    }

    def modify_production_order_state(self, cr, uid, ids, action):
        """ Modifies production order state if work order state is changed.
        @param action: Action to perform ('start' or anything else = finish).
        @return: Nothing
        """
        prod_obj_pool = self.pool.get('mrp.production')
        oper_obj = self.browse(cr, uid, ids)[0]
        prod_obj = oper_obj.production_id
        if action == 'start':
            if prod_obj.state =='confirmed':
                prod_obj_pool.force_production(cr, uid, [prod_obj.id])
                prod_obj_pool.signal_workflow(cr, uid, [prod_obj.id], 'button_produce')
            elif prod_obj.state =='ready':
                prod_obj_pool.signal_workflow(cr, uid, [prod_obj.id], 'button_produce')
            elif prod_obj.state =='in_production':
                return
            else:
                raise osv.except_osv(_('Error!'),_('Manufacturing order cannot be started in state "%s"!') % (prod_obj.state,))
        else:
            # Finish path: close the production order only once every one of
            # its work order lines is done.
            open_count = self.search_count(cr,uid,[('production_id','=',prod_obj.id), ('state', '!=', 'done')])
            flag = not bool(open_count)
            if flag:
                for production in prod_obj_pool.browse(cr, uid, [prod_obj.id], context= None):
                    if production.move_lines or production.move_created_ids:
                        prod_obj_pool.action_produce(cr,uid, production.id, production.product_qty, 'consume_produce', context = None)
                prod_obj_pool.signal_workflow(cr, uid, [oper_obj.production_id.id], 'button_produce_done')
        return

    def write(self, cr, uid, ids, vals, context=None, update=True):
        # Rescheduling a line may pull the production order's start date
        # earlier; `update=False` lets callers suppress that propagation.
        result = super(mrp_production_workcenter_line, self).write(cr, uid, ids, vals, context=context)
        prod_obj = self.pool.get('mrp.production')
        if vals.get('date_planned', False) and update:
            for prod in self.browse(cr, uid, ids, context=context):
                if prod.production_id.workcenter_lines:
                    dstart = min(vals['date_planned'], prod.production_id.workcenter_lines[0]['date_planned'])
                    prod_obj.write(cr, uid, [prod.production_id.id], {'date_start':dstart}, context=context, mini=False)
        return result

    def action_draft(self, cr, uid, ids, context=None):
        """ Sets state to draft.
        @return: True
        """
        return self.write(cr, uid, ids, {'state': 'draft'}, context=context)

    def action_start_working(self, cr, uid, ids, context=None):
        """ Sets state to start working and writes starting date.
        @return: True
        """
        self.modify_production_order_state(cr, uid, ids, 'start')
        self.write(cr, uid, ids, {'state':'startworking', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
        return True

    def action_done(self, cr, uid, ids, context=None):
        """ Sets state to done, writes finish date and calculates delay.
        @return: True
        """
        delay = 0.0
        date_now = time.strftime('%Y-%m-%d %H:%M:%S')
        obj_line = self.browse(cr, uid, ids[0])

        date_start = datetime.strptime(obj_line.date_start,'%Y-%m-%d %H:%M:%S')
        date_finished = datetime.strptime(date_now,'%Y-%m-%d %H:%M:%S')
        # Elapsed wall-clock time expressed in hours.
        delay += (date_finished-date_start).days * 24
        delay += (date_finished-date_start).seconds / float(60*60)

        self.write(cr, uid, ids, {'state':'done', 'date_finished': date_now,'delay':delay}, context=context)
        self.modify_production_order_state(cr,uid,ids,'done')
        return True

    def action_cancel(self, cr, uid, ids, context=None):
        """ Sets state to cancel.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'cancel'}, context=context)

    def action_pause(self, cr, uid, ids, context=None):
        """ Sets state to pause.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'pause'}, context=context)

    def action_resume(self, cr, uid, ids, context=None):
        """ Sets state to startworking.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'startworking'}, context=context)
class mrp_production(osv.osv):
    """Extend mrp.production with work-order-aware scheduling.

    Rescheduling an order pushes dependent orders into the past or the
    future through the stock move chain (unless 'Free Serialisation'
    is enabled on the order).
    """
    _inherit = 'mrp.production'

    _columns = {
        'allow_reorder': fields.boolean('Free Serialisation', help="Check this to be able to move independently all production orders, without moving dependent ones."),
    }

    def _production_date_end(self, cr, uid, ids, prop, unknow_none, context=None):
        """ Calculates planned end date of production order.
        @return: Dictionary of values
        """
        result = {}
        for prod in self.browse(cr, uid, ids, context=context):
            result[prod.id] = prod.date_planned
            # The order ends when its latest work order line ends.
            for line in prod.workcenter_lines:
                result[prod.id] = max(line.date_planned_end, result[prod.id])
        return result

    def action_production_end(self, cr, uid, ids, context=None):
        """ Finishes work order if production order is done.
        @return: Super method
        """
        obj = self.browse(cr, uid, ids, context=context)[0]
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        for workcenter_line in obj.workcenter_lines:
            if workcenter_line.state == 'draft':
                # Never-started lines are fast-forwarded through the workflow.
                workcenter_line.signal_workflow('button_start_working')
            workcenter_line.signal_workflow('button_done')
        return super(mrp_production,self).action_production_end(cr, uid, ids, context=context)

    def action_in_production(self, cr, uid, ids, context=None):
        """ Changes state to In Production and writes starting date.
        @return: True
        """
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        for prod in self.browse(cr, uid, ids):
            if prod.workcenter_lines:
                # Kick off the first work order line together with the order.
                workcenter_pool.signal_workflow(cr, uid, [prod.workcenter_lines[0].id], 'button_start_working')
        return super(mrp_production,self).action_in_production(cr, uid, ids, context=context)

    def action_cancel(self, cr, uid, ids, context=None):
        """ Cancels work order if production order is canceled.
        @return: Super method
        """
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        obj = self.browse(cr, uid, ids,context=context)[0]
        workcenter_pool.signal_workflow(cr, uid, [record.id for record in obj.workcenter_lines], 'button_cancel')
        return super(mrp_production,self).action_cancel(cr,uid,ids,context=context)

    def _compute_planned_workcenter(self, cr, uid, ids, context=None, mini=False):
        """ Computes planned and finished dates for work order.
        @param mini: when True, force-reschedule every line even if its
                     planned date is already later than the running end.
        @return: Calculated date
        """
        dt_end = datetime.now()
        if context is None:
            context = {}
        for po in self.browse(cr, uid, ids, context=context):
            dt_end = datetime.strptime(po.date_planned, '%Y-%m-%d %H:%M:%S')
            if not po.date_start:
                self.write(cr, uid, [po.id], {
                    'date_start': po.date_planned
                }, context=context, update=False)
            old = None
            for wci in range(len(po.workcenter_lines)):
                wc = po.workcenter_lines[wci]
                # Lines with a strictly greater sequence start after the
                # previous group finished; equal sequences run in parallel.
                if (old is None) or (wc.sequence>old):
                    dt = dt_end
                if context.get('__last_update'):
                    del context['__last_update']
                if (wc.date_planned < dt.strftime('%Y-%m-%d %H:%M:%S')) or mini:
                    self.pool.get('mrp.production.workcenter.line').write(cr, uid, [wc.id], {
                        'date_planned': dt.strftime('%Y-%m-%d %H:%M:%S')
                    }, context=context, update=False)
                    i = self.pool.get('resource.calendar').interval_get(
                        cr,
                        uid,
                        #passing False makes resource_resource._schedule_hours run 1000 iterations doing nothing
                        wc.workcenter_id.calendar_id and wc.workcenter_id.calendar_id.id or None,
                        dt,
                        wc.hour or 0.0
                    )
                    if i:
                        dt_end = max(dt_end, i[-1][1])
                else:
                    dt_end = datetime.strptime(wc.date_planned_end, '%Y-%m-%d %H:%M:%S')
                old = wc.sequence or 0
            super(mrp_production, self).write(cr, uid, [po.id], {
                'date_finished': dt_end
            })
        return dt_end

    def _move_pass(self, cr, uid, ids, context=None):
        """ Calculates start date for stock moves finding interval from resource calendar.
        @return: True
        """
        for po in self.browse(cr, uid, ids, context=context):
            if po.allow_reorder:
                continue
            # Breadth-first walk of the downstream move chain.
            todo = list(po.move_lines)
            dt = datetime.strptime(po.date_start,'%Y-%m-%d %H:%M:%S')
            while todo:
                l = todo.pop(0)
                if l.state in ('done','cancel','draft'):
                    continue
                todo += l.move_dest_id_lines
                date_end = l.production_id.date_finished
                if date_end and datetime.strptime(date_end, '%Y-%m-%d %H:%M:%S') > dt:
                    if l.production_id.state not in ('done','cancel'):
                        for wc in l.production_id.workcenter_lines:
                            i = self.pool.get('resource.calendar').interval_min_get(
                                cr,
                                uid,
                                wc.workcenter_id.calendar_id.id or False,
                                dt, wc.hour or 0.0
                            )
                            dt = i[0][0]
                        if l.production_id.date_start > dt.strftime('%Y-%m-%d %H:%M:%S'):
                            self.write(cr, uid, [l.production_id.id], {'date_start':dt.strftime('%Y-%m-%d %H:%M:%S')}, mini=True)
        return True

    def _move_futur(self, cr, uid, ids, context=None):
        """ Calculates start date for stock moves.
        @return: True
        """
        for po in self.browse(cr, uid, ids, context=context):
            if po.allow_reorder:
                continue
            for line in po.move_created_ids:
                l = line
                # Follow the destination chain until a consuming production
                # order is found that would start before this one finishes.
                while l.move_dest_id:
                    l = l.move_dest_id
                    if l.state in ('done','cancel','draft'):
                        break
                    if l.production_id.state in ('done','cancel'):
                        break
                    if l.production_id and (l.production_id.date_start < po.date_finished):
                        self.write(cr, uid, [l.production_id.id], {'date_start': po.date_finished})
                        break
        return True

    def write(self, cr, uid, ids, vals, context=None, update=True, mini=True):
        direction = {}
        if vals.get('date_start', False):
            # Remember for each record whether the start moved earlier (1)
            # or later (-1) so dependent orders can be shifted accordingly.
            for po in self.browse(cr, uid, ids, context=context):
                direction[po.id] = cmp(po.date_start, vals.get('date_start', False))
        result = super(mrp_production, self).write(cr, uid, ids, vals, context=context)
        if (vals.get('workcenter_lines', False) or vals.get('date_start', False) or vals.get('date_planned', False)) and update:
            self._compute_planned_workcenter(cr, uid, ids, context=context, mini=mini)
        for d in direction:
            if direction[d] == 1:
                # the production order has been moved to the past
                self._move_pass(cr, uid, [d], context=context)
                pass
            elif direction[d] == -1:
                self._move_futur(cr, uid, [d], context=context)
                # the production order has been moved to the future
                pass
        return result

    def action_compute(self, cr, uid, ids, properties=None, context=None):
        """ Computes bills of material of a product and planned date of work order.
        @param properties: List containing dictionaries of properties.
        @return: No. of products.
        """
        result = super(mrp_production, self).action_compute(cr, uid, ids, properties=properties, context=context)
        self._compute_planned_workcenter(cr, uid, ids, context=context)
        return result
class mrp_operations_operation_code(osv.osv):
    """Dictionary of operation codes (start/pause/resume/cancel/done)."""
    _name="mrp_operations.operation.code"

    _columns={
        'name': fields.char('Operation Name', required=True),
        'code': fields.char('Code', size=16, required=True),
        # Which state transition this code triggers on a work order.
        'start_stop': fields.selection([('start','Start'),('pause','Pause'),('resume','Resume'),('cancel','Cancelled'),('done','Done')], 'Status', required=True),
    }
class mrp_operations_operation(osv.osv):
    """Journal of coded operations performed on work order lines.

    Creating/writing an operation record validates the state transition
    and drives the matching work order line's workflow.
    """
    _name="mrp_operations.operation"

    def _order_date_search_production(self, cr, uid, ids, context=None):
        """ Finds operations for a production order.
        @return: List of ids
        """
        operation_ids = self.pool.get('mrp_operations.operation').search(cr, uid, [('production_id','=',ids[0])], context=context)
        return operation_ids

    def _get_order_date(self, cr, uid, ids, field_name, arg, context=None):
        """ Calculates planned date for an operation.
        @return: Dictionary of values
        """
        res={}
        operation_obj = self.browse(cr, uid, ids, context=context)
        for operation in operation_obj:
            res[operation.id] = operation.production_id.date_planned
        return res

    def calc_delay(self, cr, uid, vals):
        """ Calculates delay of work order.
        @return: Delay in hours
        """
        code_lst = []
        time_lst = []

        code_ids = self.pool.get('mrp_operations.operation.code').search(cr, uid, [('id','=',vals['code_id'])])
        code = self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids)[0]

        oper_ids = self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
        oper_objs = self.browse(cr,uid,oper_ids)

        # Build the chronological list of (action, timestamp) pairs,
        # including the operation being recorded now.
        for oper in oper_objs:
            code_lst.append(oper.code_id.start_stop)
            time_lst.append(oper.date_start)

        code_lst.append(code.start_stop)
        time_lst.append(vals['date_start'])
        diff = 0
        # Sum only running intervals: a pause/done/cancel directly
        # preceded by a start/resume.
        for i in range(0,len(code_lst)):
            if code_lst[i] == 'pause' or code_lst[i] == 'done' or code_lst[i] == 'cancel':
                if not i: continue
                if code_lst[i-1] not in ('resume','start'):
                    continue
                a = datetime.strptime(time_lst[i-1],'%Y-%m-%d %H:%M:%S')
                b = datetime.strptime(time_lst[i],'%Y-%m-%d %H:%M:%S')
                diff += (b-a).days * 24
                diff += (b-a).seconds / float(60*60)
        return diff

    def check_operation(self, cr, uid, vals):
        """ Finds which operation is called ie. start, pause, done, cancel.
        @param vals: Dictionary of values.
        @return: True or False
        """
        code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
        code=self.pool.get('mrp_operations.operation.code').browse(cr,uid,code_ids)[0]
        code_lst = []
        oper_ids=self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
        oper_objs=self.browse(cr,uid,oper_ids)

        if not oper_objs:
            if code.start_stop!='start':
                raise osv.except_osv(_('Sorry!'),_('Operation is not started yet!'))
                # NOTE: the `return False` lines after each raise are
                # unreachable; the exception aborts first.
                return False
        else:
            for oper in oper_objs:
                code_lst.append(oper.code_id.start_stop)
            if code.start_stop=='start':
                if 'start' in code_lst:
                    raise osv.except_osv(_('Sorry!'),_('Operation has already started! You can either Pause/Finish/Cancel the operation.'))
                    return False
            if code.start_stop=='pause':
                if code_lst[len(code_lst)-1]!='resume' and code_lst[len(code_lst)-1]!='start':
                    raise osv.except_osv(_('Error!'),_('In order to Pause the operation, it must be in the Start or Resume state!'))
                    return False
            if code.start_stop=='resume':
                if code_lst[len(code_lst)-1]!='pause':
                    raise osv.except_osv(_('Error!'),_('In order to Resume the operation, it must be in the Pause state!'))
                    return False

            if code.start_stop=='done':
                if code_lst[len(code_lst)-1]!='start' and code_lst[len(code_lst)-1]!='resume':
                    raise osv.except_osv(_('Sorry!'),_('In order to Finish the operation, it must be in the Start or Resume state!'))
                    return False
                if 'cancel' in code_lst:
                    raise osv.except_osv(_('Sorry!'),_('Operation is Already Cancelled!'))
                    return False
            if code.start_stop=='cancel':
                if not 'start' in code_lst :
                    raise osv.except_osv(_('Error!'),_('No operation to cancel.'))
                    return False
                if 'done' in code_lst:
                    raise osv.except_osv(_('Error!'),_('Operation is already finished!'))
                    return False
        return True

    def write(self, cr, uid, ids, vals, context=None):
        # Pin production/workcenter from the first record so the state
        # check and delay computation see a complete vals dict.
        oper_objs = self.browse(cr, uid, ids, context=context)[0]
        vals['production_id']=oper_objs.production_id.id
        vals['workcenter_id']=oper_objs.workcenter_id.id

        if 'code_id' in vals:
            self.check_operation(cr, uid, vals)

        if 'date_start' in vals:
            vals['date_start']=vals['date_start']
            vals['code_id']=oper_objs.code_id.id
            delay=self.calc_delay(cr, uid, vals)
            wc_op_id=self.pool.get('mrp.production.workcenter.line').search(cr,uid,[('workcenter_id','=',vals['workcenter_id']),('production_id','=',vals['production_id'])])
            self.pool.get('mrp.production.workcenter.line').write(cr,uid,wc_op_id,{'delay':delay})

        return super(mrp_operations_operation, self).write(cr, uid, ids, vals, context=context)

    def create(self, cr, uid, vals, context=None):
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
        code=self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids, context=context)[0]
        wc_op_id=workcenter_pool.search(cr,uid,[('workcenter_id','=',vals['workcenter_id']),('production_id','=',vals['production_id'])])
        if code.start_stop in ('start','done','pause','cancel','resume'):
            if not wc_op_id:
                # Create the work order line on the fly if none exists yet
                # for this production/workcenter pair.
                production_obj=self.pool.get('mrp.production').browse(cr, uid, vals['production_id'], context=context)
                wc_op_id.append(workcenter_pool.create(cr,uid,{'production_id':vals['production_id'],'name':production_obj.product_id.name,'workcenter_id':vals['workcenter_id']}))
            if code.start_stop=='start':
                workcenter_pool.action_start_working(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_start_working')

            if code.start_stop=='done':
                workcenter_pool.action_done(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_done')
                self.pool.get('mrp.production').write(cr,uid,vals['production_id'],{'date_finished':datetime.now().strftime('%Y-%m-%d %H:%M:%S')})

            if code.start_stop=='pause':
                workcenter_pool.action_pause(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_pause')

            if code.start_stop=='resume':
                workcenter_pool.action_resume(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_resume')

            if code.start_stop=='cancel':
                workcenter_pool.action_cancel(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_cancel')

        if not self.check_operation(cr, uid, vals):
            return
        delay=self.calc_delay(cr, uid, vals)
        line_vals = {}
        line_vals['delay'] = delay
        if vals.get('date_start',False):
            if code.start_stop == 'done':
                line_vals['date_finished'] = vals['date_start']
            elif code.start_stop == 'start':
                line_vals['date_start'] = vals['date_start']
        self.pool.get('mrp.production.workcenter.line').write(cr, uid, wc_op_id, line_vals, context=context)

        return super(mrp_operations_operation, self).create(cr, uid, vals, context=context)

    def initialize_workflow_instance(self, cr, uid, context=None):
        # Bootstrap a workflow instance for every existing work order line
        # (used when installing this module on a populated database).
        mrp_production_workcenter_line = self.pool.get('mrp.production.workcenter.line')
        line_ids = mrp_production_workcenter_line.search(cr, uid, [], context=context)
        mrp_production_workcenter_line.create_workflow(cr, uid, line_ids)
        return True

    _columns={
        'production_id':fields.many2one('mrp.production','Production',required=True),
        'workcenter_id':fields.many2one('mrp.workcenter','Work Center',required=True),
        'code_id':fields.many2one('mrp_operations.operation.code','Code',required=True),
        'date_start': fields.datetime('Start Date'),
        'date_finished': fields.datetime('End Date'),
        'order_date': fields.function(_get_order_date,string='Order Date',type='date',store={'mrp.production':(_order_date_search_production,['date_planned'], 10)}),
    }

    _defaults={
        'date_start': lambda *a:datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Block split point selection utilities. */
#ifndef BROTLI_ENC_BLOCK_SPLITTER_H_
#define BROTLI_ENC_BLOCK_SPLITTER_H_
#include "../common/platform.h"
#include "command.h"
#include "memory.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/* Result of a block-splitting pass: the input is partitioned into
   num_blocks contiguous blocks, each tagged with a (reusable) type id. */
typedef struct BlockSplit {
  size_t num_types;  /* Amount of distinct types */
  size_t num_blocks;  /* Amount of values in types and length */
  uint8_t* types;     /* Per-block type id (num_blocks entries). */
  uint32_t* lengths;  /* Per-block length (num_blocks entries). */
  size_t types_alloc_size;    /* Allocated capacity of |types|. */
  size_t lengths_alloc_size;  /* Allocated capacity of |lengths|. */
} BlockSplit;
/* Initializes |self|; buffers start unallocated (see block_splitter.c). */
BROTLI_INTERNAL void BrotliInitBlockSplit(BlockSplit* self);
/* Releases the buffers owned by |self| through memory manager |m|. */
BROTLI_INTERNAL void BrotliDestroyBlockSplit(MemoryManager* m,
                                             BlockSplit* self);
/* Computes block splits for the literal, insert-and-copy and distance
   symbol streams of the given command sequence.
   NOTE(review): |offset|/|mask| presumably address |data| as a ring
   buffer, per the usual encoder convention -- confirm in
   block_splitter.c. */
BROTLI_INTERNAL void BrotliSplitBlock(MemoryManager* m,
                                      const Command* cmds,
                                      const size_t num_commands,
                                      const uint8_t* data,
                                      const size_t offset,
                                      const size_t mask,
                                      const BrotliEncoderParams* params,
                                      BlockSplit* literal_split,
                                      BlockSplit* insert_and_copy_split,
                                      BlockSplit* dist_split);
#if defined(__cplusplus) || defined(c_plusplus)
} /* extern "C" */
#endif
#endif /* BROTLI_ENC_BLOCK_SPLITTER_H_ */ | c | github | https://github.com/nodejs/node | deps/brotli/c/enc/block_splitter.h |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define, and
the use of the environment during regeneration when the gyp file changes.
"""
import os
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])

try:
    # The define must be in the environment while gyp runs so it gets baked
    # into the generated build files (and recorded for auto-regeneration).
    os.environ['GYP_DEFINES'] = 'value=50'
    test.run_gyp('defines.gyp')
finally:
    # We clear the environ after calling gyp. When the auto-regeneration happens,
    # the same define should be reused anyway. Reset to empty string first in
    # case the platform doesn't support unsetenv.
    os.environ['GYP_DEFINES'] = ''
    del os.environ['GYP_DEFINES']

test.build('defines.gyp')

# Output expected from the executable built with the original gyp file.
expect = """\
FOO is defined
VALUE is 1
2*PAREN_VALUE is 12
HASH_VALUE is a#1
"""
test.run_built_executable('defines', stdout=expect)

# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('defines.gyp', test.read('defines-env.gyp'))
# Rebuild: regeneration must reuse the GYP_DEFINES captured earlier even
# though the variable no longer exists in this process's environment.
test.build('defines.gyp', test.ALL)

expect = """\
VALUE is 50
"""
test.run_built_executable('defines', stdout=expect)

test.pass_test()
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import stat
import tempfile
import multiprocessing
import time
import warnings
# True when the optional passlib library is importable; do_encrypt()
# requires it for password hashing.
PASSLIB_AVAILABLE = False
try:
    import passlib.hash
    PASSLIB_AVAILABLE = True
except ImportError:
    # passlib is optional; was a bare except, which also swallowed
    # KeyboardInterrupt/SystemExit -- only a missing module is expected here.
    pass
# Prefer the Display instance created by the ansible CLI entry point;
# fall back to a fresh one when running outside the CLI (e.g. unit tests).
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
# True when python-keyczar imported cleanly; required for accelerated mode.
KEYCZAR_AVAILABLE=False
try:
    try:
        # some versions of pycrypto may not have this?
        from Crypto.pct_warnings import PowmInsecureWarning
    except ImportError:
        PowmInsecureWarning = RuntimeWarning

    # Import keyczar with PowmInsecureWarning promoted to an error so a
    # vulnerable gmp/pycrypto combination is detected and reported once.
    with warnings.catch_warnings(record=True) as warning_handler:
        warnings.simplefilter("error", PowmInsecureWarning)
        try:
            import keyczar.errors as key_errors
            from keyczar.keys import AesKey
        except PowmInsecureWarning:
            display.system_warning(
                "The version of gmp you have installed has a known issue regarding " + \
                "timing vulnerabilities when used with pycrypto. " + \
                "If possible, you should update it (i.e. yum update gmp)."
            )
            # Retry the import with the warning suppressed instead of fatal.
            warnings.resetwarnings()
            warnings.simplefilter("ignore")
            import keyczar.errors as key_errors
            from keyczar.keys import AesKey
    KEYCZAR_AVAILABLE=True
except ImportError:
    pass
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_text, to_bytes
# Public API of this module.
__all__ = ['do_encrypt']

# Serializes on-disk key/directory creation across forked workers.
_LOCK = multiprocessing.Lock()
def do_encrypt(result, encrypt, salt_size=None, salt=None):
    """Hash ``result`` with the passlib algorithm named ``encrypt``.

    :arg result: plaintext value to hash.
    :arg encrypt: passlib hash scheme name (e.g. ``sha512_crypt``).
    :kwarg salt_size: optional salt length forwarded to passlib.
    :kwarg salt: optional explicit salt (only used when salt_size is falsy).
    :returns: the hash as a text string.
    :raises AnsibleError: if passlib is missing or the scheme is unknown.
    """
    if not PASSLIB_AVAILABLE:
        raise AnsibleError("passlib must be installed to encrypt vars_prompt values")

    try:
        crypt = getattr(passlib.hash, encrypt)
    except AttributeError:
        # Was a bare except; only a missing attribute means an unknown
        # algorithm name -- anything else should propagate.
        raise AnsibleError("passlib does not support '%s' algorithm" % encrypt)

    # NOTE(review): passlib >= 1.7 renames encrypt() to hash(); encrypt()
    # is kept here for compatibility with older passlib releases.
    if salt_size:
        result = crypt.encrypt(result, salt_size=salt_size)
    elif salt:
        # Schemes differ on whether the salt must be bytes or text.
        if crypt._salt_is_bytes:
            salt = to_bytes(salt, encoding='ascii', errors='strict')
        else:
            salt = to_text(salt, encoding='ascii', errors='strict')
        result = crypt.encrypt(result, salt=salt)
    else:
        result = crypt.encrypt(result)

    # Hashes from passlib.hash should be represented as ascii strings of hex
    # digits so this should not traceback. If it's not representable as such
    # we need to traceback and then blacklist such algorithms because it may
    # impact calling code.
    return to_text(result, errors='strict')
def key_for_hostname(hostname):
    """Return the AES key for ``hostname``, generating it when needed.

    Keys are stored one file per host under C.ACCELERATE_KEYS_DIR and are
    rotated every 2 hours. Directory and file permissions are validated
    against configuration; an AnsibleError is raised on any mismatch.
    """
    # fireball mode is an implementation of ansible firing up zeromq via SSH
    # to use no persistent daemons or key management
    if not KEYCZAR_AVAILABLE:
        raise AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")

    key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
    if not os.path.exists(key_path):
        # avoid race with multiple forks trying to create paths on host
        # but limit when locking is needed to creation only
        with(_LOCK):
            if not os.path.exists(key_path):
                # use a temp directory and rename to ensure the directory
                # searched for only appears after permissions applied.
                tmp_dir = tempfile.mkdtemp(dir=os.path.dirname(key_path))
                os.chmod(tmp_dir, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
                os.rename(tmp_dir, key_path)
    elif not os.path.isdir(key_path):
        raise AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')

    if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
        raise AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files '
                           'contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR,
                                                                               int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))

    key_path = os.path.join(key_path, hostname)

    # use new AES keys every 2 hours, which means fireball must not allow running for longer either
    if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
        # avoid race with multiple forks trying to create key
        # but limit when locking is needed to creation only
        with(_LOCK):
            if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
                key = AesKey.Generate()
                # use temp file to ensure file only appears once it has
                # desired contents and permissions
                with tempfile.NamedTemporaryFile(mode='w', dir=os.path.dirname(key_path), delete=False) as fh:
                    tmp_key_path = fh.name
                    fh.write(str(key))
                os.chmod(tmp_key_path, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
                os.rename(tmp_key_path, key_path)
                return key

    if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
        raise AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to '
                           'correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))

    # Fix: read via a context manager so the handle is closed even when
    # AesKey.Read() raises (the original leaked the handle on error).
    with open(key_path) as fh:
        key = AesKey.Read(fh.read())
    return key
def keyczar_encrypt(key, msg):
    """Encrypt text ``msg`` with the given keyczar key; returns ciphertext."""
    raw = msg.encode('utf-8')
    return key.Encrypt(raw)
def keyczar_decrypt(key, msg):
    """Decrypt ``msg`` with the given keyczar key.

    Raises AnsibleError when the ciphertext signature does not verify.
    """
    try:
        plaintext = key.Decrypt(msg)
    except key_errors.InvalidSignatureError:
        raise AnsibleError("decryption failed")
    return plaintext
#pragma once
#include <c10/core/impl/DeviceGuardImplInterface.h>
namespace c10::impl {
/**
* An implementation of DeviceGuardImplInterface which delegates
* to virtual dispatch on the DeviceGuardImpl registry.
*/
class VirtualGuardImpl final : public DeviceGuardImplInterface {
 public:
  // Resolves the concrete implementation for device_type from the global
  // registry once; every method below simply forwards to it.
  VirtualGuardImpl(DeviceType device_type)
      : impl_(getDeviceGuardImpl(device_type)) {}
  // This constructor exists purely for testing
  VirtualGuardImpl(const DeviceGuardImplInterface* impl) : impl_(impl) {}

  // Copying and moving is OK!
  VirtualGuardImpl(const VirtualGuardImpl&) = default;
  VirtualGuardImpl& operator=(const VirtualGuardImpl&) = default;
  VirtualGuardImpl(VirtualGuardImpl&&) noexcept = default;
  VirtualGuardImpl& operator=(VirtualGuardImpl&&) noexcept = default;
  ~VirtualGuardImpl() override = default;

  // Device management: forwarded verbatim to the underlying implementation.
  DeviceType type() const override {
    return impl_->type();
  }
  Device exchangeDevice(Device d) const override {
    return impl_->exchangeDevice(d);
  }
  Device getDevice() const override {
    return impl_->getDevice();
  }
  void setDevice(Device d) const override {
    impl_->setDevice(d);
  }
  void uncheckedSetDevice(Device d) const noexcept override {
    impl_->uncheckedSetDevice(d);
  }

  // Stream management: forwarded verbatim.
  Stream getStream(Device d) const override {
    return impl_->getStream(d);
  }
  Stream getNewStream(Device d, int priority = 0) const override {
    return impl_->getNewStream(d, priority);
  }
  Stream getDefaultStream(Device d) const override {
    return impl_->getDefaultStream(d);
  }
  Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
      const override {
    return impl_->getStreamFromGlobalPool(d, isHighPriority);
  }
  Stream exchangeStream(Stream s) const override {
    return impl_->exchangeStream(s);
  }
  void* getStreamNativeHandle(const Stream s) const override {
    return impl_->getStreamNativeHandle(s);
  }
  DeviceIndex deviceCount() const noexcept override {
    return impl_->deviceCount();
  }
  DeviceCapability getDeviceCapability(Device d) const override {
    return impl_->getDeviceCapability(d);
  }

  // Event functions
  void record(
      void** event,
      const Stream& stream,
      const DeviceIndex device_index,
      const EventFlag flag) const override {
    impl_->record(event, stream, device_index, flag);
  }
  void block(void* event, const Stream& stream) const override {
    impl_->block(event, stream);
  }
  bool queryEvent(void* event) const override {
    return impl_->queryEvent(event);
  }
  void destroyEvent(void* event, const DeviceIndex device_index)
      const noexcept override {
    impl_->destroyEvent(event, device_index);
  }

  // Synchronization queries and waits: forwarded verbatim.
  bool queryStream(const Stream& stream) const override {
    return impl_->queryStream(stream);
  }
  void synchronizeStream(const Stream& stream) const override {
    impl_->synchronizeStream(stream);
  }
  void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
      const override {
    impl_->recordDataPtrOnStream(data_ptr, stream);
  }
  double elapsedTime(void* event1, void* event2, const DeviceIndex device_index)
      const override {
    return impl_->elapsedTime(event1, event2, device_index);
  }
  void synchronizeEvent(void* event) const override {
    impl_->synchronizeEvent(event);
  }
  void synchronizeDevice(const DeviceIndex device_index) const override {
    impl_->synchronizeDevice(device_index);
  }

 private:
  // Non-owning; presumably points at a registered per-device-type singleton
  // (see getDeviceGuardImpl) -- confirm in DeviceGuardImplInterface.h.
  const DeviceGuardImplInterface* impl_ = nullptr;
};
} // namespace c10::impl | c | github | https://github.com/pytorch/pytorch | c10/core/impl/VirtualGuardImpl.h |
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py.
""" # "
import codecs
# ## Codec APIs
class Codec(codecs.Codec):
    """Stateless CP857 codec built on the charmap codec machinery."""

    def encode(self, input, errors='strict'):
        encoded, consumed = codecs.charmap_encode(input, errors, encoding_map)
        return (encoded, consumed)

    def decode(self, input, errors='strict'):
        decoded, consumed = codecs.charmap_decode(input, errors, decoding_table)
        return (decoded, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP857 encoder; charmap encoding keeps no state."""

    def encode(self, input, final=False):
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP857 decoder; charmap decoding keeps no state."""

    def decode(self, input, final=False):
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    # Writer for CP857 streams: encode() comes from Codec, stream plumbing
    # from codecs.StreamWriter.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Reader for CP857 streams: decode() comes from Codec, stream plumbing
    # from codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo registration entry for 'cp857'."""
    stateless_codec = Codec()
    return codecs.CodecInfo(
        name='cp857',
        encode=stateless_codec.encode,
        decode=stateless_codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Start from the identity mapping for all 256 byte values, then override the
# high half (0x80-0xff) with the CP857-specific code points. A value of None
# marks byte values that are undefined in CP857.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
    0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
    0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I
    0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
    0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
    0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
    0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
    0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
    0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
    0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
    0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
    0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
    0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
    0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
    0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE
    0x00a8: 0x00bf, # INVERTED QUESTION MARK
    0x00a9: 0x00ae, # REGISTERED SIGN
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
    0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
    0x00b8: 0x00a9, # COPYRIGHT SIGN
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x00a2, # CENT SIGN
    0x00be: 0x00a5, # YEN SIGN
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
    0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x00a4, # CURRENCY SIGN
    0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR
    0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR
    0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
    0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
    0x00d5: None,   # UNDEFINED
    0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
    0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x00a6, # BROKEN BAR
    0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
    0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
    0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
    0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: None,   # UNDEFINED
    0x00e8: 0x00d7, # MULTIPLICATION SIGN
    0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
    0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
    0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
    0x00ee: 0x00af, # MACRON
    0x00ef: 0x00b4, # ACUTE ACCENT
    0x00f0: 0x00ad, # SOFT HYPHEN
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: None,   # UNDEFINED
    0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
    0x00f4: 0x00b6, # PILCROW SIGN
    0x00f5: 0x00a7, # SECTION SIGN
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x00b8, # CEDILLA
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x00a8, # DIAERESIS
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x00b9, # SUPERSCRIPT ONE
    0x00fc: 0x00b3, # SUPERSCRIPT THREE
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# Full 256-entry decoding table: the index is the CP857 byte value, the
# entry is the corresponding Unicode character. u'\ufffe' marks byte values
# that are undefined in CP857.
decoding_table = (
    u'\x00'     # 0x0000 -> NULL
    u'\x01'     # 0x0001 -> START OF HEADING
    u'\x02'     # 0x0002 -> START OF TEXT
    u'\x03'     # 0x0003 -> END OF TEXT
    u'\x04'     # 0x0004 -> END OF TRANSMISSION
    u'\x05'     # 0x0005 -> ENQUIRY
    u'\x06'     # 0x0006 -> ACKNOWLEDGE
    u'\x07'     # 0x0007 -> BELL
    u'\x08'     # 0x0008 -> BACKSPACE
    u'\t'       # 0x0009 -> HORIZONTAL TABULATION
    u'\n'       # 0x000a -> LINE FEED
    u'\x0b'     # 0x000b -> VERTICAL TABULATION
    u'\x0c'     # 0x000c -> FORM FEED
    u'\r'       # 0x000d -> CARRIAGE RETURN
    u'\x0e'     # 0x000e -> SHIFT OUT
    u'\x0f'     # 0x000f -> SHIFT IN
    u'\x10'     # 0x0010 -> DATA LINK ESCAPE
    u'\x11'     # 0x0011 -> DEVICE CONTROL ONE
    u'\x12'     # 0x0012 -> DEVICE CONTROL TWO
    u'\x13'     # 0x0013 -> DEVICE CONTROL THREE
    u'\x14'     # 0x0014 -> DEVICE CONTROL FOUR
    u'\x15'     # 0x0015 -> NEGATIVE ACKNOWLEDGE
    u'\x16'     # 0x0016 -> SYNCHRONOUS IDLE
    u'\x17'     # 0x0017 -> END OF TRANSMISSION BLOCK
    u'\x18'     # 0x0018 -> CANCEL
    u'\x19'     # 0x0019 -> END OF MEDIUM
    u'\x1a'     # 0x001a -> SUBSTITUTE
    u'\x1b'     # 0x001b -> ESCAPE
    u'\x1c'     # 0x001c -> FILE SEPARATOR
    u'\x1d'     # 0x001d -> GROUP SEPARATOR
    u'\x1e'     # 0x001e -> RECORD SEPARATOR
    u'\x1f'     # 0x001f -> UNIT SEPARATOR
    u' '        # 0x0020 -> SPACE
    u'!'        # 0x0021 -> EXCLAMATION MARK
    u'"'        # 0x0022 -> QUOTATION MARK
    u'#'        # 0x0023 -> NUMBER SIGN
    u'$'        # 0x0024 -> DOLLAR SIGN
    u'%'        # 0x0025 -> PERCENT SIGN
    u'&'        # 0x0026 -> AMPERSAND
    u"'"        # 0x0027 -> APOSTROPHE
    u'('        # 0x0028 -> LEFT PARENTHESIS
    u')'        # 0x0029 -> RIGHT PARENTHESIS
    u'*'        # 0x002a -> ASTERISK
    u'+'        # 0x002b -> PLUS SIGN
    u','        # 0x002c -> COMMA
    u'-'        # 0x002d -> HYPHEN-MINUS
    u'.'        # 0x002e -> FULL STOP
    u'/'        # 0x002f -> SOLIDUS
    u'0'        # 0x0030 -> DIGIT ZERO
    u'1'        # 0x0031 -> DIGIT ONE
    u'2'        # 0x0032 -> DIGIT TWO
    u'3'        # 0x0033 -> DIGIT THREE
    u'4'        # 0x0034 -> DIGIT FOUR
    u'5'        # 0x0035 -> DIGIT FIVE
    u'6'        # 0x0036 -> DIGIT SIX
    u'7'        # 0x0037 -> DIGIT SEVEN
    u'8'        # 0x0038 -> DIGIT EIGHT
    u'9'        # 0x0039 -> DIGIT NINE
    u':'        # 0x003a -> COLON
    u';'        # 0x003b -> SEMICOLON
    u'<'        # 0x003c -> LESS-THAN SIGN
    u'='        # 0x003d -> EQUALS SIGN
    u'>'        # 0x003e -> GREATER-THAN SIGN
    u'?'        # 0x003f -> QUESTION MARK
    u'@'        # 0x0040 -> COMMERCIAL AT
    u'A'        # 0x0041 -> LATIN CAPITAL LETTER A
    u'B'        # 0x0042 -> LATIN CAPITAL LETTER B
    u'C'        # 0x0043 -> LATIN CAPITAL LETTER C
    u'D'        # 0x0044 -> LATIN CAPITAL LETTER D
    u'E'        # 0x0045 -> LATIN CAPITAL LETTER E
    u'F'        # 0x0046 -> LATIN CAPITAL LETTER F
    u'G'        # 0x0047 -> LATIN CAPITAL LETTER G
    u'H'        # 0x0048 -> LATIN CAPITAL LETTER H
    u'I'        # 0x0049 -> LATIN CAPITAL LETTER I
    u'J'        # 0x004a -> LATIN CAPITAL LETTER J
    u'K'        # 0x004b -> LATIN CAPITAL LETTER K
    u'L'        # 0x004c -> LATIN CAPITAL LETTER L
    u'M'        # 0x004d -> LATIN CAPITAL LETTER M
    u'N'        # 0x004e -> LATIN CAPITAL LETTER N
    u'O'        # 0x004f -> LATIN CAPITAL LETTER O
    u'P'        # 0x0050 -> LATIN CAPITAL LETTER P
    u'Q'        # 0x0051 -> LATIN CAPITAL LETTER Q
    u'R'        # 0x0052 -> LATIN CAPITAL LETTER R
    u'S'        # 0x0053 -> LATIN CAPITAL LETTER S
    u'T'        # 0x0054 -> LATIN CAPITAL LETTER T
    u'U'        # 0x0055 -> LATIN CAPITAL LETTER U
    u'V'        # 0x0056 -> LATIN CAPITAL LETTER V
    u'W'        # 0x0057 -> LATIN CAPITAL LETTER W
    u'X'        # 0x0058 -> LATIN CAPITAL LETTER X
    u'Y'        # 0x0059 -> LATIN CAPITAL LETTER Y
    u'Z'        # 0x005a -> LATIN CAPITAL LETTER Z
    u'['        # 0x005b -> LEFT SQUARE BRACKET
    u'\\'       # 0x005c -> REVERSE SOLIDUS
    u']'        # 0x005d -> RIGHT SQUARE BRACKET
    u'^'        # 0x005e -> CIRCUMFLEX ACCENT
    u'_'        # 0x005f -> LOW LINE
    u'`'        # 0x0060 -> GRAVE ACCENT
    u'a'        # 0x0061 -> LATIN SMALL LETTER A
    u'b'        # 0x0062 -> LATIN SMALL LETTER B
    u'c'        # 0x0063 -> LATIN SMALL LETTER C
    u'd'        # 0x0064 -> LATIN SMALL LETTER D
    u'e'        # 0x0065 -> LATIN SMALL LETTER E
    u'f'        # 0x0066 -> LATIN SMALL LETTER F
    u'g'        # 0x0067 -> LATIN SMALL LETTER G
    u'h'        # 0x0068 -> LATIN SMALL LETTER H
    u'i'        # 0x0069 -> LATIN SMALL LETTER I
    u'j'        # 0x006a -> LATIN SMALL LETTER J
    u'k'        # 0x006b -> LATIN SMALL LETTER K
    u'l'        # 0x006c -> LATIN SMALL LETTER L
    u'm'        # 0x006d -> LATIN SMALL LETTER M
    u'n'        # 0x006e -> LATIN SMALL LETTER N
    u'o'        # 0x006f -> LATIN SMALL LETTER O
    u'p'        # 0x0070 -> LATIN SMALL LETTER P
    u'q'        # 0x0071 -> LATIN SMALL LETTER Q
    u'r'        # 0x0072 -> LATIN SMALL LETTER R
    u's'        # 0x0073 -> LATIN SMALL LETTER S
    u't'        # 0x0074 -> LATIN SMALL LETTER T
    u'u'        # 0x0075 -> LATIN SMALL LETTER U
    u'v'        # 0x0076 -> LATIN SMALL LETTER V
    u'w'        # 0x0077 -> LATIN SMALL LETTER W
    u'x'        # 0x0078 -> LATIN SMALL LETTER X
    u'y'        # 0x0079 -> LATIN SMALL LETTER Y
    u'z'        # 0x007a -> LATIN SMALL LETTER Z
    u'{'        # 0x007b -> LEFT CURLY BRACKET
    u'|'        # 0x007c -> VERTICAL LINE
    u'}'        # 0x007d -> RIGHT CURLY BRACKET
    u'~'        # 0x007e -> TILDE
    u'\x7f'     # 0x007f -> DELETE
    u'\xc7'     # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xfc'     # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xe9'     # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xe2'     # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xe4'     # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xe0'     # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe5'     # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\xe7'     # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xea'     # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb'     # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xe8'     # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
    u'\xef'     # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\xee'     # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\u0131'   # 0x008d -> LATIN SMALL LETTER DOTLESS I
    u'\xc4'     # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc5'     # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc9'     # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xe6'     # 0x0091 -> LATIN SMALL LIGATURE AE
    u'\xc6'     # 0x0092 -> LATIN CAPITAL LIGATURE AE
    u'\xf4'     # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf6'     # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf2'     # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
    u'\xfb'     # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xf9'     # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
    u'\u0130'   # 0x0098 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
    u'\xd6'     # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xdc'     # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xf8'     # 0x009b -> LATIN SMALL LETTER O WITH STROKE
    u'\xa3'     # 0x009c -> POUND SIGN
    u'\xd8'     # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
    u'\u015e'   # 0x009e -> LATIN CAPITAL LETTER S WITH CEDILLA
    u'\u015f'   # 0x009f -> LATIN SMALL LETTER S WITH CEDILLA
    u'\xe1'     # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xed'     # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
    u'\xf3'     # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
    u'\xfa'     # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
    u'\xf1'     # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
    u'\xd1'     # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\u011e'   # 0x00a6 -> LATIN CAPITAL LETTER G WITH BREVE
    u'\u011f'   # 0x00a7 -> LATIN SMALL LETTER G WITH BREVE
    u'\xbf'     # 0x00a8 -> INVERTED QUESTION MARK
    u'\xae'     # 0x00a9 -> REGISTERED SIGN
    u'\xac'     # 0x00aa -> NOT SIGN
    u'\xbd'     # 0x00ab -> VULGAR FRACTION ONE HALF
    u'\xbc'     # 0x00ac -> VULGAR FRACTION ONE QUARTER
    u'\xa1'     # 0x00ad -> INVERTED EXCLAMATION MARK
    u'\xab'     # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbb'     # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\u2591'   # 0x00b0 -> LIGHT SHADE
    u'\u2592'   # 0x00b1 -> MEDIUM SHADE
    u'\u2593'   # 0x00b2 -> DARK SHADE
    u'\u2502'   # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    u'\u2524'   # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    u'\xc1'     # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc2'     # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\xc0'     # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xa9'     # 0x00b8 -> COPYRIGHT SIGN
    u'\u2563'   # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    u'\u2551'   # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    u'\u2557'   # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    u'\u255d'   # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    u'\xa2'     # 0x00bd -> CENT SIGN
    u'\xa5'     # 0x00be -> YEN SIGN
    u'\u2510'   # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    u'\u2514'   # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    u'\u2534'   # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    u'\u252c'   # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    u'\u251c'   # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    u'\u2500'   # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    u'\u253c'   # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    u'\xe3'     # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
    u'\xc3'     # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
    u'\u255a'   # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    u'\u2554'   # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    u'\u2569'   # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    u'\u2566'   # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    u'\u2560'   # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    u'\u2550'   # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    u'\u256c'   # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    u'\xa4'     # 0x00cf -> CURRENCY SIGN
    u'\xba'     # 0x00d0 -> MASCULINE ORDINAL INDICATOR
    u'\xaa'     # 0x00d1 -> FEMININE ORDINAL INDICATOR
    u'\xca'     # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xcb'     # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xc8'     # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\ufffe'   # 0x00d5 -> UNDEFINED
    u'\xcd'     # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce'     # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf'     # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\u2518'   # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    u'\u250c'   # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    u'\u2588'   # 0x00db -> FULL BLOCK
    u'\u2584'   # 0x00dc -> LOWER HALF BLOCK
    u'\xa6'     # 0x00dd -> BROKEN BAR
    u'\xcc'     # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
    u'\u2580'   # 0x00df -> UPPER HALF BLOCK
    u'\xd3'     # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xdf'     # 0x00e1 -> LATIN SMALL LETTER SHARP S
    u'\xd4'     # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\xd2'     # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
    u'\xf5'     # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
    u'\xd5'     # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
    u'\xb5'     # 0x00e6 -> MICRO SIGN
    u'\ufffe'   # 0x00e7 -> UNDEFINED
    u'\xd7'     # 0x00e8 -> MULTIPLICATION SIGN
    u'\xda'     # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\xdb'     # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xd9'     # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xec'     # 0x00ec -> LATIN SMALL LETTER I WITH GRAVE
    u'\xff'     # 0x00ed -> LATIN SMALL LETTER Y WITH DIAERESIS
    u'\xaf'     # 0x00ee -> MACRON
    u'\xb4'     # 0x00ef -> ACUTE ACCENT
    u'\xad'     # 0x00f0 -> SOFT HYPHEN
    u'\xb1'     # 0x00f1 -> PLUS-MINUS SIGN
    u'\ufffe'   # 0x00f2 -> UNDEFINED
    u'\xbe'     # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
    u'\xb6'     # 0x00f4 -> PILCROW SIGN
    u'\xa7'     # 0x00f5 -> SECTION SIGN
    u'\xf7'     # 0x00f6 -> DIVISION SIGN
    u'\xb8'     # 0x00f7 -> CEDILLA
    u'\xb0'     # 0x00f8 -> DEGREE SIGN
    u'\xa8'     # 0x00f9 -> DIAERESIS
    u'\xb7'     # 0x00fa -> MIDDLE DOT
    u'\xb9'     # 0x00fb -> SUPERSCRIPT ONE
    u'\xb3'     # 0x00fc -> SUPERSCRIPT THREE
    u'\xb2'     # 0x00fd -> SUPERSCRIPT TWO
    u'\u25a0'   # 0x00fe -> BLACK SQUARE
    u'\xa0'     # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00d1, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00d0, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x00e8, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x00ed, # LATIN SMALL LETTER Y WITH DIAERESIS
0x011e: 0x00a6, # LATIN CAPITAL LETTER G WITH BREVE
0x011f: 0x00a7, # LATIN SMALL LETTER G WITH BREVE
0x0130: 0x0098, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0131: 0x008d, # LATIN SMALL LETTER DOTLESS I
0x015e: 0x009e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x009f, # LATIN SMALL LETTER S WITH CEDILLA
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
} | unknown | codeparrot/codeparrot-clean | ||
<?php declare(strict_types=1);
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer\Advisory;
use Composer\Semver\Constraint\ConstraintInterface;
use DateTimeImmutable;
class SecurityAdvisory extends PartialSecurityAdvisory
{
    /**
     * Human-readable title of the advisory.
     *
     * @var string
     * @readonly
     */
    public $title;
    /**
     * CVE identifier, if one has been assigned.
     *
     * @var string|null
     * @readonly
     */
    public $cve;
    /**
     * URL with further details about the advisory, if available.
     *
     * @var string|null
     * @readonly
     */
    public $link;
    /**
     * Date at which the advisory was reported.
     *
     * @var DateTimeImmutable
     * @readonly
     */
    public $reportedAt;
    /**
     * Origins this advisory was aggregated from.
     *
     * @var non-empty-array<array{name: string, remoteId: string}>
     * @readonly
     */
    public $sources;
    /**
     * Severity level, if known.
     *
     * @var string|null
     * @readonly
     */
    public $severity;
    /**
     * @param non-empty-array<array{name: string, remoteId: string}> $sources
     */
    public function __construct(string $packageName, string $advisoryId, ConstraintInterface $affectedVersions, string $title, array $sources, DateTimeImmutable $reportedAt, ?string $cve = null, ?string $link = null, ?string $severity = null)
    {
        parent::__construct($packageName, $advisoryId, $affectedVersions);
        $this->title = $title;
        $this->cve = $cve;
        $this->link = $link;
        $this->reportedAt = $reportedAt;
        $this->sources = $sources;
        $this->severity = $severity;
    }
    /**
     * Builds an ignored copy of this advisory, carrying over all data plus
     * the reason it was ignored.
     *
     * @internal
     */
    public function toIgnoredAdvisory(?string $ignoreReason): IgnoredSecurityAdvisory
    {
        return new IgnoredSecurityAdvisory(
            $this->packageName,
            $this->advisoryId,
            $this->affectedVersions,
            $this->title,
            $this->sources,
            $this->reportedAt,
            $this->cve,
            $this->link,
            $ignoreReason,
            $this->severity
        );
    }
    /**
     * @return mixed
     */
    #[\ReturnTypeWillChange]
    public function jsonSerialize()
    {
        // Serialize the report date as an RFC 3339 string rather than the
        // DateTimeImmutable object returned by the parent.
        $serialized = parent::jsonSerialize();
        $serialized['reportedAt'] = $serialized['reportedAt']->format(DATE_RFC3339);
        return $serialized;
    }
}
"""
This module contains ordinary kalman filter classes
"""
from regression import *
import csv, scipy
from timeSeriesFrame import TimeSeriesFrame, StylusReader
try:
from clibregression import kalman_predict, kalman_upd, kalman_filter
except ImportError:
print "Cannot import clibregression"
from libregression import kalman_predict, kalman_upd, kalman_filter
# Module-level debug verbosity flag (0 = off).
DEBUG = 0
# Scale factor applied to the state covariance matrix in the demo (see main()).
KAPPA = 1./100.0
class KalmanFilter(Regression):
    """
    Ordinary (unconstrained) Kalman filter, subclassed from Regression.

    Estimates a time-varying coefficient path by running the Kalman
    predict/update recursion over the whole sample via ``kalman_filter``.
    """
    intercept = True
    def __init__(self,
                 respond = None,
                 regressors = None,
                 intercept = False,
                 Sigma = None,
                 sigma = None,
                 initBeta = None,
                 initVariance = None,
                 Phi = None,
                 **args):
        """
        :param respond: Dependent time series
        :type respond: TimeSeriesFrame<double>
        :param regressors: Independent time serieses
        :type regressors: TimeSeriesFrame<double>
        :param intercept: include/exclude intercept in the regression
        :type intercept: boolean
        :param Sigma: state (process) noise covariance matrix
        :param sigma: observation noise variance
        :param initBeta: initial state vector; when omitted, an equal
            weighting across the regressors is used
        :param initVariance: initial state covariance; when omitted, a zero
            matrix is used
        :param Phi: state transition matrix; defaults to the identity
        """
        Regression.__init__(self, respond, regressors, intercept, **args)
        # Initial state: honour a caller-supplied prior when given.  (The
        # previous branching discarded initBeta when an intercept was
        # requested and crashed on an undefined name ``n``.)
        if initBeta is not None:
            self.initBeta = initBeta
        elif self.intercept:
            # Equal weights over the real regressors, zero intercept term.
            self.initBeta = scipy.ones((self.n, 1))/float(self.n - 1)
            self.initBeta[0] = 0
        else:
            self.initBeta = scipy.ones((self.n, 1))/float(self.n)
        # Initial state covariance.  Testing ``is not None`` instead of
        # truthiness avoids the ambiguous-truth-value error raised when a
        # multi-element array is passed.
        if initVariance is None:
            self.initVariance = scipy.zeros((self.n, self.n))
        elif self.intercept:
            # Embed the supplied covariance for the non-intercept states;
            # the intercept row/column stays zero.
            self.initVariance = scipy.zeros((self.n, self.n))
            self.initVariance[1:, 1:] = initVariance
        else:
            self.initVariance = initVariance
        if Phi is None:
            self.Phi = scipy.identity(self.n)
        else:
            self.Phi = Phi
        self.paras = args.get("paras")
        self.Sigma = Sigma
        self.sigma = sigma
    def train(self):
        """
        Run the Kalman recursion over the data and store the estimated
        coefficient path in ``self.est``.

        :return: self, so calls can be chained
        """
        b = self.initBeta
        V = self.initVariance
        Phi = self.Phi
        S = self.Sigma
        s = self.sigma
        y = self.respond.data
        X = self.regressors.data
        beta = kalman_filter(b, V, Phi, y, X, s, S)
        self.est = TimeSeriesFrame(beta,
                                   self.regressors.rheader,
                                   self.regressors.cheader)
        return self
# class KalmanSmoother(KalmanFilter):
# """
# This is a Kalman Smoother Class subclassed from Kalman Filter
# """
# intercept = True
# def __init__(self,
# respond = None,
# regressors = None,
# intercept = False,
# Sigma = None,
# sigma = None,
# initBeta = None,
# initVariance = None,
# Phi = None,
# **args):
# """
# :param respond: Dependent time series
# :type respond: TimeSeriesFrame<double>
# :param regressors: Independent time serieses
# :type regressors: TimeSeriesFrame<double>
# :param intercept: include/exclude intercept in the regression
# :type intercept: boolean
# """
# KalmanFilter.__init__(self, respond, regressors, intercept, Sigma, sigma, initBeta, initVariance, Phi, **args)
# def train(self):
# """
# This fucntion will start the estimation. This is separated from addData.
# """
# beta = scipy.empty((self.t, self.n))
# b = self.initBeta
# V = self.initVariance
# Phi = self.Phi
# S = self.Sigma
# s = self.sigma
# y = self.respond.data
# X = self.regressors.data
# beta = kalman_smoother(b, V, Phi, y, X, s, S)
# self.est = TimeSeriesFrame(beta,
# self.regressors.rheader,
# self.regressors.cheader)
# return self
class ECKalmanFilter(KalmanFilter, ECRegression):
    """Kalman filter with equality-constrained updates (D * beta = d)."""
    intercept = True
    def __init__(self,
                 respond = None,
                 regressors = None,
                 intercept = False,
                 Sigma = None,
                 sigma = None,
                 initBeta = None,
                 initVariance = None,
                 Phi = None,
                 D = None,
                 d = scipy.matrix(1.00),
                 **args):
        """Initialise both parent classes; extra settings (e.g. paras) may be
        passed through **args."""
        KalmanFilter.__init__(self, respond, regressors, intercept, Sigma,
                              sigma, initBeta, initVariance, Phi, **args)
        ECRegression.__init__(self, respond, regressors, intercept, D, d,
                              **args)
    def train(self):
        """Run the constrained Kalman recursion and store the estimated
        coefficient path in ``self.est``.  Returns self for chaining."""
        # One explicit predict step before the filtered recursion.
        state, covariance = kalman_predict(self.initBeta, self.initVariance,
                                           self.Phi, self.Sigma)
        betas = kalman_filter(state, covariance, self.Phi,
                              self.respond.data, self.regressors.data,
                              self.sigma, self.Sigma, 1, self.D, self.d)
        self.est = TimeSeriesFrame(betas,
                                   self.regressors.rheader,
                                   self.regressors.cheader)
        return self
class ICKalmanFilter(ECKalmanFilter, ICRegression):
    """Kalman filter with inequality-constrained updates."""
    intercept = True
    def __init__(self,
                 respond = None,
                 regressors = None,
                 intercept = False,
                 Sigma = None,
                 sigma = None,
                 initBeta = None,
                 initVariance = None,
                 Phi = None,
                 D = None,
                 d = scipy.matrix(1.00),
                 G = None,
                 a = None,
                 b = None,
                 **args):
        """Initialise both parent classes; extra settings (e.g. paras) may be
        passed through **args."""
        ECKalmanFilter.__init__(self, respond, regressors, intercept, Sigma,
                                sigma, initBeta, initVariance, Phi, D, d,
                                **args)
        ICRegression.__init__(self, respond, regressors, intercept, D, d,
                              G, a, b, **args)
    def train(self):
        """Run the inequality-constrained Kalman recursion (mode 2) and store
        the estimated coefficient path in ``self.est``.  Returns self."""
        betas = kalman_filter(self.initBeta, self.initVariance, self.Phi,
                              self.respond.data, self.regressors.data,
                              self.sigma, self.Sigma, 2,
                              self.D, self.d, self.G, self.a, self.b)
        self.est = TimeSeriesFrame(betas,
                                   self.regressors.rheader,
                                   self.regressors.cheader)
        return self
def main():
    """Demo entry point: fit a KalmanFilter to the bundled sine-wave data
    and plot the estimated coefficient path."""
    intercept = False
    raw_rows = list(csv.reader(open("sine_wave.csv", "rb")))
    frame = StylusReader(raw_rows)
    del raw_rows
    dependent = frame[:, 0]
    independent = frame[:, 1:]
    prior_beta = scipy.matrix([0.55, 0.45]).T
    state_cov = scipy.matrix([[0.123873, -0.12387], [-0.12387, 0.123873]])
    model = KalmanFilter(dependent, independent, intercept,
                         state_cov * KAPPA, 0.12, initBeta = prior_beta)
    model.train()
    model.getEstimate().plot()
if __name__ == "__main__":
    main()
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kern import Kern
import numpy as np
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
class Static(Kern):
    """
    Base class for kernels whose covariance does not depend on the input
    locations X (e.g. white-noise and bias kernels).

    Consequently, every gradient with respect to X, the inducing inputs Z,
    or the variational parameters is identically zero.
    """
    def __init__(self, input_dim, variance, active_dims, name):
        super(Static, self).__init__(input_dim, active_dims, name)
        # Single variance parameter, kept positive via the Logexp transform.
        self.variance = Param('variance', variance, Logexp())
        self.link_parameters(self.variance)
    def Kdiag(self, X):
        """Return the kernel diagonal: the variance repeated once per row of X."""
        ret = np.empty((X.shape[0],), dtype=np.float64)
        ret[:] = self.variance
        return ret
    def gradients_X(self, dL_dK, X, X2=None):
        """Gradient of the objective w.r.t. X -- zero for static kernels."""
        return np.zeros(X.shape)
    def gradients_X_diag(self, dL_dKdiag, X):
        """Diagonal-only gradient w.r.t. X -- zero for static kernels."""
        return np.zeros(X.shape)
    def gradients_XX(self, dL_dK, X, X2):
        """Second derivatives w.r.t. X -- a zero tensor of the expected shape."""
        if X2 is None:
            X2 = X
        return np.zeros((X.shape[0], X2.shape[0], X.shape[1]), dtype=np.float64)
    def gradients_XX_diag(self, dL_dKdiag, X):
        """Diagonal second derivatives w.r.t. X -- zero for static kernels."""
        return np.zeros(X.shape)
    def gradients_Z_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Gradient w.r.t. the inducing inputs Z -- zero for static kernels."""
        return np.zeros(Z.shape)
    def gradients_qX_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        """Gradients w.r.t. the variational mean and variance -- both zero."""
        return np.zeros(variational_posterior.shape), np.zeros(variational_posterior.shape)
    def psi0(self, Z, variational_posterior):
        """psi0 statistic: Kdiag evaluated at the variational mean."""
        return self.Kdiag(variational_posterior.mean)
    def psi1(self, Z, variational_posterior):
        """psi1 statistic: K between the variational mean and Z."""
        return self.K(variational_posterior.mean, Z)
    def psi2(self, Z, variational_posterior):
        """psi2 statistic: sum_n K_n K_n^T, computed as a single einsum."""
        K = self.K(variational_posterior.mean, Z)
        return np.einsum('ij,ik->jk',K,K) #K[:,:,None]*K[:,None,:] # NB. more efficient implementations on inheriting classes
    def input_sensitivity(self, summarize=True):
        """Relevance of each input dimension; uniform (the variance) per dim."""
        if summarize:
            return super(Static, self).input_sensitivity(summarize=summarize)
        else:
            return np.ones(self.input_dim) * self.variance
class White(Static):
    """White-noise kernel: i.i.d. noise of magnitude ``variance``."""
    def __init__(self, input_dim, variance=1., active_dims=None, name='white'):
        super(White, self).__init__(input_dim, variance, active_dims, name)
    def K(self, X, X2=None):
        # Noise is independent across data points, so covariances between
        # two distinct input sets vanish entirely.
        if X2 is not None:
            return np.zeros((X.shape[0], X2.shape[0]))
        return self.variance * np.eye(X.shape[0])
    def psi2(self, Z, variational_posterior):
        # The noise never correlates with the inducing inputs.
        num_inducing = Z.shape[0]
        return np.zeros((num_inducing, num_inducing), dtype=np.float64)
    def psi2n(self, Z, variational_posterior):
        # Per-data-point variant of psi2; equally zero.
        num_inducing = Z.shape[0]
        return np.zeros((1, num_inducing, num_inducing), dtype=np.float64)
    def update_gradients_full(self, dL_dK, X, X2=None):
        # Only the diagonal of K carries the variance, so its gradient is
        # the trace of dL_dK; cross blocks contribute nothing.
        if X2 is not None:
            self.variance.gradient = 0.
        else:
            self.variance.gradient = np.trace(dL_dK)
    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = dL_dKdiag.sum()
    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        # Only psi0 depends on the variance.
        self.variance.gradient = dL_dpsi0.sum()
class Bias(Static):
    """Constant (bias) kernel: every covariance entry equals ``variance``."""
    def __init__(self, input_dim, variance=1., active_dims=None, name='bias'):
        super(Bias, self).__init__(input_dim, variance, active_dims, name)
    def K(self, X, X2=None):
        rows = X.shape[0]
        cols = rows if X2 is None else X2.shape[0]
        out = np.empty((rows, cols), dtype=np.float64)
        out[:] = self.variance
        return out
    def update_gradients_full(self, dL_dK, X, X2=None):
        # dK/dvariance is a matrix of ones, so the chain rule reduces to a sum.
        self.variance.gradient = dL_dK.sum()
    def update_gradients_diag(self, dL_dKdiag, X):
        self.variance.gradient = dL_dKdiag.sum()
    def psi2(self, Z, variational_posterior):
        num_inducing = Z.shape[0]
        num_data = variational_posterior.shape[0]
        out = np.empty((num_inducing, num_inducing), dtype=np.float64)
        out[:] = self.variance * self.variance * num_data
        return out
    def psi2n(self, Z, variational_posterior):
        num_inducing = Z.shape[0]
        out = np.empty((1, num_inducing, num_inducing), dtype=np.float64)
        out[:] = self.variance * self.variance
        return out
    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        num_data = variational_posterior.shape[0]
        self.variance.gradient = (dL_dpsi0.sum() + dL_dpsi1.sum()
                                  + 2.*self.variance*dL_dpsi2.sum()*num_data)
class Fixed(Static):
    """Kernel with a fixed, precomputed covariance matrix, scaled by a
    single variance parameter."""
    def __init__(self, input_dim, covariance_matrix, variance=1., active_dims=None, name='fixed'):
        """
        :param input_dim: the number of input dimensions
        :type input_dim: int
        :param covariance_matrix: the fixed covariance matrix that is
            scaled by the variance parameter
        :type covariance_matrix: np.ndarray
        :param variance: the variance of the kernel
        :type variance: float
        """
        super(Fixed, self).__init__(input_dim, variance, active_dims, name)
        self.fixed_K = covariance_matrix
    def K(self, X, X2=None):
        # X/X2 are ignored; X2 now defaults to None so the signature is
        # consistent with the other static kernels (previously K(X) raised
        # a TypeError).
        return self.variance * self.fixed_K
    def Kdiag(self, X):
        return self.variance * self.fixed_K.diagonal()
    def update_gradients_full(self, dL_dK, X, X2=None):
        # d/dvariance of variance*fixed_K is fixed_K, hence the inner product.
        self.variance.gradient = np.einsum('ij,ij', dL_dK, self.fixed_K)
    def update_gradients_diag(self, dL_dKdiag, X):
        # Pair dL_dKdiag with the diagonal of fixed_K (matching Kdiag).
        # Passing the full 2-D fixed_K to einsum('i,i', ...) raised a
        # dimension-mismatch error.
        self.variance.gradient = np.einsum('i,i', dL_dKdiag, self.fixed_K.diagonal())
    def psi2(self, Z, variational_posterior):
        # The fixed covariance carries no inducing-input structure.
        return np.zeros((Z.shape[0], Z.shape[0]), dtype=np.float64)
    def psi2n(self, Z, variational_posterior):
        return np.zeros((1, Z.shape[0], Z.shape[0]), dtype=np.float64)
    def update_gradients_expectations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, Z, variational_posterior):
        self.variance.gradient = dL_dpsi0.sum()
from __future__ import print_function, unicode_literals
from io import BytesIO
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.urlresolvers import reverse
from ...base import ArticleWebTestBase
class AttachmentTests(ArticleWebTestBase):
    """End-to-end tests for the wiki attachment views: upload, replace
    (with and without revision removal) and search."""
    def setUp(self):
        super(AttachmentTests, self).setUp()
        # All tests attach files to the root article.
        self.article = self.root_article
        self.test_data = "This is a plain text file"
        self.test_description = 'My file'
    def _createTxtFilestream(self, strData, **kwargs):
        """
        Helper function to create filestream for upload.
        Parameters :
            strData : str, test string data
        Optional Arguments :
            filename : str, Defaults to 'test.txt'
        """
        filename = kwargs.get('filename', 'test.txt')
        data = strData.encode('utf-8')
        filedata = BytesIO(data)
        # Wrap the bytes in an InMemoryUploadedFile so the POST mimics a
        # real browser file upload.
        filestream = InMemoryUploadedFile(
            filedata,
            None,
            filename,
            'text',
            len(data),
            None
        )
        return filestream
    def _create_test_attachment(self):
        """Upload self.test_data through the attachments index view."""
        url = reverse('wiki:attachments_index', kwargs={'path': ''})
        filestream = self._createTxtFilestream(self.test_data)
        response = self.c.post(url,
                               {'description': self.test_description,
                                'file': filestream,
                                'save': '1',
                                })
        # A successful upload redirects back to the index page.
        self.assertRedirects(response, url)
    def test_upload(self):
        """
        Tests that simple file upload uploads correctly
        Uploading a file should preserve the original filename.
        Uploading should not modify file in any way.
        """
        self._create_test_attachment()
        # Check the object was created.
        attachment = self.article.shared_plugins_set.all()[0].attachment
        self.assertEqual(attachment.original_filename, 'test.txt')
        self.assertEqual(attachment.current_revision.file.file.read(), self.test_data.encode('utf-8'))
    def test_replace(self):
        """
        Tests that previous revisions are not deleted
        Tests that only the most recent revision is deleted when
        "replace" is checked.
        """
        # Upload initial file
        url = reverse('wiki:attachments_index', kwargs={'path': ''})
        data = "This is a plain text file"
        filestream = self._createTxtFilestream(data)
        self.c.post(url, {'description': 'My file', 'file': filestream, 'save': '1', })
        attachment = self.article.shared_plugins_set.all()[0].attachment
        # uploading for the first time should mean that there is only one revision.
        self.assertEqual(attachment.attachmentrevision_set.count(), 1)
        # Change url to replacement page.
        url = reverse(
            'wiki:attachments_replace',
            kwargs={'attachment_id': attachment.id, 'article_id': self.article.id}
        )
        # Upload replacement without removing revisions
        replacement_data = data + ' And this is my edit'
        replacement_filestream = self._createTxtFilestream(replacement_data)
        self.c.post(
            url,
            {
                'description': 'Replacement upload',
                'file': replacement_filestream,
            }
        )
        # Re-read the attachment from the database to see the new revision.
        attachment = self.article.shared_plugins_set.all()[0].attachment
        # Revision count should be two
        self.assertEqual(attachment.attachmentrevision_set.count(), 2)
        # Original filenames should not be modified
        self.assertEqual(attachment.original_filename, 'test.txt')
        # Latest revision should equal replacement_data
        self.assertEqual(attachment.current_revision.file.file.read(), replacement_data.encode('utf-8'))
        first_replacement = attachment.current_revision
        # Upload another replacement, this time removing most recent revision
        replacement_data2 = data + ' And this is a different edit'
        replacement_filestream2 = self._createTxtFilestream(replacement_data2)
        self.c.post(
            url,
            {
                'description': 'Replacement upload',
                'file': replacement_filestream2,
                'replace': 'on',
            }
        )
        attachment = self.article.shared_plugins_set.all()[0].attachment
        # Revision count should still be two
        self.assertEqual(attachment.attachmentrevision_set.count(), 2)
        # Latest revision should equal replacement_data2
        self.assertEqual(attachment.current_revision.file.file.read(), replacement_data2.encode('utf-8'))
        # The first replacement should no longer be in the filehistory
        self.assertNotIn(first_replacement, attachment.attachmentrevision_set.all())
    def test_search(self):
        """
        Call the search view
        """
        self._create_test_attachment()
        url = reverse('wiki:attachments_search', kwargs={'path': ''})
        response = self.c.get(url, {'query': self.test_description})
        self.assertContains(response, self.test_description)
# -*- mode: python -*-
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by Ansible's doc tooling (e.g. ansible-doc);
# the string below must remain valid YAML.
DOCUMENTATION = '''
---
author: "Benno Joy (@bennojoy)"
module: include_vars
short_description: Load variables from files, dynamically within a task.
description:
- Loads variables from a YAML file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from.
options:
free-form:
description:
- The file name from which variables should be loaded, if called from a role it will look for
the file in vars/ subdirectory of the role, otherwise the path would be relative to playbook. An absolute path can also be provided.
required: true
version_added: "1.4"
'''
# Usage examples, likewise rendered by Ansible's doc tooling.
EXAMPLES = """
# Conditionally decide to load in variables when x is 0, otherwise do not.
- include_vars: contingency_plan.yml
when: x == 0
# Load a variable file based on the OS type, or a default if not found.
- include_vars: "{{ item }}"
with_first_found:
- "{{ ansible_distribution }}.yml"
- "{{ ansible_os_family }}.yml"
- "default.yml"
""" | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.consumer.internals;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.MetricNameTemplate;
import org.apache.kafka.common.metrics.KafkaMetric;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.Max;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
/**
 * Tests for {@code ShareFetchMetricsManager}: verifies that fetch
 * latency/size/record metrics and acknowledgement counters are recorded
 * correctly, and that all sensors are removed on close.
 */
class ShareFetchMetricsManagerTest {
    // Tolerance for comparing floating-point metric values.
    private static final double EPSILON = 0.0001;
    // Mock clock starting at t=0; constructed as MockTime(1, 0, 0) --
    // presumably a 1 ms auto-tick; confirm against MockTime's constructor.
    private final Time time = new MockTime(1, 0, 0);
    private ShareFetchMetricsManager shareFetchMetricsManager;
    private ShareFetchMetricsRegistry shareFetchMetricsRegistry;
    private Metrics metrics;
    @BeforeEach
    public void setup() {
        // Fresh Metrics/registry/manager per test so sensors never leak
        // between test cases.
        metrics = new Metrics(time);
        shareFetchMetricsRegistry = new ShareFetchMetricsRegistry(CONSUMER_SHARE_METRIC_GROUP_PREFIX);
        shareFetchMetricsManager = new ShareFetchMetricsManager(metrics, shareFetchMetricsRegistry);
    }
    @AfterEach
    public void tearDown() {
        if (metrics != null) {
            metrics.close();
            metrics = null;
        }
        shareFetchMetricsManager = null;
    }
    @Test
    public void testLatency() {
        shareFetchMetricsManager.recordLatency("", 101);
        // Step past the sampling window so the second value lands in a new
        // sample; avg = (101 + 155) / 2 = 128, max = 155.
        time.sleep(metrics.config().timeWindowMs() + 1);
        shareFetchMetricsManager.recordLatency("", 155);
        assertEquals(155, (double) getMetric(shareFetchMetricsRegistry.fetchLatencyMax).metricValue(), EPSILON);
        assertEquals(128, (double) getMetric(shareFetchMetricsRegistry.fetchLatencyAvg).metricValue(), EPSILON);
    }
    @Test
    public void testNodeLatency() {
        String connectionId = "0";
        MetricName nodeLatencyAvg = metrics.metricName("request-latency-avg", "group");
        MetricName nodeLatencyMax = metrics.metricName("request-latency-max", "group");
        registerNodeLatencyMetric(connectionId, nodeLatencyAvg, nodeLatencyMax);
        shareFetchMetricsManager.recordLatency(connectionId, 123);
        time.sleep(metrics.config().timeWindowMs() + 1);
        shareFetchMetricsManager.recordLatency(connectionId, 456);
        // Both the client-level and the node-level sensors see the samples:
        // avg = (123 + 456) / 2 = 289.5, max = 456.
        assertEquals(289.5, metricValue(shareFetchMetricsRegistry.fetchLatencyAvg), EPSILON);
        assertEquals(456, metricValue(shareFetchMetricsRegistry.fetchLatencyMax), EPSILON);
        assertEquals(289.5, metricValue(nodeLatencyAvg), EPSILON);
        assertEquals(456, metricValue(nodeLatencyMax), EPSILON);
        // Record metric against another node.
        shareFetchMetricsManager.recordLatency("1", 501);
        assertEquals(360, metricValue(shareFetchMetricsRegistry.fetchLatencyAvg), EPSILON);
        assertEquals(501, metricValue(shareFetchMetricsRegistry.fetchLatencyMax), EPSILON);
        // Node specific metric should not be affected.
        assertEquals(289.5, metricValue(nodeLatencyAvg), EPSILON);
        assertEquals(456, metricValue(nodeLatencyMax), EPSILON);
    }
    @Test
    public void testBytesFetched() {
        shareFetchMetricsManager.recordBytesFetched(2);
        time.sleep(metrics.config().timeWindowMs() + 1);
        shareFetchMetricsManager.recordBytesFetched(10);
        // avg = (2 + 10) / 2 = 6, max = 10.
        assertEquals(10, (double) getMetric(shareFetchMetricsRegistry.fetchSizeMax).metricValue());
        assertEquals(6, (double) getMetric(shareFetchMetricsRegistry.fetchSizeAvg).metricValue(), EPSILON);
    }
    @Test
    public void testRecordsFetched() {
        shareFetchMetricsManager.recordRecordsFetched(7);
        time.sleep(metrics.config().timeWindowMs() + 1);
        shareFetchMetricsManager.recordRecordsFetched(9);
        // avg = (7 + 9) / 2 = 8, max = 9.
        assertEquals(9, (double) getMetric(shareFetchMetricsRegistry.recordsPerRequestMax).metricValue());
        assertEquals(8, (double) getMetric(shareFetchMetricsRegistry.recordsPerRequestAvg).metricValue(), EPSILON);
    }
    @Test
    public void testAcknowledgements() {
        // Cumulative totals, not windowed statistics.
        shareFetchMetricsManager.recordAcknowledgementSent(5);
        shareFetchMetricsManager.recordFailedAcknowledgements(2);
        assertEquals(5, (double) getMetric(shareFetchMetricsRegistry.acknowledgementSendTotal).metricValue());
        assertEquals(2, (double) getMetric(shareFetchMetricsRegistry.acknowledgementErrorTotal).metricValue());
    }
    @Test
    public void testCloseRemovesAllSensors() throws IOException {
        // Define all sensor names that should be created and removed
        String[] sensorNames = {
            "fetch-throttle-time",
            "bytes-fetched",
            "records-fetched",
            "fetch-latency",
            "sent-acknowledgements",
            "failed-acknowledgements"
        };
        // Verify that sensors exist before closing
        for (String sensorName : sensorNames) {
            assertNotNull(metrics.getSensor(sensorName), "Sensor " + sensorName + " should exist before closing");
        }
        // Close the metrics manager
        shareFetchMetricsManager.close();
        // Verify that all sensors are removed
        for (String sensorName : sensorNames) {
            assertNull(metrics.getSensor(sensorName), "Sensor " + sensorName + " should be removed after closing");
        }
    }
    // Resolves a templated metric name against the registry's tags.
    private KafkaMetric getMetric(MetricNameTemplate name) {
        return metrics.metric(metrics.metricInstance(name));
    }
    // Registers a per-node latency sensor.  NOTE(review): the sensor name
    // must match the "node-<id>.latency" convention the manager records
    // into -- confirm against ShareFetchMetricsManager.
    private void registerNodeLatencyMetric(String connectionId, MetricName nodeLatencyAvg, MetricName nodeLatencyMax) {
        String nodeTimeName = "node-" + connectionId + ".latency";
        Sensor nodeRequestTime = metrics.sensor(nodeTimeName);
        nodeRequestTime.add(nodeLatencyAvg, new Avg());
        nodeRequestTime.add(nodeLatencyMax, new Max());
    }
    // Convenience overload: resolve a template, then read its value.
    private double metricValue(MetricNameTemplate name) {
        MetricName metricName = metrics.metricInstance(name);
        return metricValue(metricName);
    }
    private double metricValue(MetricName metricName) {
        KafkaMetric metric = metrics.metric(metricName);
        return (Double) metric.metricValue();
    }
}
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add ``TruffeUser.email_perso`` and make ``email`` unique."""
        # Adding field 'TruffeUser.email_perso' (nullable, so existing rows
        # need no default)
        db.add_column(u'users_truffeuser', 'email_perso',
                      self.gf('django.db.models.fields.EmailField')(max_length=255, null=True, blank=True),
                      keep_default=False)
        # Adding unique constraint on 'TruffeUser', fields ['email']
        db.create_unique(u'users_truffeuser', ['email'])
    def backwards(self, orm):
        """Revert: drop ``email_perso`` and the unique constraint on ``email``."""
        # Deleting field 'TruffeUser.email_perso'
        db.delete_column(u'users_truffeuser', 'email_perso')
        # Removing unique constraint on 'TruffeUser', fields ['email']
        db.delete_unique(u'users_truffeuser', ['email'])
    # South's frozen ORM: a generated snapshot of the models this migration
    # needs. Do not edit by hand; change the real models and re-freeze.
    models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'users.truffeuser': {
'Meta': {'object_name': 'TruffeUser'},
'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255'}),
'email_perso': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'users.userprivacy': {
'Meta': {'object_name': 'UserPrivacy'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
}
}
complete_apps = ['users'] | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.index.sample;
import org.springframework.stereotype.Component;
/**
* Test candidate for {@link Component}.
*
* @author Stephane Nicoll
*/
@Component
public class SampleComponent {

	// Intentionally empty: this is a test fixture — presumably only the
	// @Component annotation matters to the indexer (TODO confirm).
} | java | github | https://github.com/spring-projects/spring-framework | spring-context-indexer/src/test/java/org/springframework/context/index/sample/SampleComponent.java |
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.cli;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import org.springframework.boot.cli.command.Command;
import org.springframework.boot.cli.command.CommandFactory;
import org.springframework.boot.cli.command.core.VersionCommand;
import org.springframework.boot.cli.command.encodepassword.EncodePasswordCommand;
import org.springframework.boot.cli.command.init.InitCommand;
/**
* Default implementation of {@link CommandFactory}.
*
* @author Dave Syer
* @since 1.0.0
*/
public class DefaultCommandFactory implements CommandFactory {
private static final List<Command> DEFAULT_COMMANDS;
static {
List<Command> defaultCommands = new ArrayList<>();
defaultCommands.add(new VersionCommand());
defaultCommands.add(new InitCommand());
defaultCommands.add(new EncodePasswordCommand());
DEFAULT_COMMANDS = Collections.unmodifiableList(defaultCommands);
}
    @Override
    public Collection<Command> getCommands() {
        // The same shared, unmodifiable list instance is returned every call.
        return DEFAULT_COMMANDS;
    }
} | java | github | https://github.com/spring-projects/spring-boot | cli/spring-boot-cli/src/main/java/org/springframework/boot/cli/DefaultCommandFactory.java |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, api
class StockPicking(models.Model):
_inherit = 'stock.picking'
    @api.multi
    def action_open_landed_cost(self):
        """Open the cost distribution(s) linked to this picking.

        Returns the ``action_purchase_cost_distribution`` window action,
        narrowed to a single form view when the picking's distribution lines
        all belong to one distribution, or filtered by a domain when several
        distributions are involved.
        """
        self.ensure_one()
        line_obj = self.env['purchase.cost.distribution.line']
        lines = line_obj.search([('picking_id', '=', self.id)])
        # NOTE(review): if no lines match, 'action' is never bound and the
        # trailing "return action" raises NameError — confirm callers only
        # invoke this when distribution lines exist (e.g. via a view
        # visibility condition).
        if lines:
            mod_obj = self.env['ir.model.data']
            # Resolve (model, id) of the shared window action.
            model, action_id = tuple(
                mod_obj.get_object_reference(
                    'purchase_landed_cost',
                    'action_purchase_cost_distribution'))
            action = self.env[model].browse(action_id).read()[0]
            # Distinct distributions referenced by this picking's lines.
            ids = set([x.distribution.id for x in lines])
            if len(ids) == 1:
                # Single distribution: open it directly in form view.
                res = mod_obj.get_object_reference(
                    'purchase_landed_cost', 'purchase_cost_distribution_form')
                action['views'] = [(res and res[1] or False, 'form')]
                action['res_id'] = list(ids)[0]
            else:
                # Several distributions: show them filtered in the list view.
                action['domain'] = "[('id', 'in', %s)]" % list(ids)
return action | unknown | codeparrot/codeparrot-clean | ||
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.human import (
AsyncHumanApprovalCallbackHandler,
HumanApprovalCallbackHandler,
HumanRejectedException,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"HumanRejectedException": "langchain_community.callbacks.human",
"HumanApprovalCallbackHandler": "langchain_community.callbacks.human",
"AsyncHumanApprovalCallbackHandler": "langchain_community.callbacks.human",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Routes attribute access through ``_import_attribute`` so deprecated names
    are loaded on demand from ``langchain_community`` with the appropriate
    deprecation handling.
    """
    return _import_attribute(name)
__all__ = [
"AsyncHumanApprovalCallbackHandler",
"HumanApprovalCallbackHandler",
"HumanRejectedException",
] | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/callbacks/human.py |
from django.core.paginator import Paginator, InvalidPage
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.encoding import smart_str
from django.views.generic.base import TemplateResponseMixin, View
class MultipleObjectMixin(object):
    """
    Mixin providing the machinery to look up, paginate and expose a list of
    objects in the template context.
    """
    allow_empty = True            # if False, an empty list raises Http404
    queryset = None               # explicit iterable/queryset to display
    model = None                  # fallback: use model._default_manager.all()
    paginate_by = None            # page size; None disables pagination
    context_object_name = None    # explicit context key for the list
    paginator_class = Paginator
    def get_queryset(self):
        """
        Get the list of items for this view. This must be an iterable, and may
        be a queryset (in which case queryset-specific behavior will be
        enabled).
        """
        if self.queryset is not None:
            queryset = self.queryset
            # Clone so per-request filtering never mutates the class-level
            # queryset shared across requests.
            if hasattr(queryset, '_clone'):
                queryset = queryset._clone()
        elif self.model is not None:
            queryset = self.model._default_manager.all()
        else:
            raise ImproperlyConfigured(u"'%s' must define 'queryset' or 'model'"
                                % self.__class__.__name__)
        return queryset
    def paginate_queryset(self, queryset, page_size):
        """
        Paginate the queryset, if needed.

        Returns a 4-tuple ``(paginator, page, object_list, is_paginated)``;
        the first two are ``None`` when only a single page exists.
        """
        paginator = self.get_paginator(queryset, page_size, allow_empty_first_page=self.get_allow_empty())
        if paginator.num_pages > 1:
            # The page number may come from the URLconf kwargs or the query
            # string; default to the first page.
            page = self.kwargs.get('page') or self.request.GET.get('page') or 1
            try:
                page_number = int(page)
            except ValueError:
                # 'last' is the only accepted non-numeric page value.
                if page == 'last':
                    page_number = paginator.num_pages
                else:
                    raise Http404("Page is not 'last', nor can it be converted to an int.")
            try:
                page = paginator.page(page_number)
                return (paginator, page, page.object_list, True)
            except InvalidPage:
                raise Http404(u'Invalid page (%s)' % page_number)
        else:
            return (None, None, queryset, False)
    def get_paginate_by(self, queryset):
        """
        Get the number of items to paginate by, or ``None`` for no pagination.
        """
        return self.paginate_by
    def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True):
        """
        Return an instance of the paginator for this view.
        """
        return self.paginator_class(queryset, per_page, orphans=orphans, allow_empty_first_page=allow_empty_first_page)
    def get_allow_empty(self):
        """
        Returns ``True`` if the view should display empty lists, and ``False``
        if a 404 should be raised instead.
        """
        return self.allow_empty
    def get_context_object_name(self, object_list):
        """
        Get the name of the item to be used in the context.

        Falls back to the model's ``verbose_name_plural`` when the list is a
        queryset and no explicit name was configured.
        """
        if self.context_object_name:
            return self.context_object_name
        elif hasattr(object_list, 'model'):
            return smart_str(object_list.model._meta.verbose_name_plural)
        else:
            return None
    def get_context_data(self, **kwargs):
        """
        Get the context for this view.
        """
        queryset = kwargs.pop('object_list')
        page_size = self.get_paginate_by(queryset)
        if page_size:
            # Pagination may replace the queryset with the current page's
            # object list.
            paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
            context = {
                'paginator': paginator,
                'page_obj': page,
                'is_paginated': is_paginated,
                'object_list': queryset
            }
        else:
            context = {
                'paginator': None,
                'page_obj': None,
                'is_paginated': False,
                'object_list': queryset
            }
        context.update(kwargs)
        # Also expose the list under its friendly name, if one is available.
        context_object_name = self.get_context_object_name(queryset)
        if context_object_name is not None:
            context[context_object_name] = queryset
        return context
class BaseListView(MultipleObjectMixin, View):
    """
    Base view for displaying a list of objects; subclasses supply rendering
    via ``render_to_response``.
    """
    def get(self, request, *args, **kwargs):
        self.object_list = self.get_queryset()
        allow_empty = self.get_allow_empty()
        # Honor allow_empty: an empty list is a 404 unless explicitly allowed.
        if not allow_empty and len(self.object_list) == 0:
            raise Http404(u"Empty list and '%s.allow_empty' is False."
                          % self.__class__.__name__)
        context = self.get_context_data(object_list=self.object_list)
        return self.render_to_response(context)
class MultipleObjectTemplateResponseMixin(TemplateResponseMixin):
    """
    Mixin that extends template selection with an automatic
    ``<app>/<model><suffix>.html`` candidate derived from the object list.
    """
    template_name_suffix = '_list'
    def get_template_names(self):
        """
        Return a list of template names to be used for the request. Must return
        a list. May not be called if get_template is overridden.
        """
        names = super(MultipleObjectTemplateResponseMixin, self).get_template_names()
        # If the list is a queryset, we'll invent a template name based on the
        # app and model name. This name gets put at the end of the template
        # name list so that user-supplied names override the automatically-
        # generated ones.
        if hasattr(self.object_list, 'model'):
            opts = self.object_list.model._meta
            names.append("%s/%s%s.html" % (opts.app_label, opts.object_name.lower(), self.template_name_suffix))
        return names
class ListView(MultipleObjectTemplateResponseMixin, BaseListView):
"""
Render some list of objects, set by `self.model` or `self.queryset`.
`self.queryset` can actually be any iterable of items, not just a queryset.
""" | unknown | codeparrot/codeparrot-clean |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.