code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
__author__ = 'mitchell'

# ---------------------------------------------------------------------------
# Mock fixtures mirroring the JSON responses of the remote device API.
# Identifying values are anonymised with "123" / "CHANGE_ME" placeholders.
# ---------------------------------------------------------------------------

# Device identifier; also the key into sensor_data_resp below.
device_id = "123@rayleigh"

# Response of the "list devices" call: a single GSM controller device.
devices_data_resp = [
    {
        # NOTE(review): looks like a base-station fix (two coordinates and a
        # numeric id) -- confirm field meaning against the API docs.
        "bts": [
            -2.6026,
            51.4546,
            1106
        ],
        "tzo": 2,  # presumably the timezone offset in hours -- TODO confirm
        "vsn": "CHANGE_ME",
        "payment_to": 123,
        "model": "GSM Control Expert",
        "last_call": 123,
        "name": "CHANGE_ME",
        "id": device_id,
        "privilege": "owner",
        "created": 123,
        "sensors_access": "all",
        "owner": "CHANGE_ME",
        "ccid": "123",
        "phone": "123",
        "provider": None,
        "provider_txt": None,
        "location": "123"
    },
]

# Response of the "sensor data" call, keyed by device id.  Each entry maps a
# sensor id to its descriptor: last value, timing info, direction and
# type/formatting metadata.  Timestamps are in milliseconds.
sensor_data_resp = {
    device_id: {
        # Digital inputs (bool).
        "1": {
            "last_value": 1,
            "last_call_delta": 25656,
            "last_call": 1467799704984,
            "direction": "input",
            "negated": 1,
            "name": "DIN1",
            "type": "bool"
        },
        # Analog inputs (float, "fixed" = number of displayed decimals).
        "129": {
            "last_value": 0.9500,
            "last_call_delta": 25690,
            "last_call": 1467799704950,
            "direction": "input",
            "name": "AIN1",
            "type": "float",
            "unit": "V",
            "fixed": 2
        },
        "130": {
            "last_value": 0.9400,
            "last_call_delta": 25692,
            "last_call": 1467799704948,
            "direction": "input",
            "name": "AIN2",
            "type": "float",
            "unit": "V",
            "fixed": 2
        },
        # Built-in diagnostics: GSM signal strength, internal temperature,
        # supply voltage.
        "158": {
            "last_value": -65.0000,
            "last_call_delta": 25638,
            "last_call": 1467799705002,
            "direction": "input",
            "name": "GSM",
            "type": "float",
            "unit": "dbm",
            "fixed": 0
        },
        "159": {
            "last_value": 22.7000,
            "last_call_delta": 25674,
            "last_call": 1467799704966,
            "direction": "input",
            "name": "Temperatura",
            "type": "float",
            "unit": "°C",
            "fixed": 1
        },
        "160": {
            "last_value": 17.0400,
            "last_call_delta": 25660,
            "last_call": 1467799704980,
            "direction": "input",
            "name": "Zasilanie",
            "type": "float",
            "unit": "V",
            "fixed": 2
        },
        # Digital outputs.
        "17": {
            "last_value": 0,
            "last_call_delta": 25646,
            "last_call": 1467799704994,
            "direction": "output",
            "name": "OUT1",
            "type": "bool",
            "lock": 1413288722000
        },
        "18": {
            "last_value": 0,
            "last_call_delta": 25642,
            "last_call": 1467799704998,
            "direction": "output",
            "name": "OUT2",
            "type": "bool"
        },
        "2": {
            "last_value": 1,
            "last_call_delta": 25651,
            "last_call": 1467799704989,
            "direction": "input",
            "negated": 1,
            "name": "DIN2",
            "type": "bool"
        },
        # External temperature probes -- never reported (last_value None).
        "70": {
            "last_value": None,
            "direction": "input",
            "name": "Temperatura EXT1",
            "type": "float",
            "unit": "°C",
            "fixed": 1
        },
        "71": {
            "last_value": None,
            "direction": "input",
            "name": "Temperatura EXT2",
            "type": "float",
            "unit": "°C",
            "fixed": 1
        },
        "72": {
            "last_value": None,
            "direction": "input",
            "name": "Temperatura EXT3",
            "type": "float",
            "unit": "°C",
            "fixed": 1
        },
        # Base-station pseudo-sensor (disabled).
        "bts": {
            "last_value": [
                1467799689000,
                "234",
                "50",
                "77A6",
                "559D"
            ],
            "last_call_delta": 25636,
            "last_call": 1467799705004,
            "enabled": 0,
            "direction": "input",
            "type": "bts"
        },
        "call": {
            "last_value": None,
            "type": "string"
        },
        # Modbus 3-phase energy meter ("mbid" = modbus id).
        "e1": {
            "last_value": [
                1413288726967,
                0.0000,
                0.0000,
                0.0000,
                0.0000,
                0.0000,
                0.0000,
                0.0000,
                0.0000
            ],
            "last_call_delta": 25686,
            "last_call": 1467799704954,
            "mbid": "C0",
            "name": "Chiller 1",
            "type": "e3p2",
            "interval": 55
        },
        "e1.i3p": {
            "last_value": [
                1.5600,
                1.2000,
                0.9600
            ],
            "last_call_delta": 25678,
            "last_call": 1467799704962,
            "direction": "input",
            "type": "float3"
        },
        "e1.kwh": {
            "last_value": 69525.0000,
            "last_call_delta": 25686,
            "last_call": 1467799704954,
            "direction": "input",
            "type": "float"
        },
        "e1.v3p": {
            "last_value": [
                225.5000,
                226.6000,
                227.7000
            ],
            "last_call_delta": 25681,
            "last_call": 1467799704959,
            "direction": "input",
            "type": "float3"
        },
        # Virtual (server-side computed) sensor: mean of the three phase
        # voltages.  The formula is verbatim API JSON, which escapes "/" as
        # "\/"; the original literal used a bare "\/" -- an invalid Python
        # escape sequence -- so it is spelled "\\/" here to keep the exact
        # same runtime value without the SyntaxWarning.
        "virt1": {
            "last_value": None,
            "formula": "fixed((val('e1.v3p',0)+val('e1.v3p',1)+val('e1.v3p',2))\\/3,1)",
            "name": "Chiller 1",
            "type": "virtual",
            "display": {
                "max": 250,
                "low": 222,
                "alarm": 245,
                "high": 240,
                "min": 220,
                "type": "cgauge3"
            },
            "dunit": "V"
        }
    }
}
|
unknown
|
codeparrot/codeparrot-clean
| ||
# ---------------------------------------------------------------------------- #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# ---------------------------------------------------------------------------- #
# Default per-user message settings: join/leave templates ("/u" is the
# username placeholder), timestamp format, and the colour per message class.
DEFAULT_MSG_SETTINGS = {
    'joinmsg': r"/u Has joined.",
    'leavemsg': r"/u Has left.",
    'timefmt': "[%l:%M:%S]",
    'colors': {
        "normal": "#CCCCCC",
        "user": "#BFF0FF",
        "whisper_recv": "#7BA59E",
        "whisper_send": "#1E90FF",
        "user_warning": "#FF5600",
        "chan": "#1A0000",
        "bot": "#1AAFBB",
        "user_mentioned": "#757200",
        "important": "#A5F378",
        "server": "#757575",
        "admin": "#800080",
        "help": "#DDDDDD",
        "notice": "#E5E545",
        "warn": "#FFA500",
        "error": "#FF0000",
        "divline": "#CCCCCC"
    }
}  # (stray trailing semicolon removed)

# msgcodes -- numeric codes for the different message classes.
NORMAL = 0           # normal text
USER = 1             # user's own text
WHISPER_RECV = 2     # received whisper
WHISPER_SEND = 3     # sent whisper
USER_WARNING = 4     # warning text received by user (ie you are banned)
CHAN = 5             # for user joined/left/kicked/booted/banned etc.
USER_MENTIONED = 7   # text where own username was mentioned
INFO = 8
IMPORTANT = 9        # text marked as important
SERVER = 10          # server messages
ADMIN = 11           # admin message
NOTICE = 13
WARN = 14
ERROR = 15
# local
DIV_LINE = 20        # normal divider line
DIV_LINE_DASH = 21   # dashed divider line
BOT = 50             # channel bot
BOT2 = 51            # channel bot2
DIV_LINE_CODES = 20, 21
# Every known message code.  Fix: 7 (USER_MENTIONED) was missing from the
# original tuple, so mention messages could never be shown.  6 and 12 are
# kept although no constant with those values is defined in this file --
# presumably reserved elsewhere; TODO confirm.
ALL_CODES = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
             20, 21, 50, 51)
import session
class _Channels(list):
def __init__(self):
pass
def __del__(self):
self.clear_all
self = None
def clear_all(self):
"""Clear all messages on all channels."""
for i in self: i.clear
def close_all(self):
"""Close all channels."""
for i in self: i.close()
def chans_with_unread(self):
"""Return an iterator of all channels with new messages."""
return iter(i for i in self if i._unread >= 1)
def clear_unread(self):
"""Reset the number of unread messages to 0 on all channels."""
for i in self: i._unread = 0
class GlobalChan:
    """View settings for the global channel.

    The sets of shown/default message codes are persisted in the user's
    config via the module-level ``session`` import.
    """

    def __init__(self):
        pass

    def _get_default_codes(self):
        return session.user.config['msg-globchan-showcodes-default']

    def _set_default_codes(self, codes):
        # Deduplicate before persisting.
        session.user.config['msg-globchan-showcodes-default'] = list(set(codes))

    default_codes = property(_get_default_codes, _set_default_codes,
                             doc="The default shown message codes.")

    def _get_shown(self):
        # Fix: the original was missing `return`, so the property always
        # yielded None.
        return session.user.config['msg-globchan-showcodes-last']

    def _set_shown(self, codes):
        # Fix: the original omitted the `codes` parameter (NameError).
        session.user.config['msg-globchan-showcodes-last'] = list(set(codes))

    # Fix: the original referenced undefined _get_lastcodes/_set_lastcodes,
    # which raised NameError at class-creation time.
    codes = property(_get_shown, _set_shown,
                     doc="The currently shown message codes.")

    def hidden_codes(self):
        """Return an iterator of all currently hidden message codes."""
        return iter(i for i in ALL_CODES if i not in self.codes)

    def hide_codes(self, *codes):
        """Hide given codes."""
        self.codes = [i for i in self.codes if i not in codes]

    def show_codes(self, *codes):
        """Show given codes."""
        self.codes += codes

    def reset_codes(self):
        """Reset current codes to defaults."""
        self.codes = self.default_codes
class _Console(list):
__slots__ = list.__slots__ + "_unread", "_closed"
def __init__(self):
self._unread = 0
self._closed = False
def clear(self):
self._items = []
self._unread = 0
@property
def unread(self):
"""The number new messages since this channel was last viewed."""
return self._unread
def clear_unread(self):
"""Resets the number of unread messages to 0."""
self._unread = 0
@property
def last_timestamp(self):
"""The last timestamp."""
return self[-1].timestamp
@property
def last(self):
"""The last message or log."""
return self[-1]
def add_div_line(self, code):
if code in DIV_LINE_CODES:
self.append(DivLine(code))
def add_new(self, msg, msgcode='default'):
"""Append a new message to this channel."""
if msg:
if (msgcode == 'default' or not msgcode):
self.append(Message(self._default_msgcode, msg))
else: self.append(Message(msgcode, msg))
self._unread += 1
def _dump(self, filepath):
"""Dump all messages from this channel to a text file."""
with open(filepath, 'wb') as f:
f.write(self.name)
for i in self:
f.write(i.timestamp, i)
def close(self):
"""Close this channel."""
self._closed = True
class Chan(_Console):
    """Message channel class."""

    # Only this class's extra slot is declared here; the parent's slots are
    # inherited.  Fix: the original concatenated a bare string onto the
    # parent's (itself broken) __slots__.
    __slots__ = ("_name",)
    _default_msgcode = NORMAL

    def __init__(self, name):
        # Delegate _unread/_closed initialisation to _Console.
        super().__init__()
        self._name = name

    # Fix: added so _dump()'s `self.name` works for channels (Console sets a
    # plain `name` attribute; Chan stored only `_name`).
    @property
    def name(self):
        """The channel's display name."""
        return self._name

    # The original __del__ rebound `self = None` and then dereferenced it --
    # guaranteed AttributeError (silently swallowed during finalisation).
    # There is nothing to release here, so it was removed.
class Message(str):
    """Text message carrying a creation timestamp and a message code.

    Serialized form (see __str__/_from_str): ``"<ts>;<code>:<text>"``.
    """

    def __new__(cls, msgcode, text="", *args, **kwargs):
        # Fix: str is immutable, so the text must be bound in __new__.  The
        # original had no __new__, so str.__new__ received the msgcode as
        # its `encoding` argument and raised TypeError on every call.
        return super().__new__(cls, text)

    def __init__(self, msgcode, text="", *args, **kwargs):
        import time
        self._ts = int(time.time())  # creation time, whole seconds
        self._msgcode = msgcode

    @classmethod
    def _from_str(cls, s):
        """Create a message by parsing its serialized text form.

        Used to load messages.  Fix: the original mutated the *class*,
        called super().__init__ incorrectly, and returned nothing.
        """
        header, _, text = s.partition(':')
        ts, _, code = header.partition(';')
        msg = cls(int(code), text)
        msg._ts = int(ts)
        return msg

    def __str__(self):
        # Use str.__str__ for the text part; formatting `self` directly
        # would recurse into this method forever.
        return "{};{}:{}".format(self._ts, self._msgcode, str.__str__(self))

    __repr__ = __str__

    @property
    def timestamp(self):
        """The creation time for this message as a datetime."""
        import datetime
        return datetime.datetime.fromtimestamp(self._ts)
class DivLine:
    """Text divider line."""

    __slots__ = ("_msgcode",)

    def __init__(self, msgcode):
        self._msgcode = msgcode

    # Fix: this helper takes (code, length) but was declared as an instance
    # method without `self`, so calling it via an instance shifted the
    # arguments.  It uses no instance state -> staticmethod.
    @staticmethod
    def _get_divline(code, length):
        """Render a divider of `length` characters for the given code."""
        if code == DIV_LINE_DASH:
            return "-" * length
        return "—" * length
class Console(_Console):
    """Error console channel."""

    _default_msgcode = ERROR
    _default_verbosity = 2

    def __init__(self):
        # Fix: the original skipped _Console.__init__, so _unread/_closed
        # were never set and the first log() raised AttributeError.
        super().__init__()
        self.name = "console"

    def log(self, msg, msgcode='default'):
        """Append a new message to this channel."""
        if not msg:
            return
        if msgcode == 'default' or not msgcode:
            msgcode = self._default_msgcode
        self.append(Message(msgcode, msg))
        self._unread += 1

def _log_err(self, msg):
    # Shortcut for logging an ERROR message; attached below as
    # Console.log.error.
    if msg:
        self.append(Message(ERROR, msg))
        self._unread += 1

def _log_warn(self, msg):
    # Shortcut for logging a WARN message.
    if msg:
        self.append(Message(WARN, msg))
        self._unread += 1

def _maxverbosity():
    # Current verbosity ceiling from the user's config, falling back to the
    # class default when no user/config is available.
    import session
    try:
        return session._user['config']['console.verbosity']
    except Exception:  # was a bare `except:` -- at least don't catch SystemExit
        return Console._default_verbosity

def _log_info(self, msg, verbosity=2):
    # Shortcut for logging an INFO message, filtered by the verbosity level.
    if msg and verbosity <= _maxverbosity():
        self.append(Message(INFO, msg))
        self._unread += 1

# NOTE(review): these attach the helpers as attributes of the *function*
# object Console.log; they are not bound methods, so callers must pass the
# console instance explicitly, e.g. Console.log.error(console, msg).
Console.log.error = _log_err
Console.log.warn = _log_warn
Console.log.info = _log_info
|
unknown
|
codeparrot/codeparrot-clean
| ||
- Feature Name: Enum Data Types in CockroachDB
- Status: accepted
- Start Date: 2020-03-31
- Authors: Rohan Yadav, Lucy Zhang, Andrew Werner, Jordan Lewis
- RFC PR: #47070
- Cockroach Issue: #24873
# Summary
This RFC proposes adding enum types to CockroachDB.
# Background
Enum types are a class of user defined types where the values in
the type are constrained to a fixed set
of user specified values. The system then ensures type safety over operations
on this type. This includes ensuring that only values that are members of the
enum can be inserted into a column of the enum type, and that enums can only
be compared to other values of the same enum type. For example, consider an
application that needs to store events and the days of the week that they happen.
This application could use an enum to represent the days of the week.
```sql
CREATE TYPE day ENUM AS ('monday', 'tuesday', 'wednesday'...);
CREATE TABLE events (id INT, dayofweek day);
INSERT INTO events VALUES (1, 'monday');
```
# Overview
To implement enum types in CockroachDB, we have to touch many layers
of the system. In particular, we need to introduce a way of storing
metadata about enums durably in the database. We then need a way to
cache this metadata so that lookups on this metadata is fast, as well
as a way to invalidate this cache when enum metadata changes. When
enum metadata changes, we need to ensure that these changes do not
result in some nodes in the cluster entering a situation where
they are unable to process enum values they find. Lastly, we need
to define a physical layout for enums and integrate enums within
the type system and SQL execution stack.
# Detailed Explanation
## Metadata Storage
Enums themselves are a special case of user-defined types. In order
to lay the groundwork for future work in this area, we propose storing
metadata about an enum in a new descriptor called a `TypeDescriptor`.
This descriptor will be added to the descriptor union alongside table and
database descriptors. The descriptor will store metadata about the type,
including the parent database and schema IDs, a unique ID for the type, and
the name of the type. The descriptor will also include specific information
for the kind of type being stored in the descriptor (as of now there
would only be enums). For enums, this information would include the mapping
of the enum's values to their physical representations. A proposal of the
descriptor's contents is below:
```proto
message TypeDescriptor {
// Used by all kinds of user-defined types.
// Parent database and schema.
uint32 parent_id;
uint32 parent_schema_id;
// ID and Postgres compatible OID of the type.
uint32 id;
uint32 oid;
// Visible name of the type.
string name;
// Enum specific fields.
message enum_members {
byte[] physical_encoding;
string name;
};
enum_members[] members;
}
```
These descriptors
will be stored in the `system.descriptor` table and will use the leasing
and versioning system being built. There is ongoing work on unifying
the leasing interface so that components are easily shared across
different descriptor types, and we will take advantage of these
systems once they are available. The leasing system will enable caching
and cache invalidation of type descriptors. Until the leasing system
is ready for integration, we will first implement a prototype
that either doesn't use a cache or uses a simple incoherent cache for
`TypeDescriptor` access.
## Name Resolution
Enums are scoped within a database and a schema. In Postgres, enums
cannot be accessed from other databases -- they can only be accessed from
different schemas in the same database. However, there is no core reason
that CockroachDB cannot support this. In fact, we might need to support
references of types across databases to be in line with other cross
database references that we currently support. The topic of cross database
references has come up in discussion about
[user defined schemas](https://github.com/cockroachdb/cockroach/pull/48276)
as well. The direction that we take in allowing cross database references
vs allowing only cross schema references will follow what has been decided
in that context.
Table and type names exist within the same namespace in Postgres. This means
that it is possible to create a type and table of the same name within
the same schema. Additionally, tables in Postgres are types themselves
as a record type where each field is typed like the tables columns. Therefore,
we will store type namespace entries along with table namespace entries
in the `system.namespace` table. This allows namespace conflicts between
types and tables to be properly detected, as well as allowing us to reuse
a large amount of name resolution logic that exists for table name lookup.
This strategy also will allow the user defined types implementation to
adapt to new features like user defined schemas without extra work.
## ID's and OID's
All user defined types will need a stable ID that they are uniquely addressable
by from within CockroachDB, as well as an OID that can be used for Postgres
compliant operations. Importantly, the OIDs cannot conflict
with existing type OIDs. Our strategy is to construct OIDs from the stable ID.
In particular, the OID of a user defined type is equal to
`ID + oidext.CockroachPredefinedOIDMax`. This strategy allows us to easily
map back and forth between OIDs and IDs, and avoid using multiple counters for
essentially the same information. The offset ensures that no user defined
types have OIDs that conflict with any preexisting OIDs. This approach will
naturally extend when we allow treating tables as types.
## Changing Enum Definitions
There are a few ways that enums can change over time.
* The name can change.
* The schema the enum is in can change.
* A new enum member can be added to the set of values.
* A member in the enum can be renamed.
* The enum can be dropped.
Renaming an enum or a value in an enum can be done with a write
to the enum descriptor, followed by waiting for all nodes to agree on the new value.
There are plans to lift operations on descriptor names off of the individual
descriptors, because such operations are common to all of them. This work
would involve moving the draining names off of descriptors as well. It's
possible that this work would be part of or take advantage of this effort.
The case of adding a new enum element is more difficult. The key difficulty comes
from ensuring that a node does not attempt to translate a physical layout that it
does not know about yet into a user facing representation of the enum. If we naively
just add the new enum value to the enum metadata, it is possible that another node
reads a newly written enum from disk and is unsure how to decode it. Consider the
following sequence of events:
* Node 1 receives a new enum element `foo` to its enum descriptor and blocks on
`WaitForOneVersion`
* Node 2 receives the new enum descriptor update and writes a value with `foo`
* Node 3 tries to read the value of `foo` before receiving the update to
its enum descriptor.
In order to avoid these situations, we propose an extension of the strategy
used for performing online schema changes. As a reminder, when we add a new
schema object to a table, it moves through a series of states before becoming
usable. As the object moves through these states, the types of operations
that are allowed upon the object change. Between each state, we require that
all nodes in the cluster agree on the new version of the schema object.
For more details, refer to the
[online schema changes RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20151014_online_schema_change.md).
We propose a similar state
progression to adding new elements to an enum type.
1. When a new value is added
to an enum, it is instead placed into a "read only" state.
2. After all nodes agree on the "read only" state, the new enum value
is promoted into the set of writeable values in the enum.
This process ensures that all nodes know
about all potential enum values before they have a chance to be written.
This approach has the drawback of not being able to add an enum value and
then insert that value in the same transaction. This drawback is similar
to our existing limitation of not being able to add a column and insert
into it in the same transaction.
This enum schema change will be implemented with a new job, rather than
trying to build off of the existing table schema changer. While conceptually
similar to a table schema change, there is not much implementation to share.
This new job will
1. Collect all "read only" enum values and wait for one version in the cluster.
2. Transition these values to "public", and then wait for one version in the cluster.
A rollback of this job can just remove the "read-only" values.
Additionally, enums don't really need a concept of mutations like tables. The
members of an enum in the enum's `TypeDescriptor` can be tagged with whether
the member is "read only" or public.
In Postgres, if an enum is dropped without `CASCADE`, the operation will not succeed
if there are any tables that use the enum. If an enum is dropped with
`CASCADE`, all dependent columns are dropped as well. If the database
that an enum is created within is dropped, then the enum
is dropped as well. In order to maintain this information, the
descriptors that represent an enum need to hold back-references to
the tables and columns that use them. We expect the descriptor leasing
system being developed to manage invalidation of cached enums when enums
are destroyed in these cases.
## Physical Layout
At first, it may seem that a valid implementation of enum values is
to map each to an integer, and then store these integers on disk.
This implementation seems like it would supply all the ordering
guarantees needed of enums. However, Postgres allows for adding
new enums and specifying the order of the newly created enum
with respect to an existing value of the enum. This looks like:
```sql
CREATE TYPE t ENUM AS ('v1', 'v2');
ALTER TYPE t ADD VALUE 'v1.5' AFTER 'v1'
```
This means add the value `v1.5` to the enum `t` and order it
after the value `v1`. Using just integers as the backing value
for enums would not allow us to handle this sort of case.
Postgres implements this feature on enums by storing a sorting
order for enums as a float. When a new value is added like this,
Postgres takes the sort orders of the enums that the new enum is
being inserted in between, and creates a float that bisects the
range between the two orders. Concretely, if `v1` had a sort order
of `1.5` and `v2` had a sort order of `2.0`, then `v1.5` would be
inserted with a sort order of `1.75`. However, once the floating
point precision limit has been reached, Postgres rewrites all
sort orders to integral values. Postgres can do this because it
doesn't require a stable disk encoding for enums. In our case,
we need to have a stable encoding to store data on disk if an enum
is used in an index, and cannot afford to rewrite all tables using an
enum if the enum sort order has changed.
We propose a different strategy that is related to this idea of
bisecting ranges, but doesn't suffer from problems due to floating
point arithmetic precision. The general idea is to use byte arrays
to hold the sort order of our enums, and reserve some bytes in the
arrays to create the ordering that we need. In particular we reserve
the minimum byte (`0`) and have a maximum allowed byte. In practice
this will be `255`. An example of the encoding scheme is below.
Assume we started with 3 elements (`a`, `b`, `c`), and let the maximum byte value be 3.
The sort order byte arrays for each element would be:
```
a 1/
b 2/
c 3/
```
To add an element after `b` we can create a new key that sits in the middle of the range
between `b` and `c`.
```
a 1/
b 2/
d 2/2/
c 3/
```
Now lets add more values before `d`. The first one is easy:
```
a 1/
b 2/
e 2/1/
d 2/2/
c 3/
```
The tricky case is adding a value before `e`. Because we reserved the minimum byte, we can
append it and then bisect the range again.
```
a 1/
b 2/
f 2/0/2
e 2/1/
d 2/2/
c 3/
```
This strategy can be extended indefinitely as long as this pattern is followed to reserve
the minimum byte. A prototype of the exact algorithm is included as part of the RFC PR.
This sort order byte array will be the physical layout and identifier of the enum. We expect
that for small enums only a byte or two will be used to hold all the values, and that our
compression strategies at the storage layer will compress this data well.
Since the common case of adding members to an enum is to add a member at the beginning
or end of the set of values, we can adjust the algorithm slightly to better
handle this case. When generating a new key byte where one of the endpoints is
the min or max element, the algorithm can add or subtract a small constant from
the existing key rather than bisecting the range. This allows for adding many
more elements to the beginning or end of the range without increasing the
number of bytes used to store the enum. The algorithm can be found implemented in
[this PR](https://github.com/cockroachdb/cockroach/pull/47939).
## Parsing
Currently, the CockroachDB grammar is not equipped to handle type names
that are qualified due to changes made in the past that separated parsing of
object and type identifiers. Some of these changes will have to be
reverted/adapted in order to allow for types to have qualifications again.
The work to allow the parser to recognize qualified names has been done in
[this PR](https://github.com/cockroachdb/cockroach/pull/47216).
## Type System Changes
The type system of CockroachDB currently makes an assumption that anywhere
a type is present in an AST, that type is statically known. In code, this
means that every AST object that holds a type (like a `CastExpr` or
`ColumnDef`) holds a `*types.T`, which is constructed at parse time.
As part of implementing user defined types, the type system must be taught
that all types are no longer statically known. The general idea is to change
the types in AST nodes to a new interface representing an unresolved type
reference. These type references can then be resolved into `*types.T` through
type resolution. Additionally, we must enforce that types are only attempted
to be accessed after type checking, when all type references have been resolved.
A prototype of this approach can be found in
[this PR](https://github.com/cockroachdb/cockroach/pull/47386).
After the process of type resolution, enums need a `types.T` for interaction
with other components of the system. We will introduce a new family for enums,
and the `types.T` for an enums will contain the stable ID for the
`TypeDescriptor` that backs the type. The `types.T` will also contain extra
fields for an enum like the mapping of names to values. Importantly, these
extra fields will not be serialized as part of the proto. Instead, when a
type is resolved, the returned `*types.T` will be hydrated to populate these
fields.
A potential option was to avoid using
a `TypeDescriptor` and instead just extend the `types.T` proto to contain
necessary fields for user defined types. However, this is not feasible because
the `types.T` proto's are stored on disk in various descriptors. It is too
expensive to update all descriptors that contain a type every time the type
is altered.
A new `Datum` `DEnum` will be introduced to represent values of the
enums at runtime. A `DEnum` will store the physical representation of the
enum as well as the hydrated `*types.T` of its type. The extra fields in the
`*types.T` that hold information about enum values will be used for datum
operations without the need to thread ID resolution capabilities to evaluation
of operations on datums.
When a user-defined type is created in Postgres, Postgres will automatically
create an alias for an array of the new type. For example, if a user creates
a type `days`, the system would also create the type `_days` as an alias for
`days[]`. This type tracks changes made to the referenced type as it
moves through schemas and is dropped.
## Semantic Analysis Changes
The optimizer will need to be taught about the check constraint implied by
a column being of an enum type. Additionally, it will need to be taught how
to convert enum values from their input string representation into their
`Datum` physical representation.
The `Catalog` that is used by the optimizer will need to be extended to support
resolution of types. The way that the catalog represents user defined types is
important for invalidation of cached plans. If a type is updated, all plans
containing data sources using the type need to be invalidated.
## DistSQL
The gateway node that plans a SQL query has access to all resolved type
information for the query. Remote nodes that different parts of the query
are planned on need access to this information in order to correctly execute
the query. In particular, these nodes need to hydrate their `*types.T`
containers with metadata and they need to parse and type check serialized
expressions. The hydration of `*types.T` objects can be done at operator
initialization. The trickier problem is type checking serialized expressions --
we don't want to pay the cost of name resolution again. Our strategy is to
serialize user defined type references with their OIDs similar to how column
references are serialized. All explicit references to user defined types (i.e.
in casts or user defined type value literals) will be serialized like `@<OID>`.
The expression initialization process will resolve these OID references to the
correct `TypeDescriptor`. To actually resolve these references, we access the set
of leased descriptors through a `descs.Collection` that is initialized for each
flow.
# Alternatives
## Namespacing and Metadata Storage
During discussion of the RFC, some alternatives were debated. In particular,
the ideas of using a separate namespace table for types and/or a separate
descriptor table for metadata storage. The benefit of a separate namespace
table is that it has the potential of making future work in allowing tables
to be interpreted as types more straightforward. However, using a separate
namespace table complicates existing name resolution and conflict detection
strategies. A separate descriptor table allows for scans over all tables or
types to not have to touch descriptors of different types, which is a
performance improvement for catalog table operations. However, this problem
is somewhat orthogonal to this work, and would be better solved by building
some sort of indexing structure on the `system.descriptor` table.
Using the existing namespace table allows most of the existing name resolution
code to be used directly, and using the same descriptor table allows for
leasing primitives to be built on only one system table.
## Overall Alternative
One alternative approach to this physical layout was to store just an
enum ID on disk, and store ordering and representation information in
a separate lookup table. When operations on enums would involve
joining or rendering the enums, a join would be produced against this
reference table. This allows for easy changing of enum data, but
results in a variety of complexity during planning.
# Unresolved questions
It is unclear what interactions will arise between this work and the
planned/ongoing work with user defined schemas.
|
unknown
|
github
|
https://github.com/cockroachdb/cockroach
|
docs/RFCS/20200331_enums.md
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import re
from tempest_lib import exceptions as lib_exc
from tempest.api.identity import base
from tempest import clients
from tempest.common import cred_provider
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import timeutils
from tempest import test
CONF = config.CONF
class BaseTrustsV3Test(base.BaseIdentityV3AdminTest):
def setUp(self):
    """Skip the test unless the trusts feature is enabled, then pick the
    configured alt user as the trustee for this test."""
    super(BaseTrustsV3Test, self).setUp()
    # Use alt_username as the trustee
    if not CONF.identity_feature_enabled.trust:
        raise self.skipException("Trusts aren't enabled")
    self.trustee_username = CONF.identity.alt_username
    self.trust_id = None  # set once a trust is created; checked in tearDown
def tearDown(self):
    """Delete the trust created during the test, if any."""
    if self.trust_id:
        # Do the delete in tearDown not addCleanup - we want the test to
        # fail in the event there is a bug which causes undeletable trusts
        self.delete_trust()
    super(BaseTrustsV3Test, self).tearDown()
def create_trustor_and_roles(self):
# Get trustor project ID, use the admin project
self.trustor_project_name = self.client.tenant_name
self.trustor_project_id = self.get_tenant_by_name(
self.trustor_project_name)['id']
self.assertIsNotNone(self.trustor_project_id)
# Create a trustor User
self.trustor_username = data_utils.rand_name('user-')
u_desc = self.trustor_username + 'description'
u_email = self.trustor_username + '@testmail.xx'
self.trustor_password = data_utils.rand_name('pass-')
user = self.client.create_user(
self.trustor_username,
description=u_desc,
password=self.trustor_password,
email=u_email,
project_id=self.trustor_project_id)
self.trustor_user_id = user['id']
# And two roles, one we'll delegate and one we won't
self.delegated_role = data_utils.rand_name('DelegatedRole-')
self.not_delegated_role = data_utils.rand_name('NotDelegatedRole-')
role = self.client.create_role(self.delegated_role)
self.delegated_role_id = role['id']
role = self.client.create_role(self.not_delegated_role)
self.not_delegated_role_id = role['id']
# Assign roles to trustor
self.client.assign_user_role(self.trustor_project_id,
self.trustor_user_id,
self.delegated_role_id)
self.client.assign_user_role(self.trustor_project_id,
self.trustor_user_id,
self.not_delegated_role_id)
# Get trustee user ID, use the demo user
trustee_username = self.non_admin_client.user
self.trustee_user_id = self.get_user_by_name(trustee_username)['id']
self.assertIsNotNone(self.trustee_user_id)
# Initialize a new client with the trustor credentials
creds = cred_provider.get_credentials(
username=self.trustor_username,
password=self.trustor_password,
tenant_name=self.trustor_project_name)
os = clients.Manager(credentials=creds)
self.trustor_client = os.identity_v3_client
def cleanup_user_and_roles(self):
if self.trustor_user_id:
self.client.delete_user(self.trustor_user_id)
if self.delegated_role_id:
self.client.delete_role(self.delegated_role_id)
if self.not_delegated_role_id:
self.client.delete_role(self.not_delegated_role_id)
def create_trust(self, impersonate=True, expires=None):
trust_create = self.trustor_client.create_trust(
trustor_user_id=self.trustor_user_id,
trustee_user_id=self.trustee_user_id,
project_id=self.trustor_project_id,
role_names=[self.delegated_role],
impersonation=impersonate,
expires_at=expires)
self.trust_id = trust_create['id']
return trust_create
def validate_trust(self, trust, impersonate=True, expires=None,
summary=False):
self.assertIsNotNone(trust['id'])
self.assertEqual(impersonate, trust['impersonation'])
# FIXME(shardy): ref bug #1246383 we can't check the
# microsecond component of the expiry time, because mysql
# <5.6.4 doesn't support microseconds.
# expected format 2013-12-20T16:08:36.036987Z
if expires is not None:
expires_nousec = re.sub(r'\.([0-9]){6}Z', '', expires)
self.assertTrue(trust['expires_at'].startswith(expires_nousec))
else:
self.assertIsNone(trust['expires_at'])
self.assertEqual(self.trustor_user_id, trust['trustor_user_id'])
self.assertEqual(self.trustee_user_id, trust['trustee_user_id'])
self.assertIn('v3/OS-TRUST/trusts', trust['links']['self'])
self.assertEqual(self.trustor_project_id, trust['project_id'])
if not summary:
self.assertEqual(self.delegated_role, trust['roles'][0]['name'])
self.assertEqual(1, len(trust['roles']))
def get_trust(self):
trust_get = self.trustor_client.get_trust(self.trust_id)
return trust_get
def validate_role(self, role):
self.assertEqual(self.delegated_role_id, role['id'])
self.assertEqual(self.delegated_role, role['name'])
self.assertIn('v3/roles/%s' % self.delegated_role_id,
role['links']['self'])
self.assertNotEqual(self.not_delegated_role_id, role['id'])
self.assertNotEqual(self.not_delegated_role, role['name'])
self.assertNotIn('v3/roles/%s' % self.not_delegated_role_id,
role['links']['self'])
def check_trust_roles(self):
# Check we find the delegated role
roles_get = self.trustor_client.get_trust_roles(
self.trust_id)
self.assertEqual(1, len(roles_get))
self.validate_role(roles_get[0])
role_get = self.trustor_client.get_trust_role(
self.trust_id, self.delegated_role_id)
self.validate_role(role_get)
role_get = self.trustor_client.check_trust_role(
self.trust_id, self.delegated_role_id)
# And that we don't find not_delegated_role
self.assertRaises(lib_exc.NotFound,
self.trustor_client.get_trust_role,
self.trust_id,
self.not_delegated_role_id)
self.assertRaises(lib_exc.NotFound,
self.trustor_client.check_trust_role,
self.trust_id,
self.not_delegated_role_id)
def delete_trust(self):
self.trustor_client.delete_trust(self.trust_id)
self.assertRaises(lib_exc.NotFound,
self.trustor_client.get_trust,
self.trust_id)
self.trust_id = None
class TrustsV3TestJSON(BaseTrustsV3Test):
    """Create/read/delete coverage for the OS-TRUST v3 API.

    Trusts are immutable, so there is no update test case.
    """

    def setUp(self):
        super(TrustsV3TestJSON, self).setUp()
        self.create_trustor_and_roles()
        self.addCleanup(self.cleanup_user_and_roles)

    @test.attr(type='smoke')
    def test_trust_impersonate(self):
        # Round-trip a trust with impersonation enabled (the default).
        created = self.create_trust()
        self.validate_trust(created)
        fetched = self.get_trust()
        self.validate_trust(fetched)
        self.check_trust_roles()

    @test.attr(type='smoke')
    def test_trust_noimpersonate(self):
        # Same round-trip, but with impersonation explicitly disabled.
        created = self.create_trust(impersonate=False)
        self.validate_trust(created, impersonate=False)
        fetched = self.get_trust()
        self.validate_trust(fetched, impersonate=False)
        self.check_trust_roles()

    @test.attr(type='smoke')
    def test_trust_expire(self):
        # A trust created with an expiry must report it back on reads.
        expiry = timeutils.utcnow() + datetime.timedelta(hours=1)
        expiry_str = timeutils.isotime(at=expiry, subsecond=True)
        created = self.create_trust(expires=expiry_str)
        self.validate_trust(created, expires=expiry_str)
        fetched = self.get_trust()
        self.validate_trust(fetched, expires=expiry_str)
        self.check_trust_roles()

    @test.attr(type='smoke')
    def test_trust_expire_invalid(self):
        # An invalid expiry timestamp must be rejected with BadRequest.
        bad_expiry = 'bad.123Z'
        self.assertRaises(lib_exc.BadRequest,
                          self.create_trust,
                          expires=bad_expiry)

    @test.attr(type='smoke')
    def test_get_trusts_query(self):
        # Filtering the trust list by trustor returns exactly our trust.
        self.create_trust()
        listed = self.trustor_client.get_trusts(
            trustor_user_id=self.trustor_user_id)
        self.assertEqual(1, len(listed))
        self.validate_trust(listed[0], summary=True)

    @test.attr(type='smoke')
    def test_get_trusts_all(self):
        # The unfiltered (admin) list must contain our trust exactly once.
        self.create_trust()
        all_trusts = self.client.get_trusts()
        matching = [t for t in all_trusts if t['id'] == self.trust_id]
        self.assertEqual(1, len(matching))
        self.validate_trust(matching[0], summary=True)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Provides unit tests for SSL based authentication portions
of the external_auth app.
"""
import copy
import unittest
from django.conf import settings
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.urlresolvers import reverse
from django.test.client import Client
from django.test.client import RequestFactory
from django.test.utils import override_settings
from edxmako.middleware import MakoMiddleware
from external_auth.models import ExternalAuthMap
import external_auth.views
from mock import Mock
from student.models import CourseEnrollment
from student.roles import CourseStaffRole
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
# Feature-flag permutations applied via @override_settings on the tests below.
FEATURES_WITH_SSL_AUTH = settings.FEATURES.copy()
FEATURES_WITH_SSL_AUTH['AUTH_USE_CERTIFICATES'] = True
# SSL auth plus automatic account creation on first certificate login.
FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP = FEATURES_WITH_SSL_AUTH.copy()
FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP['AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'] = True
# Immediate signup plus skipping the activation email for external auth.
FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE = FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP.copy()
FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE['BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'] = True
# Control case: certificate authentication disabled.
FEATURES_WITHOUT_SSL_AUTH = settings.FEATURES.copy()
FEATURES_WITHOUT_SSL_AUTH['AUTH_USE_CERTIFICATES'] = False
# deepcopy: the nested 'general' cache dict is mutated, so a shallow copy
# would leak the change back into settings.CACHES.
CACHES_ENABLE_GENERAL = copy.deepcopy(settings.CACHES)
CACHES_ENABLE_GENERAL['general']['BACKEND'] = 'django.core.cache.backends.locmem.LocMemCache'
@override_settings(FEATURES=FEATURES_WITH_SSL_AUTH)
@override_settings(CACHES=CACHES_ENABLE_GENERAL)
class SSLClientTest(ModuleStoreTestCase):
    """
    Tests SSL Authentication code sections of external_auth
    """
    AUTH_DN = '/C=US/ST=Massachusetts/O=Massachusetts Institute of Technology/OU=Client CA v1/CN={0}/emailAddress={1}'
    USER_NAME = 'test_user_ssl'
    USER_EMAIL = 'test_user_ssl@EDX.ORG'
    MOCK_URL = '/'

    def _create_ssl_request(self, url):
        """Creates a basic request for SSL use."""
        request = self.factory.get(url)
        request.META['SSL_CLIENT_S_DN'] = self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
        request.user = AnonymousUser()
        middleware = SessionMiddleware()
        middleware.process_request(request)
        request.session.save()
        MakoMiddleware().process_request(request)
        return request

    def _create_normal_request(self, url):
        """Creates sessioned request without SSL headers"""
        request = self.factory.get(url)
        request.user = AnonymousUser()
        middleware = SessionMiddleware()
        middleware.process_request(request)
        request.session.save()
        MakoMiddleware().process_request(request)
        return request

    def _assert_eamap_user_exists(self):
        """Assert the test user exists in ExternalAuthMap, failing (not erroring) if missing."""
        try:
            ExternalAuthMap.objects.get(external_id=self.USER_EMAIL)
        except ExternalAuthMap.DoesNotExist as ex:
            self.fail('User did not get properly added to external auth map, exception was {0}'.format(str(ex)))

    def _assert_internal_user_exists(self):
        """Assert the test user exists in the internal User table, failing if missing."""
        try:
            User.objects.get(email=self.USER_EMAIL)
        # Fixed: previously caught ExternalAuthMap.DoesNotExist here, so a
        # missing User errored the test instead of failing it with a message.
        except User.DoesNotExist as ex:
            self.fail('User did not get properly added to internal users, exception was {0}'.format(str(ex)))

    def setUp(self):
        """Setup test case by adding primary user."""
        super(SSLClientTest, self).setUp()
        self.client = Client()
        self.factory = RequestFactory()
        self.mock = Mock()

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_ssl_login_with_signup_lms(self):
        """
        Validate that an SSL login creates an eamap user and
        redirects them to the signup page.
        """
        response = external_auth.views.ssl_login(self._create_ssl_request('/'))
        # Response should contain template for signup form, eamap should have user, and internal
        # auth should not have a user
        self.assertIn('<form role="form" id="register-form" method="post"', response.content)
        self._assert_eamap_user_exists()
        with self.assertRaises(User.DoesNotExist):
            User.objects.get(email=self.USER_EMAIL)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'cms.urls', 'Test only valid in cms')
    def test_ssl_login_with_signup_cms(self):
        """
        Validate that an SSL login creates an eamap user and
        redirects them to the signup page on CMS.
        """
        self.client.get(
            reverse('contentstore.views.login_page'),
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
        )
        self._assert_eamap_user_exists()
        with self.assertRaises(User.DoesNotExist):
            User.objects.get(email=self.USER_EMAIL)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
    def test_ssl_login_without_signup_lms(self):
        """
        Test IMMEDIATE_SIGNUP feature flag and ensure the user account is automatically created
        and the user is redirected to slash.
        """
        external_auth.views.ssl_login(self._create_ssl_request('/'))
        # Assert our user exists in both eamap and Users, and that we are logged in
        self._assert_eamap_user_exists()
        self._assert_internal_user_exists()

    @unittest.skipUnless(settings.ROOT_URLCONF == 'cms.urls', 'Test only valid in cms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
    def test_ssl_login_without_signup_cms(self):
        """
        Test IMMEDIATE_SIGNUP feature flag and ensure the user account is
        automatically created on CMS, and that we are redirected
        to courses.
        """
        response = self.client.get(
            reverse('contentstore.views.login_page'),
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
        )
        self.assertEqual(response.status_code, 302)
        self.assertIn('/course', response['location'])
        # Assert our user exists in both eamap and Users, and that we are logged in
        self._assert_eamap_user_exists()
        self._assert_internal_user_exists()

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
    def test_default_login_decorator_ssl(self):
        """
        Make sure that SSL login happens if it is enabled on protected
        views instead of showing the login form.
        """
        # Fixed: the original passed the misspelled kwarg ``follows=True``,
        # which the test client silently stuffed into the WSGI environ.  The
        # assertions below check the raw redirect, so no follow is wanted.
        response = self.client.get(reverse('dashboard'))
        self.assertEqual(response.status_code, 302)
        self.assertIn(reverse('signin_user'), response['location'])
        response = self.client.get(
            reverse('dashboard'), follow=True,
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
        self.assertEqual(('http://testserver/dashboard', 302),
                         response.redirect_chain[-1])
        self.assertIn(SESSION_KEY, self.client.session)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
    def test_registration_page_bypass(self):
        """
        This tests to make sure when immediate signup is on that
        the user doesn't get presented with the registration page.
        """
        response = self.client.get(
            reverse('register_user'), follow=True,
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
        self.assertEqual(('http://testserver/dashboard', 302),
                         response.redirect_chain[-1])
        self.assertIn(SESSION_KEY, self.client.session)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'cms.urls', 'Test only valid in cms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
    def test_cms_registration_page_bypass(self):
        """
        This tests to make sure when immediate signup is on that
        the user doesn't get presented with the registration page.
        """
        response = self.client.get(
            reverse('signup'), follow=True,
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
        )
        self.assertEqual(response.status_code, 404)
        # assert that we are logged in
        self.assertIn(SESSION_KEY, self.client.session)
        # Now that we are logged in, make sure we don't see the registration page
        response = self.client.get(reverse('signup'), follow=True)
        self.assertEqual(response.status_code, 404)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
    def test_signin_page_bypass(self):
        """
        This tests to make sure when ssl authentication is on
        that user doesn't get presented with the login page if they
        have a certificate.
        """
        # Test that they do signin if they don't have a cert
        response = self.client.get(reverse('signin_user'))
        self.assertEqual(200, response.status_code)
        self.assertTrue('login-and-registration-container' in response.content)
        # And get directly logged in otherwise
        response = self.client.get(
            reverse('signin_user'), follow=True,
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
        self.assertEqual(('http://testserver/dashboard', 302),
                         response.redirect_chain[-1])
        self.assertIn(SESSION_KEY, self.client.session)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
    def test_ssl_bad_eamap(self):
        """
        This tests the response when a user exists but their eamap
        password doesn't match their internal password.
        The internal password use for certificates has been removed
        and this should not fail.
        """
        # Create account, break internal password, and activate account
        external_auth.views.ssl_login(self._create_ssl_request('/'))
        user = User.objects.get(email=self.USER_EMAIL)
        user.set_password('not autogenerated')
        user.is_active = True
        user.save()
        # Make sure we can still login
        self.client.get(
            reverse('signin_user'), follow=True,
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
        self.assertIn(SESSION_KEY, self.client.session)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @override_settings(FEATURES=FEATURES_WITHOUT_SSL_AUTH)
    def test_ssl_decorator_no_certs(self):
        """Make sure no external auth happens without SSL enabled"""
        dec_mock = external_auth.views.ssl_login_shortcut(self.mock)
        request = self._create_normal_request(self.MOCK_URL)
        request.user = AnonymousUser()
        # Call decorated mock function to make sure it passes
        # the call through without hitting the external_auth functions and
        # thereby creating an external auth map object.
        dec_mock(request)
        self.assertTrue(self.mock.called)
        self.assertEqual(0, len(ExternalAuthMap.objects.all()))

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    def test_ssl_login_decorator(self):
        """Create mock function to test ssl login decorator"""
        dec_mock = external_auth.views.ssl_login_shortcut(self.mock)
        # Test that anonymous without cert doesn't create authmap
        request = self._create_normal_request(self.MOCK_URL)
        dec_mock(request)
        self.assertTrue(self.mock.called)
        self.assertEqual(0, len(ExternalAuthMap.objects.all()))
        # Test valid user
        self.mock.reset_mock()
        request = self._create_ssl_request(self.MOCK_URL)
        dec_mock(request)
        self.assertFalse(self.mock.called)
        self.assertEqual(1, len(ExternalAuthMap.objects.all()))
        # Test logged in user gets called
        self.mock.reset_mock()
        request = self._create_ssl_request(self.MOCK_URL)
        request.user = UserFactory()
        dec_mock(request)
        self.assertTrue(self.mock.called)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_IMMEDIATE_SIGNUP)
    def test_ssl_decorator_auto_signup(self):
        """
        Test that with auto signup the decorator
        will bypass registration and call retfun.
        """
        dec_mock = external_auth.views.ssl_login_shortcut(self.mock)
        request = self._create_ssl_request(self.MOCK_URL)
        dec_mock(request)
        # Assert our user exists in both eamap and Users
        self._assert_eamap_user_exists()
        self._assert_internal_user_exists()
        self.assertEqual(1, len(ExternalAuthMap.objects.all()))
        self.assertTrue(self.mock.called)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE)
    def test_ssl_lms_redirection(self):
        """
        Auto signup auth user and ensure they return to the original
        url they visited after being logged in.
        """
        course = CourseFactory.create(
            org='MITx',
            number='999',
            display_name='Robot Super Course'
        )
        external_auth.views.ssl_login(self._create_ssl_request('/'))
        user = User.objects.get(email=self.USER_EMAIL)
        CourseEnrollment.enroll(user, course.id)
        course_private_url = '/courses/MITx/999/Robot_Super_Course/courseware'
        self.assertFalse(SESSION_KEY in self.client.session)
        response = self.client.get(
            course_private_url,
            follow=True,
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL),
            HTTP_ACCEPT='text/html'
        )
        self.assertEqual(('http://testserver{0}'.format(course_private_url), 302),
                         response.redirect_chain[-1])
        self.assertIn(SESSION_KEY, self.client.session)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'cms.urls', 'Test only valid in cms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE)
    def test_ssl_cms_redirection(self):
        """
        Auto signup auth user and ensure they return to the original
        url they visited after being logged in.
        """
        course = CourseFactory.create(
            org='MITx',
            number='999',
            display_name='Robot Super Course'
        )
        external_auth.views.ssl_login(self._create_ssl_request('/'))
        user = User.objects.get(email=self.USER_EMAIL)
        CourseEnrollment.enroll(user, course.id)
        CourseStaffRole(course.id).add_users(user)
        course_private_url = reverse('course_handler', args=(unicode(course.id),))
        self.assertFalse(SESSION_KEY in self.client.session)
        response = self.client.get(
            course_private_url,
            follow=True,
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL),
            HTTP_ACCEPT='text/html'
        )
        self.assertEqual(('http://testserver{0}'.format(course_private_url), 302),
                         response.redirect_chain[-1])
        self.assertIn(SESSION_KEY, self.client.session)

    @unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
    @override_settings(FEATURES=FEATURES_WITH_SSL_AUTH_AUTO_ACTIVATE)
    def test_ssl_logout(self):
        """
        Because the branding view is cached for anonymous users and we
        use that to login users, the browser wasn't actually making the
        request to that view as the redirect was being cached. This caused
        a redirect loop, and this test confirms that that won't happen.
        Test is only in LMS because we don't use / in studio to login SSL users.
        """
        response = self.client.get(
            reverse('dashboard'), follow=True,
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))
        self.assertEqual(('http://testserver/dashboard', 302),
                         response.redirect_chain[-1])
        self.assertIn(SESSION_KEY, self.client.session)
        response = self.client.get(
            reverse('logout'), follow=True,
            SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)
        )
        # Make sure that even though we logged out, we have logged back in
        self.assertIn(SESSION_KEY, self.client.session)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#! /usr/bin/env python3
# Format du output in a tree shape
import os, sys, errno
def main():
    """Run ``du`` on the command-line arguments and print its output as a tree."""
    # NOTE(review): the command is still built as a shell string, so paths
    # are subject to shell interpretation; acceptable for a personal CLI tool.
    pipe = os.popen('du ' + ' '.join(sys.argv[1:]), 'r')
    total, tree = None, {}
    for line in pipe:  # iterate lazily instead of readlines()
        # Each du line is "<size><whitespace><path>".
        i = 0
        while line[i] in '0123456789':
            i = i + 1
        size = int(line[:i])  # was eval(): int() is safe on untrusted output
        while line[i] in ' \t':
            i = i + 1
        filename = line[i:-1]
        comps = filename.split('/')
        if comps[0] == '':
            comps[0] = '/'
        if comps[-1] == '':
            del comps[-1]
        total, tree = store(size, comps, total, tree)
    pipe.close()  # was leaked previously
    try:
        display(total, tree)
    except IOError as e:
        # A broken pipe (e.g. output piped into `head`) is not an error.
        if e.errno != errno.EPIPE:
            raise
def store(size, comps, total, d):
    """Insert *size* into the nested (total, children) tree along path *comps*.

    Each tree node is a ``(total, children_dict)`` pair; the node named by
    the full path receives *size* as its total. Returns the updated
    ``(total, d)`` pair for this level.
    """
    if not comps:
        # End of the path: this node's total is the size just read.
        return size, d
    head, tail = comps[0], comps[1:]
    subtotal, children = d.setdefault(head, (None, {}))
    d[head] = store(size, tail, subtotal, children)
    return total, d
def display(total, d):
    """Print the entire size tree, starting from an empty prefix."""
    show(total, d, '')
def show(total, d, prefix):
    """Print the subtree *d* indented under *prefix*, largest sizes first.

    *d* maps a path component to a ``(subtotal, children)`` pair as built
    by ``store``. Entries whose subtotal is None are silently descended
    into without printing a line of their own.
    """
    if not d:
        return
    # Renamed from the builtin-shadowing names ``list``/``sum``; the ``sum``
    # accumulator was dead code (only referenced by commented-out lines) and
    # has been dropped.
    entries = [(tsub, key) for key, (tsub, _) in d.items()]
    # Keys are unique, so no two tuples compare equal and sort(reverse=True)
    # is exactly equivalent to the old sort()-then-reverse().
    entries.sort(reverse=True)
    width = len(repr(entries[0][0]))
    for tsub, key in entries:
        if tsub is None:
            psub = prefix
        else:
            print(prefix + repr(tsub).rjust(width) + ' ' + key)
            psub = prefix + ' ' * (width - 1) + '|' + ' ' * (len(key) + 1)
        # The old ``if key in d`` guard was always true: key came from d.
        show(tsub, d[key][1], psub)
# Allow use both as a command-line script and as an importable module.
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Principal Component Analysis."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from math import lgamma, log, sqrt
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from scipy.sparse.linalg import svds
from sklearn.base import _fit_context
from sklearn.decomposition._base import _BasePCA
from sklearn.utils import check_random_state
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils._array_api import device, get_namespace
from sklearn.utils._param_validation import Interval, RealNotInt, StrOptions
from sklearn.utils.extmath import _randomized_svd, fast_logdet, svd_flip
from sklearn.utils.sparsefuncs import _implicit_column_offset, mean_variance_axis
from sklearn.utils.validation import check_is_fitted, validate_data
def _assess_dimension(spectrum, rank, n_samples):
    """Compute the log-likelihood of a rank ``rank`` dataset.

    The dataset is assumed to be embedded in gaussian noise of shape(n,
    dimf) having spectrum ``spectrum``. This implements the method of
    T. P. Minka.

    Parameters
    ----------
    spectrum : ndarray of shape (n_features,)
        Data spectrum.
    rank : int
        Tested rank value. It should be strictly lower than n_features,
        otherwise the method isn't specified (division by zero in equation
        (31) from the paper).
    n_samples : int
        Number of samples.

    Returns
    -------
    ll : float
        The log-likelihood.

    References
    ----------
    This implements the method of `Thomas P. Minka:
    Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
    <https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_
    """
    xp, _ = get_namespace(spectrum)
    n_features = spectrum.shape[0]
    if not 1 <= rank < n_features:
        raise ValueError("the tested rank should be in [1, n_features - 1]")
    eps = 1e-15
    if spectrum[rank - 1] < eps:
        # When the tested rank is associated with a small eigenvalue, there's
        # no point in computing the log-likelihood: it's going to be very
        # small and won't be the max anyway. Also, it can lead to numerical
        # issues below when computing pa, in particular in log((spectrum[i] -
        # spectrum[j]) because this will take the log of something very small.
        return -xp.inf
    # The pu/pl/pv/pp/pa terms below follow the notation of the referenced
    # Minka paper; they are summed into the final log-likelihood.
    pu = -rank * log(2.0)
    for i in range(1, rank + 1):
        pu += (
            lgamma((n_features - i + 1) / 2.0) - log(xp.pi) * (n_features - i + 1) / 2.0
        )
    # Log-likelihood contribution of the kept part of the spectrum.
    pl = xp.sum(xp.log(spectrum[:rank]))
    pl = -pl * n_samples / 2.0
    # v: mean of the discarded eigenvalues, floored at eps to keep log(v)
    # finite (per the paper this plays the role of the noise variance).
    v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank))
    pv = -log(v) * n_samples * (n_features - rank) / 2.0
    # m: number of free parameters of the rank-``rank`` model.
    m = n_features * rank - rank * (rank + 1.0) / 2.0
    pp = log(2.0 * xp.pi) * (m + rank) / 2.0
    pa = 0.0
    # Copy of the spectrum with the discarded tail replaced by v, used for
    # the pairwise term below.
    spectrum_ = xp.asarray(spectrum, copy=True)
    spectrum_[rank:n_features] = v
    for i in range(rank):
        for j in range(i + 1, spectrum.shape[0]):
            pa += log(
                (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i])
            ) + log(n_samples)
    ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0
    return ll
def _infer_dimension(spectrum, n_samples):
    """Infers the dimension of a dataset with a given spectrum.

    The returned value will be in [1, n_features - 1].
    """
    xp, _ = get_namespace(spectrum)
    log_likelihoods = xp.empty_like(spectrum)
    # Rank 0 is never a valid answer, so give it the worst possible score.
    log_likelihoods[0] = -xp.inf
    for candidate_rank in range(1, spectrum.shape[0]):
        log_likelihoods[candidate_rank] = _assess_dimension(
            spectrum, candidate_rank, n_samples
        )
    return xp.argmax(log_likelihoods)
class PCA(_BasePCA):
"""Principal component analysis (PCA).
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the SVD.
It uses the LAPACK implementation of the full SVD or a randomized truncated
SVD by the method of Halko et al. 2009, depending on the shape of the input
data and the number of components to extract.
With sparse inputs, the ARPACK implementation of the truncated SVD can be
used (i.e. through :func:`scipy.sparse.linalg.svds`). Alternatively, one
may consider :class:`TruncatedSVD` where the data are not centered.
Notice that this class only supports sparse inputs for some solvers such as
"arpack" and "covariance_eigh". See :class:`TruncatedSVD` for an
alternative with sparse data.
For a usage example, see
:ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py`
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, float or 'mle', default=None
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's
MLE is used to guess the dimension. Use of ``n_components == 'mle'``
will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.
If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the
number of components such that the amount of variance that needs to be
explained is greater than the percentage specified by n_components.
If ``svd_solver == 'arpack'``, the number of components must be
strictly less than the minimum of n_features and n_samples.
Hence, the None case results in::
n_components == min(n_samples, n_features) - 1
copy : bool, default=True
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, default=False
When True (False by default) the `components_` vectors are multiplied
by the square root of n_samples and then divided by the singular values
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
svd_solver : {'auto', 'full', 'covariance_eigh', 'arpack', 'randomized'},\
default='auto'
"auto" :
The solver is selected by a default 'auto' policy is based on `X.shape` and
`n_components`: if the input data has fewer than 1000 features and
more than 10 times as many samples, then the "covariance_eigh"
solver is used. Otherwise, if the input data is larger than 500x500
and the number of components to extract is lower than 80% of the
smallest dimension of the data, then the more efficient
"randomized" method is selected. Otherwise the exact "full" SVD is
computed and optionally truncated afterwards.
"full" :
Run exact full SVD calling the standard LAPACK solver via
`scipy.linalg.svd` and select the components by postprocessing
"covariance_eigh" :
Precompute the covariance matrix (on centered data), run a
classical eigenvalue decomposition on the covariance matrix
typically using LAPACK and select the components by postprocessing.
This solver is very efficient for n_samples >> n_features and small
n_features. It is, however, not tractable otherwise for large
n_features (large memory footprint required to materialize the
covariance matrix). Also note that compared to the "full" solver,
this solver effectively doubles the condition number and is
therefore less numerical stable (e.g. on input data with a large
range of singular values).
"arpack" :
Run SVD truncated to `n_components` calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
`0 < n_components < min(X.shape)`
"randomized" :
Run randomized SVD by the method of Halko et al.
.. versionadded:: 0.18.0
.. versionchanged:: 1.5
Added the 'covariance_eigh' solver.
tol : float, default=0.0
Tolerance for singular values computed by svd_solver == 'arpack'.
Must be of range [0.0, infinity).
.. versionadded:: 0.18.0
iterated_power : int or 'auto', default='auto'
Number of iterations for the power method computed by
svd_solver == 'randomized'.
Must be of range [0, infinity).
.. versionadded:: 0.18.0
n_oversamples : int, default=10
This parameter is only relevant when `svd_solver="randomized"`.
It corresponds to the additional number of random vectors to sample the
range of `X` so as to ensure proper conditioning. See
:func:`~sklearn.utils.extmath.randomized_svd` for more details.
.. versionadded:: 1.1
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Power iteration normalizer for randomized SVD solver.
Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
for more details.
.. versionadded:: 1.1
random_state : int, RandomState instance or None, default=None
Used when the 'arpack' or 'randomized' solvers are used. Pass an int
for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
.. versionadded:: 0.18.0
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Principal axes in feature space, representing the directions of
maximum variance in the data. Equivalently, the right singular
vectors of the centered input data, parallel to its eigenvectors.
The components are sorted by decreasing ``explained_variance_``.
explained_variance_ : ndarray of shape (n_components,)
The amount of variance explained by each of the selected components.
The variance estimation uses `n_samples - 1` degrees of freedom.
Equal to n_components largest eigenvalues
of the covariance matrix of X.
.. versionadded:: 0.18
explained_variance_ratio_ : ndarray of shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of the ratios is equal to 1.0.
singular_values_ : ndarray of shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
.. versionadded:: 0.19
mean_ : ndarray of shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. When n_components is set
to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
number is estimated from input data. Otherwise it equals the parameter
n_components, or the lesser value of n_features and n_samples
if n_components is None.
n_samples_ : int
Number of samples in the training data.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Equal to the average of (min(n_features, n_samples) - n_components)
smallest eigenvalues of the covariance matrix of X.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
KernelPCA : Kernel Principal Component Analysis.
SparsePCA : Sparse Principal Component Analysis.
TruncatedSVD : Dimensionality reduction using truncated SVD.
IncrementalPCA : Incremental Principal Component Analysis.
References
----------
For n_components == 'mle', this class uses the method from:
`Minka, T. P.. "Automatic choice of dimensionality for PCA".
In NIPS, pp. 598-604 <https://tminka.github.io/papers/pca/minka-pca.pdf>`_
Implements the probabilistic PCA model from:
`Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal
component analysis". Journal of the Royal Statistical Society:
Series B (Statistical Methodology), 61(3), 611-622.
<http://www.miketipping.com/papers/met-mppca.pdf>`_
via the score and score_samples methods.
For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
For svd_solver == 'randomized', see:
:doi:`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).
"Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions".
SIAM review, 53(2), 217-288.
<10.1137/090771806>`
and also
:doi:`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).
"A randomized algorithm for the decomposition of matrices".
Applied and Computational Harmonic Analysis, 30(1), 47-68.
<10.1016/j.acha.2010.02.003>`
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(n_components=2)
>>> print(pca.explained_variance_ratio_)
[0.9924 0.0075]
>>> print(pca.singular_values_)
[6.30061 0.54980]
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca.fit(X)
PCA(n_components=2, svd_solver='full')
>>> print(pca.explained_variance_ratio_)
[0.9924 0.00755]
>>> print(pca.singular_values_)
[6.30061 0.54980]
>>> pca = PCA(n_components=1, svd_solver='arpack')
>>> pca.fit(X)
PCA(n_components=1, svd_solver='arpack')
>>> print(pca.explained_variance_ratio_)
[0.99244]
>>> print(pca.singular_values_)
[6.30061]
"""
_parameter_constraints: dict = {
"n_components": [
Interval(Integral, 0, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="neither"),
StrOptions({"mle"}),
None,
],
"copy": ["boolean"],
"whiten": ["boolean"],
"svd_solver": [
StrOptions({"auto", "full", "covariance_eigh", "arpack", "randomized"})
],
"tol": [Interval(Real, 0, None, closed="left")],
"iterated_power": [
StrOptions({"auto"}),
Interval(Integral, 0, None, closed="left"),
],
"n_oversamples": [Interval(Integral, 1, None, closed="left")],
"power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})],
"random_state": ["random_state"],
}
    def __init__(
        self,
        n_components=None,
        *,
        copy=True,
        whiten=False,
        svd_solver="auto",
        tol=0.0,
        iterated_power="auto",
        n_oversamples=10,
        power_iteration_normalizer="auto",
        random_state=None,
    ):
        # Per scikit-learn convention, ``__init__`` only stores the
        # hyperparameters verbatim; validation and all computation are
        # deferred to ``fit`` (see ``_parameter_constraints`` above).
        self.n_components = n_components
        self.copy = copy
        self.whiten = whiten
        self.svd_solver = svd_solver
        self.tol = tol
        self.iterated_power = iterated_power
        self.n_oversamples = n_oversamples
        self.power_iteration_normalizer = power_iteration_normalizer
        self.random_state = random_state
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the model with X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Ignored.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # ``_fit`` also returns the SVD factorization, but ``fit`` only needs
        # the fitted attributes it sets as a side effect.
        self._fit(X)
        return self
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : Ignored
Ignored.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed values.
Notes
-----
This method returns a Fortran-ordered array. To convert it to a
C-ordered array, use 'np.ascontiguousarray'.
"""
U, S, _, X, x_is_centered, xp = self._fit(X)
if U is not None:
U = U[:, : self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * Vt * V = U * S
U *= S[: self.n_components_]
return U
else: # solver="covariance_eigh" does not compute U at fit time.
return self._transform(X, xp, x_is_centered=x_is_centered)
def _fit(self, X):
"""Dispatch to the right submethod depending on the chosen solver."""
xp, is_array_api_compliant = get_namespace(X)
# Raise an error for sparse input and unsupported svd_solver
if issparse(X) and self.svd_solver not in ["auto", "arpack", "covariance_eigh"]:
raise TypeError(
'PCA only support sparse inputs with the "arpack" and'
f' "covariance_eigh" solvers, while "{self.svd_solver}" was passed. See'
" TruncatedSVD for a possible alternative."
)
if self.svd_solver == "arpack" and is_array_api_compliant:
raise ValueError(
"PCA with svd_solver='arpack' is not supported for Array API inputs."
)
# Validate the data, without ever forcing a copy as any solver that
# supports sparse input data and the `covariance_eigh` solver are
# written in a way to avoid the need for any inplace modification of
# the input data contrary to the other solvers.
# The copy will happen
# later, only if needed, once the solver negotiation below is done.
X = validate_data(
self,
X,
dtype=[xp.float64, xp.float32],
force_writeable=True,
accept_sparse=("csr", "csc"),
ensure_2d=True,
copy=False,
)
self._fit_svd_solver = self.svd_solver
if self._fit_svd_solver == "auto" and issparse(X):
self._fit_svd_solver = "arpack"
if self.n_components is None:
if self._fit_svd_solver != "arpack":
n_components = min(X.shape)
else:
n_components = min(X.shape) - 1
else:
n_components = self.n_components
if self._fit_svd_solver == "auto":
# Tall and skinny problems are best handled by precomputing the
# covariance matrix.
if X.shape[1] <= 1_000 and X.shape[0] >= 10 * X.shape[1]:
self._fit_svd_solver = "covariance_eigh"
# Small problem or n_components == 'mle', just call full PCA
elif max(X.shape) <= 500 or n_components == "mle":
self._fit_svd_solver = "full"
elif 1 <= n_components < 0.8 * min(X.shape):
self._fit_svd_solver = "randomized"
# This is also the case of n_components in (0, 1)
else:
self._fit_svd_solver = "full"
# Call different fits for either full or truncated SVD
if self._fit_svd_solver in ("full", "covariance_eigh"):
return self._fit_full(X, n_components, xp, is_array_api_compliant)
elif self._fit_svd_solver in ["arpack", "randomized"]:
return self._fit_truncated(X, n_components, xp)
    def _fit_full(self, X, n_components, xp, is_array_api_compliant):
        """Fit the model by computing full SVD on X.

        Shared implementation for the "full" and "covariance_eigh" solvers.

        Returns the tuple ``(U, S, Vt, X, x_is_centered, xp)`` where ``U`` is
        None on the "covariance_eigh" path (it never materializes U at fit
        time), ``x_is_centered`` records whether ``X`` was centered in place,
        and ``xp`` is the array namespace used for the computation.
        """
        n_samples, n_features = X.shape
        if n_components == "mle":
            if n_samples < n_features:
                raise ValueError(
                    "n_components='mle' is only supported if n_samples >= n_features"
                )
        elif not 0 <= n_components <= min(n_samples, n_features):
            raise ValueError(
                f"n_components={n_components} must be between 0 and "
                f"min(n_samples, n_features)={min(n_samples, n_features)} with "
                f"svd_solver={self._fit_svd_solver!r}"
            )
        self.mean_ = xp.mean(X, axis=0)
        # When X is a scipy sparse matrix, self.mean_ is a numpy matrix, so we need
        # to transform it to a 1D array. Note that this is not the case when X
        # is a scipy sparse array.
        # TODO: remove the following two lines when scikit-learn only depends
        # on scipy versions that no longer support scipy.sparse matrices.
        self.mean_ = xp.reshape(xp.asarray(self.mean_), (-1,))
        if self._fit_svd_solver == "full":
            # Center the data, copying it first unless the caller allowed
            # in-place modification with copy=False.
            X_centered = xp.asarray(X, copy=True) if self.copy else X
            X_centered -= self.mean_
            x_is_centered = not self.copy
            if not is_array_api_compliant:
                # Use scipy.linalg with NumPy/SciPy inputs for the sake of not
                # introducing unanticipated behavior changes. In the long run we
                # could instead decide to always use xp.linalg.svd for all inputs,
                # but that would make this code rely on numpy's SVD instead of
                # scipy's. It's not 100% clear whether they use the same LAPACK
                # solver by default though (assuming both are built against the
                # same BLAS).
                U, S, Vt = linalg.svd(X_centered, full_matrices=False)
            else:
                U, S, Vt = xp.linalg.svd(X_centered, full_matrices=False)
            explained_variance_ = (S**2) / (n_samples - 1)
        else:
            assert self._fit_svd_solver == "covariance_eigh"
            # In the following, we center the covariance matrix C afterwards
            # (without centering the data X first) to avoid an unnecessary copy
            # of X. Note that the mean_ attribute is still needed to center
            # test data in the transform method.
            #
            # Note: at the time of writing, `xp.cov` does not exist in the
            # Array API standard:
            # https://github.com/data-apis/array-api/issues/43
            #
            # Besides, using `numpy.cov`, as of numpy 1.26.0, would not be
            # memory efficient for our use case when `n_samples >> n_features`:
            # `numpy.cov` centers a copy of the data before computing the
            # matrix product instead of subtracting a small `(n_features,
            # n_features)` square matrix from the gram matrix X.T @ X, as we do
            # below.
            x_is_centered = False
            C = X.T @ X
            C -= (
                n_samples
                * xp.reshape(self.mean_, (-1, 1))
                * xp.reshape(self.mean_, (1, -1))
            )
            C /= n_samples - 1
            eigenvals, eigenvecs = xp.linalg.eigh(C)
            # When X is a scipy sparse matrix, the following two datastructures
            # are returned as instances of the soft-deprecated numpy.matrix
            # class. Note that this problem does not occur when X is a scipy
            # sparse array (or another other kind of supported array).
            # TODO: remove the following two lines when scikit-learn only
            # depends on scipy versions that no longer support scipy.sparse
            # matrices.
            eigenvals = xp.reshape(xp.asarray(eigenvals), (-1,))
            eigenvecs = xp.asarray(eigenvecs)
            # eigh returns eigenvalues in ascending order; flip to match the
            # descending singular-value convention of the SVD-based solvers.
            eigenvals = xp.flip(eigenvals, axis=0)
            eigenvecs = xp.flip(eigenvecs, axis=1)
            # The covariance matrix C is positive semi-definite by
            # construction. However, the eigenvalues returned by xp.linalg.eigh
            # can be slightly negative due to numerical errors. This would be
            # an issue for the subsequent sqrt, hence the manual clipping.
            eigenvals[eigenvals < 0.0] = 0.0
            explained_variance_ = eigenvals
            # Re-construct SVD of centered X indirectly and make it consistent
            # with the other solvers.
            S = xp.sqrt(eigenvals * (n_samples - 1))
            Vt = eigenvecs.T
            U = None
        # flip eigenvectors' sign to enforce deterministic output
        U, Vt = svd_flip(U, Vt, u_based_decision=False)
        components_ = Vt
        # Get variance explained by singular values
        total_var = xp.sum(explained_variance_)
        explained_variance_ratio_ = explained_variance_ / total_var
        singular_values_ = xp.asarray(S, copy=True)  # Store the singular values.
        # Postprocess the number of components required
        if n_components == "mle":
            n_components = _infer_dimension(explained_variance_, n_samples)
        elif 0 < n_components < 1.0:
            # number of components for which the cumulated explained
            # variance percentage is superior to the desired threshold
            # side='right' ensures that number of features selected
            # their variance is always greater than n_components float
            # passed. More discussion in issue: #15669
            ratio_cumsum = xp.cumulative_sum(explained_variance_ratio_)
            n_components = (
                xp.searchsorted(
                    ratio_cumsum,
                    xp.asarray(n_components, device=device(ratio_cumsum)),
                    side="right",
                )
                + 1
            )
        # Compute noise covariance using Probabilistic PCA model
        # The sigma2 maximum likelihood (cf. eq. 12.46)
        if n_components < min(n_features, n_samples):
            self.noise_variance_ = xp.mean(explained_variance_[n_components:])
        else:
            self.noise_variance_ = 0.0
        self.n_samples_ = n_samples
        self.n_components_ = n_components
        # Assign a copy of the result of the truncation of the components in
        # order to:
        # - release the memory used by the discarded components,
        # - ensure that the kept components are allocated contiguously in
        #   memory to make the transform method faster by leveraging cache
        #   locality.
        self.components_ = xp.asarray(components_[:n_components, :], copy=True)
        # We do the same for the other arrays for the sake of consistency.
        self.explained_variance_ = xp.asarray(
            explained_variance_[:n_components], copy=True
        )
        self.explained_variance_ratio_ = xp.asarray(
            explained_variance_ratio_[:n_components], copy=True
        )
        self.singular_values_ = xp.asarray(singular_values_[:n_components], copy=True)
        return U, S, Vt, X, x_is_centered, xp
    def _fit_truncated(self, X, n_components, xp):
        """Fit the model by computing truncated SVD (by ARPACK or randomized)
        on X.

        Returns the tuple ``(U, S, Vt, X, x_is_centered, xp)`` consistent with
        ``_fit_full`` so that ``fit_transform`` can reuse the factors.
        """
        n_samples, n_features = X.shape
        svd_solver = self._fit_svd_solver
        if isinstance(n_components, str):
            raise ValueError(
                "n_components=%r cannot be a string with svd_solver='%s'"
                % (n_components, svd_solver)
            )
        elif not 1 <= n_components <= min(n_samples, n_features):
            raise ValueError(
                "n_components=%r must be between 1 and "
                "min(n_samples, n_features)=%r with "
                "svd_solver='%s'"
                % (n_components, min(n_samples, n_features), svd_solver)
            )
        elif svd_solver == "arpack" and n_components == min(n_samples, n_features):
            # ARPACK can only compute strictly fewer singular vectors than
            # the full rank.
            raise ValueError(
                "n_components=%r must be strictly less than "
                "min(n_samples, n_features)=%r with "
                "svd_solver='%s'"
                % (n_components, min(n_samples, n_features), svd_solver)
            )
        random_state = check_random_state(self.random_state)
        # Center data
        total_var = None
        if issparse(X):
            # Sparse input is centered implicitly (without densifying).
            self.mean_, var = mean_variance_axis(X, axis=0)
            total_var = var.sum() * n_samples / (n_samples - 1)  # ddof=1
            X_centered = _implicit_column_offset(X, self.mean_)
            x_is_centered = False
        else:
            self.mean_ = xp.mean(X, axis=0)
            X_centered = xp.asarray(X, copy=True) if self.copy else X
            X_centered -= self.mean_
            x_is_centered = not self.copy
        if svd_solver == "arpack":
            v0 = _init_arpack_v0(min(X.shape), random_state)
            U, S, Vt = svds(X_centered, k=n_components, tol=self.tol, v0=v0)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            S = S[::-1]
            # flip eigenvectors' sign to enforce deterministic output
            U, Vt = svd_flip(U[:, ::-1], Vt[::-1], u_based_decision=False)
        elif svd_solver == "randomized":
            # sign flipping is done inside
            U, S, Vt = _randomized_svd(
                X_centered,
                n_components=n_components,
                n_oversamples=self.n_oversamples,
                n_iter=self.iterated_power,
                power_iteration_normalizer=self.power_iteration_normalizer,
                flip_sign=False,
                random_state=random_state,
            )
            U, Vt = svd_flip(U, Vt, u_based_decision=False)
        self.n_samples_ = n_samples
        self.components_ = Vt
        self.n_components_ = n_components
        # Get variance explained by singular values
        self.explained_variance_ = (S**2) / (n_samples - 1)
        # Workaround in-place variance calculation since at the time numpy
        # did not have a way to calculate variance in-place.
        #
        # TODO: update this code to either:
        # * Use the array-api variance calculation, unless memory usage suffers
        # * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api
        # See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991
        if total_var is None:
            N = X.shape[0] - 1
            X_centered **= 2
            total_var = xp.sum(X_centered) / N
        self.explained_variance_ratio_ = self.explained_variance_ / total_var
        self.singular_values_ = xp.asarray(S, copy=True)  # Store the singular values.
        if self.n_components_ < min(n_features, n_samples):
            # Remaining (discarded) variance, averaged per discarded dimension
            # (Probabilistic PCA noise estimate).
            self.noise_variance_ = total_var - xp.sum(self.explained_variance_)
            self.noise_variance_ /= min(n_features, n_samples) - n_components
        else:
            self.noise_variance_ = 0.0
        return U, S, Vt, X, x_is_centered, xp
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
check_is_fitted(self)
xp, _ = get_namespace(X)
X = validate_data(self, X, dtype=[xp.float64, xp.float32], reset=False)
Xr = X - self.mean_
n_features = X.shape[1]
precision = self.get_precision()
log_like = -0.5 * xp.sum(Xr * (Xr @ precision), axis=1)
log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
y : Ignored
Ignored.
Returns
-------
ll : float
Average log-likelihood of the samples under the current model.
"""
xp, _ = get_namespace(X)
return float(xp.mean(self.score_samples(X)))
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
solver = getattr(self, "_fit_svd_solver", self.svd_solver)
tags.array_api_support = solver not in ["arpack", "randomized"] or (
solver == "randomized" and self.power_iteration_normalizer == "QR"
)
tags.input_tags.sparse = self.svd_solver in (
"auto",
"arpack",
"covariance_eigh",
)
return tags
|
python
|
github
|
https://github.com/scikit-learn/scikit-learn
|
sklearn/decomposition/_pca.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# requests python-pillow
import sys
import os
import pathlib
import shutil
import mimetypes
from PIL import Image as img
inf = float('inf')  # NOTE(review): not referenced in the visible code — possibly leftover
pref = 'Прогресс:'  # default progress-line prefix ("Progress:" in Russian)
def printProgress(iteration, total, prefix=pref, end=False):
    """Render a same-line progress counter (e.g. 'Прогресс: 3/10') via \\r.

    When end is True, additionally terminates the line (leaving a blank
    line after the counter).
    """
    print('{} {}/{}'.format(prefix, iteration, total), end='\r')
    if end:
        print('\n')
def cleanpath(l1, pwd):
    """Filter a directory listing down to image files.

    Args:
        l1: list of file names (not mutated).
        pwd: directory containing the entries; used to build full paths.

    Returns:
        A new list containing only the names whose guessed MIME type is
        image/*, preserving the original order.
    """
    kept = []
    for name in l1:
        full = pwd + '/' + name
        try:
            # guess_type works on a URI; Path.as_uri() raises ValueError for
            # relative paths, entries the original code silently dropped too.
            mime = mimetypes.guess_type(pathlib.Path(full).as_uri())[0]
        except ValueError:
            continue
        # guess_type returns None when the type cannot be guessed.
        if mime is not None and mime.split('/')[0] == 'image':
            kept.append(name)
    return kept
def process(inDir, outDir, x, y, remove=False):
    """Copy images of at least x*y pixels from inDir to outDir.

    Images already present in outDir are skipped. With remove=True, outDir is
    emptied first. Side effect: changes the process working directory to
    inDir.

    Returns:
        A list of 'name WxH' strings for images smaller than the minimum
        (these are only reported, never copied).
    """
    inlist = cleanpath(os.listdir(inDir), inDir)
    outlist = cleanpath(os.listdir(outDir), outDir)
    badimg = []
    prmax = len(inlist)
    if remove:
        if len(outlist) > 0:
            for name in outlist:
                try:
                    os.remove(outDir + '/' + name)
                except OSError:
                    # Best effort: ignore files that cannot be removed.
                    pass
            print('Мусор удалён')
        else:
            print('Нет мусора')
        outlist = []
    done = set(outlist)  # O(1) membership instead of repeated list scans
    os.chdir(inDir)
    for j, name in enumerate(inlist):
        if name not in done:
            with img.open(name) as im:
                X, Y = im.width, im.height
                if X < x or Y < y:
                    badimg.append(name + ' ' + str(X) + 'x' + str(Y))
                else:
                    shutil.copy(name, outDir + '/' + name)
            # NOTE(review): as in the original, progress only advances for
            # newly-processed files; the final call below forces 100%.
            printProgress(j + 1, prmax)
    printProgress(prmax, prmax, end=True)
    return badimg
if __name__ == '__main__':
    # Values (case-insensitive) of the optional 5th argument that enable
    # removal of old files in the destination directory.
    vals = ['yes', 'true', '!', '+']
    try:
        inDir, outDir = sys.argv[1], sys.argv[2]
        x, y = int(sys.argv[3]), int(sys.argv[4])
    except (IndexError, ValueError):
        # Too few arguments or non-numeric width/height: print usage and exit.
        print(
            'Использование: {0} <Исходный каталог> <Конечный каталог> <Ширина> <Высота> [Удалить старые файлы ({1})]'.format(
                sys.argv[0], '|'.join(vals)
            ),
            file=sys.stderr
        )
        sys.exit(1)
    try:
        r = sys.argv[5].lower() in vals
    except IndexError:
        r = False
    badimg = process(inDir, outDir, x, y, remove=r)
    if badimg:
        # NOTE(review): undersized images are skipped, not deleted — the
        # wording of this message may be misleading; confirm intent.
        print('Следующие изображения удалены:', *badimg, sep='\n')
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
package os
import "syscall"
// Pipe returns a connected pair of Files; reads from r return bytes written to w.
// It returns the files and an error, if any.
func Pipe() (r *File, w *File, err error) {
	var fds [2]int
	// O_CLOEXEC ensures the new descriptors are not leaked across exec.
	if e := syscall.Pipe2(fds[:], syscall.O_CLOEXEC); e != nil {
		return nil, nil, NewSyscallError("pipe2", e)
	}
	r = newFile(fds[0], "|0", kindPipe, false)
	w = newFile(fds[1], "|1", kindPipe, false)
	return r, w, nil
}
|
go
|
github
|
https://github.com/golang/go
|
src/os/pipe2_unix.go
|
<?php declare(strict_types=1);
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer\Installer;
use Composer\Repository\InstalledRepositoryInterface;
use Composer\Package\PackageInterface;
/**
 * Does not install anything but marks packages installed in the repo
 *
 * Useful for dry runs
 *
 * @author Jordi Boggiano <j.boggiano@seld.be>
 */
class NoopInstaller implements InstallerInterface
{
    /**
     * Every package type is accepted, since nothing is actually installed.
     *
     * @inheritDoc
     */
    public function supports(string $packageType)
    {
        return true;
    }

    /**
     * A package counts as installed as soon as the repository lists it.
     *
     * @inheritDoc
     */
    public function isInstalled(InstalledRepositoryInterface $repo, PackageInterface $package)
    {
        return $repo->hasPackage($package);
    }

    /**
     * No download is performed; resolves immediately.
     *
     * @inheritDoc
     */
    public function download(PackageInterface $package, ?PackageInterface $prevPackage = null)
    {
        return \React\Promise\resolve(null);
    }

    /**
     * No preparation step is needed; resolves immediately.
     *
     * @inheritDoc
     */
    public function prepare($type, PackageInterface $package, ?PackageInterface $prevPackage = null)
    {
        return \React\Promise\resolve(null);
    }

    /**
     * No cleanup step is needed; resolves immediately.
     *
     * @inheritDoc
     */
    public function cleanup($type, PackageInterface $package, ?PackageInterface $prevPackage = null)
    {
        return \React\Promise\resolve(null);
    }

    /**
     * Records the package as installed in the repository without touching disk.
     *
     * @inheritDoc
     */
    public function install(InstalledRepositoryInterface $repo, PackageInterface $package)
    {
        if (!$repo->hasPackage($package)) {
            $repo->addPackage(clone $package);
        }

        return \React\Promise\resolve(null);
    }

    /**
     * Swaps the installed package record for the target one.
     *
     * @inheritDoc
     */
    public function update(InstalledRepositoryInterface $repo, PackageInterface $initial, PackageInterface $target)
    {
        if (!$repo->hasPackage($initial)) {
            throw new \InvalidArgumentException('Package is not installed: '.$initial);
        }

        $repo->removePackage($initial);
        if (!$repo->hasPackage($target)) {
            $repo->addPackage(clone $target);
        }

        return \React\Promise\resolve(null);
    }

    /**
     * Removes the package record from the repository without touching disk.
     *
     * @inheritDoc
     */
    public function uninstall(InstalledRepositoryInterface $repo, PackageInterface $package)
    {
        if (!$repo->hasPackage($package)) {
            throw new \InvalidArgumentException('Package is not installed: '.$package);
        }

        $repo->removePackage($package);

        return \React\Promise\resolve(null);
    }

    /**
     * Virtual install path derived from the pretty name (plus target dir).
     *
     * @inheritDoc
     */
    public function getInstallPath(PackageInterface $package)
    {
        $targetDir = $package->getTargetDir();
        if ($targetDir) {
            return $package->getPrettyName() . '/' . $targetDir;
        }

        return $package->getPrettyName();
    }
}
|
php
|
github
|
https://github.com/composer/composer
|
src/Composer/Installer/NoopInstaller.php
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model definitions for simple speech recognition.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import tensorflow as tf
def _next_power_of_two(x):
"""Calculates the smallest enclosing power of two for an input.
Args:
x: Positive float or integer number.
Returns:
Next largest power of two integer.
"""
return 1 if x == 0 else 2**(int(x) - 1).bit_length()
def prepare_model_settings(label_count, sample_rate, clip_duration_ms,
                           window_size_ms, window_stride_ms, feature_bin_count,
                           preprocess):
  """Calculates common settings needed for all models.

  Args:
    label_count: How many classes are to be recognized.
    sample_rate: Number of audio samples per second.
    clip_duration_ms: Length of each audio clip to be analyzed.
    window_size_ms: Duration of frequency analysis window.
    window_stride_ms: How far to move in time between frequency windows.
    feature_bin_count: Number of frequency bins to use for analysis.
    preprocess: How the spectrogram is processed to produce features.

  Returns:
    Dictionary containing common settings.

  Raises:
    ValueError: If the preprocessing mode isn't recognized.
  """
  # Millisecond durations converted to sample counts.
  desired_samples = int(sample_rate * clip_duration_ms / 1000)
  window_size_samples = int(sample_rate * window_size_ms / 1000)
  window_stride_samples = int(sample_rate * window_stride_ms / 1000)
  length_minus_window = desired_samples - window_size_samples
  if length_minus_window < 0:
    # The clip is shorter than a single analysis window.
    spectrogram_length = 0
  else:
    spectrogram_length = 1 + int(length_minus_window / window_stride_samples)
  if preprocess == 'mfcc':
    average_window_width = -1
    fingerprint_width = feature_bin_count
  elif preprocess == 'average':
    fft_bin_count = 1 + (_next_power_of_two(window_size_samples) / 2)
    average_window_width = int(math.floor(fft_bin_count / feature_bin_count))
    fingerprint_width = int(math.ceil(fft_bin_count / average_window_width))
  else:
    raise ValueError('Unknown preprocess mode "%s" (should be "mfcc" or'
                     ' "average")' % (preprocess))
  fingerprint_size = fingerprint_width * spectrogram_length
  return {
      'desired_samples': desired_samples,
      'window_size_samples': window_size_samples,
      'window_stride_samples': window_stride_samples,
      'spectrogram_length': spectrogram_length,
      'fingerprint_width': fingerprint_width,
      'fingerprint_size': fingerprint_size,
      'label_count': label_count,
      'sample_rate': sample_rate,
      'preprocess': preprocess,
      'average_window_width': average_window_width,
  }
def create_model(fingerprint_input, model_settings, model_architecture,
                 is_training, runtime_settings=None):
  """Builds a model of the requested architecture compatible with the settings.

  There are many possible ways of deriving predictions from a spectrogram
  input, so this function provides an abstract interface for creating different
  kinds of models in a black-box way. You need to pass in a TensorFlow node as
  the 'fingerprint' input, and this should output a batch of 1D features that
  describe the audio. Typically this will be derived from a spectrogram that's
  been run through an MFCC, but in theory it can be any feature vector of the
  size specified in model_settings['fingerprint_size'].

  The function will build the graph it needs in the current TensorFlow graph,
  and return the tensorflow output that will contain the 'logits' input to the
  softmax prediction process. If training flag is on, it will also return a
  placeholder node that can be used to control the dropout amount.

  See the implementations below for the possible model architectures that can be
  requested.

  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    model_architecture: String specifying which kind of model to create.
    is_training: Whether the model is going to be used for training.
    runtime_settings: Dictionary of information about the runtime.

  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.

  Raises:
    Exception: If the architecture type isn't recognized.
  """
  if model_architecture == 'single_fc':
    return create_single_fc_model(fingerprint_input, model_settings,
                                  is_training)
  elif model_architecture == 'conv':
    return create_conv_model(fingerprint_input, model_settings, is_training)
  elif model_architecture == 'low_latency_conv':
    return create_low_latency_conv_model(fingerprint_input, model_settings,
                                         is_training)
  elif model_architecture == 'low_latency_svdf':
    return create_low_latency_svdf_model(fingerprint_input, model_settings,
                                         is_training, runtime_settings)
  elif model_architecture == 'tiny_conv':
    return create_tiny_conv_model(fingerprint_input, model_settings,
                                  is_training)
  else:
    # Fixed: the message was missing the closing quote after
    # "low_latency_conv".
    raise Exception('model_architecture argument "' + model_architecture +
                    '" not recognized, should be one of "single_fc", "conv",' +
                    ' "low_latency_conv", "low_latency_svdf",' +
                    ' or "tiny_conv"')
def load_variables_from_checkpoint(sess, start_checkpoint):
  """Utility function to centralize checkpoint restoration.

  Args:
    sess: TensorFlow session.
    start_checkpoint: Path to saved checkpoint on disk.
  """
  # Restore every global variable from the checkpoint into the session.
  tf.train.Saver(tf.global_variables()).restore(sess, start_checkpoint)
def create_single_fc_model(fingerprint_input, model_settings, is_training):
  """Builds a model with a single hidden fully-connected layer.

  This is a very simple model with just one matmul and bias layer. As you'd
  expect, it doesn't produce very accurate results, but it is very fast and
  simple, so it's useful for sanity testing.

  Here's the layout of the graph:

  (fingerprint_input)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v

  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.

  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  """
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  input_size = model_settings['fingerprint_size']
  output_size = model_settings['label_count']
  weights = tf.get_variable(
      name='weights',
      initializer=tf.truncated_normal_initializer(stddev=0.001),
      shape=[input_size, output_size])
  bias = tf.get_variable(
      name='bias', initializer=tf.zeros_initializer, shape=[output_size])
  logits = tf.matmul(fingerprint_input, weights) + bias
  return (logits, dropout_prob) if is_training else logits
def create_conv_model(fingerprint_input, model_settings, is_training):
  """Builds a standard convolutional model.

  This is roughly the network labeled as 'cnn-trad-fpool3' in the
  'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
  http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf

  Here's the layout of the graph:

  (fingerprint_input)
          v
      [Conv2D]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
        [Relu]
          v
      [MaxPool]
          v
      [Conv2D]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
        [Relu]
          v
      [MaxPool]
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v

  This produces fairly good quality results, but can involve a large number of
  weight parameters and computations. For a cheaper alternative from the same
  paper with slightly less accuracy, see 'low_latency_conv' below.

  During training, dropout nodes are introduced after each relu, controlled by a
  placeholder.

  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.

  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  """
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  input_frequency_size = model_settings['fingerprint_width']
  input_time_size = model_settings['spectrogram_length']
  # Reshape the flat fingerprint into (batch, time, frequency, 1) for conv2d.
  fingerprint_4d = tf.reshape(fingerprint_input,
                              [-1, input_time_size, input_frequency_size, 1])
  # First conv layer: 20x8 filters, 64 output channels.
  first_filter_width = 8
  first_filter_height = 20
  first_filter_count = 64
  first_weights = tf.get_variable(
      name='first_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[first_filter_height, first_filter_width, 1, first_filter_count])
  first_bias = tf.get_variable(
      name='first_bias',
      initializer=tf.zeros_initializer,
      shape=[first_filter_count])
  first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [1, 1, 1, 1],
                            'SAME') + first_bias
  first_relu = tf.nn.relu(first_conv)
  if is_training:
    first_dropout = tf.nn.dropout(first_relu, dropout_prob)
  else:
    first_dropout = first_relu
  # 2x2 max pooling halves both the time and frequency dimensions.
  max_pool = tf.nn.max_pool(first_dropout, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')
  # Second conv layer: 10x4 filters, 64 -> 64 channels.
  second_filter_width = 4
  second_filter_height = 10
  second_filter_count = 64
  second_weights = tf.get_variable(
      name='second_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[
          second_filter_height, second_filter_width, first_filter_count,
          second_filter_count
      ])
  second_bias = tf.get_variable(
      name='second_bias',
      initializer=tf.zeros_initializer,
      shape=[second_filter_count])
  second_conv = tf.nn.conv2d(max_pool, second_weights, [1, 1, 1, 1],
                             'SAME') + second_bias
  second_relu = tf.nn.relu(second_conv)
  if is_training:
    second_dropout = tf.nn.dropout(second_relu, dropout_prob)
  else:
    second_dropout = second_relu
  # Flatten the conv output so it can feed the final fully-connected layer.
  second_conv_shape = second_dropout.get_shape()
  second_conv_output_width = second_conv_shape[2]
  second_conv_output_height = second_conv_shape[1]
  second_conv_element_count = int(
      second_conv_output_width * second_conv_output_height *
      second_filter_count)
  flattened_second_conv = tf.reshape(second_dropout,
                                     [-1, second_conv_element_count])
  label_count = model_settings['label_count']
  final_fc_weights = tf.get_variable(
      name='final_fc_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[second_conv_element_count, label_count])
  final_fc_bias = tf.get_variable(
      name='final_fc_bias',
      initializer=tf.zeros_initializer,
      shape=[label_count])
  final_fc = tf.matmul(flattened_second_conv, final_fc_weights) + final_fc_bias
  if is_training:
    return final_fc, dropout_prob
  else:
    return final_fc
def create_low_latency_conv_model(fingerprint_input, model_settings,
                                  is_training):
  """Builds a convolutional model with low compute requirements.
  This is roughly the network labeled as 'cnn-one-fstride4' in the
  'Convolutional Neural Networks for Small-footprint Keyword Spotting' paper:
  http://www.isca-speech.org/archive/interspeech_2015/papers/i15_1478.pdf
  Here's the layout of the graph:
  (fingerprint_input)
          v
      [Conv2D]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
        [Relu]
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
  This produces slightly lower quality results than the 'conv' model, but needs
  fewer weight parameters and computations.
  During training, dropout nodes are introduced after the relu, controlled by a
  placeholder.
  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.
  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  """
  # The dropout placeholder only exists in the training graph; inference
  # graphs have no dropout node at all.
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  input_frequency_size = model_settings['fingerprint_width']
  input_time_size = model_settings['spectrogram_length']
  # Reshape the flat fingerprint into a [batch, time, frequency, 1] image so
  # 2D convolution can be applied.
  fingerprint_4d = tf.reshape(fingerprint_input,
                              [-1, input_time_size, input_frequency_size, 1])
  # The first filter spans the full time axis, so the 'VALID' convolution
  # collapses the time dimension to a single output row.
  first_filter_width = 8
  first_filter_height = input_time_size
  first_filter_count = 186
  first_filter_stride_x = 1
  first_filter_stride_y = 1
  first_weights = tf.get_variable(
      name='first_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[first_filter_height, first_filter_width, 1, first_filter_count])
  first_bias = tf.get_variable(
      name='first_bias',
      initializer=tf.zeros_initializer,
      shape=[first_filter_count])
  first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [
      1, first_filter_stride_y, first_filter_stride_x, 1
  ], 'VALID') + first_bias
  first_relu = tf.nn.relu(first_conv)
  if is_training:
    first_dropout = tf.nn.dropout(first_relu, dropout_prob)
  else:
    first_dropout = first_relu
  # Compute the post-convolution spatial size analytically (standard 'VALID'
  # output-size formula) so the activations can be flattened for the FC
  # layers below.
  first_conv_output_width = math.floor(
      (input_frequency_size - first_filter_width + first_filter_stride_x) /
      first_filter_stride_x)
  first_conv_output_height = math.floor(
      (input_time_size - first_filter_height + first_filter_stride_y) /
      first_filter_stride_y)
  first_conv_element_count = int(
      first_conv_output_width * first_conv_output_height * first_filter_count)
  flattened_first_conv = tf.reshape(first_dropout,
                                    [-1, first_conv_element_count])
  # Three fully-connected layers follow; dropout is interleaved between them
  # during training only.
  first_fc_output_channels = 128
  first_fc_weights = tf.get_variable(
      name='first_fc_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[first_conv_element_count, first_fc_output_channels])
  first_fc_bias = tf.get_variable(
      name='first_fc_bias',
      initializer=tf.zeros_initializer,
      shape=[first_fc_output_channels])
  first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
  if is_training:
    second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
  else:
    second_fc_input = first_fc
  second_fc_output_channels = 128
  second_fc_weights = tf.get_variable(
      name='second_fc_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[first_fc_output_channels, second_fc_output_channels])
  second_fc_bias = tf.get_variable(
      name='second_fc_bias',
      initializer=tf.zeros_initializer,
      shape=[second_fc_output_channels])
  second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
  if is_training:
    final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
  else:
    final_fc_input = second_fc
  label_count = model_settings['label_count']
  # Final projection to per-label logits (no softmax here; callers apply it).
  final_fc_weights = tf.get_variable(
      name='final_fc_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[second_fc_output_channels, label_count])
  final_fc_bias = tf.get_variable(
      name='final_fc_bias',
      initializer=tf.zeros_initializer,
      shape=[label_count])
  final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
  if is_training:
    return final_fc, dropout_prob
  else:
    return final_fc
def create_low_latency_svdf_model(fingerprint_input, model_settings,
                                  is_training, runtime_settings):
  """Builds an SVDF model with low compute requirements.
  This is based in the topology presented in the 'Compressing Deep Neural
  Networks using a Rank-Constrained Topology' paper:
  https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43813.pdf
  Here's the layout of the graph:
  (fingerprint_input)
          v
        [SVDF]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
        [Relu]
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
      [MatMul]<-(weights)
          v
      [BiasAdd]<-(bias)
          v
  This model produces lower recognition accuracy than the 'conv' model above,
  but requires fewer weight parameters and, significantly fewer computations.
  During training, dropout nodes are introduced after the relu, controlled by a
  placeholder.
  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    The node is expected to produce a 2D Tensor of shape:
      [batch, model_settings['fingerprint_width'] *
              model_settings['spectrogram_length']]
    with the features corresponding to the same time slot arranged contiguously,
    and the oldest slot at index [:, 0], and newest at [:, -1].
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.
    runtime_settings: Dictionary of information about the runtime.
  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  Raises:
    ValueError: If the inputs tensor is incorrectly shaped.
  """
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  input_frequency_size = model_settings['fingerprint_width']
  input_time_size = model_settings['spectrogram_length']
  # Validation.
  # Note: `.value` on dimensions is TF1 behavior (tf.Dimension objects).
  input_shape = fingerprint_input.get_shape()
  if len(input_shape) != 2:
    raise ValueError('Inputs to `SVDF` should have rank == 2.')
  if input_shape[-1].value is None:
    raise ValueError('The last dimension of the inputs to `SVDF` '
                     'should be defined. Found `None`.')
  if input_shape[-1].value % input_frequency_size != 0:
    raise ValueError('Inputs feature dimension %d must be a multiple of '
                     'frame size %d', fingerprint_input.shape[-1].value,
                     input_frequency_size)
  # Set number of units (i.e. nodes) and rank.
  rank = 2
  num_units = 1280
  # Number of filters: pairs of feature and time filters.
  num_filters = rank * num_units
  # Create the runtime memory: [num_filters, batch, input_time_size]
  # Non-trainable: it holds cached activations between inference calls, not
  # learned parameters.
  batch = 1
  memory = tf.get_variable(
      initializer=tf.zeros_initializer,
      shape=[num_filters, batch, input_time_size],
      trainable=False,
      name='runtime-memory')
  # Determine the number of new frames in the input, such that we only operate
  # on those. For training we do not use the memory, and thus use all frames
  # provided in the input.
  # new_fingerprint_input: [batch, num_new_frames*input_frequency_size]
  if is_training:
    num_new_frames = input_time_size
  else:
    window_stride_ms = int(model_settings['window_stride_samples'] * 1000 /
                           model_settings['sample_rate'])
    # On the first call (memory still all-zero) process the whole input;
    # afterwards only the frames that arrived since the previous call.
    num_new_frames = tf.cond(
        tf.equal(tf.count_nonzero(memory), 0),
        lambda: input_time_size,
        lambda: int(runtime_settings['clip_stride_ms'] / window_stride_ms))
  new_fingerprint_input = fingerprint_input[
      :, -num_new_frames*input_frequency_size:]
  # Expand to add input channels dimension.
  new_fingerprint_input = tf.expand_dims(new_fingerprint_input, 2)
  # Create the frequency filters.
  weights_frequency = tf.get_variable(
      name='weights_frequency',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[input_frequency_size, num_filters])
  # Expand to add input channels dimensions.
  # weights_frequency: [input_frequency_size, 1, num_filters]
  weights_frequency = tf.expand_dims(weights_frequency, 1)
  # Convolve the 1D feature filters sliding over the time dimension.
  # The stride equals the frame width, so each output step consumes exactly
  # one feature frame.
  # activations_time: [batch, num_new_frames, num_filters]
  activations_time = tf.nn.conv1d(
      new_fingerprint_input, weights_frequency, input_frequency_size, 'VALID')
  # Rearrange such that we can perform the batched matmul.
  # activations_time: [num_filters, batch, num_new_frames]
  activations_time = tf.transpose(activations_time, perm=[2, 0, 1])
  # Runtime memory optimization.
  if not is_training:
    # We need to drop the activations corresponding to the oldest frames, and
    # then add those corresponding to the new frames.
    new_memory = memory[:, :, num_new_frames:]
    new_memory = tf.concat([new_memory, activations_time], 2)
    # NOTE(review): the tf.assign op's result is discarded and nothing below
    # depends on it, so without an explicit control dependency the memory
    # update may never execute in the session run — confirm intended.
    tf.assign(memory, new_memory)
    activations_time = new_memory
  # Create the time filters.
  weights_time = tf.get_variable(
      name='weights_time',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[num_filters, input_time_size])
  # Apply the time filter on the outputs of the feature filters.
  # weights_time: [num_filters, input_time_size, 1]
  # outputs: [num_filters, batch, 1]
  weights_time = tf.expand_dims(weights_time, 2)
  outputs = tf.matmul(activations_time, weights_time)
  # Split num_units and rank into separate dimensions (the remaining
  # dimension is the input_shape[0] -i.e. batch size). This also squeezes
  # the last dimension, since it's not used.
  # [num_filters, batch, 1] => [num_units, rank, batch]
  outputs = tf.reshape(outputs, [num_units, rank, -1])
  # Sum the rank outputs per unit => [num_units, batch].
  units_output = tf.reduce_sum(outputs, axis=1)
  # Transpose to shape [batch, num_units]
  units_output = tf.transpose(units_output)
  # Apply bias.
  bias = tf.get_variable(
      name='bias', initializer=tf.zeros_initializer, shape=[num_units])
  first_bias = tf.nn.bias_add(units_output, bias)
  # Relu.
  first_relu = tf.nn.relu(first_bias)
  if is_training:
    first_dropout = tf.nn.dropout(first_relu, dropout_prob)
  else:
    first_dropout = first_relu
  first_fc_output_channels = 256
  first_fc_weights = tf.get_variable(
      name='first_fc_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[num_units, first_fc_output_channels])
  first_fc_bias = tf.get_variable(
      name='first_fc_bias',
      initializer=tf.zeros_initializer,
      shape=[first_fc_output_channels])
  first_fc = tf.matmul(first_dropout, first_fc_weights) + first_fc_bias
  if is_training:
    second_fc_input = tf.nn.dropout(first_fc, dropout_prob)
  else:
    second_fc_input = first_fc
  second_fc_output_channels = 256
  second_fc_weights = tf.get_variable(
      name='second_fc_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[first_fc_output_channels, second_fc_output_channels])
  second_fc_bias = tf.get_variable(
      name='second_fc_bias',
      initializer=tf.zeros_initializer,
      shape=[second_fc_output_channels])
  second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
  if is_training:
    final_fc_input = tf.nn.dropout(second_fc, dropout_prob)
  else:
    final_fc_input = second_fc
  label_count = model_settings['label_count']
  # Final projection to per-label logits.
  final_fc_weights = tf.get_variable(
      name='final_fc_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[second_fc_output_channels, label_count])
  final_fc_bias = tf.get_variable(
      name='final_fc_bias',
      initializer=tf.zeros_initializer,
      shape=[label_count])
  final_fc = tf.matmul(final_fc_input, final_fc_weights) + final_fc_bias
  if is_training:
    return final_fc, dropout_prob
  else:
    return final_fc
def create_tiny_conv_model(fingerprint_input, model_settings, is_training):
  """Builds a convolutional model aimed at microcontrollers.
  Devices like DSPs and microcontrollers can have very small amounts of
  memory and limited processing power. This model is designed to use less
  than 20KB of working RAM, and fit within 32KB of read-only (flash) memory.
  The graph is a single strided Conv2D + BiasAdd + Relu, followed by one
  fully-connected projection to logits:
  (fingerprint_input) -> [Conv2D]<-(weights) -> [BiasAdd]<-(bias) -> [Relu]
                      -> [MatMul]<-(weights) -> [BiasAdd]<-(bias)
  This doesn't produce particularly accurate results, but it's designed to be
  used as the first stage of a pipeline, running on a low-energy piece of
  hardware that can always be on, and then wake higher-power chips when a
  possible utterance has been found, so that more accurate analysis can be done.
  During training, a dropout node is introduced after the relu, controlled by a
  placeholder.
  Args:
    fingerprint_input: TensorFlow node that will output audio feature vectors.
    model_settings: Dictionary of information about the model.
    is_training: Whether the model is going to be used for training.
  Returns:
    TensorFlow node outputting logits results, and optionally a dropout
    placeholder.
  """
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  freq_size = model_settings['fingerprint_width']
  time_size = model_settings['spectrogram_length']
  # View the flat fingerprint as a single-channel [batch, time, freq] image.
  net = tf.reshape(fingerprint_input, [-1, time_size, freq_size, 1])
  filter_height, filter_width, filter_count = 10, 8, 8
  conv_weights = tf.get_variable(
      name='first_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[filter_height, filter_width, 1, filter_count])
  conv_bias = tf.get_variable(
      name='first_bias',
      initializer=tf.zeros_initializer,
      shape=[filter_count])
  # Stride of 2 in both directions halves each spatial dimension ('SAME'
  # padding), keeping the activation footprint small.
  stride_y, stride_x = 2, 2
  net = tf.nn.relu(
      tf.nn.conv2d(net, conv_weights, [1, stride_y, stride_x, 1], 'SAME') +
      conv_bias)
  if is_training:
    net = tf.nn.dropout(net, dropout_prob)
  # Flatten the conv activations for the final fully-connected layer.
  net_shape = net.get_shape()
  element_count = int(net_shape[2] * net_shape[1] * filter_count)
  net = tf.reshape(net, [-1, element_count])
  label_count = model_settings['label_count']
  fc_weights = tf.get_variable(
      name='final_fc_weights',
      initializer=tf.truncated_normal_initializer(stddev=0.01),
      shape=[element_count, label_count])
  fc_bias = tf.get_variable(
      name='final_fc_bias',
      initializer=tf.zeros_initializer,
      shape=[label_count])
  logits = tf.matmul(net, fc_weights) + fc_bias
  if is_training:
    return logits, dropout_prob
  return logits
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Use this module to get and run all tk tests.
Tkinter tests should live in a package inside the directory where this file
lives, like test_tkinter.
Extensions also should live in packages following the same rule as above.
"""
import os
import sys
import unittest
import importlib
import test.test_support
# Directory containing this file; used as the default root for test discovery.
this_dir_path = os.path.abspath(os.path.dirname(__file__))

# Cached availability check result: None = not yet probed, False = Tk usable,
# otherwise a string describing why Tk cannot be used.
_tk_unavailable = None
def check_tk_availability():
    """Check that Tk is installed and available.

    Caches the result in the module-level _tk_unavailable so the (potentially
    expensive or process-aborting) probe runs at most once.  Raises
    unittest.SkipTest when Tk cannot be used.
    """
    global _tk_unavailable

    if _tk_unavailable is None:
        _tk_unavailable = False
        if sys.platform == 'darwin':
            # The Aqua Tk implementations on OS X can abort the process if
            # being called in an environment where a window server connection
            # cannot be made, for instance when invoked by a buildbot or ssh
            # process not running under the same user id as the current console
            # user.  To avoid that, raise an exception if the window manager
            # connection is not available.
            from ctypes import cdll, c_int, pointer, Structure
            from ctypes.util import find_library

            app_services = cdll.LoadLibrary(find_library("ApplicationServices"))

            if app_services.CGMainDisplayID() == 0:
                # No display: Tk would abort rather than fail gracefully.
                _tk_unavailable = "cannot run without OS X window manager"
            else:
                class ProcessSerialNumber(Structure):
                    _fields_ = [("highLongOfPSN", c_int),
                                ("lowLongOfPSN", c_int)]
                psn = ProcessSerialNumber()
                psn_p = pointer(psn)
                if ( (app_services.GetCurrentProcess(psn_p) < 0) or
                     (app_services.SetFrontProcess(psn_p) < 0) ):
                    _tk_unavailable = "cannot run without OS X gui process"
        else:   # not OS X
            import Tkinter
            try:
                # Instantiating any widget forces a Tcl/Tk interpreter to
                # start, which is the real availability test.
                Tkinter.Button()
            except Tkinter.TclError as msg:
                # assuming tk is not available
                _tk_unavailable = "tk not available: %s" % msg

    if _tk_unavailable:
        raise unittest.SkipTest(_tk_unavailable)
    return
def is_package(path):
    """Return True if the directory *path* contains a package marker file.

    A directory counts as a package when it holds an __init__ module in
    source (.py) or byte-compiled (.pyc/.pyo) form.

    Fixed: the original checked for '__init.pyo' (missing underscore), so
    packages shipping only a -O byte-compiled __init__ were not detected.
    """
    marker_names = ('__init__.py', '__init__.pyc', '__init__.pyo')
    for name in os.listdir(path):
        if name in marker_names:
            return True
    return False
def get_tests_modules(basepath=this_dir_path, gui=True, packages=None):
    """This will import and yield modules whose names start with test_
    and are inside packages found in the path starting at basepath.

    If packages is specified it should contain package names that want
    their tests collected.
    """
    py_ext = '.py'
    for dirpath, dirnames, filenames in os.walk(basepath):
        # Prune hidden directories in place so os.walk will not descend
        # into them.
        for dirname in list(dirnames):
            if dirname[0] == '.':
                dirnames.remove(dirname)

        if is_package(dirpath) and filenames:
            # Derive the dotted package name from the path relative to
            # basepath.  Fixed: the original replaced '/' while slicing
            # with os.sep, which silently produced wrong package names on
            # platforms whose separator is not '/'.
            pkg_name = dirpath[len(basepath) + len(os.sep):].replace(
                os.sep, '.')
            if packages and pkg_name not in packages:
                continue

            filenames = filter(
                lambda x: x.startswith('test_') and x.endswith(py_ext),
                filenames)

            for name in filenames:
                try:
                    yield importlib.import_module(
                        ".%s" % name[:-len(py_ext)], pkg_name)
                except test.test_support.ResourceDenied:
                    # GUI resource denied: skip silently unless GUI tests
                    # were explicitly requested.
                    if gui:
                        raise
def get_tests(text=True, gui=True, packages=None):
    """Yield all the tests in the modules found by get_tests_modules.

    If nogui is True, only tests that do not require a GUI will be
    returned."""
    # Collect the module attribute names that hold the requested test kinds.
    wanted_attrs = []
    if text:
        wanted_attrs.append('tests_nogui')
    if gui:
        wanted_attrs.append('tests_gui')

    for module in get_tests_modules(gui=gui, packages=packages):
        for attr in wanted_attrs:
            # Modules without the attribute simply contribute nothing.
            for test_item in getattr(module, attr, ()):
                yield test_item
if __name__ == "__main__":
    # When run directly, grant the 'gui' resource so GUI tests are not
    # skipped, then run every discovered test.
    test.test_support.use_resources = ['gui']
    test.test_support.run_unittest(*get_tests())
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Test Commands
All test commands are denoted with the `.testOnly()` modifier to the `MONGO_REGISTER_COMMAND` invocation.
For example:
```c++
MONGO_REGISTER_COMMAND(EchoCommand).testOnly();
```
## How to enable
To be able to run these commands, the server must be started with the `enableTestCommands=1`
server parameter (e.g. `--setParameter enableTestCommands=1`). Resmoke.py often sets this server
parameter for testing.
## Examples
Some often-used commands that are test-only:
- [configureFailPoint][fail_point_cmd]
- [replSetTest][repl_set_test_cmd]
- [sleep][sleep_cmd]
As a very rough estimate, about 10% of all server commands are test-only. These additional commands
will appear in `db.runCommand({listCommands: 1})` when the server has test commands enabled.
## Test Command Infrastructure
A few pointers to relevant code that sets this up:
- [test_commands_enabled.h][test_commands_enabled]
- [MONGO_REGISTER_COMMAND][register_command]
[empty_capped_cmd]: ../src/mongo/db/commands/test_commands.cpp
[fail_point_cmd]: ../src/mongo/db/commands/fail_point_cmd.cpp
[register_command]: ../src/mongo/db/commands.h
[repl_set_test_cmd]: ../src/mongo/db/repl/repl_set_commands.cpp
[sleep_cmd]: ../src/mongo/db/commands/sleep_command.cpp
[test_commands_enabled]: ../src/mongo/db/commands/test_commands_enabled.h
|
unknown
|
github
|
https://github.com/mongodb/mongo
|
docs/test_commands.md
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""Package marker file."""
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Consume and serialize all of the data from a running TensorBoard instance.
This program connects to a live TensorBoard backend at given port, and saves
all of the data to local disk JSON in a predictable format.
This makes it easy to mock out the TensorBoard backend so that the frontend
may be tested in isolation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import os.path
import shutil
import threading
import urllib
import six
from six.moves import http_client
import tensorflow as tf
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
# Command-line flags for the serializer.  Fixed: "directoy" typo in the
# user-facing --target help text.
tf.flags.DEFINE_string('logdir', None, """the logdir to pass to the TensorBoard
backend; data will be read from this logdir for serialization.""")
tf.flags.DEFINE_string('target', None, """The directory where serialized data
will be written""")
tf.flags.DEFINE_boolean('overwrite', False, """Whether to remove and overwrite
TARGET if it already exists.""")
tf.flags.DEFINE_boolean(
    'purge_orphaned_data', True, 'Whether to purge data that '
    'may have been orphaned due to TensorBoard restarts. '
    'Disabling purge_orphaned_data can be used to debug data '
    'disappearance.')
FLAGS = tf.flags.FLAGS

# Characters that are unsafe in filenames; Clean() replaces them with '_'.
BAD_CHARACTERS = "#%&{}\\/<>*? $!'\":@+`|="
def Url(route, params):
  """Takes route and query params, and produce encoded url for that asset."""
  if not params:
    return route
  # Sorting the params ensures a unique, reproducible filename per query.
  encoded = urllib.urlencode(sorted(six.iteritems(params)))
  return route + '?' + encoded
def Clean(s):
  """Clean a string so it can be used as a filepath."""
  # Substitute an underscore for every filesystem-hostile character.
  return ''.join('_' if ch in BAD_CHARACTERS else ch for ch in s)
class TensorBoardStaticSerializer(object):
  """Serialize all the routes from a TensorBoard server to static json."""

  def __init__(self, connection, target_path):
    """Args:
      connection: open HTTP connection to a live TensorBoard backend.
      target_path: directory under which serialized data is written; a
        'data' subdirectory is created if needed.
    """
    self.connection = connection
    EnsureDirectoryExists(os.path.join(target_path, 'data'))
    self.path = target_path

  def GetAndSave(self, url):
    """GET the given url. Serialize the result at clean path version of url."""
    self.connection.request('GET', '/data/' + url)
    response = self.connection.getresponse()
    destination = self.path + '/data/' + Clean(url)
    if response.status != 200:
      raise IOError(url)
    content = response.read()
    with open(destination, 'w') as f:
      f.write(content)
    return content

  def GetRouteAndSave(self, route, params=None):
    """GET given route and params. Serialize the result. Return as JSON."""
    url = Url(route, params)
    return json.loads(self.GetAndSave(url))

  def Run(self):
    """Serialize everything from a TensorBoard backend."""
    # get the runs object, which is an index for every tag.
    runs = self.GetRouteAndSave('runs')
    # collect sampled data.
    self.GetRouteAndSave('scalars')
    # now let's just download everything!
    for run, tag_type_to_tags in six.iteritems(runs):
      for tag_type, tags in six.iteritems(tag_type_to_tags):
        try:
          if tag_type == 'graph':
            # in this case, tags is a bool which specifies if graph is present.
            if tags:
              # Fixed: the query key must be the literal string 'run'; the
              # original passed {run: run}, using the run *name* as the key,
              # which produced a malformed /data/graph query.
              self.GetRouteAndSave('graph', {'run': run})
          elif tag_type == 'images':
            for t in tags:
              images = self.GetRouteAndSave('images', {'run': run, 'tag': t})
              for im in images:
                url = 'individualImage?' + im['query']
                # pull down the images themselves.
                self.GetAndSave(url)
          else:
            # Save this, whatever it is :)
            for t in tags:
              self.GetRouteAndSave(tag_type, {'run': run, 'tag': t})
        except IOError as e:
          # Best-effort: log the failure and keep downloading the rest.
          PrintAndLog('Retrieval failed for %s/%s/%s' % (tag_type, run, tags),
                      tf.logging.WARN)
          PrintAndLog('Got Exception: %s' % e, tf.logging.WARN)
          PrintAndLog('continuing...', tf.logging.WARN)
          continue
def EnsureDirectoryExists(path):
  """Create directory *path* (and any missing parents) if absent.

  Uses try/except rather than an exists() pre-check so that a concurrent
  creation of the same directory between check and makedirs cannot raise.
  Re-raises when makedirs failed for any reason other than the directory
  already existing.
  """
  try:
    os.makedirs(path)
  except OSError:
    if not os.path.isdir(path):
      raise
def PrintAndLog(msg, lvl=tf.logging.INFO):
  """Emit *msg* both through tf.logging at level *lvl* and to stdout."""
  tf.logging.log(lvl, msg)
  print(msg)
def main(unused_argv=None):
  """Serialize a TensorBoard backend's data to FLAGS.target.

  Loads the event data from FLAGS.logdir, starts an in-process TensorBoard
  server on an ephemeral port, downloads every route via HTTP, then shuts
  the server down.  Returns a negative int on flag errors.
  """
  target = FLAGS.target
  logdir = FLAGS.logdir
  if not target or not logdir:
    PrintAndLog('Both --target and --logdir are required.', tf.logging.ERROR)
    return -1
  if os.path.exists(target):
    if FLAGS.overwrite:
      # Remove whatever is in the way, directory or plain file.
      if os.path.isdir(target):
        shutil.rmtree(target)
      else:
        os.remove(target)
    else:
      PrintAndLog('Refusing to overwrite target %s without --overwrite' %
                  target, tf.logging.ERROR)
      return -2
  path_to_run = server.ParseEventFilesSpec(FLAGS.logdir)

  PrintAndLog('About to load Multiplexer. This may take some time.')
  multiplexer = event_multiplexer.EventMultiplexer(
      size_guidance=server.TENSORBOARD_SIZE_GUIDANCE,
      purge_orphaned_data=FLAGS.purge_orphaned_data)
  server.ReloadMultiplexer(multiplexer, path_to_run)

  PrintAndLog('Multiplexer load finished. Starting TensorBoard server.')
  # Port 0 lets the OS pick a free ephemeral port; the actual port is read
  # back from server_address below.
  s = server.BuildServer(multiplexer, 'localhost', 0)
  server_thread = threading.Thread(target=s.serve_forever)
  server_thread.daemon = True
  server_thread.start()
  connection = http_client.HTTPConnection('localhost', s.server_address[1])

  PrintAndLog('Server setup! Downloading data from the server.')
  x = TensorBoardStaticSerializer(connection, target)
  x.Run()

  PrintAndLog('Done downloading data.')
  connection.close()
  s.shutdown()
  s.server_close()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system.
The images are filtered using the real parts of various different Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least squared error for simplicity.
"""
from __future__ import print_function
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as nd
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
    """Return a (len(kernels), 2) array of (mean, variance) per filter."""
    rows = []
    for kern in kernels:
        # Wrap-around boundary handling matches the periodic texture images.
        response = nd.convolve(image, kern, mode='wrap')
        rows.append((response.mean(), response.var()))
    # reshape keeps the (0, 2) shape even when no kernels are given.
    return np.asarray(rows, dtype=np.double).reshape(len(kernels), 2)
def match(feats, ref_feats):
    """Return the index of the reference with the smallest squared error."""
    best_i = None
    best_error = np.inf
    for i in range(ref_feats.shape[0]):
        candidate_error = ((feats - ref_feats[i, :]) ** 2).sum()
        if candidate_error < best_error:
            best_error = candidate_error
            best_i = i
    return best_i
# prepare filter bank kernels
# 4 orientations x 2 sigmas x 2 frequencies = 16 real-valued Gabor kernels.
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(gabor_kernel(frequency, theta=theta,
                                          sigma_x=sigma, sigma_y=sigma))
            kernels.append(kernel)

# Downsample each image by a factor of 3 in both axes to speed things up.
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)

# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)

print('Rotated images matched against references using Gabor filter banks:')

print('original: brick, rotated: 30deg, match result: ', end='')
# NOTE(review): label above says 30deg but the rotation applied is 190deg —
# confirm which is intended.
feats = compute_feats(nd.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])

print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(nd.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])

print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(nd.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
    """Return the magnitude response of *image* to a complex Gabor *kernel*."""
    # Normalize images for better comparison.
    normalized = (image - image.mean()) / image.std()
    real_resp = nd.convolve(normalized, np.real(kernel), mode='wrap')
    imag_resp = nd.convolve(normalized, np.imag(kernel), mode='wrap')
    return np.sqrt(real_resp ** 2 + imag_resp ** 2)
# Plot a selection of the filter bank kernels and their responses.
# Only 2 orientations x 2 frequencies are shown here (4 rows of plots).
results = []
kernel_params = []
for theta in (0, 1):
    theta = theta / 4. * np.pi
    for frequency in (0.1, 0.4):
        kernel = gabor_kernel(frequency, theta=theta)
        params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
        kernel_params.append(params)
        # Save kernel and the power image for each image
        results.append((kernel, [power(img, kernel) for img in images]))

fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()

fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)

# Top-left cell stays empty (no kernel / no image there).
axes[0][0].axis('off')

# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
    ax.imshow(img)
    ax.set_title(label, fontsize=9)
    ax.axis('off')

for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
    # Plot Gabor kernel
    ax = ax_row[0]
    ax.imshow(np.real(kernel), interpolation='nearest')
    ax.set_ylabel(label, fontsize=7)
    ax.set_xticks([])
    ax.set_yticks([])

    # Plot Gabor responses with the contrast normalized for each filter
    vmin = np.min(powers)
    vmax = np.max(powers)
    for patch, ax in zip(powers, ax_row[1:]):
        ax.imshow(patch, vmin=vmin, vmax=vmax)
        ax.axis('off')

plt.show()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigiq_regkey_pool
short_description: Manages registration key pools on BIG-IQ
description:
- Manages registration key (regkey) pools on a BIG-IQ. These pools function as
a container in-which you will add lists of registration keys. To add registration
keys, use the C(bigiq_regkey_license) module.
version_added: "2.5"
options:
name:
description:
- Specifies the name of the registration key pool.
- You must be mindful to name your registration pools unique names. While
BIG-IQ does not require this, this module does. If you do not do this,
the behavior of the module is undefined and you may end up putting
licenses in the wrong registration key pool.
required: True
description:
description:
- A description to attach to the pool.
state:
description:
- The state of the regkey pool on the system.
- When C(present), guarantees that the pool exists.
- When C(absent), removes the pool, and the licenses it contains, from the
system.
default: present
choices:
- absent
- present
requirements:
- BIG-IQ >= 5.3.0
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a registration key (regkey) pool to hold individual device licenses
bigiq_regkey_pool:
name: foo-pool
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: New description of the regkey pool.
returned: changed
type: string
sample: My description
'''
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
from ansible.module_utils.six import iteritems
from collections import defaultdict
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    # Maps remote API attribute names to module parameter names; empty here
    # because the regkey-pool API names match the module's parameter names.
    api_map = {
    }
    # Attributes sent to the remote API on create/modify.
    api_attributes = [
        'description'
    ]
    # Attributes reported back to the user in the module result.
    returnables = [
        'description'
    ]
    # Attributes compared between desired and current state to detect changes.
    updatables = [
        'description'
    ]

    def __init__(self, params=None):
        """Initialize the parameter store.

        :param params: Optional dict of initial parameter values.
        """
        # defaultdict so reads of unset keys yield None instead of KeyError.
        self._values = defaultdict(lambda: None)
        self._values['__warnings'] = []
        if params:
            self.update(params=params)

    def update(self, params=None):
        """Merge ``params`` into the store, honoring ``api_map`` and property setters."""
        if params:
            for k, v in iteritems(params):
                if self.api_map is not None and k in self.api_map:
                    map_key = self.api_map[k]
                else:
                    map_key = k

                # Handle weird API parameters like `dns.proxy.__iter__` by
                # using a map provided by the module developer
                class_attr = getattr(type(self), map_key, None)
                if isinstance(class_attr, property):
                    # There is a mapped value for the api_map key
                    if class_attr.fset is None:
                        # If the mapped value does not have
                        # an associated setter
                        self._values[map_key] = v
                    else:
                        # The mapped value has a setter
                        setattr(self, map_key, v)
                else:
                    # If the mapped value is not a @property
                    self._values[map_key] = v

    def to_return(self):
        """Return a dict of ``returnables`` values for the module result.

        Any exception while gathering values yields a (possibly partial)
        result rather than propagating.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result

    def api_params(self):
        """Return a dict of ``api_attributes`` values to send to the API."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result
class ModuleParameters(Parameters):
    @property
    def uuid(self):
        """Look up the pool UUID for the configured name.

        Searches the regkey license pool collection for the first pool whose
        name matches this module's ``name`` parameter. When nothing matches,
        the literal string "none" is returned instead of None, because a None
        would be appended to the request URI and make the SDK query the whole
        collection (producing a confusing error).

        :return: the matching pool's id, or the string "none".
        """
        wanted = self._values['name']
        collection = self.client.api.cm.device.licensing.pool.regkey.licenses_s.get_collection()
        for member in collection:
            if member.name == wanted:
                return member.id
        return "none"
class ApiParameters(Parameters):
    @property
    def uuid(self):
        """Return the pool id exactly as reported by the BIG-IQ API."""
        identifier = self._values['id']
        return identifier
# Base container for detected differences between desired and current state.
class Changes(Parameters):
    pass


# Changes as reported back to the user in the module result.
class ReportableChanges(Changes):
    pass


# Changes as sent to the remote device API.
class UsableChanges(Changes):
    pass
class Difference(object):
    """Computes per-parameter differences between desired and current state."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for ``param``, or None when unchanged.

        A property named after ``param`` on this class, when present, takes
        precedence over the generic attribute comparison.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        desired = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            # No current value to compare against; treat as changed.
            return desired
        if desired != current:
            return desired
class ModuleManager(object):
    """Drives the regkey pool on BIG-IQ towards the desired state.

    Reads the user's desired state from the module parameters (``want``)
    and the device's current state (``have``), then performs the create,
    update, or delete needed to reconcile them.
    """

    def __init__(self, client):
        self.client = client
        self.want = ModuleParameters(self.client.module.params)
        # The uuid lookup in ModuleParameters needs API access.
        self.want.update({'client': client})
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        """Record every user-supplied returnable as a change (create path)."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(changed)

    def _update_changed_options(self):
        """Diff want against have; return True when an update is needed."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            # Consistency fix: build UsableChanges here to match
            # _set_changed_options(); previously a bare Changes object was
            # created, so the type of self.changes depended on the code path.
            self.changes = UsableChanges(changed)
            return True
        return False

    def should_update(self):
        """Return True when the device differs from the desired state."""
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Module entry point: apply the requested state and report results."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            # Normalize SDK HTTP errors into the module's error type.
            raise F5ModuleError(str(e))

        reportable = ReportableChanges(self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        """Forward any queued deprecation warnings to Ansible."""
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Return True when a pool with the wanted uuid exists on BIG-IQ."""
        result = self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.exists(
            id=self.want.uuid
        )
        return result

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            # In check mode, report that a change would occur without
            # touching the device.
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.client.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        self._set_changed_options()
        if self.client.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        params = self.want.api_params()
        self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.create(
            name=self.want.name,
            **params
        )

    def update_on_device(self):
        params = self.changes.api_params()
        resource = self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.load(
            id=self.want.uuid
        )
        resource.modify(**params)

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        resource = self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.load(
            id=self.want.uuid
        )
        if resource:
            resource.delete()

    def read_current_from_device(self):
        """Load the pool from BIG-IQ and wrap its attributes for comparison."""
        resource = self.client.api.cm.device.licensing.pool.regkey.licenses_s.licenses.load(
            id=self.want.uuid
        )
        result = resource.attrs
        return ApiParameters(result)
class ArgumentSpec(object):
    """Declares the module's argument specification for AnsibleF5Client."""

    def __init__(self):
        self.supports_check_mode = True
        state_spec = dict(
            default='present',
            choices=['absent', 'present']
        )
        self.argument_spec = dict(
            name=dict(required=True),
            description=dict(),
            state=state_spec
        )
        self.f5_product_name = 'bigiq'
def main():
    """Module entry point: build the F5 client and run the manager."""
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")

    spec = ArgumentSpec()

    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )

    try:
        mm = ModuleManager(client)
        results = mm.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        # Report failures through Ansible's standard failure path.
        client.module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
# Library version string.
__version__ = '1.4.70'
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record."""

    def emit(self, record):
        # Intentionally a no-op: the library's default logger does nothing
        # unless the application installs its own handlers.
        pass
# Configure default logger to do nothing
# (the application opts into botocore log output by adding its own handlers).
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
# Regexes used by xform_name() to insert a separator at word boundaries in
# CamelCase names: before a capitalized word, between a lowercase letter and
# a digit run, and between a lowercase/digit character and a capital.
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acryonym
# name is pluralized, e.g GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
    ('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
    ('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
    ('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
    ('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
    ('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
    ('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
    ('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
    ('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
}
# Scalar (non-structured) type names used by the service models.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
# Absolute path of the installed botocore package directory.
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))

# Used to specify anonymous (unsigned) request signature
UNSIGNED = object()


def xform_name(name, sep='_', _xform_cache=_xform_cache):
    """Convert camel case to a "pythonic" name.

    If the name contains the ``sep`` character, then it is
    returned unchanged.

    :param name: CamelCase name to transform.
    :param sep: Separator inserted between words (default ``'_'``).
    :param _xform_cache: Memoization cache keyed by ``(name, sep)``; the
        module-level dict is shared deliberately so each pair is only
        computed once per process.
    :return: The transformed, lowercased name.
    """
    if sep in name:
        # If the sep is in the name, assume that it's already
        # transformed and return the string unchanged.
        return name
    key = (name, sep)
    if key not in _xform_cache:
        # Fix: search once and reuse the match object (previously the same
        # regex search was executed twice).
        is_special = _special_case_transform.search(name)
        if is_special is not None:
            matched = is_special.group()
            # Replace something like ARNs, ACLs with _arns, _acls.
            name = name[:-len(matched)] + sep + matched.lower()
        s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
        s2 = _number_cap_regex.sub(r'\1' + sep + r'\2', s1)
        transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s2).lower()
        _xform_cache[key] = transformed
    return _xform_cache[key]
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2014-2021 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.server.cio.backend
import io.ktor.network.sockets.*
import io.ktor.util.network.*
/** Port of this address, or `null` when it is not an [InetSocketAddress]. */
internal val SocketAddress.port: Int?
    get() = (this as? InetSocketAddress)?.port

/**
 * Converts this address to ktor's [NetworkAddress], or returns `null`.
 * Platform-specific implementations live in the per-platform source sets
 * (expect/actual declaration).
 */
internal expect fun SocketAddress.toNetworkAddress(): NetworkAddress?
|
kotlin
|
github
|
https://github.com/ktorio/ktor
|
ktor-server/ktor-server-cio/common/src/io/ktor/server/cio/backend/SocketAddressUtils.kt
|
//===--- GenKeyPath.h - IR generation for KeyPath ---------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file provides the private interface to the emission of KeyPath
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_IRGEN_GENKEYPATH_H
#define SWIFT_IRGEN_GENKEYPATH_H
#include "GenericRequirement.h"
#include "swift/AST/SubstitutionMap.h"
#include "swift/Basic/LLVM.h"
#include "swift/SIL/SILValue.h"
#include "llvm/IR/Value.h"
namespace swift {
namespace irgen {
class Explosion;
class IRGenFunction;
class StackAddress;
/// Emit the argument buffer used when applying a key path.
///
/// NOTE(review): declaration only — judging from the signature, this packs
/// \p indiceValues (with SIL types \p indiceTypes) together with the generic
/// environment described by \p subs and \p sig; \p dynamicArgsBuf is
/// presumably populated when a dynamically-sized stack allocation was needed
/// so the caller can deallocate it. Confirm the exact meaning of the returned
/// pair against the definition in GenKeyPath.cpp.
std::pair<llvm::Value *, llvm::Value *>
emitKeyPathArgument(IRGenFunction &IGF, SubstitutionMap subs,
                    const CanGenericSignature &sig,
                    ArrayRef<SILType> indiceTypes, Explosion &indiceValues,
                    std::optional<StackAddress> &dynamicArgsBuf);
} // end namespace irgen
} // end namespace swift
#endif
|
c
|
github
|
https://github.com/apple/swift
|
lib/IRGen/GenKeyPath.h
|
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.build.autoconfigure;
import java.util.Arrays;
import java.util.Collections;
import java.util.Set;
import java.util.stream.Collectors;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.ConfigurationContainer;
import org.gradle.api.artifacts.Dependency;
import org.gradle.api.attributes.Category;
import org.gradle.api.attributes.Usage;
import org.gradle.api.plugins.JavaBasePlugin;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.api.plugins.JavaPluginExtension;
import org.gradle.api.tasks.SourceSet;
import org.gradle.api.tasks.TaskContainer;
import org.gradle.api.tasks.TaskProvider;
import org.springframework.boot.build.DeployedPlugin;
import org.springframework.boot.build.optional.OptionalDependenciesPlugin;
/**
* {@link Plugin} for projects that define auto-configuration. When applied, the plugin
* applies the {@link DeployedPlugin}. Additionally, when the {@link JavaPlugin} is
* applied it:
*
* <ul>
* <li>Adds a dependency on the auto-configuration annotation processor.
* <li>Defines a task that produces metadata describing the auto-configuration. The
* metadata is made available as an artifact in the {@code autoConfigurationMetadata}
* configuration.
* <li>Add checks to ensure import files and annotations are correct</li>
* </ul>
*
* @author Andy Wilkinson
*/
public class AutoConfigurationPlugin implements Plugin<Project> {

	// Name of the consumable configuration through which the generated
	// auto-configuration metadata artifact is exposed to other projects.
	private static final String AUTO_CONFIGURATION_METADATA_CONFIGURATION_NAME = "autoConfigurationMetadata";

	@Override
	public void apply(Project project) {
		// The deployed plugin always applies; the remaining configuration is
		// only wired in once the Java plugin is present.
		project.getPlugins().apply(DeployedPlugin.class);
		project.getPlugins().withType(JavaPlugin.class, (javaPlugin) -> new Configurer(project).configure());
	}

	// Performs the actual configuration for Java projects.
	private static class Configurer {

		private final Project project;

		// The project's main source set; every task below operates on it.
		private SourceSet main;

		Configurer(Project project) {
			this.project = project;
			this.main = project.getExtensions()
				.getByType(JavaPluginExtension.class)
				.getSourceSets()
				.getByName(SourceSet.MAIN_SOURCE_SET_NAME);
		}

		void configure() {
			addAnnotationProcessorsDependencies();
			TaskContainer tasks = this.project.getTasks();
			ConfigurationContainer configurations = this.project.getConfigurations();
			// Expose the generated metadata as a documentation-category variant.
			configurations.consumable(AUTO_CONFIGURATION_METADATA_CONFIGURATION_NAME, (configuration) -> {
				configuration.attributes((attributes) -> {
					attributes.attribute(Category.CATEGORY_ATTRIBUTE,
							this.project.getObjects().named(Category.class, Category.DOCUMENTATION));
					attributes.attribute(Usage.USAGE_ATTRIBUTE,
							this.project.getObjects().named(Usage.class, "auto-configuration-metadata"));
				});
			});
			tasks.register("autoConfigurationMetadata", AutoConfigurationMetadata.class,
					this::configureAutoConfigurationMetadata);
			// Verification tasks for the imports file and the annotated classes.
			TaskProvider<CheckAutoConfigurationImports> checkAutoConfigurationImports = tasks.register(
					"checkAutoConfigurationImports", CheckAutoConfigurationImports.class,
					this::configureCheckAutoConfigurationImports);
			Configuration requiredClasspath = configurations.create("autoConfigurationRequiredClasspath")
				.extendsFrom(configurations.getByName(this.main.getImplementationConfigurationName()),
						configurations.getByName(this.main.getRuntimeOnlyConfigurationName()));
			requiredClasspath.getDependencies().add(projectDependency(":core:spring-boot-autoconfigure"));
			TaskProvider<CheckAutoConfigurationClasses> checkAutoConfigurationClasses = tasks.register(
					"checkAutoConfigurationClasses", CheckAutoConfigurationClasses.class,
					(task) -> configureCheckAutoConfigurationClasses(requiredClasspath, task));
			this.project.getPlugins()
				.withType(OptionalDependenciesPlugin.class,
						(plugin) -> configureCheckAutoConfigurationClassesForOptionalDependencies(configurations,
								checkAutoConfigurationClasses));
			// Hook both verification tasks into the standard `check` lifecycle task.
			this.project.getTasks()
				.getByName(JavaBasePlugin.CHECK_TASK_NAME)
				.dependsOn(checkAutoConfigurationImports, checkAutoConfigurationClasses);
		}

		// Adds the auto-configuration and configuration-metadata annotation
		// processors to the project's annotationProcessor configuration.
		private void addAnnotationProcessorsDependencies() {
			this.project.getConfigurations()
				.getByName(JavaPlugin.ANNOTATION_PROCESSOR_CONFIGURATION_NAME)
				.getDependencies()
				.addAll(projectDependencies(":core:spring-boot-autoconfigure-processor",
						":configuration-metadata:spring-boot-configuration-processor"));
		}

		private void configureAutoConfigurationMetadata(AutoConfigurationMetadata task) {
			task.setSourceSet(this.main);
			task.dependsOn(this.main.getClassesTaskName());
			task.getOutputFile()
				.set(this.project.getLayout().getBuildDirectory().file("auto-configuration-metadata.properties"));
			// Publish the properties file as the artifact of the metadata configuration.
			this.project.getArtifacts()
				.add(AutoConfigurationPlugin.AUTO_CONFIGURATION_METADATA_CONFIGURATION_NAME, task.getOutputFile(),
						(artifact) -> artifact.builtBy(task));
		}

		private void configureCheckAutoConfigurationImports(CheckAutoConfigurationImports task) {
			task.setSource(this.main.getResources());
			task.setClasspath(this.main.getOutput().getClassesDirs());
			task.setDescription(
					"Checks the %s file of the main source set.".formatted(AutoConfigurationImportsTask.IMPORTS_FILE));
		}

		private void configureCheckAutoConfigurationClasses(Configuration requiredClasspath,
				CheckAutoConfigurationClasses task) {
			task.setSource(this.main.getResources());
			task.setClasspath(this.main.getOutput().getClassesDirs());
			task.setRequiredDependencies(requiredClasspath);
			task.setDescription("Checks the auto-configuration classes of the main source set.");
		}

		private void configureCheckAutoConfigurationClassesForOptionalDependencies(
				ConfigurationContainer configurations,
				TaskProvider<CheckAutoConfigurationClasses> checkAutoConfigurationClasses) {
			checkAutoConfigurationClasses.configure((check) -> {
				Configuration optionalClasspath = configurations.create("autoConfigurationOptionalClassPath")
					.extendsFrom(configurations.getByName(OptionalDependenciesPlugin.OPTIONAL_CONFIGURATION_NAME));
				check.setOptionalDependencies(optionalClasspath);
			});
		}

		private Set<Dependency> projectDependencies(String... paths) {
			return Arrays.stream(paths).map((path) -> projectDependency(path)).collect(Collectors.toSet());
		}

		// Creates a project dependency for the given Gradle project path.
		private Dependency projectDependency(String path) {
			return this.project.getDependencies().project(Collections.singletonMap("path", path));
		}

	}

}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
buildSrc/src/main/java/org/springframework/boot/build/autoconfigure/AutoConfigurationPlugin.java
|
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from django.utils import translation
from ...utils import setup
class I18nGetLanguageInfoTagTests(SimpleTestCase):
    # Template tag libraries made available to {% load %} in these templates.
    libraries = {
        'custom': 'template_tests.templatetags.custom',
        'i18n': 'django.templatetags.i18n',
    }

    # retrieving language information
    @setup({'i18n28_2': '{% load i18n %}'
                        '{% get_language_info for "de" as l %}'
                        '{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}'})
    def test_i18n28_2(self):
        # Language given as a string literal.
        output = self.engine.render_to_string('i18n28_2')
        self.assertEqual(output, 'de: German/Deutsch bidi=False')

    @setup({'i18n29': '{% load i18n %}'
                      '{% get_language_info for LANGUAGE_CODE as l %}'
                      '{{ l.code }}: {{ l.name }}/{{ l.name_local }} bidi={{ l.bidi }}'})
    def test_i18n29(self):
        # Language resolved from a context variable.
        output = self.engine.render_to_string('i18n29', {'LANGUAGE_CODE': 'fi'})
        self.assertEqual(output, 'fi: Finnish/suomi bidi=False')

    # Test whitespace in filter arguments
    @setup({'i18n38': '{% load i18n %}'
                      '{% get_language_info for "de"|noop:"x y" as l %}'
                      '{{ l.code }}: {{ l.name }}/{{ l.name_local }}/'
                      '{{ l.name_translated }} bidi={{ l.bidi }}'})
    def test_i18n38(self):
        # name_translated follows the active ('cs') translation.
        with translation.override('cs'):
            output = self.engine.render_to_string('i18n38')
        self.assertEqual(output, 'de: German/Deutsch/německy bidi=False')

    @setup({'template': '{% load i18n %}''{% get_language_info %}'})
    def test_no_for_as(self):
        # A missing 'for ... as ...' clause is reported as a syntax error.
        msg = "'get_language_info' requires 'for string as variable' (got [])"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            self.engine.render_to_string('template')
|
unknown
|
codeparrot/codeparrot-clean
| ||
class TrieNode(object):
    def __init__(self):
        """
        Initialize your data structure here.
        """
        # One slot per lowercase letter 'a'..'z'; 0 marks "no child" and is
        # replaced by a TrieNode when a child is created.
        # Fix: `range` instead of the Python-2-only `xrange`, so this works
        # on Python 3 as well.
        self.childlist = [0 for _ in range(26)]
        # the node value (the character this node represents)
        self.val = None
        # end flag to determine whether this node ends a word or is only a prefix
        self.isend = False
class Trie(object):
    """Prefix tree over words made of lowercase ASCII letters ('a'..'z')."""

    def __init__(self):
        self.root = TrieNode()

    def insert(self, word):
        """
        Inserts a word into the trie.
        :type word: str
        :rtype: void
        """
        if not word:
            return
        curnode = self.root
        # Fix: iterate characters directly (the original indexed with the
        # Python-2-only `xrange`, a NameError on Python 3).
        for ch in word:
            pos = ord(ch) - 97  # offset from 'a'
            if not curnode.childlist[pos]:
                curnode.childlist[pos] = TrieNode()
                curnode.childlist[pos].val = ch
            # move on
            curnode = curnode.childlist[pos]
        # Mark the terminal node so search() can tell words from prefixes.
        curnode.isend = True

    def _walk(self, s):
        """Follow ``s`` from the root; return the final node, or None."""
        curnode = self.root
        for ch in s:
            pos = ord(ch) - 97
            if not curnode.childlist[pos]:
                return None
            curnode = curnode.childlist[pos]
        return curnode

    def search(self, word):
        """
        Returns if the word is in the trie.
        :type word: str
        :rtype: bool
        """
        if not word:
            return False
        node = self._walk(word)
        # if we reached the end, the word exists only if the end flag is set
        return node is not None and node.isend

    def startsWith(self, prefix):
        """
        Returns if there is any word in the trie
        that starts with the given prefix.
        :type prefix: str
        :rtype: bool
        """
        if not prefix:
            return False
        return self._walk(prefix) is not None
# Your Trie object will be instantiated and called as such:
# trie = Trie()
# trie.insert("somestring")
# trie.search("key")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2011 Marco Conti
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# sourced from https://github.com/marcoconti83/read-ods-with-odfpy
# further altered locally
from odf import opendocument
from odf.table import Table, TableRow, TableCell
from odf.text import P
class ODSReader(object):
    """Reads an OpenDocument spreadsheet into a dict of 2-D string arrays."""

    # loads the file
    def __init__(self, file=None, content=None, clonespannedcolumns=None):
        """Load a spreadsheet from a path (``file``) or an already-loaded
        document (``content``) and parse every sheet.

        :param clonespannedcolumns: when not None, a cell spanning several
            columns is repeated once per spanned column.
        """
        if not content:
            self.clonespannedcolumns = clonespannedcolumns
            self.doc = opendocument.load(file)
        else:
            self.clonespannedcolumns = clonespannedcolumns
            self.doc = content
        self.SHEETS = {}
        for sheet in self.doc.spreadsheet.getElementsByType(Table):
            self.readSheet(sheet)

    # reads a sheet in the sheet dictionary, storing each sheet as an
    # array (rows) of arrays (columns)
    def readSheet(self, sheet):
        name = sheet.getAttribute("name")
        rows = sheet.getElementsByType(TableRow)
        arrRows = []
        # for each row
        for row in rows:
            arrCells = []
            cells = row.getElementsByType(TableCell)
            # for each cell
            for count, cell in enumerate(cells, start=1):
                # repeated value? (ignored for the trailing cell, which ODS
                # pads out to the full column count)
                repeat = 0
                if count != len(cells):
                    repeat = cell.getAttribute("numbercolumnsrepeated")
                if not repeat:
                    repeat = 1
                    spanned = int(cell.getAttribute('numbercolumnsspanned') or 0)
                    # clone spanned cells
                    if self.clonespannedcolumns is not None and spanned > 1:
                        repeat = spanned
                ps = cell.getElementsByType(P)
                textContent = u""
                # for each text/text:span node
                for p in ps:
                    for n in p.childNodes:
                        if n.nodeType == 1 and n.tagName == "text:span":
                            for c in n.childNodes:
                                if c.nodeType == 3:
                                    # Fix: append the text child's data; the
                                    # original used ``n.data``, but ``n`` is an
                                    # element node and has no ``.data``.
                                    textContent = u'{}{}'.format(textContent, c.data)
                        if n.nodeType == 3:
                            textContent = u'{}{}'.format(textContent, n.data)
                if textContent:
                    if not textContent.startswith("#"):  # ignore comments cells
                        for rr in range(int(repeat)):  # repeated?
                            arrCells.append(textContent)
                else:
                    for rr in range(int(repeat)):
                        arrCells.append("")
            # if row contained something
            if arrCells:
                arrRows.append(arrCells)
            #else:
            #    print ("Empty or commented row (", row_comment, ")")
        self.SHEETS[name] = arrRows

    # returns a sheet as an array (rows) of arrays (columns)
    def getSheet(self, name):
        return self.SHEETS[name]

    def getFirstSheet(self):
        # Fix: ``itervalues()`` is Python-2-only; ``values()`` works on 2 and 3.
        return next(iter(self.SHEETS.values()))
|
unknown
|
codeparrot/codeparrot-clean
| ||
#---------------------------------------------------------------------------
# Copyright 2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import os
import sys
import unittest
# Make the scripts directory two levels up importable so PatchOrderGenerator
# and LoggerManager resolve when this test module is run directly.
curDir = os.path.dirname(os.path.abspath(__file__))
scriptDir = os.path.normpath(os.path.join(curDir, "../../"))
if scriptDir not in sys.path:
    sys.path.append(scriptDir)
# Real package tree and the small local fixture tree used by the tests.
PACKAGE_DIR = os.path.normpath(os.path.join(scriptDir, "../Packages"))
TEST_PACKAGE_DIR = os.path.join(curDir, "Packages")
from PatchOrderGenerator import PatchOrderGenerator, topologicSort
from LoggerManager import logger, initConsoleLogging
import logging
class TestPatchOrderGenerator(unittest.TestCase):
    """Tests for PatchOrderGenerator and the topologicSort helper."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)
        self.handler = None

    def setUp(self):
        # Silence everything below ERROR for the duration of each test.
        logger.setLevel(logging.ERROR)
        self.handler = initConsoleLogging(logging.ERROR)

    def tearDown(self):
        logger.removeHandler(self.handler)

    def test_generatePatchOrderTopologic(self):
        patchOrderGen = PatchOrderGenerator()
        patchOrder = patchOrderGen.generatePatchOrderTopologic(PACKAGE_DIR)
        self.assertTrue(patchOrder, "no valid patch order is generated")

    def test_generatePatchOrderTopologicSample(self):
        patchOrderGen = PatchOrderGenerator()
        patchOrder = patchOrderGen.generatePatchOrderTopologic(TEST_PACKAGE_DIR)
        self.verifySampleOrder(patchOrder)

    def verifySampleOrder(self, patchOrder):
        """Assert the fixture tree produces the expected install order."""
        self.assertTrue(patchOrder, "no valid patch order is generated")
        expectedOrder = ['LR*5.2*382', 'HDI*1.0*7', 'LR*5.2*350',
                         'LA*5.2*74', 'LR*5.2*420'
                         ]
        installList = [x.installName for x in patchOrder]
        self.assertEqual(installList, expectedOrder)

    def test_topologicSort(self):
        depDict = {'2': ['11'],
                   '9': ['11', '8'],
                   '10': ['11', '3'],
                   '11': ['7', '5'],
                   '8': ['7', '3'],
                   '12': [],
                   }
        result = topologicSort(depDict)
        self.assertTrue(result, "no valid order is generated")
        self.assertTrue('12' in result, "orphan node is ignored")
        # Fix: print() calls instead of the Python-2-only ``print result``
        # statements, which are SyntaxErrors under Python 3.
        print(result)
        result = topologicSort(depDict, '9')
        self.assertTrue(result, "no valid order is generated")
        print(result)
        result = topologicSort(depDict, '10')
        self.assertTrue(result, "no valid order is generated")
        print(result)
        result = topologicSort(depDict, '2')
        self.assertTrue(result, "no valid order is generated")
        print(result)
        # (a duplicated, back-to-back copy of the previous assertion was removed)
        # this will create a cycle among 5, 11, 10
        depDict.update({'5': ['10']})
        self.assertRaises(Exception, topologicSort, depDict)
        # this will create a cycle among 2, 5, 8, 11
        depDict.update({'5': ['8'],
                        '8': ['7', '3', '2']})
        self.assertRaises(Exception, topologicSort, depDict)
if __name__ == '__main__':
    # Run this module's tests directly (verbose), bypassing test discovery.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestPatchOrderGenerator)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cauchy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import numpy as np
from tensorflow.contrib.distributions.python.ops import cauchy as cauchy_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name):  # pylint: disable=invalid-name
    """Import ``name`` and return the module, or None when unavailable.

    A failed import is logged as a warning instead of raised, so optional
    dependencies (e.g. scipy) can be probed safely.
    """
    try:
        return importlib.import_module(name)
    except ImportError as e:
        tf_logging.warning("Could not import %s: %s" % (name, str(e)))
        return None
stats = try_import("scipy.stats")
class CauchyTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def assertAllFinite(self, tensor):
is_finite = np.isfinite(tensor.eval())
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
def _testParamShapes(self, sample_shape, expected):
with self.cached_session():
param_shapes = cauchy_lib.Cauchy.param_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertAllEqual(expected, loc_shape.eval())
self.assertAllEqual(expected, scale_shape.eval())
loc = array_ops.zeros(loc_shape)
scale = array_ops.ones(scale_shape)
self.assertAllEqual(expected,
array_ops.shape(
cauchy_lib.Cauchy(loc, scale).sample()).eval())
def _testParamStaticShapes(self, sample_shape, expected):
param_shapes = cauchy_lib.Cauchy.param_static_shapes(sample_shape)
loc_shape, scale_shape = param_shapes["loc"], param_shapes["scale"]
self.assertEqual(expected, loc_shape)
self.assertEqual(expected, scale_shape)
def testParamShapes(self):
sample_shape = [10, 3, 4]
self._testParamShapes(sample_shape, sample_shape)
self._testParamShapes(constant_op.constant(sample_shape), sample_shape)
def testParamStaticShapes(self):
sample_shape = [10, 3, 4]
self._testParamStaticShapes(sample_shape, sample_shape)
self._testParamStaticShapes(
tensor_shape.TensorShape(sample_shape), sample_shape)
def testCauchyLogPDF(self):
with self.cached_session():
batch_size = 6
loc = constant_op.constant([3.0] * batch_size)
scale = constant_op.constant([np.sqrt(10.0)] * batch_size)
x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
log_pdf = cauchy.log_prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
log_pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
pdf = cauchy.prob(x)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.eval().shape)
self.assertAllEqual(cauchy.batch_shape, pdf.shape)
self.assertAllEqual(cauchy.batch_shape, pdf.eval().shape)
if not stats:
return
expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
self.assertAllClose(expected_log_pdf, log_pdf.eval())
self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
def testCauchyLogPDFMultidimensional(self):
    """log_prob/prob over a (6, 2) batch: shape checks and scipy agreement."""
    with self.cached_session():
        batch_size = 6
        loc = constant_op.constant([[3.0, -3.0]] * batch_size)
        scale = constant_op.constant(
            [[np.sqrt(10.0), np.sqrt(15.0)]] * batch_size)
        # x is a (6, 1) column vector so it broadcasts against the (6, 2) batch.
        x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
        cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
        log_pdf = cauchy.log_prob(x)
        log_pdf_values = log_pdf.eval()
        self.assertEqual(log_pdf.shape, (6, 2))
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), log_pdf.shape)
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
                            log_pdf.eval().shape)
        self.assertAllEqual(cauchy.batch_shape, log_pdf.shape)
        self.assertAllEqual(cauchy.batch_shape, log_pdf.eval().shape)
        pdf = cauchy.prob(x)
        pdf_values = pdf.eval()
        self.assertEqual(pdf.shape, (6, 2))
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf.shape)
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), pdf_values.shape)
        self.assertAllEqual(cauchy.batch_shape, pdf.shape)
        self.assertAllEqual(cauchy.batch_shape, pdf_values.shape)
        # scipy may be unavailable in the test environment; skip value checks.
        if not stats:
            return
        expected_log_pdf = stats.cauchy(loc.eval(), scale.eval()).logpdf(x)
        self.assertAllClose(expected_log_pdf, log_pdf_values)
        self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
def testCauchyCDF(self):
    """cdf() shape tracks the batch shape; values agree with scipy."""
    with self.cached_session():
        n = 50
        loc = self._rng.randn(n)
        scale = self._rng.rand(n) + 1.0
        x = np.linspace(-8.0, 8.0, n).astype(np.float64)
        dist = cauchy_lib.Cauchy(loc=loc, scale=scale)
        cdf = dist.cdf(x)
        batch_shape_val = dist.batch_shape_tensor().eval()
        self.assertAllEqual(batch_shape_val, cdf.shape)
        self.assertAllEqual(batch_shape_val, cdf.eval().shape)
        self.assertAllEqual(dist.batch_shape, cdf.shape)
        self.assertAllEqual(dist.batch_shape, cdf.eval().shape)
        if not stats:
            return
        self.assertAllClose(stats.cauchy(loc, scale).cdf(x), cdf.eval(), atol=0)
def testCauchySurvivalFunction(self):
    """survival_function() shape tracks the batch shape; values match scipy."""
    with self.cached_session():
        n = 50
        loc = self._rng.randn(n)
        scale = self._rng.rand(n) + 1.0
        x = np.linspace(-8.0, 8.0, n).astype(np.float64)
        dist = cauchy_lib.Cauchy(loc=loc, scale=scale)
        sf = dist.survival_function(x)
        batch_shape_val = dist.batch_shape_tensor().eval()
        self.assertAllEqual(batch_shape_val, sf.shape)
        self.assertAllEqual(batch_shape_val, sf.eval().shape)
        self.assertAllEqual(dist.batch_shape, sf.shape)
        self.assertAllEqual(dist.batch_shape, sf.eval().shape)
        if not stats:
            return
        self.assertAllClose(stats.cauchy(loc, scale).sf(x), sf.eval(), atol=0)
def testCauchyLogCDF(self):
    """log_cdf() shape tracks the batch shape; values match scipy.logcdf."""
    with self.cached_session():
        batch_size = 50
        loc = self._rng.randn(batch_size)
        scale = self._rng.rand(batch_size) + 1.0
        # Extend far into the left tail, where log_cdf is numerically hardest.
        x = np.linspace(-100.0, 10.0, batch_size).astype(np.float64)
        cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
        cdf = cauchy.log_cdf(x)
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.shape)
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), cdf.eval().shape)
        self.assertAllEqual(cauchy.batch_shape, cdf.shape)
        self.assertAllEqual(cauchy.batch_shape, cdf.eval().shape)
        # scipy may be unavailable in the test environment; skip value checks.
        if not stats:
            return
        expected_cdf = stats.cauchy(loc, scale).logcdf(x)
        self.assertAllClose(expected_cdf, cdf.eval(), atol=0, rtol=1e-5)
def testFiniteGradientAtDifficultPoints(self):
    """All prob/cdf/sf functions and their grads stay finite at extreme x."""
    for dtype in [np.float32, np.float64]:
        # Use a fresh graph per dtype so variables don't collide.
        g = ops.Graph()
        with g.as_default():
            loc = variables.Variable(dtype(0.0))
            scale = variables.Variable(dtype(1.0))
            dist = cauchy_lib.Cauchy(loc=loc, scale=scale)
            # Points deep in both tails as well as the center.
            x = np.array([-100., -20., -5., 0., 5., 20., 100.]).astype(dtype)
            for func in [
                dist.cdf, dist.log_cdf, dist.survival_function,
                dist.log_survival_function, dist.log_prob, dist.prob
            ]:
                value = func(x)
                grads = gradients_impl.gradients(value, [loc, scale])
                with self.session(graph=g):
                    variables.global_variables_initializer().run()
                    self.assertAllFinite(value)
                    self.assertAllFinite(grads[0])
                    self.assertAllFinite(grads[1])
def testCauchyLogSurvivalFunction(self):
    """log_survival_function() shapes and values (vs scipy.logsf)."""
    with self.cached_session():
        batch_size = 50
        loc = self._rng.randn(batch_size)
        scale = self._rng.rand(batch_size) + 1.0
        # Extend far into the right tail, where log-sf is numerically hardest.
        x = np.linspace(-10.0, 100.0, batch_size).astype(np.float64)
        cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
        sf = cauchy.log_survival_function(x)
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.shape)
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), sf.eval().shape)
        self.assertAllEqual(cauchy.batch_shape, sf.shape)
        self.assertAllEqual(cauchy.batch_shape, sf.eval().shape)
        # scipy may be unavailable in the test environment; skip value checks.
        if not stats:
            return
        expected_sf = stats.cauchy(loc, scale).logsf(x)
        self.assertAllClose(expected_sf, sf.eval(), atol=0, rtol=1e-5)
def testCauchyEntropy(self):
    """entropy() broadcasts loc (3,) with scale (1, 3) and matches scipy."""
    with self.cached_session():
        loc = np.array([1.0, 1.0, 1.0])
        scale = np.array([[1.0, 2.0, 3.0]])
        cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
        entropy = cauchy.entropy()
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), entropy.shape)
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(),
                            entropy.eval().shape)
        self.assertAllEqual(cauchy.batch_shape, entropy.shape)
        self.assertAllEqual(cauchy.batch_shape, entropy.eval().shape)
        # scipy may be unavailable in the test environment; skip value checks.
        if not stats:
            return
        # Broadcast result has batch shape (1, 3); reshape scipy's to match.
        expected_entropy = stats.cauchy(loc, scale[0]).entropy().reshape((1, 3))
        self.assertAllClose(expected_entropy, entropy.eval())
def testCauchyMode(self):
    """Mode equals loc, broadcast against the three scales."""
    with self.cached_session():
        # Mu will be broadcast to [7, 7, 7].
        dist = cauchy_lib.Cauchy(loc=[7.], scale=[11., 12., 13.])
        self.assertAllEqual((3,), dist.mode().shape)
        self.assertAllEqual([7., 7, 7], dist.mode().eval())
def testCauchyMean(self):
    """The Cauchy mean is undefined, so mean() yields NaN by default."""
    with self.cached_session():
        dist = cauchy_lib.Cauchy(loc=[1., 2., 3.], scale=[7.])
        self.assertAllEqual((3,), dist.mean().shape)
        self.assertAllEqual([np.nan] * 3, dist.mean().eval())
def testCauchyNanMean(self):
    """mean() raises ValueError when allow_nan_stats=False."""
    with self.cached_session():
        dist = cauchy_lib.Cauchy(
            loc=[1., 2., 3.], scale=[7.], allow_nan_stats=False)
        with self.assertRaises(ValueError):
            dist.mean().eval()
def testCauchyQuantile(self):
    """quantile() shape tracks the batch shape; values match scipy.ppf."""
    with self.cached_session():
        batch_size = 50
        loc = self._rng.randn(batch_size)
        scale = self._rng.rand(batch_size) + 1.0
        # Probabilities strictly inside (0, 1) to avoid infinite quantiles.
        p = np.linspace(0.000001, 0.999999, batch_size).astype(np.float64)
        cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
        x = cauchy.quantile(p)
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.shape)
        self.assertAllEqual(cauchy.batch_shape_tensor().eval(), x.eval().shape)
        self.assertAllEqual(cauchy.batch_shape, x.shape)
        self.assertAllEqual(cauchy.batch_shape, x.eval().shape)
        # scipy may be unavailable in the test environment; skip value checks.
        if not stats:
            return
        expected_x = stats.cauchy(loc, scale).ppf(p)
        self.assertAllClose(expected_x, x.eval(), atol=0.)
def testCauchyVariance(self):
    """The Cauchy variance is undefined, so variance() yields NaN."""
    with self.cached_session():
        # scale will be broadcast to [7, 7, 7]
        dist = cauchy_lib.Cauchy(loc=[1., 2., 3.], scale=[7.])
        self.assertAllEqual((3,), dist.variance().shape)
        self.assertAllEqual([np.nan] * 3, dist.variance().eval())
def testCauchyNanVariance(self):
    """variance() raises ValueError when allow_nan_stats=False."""
    with self.cached_session():
        # scale will be broadcast to [7, 7, 7]
        dist = cauchy_lib.Cauchy(
            loc=[1., 2., 3.], scale=[7.], allow_nan_stats=False)
        with self.assertRaises(ValueError):
            dist.variance().eval()
def testCauchyStandardDeviation(self):
    """The Cauchy stddev is undefined, so stddev() yields NaN."""
    with self.cached_session():
        # scale will be broadcast to [7, 7, 7]
        dist = cauchy_lib.Cauchy(loc=[1., 2., 3.], scale=[7.])
        self.assertAllEqual((3,), dist.stddev().shape)
        self.assertAllEqual([np.nan] * 3, dist.stddev().eval())
def testCauchyNanStandardDeviation(self):
    """stddev() raises ValueError when allow_nan_stats=False."""
    with self.cached_session():
        # scale will be broadcast to [7, 7, 7]
        dist = cauchy_lib.Cauchy(
            loc=[1., 2., 3.], scale=[7.], allow_nan_stats=False)
        with self.assertRaises(ValueError):
            dist.stddev().eval()
def testCauchySample(self):
    """sample(n): median near loc; static/dynamic shapes all agree."""
    with self.cached_session():
        loc = constant_op.constant(3.0)
        scale = constant_op.constant(1.0)
        loc_v = 3.0
        n = constant_op.constant(100000)
        cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
        samples = cauchy.sample(n)
        sample_values = samples.eval()
        self.assertEqual(sample_values.shape, (100000,))
        # The Cauchy mean is undefined, so compare the sample MEDIAN to loc.
        self.assertAllClose(np.median(sample_values), loc_v, atol=1e-1)
        # Expected shape from the dynamic batch shape...
        expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
            tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
        self.assertAllEqual(expected_shape, samples.shape)
        self.assertAllEqual(expected_shape, sample_values.shape)
        # ...and from the static batch shape; both must match.
        expected_shape = (
            tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
        self.assertAllEqual(expected_shape, samples.shape)
        self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchySampleMultiDimensional(self):
    """sample(n) over a (2, 2) batch: medians near locs; shapes agree."""
    with self.cached_session():
        batch_size = 2
        loc = constant_op.constant([[3.0, -3.0]] * batch_size)
        scale = constant_op.constant([[0.5, 1.0]] * batch_size)
        loc_v = [3.0, -3.0]
        n = constant_op.constant(100000)
        cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
        samples = cauchy.sample(n)
        sample_values = samples.eval()
        self.assertEqual(samples.shape, (100000, batch_size, 2))
        # The Cauchy mean is undefined, so compare sample MEDIANS to loc.
        self.assertAllClose(
            np.median(sample_values[:, 0, 0]), loc_v[0], atol=1e-1)
        self.assertAllClose(
            np.median(sample_values[:, 0, 1]), loc_v[1], atol=1e-1)
        # Expected shape from the dynamic batch shape...
        expected_shape = tensor_shape.TensorShape([n.eval()]).concatenate(
            tensor_shape.TensorShape(cauchy.batch_shape_tensor().eval()))
        self.assertAllEqual(expected_shape, samples.shape)
        self.assertAllEqual(expected_shape, sample_values.shape)
        # ...and from the static batch shape; both must match.
        expected_shape = (
            tensor_shape.TensorShape([n.eval()]).concatenate(cauchy.batch_shape))
        self.assertAllEqual(expected_shape, samples.shape)
        self.assertAllEqual(expected_shape, sample_values.shape)
def testCauchyNegativeLocFails(self):
    """validate_args rejects a non-positive scale at first use.

    NOTE(review): the name says "NegativeLoc" but the test actually passes a
    negative SCALE (loc is valid on all of R); the assertion below checks the
    scale positivity condition. Consider renaming in the upstream file.
    """
    with self.cached_session():
        cauchy = cauchy_lib.Cauchy(loc=[1.], scale=[-5.], validate_args=True)
        with self.assertRaisesOpError("Condition x > 0 did not hold"):
            cauchy.mode().eval()
def testCauchyShape(self):
    """Batch shape comes from loc (5,); event shape is scalar."""
    with self.cached_session():
        dist = cauchy_lib.Cauchy(loc=constant_op.constant([-3.0] * 5),
                                 scale=constant_op.constant(11.0))
        self.assertEqual(dist.batch_shape_tensor().eval(), [5])
        self.assertEqual(dist.batch_shape, tensor_shape.TensorShape([5]))
        self.assertAllEqual(dist.event_shape_tensor().eval(), [])
        self.assertEqual(dist.event_shape, tensor_shape.TensorShape([]))
def testCauchyShapeWithPlaceholders(self):
    """With placeholder params the static batch shape is unknown; the
    dynamic batch shape is resolved at feed time."""
    loc = array_ops.placeholder(dtype=dtypes.float32)
    scale = array_ops.placeholder(dtype=dtypes.float32)
    cauchy = cauchy_lib.Cauchy(loc=loc, scale=scale)
    with self.cached_session() as sess:
        # get_batch_shape should return an "<unknown>" tensor.
        self.assertEqual(cauchy.batch_shape, tensor_shape.TensorShape(None))
        self.assertEqual(cauchy.event_shape, ())
        self.assertAllEqual(cauchy.event_shape_tensor().eval(), [])
        # Scalar loc broadcasts against a length-2 scale -> batch shape [2].
        self.assertAllEqual(
            sess.run(
                cauchy.batch_shape_tensor(),
                feed_dict={
                    loc: 5.0,
                    scale: [1.0, 2.0]
                }), [2])
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
import mock
import os
import signal
import socket
import time
import traceback
from oslo_config import cfg
import psutil
from neutron.agent.linux import utils
from neutron import service
from neutron.tests import base
from neutron import wsgi
CONF = cfg.CONF
# This message will be written to temporary file each time
# reset method is called.
# NOTE(review): py2-era code — bytes and str are the same type there, so the
# text-mode writes/reads below work; under py3 these would need b-mode files.
FAKE_RESET_MSG = "reset".encode("utf-8")
# Core plugin loaded when serving RPC in TestRPCServer.
TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
class TestNeutronServer(base.BaseTestCase):
    """Base harness: fork a neutron service and verify SIGHUP restarts it.

    Subclasses set self.health_checker and provide a callback that starts
    the concrete service (WSGI or RPC).
    """

    def setUp(self):
        super(TestNeutronServer, self).setUp()
        self.service_pid = None      # pid of the forked service process
        self.workers = None          # expected worker count, set per test
        self.temp_file = self.get_temp_file_path("test_server.tmp")
        self.health_checker = None   # zero-arg callable set by subclasses
        # Pipe for the forked child to report data (e.g. chosen port) back.
        self.pipein, self.pipeout = os.pipe()
        self.addCleanup(self._destroy_workers)

    def _destroy_workers(self):
        if self.service_pid:
            # Make sure all processes are stopped
            os.kill(self.service_pid, signal.SIGKILL)

    def _start_server(self, callback, workers):
        """Run a given service.
        :param callback: callback that will start the required service
        :param workers: number of service workers
        :returns: list of spawned workers' pids
        """
        self.workers = workers
        # Fork a new process in which server will be started
        pid = os.fork()
        if pid == 0:
            # Child: run the service and translate any outcome into an
            # exit status, then exit WITHOUT running test-framework cleanup.
            status = 0
            try:
                callback(workers)
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                traceback.print_exc()
                status = 2
            # Really exit
            os._exit(status)
        self.service_pid = pid
        if self.workers > 0:
            # Wait at most 10 seconds to spawn workers
            condition = lambda: self.workers == len(self._get_workers())
            utils.wait_until_true(
                condition, timeout=10, sleep=0.1,
                exception=RuntimeError(
                    "Failed to start %d workers." % self.workers))
            workers = self._get_workers()
            self.assertEqual(len(workers), self.workers)
            return workers
        # Wait for a service to start.
        utils.wait_until_true(self.health_checker, timeout=10, sleep=0.1,
                              exception=RuntimeError(
                                  "Failed to start service."))
        return [self.service_pid]

    def _get_workers(self):
        """Get the list of processes in which WSGI server is running."""
        # NOTE(review): proc.ppid as an attribute matches older psutil
        # releases; newer psutil exposes ppid() as a method — confirm the
        # pinned psutil version before touching this.
        if self.workers > 0:
            return [proc.pid for proc in psutil.process_iter()
                    if proc.ppid == self.service_pid]
        else:
            return [proc.pid for proc in psutil.process_iter()
                    if proc.pid == self.service_pid]

    def _fake_reset(self):
        """Writes FAKE_RESET_MSG to temporary file on each call."""
        with open(self.temp_file, 'a') as f:
            f.write(FAKE_RESET_MSG)

    def _test_restart_service_on_sighup(self, service, workers=0):
        """Test that a service correctly restarts on receiving SIGHUP.
        1. Start a service with a given number of workers.
        2. Send SIGHUP to the service.
        3. Wait for workers (if any) to restart.
        4. Assert that the pids of the workers didn't change after restart.
        """
        start_workers = self._start_server(callback=service, workers=workers)
        os.kill(self.service_pid, signal.SIGHUP)
        # Wait for temp file to be created and its size become equal
        # to size of FAKE_RESET_MSG repeated (workers + 1) times.
        expected_size = len(FAKE_RESET_MSG) * (workers + 1)
        condition = lambda: (os.path.isfile(self.temp_file)
                             and os.stat(self.temp_file).st_size ==
                             expected_size)
        utils.wait_until_true(
            condition, timeout=5, sleep=0.1,
            exception=RuntimeError(
                "Timed out waiting for file %(filename)s to be created and "
                "its size become equal to %(size)s." %
                {'filename': self.temp_file,
                 'size': expected_size}))
        # Verify that reset has been called for parent process in which
        # a service was started and for each worker by checking that
        # FAKE_RESET_MSG has been written to temp file workers + 1 times.
        with open(self.temp_file, 'r') as f:
            res = f.readline()
            self.assertEqual(FAKE_RESET_MSG * (workers + 1), res)
        # Make sure worker pids don't change
        end_workers = self._get_workers()
        self.assertEqual(start_workers, end_workers)
class TestWsgiServer(TestNeutronServer):
    """Tests for neutron.wsgi.Server."""

    def setUp(self):
        super(TestWsgiServer, self).setUp()
        self.health_checker = self._check_active
        self.port = None  # filled in by _run_wsgi with the bound port

    @staticmethod
    def application(environ, start_response):
        """A primitive test application."""
        response_body = 'Response'
        status = '200 OK'
        response_headers = [('Content-Type', 'text/plain'),
                            ('Content-Length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body]

    def _check_active(self):
        """Check a wsgi service is active by making a GET request."""
        # The child process writes the chosen port (up to 5 digits) to the
        # pipe; read it back here in the parent.
        port = int(os.read(self.pipein, 5))
        conn = httplib2.HTTPConnectionWithTimeout("localhost", port)
        try:
            conn.request("GET", "/")
            resp = conn.getresponse()
            return resp.status == 200
        except socket.error:
            return False

    def _run_wsgi(self, workers=0):
        """Start WSGI server with a test application."""
        # Mock reset method to check that it is being called
        # on receiving SIGHUP.
        with mock.patch("neutron.wsgi.WorkerService.reset") as reset_method:
            reset_method.side_effect = self._fake_reset
            server = wsgi.Server("Test")
            # Port 0 lets the OS pick a free port.
            server.start(self.application, 0, "0.0.0.0",
                         workers=workers)
            # Memorize a port that was chosen for the service
            self.port = server.port
            # NOTE(review): os.write with str is py2-only; py3 needs bytes.
            os.write(self.pipeout, str(self.port))
            server.wait()

    def test_restart_wsgi_on_sighup_multiple_workers(self):
        self._test_restart_service_on_sighup(service=self._run_wsgi,
                                             workers=2)
class TestRPCServer(TestNeutronServer):
    """Tests for neutron RPC server."""

    def setUp(self):
        super(TestRPCServer, self).setUp()
        self.setup_coreplugin(TARGET_PLUGIN)
        self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True)
        self.plugin = self._plugin_patcher.start()
        self.plugin.return_value.rpc_workers_supported = True
        self.health_checker = self._check_active

    def _check_active(self):
        # No cheap liveness probe for the RPC service: just give it time
        # to come up, then report healthy.
        time.sleep(5)
        return True

    def _serve_rpc(self, workers=0):
        """Start RPC server with a given number of workers."""
        # Mock reset method to check that it is being called
        # on receiving SIGHUP.
        with mock.patch("neutron.service.RpcWorker.reset") as reset_method:
            with mock.patch(
                "neutron.manager.NeutronManager.get_plugin"
            ) as get_plugin:
                reset_method.side_effect = self._fake_reset
                get_plugin.return_value = self.plugin
                CONF.set_override("rpc_workers", workers)
                launcher = service.serve_rpc()
                launcher.wait()

    def test_restart_rpc_on_sighup_multiple_workers(self):
        self._test_restart_service_on_sighup(service=self._serve_rpc,
                                             workers=2)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
import sys
from django.contrib.auth import authenticate, login, get_backends
from django.core.management.base import BaseCommand
from django.conf import settings
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
# Quick tool to test whether you're correctly authenticating to LDAP
def query_ldap(**options):
    # type: (**str) -> None
    """Look up options['email'] through every configured LDAPBackend and
    print each mapped Django field with its LDAP attribute value."""
    email = options['email']
    ldap_backends = [b for b in get_backends() if isinstance(b, LDAPBackend)]
    for backend in ldap_backends:
        username = backend.django_to_ldap_username(email)
        ldap_attrs = _LDAPUser(backend, username).attrs
        if ldap_attrs is None:
            print("No such user found")
        else:
            for django_field, ldap_field in settings.AUTH_LDAP_USER_ATTR_MAP.items():
                print("%s: %s" % (django_field, ldap_attrs[ldap_field]))
class Command(BaseCommand):
    """Management command: sanity-check LDAP authentication for one email."""

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument(
            'email', metavar='<email>', type=str,
            help="email of user to query")

    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        # Delegate all the work to the module-level helper.
        query_ldap(**options)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.configurationsample.record;
import org.springframework.boot.configurationsample.TestConfigurationProperties;
import org.springframework.boot.configurationsample.TestNestedConfigurationProperty;
/**
 * Sample record with nested configuration properties, used to exercise the
 * configuration metadata annotation processor.
 */
@TestConfigurationProperties("record-nested")
public record NestedPropertiesRecord(String myProperty, @TestNestedConfigurationProperty NestedRecord nested,
		InnerPropertiesRecord inner) {

	/**
	 * Inner record contributing a second level of nested properties.
	 */
	public record InnerPropertiesRecord(String myInnerProperty, @TestNestedConfigurationProperty NestedRecord nested) {
	}
}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
configuration-metadata/spring-boot-configuration-processor/src/test/java/org/springframework/boot/configurationsample/record/NestedPropertiesRecord.java
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from spack import *
from spack.spec import UnsupportedCompilerError
class Elemental(CMakePackage):
    """Elemental: Distributed-memory dense and sparse-direct linear algebra
    and optimization library."""

    homepage = "http://libelemental.org"
    url = "https://github.com/elemental/Elemental/archive/v0.87.7.tar.gz"
    git = "https://github.com/elemental/Elemental.git"

    version('develop', branch='master')
    version('0.87.7', '6c1e7442021c59a36049e37ea69b8075')
    version('0.87.6', '9fd29783d45b0a0e27c0df85f548abe9')

    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant('hybrid', default=True,
            description='Make use of OpenMP within MPI packing/unpacking')
    variant('openmp_blas', default=False,
            description='Use OpenMP for threading in the BLAS library')
    variant('c', default=False,
            description='Build C interface')
    variant('python', default=False,
            description='Install Python interface')
    variant('parmetis', default=False,
            description='Enable ParMETIS')
    variant('quad', default=False,
            description='Enable quad precision')
    variant('int64', default=False,
            description='Use 64bit integers')
    variant('cublas', default=False,
            description='Enable cuBLAS for local BLAS operations')
    # When this variant is set remove the normal dependencies since
    # Elemental has to build BLAS and ScaLAPACK internally
    variant('int64_blas', default=False,
            description='Use 64bit integers for BLAS.'
            ' Requires local build of BLAS library.')
    variant('scalapack', default=False,
            description='Build with ScaLAPACK library')
    variant('build_type', default='Release',
            description='The build type to build',
            values=('Debug', 'Release'))
    variant('blas', default='openblas', values=('openblas', 'mkl', 'accelerate', 'essl'),
            description='Enable the use of OpenBlas/MKL/Accelerate/ESSL')
    variant('mpfr', default=False,
            description='Support GNU MPFR\'s'
            'arbitrary-precision floating-point arithmetic')

    # Note that #1712 forces us to enumerate the different blas variants
    depends_on('blas', when='~openmp_blas ~int64_blas')
    # Hack to forward variant to openblas package
    depends_on('openblas', when='blas=openblas ~openmp_blas ~int64_blas')
    # Allow Elemental to build internally when using 8-byte ints
    depends_on('openblas threads=openmp', when='blas=openblas +openmp_blas ~int64_blas')
    depends_on('intel-mkl', when="blas=mkl ~openmp_blas ~int64_blas")
    depends_on('intel-mkl threads=openmp', when='blas=mkl +openmp_blas ~int64_blas')
    depends_on('intel-mkl@2017.1 +openmp +ilp64', when='blas=mkl +openmp_blas +int64_blas')
    depends_on('veclibfort', when='blas=accelerate')
    depends_on('essl ~cuda', when='blas=essl ~openmp_blas ~int64_blas')
    depends_on('essl threads=openmp', when='blas=essl +openmp_blas ~int64_blas')
    # Note that this forces us to use OpenBLAS until #1712 is fixed
    depends_on('lapack', when='blas=openblas ~openmp_blas')
    depends_on('netlib-lapack +external-blas', when='blas=essl')
    depends_on('metis')
    depends_on('metis +int64', when='+int64')
    depends_on('mpi')
    # Allow Elemental to build internally when using 8-byte ints
    depends_on('scalapack', when='+scalapack ~int64_blas')
    extends('python', when='+python')
    depends_on('python@:2.8', when='+python')
    depends_on('gmp', when='+mpfr')
    depends_on('mpc', when='+mpfr')
    depends_on('mpfr', when='+mpfr')

    patch('elemental_cublas.patch', when='+cublas')
    patch('cmake_0.87.7.patch', when='@0.87.7')

    @property
    def libs(self):
        """Locate the installed Elemental libraries (libEl)."""
        # FIX: replaced the redundant `True if ... else False` ternary.
        shared = '+shared' in self.spec
        return find_libraries(
            'libEl', root=self.prefix, shared=shared, recursive=True
        )

    def cmake_args(self):
        """Assemble the CMake configure arguments from the active variants."""
        spec = self.spec
        if '@:0.87.7' in spec and '%intel@:17.0.2' in spec:
            raise UnsupportedCompilerError(
                "Elemental {0} has a known bug with compiler: {1} {2}".format(
                    spec.version, spec.compiler.name, spec.compiler.version))
        args = [
            '-DCMAKE_INSTALL_MESSAGE:STRING=LAZY',
            '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
            '-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
            '-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
            '-DEL_PREFER_OPENBLAS:BOOL=TRUE',
            '-DEL_DISABLE_SCALAPACK:BOOL=%s' % ('~scalapack' in spec),
            '-DBUILD_SHARED_LIBS:BOOL=%s' % ('+shared' in spec),
            '-DEL_HYBRID:BOOL=%s' % ('+hybrid' in spec),
            '-DEL_C_INTERFACE:BOOL=%s' % ('+c' in spec),
            '-DINSTALL_PYTHON_PACKAGE:BOOL=%s' % ('+python' in spec),
            '-DEL_DISABLE_PARMETIS:BOOL=%s' % ('~parmetis' in spec),
            '-DEL_DISABLE_QUAD:BOOL=%s' % ('~quad' in spec),
            '-DEL_USE_64BIT_INTS:BOOL=%s' % ('+int64' in spec),
            '-DEL_USE_64BIT_BLAS_INTS:BOOL=%s' % ('+int64_blas' in spec),
            '-DEL_DISABLE_MPFR:BOOL=%s' % ('~mpfr' in spec)]

        # Find the Fortran runtime library matching the active compiler so
        # GFORTRAN_LIB can be handed to Elemental's CMake.
        if self.spec.satisfies('%intel'):
            ifort = env['SPACK_F77']
            intel_bin = os.path.dirname(ifort)
            intel_root = os.path.dirname(intel_bin)
            libfortran = find_libraries('libifcoremt',
                                        root=intel_root, recursive=True)
        elif self.spec.satisfies('%gcc'):
            # see <stage_folder>/debian/rules as an example:
            mpif77 = Executable(spec['mpi'].mpif77)
            libfortran = LibraryList(mpif77('--print-file-name',
                                            'libgfortran.%s' % dso_suffix,
                                            output=str).strip())
        elif self.spec.satisfies('%xl') or self.spec.satisfies('%xl_r'):
            xl_fort = env['SPACK_F77']
            xl_bin = os.path.dirname(xl_fort)
            xl_root = os.path.dirname(xl_bin)
            libfortran = find_libraries('libxlf90_r',
                                        root=xl_root, recursive=True)
        else:
            libfortran = None

        if libfortran:
            args.append('-DGFORTRAN_LIB=%s' % libfortran.libraries[0])

        # If using 64bit int BLAS libraries, elemental has to build
        # them internally
        if '+int64_blas' in spec:
            # FIX: removed stray trailing commas after args.extend(...)
            # that built and discarded one-element tuples.
            args.extend(['-DEL_BLAS_SUFFIX:STRING={0}'.format((
                '_64_' if '+int64_blas' in spec else '_')),
                '-DCUSTOM_BLAS_SUFFIX:BOOL=TRUE'])
            if '+scalapack' in spec:
                args.extend(['-DEL_LAPACK_SUFFIX:STRING={0}'.format((
                    '_64_' if '+int64_blas' in spec else '_')),
                    '-DCUSTOM_LAPACK_SUFFIX:BOOL=TRUE'])
        else:
            math_libs = (spec['lapack'].libs +
                         spec['blas'].libs)
            if '+scalapack' in spec:
                math_libs = spec['scalapack'].libs + math_libs
            args.extend([
                '-DMATH_LIBS:STRING={0}'.format(math_libs.ld_flags)])

        if '+python' in spec:
            args.extend([
                '-DPYTHON_SITE_PACKAGES:STRING={0}'.format(site_packages_dir)])

        return args
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
import sys
import caffe
import shutil
import subprocess
import numpy as np
from random import random
class PhotoBomb(object):
def __init__(self, train_path, caffe_path, img_size=96):
    """Remember the training-data root, caffe install root and image size."""
    self.train_path = train_path
    self.img_path = os.path.join(train_path, 'images')
    self.val_img_path = os.path.join(train_path, 'val_images')
    self.caffe_path = caffe_path
    self.img_size = img_size
def _create_lmdb_index(self):
    """
    Build the index files caffe needs for lmdb creation.

    For each of the train and validation image trees this writes:
      * an lmdb index file mapping "<class_folder>/<image>" -> class id
      * a label index file mapping class id -> class folder name
    :return: None; files are written under self.train_path
    """
    for idx, lmdb_index in enumerate(['lmdb_index.txt', 'val_lmdb_index.txt']):
        label_index = 'label_index.txt' if idx == 0 else 'val_label_index.txt'
        img_path = self.img_path if idx == 0 else self.val_img_path
        image_class_count = 0
        with open(os.path.join(self.train_path, lmdb_index), 'wb') as f:
            with open(os.path.join(self.train_path, label_index), 'wb') as g:
                # BUG FIX: walk the tree that matches this index (train vs
                # val); the original always walked the training image tree,
                # so the validation index listed training-class folders.
                for root, folders, _ in os.walk(img_path):
                    for folder in folders:
                        for img_class, __, images in os.walk(os.path.join(img_path, folder)):
                            g.write(str(image_class_count) + '\t' + img_class.split("/")[-1] + '\n')
                            for img in images:
                                f.write(os.path.join(folder, img) + '\t' + str(image_class_count) + '\n')
                            image_class_count += 1
    return
def _create_val_set(self, pct=.1):
    """
    Randomly move ~pct of the training images into the validation folder,
    mirroring the per-class folder layout.

    :param pct: approximate fraction of images to hold out (default 10%)
    """
    if not os.path.isdir(self.val_img_path):
        os.mkdir(self.val_img_path)
    for root, folders, _ in os.walk(self.img_path):
        for folder in folders:
            val_folder = os.path.join(self.val_img_path, folder)
            if not os.path.isdir(val_folder):
                # BUG FIX: create the per-class validation subfolder; the
                # original re-created self.val_img_path itself, so the
                # per-class target directories never existed.
                os.mkdir(val_folder)
            for img_class, __, images in os.walk(os.path.join(self.img_path, folder)):
                for img in images:
                    if random() >= (1 - pct):
                        # BUG FIX: the source path must include the images
                        # root; the original joined only (folder, img) and
                        # could never resolve the file.
                        os.rename(os.path.join(self.img_path, folder, img),
                                  os.path.join(val_folder, img))
    return
def _load_caffe_solver_and_net(self, caffe_solver_file=None, caffe_net_file=None, caffe_deploy_file=None):
    """
    uses default ImageNet configuration if nothing provided, replacing the final softmax
    output layer to equal the number of classes present in the training image directory.

    Returns the (possibly defaulted) solver file path for use by fit().
    NOTE: rewrites the solver/net/deploy prototxt files IN PLACE.
    """
    # Fall back to the bundled example prototxt files.
    if not caffe_solver_file:
        caffe_solver_file = self.caffe_path + 'example_basic_solver.prototxt'
    if not caffe_net_file:
        caffe_net_file = self.caffe_path + 'example_basic_net.prototxt'
    if not caffe_deploy_file:
        caffe_deploy_file = self.caffe_path + 'example_basic_deploy.prototxt'
    # caffe solver must reference path to caffe net
    caffe_solver = open(caffe_solver_file).readlines()
    with open(caffe_solver_file, 'wb') as f:
        for line in caffe_solver:
            if "net:" in line:
                line = 'net: ' + '\"' + caffe_net_file + '\"\n'
                print line
            f.write(line)
    # overwrite number of classes to length of image directory
    num_labels = len(open(self.train_path + 'label_index.txt').readlines())
    caffe_net = open(caffe_net_file).readlines()
    caffe_deploy = open(caffe_deploy_file).readlines()
    # Patch the LAST num_output occurrence in each file (the softmax layer).
    output = []
    for caffe_file in (caffe_net, caffe_deploy):
        for idx, _ in enumerate(caffe_file):
            if 'num_output' in _:
                output.append(idx)
        caffe_file[output[-1]] = '    num_output: ' + str(num_labels) + '\n'
    # save changes
    # First source: line is the training lmdb, second is the validation lmdb.
    source_call = 0
    with open(caffe_net_file, 'wb') as f:
        for line in caffe_net:
            if 'source:' in line:
                if source_call == 0:
                    line = '    source: ' + '\"' + self.train_path + 'train_lmdb\"\n'
                    source_call += 1
                else:
                    line = '    source: ' + '\"' + self.train_path + 'val_lmdb\"\n'
                print line
            elif 'mean_file:' in line:
                line = '    mean_file: ' + '\"' + self.train_path + 'image_mean.binaryproto\"\n'
                print line
            f.write(line)
    with open(caffe_deploy_file, 'wb') as f:
        for line in caffe_deploy:
            f.write(line)
    return caffe_solver_file
def build_caffe_prereqs(self):
    """
    calls scripts to resize all training images, create lmdb storage,
    and calculate image mean, storing these in a caffe-friendly format.
    helper scripts are expected to be in caffe's root folder
    """
    self._create_lmdb_index()
    print "indexes built."
    # Shell out to caffe's helper scripts for lmdb and mean computation.
    lmdb_cmd = self.caffe_path + 'create_lmdb.sh ' + self.train_path[:-1] + ' ' + self.caffe_path
    print lmdb_cmd
    print "lmdb build exit: " + str(subprocess.Popen(lmdb_cmd, shell=True).wait())
    mean_cmd = self.caffe_path + 'create_image_mean.sh ' + self.caffe_path + ' ' + \
        self.train_path + 'train_lmdb ' + self.train_path + 'image_mean.binaryproto'
    print "image mean build exit: " + str(subprocess.Popen(mean_cmd, shell=True).wait())
    # Make caffe's python bindings importable.
    sys.path.insert(1, self.caffe_path + 'python')
    # converting mean to numpy array
    blob = caffe.proto.caffe_pb2.BlobProto()
    data = open(self.train_path + 'image_mean.binaryproto', 'rb').read()
    blob.ParseFromString(data)
    mean_path = self.train_path + 'image_mean.npy'
    np.save(mean_path, np.array(caffe.io.blobproto_to_array(blob))[0])
    return
def fit(self, caffe_solver_file=None, caffe_net_file=None, caffe_deploy_file=None):
"""
wrapper for cmd line execution of caffe training
"""
caffe_solver_file = self._load_caffe_solver_and_net(caffe_solver_file, caffe_net_file, caffe_deploy_file)
train_cmd = self.caffe_path + 'build/tools/caffe train --solver=' + caffe_solver_file
print "caffe train build exit: " + str(subprocess.Popen(train_cmd, shell=True).wait())
return
def predict(self, image_path, caffe_model_file, caffe_deploy_file='example_basic_deploy.prototxt', mean_file='image_mean.npy'):
    """
    handling of communication via detection and caffe to identify relevant regions of a photo,
    pass on to caffe to predict, and return the most likely object label for each region.

    image_path: local path to the predicted image (can be file or folder, subject to matlab & caffe's handling)
    caffe_model_file: remote filename of .caffemodel, expected to be in self.train_path directory
    caffe_deploy_file: remote filename of deploy.prototxt, expected to be in self.caffe_path directory
    mean_file: remote filename of image mean array, expected to be in self.train_path directory
    """
    # Resize the image in place with ImageMagick to the network input size.
    img_resize_cmd = "convert " + image_path + " -resize 96x96 " + image_path
    subprocess.Popen(img_resize_cmd, shell=True).wait()
    # assumes image_path ends in a 3-character extension (".jpg" etc.),
    # hence the [:-4] slices below -- TODO confirm
    output_file = os.path.join(self.train_path, "prediction_images", os.path.split(image_path)[-1][:-4] + ".txt")
    print "Processing :", output_file
    if os.path.exists(output_file):
        # NOTE(review): this branch only logs; processing continues even
        # when the output already exists. Confirm whether an early return
        # was intended here.
        print "Already Processed :", output_file
    # first run detection on this image to get the box regions.
    image_boxes_file_name = image_path[:-4] + '.txt'
    detect_command = 'image_selective_search ' + image_path
    print "Going to execute selective search command : ", detect_command
    # All Popen defaults are spelled out explicitly here; only shell=True
    # differs from the defaults.
    mat_process = subprocess.Popen(detect_command, bufsize=0, executable=None, stdin=None,
                                   stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=True,
                                   env=None, universal_newlines=False, startupinfo=None, creationflags=0)
    mat_process.wait()
    print "Search process complete :", image_boxes_file_name, "Written"
    # Reduce both paths to bare filenames; the command below re-prefixes
    # them with self.train_path + 'prediction_images/'.
    path, image_boxes_file_name = os.path.split(image_boxes_file_name)
    path, image = os.path.split(image_path)
    # Build the command that runs caffe detection over the selective-search
    # boxes and writes its scores to something.csv.
    py_exec_cmd = "LD_LIBRARY_PATH=/usr/local/lib " + self.caffe_path + "python/detect_cpp_photobomb.py " + \
        self.train_path + 'prediction_images/' + image + " " + self.train_path + 'prediction_images/' + image_boxes_file_name + \
        " " + self.train_path + "something.csv" + " --pretrained_model=" + self.train_path + caffe_model_file + \
        " --model_def=" + self.caffe_path + caffe_deploy_file + \
        " --raw_scale=255 --gpu --mean_file=" + self.train_path + mean_file
    print "Going to exec ", py_exec_cmd
    ssh_process = subprocess.Popen(py_exec_cmd, shell=True, stdout=subprocess.PIPE)
    # err is always None since stderr is not captured.
    out, err = ssh_process.communicate()
    print out
def break_down(self):
    """
    Delete all files relative to the model fit. Necessary since caffe
    scripts require fresh creation of directories while executing.

    Removes the train/val lmdb directories, the generated index/mean
    files, and any .caffemodel / .solverstate snapshots anywhere under
    self.train_path.

    :returns: None
    :raises OSError: if the lmdb directories do not exist (rmtree)
    """
    shutil.rmtree(self.train_path + 'train_lmdb')
    shutil.rmtree(self.train_path + 'val_lmdb')
    for file_name in ['image_mean.binaryproto', 'image_mean.npy', 'label_index.txt',
                      'lmdb_index.txt', 'val_lmdb_index.txt', 'val_label_index.txt']:
        try:
            os.remove(self.train_path + file_name)
        except OSError:
            # File was never created (e.g. a partial run); nothing to clean.
            continue
    for root, _, files in os.walk(self.train_path):
        for file_name in files:
            if file_name.endswith('.caffemodel') or file_name.endswith('.solverstate'):
                # BUG FIX: join with the directory currently being walked.
                # The previous self.train_path + file path was wrong for
                # snapshots found in subdirectories of train_path.
                os.remove(os.path.join(root, file_name))
    return
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
import requests
import sys
def get_all():
    """
    Download the full historical price series from the Coinbase v1 API.

    Pages through /api/v1/prices/historical until an empty page is
    returned, concatenating the raw text of every page (each prefixed
    with a newline, matching the original output format).

    :returns: the concatenated page bodies as a single string; on a
        non-200 response the literal string "API error" is appended and
        paging stops.
    """
    page_num = 1
    pages = []  # collect chunks and join once (avoids quadratic +=)
    while True:
        req = requests.get("http://coinbase.com/api/v1/prices/historical?page=" + str(page_num))
        if req.status_code == 200:
            pages.append('\n' + req.text)
        else:
            # BUG FIX: the error page was previously recorded but the loop
            # kept running; a persistent error response (whose body never
            # becomes "") would loop forever. Stop after the first error.
            pages.append("API error")
            break
        print("... getting page " + str(page_num))
        page_num += 1
        if req.text == "":
            # An empty page marks the end of the history.
            break
    return ''.join(pages)
if __name__ == '__main__':
    # Dump the scraped price history to stdout exactly as returned,
    # without the trailing newline that print would add.
    data = get_all()
    sys.stdout.write(data)
#with open('.tmp/{}_full_output.py'.format(int(time.time())), 'a') as f1:
# f1.write('\n'+ price_data)
#price_data_format1 = price_data.replace(',','\n')
#with open('.tmp/{}_lines_removed.py'.format(int(time.time())), 'a') as f2:
# f2.write('\n' + price_data_format1)
#price_data_format2 = price_data_format1.split('\n')
#with open('.tmp/{}_xyxy.py'.format(int(time.time())), 'a') as f3:
# f3.write(str(price_data_format2))
#prices = price_data_format2[::2]
#k=1
#with open('.tmp/{}_prices.py'.format(int(time.time())), 'a') as f4:
# while k<len(prices):
# f4.write('{!r}\n'.format(prices[k]))
# k+=1
#timestamps = price_data_format2[1::2]
#j=1
#with open('.tmp/{}_stamps.py'.format(int(time.time())), 'a') as f5:
# while j<len(timestamps):
# f5.write('{!r}\n'.format(timestamps[j]))
# j += 1
|
unknown
|
codeparrot/codeparrot-clean
| ||
// @validatePreserveExistingMemoizationGuarantees
function Component(props) {
  const data = useMemo(() => {
    // the memo body reads the chain without optional access (non-optional)
    return props.items.edges.nodes ?? [];
    // while the declared dep uses optional chaining -- the mismatch under test
  }, [props.items?.edges?.nodes]);
  return <Foo data={data} />;
}
|
javascript
|
github
|
https://github.com/facebook/react
|
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/error.invalid-optional-member-expression-as-memo-dep-non-optional-in-body.js
|
# -*- coding: utf-8 -*-
import unittest
import numpy as np
from .. import emspectrum
from ...patch.pint import ureg
from ...patch import jsonpickle
class test_emspectrum(unittest.TestCase):
    """Unit tests for the emspectrum Discrete and Dark spectrum classes."""

    def test_discrete(self):
        # Adding two discrete spectra merges their wavelength samples and
        # renormalizes the weights (five unit weights total, 200 nm shared).
        s1 = emspectrum.Discrete(ureg.Quantity([100, 200, 300], "nm"), [1, 1, 1])
        s2 = emspectrum.Discrete(ureg.Quantity([200, 400], "nm"), [1, 1])
        s3 = s1 + s2

        def func(x):
            # Wavelength (nm) -> photon energy (keV) via pint's
            # "spectroscopy" context.
            return ureg.Quantity(x, "nm").to("keV", "spectroscopy")

        np.testing.assert_array_equal(s3.energies, func([100, 200, 300, 400]))
        np.testing.assert_array_equal(s3.ratios, [0.2, 0.4, 0.2, 0.2])
        s4 = emspectrum.Discrete(ureg.Quantity([150, 350], "nm"), [1, 2])
        self.assertEqual(s4.sample(s3), 1.5 * 1 / 3.0 + 1 * 2 / 3.0)

    def test_dark(self):
        # Sampling against a Dark (no-emission) spectrum yields zero.
        s1 = emspectrum.Dark()
        s4 = emspectrum.Discrete(ureg.Quantity([150, 350], "nm"), [1, 2])
        self.assertEqual(s4.sample(s1), 0)

    def test_serialize(self):
        # Both spectrum types survive a jsonpickle round trip unchanged.
        s1 = emspectrum.Discrete(ureg.Quantity([100, 300], "nm"), [1, 1])
        s2 = jsonpickle.loads(jsonpickle.dumps(s1))
        self.assertEqual(s1, s2)
        s1 = emspectrum.Dark()
        s2 = jsonpickle.loads(jsonpickle.dumps(s1))
        self.assertEqual(s1, s2)
def test_suite():
    """Test suite including all test suites"""
    suite = unittest.TestSuite()
    # Register each named test method of the case class.
    for test_name in ("test_discrete", "test_dark", "test_serialize"):
        suite.addTest(test_emspectrum(test_name))
    return suite
if __name__ == "__main__":
    import sys

    # Run the suite and signal failure through the process exit code.
    result = unittest.TextTestRunner().run(test_suite())
    if not result.wasSuccessful():
        sys.exit(1)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
from __future__ import print_function
import os.path
import socket
import sys
import eventlet
import eventlet.wsgi
import greenlet
from oslo.config import cfg
from paste import deploy
import routes.middleware
import ssl
import webob.dec
import webob.exc
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
# Configuration options for nova's WSGI servers: paste config location,
# access-log format, SSL material, socket tuning and greenthread pool size.
wsgi_opts = [
    cfg.StrOpt('api_paste_config',
               default="api-paste.ini",
               help='File name for the paste.deploy config for nova-api'),
    cfg.StrOpt('wsgi_log_format',
               default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
                       ' len: %(body_length)s time: %(wall_seconds).7f',
               help='A python format string that is used as the template to '
                    'generate log lines. The following values can be formatted '
                    'into it: client_ip, date_time, request_line, status_code, '
                    'body_length, wall_seconds.'),
    cfg.StrOpt('ssl_ca_file',
               help="CA certificate file to use to verify "
                    "connecting clients"),
    cfg.StrOpt('ssl_cert_file',
               help="SSL certificate of API server"),
    cfg.StrOpt('ssl_key_file',
               help="SSL private key of API server"),
    cfg.IntOpt('tcp_keepidle',
               default=600,
               help="Sets the value of TCP_KEEPIDLE in seconds for each "
                    "server socket. Not supported on OS X."),
    cfg.IntOpt('wsgi_default_pool_size',
               default=1000,
               help="Size of the pool of greenthreads used by wsgi"),
    cfg.IntOpt('max_header_line',
               default=16384,
               help="Maximum line size of message headers to be accepted. "
                    "max_header_line may need to be increased when using "
                    "large tokens (typically those generated by the "
                    "Keystone v3 API with big service catalogs)."),
    ]

CONF = cfg.CONF
CONF.register_opts(wsgi_opts)

# Module-level logger used by the server and loader classes below.
LOG = logging.getLogger(__name__)
class Server(object):
    """Server class to manage a WSGI server, serving a WSGI application."""

    # Default size of the greenthread pool, read from config at import time.
    default_pool_size = CONF.wsgi_default_pool_size

    def __init__(self, name, app, host='0.0.0.0', port=0, pool_size=None,
                 protocol=eventlet.wsgi.HttpProtocol, backlog=128,
                 use_ssl=False, max_url_len=None):
        """Initialize, but do not start, a WSGI server.

        :param name: Pretty name for logging.
        :param app: The WSGI application to serve.
        :param host: IP address to serve the application.
        :param port: Port number to server the application.
        :param pool_size: Maximum number of eventlets to spawn concurrently.
        :param backlog: Maximum number of queued connections.
        :param max_url_len: Maximum length of permitted URLs.
        :returns: None
        :raises: nova.exception.InvalidInput
        """
        # Allow operators to customize http requests max header line size.
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.name = name
        self.app = app
        self._server = None
        self._protocol = protocol
        self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
        self._logger = logging.getLogger("nova.%s.wsgi.server" % self.name)
        self._wsgi_logger = logging.WritableLogger(self._logger)
        self._use_ssl = use_ssl
        self._max_url_len = max_url_len

        if backlog < 1:
            raise exception.InvalidInput(
                reason='The backlog must be more than 1')

        bind_addr = (host, port)
        # TODO(dims): eventlet's green dns/socket module does not actually
        # support IPv6 in getaddrinfo(). We need to get around this in the
        # future or monitor upstream for a fix
        try:
            info = socket.getaddrinfo(bind_addr[0],
                                      bind_addr[1],
                                      socket.AF_UNSPEC,
                                      socket.SOCK_STREAM)[0]
            family = info[0]
            bind_addr = info[-1]
        except Exception:
            # Fall back to plain IPv4 if resolution fails.
            family = socket.AF_INET

        try:
            self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
        except EnvironmentError:
            LOG.error(_("Could not bind to %(host)s:%(port)s"),
                      {'host': host, 'port': port})
            raise

        # Record the actual bound address (port may have been 0 = ephemeral).
        (self.host, self.port) = self._socket.getsockname()[0:2]
        LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)

    def start(self):
        """Start serving a WSGI application.

        :returns: None
        """
        if self._use_ssl:
            try:
                ca_file = CONF.ssl_ca_file
                cert_file = CONF.ssl_cert_file
                key_file = CONF.ssl_key_file

                if cert_file and not os.path.exists(cert_file):
                    raise RuntimeError(
                        _("Unable to find cert_file : %s") % cert_file)

                if ca_file and not os.path.exists(ca_file):
                    raise RuntimeError(
                        _("Unable to find ca_file : %s") % ca_file)

                if key_file and not os.path.exists(key_file):
                    raise RuntimeError(
                        _("Unable to find key_file : %s") % key_file)

                if self._use_ssl and (not cert_file or not key_file):
                    raise RuntimeError(
                        _("When running server in SSL mode, you must "
                          "specify both a cert_file and key_file "
                          "option value in your configuration file"))

                ssl_kwargs = {
                    'server_side': True,
                    'certfile': cert_file,
                    'keyfile': key_file,
                    'cert_reqs': ssl.CERT_NONE,
                }

                # Only require/verify client certificates when a CA bundle
                # has been configured.
                if CONF.ssl_ca_file:
                    ssl_kwargs['ca_certs'] = ca_file
                    ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

                self._socket = eventlet.wrap_ssl(self._socket,
                                                 **ssl_kwargs)

                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_REUSEADDR, 1)
                # sockets can hang around forever without keepalive
                self._socket.setsockopt(socket.SOL_SOCKET,
                                        socket.SO_KEEPALIVE, 1)

                # This option isn't available in the OS X version of eventlet
                if hasattr(socket, 'TCP_KEEPIDLE'):
                    self._socket.setsockopt(socket.IPPROTO_TCP,
                                            socket.TCP_KEEPIDLE,
                                            CONF.tcp_keepidle)

            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Failed to start %(name)s on %(host)s"
                                ":%(port)s with SSL support") % self.__dict__)

        wsgi_kwargs = {
            'func': eventlet.wsgi.server,
            'sock': self._socket,
            'site': self.app,
            'protocol': self._protocol,
            'custom_pool': self._pool,
            'log': self._wsgi_logger,
            'log_format': CONF.wsgi_log_format,
            'debug': False
        }

        if self._max_url_len:
            wsgi_kwargs['url_length_limit'] = self._max_url_len

        # Serve from a greenthread so start() returns immediately.
        self._server = eventlet.spawn(**wsgi_kwargs)

    def stop(self):
        """Stop this server.

        This is not a very nice action, as currently the method by which a
        server is stopped is by killing its eventlet.

        :returns: None
        """
        LOG.info(_("Stopping WSGI server."))

        if self._server is not None:
            # Resize pool to stop new requests from being processed
            self._pool.resize(0)
            self._server.kill()

    def wait(self):
        """Block, until the server has stopped.

        Waits on the server's eventlet to finish, then returns.

        :returns: None
        """
        try:
            if self._server is not None:
                self._server.wait()
        except greenlet.GreenletExit:
            LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
    """Nova WSGI request; currently identical to webob.Request."""
    pass
class Application(object):
    """Base WSGI application wrapper. Subclasses need to implement __call__."""

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        Any local configuration (that is, values under the [app:APPNAME]
        section of the paste config) will be passed into the `__init__` method
        as kwargs.

        A hypothetical configuration would look like:

            [app:wadl]
            latest_version = 1.3
            paste.app_factory = nova.api.fancy_api:Wadl.factory

        which would result in a call to the `Wadl` class as

            import nova.api.fancy_api
            fancy_api.Wadl(latest_version='1.3')

        You could of course re-implement the `factory` method in subclasses,
        but using the kwarg passing it shouldn't be necessary.
        """
        return cls(**local_config)

    def __call__(self, environ, start_response):
        r"""Subclasses will probably want to implement __call__ like this:

        @webob.dec.wsgify(RequestClass=Request)
        def __call__(self, req):
          # Any of the following objects work as responses:

          # Option 1: simple string
          res = 'message\n'

          # Option 2: a nicely formatted HTTP exception page
          res = exc.HTTPForbidden(detail='Nice try')

          # Option 3: a webob Response object (in case you need to play with
          # headers, or you want to be treated like an iterable, or or or)
          res = Response();
          res.app_iter = open('somefile')

          # Option 4: any wsgi app to be run next
          res = self.application

          # Option 5: you can get a Response object for a wsgi app, too, to
          # play with headers etc
          res = req.get_response(self.application)

          # You can then just return your response...
          return res
          # ... or set req.response and return None.
          req.response = res

        See the end of http://pythonpaste.org/webob/modules/dec.html
        for more info.
        """
        # The base class is abstract; concrete apps must override __call__.
        raise NotImplementedError(_('You must implement __call__'))
class Middleware(Application):
    """Base WSGI middleware.

    These classes require an application to be initialized that will be
    called next.  By default the middleware will simply call its wrapped
    app, or you can override __call__ to customize its behavior.
    """

    @classmethod
    def factory(cls, global_config, **local_config):
        """Used for paste app factories in paste.deploy config files.

        Any local configuration (that is, values under the [filter:APPNAME]
        section of the paste config) will be passed into the `__init__` method
        as kwargs.

        A hypothetical configuration would look like:

            [filter:analytics]
            redis_host = 127.0.0.1
            paste.filter_factory = nova.api.analytics:Analytics.factory

        which would result in a call to the `Analytics` class as

            import nova.api.analytics
            analytics.Analytics(app_from_paste, redis_host='127.0.0.1')

        You could of course re-implement the `factory` method in subclasses,
        but using the kwarg passing it shouldn't be necessary.
        """
        def _factory(app):
            return cls(app, **local_config)
        return _factory

    def __init__(self, application):
        self.application = application

    def process_request(self, req):
        """Called on each request.

        If this returns None, the next application down the stack will be
        executed. If it returns a response then that response will be returned
        and execution will stop here.
        """
        return None

    def process_response(self, response):
        """Do whatever you'd like to the response."""
        return response

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        # Give the request hook a chance to short-circuit the pipeline.
        short_circuit = self.process_request(req)
        if short_circuit:
            return short_circuit
        # Otherwise forward to the wrapped app and post-process its response.
        return self.process_response(req.get_response(self.application))
class Debug(Middleware):
    """Helper class for debugging a WSGI application.

    Can be inserted into any WSGI application chain to get information
    about the request and response.
    """

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Dump the request environ, response headers and body to stdout."""
        print(('*' * 40) + ' REQUEST ENVIRON')
        for key, value in req.environ.items():
            print(key, '=', value)
        print()
        resp = req.get_response(self.application)

        print(('*' * 40) + ' RESPONSE HEADERS')
        # CONSISTENCY FIX: use items() rather than the Python-2-only
        # iteritems(), matching the environ loop above and keeping the
        # module (which already imports print_function) Python-3 ready.
        for (key, value) in resp.headers.items():
            print(key, '=', value)
        print()

        # Wrap the body iterator so the payload is echoed as it streams out.
        resp.app_iter = self.print_generator(resp.app_iter)

        return resp

    @staticmethod
    def print_generator(app_iter):
        """Iterator that prints the contents of a wrapper string."""
        print(('*' * 40) + ' BODY')
        for part in app_iter:
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print()
class Router(object):
    """WSGI middleware that maps incoming requests to WSGI apps."""

    def __init__(self, mapper):
        """Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be an object that can route
        the request to the action-specific method.

        Examples:
          mapper = routes.Mapper()
          sc = ServerController()

          # Explicit mapping of one route to a controller+action
          mapper.connect(None, '/svrlist', controller=sc, action='list')

          # Actions are all implicitly defined
          mapper.resource('server', 'servers', controller=sc)

          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just that
          # section of the URL.
          mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
        """
        self.map = mapper
        # RoutesMiddleware matches each request against self.map, stores the
        # match in req.environ, then invokes self._dispatch.
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, req):
        """Route the incoming request to a controller based on self.map.

        If no match, return a 404.
        """
        return self._router

    @staticmethod
    @webob.dec.wsgify(RequestClass=Request)
    def _dispatch(req):
        """Dispatch the request to the appropriate controller.

        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ. Either returns 404
        or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if not match:
            return webob.exc.HTTPNotFound()
        app = match['controller']
        return app
class Loader(object):
    """Used to load WSGI applications from paste configurations."""

    def __init__(self, config_path=None):
        """Initialize the loader, and attempt to find the config.

        :param config_path: Full or relative path to the paste config.
        :returns: None
        :raises: nova.exception.ConfigNotFound when no config file is found
        """
        self.config_path = None

        # Fall back to the configured api-paste file when no path is given.
        config_path = config_path or CONF.api_paste_config
        if not os.path.isabs(config_path):
            # Relative paths are resolved through oslo.config's search dirs.
            self.config_path = CONF.find_file(config_path)
        elif os.path.exists(config_path):
            self.config_path = config_path

        if not self.config_path:
            raise exception.ConfigNotFound(path=config_path)

    def load_app(self, name):
        """Return the paste URLMap wrapped WSGI application.

        :param name: Name of the application to load.
        :returns: Paste URLMap object wrapping the requested application.
        :raises: `nova.exception.PasteAppNotFound`
        """
        try:
            LOG.debug(_("Loading app %(name)s from %(path)s") %
                      {'name': name, 'path': self.config_path})
            return deploy.loadapp("config:%s" % self.config_path, name=name)
        except LookupError as err:
            LOG.error(err)
            raise exception.PasteAppNotFound(name=name, path=self.config_path)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os

# toolchains options
ARCH='ppc'
CPU='ppc405'
CROSS_TOOL='gcc'

# link-time base address for the .text section
TextBase = '0x00000000'

PLATFORM = 'gcc'
EXEC_PATH = 'C:/Program Files/CodeSourcery/Sourcery G++ Lite/bin'
BUILD = 'debug'

# Allow the toolchain location to be overridden from the environment.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

if PLATFORM == 'gcc':
    # toolchains
    PREFIX = 'powerpc-eabi-'
    CC = PREFIX + 'gcc'
    CXX = PREFIX + 'g++'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    # PowerPC 405 code-generation flags shared by compile and link steps.
    DEVICE = ' -mcpu=405 -mno-multiple -mno-string -mno-update -fno-exceptions -fno-builtin -msoft-float'
    CFLAGS = DEVICE + ' -D__KERNEL__'
    AFLAGS = '-D__ASSEMBLY__ -fno-exceptions -fno-builtin -mregnames -c -Wall -Xassembler -m405 -msoft-float -ffunction-sections'
    LFLAGS = DEVICE + ' -Wl,--gc-sections,--cref,-Map=rtthread.map -T taihu.lds' + ' -Ttext=' + TextBase

    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'

    DASM_ACTION = OBJDUMP + ' -d rtthread-taihu.elf > rtt.asm\n'
    # After linking: emit a raw binary image and print section sizes.
    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n' # + DASM_ACTION
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/sh

test_description='handling of promisor remote advertisement'

. ./test-lib.sh

if ! test_have_prereq PERL_TEST_HELPERS
then
	skip_all='skipping promisor remote capabilities tests; Perl not available'
	test_done
fi

# Keep pack layout simple for the repacking done by initialize_server.
GIT_TEST_MULTI_PACK_INDEX=0
GIT_TEST_MULTI_PACK_INDEX_WRITE_INCREMENTAL=0

# Setup the repository with three commits, this way HEAD is always
# available and we can hide commit 1 or 2.
test_expect_success 'setup: create "template" repository' '
	git init template &&
	test_commit -C template 1 &&
	test_commit -C template 2 &&
	test_commit -C template 3 &&
	test-tool genrandom foo 10240 >template/foo &&
	git -C template add foo &&
	git -C template commit -m foo
'

# A bare repo will act as a server repo with unpacked objects.
test_expect_success 'setup: create bare "server" repository' '
	git clone --bare --no-local template server &&
	mv server/objects/pack/pack-* . &&
	packfile=$(ls pack-*.pack) &&
	git -C server unpack-objects --strict <"$packfile"
'
# check_missing_objects <repo> <count> <oids>
# Verify that <repo> is missing exactly <count> objects. When <count> is
# less than 2, <oids> is the single expected missing oid (or empty);
# otherwise <oids> names a file listing the expected oids, compared
# order-insensitively.
check_missing_objects () {
	git -C "$1" rev-list --objects --all --missing=print > all.txt &&
	perl -ne 'print if s/^[?]//' all.txt >missing.txt &&
	test_line_count = "$2" missing.txt &&
	if test "$2" -lt 2
	then
		test "$3" = "$(cat missing.txt)"
	else
		test -f "$3" &&
		sort <"$3" >expected_sorted &&
		sort <missing.txt >actual_sorted &&
		test_cmp expected_sorted actual_sorted
	fi
}
# initialize_server <count> <missing_oids>
# Repack the server repo so that blobs over 5k are filtered into a
# separate pack (marked as promisor), then assert via
# check_missing_objects that exactly the expected objects are missing.
initialize_server () {
	count="$1"
	missing_oids="$2"

	# Repack everything first
	git -C server -c repack.writebitmaps=false repack -a -d &&

	# Remove promisor file in case they exist, useful when reinitializing
	rm -rf server/objects/pack/*.promisor &&

	# Repack without the largest object and create a promisor pack on server
	git -C server -c repack.writebitmaps=false repack -a -d \
		--filter=blob:limit=5k --filter-to="$(pwd)/pack" &&
	promisor_file=$(ls server/objects/pack/*.pack | sed "s/\.pack/.promisor/") &&
	>"$promisor_file" &&

	# Check objects missing on the server
	check_missing_objects server "$count" "$missing_oids"
}
# copy_to_lop <oid>
# Copy the loose object <oid> from the server repo into the "lop"
# (Large Object Promisor) repo's object store.
copy_to_lop () {
	oid_path="$(test_oid_to_path $1)" &&
	path="server/objects/$oid_path" &&
	path2="lop/objects/$oid_path" &&
	mkdir -p $(dirname "$path2") &&
	cp "$path" "$path2"
}
test_expect_success "setup for testing promisor remote advertisement" '
# Create another bare repo called "lop" (for Large Object Promisor)
git init --bare lop &&
# Copy the largest object from server to lop
obj="HEAD:foo" &&
oid="$(git -C server rev-parse $obj)" &&
copy_to_lop "$oid" &&
initialize_server 1 "$oid" &&
# Configure lop as promisor remote for server
git -C server remote add lop "file://$(pwd)/lop" &&
git -C server config remote.lop.promisor true &&
git -C lop config uploadpack.allowFilter true &&
git -C lop config uploadpack.allowAnySHA1InWant true &&
git -C server config uploadpack.allowFilter true &&
git -C server config uploadpack.allowAnySHA1InWant true
'
test_expect_success "clone with promisor.advertise set to 'true'" '
git -C server config promisor.advertise true &&
test_when_finished "rm -rf client" &&
# Clone from server to create a client
GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
-c remote.lop.url="file://$(pwd)/lop" \
-c promisor.acceptfromserver=All \
--no-local --filter="blob:limit=5k" server client &&
# Check that the largest object is still missing on the server
check_missing_objects server 1 "$oid"
'
test_expect_success "clone with promisor.advertise set to 'false'" '
git -C server config promisor.advertise false &&
test_when_finished "rm -rf client" &&
# Clone from server to create a client
GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
-c remote.lop.url="file://$(pwd)/lop" \
-c promisor.acceptfromserver=All \
--no-local --filter="blob:limit=5k" server client &&
# Check that the largest object is not missing on the server
check_missing_objects server 0 "" &&
# Reinitialize server so that the largest object is missing again
initialize_server 1 "$oid"
'
test_expect_success "clone with promisor.acceptfromserver set to 'None'" '
git -C server config promisor.advertise true &&
test_when_finished "rm -rf client" &&
# Clone from server to create a client
GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
-c remote.lop.url="file://$(pwd)/lop" \
-c promisor.acceptfromserver=None \
--no-local --filter="blob:limit=5k" server client &&
# Check that the largest object is not missing on the server
check_missing_objects server 0 "" &&
# Reinitialize server so that the largest object is missing again
initialize_server 1 "$oid"
'
test_expect_success "init + fetch with promisor.advertise set to 'true'" '
git -C server config promisor.advertise true &&
test_when_finished "rm -rf client" &&
mkdir client &&
git -C client init &&
git -C client config remote.lop.promisor true &&
git -C client config remote.lop.fetch "+refs/heads/*:refs/remotes/lop/*" &&
git -C client config remote.lop.url "file://$(pwd)/lop" &&
git -C client config remote.server.url "file://$(pwd)/server" &&
git -C client config remote.server.fetch "+refs/heads/*:refs/remotes/server/*" &&
git -C client config promisor.acceptfromserver All &&
GIT_NO_LAZY_FETCH=0 git -C client fetch --filter="blob:limit=5k" server &&
# Check that the largest object is still missing on the server
check_missing_objects server 1 "$oid"
'
test_expect_success "clone with promisor.acceptfromserver set to 'KnownName'" '
git -C server config promisor.advertise true &&
test_when_finished "rm -rf client" &&
# Clone from server to create a client
GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
-c remote.lop.url="file://$(pwd)/lop" \
-c promisor.acceptfromserver=KnownName \
--no-local --filter="blob:limit=5k" server client &&
# Check that the largest object is still missing on the server
check_missing_objects server 1 "$oid"
'
test_expect_success "clone with 'KnownName' and different remote names" '
git -C server config promisor.advertise true &&
test_when_finished "rm -rf client" &&
# Clone from server to create a client
GIT_NO_LAZY_FETCH=0 git clone -c remote.serverTwo.promisor=true \
-c remote.serverTwo.fetch="+refs/heads/*:refs/remotes/lop/*" \
-c remote.serverTwo.url="file://$(pwd)/lop" \
-c promisor.acceptfromserver=KnownName \
--no-local --filter="blob:limit=5k" server client &&
# Check that the largest object is not missing on the server
check_missing_objects server 0 "" &&
# Reinitialize server so that the largest object is missing again
initialize_server 1 "$oid"
'
test_expect_success "clone with 'KnownName' and missing URL in the config" '
git -C server config promisor.advertise true &&
test_when_finished "rm -rf client" &&
# Clone from server to create a client
# Lazy fetching by the client from the LOP will fail because of the
# missing URL in the client config, so the server will have to lazy
# fetch from the LOP.
GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
-c promisor.acceptfromserver=KnownName \
--no-local --filter="blob:limit=5k" server client &&
# Check that the largest object is not missing on the server
check_missing_objects server 0 "" &&
# Reinitialize server so that the largest object is missing again
initialize_server 1 "$oid"
'
test_expect_success "clone with promisor.acceptfromserver set to 'KnownUrl'" '
git -C server config promisor.advertise true &&
test_when_finished "rm -rf client" &&
# Clone from server to create a client
GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
-c remote.lop.url="file://$(pwd)/lop" \
-c promisor.acceptfromserver=KnownUrl \
--no-local --filter="blob:limit=5k" server client &&
# Check that the largest object is still missing on the server
check_missing_objects server 1 "$oid"
'
test_expect_success "clone with 'KnownUrl' and different remote urls" '
ln -s lop serverTwo &&
git -C server config promisor.advertise true &&
test_when_finished "rm -rf client" &&
# Clone from server to create a client
GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
-c remote.lop.url="file://$(pwd)/serverTwo" \
-c promisor.acceptfromserver=KnownUrl \
--no-local --filter="blob:limit=5k" server client &&
# Check that the largest object is not missing on the server
check_missing_objects server 0 "" &&
# Reinitialize server so that the largest object is missing again
initialize_server 1 "$oid"
'
# With promisor.acceptfromserver=KnownUrl the client only accepts a LOP
# (large object promisor) whose URL it already has configured. The next
# two tests check that a missing or empty server-side URL makes the
# clone fail cleanly instead of silently accepting the LOP.

test_expect_success "clone with 'KnownUrl' and url not configured on the server" '
	git -C server config promisor.advertise true &&

	test_when_finished "rm -rf client" &&
	test_when_finished "git -C server config set remote.lop.url \"file://$(pwd)/lop\"" &&
	git -C server config unset remote.lop.url &&

	# Clone from server to create a client
	# It should fail because the client will reject the LOP as URLs are
	# different, and the server cannot lazy fetch as the LOP URL is
	# missing, so the remote name will be used instead which will fail.
	test_must_fail env GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
		-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
		-c remote.lop.url="file://$(pwd)/lop" \
		-c promisor.acceptfromserver=KnownUrl \
		--no-local --filter="blob:limit=5k" server client &&

	# Check that the largest object is still missing on the server
	check_missing_objects server 1 "$oid"
'

test_expect_success "clone with 'KnownUrl' and empty url, so not advertised" '
	git -C server config promisor.advertise true &&

	test_when_finished "rm -rf client" &&
	test_when_finished "git -C server config set remote.lop.url \"file://$(pwd)/lop\"" &&
	git -C server config set remote.lop.url "" &&

	# Clone from server to create a client
	# It should fail because the client will reject the LOP as an empty URL is
	# not advertised, and the server cannot lazy fetch as the LOP URL is empty,
	# so the remote name will be used instead which will fail.
	test_must_fail env GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
		-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
		-c remote.lop.url="file://$(pwd)/lop" \
		-c promisor.acceptfromserver=KnownUrl \
		--no-local --filter="blob:limit=5k" server client &&

	# Check that the largest object is still missing on the server
	check_missing_objects server 1 "$oid"
'
# promisor.sendFields makes the server include extra per-remote config
# fields in its promisor-remote capability advertisement;
# promisor.checkFields makes the client accept only remotes whose
# advertised fields match its own configuration.

test_expect_success "clone with promisor.sendFields" '
	git -C server config promisor.advertise true &&

	test_when_finished "rm -rf client" &&

	git -C server remote add otherLop "https://invalid.invalid" &&
	git -C server config remote.otherLop.token "fooBar" &&
	git -C server config remote.otherLop.stuff "baz" &&
	git -C server config remote.otherLop.partialCloneFilter "blob:limit=10k" &&
	test_when_finished "git -C server remote remove otherLop" &&
	test_config -C server promisor.sendFields "partialCloneFilter, token" &&
	test_when_finished "rm trace" &&

	# Clone from server to create a client
	GIT_TRACE_PACKET="$(pwd)/trace" GIT_NO_LAZY_FETCH=0 git clone \
		-c remote.lop.promisor=true \
		-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
		-c remote.lop.url="file://$(pwd)/lop" \
		-c promisor.acceptfromserver=All \
		--no-local --filter="blob:limit=5k" server client &&

	# Check that fields are properly transmitted
	ENCODED_URL=$(echo "file://$(pwd)/lop" | sed -e "s/ /%20/g") &&
	PR1="name=lop,url=$ENCODED_URL,partialCloneFilter=blob:none" &&
	PR2="name=otherLop,url=https://invalid.invalid,partialCloneFilter=blob:limit=10k,token=fooBar" &&
	test_grep "clone< promisor-remote=$PR1;$PR2" trace &&
	test_grep "clone> promisor-remote=lop;otherLop" trace &&

	# Check that the largest object is still missing on the server
	check_missing_objects server 1 "$oid"
'

test_expect_success "clone with promisor.checkFields" '
	git -C server config promisor.advertise true &&

	test_when_finished "rm -rf client" &&

	git -C server remote add otherLop "https://invalid.invalid" &&
	git -C server config remote.otherLop.token "fooBar" &&
	git -C server config remote.otherLop.stuff "baz" &&
	git -C server config remote.otherLop.partialCloneFilter "blob:limit=10k" &&
	test_when_finished "git -C server remote remove otherLop" &&
	test_config -C server promisor.sendFields "partialCloneFilter, token" &&
	test_when_finished "rm trace" &&

	# Clone from server to create a client.
	# The client checks partialCloneFilter, which matches for lop only,
	# so only lop should be accepted (see the trace checks below).
	GIT_TRACE_PACKET="$(pwd)/trace" GIT_NO_LAZY_FETCH=0 git clone \
		-c remote.lop.promisor=true \
		-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
		-c remote.lop.url="file://$(pwd)/lop" \
		-c remote.lop.partialCloneFilter="blob:none" \
		-c promisor.acceptfromserver=All \
		-c promisor.checkFields=partialcloneFilter \
		--no-local --filter="blob:limit=5k" server client &&

	# Check that fields are properly transmitted
	ENCODED_URL=$(echo "file://$(pwd)/lop" | sed -e "s/ /%20/g") &&
	PR1="name=lop,url=$ENCODED_URL,partialCloneFilter=blob:none" &&
	PR2="name=otherLop,url=https://invalid.invalid,partialCloneFilter=blob:limit=10k,token=fooBar" &&
	test_grep "clone< promisor-remote=$PR1;$PR2" trace &&
	test_grep "clone> promisor-remote=lop" trace &&
	test_grep ! "clone> promisor-remote=lop;otherLop" trace &&

	# Check that the largest object is still missing on the server
	check_missing_objects server 1 "$oid"
'
# Keep the client around this time: the remaining tests reuse it (and a
# copy, client2) to exercise subsequent fetches.
test_expect_success "clone with promisor.advertise set to 'true' but don't delete the client" '
	git -C server config promisor.advertise true &&

	# Clone from server to create a client
	GIT_NO_LAZY_FETCH=0 git clone -c remote.lop.promisor=true \
		-c remote.lop.fetch="+refs/heads/*:refs/remotes/lop/*" \
		-c remote.lop.url="file://$(pwd)/lop" \
		-c promisor.acceptfromserver=All \
		--no-local --filter="blob:limit=5k" server client &&

	# Check that the largest object is still missing on the server
	check_missing_objects server 1 "$oid"
'

test_expect_success "setup for subsequent fetches" '
	# Generate new commit with large blob
	test-tool genrandom bar 10240 >template/bar &&
	git -C template add bar &&
	git -C template commit -m bar &&

	# Fetch new commit with large blob
	git -C server fetch origin &&
	git -C server update-ref HEAD FETCH_HEAD &&
	git -C server rev-parse HEAD >expected_head &&

	# Repack everything twice and remove .promisor files before
	# each repack. This makes sure everything gets repacked
	# into a single packfile. The second repack is necessary
	# because the first one fetches from lop and creates a new
	# packfile and its associated .promisor file.
	rm -f server/objects/pack/*.promisor &&
	git -C server -c repack.writebitmaps=false repack -a -d &&
	rm -f server/objects/pack/*.promisor &&
	git -C server -c repack.writebitmaps=false repack -a -d &&

	# Unpack everything
	rm pack-* &&
	mv server/objects/pack/pack-* . &&
	packfile=$(ls pack-*.pack) &&
	git -C server unpack-objects --strict <"$packfile" &&

	# Copy new large object to lop
	obj_bar="HEAD:bar" &&
	oid_bar="$(git -C server rev-parse $obj_bar)" &&
	copy_to_lop "$oid_bar" &&

	# Reinitialize server so that the 2 largest objects are missing
	printf "%s\n" "$oid" "$oid_bar" >expected_missing.txt &&
	initialize_server 2 expected_missing.txt &&

	# Create one more client
	cp -r client client2
'

test_expect_success "subsequent fetch from a client when promisor.advertise is true" '
	git -C server config promisor.advertise true &&

	GIT_NO_LAZY_FETCH=0 git -C client pull origin &&

	git -C client rev-parse HEAD >actual &&
	test_cmp expected_head actual &&
	cat client/bar >/dev/null &&

	# Both large objects should still be missing from the server
	check_missing_objects server 2 expected_missing.txt
'

test_expect_success "subsequent fetch from a client when promisor.advertise is false" '
	git -C server config promisor.advertise false &&

	GIT_NO_LAZY_FETCH=0 git -C client2 pull origin &&

	git -C client2 rev-parse HEAD >actual &&
	test_cmp expected_head actual &&
	cat client2/bar >/dev/null &&

	# Without advertising, the server had to fetch the new large object
	# itself, so only the original one remains missing
	check_missing_objects server 1 "$oid"
'

test_done
|
unknown
|
github
|
https://github.com/git/git
|
t/t5710-promisor-remote-capability.sh
|
import numpy as np
import seaborn as sns
from matplotlib import patches
import matplotlib.pyplot as plt
from scipy.signal import gaussian
from scipy.spatial import distance
XY_CACHE = {}
STATIC_DIR = "_static"
plt.rcParams["savefig.dpi"] = 300
def poisson_disc_sample(array_radius, pad_radius, candidates=100, d=2, seed=None):
    """Find positions using poisson-disc sampling.

    Parameters
    ----------
    array_radius : float
        Radius of the disc that every sample must fall inside.
    pad_radius : float
        Minimum distance allowed between any two samples.
    candidates : int
        Number of candidate points tried around each queued sample before
        that sample is retired from the queue.
    d : int
        Dimensionality of the sampled points.
    seed : int or None
        Seed for the random generator; also part of the cache key.

    Returns
    -------
    ndarray of shape (n_samples, d)
        Accepted sample positions; the first row is the origin.
    """
    # See http://bost.ocks.org/mike/algorithms/

    # Return a cached result before doing any work. (Fix: the RNG was
    # previously constructed before the cache lookup, which was wasted
    # effort on every cache hit; the RNG has no side effects, so moving
    # it below the lookup preserves behavior.)
    key = array_radius, pad_radius, seed
    if key in XY_CACHE:
        return XY_CACHE[key]

    rng = np.random.default_rng(seed)

    # Start at a fixed point we know will work
    start = np.zeros(d)
    samples = [start]
    queue = [start]

    while queue:
        # Pick a sample to expand from
        s_idx = rng.integers(len(queue))
        s = queue[s_idx]

        for i in range(candidates):
            # Generate a candidate from this sample
            coords = rng.uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)

            # Check the two acceptance conditions (the old comment said
            # "three", but only these two are tested): inside the disc,
            # and no closer than pad_radius to any existing sample.
            in_array = np.sqrt(np.sum(coords ** 2)) < array_radius
            in_ring = np.all(distance.cdist(samples, [coords]) > pad_radius)

            if in_array and in_ring:
                # Accept the candidate
                samples.append(coords)
                queue.append(coords)
                break

        if (i + 1) == candidates:
            # We've exhausted the particular sample
            queue.pop(s_idx)

    samples = np.array(samples)
    XY_CACHE[key] = samples
    return samples
def logo(
    ax,
    color_kws, ring, ring_idx, edge,
    pdf_means, pdf_sigma, dy, y0, w, h,
    hist_mean, hist_sigma, hist_y0, lw, skip,
    scatter, pad, scale,
):
    """Draw the seaborn logo (circle, waves, histogram bars, dots) onto `ax`.

    All parameters are layout/style knobs: `color_kws` feeds
    ``sns.cubehelix_palette``; `ring`, `ring_idx` and `edge` control the
    outer ring; `pdf_means`, `pdf_sigma`, `dy`, `y0`, `w` and `h` shape the
    smooth "wave" curves; `hist_mean`, `hist_sigma`, `hist_y0`, `lw` and
    `skip` shape the histogram bars (hist_mean=0 disables them);
    `scatter`, `pad` and `scale` control the dot pattern in the top wave.
    """
    # Square, invisible axes with specified limits to center the logo
    ax.set(xlim=(35 + w, 95 - w), ylim=(-3, 53))
    ax.set_axis_off()
    ax.set_aspect('equal')

    # Magic numbers for the logo circle
    radius = 27
    center = 65, 25

    # Full x and y grids for a gaussian curve
    x = np.arange(101)
    y = gaussian(x.size, pdf_sigma)

    x0 = 30  # Magic number
    xx = x[x0:]

    # Vertical distances between the PDF curves
    n = len(pdf_means)
    dys = np.linspace(0, (n - 1) * dy, n) - (n * dy / 2)
    dys -= dys.mean()

    # Compute the PDF curves with vertical offsets
    # NOTE: the comprehension variable `dy` shadows the `dy` parameter here
    pdfs = [h * (y[x0 - m:-m] + y0 + dy) for m, dy in zip(pdf_means, dys)]

    # Add in constants to fill from bottom and to top
    pdfs.insert(0, np.full(xx.shape, -h))
    pdfs.append(np.full(xx.shape, 50 + h))

    # Color gradient; one extra color is reserved for the histogram bars
    colors = sns.cubehelix_palette(n + 1 + bool(hist_mean), **color_kws)

    # White fill between curves and around edges
    bg = patches.Circle(
        center, radius=radius - 1 + ring, color="white",
        transform=ax.transData, zorder=0,
    )
    ax.add_artist(bg)

    # Clipping artist (not shown) for the interior elements
    fg = patches.Circle(center, radius=radius - edge, transform=ax.transData)

    # Ring artist to surround the circle (optional)
    if ring:
        wedge = patches.Wedge(
            center, r=radius + edge / 2, theta1=0, theta2=360, width=edge / 2,
            transform=ax.transData, color=colors[ring_idx], alpha=1
        )
        ax.add_artist(wedge)

    # Add histogram bars
    if hist_mean:
        hist_color = colors.pop(0)
        hist_y = gaussian(x.size, hist_sigma)
        hist = 1.1 * h * (hist_y[x0 - hist_mean:-hist_mean] + hist_y0)
        dx = x[skip] - x[0]
        hist_x = xx[::skip]
        hist_h = h + hist[::skip]

        # Magic number to avoid tiny sliver of bar on edge
        use = hist_x < center[0] + radius * .5
        bars = ax.bar(
            hist_x[use], hist_h[use], bottom=-h, width=dx,
            align="edge", color=hist_color, ec="w", lw=lw,
            zorder=3,
        )
        for bar in bars:
            bar.set_clip_path(fg)

    # Add each smooth PDF "wave"
    for i, pdf in enumerate(pdfs[1:], 1):
        u = ax.fill_between(xx, pdfs[i - 1] + w, pdf, color=colors[i - 1], lw=0)
        u.set_clip_path(fg)

    # Add scatterplot in top wave area
    if scatter:
        seed = sum(map(ord, "seaborn logo"))
        xy = poisson_disc_sample(radius - edge - ring, pad, seed=seed)
        # Keep only dots that clear the top PDF curve by a margin
        clearance = distance.cdist(xy + center, np.c_[xx, pdfs[-2]])
        use = clearance.min(axis=1) > pad / 1.8
        x, y = xy[use].T
        sizes = (x - y) % 9

        points = ax.scatter(
            x + center[0], y + center[1], s=scale * (10 + sizes * 5),
            zorder=5, color=colors[-1], ec="w", lw=scale / 2,
        )
        # Clip the dots to the (hidden) top fill region, `u` from the loop above
        path = u.get_paths()[0]
        points.set_clip_path(path, transform=u.get_transform())
        u.set_visible(False)
def savefig(fig, shape, variant):
    """Save `fig` as PNG and SVG under STATIC_DIR.

    Parameters
    ----------
    fig : matplotlib Figure
        Figure containing the rendered logo.
    shape : str
        Logo layout name ("mark", "wide", or "tall"); used in the filename.
    variant : str
        Background variant ("white", "light", or "dark"); used in the
        filename and to select the facecolor (opaque white vs transparent).
    """
    fig.subplots_adjust(0, 0, 1, 1, 0, 0)
    # Fix: the facecolor previously depended on the *global* `bg` loop
    # variable instead of the `variant` argument. Callers always passed
    # variant=bg, so using the parameter is behavior-identical while making
    # the function self-contained.
    facecolor = (1, 1, 1, 1) if variant == "white" else (1, 1, 1, 0)
    for ext in ["png", "svg"]:
        fig.savefig(f"{STATIC_DIR}/logo-{shape}-{variant}bg.{ext}", facecolor=facecolor)
if __name__ == "__main__":

    # Render every background variant in three layouts:
    # "mark" (logo only), "wide" (logo + large wordmark), "tall" (stacked).
    for bg in ["white", "light", "dark"]:

        # Darkest palette color for dark backgrounds, lightest otherwise
        color_idx = -1 if bg == "dark" else 0

        kwargs = dict(
            color_kws=dict(start=.3, rot=-.4, light=.8, dark=.3, reverse=True),
            ring=True, ring_idx=color_idx, edge=1,
            pdf_means=[8, 24], pdf_sigma=16,
            dy=1, y0=1.8, w=.5, h=12,
            hist_mean=2, hist_sigma=10, hist_y0=.6, lw=1, skip=6,
            scatter=True, pad=1.8, scale=.5,
        )
        color = sns.cubehelix_palette(**kwargs["color_kws"])[color_idx]

        # ------------------------------------------------------------------------ #
        # Logo mark only
        fig, ax = plt.subplots(figsize=(2, 2), facecolor="w", dpi=100)
        logo(ax, **kwargs)
        savefig(fig, "mark", bg)

        # ------------------------------------------------------------------------ #
        # Wide layout: mark on the left, wordmark on the right
        fig, axs = plt.subplots(1, 2, figsize=(8, 2), dpi=100,
                                gridspec_kw=dict(width_ratios=[1, 3]))
        logo(axs[0], **kwargs)

        font = {
            "family": "avenir",
            "color": color,
            "weight": "regular",
            "size": 120,
        }
        axs[1].text(.01, .35, "seaborn", ha="left", va="center",
                    fontdict=font, transform=axs[1].transAxes)
        axs[1].set_axis_off()
        savefig(fig, "wide", bg)

        # ------------------------------------------------------------------------ #
        # Tall layout: mark above, wordmark below
        fig, axs = plt.subplots(2, 1, figsize=(2, 2.5), dpi=100,
                                gridspec_kw=dict(height_ratios=[4, 1]))
        logo(axs[0], **kwargs)

        font = {
            "family": "avenir",
            "color": color,
            "weight": "regular",
            "size": 34,
        }
        axs[1].text(.5, 1, "seaborn", ha="center", va="top",
                    fontdict=font, transform=axs[1].transAxes)
        axs[1].set_axis_off()
        savefig(fig, "tall", bg)
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"XDELEX": {
"summary": "Deletes one or multiple entries from the stream.",
"complexity": "O(1) for each single item to delete in the stream, regardless of the stream size.",
"group": "stream",
"since": "8.2.0",
"arity": -5,
"function": "xdelexCommand",
"command_flags": [
"WRITE",
"FAST"
],
"acl_categories": [
"STREAM"
],
"key_specs": [
{
"flags": [
"RW",
"DELETE"
],
"begin_search": {
"index": {
"pos": 1
}
},
"find_keys": {
"range": {
"lastkey": 0,
"step": 1,
"limit": 0
}
}
}
],
"arguments": [
{
"name": "key",
"type": "key",
"key_spec_index": 0
},
{
"name": "condition",
"type": "oneof",
"optional": true,
"arguments": [
{
"name": "keepref",
"type": "pure-token",
"token": "KEEPREF"
},
{
"name": "delref",
"type": "pure-token",
"token": "DELREF"
},
{
"name": "acked",
"type": "pure-token",
"token": "ACKED"
}
]
},
{
"name": "ids",
"token": "IDS",
"type": "block",
"arguments": [
{
"name": "numids",
"type": "integer"
},
{
"name": "id",
"type": "string",
"multiple": true
}
]
}
],
"reply_schema": {
"description": "Array of results. Returns an array with -1 for each requested ID if the key does not exist.",
"type": "array",
"minItems": 0,
"maxItems": 4294967295,
"items": {
"oneOf": [
{
"description": "The id does not exist in the provided stream key.",
"const": -1
},
{
"description": "Entry was deleted from the stream.",
"const": 1
},
{
"description": "Entry was not deleted, but there are still dangling references.",
"const": 2
}
]
}
}
}
}
|
json
|
github
|
https://github.com/redis/redis
|
src/commands/xdelex.json
|
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_PROPAGATOR_DEBUG_UTILS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_PROPAGATOR_DEBUG_UTILS_H_

// Declarations of debug helpers for dumping executor node state.

namespace tensorflow {

struct Entry;
struct NodeItem;
class Tensor;

// Returns a pointer to the tensor in `input` if one exists, or `nullptr`.
const Tensor* GetTensorValueForDump(const Entry& input);

// Writes a LOG(WARNING) message describing the state of the given pending
// node and the inputs in `input_vector`. `show_nodes_with_no_ready_inputs`
// controls whether nodes with no ready input are reported.
// (The old comment referenced an `immutable_state` parameter that does not
// exist in this signature.)
void DumpPendingNodeState(const NodeItem& node_item, const Entry* input_vector,
                          const bool show_nodes_with_no_ready_inputs);

// Writes a LOG(WARNING) message describing the state of the given active
// node and the inputs in `input_vector`.
void DumpActiveNodeState(const NodeItem& node_item, const Entry* input_vector);

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_COMMON_RUNTIME_PROPAGATOR_DEBUG_UTILS_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/common_runtime/propagator_debug_utils.h
|
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import urllib2
import glance_store
from oslo_config import cfg
from six.moves import cStringIO
from taskflow import task
import glance.async.flows.base_import as import_flow
from glance.async import taskflow_executor
from glance.common.scripts.image_import import main as image_import
from glance.common.scripts import utils as script_utils
from glance.common import utils
from glance import domain
from glance import gateway
import glance.tests.utils as test_utils
CONF = cfg.CONF
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
class _ErrorTask(task.Task):
    """Taskflow task that always fails; used to exercise flow revert paths."""

    def execute(self):
        raise RuntimeError()
class TestImportTask(test_utils.BaseTestCase):
    """Tests for the taskflow-based image import flow and its tasks.

    Exercises the full flow through ``TaskExecutor.begin_processing`` as
    well as the individual flow tasks (_CreateImage, _SaveImage,
    _ImportToFS, _DeleteFromFS, _CompleteTask) in isolation.
    """

    def setUp(self):
        """Configure a file store and a work dir, and build a fresh task."""
        super(TestImportTask, self).setUp()

        glance_store.register_opts(CONF)
        self.config(default_store='file',
                    stores=['file', 'http'],
                    filesystem_store_datadir=self.test_dir,
                    group="glance_store")
        glance_store.create_stores(CONF)

        self.work_dir = os.path.join(self.test_dir, 'work_dir')
        utils.safe_mkdirs(self.work_dir)
        self.config(work_dir=self.work_dir, group='task')

        self.context = mock.MagicMock()
        self.img_repo = mock.MagicMock()
        self.task_repo = mock.MagicMock()

        self.gateway = gateway.Gateway()
        self.task_factory = domain.TaskFactory()
        self.img_factory = self.gateway.get_image_factory(self.context)
        self.image = self.img_factory.new_image(image_id=UUID1,
                                                disk_format='qcow2',
                                                container_format='bare')

        task_input = {
            "import_from": "http://cloud.foo/image.qcow2",
            "import_from_format": "qcow2",
            "image_properties": {'disk_format': 'qcow2',
                                 'container_format': 'bare'}
        }
        task_ttl = CONF.task.task_time_to_live

        self.task_type = 'import'
        self.task = self.task_factory.new_task(self.task_type, TENANT1,
                                               task_time_to_live=task_ttl,
                                               task_input=task_input)

    def test_import_flow(self):
        """Happy path: the flow stores the image and removes the temp file."""
        self.config(engine_mode='serial',
                    group='taskflow_executor')

        img_factory = mock.MagicMock()

        executor = taskflow_executor.TaskExecutor(
            self.context,
            self.task_repo,
            self.img_repo,
            img_factory)

        self.task_repo.get.return_value = self.task

        def create_image(*args, **kwargs):
            # Pin the generated image id so the on-disk path is predictable
            kwargs['image_id'] = UUID1
            return self.img_factory.new_image(*args, **kwargs)

        self.img_repo.get.return_value = self.image
        img_factory.new_image.side_effect = create_image

        with mock.patch.object(script_utils, 'get_image_data_iter') as dmock:
            dmock.return_value = cStringIO("TEST_IMAGE")

            executor.begin_processing(self.task.task_id)
            image_path = os.path.join(self.test_dir, self.image.image_id)
            tmp_image_path = os.path.join(self.work_dir,
                                          "%s.tasks_import" % image_path)
            self.assertFalse(os.path.exists(tmp_image_path))
            self.assertTrue(os.path.exists(image_path))

    def test_import_flow_missing_work_dir(self):
        """Without a work dir the _ImportToFS step must not run."""
        self.config(engine_mode='serial', group='taskflow_executor')
        self.config(work_dir=None, group='task')

        img_factory = mock.MagicMock()

        executor = taskflow_executor.TaskExecutor(
            self.context,
            self.task_repo,
            self.img_repo,
            img_factory)

        self.task_repo.get.return_value = self.task

        def create_image(*args, **kwargs):
            kwargs['image_id'] = UUID1
            return self.img_factory.new_image(*args, **kwargs)

        self.img_repo.get.return_value = self.image
        img_factory.new_image.side_effect = create_image

        with mock.patch.object(script_utils, 'get_image_data_iter') as dmock:
            dmock.return_value = cStringIO("TEST_IMAGE")

            with mock.patch.object(import_flow._ImportToFS, 'execute') as emk:
                executor.begin_processing(self.task.task_id)
                self.assertFalse(emk.called)

                image_path = os.path.join(self.test_dir, self.image.image_id)
                tmp_image_path = os.path.join(self.work_dir,
                                              "%s.tasks_import" % image_path)
                self.assertFalse(os.path.exists(tmp_image_path))
                self.assertTrue(os.path.exists(image_path))

    def test_import_flow_revert(self):
        """A failing task reverts the flow and cleans up the temp file."""
        self.config(engine_mode='serial',
                    group='taskflow_executor')

        img_factory = mock.MagicMock()

        executor = taskflow_executor.TaskExecutor(
            self.context,
            self.task_repo,
            self.img_repo,
            img_factory)

        self.task_repo.get.return_value = self.task

        def create_image(*args, **kwargs):
            kwargs['image_id'] = UUID1
            return self.img_factory.new_image(*args, **kwargs)

        self.img_repo.get.return_value = self.image
        img_factory.new_image.side_effect = create_image

        with mock.patch.object(script_utils, 'get_image_data_iter') as dmock:
            dmock.return_value = cStringIO("TEST_IMAGE")

            with mock.patch.object(import_flow, "_get_import_flows") as imock:
                imock.return_value = (x for x in [_ErrorTask()])
                self.assertRaises(RuntimeError,
                                  executor.begin_processing, self.task.task_id)
                image_path = os.path.join(self.test_dir, self.image.image_id)
                tmp_image_path = os.path.join(self.work_dir,
                                              "%s.tasks_import" % image_path)
                self.assertFalse(os.path.exists(tmp_image_path))

                # NOTE(flaper87): Eventually, we want this to be assertTrue.
                # The current issue is there's no way to tell taskflow to
                # continue on failures. That is, revert the subflow but keep
                # executing the parent flow. Under discussion/development.
                self.assertFalse(os.path.exists(image_path))

    def test_import_flow_no_import_flows(self):
        """With no custom import flows, the base flow alone imports the image."""
        self.config(engine_mode='serial',
                    group='taskflow_executor')

        img_factory = mock.MagicMock()

        executor = taskflow_executor.TaskExecutor(
            self.context,
            self.task_repo,
            self.img_repo,
            img_factory)

        self.task_repo.get.return_value = self.task

        def create_image(*args, **kwargs):
            kwargs['image_id'] = UUID1
            return self.img_factory.new_image(*args, **kwargs)

        self.img_repo.get.return_value = self.image
        img_factory.new_image.side_effect = create_image

        with mock.patch.object(urllib2, 'urlopen') as umock:
            content = "TEST_IMAGE"
            umock.return_value = cStringIO(content)

            with mock.patch.object(import_flow, "_get_import_flows") as imock:
                imock.return_value = (x for x in [])
                executor.begin_processing(self.task.task_id)
                image_path = os.path.join(self.test_dir, self.image.image_id)
                tmp_image_path = os.path.join(self.work_dir,
                                              "%s.tasks_import" % image_path)
                self.assertFalse(os.path.exists(tmp_image_path))
                self.assertTrue(os.path.exists(image_path))
                umock.assert_called_once()

                with open(image_path) as ifile:
                    self.assertEqual(content, ifile.read())

    def test_create_image(self):
        """_CreateImage delegates to image_import.create_image."""
        image_create = import_flow._CreateImage(self.task.task_id,
                                                self.task_type,
                                                self.task_repo,
                                                self.img_repo,
                                                self.img_factory)

        self.task_repo.get.return_value = self.task

        with mock.patch.object(image_import, 'create_image') as ci_mock:
            ci_mock.return_value = mock.Mock()
            image_create.execute()

            ci_mock.assert_called_once_with(self.img_repo,
                                            self.img_factory,
                                            {'container_format': 'bare',
                                             'disk_format': 'qcow2'},
                                            self.task.task_id)

    def test_save_image(self):
        """_SaveImage activates a 'saving' image and persists it."""
        save_image = import_flow._SaveImage(self.task.task_id,
                                            self.task_type,
                                            self.img_repo)

        with mock.patch.object(self.img_repo, 'get') as get_mock:
            image_id = mock.sentinel.image_id
            image = mock.MagicMock(image_id=image_id, status='saving')
            get_mock.return_value = image

            with mock.patch.object(self.img_repo, 'save') as save_mock:
                save_image.execute(image.image_id)
                get_mock.assert_called_once_with(image_id)
                save_mock.assert_called_once_with(image)
                self.assertEqual('active', image.status)

    def test_import_to_fs(self):
        """_ImportToFS downloads the source data into the task work dir."""
        import_fs = import_flow._ImportToFS(self.task.task_id,
                                            self.task_type,
                                            self.task_repo,
                                            'http://example.com/image.qcow2')

        with mock.patch.object(script_utils, 'get_image_data_iter') as dmock:
            dmock.return_value = "test"

            image_id = UUID1
            path = import_fs.execute(image_id)
            reader, size = glance_store.get_from_backend(path)
            self.assertEqual(4, size)
            self.assertEqual(dmock.return_value, "".join(reader))

            image_path = os.path.join(self.work_dir, image_id)
            tmp_image_path = os.path.join(self.work_dir,
                                          "%s.tasks_import" % image_path)
            self.assertTrue(os.path.exists(tmp_image_path))

    def test_delete_from_fs(self):
        """_DeleteFromFS removes a previously stored file from disk."""
        delete_fs = import_flow._DeleteFromFS(self.task.task_id,
                                              self.task_type)

        data = "test"

        store = glance_store.get_store_from_scheme('file')
        path = glance_store.store_add_to_backend(mock.sentinel.image_id, data,
                                                 mock.sentinel.image_size,
                                                 store, context=None)[0]

        path_wo_scheme = path.split("file://")[1]
        self.assertTrue(os.path.exists(path_wo_scheme))
        delete_fs.execute(path)
        self.assertFalse(os.path.exists(path_wo_scheme))

    def test_complete_task(self):
        """_CompleteTask marks the task succeeded with the image id."""
        complete_task = import_flow._CompleteTask(self.task.task_id,
                                                  self.task_type,
                                                  self.task_repo)

        image_id = mock.sentinel.image_id
        image = mock.MagicMock(image_id=image_id)

        self.task_repo.get.return_value = self.task
        with mock.patch.object(self.task, 'succeed') as succeed:
            complete_task.execute(image.image_id)
            succeed.assert_called_once_with({'image_id': image_id})
|
unknown
|
codeparrot/codeparrot-clean
| ||
//// [tests/cases/compiler/assignmentCompatability16.ts] ////
//// [assignmentCompatability16.ts]
namespace __test1__ {
export interface interfaceWithPublicAndOptional<T,U> { one: T; two?: U; }; var obj4: interfaceWithPublicAndOptional<number,string> = { one: 1 };;
export var __val__obj4 = obj4;
}
namespace __test2__ {
export var obj = {one: <any[]>[1]};
export var __val__obj = obj;
}
__test2__.__val__obj = __test1__.__val__obj4
//// [assignmentCompatability16.js]
"use strict";
var __test1__;
(function (__test1__) {
;
var obj4 = { one: 1 };
;
__test1__.__val__obj4 = obj4;
})(__test1__ || (__test1__ = {}));
var __test2__;
(function (__test2__) {
__test2__.obj = { one: [1] };
__test2__.__val__obj = __test2__.obj;
})(__test2__ || (__test2__ = {}));
__test2__.__val__obj = __test1__.__val__obj4;
|
javascript
|
github
|
https://github.com/microsoft/TypeScript
|
tests/baselines/reference/assignmentCompatability16.js
|
# Embedded file name: /usr/lib/enigma2/python/Components/Converter/RdsInfo.py
from enigma import iRdsDecoder, iPlayableService
from Components.Converter.Converter import Converter
from Components.Element import cached
class RdsInfo(Converter, object):
    """Converter exposing RDS (Radio Data System) data from the service decoder.

    The converter ``type`` string selects which decoder value is tracked:
    'RadioText', 'RtpText' or 'RasInteractiveAvailable'.
    """

    # Converter type ids (stored in self.type)
    RASS_INTERACTIVE_AVAILABLE = 0
    RTP_TEXT_CHANGED = 1
    RADIO_TEXT_CHANGED = 2

    def __init__(self, type):
        """Map the type string to (type id, tuple of service events to watch)."""
        Converter.__init__(self, type)
        self.type, self.interesting_events = {'RadioText': (self.RADIO_TEXT_CHANGED, (iPlayableService.evUpdatedRadioText,)),
         'RtpText': (self.RTP_TEXT_CHANGED, (iPlayableService.evUpdatedRtpText,)),
         'RasInteractiveAvailable': (self.RASS_INTERACTIVE_AVAILABLE, (iPlayableService.evUpdatedRassInteractivePicMask,))}[type]

    @cached
    def getText(self):
        """Return the current radio/RTP text, or '' if no decoder is available."""
        decoder = self.source.decoder
        text = ''
        if decoder:
            if self.type == self.RADIO_TEXT_CHANGED:
                text = decoder.getText(iRdsDecoder.RadioText)
            elif self.type == self.RTP_TEXT_CHANGED:
                text = decoder.getText(iRdsDecoder.RtpText)
            else:
                print 'unknown RdsInfo Converter type', self.type
        return text

    text = property(getText)

    @cached
    def getBoolean(self):
        """Return whether the tracked RDS value is currently present/non-empty."""
        decoder = self.source.decoder
        if self.type == self.RASS_INTERACTIVE_AVAILABLE:
            # Bit 0 of the first mask byte signals Rass interactive availability
            mask = decoder and decoder.getRassInteractiveMask()
            return mask and mask[0] & 1 and True or False
        # NOTE(review): decoder is not None-checked for the text types below —
        # presumably changed() only fires while a decoder exists; confirm.
        if self.type == self.RADIO_TEXT_CHANGED:
            return len(decoder.getText(iRdsDecoder.RadioText)) and True or False
        if self.type == self.RTP_TEXT_CHANGED:
            return len(decoder.getText(iRdsDecoder.RtpText)) and True or False

    boolean = property(getBoolean)

    def changed(self, what):
        """Propagate only generic changes or the events this type listens for."""
        if what[0] != self.CHANGED_SPECIFIC or what[1] in self.interesting_events:
            Converter.changed(self, what)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#ifndef PSYCH_EMITTER_H
#define PSYCH_EMITTER_H

#include <psych.h>

/* Initializer for the emitter part of the Psych native extension.
 * NOTE(review): presumably called from Init_psych at extension load time —
 * confirm against psych.c. */
void Init_psych_emitter(void);

#endif
|
c
|
github
|
https://github.com/ruby/ruby
|
ext/psych/psych_emitter.h
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
import numpy as np
from sklearn.base import is_classifier, is_clusterer, is_outlier_detector, is_regressor
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import _safe_indexing
from sklearn.utils._dataframe import is_pandas_df, is_polars_df
from sklearn.utils._optional_dependencies import check_matplotlib_support
from sklearn.utils._response import _get_response_values
from sklearn.utils._set_output import _get_adapter_from_container
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import (
_is_arraylike_not_scalar,
_num_features,
check_is_fitted,
)
def _check_boundary_response_method(estimator, response_method):
    """Resolve which response method(s) to try on a fitted estimator.

    Parameters
    ----------
    estimator : object
        Fitted estimator to check.

    response_method : {'auto', 'decision_function', 'predict_proba', 'predict'}
        Requested response method. With 'auto', regressors resolve to
        'predict', while other estimators try 'decision_function',
        'predict_proba' and 'predict' in that order.

    Returns
    -------
    str or list of str
        The response method name(s) to use.

    Raises
    ------
    ValueError
        If the estimator looks like a multi-label or multi-output
        multi-class classifier (detected from the shape of ``classes_``).
    """
    # Multi-label / multi-output multi-class targets make classes_[0]
    # array-like rather than a scalar label — reject those up front.
    if hasattr(estimator, "classes_") and _is_arraylike_not_scalar(
        estimator.classes_[0]
    ):
        raise ValueError(
            "Multi-label and multi-output multi-class classifiers are not supported"
        )

    # An explicit request is passed through unchanged.
    if response_method != "auto":
        return response_method

    # 'auto': regressors only expose `predict`; everything else gets the
    # usual preference order.
    if is_regressor(estimator):
        return "predict"
    return ["decision_function", "predict_proba", "predict"]
class DecisionBoundaryDisplay:
"""Decisions boundary visualization.
It is recommended to use
:func:`~sklearn.inspection.DecisionBoundaryDisplay.from_estimator`
to create a :class:`DecisionBoundaryDisplay`. All parameters are stored as
attributes.
Read more in the :ref:`User Guide <visualizations>`.
For a detailed example comparing the decision boundaries of multinomial and
one-vs-rest logistic regression, please see
:ref:`sphx_glr_auto_examples_linear_model_plot_logistic_multinomial.py`.
.. versionadded:: 1.1
Parameters
----------
xx0 : ndarray of shape (grid_resolution, grid_resolution)
First output of :func:`meshgrid <numpy.meshgrid>`.
xx1 : ndarray of shape (grid_resolution, grid_resolution)
Second output of :func:`meshgrid <numpy.meshgrid>`.
n_classes : int
Expected number of unique classes or labels if `response` was generated by a
:term:`classifier` or a :term:`clusterer`.
For :term:`outlier detectors`, `n_classes` should be set to 2 by definition
(inlier or outlier).
For :term:`regressors`, `n_classes` should also be set to 2 by convention
(continuous responses are displayed the same way as unthresholded binary
responses).
.. versionadded:: 1.9
response : ndarray of shape (grid_resolution, grid_resolution) or \
(grid_resolution, grid_resolution, n_classes)
Values of the response function.
multiclass_colors : list of str or str, default=None
Specifies how to color each class when plotting all classes of
:term:`multiclass` problems.
Possible inputs are:
* list: list of Matplotlib
`color <https://matplotlib.org/stable/users/explain/colors/colors.html#colors-def>`_
strings, of length `n_classes`
* str: name of :class:`matplotlib.colors.Colormap`
* None: 'tab10' colormap is used to sample colors if the number of
classes is less than or equal to 10, otherwise 'gist_rainbow' colormap.
Single color (fading to white) colormaps will be generated from the colors in
the list or colors taken from the colormap, and passed to the `cmap` parameter
of the `plot_method`.
For :term:`binary` problems, this is ignored and `cmap` or `colors` can be
passed as kwargs instead, otherwise, the default colormap ('viridis') is used.
.. versionadded:: 1.7
.. versionchanged:: 1.9
`multiclass_colors` is now also used when `response_method="predict"`
xlabel : str, default=None
Default label to place on x axis.
ylabel : str, default=None
Default label to place on y axis.
Attributes
----------
surface_ : matplotlib `QuadContourSet` or `QuadMesh` or list of such objects
If `plot_method` is 'contour' or 'contourf', `surface_` is
:class:`QuadContourSet <matplotlib.contour.QuadContourSet>`. If
`plot_method` is 'pcolormesh', `surface_` is
:class:`QuadMesh <matplotlib.collections.QuadMesh>`.
multiclass_colors_ : array of shape (n_classes, 4)
Colors used to plot each class in multiclass problems.
Only defined when `n_classes` > 2.
.. versionadded:: 1.7
ax_ : matplotlib Axes
Axes with decision boundary.
figure_ : matplotlib Figure
Figure containing the decision boundary.
See Also
--------
DecisionBoundaryDisplay.from_estimator : Plot decision boundary given an estimator.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sklearn.datasets import load_iris
>>> from sklearn.inspection import DecisionBoundaryDisplay
>>> from sklearn.tree import DecisionTreeClassifier
>>> iris = load_iris()
>>> feature_1, feature_2 = np.meshgrid(
... np.linspace(iris.data[:, 0].min(), iris.data[:, 0].max()),
... np.linspace(iris.data[:, 1].min(), iris.data[:, 1].max())
... )
>>> grid = np.vstack([feature_1.ravel(), feature_2.ravel()]).T
>>> tree = DecisionTreeClassifier().fit(iris.data[:, :2], iris.target)
>>> y_pred = np.reshape(tree.predict(grid), feature_1.shape)
>>> display = DecisionBoundaryDisplay(
... xx0=feature_1, xx1=feature_2, n_classes=len(tree.classes_), response=y_pred
... )
>>> display.plot()
<...>
>>> display.ax_.scatter(
... iris.data[:, 0],
... iris.data[:, 1],
... c=iris.target,
... cmap=mpl.colors.ListedColormap(display.multiclass_colors_),
... edgecolor="black"
... )
<...>
>>> plt.show()
"""
def __init__(
self,
*,
xx0,
xx1,
n_classes,
response,
multiclass_colors=None,
xlabel=None,
ylabel=None,
):
self.xx0 = xx0
self.xx1 = xx1
self.n_classes = n_classes
self.response = response
self.multiclass_colors = multiclass_colors
self.xlabel = xlabel
self.ylabel = ylabel
    def plot(self, plot_method="contourf", ax=None, xlabel=None, ylabel=None, **kwargs):
        """Plot visualization.
        Parameters
        ----------
        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.
        xlabel : str, default=None
            Overwrite the x-axis label.
        ylabel : str, default=None
            Overwrite the y-axis label.
        **kwargs : dict
            Additional keyword arguments to be passed to the `plot_method`. For
            :term:`binary` problems, `cmap` or `colors` can be set here to specify the
            colormap or colors, otherwise the default colormap ('viridis') is used.
        Returns
        -------
        display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores computed values.
        """
        check_matplotlib_support("DecisionBoundaryDisplay.plot")
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        if plot_method not in ("contourf", "contour", "pcolormesh"):
            raise ValueError(
                "plot_method must be 'contourf', 'contour', or 'pcolormesh'. "
                f"Got {plot_method} instead."
            )
        if ax is None:
            _, ax = plt.subplots()
        plot_func = getattr(ax, plot_method)
        if self.n_classes == 2:
            # Binary case: a single response surface; any user-supplied
            # cmap/colors kwargs are honored as-is.
            self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs)
        else:  # multiclass
            # Class colors are controlled by `multiclass_colors` here, so
            # cmap/colors kwargs would conflict: warn and drop them.
            for kwarg in ("cmap", "colors"):
                if kwarg in kwargs:
                    warnings.warn(
                        f"'{kwarg}' is ignored in favor of 'multiclass_colors' "
                        "in the multiclass case."
                    )
                    del kwargs[kwarg]
            if self.multiclass_colors is None or isinstance(
                self.multiclass_colors, str
            ):
                if self.multiclass_colors is None:
                    # Default: qualitative tab10 when it has enough entries,
                    # otherwise a continuous rainbow colormap.
                    cmap = "tab10" if self.n_classes <= 10 else "gist_rainbow"
                else:
                    cmap = self.multiclass_colors
                # Special case for the tab10 and tab20 colormaps that encode a
                # discrete set of colors that are easily distinguishable
                # contrary to other colormaps that are continuous.
                if cmap == "tab10" and self.n_classes <= 10:
                    colors = plt.get_cmap("tab10", 10).colors[: self.n_classes]
                elif cmap == "tab20" and self.n_classes <= 20:
                    colors = plt.get_cmap("tab20", 20).colors[: self.n_classes]
                else:
                    cmap = plt.get_cmap(cmap, self.n_classes)
                    if not hasattr(cmap, "colors"):
                        # Get `LinearSegmentedColormap` for non-qualitative cmaps
                        colors = cmap(np.linspace(0, 1, self.n_classes))
                    else:
                        colors = cmap.colors
            elif isinstance(self.multiclass_colors, list):
                # Explicit per-class color list; normalize to RGBA tuples.
                colors = [mpl.colors.to_rgba(color) for color in self.multiclass_colors]
            else:
                raise ValueError("'multiclass_colors' must be a list or a str.")
            self.multiclass_colors_ = colors
            if self.response.ndim == 2:  # predict
                # `pcolormesh` requires cmap, for the others it makes no difference
                cmap = mpl.colors.ListedColormap(colors)
                self.surface_ = plot_func(
                    self.xx0, self.xx1, self.response, cmap=cmap, **kwargs
                )
            # predict_proba and decision_function differ for plotting methods
            elif plot_method == "contour":
                # Plot only integer class values
                self.surface_ = plot_func(
                    self.xx0,
                    self.xx1,
                    self.response.argmax(axis=2),
                    colors=colors,
                    **kwargs,
                )
            else:
                # Build one white-to-color colormap per class so each class
                # region is shaded in its own hue.
                multiclass_cmaps = [
                    mpl.colors.LinearSegmentedColormap.from_list(
                        f"colormap_{class_idx}",
                        [(1.0, 1.0, 1.0, 1.0), (r, g, b, 1.0)],
                    )
                    for class_idx, (r, g, b, _) in enumerate(colors)
                ]
                self.surface_ = []
                for class_idx, cmap in enumerate(multiclass_cmaps):
                    # Mask grid points where this class is not the argmax so
                    # each surface covers only its own decision region.
                    response = np.ma.array(
                        self.response[:, :, class_idx],
                        mask=(self.response.argmax(axis=2) != class_idx),
                    )
                    self.surface_.append(
                        plot_func(self.xx0, self.xx1, response, cmap=cmap, **kwargs)
                    )
        # Only overwrite existing axis labels when explicitly requested.
        if xlabel is not None or not ax.get_xlabel():
            xlabel = self.xlabel if xlabel is None else xlabel
            ax.set_xlabel(xlabel)
        if ylabel is not None or not ax.get_ylabel():
            ylabel = self.ylabel if ylabel is None else ylabel
            ax.set_ylabel(ylabel)
        self.ax_ = ax
        self.figure_ = ax.figure
        return self
    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        *,
        grid_resolution=100,
        eps=1.0,
        plot_method="contourf",
        response_method="auto",
        class_of_interest=None,
        multiclass_colors=None,
        xlabel=None,
        ylabel=None,
        ax=None,
        **kwargs,
    ):
        """Plot decision boundary given an estimator.
        Read more in the :ref:`User Guide <visualizations>`.
        Parameters
        ----------
        estimator : object
            Trained estimator used to plot the decision boundary.
        X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
            Input data that should be only 2-dimensional.
        grid_resolution : int, default=100
            Number of grid points to use for plotting decision boundary.
            Higher values will make the plot look nicer but be slower to
            render.
        eps : float, default=1.0
            Extends the minimum and maximum values of X for evaluating the
            response function.
        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.
        response_method : {'auto', 'decision_function', 'predict_proba', \
                'predict'}, default='auto'
            Specifies whether to use :term:`decision_function`,
            :term:`predict_proba` or :term:`predict` as the target response.
            If set to 'auto', the response method is tried in the order as
            listed above.
            .. versionchanged:: 1.6
                For multiclass problems, 'auto' no longer defaults to 'predict'.
        class_of_interest : int, float, bool or str, default=None
            The class to be plotted. For :term:`binary` classifiers, if None,
            `estimator.classes_[1]` is considered the positive class. For
            :term:`multiclass` classifiers, if None, all classes will be represented in
            the decision boundary plot; when `response_method` is :term:`predict_proba`
            or :term:`decision_function`, the class with the highest response value
            at each point is plotted. The color of each class can be set via
            `multiclass_colors`.
            .. versionadded:: 1.4
        multiclass_colors : list of str, or str, default=None
            Specifies how to color each class when plotting :term:`multiclass` problems
            and `class_of_interest` is None.
            Possible inputs are:
            * list: list of Matplotlib
              `color <https://matplotlib.org/stable/users/explain/colors/colors.html#colors-def>`_
              strings, of length `n_classes`
            * str: name of :class:`matplotlib.colors.Colormap`
            * None: 'tab10' colormap is used to sample colors if the number of
              classes is less than or equal to 10, otherwise 'gist_rainbow'
              colormap.
            Single color (fading to white) colormaps will be generated from the colors
            in the list or colors taken from the colormap, and passed to the `cmap`
            parameter of the `plot_method`.
            For :term:`binary` problems, this is ignored and `cmap` or `colors` can be
            passed as kwargs instead, otherwise, the default colormap ('viridis') is
            used.
            .. versionadded:: 1.7
            .. versionchanged:: 1.9
                `multiclass_colors` is now also used when `response_method="predict"`
        xlabel : str, default=None
            The label used for the x-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.
        ylabel : str, default=None
            The label used for the y-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.
        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.
        **kwargs : dict
            Additional keyword arguments to be passed to the `plot_method`.
        Returns
        -------
        display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores the result.
        See Also
        --------
        DecisionBoundaryDisplay : Decision boundary visualization.
        sklearn.metrics.ConfusionMatrixDisplay.from_estimator : Plot the
            confusion matrix given an estimator, the data, and the label.
        sklearn.metrics.ConfusionMatrixDisplay.from_predictions : Plot the
            confusion matrix given the true and predicted labels.
        Examples
        --------
        >>> import matplotlib as mpl
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.inspection import DecisionBoundaryDisplay
        >>> iris = load_iris()
        >>> X = iris.data[:, :2]
        >>> classifier = LogisticRegression().fit(X, iris.target)
        >>> disp = DecisionBoundaryDisplay.from_estimator(
        ...     classifier, X, response_method="predict",
        ...     xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
        ...     alpha=0.5,
        ... )
        >>> cmap = mpl.colors.ListedColormap(disp.multiclass_colors_)
        >>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k", cmap=cmap)
        <...>
        >>> plt.show()
        """
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        check_is_fitted(estimator)
        import matplotlib as mpl
        # ---- validate scalar options --------------------------------------
        if not grid_resolution > 1:
            raise ValueError(
                "grid_resolution must be greater than 1. Got"
                f" {grid_resolution} instead."
            )
        if not eps >= 0:
            raise ValueError(
                f"eps must be greater than or equal to 0. Got {eps} instead."
            )
        possible_plot_methods = ("contourf", "contour", "pcolormesh")
        if plot_method not in possible_plot_methods:
            available_methods = ", ".join(possible_plot_methods)
            raise ValueError(
                f"plot_method must be one of {available_methods}. "
                f"Got {plot_method} instead."
            )
        num_features = _num_features(X)
        if num_features != 2:
            raise ValueError(
                f"n_features must be equal to 2. Got {num_features} instead."
            )
        # ---- validate multiclass_colors early (before any plotting work) --
        # Only relevant for multiclass classifiers (> 2 classes) with a
        # non-predict-only response method.
        if (
            response_method in ("predict_proba", "decision_function", "auto")
            and multiclass_colors is not None
            and hasattr(estimator, "classes_")
            and (n_classes := len(estimator.classes_)) > 2
        ):
            if isinstance(multiclass_colors, list):
                if len(multiclass_colors) != n_classes:
                    raise ValueError(
                        "When 'multiclass_colors' is a list, it must be of the same "
                        f"length as 'estimator.classes_' ({n_classes}), got: "
                        f"{len(multiclass_colors)}."
                    )
                elif any(
                    not mpl.colors.is_color_like(col) for col in multiclass_colors
                ):
                    raise ValueError(
                        "When 'multiclass_colors' is a list, it can only contain valid"
                        f" Matplotlib color names. Got: {multiclass_colors}"
                    )
            if isinstance(multiclass_colors, str):
                if multiclass_colors not in mpl.pyplot.colormaps():
                    raise ValueError(
                        "When 'multiclass_colors' is a string, it must be a valid "
                        f"Matplotlib colormap. Got: {multiclass_colors}"
                    )
        # ---- build the evaluation grid over the (padded) data range -------
        x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1)
        x0_min, x0_max = x0.min() - eps, x0.max() + eps
        x1_min, x1_max = x1.min() - eps, x1.max() + eps
        xx0, xx1 = np.meshgrid(
            np.linspace(x0_min, x0_max, grid_resolution),
            np.linspace(x1_min, x1_max, grid_resolution),
        )
        X_grid = np.c_[xx0.ravel(), xx1.ravel()]
        if is_pandas_df(X) or is_polars_df(X):
            # Preserve the dataframe container (and column names) so that
            # estimators fitted on dataframes do not warn about feature names.
            adapter = _get_adapter_from_container(X)
            X_grid = adapter.create_container(
                X_grid,
                X_grid,
                columns=X.columns,
            )
        # ---- evaluate the response on the grid -----------------------------
        prediction_method = _check_boundary_response_method(estimator, response_method)
        try:
            response, _, response_method_used = _get_response_values(
                estimator,
                X_grid,
                response_method=prediction_method,
                pos_label=class_of_interest,
                return_response_method_used=True,
            )
        except ValueError as exc:
            if "is not a valid label" in str(exc):
                # re-raise a more informative error message since `pos_label` is unknown
                # to our user when interacting with
                # `DecisionBoundaryDisplay.from_estimator`
                raise ValueError(
                    # Note: it is ok to use estimator.classes_ here, as this error will
                    # only be thrown if estimator is a classifier
                    f"class_of_interest={class_of_interest} is not a valid label: It "
                    f"should be one of {estimator.classes_}"
                ) from exc
            raise
        # convert classes predictions into integers
        if response_method_used == "predict" and hasattr(estimator, "classes_"):
            encoder = LabelEncoder()
            encoder.classes_ = estimator.classes_
            response = encoder.transform(response)
        # infer n_classes from the estimator
        if (
            class_of_interest is not None
            or is_regressor(estimator)
            or is_outlier_detector(estimator)
        ):
            # A single response surface is plotted, which is handled the same
            # way as the binary case.
            n_classes = 2
        elif is_classifier(estimator) and hasattr(estimator, "classes_"):
            n_classes = len(estimator.classes_)
        elif is_clusterer(estimator) and hasattr(estimator, "labels_"):
            n_classes = len(np.unique(estimator.labels_))
        else:
            # Fall back to inspecting the computed response itself.
            target_type = type_of_target(response)
            if target_type in ("binary", "continuous"):
                n_classes = 2
            elif target_type == "multiclass":
                n_classes = len(np.unique(response))
            else:
                raise ValueError(
                    "Number of classes or labels cannot be inferred from "
                    f"{estimator.__class__.__name__}. Please make sure your estimator "
                    "follows scikit-learn's estimator API as described here: "
                    "https://scikit-learn.org/stable/developers/develop.html#rolling-your-own-estimator"
                )
        # ---- reshape the flat response back onto the grid ------------------
        if response.ndim == 1:
            response = response.reshape(*xx0.shape)
        else:
            if is_regressor(estimator):
                raise ValueError("Multi-output regressors are not supported")
            if class_of_interest is not None:
                # For the multiclass case, `_get_response_values` returns the response
                # as-is. Thus, we have a column per class and we need to select the
                # column corresponding to the positive class.
                col_idx = np.flatnonzero(estimator.classes_ == class_of_interest)[0]
                response = response[:, col_idx].reshape(*xx0.shape)
            else:
                response = response.reshape(*xx0.shape, response.shape[-1])
        # Default axis labels from dataframe column names when available.
        if xlabel is None:
            xlabel = X.columns[0] if hasattr(X, "columns") else ""
        if ylabel is None:
            ylabel = X.columns[1] if hasattr(X, "columns") else ""
        display = cls(
            xx0=xx0,
            xx1=xx1,
            n_classes=n_classes,
            response=response,
            multiclass_colors=multiclass_colors,
            xlabel=xlabel,
            ylabel=ylabel,
        )
        return display.plot(ax=ax, plot_method=plot_method, **kwargs)
|
python
|
github
|
https://github.com/scikit-learn/scikit-learn
|
sklearn/inspection/_plot/decision_boundary.py
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Serial port support for Windows.
Requires PySerial and pywin32.
"""
from __future__ import division, absolute_import
# system imports
from serial import PARITY_NONE
from serial import STOPBITS_ONE
from serial import EIGHTBITS
import win32file, win32event
# twisted imports
from twisted.internet import abstract
# sibling imports
from twisted.internet.serialport import BaseSerialPort
class SerialPort(BaseSerialPort, abstract.FileDescriptor):
    """A serial device, acting as a transport, that uses a win32 event."""
    connected = 1
    def __init__(self, protocol, deviceNameOrPortNumber, reactor,
                 baudrate=9600, bytesize=EIGHTBITS, parity=PARITY_NONE,
                 stopbits=STOPBITS_ONE, xonxoff=0, rtscts=0):
        """
        Open the serial device and register overlapped-I/O events with the
        reactor.
        @param protocol: protocol that will receive C{dataReceived} calls.
        @param deviceNameOrPortNumber: device name or port number understood
            by PySerial.
        @param reactor: a win32 event reactor (must support C{addEvent}).
        """
        self._serial = self._serialFactory(
            deviceNameOrPortNumber, baudrate=baudrate, bytesize=bytesize,
            parity=parity, stopbits=stopbits, timeout=None,
            xonxoff=xonxoff, rtscts=rtscts)
        self.flushInput()
        self.flushOutput()
        self.reactor = reactor
        # BUG FIX: the original assigned self.protocol twice; the redundant
        # second assignment has been removed.
        self.protocol = protocol
        self.outQueue = []
        self.closed = 0
        self.closedNotifies = 0
        self.writeInProgress = 0
        # Manual-reset event for reads (serialReadEvent resets it itself
        # after consuming the completed read).
        self._overlappedRead = win32file.OVERLAPPED()
        self._overlappedRead.hEvent = win32event.CreateEvent(None, 1, 0, None)
        # Auto-reset event for writes.
        self._overlappedWrite = win32file.OVERLAPPED()
        self._overlappedWrite.hEvent = win32event.CreateEvent(None, 0, 0, None)
        self.reactor.addEvent(self._overlappedRead.hEvent, self, 'serialReadEvent')
        self.reactor.addEvent(self._overlappedWrite.hEvent, self, 'serialWriteEvent')
        self.protocol.makeConnection(self)
        self._finishPortSetup()
    def _finishPortSetup(self):
        """
        Finish setting up the serial port.
        This is a separate method to facilitate testing.
        """
        # Kick off the initial 1-byte overlapped read; its completion fires
        # serialReadEvent through the event registered in __init__.
        flags, comstat = win32file.ClearCommError(self._serial.hComPort)
        rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
                                               win32file.AllocateReadBuffer(1),
                                               self._overlappedRead)
    def serialReadEvent(self):
        """Handle completion of the pending overlapped read."""
        # get that character we set up
        n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 0)
        if n:
            # NOTE(review): str() on the read buffer assumes Python 2 byte
            # string semantics; on Python 3 this would yield "b'...'" reprs
            # -- confirm the target interpreter.
            first = str(self.read_buf[:n])
            # now we should get everything that is already in the buffer
            flags, comstat = win32file.ClearCommError(self._serial.hComPort)
            if comstat.cbInQue:
                win32event.ResetEvent(self._overlappedRead.hEvent)
                rc, buf = win32file.ReadFile(self._serial.hComPort,
                                             win32file.AllocateReadBuffer(comstat.cbInQue),
                                             self._overlappedRead)
                n = win32file.GetOverlappedResult(self._serial.hComPort, self._overlappedRead, 1)
                # handle all the received data:
                self.protocol.dataReceived(first + str(buf[:n]))
            else:
                # handle all the received data:
                self.protocol.dataReceived(first)
        # set up the next 1-byte read
        win32event.ResetEvent(self._overlappedRead.hEvent)
        rc, self.read_buf = win32file.ReadFile(self._serial.hComPort,
                                               win32file.AllocateReadBuffer(1),
                                               self._overlappedRead)
    def write(self, data):
        """Write data to the port, queueing it while a write is in flight."""
        if data:
            if self.writeInProgress:
                self.outQueue.append(data)
            else:
                self.writeInProgress = 1
                win32file.WriteFile(self._serial.hComPort, data, self._overlappedWrite)
    def serialWriteEvent(self):
        """A queued overlapped write completed: start the next one, if any."""
        try:
            dataToWrite = self.outQueue.pop(0)
        except IndexError:
            self.writeInProgress = 0
            return
        else:
            win32file.WriteFile(self._serial.hComPort, dataToWrite, self._overlappedWrite)
    def connectionLost(self, reason):
        """
        Called when the serial port disconnects.
        Will call C{connectionLost} on the protocol that is handling the
        serial data.
        """
        self.reactor.removeEvent(self._overlappedRead.hEvent)
        self.reactor.removeEvent(self._overlappedWrite.hEvent)
        abstract.FileDescriptor.connectionLost(self, reason)
        self._serial.close()
        self.protocol.connectionLost(reason)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
from unittest import TestCase
from prospector.profiles.profile import ProspectorProfile
class ProfileTestBase(TestCase):
    """Base class providing the standard profile search path for the tests."""
    def setUp(self):
        # Search both the test fixture profiles and prospector's built-ins.
        self._profile_path = [
            os.path.join(os.path.dirname(__file__), 'profiles'),
            os.path.join(os.path.dirname(__file__), '../../prospector/profiles/profiles')
        ]
    def _file_content(self, name):
        """Return the contents of profile file *name*, searching each
        directory on the profile path in order.
        BUG FIX: `self._profile_path` is a *list* of directories, so the
        original `os.path.join(self._profile_path, name)` always raised
        TypeError. Search each directory instead.
        """
        for directory in self._profile_path:
            path = os.path.join(directory, name)
            if os.path.exists(path):
                with open(path) as f:
                    return f.read()
        raise IOError('Could not find profile file %s' % name)
class TestProfileParsing(ProfileTestBase):
    """Parsing behaviour of individual (non-inherited) profiles."""
    def test_empty_disable_list(self):
        """
        This test verifies that a profile can still be loaded if it contains
        an empty 'pylint.disable' list
        """
        profile = ProspectorProfile.load('empty_disable_list', self._profile_path, allow_shorthand=False)
        self.assertEqual([], profile.pylint['disable'])
    def test_empty_profile(self):
        """
        Verifies that a completely empty profile can still be parsed and have
        default values
        """
        profile = ProspectorProfile.load('empty_profile', self._profile_path, allow_shorthand=False)
        self.assertEqual([], profile.pylint['disable'])
    def test_ignores(self):
        profile = ProspectorProfile.load('ignores', self._profile_path)
        # BUG FIX: list.sort() returns None, so the original assertion
        # compared None == None and could never fail. Compare sorted copies
        # of the actual contents instead.
        self.assertEqual(sorted(['^tests/', '/migrations/']),
                         sorted(profile.ignore_patterns))
    def test_disable_tool(self):
        profile = ProspectorProfile.load('pylint_disabled', self._profile_path)
        self.assertFalse(profile.is_tool_enabled('pylint'))
        # A tool that is not mentioned at all reports None (not False).
        self.assertIsNone(profile.is_tool_enabled('pep8'))
class TestProfileInheritance(ProfileTestBase):
    """Resolution of profiles that inherit from other profiles."""
    def _example_path(self, testname):
        """Directory of a named inheritance fixture."""
        base = os.path.dirname(__file__)
        return os.path.join(base, 'profiles', 'inheritance', testname)
    def _load(self, testname):
        """Load the 'start' profile from the given inheritance fixture."""
        search_path = self._profile_path + [self._example_path(testname)]
        return ProspectorProfile.load('start', search_path)
    def test_simple_inheritance(self):
        profile = ProspectorProfile.load(
            'inherittest3', self._profile_path, allow_shorthand=False)
        disabled = profile.pylint['disable']
        disabled.sort()
        self.assertEqual(['I0002', 'I0003', 'raw-checker-failed'], disabled)
    def test_disable_tool_inheritance(self):
        profile = ProspectorProfile.load('pep8_and_pylint_disabled', self._profile_path)
        for tool in ('pylint', 'pep8'):
            self.assertFalse(profile.is_tool_enabled(tool))
    def test_precedence(self):
        profile = self._load('precedence')
        self.assertTrue(profile.is_tool_enabled('pylint'))
        self.assertIn('expression-not-assigned',
                      profile.get_disabled_messages('pylint'))
    def test_strictness_equivalence(self):
        profile = self._load('strictness_equivalence')
        medium = ProspectorProfile.load('strictness_medium', self._profile_path)
        self.assertListEqual(sorted(profile.pylint['disable']),
                             sorted(medium.pylint['disable']))
    def test_shorthand_inheritance(self):
        profile = self._load('shorthand_inheritance')
        high = ProspectorProfile.load(
            'strictness_high', self._profile_path,
            # don't implicitly add things
            allow_shorthand=False,
            # but do include the profiles that the start.yaml will
            forced_inherits=['doc_warnings', 'no_member_warnings'],
        )
        for tool in ('pylint', 'pep8', 'pyflakes'):
            self.assertDictEqual(getattr(profile, tool), getattr(high, tool))
    def test_pep8_inheritance(self):
        profile = self._load('pep8')
        self.assertTrue(profile.is_tool_enabled('pep8'))
|
unknown
|
codeparrot/codeparrot-clean
| ||
import logging
from datetime import datetime
from modularodm import Q
from framework.auth import User
from framework.celery_tasks import app as celery_app
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website import mails, settings
from scripts.utils import add_file_logger
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main(dry_run=True):
    """Queue a NO_LOGIN reminder email for every eligible inactive user.

    When ``dry_run`` is true, only log what would be queued.
    """
    for user in find_inactive_users_with_no_inactivity_email_sent_or_queued():
        if dry_run:
            # `logger.warn` is a deprecated alias of `logger.warning`.
            logger.warning('Dry run mode')
            logger.warning('Email of type no_login queued to {0}'.format(user.username))
        else:
            with TokuTransaction():
                mails.queue_mail(
                    to_addr=user.username,
                    mail=mails.NO_LOGIN,
                    send_at=datetime.utcnow(),
                    user=user,
                    fullname=user.fullname,
                )
def find_inactive_users_with_no_inactivity_email_sent_or_queued():
    """Return active users past the inactivity threshold who have not yet
    been sent (or queued for) a NO_LOGIN email."""
    # NOTE(review): Q('osf4m', 'ne', 'system_tags') reads as
    # field='osf4m', value='system_tags'; confirm the field/value order
    # against the modular-odm schema.
    inactive_users = User.find(
        (Q('date_last_login', 'lt', datetime.utcnow() - settings.NO_LOGIN_WAIT_TIME) & Q('osf4m', 'ne', 'system_tags')) |
        (Q('date_last_login', 'lt', datetime.utcnow() - settings.NO_LOGIN_OSF4M_WAIT_TIME) & Q('osf4m', 'eq', 'system_tags'))
    )
    queued_emails = mails.QueuedMail.find(Q('email_type', 'eq', mails.NO_LOGIN_TYPE))
    # Compare _id values rather than User objects: equality on datetime
    # fields fails due to pymongo rounding.
    already_contacted = {email.user._id for email in queued_emails}
    eligible_ids = {user._id for user in inactive_users if user.is_active}
    return [User.load(user_id) for user_id in eligible_ids - already_contacted]
@celery_app.task(name='scripts.triggered_mails')
def run_main(dry_run=True):
    # Celery entry point: initialize the app (no web routes needed),
    # attach a file logger only for real runs, then delegate to main().
    init_app(routes=False)
    if not dry_run:
        add_file_logger(logger, __file__)
    main(dry_run=dry_run)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
from flask import (Blueprint, request, render_template, flash, url_for,
redirect, session)
from flask.ext.login import login_required
from kraken.grade.models import Grade
import json
from kraken.helpers.Skynet import Skynet
# Blueprint grouping all grade views under the /grades URL prefix.
blueprint = Blueprint("grade", __name__, url_prefix='/grades',
                      static_folder="../static")
@blueprint.route("/recent/")
@login_required
def recent():
    """Render the 100 most recently created grades."""
    latest = Grade.get_recent(100, "created_at DESC")
    return render_template("grades/recent.html", grades=latest)
@blueprint.route("/create/", methods=['POST'])
@login_required
def create():
    """Create a grade from the posted form fields and render the result."""
    form = request.form
    grade = Grade.create(
        start=int(form['start']),
        end=int(form['end']),
        snoozes=int(form['snooze_count']),
    )
    return render_template("grades/result.html", grade=grade)
@blueprint.route("/api/create", methods=['GET'])
def api_create():
    """Create a grade from query-string parameters.

    Returns a JSON object with a ``success`` flag; malformed or missing
    parameters produce an error payload instead of a 500 response.
    """
    print("Request :: %s" % request.args)
    args = request.args
    if not args or any(key not in args for key in ('start', 'end', 'snooze_count')):
        return json.dumps({'success': False, 'error': 'Invalid Request'})
    try:
        Grade.create(start=int(args['start']),
                     end=int(args['end']),
                     snoozes=int(args['snooze_count']))
    except ValueError:
        # ROBUSTNESS: non-integer parameters previously raised and
        # produced an unhandled 500; report a JSON error instead.
        return json.dumps({'success': False, 'error': 'Invalid Request'})
    return json.dumps({'success': True})
|
unknown
|
codeparrot/codeparrot-clean
| ||
from flask import url_for
from flask_login import current_user
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from werkzeug.exceptions import Unauthorized
from tracker.form.admin import ERROR_EMAIL_EXISTS
from tracker.form.admin import ERROR_USERNAME_EXISTS
from tracker.form.login import ERROR_ACCOUNT_DISABLED
from tracker.model.enum import UserRole
from tracker.user import random_string
from .conftest import DEFAULT_USERNAME
from .conftest import assert_logged_in
from .conftest import assert_not_logged_in
from .conftest import create_user
from .conftest import logged_in
# Fixture credentials for the secondary account these admin tests create;
# PASSWORD is regenerated on every test run.
USERNAME = 'cyberwehr87654321'
PASSWORD = random_string()
EMAIL = '{}@cyber.cyber'.format(USERNAME)
@create_user(username=USERNAME, password=PASSWORD, role=UserRole.administrator)
@logged_in
def test_delete_user(db, client):
    """Deleting a user succeeds and the deleted account can no longer log in."""
    response = client.post(url_for('tracker.delete_user', username=USERNAME),
                           follow_redirects=True, data={'confirm': 'confirm'})
    assert 200 == response.status_code
    response = client.post(url_for('tracker.logout'), follow_redirects=True)
    assert_not_logged_in(response)
    response = client.post(url_for('tracker.login'), follow_redirects=True,
                           data={'username': USERNAME, 'password': PASSWORD})
    assert_not_logged_in(response, status_code=Unauthorized.code)
@logged_in
def test_delete_last_admin_fails(db, client):
    """The last remaining administrator must not be deletable."""
    response = client.post(url_for('tracker.delete_user', username=DEFAULT_USERNAME),
                           follow_redirects=True, data={'confirm': 'confirm'})
    assert Forbidden.code == response.status_code
@logged_in
def test_delete_user_not_found(db, client):
    """Deleting an unknown username yields a 404."""
    response = client.post(url_for('tracker.delete_user', username='nobody'),
                           follow_redirects=True, data={'confirm': 'confirm'})
    assert NotFound.code == response.status_code
@create_user(username=USERNAME, password=PASSWORD, role=UserRole.administrator)
@logged_in
def test_delete_form_invalid(db, client):
    """An unconfirmed delete form redirects back to the user list."""
    response = client.post(url_for('tracker.delete_user', username=USERNAME),
                           data={})
    assert 302 == response.status_code
    assert url_for('tracker.list_user', _external=True) == response.location
@logged_in
def test_create_user(db, client):
    """An admin-created user can log in and carries the chosen role."""
    role = UserRole.security_team
    response = client.post(url_for('tracker.create_user'), follow_redirects=True,
                           data={'username': USERNAME, 'password': PASSWORD,
                                 'email': EMAIL, 'active': True, 'role': role.name})
    assert 200 == response.status_code
    response = client.post(url_for('tracker.logout'), follow_redirects=True)
    assert_not_logged_in(response)
    response = client.post(url_for('tracker.login'), follow_redirects=True,
                           data={'username': USERNAME, 'password': PASSWORD})
    assert_logged_in(response)
    assert current_user.name == USERNAME
    assert current_user.email == EMAIL
    assert current_user.role == role
@logged_in
def test_create_duplicate_user_fails(db, client):
    """Creating a user with an existing username reports a form error."""
    response = client.post(url_for('tracker.create_user'), follow_redirects=True,
                           data={'username': DEFAULT_USERNAME, 'password': PASSWORD,
                                 'email': EMAIL, 'active': True})
    assert 200 == response.status_code
    assert ERROR_USERNAME_EXISTS in response.data.decode()
@logged_in
def test_create_duplicate_email_fails(db, client):
    """Creating a user with an existing email reports a form error."""
    response = client.post(url_for('tracker.create_user'), follow_redirects=True,
                           data={'username': USERNAME, 'password': PASSWORD,
                                 'email': current_user.email, 'active': True})
    assert 200 == response.status_code
    assert ERROR_EMAIL_EXISTS in response.data.decode()
@logged_in
def test_create_incomplete_form(db, client):
    """Omitting required fields re-renders the form with validation errors."""
    response = client.post(url_for('tracker.create_user'), follow_redirects=True,
                           data={'email': EMAIL, 'active': True})
    assert 200 == response.status_code
    assert 'This field is required.' in response.data.decode()
@logged_in
def test_create_user_in_password(db, client):
    """A password containing the username is rejected."""
    response = client.post(url_for('tracker.create_user'), follow_redirects=True,
                           data={'username': USERNAME,
                                 'password': USERNAME + PASSWORD,
                                 'email': EMAIL, 'active': True})
    assert 200 == response.status_code
    assert 'Password must not contain the username.' in response.data.decode()
@create_user(username=USERNAME, password=PASSWORD)
@logged_in
def test_edit_user(db, client):
    """Editing a user updates email, password and role."""
    new_password = random_string()
    new_email = EMAIL + 'foo'
    new_role = UserRole.security_team
    response = client.post(url_for('tracker.edit_user', username=USERNAME),
                           follow_redirects=True,
                           data={'username': USERNAME, 'email': new_email,
                                 'password': new_password, 'role': new_role.name,
                                 'active': True})
    assert 200 == response.status_code
    response = client.post(url_for('tracker.logout'), follow_redirects=True)
    assert_not_logged_in(response)
    response = client.post(url_for('tracker.login'), follow_redirects=True,
                           data={'username': USERNAME, 'password': new_password})
    assert_logged_in(response)
    assert current_user.name == USERNAME
    assert current_user.email == new_email
    assert current_user.role == new_role
@create_user(username=USERNAME, password=PASSWORD)
@logged_in
def test_get_edit_user(db, client):
    """The edit form renders for an existing user."""
    response = client.get(url_for('tracker.edit_user', username=USERNAME),
                          follow_redirects=True)
    assert 200 == response.status_code
    assert 'Edit {}'.format(USERNAME) in response.data.decode()
@create_user(username=USERNAME, password=PASSWORD)
@logged_in
def test_edit_preserves_password(db, client):
    """Editing without supplying a password keeps the old password valid."""
    new_email = EMAIL + 'foo'
    response = client.post(url_for('tracker.edit_user', username=USERNAME),
                           follow_redirects=True,
                           data={'username': USERNAME, 'email': new_email,
                                 'active': True})
    assert 200 == response.status_code
    response = client.post(url_for('tracker.logout'), follow_redirects=True)
    assert_not_logged_in(response)
    response = client.post(url_for('tracker.login'), follow_redirects=True,
                           data={'username': USERNAME, 'password': PASSWORD})
    assert_logged_in(response)
    assert current_user.name == USERNAME
    assert current_user.email == new_email
@create_user(username=USERNAME, password=PASSWORD)
@logged_in
def test_deactive_user(db, client):
    """Submitting the edit form without 'active' disables the account."""
    response = client.post(url_for('tracker.edit_user', username=USERNAME),
                           follow_redirects=True,
                           data={'username': USERNAME, 'email': EMAIL,
                                 'password': PASSWORD})
    assert 200 == response.status_code
    response = client.post(url_for('tracker.logout'), follow_redirects=True)
    assert_not_logged_in(response)
    response = client.post(url_for('tracker.login'),
                           data={'username': USERNAME, 'password': PASSWORD})
    assert_not_logged_in(response, status_code=Unauthorized.code)
    assert ERROR_ACCOUNT_DISABLED in response.data.decode()
@create_user(username=USERNAME, password=PASSWORD)
@logged_in(role=UserRole.security_team)
def test_edit_requires_admin(db, client):
    """Non-administrators may not edit users."""
    response = client.post(url_for('tracker.edit_user', username=USERNAME),
                           follow_redirects=True,
                           data={'username': USERNAME, 'email': EMAIL,
                                 'password': PASSWORD})
    assert Forbidden.code == response.status_code
@create_user(username=USERNAME, password=PASSWORD)
@logged_in(role=UserRole.security_team)
def test_list_user(db, client):
    """The user list page shows existing users."""
    response = client.get(url_for('tracker.list_user'), follow_redirects=True)
    assert 200 == response.status_code
    assert USERNAME in response.data.decode()
|
unknown
|
codeparrot/codeparrot-clean
| ||
""" Thoroughly document Bokeh property attributes.
The ``bokeh-prop`` directive generates useful type information
for the property attribute, including cross links to the relevant
property types. Additionally, any per-attribute docstrings are
also displayed.
Usage
-----
This directive takes the path to an attribute on a Bokeh
model class as an argument::
.. bokeh-prop:: bokeh.sphinxext.sample.Bar.thing
Examples
--------
For the following definition of ``bokeh.sphinxext.sample.Bar``::
class Bar(PlotObject):
''' This is a Bar model. '''
thing = List(Int, help="doc for thing")
the above usage yields the output:
----
.. bokeh-prop:: bokeh.sphinxext.sample.Bar.thing
"""
from __future__ import absolute_import, print_function

import importlib
import textwrap

import jinja2
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.errors import SphinxError
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles

import bokeh.properties
from bokeh.plot_object import Viewable
# reST template rendered once per documented property attribute; `doc`
# is the property's own docstring, indented into the directive body.
PROP_TEMPLATE = jinja2.Template(u"""
.. attribute:: {{ name }}
:module: {{ module }}
*property type:* {{ type_info }}
{% if doc %}{{ doc|indent(4) }}{% endif %}
""")
# All Property subclass names exported by bokeh.properties, sorted
# longest-first so greedy replacement in _get_type_info matches e.g.
# a longer name before one of its substrings.
PROP_NAMES = [
    name for name, cls in bokeh.properties.__dict__.items()
    if isinstance(cls, type) and issubclass(cls, bokeh.properties.Property)
]
PROP_NAMES.sort(reverse=True, key=len)
class BokehPropDirective(Directive):
    """Render documentation for one property attribute of a Bokeh model.

    The single required argument is the dotted path to the attribute,
    e.g. ``bokeh.sphinxext.sample.Bar.thing``.
    """
    has_content = True
    required_arguments = 1
    def run(self):
        prop_path = self.arguments[0]
        module_path, model_name, prop_name = prop_path.rsplit('.', 2)
        try:
            module = importlib.import_module(module_path)
        except ImportError:
            # BUG FIX: the original silently passed here, which led to a
            # confusing NameError on `module` below. Fail loudly instead.
            raise SphinxError(
                "bokeh-prop: could not import module '%s'" % module_path)
        model = getattr(module, model_name, None)
        if model is None:
            # BUG FIX: previously ignored, crashing later on `model()`.
            raise SphinxError(
                "bokeh-prop: no model named '%s' in module '%s'"
                % (model_name, module_path))
        if type(model) != Viewable:
            # Viewable is used as the metaclass of Bokeh models, so this
            # verifies that `model` is a Bokeh model class.
            # BUG FIX: previously ignored (`pass`).
            raise SphinxError(
                "bokeh-prop: '%s' is not a Bokeh model class" % prop_path)
        model_obj = model()
        prop = getattr(model_obj.__class__, prop_name)
        type_info = self._get_type_info(prop)
        rst_text = PROP_TEMPLATE.render(
            name=prop_name,
            module=module_path,
            type_info=type_info,
            doc="" if prop.__doc__ is None else textwrap.dedent(prop.__doc__),
        )
        # Parse the rendered reST and return the resulting nodes.
        result = ViewList()
        for line in rst_text.split("\n"):
            result.append(line, "<bokeh-prop>")
        node = nodes.paragraph()
        node.document = self.state.document
        nested_parse_with_titles(self.state, result, node)
        return node.children
    def _get_type_info(self, prop):
        """Return str(prop) with property type names cross-linked."""
        desc = str(prop)
        template = ":class:`~bokeh.properties.%s`\ "
        # some of the property names are substrings of other property names
        # so first go through greedily replacing the longest possible match
        # with a unique id (PROP_NAMES is reverse sorted by length)
        for i, name in enumerate(PROP_NAMES):
            desc = desc.replace(name, "__ID%d" % i)
        # now replace the unique id with the corresponding prop name. Go in
        # reverse to make sure replacements are greedy
        # BUG FIX: the loop previously stopped before index 0, leaving the
        # placeholder for the longest name (__ID0) unexpanded.
        for i in range(len(PROP_NAMES) - 1, -1, -1):
            name = PROP_NAMES[i]
            desc = desc.replace("__ID%d" % i, template % name)
        return desc
def setup(app):
    """Sphinx entry point: register ``bokeh-prop`` in the Python domain."""
    app.add_directive_to_domain('py', 'bokeh-prop', BokehPropDirective)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from decimal import Decimal
from sys import float_info
from unittest import TestCase
from django.utils.numberformat import format as nformat
class TestNumberFormat(TestCase):
    """Exercise django.utils.numberformat.format (imported as nformat)."""

    def _check(self, cases):
        """Run (value, decimal_sep, kwargs, expected) tuples through nformat."""
        for value, separator, kwargs, expected in cases:
            self.assertEqual(nformat(value, separator, **kwargs), expected)

    def test_format_number(self):
        # Plain ints and floats, with and without grouping.
        self._check([
            (1234, '.', {}, '1234'),
            (1234.2, '.', {}, '1234.2'),
            (1234, '.', {'decimal_pos': 2}, '1234.00'),
            (1234, '.', {'grouping': 2, 'thousand_sep': ','}, '1234'),
            (1234, '.', {'grouping': 2, 'thousand_sep': ',',
                         'force_grouping': True}, '12,34'),
            (-1234.33, '.', {'decimal_pos': 1}, '-1234.3'),
        ])

    def test_format_string(self):
        # String inputs are formatted exactly like numeric ones.
        self._check([
            ('1234', '.', {}, '1234'),
            ('1234.2', '.', {}, '1234.2'),
            ('1234', '.', {'decimal_pos': 2}, '1234.00'),
            ('1234', '.', {'grouping': 2, 'thousand_sep': ','}, '1234'),
            ('1234', '.', {'grouping': 2, 'thousand_sep': ',',
                           'force_grouping': True}, '12,34'),
            ('-1234.33', '.', {'decimal_pos': 1}, '-1234.3'),
            ('10000', '.', {'grouping': 3, 'thousand_sep': 'comma',
                            'force_grouping': True}, '10comma000'),
        ])

    def test_large_number(self):
        # Integers near float_info.max must render exactly, with no
        # scientific notation.
        most_max = (
            '{}179769313486231570814527423731704356798070567525844996'
            '598917476803157260780028538760589558632766878171540458953'
            '514382464234321326889464182768467546703537516986049910576'
            '551282076245490090389328944075868508455133942304583236903'
            '222948165808559332123348274797826204144723168738177180919'
            '29988125040402618412485836{}'
        )
        most_max2 = (
            '{}35953862697246314162905484746340871359614113505168999'
            '31978349536063145215600570775211791172655337563430809179'
            '07028764928468642653778928365536935093407075033972099821'
            '15310256415249098018077865788815173701691026788460916647'
            '38064458963316171186642466965495956524082894463374763543'
            '61838599762500808052368249716736'
        )
        int_max = int(float_info.max)
        self._check([
            (int_max, '.', {}, most_max.format('', '8')),
            (int_max + 1, '.', {}, most_max.format('', '9')),
            (int_max * 2, '.', {}, most_max2.format('')),
            (0 - int_max, '.', {}, most_max.format('-', '8')),
            (-1 - int_max, '.', {}, most_max.format('-', '9')),
            (-2 * int_max, '.', {}, most_max2.format('-')),
        ])

    def test_decimal_numbers(self):
        self._check([
            (Decimal('1234'), '.', {}, '1234'),
            (Decimal('1234.2'), '.', {}, '1234.2'),
            (Decimal('1234'), '.', {'decimal_pos': 2}, '1234.00'),
            (Decimal('1234'), '.', {'grouping': 2, 'thousand_sep': ','}, '1234'),
            (Decimal('1234'), '.', {'grouping': 2, 'thousand_sep': ',',
                                    'force_grouping': True}, '12,34'),
            (Decimal('-1234.33'), '.', {'decimal_pos': 1}, '-1234.3'),
            (Decimal('0.00000001'), '.', {'decimal_pos': 8}, '0.00000001'),
        ])

    def test_decimal_subclass(self):
        class EuroDecimal(Decimal):
            """
            Wrapper for Decimal which prefixes each amount with the € symbol.
            """
            def __format__(self, specifier, **kwargs):
                amount = super().__format__(specifier, **kwargs)
                return '€ {}'.format(amount)

        price = EuroDecimal('1.23')
        self.assertEqual(nformat(price, ','), '€ 1,23')
|
unknown
|
codeparrot/codeparrot-clean
| ||
//// [tests/cases/compiler/blockScopedClassDeclarationAcrossFiles.ts] ////
//// [c.ts]
let foo: typeof C;
//// [b.ts]
class C { }
//// [foo.js]
"use strict";
let foo;
class C {
}
|
javascript
|
github
|
https://github.com/microsoft/TypeScript
|
tests/baselines/reference/blockScopedClassDeclarationAcrossFiles.js
|
#
# Copyright (c) 2006, 2007 Canonical
#
# Written by Gustavo Niemeyer <gustavo@niemeyer.net>
#
# This file is part of Storm Object Relational Mapper.
#
# Storm is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Storm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""A SQLObject emulation layer for Storm.
L{SQLObjectBase} is the central point of compatibility.
"""
import re
from storm.properties import (
RawStr, Int, Bool, Float, DateTime, Date, TimeDelta)
from storm.references import Reference, ReferenceSet
from storm.properties import SimpleProperty, PropertyPublisherMeta
from storm.variables import Variable
from storm.exceptions import StormError
from storm.info import get_cls_info
from storm.store import Store
from storm.base import Storm
from storm.expr import SQL, SQLRaw, Desc, And, Or, Not, In, Like
from storm.tz import tzutc
from storm import Undef
__all__ = ["SQLObjectBase", "StringCol", "IntCol", "BoolCol", "FloatCol",
           "DateCol", "UtcDateTimeCol", "IntervalCol", "ForeignKey",
           "SQLMultipleJoin", "SQLRelatedJoin", "DESC", "AND", "OR",
           "NOT", "IN", "LIKE", "SQLConstant", "SQLObjectNotFound",
           "CONTAINSSTRING"]

# SQLObject-style uppercase aliases for Storm's expression constructors.
DESC, AND, OR, NOT, IN, LIKE, SQLConstant = Desc, And, Or, Not, In, Like, SQL

# Sentinel default for SQLObject parameters this layer accepts but ignores.
_IGNORED = object()
class SQLObjectNotFound(StormError):
    """Raised when a get()/by<Column>() lookup matches no row."""
    pass
class SQLObjectStyle(object):
    """SQLObject naming conventions: mixedCase <-> under_score mapping.

    When ``longID`` is set the primary key column is named
    ``<table>_id`` instead of plain ``id``.
    """

    longID = False

    def idForTable(self, table_name):
        # Primary-key column name for a table.
        if self.longID:
            return self.tableReference(table_name)
        return 'id'

    def pythonClassToAttr(self, class_name):
        return self._lowerword(class_name)

    def instanceAttrToIDAttr(self, attr_name):
        return attr_name + "ID"

    def pythonAttrToDBColumn(self, attr_name):
        return self._mixed_to_under(attr_name)

    def dbColumnToPythonAttr(self, column_name):
        return self._under_to_mixed(column_name)

    def pythonClassToDBTable(self, class_name):
        head, tail = class_name[0], class_name[1:]
        return head.lower() + self._mixed_to_under(tail)

    def dbTableToPythonClass(self, table_name):
        head, tail = table_name[0], table_name[1:]
        return head.upper() + self._under_to_mixed(tail)

    def pythonClassToDBTableReference(self, class_name):
        return self.tableReference(self.pythonClassToDBTable(class_name))

    def tableReference(self, table_name):
        return table_name + "_id"

    def _mixed_to_under(self, name, _re=re.compile(r'[A-Z]+')):
        # A trailing 'ID' becomes a '_id' suffix before the general rule.
        if name.endswith('ID'):
            return self._mixed_to_under(name[:-2] + "_id")
        converted = _re.sub(self._mixed_to_under_sub, name)
        return converted[1:] if converted.startswith('_') else converted

    def _mixed_to_under_sub(self, match):
        lowered = match.group(0).lower()
        if len(lowered) == 1:
            return '_' + lowered
        # Runs of capitals split before their last letter: 'ABCd' -> '_ab_c'.
        return '_%s_%s' % (lowered[:-1], lowered[-1])

    def _under_to_mixed(self, name, _re=re.compile('_.')):
        if name.endswith('_id'):
            return self._under_to_mixed(name[:-3] + "ID")
        return _re.sub(self._under_to_mixed_sub, name)

    def _under_to_mixed_sub(self, match):
        return match.group(0)[1].upper()

    @staticmethod
    def _capword(s):
        return s[0].upper() + s[1:]

    @staticmethod
    def _lowerword(s):
        return s[0].lower() + s[1:]
class SQLObjectMeta(PropertyPublisherMeta):
    """Metaclass that translates SQLObject-style class declarations into
    Storm machinery: derives the table name, expands ForeignKey markers
    into Int + Reference pairs, synthesizes by<Column>() lookups, and
    installs the primary-key property.  (Python 2 code: uses the
    ``unicode`` builtin and ``func.func_name``.)"""

    @staticmethod
    def _get_attr(attr, bases, dict):
        # Look up attr in the class dict first, then along the bases.
        value = dict.get(attr)
        if value is None:
            for base in bases:
                value = getattr(base, attr, None)
                if value is not None:
                    break
        return value

    def __new__(cls, name, bases, dict):
        if Storm in bases or SQLObjectBase in bases:
            # Do not parse abstract base classes.
            return type.__new__(cls, name, bases, dict)
        style = cls._get_attr("_style", bases, dict)
        if style is None:
            dict["_style"] = style = SQLObjectStyle()
        table_name = cls._get_attr("_table", bases, dict)
        if table_name is None:
            table_name = style.pythonClassToDBTable(name)
        id_name = cls._get_attr("_idName", bases, dict)
        if id_name is None:
            id_name = style.idForTable(table_name)
        # Handle this later to call _parse_orderBy() on the created class.
        default_order = cls._get_attr("_defaultOrder", bases, dict)
        dict["__storm_table__"] = table_name
        # Map SQLObject attribute names to the names of the real
        # properties backing them (differs only for ForeignKey attrs).
        attr_to_prop = {}
        for attr, prop in dict.items():
            attr_to_prop[attr] = attr
            if isinstance(prop, ForeignKey):
                # Replace the marker with an Int column plus a Reference.
                db_name = prop.kwargs.get("dbName", attr)
                local_prop_name = style.instanceAttrToIDAttr(attr)
                dict[local_prop_name] = local_prop = Int(db_name)
                dict[attr] = Reference(local_prop,
                                       "%s.<primary key>" % prop.foreignKey)
                attr_to_prop[attr] = local_prop_name
            elif isinstance(prop, PropertyAdapter):
                db_name = prop.dbName or attr
                method_name = prop.alternateMethodName
                if method_name is None and prop.alternateID:
                    # SQLObject convention: alternateID yields byColumnName().
                    method_name = "by" + db_name[0].upper() + db_name[1:]
                if method_name is not None:
                    def func(cls, key, attr=attr):
                        store = cls._get_store()
                        obj = store.find(cls, getattr(cls, attr) == key).one()
                        if obj is None:
                            raise SQLObjectNotFound
                        return obj
                    func.func_name = method_name
                    dict[method_name] = classmethod(func)
        id_type = dict.get("_idType", int)
        id_cls = {int: Int, str: RawStr, unicode: AutoUnicode}[id_type]
        dict[id_name] = id_cls(primary=True)
        # Notice that obj is the class since this is the metaclass.
        obj = super(SQLObjectMeta, cls).__new__(cls, name, bases, dict)
        property_registry = obj._storm_property_registry
        property_registry.add_property(obj, getattr(obj, id_name),
                                       "<primary key>")
        # Register the SQLObject-visible names so queries can use them.
        for fake_name, real_name in attr_to_prop.items():
            prop = getattr(obj, real_name)
            if fake_name != real_name:
                property_registry.add_property(obj, prop, fake_name)
            attr_to_prop[fake_name] = prop
        obj._attr_to_prop = attr_to_prop
        if default_order is not None:
            cls_info = get_cls_info(obj)
            cls_info.default_order = obj._parse_orderBy(default_order)
        return obj
class DotQ(object):
    """A descriptor that mimics the SQLObject 'Table.q' syntax."""

    def __get__(self, instance, owner=None):
        # Bound to the class, never the instance, so Cls.q works directly.
        return BoundDotQ(owner)
class BoundDotQ(object):
    """Per-class helper returned by DotQ; resolves column attributes.

    ``q.id`` maps to the class's primary-key column; every other
    attribute is delegated to the wrapped class.  Dunder names raise
    AttributeError so protocol probes don't hit the database metadata.
    """

    def __init__(self, cls):
        self._cls = cls

    def __getattr__(self, attr):
        if attr.startswith('__'):
            raise AttributeError(attr)
        if attr == 'id':
            return get_cls_info(self._cls).primary_key[0]
        return getattr(self._cls, attr)
class SQLObjectBase(Storm):
    """The root class of all SQLObject-emulating classes in your application.
    The general strategy for using Storm's SQLObject emulation layer
    is to create an application-specific subclass of SQLObjectBase
    (probably named "SQLObject") that provides an implementation of
    _get_store to return an instance of L{storm.store.Store}. It may
    even be implemented as returning a global L{Store} instance. Then
    all database classes should subclass that class.
    """
    __metaclass__ = SQLObjectMeta  # Python 2 metaclass hook.

    q = DotQ()  # Enables SQLObject's ``Cls.q.column`` query syntax.

    def __init__(self, *args, **kwargs):
        # SQLObject-style creation: add to the store immediately, then
        # initialize attributes from the keyword arguments.
        self._get_store().add(self)
        self._create(None, **kwargs)

    def __storm_loaded__(self):
        # Storm hook invoked for objects loaded from the database.
        self._init(None)

    def _init(self, id, *args, **kwargs):
        # Subclass hook; called after creation and after loading.
        pass

    def _create(self, _id_, **kwargs):
        self.set(**kwargs)
        self._init(None)

    def set(self, **kwargs):
        # Bulk attribute assignment (Python 2: dict.iteritems).
        for attr, value in kwargs.iteritems():
            setattr(self, attr, value)

    def destroySelf(self):
        Store.of(self).remove(self)

    @staticmethod
    def _get_store():
        raise NotImplementedError("SQLObjectBase._get_store() "
                                  "must be implemented")

    @classmethod
    def delete(cls, id):
        # destroySelf() should be extended to support cascading, so
        # we'll mimic what SQLObject does here, even if more expensive.
        obj = cls.get(id)
        obj.destroySelf()

    @classmethod
    def get(cls, id):
        # Fetch by primary key; raises instead of returning None.
        store = cls._get_store()
        obj = store.get(cls, id)
        if obj is None:
            raise SQLObjectNotFound("Object not found")
        return obj

    @classmethod
    def _parse_orderBy(cls, orderBy):
        # Translate SQLObject order specs (attribute names, optionally
        # prefixed with "-" for descending) into Storm order expressions,
        # mapping declared names to real properties via _attr_to_prop.
        result = []
        if not isinstance(orderBy, (tuple, list)):
            orderBy = (orderBy,)
        for item in orderBy:
            if isinstance(item, basestring):
                desc = item.startswith("-")
                if desc:
                    item = item[1:]
                item = cls._attr_to_prop.get(item, item)
                if desc:
                    item = Desc(item)
            result.append(item)
        return tuple(result)

    @classmethod
    def _find(cls, clause=None, clauseTables=None, orderBy=None,
              limit=None, distinct=None, prejoins=_IGNORED,
              prejoinClauseTables=_IGNORED, _by={}):
        # Core query helper behind the select*() family.  prejoins and
        # prejoinClauseTables are accepted for API compatibility only.
        # NOTE: _by={} is a shared mutable default; it is only read here.
        store = cls._get_store()
        if clause is None:
            args = ()
        else:
            args = (clause,)
        if clauseTables is not None:
            clauseTables = set(table.lower() for table in clauseTables)
            clauseTables.add(cls.__storm_table__.lower())
            store = store.using(*clauseTables)
        result = store.find(cls, *args, **_by)
        if orderBy is not None:
            result.order_by(*cls._parse_orderBy(orderBy))
        result.config(limit=limit, distinct=distinct)
        return result

    @classmethod
    def select(cls, *args, **kwargs):
        result = cls._find(*args, **kwargs)
        return SQLObjectResultSet(result, cls)

    @classmethod
    def selectBy(cls, orderBy=None, **kwargs):
        # Keyword arguments become equality constraints.
        result = cls._find(orderBy=orderBy, _by=kwargs)
        return SQLObjectResultSet(result, cls)

    @classmethod
    def selectOne(cls, *args, **kwargs):
        return cls._find(*args, **kwargs).one()

    @classmethod
    def selectOneBy(cls, **kwargs):
        return cls._find(_by=kwargs).one()

    @classmethod
    def selectFirst(cls, *args, **kwargs):
        return cls._find(*args, **kwargs).first()

    @classmethod
    def selectFirstBy(cls, orderBy=None, **kwargs):
        return cls._find(orderBy=orderBy, _by=kwargs).first()

    # Dummy methods.
    def sync(self): pass
    def syncUpdate(self): pass
class SQLObjectResultSet(object):
    """Wraps a Storm result set with SQLObject's SelectResults API.

    Query-refining methods (orderBy, limit, distinct, set operations)
    operate on a copy and return a new wrapper, leaving self intact.
    """

    def __init__(self, result_set, cls):
        self._result_set = result_set
        self._cls = cls

    def count(self):
        return self._result_set.count()

    def __iter__(self):
        return self._result_set.__iter__()

    def __getitem__(self, index):
        result_set = self._result_set[index]
        if isinstance(index, slice):
            # Slicing yields a restricted result set; wrap it again.
            return self.__class__(result_set, self._cls)
        return result_set

    def __nonzero__(self):
        # Python 2 truth protocol: truthy when at least one row exists.
        return self._result_set.any() is not None

    def orderBy(self, orderBy):
        result_set = self._result_set.copy()
        result_set.order_by(*self._cls._parse_orderBy(orderBy))
        return self.__class__(result_set, self._cls)

    def limit(self, limit):
        result_set = self._result_set.copy().config(limit=limit)
        return self.__class__(result_set, self._cls)

    def distinct(self):
        result_set = self._result_set.copy().config(distinct=True)
        result_set.order_by()  # Remove default order.
        return self.__class__(result_set, self._cls)

    def union(self, otherSelect, unionAll=False, orderBy=None):
        result_set = self._result_set.union(otherSelect._result_set,
                                            all=unionAll)
        result_set.order_by()  # Remove default order.
        new = self.__class__(result_set, self._cls)
        if orderBy is not None:
            return new.orderBy(orderBy)
        return new

    def except_(self, otherSelect, exceptAll=False, orderBy=None):
        result_set = self._result_set.difference(otherSelect._result_set,
                                                 all=exceptAll)
        result_set.order_by()  # Remove default order.
        new = self.__class__(result_set, self._cls)
        if orderBy is not None:
            return new.orderBy(orderBy)
        return new

    def intersect(self, otherSelect, intersectAll=False, orderBy=None):
        result_set = self._result_set.intersection(otherSelect._result_set,
                                                   all=intersectAll)
        new = self.__class__(result_set, self._cls)
        if orderBy is not None:
            return new.orderBy(orderBy)
        return new

    def prejoin(self, prejoins):
        # Prejoins are a SQLObject optimization; accepted but ignored.
        return self

    def prejoinClauseTables(self, prejoinClauseTables):
        return self
class PropertyAdapter(object):
    """Mixin translating SQLObject column constructor arguments into the
    keyword arguments of a Storm property.  Combined with a concrete
    property class (Int, Bool, ...) by the *Col classes below; the
    cooperative super().__init__ call reaches that property class."""

    _kwargs = {}  # Extra per-column-type property kwargs (see UtcDateTimeCol).

    def __init__(self, dbName=None, notNull=False, default=Undef,
                 alternateID=None, unique=_IGNORED, name=_IGNORED,
                 alternateMethodName=None, length=_IGNORED, immutable=None,
                 prejoins=_IGNORED):
        if default is None and notNull:
            raise RuntimeError("Can't use default=None and notNull=True")
        self.dbName = dbName
        self.alternateID = alternateID
        self.alternateMethodName = alternateMethodName
        # XXX Implement handler for:
        #
        #   - immutable (causes setting the attribute to fail)
        #
        # XXX Implement tests for ignored parameters:
        #
        #   - unique (for tablebuilder)
        #   - length (for tablebuilder for StringCol)
        #   - name (for _columns stuff)
        #   - prejoins
        if callable(default):
            # A callable default is treated as a per-row factory.
            default_factory = default
            default = Undef
        else:
            default_factory = Undef
        super(PropertyAdapter, self).__init__(dbName, allow_none=not notNull,
                                              default_factory=default_factory,
                                              default=default, **self._kwargs)
class AutoUnicodeVariable(Variable):
    """Unlike UnicodeVariable, this will try to convert str to unicode."""

    def parse_set(self, value, from_db):
        # Python 2: accept both str and unicode, coercing to unicode.
        if not isinstance(value, basestring):
            raise TypeError("Expected basestring, found %s" % repr(type(value)))
        return unicode(value)


class AutoUnicode(SimpleProperty):
    # Property whose variable coerces str values to unicode on assignment.
    variable_class = AutoUnicodeVariable
# SQLObject column types, each mapping onto a Storm property through
# PropertyAdapter (which consumes the SQLObject constructor arguments).
class StringCol(PropertyAdapter, AutoUnicode):
    pass

class IntCol(PropertyAdapter, Int):
    pass

class BoolCol(PropertyAdapter, Bool):
    pass

class FloatCol(PropertyAdapter, Float):
    pass

class UtcDateTimeCol(PropertyAdapter, DateTime):
    # Values are stored and returned as UTC-aware datetimes.
    _kwargs = {"tzinfo": tzutc()}

class DateCol(PropertyAdapter, Date):
    pass

class IntervalCol(PropertyAdapter, TimeDelta):
    pass
class ForeignKey(object):
    """Declaration marker for a foreign-key column.

    Holds the target class name plus the raw keyword arguments;
    SQLObjectMeta later expands it into an Int column and a Reference.
    """

    def __init__(self, foreignKey, **kwargs):
        self.foreignKey = foreignKey
        self.kwargs = kwargs
class SQLMultipleJoin(ReferenceSet):
    """SQLObject's multiple join built on Storm's ReferenceSet: direct
    one-to-many, or many-to-many when intermediateTable is given."""

    def __init__(self, otherClass=None, joinColumn=None,
                 intermediateTable=None, otherColumn=None, orderBy=None,
                 prejoins=_IGNORED):
        if intermediateTable:
            # Many-to-many through the intermediate table.
            args = ("<primary key>",
                    "%s.%s" % (intermediateTable, joinColumn),
                    "%s.%s" % (intermediateTable, otherColumn),
                    "%s.<primary key>" % otherClass)
        else:
            args = ("<primary key>", "%s.%s" % (otherClass, joinColumn))
        ReferenceSet.__init__(self, *args)
        self._orderBy = orderBy

    def __get__(self, obj, cls=None):
        # Class access returns the descriptor itself; instance access
        # returns the related rows wrapped in a SQLObjectResultSet.
        if obj is None:
            return self
        bound_reference_set = ReferenceSet.__get__(self, obj)
        target_cls = bound_reference_set._target_cls
        result_set = bound_reference_set.find()
        if self._orderBy:
            result_set.order_by(*target_cls._parse_orderBy(self._orderBy))
        return SQLObjectResultSet(result_set, target_cls)

# In this emulation a related join behaves identically to a multiple join.
SQLRelatedJoin = SQLMultipleJoin
class CONTAINSSTRING(Like):
    """Substring match via SQL LIKE '%...%', with LIKE wildcards in the
    needle escaped using '!' as the escape character."""

    def __init__(self, expr, string):
        # Escape the escape character itself first, then the wildcards.
        string = string.replace("!", "!!") \
                       .replace("_", "!_") \
                       .replace("%", "!%")
        Like.__init__(self, expr, "%"+string+"%", SQLRaw("'!'"))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2010-2025 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.impl.base.contracts.description.booleans
import org.jetbrains.kotlin.analysis.api.KaImplementationDetail
import org.jetbrains.kotlin.analysis.api.contracts.description.KaContractParameterValue
import org.jetbrains.kotlin.analysis.api.contracts.description.booleans.KaContractBooleanConstantExpression
import org.jetbrains.kotlin.analysis.api.contracts.description.booleans.KaContractBooleanValueParameterExpression
import org.jetbrains.kotlin.analysis.api.lifetime.KaLifetimeToken
import org.jetbrains.kotlin.analysis.api.lifetime.withValidityAssertion
/**
 * Contract boolean expression referencing a value parameter, backed by a
 * [KaContractParameterValue]. [parameterSymbol] access is guarded by a
 * validity assertion; [token] is delegated to the backing symbol.
 */
@KaImplementationDetail
class KaBaseContractBooleanValueParameterExpression(
    private val backingParameterSymbol: KaContractParameterValue,
) : KaContractBooleanValueParameterExpression {
    override val token: KaLifetimeToken get() = backingParameterSymbol.token

    override val parameterSymbol: KaContractParameterValue get() = withValidityAssertion { backingParameterSymbol }

    // Reference identity, or structural equality of the backing symbols.
    override fun equals(other: Any?): Boolean {
        return this === other
            || other is KaBaseContractBooleanValueParameterExpression
            && other.backingParameterSymbol == backingParameterSymbol
    }

    override fun hashCode(): Int = backingParameterSymbol.hashCode()
}
/**
 * Contract boolean expression wrapping a constant `true`/`false` value.
 * [booleanConstant] access is guarded by a validity assertion.
 */
@KaImplementationDetail
class KaBaseContractBooleanConstantExpression(
    private val backingBooleanConstant: Boolean,
    override val token: KaLifetimeToken
) : KaContractBooleanConstantExpression {
    override val booleanConstant: Boolean get() = withValidityAssertion { backingBooleanConstant }

    // Reference identity, or equality of the wrapped constants.
    override fun equals(other: Any?): Boolean {
        return this === other || other is KaBaseContractBooleanConstantExpression && other.backingBooleanConstant == backingBooleanConstant
    }

    override fun hashCode(): Int = backingBooleanConstant.hashCode()
}
|
kotlin
|
github
|
https://github.com/JetBrains/kotlin
|
analysis/analysis-api-impl-base/src/org/jetbrains/kotlin/analysis/api/impl/base/contracts/description/booleans/KaBaseContractBooleanExpression.kt
|
from itertools import combinations
def sub_lists(my_list):
    """Return every subset of ``my_list`` as a list of lists.

    Subsets are ordered by increasing size and preserve the input order
    within each subset; the empty subset comes first.
    """
    # itertools.combinations yields at least one row for every size
    # (including the empty tuple at size 0), so the old "skip empty
    # batches" guard was dead code; a flat comprehension replaces the
    # manual accumulate-and-extend loop.
    return [list(combo)
            for size in range(len(my_list) + 1)
            for combo in combinations(my_list, size)]
|
unknown
|
mbpp
| ||
from django import forms, http
from django.conf import settings
from django.test import TestCase
from django.template.response import TemplateResponse
from django.utils.importlib import import_module
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import (WizardView,
SessionWizardView,
CookieWizardView)
class DummyRequest(http.HttpRequest):
    """Minimal HttpRequest stand-in for driving wizard views in tests."""

    def __init__(self, POST=None):
        super(DummyRequest, self).__init__()
        # POST when a (non-empty) payload was supplied, otherwise GET.
        self.method = "POST" if POST else "GET"
        if POST is not None:
            self.POST.update(POST)
        self.session = {}
        self._dont_enforce_csrf_checks = True
def get_request(*args, **kwargs):
    """Build a DummyRequest carrying a real (empty) session created from
    the configured SESSION_ENGINE."""
    request = DummyRequest(*args, **kwargs)
    engine = import_module(settings.SESSION_ENGINE)
    request.session = engine.SessionStore(None)
    return request
# Minimal one-field forms used as wizard steps throughout these tests.
class Step1(forms.Form):
    name = forms.CharField()

class Step2(forms.Form):
    name = forms.CharField()

class Step3(forms.Form):
    data = forms.CharField()
class CustomKwargsStep1(Step1):
    """Step1 variant accepting an extra ``test`` keyword argument, which
    TestWizard.get_form_kwargs() injects for the 'kwargs_test' step."""

    def __init__(self, test=None, *args, **kwargs):
        self.test = test
        # __init__ must return None; the previous
        # ``return super(...).__init__(...)`` only worked because
        # __init__ itself returns None — drop the misleading return.
        super(CustomKwargsStep1, self).__init__(*args, **kwargs)
class UserForm(forms.ModelForm):
    # ModelForm over the auth User model.
    # NOTE(review): no Meta.fields/exclude — relies on older Django's
    # behavior of including every model field; confirm against the
    # Django version this test suite targets.
    class Meta:
        model = User

# Formset over User with two extra blank forms.
UserFormSet = forms.models.modelformset_factory(User, form=UserForm, extra=2)
class TestWizard(WizardView):
    """WizardView instrumented for testing: dispatch() also returns the
    view instance so tests can inspect its state afterwards."""

    storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'

    def dispatch(self, request, *args, **kwargs):
        response = super(TestWizard, self).dispatch(request, *args, **kwargs)
        return response, self

    def get_form_kwargs(self, step, *args, **kwargs):
        kwargs = super(TestWizard, self).get_form_kwargs(step, *args, **kwargs)
        if step == 'kwargs_test':
            # Extra kwarg consumed by CustomKwargsStep1.__init__.
            kwargs['test'] = True
        return kwargs
class FormTests(TestCase):
    """Behavioral tests for WizardView, exercised through TestWizard."""

    def test_form_init(self):
        # get_initkwargs() normalizes form lists into an ordered mapping,
        # auto-numbering steps given without an explicit name.
        testform = TestWizard.get_initkwargs([Step1, Step2])
        self.assertEqual(testform['form_list'], {u'0': Step1, u'1': Step2})
        testform = TestWizard.get_initkwargs([('start', Step1), ('step2', Step2)])
        self.assertEqual(
            testform['form_list'], {u'start': Step1, u'step2': Step2})
        testform = TestWizard.get_initkwargs([Step1, Step2, ('finish', Step3)])
        self.assertEqual(
            testform['form_list'], {u'0': Step1, u'1': Step2, u'finish': Step3})

    def test_first_step(self):
        # A fresh request starts the wizard at its first step.
        request = get_request()
        testform = TestWizard.as_view([Step1, Step2])
        response, instance = testform(request)
        self.assertEqual(instance.steps.current, u'0')
        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)
        self.assertEqual(instance.steps.current, 'start')

    def test_persistence(self):
        # The current step survives across separate view instances
        # because it lives in the (session-backed) storage.
        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        request = get_request({'test_wizard-current_step': 'start',
                               'name': 'data1'})
        response, instance = testform(request)
        self.assertEqual(instance.steps.current, 'start')
        instance.storage.current_step = 'step2'
        testform2 = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        request.POST = {'test_wizard-current_step': 'step2'}
        response, instance = testform2(request)
        self.assertEqual(instance.steps.current, 'step2')

    def test_form_condition(self):
        # condition_dict entries control whether a step is skipped.
        request = get_request()
        testform = TestWizard.as_view(
            [('start', Step1), ('step2', Step2), ('step3', Step3)],
            condition_dict={'step2': True})
        response, instance = testform(request)
        self.assertEqual(instance.get_next_step(), 'step2')
        testform = TestWizard.as_view(
            [('start', Step1), ('step2', Step2), ('step3', Step3)],
            condition_dict={'step2': False})
        response, instance = testform(request)
        self.assertEqual(instance.get_next_step(), 'step3')

    def test_form_kwargs(self):
        # TestWizard.get_form_kwargs() injects 'test' only for the
        # kwargs_test step; the form stores it on self.test.
        request = get_request()
        testform = TestWizard.as_view([('start', Step1),
                                       ('kwargs_test', CustomKwargsStep1)])
        response, instance = testform(request)
        self.assertEqual(instance.get_form_kwargs('start'), {})
        self.assertEqual(instance.get_form_kwargs('kwargs_test'), {'test': True})
        self.assertEqual(instance.get_form('kwargs_test').test, True)

    def test_form_prefix(self):
        # The form prefix defaults to the current step name.
        request = get_request()
        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)
        self.assertEqual(instance.get_form_prefix(), 'start')
        self.assertEqual(instance.get_form_prefix('another'), 'another')

    def test_form_initial(self):
        # initial_dict supplies per-step initial data; missing steps get {}.
        request = get_request()
        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)],
                                      initial_dict={'start': {'name': 'value1'}})
        response, instance = testform(request)
        self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
        self.assertEqual(instance.get_form_initial('step2'), {})

    def test_form_instance(self):
        # instance_dict supplies the model instance for a ModelForm step.
        request = get_request()
        the_instance = User()
        testform = TestWizard.as_view([('start', UserForm), ('step2', Step2)],
                                      instance_dict={'start': the_instance})
        response, instance = testform(request)
        self.assertEqual(
            instance.get_form_instance('start'),
            the_instance)
        self.assertEqual(
            instance.get_form_instance('non_exist_instance'),
            None)

    def test_formset_instance(self):
        # For formset steps the instance_dict entry is a queryset.
        request = get_request()
        the_instance1, created = User.objects.get_or_create(
            username='testuser1')
        the_instance2, created = User.objects.get_or_create(
            username='testuser2')
        testform = TestWizard.as_view([('start', UserFormSet), ('step2', Step2)],
            instance_dict={'start': User.objects.filter(username='testuser1')})
        response, instance = testform(request)
        self.assertEqual(list(instance.get_form_instance('start')), [the_instance1])
        self.assertEqual(instance.get_form_instance('non_exist_instance'), None)
        self.assertEqual(instance.get_form().initial_form_count(), 1)

    def test_done(self):
        # The base done() implementation is abstract and must raise.
        request = get_request()
        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)
        self.assertRaises(NotImplementedError, instance.done, None)

    def test_revalidation(self):
        # After render_done() the storage points back at the first step.
        request = get_request()
        testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
        response, instance = testform(request)
        instance.render_done(None)
        self.assertEqual(instance.storage.current_step, 'start')
class SessionFormTests(TestCase):
    """Smoke test: the session-backed wizard renders its first step."""

    def test_init(self):
        view = SessionWizardView.as_view([('start', Step1)])
        response = view(get_request())
        self.assertTrue(isinstance(response, TemplateResponse))
class CookieFormTests(TestCase):
    """Smoke test: the cookie-backed wizard renders its first step."""

    def test_init(self):
        view = CookieWizardView.as_view([('start', Step1)])
        response = view(get_request())
        self.assertTrue(isinstance(response, TemplateResponse))
|
unknown
|
codeparrot/codeparrot-clean
| ||
import { jsonToReadableText } from "../helpers.js";
import {
TscWatchCompileChange,
verifyTscWatch,
} from "../helpers/tscWatch.js";
import {
File,
FileOrFolderOrSymLinkMap,
TestServerHost,
} from "../helpers/virtualFileSystemWithWatch.js";
describe("unittests:: tscWatch:: emitAndErrorUpdates:: Emit times and Error updates in builder after program changes", () => {
const config: File = {
path: `/user/username/projects/myproject/tsconfig.json`,
content: `{}`,
};
interface VerifyEmitAndErrorUpdates {
subScenario: string;
files: () => FileOrFolderOrSymLinkMap | readonly File[];
changes: TscWatchCompileChange[];
}
function verifyEmitAndErrorUpdates({
subScenario,
files,
changes,
}: VerifyEmitAndErrorUpdates) {
verifyTscWatch({
scenario: "emitAndErrorUpdates",
subScenario: `default/${subScenario}`,
commandLineArgs: ["--w"],
sys,
edits: changes,
baselineIncremental: true,
});
verifyTscWatch({
scenario: "emitAndErrorUpdates",
subScenario: `defaultAndD/${subScenario}`,
commandLineArgs: ["--w", "--d"],
sys,
edits: changes,
baselineIncremental: true,
});
verifyTscWatch({
scenario: "emitAndErrorUpdates",
subScenario: `isolatedModules/${subScenario}`,
commandLineArgs: ["--w", "--isolatedModules"],
sys,
edits: changes,
baselineIncremental: true,
});
verifyTscWatch({
scenario: "emitAndErrorUpdates",
subScenario: `isolatedModulesAndD/${subScenario}`,
commandLineArgs: ["--w", "--isolatedModules", "--d"],
sys,
edits: changes,
baselineIncremental: true,
});
verifyTscWatch({
scenario: "emitAndErrorUpdates",
subScenario: `assumeChangesOnlyAffectDirectDependencies/${subScenario}`,
commandLineArgs: ["--w", "--assumeChangesOnlyAffectDirectDependencies"],
sys,
edits: changes,
baselineIncremental: true,
});
verifyTscWatch({
scenario: "emitAndErrorUpdates",
subScenario: `assumeChangesOnlyAffectDirectDependenciesAndD/${subScenario}`,
commandLineArgs: ["--w", "--assumeChangesOnlyAffectDirectDependencies", "--d"],
sys,
edits: changes,
baselineIncremental: true,
});
function sys() {
return TestServerHost.createWatchedSystem(
files(),
{ currentDirectory: "/user/username/projects/myproject" },
);
}
}
describe("deep import changes", () => {
const aFile: File = {
path: `/user/username/projects/myproject/a.ts`,
content: `import {B} from './b';
declare var console: any;
let b = new B();
console.log(b.c.d);`,
};
function verifyDeepImportChange(subScenario: string, bFile: File, cFile: File) {
verifyEmitAndErrorUpdates({
subScenario: `deepImportChanges/${subScenario}`,
files: () => [aFile, bFile, cFile, config],
changes: [
{
caption: "Rename property d to d2 of class C to initialize signatures",
edit: sys => sys.writeFile(cFile.path, cFile.content.replace("d", "d2")),
timeouts: sys => sys.runQueuedTimeoutCallbacks(),
},
{
caption: "Rename property d2 to d of class C to revert back to original text",
edit: sys => sys.writeFile(cFile.path, cFile.content.replace("d2", "d")),
timeouts: sys => sys.runQueuedTimeoutCallbacks(),
},
{
caption: "Rename property d to d2 of class C",
edit: sys => sys.writeFile(cFile.path, cFile.content.replace("d", "d2")),
timeouts: sys => sys.runQueuedTimeoutCallbacks(),
},
],
});
}
describe("updates errors when deep import file changes", () => {
const bFile: File = {
path: `/user/username/projects/myproject/b.ts`,
content: `import {C} from './c';
export class B
{
c = new C();
}`,
};
const cFile: File = {
path: `/user/username/projects/myproject/c.ts`,
content: `export class C
{
d = 1;
}`,
};
verifyDeepImportChange(
"errors for .ts change",
bFile,
cFile,
);
});
describe("updates errors when deep import through declaration file changes", () => {
const bFile: File = {
path: `/user/username/projects/myproject/b.d.ts`,
content: `import {C} from './c';
export class B
{
c: C;
}`,
};
const cFile: File = {
path: `/user/username/projects/myproject/c.d.ts`,
content: `export class C
{
d: number;
}`,
};
verifyDeepImportChange(
"errors for .d.ts change",
bFile,
cFile,
);
});
});
// Import chain e -> d -> c -> b -> a. a.ts initially declares `x2` while
// c.ts/d.ts use `x`, so the edits below toggle the error state through the
// whole chain even though b.ts itself never re-exports Coords.
describe("updates errors in file not exporting a deep multilevel import that changes", () => {
    const aFile: File = {
        path: `/user/username/projects/myproject/a.ts`,
        content: `export interface Point {
name: string;
c: Coords;
}
export interface Coords {
x2: number;
y: number;
}`,
    };
    const bFile: File = {
        path: `/user/username/projects/myproject/b.ts`,
        content: `import { Point } from "./a";
export interface PointWrapper extends Point {
}`,
    };
    const cFile: File = {
        path: `/user/username/projects/myproject/c.ts`,
        content: `import { PointWrapper } from "./b";
export function getPoint(): PointWrapper {
return {
name: "test",
c: {
x: 1,
y: 2
}
}
};`,
    };
    const dFile: File = {
        path: `/user/username/projects/myproject/d.ts`,
        content: `import { getPoint } from "./c";
getPoint().c.x;`,
    };
    const eFile: File = {
        path: `/user/username/projects/myproject/e.ts`,
        content: `import "./d";`,
    };
    verifyEmitAndErrorUpdates({
        subScenario: "file not exporting a deep multilevel import that changes",
        files: () => [aFile, bFile, cFile, dFile, eFile, config],
        changes: [
            {
                caption: "Rename property x2 to x of interface Coords to initialize signatures",
                edit: sys => sys.writeFile(aFile.path, aFile.content.replace("x2", "x")),
                timeouts: sys => sys.runQueuedTimeoutCallbacks(),
            },
            {
                // Use the full "x: number" pattern so the rename does not hit
                // the other occurrences of "x" in the file.
                caption: "Rename property x to x2 of interface Coords to revert back to original text",
                edit: sys => sys.writeFile(aFile.path, aFile.content.replace("x: number", "x2: number")),
                timeouts: sys => sys.runQueuedTimeoutCallbacks(),
            },
            {
                caption: "Rename property x2 to x of interface Coords",
                edit: sys => sys.writeFile(aFile.path, aFile.content.replace("x2", "x")),
                timeouts: sys => sys.runQueuedTimeoutCallbacks(),
            },
        ],
    });
});
describe("updates errors when file transitively exported file changes", () => {
// Project config: only app.ts is listed; baseUrl lets "lib1/..." and
// "lib2/..." module specifiers resolve from the project root.
const config: File = {
    path: `/user/username/projects/myproject/tsconfig.json`,
    content: jsonToReadableText({
        files: ["app.ts"],
        compilerOptions: { baseUrl: "." },
    }),
};
// app consumes Data via lib2/public, which re-exports lib2/data; lib2/data
// in turn consumes ITest via two levels of re-export inside lib1.
const app: File = {
    path: `/user/username/projects/myproject/app.ts`,
    content: `import { Data } from "lib2/public";
export class App {
public constructor() {
new Data().test();
}
}`,
};
// Barrel file: re-exports everything from lib2/data.
const lib2Public: File = {
    path: `/user/username/projects/myproject/lib2/public.ts`,
    content: `export * from "./data";`,
};
const lib2Data: File = {
    path: `/user/username/projects/myproject/lib2/data.ts`,
    content: `import { ITest } from "lib1/public";
export class Data {
public test() {
const result: ITest = {
title: "title"
}
return result;
}
}`,
};
// Two-level barrel chain: lib1/public -> lib1/tools/public -> toolsinterface.
const lib1Public: File = {
    path: `/user/username/projects/myproject/lib1/public.ts`,
    content: `export * from "./tools/public";`,
};
const lib1ToolsPublic: File = {
    path: `/user/username/projects/myproject/lib1/tools/public.ts`,
    content: `export * from "./toolsinterface";`,
};
// The deepest file; renaming its property must propagate errors all the way
// up to app.ts.
const lib1ToolsInterface: File = {
    path: `/user/username/projects/myproject/lib1/tools/toolsinterface.ts`,
    content: `export interface ITest {
title: string;
}`,
};
// Runs the shared rename-ITest.title edit sequence against the common files
// plus the scenario-specific `files` (the lib2/data variants).
function verifyTransitiveExports(subScenario: string, files: readonly File[]) {
    verifyEmitAndErrorUpdates({
        subScenario: `transitive exports/${subScenario}`,
        files: () => [lib1ToolsInterface, lib1ToolsPublic, app, lib2Public, lib1Public, ...files, config],
        changes: [
            {
                caption: "Rename property title to title2 of interface ITest to initialize signatures",
                edit: sys => sys.writeFile(lib1ToolsInterface.path, lib1ToolsInterface.content.replace("title", "title2")),
                timeouts: sys => sys.runQueuedTimeoutCallbacks(),
            },
            {
                caption: "Rename property title2 to title of interface ITest to revert back to original text",
                edit: sys => sys.writeFile(lib1ToolsInterface.path, lib1ToolsInterface.content.replace("title2", "title")),
                timeouts: sys => sys.runQueuedTimeoutCallbacks(),
            },
            {
                caption: "Rename property title to title2 of interface ITest",
                edit: sys => sys.writeFile(lib1ToolsInterface.path, lib1ToolsInterface.content.replace("title", "title2")),
                timeouts: sys => sys.runQueuedTimeoutCallbacks(),
            },
        ],
    });
}
describe("when there are no circular import and exports", () => {
    // lib2/data.ts only imports from lib1 — no cycle involved.
    verifyTransitiveExports(
        "no circular import/export",
        [lib2Data],
    );
});
describe("when there are circular import and exports", () => {
    // These local data.ts/data2.ts shadow the outer lib2Data: they import
    // each other (a cycle) while still consuming ITest transitively.
    const lib2Data: File = {
        path: `/user/username/projects/myproject/lib2/data.ts`,
        content: `import { ITest } from "lib1/public"; import { Data2 } from "./data2";
export class Data {
public dat?: Data2; public test() {
const result: ITest = {
title: "title"
}
return result;
}
}`,
    };
    const lib2Data2: File = {
        path: `/user/username/projects/myproject/lib2/data2.ts`,
        content: `import { Data } from "./data";
export class Data2 {
public dat?: Data;
}`,
    };
    verifyTransitiveExports(
        "yes circular import/exports",
        [lib2Data, lib2Data2],
    );
});
});
});
|
typescript
|
github
|
https://github.com/microsoft/TypeScript
|
src/testRunner/unittests/tscWatch/emitAndErrorUpdates.ts
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# NOTE(review): Python 2 script (print statements, integer division below).
import sys
import xml.etree.ElementTree as ET

# Paths of the class XML files given on the command line.
input_list = []
for arg in sys.argv[1:]:
    input_list.append(arg)
if len(input_list) < 1:
    print 'usage: makedoku.py <class_list.xml>'
    sys.exit(0)


def validate_tag(elem, tag):
    # Abort with a non-zero exit status when the XML element is not the
    # expected tag.
    if elem.tag != tag:
        print "Tag mismatch, expected '" + tag + "', got " + elem.tag
        sys.exit(255)


# Sorted class names and name -> XML <class> element map; both are filled by
# the main loop at the bottom of the file.
class_names = []
classes = {}
def make_class_list(class_list, columns):
    """Write class_list.md: a markdown table of all class names split into
    `columns` columns, with a bold initial letter marking each new letter
    group.

    NOTE(review): relies on Python 2 integer division (`len(...) / columns`
    and `idx / col_max`); under Python 3 these produce floats and break the
    indexing below. The file handle is never closed (process exit flushes it).
    """
    f = open('class_list.md', 'wb')
    prev = 0          # NOTE(review): unused
    col_max = len(class_list) / columns + 1
    print ('col max is ', col_max)
    col_count = 0     # NOTE(review): unused
    row_count = 0     # NOTE(review): unused
    last_initial = ''
    fit_columns = []
    for n in range(0, columns):
        fit_columns += [[]]
    indexers = []
    last_initial = ''
    idx = 0
    # Distribute names into columns; remember the first name of each new
    # initial letter so it can be highlighted in the table.
    for n in class_list:
        col = idx / col_max
        if col >= columns:
            col = columns - 1
        fit_columns[col] += [n]
        idx += 1
        if n[:1] != last_initial:
            indexers += [n]
        last_initial = n[:1]
    row_max = 0
    f.write("\n")
    for n in range(0, columns):
        if len(fit_columns[n]) > row_max:
            row_max = len(fit_columns[n])
    # Table header: one (initial, link) column pair per layout column.
    f.write("| ")
    for n in range(0, columns):
        f.write(" | |")
    f.write("\n")
    f.write("| ")
    for n in range(0, columns):
        f.write(" --- | ------- |")
    f.write("\n")
    # Emit the table row by row; short columns simply contribute nothing.
    for r in range(0, row_max):
        s = '| '
        for c in range(0, columns):
            if r >= len(fit_columns[c]):
                continue
            classname = fit_columns[c][r]
            initial = classname[0]
            if classname in indexers:
                s += '**' + initial + '** | '
            else:
                s += ' | '
            s += '[' + classname + '](class_'+ classname.lower()+') | '
        s += '\n'
        f.write(s)
def dokuize_text(txt):
    # NOTE(review): dead code — immediately shadowed by the full
    # implementation of dokuize_text defined right below.
    return txt
def dokuize_text(text):
    """Convert BBCode-like [tags] in *text* to markdown, in place.

    Scans for [...] spans left to right. A span naming a documented class
    becomes a class link (via make_type); known commands ([b], [i], [u],
    [url=...], [method X.y], [html ...], ...) become markdown/HTML; anything
    else is kept verbatim in brackets.
    """
    pos = 0
    while True:
        pos = text.find('[', pos)
        if pos == -1:
            break
        endq_pos = text.find(']', pos + 1)
        if endq_pos == -1:
            break
        pre_text = text[:pos]
        post_text = text[endq_pos + 1:]
        tag_text = text[pos + 1:endq_pos]
        if tag_text in class_names:
            tag_text = make_type(tag_text)
        else:
            # command
            cmd = tag_text
            space_pos = tag_text.find(' ')
            if cmd.find('html') == 0:
                # [html <markup>] -> raw <markup>
                cmd = tag_text[:space_pos]
                param = tag_text[space_pos + 1:]
                tag_text = '<' + param + '>'
            elif cmd.find('method') == 0:
                # [method Class.name] / [method name] -> markdown anchor link.
                cmd = tag_text[:space_pos]
                param = tag_text[space_pos + 1:]
                if param.find('.') != -1:
                    (class_param, method_param) = param.split('.')
                    # NOTE(review): replace("_","_") is a no-op; presumably it
                    # was meant to escape underscores ("\_") — confirm.
                    tag_text = '['+class_param+'.'+method_param.replace("_","_")+'](' + class_param.lower() + '#' \
                        + method_param + ')'
                else:
                    tag_text = '[' + param.replace("_","_") + '](#' + param + ')'
            elif cmd.find('image=') == 0:
                tag_text = ''
            elif cmd.find('url=') == 0:
                tag_text = '[' + cmd[4:] + ']('+cmd[4:]
            elif cmd == '/url':
                tag_text = ')'
            elif cmd == 'center':
                tag_text = ''
            elif cmd == '/center':
                tag_text = ''
            elif cmd == 'br':
                tag_text = '\n'
            elif cmd == 'i' or cmd == '/i':
                tag_text = '_'
            elif cmd == 'b' or cmd == '/b':
                tag_text = '**'
            elif cmd == 'u' or cmd == '/u':
                tag_text = '__'
            else:
                tag_text = '[' + tag_text + ']'
        # Splice the replacement in and continue scanning after it.
        text = pre_text + tag_text + post_text
        pos = len(pre_text) + len(tag_text)
    # tnode = ET.SubElement(parent,"div")
    # tnode.text=text
    return text
def make_type(t):
    """Return *t* as a markdown class link when it names a documented class,
    otherwise return it unchanged."""
    # `global` is not required for a read, but kept for clarity.
    global class_names
    if t not in class_names:
        return t
    return '[' + t + '](class_' + t.lower() + ')'
def make_method(
    f,
    name,
    m,
    declare,
    event=False,
):
    """Write one markdown bullet describing method element *m* to file *f*.

    name: owning class name (not used in the emitted text).
    declare: True emits the bold declaration form, False the linked summary
    form pointing at the in-page anchor.
    event: True for signals, which are printed without a return type.
    """
    s = ' * '
    ret_type = 'void'  # NOTE(review): unused; return type comes from mdata.
    args = list(m)
    # Map argument index -> XML element; index -1 holds the <return> element,
    # 'argidx' keeps the indices in document order.
    mdata = {}
    mdata['argidx'] = []
    for a in args:
        if a.tag == 'return':
            idx = -1
        elif a.tag == 'argument':
            idx = int(a.attrib['index'])
        else:
            continue
        mdata['argidx'].append(idx)
        mdata[idx] = a
    if not event:
        if -1 in mdata['argidx']:
            s += make_type(mdata[-1].attrib['type'])
        else:
            s += 'void'
        s += ' '
    if declare:
        # span.attrib["class"]="funcdecl"
        # a=ET.SubElement(span,"a")
        # a.attrib["name"]=name+"_"+m.attrib["name"]
        # a.text=name+"::"+m.attrib["name"]
        # NOTE(review): replace("_","_") is a no-op; presumably meant to
        # escape underscores ("\_") for markdown — confirm before fixing.
        s += ' **'+m.attrib['name'].replace("_","_")+'** '
    else:
        s += ' **['+ m.attrib['name'].replace("_","_")+'](#' + m.attrib['name'] + ')** '
    s += ' **(**'
    # Emit the argument list; index -1 (the return element) is skipped here.
    argfound = False
    for a in mdata['argidx']:
        arg = mdata[a]
        if a < 0:
            continue
        if a > 0:
            s += ', '
        else:
            s += ' '
        s += make_type(arg.attrib['type'])
        if 'name' in arg.attrib:
            s += ' ' + arg.attrib['name']
        else:
            s += ' arg' + str(a)
        if 'default' in arg.attrib:
            s += '=' + arg.attrib['default']
        argfound = True
    if argfound:
        s += ' '
    s += ' **)**'
    if 'qualifiers' in m.attrib:
        s += ' ' + m.attrib['qualifiers']
    f.write(s + '\n')
def make_doku_class(node):
    """Write class_<name>.md for one <class> XML element: header, brief
    description, member functions, signals, member variables, constants,
    full description, and per-method descriptions.

    NOTE(review): the file handle is never closed (process exit flushes it),
    and c.text is assumed non-None for members/constants with text.
    """
    name = node.attrib['name']
    f = open("class_"+name.lower() + '.md', 'wb')
    f.write('# ' + name + ' \n')
    if 'inherits' in node.attrib:
        inh = node.attrib['inherits'].strip()
        f.write('####**Inherits:** '+make_type(inh)+'\n')
    if 'category' in node.attrib:
        f.write('####**Category:** ' + node.attrib['category'].strip()
                + '\n')
    briefd = node.find('brief_description')
    if briefd != None:
        f.write('\n### Brief Description \n')
        f.write(dokuize_text(briefd.text.strip()) + '\n')
    # Summary list of methods (linked form).
    methods = node.find('methods')
    if methods != None and len(list(methods)) > 0:
        f.write('\n### Member Functions \n')
        for m in list(methods):
            make_method(f, node.attrib['name'], m, False)
    events = node.find('signals')
    if events != None and len(list(events)) > 0:
        f.write('\n### Signals \n')
        for m in list(events):
            make_method(f, node.attrib['name'], m, True, True)
    members = node.find('members')
    if members != None and len(list(members)) > 0:
        f.write('\n### Member Variables \n')
        for c in list(members):
            s = ' * '
            s += make_type(c.attrib['type']) + ' '
            s += '**' + c.attrib['name'] + '**'
            if c.text.strip() != '':
                s += ' - ' + c.text.strip()
            f.write(s + '\n')
    constants = node.find('constants')
    if constants != None and len(list(constants)) > 0:
        f.write('\n### Numeric Constants \n')
        for c in list(constants):
            s = ' * '
            s += '**' + c.attrib['name'] + '**'
            if 'value' in c.attrib:
                s += ' = **' + c.attrib['value'] + '**'
            if c.text.strip() != '':
                s += ' - ' + c.text.strip()
            f.write(s + '\n')
    descr = node.find('description')
    if descr != None and descr.text.strip() != '':
        f.write('\n### Description \n')
        f.write(dokuize_text(descr.text.strip()) + '\n')
    # Detailed per-method section: declaration (bold form) plus prose.
    methods = node.find('methods')
    if methods != None and len(list(methods)) > 0:
        f.write('\n### Member Function Description \n')
        for m in list(methods):
            d = m.find('description')
            if d == None or d.text.strip() == '':
                continue
            f.write('\n#### <a name="'+m.attrib['name']+'">' + m.attrib['name'] + '</a>\n')
            make_method(f, node.attrib['name'], m, True)
            f.write('\n')
            f.write(dokuize_text(d.text.strip()))
            f.write('\n')
# Main driver: collect every <class> element from all input files
# (first occurrence of a name wins), then emit the index and one page per
# class. NOTE(review): `version` and `validate_tag` are collected/defined
# but never used afterwards.
for file in input_list:
    tree = ET.parse(file)
    doc = tree.getroot()
    if 'version' not in doc.attrib:
        print "Version missing from 'doc'"
        sys.exit(255)
    version = doc.attrib['version']
    for c in list(doc):
        if c.attrib['name'] in class_names:
            continue
        class_names.append(c.attrib['name'])
        classes[c.attrib['name']] = c
class_names.sort()
make_class_list(class_names, 2)
for cn in class_names:
    c = classes[cn]
    make_doku_class(c)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import platform
from telemetry.core.platform import cros_device
from telemetry.core.platform import cros_interface
from telemetry.core.platform import linux_based_platform_backend
from telemetry.core.platform.power_monitor import cros_power_monitor
from telemetry.core.platform import ps_util
from telemetry.core import util
from telemetry.internal.forwarders import cros_forwarder
class CrosPlatformBackend(
    linux_based_platform_backend.LinuxBasedPlatformBackend):
  """Platform backend that talks to a Chrome OS device via CrOSInterface.

  With a |device|, commands run over ssh on the remote device; without one,
  a local (on-device) interface is used.
  """

  def __init__(self, device=None):
    super(CrosPlatformBackend, self).__init__(device)
    if device:
      self._cri = cros_interface.CrOSInterface(
          device.host_name, device.ssh_port, device.ssh_identity)
      self._cri.TryLogin()
    else:
      self._cri = cros_interface.CrOSInterface()
    self._powermonitor = cros_power_monitor.CrosPowerMonitor(self)

  @classmethod
  def IsPlatformBackendForHost(cls):
    return util.IsRunningOnCrosDevice()

  @classmethod
  def SupportsDevice(cls, device):
    return isinstance(device, cros_device.CrOSDevice)

  @classmethod
  def CreatePlatformForDevice(cls, device, finder_options):
    assert cls.SupportsDevice(device)
    return platform.Platform(CrosPlatformBackend(device))

  @property
  def cri(self):
    return self._cri

  @property
  def forwarder_factory(self):
    # Created lazily. NOTE(review): assumes the base class initializes
    # self._forwarder_factory to a falsy value — confirm.
    if not self._forwarder_factory:
      self._forwarder_factory = cros_forwarder.CrOsForwarderFactory(self._cri)
    return self._forwarder_factory

  def GetRemotePort(self, port):
    if self._cri.local:
      return port
    return self._cri.GetRemotePort()

  def IsThermallyThrottled(self):
    raise NotImplementedError()

  def HasBeenThermallyThrottled(self):
    raise NotImplementedError()

  def RunCommand(self, args):
    """Runs |args| on the device and returns stdout.

    Raises IOError if the command produced anything on stderr.
    """
    if not isinstance(args, list):
      args = [args]
    stdout, stderr = self._cri.RunCmdOnDevice(args)
    if stderr:
      raise IOError('Failed to run: cmd = %s, stderr = %s' %
                    (str(args), stderr))
    return stdout

  def GetFileContents(self, filename):
    # Best-effort read; returns '' when the remote `cat` fails.
    try:
      return self.RunCommand(['cat', filename])
    except AssertionError:
      return ''

  def GetPsOutput(self, columns, pid=None):
    return ps_util.GetPsOutputWithPlatformBackend(self, columns, pid)

  @staticmethod
  def ParseCStateSample(sample):
    """Parses per-cpu cpuidle text samples into {cpu: {state: time}}.

    Each per-cpu sample is expected to hold N state names, N times and
    N latencies, followed by a single timestamp line.
    """
    sample_stats = {}
    for cpu in sample:
      values = sample[cpu].splitlines()
      # There are three values per state after excluding the single time
      # value. Use floor division so the result stays an int (identical on
      # Python 2, and keeps the slice indices valid on Python 3).
      num_states = (len(values) - 1) // 3
      names = values[:num_states]
      times = values[num_states:2 * num_states]
      latencies = values[2 * num_states:]
      # The last line in the sample contains the time.
      cstates = {'C0': int(values[-1]) * 10 ** 6}
      for i, state in enumerate(names):
        if names[i] == 'POLL' and not int(latencies[i]):
          # C0 state. Kernel stats aren't right, so calculate by
          # subtracting all other states from total time (using epoch
          # timer since we calculate differences in the end anyway).
          # NOTE: Only x86 lists C0 under cpuidle, ARM does not.
          continue
        cstates['C0'] -= int(times[i])
        if names[i] == '<null>':
          # Kernel race condition that can happen while a new C-state gets
          # added (e.g. AC->battery). Don't know the 'name' of the state
          # yet, but its 'time' would be 0 anyway.
          continue
        cstates[state] = int(times[i])
      sample_stats[cpu] = cstates
    return sample_stats

  def GetOSName(self):
    return 'chromeos'

  def GetOSVersionName(self):
    return ''  # TODO: Implement this.

  def GetChildPids(self, pid):
    """Returns a list of child pids of |pid|."""
    all_process_info = self._cri.ListProcesses()
    processes = [(curr_pid, curr_ppid, curr_state)
                 for curr_pid, _, curr_ppid, curr_state in all_process_info]
    return ps_util.GetChildPids(processes, pid)

  def GetCommandLine(self, pid):
    # Returns the command line of |pid|, or None if not found.
    procs = self._cri.ListProcesses()
    return next((proc[1] for proc in procs if proc[0] == pid), None)

  def CanFlushIndividualFilesFromSystemCache(self):
    return True

  def FlushEntireSystemCache(self):
    raise NotImplementedError()

  def FlushSystemCacheForDirectory(self, directory):
    flush_command = (
        '/usr/local/telemetry/src/src/out/Release/clear_system_cache')
    self.RunCommand(['chmod', '+x', flush_command])
    self.RunCommand([flush_command, '--recurse', directory])

  def CanMonitorPower(self):
    return self._powermonitor.CanMonitorPower()

  def StartMonitoringPower(self, browser):
    self._powermonitor.StartMonitoringPower(browser)

  def StopMonitoringPower(self):
    return self._powermonitor.StopMonitoringPower()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bytes"
"testing"
"github.com/spf13/cobra"
)
// shellsError is reported when GetSupportedShells returns nothing.
const shellsError = "Unexpected empty completion shells list"
// TestNewCmdCompletion wires the completion command under a throwaway parent
// command and executes it for the first supported shell.
func TestNewCmdCompletion(t *testing.T) {
	shells := GetSupportedShells()
	if len(shells) == 0 {
		t.Error(shellsError)
	}

	var buf bytes.Buffer
	// newCmdCompletion needs a parent command, so hang it off a dummy one.
	root := &cobra.Command{}
	root.SetArgs([]string{"completion", shells[0]})
	root.AddCommand(newCmdCompletion(&buf, ""))
	if err := root.Execute(); err != nil {
		t.Errorf("Cannot execute newCmdCompletion: %v", err)
	}
}
// TestRunCompletion checks argument validation (missing/too many/unsupported
// shell) and then runs completion generation for every supported shell.
func TestRunCompletion(t *testing.T) {
	var out bytes.Buffer
	type TestCase struct {
		name          string
		args          []string
		expectedError bool
	}
	testCases := []TestCase{
		{
			name:          "invalid: missing argument",
			args:          []string{},
			expectedError: true,
		},
		{
			name:          "invalid: too many arguments",
			args:          []string{"", ""},
			expectedError: true,
		},
		{
			name:          "invalid: unsupported shell name",
			args:          []string{"unsupported"},
			expectedError: true,
		},
	}
	// test all supported shells
	shells := GetSupportedShells()
	if len(shells) == 0 {
		t.Error(shellsError)
	}
	for _, shell := range shells {
		test := TestCase{
			name: "valid: test shell " + shell,
			args: []string{shell},
		}
		testCases = append(testCases, test)
	}
	// use dummy cobra commands
	parentCmd := &cobra.Command{}
	cmd := &cobra.Command{}
	parentCmd.AddCommand(cmd)
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if err := RunCompletion(&out, "", cmd, tc.args); (err != nil) != tc.expectedError {
				t.Errorf("Test case %q: TestRunCompletion expected error: %v, saw: %v", tc.name, tc.expectedError, (err != nil))
			}
		})
	}
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
cmd/kubeadm/app/cmd/completion_test.go
|
#pragma once
#include <ATen/core/jit_type_base.h>
#include <ATen/core/ivalue.h>
namespace c10 {
// Forward declarations; definitions live elsewhere in ATen/core.
template<class T> decltype(auto) getTypePtr();
std::string toString(const Type& type);

// Adopt / share an existing refcounted ListImpl without copying elements.
template<class T>
List<T>::List(c10::intrusive_ptr<c10::detail::ListImpl>&& elements)
: impl_(std::move(elements)) {}

template<class T>
List<T>::List(const c10::intrusive_ptr<c10::detail::ListImpl>& elements)
: impl_(elements) {}

// Empty list; the runtime element type is derived statically from T.
template<class T>
List<T>::List()
: List(make_intrusive<c10::detail::ListImpl>(
  typename c10::detail::ListImpl::list_type(),
  getTypePtr<T>())) {
  static_assert(!std::is_same_v<T, IValue>, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType) instead.");
}

// Copy the given values into a freshly allocated impl.
template<class T>
List<T>::List(ArrayRef<T> values)
: List(make_intrusive<c10::detail::ListImpl>(
  typename c10::detail::ListImpl::list_type(),
  getTypePtr<T>())) {
  static_assert(!std::is_same_v<T, IValue>, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
  impl_->list.reserve(values.size());
  for (const T& element : values) {
    impl_->list.push_back(element);
  }
}

template<class T>
List<T>::List(std::initializer_list<T> initial_values)
: List(ArrayRef<T>(initial_values)) {
  static_assert(!std::is_same_v<T, IValue>, "This constructor is not valid for List<IValue>. Please use c10::impl::GenericList(elementType).");
}

// Runtime-typed construction: only GenericList / List<Future> may choose the
// element type dynamically.
template<class T>
List<T>::List(TypePtr elementType)
: List(make_intrusive<c10::detail::ListImpl>(
  typename c10::detail::ListImpl::list_type(),
  std::move(elementType))) {
  static_assert(std::is_same_v<T, IValue> || std::is_same_v<T, c10::intrusive_ptr<ivalue::Future>>,
                "This constructor is only valid for c10::impl::GenericList or List<Future>.");
}
namespace impl {

// Cast a GenericList to a typed List<T>, checking element-type compatibility.
template<class T>
List<T> toTypedList(impl::GenericList list) {
  // If there's other instances of the list (i.e. list.use_count() > 1), then we have to be invariant
  // because upcasting would allow people to add types into the new list that would break the old list.
  // However, if there aren't any other instances of this list (i.e. list.use_count() == 1), then we can
  // allow upcasting. This can be a perf improvement since we can cast List<T> to List<optional<T>>
  // without having to copy it. This is also used to provide backwards compatibility with some old models
  // that serialized the index arguments to aten::index, aten::index_put, aten::index_put_ and aten::index_put_impl_
  // as List<Tensor> before we changed that argument to be List<optional<Tensor>>. When deserializing, we
  // have list.use_count() == 1 and can deserialize the List<Tensor> directly as List<optional<Tensor>>.
  TORCH_CHECK(*list.impl_->elementType == *getTypePtr<T>()
    || (list.use_count() == 1 && list.impl_->elementType->isSubtypeOf(*getTypePtr<T>()))
    , "Tried to cast a List<", toString(*list.impl_->elementType), "> to a List<", toString(*getTypePtr<T>()), ">. Types mismatch.");
  return List<T>(std::move(list.impl_));
}

// Type-erase a typed list; shares (or steals) the impl, no element copy.
template<class T>
impl::GenericList toList(List<T>&& list) {
  return GenericList(std::move(list.impl_));
}
template<class T>
impl::GenericList toList(const List<T>& list) {
  return GenericList(list.impl_);
}
}
// Deep copy: clones the underlying impl (and hence the element vector).
template<class T>
List<T> List<T>::copy() const {
  return List<T>(impl_->copy());
}

namespace detail {
// Convert a stored element to T. The IValue overloads unwrap the stored
// value; the generic overload passes non-IValue storage through untouched.
template<class T>
T list_element_to(T element) {
  return element;
}
template<class T>
T list_element_to(const IValue& element) {
  return element.template to<T>();
}
template<class T>
T list_element_to(IValue&& element) {
  return std::move(element).template to<T>();
}
// Wrap a T into the stored IValue form; the IValue specialization passes
// references/rvalues straight through to avoid a copy.
template<class T>
struct ListElementFrom {
  static IValue from(const T& element) {
    return element;
  }
  static IValue from(T&& element) {
    return std::move(element);
  }
};
template<>
struct ListElementFrom<IValue> {
  static const IValue& from(const IValue& element) {
    return element;
  }
  static IValue&& from(IValue&& element) {
    return std::move(element);
  }
};
}
namespace impl {

// Implicit conversion of the proxy reference to the element value (or a
// const reference, when the IValue accessor can hand one out).
template <class T, class Iterator>
ListElementReference<T, Iterator>::operator std::conditional_t<
    std::is_reference_v<typename c10::detail::ivalue_to_const_ref_overload_return<
        T>::type>,
    const T&,
    T>() const {
  return iterator_->template to<T>();
}

// Assigning through the proxy writes back into the underlying IValue slot.
template<class T, class Iterator>
ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(T&& new_value) && {
  *iterator_ = c10::detail::ListElementFrom<T>::from(std::move(new_value));
  return *this;
}

template<class T, class Iterator>
ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(const T& new_value) && {
  *iterator_ = c10::detail::ListElementFrom<T>::from(new_value);
  return *this;
}

// Proxy-to-proxy assignment copies the referenced IValue, not the iterator.
template<class T, class Iterator>
ListElementReference<T, Iterator>& ListElementReference<T, Iterator>::operator=(ListElementReference<T, Iterator>&& rhs) && noexcept {
  *iterator_ = *rhs.iterator_;
  return *this;
}

// Swap the referenced elements (needed so std algorithms work on proxies).
template<class T, class Iterator>
void swap(ListElementReference<T, Iterator>&& lhs, ListElementReference<T, Iterator>&& rhs) noexcept {
  std::swap(*lhs.iterator_, *rhs.iterator_);
}

template<class T, class Iterator>
bool operator==(const ListElementReference<T, Iterator>& lhs, const T& rhs) {
  const T& lhs_tmp = lhs;
  return lhs_tmp == rhs;
}

template<class T, class Iterator>
inline bool operator==(const T& lhs, const ListElementReference<T, Iterator>& rhs) {
  return rhs == lhs;
}

// Fetch a const reference to a stored element where possible; the
// optional<string> specialization avoids materializing a new optional.
template<class T>
inline typename ListElementConstReferenceTraits<T>::const_reference
list_element_to_const_ref(const IValue& element) {
  return element.template to<T>();
}

template<>
inline typename ListElementConstReferenceTraits<std::optional<std::string>>::const_reference
list_element_to_const_ref<std::optional<std::string>>(const IValue& element) {
  return element.toOptionalStringRef();
}
} // namespace impl
// Element access and mutation. All methods are const because List has
// shared-reference semantics: constness of the handle does not protect the
// underlying impl.
template<class T>
void List<T>::set(size_type pos, const value_type& value) const {
  impl_->list.at(pos) = c10::detail::ListElementFrom<T>::from(value);
}

template<class T>
void List<T>::set(size_type pos, value_type&& value) const {
  impl_->list.at(pos) = c10::detail::ListElementFrom<T>::from(std::move(value));
}

template<class T>
typename List<T>::internal_const_reference_type List<T>::get(size_type pos) const {
  return operator[](pos);
}

template<class T>
typename List<T>::internal_const_reference_type List<T>::operator[](size_type pos) const {
  return c10::impl::list_element_to_const_ref<T>(impl_->list.at(pos));
}

template<class T>
typename List<T>::internal_reference_type List<T>::operator[](size_type pos) {
  static_cast<void>(impl_->list.at(pos)); // Throw the exception if it is out of range.
  return {impl_->list.begin() + static_cast<typename decltype(impl_->list)::difference_type>(pos)};
}

// Move the element out; the slot is reset to a default-constructed T so the
// list stays correctly typed.
template<class T>
typename List<T>::value_type List<T>::extract(size_type pos) const {
  auto& elem = impl_->list.at(pos);
  auto result = c10::detail::list_element_to<T>(std::move(elem));
  // Reset the list element to a T() instead of None to keep it correctly typed
  elem = c10::detail::ListElementFrom<T>::from(T{});
  return result;
}

template<class T>
typename List<T>::iterator List<T>::begin() const {
  return iterator(impl_->list.begin());
}

template<class T>
typename List<T>::iterator List<T>::end() const {
  return iterator(impl_->list.end());
}

template<class T>
bool List<T>::empty() const {
  return impl_->list.empty();
}

template<class T>
typename List<T>::size_type List<T>::size() const {
  return impl_->list.size();
}

template<class T>
void List<T>::reserve(size_type new_cap) const {
  impl_->list.reserve(new_cap);
}

template<class T>
void List<T>::clear() const {
  impl_->list.clear();
}

template<class T>
typename List<T>::iterator List<T>::insert(iterator pos, const T& value) const {
  return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom<T>::from(value)) };
}

template<class T>
typename List<T>::iterator List<T>::insert(iterator pos, T&& value) const {
  return iterator { impl_->list.insert(pos.iterator_, c10::detail::ListElementFrom<T>::from(std::move(value))) };
}

template<class T>
template<class... Args>
typename List<T>::iterator List<T>::emplace(iterator pos, Args&&... value) const {
  // TODO Use list_element_from?
  return iterator { impl_->list.emplace(pos.iterator_, std::forward<Args>(value)...) };
}

template<class T>
void List<T>::push_back(const T& value) const {
  impl_->list.push_back(c10::detail::ListElementFrom<T>::from(value));
}

template<class T>
void List<T>::push_back(T&& value) const {
  impl_->list.push_back(c10::detail::ListElementFrom<T>::from(std::move(value)));
}

// Append all of b's elements; when we hold the only reference to b we can
// move its elements instead of copying.
template<class T>
void List<T>::append(List<T> b) const {
  if (b.use_count() == 1) {
    impl_->list.insert(impl_->list.end(), make_move_iterator(b.impl_->list.begin()), make_move_iterator(b.impl_->list.end()));
  } else {
    impl_->list.insert(impl_->list.end(), b.impl_->list.begin(), b.impl_->list.end());
  }
}

template<class T>
template<class... Args>
void List<T>::emplace_back(Args&&... args) const {
  // TODO Use list_element_from?
  impl_->list.push_back(T(std::forward<Args>(args)...));
}

template<class T>
typename List<T>::iterator List<T>::erase(iterator pos) const {
  return iterator { impl_->list.erase(pos.iterator_) };
}

template<class T>
typename List<T>::iterator List<T>::erase(iterator first, iterator last) const {
  return iterator { impl_->list.erase(first.iterator_, last.iterator_) };
}

template<class T>
void List<T>::pop_back() const {
  impl_->list.pop_back();
}

template<class T>
void List<T>::resize(size_type count) const {
  impl_->list.resize(count, T{});
}

template<class T>
void List<T>::resize(size_type count, const T& value) const {
  impl_->list.resize(count, value);
}
// Value equality (element-wise), as opposed to is() which tests identity.
template<class T>
bool operator==(const List<T>& lhs, const List<T>& rhs) {
  // Lists with the same identity trivially compare equal.
  if (lhs.impl_ == rhs.impl_) {
    return true;
  }

  // Otherwise, just compare values directly.
  return *lhs.impl_ == *rhs.impl_;
}

template<class T>
bool operator!=(const List<T>& lhs, const List<T>& rhs) {
  return !(lhs == rhs);
}

// Identity comparison: true iff both handles share the same impl.
template<class T>
bool List<T>::is(const List<T>& rhs) const {
  return this->impl_ == rhs.impl_;
}

// Materialize the elements into a std::vector (copies every element).
template<class T>
std::vector<T> List<T>::vec() const {
  std::vector<T> result(begin(), end());
  return result;
}

template<class T>
size_t List<T>::use_count() const {
  return impl_.use_count();
}

template <class T>
TypePtr List<T>::elementType() const {
  return impl_->elementType;
}

// Caller is responsible for type safety; used by internal machinery only.
template <class T>
void List<T>::unsafeSetElementType(TypePtr t) {
  impl_->elementType = std::move(t);
}
}
|
c
|
github
|
https://github.com/pytorch/pytorch
|
aten/src/ATen/core/List_inl.h
|
/*-------------------------------------------------------------------------
*
* local_source.c
* Functions for using a local data directory as the source.
*
* Portions Copyright (c) 2013-2026, PostgreSQL Global Development Group
*
*-------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <fcntl.h>
#include <unistd.h>
#include "common/logging.h"
#include "file_ops.h"
#include "rewind_source.h"
/*
 * A rewind_source that reads directly from a local data directory.
 */
typedef struct
{
	rewind_source common;		/* common interface functions */

	const char *datadir;		/* path to the source data directory */
} local_source;

static void local_traverse_files(rewind_source *source,
								 process_file_callback_t callback);
static char *local_fetch_file(rewind_source *source, const char *path,
							  size_t *filesize);
static void local_queue_fetch_file(rewind_source *source, const char *path,
								   size_t len);
static void local_queue_fetch_range(rewind_source *source, const char *path,
									off_t off, size_t len);
static void local_finish_fetch(rewind_source *source);
static void local_destroy(rewind_source *source);
/*
 * Create a rewind_source that serves files out of the local directory
 * 'datadir'.  The returned struct is heap-allocated; release it with its
 * destroy callback.
 */
rewind_source *
init_local_source(const char *datadir)
{
	local_source *src = pg_malloc0(sizeof(local_source));

	/* Fill in the common callback table. */
	src->common.traverse_files = local_traverse_files;
	src->common.fetch_file = local_fetch_file;
	src->common.queue_fetch_file = local_queue_fetch_file;
	src->common.queue_fetch_range = local_queue_fetch_range;
	src->common.finish_fetch = local_finish_fetch;
	src->common.destroy = local_destroy;

	/* A local source has no WAL insert position to report. */
	src->common.get_current_wal_insert_lsn = NULL;

	src->datadir = datadir;

	return &src->common;
}
/* Walk the source data directory, invoking 'callback' for each entry. */
static void
local_traverse_files(rewind_source *source, process_file_callback_t callback)
{
	traverse_datadir(((local_source *) source)->datadir, callback);
}

/* Read a whole file into memory; length is returned in *filesize. */
static char *
local_fetch_file(rewind_source *source, const char *path, size_t *filesize)
{
	return slurpFile(((local_source *) source)->datadir, path, filesize);
}
/*
 * Copy a file from source to target.
 *
 * 'len' is the expected length of the file.  Despite the "queue" in the
 * name, the copy happens immediately; queuing only matters for the
 * libpq-based source.
 */
static void
local_queue_fetch_file(rewind_source *source, const char *path, size_t len)
{
	const char *datadir = ((local_source *) source)->datadir;
	PGIOAlignedBlock buf;
	char		srcpath[MAXPGPATH];
	int			srcfd;
	size_t		written_len;

	snprintf(srcpath, sizeof(srcpath), "%s/%s", datadir, path);

	/* Open source file for reading */
	srcfd = open(srcpath, O_RDONLY | PG_BINARY, 0);
	if (srcfd < 0)
		pg_fatal("could not open source file \"%s\": %m",
				 srcpath);

	/* Truncate and open the target file for writing */
	open_target_file(path, true);

	/* Copy in aligned-buffer-sized chunks until EOF. */
	written_len = 0;
	for (;;)
	{
		ssize_t		read_len;

		read_len = read(srcfd, buf.data, sizeof(buf));

		if (read_len < 0)
			pg_fatal("could not read file \"%s\": %m", srcpath);
		else if (read_len == 0)
			break;				/* EOF reached */

		write_target_range(buf.data, written_len, read_len);
		written_len += read_len;
	}

	/*
	 * A local source is not expected to change while we're rewinding, so
	 * check that the size of the file matches our earlier expectation.
	 */
	if (written_len != len)
		pg_fatal("size of source file \"%s\" changed concurrently: %zu bytes expected, %zu copied",
				 srcpath, len, written_len);

	if (close(srcfd) != 0)
		pg_fatal("could not close file \"%s\": %m", srcpath);
}
/*
 * Copy a file from source to target, starting at 'off', for 'len' bytes.
 *
 * Unlike local_queue_fetch_file(), hitting EOF before 'len' bytes have been
 * copied is a fatal error here, since the caller asked for that exact range.
 */
static void
local_queue_fetch_range(rewind_source *source, const char *path, off_t off,
						size_t len)
{
	const char *datadir = ((local_source *) source)->datadir;
	PGIOAlignedBlock buf;
	char		srcpath[MAXPGPATH];
	int			srcfd;
	off_t		begin = off;
	off_t		end = off + len;

	snprintf(srcpath, sizeof(srcpath), "%s/%s", datadir, path);

	srcfd = open(srcpath, O_RDONLY | PG_BINARY, 0);
	if (srcfd < 0)
		pg_fatal("could not open source file \"%s\": %m",
				 srcpath);

	if (lseek(srcfd, begin, SEEK_SET) == -1)
		pg_fatal("could not seek in source file: %m");

	open_target_file(path, false);

	/* Copy the requested range in aligned-buffer-sized chunks. */
	while (end - begin > 0)
	{
		ssize_t		readlen;
		size_t		thislen;

		if (end - begin > sizeof(buf))
			thislen = sizeof(buf);
		else
			thislen = end - begin;

		readlen = read(srcfd, buf.data, thislen);

		if (readlen < 0)
			pg_fatal("could not read file \"%s\": %m", srcpath);
		else if (readlen == 0)
			pg_fatal("unexpected EOF while reading file \"%s\"", srcpath);

		write_target_range(buf.data, begin, readlen);
		begin += readlen;
	}

	if (close(srcfd) != 0)
		pg_fatal("could not close file \"%s\": %m", srcpath);
}
/*
 * Callback implementing rewind_source->finish_fetch for a local source.
 * A no-op: there is no queued work to flush.
 */
static void
local_finish_fetch(rewind_source *source)
{
	/*
	 * Nothing to do, local_queue_fetch_range() copies the ranges immediately.
	 */
}
/*
 * Callback implementing rewind_source->destroy: free the local_source
 * struct itself (allocated with palloc by the creator).
 */
static void
local_destroy(rewind_source *source)
{
	pfree(source);
}
|
c
|
github
|
https://github.com/postgres/postgres
|
src/bin/pg_rewind/local_source.c
|
# -*- coding: utf-8 -*-
# Run with one of these commands:
# > OPENERP_ADDONS_PATH='../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy PYTHONPATH=. python tests/test_ir_sequence.py
# > OPENERP_ADDONS_PATH='../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy nosetests tests/test_ir_sequence.py
# > OPENERP_ADDONS_PATH='../../../addons/trunk' OPENERP_PORT=8069 \
# OPENERP_DATABASE=yy PYTHONPATH=../:. unit2 test_ir_sequence
# This assumes an existing database.
import psycopg2
import psycopg2.errorcodes
import unittest2
import openerp
from openerp.tests import common
ADMIN_USER_ID = common.ADMIN_USER_ID
def registry(model):
    """Return the registry proxy for *model* on the current test database."""
    db_name = common.get_db_name()
    return openerp.modules.registry.RegistryManager.get(db_name)[model]
def cursor():
    """Open a fresh database cursor on the current test database."""
    db_name = common.get_db_name()
    return openerp.modules.registry.RegistryManager.get(db_name).cursor()
def drop_sequence(code):
    """Delete any ir.sequence / ir.sequence.type records matching *code*."""
    cr = cursor()
    for model_name in ('ir.sequence', 'ir.sequence.type'):
        model = registry(model_name)
        matching_ids = model.search(cr, ADMIN_USER_ID, [('code', '=', code)])
        model.unlink(cr, ADMIN_USER_ID, matching_ids)
    cr.commit()
    cr.close()
class test_ir_sequence_standard(unittest2.TestCase):
    """ A few tests for a 'Standard' (i.e. PostgreSQL) sequence. """

    def test_ir_sequence_create(self):
        """ Try to create a sequence object. """
        cr = cursor()
        # The ir.sequence.type record must exist first: ir.sequence refers
        # to it through the shared 'code'.
        d = dict(code='test_sequence_type', name='Test sequence type')
        c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
        assert c
        d = dict(code='test_sequence_type', name='Test sequence')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()

    def test_ir_sequence_search(self):
        """ Try a search. """
        cr = cursor()
        ids = registry('ir.sequence').search(cr, ADMIN_USER_ID, [], {})
        assert ids
        cr.commit()
        cr.close()

    def test_ir_sequence_draw(self):
        """ Try to draw a number. """
        cr = cursor()
        n = registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type', {})
        assert n
        cr.commit()
        cr.close()

    def test_ir_sequence_draw_twice(self):
        """ Try to draw a number from two transactions. """
        # Two open cursors = two concurrent transactions; both draws must
        # succeed without blocking for a 'standard' sequence.
        cr0 = cursor()
        cr1 = cursor()
        n0 = registry('ir.sequence').next_by_code(cr0, ADMIN_USER_ID, 'test_sequence_type', {})
        assert n0
        n1 = registry('ir.sequence').next_by_code(cr1, ADMIN_USER_ID, 'test_sequence_type', {})
        assert n1
        cr0.commit()
        cr1.commit()
        cr0.close()
        cr1.close()

    @classmethod
    def tearDownClass(cls):
        # Remove the records created by the tests above.
        drop_sequence('test_sequence_type')
class test_ir_sequence_no_gap(unittest2.TestCase):
    """ Copy of the previous tests for a 'No gap' sequence. """

    def test_ir_sequence_create_no_gap(self):
        """ Try to create a sequence object. """
        cr = cursor()
        d = dict(code='test_sequence_type_2', name='Test sequence type')
        c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
        assert c
        d = dict(code='test_sequence_type_2', name='Test sequence',
            implementation='no_gap')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()

    def test_ir_sequence_draw_no_gap(self):
        """ Try to draw a number. """
        cr = cursor()
        n = registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_2', {})
        assert n
        cr.commit()
        cr.close()

    def test_ir_sequence_draw_twice_no_gap(self):
        """ Try to draw a number from two transactions.
        This is expected to not work.
        """
        cr0 = cursor()
        cr1 = cursor()
        cr1._default_log_exceptions = False # Prevent logging a traceback
        # The second draw must fail: the assertion below checks that the
        # server reports LOCK_NOT_AVAILABLE while the first transaction is
        # still open.
        with self.assertRaises(psycopg2.OperationalError) as e:
            n0 = registry('ir.sequence').next_by_code(cr0, ADMIN_USER_ID, 'test_sequence_type_2', {})
            assert n0
            n1 = registry('ir.sequence').next_by_code(cr1, ADMIN_USER_ID, 'test_sequence_type_2', {})
        self.assertEqual(e.exception.pgcode, psycopg2.errorcodes.LOCK_NOT_AVAILABLE, msg="postgresql returned an incorrect errcode")
        cr0.close()
        cr1.close()

    @classmethod
    def tearDownClass(cls):
        drop_sequence('test_sequence_type_2')
class test_ir_sequence_change_implementation(unittest2.TestCase):
    """ Create sequence objects and change their ``implementation`` field. """
    # NOTE: method names are numbered (1_, 2_, 3_) so that unittest's
    # alphabetical ordering runs create -> write -> unlink in sequence.

    def test_ir_sequence_1_create(self):
        """ Try to create a sequence object. """
        cr = cursor()
        d = dict(code='test_sequence_type_3', name='Test sequence type')
        c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
        assert c
        d = dict(code='test_sequence_type_3', name='Test sequence')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        d = dict(code='test_sequence_type_4', name='Test sequence type')
        c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
        assert c
        d = dict(code='test_sequence_type_4', name='Test sequence',
            implementation='no_gap')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()

    def test_ir_sequence_2_write(self):
        # Flip both sequences to 'standard' then back to 'no_gap'; neither
        # write should raise.
        cr = cursor()
        ids = registry('ir.sequence').search(cr, ADMIN_USER_ID,
            [('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])], {})
        registry('ir.sequence').write(cr, ADMIN_USER_ID, ids,
            {'implementation': 'standard'}, {})
        registry('ir.sequence').write(cr, ADMIN_USER_ID, ids,
            {'implementation': 'no_gap'}, {})
        cr.commit()
        cr.close()

    def test_ir_sequence_3_unlink(self):
        cr = cursor()
        ids = registry('ir.sequence').search(cr, ADMIN_USER_ID,
            [('code', 'in', ['test_sequence_type_3', 'test_sequence_type_4'])], {})
        registry('ir.sequence').unlink(cr, ADMIN_USER_ID, ids, {})
        cr.commit()
        cr.close()

    @classmethod
    def tearDownClass(cls):
        drop_sequence('test_sequence_type_3')
        drop_sequence('test_sequence_type_4')
class test_ir_sequence_generate(unittest2.TestCase):
    """ Create sequence objects and generate some values. """

    def test_ir_sequence_create(self):
        """ Try to create a sequence object. """
        cr = cursor()
        d = dict(code='test_sequence_type_5', name='Test sequence type')
        c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
        assert c
        d = dict(code='test_sequence_type_5', name='Test sequence')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()
        cr = cursor()
        f = lambda *a: registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_5', {})
        # The test expects a fresh sequence to yield '1' .. '9' in order.
        assert all(str(x) == f() for x in xrange(1,10))
        cr.commit()
        cr.close()

    def test_ir_sequence_create_no_gap(self):
        """ Try to create a sequence object. """
        cr = cursor()
        d = dict(code='test_sequence_type_6', name='Test sequence type')
        c = registry('ir.sequence.type').create(cr, ADMIN_USER_ID, d, {})
        assert c
        d = dict(code='test_sequence_type_6', name='Test sequence')
        c = registry('ir.sequence').create(cr, ADMIN_USER_ID, d, {})
        assert c
        cr.commit()
        cr.close()
        cr = cursor()
        f = lambda *a: registry('ir.sequence').next_by_code(cr, ADMIN_USER_ID, 'test_sequence_type_6', {})
        # Same expectation as above for the second sequence.
        assert all(str(x) == f() for x in xrange(1,10))
        cr.commit()
        cr.close()

    @classmethod
    def tearDownClass(cls):
        drop_sequence('test_sequence_type_5')
        drop_sequence('test_sequence_type_6')
class Test_ir_sequence_init(common.TransactionCase):

    def test_00(self):
        # Deliberately shadows the module-level registry() helper: inside a
        # TransactionCase the fixture-provided registry/cr/uid are used.
        registry, cr, uid = self.registry, self.cr, self.uid
        # test if read statement return the good number_next value (from postgreSQL sequence and not ir_sequence value)
        sequence = registry('ir.sequence')
        # first creation of sequence (normal)
        values = {'number_next': 1,
            'company_id': 1,
            'padding': 4,
            'number_increment': 1,
            'implementation': 'standard',
            'name': 'test-sequence-00'}
        seq_id = sequence.create(cr, uid, values)
        # Call get next 4 times
        sequence.next_by_id(cr, uid, seq_id)
        sequence.next_by_id(cr, uid, seq_id)
        sequence.next_by_id(cr, uid, seq_id)
        read_sequence = sequence.next_by_id(cr, uid, seq_id)
        # Read the value of the current sequence; padding=4 formats it as 0004
        assert read_sequence == "0004", 'The actual sequence value must be 4. reading : %s' % read_sequence
        # reset sequence to 1 by write method calling
        sequence.write(cr, uid, [seq_id], {'number_next': 1})
        # Read the value of the current sequence
        read_sequence = sequence.next_by_id(cr, uid, seq_id)
        assert read_sequence == "0001", 'The actual sequence value must be 1. reading : %s' % read_sequence
# Allow running this module directly (see invocation examples at the top of
# the file).
if __name__ == "__main__":
    unittest2.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto.exception
from boto.compat import json
import requests
import boto
class SearchServiceException(Exception):
    """Generic error reported by the CloudSearch document service."""
    pass
class CommitMismatchError(Exception):
    """The add/delete counts in the CloudSearch response did not match the
    number of operations sent in the commit batch."""
    pass
class EncodingError(Exception):
    """
    Content sent for Cloud Search indexing was incorrectly encoded.

    This usually happens when a document is marked as unicode but non-unicode
    characters are present.
    """
    pass
class ContentTooLongError(Exception):
    """
    Content sent for Cloud Search indexing was too long.

    This will usually happen when documents queued for indexing add up to more
    than the limit allowed per upload batch (5MB).
    """
    pass
class DocumentServiceConnection(object):
    """
    A CloudSearch document service.

    The DocumentServiceConnection is used to add, remove and update documents in
    CloudSearch. Commands are uploaded to CloudSearch in SDF (Search Document Format).

    To generate an appropriate SDF, use :func:`add` to add or update documents,
    as well as :func:`delete` to remove documents.

    Once the set of documents is ready to be indexed, use :func:`commit` to send
    the commands to CloudSearch.

    If there are a lot of documents to index, it may be preferable to split the
    generation of SDF data and the actual uploading into CloudSearch. Retrieve
    the current SDF with :func:`get_sdf`. If this file is then uploaded into S3,
    it can be retrieved back afterwards for upload into CloudSearch using
    :func:`add_sdf_from_s3`.

    The SDF is not cleared after a :func:`commit`. If you wish to continue
    using the DocumentServiceConnection for another batch upload of commands,
    you will need to :func:`clear_sdf` first to stop the previous batch of
    commands from being uploaded again.
    """

    def __init__(self, domain=None, endpoint=None):
        self.domain = domain
        self.endpoint = endpoint
        if not self.endpoint:
            # NOTE(review): assumes 'domain' is provided whenever 'endpoint'
            # is not -- a None domain would raise AttributeError here.
            self.endpoint = domain.doc_service_endpoint
        # Pending add/delete commands, in insertion order.
        self.documents_batch = []
        # Explicitly loaded SDF (see add_sdf_from_s3); takes precedence over
        # documents_batch when set.
        self._sdf = None

    def add(self, _id, version, fields, lang='en'):
        """
        Add a document to be processed by the DocumentService

        The document will not actually be added until :func:`commit` is called

        :type _id: string
        :param _id: A unique ID used to refer to this document.

        :type version: int
        :param version: Version of the document being indexed. If a file is
            being reindexed, the version should be higher than the existing one
            in CloudSearch.

        :type fields: dict
        :param fields: A dictionary of key-value pairs to be uploaded .

        :type lang: string
        :param lang: The language code the data is in. Only 'en' is currently
            supported
        """
        d = {'type': 'add', 'id': _id, 'version': version, 'lang': lang,
            'fields': fields}
        self.documents_batch.append(d)

    def delete(self, _id, version):
        """
        Schedule a document to be removed from the CloudSearch service

        The document will not actually be scheduled for removal until :func:`commit` is called

        :type _id: string
        :param _id: The unique ID of this document.

        :type version: int
        :param version: Version of the document to remove. The delete will only
            occur if this version number is higher than the version currently
            in the index.
        """
        d = {'type': 'delete', 'id': _id, 'version': version}
        self.documents_batch.append(d)

    def get_sdf(self):
        """
        Generate the working set of documents in Search Data Format (SDF)

        :rtype: string
        :returns: JSON-formatted string of the documents in SDF
        """
        # An explicitly loaded SDF (add_sdf_from_s3) wins over the batch.
        return self._sdf if self._sdf else json.dumps(self.documents_batch)

    def clear_sdf(self):
        """
        Clear the working documents from this DocumentServiceConnection

        This should be used after :func:`commit` if the connection will be reused
        for another set of documents.
        """
        self._sdf = None
        self.documents_batch = []

    def add_sdf_from_s3(self, key_obj):
        """
        Load an SDF from S3

        Using this method will result in documents added through
        :func:`add` and :func:`delete` being ignored.

        :type key_obj: :class:`boto.s3.key.Key`
        :param key_obj: An S3 key which contains an SDF
        """
        # @todo: (lucas) would be nice if this could just take an s3://uri...
        self._sdf = key_obj.get_contents_as_string()

    def commit(self):
        """
        Actually send an SDF to CloudSearch for processing

        If an SDF file has been explicitly loaded it will be used. Otherwise,
        documents added through :func:`add` and :func:`delete` will be used.

        :rtype: :class:`CommitResponse`
        :returns: A summary of documents added and deleted
        """
        sdf = self.get_sdf()

        if ': null' in sdf:
            # A literal JSON null usually means a field value was None;
            # log surrounding context because the service tends to reject
            # such batches.
            boto.log.error('null value in sdf detected. This will probably raise '
                '500 error.')
            index = sdf.index(': null')
            boto.log.error(sdf[index - 100:index + 100])

        url = "http://%s/2011-02-01/documents/batch" % (self.endpoint)

        # Keep-alive is automatic in a post-1.0 requests world.
        session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(
            pool_connections=20,
            pool_maxsize=50,
            max_retries=5
        )
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'})

        return CommitResponse(r, self, sdf)
class CommitResponse(object):
    """Wrapper for response to Cloudsearch document batch commit.

    :type response: :class:`requests.models.Response`
    :param response: Response from Cloudsearch /documents/batch API

    :type doc_service: :class:`boto.cloudsearch.document.DocumentServiceConnection`
    :param doc_service: Object containing the documents posted and methods to
        retry

    :raises: :class:`boto.exception.BotoServerError`
    :raises: :class:`boto.cloudsearch.document.SearchServiceException`
    :raises: :class:`boto.cloudsearch.document.EncodingError`
    :raises: :class:`boto.cloudsearch.document.ContentTooLongError`
    """
    def __init__(self, response, doc_service, sdf):
        self.response = response
        self.doc_service = doc_service
        self.sdf = sdf

        _body = response.content.decode('utf-8')

        try:
            self.content = json.loads(_body)
        except ValueError:
            # Narrowed from a bare ``except:`` so unrelated errors
            # (KeyboardInterrupt, SystemExit, ...) are not swallowed.
            # json.loads signals malformed JSON with ValueError (its
            # JSONDecodeError subclass on newer Pythons).
            boto.log.error('Error indexing documents.\nResponse Content:\n{0}\n\n'
                'SDF:\n{1}'.format(_body, self.sdf))
            raise boto.exception.BotoServerError(self.response.status_code, '',
                body=_body)

        self.status = self.content['status']
        if self.status == 'error':
            self.errors = [e.get('message') for e in self.content.get('errors',
                [])]
            # Surface the two specifically recognized failure modes as
            # dedicated exception types.
            for e in self.errors:
                if "Illegal Unicode character" in e:
                    raise EncodingError("Illegal Unicode character in document")
                elif e == "The Content-Length is too long":
                    raise ContentTooLongError("Content was too long")
            if 'adds' not in self.content or 'deletes' not in self.content:
                raise SearchServiceException("Error indexing documents"
                    " => %s" % self.content.get('message', ''))
        else:
            self.errors = []

        self.adds = self.content['adds']
        self.deletes = self.content['deletes']
        self._check_num_ops('add', self.adds)
        self._check_num_ops('delete', self.deletes)

    def _check_num_ops(self, type_, response_num):
        """Raise exception if number of ops in response doesn't match commit

        :type type_: str
        :param type_: Type of commit operation: 'add' or 'delete'

        :type response_num: int
        :param response_num: Number of adds or deletes in the response.

        :raises: :class:`boto.cloudsearch.document.CommitMismatchError`
        """
        commit_num = len([d for d in self.doc_service.documents_batch
            if d['type'] == type_])

        if response_num != commit_num:
            raise CommitMismatchError(
                'Incorrect number of {0}s returned. Commit: {1} Response: {2}'\
                    .format(type_, commit_num, response_num))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. See accompanying LICENSE file.
*/
package org.apache.hadoop.security.authentication.server;
import static org.apache.hadoop.security.authentication.server.LdapAuthenticationHandler.BASE_DN;
import static org.apache.hadoop.security.authentication.server.LdapAuthenticationHandler.PROVIDER_URL;
import static org.apache.hadoop.security.authentication.server.LdapAuthenticationHandler.TYPE;
import static org.apache.hadoop.security.authentication.server.MultiSchemeAuthenticationHandler.SCHEMES_PROPERTY;
import static org.apache.hadoop.security.authentication.server.MultiSchemeAuthenticationHandler.AUTH_HANDLER_PROPERTY;
import static org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler.PRINCIPAL;
import static org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler.KEYTAB;
import static org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler.NAME_RULES;
import static org.apache.hadoop.security.authentication.server.LdapConstants.*;
import static org.apache.hadoop.security.authentication.server.HttpConstants.*;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.File;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.codec.binary.Base64;
import org.apache.directory.server.annotations.CreateLdapServer;
import org.apache.directory.server.annotations.CreateTransport;
import org.apache.directory.server.core.annotations.ApplyLdifs;
import org.apache.directory.server.core.annotations.ContextEntry;
import org.apache.directory.server.core.annotations.CreateDS;
import org.apache.directory.server.core.annotations.CreatePartition;
import org.apache.directory.server.core.integ.AbstractLdapTestUnit;
import org.apache.directory.server.core.integ.ApacheDSTestExtension;
import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.extension.ExtendWith;
/**
 * This unit test verifies the functionality of "multi-scheme" auth handler.
 */
@ExtendWith(ApacheDSTestExtension.class)
@CreateLdapServer(
    transports =
      {
          @CreateTransport(protocol = "LDAP", address = LDAP_SERVER_ADDR),
      })
@CreateDS(allowAnonAccess = true,
    partitions = {
        @CreatePartition(
            name = "Test_Partition", suffix = LDAP_BASE_DN,
            contextEntry = @ContextEntry(
                entryLdif = "dn: "+ LDAP_BASE_DN+ " \n"
                    + "dc: example\n"
                    + "objectClass: top\n"
                    + "objectClass: domain\n\n"))})
@ApplyLdifs({
    "dn: uid=bjones," + LDAP_BASE_DN,
    "cn: Bob Jones",
    "sn: Jones",
    "objectClass: inetOrgPerson",
    "uid: bjones",
    "userPassword: p@ssw0rd"})
public class TestMultiSchemeAuthenticationHandler
    extends AbstractLdapTestUnit {
  private KerberosSecurityTestcase krbTest = new KerberosSecurityTestcase();
  private MultiSchemeAuthenticationHandler handler;

  @BeforeEach
  public void setUp() throws Exception {
    krbTest.startMiniKdc();
    // create keytab
    File keytabFile = new File(KerberosTestUtils.getKeytabFile());
    String clientPrinc = KerberosTestUtils.getClientPrincipal();
    String serverPrinc = KerberosTestUtils.getServerPrincipal();
    // createPrincipal expects principal names without the realm suffix.
    clientPrinc = clientPrinc.substring(0, clientPrinc.lastIndexOf("@"));
    serverPrinc = serverPrinc.substring(0, serverPrinc.lastIndexOf("@"));
    krbTest.getKdc().createPrincipal(keytabFile, clientPrinc, serverPrinc);

    // Configure the handler. An init failure should fail the test directly;
    // the previous try/catch here only rethrew the exception and has been
    // removed.
    handler = new MultiSchemeAuthenticationHandler();
    handler.init(getDefaultProperties());
  }

  @AfterEach
  public void tearDown() throws Exception {
    krbTest.stopMiniKdc();
  }

  /**
   * Build a configuration enabling both BASIC (backed by LDAP) and
   * NEGOTIATE (backed by Kerberos) schemes.
   */
  private Properties getDefaultProperties() {
    Properties p = new Properties();
    p.setProperty(SCHEMES_PROPERTY, BASIC + "," + NEGOTIATE);
    p.setProperty(String.format(AUTH_HANDLER_PROPERTY, "negotiate"),
        "kerberos");
    p.setProperty(String.format(AUTH_HANDLER_PROPERTY, "basic"), "ldap");
    // Kerberos related config
    p.setProperty(PRINCIPAL, KerberosTestUtils.getServerPrincipal());
    p.setProperty(KEYTAB, KerberosTestUtils.getKeytabFile());
    p.setProperty(NAME_RULES,
        "RULE:[1:$1@$0](.*@" + KerberosTestUtils.getRealm()+")s/@.*//\n");
    // LDAP related config
    p.setProperty(BASE_DN, LDAP_BASE_DN);
    p.setProperty(PROVIDER_URL, String.format("ldap://%s:%s", LDAP_SERVER_ADDR,
        getLdapServer().getPort()));
    return p;
  }

  @Test
  @Timeout(value = 60, unit = TimeUnit.SECONDS)
  public void testRequestWithoutAuthorization() throws Exception {
    HttpServletRequest request = mock(HttpServletRequest.class);
    HttpServletResponse response = mock(HttpServletResponse.class);

    // With no Authorization header, both schemes must be offered and the
    // request rejected with 401.
    assertNull(handler.authenticate(request, response));
    verify(response).addHeader(WWW_AUTHENTICATE_HEADER, BASIC);
    verify(response).addHeader(WWW_AUTHENTICATE_HEADER, NEGOTIATE);
    verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
  }

  @Test
  @Timeout(value = 60, unit = TimeUnit.SECONDS)
  public void testRequestWithInvalidAuthorization() throws Exception {
    HttpServletRequest request = mock(HttpServletRequest.class);
    HttpServletResponse response = mock(HttpServletResponse.class);

    // A malformed header (credentials without a scheme prefix) must be
    // rejected the same way as a missing header.
    final Base64 base64 = new Base64(0);
    String credentials = "bjones:invalidpassword";
    when(request.getHeader(AUTHORIZATION_HEADER))
        .thenReturn(base64.encodeToString(credentials.getBytes()));
    assertNull(handler.authenticate(request, response));
    verify(response).addHeader(WWW_AUTHENTICATE_HEADER, BASIC);
    verify(response).addHeader(WWW_AUTHENTICATE_HEADER, NEGOTIATE);
    verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
  }

  @Test
  @Timeout(value = 60, unit = TimeUnit.SECONDS)
  public void testRequestWithLdapAuthorization() throws Exception {
    HttpServletRequest request = mock(HttpServletRequest.class);
    HttpServletResponse response = mock(HttpServletResponse.class);

    // Valid BASIC credentials for the LDIF-provisioned user must yield a
    // token of the LDAP handler's type.
    final Base64 base64 = new Base64(0);
    String credentials = base64.encodeToString("bjones:p@ssw0rd".getBytes());
    String authHeader = BASIC + " " + credentials;
    when(request.getHeader(AUTHORIZATION_HEADER))
        .thenReturn(authHeader);
    AuthenticationToken token = handler.authenticate(request, response);
    assertNotNull(token);
    verify(response).setStatus(HttpServletResponse.SC_OK);
    assertEquals(TYPE, token.getType());
    assertEquals(token.getUserName(), "bjones");
    assertEquals(token.getName(), "bjones");
  }

  @Test
  @Timeout(value = 60, unit = TimeUnit.SECONDS)
  public void testRequestWithInvalidKerberosAuthorization() throws Exception {
    // A garbage NEGOTIATE token must be rejected with AuthenticationException.
    String token = new Base64(0).encodeToString(new byte[]{0, 1, 2});
    HttpServletRequest request = mock(HttpServletRequest.class);
    HttpServletResponse response = mock(HttpServletResponse.class);
    when(request.getHeader(AUTHORIZATION_HEADER)).thenReturn(
        NEGOTIATE + token);
    try {
      handler.authenticate(request, response);
      fail();
    } catch (AuthenticationException ex) {
      // Expected
    } catch (Exception ex) {
      fail("Wrong exception :"+ex);
    }
  }
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestMultiSchemeAuthenticationHandler.java
|
import unittest
from test import support
import io # C implementation.
import _pyio as pyio # Python implementation.
# Simple test to ensure that optimizations in the IO library deliver the
# expected results. For best testing, run this under a debug-build Python too
# (to exercise asserts in the C code).
# Total lengths to test: every size from 1..256 plus assorted powers of two
# and round numbers around common stdio buffer sizes.
lengths = list(range(1, 257)) + [512, 1000, 1024, 2048, 4096, 8192, 10000,
                                 16384, 32768, 65536, 1000000]
class BufferSizeTest:
    """Mixin exercising buffered readline across many buffer sizes.

    Subclasses supply ``open`` (the C ``io.open`` or the pure-Python
    ``_pyio.open``) so both implementations are tested.
    """

    def try_one(self, s):
        # Write s + "\n" + s to file, then open it and ensure that successive
        # .readline()s deliver what we wrote.

        # Ensure we can open TESTFN for writing.
        support.unlink(support.TESTFN)

        # Since C doesn't guarantee we can write/read arbitrary bytes in text
        # files, use binary mode.
        f = self.open(support.TESTFN, "wb")
        try:
            # write once with \n and once without
            f.write(s)
            f.write(b"\n")
            f.write(s)
            f.close()
            # BUG FIX: read back through self.open as well. The original used
            # the builtin open() here, so the pure-Python implementation
            # (PyBufferSizeTest) was never exercised on the read path.
            f = self.open(support.TESTFN, "rb")
            line = f.readline()
            self.assertEqual(line, s + b"\n")
            line = f.readline()
            self.assertEqual(line, s)
            line = f.readline()
            self.assertFalse(line) # Must be at EOF
            f.close()
        finally:
            support.unlink(support.TESTFN)

    def drive_one(self, pattern):
        for length in lengths:
            # Repeat string 'pattern' as often as needed to reach total length
            # 'length'.  Then call try_one with that string, a string one larger
            # than that, and a string one smaller than that.  Try this with all
            # small sizes and various powers of 2, so we exercise all likely
            # stdio buffer sizes, and "off by one" errors on both sides.
            q, r = divmod(length, len(pattern))
            teststring = pattern * q + pattern[:r]
            self.assertEqual(len(teststring), length)
            self.try_one(teststring)
            self.try_one(teststring + b"x")
            self.try_one(teststring[:-1])

    def test_primepat(self):
        # A pattern with prime length, to avoid simple relationships with
        # stdio buffer sizes.
        self.drive_one(b"1234567890\00\01\02\03\04\05\06")

    def test_nullpat(self):
        self.drive_one(b'\0' * 1000)
class CBufferSizeTest(BufferSizeTest, unittest.TestCase):
    # Exercise the C implementation of the io module.
    open = io.open
class PyBufferSizeTest(BufferSizeTest, unittest.TestCase):
    # Exercise the pure-Python implementation (_pyio); staticmethod stops
    # the function from being bound as a method of the test class.
    open = staticmethod(pyio.open)
# Run both implementations' test cases when executed directly.
if __name__ == "__main__":
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
***************************************************************************
SilentProgress.py
---------------------
Date : April 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'April 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
class SilentProgress:
    """Progress handler that swallows all feedback except errors.

    Every progress callback is a no-op; error() writes the message to
    stdout so failures are still visible when no GUI is attached.
    """

    def error(self, msg):
        # print() call syntax works identically under Python 2 (single
        # argument) and Python 3, unlike the Python-2-only statement form
        # 'print msg' used previously.
        print(msg)

    def setText(self, text):
        pass

    def setPercentage(self, i):
        pass

    def setInfo(self, _):
        pass

    def setCommand(self, _):
        pass

    def setDebugInfo(self, _):
        pass

    def setConsoleInfo(self, _):
        pass

    def close(self):
        pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
import sys,argparse,re,os
from stanfordnlp.corenlp import *
from common.AMRGraph import *
from pprint import pprint
import cPickle as pickle
from Aligner import Aligner
from common.SpanGraph import SpanGraph
from depparser import CharniakParser,StanfordDepParser,ClearDepParser,TurboDepParser, MateDepParser
from collections import OrderedDict
import constants
log = sys.stdout
def load_hand_alignments(hand_aligned_file):
    """Read a hand-aligned AMR file and map sentence id -> alignment string."""
    comments, amr_strings = readAMR(hand_aligned_file)
    return dict((c['id'], c['alignments']) for c in comments)
def readAMR(amrfile_path):
    """Parse an AMR file into parallel lists of comment dicts and AMR strings.

    Comment lines start with '#' and carry '::key value' pairs; blank lines
    separate annotations; all other lines are AMR graph text joined with
    spaces (a trailing space is kept, matching the original format).
    """
    fh = codecs.open(amrfile_path, 'r', encoding='utf-8')
    comments = []
    amrs = []
    cur_comment = {}
    cur_amr = ''
    for raw in fh.readlines():
        if raw.startswith('#'):
            # Pull out every '::key value' pair on the comment line.
            for m in re.finditer("::([^:\s]+)\s(((?!::).)*)", raw):
                cur_comment[m.group(1)] = m.group(2)
        elif raw.strip():
            cur_amr += raw.strip() + ' '
        else:
            # Blank line: flush the annotation collected so far.
            if cur_amr and cur_comment:
                comments.append(cur_comment)
                amrs.append(cur_amr)
                cur_amr = ''
                cur_comment = {}
    # Flush a trailing annotation with no final blank line.
    if cur_amr and cur_comment:
        comments.append(cur_comment)
        amrs.append(cur_amr)
    fh.close()
    return (comments, amrs)
def _write_sentences(file_path,sentences):
"""
write out the sentences to file
"""
output = codecs.open(file_path,'w',encoding='utf-8')
for sent in sentences:
output.write(sent+'\n')
output.close()
def _write_tok_sentences(file_path,instances,comments=None):
output_tok = open(file_path,'w')
for i,inst in enumerate(instances):
if comments:
output_tok.write("%s %s\n" % (comments[i]['id'],' '.join(inst.get_tokenized_sent())))
else:
output_tok.write("%s\n" % (' '.join(inst.get_tokenized_sent())))
output_tok.close()
def _write_tok_amr(file_path,amr_file,instances):
output_tok = open(file_path,'w')
origin_comment_string = ''
origin_amr_string = ''
comment_list = []
amr_list = []
for line in open(amr_file,'r').readlines():
if line.startswith('#'):
origin_comment_string += line
elif not line.strip():
if origin_amr_string and origin_comment_string:
comment_list.append(origin_comment_string)
amr_list.append(origin_amr_string)
origin_amr_string = ''
origin_comment_string = ''
else:
origin_amr_string += line
if origin_amr_string and origin_comment_string:
comment_list.append(origin_comment_string)
amr_list.append(origin_amr_string)
for i in xrange(len(instances)):
output_tok.write(comment_list[i])
output_tok.write("# ::tok %s\n" % (' '.join(instances[i].get_tokenized_sent())))
output_tok.write(amr_list[i])
output_tok.write('\n')
output_tok.close()
def _add_amr(instances, amr_strings):
    """Parse each AMR string and attach it to the corresponding instance."""
    assert len(instances) == len(amr_strings)
    for inst, amr_str in zip(instances, amr_strings):
        inst.addAMR(AMR.parse_string(amr_str))
def _load_cparse(cparse_filename):
    '''
    Load the constituent parse trees, one tree per line.
    '''
    from nltk.tree import Tree
    with open(cparse_filename, 'r') as cf:
        return [Tree(line.strip()) for line in cf]
def _fix_prop_head(inst, ctree, start_index, height):
    """Locate the dependency head token of a PropBank argument span.

    The span is the subtree `height`+1 levels above the leaf at
    `start_index` in the constituent tree; the dependency links of the
    tokens inside that span are walked upward until the walk would leave
    the span, and the last in-span token visited is returned (0-based),
    or None if the first token is already outside the span.
    """
    head_index = None
    tree_pos = ctree.leaf_treeposition(start_index)
    # Subtree covering the argument span.
    span_root = ctree[tree_pos[:-(height+1)]]
    end_index = start_index + len(span_root.leaves())
    # NOTE(review): inst.tokens appears to be 1-based (hence the +1/-1
    # adjustments below) -- confirm against the instance builder.
    cur = inst.tokens[start_index+1]
    visited = set()
    while cur['id'] - 1 < end_index and cur['id'] - 1 >= start_index:
        if cur['id'] not in visited:
            visited.add(cur['id'])
        else:
            # Already-seen token: a dependency cycle; step to the next
            # token positionally instead of following 'head'.
            cur = inst.tokens[cur['id']+1]
            continue
        head_index = cur['id'] - 1
        if 'head' in cur:
            # Follow the dependency link upward.
            cur = inst.tokens[cur['head']]
        else:
            # No head recorded; fall back to the next token.
            cur = inst.tokens[cur['id']+1]
    return head_index
def _add_prop(instances, prop_filename, dep_filename, FIX_PROP_HEAD=False):
    """Attach PropBank predicate/argument annotations to the instances.

    Each line of *prop_filename* is '<predicate info>-----<argument info>'.
    When FIX_PROP_HEAD is true, argument head indices are recomputed from
    the constituent parse (see _fix_prop_head) instead of taken from the
    prop file.
    """
    ctree_list = None
    if FIX_PROP_HEAD:
        # The constituent parse file shares the dep file's name minus its
        # last extension.
        cparse_filename = dep_filename.rsplit('.',1)[0]
        ctree_list = _load_cparse(cparse_filename)
    with open(prop_filename,'r') as f:
        for line in f:
            prd_info = line.split('-----')[0]
            arg_info = line.split('-----')[1]
            fn,sid,ppos,ptype,pred,frameset = prd_info.strip().split()
            sid = int(sid)
            ppos = int(ppos)
            # AMR frame names use 'frame-01' rather than PropBank's 'frame.01'.
            frameset = frameset.replace('.','-')
            # Arguments look like 'start:height|head-label'; 'head' optional.
            for match in re.finditer('(\d+):(\d+)(\|(\d+))?\-([^:\|\s]+)',arg_info):
                start_index = int(match.group(1))
                height = int(match.group(2))
                head_index = match.group(4)
                label = match.group(5)
                # 'rel' marks the predicate itself, not an argument.
                if label != 'rel':
                    if FIX_PROP_HEAD: head_index = _fix_prop_head(instances[sid],ctree_list[sid],start_index,height)
                    # +1 shifts to the instance's 1-based token numbering.
                    instances[sid].addProp(ppos+1,frameset,int(head_index)+1,label)
def _add_dependency(instances,result,FORMAT="stanford"):
if FORMAT=="stanford":
i = 0
for line in result.split('\n'):
if line.strip():
split_entry = re.split("\(|, ", line[:-1])
if len(split_entry) == 3:
rel, l_lemma, r_lemma = split_entry
m = re.match(r'(?P<lemma>.+)-(?P<index>[^-]+)', l_lemma)
l_lemma, l_index = m.group('lemma'), m.group('index')
m = re.match(r'(?P<lemma>.+)-(?P<index>[^-]+)', r_lemma)
r_lemma, r_index = m.group('lemma'), m.group('index')
instances[i].addDependency( rel, l_index, r_index )
else:
i += 1
elif FORMAT == "clear":
i = 0
for line in result.split('\n'):
if line.strip():
line = line.split()
instances[i].addDependency( line[6], line[5], line[0])
else:
i += 1
elif FORMAT == "turbo":
i = 0
for line in result.split('\n'):
if line.strip():
line = line.split()
instances[i].addDependency( line[7], line[6], line[0])
else:
i += 1
elif FORMAT == "mate":
i = 0
for line in result.split('\n'):
if line.strip():
line = line.split()
instances[i].addDependency( line[11], line[9], line[0])
else:
i += 1
elif FORMAT in ["stanfordConvert","stdconv+charniak"]:
i = 0
splitre = re.compile(r'\(|, ')
for line in result.split('\n'):
if line.strip():
split_entry = splitre.split(line[:-1])
if len(split_entry) == 3:
rel, l_lemma, r_lemma = split_entry
l_lemma, l_index = l_lemma.rsplit('-', 1)
r_lemma, r_index = r_lemma.rsplit('-', 1)
parts = r_lemma.rsplit('^', 1)
if len(parts) < 2 or not parts[1]:
r_trace = None
else:
r_lemma, r_trace = parts
if r_index != 'null':
instances[i].addDependency( rel, l_index, r_index )
if r_trace is not None:
instances[i].addTrace( rel, l_index, r_trace )
else:
i += 1
else:
raise ValueError("Unknown dependency format!")
def preprocess(input_file,START_SNLP=True,INPUT_AMR=True, align=True, use_amr_tokens=False):
    '''Run the full preprocessing pipeline and return annotated instances.

    When INPUT_AMR is true, input_file is an AMR bank: sentences are
    extracted, tokenized (by Stanford CoreNLP, or reusing the AMR ::tok
    field when use_amr_tokens is set), optionally aligned with JAMR
    when no aligned file exists and align is true, and gold span graphs
    are built.  Otherwise input_file is a plain sentence file and only
    CoreNLP annotation runs.  Dependency parses and (optionally) SRL
    are then attached per constants.FLAG_DEPPARSER / constants.FLAG_PROP.
    Intermediate files are cached next to the input and reused if present.
    '''
    tmp_sent_filename = None
    instances = None
    tok_sent_filename = None
    if INPUT_AMR: # the input file is amr annotation
        amr_file = input_file
        # Derive the companion filenames next to the input file.
        if amr_file.endswith('.amr'):
            aligned_amr_file = amr_file + '.tok.aligned'
            amr_tok_file = amr_file + '.tok'
        else:
            aligned_amr_file = amr_file + '.amr.tok.aligned'
            amr_tok_file = amr_file + '.amr.tok'
        tmp_sent_filename = amr_file+'.sent'
        tok_sent_filename = tmp_sent_filename+'.tok' # write tokenized sentence file
        comments,amr_strings = readAMR(amr_file)
        if os.path.exists(aligned_amr_file):
            print "Reading aligned AMR ..."
            # read aligned amr and transfer alignment comments
            comments_with_alignment,_ = readAMR(aligned_amr_file)
            for comment,comment_with_alignment in zip(comments,comments_with_alignment):
                comment['alignments'] = comment_with_alignment['alignments']
        tokenized_sentences = None
        try:
            if use_amr_tokens:
                tokenized_sentences = [c['tok'] for c in comments] # here should be 'snt'
                if not os.path.exists(tok_sent_filename):
                    with open(tok_sent_filename,'w') as f:
                        for sentence in tokenized_sentences:
                            print >> f, sentence
                if tokenized_sentences:
                    print >> log, "AMR has tokens, will use them"
        except:
            # NOTE(review): the bare except immediately re-raises, so
            # this try block is effectively a no-op and the trailing
            # `pass` is unreachable — confirm whether a fallback (e.g.
            # tokenized_sentences = None) was intended instead.
            raise
            pass
        sentences = [c['snt'] for c in comments] # here should be 'snt'
        if not os.path.exists(tmp_sent_filename): # write sentences into file
            _write_sentences(tmp_sent_filename,sentences)
        print >> log, "Start Stanford CoreNLP..."
        # Only let CoreNLP tokenize when we are not reusing AMR tokens.
        proc1 = StanfordCoreNLP(tokenize=not tokenized_sentences)
        # preprocess 1: tokenization, POS tagging and name entity using Stanford CoreNLP
        if START_SNLP: proc1.setup()
        instances = proc1.parse(tmp_sent_filename if proc1.tokenize else tok_sent_filename)
        if not os.path.exists(tok_sent_filename):
            _write_tok_sentences(tok_sent_filename,instances)
        if len(instances) == 0:
            print 'Error: no instances!'
            sys.exit(1)
        if not os.path.exists(amr_tok_file): # write tokenized amr file
            _write_tok_amr(amr_tok_file,amr_file,instances)
        if not os.path.exists(aligned_amr_file) and align:
            # align
            print "Call JAMR to generate alignment ..."
            subprocess.call('./scripts/jamr_align.sh '+amr_tok_file,shell=True)
            print "Reading aligned AMR ..."
            # read aligned amr and transfer alignment comments
            comments_with_alignment,_ = readAMR(aligned_amr_file)
            for comment,comment_with_alignment in zip(comments,comments_with_alignment):
                comment['alignments'] = comment_with_alignment['alignments']
        from progress import Progress
        p = Progress(len(instances), estimate=True, values=True)
        print 'Parsing AMR:'
        SpanGraph.graphID = 0
        for i in range(len(instances)):
            amr = AMR.parse_string(amr_strings[i])
            if 'alignments' in comments[i]:
                alignment,s2c_alignment = Aligner.readJAMRAlignment(amr,comments[i]['alignments'])
                #ggraph = SpanGraph.init_ref_graph(amr,alignment,instances[i].tokens)
                # Build the gold span graph from the JAMR alignment.
                ggraph = SpanGraph.init_ref_graph_abt(amr,alignment,s2c_alignment,instances[i].tokens)
                #ggraph.pre_merge_netag(instances[i])
                #print >> log, "Graph ID:%s\n%s\n"%(ggraph.graphID,ggraph.print_tuples())
                instances[i].addAMR(amr)
                instances[i].addGoldGraph(ggraph)
                instances[i].addComment(comments[i])
            p += 1
        p.complete()
    else:
        # input file is sentence
        tmp_sent_filename = input_file
        print >> log, "Start Stanford CoreNLP ..."
        proc1 = StanfordCoreNLP()
        # preprocess 1: tokenization, POS tagging and name entity using Stanford CoreNLP
        if START_SNLP: proc1.setup()
        instances = proc1.parse(tmp_sent_filename)
        tok_sent_filename = tmp_sent_filename+'.tok' # write tokenized sentence file
        if not os.path.exists(tok_sent_filename):
            _write_tok_sentences(tok_sent_filename,instances)
    # preprocess 2: dependency parsing.  Each branch reads a cached dep
    # file when present, otherwise runs the corresponding parser.
    if constants.FLAG_DEPPARSER == "stanford":
        dep_filename = tok_sent_filename+'.stanford.dep'
        if os.path.exists(dep_filename):
            print 'Read dependency file %s...' % (dep_filename)
            dep_result = open(dep_filename,'r').read()
        else:
            dparser = StanfordDepParser()
            dep_result = dparser.parse(tok_sent_filename)
            output_dep = open(dep_filename,'w')
            output_dep.write(dep_result)
            output_dep.close()
        _add_dependency(instances,dep_result)
    elif constants.FLAG_DEPPARSER == "stanfordConvert":
        dep_filename = tok_sent_filename+'.stanford.parse.dep'
        if os.path.exists(dep_filename):
            print 'Read dependency file %s...' % (dep_filename)
            dep_result = open(dep_filename,'r').read()
        else:
            # Conversion must have been done offline; there is no parser
            # fallback for this format.
            raise IOError('Converted dependency file %s not founded' % (dep_filename))
        _add_dependency(instances,dep_result,constants.FLAG_DEPPARSER)
    elif constants.FLAG_DEPPARSER == "stdconv+charniak":
        dep_filename = tok_sent_filename+'.charniak.parse.dep'
        if not os.path.exists(dep_filename):
            dparser = CharniakParser()
            dparser.parse(tok_sent_filename)
            #raise IOError('Converted dependency file %s not founded' % (dep_filename))
        print 'Read dependency file %s...' % (dep_filename)
        dep_result = open(dep_filename,'r').read()
        _add_dependency(instances,dep_result,constants.FLAG_DEPPARSER)
    elif constants.FLAG_DEPPARSER == "clear":
        dep_filename = tok_sent_filename+'.clear.dep'
        if os.path.exists(dep_filename):
            print 'Read dependency file %s...' % (dep_filename)
            dep_result = open(dep_filename,'r').read()
        else:
            dparser = ClearDepParser()
            dep_result = dparser.parse(tok_sent_filename)
        _add_dependency(instances,dep_result,constants.FLAG_DEPPARSER)
    elif constants.FLAG_DEPPARSER == "turbo":
        dep_filename = tok_sent_filename+'.turbo.dep'
        if os.path.exists(dep_filename):
            print 'Read dependency file %s...' % (dep_filename)
            dep_result = open(dep_filename,'r').read()
        else:
            dparser = TurboDepParser()
            dep_result = dparser.parse(tok_sent_filename)
        _add_dependency(instances,dep_result,constants.FLAG_DEPPARSER)
    elif constants.FLAG_DEPPARSER == "mate":
        dep_filename = tok_sent_filename+'.mate.dep'
        if os.path.exists(dep_filename):
            print 'Read dependency file %s...' % (dep_filename)
            dep_result = open(dep_filename,'r').read()
        else:
            dparser = MateDepParser()
            dep_result = dparser.parse(tok_sent_filename)
        _add_dependency(instances,dep_result,constants.FLAG_DEPPARSER)
    else:
        # No dependency parser configured; skip silently.
        pass
    if constants.FLAG_PROP:
        print >> log, "Adding SRL information..."
        prop_filename = tok_sent_filename + '.prop'
        if os.path.exists(prop_filename):
            if constants.FLAG_DEPPARSER == "stdconv+charniak":
                _add_prop(instances,prop_filename,dep_filename,FIX_PROP_HEAD=True)
            else:
                _add_prop(instances,prop_filename,dep_filename)
        else:
            # NOTE(review): FileNotFoundError is Python 3 only; this
            # module uses Python 2 print statements, so reaching this
            # branch would raise NameError instead — confirm the
            # intended exception type (IOError under Python 2).
            raise FileNotFoundError('Semantic role labeling file %s not found!'%(prop_filename))
    return instances
'''
def _init_instances(sent_file,amr_strings,comments):
print >> log, "Preprocess 1:pos, ner and dependency using stanford parser..."
proc = StanfordCoreNLP()
instances = proc.parse(sent_file)
print >> log, "Preprocess 2:adding amr and generating gold graph"
assert len(instances) == len(amr_strings)
for i in range(len(instances)):
amr = AMR.parse_string(amr_strings[i])
instances[i].addAMR(amr)
alignment = Aligner.readJAMRAlignment(amr,comments[i]['alignments'])
ggraph = SpanGraph.init_ref_graph(amr,alignment,comments[i]['snt'])
ggraph.pre_merge_netag(instances[i])
instances[i].addGoldGraph(ggraph)
return instances
def add_JAMR_align(instances,aligned_amr_file):
comments,amr_strings = readAMR(aligned_amr_file)
for i in range(len(instances)):
amr = AMR.parse_string(amr_strings[i])
alignment = Aligner.readJAMRAlignment(amr,comments[i]['alignments'])
ggraph = SpanGraph.init_ref_graph(amr,alignment,instances[i].tokens)
ggraph.pre_merge_netag(instances[i])
#print >> log, "Graph ID:%s\n%s\n"%(ggraph.graphID,ggraph.print_tuples())
instances[i].addAMR(amr)
instances[i].addGoldGraph(ggraph)
#output_file = aligned_amr_file.rsplit('.',1)[0]+'_dataInst.p'
#pickle.dump(instances,open(output_file,'wb'),pickle.HIGHEST_PROTOCOL)
def preprocess_aligned(aligned_amr_file,writeToFile=True):
comments,amr_strings = readAMR(aligned_amr_file)
sentences = [c['tok'] for c in comments]
tmp_sentence_file = aligned_amr_file.rsplit('.',1)[0]+'_sent.txt'
_write_sentences(tmp_sentence_file,sentences)
instances = _init_instances(tmp_sentence_file,amr_strings,comments)
if writeToFile:
output_file = aligned_amr_file.rsplit('.',1)[0]+'_dataInst.p'
pickle.dump(instances,open(output_file,'wb'),pickle.HIGHEST_PROTOCOL)
return instances
'''
if __name__ == "__main__":
    # Command-line entry point: preprocess an AMR bank and pretty-print
    # one instance as a quick sanity check.
    arg_parser = argparse.ArgumentParser(description="preprocessing for training/testing data")
    arg_parser.add_argument('-v','--verbose',action='store_true',default=False)
    #arg_parser.add_argument('-m','--mode',choices=['train','parse'])
    arg_parser.add_argument('-w','--writeToFile',action='store_true',help='write preprocessed sentences to file')
    arg_parser.add_argument('amr_file',help='amr bank file')
    args = arg_parser.parse_args()
    instances = preprocess(args.amr_file)
    # NOTE(review): prints instances[1], not instances[0] — assumes the
    # bank has at least two AMRs; confirm this index is intentional.
    pprint(instances[1].toJSON())
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
import subprocess
import urllib
import orjson
from django.conf import settings
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect, render
from django.views.decorators.http import require_safe
from confirmation.models import Confirmation, confirmation_url
from zerver.lib.actions import (
change_user_is_active,
do_change_user_delivery_email,
do_send_realm_reactivation_email,
)
from zerver.lib.email_notifications import enqueue_welcome_emails
from zerver.lib.response import json_success
from zerver.models import Realm, get_realm, get_realm_stream, get_user_by_delivery_email
from zproject.email_backends import get_forward_address, set_forward_address
# Absolute path of the repository root (two directory levels up from
# this views module).
ZULIP_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")
def email_page(request: HttpRequest) -> HttpResponse:
    """Development-only view of captured outgoing email.

    POST updates the forward address; GET renders the email log,
    showing an empty log when no log file exists yet.
    """
    if request.method == "POST":
        set_forward_address(request.POST["forward_address"])
        return json_success()
    try:
        # Open read-only: this view never writes the log, and "r+"
        # (the previous mode) needlessly required write permission.
        with open(settings.EMAIL_CONTENT_LOG_PATH, "r") as f:
            content = f.read()
    except FileNotFoundError:
        content = ""
    return render(
        request,
        "zerver/development/email_log.html",
        {"log": content, "forward_address": get_forward_address()},
    )
def clear_emails(request: HttpRequest) -> HttpResponse:
    """Delete the development email log file, then redirect back to the log page."""
    try:
        os.remove(settings.EMAIL_CONTENT_LOG_PATH)
    except FileNotFoundError:  # nocoverage
        # Nothing has been logged yet; treat as already cleared.
        pass
    return redirect(email_page)
@require_safe
def generate_all_emails(request: HttpRequest) -> HttpResponse:
    """Trigger one of every email type Zulip sends, for development preview.

    Uses the Django test client against fixed development accounts
    (hamlet@zulip.com etc.) so each email path runs end to end, then
    redirects to the email log page where the results can be inspected.
    The asserts are sanity checks on the expected HTTP status codes.
    """
    if not settings.TEST_SUITE:  # nocoverage
        # It's really convenient to automatically inline the email CSS
        # here, since that saves a step when testing out changes to
        # the email CSS.  But we don't run this inside the test suite,
        # because by role, the tests shouldn't be doing a provision-like thing.
        subprocess.check_call(["./scripts/setup/inline_email_css.py"])
    # We import the Django test client inside the view function,
    # because it isn't needed in production elsewhere, and not
    # importing it saves ~50ms of unnecessary manage.py startup time.
    from django.test import Client
    client = Client()
    # write fake data for all variables
    registered_email = "hamlet@zulip.com"
    unregistered_email_1 = "new-person@zulip.com"
    unregistered_email_2 = "new-person-2@zulip.com"
    realm = get_realm("zulip")
    other_realm = Realm.objects.exclude(string_id="zulip").first()
    user = get_user_by_delivery_email(registered_email, realm)
    host_kwargs = {"HTTP_HOST": realm.host}
    # Password reset emails
    # active account in realm
    result = client.post("/accounts/password/reset/", {"email": registered_email}, **host_kwargs)
    assert result.status_code == 302
    # deactivated user: deactivate, trigger the email, then restore.
    change_user_is_active(user, False)
    result = client.post("/accounts/password/reset/", {"email": registered_email}, **host_kwargs)
    assert result.status_code == 302
    change_user_is_active(user, True)
    # account on different realm
    result = client.post(
        "/accounts/password/reset/", {"email": registered_email}, HTTP_HOST=other_realm.host
    )
    assert result.status_code == 302
    # no account anywhere
    result = client.post(
        "/accounts/password/reset/", {"email": unregistered_email_1}, **host_kwargs
    )
    assert result.status_code == 302
    # Confirm account email
    result = client.post("/accounts/home/", {"email": unregistered_email_1}, **host_kwargs)
    assert result.status_code == 302
    # Find account email
    result = client.post("/accounts/find/", {"emails": registered_email}, **host_kwargs)
    assert result.status_code == 302
    # New login email
    logged_in = client.login(dev_auth_username=registered_email, realm=realm)
    assert logged_in
    # New user invite and reminder emails
    stream = get_realm_stream("Denmark", user.realm.id)
    result = client.post(
        "/json/invites",
        {"invitee_emails": unregistered_email_2, "stream_ids": orjson.dumps([stream.id]).decode()},
        **host_kwargs,
    )
    assert result.status_code == 200
    # Verification for new email
    result = client.patch(
        "/json/settings", urllib.parse.urlencode({"email": "hamlets-new@zulip.com"}), **host_kwargs
    )
    assert result.status_code == 200
    # Email change successful: follow the confirmation link just created.
    key = Confirmation.objects.filter(type=Confirmation.EMAIL_CHANGE).latest("id").confirmation_key
    url = confirmation_url(key, realm, Confirmation.EMAIL_CHANGE)
    user_profile = get_user_by_delivery_email(registered_email, realm)
    result = client.get(url)
    assert result.status_code == 200
    # Reset the email value so we can run this again
    do_change_user_delivery_email(user_profile, registered_email)
    # Follow up day1 day2 emails for normal user
    enqueue_welcome_emails(user_profile)
    # Follow up day1 day2 emails for admin user
    enqueue_welcome_emails(get_user_by_delivery_email("iago@zulip.com", realm), realm_creation=True)
    # Realm reactivation email
    do_send_realm_reactivation_email(realm, acting_user=None)
    return redirect(email_page)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* jdmerge.c
*
* This file was part of the Independent JPEG Group's software:
* Copyright (C) 1994-1996, Thomas G. Lane.
* libjpeg-turbo Modifications:
* Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
* Copyright (C) 2009, 2011, 2014-2015, 2020, 2022, D. R. Commander.
* Copyright (C) 2013, Linaro Limited.
* For conditions of distribution and use, see the accompanying README.ijg
* file.
*
* This file contains code for merged upsampling/color conversion.
*
* This file combines functions from jdsample.c and jdcolor.c;
* read those files first to understand what's going on.
*
* When the chroma components are to be upsampled by simple replication
* (ie, box filtering), we can save some work in color conversion by
* calculating all the output pixels corresponding to a pair of chroma
* samples at one time. In the conversion equations
* R = Y + K1 * Cr
* G = Y + K2 * Cb + K3 * Cr
* B = Y + K4 * Cb
* only the Y term varies among the group of pixels corresponding to a pair
* of chroma samples, so the rest of the terms can be calculated just once.
* At typical sampling ratios, this eliminates half or three-quarters of the
* multiplications needed for color conversion.
*
* This file currently provides implementations for the following cases:
* YCbCr => RGB color conversion only.
* Sampling ratios of 2h1v or 2h2v.
* No scaling needed at upsample time.
* Corner-aligned (non-CCIR601) sampling alignment.
* Other special cases could be added, but in most applications these are
* the only common cases. (For uncommon cases we fall back on the more
* general code in jdsample.c and jdcolor.c.)
*/
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jdmerge.h"
#include "jsimd.h"
#ifdef UPSAMPLE_MERGING_SUPPORTED
#define SCALEBITS 16 /* speediest right-shift on some machines */
#define ONE_HALF ((JLONG)1 << (SCALEBITS - 1))
#define FIX(x) ((JLONG)((x) * (1L << SCALEBITS) + 0.5))
/* Include inline routines for colorspace extensions */
#include "jdmrgext.c"
#undef RGB_RED
#undef RGB_GREEN
#undef RGB_BLUE
#undef RGB_PIXELSIZE
#define RGB_RED EXT_RGB_RED
#define RGB_GREEN EXT_RGB_GREEN
#define RGB_BLUE EXT_RGB_BLUE
#define RGB_PIXELSIZE EXT_RGB_PIXELSIZE
#define h2v1_merged_upsample_internal extrgb_h2v1_merged_upsample_internal
#define h2v2_merged_upsample_internal extrgb_h2v2_merged_upsample_internal
#include "jdmrgext.c"
#undef RGB_RED
#undef RGB_GREEN
#undef RGB_BLUE
#undef RGB_PIXELSIZE
#undef h2v1_merged_upsample_internal
#undef h2v2_merged_upsample_internal
#define RGB_RED EXT_RGBX_RED
#define RGB_GREEN EXT_RGBX_GREEN
#define RGB_BLUE EXT_RGBX_BLUE
#define RGB_ALPHA 3
#define RGB_PIXELSIZE EXT_RGBX_PIXELSIZE
#define h2v1_merged_upsample_internal extrgbx_h2v1_merged_upsample_internal
#define h2v2_merged_upsample_internal extrgbx_h2v2_merged_upsample_internal
#include "jdmrgext.c"
#undef RGB_RED
#undef RGB_GREEN
#undef RGB_BLUE
#undef RGB_ALPHA
#undef RGB_PIXELSIZE
#undef h2v1_merged_upsample_internal
#undef h2v2_merged_upsample_internal
#define RGB_RED EXT_BGR_RED
#define RGB_GREEN EXT_BGR_GREEN
#define RGB_BLUE EXT_BGR_BLUE
#define RGB_PIXELSIZE EXT_BGR_PIXELSIZE
#define h2v1_merged_upsample_internal extbgr_h2v1_merged_upsample_internal
#define h2v2_merged_upsample_internal extbgr_h2v2_merged_upsample_internal
#include "jdmrgext.c"
#undef RGB_RED
#undef RGB_GREEN
#undef RGB_BLUE
#undef RGB_PIXELSIZE
#undef h2v1_merged_upsample_internal
#undef h2v2_merged_upsample_internal
#define RGB_RED EXT_BGRX_RED
#define RGB_GREEN EXT_BGRX_GREEN
#define RGB_BLUE EXT_BGRX_BLUE
#define RGB_ALPHA 3
#define RGB_PIXELSIZE EXT_BGRX_PIXELSIZE
#define h2v1_merged_upsample_internal extbgrx_h2v1_merged_upsample_internal
#define h2v2_merged_upsample_internal extbgrx_h2v2_merged_upsample_internal
#include "jdmrgext.c"
#undef RGB_RED
#undef RGB_GREEN
#undef RGB_BLUE
#undef RGB_ALPHA
#undef RGB_PIXELSIZE
#undef h2v1_merged_upsample_internal
#undef h2v2_merged_upsample_internal
#define RGB_RED EXT_XBGR_RED
#define RGB_GREEN EXT_XBGR_GREEN
#define RGB_BLUE EXT_XBGR_BLUE
#define RGB_ALPHA 0
#define RGB_PIXELSIZE EXT_XBGR_PIXELSIZE
#define h2v1_merged_upsample_internal extxbgr_h2v1_merged_upsample_internal
#define h2v2_merged_upsample_internal extxbgr_h2v2_merged_upsample_internal
#include "jdmrgext.c"
#undef RGB_RED
#undef RGB_GREEN
#undef RGB_BLUE
#undef RGB_ALPHA
#undef RGB_PIXELSIZE
#undef h2v1_merged_upsample_internal
#undef h2v2_merged_upsample_internal
#define RGB_RED EXT_XRGB_RED
#define RGB_GREEN EXT_XRGB_GREEN
#define RGB_BLUE EXT_XRGB_BLUE
#define RGB_ALPHA 0
#define RGB_PIXELSIZE EXT_XRGB_PIXELSIZE
#define h2v1_merged_upsample_internal extxrgb_h2v1_merged_upsample_internal
#define h2v2_merged_upsample_internal extxrgb_h2v2_merged_upsample_internal
#include "jdmrgext.c"
#undef RGB_RED
#undef RGB_GREEN
#undef RGB_BLUE
#undef RGB_ALPHA
#undef RGB_PIXELSIZE
#undef h2v1_merged_upsample_internal
#undef h2v2_merged_upsample_internal
/*
* Initialize tables for YCC->RGB colorspace conversion.
* This is taken directly from jdcolor.c; see that file for more info.
*/
LOCAL(void)
build_ycc_rgb_table(j_decompress_ptr cinfo)
/* Build the four Cb/Cr contribution lookup tables used by the merged
 * upsamplers.  Tables live in the per-image pool, so they are rebuilt
 * for every image.  Taken directly from jdcolor.c.
 */
{
  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
  int i;
  JLONG x;
  SHIFT_TEMPS   /* declares temporaries needed by RIGHT_SHIFT on some platforms */

  upsample->Cr_r_tab = (int *)
    (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
                                (_MAXJSAMPLE + 1) * sizeof(int));
  upsample->Cb_b_tab = (int *)
    (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
                                (_MAXJSAMPLE + 1) * sizeof(int));
  /* The two green-channel tables hold scaled (not yet shifted) values,
   * so they need the wider JLONG type. */
  upsample->Cr_g_tab = (JLONG *)
    (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
                                (_MAXJSAMPLE + 1) * sizeof(JLONG));
  upsample->Cb_g_tab = (JLONG *)
    (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
                                (_MAXJSAMPLE + 1) * sizeof(JLONG));

  for (i = 0, x = -_CENTERJSAMPLE; i <= _MAXJSAMPLE; i++, x++) {
    /* i is the actual input pixel value, in the range 0.._MAXJSAMPLE */
    /* The Cb or Cr value we are thinking of is x = i - _CENTERJSAMPLE */
    /* Cr=>R value is nearest int to 1.40200 * x */
    upsample->Cr_r_tab[i] = (int)
      RIGHT_SHIFT(FIX(1.40200) * x + ONE_HALF, SCALEBITS);
    /* Cb=>B value is nearest int to 1.77200 * x */
    upsample->Cb_b_tab[i] = (int)
      RIGHT_SHIFT(FIX(1.77200) * x + ONE_HALF, SCALEBITS);
    /* Cr=>G value is scaled-up -0.71414 * x */
    upsample->Cr_g_tab[i] = (-FIX(0.71414)) * x;
    /* Cb=>G value is scaled-up -0.34414 * x */
    /* We also add in ONE_HALF so that need not do it in inner loop */
    upsample->Cb_g_tab[i] = (-FIX(0.34414)) * x + ONE_HALF;
  }
}
/*
* Initialize for an upsampling pass.
*/
METHODDEF(void)
start_pass_merged_upsample(j_decompress_ptr cinfo)
/* Reset the per-pass state: no buffered spare row, full image height
 * still to be produced.
 */
{
  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;

  /* Mark the spare buffer empty */
  upsample->spare_full = FALSE;
  /* Initialize total-height counter for detecting bottom of image */
  upsample->rows_to_go = cinfo->output_height;
}
/*
* Control routine to do upsampling (and color conversion).
*
* The control routine just handles the row buffering considerations.
*/
METHODDEF(void)
merged_2v_upsample(j_decompress_ptr cinfo, _JSAMPIMAGE input_buf,
                   JDIMENSION *in_row_group_ctr,
                   JDIMENSION in_row_groups_avail, _JSAMPARRAY output_buf,
                   JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
/* 2:1 vertical sampling case: may need a spare row.
 * Each input row group expands to two output rows; if the caller can
 * accept only one, the second is stashed in spare_row and returned on
 * the next call before consuming more input.
 */
{
  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;
  _JSAMPROW work_ptrs[2];
  JDIMENSION num_rows;          /* number of rows returned to caller */

  if (upsample->spare_full) {
    /* If we have a spare row saved from a previous cycle, just return it. */
    JDIMENSION size = upsample->out_row_width;
    if (cinfo->out_color_space == JCS_RGB565)
      size = cinfo->output_width * 2;   /* RGB565 packs each pixel into 2 bytes */
    _jcopy_sample_rows(&upsample->spare_row, 0, output_buf + *out_row_ctr, 0,
                       1, size);
    num_rows = 1;
    upsample->spare_full = FALSE;
  } else {
    /* Figure number of rows to return to caller. */
    num_rows = 2;
    /* Not more than the distance to the end of the image. */
    if (num_rows > upsample->rows_to_go)
      num_rows = upsample->rows_to_go;
    /* And not more than what the client can accept: */
    out_rows_avail -= *out_row_ctr;
    if (num_rows > out_rows_avail)
      num_rows = out_rows_avail;
    /* Create output pointer array for upsampler. */
    work_ptrs[0] = output_buf[*out_row_ctr];
    if (num_rows > 1) {
      work_ptrs[1] = output_buf[*out_row_ctr + 1];
    } else {
      /* Only one row wanted: generate the second into the spare buffer. */
      work_ptrs[1] = upsample->spare_row;
      upsample->spare_full = TRUE;
    }
    /* Now do the upsampling. */
    (*upsample->upmethod) (cinfo, input_buf, *in_row_group_ctr, work_ptrs);
  }

  /* Adjust counts */
  *out_row_ctr += num_rows;
  upsample->rows_to_go -= num_rows;
  /* When the buffer is emptied, declare this input row group consumed */
  if (!upsample->spare_full)
    (*in_row_group_ctr)++;
}
METHODDEF(void)
merged_1v_upsample(j_decompress_ptr cinfo, _JSAMPIMAGE input_buf,
                   JDIMENSION *in_row_group_ctr,
                   JDIMENSION in_row_groups_avail, _JSAMPARRAY output_buf,
                   JDIMENSION *out_row_ctr, JDIMENSION out_rows_avail)
/* 1:1 vertical sampling case: much easier, never need a spare row.
 * One input row group produces exactly one output row per call.
 */
{
  my_merged_upsample_ptr upsample = (my_merged_upsample_ptr)cinfo->upsample;

  /* Just do the upsampling. */
  (*upsample->upmethod) (cinfo, input_buf, *in_row_group_ctr,
                         output_buf + *out_row_ctr);
  /* Adjust counts */
  (*out_row_ctr)++;
  (*in_row_group_ctr)++;
}
/*
* These are the routines invoked by the control routines to do
* the actual upsampling/conversion. One row group is processed per call.
*
* Note: since we may be writing directly into application-supplied buffers,
* we have to be honest about the output width; we can't assume the buffer
* has been rounded up to an even width.
*/
/*
* Upsample and color convert for the case of 2:1 horizontal and 1:1 vertical.
*/
METHODDEF(void)
h2v1_merged_upsample(j_decompress_ptr cinfo, _JSAMPIMAGE input_buf,
                     JDIMENSION in_row_group_ctr, _JSAMPARRAY output_buf)
/* Dispatch on the output colorspace to the pixel-layout-specific body
 * generated from jdmrgext.c; the default case is the baseline RGB
 * implementation (first jdmrgext.c inclusion above).
 */
{
  switch (cinfo->out_color_space) {
  case JCS_EXT_RGB:
    extrgb_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                         output_buf);
    break;
  case JCS_EXT_RGBX:
  case JCS_EXT_RGBA:
    extrgbx_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
    break;
  case JCS_EXT_BGR:
    extbgr_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                         output_buf);
    break;
  case JCS_EXT_BGRX:
  case JCS_EXT_BGRA:
    extbgrx_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
    break;
  case JCS_EXT_XBGR:
  case JCS_EXT_ABGR:
    extxbgr_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
    break;
  case JCS_EXT_XRGB:
  case JCS_EXT_ARGB:
    extxrgb_h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
    break;
  default:
    h2v1_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                  output_buf);
    break;
  }
}
/*
* Upsample and color convert for the case of 2:1 horizontal and 2:1 vertical.
*/
METHODDEF(void)
h2v2_merged_upsample(j_decompress_ptr cinfo, _JSAMPIMAGE input_buf,
                     JDIMENSION in_row_group_ctr, _JSAMPARRAY output_buf)
/* Dispatch on the output colorspace to the pixel-layout-specific body
 * generated from jdmrgext.c; the default case is the baseline RGB
 * implementation (first jdmrgext.c inclusion above).
 */
{
  switch (cinfo->out_color_space) {
  case JCS_EXT_RGB:
    extrgb_h2v2_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                         output_buf);
    break;
  case JCS_EXT_RGBX:
  case JCS_EXT_RGBA:
    extrgbx_h2v2_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
    break;
  case JCS_EXT_BGR:
    extbgr_h2v2_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                         output_buf);
    break;
  case JCS_EXT_BGRX:
  case JCS_EXT_BGRA:
    extbgrx_h2v2_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
    break;
  case JCS_EXT_XBGR:
  case JCS_EXT_ABGR:
    extxbgr_h2v2_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
    break;
  case JCS_EXT_XRGB:
  case JCS_EXT_ARGB:
    extxrgb_h2v2_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                          output_buf);
    break;
  default:
    h2v2_merged_upsample_internal(cinfo, input_buf, in_row_group_ctr,
                                  output_buf);
    break;
  }
}
/*
* RGB565 conversion
*/
#define PACK_SHORT_565_LE(r, g, b) \
((((r) << 8) & 0xF800) | (((g) << 3) & 0x7E0) | ((b) >> 3))
#define PACK_SHORT_565_BE(r, g, b) \
(((r) & 0xF8) | ((g) >> 5) | (((g) << 11) & 0xE000) | (((b) << 5) & 0x1F00))
#define PACK_TWO_PIXELS_LE(l, r) ((r << 16) | l)
#define PACK_TWO_PIXELS_BE(l, r) ((l << 16) | r)
#define WRITE_TWO_PIXELS_LE(addr, pixels) { \
((INT16 *)(addr))[0] = (INT16)(pixels); \
((INT16 *)(addr))[1] = (INT16)((pixels) >> 16); \
}
#define WRITE_TWO_PIXELS_BE(addr, pixels) { \
((INT16 *)(addr))[1] = (INT16)(pixels); \
((INT16 *)(addr))[0] = (INT16)((pixels) >> 16); \
}
#define DITHER_565_R(r, dither) ((r) + ((dither) & 0xFF))
#define DITHER_565_G(g, dither) ((g) + (((dither) & 0xFF) >> 1))
#define DITHER_565_B(b, dither) ((b) + ((dither) & 0xFF))
/* Declarations for ordered dithering
*
* We use a 4x4 ordered dither array packed into 32 bits. This array is
* sufficient for dithering RGB888 to RGB565.
*/
#define DITHER_MASK 0x3
#define DITHER_ROTATE(x) ((((x) & 0xFF) << 24) | (((x) >> 8) & 0x00FFFFFF))
static const JLONG dither_matrix[4] = {
0x0008020A,
0x0C040E06,
0x030B0109,
0x0F070D05
};
/* Include inline routines for RGB565 conversion */
#define PACK_SHORT_565 PACK_SHORT_565_LE
#define PACK_TWO_PIXELS PACK_TWO_PIXELS_LE
#define WRITE_TWO_PIXELS WRITE_TWO_PIXELS_LE
#define h2v1_merged_upsample_565_internal h2v1_merged_upsample_565_le
#define h2v1_merged_upsample_565D_internal h2v1_merged_upsample_565D_le
#define h2v2_merged_upsample_565_internal h2v2_merged_upsample_565_le
#define h2v2_merged_upsample_565D_internal h2v2_merged_upsample_565D_le
#include "jdmrg565.c"
#undef PACK_SHORT_565
#undef PACK_TWO_PIXELS
#undef WRITE_TWO_PIXELS
#undef h2v1_merged_upsample_565_internal
#undef h2v1_merged_upsample_565D_internal
#undef h2v2_merged_upsample_565_internal
#undef h2v2_merged_upsample_565D_internal
#define PACK_SHORT_565 PACK_SHORT_565_BE
#define PACK_TWO_PIXELS PACK_TWO_PIXELS_BE
#define WRITE_TWO_PIXELS WRITE_TWO_PIXELS_BE
#define h2v1_merged_upsample_565_internal h2v1_merged_upsample_565_be
#define h2v1_merged_upsample_565D_internal h2v1_merged_upsample_565D_be
#define h2v2_merged_upsample_565_internal h2v2_merged_upsample_565_be
#define h2v2_merged_upsample_565D_internal h2v2_merged_upsample_565D_be
#include "jdmrg565.c"
#undef PACK_SHORT_565
#undef PACK_TWO_PIXELS
#undef WRITE_TWO_PIXELS
#undef h2v1_merged_upsample_565_internal
#undef h2v1_merged_upsample_565D_internal
#undef h2v2_merged_upsample_565_internal
#undef h2v2_merged_upsample_565D_internal
static INLINE boolean is_big_endian(void)
/* Detect host byte order at run time by probing the first byte of an int. */
{
  const int probe = 1;
  return (*(const char *)&probe == 1) ? FALSE : TRUE;
}
METHODDEF(void)
h2v1_merged_upsample_565(j_decompress_ptr cinfo, _JSAMPIMAGE input_buf,
                         JDIMENSION in_row_group_ctr, _JSAMPARRAY output_buf)
{
  /* Pick the RGB565 routine matching the host byte order at run time. */
  if (!is_big_endian())
    h2v1_merged_upsample_565_le(cinfo, input_buf, in_row_group_ctr,
                                output_buf);
  else
    h2v1_merged_upsample_565_be(cinfo, input_buf, in_row_group_ctr,
                                output_buf);
}
METHODDEF(void)
h2v1_merged_upsample_565D(j_decompress_ptr cinfo, _JSAMPIMAGE input_buf,
                          JDIMENSION in_row_group_ctr, _JSAMPARRAY output_buf)
{
  /* Pick the dithered RGB565 routine matching the host byte order. */
  if (!is_big_endian())
    h2v1_merged_upsample_565D_le(cinfo, input_buf, in_row_group_ctr,
                                 output_buf);
  else
    h2v1_merged_upsample_565D_be(cinfo, input_buf, in_row_group_ctr,
                                 output_buf);
}
METHODDEF(void)
h2v2_merged_upsample_565(j_decompress_ptr cinfo, _JSAMPIMAGE input_buf,
                         JDIMENSION in_row_group_ctr, _JSAMPARRAY output_buf)
{
  /* Pick the RGB565 routine matching the host byte order at run time. */
  if (!is_big_endian())
    h2v2_merged_upsample_565_le(cinfo, input_buf, in_row_group_ctr,
                                output_buf);
  else
    h2v2_merged_upsample_565_be(cinfo, input_buf, in_row_group_ctr,
                                output_buf);
}
METHODDEF(void)
h2v2_merged_upsample_565D(j_decompress_ptr cinfo, _JSAMPIMAGE input_buf,
                          JDIMENSION in_row_group_ctr, _JSAMPARRAY output_buf)
{
  /* Pick the dithered RGB565 routine matching the host byte order. */
  if (!is_big_endian())
    h2v2_merged_upsample_565D_le(cinfo, input_buf, in_row_group_ctr,
                                 output_buf);
  else
    h2v2_merged_upsample_565D_be(cinfo, input_buf, in_row_group_ctr,
                                 output_buf);
}
/*
* Module initialization routine for merged upsampling/color conversion.
*
* NB: this is called under the conditions determined by use_merged_upsample()
* in jdmaster.c. That routine MUST correspond to the actual capabilities
* of this module; no safety checks are made here.
*/
GLOBAL(void)
_jinit_merged_upsampler(j_decompress_ptr cinfo)
/* Allocate the merged-upsampler state and select the control and
 * worker methods for this decompression, based on the vertical
 * sampling factor and the output colorspace/dither settings.
 */
{
  my_merged_upsample_ptr upsample;

  if (cinfo->data_precision != BITS_IN_JSAMPLE)
    ERREXIT1(cinfo, JERR_BAD_PRECISION, cinfo->data_precision);

  upsample = (my_merged_upsample_ptr)
    (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE,
                                sizeof(my_merged_upsampler));
  cinfo->upsample = (struct jpeg_upsampler *)upsample;
  upsample->pub.start_pass = start_pass_merged_upsample;
  upsample->pub.need_context_rows = FALSE;

  upsample->out_row_width = cinfo->output_width * cinfo->out_color_components;

  if (cinfo->max_v_samp_factor == 2) {
    /* 2:1 vertical: two output rows per row group, needs spare-row logic. */
    upsample->pub._upsample = merged_2v_upsample;
#ifdef WITH_SIMD
    if (jsimd_can_h2v2_merged_upsample())
      upsample->upmethod = jsimd_h2v2_merged_upsample;
    else
#endif
      upsample->upmethod = h2v2_merged_upsample;
    /* RGB565 output overrides the (possibly SIMD) choice made above. */
    if (cinfo->out_color_space == JCS_RGB565) {
      if (cinfo->dither_mode != JDITHER_NONE) {
        upsample->upmethod = h2v2_merged_upsample_565D;
      } else {
        upsample->upmethod = h2v2_merged_upsample_565;
      }
    }
    /* Allocate a spare row buffer */
    upsample->spare_row = (_JSAMPROW)
      (*cinfo->mem->alloc_large) ((j_common_ptr)cinfo, JPOOL_IMAGE,
        (size_t)(upsample->out_row_width * sizeof(_JSAMPLE)));
  } else {
    /* 1:1 vertical: one output row per row group. */
    upsample->pub._upsample = merged_1v_upsample;
#ifdef WITH_SIMD
    if (jsimd_can_h2v1_merged_upsample())
      upsample->upmethod = jsimd_h2v1_merged_upsample;
    else
#endif
      upsample->upmethod = h2v1_merged_upsample;
    /* RGB565 output overrides the (possibly SIMD) choice made above. */
    if (cinfo->out_color_space == JCS_RGB565) {
      if (cinfo->dither_mode != JDITHER_NONE) {
        upsample->upmethod = h2v1_merged_upsample_565D;
      } else {
        upsample->upmethod = h2v1_merged_upsample_565;
      }
    }
    /* No spare row needed */
    upsample->spare_row = NULL;
  }

  build_ycc_rgb_table(cinfo);
}
#endif /* UPSAMPLE_MERGING_SUPPORTED */
|
c
|
github
|
https://github.com/opencv/opencv
|
3rdparty/libjpeg-turbo/src/jdmerge.c
|
# Q-Learner for Angrybirds, using function approximation
# Imanol Arrieta, Bernardo Ramos, Lars Roemheld
# Adapted from a homework assignment in Percy Liang's class CS221 at Stanford University
import collections
import random
import math
class QLearningAlgorithm():
    """Q-Learning with linear function approximation.

    actions: a function (!) that takes a state and returns a list of allowed actions.
    discount: a number between 0 and 1, which determines the discount factor
    featureExtractor: a function that takes a state and action and returns a list of
        (feature name, feature value) pairs.
    explorationProb: the epsilon value indicating how frequently the policy
        returns a random action
    """
    def __init__(self, actions, featureExtractor, discount=1.0, epsilon=0.3):
        self.actions = actions
        self.discount = discount
        self.featureExtractor = featureExtractor
        self.explorationProb = epsilon
        # Sparse weight vector for the linear approximation; missing keys read as 0.
        self.weights = collections.Counter()
        self.numIters = 0

    def getQ(self, state, action):
        """
        Return the Q function for the current gameState and action, computed as
        (linear) function approximation: dot(weights, features).
        """
        score = 0.0
        for f_name, f_val in self.featureExtractor(state, action):
            score += self.weights[f_name] * f_val
        return score

    def getAction(self, state):
        """
        Epsilon-greedy algorithm: with probability |explorationProb|, take a random
        action. Otherwise, take the action that maximizes expected Q.
        :param state: current gameState
        :return: the chosen action
        """
        self.numIters += 1
        if random.random() < self.explorationProb:
            return random.choice(self.actions(state))
        options = [(self.getQ(state, action), action) for action in self.actions(state)]
        # Compare on Q-values only -- actions themselves may not be orderable,
        # so max() over (Q, action) tuples could raise TypeError on ties.
        bestVal = max(val for val, _ in options)
        # Break ties uniformly at random.
        return random.choice([act for val, act in options if val == bestVal])

    def getStepSize(self):
        """Learning rate eta = 1/sqrt(t). Guarded so that a feedback call made
        before the first getAction() cannot divide by zero."""
        return 1.0 / math.sqrt(max(1, self.numIters))

    def setExplorationProb(self, probability):
        """Set epsilon, e.g. to 0 to make the policy fully greedy."""
        self.explorationProb = probability

    def incorporateFeedback(self, state, action, reward, newState):
        """SGD update for one (s, a, r, s') transition:
        w := w - eta * (Q(s,a;w) - (reward + discount * V_opt(s'))) * phi(s, a)
        where V_opt(s') = max_a Q(s', a), and s' is None for terminal states.
        """
        if newState is None:
            V_newState = 0.0
        else:
            V_newState = max(self.getQ(newState, newAction)
                             for newAction in self.actions(newState))
        # The TD error and step size are constant across features: hoist them
        # out of the loop instead of recomputing getQ per feature.
        td_error = self.getQ(state, action) - reward - self.discount * V_newState
        eta = self.getStepSize()
        # Update a copy so every feature update reads the pre-update weights.
        newWeights = collections.Counter(self.weights)
        for f_name, f_val in self.featureExtractor(state, action):
            newWeights[f_name] = self.weights[f_name] - eta * td_error * f_val
        self.weights = newWeights
|
unknown
|
codeparrot/codeparrot-clean
| ||
# See: http://hunterford.me/django-custom-model-manager-chaining/
import models
class ArticleManagerMixin(object):
    """Chainable queryset filters shared by Article managers."""
    def published(self, status=True):
        # Map the boolean flag to the matching Article status constant.
        wanted = (models.Article.STATUS_PUBLISHED if status
                  else models.Article.STATUS_DRAFT)
        return self.filter(status=wanted)
    def articles(self):
        """Limit to plain articles."""
        return self.filter(kind=models.Article.KIND_ARTICLE)
    def blog(self):
        """Limit to blog posts."""
        return self.filter(kind=models.Article.KIND_BLOG)
    def featured(self):
        """Limit to featured pieces."""
        return self.filter(kind=models.Article.KIND_FEATURE)
class ImageManagerMixin(object):
    """Chainable queryset filters for images, keyed off the related Article."""
    def published(self, status=True):
        # Restrict to images of published articles, then by the image's own status.
        of_published = self.filter(article__status=models.Article.STATUS_PUBLISHED)
        return of_published.filter(status=status)
    def articles(self):
        """Images attached to plain articles."""
        return self.filter(article__kind=models.Article.KIND_ARTICLE)
    def blog(self):
        """Images attached to blog posts."""
        return self.filter(article__kind=models.Article.KIND_BLOG)
    def featured(self):
        """Images attached to featured pieces."""
        return self.filter(article__kind=models.Article.KIND_FEATURE)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import re
from setuptools import setup, find_packages
# Optional dependency groups, installable as e.g. `pip install apispec[yaml]`.
EXTRAS_REQUIRE = {
    "yaml": ["PyYAML>=3.10"],
    "validation": ["prance[osv]>=0.11"],
    "lint": ["flake8==3.9.2", "flake8-bugbear==21.4.3", "pre-commit~=2.4"],
    "docs": [
        "marshmallow>=3.0.0",
        "pyyaml==5.4.1",
        "sphinx==4.0.3",
        "sphinx-issues==1.2.0",
        "sphinx-rtd-theme==0.5.2",
    ],
}
# Tests need the optional runtime features plus the test tooling.
EXTRAS_REQUIRE["tests"] = (
    EXTRAS_REQUIRE["yaml"]
    + EXTRAS_REQUIRE["validation"]
    + ["marshmallow>=3.10.0", "pytest", "mock"]
)
# "dev" is the superset for local development (tests + lint + tox runner).
EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["lint"] + ["tox"]
def find_version(fname):
    """Attempts to find the version number in the file named fname.
    Raises RuntimeError if not found.
    """
    version_re = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
    version = ""
    with open(fname) as fp:
        for line in fp:
            match = version_re.match(line)
            if match:
                version = match.group(1)
                break
    if not version:
        raise RuntimeError("Cannot find version information.")
    return version
def read(fname):
    """Return the full text content of the file named fname."""
    with open(fname) as fp:
        return fp.read()
# Package metadata. The version is read from the package source so it is
# defined in exactly one place (src/apispec/__init__.py).
setup(
    name="apispec",
    version=find_version("src/apispec/__init__.py"),
    description="A pluggable API specification generator. Currently supports the "
    "OpenAPI Specification (f.k.a. the Swagger specification).",
    long_description=read("README.rst"),
    author="Steven Loria",
    author_email="sloria1@gmail.com",
    url="https://github.com/marshmallow-code/apispec",
    packages=find_packages("src"),
    package_dir={"": "src"},
    include_package_data=True,
    extras_require=EXTRAS_REQUIRE,
    license="MIT",
    zip_safe=False,
    keywords="apispec swagger openapi specification oas documentation spec rest api",
    python_requires=">=3.6",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3 :: Only",
    ],
    test_suite="tests",
    project_urls={
        "Funding": "https://opencollective.com/marshmallow",
        "Issues": "https://github.com/marshmallow-code/apispec/issues",
        "Tidelift": "https://tidelift.com/subscription/pkg/pypi-apispec?utm_source=pypi-apispec&utm_medium=pypi", # noqa: B950,E501
    },
)
|
unknown
|
codeparrot/codeparrot-clean
| ||
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
# Apache Hadoop 0.23.3 Release Notes
These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
---
* [MAPREDUCE-3348](https://issues.apache.org/jira/browse/MAPREDUCE-3348) | *Major* | **mapred job -status fails to give info even if the job is present in History**
Fixed a bug in MR client to redirect to JobHistoryServer correctly when RM forgets the app.
---
* [MAPREDUCE-4072](https://issues.apache.org/jira/browse/MAPREDUCE-4072) | *Major* | **User set java.library.path seems to overwrite default creating problems native lib loading**
-Djava.library.path in mapred.child.java.opts can cause issues with native libraries. LD\_LIBRARY\_PATH through mapred.child.env should be used instead.
---
* [MAPREDUCE-4017](https://issues.apache.org/jira/browse/MAPREDUCE-4017) | *Trivial* | **Add jobname to jobsummary log**
The Job Summary log may contain commas in values that are escaped by a '\\' character. This was true before, but is more likely to be exposed now.
---
* [MAPREDUCE-3812](https://issues.apache.org/jira/browse/MAPREDUCE-3812) | *Major* | **Lower default allocation sizes, fix allocation configurations and document them**
Removes two sets of previously available config properties:
1. ( yarn.scheduler.fifo.minimum-allocation-mb and yarn.scheduler.fifo.maximum-allocation-mb ) and,
2. ( yarn.scheduler.capacity.minimum-allocation-mb and yarn.scheduler.capacity.maximum-allocation-mb )
In favor of two new, generically named properties:
1. yarn.scheduler.minimum-allocation-mb - This acts as the floor value of memory resource requests for containers.
2. yarn.scheduler.maximum-allocation-mb - This acts as the ceiling value of memory resource requests for containers.
Both these properties need to be set at the ResourceManager (RM) to take effect, as the RM is where the scheduler resides.
Also changes the default minimum and maximums to 128 MB and 10 GB respectively.
---
* [HDFS-3318](https://issues.apache.org/jira/browse/HDFS-3318) | *Blocker* | **Hftp hangs on transfers \>2GB**
**WARNING: No release note provided for this change.**
---
* [HADOOP-8327](https://issues.apache.org/jira/browse/HADOOP-8327) | *Major* | **distcpv2 and distcpv1 jars should not coexist**
Resolve sporadic distcp issue due to having two DistCp classes (v1 & v2) in the classpath.
---
* [MAPREDUCE-3543](https://issues.apache.org/jira/browse/MAPREDUCE-3543) | *Critical* | **Mavenize Gridmix.**
Note that to apply this you should first run the script - ./MAPREDUCE-3543v3.sh svn, then apply the patch.
If this is merged to more than trunk, the version inside of hadoop-tools/hadoop-gridmix/pom.xml will need to be updated accordingly.
---
* [MAPREDUCE-4311](https://issues.apache.org/jira/browse/MAPREDUCE-4311) | *Major* | **Capacity scheduler.xml does not accept decimal values for capacity and maximum-capacity settings**
**WARNING: No release note provided for this change.**
---
* [MAPREDUCE-3940](https://issues.apache.org/jira/browse/MAPREDUCE-3940) | *Major* | **ContainerTokens should have an expiry interval**
ContainerTokens now have an expiry interval so that stale tokens cannot be used for launching containers.
---
* [HADOOP-8551](https://issues.apache.org/jira/browse/HADOOP-8551) | *Major* | **fs -mkdir creates parent directories without the -p option**
FsShell's "mkdir" no longer implicitly creates all non-existent parent directories. The command adopts the posix compliant behavior of requiring the "-p" flag to auto-create parent directories.
---
* [HADOOP-8703](https://issues.apache.org/jira/browse/HADOOP-8703) | *Major* | **distcpV2: turn CRC checking off for 0 byte size**
distcp skips CRC on 0 byte files.
|
unknown
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/site/markdown/release/0.23.3/RELEASENOTES.0.23.3.md
|
use std::io::Write;
use anyhow::{Result, bail};
use turbo_rcstr::{RcStr, rcstr};
use turbo_tasks::{ResolvedVc, TryJoinIterExt, Vc, fxindexmap};
use turbo_tasks_fs::{
self, File, FileContent, FileSystemPath, FileSystemPathOption, rope::RopeBuilder,
};
use turbopack_core::{
asset::{Asset, AssetContent},
chunk::{ChunkData, ChunkingContext, ChunksData},
context::AssetContext,
ident::AssetIdent,
module::Module,
output::{OutputAsset, OutputAssets, OutputAssetsReference, OutputAssetsWithReferenced},
proxied_asset::ProxiedAsset,
reference_type::{EntryReferenceSubType, ReferenceType},
source::Source,
virtual_source::VirtualSource,
};
use turbopack_ecmascript::{chunk::EcmascriptChunkData, utils::StringifyJs};
use crate::{embed_js::next_js_file_path, util::get_asset_path_from_pathname};
/// Builds the client entry module for a page: the `entry/page-loader.ts`
/// template with `PAGE_PATH` prepended and the processed page injected as
/// its internal "PAGE" module.
#[turbo_tasks::function]
pub async fn create_page_loader_entry_module(
    client_context: Vc<Box<dyn AssetContext>>,
    entry_asset: Vc<Box<dyn Source>>,
    pathname: RcStr,
) -> Result<Vc<Box<dyn Module>>> {
    // Prepend the page path constant, then splice in the loader template source.
    let mut result = RopeBuilder::default();
    writeln!(result, "const PAGE_PATH = {};\n", StringifyJs(&pathname))?;
    let page_loader_path = next_js_file_path(rcstr!("entry/page-loader.ts"))
        .owned()
        .await?;
    let base_code = page_loader_path.read();
    if let FileContent::Content(base_file) = &*base_code.await? {
        result += base_file.content()
    } else {
        bail!("required file `entry/page-loader.ts` not found");
    }
    let file = File::from(result.build());
    // Serve the generated code from the template's own path.
    let virtual_source = Vc::upcast(VirtualSource::new(
        page_loader_path,
        AssetContent::file(FileContent::Content(file).cell()),
    ));
    // Process the page entry first so it can be wired into the loader below.
    let module = client_context
        .process(
            entry_asset,
            ReferenceType::Entry(EntryReferenceSubType::Page),
        )
        .module()
        .to_resolved()
        .await?;
    // Process the loader with the page available under the "PAGE" key.
    let module = client_context
        .process(
            virtual_source,
            ReferenceType::Internal(ResolvedVc::cell(fxindexmap! {
                rcstr!("PAGE") => module,
            })),
        )
        .module();
    Ok(module)
}
/// Output asset that emits the `__turbopack_load_page_chunks__` call
/// registering a page's chunks; see the `Asset` impl below for the content.
#[turbo_tasks::value(shared)]
pub struct PageLoaderAsset {
    pub server_root: FileSystemPath,
    pub pathname: RcStr,
    /// Optional prefix stripped from chunk paths via `ProxiedAsset` rebasing.
    pub rebase_prefix_path: ResolvedVc<FileSystemPathOption>,
    pub page_chunks: ResolvedVc<OutputAssets>,
    pub chunking_context: ResolvedVc<Box<dyn ChunkingContext>>,
    /// When true, emit at a fixed (non-hashed) path derived from the ident.
    pub use_fixed_path: bool,
}
#[turbo_tasks::value_impl]
impl PageLoaderAsset {
    /// Plain constructor; all fields are passed through unchanged.
    #[turbo_tasks::function]
    pub fn new(
        server_root: FileSystemPath,
        pathname: RcStr,
        rebase_prefix_path: ResolvedVc<FileSystemPathOption>,
        page_chunks: ResolvedVc<OutputAssets>,
        chunking_context: ResolvedVc<Box<dyn ChunkingContext>>,
        use_fixed_path: bool,
    ) -> Vc<Self> {
        Self {
            server_root,
            pathname,
            rebase_prefix_path,
            page_chunks,
            chunking_context,
            use_fixed_path,
        }
        .cell()
    }
    /// Chunk data for the page chunks, with each chunk's path rebased out of
    /// `rebase_prefix_path` (when set) by wrapping it in a `ProxiedAsset`.
    #[turbo_tasks::function]
    async fn chunks_data(
        &self,
        rebase_prefix_path: Vc<FileSystemPathOption>,
    ) -> Result<Vc<ChunksData>> {
        let mut chunks = self.page_chunks;
        // If we are provided a prefix path, we need to rewrite our chunk paths to
        // remove that prefix.
        if let Some(rebase_path) = &*rebase_prefix_path.await? {
            let root_path = rebase_path.root().owned().await?;
            let rebased = chunks
                .await?
                .iter()
                .map(|&chunk| {
                    let root_path = root_path.clone();
                    async move {
                        Vc::upcast::<Box<dyn OutputAsset>>(ProxiedAsset::new(
                            *chunk,
                            FileSystemPath::rebase(
                                chunk.path().owned().await?,
                                rebase_path.clone(),
                                root_path.clone(),
                            )
                            .owned()
                            .await?,
                        ))
                        .to_resolved()
                        .await
                    }
                })
                .try_join()
                .await?;
            chunks = ResolvedVc::cell(rebased);
        };
        Ok(ChunkData::from_assets(self.server_root.clone(), *chunks))
    }
}
impl PageLoaderAsset {
    /// Ident used to derive the output path:
    /// `static/chunks/pages/<asset path>.js` under the rebase prefix when
    /// present, otherwise under the server root.
    async fn ident_for_path(&self) -> Result<Vc<AssetIdent>> {
        let rebase_prefix_path = self.rebase_prefix_path.await?;
        let root = rebase_prefix_path.as_ref().unwrap_or(&self.server_root);
        Ok(AssetIdent::from_path(root.join(&format!(
            "static/chunks/pages{}",
            get_asset_path_from_pathname(&self.pathname, ".js")
        ))?)
        .with_modifier(rcstr!("page loader asset")))
    }
}
#[turbo_tasks::value_impl]
impl OutputAssetsReference for PageLoaderAsset {
    #[turbo_tasks::function]
    async fn references(self: Vc<Self>) -> Result<Vc<OutputAssetsWithReferenced>> {
        // The loader asset depends on every chunk of the page it loads.
        Ok(OutputAssetsWithReferenced::from_assets(
            *self.await?.page_chunks,
        ))
    }
}
#[turbo_tasks::value_impl]
impl OutputAsset for PageLoaderAsset {
    #[turbo_tasks::function]
    async fn path(self: Vc<Self>) -> Result<Vc<FileSystemPath>> {
        let this = self.await?;
        let ident = this.ident_for_path().await?;
        if this.use_fixed_path {
            // In development mode, don't include a content hash and put the chunk at e.g.
            // `static/chunks/pages/page2.js`, so that the dev runtime can request it at a known
            // path.
            // https://github.com/vercel/next.js/blob/84873e00874e096e6c4951dcf070e8219ed414e5/packages/next/src/client/route-loader.ts#L256-L271
            Ok(ident.path())
        } else {
            // Otherwise derive the output path via the chunking context.
            Ok(this
                .chunking_context
                .chunk_path(Some(Vc::upcast(self)), ident, None, rcstr!(".js")))
        }
    }
}
#[turbo_tasks::value_impl]
impl Asset for PageLoaderAsset {
    #[turbo_tasks::function]
    async fn content(self: Vc<Self>) -> Result<Vc<AssetContent>> {
        let this = &*self.await?;
        let chunks_data = self.chunks_data(*this.rebase_prefix_path).await?;
        let chunks_data = chunks_data.iter().try_join().await?;
        let chunks_data: Vec<_> = chunks_data
            .iter()
            .map(|chunk_data| EcmascriptChunkData::new(chunk_data))
            .collect();
        // Emit a single runtime call registering the page's chunks under its
        // pathname; the runtime provides __turbopack_load_page_chunks__.
        let content = format!(
            "__turbopack_load_page_chunks__({}, {:#})\n",
            StringifyJs(&this.pathname),
            StringifyJs(&chunks_data)
        );
        Ok(AssetContent::file(
            FileContent::Content(File::from(content)).cell(),
        ))
    }
}
|
rust
|
github
|
https://github.com/vercel/next.js
|
crates/next-core/src/page_loader.rs
|
# Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import os
import logging
from tempfile import mkdtemp
from tempfile import NamedTemporaryFile
import shutil
# project
from kiwi.bootloader.config import BootLoaderConfig
from kiwi.filesystem import FileSystem
from kiwi.filesystem.isofs import FileSystemIsoFs
from kiwi.filesystem.setup import FileSystemSetup
from kiwi.storage.loop_device import LoopDevice
from kiwi.boot.image.dracut import BootImageDracut
from kiwi.system.size import SystemSize
from kiwi.system.setup import SystemSetup
from kiwi.firmware import FirmWare
from kiwi.defaults import Defaults
from kiwi.path import Path
from kiwi.system.result import Result
from kiwi.iso_tools.iso import Iso
from kiwi.system.identifier import SystemIdentifier
from kiwi.system.kernel import Kernel
from kiwi.runtime_config import RuntimeConfig
from kiwi.iso_tools.base import IsoToolsBase
from kiwi.exceptions import (
KiwiLiveBootImageError
)
log = logging.getLogger('kiwi')
class LiveImageBuilder:
    """
    **Live image builder**
    :param object xml_state: instance of :class:`XMLState`
    :param str target_dir: target directory path name
    :param str root_dir: root directory path name
    :param dict custom_args: Custom processing arguments
    """
    def __init__(self, xml_state, target_dir, root_dir, custom_args=None):
        # Temporary working directories; created in create() and wiped in __del__
        self.media_dir = None
        self.live_container_dir = None
        self.arch = Defaults.get_platform_name()
        self.root_dir = root_dir
        self.target_dir = target_dir
        self.xml_state = xml_state
        self.live_type = xml_state.build_type.get_flags()
        # ISO volume id: explicit value from the description or the default
        self.volume_id = xml_state.build_type.get_volid() or \
            Defaults.get_volume_id()
        # Application (mbr) id written into the ISO metadata
        self.mbrid = SystemIdentifier()
        self.mbrid.calculate_id()
        self.publisher = xml_state.build_type.get_publisher() or \
            Defaults.get_publisher()
        self.custom_args = custom_args
        if not self.live_type:
            self.live_type = Defaults.get_default_live_iso_type()
        self.boot_image = BootImageDracut(
            xml_state, target_dir, self.root_dir
        )
        self.firmware = FirmWare(
            xml_state
        )
        self.system_setup = SystemSetup(
            xml_state=xml_state, root_dir=self.root_dir
        )
        # Target ISO file name: <target_dir>/<name>.<arch>-<version>.iso
        self.isoname = ''.join(
            [
                target_dir, '/',
                xml_state.xml_data.get_name(),
                '.' + Defaults.get_platform_name(),
                '-' + xml_state.get_image_version(),
                '.iso'
            ]
        )
        self.result = Result(xml_state)
        self.runtime_config = RuntimeConfig()
    def create(self):
        """
        Build a bootable hybrid live ISO image
        Image types which trigger this builder are:
        * image="iso"
        :raises KiwiLiveBootImageError: if no kernel or hypervisor is found
            in boot image tree
        :return: result
        :rtype: instance of :class:`Result`
        """
        # media dir to store CD contents
        self.media_dir = mkdtemp(
            prefix='live-media.', dir=self.target_dir
        )
        # unpack cdroot user files to media dir
        self.system_setup.import_cdroot_files(self.media_dir)
        rootsize = SystemSize(self.media_dir)
        # custom iso metadata
        log.info('Using following live ISO metadata:')
        log.info('--> Application id: {0}'.format(self.mbrid.get_id()))
        log.info('--> Publisher: {0}'.format(Defaults.get_publisher()))
        log.info('--> Volume id: {0}'.format(self.volume_id))
        custom_iso_args = {
            'meta_data': {
                'publisher': self.publisher,
                'preparer': Defaults.get_preparer(),
                'volume_id': self.volume_id,
                'mbr_id': self.mbrid.get_id(),
                'efi_mode': self.firmware.efi_mode()
            }
        }
        log.info(
            'Setting up live image bootloader configuration'
        )
        if self.firmware.efi_mode():
            # setup bootloader config to boot the ISO via EFI
            # This also embeds an MBR and the respective BIOS modules
            # for compat boot. The complete bootloader setup will be
            # based on grub
            bootloader_config = BootLoaderConfig.new(
                'grub2', self.xml_state, root_dir=self.root_dir,
                boot_dir=self.media_dir, custom_args={
                    'grub_directory_name':
                        Defaults.get_grub_boot_directory_name(self.root_dir)
                }
            )
            bootloader_config.setup_live_boot_images(
                mbrid=self.mbrid, lookup_path=self.root_dir
            )
        else:
            # setup bootloader config to boot the ISO via isolinux.
            # This allows for booting on x86 platforms in BIOS mode
            # only.
            bootloader_config = BootLoaderConfig.new(
                'isolinux', self.xml_state, root_dir=self.root_dir,
                boot_dir=self.media_dir
            )
        IsoToolsBase.setup_media_loader_directory(
            self.boot_image.boot_root_directory, self.media_dir,
            bootloader_config.get_boot_theme()
        )
        bootloader_config.write_meta_data()
        bootloader_config.setup_live_image_config(
            mbrid=self.mbrid
        )
        bootloader_config.write()
        # call custom editbootconfig script if present
        self.system_setup.call_edit_boot_config_script(
            filesystem='iso:{0}'.format(self.media_dir), boot_part_id=1,
            working_directory=self.root_dir
        )
        # prepare dracut initrd call
        self.boot_image.prepare()
        # create dracut initrd for live image
        log.info('Creating live ISO boot image')
        live_dracut_modules = Defaults.get_live_dracut_modules_from_flag(
            self.live_type
        )
        live_dracut_modules.append('pollcdrom')
        for dracut_module in live_dracut_modules:
            self.boot_image.include_module(dracut_module)
        self.boot_image.omit_module('multipath')
        # persist the dracut module selection inside the image root so later
        # initrd rebuilds use the same configuration
        self.boot_image.write_system_config_file(
            config={
                'modules': live_dracut_modules,
                'omit_modules': ['multipath']
            },
            config_file=self.root_dir + '/etc/dracut.conf.d/02-livecd.conf'
        )
        self.boot_image.create_initrd(self.mbrid)
        # setup kernel file(s) and initrd in ISO boot layout
        log.info('Setting up kernel file(s) and boot image in ISO boot layout')
        self._setup_live_iso_kernel_and_initrd()
        # calculate size and decide if we need UDF
        if rootsize.accumulate_mbyte_file_sizes() > 4096:
            log.info('ISO exceeds 4G size, using UDF filesystem')
            custom_iso_args['meta_data']['udf'] = True
        # pack system into live boot structure as expected by dracut
        log.info(
            'Packing system into dracut live ISO type: {0}'.format(
                self.live_type
            )
        )
        root_filesystem = Defaults.get_default_live_iso_root_filesystem()
        filesystem_custom_parameters = {
            'mount_options': self.xml_state.get_fs_mount_option_list(),
            'create_options': self.xml_state.get_fs_create_option_list()
        }
        filesystem_setup = FileSystemSetup(
            self.xml_state, self.root_dir
        )
        # build the root filesystem image on a loop device sized from the
        # system tree
        root_image = NamedTemporaryFile()
        loop_provider = LoopDevice(
            root_image.name,
            filesystem_setup.get_size_mbytes(root_filesystem),
            self.xml_state.build_type.get_target_blocksize()
        )
        loop_provider.create()
        live_filesystem = FileSystem.new(
            name=root_filesystem,
            device_provider=loop_provider,
            root_dir=self.root_dir + os.sep,
            custom_args=filesystem_custom_parameters
        )
        live_filesystem.create_on_device()
        log.info(
            '--> Syncing data to {0} root image'.format(root_filesystem)
        )
        live_filesystem.sync_data(
            Defaults.get_exclude_list_for_root_data_sync()
        )
        live_filesystem.umount()
        log.info('--> Creating squashfs container for root image')
        self.live_container_dir = mkdtemp(
            prefix='live-container.', dir=self.target_dir
        )
        # dracut live layout: LiveOS/rootfs.img inside LiveOS/squashfs.img
        Path.create(self.live_container_dir + '/LiveOS')
        shutil.copy(
            root_image.name, self.live_container_dir + '/LiveOS/rootfs.img'
        )
        live_container_image = FileSystem.new(
            name='squashfs',
            device_provider=None,
            root_dir=self.live_container_dir,
            custom_args={
                'compression':
                    self.xml_state.build_type.get_squashfscompression()
            }
        )
        container_image = NamedTemporaryFile()
        live_container_image.create_on_file(
            container_image.name
        )
        Path.create(self.media_dir + '/LiveOS')
        shutil.copy(
            container_image.name, self.media_dir + '/LiveOS/squashfs.img'
        )
        # create iso filesystem from media_dir
        log.info('Creating live ISO image')
        iso_image = FileSystemIsoFs(
            device_provider=None, root_dir=self.media_dir,
            custom_args=custom_iso_args
        )
        iso_image.create_on_file(self.isoname)
        # include metadata for checkmedia tool
        if self.xml_state.build_type.get_mediacheck() is True:
            Iso.set_media_tag(self.isoname)
        # enforce the configured maximum image size constraint, if any
        Result.verify_image_size(
            self.runtime_config.get_max_size_constraint(),
            self.isoname
        )
        self.result.add(
            key='live_image',
            filename=self.isoname,
            use_for_bundle=True,
            compress=False,
            shasum=True
        )
        self.result.add(
            key='image_packages',
            filename=self.system_setup.export_package_list(
                self.target_dir
            ),
            use_for_bundle=True,
            compress=False,
            shasum=False
        )
        self.result.add(
            key='image_changes',
            filename=self.system_setup.export_package_changes(
                self.target_dir
            ),
            use_for_bundle=True,
            compress=True,
            shasum=False
        )
        self.result.add(
            key='image_verified',
            filename=self.system_setup.export_package_verification(
                self.target_dir
            ),
            use_for_bundle=True,
            compress=False,
            shasum=False
        )
        return self.result
    def _setup_live_iso_kernel_and_initrd(self):
        """
        Copy kernel and initrd from the root tree into the iso boot structure
        :raises KiwiLiveBootImageError: if the kernel, a required Xen
            hypervisor, or the initrd is missing from the boot image tree
        """
        boot_path = ''.join(
            [self.media_dir, '/boot/', self.arch, '/loader']
        )
        Path.create(boot_path)
        # Move kernel files to iso filesystem structure
        kernel = Kernel(self.boot_image.boot_root_directory)
        if kernel.get_kernel():
            kernel.copy_kernel(boot_path, '/linux')
        else:
            raise KiwiLiveBootImageError(
                'No kernel in boot image tree {0} found'.format(
                    self.boot_image.boot_root_directory
                )
            )
        if self.xml_state.is_xen_server():
            if kernel.get_xen_hypervisor():
                kernel.copy_xen_hypervisor(boot_path, '/xen.gz')
            else:
                raise KiwiLiveBootImageError(
                    'No hypervisor in boot image tree {0} found'.format(
                        self.boot_image.boot_root_directory
                    )
                )
        # Move initrd to iso filesystem structure
        if os.path.exists(self.boot_image.initrd_filename):
            shutil.move(
                self.boot_image.initrd_filename, boot_path + '/initrd'
            )
        else:
            raise KiwiLiveBootImageError(
                'No boot image {0} in boot image tree {1} found'.format(
                    self.boot_image.initrd_filename,
                    self.boot_image.boot_root_directory
                )
            )
    def __del__(self):
        """Best-effort cleanup of the temporary media and container dirs."""
        if self.media_dir or self.live_container_dir:
            log.info(
                'Cleaning up {0} instance'.format(type(self).__name__)
            )
            if self.media_dir:
                Path.wipe(self.media_dir)
            if self.live_container_dir:
                Path.wipe(self.live_container_dir)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import sys
import os.path
import importlib
from inspect import getmembers, isclass
from collections import defaultdict
from optparse import make_option
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from django.conf import settings
from django.db.models.base import ModelBase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from operis.log import log
from selenium_tests.models import PpfaTest, PpfaTestRun
def runTest(location, run, logger):
    """Load and execute the test class associated with a PpfaTestRun.

    :param location: unused; retained for backward compatibility with callers
    :param run: primary key of the PpfaTestRun to execute
    :param logger: operis log object used for progress reporting
    :return: None (results are persisted on the PpfaTest/PpfaTestRun models)
    """
    try:
        test_run = PpfaTestRun.objects.get(pk=run)
    except PpfaTestRun.DoesNotExist:
        # Unknown run id: nothing to execute.
        return None
    test = test_run.ppfa_test
    logger.log("Found Test %s", [test.name], "debug")
    module_path = "selenium_tests.tests.%s" % test.location
    module = importlib.import_module(module_path)
    # Run every class defined directly in the test module.
    for name, test_class in getmembers(
            module,
            lambda member: isclass(member) and member.__module__ == module_path):
        logger.log("Found Test Class %s", [name], "notice")
        # Avoid shadowing the builtin `object` and reuse the class object
        # getmembers already produced instead of a second getattr lookup.
        instance = test_class()
        instance.logger = logger
        instance.testObject = test
        instance.runObject = test_run
        instance.set_up()
        instance.runTest()
        instance.tear_down()
        logger.log("Had Errors: %s", [len(instance.errors)], "notice")
        if not instance.errors:
            # Mark both the test and this run as successful.
            test.status = True
            test_run.status = True
            test_run.save()
            test.last_run = test_run.date_created
            test.save()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package depsfile
import (
"github.com/google/go-cmp/cmp"
)
// ProviderLockComparer is an option for github.com/google/go-cmp/cmp that
// specifies how to compare values of type depsfile.ProviderLock.
//
// Use this, rather than crafting comparison options yourself, in case the
// comparison strategy needs to change in future due to implementation details
// of the ProviderLock type.
//
// It is assigned once in init below, so it is ready before any test code runs.
var ProviderLockComparer cmp.Option

func init() {
	// For now, direct comparison of the unexported fields is good enough
	// because we store everything in a normalized form. If that changes
	// later then we might need to write a custom transformer to a hidden
	// type with exported fields, so we can retain the ability for cmp to
	// still report differences deeply.
	ProviderLockComparer = cmp.AllowUnexported(ProviderLock{})
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/depsfile/testing.go
|
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.auth.models import User as AuthUser
from django.contrib.contenttypes.models import ContentType
from django.core import checks, management
from django.db import DEFAULT_DB_ALIAS, models
from django.db.models import signals
from django.test import TestCase, override_settings
from django.test.utils import isolate_apps
from django.urls import reverse
from .admin import admin as force_admin_model_registration # NOQA
from .models import (
Abstract, BaseUser, Bug, Country, Improvement, Issue, LowerStatusPerson,
MultiUserProxy, MyPerson, MyPersonProxy, OtherPerson, Person, ProxyBug,
ProxyImprovement, ProxyProxyBug, ProxyTrackerUser, State, StateProxy,
StatusPerson, TrackerUser, User, UserProxy, UserProxyProxy,
)
class ProxyModelTests(TestCase):
    """
    Exercise proxy models (Meta.proxy = True): shared tables with the
    concrete model, manager inheritance, signals, content types, fixture
    loading, and relations that traverse proxies.
    """
    def test_same_manager_queries(self):
        """
        The MyPerson model should be generating the same database queries as
        the Person model (when the same manager is used in each case).
        """
        # Compare the compiled SQL instead of executing the queries.
        my_person_sql = MyPerson.other.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        person_sql = Person.objects.order_by("name").query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertEqual(my_person_sql, person_sql)
    def test_inheritance_new_table(self):
        """
        The StatusPerson models should have its own table (it's using ORM-level
        inheritance).
        """
        sp_sql = StatusPerson.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        p_sql = Person.objects.all().query.get_compiler(
            DEFAULT_DB_ALIAS).as_sql()
        self.assertNotEqual(sp_sql, p_sql)
    def test_basic_proxy(self):
        """
        Creating a Person makes them accessible through the MyPerson proxy.
        """
        person = Person.objects.create(name="Foo McBar")
        self.assertEqual(len(Person.objects.all()), 1)
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
        self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
    def test_no_proxy(self):
        """
        Person is not proxied by StatusPerson subclass.
        """
        Person.objects.create(name="Foo McBar")
        self.assertEqual(list(StatusPerson.objects.all()), [])
    def test_basic_proxy_reverse(self):
        """
        A new MyPerson also shows up as a standard Person.
        """
        MyPerson.objects.create(name="Bazza del Frob")
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(len(Person.objects.all()), 1)
        LowerStatusPerson.objects.create(status="low", name="homer")
        lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
        self.assertEqual(lsps, ["homer"])
    def test_correct_type_proxy_of_proxy(self):
        """
        Correct type when querying a proxy of proxy
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        pp = sorted(mpp.name for mpp in MyPersonProxy.objects.all())
        self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])
    def test_proxy_included_in_ancestors(self):
        """
        Proxy models are included in the ancestors for a model's DoesNotExist
        and MultipleObjectsReturned
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        # Exceptions raised through the proxy are the concrete model's.
        with self.assertRaises(Person.DoesNotExist):
            MyPersonProxy.objects.get(name='Zathras')
        with self.assertRaises(Person.MultipleObjectsReturned):
            MyPersonProxy.objects.get(id__lt=max_id + 1)
        with self.assertRaises(Person.DoesNotExist):
            StatusPerson.objects.get(name='Zathras')
        StatusPerson.objects.create(name='Bazza Jr.')
        StatusPerson.objects.create(name='Foo Jr.')
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        with self.assertRaises(Person.MultipleObjectsReturned):
            StatusPerson.objects.get(id__lt=max_id + 1)
    def test_abstract_base_with_model_fields(self):
        # A proxy may not be built on an abstract base that declares fields.
        msg = "Abstract base class containing model fields not permitted for proxy model 'NoAbstract'."
        with self.assertRaisesMessage(TypeError, msg):
            class NoAbstract(Abstract):
                class Meta:
                    proxy = True
    def test_too_many_concrete_classes(self):
        msg = "Proxy model 'TooManyBases' has more than one non-abstract model base class."
        with self.assertRaisesMessage(TypeError, msg):
            class TooManyBases(User, Person):
                class Meta:
                    proxy = True
    def test_no_base_classes(self):
        msg = "Proxy model 'NoBaseClasses' has no non-abstract model base class."
        with self.assertRaisesMessage(TypeError, msg):
            class NoBaseClasses(models.Model):
                class Meta:
                    proxy = True
    @isolate_apps('proxy_models')
    def test_new_fields(self):
        # Declaring new fields on a proxy is a system-check error (E017),
        # not an exception at class-creation time.
        class NoNewFields(Person):
            newfield = models.BooleanField()
            class Meta:
                proxy = True
        errors = NoNewFields.check()
        expected = [
            checks.Error(
                "Proxy model 'NoNewFields' contains model fields.",
                id='models.E017',
            )
        ]
        self.assertEqual(errors, expected)
    @override_settings(TEST_SWAPPABLE_MODEL='proxy_models.AlternateModel')
    @isolate_apps('proxy_models')
    def test_swappable(self):
        class SwappableModel(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPABLE_MODEL'
        class AlternateModel(models.Model):
            pass
        # You can't proxy a swapped model
        with self.assertRaises(TypeError):
            class ProxyModel(SwappableModel):
                class Meta:
                    proxy = True
    def test_myperson_manager(self):
        # MyPerson's default manager filters/orders, so "wilma" is excluded.
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in MyPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in MyPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'fred'])
    def test_otherperson_manager(self):
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in OtherPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'wilma'])
        resp = [p.name for p in OtherPerson.excluder.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in OtherPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'wilma'])
    def test_permissions_created(self):
        # Custom Meta.permissions on a proxy must be created by migrate.
        from django.contrib.auth.models import Permission
        try:
            Permission.objects.get(name="May display users information")
        except Permission.DoesNotExist:
            self.fail("The permission 'May display users information' has not been created")
    def test_proxy_model_signals(self):
        """
        Test save signals for proxy models
        """
        output = []
        def make_handler(model, event):
            def _handler(*args, **kwargs):
                output.append('%s %s save' % (model, event))
            return _handler
        h1 = make_handler('MyPerson', 'pre')
        h2 = make_handler('MyPerson', 'post')
        h3 = make_handler('Person', 'pre')
        h4 = make_handler('Person', 'post')
        signals.pre_save.connect(h1, sender=MyPerson)
        signals.post_save.connect(h2, sender=MyPerson)
        signals.pre_save.connect(h3, sender=Person)
        signals.post_save.connect(h4, sender=Person)
        # Saving through the proxy fires only the proxy's handlers here.
        MyPerson.objects.create(name="dino")
        self.assertEqual(output, [
            'MyPerson pre save',
            'MyPerson post save'
        ])
        output = []
        h5 = make_handler('MyPersonProxy', 'pre')
        h6 = make_handler('MyPersonProxy', 'post')
        signals.pre_save.connect(h5, sender=MyPersonProxy)
        signals.post_save.connect(h6, sender=MyPersonProxy)
        MyPersonProxy.objects.create(name="pebbles")
        self.assertEqual(output, [
            'MyPersonProxy pre save',
            'MyPersonProxy post save'
        ])
        # Disconnect everything so other tests see pristine signal state.
        signals.pre_save.disconnect(h1, sender=MyPerson)
        signals.post_save.disconnect(h2, sender=MyPerson)
        signals.pre_save.disconnect(h3, sender=Person)
        signals.post_save.disconnect(h4, sender=Person)
        signals.pre_save.disconnect(h5, sender=MyPersonProxy)
        signals.post_save.disconnect(h6, sender=MyPersonProxy)
    def test_content_type(self):
        # A proxy shares its concrete model's ContentType row.
        ctype = ContentType.objects.get_for_model
        self.assertIs(ctype(Person), ctype(OtherPerson))
    def test_user_proxy_models(self):
        User.objects.create(name='Bruce')
        resp = [u.name for u in User.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxyProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        self.assertEqual([u.name for u in MultiUserProxy.objects.all()], ['Bruce'])
    def test_proxy_for_model(self):
        # proxy_for_model is the immediate parent, not the concrete root.
        self.assertEqual(UserProxy, UserProxyProxy._meta.proxy_for_model)
    def test_concrete_model(self):
        self.assertEqual(User, UserProxyProxy._meta.concrete_model)
    def test_proxy_delete(self):
        """
        Proxy objects can be deleted
        """
        User.objects.create(name='Bruce')
        u2 = UserProxy.objects.create(name='George')
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce', 'George'])
        u2.delete()
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
    def test_select_related(self):
        """
        We can still use `select_related()` to include related models in our
        querysets.
        """
        country = Country.objects.create(name='Australia')
        State.objects.create(name='New South Wales', country=country)
        resp = [s.name for s in State.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        resp = [s.name for s in StateProxy.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        self.assertEqual(StateProxy.objects.get(name='New South Wales').name, 'New South Wales')
        resp = StateProxy.objects.select_related().get(name='New South Wales')
        self.assertEqual(resp.name, 'New South Wales')
    def test_filter_proxy_relation_reverse(self):
        # Reverse FK accessors and filters work through the proxy too.
        tu = TrackerUser.objects.create(name='Contributor', status='contrib')
        ptu = ProxyTrackerUser.objects.get()
        issue = Issue.objects.create(assignee=tu)
        self.assertEqual(tu.issues.get(), issue)
        self.assertEqual(ptu.issues.get(), issue)
        self.assertQuerysetEqual(
            TrackerUser.objects.filter(issues=issue),
            [tu], lambda x: x
        )
        self.assertQuerysetEqual(
            ProxyTrackerUser.objects.filter(issues=issue),
            [ptu], lambda x: x
        )
    def test_proxy_bug(self):
        contributor = ProxyTrackerUser.objects.create(name='Contributor', status='contrib')
        someone = BaseUser.objects.create(name='Someone')
        Bug.objects.create(summary='fix this', version='1.1beta', assignee=contributor, reporter=someone)
        pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor', status='proxy')
        Improvement.objects.create(
            summary='improve that', version='1.1beta',
            assignee=contributor, reporter=pcontributor,
            associated_bug=ProxyProxyBug.objects.all()[0],
        )
        # Related field filter on proxy
        resp = ProxyBug.objects.get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Select related + filter on proxy
        resp = ProxyBug.objects.select_related().get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Proxy of proxy, select_related + filter
        resp = ProxyProxyBug.objects.select_related().get(
            version__icontains='beta'
        )
        self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
        # Select related + filter on a related proxy field
        resp = ProxyImprovement.objects.select_related().get(
            reporter__name__icontains='butor'
        )
        self.assertEqual(
            repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )
        # Select related + filter on a related proxy of proxy field
        resp = ProxyImprovement.objects.select_related().get(
            associated_bug__summary__icontains='fix'
        )
        self.assertEqual(
            repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )
    def test_proxy_load_from_fixture(self):
        # Fixtures serialized against the concrete model load via the proxy.
        management.call_command('loaddata', 'mypeople.json', verbosity=0)
        p = MyPerson.objects.get(pk=100)
        self.assertEqual(p.name, 'Elvis Presley')
    def test_eq(self):
        # A proxy instance compares equal to a concrete instance with same pk.
        self.assertEqual(MyPerson(id=100), Person(id=100))
@override_settings(ROOT_URLCONF='proxy_models.urls')
class ProxyModelAdminTests(TestCase):
    """
    Admin-facing behavior of proxy models: cascade-delete collection and
    the string representation shown on the delete confirmation page.
    """
    # NOTE(review): `AuthUser` and `admin` are referenced below but not in
    # the imports visible in this chunk — presumably imported at module
    # level elsewhere in the file; confirm.
    @classmethod
    def setUpTestData(cls):
        cls.superuser = AuthUser.objects.create(is_superuser=True, is_staff=True)
        cls.tu1 = ProxyTrackerUser.objects.create(name='Django Pony', status='emperor')
        cls.i1 = Issue.objects.create(summary="Pony's Issue", assignee=cls.tu1)
    def test_cascade_delete_proxy_model_admin_warning(self):
        """
        Test if admin gives warning about cascade deleting models referenced
        to concrete model by deleting proxy object.
        """
        tracker_user = TrackerUser.objects.all()[0]
        base_user = BaseUser.objects.all()[0]
        issue = Issue.objects.all()[0]
        # Collecting via the proxy must find objects related to the
        # concrete model, within a bounded number of queries.
        with self.assertNumQueries(7):
            collector = admin.utils.NestedObjects('default')
            collector.collect(ProxyTrackerUser.objects.all())
        self.assertIn(tracker_user, collector.edges.get(None, ()))
        self.assertIn(base_user, collector.edges.get(None, ()))
        self.assertIn(issue, collector.edges.get(tracker_user, ()))
    def test_delete_str_in_model_admin(self):
        """
        Test if the admin delete page shows the correct string representation
        for a proxy model.
        """
        user = TrackerUser.objects.get(name='Django Pony')
        proxy = ProxyTrackerUser.objects.get(name='Django Pony')
        user_str = 'Tracker user: <a href="%s">%s</a>' % (
            reverse('admin_proxy:proxy_models_trackeruser_change', args=(user.pk,)), user
        )
        proxy_str = 'Proxy tracker user: <a href="%s">%s</a>' % (
            reverse('admin_proxy:proxy_models_proxytrackeruser_change', args=(proxy.pk,)), proxy
        )
        self.client.force_login(self.superuser)
        response = self.client.get(reverse('admin_proxy:proxy_models_trackeruser_delete', args=(user.pk,)))
        delete_str = response.context['deleted_objects'][0]
        self.assertEqual(delete_str, user_str)
        response = self.client.get(reverse('admin_proxy:proxy_models_proxytrackeruser_delete', args=(proxy.pk,)))
        delete_str = response.context['deleted_objects'][0]
        self.assertEqual(delete_str, proxy_str)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Exceptions used throughout package"""
class PipError(Exception):
    """Base exception class from which all pip exceptions derive."""
class InstallationError(PipError):
    """General exception raised during package installation."""
class UninstallationError(PipError):
    """General exception raised during package uninstallation."""
class DistributionNotFound(InstallationError):
    """Raised when no distribution can be found to satisfy a requirement."""
class BestVersionAlreadyInstalled(PipError):
    """Raised when the most up-to-date version of a package is already
    installed."""
class BadCommand(PipError):
    """Raised when virtualenv or a required external command is not found."""
class CommandError(PipError):
    """Raised when there is an error in the command-line arguments."""
class PreviousBuildDirError(PipError):
    """Raised when a previous, conflicting build directory already exists."""
class HashMismatch(InstallationError):
    """Raised when a distribution file's hash does not match the expected
    value."""
class InvalidWheelFilename(InstallationError):
    """Raised when a wheel filename does not follow the expected format."""
class UnsupportedWheel(InstallationError):
    """Raised when a wheel is not supported on the current platform."""
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from _collections_abc import Set as _Set, Sequence as _Sequence
from hashlib import sha512 as _sha512
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate", "getrandbits",
"SystemRandom"]
# Constants precomputed once at import time for the distribution methods.
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)  # rejection bound for normalvariate()
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)  # used by gammavariate() (Cheng's algorithm)
BPF = 53        # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
    """Random number generator base class used by bound module functions.
    Used to instantiate instances of Random to get generators that don't
    share state.
    Class Random can also be subclassed if you want to use a different basic
    generator of your own devising: in that case, override the following
    methods:  random(), seed(), getstate(), and setstate().
    Optionally, implement a getrandbits() method so that randrange()
    can cover arbitrarily large ranges.
    """
    VERSION = 3     # used by getstate/setstate
    def __init__(self, x=None):
        """Initialize an instance.
        Optional argument x controls seeding, as for Random.seed().
        """
        self.seed(x)
        self.gauss_next = None
    def seed(self, a=None, version=2):
        """Initialize internal state from hashable object.
        None or no argument seeds from current time or from an operating
        system specific randomness source if available.
        For version 2 (the default), all of the bits are used if *a* is a str,
        bytes, or bytearray.  For version 1, the hash() of *a* is used instead.
        If *a* is an int, all bits are used.
        """
        if a is None:
            try:
                # Seed with enough bytes to span the 19937 bit
                # state space for the Mersenne Twister
                a = int.from_bytes(_urandom(2500), 'big')
            except NotImplementedError:
                import time
                a = int(time.time() * 256) # use fractional seconds
        if version == 2:
            if isinstance(a, (str, bytes, bytearray)):
                if isinstance(a, str):
                    a = a.encode()
                # Append a SHA-512 digest so every bit of *a* influences
                # the resulting integer seed.
                a += _sha512(a).digest()
                a = int.from_bytes(a, 'big')
        super().seed(a)
        self.gauss_next = None
    def getstate(self):
        """Return internal state; can be passed to setstate() later."""
        return self.VERSION, super().getstate(), self.gauss_next
    def setstate(self, state):
        """Restore internal state from object returned by getstate()."""
        version = state[0]
        if version == 3:
            version, internalstate, self.gauss_next = state
            super().setstate(internalstate)
        elif version == 2:
            version, internalstate, self.gauss_next = state
            # In version 2, the state was saved as signed ints, which causes
            #   inconsistencies between 32/64-bit systems. The state is
            #   really unsigned 32-bit ints, so we convert negative ints from
            #   version 2 to positive longs for version 3.
            try:
                internalstate = tuple(x % (2**32) for x in internalstate)
            except ValueError as e:
                raise TypeError from e
            super().setstate(internalstate)
        else:
            raise ValueError("state with version %s passed to "
                             "Random.setstate() of version %s" %
                             (version, self.VERSION))
    ## ---- Methods below this point do not need to be overridden when
    ## ---- subclassing for the purpose of using a different core generator.
    ## -------------------- pickle support  -------------------
    # Issue 17489: Since __reduce__ was defined to fix #759889 this is no
    # longer called; we leave it here because it has been here since random was
    # rewritten back in 2001 and why risk breaking something.
    def __getstate__(self): # for pickle
        return self.getstate()
    def __setstate__(self, state):  # for pickle
        self.setstate(state)
    def __reduce__(self):
        return self.__class__, (), self.getstate()
    ## -------------------- integer methods  -------------------
    def randrange(self, start, stop=None, step=1, _int=int):
        """Choose a random item from range(start, stop[, step]).
        This fixes the problem with randint() which includes the
        endpoint; in Python this is usually not what you want.
        """
        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = _int(start)
        if istart != start:
            raise ValueError("non-integer arg 1 for randrange()")
        if stop is None:
            # One-argument form: randrange(stop).
            if istart > 0:
                return self._randbelow(istart)
            raise ValueError("empty range for randrange()")
        # stop argument supplied.
        istop = _int(stop)
        if istop != stop:
            raise ValueError("non-integer stop for randrange()")
        width = istop - istart
        if step == 1 and width > 0:
            return istart + self._randbelow(width)
        if step == 1:
            raise ValueError("empty range for randrange() (%d,%d, %d)" % (istart, istop, width))
        # Non-unit step argument supplied.
        istep = _int(step)
        if istep != step:
            raise ValueError("non-integer step for randrange()")
        # n is the number of items in range(istart, istop, istep).
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError("zero step for randrange()")
        if n <= 0:
            raise ValueError("empty range for randrange()")
        return istart + istep*self._randbelow(n)
    def randint(self, a, b):
        """Return random integer in range [a, b], including both end points.
        """
        return self.randrange(a, b+1)
    def _randbelow(self, n, int=int, maxsize=1<<BPF, type=type,
                   Method=_MethodType, BuiltinMethod=_BuiltinMethodType):
        "Return a random int in the range [0,n).  Raises ValueError if n==0."
        random = self.random
        getrandbits = self.getrandbits
        # Only call self.getrandbits if the original random() builtin method
        # has not been overridden or if a new getrandbits() was supplied.
        if type(random) is BuiltinMethod or type(getrandbits) is Method:
            # Rejection sampling: draw k-bit values until one is < n,
            # which keeps the result exactly uniform over [0, n).
            k = n.bit_length()  # don't use (n-1) here because n can be 1
            r = getrandbits(k)          # 0 <= r < 2**k
            while r >= n:
                r = getrandbits(k)
            return r
        # There's an overriden random() method but no new getrandbits() method,
        # so we can only use random() from here.
        if n >= maxsize:
            _warn("Underlying random() generator does not supply \n"
                "enough bits to choose from a population range this large.\n"
                "To remove the range limitation, add a getrandbits() method.")
            return int(random() * n)
        rem = maxsize % n
        limit = (maxsize - rem) / maxsize   # int(limit * maxsize) % n == 0
        r = random()
        while r >= limit:
            r = random()
        return int(r*maxsize) % n
    ## -------------------- sequence methods  -------------------
    def choice(self, seq):
        """Choose a random element from a non-empty sequence."""
        try:
            i = self._randbelow(len(seq))
        except ValueError:
            raise IndexError('Cannot choose from an empty sequence')
        return seq[i]
    def shuffle(self, x, random=None):
        """Shuffle list x in place, and return None.
        Optional argument random is a 0-argument function returning a
        random float in [0.0, 1.0); if it is the default None, the
        standard random.random will be used.
        """
        # Fisher-Yates shuffle, iterating from the tail of the list.
        if random is None:
            randbelow = self._randbelow
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = randbelow(i+1)
                x[i], x[j] = x[j], x[i]
        else:
            _int = int
            for i in reversed(range(1, len(x))):
                # pick an element in x[:i+1] with which to exchange x[i]
                j = _int(random() * (i+1))
                x[i], x[j] = x[j], x[i]
    def sample(self, population, k):
        """Chooses k unique random elements from a population sequence or set.
        Returns a new list containing elements from the population while
        leaving the original population unchanged.  The resulting list is
        in selection order so that all sub-slices will also be valid random
        samples.  This allows raffle winners (the sample) to be partitioned
        into grand prize and second place winners (the subslices).
        Members of the population need not be hashable or unique.  If the
        population contains repeats, then each occurrence is a possible
        selection in the sample.
        To choose a sample in a range of integers, use range as an argument.
        This is especially fast and space efficient for sampling from a
        large population:   sample(range(10000000), 60)
        """
        # Sampling without replacement entails tracking either potential
        # selections (the pool) in a list or previous selections in a set.
        # When the number of selections is small compared to the
        # population, then tracking selections is efficient, requiring
        # only a small set and an occasional reselection.  For
        # a larger number of selections, the pool tracking method is
        # preferred since the list takes less space than the
        # set and it doesn't suffer from frequent reselections.
        if isinstance(population, _Set):
            population = tuple(population)
        if not isinstance(population, _Sequence):
            raise TypeError("Population must be a sequence or set.  For dicts, use list(d).")
        randbelow = self._randbelow
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError("Sample larger than population")
        result = [None] * k
        setsize = 21        # size of a small set minus size of an empty list
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
        if n <= setsize:
            # An n-length list is smaller than a k-length set
            pool = list(population)
            for i in range(k):   # invariant:  non-selected at [0,n-i)
                j = randbelow(n-i)
                result[i] = pool[j]
                pool[j] = pool[n-i-1]   # move non-selected item into vacancy
        else:
            selected = set()
            selected_add = selected.add
            for i in range(k):
                j = randbelow(n)
                while j in selected:
                    j = randbelow(n)
                selected_add(j)
                result[i] = population[j]
        return result
    ## -------------------- real-valued distributions  -------------------
    ## -------------------- uniform distribution -------------------
    def uniform(self, a, b):
        "Get a random number in the range [a, b) or [a, b] depending on rounding."
        return a + (b-a) * self.random()
    ## -------------------- triangular --------------------
    def triangular(self, low=0.0, high=1.0, mode=None):
        """Triangular distribution.
        Continuous distribution bounded by given lower and upper limits,
        and having a given mode value in-between.
        http://en.wikipedia.org/wiki/Triangular_distribution
        """
        u = self.random()
        try:
            c = 0.5 if mode is None else (mode - low) / (high - low)
        except ZeroDivisionError:
            # Degenerate case: low == high, so the answer is that value.
            return low
        if u > c:
            # Mirror the problem so the inverse-CDF formula below applies.
            u = 1.0 - u
            c = 1.0 - c
            low, high = high, low
        return low + (high - low) * (u * c) ** 0.5
    ## -------------------- normal distribution --------------------
    def normalvariate(self, mu, sigma):
        """Normal distribution.
        mu is the mean, and sigma is the standard deviation.
        """
        # mu = mean, sigma = standard deviation
        # Uses Kinderman and Monahan method. Reference: Kinderman,
        # A.J. and Monahan, J.F., "Computer generation of random
        # variables using the ratio of uniform deviates", ACM Trans
        # Math Software, 3, (1977), pp257-260.
        random = self.random
        while 1:
            u1 = random()
            u2 = 1.0 - random()
            z = NV_MAGICCONST*(u1-0.5)/u2
            zz = z*z/4.0
            if zz <= -_log(u2):
                break
        return mu + z*sigma
    ## -------------------- lognormal distribution --------------------
    def lognormvariate(self, mu, sigma):
        """Log normal distribution.
        If you take the natural logarithm of this distribution, you'll get a
        normal distribution with mean mu and standard deviation sigma.
        mu can have any value, and sigma must be greater than zero.
        """
        return _exp(self.normalvariate(mu, sigma))
    ## -------------------- exponential distribution --------------------
    def expovariate(self, lambd):
        """Exponential distribution.
        lambd is 1.0 divided by the desired mean.  It should be
        nonzero.  (The parameter would be called "lambda", but that is
        a reserved word in Python.)  Returned values range from 0 to
        positive infinity if lambd is positive, and from negative
        infinity to 0 if lambd is negative.
        """
        # lambd: rate lambd = 1/mean
        # ('lambda' is a Python reserved word)
        # we use 1-random() instead of random() to preclude the
        # possibility of taking the log of zero.
        return -_log(1.0 - self.random())/lambd
    ## -------------------- von Mises distribution --------------------
    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.
        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero.  If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.
        """
        # mu:    mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle
        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.
        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.
        random = self.random
        if kappa <= 1e-6:
            return TWOPI * random()
        s = 0.5 / kappa
        r = s + _sqrt(1.0 + s * s)
        while 1:
            u1 = random()
            z = _cos(_pi * u1)
            d = z / (r + z)
            u2 = random()
            if u2 < 1.0 - d * d or u2 <= (1.0 - d) * _exp(d):
                break
        q = 1.0 / r
        f = (q + z) / (1.0 + q * z)
        u3 = random()
        if u3 > 0.5:
            theta = (mu + _acos(f)) % TWOPI
        else:
            theta = (mu - _acos(f)) % TWOPI
        return theta
    ## -------------------- gamma distribution --------------------
    def gammavariate(self, alpha, beta):
        """Gamma distribution.  Not the gamma function!
        Conditions on the parameters are alpha > 0 and beta > 0.
        The probability distribution function is:
                    x ** (alpha - 1) * math.exp(-x / beta)
          pdf(x) =  --------------------------------------
                      math.gamma(alpha) * beta ** alpha
        """
        # alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
        # Warning: a few older sources define the gamma distribution in terms
        # of alpha > -1.0
        if alpha <= 0.0 or beta <= 0.0:
            raise ValueError('gammavariate: alpha and beta must be > 0.0')
        random = self.random
        if alpha > 1.0:
            # Uses R.C.H. Cheng, "The generation of Gamma
            # variables with non-integral shape parameters",
            # Applied Statistics, (1977), 26, No. 1, p71-74
            ainv = _sqrt(2.0 * alpha - 1.0)
            bbb = alpha - LOG4
            ccc = alpha + ainv
            while 1:
                u1 = random()
                if not 1e-7 < u1 < .9999999:
                    continue
                u2 = 1.0 - random()
                v = _log(u1/(1.0-u1))/ainv
                x = alpha*_exp(v)
                z = u1*u1*u2
                r = bbb+ccc*v-x
                if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
                    return x * beta
        elif alpha == 1.0:
            # expovariate(1)
            u = random()
            while u <= 1e-7:
                u = random()
            return -_log(u) * beta
        else:   # alpha is between 0 and 1 (exclusive)
            # Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
            while 1:
                u = random()
                b = (_e + alpha)/_e
                p = b*u
                if p <= 1.0:
                    x = p ** (1.0/alpha)
                else:
                    x = -_log((b-p)/alpha)
                u1 = random()
                if p > 1.0:
                    if u1 <= x ** (alpha - 1.0):
                        break
                elif u1 <= _exp(-x):
                    break
            return x * beta
    ## -------------------- Gauss (faster alternative) --------------------
    def gauss(self, mu, sigma):
        """Gaussian distribution.
        mu is the mean, and sigma is the standard deviation.  This is
        slightly faster than the normalvariate() function.
        Not thread-safe without a lock around calls.
        """
        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)
        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value.  The window is very small though.  To
        # avoid this, you have to use a lock around all calls.  (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)
        random = self.random
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            # Box-Muller generates deviates in pairs; cache the second one.
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad
        return mu + z*sigma
    ## -------------------- beta --------------------
    ## See
    ## http://mail.python.org/pipermail/python-bugs-list/2001-January/003752.html
    ## for Ivan Frohne's insightful analysis of why the original implementation:
    ##
    ##    def betavariate(self, alpha, beta):
    ##        # Discrete Event Simulation in C, pp 87-88.
    ##
    ##        y = self.expovariate(alpha)
    ##        z = self.expovariate(1.0/beta)
    ##        return z/(y+z)
    ##
    ## was dead wrong, and how it probably got that way.
    def betavariate(self, alpha, beta):
        """Beta distribution.
        Conditions on the parameters are alpha > 0 and beta > 0.
        Returned values range between 0 and 1.
        """
        # This version due to Janne Sinkkonen, and matches all the std
        # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
        y = self.gammavariate(alpha, 1.)
        if y == 0:
            return 0.0
        else:
            return y / (y + self.gammavariate(beta, 1.))
    ## -------------------- Pareto --------------------
    def paretovariate(self, alpha):
        """Pareto distribution.  alpha is the shape parameter."""
        # Jain, pg. 495
        u = 1.0 - self.random()
        return 1.0 / u ** (1.0/alpha)
    ## -------------------- Weibull --------------------
    def weibullvariate(self, alpha, beta):
        """Weibull distribution.
        alpha is the scale parameter and beta is the shape parameter.
        """
        # Jain, pg. 499; bug fix courtesy Bill Arms
        u = 1.0 - self.random()
        return alpha * (-_log(u)) ** (1.0/beta)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
    """Alternate random number generator using sources provided
    by the operating system (such as /dev/urandom on Unix or
    CryptGenRandom on Windows).
    Not available on all systems (see os.urandom() for details).
    """
    def random(self):
        """Get the next random number in the range [0.0, 1.0)."""
        # Draw 7 bytes (56 bits) and discard 3 so exactly BPF (53)
        # random bits remain, then scale into [0.0, 1.0).
        bits = int.from_bytes(_urandom(7), 'big') >> 3
        return bits * RECIP_BPF
    def getrandbits(self, k):
        """getrandbits(k) -> x.  Generates an int with k random bits."""
        if k <= 0:
            raise ValueError('number of bits must be greater than zero')
        if k != int(k):
            raise TypeError('number of bits should be an integer')
        nbytes = (k + 7) // 8               # bits / 8, rounded up
        word = int.from_bytes(_urandom(nbytes), 'big')
        excess = nbytes * 8 - k
        return word >> excess               # drop the surplus low bits
    def seed(self, *args, **kwds):
        "Stub method.  Not used for a system random number generator."
        return None
    def _notimplemented(self, *args, **kwds):
        "Method should not be called for a system random number generator."
        raise NotImplementedError('System entropy source does not have state.')
    getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print(n, 'times', func.__name__)
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print(round(t1-t0, 3), 'sec,', end=' ')
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print('avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest))
def _test(N=2000):
    """Smoke-test every exported generator, printing timing/statistics."""
    # Table of (generator, args) pairs, including a sweep of gammavariate
    # parameter values.
    cases = [
        (random, ()),
        (normalvariate, (0.0, 1.0)),
        (lognormvariate, (0.0, 1.0)),
        (vonmisesvariate, (0.0, 1.0)),
        (gammavariate, (0.01, 1.0)),
        (gammavariate, (0.1, 1.0)),
        (gammavariate, (0.1, 2.0)),
        (gammavariate, (0.5, 1.0)),
        (gammavariate, (0.9, 1.0)),
        (gammavariate, (1.0, 1.0)),
        (gammavariate, (2.0, 1.0)),
        (gammavariate, (20.0, 1.0)),
        (gammavariate, (200.0, 1.0)),
        (gauss, (0.0, 1.0)),
        (betavariate, (3.0, 3.0)),
        (triangular, (0.0, 1.0, 1.0/3.0)),
    ]
    for func, args in cases:
        _test_generator(N, func, args)
# Create one instance, seeded from current time, and export its methods
# as module-level functions.  The functions share state across all uses
# (both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
getrandbits = _inst.getrandbits

# Run the statistical smoke test when executed as a script.
if __name__ == '__main__':
    _test()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from tempest.api.compute import base
from tempest import test
from tempest_lib import exceptions as e
# Time that waits for until returning valid response
# TODO(takmatsu): Ideally this value would come from configuration.
VALID_WAIT = 30
class TenantUsagesTestJSON(base.BaseV2ComputeAdminTest):
    """Tests for the os-simple-tenant-usage admin API."""

    @classmethod
    def setup_clients(cls):
        super(TenantUsagesTestJSON, cls).setup_clients()
        # Admin client can query any tenant; the plain client is scoped
        # to its own tenant.
        cls.adm_client = cls.os_adm.tenant_usages_client
        cls.client = cls.os.tenant_usages_client

    @classmethod
    def resource_setup(cls):
        super(TenantUsagesTestJSON, cls).resource_setup()
        cls.tenant_id = cls.client.tenant_id
        # Create a server in the demo tenant so there is usage to report.
        cls.create_test_server(wait_until='ACTIVE')
        # Query a window of +/- one day around "now" so the new server's
        # usage is guaranteed to fall inside it.
        now = datetime.datetime.now()
        cls.start = cls._parse_strtime(now - datetime.timedelta(days=1))
        cls.end = cls._parse_strtime(now + datetime.timedelta(days=1))

    @classmethod
    def _parse_strtime(cls, at):
        # Returns the datetime formatted the way the usage API expects
        # (ISO-like, with microseconds).
        return at.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def call_until_valid(self, func, duration, *args, **kwargs):
        # Call until we get a valid response, for at most "duration"
        # seconds (polling every 1s), because tenant usage doesn't become
        # available immediately after the VM is created.
        def is_valid():
            try:
                self.resp = func(*args, **kwargs)
                return True
            except e.InvalidHTTPResponseBody:
                return False
        test.call_until_true(is_valid, duration, 1)
        # NOTE(review): if no call ever succeeded, self.resp was never set
        # and the next line raises AttributeError -- confirm intended.
        return self.resp

    @test.idempotent_id('062c8ae9-9912-4249-8b51-e38d664e926e')
    def test_list_usage_all_tenants(self):
        # Get usage for all tenants (admin-only, detailed listing).
        tenant_usage = self.call_until_valid(
            self.adm_client.list_tenant_usages, VALID_WAIT,
            start=self.start, end=self.end, detailed="1")['tenant_usages'][0]
        # Usage entries are expected to carry exactly 8 fields.
        self.assertEqual(len(tenant_usage), 8)

    @test.idempotent_id('94135049-a4c5-4934-ad39-08fa7da4f22e')
    def test_get_usage_tenant(self):
        # Get usage for a specific tenant via the admin client.
        tenant_usage = self.call_until_valid(
            self.adm_client.show_tenant_usage, VALID_WAIT,
            self.tenant_id, start=self.start, end=self.end)['tenant_usage']
        self.assertEqual(len(tenant_usage), 8)

    @test.idempotent_id('9d00a412-b40e-4fd9-8eba-97b496316116')
    def test_get_usage_tenant_with_non_admin_user(self):
        # Get usage for a specific tenant with a non-admin user.
        tenant_usage = self.call_until_valid(
            self.client.show_tenant_usage, VALID_WAIT,
            self.tenant_id, start=self.start, end=self.end)['tenant_usage']
        self.assertEqual(len(tenant_usage), 8)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import types
from Globals import *
from AccessControl import ClassSecurityInfo
from AccessControl import Permissions
from Products.ZenModel.ZenossSecurity import *
from Products.ZenModel.Organizer import Organizer
from Products.ZenRelations.RelSchema import *
from Products.ZenUtils.Search import makeCaseInsensitiveKeywordIndex
from Products.ZenWidgets import messaging
from Products.ZenModel.ZenPackable import ZenPackable
def manage_addProfileOrganizer(context, id='Profiles', REQUEST = None):
    """Create a ProfileOrganizer inside *context* and, when called
    through the web (REQUEST given), redirect back to the ZMI.

    NOTE(review): the original docstring said "make a device class",
    which looks like copy/paste from another factory.
    """
    porg = ProfileOrganizer(id)
    context._setObject(id, porg)
    # re-read the object from the container after it is stored
    porg = context._getOb(id)
    if REQUEST is not None:
        REQUEST['RESPONSE'].redirect(context.absolute_url() + '/manage_main')

# ZMI add-form for the factory above.
addProfileOrganizer = DTMLFile('dtml/addProfileOrganizer',globals())
class ProfileOrganizer(Organizer, ZenPackable):
    """
    ProfileOrganizer is the base class for rulesets and rules.  It roots
    the /Profiles organizer tree, holds ProfileModule rule sets via the
    "rulesets" relation, and owns the profileSearch catalog used to look
    up membership rules.
    """

    meta_type = "ProfileOrganizer"
    dmdRootName = "Profiles"
    default_catalog = 'profileSearch'

    security = ClassSecurityInfo()

    _relations = Organizer._relations + ZenPackable._relations + (
        ("rulesets",
         ToManyCont(ToOne,
                    "ZenPacks.community.zenAppProfiler.ProfileModule",
                    "ruleorganizer")),
    )

    # ZMI tab definitions.  NOTE(review): the 'packs' action points at
    # 'ZenUsers/manageUserFolder', the same view as 'users' -- looks like
    # a copy/paste slip; confirm the intended ZenPacks view.
    factory_type_information = (
        {
            'immediate_view': 'viewProfileOrganizer',
            'actions': (
                {'id': 'settings', 'name': 'Settings',
                 'action': 'editSettings', 'permissions': ('Manage DMD',)},
                {'id': 'manage', 'name': 'Commands',
                 'action': 'dataRootManage', 'permissions': ('Manage DMD',)},
                {'id': 'users', 'name': 'Users',
                 'action': 'ZenUsers/manageUserFolder',
                 'permissions': ('Manage DMD',)},
                {'id': 'packs', 'name': 'ZenPacks',
                 'action': 'ZenUsers/manageUserFolder',
                 'permissions': ('Manage DMD',)},
                {'id': 'jobs', 'name': 'Jobs',
                 'action': 'joblist', 'permissions': ('Manage DMD',)},
                {'id': 'portlets', 'name': 'Portlets',
                 'action': 'editPortletPerms', 'permissions': ('Manage DMD',)},
                {'id': 'daemons', 'name': 'Daemons',
                 'action': '../About/zenossInfo',
                 'permissions': ('Manage DMD',)},
                {'id': 'versions', 'name': 'Versions',
                 'action': '../About/zenossVersions',
                 'permissions': ('Manage DMD',)},
                {'id': 'backups', 'name': 'Backups',
                 'action': 'backupInfo', 'permissions': ('Manage DMD',)},
                {'id': 'profileorganizer', 'name': 'Profiles',
                 'action': 'viewProfileOrganizer',
                 'permissions': ('Manage DMD',)},
            ),
        },
    )

    def __init__(self, id=None):
        """Initialize the organizer; the DMD root instance additionally
        creates the rule search catalog."""
        if not id:
            id = self.dmdRootName
        super(ProfileOrganizer, self).__init__(id)
        if self.id == self.dmdRootName:
            self.createCatalog()

    def manage_runMembershipRulesOld(self, REQUEST=None):
        """Add memberships to devices matching all ruleset rules by
        submitting background jobs (legacy implementation)."""
        from ProfileJob import manage_ModifyAllMemberships
        rules = manage_ModifyAllMemberships(self.dmd)
        if REQUEST:
            if rules:
                messaging.IMessageSender(self).sendToBrowser(
                    'Membership Jobs Submitted',
                    'membership jobs submitted: %s' % self.id
                )
            return self.callZenScreen(REQUEST)

    def manage_runMembershipRules(self, REQUEST=None):
        """Rebuild memberships for every ruleset: drop existing
        memberships, then re-add devices matching the ruleset rules."""
        modified = []
        for ruleset in self.rulesets():
            ruleset.removeMembershipRules()
            rules = ruleset.addMembershipRules()
            if rules:
                modified.append(ruleset.id)
        if REQUEST:
            if modified:
                messaging.IMessageSender(self).sendToBrowser(
                    'Memberships Modified',
                    'membership modified for rulesets: %s' % modified
                )
            return self.callZenScreen(REQUEST)

    def countClasses(self):
        """Count all rulesets within this organizer and its children."""
        count = self.rulesets.countObjects()
        for group in self.children():
            count += group.countClasses()
        return count

    def createProfileModule(self, name, path="/"):
        """Create a rule set (ProfileModule) under the organizer at
        *path* and return it."""
        profiles = self.getDmdRoot(self.dmdRootName)
        modorg = profiles.createOrganizer(path)
        from ProfileModule import ProfileModule
        mod = ProfileModule(name)
        modorg.rulesets._setObject(mod.id, mod)
        # re-read the stored object before using it
        mod = modorg.rulesets._getOb(mod.id)
        mod.createModuleGroup()
        return mod

    def manage_addProfileModule(self, id, REQUEST=None):
        """Create a new rule set in this organizer (ZMI entry point)."""
        from ProfileModule import ProfileModule
        module = ProfileModule(id)
        self.rulesets._setObject(id, module)
        mod = self.rulesets._getOb(module.id)
        mod.createModuleGroup()
        if REQUEST:
            messaging.IMessageSender(self).sendToBrowser(
                'Rule Set Created',
                'rule set %s was created.' % id
            )
            return self.callZenScreen(REQUEST)
        else:
            return self.rulesets._getOb(id)

    def removeProfileModules(self, ids=None, REQUEST=None):
        """Remove the named rule sets from this organizer."""
        if not ids:
            return self()
        if isinstance(ids, str):   # tolerate a single id
            ids = (ids,)
        for id in ids:
            self.rulesets._delObject(id)
        if REQUEST:
            messaging.IMessageSender(self).sendToBrowser(
                'Rule Sets Deleted',
                'rule sets deleted: %s' % ', '.join(ids)
            )
            return self()

    def moveProfileModules(self, moveTarget, ids=None, REQUEST=None):
        """Move the named rule sets from this organizer to moveTarget."""
        if not moveTarget or not ids:
            return self()
        if isinstance(ids, str):   # tolerate a single id
            ids = (ids,)
        target = self.getChildMoveTarget(moveTarget)
        for id in ids:
            rec = self.rulesets._getOb(id)
            rec._operation = 1  # moving object state
            self.rulesets._delObject(id)
            target.rulesets._setObject(id, rec)
        if REQUEST:
            messaging.IMessageSender(self).sendToBrowser(
                'Rule Set Moved',
                'rule set moved to %s.' % moveTarget
            )
            REQUEST['RESPONSE'].redirect(target.getPrimaryUrlPath())

    def reIndex(self):
        """Go through all rulesets in this tree and reindex their rules."""
        # Fixed: a leftover debug print statement preceded the docstring,
        # turning the docstring into a dead string constant; removed.
        zcat = self._getOb(self.default_catalog)
        zcat.manage_catalogClear()
        for org in [self] + self.getSubOrganizers():
            for ruleset in org.rulesets():
                for thing in ruleset.rules():
                    thing.index_object()

    def createCatalog(self):
        """Create a catalog for rules searching."""
        from Products.ZCatalog.ZCatalog import manage_addZCatalog
        # XXX update to use ManagableIndex
        manage_addZCatalog(self, self.default_catalog, self.default_catalog)
        zcat = self._getOb(self.default_catalog)
        cat = zcat._catalog
        cat.addIndex('ruleSystems',
                     makeCaseInsensitiveKeywordIndex('ruleSystems'))
        cat.addIndex('ruleGroups',
                     makeCaseInsensitiveKeywordIndex('ruleGroups'))
        cat.addIndex('propertyType',
                     makeCaseInsensitiveKeywordIndex('propertyType'))
        cat.addIndex('propertyName',
                     makeCaseInsensitiveKeywordIndex('propertyName'))
        zcat.addColumn('toRemove')
        zcat.addColumn('enabled')
        zcat.addColumn('ruleModuleName')


InitializeClass(ProfileOrganizer)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import moves
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class CreateRegisterImagesTest(base.BaseV1ImageTest):
    """Here we test the registration and creation of images."""

    @test.idempotent_id('3027f8e6-3492-4a11-8575-c3293017af4d')
    def test_register_then_upload(self):
        # Register, then upload an image
        properties = {'prop1': 'val1'}
        body = self.create_image(name='New Name',
                                 container_format='bare',
                                 disk_format='raw',
                                 is_public=False,
                                 properties=properties)
        self.assertIn('id', body)
        image_id = body.get('id')
        self.assertEqual('New Name', body.get('name'))
        self.assertFalse(body.get('is_public'))
        # A registered-but-not-yet-uploaded image sits in 'queued'.
        self.assertEqual('queued', body.get('status'))
        for key, val in properties.items():
            self.assertEqual(val, body.get('properties')[key])
        # Now try uploading an image file
        # NOTE(review): the size assertion below assumes
        # data_utils.random_bytes() defaults to 1024 bytes -- confirm.
        image_file = moves.cStringIO(data_utils.random_bytes())
        body = self.client.update_image(image_id, data=image_file)
        self.assertIn('size', body)
        self.assertEqual(1024, body.get('size'))

    @test.idempotent_id('69da74d9-68a9-404b-9664-ff7164ccb0f5')
    def test_register_remote_image(self):
        # Register a new remote image (data served from an HTTP location,
        # so the image becomes 'active' without an explicit upload).
        body = self.create_image(name='New Remote Image',
                                 container_format='bare',
                                 disk_format='raw', is_public=False,
                                 location=CONF.image.http_image,
                                 properties={'key1': 'value1',
                                             'key2': 'value2'})
        self.assertIn('id', body)
        self.assertEqual('New Remote Image', body.get('name'))
        self.assertFalse(body.get('is_public'))
        self.assertEqual('active', body.get('status'))
        properties = body.get('properties')
        self.assertEqual(properties['key1'], 'value1')
        self.assertEqual(properties['key2'], 'value2')

    @test.idempotent_id('6d0e13a7-515b-460c-b91f-9f4793f09816')
    def test_register_http_image(self):
        # copy_from makes glance pull the bits itself; wait until the
        # copy finishes and the image goes 'active'.
        body = self.create_image(name='New Http Image',
                                 container_format='bare',
                                 disk_format='raw', is_public=False,
                                 copy_from=CONF.image.http_image)
        self.assertIn('id', body)
        image_id = body.get('id')
        self.assertEqual('New Http Image', body.get('name'))
        self.assertFalse(body.get('is_public'))
        self.client.wait_for_image_status(image_id, 'active')
        self.client.show_image(image_id)

    @test.idempotent_id('05b19d55-140c-40d0-b36b-fafd774d421b')
    def test_register_image_with_min_ram(self):
        # Register an image with min ram
        properties = {'prop1': 'val1'}
        body = self.create_image(name='New_image_with_min_ram',
                                 container_format='bare',
                                 disk_format='raw',
                                 is_public=False,
                                 min_ram=40,
                                 properties=properties)
        self.assertIn('id', body)
        self.assertEqual('New_image_with_min_ram', body.get('name'))
        self.assertFalse(body.get('is_public'))
        self.assertEqual('queued', body.get('status'))
        self.assertEqual(40, body.get('min_ram'))
        for key, val in properties.items():
            self.assertEqual(val, body.get('properties')[key])
        self.client.delete_image(body['id'])
class ListImagesTest(base.BaseV1ImageTest):
    """
    Here we test the listing of image information
    """

    @classmethod
    def resource_setup(cls):
        super(ListImagesTest, cls).resource_setup()
        # We add a few images here to test the listing functionality of
        # the images API
        img1 = cls._create_remote_image('one', 'bare', 'raw')
        img2 = cls._create_remote_image('two', 'ami', 'ami')
        img3 = cls._create_remote_image('dup', 'bare', 'raw')
        img4 = cls._create_remote_image('dup', 'bare', 'raw')
        img5 = cls._create_standard_image('1', 'ami', 'ami', 42)
        img6 = cls._create_standard_image('2', 'ami', 'ami', 142)
        img7 = cls._create_standard_image('33', 'bare', 'raw', 142)
        img8 = cls._create_standard_image('33', 'bare', 'raw', 142)
        cls.created_set = set(cls.created_images)
        # 4x remote, 4x standard image
        cls.remote_set = set((img1, img2, img3, img4))
        cls.standard_set = set((img5, img6, img7, img8))
        # 5x bare, 3x ami
        cls.bare_set = set((img1, img3, img4, img7, img8))
        cls.ami_set = set((img2, img5, img6))
        # 1x with size 42
        cls.size42_set = set((img5,))
        # 3x with size 142
        cls.size142_set = set((img6, img7, img8,))
        # dup named
        cls.dup_set = set((img3, img4))

    @classmethod
    def _create_remote_image(cls, name, container_format, disk_format):
        """
        Create a new remote image and return the ID of the newly-registered
        image
        """
        name = 'New Remote Image %s' % name
        location = CONF.image.http_image
        image = cls.create_image(name=name,
                                 container_format=container_format,
                                 disk_format=disk_format,
                                 is_public=False,
                                 location=location)
        image_id = image['id']
        return image_id

    @classmethod
    def _create_standard_image(cls, name, container_format,
                               disk_format, size):
        """
        Create a new standard image and return the ID of the newly-registered
        image.  The image data is *size* random bytes.

        NOTE(review): this docstring previously claimed the size was "a
        random number between 1024 and 4096", which contradicts the
        explicit size argument.
        """
        image_file = moves.cStringIO(data_utils.random_bytes(size))
        name = 'New Standard Image %s' % name
        image = cls.create_image(name=name,
                                 container_format=container_format,
                                 disk_format=disk_format,
                                 is_public=False, data=image_file)
        image_id = image['id']
        return image_id

    @test.idempotent_id('246178ab-3b33-4212-9a4b-a7fe8261794d')
    def test_index_no_params(self):
        # Simple test to see all fixture images returned
        images_list = self.client.list_images()
        image_list = map(lambda x: x['id'], images_list)
        for image_id in self.created_images:
            self.assertIn(image_id, image_list)

    @test.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
    def test_index_disk_format(self):
        images_list = self.client.list_images(disk_format='ami')
        for image in images_list:
            self.assertEqual(image['disk_format'], 'ami')
        result_set = set(map(lambda x: x['id'], images_list))
        # All 'ami' fixtures returned, and nothing from the other formats.
        self.assertTrue(self.ami_set <= result_set)
        self.assertFalse(self.created_set - self.ami_set <= result_set)

    @test.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
    def test_index_container_format(self):
        images_list = self.client.list_images(container_format='bare')
        for image in images_list:
            self.assertEqual(image['container_format'], 'bare')
        result_set = set(map(lambda x: x['id'], images_list))
        self.assertTrue(self.bare_set <= result_set)
        self.assertFalse(self.created_set - self.bare_set <= result_set)

    @test.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
    def test_index_max_size(self):
        images_list = self.client.list_images(size_max=42)
        for image in images_list:
            self.assertTrue(image['size'] <= 42)
        result_set = set(map(lambda x: x['id'], images_list))
        self.assertTrue(self.size42_set <= result_set)
        self.assertFalse(self.created_set - self.size42_set <= result_set)

    @test.idempotent_id('6ffc16d0-4cbf-4401-95c8-4ac63eac34d8')
    def test_index_min_size(self):
        images_list = self.client.list_images(size_min=142)
        for image in images_list:
            self.assertTrue(image['size'] >= 142)
        result_set = set(map(lambda x: x['id'], images_list))
        self.assertTrue(self.size142_set <= result_set)
        self.assertFalse(self.size42_set <= result_set)

    @test.idempotent_id('e5dc26d9-9aa2-48dd-bda5-748e1445da98')
    def test_index_status_active_detail(self):
        images_list = self.client.list_images(detail=True,
                                              status='active',
                                              sort_key='size',
                                              sort_dir='desc')
        top_size = images_list[0]['size']  # We have non-zero sized images
        # Verify the listing is sorted by size, descending.
        for image in images_list:
            size = image['size']
            self.assertTrue(size <= top_size)
            top_size = size
            self.assertEqual(image['status'], 'active')

    @test.idempotent_id('097af10a-bae8-4342-bff4-edf89969ed2a')
    def test_index_name(self):
        images_list = self.client.list_images(
            detail=True,
            name='New Remote Image dup')
        result_set = set(map(lambda x: x['id'], images_list))
        for image in images_list:
            self.assertEqual(image['name'], 'New Remote Image dup')
        self.assertTrue(self.dup_set <= result_set)
        self.assertFalse(self.created_set - self.dup_set <= result_set)
class UpdateImageMetaTest(base.BaseV1ImageTest):
    """Tests for reading and updating image metadata (properties)."""

    @classmethod
    def resource_setup(cls):
        super(UpdateImageMetaTest, cls).resource_setup()
        cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)

    @classmethod
    def _create_standard_image(cls, name, container_format,
                               disk_format, size):
        """
        Create a new standard image and return the ID of the newly-registered
        image.  The image data is *size* random bytes, with one seed
        property key1=value1.
        """
        image_file = moves.cStringIO(data_utils.random_bytes(size))
        name = 'New Standard Image %s' % name
        image = cls.create_image(name=name,
                                 container_format=container_format,
                                 disk_format=disk_format,
                                 is_public=False, data=image_file,
                                 properties={'key1': 'value1'})
        image_id = image['id']
        return image_id

    @test.idempotent_id('01752c1c-0275-4de3-9e5b-876e44541928')
    def test_list_image_metadata(self):
        # All metadata key/value pairs for an image should be returned
        resp_metadata = self.client.get_image_meta(self.image_id)
        expected = {'key1': 'value1'}
        self.assertEqual(expected, resp_metadata['properties'])

    @test.idempotent_id('d6d7649c-08ce-440d-9ea7-e3dda552f33c')
    def test_update_image_metadata(self):
        # The metadata for the image should match the updated values
        req_metadata = {'key1': 'alt1', 'key2': 'value2'}
        metadata = self.client.get_image_meta(self.image_id)
        self.assertEqual(metadata['properties'], {'key1': 'value1'})
        # Merge the new keys into the existing properties and push the
        # result back; the server should store the merged mapping.
        metadata['properties'].update(req_metadata)
        metadata = self.client.update_image(
            self.image_id, properties=metadata['properties'])
        resp_metadata = self.client.get_image_meta(self.image_id)
        expected = {'key1': 'alt1', 'key2': 'value2'}
        self.assertEqual(expected, resp_metadata['properties'])
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
This config file extends the test environment configuration
so that we can run the lettuce acceptance tests.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .test import *
from .sauce import *
# You need to start the server in debug mode,
# otherwise the browser will not render the pages correctly
DEBUG = True
SITE_NAME = 'localhost:{}'.format(LETTUCE_SERVER_PORT)
# Output Django logs to a file
import logging
logging.basicConfig(filename=TEST_ROOT / "log" / "lms_acceptance.log", level=logging.ERROR)
# set root logger level
logging.getLogger().setLevel(logging.ERROR)
import os
from random import choice
def seed():
    """Return a per-test-run integer (the parent process id), used below
    to namespace the acceptance-test Mongo database names."""
    parent_pid = os.getppid()
    return parent_pid
# Silence noisy logs
LOG_OVERRIDES = [
('track.middleware', logging.CRITICAL),
('codejail.safe_exec', logging.ERROR),
('edx.courseware', logging.ERROR),
('audit', logging.ERROR),
('lms.djangoapps.instructor_task.api_helper', logging.ERROR),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
update_module_store_settings(
MODULESTORE,
doc_store_settings={
'db': 'acceptance_xmodule',
'collection': 'acceptance_modulestore_%s' % seed(),
},
module_store_options={
'fs_root': TEST_ROOT / "data",
},
default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'acceptance_xcontent_%s' % seed(),
}
}
# Set this up so that 'paver lms --settings=acceptance' and running the
# harvest command both use the same (test) database
# which they can flush without messing up your dev db
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "test_edx.db",
'TEST_NAME': TEST_ROOT / "db" / "test_edx.db",
'OPTIONS': {
'timeout': 30,
},
'ATOMIC_REQUESTS': True,
},
'student_module_history': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': TEST_ROOT / "db" / "test_student_module_history.db",
'TEST_NAME': TEST_ROOT / "db" / "test_student_module_history.db",
'OPTIONS': {
'timeout': 30,
},
}
}
TRACKING_BACKENDS.update({
'mongo': {
'ENGINE': 'track.backends.mongodb.MongoBackend'
}
})
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update({
'mongo': {
'ENGINE': 'eventtracking.backends.mongodb.MongoBackend',
'OPTIONS': {
'database': 'track'
}
}
})
BULK_EMAIL_DEFAULT_FROM_EMAIL = "test@test.org"
# Forums are disabled in test.py to speed up unit tests, but we do not have
# per-test control for lettuce acceptance tests.
# If you are writing an acceptance test that needs the discussion service enabled,
# do not write it in lettuce, but instead write it using bok-choy.
# DO NOT CHANGE THIS SETTING HERE.
FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
# Enable third-party authentication
FEATURES['ENABLE_THIRD_PARTY_AUTH'] = True
THIRD_PARTY_AUTH = {
"Google": {
"SOCIAL_AUTH_GOOGLE_OAUTH2_KEY": "test",
"SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET": "test"
},
"Facebook": {
"SOCIAL_AUTH_FACEBOOK_KEY": "test",
"SOCIAL_AUTH_FACEBOOK_SECRET": "test"
}
}
# Enable fake payment processing page
FEATURES['ENABLE_PAYMENT_FAKE'] = True
# Enable special exams
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
# Don't actually send any requests to Software Secure for student identity
# verification.
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
# HACK
# Setting this flag to false causes imports to not load correctly in the lettuce python files
# We do not yet understand why this occurs. Setting this to true is a stopgap measure
USE_I18N = True
FEATURES['ENABLE_FEEDBACK_SUBMISSION'] = False
# Include the lettuce app for acceptance testing, including the 'harvest' django-admin command
INSTALLED_APPS += ('lettuce.django',)
LETTUCE_APPS = ('courseware', 'instructor')
# Lettuce appears to have a bug that causes it to search
# `instructor_task` when we specify the `instructor` app.
# This causes some pretty cryptic errors as lettuce tries
# to parse files in `instructor_task` as features.
# As a quick workaround, explicitly exclude the `instructor_task` app.
# The coursewarehistoryextended app also falls prey to this fuzzy
# for the courseware app.
LETTUCE_AVOID_APPS = ('instructor_task', 'coursewarehistoryextended')
LETTUCE_BROWSER = os.environ.get('LETTUCE_BROWSER', 'chrome')
# Where to run: local, saucelabs, or grid
LETTUCE_SELENIUM_CLIENT = os.environ.get('LETTUCE_SELENIUM_CLIENT', 'local')
SELENIUM_GRID = {
'URL': 'http://127.0.0.1:4444/wd/hub',
'BROWSER': LETTUCE_BROWSER,
}
#####################################################################
# See if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
# Because an override for where to run will affect which ports to use,
# set these up after the local overrides.
# Configure XQueue interface to use our stub XQueue server
XQUEUE_INTERFACE = {
"url": "http://127.0.0.1:{0:d}".format(XQUEUE_PORT),
"django_auth": {
"username": "lms",
"password": "***REMOVED***"
},
"basic_auth": ('anant', 'agarwal'),
}
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
if FEATURES.get('ENABLE_COURSEWARE_SEARCH') or \
FEATURES.get('ENABLE_DASHBOARD_SEARCH') or \
FEATURES.get('ENABLE_COURSE_DISCOVERY'):
# Use MockSearchEngine as the search engine for test scenario
SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Generate a random UUID so that different runs of acceptance tests don't break each other
import uuid
SECRET_KEY = uuid.uuid4().hex
############################### PIPELINE #######################################
PIPELINE_ENABLED = False
# We want to make sure that any new migrations are run
# see https://groups.google.com/forum/#!msg/django-developers/PWPj3etj3-U/kCl6pMsQYYoJ
MIGRATION_MODULES = {}
|
unknown
|
codeparrot/codeparrot-clean
| ||
import django_comment_common.models as models
from django.test import TestCase
from django.test.utils import override_settings
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from opaque_keys.edx.locations import SlashSeparatedCourseKey
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class RoleClassTestCase(TestCase):
    """Tests for django_comment_common.models.Role permission handling."""

    def setUp(self):
        # For course ID, syntax edx/classname/classdate is important
        # because xmodel.course_module.id_to_location looks for a string to split
        self.course_id = SlashSeparatedCourseKey("edX", "toy", "2012_Fall")
        self.student_role = models.Role.objects.get_or_create(name="Student",
                                                              course_id=self.course_id)[0]
        self.student_role.add_permission("delete_thread")
        # Same name + same course: get_or_create returns the same row as
        # student_role, which testHasPermission relies on.
        self.student_2_role = models.Role.objects.get_or_create(name="Student",
                                                                course_id=self.course_id)[0]
        self.TA_role = models.Role.objects.get_or_create(name="Community TA",
                                                         course_id=self.course_id)[0]
        self.course_id_2 = SlashSeparatedCourseKey("edx", "6.002x", "2012_Fall")
        self.TA_role_2 = models.Role.objects.get_or_create(name="Community TA",
                                                           course_id=self.course_id_2)[0]

    class Dummy():
        # NOTE(review): appears unused; render_template takes no 'self'
        # argument, so it could not be called on an instance anyway.
        def render_template():
            pass

    def testHasPermission(self):
        # Whenever you add a permission to student_role,
        # Roles with the same FORUM_ROLE in same class also receives the same
        # permission.
        # Is this desirable behavior?
        self.assertTrue(self.student_role.has_permission("delete_thread"))
        self.assertTrue(self.student_2_role.has_permission("delete_thread"))
        self.assertFalse(self.TA_role.has_permission("delete_thread"))

    def testInheritPermissions(self):
        self.TA_role.inherit_permissions(self.student_role)
        self.assertTrue(self.TA_role.has_permission("delete_thread"))
        # Despite being from 2 different courses, TA_role_2 can still inherit
        # permissions from TA_role without error
        self.TA_role_2.inherit_permissions(self.TA_role)
class PermissionClassTestCase(TestCase):
    """Tests for the Permission model's string representation."""

    def setUp(self):
        permission, _created = models.Permission.objects.get_or_create(name="test")
        self.permission = permission

    def testUnicode(self):
        self.assertEqual(str(self.permission), "test")
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"compilerOptions": {
// type checking
"strict": true,
"noFallthroughCasesInSwitch": true,
"skipLibCheck": true,
// interop constraints
"allowSyntheticDefaultImports": true,
"esModuleInterop": true,
// js support
"allowJs": true,
"checkJs": false,
// environment
"jsx": "react-jsx",
"lib": ["ESNext", "DOM"],
"target": "esnext",
// modules
"baseUrl": ".",
"module": "node16",
"moduleResolution": "node16",
"resolveJsonModule": true,
"types": ["next"],
// emit
"noEmit": true,
"stripInternal": true
},
"include": ["src", "types"]
}
|
json
|
github
|
https://github.com/vercel/next.js
|
crates/next-core/js/tsconfig.json
|
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import datetime
from autobahn.twisted.wamp import ApplicationSession
class TimeService(ApplicationSession):
    """
    A simple time service application component.
    """

    def __init__(self, realm = "realm1"):
        ApplicationSession.__init__(self)
        self._realm = realm

    def onConnect(self):
        # As soon as the transport is up, join the configured realm.
        self.join(self._realm)

    def onJoin(self, details):
        # Expose a single RPC endpoint reporting the current UTC time.
        def current_utc_time():
            return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
        self.register(current_utc_time, 'com.timeservice.now')
from twisted.python import log
from autobahn.twisted.websocket import WampWebSocketServerProtocol, WampWebSocketServerFactory
from twisted.internet.defer import Deferred
import json
import urllib
import Cookie
from autobahn.util import newid, utcnow
from autobahn.websocket import http
class ServerProtocol(WampWebSocketServerProtocol):
    """WAMP-over-WebSocket server protocol that tracks browser sessions
    with a "cbtid" tracking cookie.

    The factory's _cookies mapping holds, per cbtid, the creation time,
    the authenticated authid (or None) and the set of live WebSocket
    connections sharing that cookie.
    """

    ## authid -> cookie -> set(connection)

    def onConnect(self, request):
        protocol, headers = WampWebSocketServerProtocol.onConnect(self, request)

        ## our cookie tracking ID
        self._cbtid = None

        ## see if there already is a cookie set ..
        ## (dict.has_key() replaced with the `in` operator -- has_key is
        ## deprecated and removed in Python 3; behavior is identical)
        if 'cookie' in request.headers:
            try:
                cookie = Cookie.SimpleCookie()
                cookie.load(str(request.headers['cookie']))
            except Cookie.CookieError:
                # malformed cookie header: ignore and issue a fresh one below
                pass
            else:
                if 'cbtid' in cookie:
                    cbtid = cookie['cbtid'].value
                    # only accept tracking IDs we actually issued
                    if cbtid in self.factory._cookies:
                        self._cbtid = cbtid
                        log.msg("Cookie already set: %s" % self._cbtid)

        ## if no cookie is set, create a new one ..
        if self._cbtid is None:
            self._cbtid = newid()
            maxAge = 86400
            cbtData = {'created': utcnow(),
                       'authenticated': None,
                       'maxAge': maxAge,
                       'connections': set()}
            self.factory._cookies[self._cbtid] = cbtData

            ## do NOT add the "secure" cookie attribute! "secure" refers to the
            ## scheme of the Web page that triggered the WS, not WS itself!!
            ##
            headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
            log.msg("Setting new cookie: %s" % self._cbtid)

        ## add this WebSocket connection to the set of connections
        ## associated with the same cookie
        self.factory._cookies[self._cbtid]['connections'].add(self)
        self._authenticated = self.factory._cookies[self._cbtid]['authenticated']

        ## accept the WebSocket connection, speaking subprotocol `protocol`
        ## and setting HTTP headers `headers`
        return (protocol, headers)
from autobahn.twisted.wamp import RouterSession
from autobahn.wamp import types
class MyRouterSession(RouterSession):
    """Router session that authenticates clients via Mozilla Persona.

    Authentication state lives on the transport's cookie record, so a browser
    stays logged in across WebSocket reconnects.
    """

    def onOpen(self, transport):
        RouterSession.onOpen(self, transport)
        print "transport authenticated: {}".format(self._transport._authenticated)

    def onHello(self, realm, details):
        ## Accept immediately when the cookie is already authenticated,
        ## otherwise challenge the client for a Mozilla Persona assertion.
        print "onHello: {} {}".format(realm, details)
        if self._transport._authenticated is not None:
            return types.Accept(authid = self._transport._authenticated)
        else:
            return types.Challenge("mozilla-persona")
        return accept  # NOTE(review): unreachable dead code -- both branches return above, and `accept` is undefined

    def onLeave(self, details):
        ## On explicit logout: clear the cookie's authentication state and
        ## close every connection that shares the cookie.
        if details.reason == "wamp.close.logout":
            cookie = self._transport.factory._cookies[self._transport._cbtid]
            cookie['authenticated'] = None
            for proto in cookie['connections']:
                proto.sendClose()

    def onAuthenticate(self, signature, extra):
        """Verify a Mozilla Persona assertion against the remote verifier.

        Returns a Deferred that fires types.Accept on success or types.Deny
        on failure.
        """
        print "onAuthenticate: {} {}".format(signature, extra)

        dres = Deferred()

        ## The client did it's Mozilla Persona authentication thing
        ## and now wants to verify the authentication and login.
        assertion = signature
        audience = 'http://127.0.0.1:8080/'

        ## To verify the authentication, we need to send a HTTP/POST
        ## to Mozilla Persona. When successful, Persona will send us
        ## back something like:

        # {
        #    "audience": "http://192.168.1.130:8080/",
        #    "expires": 1393681951257,
        #    "issuer": "gmail.login.persona.org",
        #    "email": "tobias.oberstein@gmail.com",
        #    "status": "okay"
        # }

        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        body = urllib.urlencode({'audience': audience, 'assertion': assertion})

        from twisted.web.client import getPage
        d = getPage(url = "https://verifier.login.persona.org/verify",
                    method = 'POST',
                    postdata = body,
                    headers = headers)

        log.msg("Authentication request sent.")

        def done(res):
            res = json.loads(res)
            try:
                if res['status'] == 'okay':
                    ## Mozilla Persona successfully authenticated the user
                    ## remember the user's email address. this marks the cookie as
                    ## authenticated
                    self._transport.factory._cookies[self._transport._cbtid]['authenticated'] = res['email']
                    log.msg("Authenticated user {}".format(res['email']))
                    dres.callback(types.Accept(authid = res['email']))
                else:
                    log.msg("Authentication failed!")
                    dres.callback(types.Deny())
            except Exception as e:
                # NOTE(review): swallowing the exception leaves `dres` unfired,
                # so the pending authentication would hang.
                print "ERRR", e

        def error(err):
            log.msg("Authentication request failed: {}".format(err.value))
            dres.callback(types.Deny())

        d.addCallbacks(done, error)

        return dres
if __name__ == '__main__':
    # Wire up and run the WAMP router: embedded TimeService component,
    # cookie-tracking WebSocket transport, and a static-file web site.

    import sys, argparse
    from twisted.python import log
    from twisted.internet.endpoints import serverFromString

    ## parse command line arguments
    ##
    parser = argparse.ArgumentParser()

    parser.add_argument("-d", "--debug", action = "store_true",
                        help = "Enable debug output.")

    parser.add_argument("-c", "--component", type = str, default = None,
                        help = "Start WAMP-WebSocket server with this application component, e.g. 'timeservice.TimeServiceBackend', or None.")

    parser.add_argument("--websocket", type = str, default = "tcp:8080",
                        help = 'WebSocket server Twisted endpoint descriptor, e.g. "tcp:9000" or "unix:/tmp/mywebsocket".')

    parser.add_argument("--wsurl", type = str, default = "ws://localhost:8080",
                        help = 'WebSocket URL (must suit the endpoint), e.g. "ws://localhost:9000".')

    args = parser.parse_args()

    ## start Twisted logging to stdout
    ##
    # NOTE(review): "True or args.debug" forces logging on unconditionally --
    # looks like a leftover debugging change.
    if True or args.debug:
        log.startLogging(sys.stdout)

    ## we use an Autobahn utility to install the "best" available Twisted reactor
    ##
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor()
    if args.debug:
        print("Running on reactor {}".format(reactor))

    ## create a WAMP router factory
    ##
    from autobahn.wamp.router import RouterFactory
    router_factory = RouterFactory()

    ## create a WAMP router session factory
    ##
    from autobahn.twisted.wamp import RouterSessionFactory
    session_factory = RouterSessionFactory(router_factory)
    # Use our Persona-authenticating session class for all router sessions.
    session_factory.session = MyRouterSession

    ## start an embedded application component ..
    ##
    session_factory.add(TimeService())

    ## create a WAMP-over-WebSocket transport server factory
    ##
    from autobahn.twisted.websocket import WampWebSocketServerFactory
    transport_factory = WampWebSocketServerFactory(session_factory, args.wsurl, debug_wamp = args.debug)
    transport_factory.protocol = ServerProtocol
    # Shared cookie database: cbtid -> cookie record (see ServerProtocol).
    transport_factory._cookies = {}
    transport_factory.setProtocolOptions(failByDrop = False)

    from twisted.web.server import Site
    from twisted.web.static import File
    from autobahn.twisted.resource import WebSocketResource

    ## we serve static files under "/" ..
    root = File(".")

    ## .. and our WebSocket server under "/ws"
    resource = WebSocketResource(transport_factory)
    root.putChild("ws", resource)

    ## run both under one Twisted Web Site
    site = Site(root)

    ## start the WebSocket server from an endpoint
    ##
    server = serverFromString(reactor, args.websocket)
    server.listen(site)

    ## now enter the Twisted reactor loop
    ##
    reactor.run()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mfd/rohm,bd9571mwv.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: ROHM BD9571MWV/BD9574MWF Power Management Integrated Circuit (PMIC)
maintainers:
- Marek Vasut <marek.vasut@gmail.com>
properties:
compatible:
enum:
- rohm,bd9571mwv
- rohm,bd9574mwf
reg:
maxItems: 1
interrupts:
maxItems: 1
interrupt-controller: true
'#interrupt-cells':
const: 2
gpio-controller: true
'#gpio-cells':
const: 2
rohm,ddr-backup-power:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 0x0
maximum: 0xf
description: |
Value to use for DDR-Backup Power (default 0).
This is a bitmask that specifies which DDR power rails need to be kept
powered when backup mode is entered, for system suspend:
- bit 0: DDR0
- bit 1: DDR1
- bit 2: DDR0C
- bit 3: DDR1C
These bits match the KEEPON_DDR* bits in the documentation for the "BKUP
Mode Cnt" register.
rohm,rstbmode-level:
$ref: /schemas/types.yaml#/definitions/flag
description:
The RSTB signal is configured for level mode, to accommodate a toggle
power switch (the RSTBMODE pin is strapped low).
rohm,rstbmode-pulse:
$ref: /schemas/types.yaml#/definitions/flag
description:
The RSTB signal is configured for pulse mode, to accommodate a momentary
power switch (the RSTBMODE pin is strapped high).
regulators:
type: object
description:
List of child nodes that specify the regulator initialization data.
Child nodes must be named after their hardware counterparts.
patternProperties:
"^(vd09|vd18|vd25|vd33|dvfs)$":
type: object
$ref: /schemas/regulator/regulator.yaml#
properties:
regulator-name:
pattern: "^(vd09|vd18|vd25|vd33|dvfs)$"
unevaluatedProperties: false
additionalProperties: false
additionalProperties: false
required:
- compatible
- reg
- interrupts
- interrupt-controller
- '#interrupt-cells'
- gpio-controller
- '#gpio-cells'
oneOf:
- required:
- rohm,rstbmode-level
- required:
- rohm,rstbmode-pulse
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
i2c {
#address-cells = <1>;
#size-cells = <0>;
pmic: pmic@30 {
compatible = "rohm,bd9571mwv";
reg = <0x30>;
interrupt-parent = <&gpio2>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <2>;
gpio-controller;
#gpio-cells = <2>;
rohm,ddr-backup-power = <0xf>;
rohm,rstbmode-pulse;
regulators {
dvfs: dvfs {
regulator-name = "dvfs";
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <1030000>;
regulator-boot-on;
regulator-always-on;
};
};
};
};
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/mfd/rohm,bd9571mwv.yaml
|
# Note: initially copied from https://github.com/florimondmanca/httpx-sse/blob/master/src/httpx_sse/_decoders.py
from __future__ import annotations
import json
import inspect
from types import TracebackType
from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast
from typing_extensions import Self, Protocol, TypeGuard, override, get_origin, runtime_checkable
import httpx
from ._utils import is_mapping, extract_type_var_from_base
from ._exceptions import APIError
if TYPE_CHECKING:
from ._client import OpenAI, AsyncOpenAI
_T = TypeVar("_T")
class Stream(Generic[_T]):
    """Provides the core interface to iterate over a synchronous stream response."""

    response: httpx.Response

    _decoder: SSEBytesDecoder

    def __init__(
        self,
        *,
        cast_to: type[_T],
        response: httpx.Response,
        client: OpenAI,
    ) -> None:
        self.response = response
        self._cast_to = cast_to
        self._client = client
        self._decoder = client._make_sse_decoder()
        self._iterator = self.__stream__()

    def __next__(self) -> _T:
        return self._iterator.__next__()

    def __iter__(self) -> Iterator[_T]:
        for item in self._iterator:
            yield item

    def _iter_events(self) -> Iterator[ServerSentEvent]:
        # Decode the raw response bytes into discrete server-sent events.
        yield from self._decoder.iter_bytes(self.response.iter_bytes())

    def _raise_on_error(self, data: object) -> None:
        """Raise an `APIError` if the decoded SSE payload carries an "error" object.

        A human-readable message is extracted from the payload when present;
        otherwise a generic fallback message is used.
        """
        if not (is_mapping(data) and data.get("error")):
            return

        message = None
        error = data.get("error")
        if is_mapping(error):
            message = error.get("message")
        if not message or not isinstance(message, str):
            message = "An error occurred during streaming"

        raise APIError(
            message=message,
            request=self.response.request,
            body=data["error"],
        )

    def __stream__(self) -> Iterator[_T]:
        """Decode SSE events, surface server-reported errors, and yield parsed chunks."""
        cast_to = cast(Any, self._cast_to)
        response = self.response
        process_data = self._client._process_response_data
        iterator = self._iter_events()

        try:
            for sse in iterator:
                # The server terminates the stream with a literal "[DONE]" message.
                if sse.data.startswith("[DONE]"):
                    break

                data = sse.json()

                # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
                if sse.event and sse.event.startswith("thread."):
                    if sse.event == "error":
                        self._raise_on_error(data)
                    yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
                else:
                    self._raise_on_error(data)
                    yield process_data(data=data, cast_to=cast_to, response=response)
        finally:
            # Ensure the response is closed even if the consumer doesn't read all data
            response.close()

    def __enter__(self) -> Self:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def close(self) -> None:
        """
        Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        self.response.close()
class AsyncStream(Generic[_T]):
    """Provides the core interface to iterate over an asynchronous stream response."""

    response: httpx.Response

    _decoder: SSEDecoder | SSEBytesDecoder

    def __init__(
        self,
        *,
        cast_to: type[_T],
        response: httpx.Response,
        client: AsyncOpenAI,
    ) -> None:
        self.response = response
        self._cast_to = cast_to
        self._client = client
        self._decoder = client._make_sse_decoder()
        self._iterator = self.__stream__()

    async def __anext__(self) -> _T:
        return await self._iterator.__anext__()

    async def __aiter__(self) -> AsyncIterator[_T]:
        async for item in self._iterator:
            yield item

    async def _iter_events(self) -> AsyncIterator[ServerSentEvent]:
        # Decode the raw response bytes into discrete server-sent events.
        async for sse in self._decoder.aiter_bytes(self.response.aiter_bytes()):
            yield sse

    def _raise_on_error(self, data: object) -> None:
        """Raise an `APIError` if the decoded SSE payload carries an "error" object.

        A human-readable message is extracted from the payload when present;
        otherwise a generic fallback message is used.
        """
        if not (is_mapping(data) and data.get("error")):
            return

        message = None
        error = data.get("error")
        if is_mapping(error):
            message = error.get("message")
        if not message or not isinstance(message, str):
            message = "An error occurred during streaming"

        raise APIError(
            message=message,
            request=self.response.request,
            body=data["error"],
        )

    async def __stream__(self) -> AsyncIterator[_T]:
        """Decode SSE events, surface server-reported errors, and yield parsed chunks."""
        cast_to = cast(Any, self._cast_to)
        response = self.response
        process_data = self._client._process_response_data
        iterator = self._iter_events()

        try:
            async for sse in iterator:
                # The server terminates the stream with a literal "[DONE]" message.
                if sse.data.startswith("[DONE]"):
                    break

                data = sse.json()

                # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data
                if sse.event and sse.event.startswith("thread."):
                    if sse.event == "error":
                        self._raise_on_error(data)
                    yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
                else:
                    self._raise_on_error(data)
                    yield process_data(data=data, cast_to=cast_to, response=response)
        finally:
            # Ensure the response is closed even if the consumer doesn't read all data
            await response.aclose()

    async def __aenter__(self) -> Self:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.close()

    async def close(self) -> None:
        """
        Close the response and release the connection.

        Automatically called if the response body is read to completion.
        """
        await self.response.aclose()
class ServerSentEvent:
    """A single parsed event from a server-sent-events stream."""

    def __init__(
        self,
        *,
        event: str | None = None,
        data: str | None = None,
        id: str | None = None,
        retry: int | None = None,
    ) -> None:
        self._id = id
        # Normalise missing data to the empty string.
        self._data = "" if data is None else data
        # An empty event name is treated the same as no event name.
        self._event = event or None
        self._retry = retry

    @property
    def event(self) -> str | None:
        """The event name, if one was supplied."""
        return self._event

    @property
    def id(self) -> str | None:
        """The event id, if one was supplied."""
        return self._id

    @property
    def retry(self) -> int | None:
        """The retry value, if one was supplied."""
        return self._retry

    @property
    def data(self) -> str:
        """The raw event payload (always a string, possibly empty)."""
        return self._data

    def json(self) -> Any:
        """Parse the event payload as JSON."""
        return json.loads(self.data)

    @override
    def __repr__(self) -> str:
        return f"ServerSentEvent(event={self.event}, data={self.data}, id={self.id}, retry={self.retry})"
class SSEDecoder:
    """Incremental decoder for ``text/event-stream`` (server-sent events) data.

    Accumulates ``event:`` / ``data:`` / ``id:`` / ``retry:`` fields until a
    blank line terminates the event, then emits a `ServerSentEvent`.
    """

    _data: list[str]
    _event: str | None
    _retry: int | None
    _last_event_id: str | None

    def __init__(self) -> None:
        self._event = None
        self._data = []
        self._last_event_id = None
        self._retry = None

    def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]:
        """Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
        for chunk in self._iter_chunks(iterator):
            # Split before decoding so splitlines() only uses \r and \n
            for raw_line in chunk.splitlines():
                line = raw_line.decode("utf-8")
                sse = self.decode(line)
                if sse:
                    yield sse

    def _iter_chunks(self, iterator: Iterator[bytes]) -> Iterator[bytes]:
        """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks"""
        data = b""
        for chunk in iterator:
            # Re-split on line boundaries so an event terminator spanning two
            # network chunks is still detected.
            for line in chunk.splitlines(keepends=True):
                data += line
                # A double newline (in any permitted line-ending style)
                # terminates the event.
                if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
                    yield data
                    data = b""
        if data:
            # Flush any trailing, unterminated event data.
            yield data

    async def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]:
        """Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
        async for chunk in self._aiter_chunks(iterator):
            # Split before decoding so splitlines() only uses \r and \n
            for raw_line in chunk.splitlines():
                line = raw_line.decode("utf-8")
                sse = self.decode(line)
                if sse:
                    yield sse

    async def _aiter_chunks(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
        """Given an iterator that yields raw binary data, iterate over it and yield individual SSE chunks"""
        data = b""
        async for chunk in iterator:
            for line in chunk.splitlines(keepends=True):
                data += line
                if data.endswith((b"\r\r", b"\n\n", b"\r\n\r\n")):
                    yield data
                    data = b""
        if data:
            yield data

    def decode(self, line: str) -> ServerSentEvent | None:
        """Feed one decoded line; return a completed event on a blank line, else None."""
        # See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation  # noqa: E501

        if not line:
            # Blank line: dispatch the accumulated event -- unless nothing at
            # all has been accumulated yet.
            if not self._event and not self._data and not self._last_event_id and self._retry is None:
                return None

            sse = ServerSentEvent(
                event=self._event,
                data="\n".join(self._data),
                id=self._last_event_id,
                retry=self._retry,
            )

            # NOTE: as per the SSE spec, do not reset last_event_id.
            self._event = None
            self._data = []
            self._retry = None

            return sse

        if line.startswith(":"):
            # Comment line -- ignored.
            return None

        fieldname, _, value = line.partition(":")

        if value.startswith(" "):
            # A single leading space after the colon is not part of the value.
            value = value[1:]

        if fieldname == "event":
            self._event = value
        elif fieldname == "data":
            self._data.append(value)
        elif fieldname == "id":
            # Ids containing NUL are ignored per the spec.
            if "\0" in value:
                pass
            else:
                self._last_event_id = value
        elif fieldname == "retry":
            try:
                self._retry = int(value)
            except (TypeError, ValueError):
                pass
        else:
            pass  # Field is ignored.

        return None
@runtime_checkable
class SSEBytesDecoder(Protocol):
    """Structural interface for SSE byte decoders (see `SSEDecoder`)."""

    def iter_bytes(self, iterator: Iterator[bytes]) -> Iterator[ServerSentEvent]:
        """Given an iterator that yields raw binary data, iterate over it & yield every event encountered"""
        ...

    def aiter_bytes(self, iterator: AsyncIterator[bytes]) -> AsyncIterator[ServerSentEvent]:
        """Given an async iterator that yields raw binary data, iterate over it & yield every event encountered"""
        ...
def is_stream_class_type(typ: type) -> TypeGuard[type[Stream[object]] | type[AsyncStream[object]]]:
    """TypeGuard for determining whether or not the given type is a subclass of `Stream` / `AsyncStream`"""
    # For a parameterised generic like Stream[bytes], inspect its origin class.
    base = get_origin(typ)
    if base is None:
        base = typ
    return inspect.isclass(base) and issubclass(base, (Stream, AsyncStream))
def extract_stream_chunk_type(
    stream_cls: type,
    *,
    failure_message: str | None = None,
) -> type:
    """Given a type like `Stream[T]`, returns the generic type variable `T`.

    This also handles the case where a concrete subclass is given, e.g.
    ```py
    class MyStream(Stream[bytes]):
        ...

    extract_stream_chunk_type(MyStream) -> bytes
    ```
    """
    # Imported lazily to avoid an import cycle with the base client module.
    from ._base_client import Stream, AsyncStream

    generic_bases = cast("tuple[type, ...]", (Stream, AsyncStream))
    return extract_type_var_from_base(
        stream_cls,
        index=0,
        generic_bases=generic_bases,
        failure_message=failure_message,
    )
|
python
|
github
|
https://github.com/openai/openai-python
|
src/openai/_streaming.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import bisect
import io
import logging
import os
import shutil
import sys
import zipimport
from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir
logger = logging.getLogger(__name__)
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries.
    """

    def __init__(self, base=None):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located. If
                     not specified, this will be the ``resource-cache``
                     directory under whatever :func:`get_cache_base` returns.
        """
        if base is None:
            base = os.path.join(get_cache_base(), 'resource-cache')
        # Test with 'isdir' rather than 'exists' so that a plain *file* with
        # the cache's name makes makedirs raise instead of being silently used.
        if not os.path.isdir(base):
            os.makedirs(base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def is_stale(self, resource, path):
        """
        Is the cache stale for the given resource?

        :param resource: The :class:`Resource` being cached.
        :param path: The path of the resource in the cache.
        :return: True if the cache is stale.
        """
        # Cache invalidation is a hard problem :-)
        return True

    def get(self, resource):
        """
        Get a resource into the cache,

        :param resource: A :class:`Resource` instance.
        :return: The pathname of the resource in the cache.
        """
        prefix, path = resource.finder.get_cache_info(resource)
        # A None prefix means the resource is directly reachable on disk.
        result = path if prefix is None else os.path.join(
            self.base, self.prefix_to_dir(prefix), path)

        parent = os.path.dirname(result)
        if not os.path.isdir(parent):
            os.makedirs(parent)

        refresh = (not os.path.exists(result)) or self.is_stale(resource, path)
        if refresh:
            # write the bytes of the resource to the cache location
            with open(result, 'wb') as f:
                f.write(resource.bytes)
        return result

    def clear(self):
        """
        Clear the cache, returning the entries that could not be removed.
        """
        failed = []
        for entry in os.listdir(self.base):
            full = os.path.join(self.base, entry)
            try:
                if os.path.islink(full) or os.path.isfile(full):
                    os.remove(full)
                elif os.path.isdir(full):
                    shutil.rmtree(full)
            except Exception:
                failed.append(full)
        return failed
# Module-level singleton cache shared by all Resource instances (file_path).
cache = Cache()
class ResourceBase(object):
    """Common state shared by :class:`Resource` and :class:`ResourceContainer`."""

    def __init__(self, finder, name):
        # The finder that located this resource, and the resource's name.
        self.finder = finder
        self.name = name
class Resource(ResourceBase):
    """
    A class representing an in-package resource, such as a data file. This is
    not normally instantiated by user code, but rather by a
    :class:`ResourceFinder` which manages the resource.
    """
    is_container = False     # Backwards compatibility

    def as_stream(self):
        "Get the resource as a stream. Not a property, as not idempotent."
        return self.finder.get_stream(self)

    @cached_property
    def file_path(self):
        # Path of the resource on the file system, materialised through the
        # module-level cache (e.g. when the resource lives inside a zip).
        return cache.get(self)

    @cached_property
    def bytes(self):
        # Raw contents of the resource as a byte string.
        return self.finder.get_bytes(self)

    @cached_property
    def size(self):
        # Size of the resource in bytes, as reported by the finder.
        return self.finder.get_size(self)
class ResourceContainer(ResourceBase):
    """A resource which contains other resources (i.e. a directory-like entry)."""
    is_container = True     # Backwards compatibility

    @cached_property
    def resources(self):
        # Names of the resources contained directly within this container.
        return self.finder.get_resources(self)
class ResourceFinder(object):
    """
    Resource finder for file system resources.
    """

    def __init__(self, module):
        self.module = module
        self.loader = getattr(module, '__loader__', None)
        self.base = os.path.dirname(getattr(module, '__file__', ''))

    def _make_path(self, resource_name):
        # Resource names use '/' separators; convert to a native path rooted
        # at the package directory.
        segments = resource_name.split('/')
        return os.path.join(self.base, *segments)

    def _find(self, path):
        return os.path.exists(path)

    def get_cache_info(self, resource):
        # A None prefix tells the cache the path is directly usable on disk.
        return None, resource.path

    def find(self, resource_name):
        """Return a Resource/ResourceContainer for `resource_name`, or None."""
        path = self._make_path(resource_name)
        if not self._find(path):
            return None
        kind = ResourceContainer if self._is_directory(path) else Resource
        found = kind(self, resource_name)
        found.path = path
        return found

    def get_stream(self, resource):
        return open(resource.path, 'rb')

    def get_bytes(self, resource):
        with open(resource.path, 'rb') as f:
            return f.read()

    def get_size(self, resource):
        return os.path.getsize(resource.path)

    def get_resources(self, resource):
        # Skip bytecode artefacts; everything else counts as a resource.
        return set(entry for entry in os.listdir(resource.path)
                   if entry != '__pycache__'
                   and not entry.endswith(('.pyc', '.pyo')))

    def is_container(self, resource):
        return self._is_directory(resource.path)

    _is_directory = staticmethod(os.path.isdir)
class ZipResourceFinder(ResourceFinder):
    """
    Resource finder for resources in .zip files.
    """
    def __init__(self, module):
        super(ZipResourceFinder, self).__init__(module)
        archive = self.loader.archive
        # Length of the archive path plus one separator; used to strip the
        # archive prefix from absolute resource paths.
        self.prefix_len = 1 + len(archive)
        # PyPy doesn't have a _files attr on zipimporter, and you can't set one
        if hasattr(self.loader, '_files'):
            self._files = self.loader._files
        else:
            self._files = zipimport._zip_directory_cache[archive]
        # Sorted member names, enabling bisect-based prefix lookups below.
        self.index = sorted(self._files)

    def _find(self, path):
        # Strip the archive prefix, then look for an exact member match or,
        # failing that, any member under `path` treated as a directory.
        path = path[self.prefix_len:]
        if path in self._files:
            result = True
        else:
            if path[-1] != os.sep:
                path = path + os.sep
            i = bisect.bisect(self.index, path)
            try:
                result = self.index[i].startswith(path)
            except IndexError:
                result = False
        if not result:
            logger.debug('_find failed: %r %r', path, self.loader.prefix)
        else:
            logger.debug('_find worked: %r %r', path, self.loader.prefix)
        return result

    def get_cache_info(self, resource):
        # Resources inside a zip must be materialised via the cache.
        prefix = self.loader.archive
        path = resource.path[1 + len(prefix):]
        return prefix, path

    def get_bytes(self, resource):
        return self.loader.get_data(resource.path)

    def get_stream(self, resource):
        return io.BytesIO(self.get_bytes(resource))

    def get_size(self, resource):
        path = resource.path[self.prefix_len:]
        # Entry index 3 holds the file size (per zipimport's directory layout).
        return self._files[path][3]

    def get_resources(self, resource):
        path = resource.path[self.prefix_len:]
        if path[-1] != os.sep:
            path += os.sep
        plen = len(path)
        result = set()
        # Walk the sorted index from the first member under `path`.
        i = bisect.bisect(self.index, path)
        while i < len(self.index):
            if not self.index[i].startswith(path):
                break
            s = self.index[i][plen:]
            result.add(s.split(os.sep, 1)[0])    # only immediate children
            i += 1
        return result

    def _is_directory(self, path):
        # A "directory" in a zip is any prefix some member name starts with.
        path = path[self.prefix_len:]
        if path[-1] != os.sep:
            path += os.sep
        i = bisect.bisect(self.index, path)
        try:
            result = self.index[i].startswith(path)
        except IndexError:
            result = False
        return result
# Map loader types to the finder class able to handle them. A module with no
# __loader__ (None) is assumed to live on the plain file system.
_finder_registry = {
    type(None): ResourceFinder,
    zipimport.zipimporter: ZipResourceFinder
}

try:
    # _frozen_importlib is not available on all interpreters/versions;
    # tolerate its absence (hence the broad except below).
    import _frozen_importlib
    _finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder
except (ImportError, AttributeError):
    pass


def register_finder(loader, finder_maker):
    """Register a finder factory for a custom loader type."""
    _finder_registry[type(loader)] = finder_maker

# Cache of package name -> finder instance.
_finder_cache = {}
def finder(package):
    """
    Return a resource finder for a package.

    :param package: The name of the package.
    :return: A :class:`ResourceFinder` instance for the package.
    """
    cached = _finder_cache.get(package)
    if cached is not None:
        return cached

    # Import the package on demand so callers can pass a bare name.
    if package not in sys.modules:
        __import__(package)
    module = sys.modules[package]

    # Only packages (which have __path__) can own resources.
    if getattr(module, '__path__', None) is None:
        raise DistlibException('You cannot get a finder for a module, '
                               'only for a package')

    loader = getattr(module, '__loader__', None)
    finder_maker = _finder_registry.get(type(loader))
    if finder_maker is None:
        raise DistlibException('Unable to locate finder for %r' % package)

    result = finder_maker(module)
    _finder_cache[package] = result
    return result
|
unknown
|
codeparrot/codeparrot-clean
| ||
package kotlinx.coroutines.rx3
import io.reactivex.rxjava3.core.*
import kotlinx.coroutines.*
import kotlinx.coroutines.channels.*
import kotlinx.coroutines.reactive.*
import kotlin.coroutines.*
/**
 * Creates cold [flowable][Flowable] that will run a given [block] in a coroutine.
 * Every time the returned flowable is subscribed, it starts a new coroutine.
 *
 * Coroutine emits ([ObservableEmitter.onNext]) values with `send`, completes ([ObservableEmitter.onComplete])
 * when the coroutine completes or channel is explicitly closed and emits error ([ObservableEmitter.onError])
 * if coroutine throws an exception or closes channel with a cause.
 * Unsubscribing cancels running coroutine.
 *
 * Invocations of `send` are suspended appropriately when subscribers apply back-pressure and to ensure that
 * `onNext` is not invoked concurrently.
 *
 * Coroutine context can be specified with [context] argument.
 * If the context does not have any dispatcher nor any other [ContinuationInterceptor], then [Dispatchers.Default] is used.
 * Method throws [IllegalArgumentException] if provided [context] contains a [Job] instance.
 *
 * **Note: This is an experimental api.** Behaviour of publishers that work as children in a parent scope with respect
 * to cancellation and error handling may change in the future.
 */
public fun <T: Any> rxFlowable(
    context: CoroutineContext = EmptyCoroutineContext,
    @BuilderInference block: suspend ProducerScope<T>.() -> Unit
): Flowable<T> {
    // An external Job would conflict with lifecycle management via the
    // subscription's Disposable handle, so it is rejected up front.
    require(context[Job] === null) { "Flowable context cannot contain job in it." +
            "Its lifecycle should be managed via Disposable handle. Had $context" }
    return Flowable.fromPublisher(publishInternal(GlobalScope, context, RX_HANDLER, block))
}

// Routes exceptions that can no longer be delivered downstream into RxJava's
// undeliverable-exception handling.
private val RX_HANDLER: (Throwable, CoroutineContext) -> Unit = ::handleUndeliverableException
|
kotlin
|
github
|
https://github.com/Kotlin/kotlinx.coroutines
|
reactive/kotlinx-coroutines-rx3/src/RxFlowable.kt
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets.login_helpers import google_login
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
import os
def _DeterministicPerformanceCounters():
with open(os.path.join(os.path.dirname(__file__),
'deterministic_performance_counters.js')) as f:
return f.read()
class GooglePages(page_module.Page):
  """Base page for the Google-property pages in this page set.

  Injects the deterministic performance counter script into every load.
  """

  def __init__(self, url, page_set, shared_page_state_class,
               name='', credentials=None):
    super(GooglePages, self).__init__(
        url=url, page_set=page_set, name=name,
        credentials_path='data/credentials.json',
        shared_page_state_class=shared_page_state_class)
    self.credentials = credentials
    # Evaluated on every commit of a navigation in this page's tab.
    self.script_to_evaluate_on_commit = _DeterministicPerformanceCounters()
class GmailPage(GooglePages):
  """Gmail inbox; logs in first, then waits until the page is interactive."""

  def __init__(self, page_set,
               shared_page_state_class=shared_page_state.SharedPageState):
    super(GmailPage, self).__init__(
        url='https://mail.google.com/mail/',
        page_set=page_set,
        shared_page_state_class=shared_page_state_class)

  def RunNavigateSteps(self, action_runner):
    # Log in before navigating, then wait for gmonkey and the top bar.
    google_login.LoginGoogleAccount(action_runner, 'google',
                                    self.credentials_path)
    super(GmailPage, self).RunNavigateSteps(action_runner)
    action_runner.WaitForJavaScriptCondition(
        'window.gmonkey !== undefined &&'
        'document.getElementById("gb") !== null')
class GoogleDocPage(GooglePages):
  """A Google Docs document view; waits for the editor surface to render."""

  def __init__(self, page_set,
               shared_page_state_class=shared_page_state.SharedPageState):
    super(GoogleDocPage, self).__init__(
        # pylint: disable=line-too-long
        url='https://docs.google.com/document/d/1X-IKNjtEnx-WW5JIKRLsyhz5sbsat3mfTpAPUSX3_s4/view',
        page_set=page_set,
        shared_page_state_class=shared_page_state_class)

  def RunNavigateSteps(self, action_runner):
    # Log in, navigate, then wait for the docs editor element to appear.
    google_login.LoginGoogleAccount(action_runner, 'google',
                                    self.credentials_path)
    super(GoogleDocPage, self).RunNavigateSteps(action_runner)
    action_runner.Wait(2)
    action_runner.WaitForJavaScriptCondition(
        'document.getElementsByClassName("kix-appview-editor").length')
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
# see http://bugs.python.org/issue8876
# this is just a quick hack so we can test build in vagrant
import os
# Vagrant shared folders don't support hard links; removing os.link makes
# sdist fall back to copying files (see the issue8876 note above).
if os.environ.get('USER','') == 'vagrant':
    del os.link
from setuptools import setup, find_packages
def requirements():
    """Return install requirements parsed from ./requirements.txt.

    Blank lines and comment lines (starting with '#') are skipped. Lines are
    stripped of surrounding whitespace -- the previous version stripped only
    '\n', so CRLF line endings or trailing spaces leaked into requirement
    strings and indented comments were not filtered.
    """
    with open('./requirements.txt', 'r') as f:
        stripped = (line.strip() for line in f)
        return [line for line in stripped
                if line and not line.startswith('#')]
# Package metadata; install requirements are read from requirements.txt
# at build/install time via requirements() above.
setup(name='openregister-entry',
      version='0.3.0',
      description='Entry model package',
      author='Openregister.org',
      author_email='paul.downey@whatfettle.com',
      url='https://github.com/openregister/entry',
      download_url='https://github.com/openregister/entry',
      packages=find_packages(exclude=['tests']),
      zip_safe=False,
      include_package_data=True,
      license='MIT',
      platforms='any',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Environment :: Web Environment',
          'Programming Language :: Python :: 3.4',
      ],
      install_requires=requirements(),
      )
|
unknown
|
codeparrot/codeparrot-clean
| ||
// mkerrors.sh -m32
// Code generated by the command above; DO NOT EDIT.
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -m32 _const.go
package syscall
const (
AF_APPLETALK = 0x10
AF_ARP = 0x23
AF_ATM = 0x1e
AF_BLUETOOTH = 0x24
AF_CCITT = 0xa
AF_CHAOS = 0x5
AF_CNT = 0x15
AF_COIP = 0x14
AF_DATAKIT = 0x9
AF_DECnet = 0xc
AF_DLI = 0xd
AF_E164 = 0x1a
AF_ECMA = 0x8
AF_HYLINK = 0xf
AF_IEEE80211 = 0x25
AF_IMPLINK = 0x3
AF_INET = 0x2
AF_INET6 = 0x1c
AF_INET6_SDP = 0x2a
AF_INET_SDP = 0x28
AF_IPX = 0x17
AF_ISDN = 0x1a
AF_ISO = 0x7
AF_LAT = 0xe
AF_LINK = 0x12
AF_LOCAL = 0x1
AF_MAX = 0x2a
AF_NATM = 0x1d
AF_NETBIOS = 0x6
AF_NETGRAPH = 0x20
AF_OSI = 0x7
AF_PUP = 0x4
AF_ROUTE = 0x11
AF_SCLUSTER = 0x22
AF_SIP = 0x18
AF_SLOW = 0x21
AF_SNA = 0xb
AF_UNIX = 0x1
AF_UNSPEC = 0x0
AF_VENDOR00 = 0x27
AF_VENDOR01 = 0x29
AF_VENDOR02 = 0x2b
AF_VENDOR03 = 0x2d
AF_VENDOR04 = 0x2f
AF_VENDOR05 = 0x31
AF_VENDOR06 = 0x33
AF_VENDOR07 = 0x35
AF_VENDOR08 = 0x37
AF_VENDOR09 = 0x39
AF_VENDOR10 = 0x3b
AF_VENDOR11 = 0x3d
AF_VENDOR12 = 0x3f
AF_VENDOR13 = 0x41
AF_VENDOR14 = 0x43
AF_VENDOR15 = 0x45
AF_VENDOR16 = 0x47
AF_VENDOR17 = 0x49
AF_VENDOR18 = 0x4b
AF_VENDOR19 = 0x4d
AF_VENDOR20 = 0x4f
AF_VENDOR21 = 0x51
AF_VENDOR22 = 0x53
AF_VENDOR23 = 0x55
AF_VENDOR24 = 0x57
AF_VENDOR25 = 0x59
AF_VENDOR26 = 0x5b
AF_VENDOR27 = 0x5d
AF_VENDOR28 = 0x5f
AF_VENDOR29 = 0x61
AF_VENDOR30 = 0x63
AF_VENDOR31 = 0x65
AF_VENDOR32 = 0x67
AF_VENDOR33 = 0x69
AF_VENDOR34 = 0x6b
AF_VENDOR35 = 0x6d
AF_VENDOR36 = 0x6f
AF_VENDOR37 = 0x71
AF_VENDOR38 = 0x73
AF_VENDOR39 = 0x75
AF_VENDOR40 = 0x77
AF_VENDOR41 = 0x79
AF_VENDOR42 = 0x7b
AF_VENDOR43 = 0x7d
AF_VENDOR44 = 0x7f
AF_VENDOR45 = 0x81
AF_VENDOR46 = 0x83
AF_VENDOR47 = 0x85
B0 = 0x0
B110 = 0x6e
B115200 = 0x1c200
B1200 = 0x4b0
B134 = 0x86
B14400 = 0x3840
B150 = 0x96
B1800 = 0x708
B19200 = 0x4b00
B200 = 0xc8
B230400 = 0x38400
B2400 = 0x960
B28800 = 0x7080
B300 = 0x12c
B38400 = 0x9600
B460800 = 0x70800
B4800 = 0x12c0
B50 = 0x32
B57600 = 0xe100
B600 = 0x258
B7200 = 0x1c20
B75 = 0x4b
B76800 = 0x12c00
B921600 = 0xe1000
B9600 = 0x2580
BIOCFEEDBACK = 0x8004427c
BIOCFLUSH = 0x20004268
BIOCGBLEN = 0x40044266
BIOCGDIRECTION = 0x40044276
BIOCGDLT = 0x4004426a
BIOCGDLTLIST = 0xc0084279
BIOCGETBUFMODE = 0x4004427d
BIOCGETIF = 0x4020426b
BIOCGETZMAX = 0x4004427f
BIOCGHDRCMPLT = 0x40044274
BIOCGRSIG = 0x40044272
BIOCGRTIMEOUT = 0x4008426e
BIOCGSEESENT = 0x40044276
BIOCGSTATS = 0x4008426f
BIOCGTSTAMP = 0x40044283
BIOCIMMEDIATE = 0x80044270
BIOCLOCK = 0x2000427a
BIOCPROMISC = 0x20004269
BIOCROTZBUF = 0x400c4280
BIOCSBLEN = 0xc0044266
BIOCSDIRECTION = 0x80044277
BIOCSDLT = 0x80044278
BIOCSETBUFMODE = 0x8004427e
BIOCSETF = 0x80084267
BIOCSETFNR = 0x80084282
BIOCSETIF = 0x8020426c
BIOCSETWF = 0x8008427b
BIOCSETZBUF = 0x800c4281
BIOCSHDRCMPLT = 0x80044275
BIOCSRSIG = 0x80044273
BIOCSRTIMEOUT = 0x8008426d
BIOCSSEESENT = 0x80044277
BIOCSTSTAMP = 0x80044284
BIOCVERSION = 0x40044271
BPF_A = 0x10
BPF_ABS = 0x20
BPF_ADD = 0x0
BPF_ALIGNMENT = 0x4
BPF_ALU = 0x4
BPF_AND = 0x50
BPF_B = 0x10
BPF_BUFMODE_BUFFER = 0x1
BPF_BUFMODE_ZBUF = 0x2
BPF_DIV = 0x30
BPF_H = 0x8
BPF_IMM = 0x0
BPF_IND = 0x40
BPF_JA = 0x0
BPF_JEQ = 0x10
BPF_JGE = 0x30
BPF_JGT = 0x20
BPF_JMP = 0x5
BPF_JSET = 0x40
BPF_K = 0x0
BPF_LD = 0x0
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXBUFSIZE = 0x80000
BPF_MAXINSNS = 0x200
BPF_MEM = 0x60
BPF_MEMWORDS = 0x10
BPF_MINBUFSIZE = 0x20
BPF_MINOR_VERSION = 0x1
BPF_MISC = 0x7
BPF_MSH = 0xa0
BPF_MUL = 0x20
BPF_NEG = 0x80
BPF_OR = 0x40
BPF_RELEASE = 0x30bb6
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAX = 0x0
BPF_TXA = 0x80
BPF_T_BINTIME = 0x2
BPF_T_BINTIME_FAST = 0x102
BPF_T_BINTIME_MONOTONIC = 0x202
BPF_T_BINTIME_MONOTONIC_FAST = 0x302
BPF_T_FAST = 0x100
BPF_T_FLAG_MASK = 0x300
BPF_T_FORMAT_MASK = 0x3
BPF_T_MICROTIME = 0x0
BPF_T_MICROTIME_FAST = 0x100
BPF_T_MICROTIME_MONOTONIC = 0x200
BPF_T_MICROTIME_MONOTONIC_FAST = 0x300
BPF_T_MONOTONIC = 0x200
BPF_T_MONOTONIC_FAST = 0x300
BPF_T_NANOTIME = 0x1
BPF_T_NANOTIME_FAST = 0x101
BPF_T_NANOTIME_MONOTONIC = 0x201
BPF_T_NANOTIME_MONOTONIC_FAST = 0x301
BPF_T_NONE = 0x3
BPF_T_NORMAL = 0x0
BPF_W = 0x0
BPF_X = 0x8
BRKINT = 0x2
CFLUSH = 0xf
CLOCAL = 0x8000
CREAD = 0x800
CS5 = 0x0
CS6 = 0x100
CS7 = 0x200
CS8 = 0x300
CSIZE = 0x300
CSTART = 0x11
CSTATUS = 0x14
CSTOP = 0x13
CSTOPB = 0x400
CSUSP = 0x1a
CTL_MAXNAME = 0x18
CTL_NET = 0x4
DLT_A429 = 0xb8
DLT_A653_ICM = 0xb9
DLT_AIRONET_HEADER = 0x78
DLT_AOS = 0xde
DLT_APPLE_IP_OVER_IEEE1394 = 0x8a
DLT_ARCNET = 0x7
DLT_ARCNET_LINUX = 0x81
DLT_ATM_CLIP = 0x13
DLT_ATM_RFC1483 = 0xb
DLT_AURORA = 0x7e
DLT_AX25 = 0x3
DLT_AX25_KISS = 0xca
DLT_BACNET_MS_TP = 0xa5
DLT_BLUETOOTH_HCI_H4 = 0xbb
DLT_BLUETOOTH_HCI_H4_WITH_PHDR = 0xc9
DLT_CAN20B = 0xbe
DLT_CAN_SOCKETCAN = 0xe3
DLT_CHAOS = 0x5
DLT_CHDLC = 0x68
DLT_CISCO_IOS = 0x76
DLT_C_HDLC = 0x68
DLT_C_HDLC_WITH_DIR = 0xcd
DLT_DBUS = 0xe7
DLT_DECT = 0xdd
DLT_DOCSIS = 0x8f
DLT_DVB_CI = 0xeb
DLT_ECONET = 0x73
DLT_EN10MB = 0x1
DLT_EN3MB = 0x2
DLT_ENC = 0x6d
DLT_ERF = 0xc5
DLT_ERF_ETH = 0xaf
DLT_ERF_POS = 0xb0
DLT_FC_2 = 0xe0
DLT_FC_2_WITH_FRAME_DELIMS = 0xe1
DLT_FDDI = 0xa
DLT_FLEXRAY = 0xd2
DLT_FRELAY = 0x6b
DLT_FRELAY_WITH_DIR = 0xce
DLT_GCOM_SERIAL = 0xad
DLT_GCOM_T1E1 = 0xac
DLT_GPF_F = 0xab
DLT_GPF_T = 0xaa
DLT_GPRS_LLC = 0xa9
DLT_GSMTAP_ABIS = 0xda
DLT_GSMTAP_UM = 0xd9
DLT_HHDLC = 0x79
DLT_IBM_SN = 0x92
DLT_IBM_SP = 0x91
DLT_IEEE802 = 0x6
DLT_IEEE802_11 = 0x69
DLT_IEEE802_11_RADIO = 0x7f
DLT_IEEE802_11_RADIO_AVS = 0xa3
DLT_IEEE802_15_4 = 0xc3
DLT_IEEE802_15_4_LINUX = 0xbf
DLT_IEEE802_15_4_NOFCS = 0xe6
DLT_IEEE802_15_4_NONASK_PHY = 0xd7
DLT_IEEE802_16_MAC_CPS = 0xbc
DLT_IEEE802_16_MAC_CPS_RADIO = 0xc1
DLT_IPFILTER = 0x74
DLT_IPMB = 0xc7
DLT_IPMB_LINUX = 0xd1
DLT_IPNET = 0xe2
DLT_IPOIB = 0xf2
DLT_IPV4 = 0xe4
DLT_IPV6 = 0xe5
DLT_IP_OVER_FC = 0x7a
DLT_JUNIPER_ATM1 = 0x89
DLT_JUNIPER_ATM2 = 0x87
DLT_JUNIPER_ATM_CEMIC = 0xee
DLT_JUNIPER_CHDLC = 0xb5
DLT_JUNIPER_ES = 0x84
DLT_JUNIPER_ETHER = 0xb2
DLT_JUNIPER_FIBRECHANNEL = 0xea
DLT_JUNIPER_FRELAY = 0xb4
DLT_JUNIPER_GGSN = 0x85
DLT_JUNIPER_ISM = 0xc2
DLT_JUNIPER_MFR = 0x86
DLT_JUNIPER_MLFR = 0x83
DLT_JUNIPER_MLPPP = 0x82
DLT_JUNIPER_MONITOR = 0xa4
DLT_JUNIPER_PIC_PEER = 0xae
DLT_JUNIPER_PPP = 0xb3
DLT_JUNIPER_PPPOE = 0xa7
DLT_JUNIPER_PPPOE_ATM = 0xa8
DLT_JUNIPER_SERVICES = 0x88
DLT_JUNIPER_SRX_E2E = 0xe9
DLT_JUNIPER_ST = 0xc8
DLT_JUNIPER_VP = 0xb7
DLT_JUNIPER_VS = 0xe8
DLT_LAPB_WITH_DIR = 0xcf
DLT_LAPD = 0xcb
DLT_LIN = 0xd4
DLT_LINUX_EVDEV = 0xd8
DLT_LINUX_IRDA = 0x90
DLT_LINUX_LAPD = 0xb1
DLT_LINUX_PPP_WITHDIRECTION = 0xa6
DLT_LINUX_SLL = 0x71
DLT_LOOP = 0x6c
DLT_LTALK = 0x72
DLT_MATCHING_MAX = 0xf6
DLT_MATCHING_MIN = 0x68
DLT_MFR = 0xb6
DLT_MOST = 0xd3
DLT_MPEG_2_TS = 0xf3
DLT_MPLS = 0xdb
DLT_MTP2 = 0x8c
DLT_MTP2_WITH_PHDR = 0x8b
DLT_MTP3 = 0x8d
DLT_MUX27010 = 0xec
DLT_NETANALYZER = 0xf0
DLT_NETANALYZER_TRANSPARENT = 0xf1
DLT_NFC_LLCP = 0xf5
DLT_NFLOG = 0xef
DLT_NG40 = 0xf4
DLT_NULL = 0x0
DLT_PCI_EXP = 0x7d
DLT_PFLOG = 0x75
DLT_PFSYNC = 0x79
DLT_PPI = 0xc0
DLT_PPP = 0x9
DLT_PPP_BSDOS = 0x10
DLT_PPP_ETHER = 0x33
DLT_PPP_PPPD = 0xa6
DLT_PPP_SERIAL = 0x32
DLT_PPP_WITH_DIR = 0xcc
DLT_PPP_WITH_DIRECTION = 0xa6
DLT_PRISM_HEADER = 0x77
DLT_PRONET = 0x4
DLT_RAIF1 = 0xc6
DLT_RAW = 0xc
DLT_RIO = 0x7c
DLT_SCCP = 0x8e
DLT_SITA = 0xc4
DLT_SLIP = 0x8
DLT_SLIP_BSDOS = 0xf
DLT_STANAG_5066_D_PDU = 0xed
DLT_SUNATM = 0x7b
DLT_SYMANTEC_FIREWALL = 0x63
DLT_TZSP = 0x80
DLT_USB = 0xba
DLT_USB_LINUX = 0xbd
DLT_USB_LINUX_MMAPPED = 0xdc
DLT_USER0 = 0x93
DLT_USER1 = 0x94
DLT_USER10 = 0x9d
DLT_USER11 = 0x9e
DLT_USER12 = 0x9f
DLT_USER13 = 0xa0
DLT_USER14 = 0xa1
DLT_USER15 = 0xa2
DLT_USER2 = 0x95
DLT_USER3 = 0x96
DLT_USER4 = 0x97
DLT_USER5 = 0x98
DLT_USER6 = 0x99
DLT_USER7 = 0x9a
DLT_USER8 = 0x9b
DLT_USER9 = 0x9c
DLT_WIHART = 0xdf
DLT_X2E_SERIAL = 0xd5
DLT_X2E_XORAYA = 0xd6
DT_BLK = 0x6
DT_CHR = 0x2
DT_DIR = 0x4
DT_FIFO = 0x1
DT_LNK = 0xa
DT_REG = 0x8
DT_SOCK = 0xc
DT_UNKNOWN = 0x0
DT_WHT = 0xe
ECHO = 0x8
ECHOCTL = 0x40
ECHOE = 0x2
ECHOK = 0x4
ECHOKE = 0x1
ECHONL = 0x10
ECHOPRT = 0x20
EVFILT_AIO = -0x3
EVFILT_FS = -0x9
EVFILT_LIO = -0xa
EVFILT_PROC = -0x5
EVFILT_READ = -0x1
EVFILT_SIGNAL = -0x6
EVFILT_SYSCOUNT = 0xb
EVFILT_TIMER = -0x7
EVFILT_USER = -0xb
EVFILT_VNODE = -0x4
EVFILT_WRITE = -0x2
EV_ADD = 0x1
EV_CLEAR = 0x20
EV_DELETE = 0x2
EV_DISABLE = 0x8
EV_DISPATCH = 0x80
EV_DROP = 0x1000
EV_ENABLE = 0x4
EV_EOF = 0x8000
EV_ERROR = 0x4000
EV_FLAG1 = 0x2000
EV_ONESHOT = 0x10
EV_RECEIPT = 0x40
EV_SYSFLAGS = 0xf000
EXTA = 0x4b00
EXTB = 0x9600
EXTPROC = 0x800
FD_CLOEXEC = 0x1
FD_SETSIZE = 0x400
FLUSHO = 0x800000
F_CANCEL = 0x5
F_DUP2FD = 0xa
F_DUP2FD_CLOEXEC = 0x12
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x11
F_GETFD = 0x1
F_GETFL = 0x3
F_GETLK = 0xb
F_GETOWN = 0x5
F_OGETLK = 0x7
F_OK = 0x0
F_OSETLK = 0x8
F_OSETLKW = 0x9
F_RDAHEAD = 0x10
F_RDLCK = 0x1
F_READAHEAD = 0xf
F_SETFD = 0x2
F_SETFL = 0x4
F_SETLK = 0xc
F_SETLKW = 0xd
F_SETLK_REMOTE = 0xe
F_SETOWN = 0x6
F_UNLCK = 0x2
F_UNLCKSYS = 0x4
F_WRLCK = 0x3
HUPCL = 0x4000
ICANON = 0x100
ICMP6_FILTER = 0x12
ICRNL = 0x100
IEXTEN = 0x400
IFAN_ARRIVAL = 0x0
IFAN_DEPARTURE = 0x1
IFF_ALLMULTI = 0x200
IFF_ALTPHYS = 0x4000
IFF_BROADCAST = 0x2
IFF_CANTCHANGE = 0x218f72
IFF_CANTCONFIG = 0x10000
IFF_DEBUG = 0x4
IFF_DRV_OACTIVE = 0x400
IFF_DRV_RUNNING = 0x40
IFF_DYING = 0x200000
IFF_LINK0 = 0x1000
IFF_LINK1 = 0x2000
IFF_LINK2 = 0x4000
IFF_LOOPBACK = 0x8
IFF_MONITOR = 0x40000
IFF_MULTICAST = 0x8000
IFF_NOARP = 0x80
IFF_OACTIVE = 0x400
IFF_POINTOPOINT = 0x10
IFF_PPROMISC = 0x20000
IFF_PROMISC = 0x100
IFF_RENAMING = 0x400000
IFF_RUNNING = 0x40
IFF_SIMPLEX = 0x800
IFF_SMART = 0x20
IFF_STATICARP = 0x80000
IFF_UP = 0x1
IFNAMSIZ = 0x10
IFT_1822 = 0x2
IFT_A12MPPSWITCH = 0x82
IFT_AAL2 = 0xbb
IFT_AAL5 = 0x31
IFT_ADSL = 0x5e
IFT_AFLANE8023 = 0x3b
IFT_AFLANE8025 = 0x3c
IFT_ARAP = 0x58
IFT_ARCNET = 0x23
IFT_ARCNETPLUS = 0x24
IFT_ASYNC = 0x54
IFT_ATM = 0x25
IFT_ATMDXI = 0x69
IFT_ATMFUNI = 0x6a
IFT_ATMIMA = 0x6b
IFT_ATMLOGICAL = 0x50
IFT_ATMRADIO = 0xbd
IFT_ATMSUBINTERFACE = 0x86
IFT_ATMVCIENDPT = 0xc2
IFT_ATMVIRTUAL = 0x95
IFT_BGPPOLICYACCOUNTING = 0xa2
IFT_BRIDGE = 0xd1
IFT_BSC = 0x53
IFT_CARP = 0xf8
IFT_CCTEMUL = 0x3d
IFT_CEPT = 0x13
IFT_CES = 0x85
IFT_CHANNEL = 0x46
IFT_CNR = 0x55
IFT_COFFEE = 0x84
IFT_COMPOSITELINK = 0x9b
IFT_DCN = 0x8d
IFT_DIGITALPOWERLINE = 0x8a
IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
IFT_DLSW = 0x4a
IFT_DOCSCABLEDOWNSTREAM = 0x80
IFT_DOCSCABLEMACLAYER = 0x7f
IFT_DOCSCABLEUPSTREAM = 0x81
IFT_DS0 = 0x51
IFT_DS0BUNDLE = 0x52
IFT_DS1FDL = 0xaa
IFT_DS3 = 0x1e
IFT_DTM = 0x8c
IFT_DVBASILN = 0xac
IFT_DVBASIOUT = 0xad
IFT_DVBRCCDOWNSTREAM = 0x93
IFT_DVBRCCMACLAYER = 0x92
IFT_DVBRCCUPSTREAM = 0x94
IFT_ENC = 0xf4
IFT_EON = 0x19
IFT_EPLRS = 0x57
IFT_ESCON = 0x49
IFT_ETHER = 0x6
IFT_FAITH = 0xf2
IFT_FAST = 0x7d
IFT_FASTETHER = 0x3e
IFT_FASTETHERFX = 0x45
IFT_FDDI = 0xf
IFT_FIBRECHANNEL = 0x38
IFT_FRAMERELAYINTERCONNECT = 0x3a
IFT_FRAMERELAYMPI = 0x5c
IFT_FRDLCIENDPT = 0xc1
IFT_FRELAY = 0x20
IFT_FRELAYDCE = 0x2c
IFT_FRF16MFRBUNDLE = 0xa3
IFT_FRFORWARD = 0x9e
IFT_G703AT2MB = 0x43
IFT_G703AT64K = 0x42
IFT_GIF = 0xf0
IFT_GIGABITETHERNET = 0x75
IFT_GR303IDT = 0xb2
IFT_GR303RDT = 0xb1
IFT_H323GATEKEEPER = 0xa4
IFT_H323PROXY = 0xa5
IFT_HDH1822 = 0x3
IFT_HDLC = 0x76
IFT_HDSL2 = 0xa8
IFT_HIPERLAN2 = 0xb7
IFT_HIPPI = 0x2f
IFT_HIPPIINTERFACE = 0x39
IFT_HOSTPAD = 0x5a
IFT_HSSI = 0x2e
IFT_HY = 0xe
IFT_IBM370PARCHAN = 0x48
IFT_IDSL = 0x9a
IFT_IEEE1394 = 0x90
IFT_IEEE80211 = 0x47
IFT_IEEE80212 = 0x37
IFT_IEEE8023ADLAG = 0xa1
IFT_IFGSN = 0x91
IFT_IMT = 0xbe
IFT_INFINIBAND = 0xc7
IFT_INTERLEAVE = 0x7c
IFT_IP = 0x7e
IFT_IPFORWARD = 0x8e
IFT_IPOVERATM = 0x72
IFT_IPOVERCDLC = 0x6d
IFT_IPOVERCLAW = 0x6e
IFT_IPSWITCH = 0x4e
IFT_IPXIP = 0xf9
IFT_ISDN = 0x3f
IFT_ISDNBASIC = 0x14
IFT_ISDNPRIMARY = 0x15
IFT_ISDNS = 0x4b
IFT_ISDNU = 0x4c
IFT_ISO88022LLC = 0x29
IFT_ISO88023 = 0x7
IFT_ISO88024 = 0x8
IFT_ISO88025 = 0x9
IFT_ISO88025CRFPINT = 0x62
IFT_ISO88025DTR = 0x56
IFT_ISO88025FIBER = 0x73
IFT_ISO88026 = 0xa
IFT_ISUP = 0xb3
IFT_L2VLAN = 0x87
IFT_L3IPVLAN = 0x88
IFT_L3IPXVLAN = 0x89
IFT_LAPB = 0x10
IFT_LAPD = 0x4d
IFT_LAPF = 0x77
IFT_LOCALTALK = 0x2a
IFT_LOOP = 0x18
IFT_MEDIAMAILOVERIP = 0x8b
IFT_MFSIGLINK = 0xa7
IFT_MIOX25 = 0x26
IFT_MODEM = 0x30
IFT_MPC = 0x71
IFT_MPLS = 0xa6
IFT_MPLSTUNNEL = 0x96
IFT_MSDSL = 0x8f
IFT_MVL = 0xbf
IFT_MYRINET = 0x63
IFT_NFAS = 0xaf
IFT_NSIP = 0x1b
IFT_OPTICALCHANNEL = 0xc3
IFT_OPTICALTRANSPORT = 0xc4
IFT_OTHER = 0x1
IFT_P10 = 0xc
IFT_P80 = 0xd
IFT_PARA = 0x22
IFT_PFLOG = 0xf6
IFT_PFSYNC = 0xf7
IFT_PLC = 0xae
IFT_POS = 0xab
IFT_PPP = 0x17
IFT_PPPMULTILINKBUNDLE = 0x6c
IFT_PROPBWAP2MP = 0xb8
IFT_PROPCNLS = 0x59
IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
IFT_PROPMUX = 0x36
IFT_PROPVIRTUAL = 0x35
IFT_PROPWIRELESSP2P = 0x9d
IFT_PTPSERIAL = 0x16
IFT_PVC = 0xf1
IFT_QLLC = 0x44
IFT_RADIOMAC = 0xbc
IFT_RADSL = 0x5f
IFT_REACHDSL = 0xc0
IFT_RFC1483 = 0x9f
IFT_RS232 = 0x21
IFT_RSRB = 0x4f
IFT_SDLC = 0x11
IFT_SDSL = 0x60
IFT_SHDSL = 0xa9
IFT_SIP = 0x1f
IFT_SLIP = 0x1c
IFT_SMDSDXI = 0x2b
IFT_SMDSICIP = 0x34
IFT_SONET = 0x27
IFT_SONETOVERHEADCHANNEL = 0xb9
IFT_SONETPATH = 0x32
IFT_SONETVT = 0x33
IFT_SRP = 0x97
IFT_SS7SIGLINK = 0x9c
IFT_STACKTOSTACK = 0x6f
IFT_STARLAN = 0xb
IFT_STF = 0xd7
IFT_T1 = 0x12
IFT_TDLC = 0x74
IFT_TERMPAD = 0x5b
IFT_TR008 = 0xb0
IFT_TRANSPHDLC = 0x7b
IFT_TUNNEL = 0x83
IFT_ULTRA = 0x1d
IFT_USB = 0xa0
IFT_V11 = 0x40
IFT_V35 = 0x2d
IFT_V36 = 0x41
IFT_V37 = 0x78
IFT_VDSL = 0x61
IFT_VIRTUALIPADDRESS = 0x70
IFT_VOICEEM = 0x64
IFT_VOICEENCAP = 0x67
IFT_VOICEFXO = 0x65
IFT_VOICEFXS = 0x66
IFT_VOICEOVERATM = 0x98
IFT_VOICEOVERFRAMERELAY = 0x99
IFT_VOICEOVERIP = 0x68
IFT_X213 = 0x5d
IFT_X25 = 0x5
IFT_X25DDN = 0x4
IFT_X25HUNTGROUP = 0x7a
IFT_X25MLP = 0x79
IFT_X25PLE = 0x28
IFT_XETHER = 0x1a
IGNBRK = 0x1
IGNCR = 0x80
IGNPAR = 0x4
IMAXBEL = 0x2000
INLCR = 0x40
INPCK = 0x10
IN_CLASSA_HOST = 0xffffff
IN_CLASSA_MAX = 0x80
IN_CLASSA_NET = 0xff000000
IN_CLASSA_NSHIFT = 0x18
IN_CLASSB_HOST = 0xffff
IN_CLASSB_MAX = 0x10000
IN_CLASSB_NET = 0xffff0000
IN_CLASSB_NSHIFT = 0x10
IN_CLASSC_HOST = 0xff
IN_CLASSC_NET = 0xffffff00
IN_CLASSC_NSHIFT = 0x8
IN_CLASSD_HOST = 0xfffffff
IN_CLASSD_NET = 0xf0000000
IN_CLASSD_NSHIFT = 0x1c
IN_LOOPBACKNET = 0x7f
IN_RFC3021_MASK = 0xfffffffe
IPPROTO_3PC = 0x22
IPPROTO_ADFS = 0x44
IPPROTO_AH = 0x33
IPPROTO_AHIP = 0x3d
IPPROTO_APES = 0x63
IPPROTO_ARGUS = 0xd
IPPROTO_AX25 = 0x5d
IPPROTO_BHA = 0x31
IPPROTO_BLT = 0x1e
IPPROTO_BRSATMON = 0x4c
IPPROTO_CARP = 0x70
IPPROTO_CFTP = 0x3e
IPPROTO_CHAOS = 0x10
IPPROTO_CMTP = 0x26
IPPROTO_CPHB = 0x49
IPPROTO_CPNX = 0x48
IPPROTO_DDP = 0x25
IPPROTO_DGP = 0x56
IPPROTO_DIVERT = 0x102
IPPROTO_DONE = 0x101
IPPROTO_DSTOPTS = 0x3c
IPPROTO_EGP = 0x8
IPPROTO_EMCON = 0xe
IPPROTO_ENCAP = 0x62
IPPROTO_EON = 0x50
IPPROTO_ESP = 0x32
IPPROTO_ETHERIP = 0x61
IPPROTO_FRAGMENT = 0x2c
IPPROTO_GGP = 0x3
IPPROTO_GMTP = 0x64
IPPROTO_GRE = 0x2f
IPPROTO_HELLO = 0x3f
IPPROTO_HMP = 0x14
IPPROTO_HOPOPTS = 0x0
IPPROTO_ICMP = 0x1
IPPROTO_ICMPV6 = 0x3a
IPPROTO_IDP = 0x16
IPPROTO_IDPR = 0x23
IPPROTO_IDRP = 0x2d
IPPROTO_IGMP = 0x2
IPPROTO_IGP = 0x55
IPPROTO_IGRP = 0x58
IPPROTO_IL = 0x28
IPPROTO_INLSP = 0x34
IPPROTO_INP = 0x20
IPPROTO_IP = 0x0
IPPROTO_IPCOMP = 0x6c
IPPROTO_IPCV = 0x47
IPPROTO_IPEIP = 0x5e
IPPROTO_IPIP = 0x4
IPPROTO_IPPC = 0x43
IPPROTO_IPV4 = 0x4
IPPROTO_IPV6 = 0x29
IPPROTO_IRTP = 0x1c
IPPROTO_KRYPTOLAN = 0x41
IPPROTO_LARP = 0x5b
IPPROTO_LEAF1 = 0x19
IPPROTO_LEAF2 = 0x1a
IPPROTO_MAX = 0x100
IPPROTO_MAXID = 0x34
IPPROTO_MEAS = 0x13
IPPROTO_MH = 0x87
IPPROTO_MHRP = 0x30
IPPROTO_MICP = 0x5f
IPPROTO_MOBILE = 0x37
IPPROTO_MPLS = 0x89
IPPROTO_MTP = 0x5c
IPPROTO_MUX = 0x12
IPPROTO_ND = 0x4d
IPPROTO_NHRP = 0x36
IPPROTO_NONE = 0x3b
IPPROTO_NSP = 0x1f
IPPROTO_NVPII = 0xb
IPPROTO_OLD_DIVERT = 0xfe
IPPROTO_OSPFIGP = 0x59
IPPROTO_PFSYNC = 0xf0
IPPROTO_PGM = 0x71
IPPROTO_PIGP = 0x9
IPPROTO_PIM = 0x67
IPPROTO_PRM = 0x15
IPPROTO_PUP = 0xc
IPPROTO_PVP = 0x4b
IPPROTO_RAW = 0xff
IPPROTO_RCCMON = 0xa
IPPROTO_RDP = 0x1b
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_RVD = 0x42
IPPROTO_SATEXPAK = 0x40
IPPROTO_SATMON = 0x45
IPPROTO_SCCSP = 0x60
IPPROTO_SCTP = 0x84
IPPROTO_SDRP = 0x2a
IPPROTO_SEND = 0x103
IPPROTO_SEP = 0x21
IPPROTO_SKIP = 0x39
IPPROTO_SPACER = 0x7fff
IPPROTO_SRPC = 0x5a
IPPROTO_ST = 0x7
IPPROTO_SVMTP = 0x52
IPPROTO_SWIPE = 0x35
IPPROTO_TCF = 0x57
IPPROTO_TCP = 0x6
IPPROTO_TLSP = 0x38
IPPROTO_TP = 0x1d
IPPROTO_TPXX = 0x27
IPPROTO_TRUNK1 = 0x17
IPPROTO_TRUNK2 = 0x18
IPPROTO_TTP = 0x54
IPPROTO_UDP = 0x11
IPPROTO_VINES = 0x53
IPPROTO_VISA = 0x46
IPPROTO_VMTP = 0x51
IPPROTO_WBEXPAK = 0x4f
IPPROTO_WBMON = 0x4e
IPPROTO_WSN = 0x4a
IPPROTO_XNET = 0xf
IPPROTO_XTP = 0x24
IPV6_AUTOFLOWLABEL = 0x3b
IPV6_BINDANY = 0x40
IPV6_BINDV6ONLY = 0x1b
IPV6_CHECKSUM = 0x1a
IPV6_DEFAULT_MULTICAST_HOPS = 0x1
IPV6_DEFAULT_MULTICAST_LOOP = 0x1
IPV6_DEFHLIM = 0x40
IPV6_DONTFRAG = 0x3e
IPV6_DSTOPTS = 0x32
IPV6_FAITH = 0x1d
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
IPV6_FRAGTTL = 0x78
IPV6_FW_ADD = 0x1e
IPV6_FW_DEL = 0x1f
IPV6_FW_FLUSH = 0x20
IPV6_FW_GET = 0x22
IPV6_FW_ZERO = 0x21
IPV6_HLIMDEC = 0x1
IPV6_HOPLIMIT = 0x2f
IPV6_HOPOPTS = 0x31
IPV6_IPSEC_POLICY = 0x1c
IPV6_JOIN_GROUP = 0xc
IPV6_LEAVE_GROUP = 0xd
IPV6_MAXHLIM = 0xff
IPV6_MAXOPTHDR = 0x800
IPV6_MAXPACKET = 0xffff
IPV6_MAX_GROUP_SRC_FILTER = 0x200
IPV6_MAX_MEMBERSHIPS = 0xfff
IPV6_MAX_SOCK_SRC_FILTER = 0x80
IPV6_MIN_MEMBERSHIPS = 0x1f
IPV6_MMTU = 0x500
IPV6_MSFILTER = 0x4a
IPV6_MULTICAST_HOPS = 0xa
IPV6_MULTICAST_IF = 0x9
IPV6_MULTICAST_LOOP = 0xb
IPV6_NEXTHOP = 0x30
IPV6_PATHMTU = 0x2c
IPV6_PKTINFO = 0x2e
IPV6_PORTRANGE = 0xe
IPV6_PORTRANGE_DEFAULT = 0x0
IPV6_PORTRANGE_HIGH = 0x1
IPV6_PORTRANGE_LOW = 0x2
IPV6_PREFER_TEMPADDR = 0x3f
IPV6_RECVDSTOPTS = 0x28
IPV6_RECVHOPLIMIT = 0x25
IPV6_RECVHOPOPTS = 0x27
IPV6_RECVPATHMTU = 0x2b
IPV6_RECVPKTINFO = 0x24
IPV6_RECVRTHDR = 0x26
IPV6_RECVTCLASS = 0x39
IPV6_RTHDR = 0x33
IPV6_RTHDRDSTOPTS = 0x23
IPV6_RTHDR_LOOSE = 0x0
IPV6_RTHDR_STRICT = 0x1
IPV6_RTHDR_TYPE_0 = 0x0
IPV6_SOCKOPT_RESERVED1 = 0x3
IPV6_TCLASS = 0x3d
IPV6_UNICAST_HOPS = 0x4
IPV6_USE_MIN_MTU = 0x2a
IPV6_V6ONLY = 0x1b
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
IP_ADD_MEMBERSHIP = 0xc
IP_ADD_SOURCE_MEMBERSHIP = 0x46
IP_BINDANY = 0x18
IP_BLOCK_SOURCE = 0x48
IP_DEFAULT_MULTICAST_LOOP = 0x1
IP_DEFAULT_MULTICAST_TTL = 0x1
IP_DF = 0x4000
IP_DONTFRAG = 0x43
IP_DROP_MEMBERSHIP = 0xd
IP_DROP_SOURCE_MEMBERSHIP = 0x47
IP_DUMMYNET3 = 0x31
IP_DUMMYNET_CONFIGURE = 0x3c
IP_DUMMYNET_DEL = 0x3d
IP_DUMMYNET_FLUSH = 0x3e
IP_DUMMYNET_GET = 0x40
IP_FAITH = 0x16
IP_FW3 = 0x30
IP_FW_ADD = 0x32
IP_FW_DEL = 0x33
IP_FW_FLUSH = 0x34
IP_FW_GET = 0x36
IP_FW_NAT_CFG = 0x38
IP_FW_NAT_DEL = 0x39
IP_FW_NAT_GET_CONFIG = 0x3a
IP_FW_NAT_GET_LOG = 0x3b
IP_FW_RESETLOG = 0x37
IP_FW_TABLE_ADD = 0x28
IP_FW_TABLE_DEL = 0x29
IP_FW_TABLE_FLUSH = 0x2a
IP_FW_TABLE_GETSIZE = 0x2b
IP_FW_TABLE_LIST = 0x2c
IP_FW_ZERO = 0x35
IP_HDRINCL = 0x2
IP_IPSEC_POLICY = 0x15
IP_MAXPACKET = 0xffff
IP_MAX_GROUP_SRC_FILTER = 0x200
IP_MAX_MEMBERSHIPS = 0xfff
IP_MAX_SOCK_MUTE_FILTER = 0x80
IP_MAX_SOCK_SRC_FILTER = 0x80
IP_MAX_SOURCE_FILTER = 0x400
IP_MF = 0x2000
IP_MINTTL = 0x42
IP_MIN_MEMBERSHIPS = 0x1f
IP_MSFILTER = 0x4a
IP_MSS = 0x240
IP_MULTICAST_IF = 0x9
IP_MULTICAST_LOOP = 0xb
IP_MULTICAST_TTL = 0xa
IP_MULTICAST_VIF = 0xe
IP_OFFMASK = 0x1fff
IP_ONESBCAST = 0x17
IP_OPTIONS = 0x1
IP_PORTRANGE = 0x13
IP_PORTRANGE_DEFAULT = 0x0
IP_PORTRANGE_HIGH = 0x1
IP_PORTRANGE_LOW = 0x2
IP_RECVDSTADDR = 0x7
IP_RECVIF = 0x14
IP_RECVOPTS = 0x5
IP_RECVRETOPTS = 0x6
IP_RECVTOS = 0x44
IP_RECVTTL = 0x41
IP_RETOPTS = 0x8
IP_RF = 0x8000
IP_RSVP_OFF = 0x10
IP_RSVP_ON = 0xf
IP_RSVP_VIF_OFF = 0x12
IP_RSVP_VIF_ON = 0x11
IP_SENDSRCADDR = 0x7
IP_TOS = 0x3
IP_TTL = 0x4
IP_UNBLOCK_SOURCE = 0x49
ISIG = 0x80
ISTRIP = 0x20
IXANY = 0x800
IXOFF = 0x400
IXON = 0x200
LOCK_EX = 0x2
LOCK_NB = 0x4
LOCK_SH = 0x1
LOCK_UN = 0x8
MADV_AUTOSYNC = 0x7
MADV_CORE = 0x9
MADV_DONTNEED = 0x4
MADV_FREE = 0x5
MADV_NOCORE = 0x8
MADV_NORMAL = 0x0
MADV_NOSYNC = 0x6
MADV_PROTECT = 0xa
MADV_RANDOM = 0x1
MADV_SEQUENTIAL = 0x2
MADV_WILLNEED = 0x3
MAP_ALIGNED_SUPER = 0x1000000
MAP_ALIGNMENT_MASK = -0x1000000
MAP_ALIGNMENT_SHIFT = 0x18
MAP_ANON = 0x1000
MAP_ANONYMOUS = 0x1000
MAP_COPY = 0x2
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_HASSEMAPHORE = 0x200
MAP_NOCORE = 0x20000
MAP_NORESERVE = 0x40
MAP_NOSYNC = 0x800
MAP_PREFAULT_READ = 0x40000
MAP_PRIVATE = 0x2
MAP_RENAME = 0x20
MAP_RESERVED0080 = 0x80
MAP_RESERVED0100 = 0x100
MAP_SHARED = 0x1
MAP_STACK = 0x400
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MSG_CMSG_CLOEXEC = 0x40000
MSG_COMPAT = 0x8000
MSG_CTRUNC = 0x20
MSG_DONTROUTE = 0x4
MSG_DONTWAIT = 0x80
MSG_EOF = 0x100
MSG_EOR = 0x8
MSG_NBIO = 0x4000
MSG_NOSIGNAL = 0x20000
MSG_NOTIFICATION = 0x2000
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_TRUNC = 0x10
MSG_WAITALL = 0x40
MS_ASYNC = 0x1
MS_INVALIDATE = 0x2
MS_SYNC = 0x0
NAME_MAX = 0xff
NET_RT_DUMP = 0x1
NET_RT_FLAGS = 0x2
NET_RT_IFLIST = 0x3
NET_RT_IFLISTL = 0x5
NET_RT_IFMALIST = 0x4
NET_RT_MAXID = 0x6
NOFLSH = 0x80000000
NOTE_ATTRIB = 0x8
NOTE_CHILD = 0x4
NOTE_DELETE = 0x1
NOTE_EXEC = 0x20000000
NOTE_EXIT = 0x80000000
NOTE_EXTEND = 0x4
NOTE_FFAND = 0x40000000
NOTE_FFCOPY = 0xc0000000
NOTE_FFCTRLMASK = 0xc0000000
NOTE_FFLAGSMASK = 0xffffff
NOTE_FFNOP = 0x0
NOTE_FFOR = 0x80000000
NOTE_FORK = 0x40000000
NOTE_LINK = 0x10
NOTE_LOWAT = 0x1
NOTE_PCTRLMASK = 0xf0000000
NOTE_PDATAMASK = 0xfffff
NOTE_RENAME = 0x20
NOTE_REVOKE = 0x40
NOTE_TRACK = 0x1
NOTE_TRACKERR = 0x2
NOTE_TRIGGER = 0x1000000
NOTE_WRITE = 0x2
OCRNL = 0x10
ONLCR = 0x2
ONLRET = 0x40
ONOCR = 0x20
ONOEOT = 0x8
OPOST = 0x1
O_ACCMODE = 0x3
O_APPEND = 0x8
O_ASYNC = 0x40
O_CLOEXEC = 0x100000
O_CREAT = 0x200
O_DIRECT = 0x10000
O_DIRECTORY = 0x20000
O_EXCL = 0x800
O_EXEC = 0x40000
O_EXLOCK = 0x20
O_FSYNC = 0x80
O_NDELAY = 0x4
O_NOCTTY = 0x8000
O_NOFOLLOW = 0x100
O_NONBLOCK = 0x4
O_RDONLY = 0x0
O_RDWR = 0x2
O_SHLOCK = 0x10
O_SYNC = 0x80
O_TRUNC = 0x400
O_TTY_INIT = 0x80000
O_WRONLY = 0x1
PARENB = 0x1000
PARMRK = 0x8
PARODD = 0x2000
PENDIN = 0x20000000
PRIO_PGRP = 0x1
PRIO_PROCESS = 0x0
PRIO_USER = 0x2
PROT_EXEC = 0x4
PROT_NONE = 0x0
PROT_READ = 0x1
PROT_WRITE = 0x2
RLIMIT_AS = 0xa
RLIMIT_CORE = 0x4
RLIMIT_CPU = 0x0
RLIMIT_DATA = 0x2
RLIMIT_FSIZE = 0x1
RLIMIT_NOFILE = 0x8
RLIMIT_STACK = 0x3
RLIM_INFINITY = 0x7fffffffffffffff
RTAX_AUTHOR = 0x6
RTAX_BRD = 0x7
RTAX_DST = 0x0
RTAX_GATEWAY = 0x1
RTAX_GENMASK = 0x3
RTAX_IFA = 0x5
RTAX_IFP = 0x4
RTAX_MAX = 0x8
RTAX_NETMASK = 0x2
RTA_AUTHOR = 0x40
RTA_BRD = 0x80
RTA_DST = 0x1
RTA_GATEWAY = 0x2
RTA_GENMASK = 0x8
RTA_IFA = 0x20
RTA_IFP = 0x10
RTA_NETMASK = 0x4
RTF_BLACKHOLE = 0x1000
RTF_BROADCAST = 0x400000
RTF_DONE = 0x40
RTF_DYNAMIC = 0x10
RTF_FMASK = 0x1004d808
RTF_GATEWAY = 0x2
RTF_GWFLAG_COMPAT = 0x80000000
RTF_HOST = 0x4
RTF_LLDATA = 0x400
RTF_LLINFO = 0x400
RTF_LOCAL = 0x200000
RTF_MODIFIED = 0x20
RTF_MULTICAST = 0x800000
RTF_PINNED = 0x100000
RTF_PRCLONING = 0x10000
RTF_PROTO1 = 0x8000
RTF_PROTO2 = 0x4000
RTF_PROTO3 = 0x40000
RTF_REJECT = 0x8
RTF_RNH_LOCKED = 0x40000000
RTF_STATIC = 0x800
RTF_STICKY = 0x10000000
RTF_UP = 0x1
RTF_XRESOLVE = 0x200
RTM_ADD = 0x1
RTM_CHANGE = 0x3
RTM_DELADDR = 0xd
RTM_DELETE = 0x2
RTM_DELMADDR = 0x10
RTM_GET = 0x4
RTM_IEEE80211 = 0x12
RTM_IFANNOUNCE = 0x11
RTM_IFINFO = 0xe
RTM_LOCK = 0x8
RTM_LOSING = 0x5
RTM_MISS = 0x7
RTM_NEWADDR = 0xc
RTM_NEWMADDR = 0xf
RTM_OLDADD = 0x9
RTM_OLDDEL = 0xa
RTM_REDIRECT = 0x6
RTM_RESOLVE = 0xb
RTM_RTTUNIT = 0xf4240
RTM_VERSION = 0x5
RTV_EXPIRE = 0x4
RTV_HOPCOUNT = 0x2
RTV_MTU = 0x1
RTV_RPIPE = 0x8
RTV_RTT = 0x40
RTV_RTTVAR = 0x80
RTV_SPIPE = 0x10
RTV_SSTHRESH = 0x20
RTV_WEIGHT = 0x100
RT_CACHING_CONTEXT = 0x1
RT_DEFAULT_FIB = 0x0
RT_NORTREF = 0x2
RUSAGE_CHILDREN = -0x1
RUSAGE_SELF = 0x0
RUSAGE_THREAD = 0x1
SCM_BINTIME = 0x4
SCM_CREDS = 0x3
SCM_RIGHTS = 0x1
SCM_TIMESTAMP = 0x2
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
SIOCADDMULTI = 0x80206931
SIOCADDRT = 0x8030720a
SIOCAIFADDR = 0x8040691a
SIOCAIFGROUP = 0x80246987
SIOCALIFADDR = 0x8118691b
SIOCATMARK = 0x40047307
SIOCDELMULTI = 0x80206932
SIOCDELRT = 0x8030720b
SIOCDIFADDR = 0x80206919
SIOCDIFGROUP = 0x80246989
SIOCDIFPHYADDR = 0x80206949
SIOCDLIFADDR = 0x8118691d
SIOCGDRVSPEC = 0xc01c697b
SIOCGETSGCNT = 0xc0147210
SIOCGETVIFCNT = 0xc014720f
SIOCGHIWAT = 0x40047301
SIOCGIFADDR = 0xc0206921
SIOCGIFBRDADDR = 0xc0206923
SIOCGIFCAP = 0xc020691f
SIOCGIFCONF = 0xc0086924
SIOCGIFDESCR = 0xc020692a
SIOCGIFDSTADDR = 0xc0206922
SIOCGIFFIB = 0xc020695c
SIOCGIFFLAGS = 0xc0206911
SIOCGIFGENERIC = 0xc020693a
SIOCGIFGMEMB = 0xc024698a
SIOCGIFGROUP = 0xc0246988
SIOCGIFINDEX = 0xc0206920
SIOCGIFMAC = 0xc0206926
SIOCGIFMEDIA = 0xc0286938
SIOCGIFMETRIC = 0xc0206917
SIOCGIFMTU = 0xc0206933
SIOCGIFNETMASK = 0xc0206925
SIOCGIFPDSTADDR = 0xc0206948
SIOCGIFPHYS = 0xc0206935
SIOCGIFPSRCADDR = 0xc0206947
SIOCGIFSTATUS = 0xc331693b
SIOCGLIFADDR = 0xc118691c
SIOCGLIFPHYADDR = 0xc118694b
SIOCGLOWAT = 0x40047303
SIOCGPGRP = 0x40047309
SIOCGPRIVATE_0 = 0xc0206950
SIOCGPRIVATE_1 = 0xc0206951
SIOCIFCREATE = 0xc020697a
SIOCIFCREATE2 = 0xc020697c
SIOCIFDESTROY = 0x80206979
SIOCIFGCLONERS = 0xc00c6978
SIOCSDRVSPEC = 0x801c697b
SIOCSHIWAT = 0x80047300
SIOCSIFADDR = 0x8020690c
SIOCSIFBRDADDR = 0x80206913
SIOCSIFCAP = 0x8020691e
SIOCSIFDESCR = 0x80206929
SIOCSIFDSTADDR = 0x8020690e
SIOCSIFFIB = 0x8020695d
SIOCSIFFLAGS = 0x80206910
SIOCSIFGENERIC = 0x80206939
SIOCSIFLLADDR = 0x8020693c
SIOCSIFMAC = 0x80206927
SIOCSIFMEDIA = 0xc0206937
SIOCSIFMETRIC = 0x80206918
SIOCSIFMTU = 0x80206934
SIOCSIFNAME = 0x80206928
SIOCSIFNETMASK = 0x80206916
SIOCSIFPHYADDR = 0x80406946
SIOCSIFPHYS = 0x80206936
SIOCSIFRVNET = 0xc020695b
SIOCSIFVNET = 0xc020695a
SIOCSLIFPHYADDR = 0x8118694a
SIOCSLOWAT = 0x80047302
SIOCSPGRP = 0x80047308
SOCK_CLOEXEC = 0x10000000
SOCK_DGRAM = 0x2
SOCK_MAXADDRLEN = 0xff
SOCK_NONBLOCK = 0x20000000
SOCK_RAW = 0x3
SOCK_RDM = 0x4
SOCK_SEQPACKET = 0x5
SOCK_STREAM = 0x1
SOL_SOCKET = 0xffff
SOMAXCONN = 0x80
SO_ACCEPTCONN = 0x2
SO_ACCEPTFILTER = 0x1000
SO_BINTIME = 0x2000
SO_BROADCAST = 0x20
SO_DEBUG = 0x1
SO_DONTROUTE = 0x10
SO_ERROR = 0x1007
SO_KEEPALIVE = 0x8
SO_LABEL = 0x1009
SO_LINGER = 0x80
SO_LISTENINCQLEN = 0x1013
SO_LISTENQLEN = 0x1012
SO_LISTENQLIMIT = 0x1011
SO_NOSIGPIPE = 0x800
SO_NO_DDP = 0x8000
SO_NO_OFFLOAD = 0x4000
SO_OOBINLINE = 0x100
SO_PEERLABEL = 0x1010
SO_PROTOCOL = 0x1016
SO_PROTOTYPE = 0x1016
SO_RCVBUF = 0x1002
SO_RCVLOWAT = 0x1004
SO_RCVTIMEO = 0x1006
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_SETFIB = 0x1014
SO_SNDBUF = 0x1001
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x1005
SO_TIMESTAMP = 0x400
SO_TYPE = 0x1008
SO_USELOOPBACK = 0x40
SO_USER_COOKIE = 0x1015
SO_VENDOR = 0x80000000
TCIFLUSH = 0x1
TCIOFLUSH = 0x3
TCOFLUSH = 0x2
TCP_CA_NAME_MAX = 0x10
TCP_CONGESTION = 0x40
TCP_INFO = 0x20
TCP_KEEPCNT = 0x400
TCP_KEEPIDLE = 0x100
TCP_KEEPINIT = 0x80
TCP_KEEPINTVL = 0x200
TCP_MAXBURST = 0x4
TCP_MAXHLEN = 0x3c
TCP_MAXOLEN = 0x28
TCP_MAXSEG = 0x2
TCP_MAXWIN = 0xffff
TCP_MAX_SACK = 0x4
TCP_MAX_WINSHIFT = 0xe
TCP_MD5SIG = 0x10
TCP_MINMSS = 0xd8
TCP_MSS = 0x218
TCP_NODELAY = 0x1
TCP_NOOPT = 0x8
TCP_NOPUSH = 0x4
TCP_VENDOR = 0x80000000
TCSAFLUSH = 0x2
TIOCCBRK = 0x2000747a
TIOCCDTR = 0x20007478
TIOCCONS = 0x80047462
TIOCDRAIN = 0x2000745e
TIOCEXCL = 0x2000740d
TIOCEXT = 0x80047460
TIOCFLUSH = 0x80047410
TIOCGDRAINWAIT = 0x40047456
TIOCGETA = 0x402c7413
TIOCGETD = 0x4004741a
TIOCGPGRP = 0x40047477
TIOCGPTN = 0x4004740f
TIOCGSID = 0x40047463
TIOCGWINSZ = 0x40087468
TIOCMBIC = 0x8004746b
TIOCMBIS = 0x8004746c
TIOCMGDTRWAIT = 0x4004745a
TIOCMGET = 0x4004746a
TIOCMSDTRWAIT = 0x8004745b
TIOCMSET = 0x8004746d
TIOCM_CAR = 0x40
TIOCM_CD = 0x40
TIOCM_CTS = 0x20
TIOCM_DCD = 0x40
TIOCM_DSR = 0x100
TIOCM_DTR = 0x2
TIOCM_LE = 0x1
TIOCM_RI = 0x80
TIOCM_RNG = 0x80
TIOCM_RTS = 0x4
TIOCM_SR = 0x10
TIOCM_ST = 0x8
TIOCNOTTY = 0x20007471
TIOCNXCL = 0x2000740e
TIOCOUTQ = 0x40047473
TIOCPKT = 0x80047470
TIOCPKT_DATA = 0x0
TIOCPKT_DOSTOP = 0x20
TIOCPKT_FLUSHREAD = 0x1
TIOCPKT_FLUSHWRITE = 0x2
TIOCPKT_IOCTL = 0x40
TIOCPKT_NOSTOP = 0x10
TIOCPKT_START = 0x8
TIOCPKT_STOP = 0x4
TIOCPTMASTER = 0x2000741c
TIOCSBRK = 0x2000747b
TIOCSCTTY = 0x20007461
TIOCSDRAINWAIT = 0x80047457
TIOCSDTR = 0x20007479
TIOCSETA = 0x802c7414
TIOCSETAF = 0x802c7416
TIOCSETAW = 0x802c7415
TIOCSETD = 0x8004741b
TIOCSIG = 0x2004745f
TIOCSPGRP = 0x80047476
TIOCSTART = 0x2000746e
TIOCSTAT = 0x20007465
TIOCSTI = 0x80017472
TIOCSTOP = 0x2000746f
TIOCSWINSZ = 0x80087467
TIOCTIMESTAMP = 0x40087459
TIOCUCNTL = 0x80047466
TOSTOP = 0x400000
VDISCARD = 0xf
VDSUSP = 0xb
VEOF = 0x0
VEOL = 0x1
VEOL2 = 0x2
VERASE = 0x3
VERASE2 = 0x7
VINTR = 0x8
VKILL = 0x5
VLNEXT = 0xe
VMIN = 0x10
VQUIT = 0x9
VREPRINT = 0x6
VSTART = 0xc
VSTATUS = 0x12
VSTOP = 0xd
VSUSP = 0xa
VTIME = 0x11
VWERASE = 0x4
WCONTINUED = 0x4
WCOREFLAG = 0x80
WEXITED = 0x10
WLINUXCLONE = 0x80000000
WNOHANG = 0x1
WNOWAIT = 0x8
WSTOPPED = 0x2
WTRAPPED = 0x20
WUNTRACED = 0x2
)
// Errors: errno values wrapped in Errno.  Generated table (see the
// mkerrors.sh/cgo header) -- do not edit by hand.  BSD-style numbering;
// constants such as ECAPMODE/ENOTCAPABLE suggest a FreeBSD target (confirm
// against the generator).  Note the aliases visible below:
// EAGAIN == EWOULDBLOCK (0x23), ENOTSUP == EOPNOTSUPP (0x2d),
// and ELAST == EOWNERDEAD (0x60) marks the highest errno.
const (
	E2BIG           = Errno(0x7)
	EACCES          = Errno(0xd)
	EADDRINUSE      = Errno(0x30)
	EADDRNOTAVAIL   = Errno(0x31)
	EAFNOSUPPORT    = Errno(0x2f)
	EAGAIN          = Errno(0x23)
	EALREADY        = Errno(0x25)
	EAUTH           = Errno(0x50)
	EBADF           = Errno(0x9)
	EBADMSG         = Errno(0x59)
	EBADRPC         = Errno(0x48)
	EBUSY           = Errno(0x10)
	ECANCELED       = Errno(0x55)
	ECAPMODE        = Errno(0x5e)
	ECHILD          = Errno(0xa)
	ECONNABORTED    = Errno(0x35)
	ECONNREFUSED    = Errno(0x3d)
	ECONNRESET      = Errno(0x36)
	EDEADLK         = Errno(0xb)
	EDESTADDRREQ    = Errno(0x27)
	EDOM            = Errno(0x21)
	EDOOFUS         = Errno(0x58)
	EDQUOT          = Errno(0x45)
	EEXIST          = Errno(0x11)
	EFAULT          = Errno(0xe)
	EFBIG           = Errno(0x1b)
	EFTYPE          = Errno(0x4f)
	EHOSTDOWN       = Errno(0x40)
	EHOSTUNREACH    = Errno(0x41)
	EIDRM           = Errno(0x52)
	EILSEQ          = Errno(0x56)
	EINPROGRESS     = Errno(0x24)
	EINTR           = Errno(0x4)
	EINVAL          = Errno(0x16)
	EIO             = Errno(0x5)
	EISCONN         = Errno(0x38)
	EISDIR          = Errno(0x15)
	ELAST           = Errno(0x60)
	ELOOP           = Errno(0x3e)
	EMFILE          = Errno(0x18)
	EMLINK          = Errno(0x1f)
	EMSGSIZE        = Errno(0x28)
	EMULTIHOP       = Errno(0x5a)
	ENAMETOOLONG    = Errno(0x3f)
	ENEEDAUTH       = Errno(0x51)
	ENETDOWN        = Errno(0x32)
	ENETRESET       = Errno(0x34)
	ENETUNREACH     = Errno(0x33)
	ENFILE          = Errno(0x17)
	ENOATTR         = Errno(0x57)
	ENOBUFS         = Errno(0x37)
	ENODEV          = Errno(0x13)
	ENOENT          = Errno(0x2)
	ENOEXEC         = Errno(0x8)
	ENOLCK          = Errno(0x4d)
	ENOLINK         = Errno(0x5b)
	ENOMEM          = Errno(0xc)
	ENOMSG          = Errno(0x53)
	ENOPROTOOPT     = Errno(0x2a)
	ENOSPC          = Errno(0x1c)
	ENOSYS          = Errno(0x4e)
	ENOTBLK         = Errno(0xf)
	ENOTCAPABLE     = Errno(0x5d)
	ENOTCONN        = Errno(0x39)
	ENOTDIR         = Errno(0x14)
	ENOTEMPTY       = Errno(0x42)
	ENOTRECOVERABLE = Errno(0x5f)
	ENOTSOCK        = Errno(0x26)
	ENOTSUP         = Errno(0x2d)
	ENOTTY          = Errno(0x19)
	ENXIO           = Errno(0x6)
	EOPNOTSUPP      = Errno(0x2d)
	EOVERFLOW       = Errno(0x54)
	EOWNERDEAD      = Errno(0x60)
	EPERM           = Errno(0x1)
	EPFNOSUPPORT    = Errno(0x2e)
	EPIPE           = Errno(0x20)
	EPROCLIM        = Errno(0x43)
	EPROCUNAVAIL    = Errno(0x4c)
	EPROGMISMATCH   = Errno(0x4b)
	EPROGUNAVAIL    = Errno(0x4a)
	EPROTO          = Errno(0x5c)
	EPROTONOSUPPORT = Errno(0x2b)
	EPROTOTYPE      = Errno(0x29)
	ERANGE          = Errno(0x22)
	EREMOTE         = Errno(0x47)
	EROFS           = Errno(0x1e)
	ERPCMISMATCH    = Errno(0x49)
	ESHUTDOWN       = Errno(0x3a)
	ESOCKTNOSUPPORT = Errno(0x2c)
	ESPIPE          = Errno(0x1d)
	ESRCH           = Errno(0x3)
	ESTALE          = Errno(0x46)
	ETIMEDOUT       = Errno(0x3c)
	ETOOMANYREFS    = Errno(0x3b)
	ETXTBSY         = Errno(0x1a)
	EUSERS          = Errno(0x44)
	EWOULDBLOCK     = Errno(0x23)
	EXDEV           = Errno(0x12)
)
// Signals: signal numbers wrapped in Signal.  Generated table -- do not
// edit by hand.  Note the aliases visible below: SIGIOT == SIGABRT (0x6)
// and SIGTHR == SIGLWP (0x20).
const (
	SIGABRT   = Signal(0x6)
	SIGALRM   = Signal(0xe)
	SIGBUS    = Signal(0xa)
	SIGCHLD   = Signal(0x14)
	SIGCONT   = Signal(0x13)
	SIGEMT    = Signal(0x7)
	SIGFPE    = Signal(0x8)
	SIGHUP    = Signal(0x1)
	SIGILL    = Signal(0x4)
	SIGINFO   = Signal(0x1d)
	SIGINT    = Signal(0x2)
	SIGIO     = Signal(0x17)
	SIGIOT    = Signal(0x6)
	SIGKILL   = Signal(0x9)
	SIGLIBRT  = Signal(0x21)
	SIGLWP    = Signal(0x20)
	SIGPIPE   = Signal(0xd)
	SIGPROF   = Signal(0x1b)
	SIGQUIT   = Signal(0x3)
	SIGSEGV   = Signal(0xb)
	SIGSTOP   = Signal(0x11)
	SIGSYS    = Signal(0xc)
	SIGTERM   = Signal(0xf)
	SIGTHR    = Signal(0x20)
	SIGTRAP   = Signal(0x5)
	SIGTSTP   = Signal(0x12)
	SIGTTIN   = Signal(0x15)
	SIGTTOU   = Signal(0x16)
	SIGURG    = Signal(0x10)
	SIGUSR1   = Signal(0x1e)
	SIGUSR2   = Signal(0x1f)
	SIGVTALRM = Signal(0x1a)
	SIGWINCH  = Signal(0x1c)
	SIGXCPU   = Signal(0x18)
	SIGXFSZ   = Signal(0x19)
)
// Error table
//
// Human-readable messages indexed by errno value (index i holds the text
// for Errno(i)); index 0 is intentionally unused. Presumably consumed by
// Errno's Error method elsewhere in the package — not visible in this
// chunk. Machine-generated; keep in sync with the Errno constants above.
var errors = [...]string{
	1:  "operation not permitted",
	2:  "no such file or directory",
	3:  "no such process",
	4:  "interrupted system call",
	5:  "input/output error",
	6:  "device not configured",
	7:  "argument list too long",
	8:  "exec format error",
	9:  "bad file descriptor",
	10: "no child processes",
	11: "resource deadlock avoided",
	12: "cannot allocate memory",
	13: "permission denied",
	14: "bad address",
	15: "block device required",
	16: "device busy",
	17: "file exists",
	18: "cross-device link",
	19: "operation not supported by device",
	20: "not a directory",
	21: "is a directory",
	22: "invalid argument",
	23: "too many open files in system",
	24: "too many open files",
	25: "inappropriate ioctl for device",
	26: "text file busy",
	27: "file too large",
	28: "no space left on device",
	29: "illegal seek",
	30: "read-only file system",
	31: "too many links",
	32: "broken pipe",
	33: "numerical argument out of domain",
	34: "result too large",
	35: "resource temporarily unavailable",
	36: "operation now in progress",
	37: "operation already in progress",
	38: "socket operation on non-socket",
	39: "destination address required",
	40: "message too long",
	41: "protocol wrong type for socket",
	42: "protocol not available",
	43: "protocol not supported",
	44: "socket type not supported",
	45: "operation not supported",
	46: "protocol family not supported",
	47: "address family not supported by protocol family",
	48: "address already in use",
	49: "can't assign requested address",
	50: "network is down",
	51: "network is unreachable",
	52: "network dropped connection on reset",
	53: "software caused connection abort",
	54: "connection reset by peer",
	55: "no buffer space available",
	56: "socket is already connected",
	57: "socket is not connected",
	58: "can't send after socket shutdown",
	59: "too many references: can't splice",
	60: "operation timed out",
	61: "connection refused",
	62: "too many levels of symbolic links",
	63: "file name too long",
	64: "host is down",
	65: "no route to host",
	66: "directory not empty",
	67: "too many processes",
	68: "too many users",
	69: "disc quota exceeded",
	70: "stale NFS file handle",
	71: "too many levels of remote in path",
	72: "RPC struct is bad",
	73: "RPC version wrong",
	74: "RPC prog. not avail",
	75: "program version wrong",
	76: "bad procedure for program",
	77: "no locks available",
	78: "function not implemented",
	79: "inappropriate file type or format",
	80: "authentication error",
	81: "need authenticator",
	82: "identifier removed",
	83: "no message of desired type",
	84: "value too large to be stored in data type",
	85: "operation canceled",
	86: "illegal byte sequence",
	87: "attribute not found",
	88: "programming error",
	89: "bad message",
	90: "multihop attempted",
	91: "link has been severed",
	92: "protocol error",
	93: "capabilities insufficient",
	94: "not permitted in capability mode",
	95: "state not recoverable",
	96: "previous owner died",
}
// Signal table
//
// Human-readable descriptions indexed by signal number (index i holds the
// text for Signal(i)); index 0 is intentionally unused. Entries 32 and 33
// (SIGLWP/SIGTHR and SIGLIBRT) are reported as "unknown signal".
// Machine-generated; keep in sync with the Signal constants above.
var signals = [...]string{
	1:  "hangup",
	2:  "interrupt",
	3:  "quit",
	4:  "illegal instruction",
	5:  "trace/BPT trap",
	6:  "abort trap",
	7:  "EMT trap",
	8:  "floating point exception",
	9:  "killed",
	10: "bus error",
	11: "segmentation fault",
	12: "bad system call",
	13: "broken pipe",
	14: "alarm clock",
	15: "terminated",
	16: "urgent I/O condition",
	17: "suspended (signal)",
	18: "suspended",
	19: "continued",
	20: "child exited",
	21: "stopped (tty input)",
	22: "stopped (tty output)",
	23: "I/O possible",
	24: "cputime limit exceeded",
	25: "filesize limit exceeded",
	26: "virtual timer expired",
	27: "profiling timer expired",
	28: "window size changes",
	29: "information request",
	30: "user defined signal 1",
	31: "user defined signal 2",
	32: "unknown signal",
	33: "unknown signal",
}
|
go
|
github
|
https://github.com/golang/go
|
src/syscall/zerrors_freebsd_386.go
|
@import "../../css/type.scss";

// Inner wrapper of the button: lays the optional icon and the label out
// side by side. Sizing is driven by the --base custom property, so the
// icon scales with the component.
.content {
  display: flex;
  align-items: center;
  justify-content: space-around;

  svg {
    // Half of --base keeps the icon-to-label gap proportional.
    margin-right: calc(var(--base) / 2);
    width: var(--base);
    height: var(--base);
  }
}

// Text label; typography comes from the shared %label placeholder
// (imported from type.scss above).
.label {
  @extend %label;
  display: flex;
  align-items: center;
}

// Base button shell — color/border comes from the modifier classes below.
.button {
  text-decoration: none;
  display: inline-flex;
  padding: 12px 18px;
  margin-bottom: var(--base);
}

// Color variants: "--white" / "--black" name the page background the
// button sits on, not the button's own color.
.primary--white {
  background-color: black;
  color: white;
}

.primary--black {
  background-color: white;
  color: black;
}

// Secondary variants draw a 1px inset ring instead of a solid fill.
.secondary--white {
  background-color: white;
  box-shadow: inset 0 0 0 1px black;
}

.secondary--black {
  background-color: black;
  box-shadow: inset 0 0 0 1px white;
}

// "default" appearance: strip padding; the negative margin optically
// aligns the content with surrounding text.
.appearance--default {
  padding: 0;
  margin-left: -8px;
}
|
unknown
|
github
|
https://github.com/vercel/next.js
|
examples/cms-payload/components/Button/index.module.scss
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <benno@ansible.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Optional-dependency probe. Neutron was renamed from "quantum", so try the
# new client first and fall back to the old one; keystoneclient is always
# required. HAVE_DEPS records the outcome so main() can fail with a clear
# message instead of an ImportError traceback.
try:
    try:
        from neutronclient.neutron import client
    except ImportError:
        from quantumclient.quantum import client
    from keystoneclient.v2_0 import client as ksclient
    HAVE_DEPS = True
except ImportError:
    HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_router
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_router instead
short_description: Create or Remove router from openstack
description:
- Create or Delete routers from OpenStack
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be give to the router
required: true
default: None
tenant_name:
description:
- Name of the tenant for which the router has to be created, if none router would be created for the login tenant.
required: false
default: None
admin_state_up:
description:
- desired admin state of the created router .
required: false
default: true
requirements:
- "python >= 2.6"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Creates a router for tenant admin
- quantum_router: state=present
login_username=admin
login_password=admin
login_tenant_name=admin
name=router1"
'''
_os_keystone = None
_os_tenant_id = None
def _get_ksclient(module, kwargs):
    """Authenticate to keystone and cache the client in ``_os_keystone``.

    :param module: AnsibleModule used for error reporting (fail_json).
    :param kwargs: dict of module params holding login_username,
        login_password, login_tenant_name and auth_url.
    :returns: an authenticated keystoneclient Client instance.
    """
    try:
        kclient = ksclient.Client(username=kwargs.get('login_username'),
                                  password=kwargs.get('login_password'),
                                  tenant_name=kwargs.get('login_tenant_name'),
                                  auth_url=kwargs.get('auth_url'))
    except Exception as e:
        # Bug fix: ``e.message`` is deprecated since Python 2.6 and gone in
        # Python 3; format the exception object itself instead.
        module.fail_json(msg="Error authenticating to the keystone: %s " % e)
    global _os_keystone
    _os_keystone = kclient
    return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception as e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
    """Build a neutron (or quantum) API client from the module params.

    Authenticates via keystone first, then connects to the network
    endpoint with the obtained token.

    :param module: AnsibleModule used for error reporting.
    :param kwargs: dict of module params (login credentials, auth_url).
    :returns: a v2.0 neutron client instance.
    """
    _ksclient = _get_ksclient(module, kwargs)
    token = _ksclient.auth_token
    endpoint = _get_endpoint(module, _ksclient)
    # Use a fresh name instead of rebinding the ``kwargs`` parameter, which
    # the original shadowed.
    client_kwargs = {
        'token': token,
        'endpoint_url': endpoint,
    }
    try:
        neutron = client.Client('2.0', **client_kwargs)
    except Exception as e:
        # Bug fix: ``e.message`` is unavailable on Python 3.
        module.fail_json(msg="Error in connecting to neutron: %s " % e)
    return neutron
def _set_tenant_id(module):
    """Resolve and cache the tenant id in the module-level _os_tenant_id.

    Falls back to the login tenant when no ``tenant_name`` param is given;
    otherwise looks the name up in keystone. Fails the module when no id
    can be determined.
    """
    global _os_tenant_id
    requested = module.params['tenant_name']
    if not requested:
        # No explicit tenant requested: use the tenant we logged in as.
        _os_tenant_id = _os_keystone.tenant_id
    else:
        matches = [tenant.id for tenant in _os_keystone.tenants.list()
                   if tenant.name == requested]
        if matches:
            _os_tenant_id = matches[0]
    if not _os_tenant_id:
        module.fail_json(msg="The tenant id cannot be found, please check the parameters")
def _get_router_id(module, neutron):
    """Return the id of the router named ``module.params['name']``.

    The lookup is scoped to the cached ``_os_tenant_id``; returns None when
    no matching router exists.

    :param module: AnsibleModule used for params and error reporting.
    :param neutron: connected neutron client.
    """
    kwargs = {
        'name': module.params['name'],
        'tenant_id': _os_tenant_id,
    }
    try:
        routers = neutron.list_routers(**kwargs)
    except Exception as e:
        # Bug fix: ``e.message`` is unavailable on Python 3.
        module.fail_json(msg="Error in getting the router list: %s " % e)
    if not routers['routers']:
        return None
    # Names are assumed unique per tenant, so the first hit is the router.
    return routers['routers'][0]['id']
def _create_router(module, neutron):
    """Create a router in the cached tenant and return its id.

    :param module: AnsibleModule providing ``name`` and ``admin_state_up``.
    :param neutron: connected neutron client.
    :returns: id of the newly created router.
    """
    router = {
        'name': module.params['name'],
        'tenant_id': _os_tenant_id,
        'admin_state_up': module.params['admin_state_up'],
    }
    try:
        new_router = neutron.create_router(dict(router=router))
    except Exception as e:
        # Bug fix: ``e.message`` is unavailable on Python 3.
        module.fail_json(msg="Error in creating router: %s" % e)
    return new_router['router']['id']
def _delete_router(module, neutron, router_id):
try:
neutron.delete_router(router_id)
except:
module.fail_json("Error in deleting the router")
return True
def main():
    """Module entry point: converge the router to the requested state.

    ``state=present`` creates the router if missing; ``state=absent``
    deletes it if found. Exits through AnsibleModule.exit_json/fail_json.
    """
    argument_spec = openstack_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        tenant_name=dict(default=None),
        state=dict(default='present', choices=['absent', 'present']),
        admin_state_up=dict(type='bool', default=True),
    ))
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAVE_DEPS:
        module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required')

    neutron = _get_neutron_client(module, module.params)
    _set_tenant_id(module)

    # Both states start by looking the router up, so do it once.
    router_id = _get_router_id(module, neutron)

    if module.params['state'] == 'present':
        if router_id:
            module.exit_json(changed=False, result="success", id=router_id)
        else:
            new_id = _create_router(module, neutron)
            module.exit_json(changed=True, result="Created", id=new_id)

    # state == 'absent'
    if router_id:
        _delete_router(module, neutron, router_id)
        module.exit_json(changed=True, result="deleted")
    module.exit_json(changed=False, result="success")
# this is magic, see lib/ansible/module_common.py
# (doc fix: the original comment was garbled — AnsibleModule and
# openstack_argument_spec are injected via these star imports, which is why
# they appear at the bottom of legacy Ansible modules.)
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *

if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
           algorithm='auto', leaf_size=30, p=2, sample_weight=None,
           random_state=None):
    """Perform DBSCAN clustering from vector array or distance matrix.

    Read more in the :ref:`User Guide <dbscan>`.

    Parameters
    ----------
    X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
            array of shape (n_samples, n_samples)
        A feature array, or array of distances between samples if
        ``metric='precomputed'``.

    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.

    p : float, optional
        The power of the Minkowski metric to be used to calculate distance
        between points.

    sample_weight : array, shape (n_samples,), optional
        Weight of each sample, such that a sample with a weight of at least
        ``min_samples`` is by itself a core sample; a sample with negative
        weight may inhibit its eps-neighbor from being core.
        Note that weights are absolute, and default to 1.

    random_state : numpy.RandomState, optional
        Deprecated and ignored as of version 0.16, will be removed in version
        0.18. DBSCAN does not use random initialization.

    Returns
    -------
    core_samples : array [n_core_samples]
        Indices of core samples.

    labels : array [n_samples]
        Cluster labels for each point. Noisy samples are given the label -1.

    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.

    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).

    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.

    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """
    if not eps > 0.0:
        raise ValueError("eps must be positive.")
    if random_state is not None:
        warnings.warn("The parameter random_state is deprecated in 0.16 "
                      "and will be removed in version 0.18. "
                      "DBSCAN is deterministic except for rare border cases.",
                      category=DeprecationWarning)
    X = check_array(X, accept_sparse='csr')
    if sample_weight is not None:
        sample_weight = np.asarray(sample_weight)
        check_consistent_length(X, sample_weight)
    # Calculate neighborhood for all samples. This leaves the original point
    # in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While True, its useless information)
    if metric == 'precomputed' and sparse.issparse(X):
        # Fast path: neighborhoods are read straight off the sparse distance
        # matrix instead of being queried from a tree.
        neighborhoods = np.empty(X.shape[0], dtype=object)
        X.sum_duplicates()  # XXX: modifies X's internals in-place
        # Keep only the stored entries within eps of each point.
        X_mask = X.data <= eps
        masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
        # masked_indptr[i]: count of kept entries in rows 0..i (CSR indptr
        # arithmetic over the boolean mask).
        masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
        # insert the diagonal: a point is its own neighbor, but 0 distance
        # means absence from sparse matrix data
        masked_indices = np.insert(masked_indices, masked_indptr,
                                   np.arange(X.shape[0]))
        # Shift row boundaries to account for the inserted diagonal entries.
        masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
        # split into rows
        neighborhoods[:] = np.split(masked_indices, masked_indptr)
    else:
        neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
                                           leaf_size=leaf_size,
                                           metric=metric, p=p)
        neighbors_model.fit(X)
        # This has worst case O(n^2) memory complexity
        neighborhoods = neighbors_model.radius_neighbors(X, eps,
                                                         return_distance=False)
    if sample_weight is None:
        # Unweighted: a point's "mass" is simply its neighbor count.
        n_neighbors = np.array([len(neighbors)
                                for neighbors in neighborhoods])
    else:
        n_neighbors = np.array([np.sum(sample_weight[neighbors])
                                for neighbors in neighborhoods])
    # Initially, all samples are noise.
    labels = -np.ones(X.shape[0], dtype=np.intp)
    # A list of all core samples found.
    core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
    # Cython helper expands clusters from the core samples in place.
    dbscan_inner(core_samples, neighborhoods, labels)
    return np.where(core_samples)[0], labels
class DBSCAN(BaseEstimator, ClusterMixin):
    """Perform DBSCAN clustering from vector array or distance matrix.

    DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
    Finds core samples of high density and expands clusters from them.
    Good for data which contains clusters of similar density.

    Read more in the :ref:`User Guide <dbscan>`.

    Parameters
    ----------
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood.

    min_samples : int, optional
        The number of samples (or total weight) in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
        metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a sparse matrix, in which case only "nonzero"
        elements may be considered neighbors for DBSCAN.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        See NearestNeighbors module documentation for details.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.

    random_state : numpy.RandomState, optional
        Deprecated and ignored as of version 0.16, will be removed in version
        0.18. DBSCAN does not use random initialization.

    Attributes
    ----------
    core_sample_indices_ : array, shape = [n_core_samples]
        Indices of core samples.

    components_ : array, shape = [n_core_samples, n_features]
        Copy of each core sample found by training.

    labels_ : array, shape = [n_samples]
        Cluster labels for each point in the dataset given to fit().
        Noisy samples are given the label -1.

    Notes
    -----
    See examples/cluster/plot_dbscan.py for an example.

    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n).

    Sparse neighborhoods can be precomputed using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
    with ``mode='distance'``.

    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
    """

    def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
                 algorithm='auto', leaf_size=30, p=None, random_state=None):
        # Per sklearn convention, __init__ only stores the parameters;
        # all validation happens in fit().
        self.eps = eps
        self.min_samples = min_samples
        self.metric = metric
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.p = p
        self.random_state = random_state

    def fit(self, X, y=None, sample_weight=None):
        """Perform DBSCAN clustering from features or distance matrix.

        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.
        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with negative
            weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.

        Returns
        -------
        self
        """
        X = check_array(X, accept_sparse='csr')
        # Delegate the whole computation to the module-level dbscan();
        # get_params() forwards every constructor parameter unchanged.
        clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
        self.core_sample_indices_, self.labels_ = clust
        if len(self.core_sample_indices_):
            # fix for scipy sparse indexing issue
            self.components_ = X[self.core_sample_indices_].copy()
        else:
            # no core samples
            self.components_ = np.empty((0, X.shape[1]))
        return self

    def fit_predict(self, X, y=None, sample_weight=None):
        """Performs clustering on X and returns cluster labels.

        Parameters
        ----------
        X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
                array of shape (n_samples, n_samples)
            A feature array, or array of distances between samples if
            ``metric='precomputed'``.
        sample_weight : array, shape (n_samples,), optional
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with negative
            weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            cluster labels
        """
        self.fit(X, sample_weight=sample_weight)
        return self.labels_
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"variable": {
"input": {
"type": "string"
}
},
"resource": {
"foo_resource": {
"a": {
"value": "${var.input}"
}
},
"bar_resource": {
"c": {}
}
}
}
|
json
|
github
|
https://github.com/hashicorp/terraform
|
internal/configs/testdata/valid-modules/with-tests-json/main.tf.json
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.