code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
#!/usr/bin/env python
"""
Simple AIS library.
This library supports creating and decoding NMEA formatted AIS type 1,5,24 messages
@author Daniel Hong
https://github.com/doodleincode/aislib
This program is licensed under the GNU GENERAL PUBLIC LICENSE Version 2.
A LICENSE file should have accompanied this program.
"""
import bitstring
import binascii
# Create a character encoding and reversed character encoding map which
# we will use to encode and decode, respectively, AIS bit streams
# 6-bit AIS payload armoring alphabet: a 6-bit value v (0-63) is transmitted
# as encodingchars[v].  Values 0-39 map to ASCII 48-87 ('0'..'W') and values
# 40-63 map to ASCII 96-119 ('`'..'w').
encodingchars = [
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',
    '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
    'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g',
    'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w'
]

# Reverse lookup (armoring character -> 6-bit value) used when decoding
# an AIS payload.
re_encodingchars = dict((ch, idx) for idx, ch in enumerate(encodingchars))
# END character encoding map
# END character encoding map
# AIS 6-bit text alphabet: a 6-bit value v (0-63) maps to AISchars[v].
# '@' (value 0) doubles as the padding character for fixed-width text fields.
AISchars = '@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_ !"#$%&\'()*+,-./0123456789:;<=>?'

# Reverse lookup (character -> 6-bit value) used when encoding text fields.
re_AISchars = dict((ch, idx) for idx, ch in enumerate(AISchars))
def AISString2Bits(name, length=20):
    """
    Encode a text field into an AIS 6-bit bitstream.

    The string is truncated to `length` characters, or right-padded with the
    AIS padding character '@' up to `length`; each character is then emitted
    as its 6-bit value from the AIS text alphabet.
    """
    padded = name[:length].ljust(length, '@')
    return bitstring.Bits().join(['uint:6=%d' % re_AISchars[ch] for ch in padded])
def int2bin6(num):
    """
    Return the low 6 bits of `num` as a '0'/'1' string, most significant
    bit first (e.g. 42 -> '101010').
    """
    return format(num & 0b111111, '06b')
class CRCInvalidError(Exception):
    """Raised when a received NMEA checksum does not match the computed one."""
class AISMessage(object):
    """
    Base class for AIS messages.

    Subclasses describe their payload layout by passing `elements` to
    __init__; each element is stored as a bitstring.Bits value and exposed
    as a dynamic attribute (read via __getattr__, written via __setattr__).
    """

    # Integer types accepted for element values; on Python 2 this includes
    # `long`, on Python 3 `long` no longer exists.
    try:
        _INT_TYPES = (int, long)
    except NameError:
        _INT_TYPES = (int,)

    def __init__(self, elements):
        """
        @param elements dict mapping element name ->
                        [data_type, num_bits, default_value]
        """
        # Bug fix: _attrs/_bitmap used to be *class-level* dicts, so every
        # message instance (of every subclass!) shared and clobbered the
        # same state.  Create per-instance dicts, bypassing our own
        # __setattr__ which only accepts integer element values.
        object.__setattr__(self, '_attrs', {})
        object.__setattr__(self, '_bitmap', {})
        for key, arr in elements.items():
            # arr[0] == data type of the element
            # arr[1] == number of bits for given element
            # arr[2] == the default value for the element
            self._bitmap[key] = [arr[0], arr[1]]
            # Set default value (goes through __setattr__ below)
            self.__setattr__(key, arr[2])

    def __getattr__(self, name):
        """
        Dynamic property lookup: resolve message elements from _attrs.
        Only called when normal attribute lookup fails.
        """
        # Go through __dict__ to avoid recursing before __init__ has run
        # (e.g. during copy/pickle).
        attrs = self.__dict__.get('_attrs')
        if attrs is not None and name in attrs:
            return attrs[name]
        # Bug fix: the old fallback called super().__getattr__, which does
        # not exist on object and produced a misleading error message.
        raise AttributeError(name)

    def __setattr__(self, name, value):
        """
        Dynamic property "setter": store message elements as bitstring.Bits.

        @raise TypeError if value is not an integer
        @raise AttributeError if name is not a declared message element
        """
        # Allow the internal bookkeeping dicts to be (re)bound directly.
        # (Previously this branch was dead code: the integer type check ran
        # first and rejected any dict value.)
        if name in ('_attrs', '_bitmap'):
            object.__setattr__(self, name, value)
            return
        if type(value) not in self._INT_TYPES:
            raise TypeError("Value must be an integer.")
        if name in self._bitmap:
            # String format is: [datatype]:[num_bits]=[value]
            self._attrs[name] = bitstring.Bits(
                "%s:%d=%d" % (self._bitmap[name][0], self._bitmap[name][1], value))
        else:
            raise AttributeError("Unsupported AIS message element.")

    def get_attr(self, name):
        """
        Returns an integer representation of the binary value for the given
        element name, honoring the element's declared signedness.

        @param name Name of the AIS message element to retrieve
        @return Human readable int value. If invalid element, returns None
        """
        if name in self._attrs:
            if self._bitmap[name][0] == "int":
                return self._attrs[name].int
            return self._attrs[name].uint
        return None

    ## Sub-classes should implement the methods below ##

    def build_bitstream(self):
        """
        Build the bitstream which we will be using to encode the payload.
        This involves concatenating all the message elements into one
        bitstring, in wire order.  Sub-classes that extend AISMessage are
        required to implement this method.  Example implementation:

            return bitstring.Bits().join([
                self.element_1, self.element_2, [...]
            ])
        """
        pass

    def unpack(self, bitstream):
        """
        Unpack a bitstream into AIS message elements.  Sub-classes can
        optionally implement this method to support decoding of AIS
        messages.  Example implementation:

            self._attrs["element_1"] = bitstring.Bits(bin=bitstream[0:6])
            self._attrs["element_2"] = bitstring.Bits(bin=bitstream[6:8])
            [...]
        """
        pass
class AISPositionReportMessage(AISMessage):
    """AIS type 1: position report (168 bits)."""

    # Wire-order field layout: (element name, bit width).  Widths sum to 168.
    _FIELDS = (
        ('id', 6), ('repeat', 2), ('mmsi', 30), ('status', 4), ('rot', 8),
        ('sog', 10), ('pa', 1), ('lon', 28), ('lat', 27), ('cog', 12),
        ('heading', 9), ('ts', 6), ('smi', 2), ('spare', 3), ('raim', 1),
        ('comm_state', 19),
    )

    def __init__(self, id=1, repeat=0, mmsi=0, status=15, rot=-128, sog=0, pa=0,
                 lon=0, lat=0, cog=3600, heading=511, ts=60, smi=0, spare=0,
                 raim=0, comm_state=0):
        """
        Returns an instance of an AIS Position Report Message class.

        The parameters contain the default values; simply set the parameters
        whose values need to change. Ex:

            aismsg = AISPositionReportMessage(mmsi=12345, lon=4567, lat=5432)
        """
        super(AISPositionReportMessage, self).__init__({
            # message_element : ["data_type", num_bits, initial_value]
            'id': ["uint", 6, id],
            'repeat': ["uint", 2, repeat],
            'mmsi': ["uint", 30, mmsi],
            'status': ["uint", 4, status],
            'rot': ["int", 8, rot],
            'sog': ["uint", 10, sog],
            'pa': ["uint", 1, pa],
            'lon': ["int", 28, lon],
            'lat': ["int", 27, lat],
            'cog': ["uint", 12, cog],
            'heading': ["uint", 9, heading],
            'ts': ["uint", 6, ts],
            'smi': ["uint", 2, smi],
            'spare': ["uint", 3, spare],
            'raim': ["uint", 1, raim],
            'comm_state': ["uint", 19, comm_state]
        })

    def build_bitstream(self):
        """Concatenate every element, in wire order, into a single bitstream."""
        return bitstring.Bits().join(
            [getattr(self, fname) for fname, _width in self._FIELDS])

    def unpack(self, bitstream):
        """Slice a '0'/'1' bitstream string back into message elements."""
        offset = 0
        for fname, width in self._FIELDS:
            self._attrs[fname] = bitstring.Bits(bin=bitstream[offset:offset + width])
            offset += width
class AISStaticAndVoyageReportMessage(AISMessage):
    """AIS type 5: static and voyage related data (424 bits)."""

    def __init__(self, id=5, repeat=0, mmsi=0, ais_version=0, imo=0, callsign=0, shipname=0,
                 shiptype=0, to_bow=0, to_stern=0, to_port=0, to_starboard=0, epfd=1,
                 month=0, day=0, hour=24, minute=60, draught=0,
                 destination=0, dte=0, spare=0):
        """
        Returns an instance of an AIS Static and Voyage Related Data message.

        The parameters contain the default values; simply set the parameters
        whose values need to change.  Text fields (callsign, shipname,
        destination) may be given as a string or as a pre-encoded integer. Ex:

            aismsg = AISStaticAndVoyageReportMessage(mmsi=12345, shipname='ASIAN JADE')
        """
        # Bug fix: the bit-count divisions below use // so they stay
        # integers on Python 3 ("120/6" is the float 20.0 there, which broke
        # the '@' padding arithmetic in AISString2Bits).
        super(AISStaticAndVoyageReportMessage, self).__init__({
            # message_element : ["data_type", num_bits, initial_value]
            'id' : ["uint", 6, id],
            'repeat' : ["uint", 2, repeat],
            'mmsi' : ["uint", 30, mmsi],
            'ais_version' : ["uint", 2, ais_version],
            'imo' : ["uint", 30, imo],
            'callsign' : ["uint", 42, AISString2Bits(callsign, length=42 // 6).int if type(callsign) == str else callsign],
            'shipname' : ["uint", 120, AISString2Bits(shipname, length=120 // 6).int if type(shipname) == str else shipname],
            'shiptype' : ["uint", 8, shiptype],
            'to_bow' : ["uint", 9, to_bow],
            'to_stern' : ["uint", 9, to_stern],
            'to_port' : ["uint", 6, to_port],
            'to_starboard' : ["uint", 6, to_starboard],
            'epfd' : ["uint", 4, epfd],
            'month' : ["uint", 4, month],
            'day' : ["uint", 5, day],
            'hour' : ["uint", 5, hour],
            'minute' : ["uint", 6, minute],
            'draught' : ["uint", 8, draught],
            'destination' : ["uint", 120, AISString2Bits(destination, length=120 // 6).int if type(destination) == str else destination],
            'dte' : ["uint", 1, dte],
            'spare' : ["uint", 1, spare]
        })

    def build_bitstream(self):
        """Concatenate all message elements, in wire order, into one bitstream."""
        return bitstring.Bits().join([
            self.id,
            self.repeat,
            self.mmsi,
            self.ais_version,
            self.imo,
            self.callsign,
            self.shipname,
            self.shiptype,
            self.to_bow,
            self.to_stern,
            self.to_port,
            self.to_starboard,
            self.epfd,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.draught,
            self.destination,
            self.dte,
            self.spare
        ])

    def unpack(self, bitstream):
        """Slice a '0'/'1' bitstream back into this message's elements."""
        # Offsets follow the type 5 wire layout (424 bits total).
        self._attrs["id"] = bitstring.Bits(bin=bitstream[0:6])
        self._attrs["repeat"] = bitstring.Bits(bin=bitstream[6:8])
        self._attrs["mmsi"] = bitstring.Bits(bin=bitstream[8:38])
        self._attrs["ais_version"] = bitstring.Bits(bin=bitstream[38:40])
        self._attrs["imo"] = bitstring.Bits(bin=bitstream[40:70])
        self._attrs["callsign"] = bitstring.Bits(bin=bitstream[70:112])
        self._attrs["shipname"] = bitstring.Bits(bin=bitstream[112:232])
        self._attrs["shiptype"] = bitstring.Bits(bin=bitstream[232:240])
        self._attrs["to_bow"] = bitstring.Bits(bin=bitstream[240:249])
        self._attrs["to_stern"] = bitstring.Bits(bin=bitstream[249:258])
        self._attrs["to_port"] = bitstring.Bits(bin=bitstream[258:264])
        self._attrs["to_starboard"] = bitstring.Bits(bin=bitstream[264:270])
        self._attrs["epfd"] = bitstring.Bits(bin=bitstream[270:274])
        self._attrs["month"] = bitstring.Bits(bin=bitstream[274:278])
        self._attrs["day"] = bitstring.Bits(bin=bitstream[278:283])
        self._attrs["hour"] = bitstring.Bits(bin=bitstream[283:288])
        self._attrs["minute"] = bitstring.Bits(bin=bitstream[288:294])
        self._attrs["draught"] = bitstring.Bits(bin=bitstream[294:302])
        self._attrs["destination"] = bitstring.Bits(bin=bitstream[302:422])
        self._attrs["dte"] = bitstring.Bits(bin=bitstream[422:423])
        self._attrs["spare"] = bitstring.Bits(bin=bitstream[423:424])
class AISStaticDataReportAMessage(AISMessage):
    """AIS type 24 part A: static data report (168 bits)."""

    def __init__(self, id=24, repeat=0, mmsi=0, partno=0, shipname=0, spare=0):
        """
        Returns an instance of an AIS Static Data Report Message Format A class.
        The parameters contain the default values, simply set the parameters
        whose values need to change. Ex:

            aismsg = AISStaticDataReportAMessage(mmsi=12345, shipname='ASIAN JADE')
        """
        super(AISStaticDataReportAMessage, self).__init__({
            # message_element : ["data_type", num_bits, initial_value]
            'id' : ["uint", 6, id],
            'repeat' : ["uint", 2, repeat],
            'mmsi' : ["uint", 30, mmsi],
            'partno' : ["uint", 2, partno],
            # Bug fix: 120 // 6 keeps the length an int on Python 3
            # (120/6 is the float 20.0 there, which broke the '@' padding
            # arithmetic in AISString2Bits).
            'shipname' : ["uint", 120, AISString2Bits(shipname, length=120 // 6).int if type(shipname) == str else shipname],
            'spare' : ["uint", 8, spare]
        })

    def build_bitstream(self):
        """Concatenate all message elements, in wire order, into one bitstream."""
        return bitstring.Bits().join([
            self.id,
            self.repeat,
            self.mmsi,
            self.partno,
            self.shipname,
            self.spare
        ])

    def unpack(self, bitstream):
        """Slice a '0'/'1' bitstream back into this message's elements."""
        self._attrs["id"] = bitstring.Bits(bin=bitstream[0:6])
        self._attrs["repeat"] = bitstring.Bits(bin=bitstream[6:8])
        self._attrs["mmsi"] = bitstring.Bits(bin=bitstream[8:38])
        self._attrs["partno"] = bitstring.Bits(bin=bitstream[38:40])
        self._attrs["shipname"] = bitstring.Bits(bin=bitstream[40:160])
        self._attrs["spare"] = bitstring.Bits(bin=bitstream[160:168])
class AISStaticDataReportBMessage(AISMessage):
    """AIS type 24 part B: static data report (168 bits)."""

    def __init__(self, id=24, repeat=0, mmsi=0, partno=1, shiptype=0,
                 vendorid=0, model=0, serial=0, callsign=0,
                 to_bow=0, to_stern=0, to_port=0, to_starboard=0,
                 spare=0):
        """
        Returns an instance of an AIS Static Data Report Message Format B class.
        The parameters contain the default values, simply set the parameters
        whose values need to change. Ex:

            aismsg = AISStaticDataReportBMessage(mmsi=12345, shiptype=60)
        """
        # Bug fix: bit-count divisions use // so they stay integers on
        # Python 3 (plain / yields a float there, which broke the '@'
        # padding arithmetic in AISString2Bits).
        super(AISStaticDataReportBMessage, self).__init__({
            # message_element : ["data_type", num_bits, initial_value]
            'id' : ["uint", 6, id],
            'repeat' : ["uint", 2, repeat],
            'mmsi' : ["uint", 30, mmsi],
            'partno' : ["uint", 2, partno],
            'shiptype' : ["uint", 8, shiptype],
            'vendorid' : ["uint", 18, AISString2Bits(vendorid, length=18 // 6).int if type(vendorid) == str else vendorid],
            'model' : ["uint", 4, model],
            'serial' : ["uint", 20, serial],
            'callsign' : ["uint", 42, AISString2Bits(callsign, length=42 // 6).int if type(callsign) == str else callsign],
            'to_bow' : ["uint", 9, to_bow],
            'to_stern' : ["uint", 9, to_stern],
            'to_port' : ["uint", 6, to_port],
            'to_starboard' : ["uint", 6, to_starboard],
            'spare' : ["uint", 6, spare]
        })

    def build_bitstream(self):
        """Concatenate all message elements, in wire order, into one bitstream."""
        return bitstring.Bits().join([
            self.id,
            self.repeat,
            self.mmsi,
            self.partno,
            self.shiptype,
            self.vendorid,
            self.model,
            self.serial,
            self.callsign,
            self.to_bow,
            self.to_stern,
            self.to_port,
            self.to_starboard,
            self.spare
        ])

    def unpack(self, bitstream):
        """Slice a '0'/'1' bitstream back into this message's elements."""
        self._attrs["id"] = bitstring.Bits(bin=bitstream[0:6])
        self._attrs["repeat"] = bitstring.Bits(bin=bitstream[6:8])
        self._attrs["mmsi"] = bitstring.Bits(bin=bitstream[8:38])
        self._attrs["partno"] = bitstring.Bits(bin=bitstream[38:40])
        self._attrs["shiptype"] = bitstring.Bits(bin=bitstream[40:48])
        self._attrs["vendorid"] = bitstring.Bits(bin=bitstream[48:66])
        self._attrs["model"] = bitstring.Bits(bin=bitstream[66:70])
        self._attrs["serial"] = bitstring.Bits(bin=bitstream[70:90])
        self._attrs["callsign"] = bitstring.Bits(bin=bitstream[90:132])
        self._attrs["to_bow"] = bitstring.Bits(bin=bitstream[132:141])
        self._attrs["to_stern"] = bitstring.Bits(bin=bitstream[141:150])
        self._attrs["to_port"] = bitstring.Bits(bin=bitstream[150:156])
        self._attrs["to_starboard"] = bitstring.Bits(bin=bitstream[156:162])
        self._attrs["spare"] = bitstring.Bits(bin=bitstream[162:168])
class AIS(object):
    """
    Encoder/decoder between AISMessage objects and NMEA AIVDM sentences.
    """

    # Instance of the AISMessage class to encode
    _ais_message = None

    def __init__(self, ais_message):
        """
        @param ais_message The AISMessage instance to encode
        @raise TypeError if ais_message is not an AISMessage
        """
        if not isinstance(ais_message, AISMessage):
            raise TypeError("Variable 'ais_message' is not an instance of 'AISMessage'.")
        self._ais_message = ais_message

    def build_payload(self, invert_crc=False):
        """
        Builds the AIS NMEA message string.
        This method only supports AIVDM, single fragment messages.

        Field 1, !AIVDM, identifies this as an AIVDM packet.
        Field 2 (1) is the count of fragments in the currently accumulating message.
        Field 3 (1) is the fragment number of this sentence (one-based; a
        sentence with fragment count 1 and number 1 is complete in itself).
        Field 4 (empty) is a sequential message ID for multi-sentence messages.
        Field 5 (A) is a radio channel code. AIS uses the high side of the
        duplex from two VHF radio channels:
          - AIS Channel A is 161.975Mhz (87B);
          - AIS Channel B is 162.025Mhz (88B).
        Field 6 is the encoded payload string.
        Field 7 is the number of fill bits (0-5) used to pad the payload to a
        6-bit boundary, followed by '*' and the two hex checksum digits.

        @param invert_crc When True, emit the bitwise complement of the
                          checksum (produces an intentionally invalid frame)
        """
        payload = "!AIVDM,1,1,,A," + self.encode() + '*'
        chksum = self.crc(payload)
        if invert_crc:
            chksum = ~chksum
        return payload + "%02X" % (chksum & 0xff)

    def encode(self, bitstr=None):
        """
        Encode a bitstream into a 6-bit armored AIS payload string.

        @param bitstr The bitstream (a bitstring.Bits object). If omitted,
                      the bitstream is built from the '_ais_message' given
                      at construction.
        @return "<payload>,<fillbits>" — the armored payload plus the number
                of fill bits needed to pad to a 6-bit boundary
        """
        curr_index = 0
        curr_offset = 6  # We'll be encoding 6 bits at a time
        encoded_str = []
        # Bug fix: compare with 'is None' (identity), not '== None'
        if bitstr is None:
            bitstr = self._ais_message.build_bitstream()
        # Round up because type 5 is 424 bits, which does not divide by 6.
        # NOTE(review): the final partial slice relies on bitstring's
        # slice-truncation behaviour for the trailing bits — confirm the
        # short block encodes as intended.
        for i in range(0, int(round(len(bitstr) / 6.))):
            block = bitstr[curr_index:curr_offset]
            encoded_str.append(encodingchars[block.uint])
            curr_index += 6
            curr_offset += 6
        remainingbits = len(bitstr) % 6
        fillbits = (6 - remainingbits) if remainingbits != 0 else 0
        return ("".join(encoded_str)) + ',' + chr(ord('0') + fillbits)

    def decode(self, msg):
        """
        Decodes an AIS NMEA formatted message. Supports message types 1, 5
        and 24 (parts A and B). A CRC check is performed first.

        @param msg The NMEA sentence to decode
        @return An AISMessage subclass instance holding the decoded elements
        @raise CRCInvalidError when the trailing checksum does not match
        @raise ValueError for message types this library cannot decode
        """
        computed_crc = self.crc(msg)
        given_crc = int(msg[-2:], 16)
        # If CRC did not match, throw exception!
        if given_crc != computed_crc:
            raise CRCInvalidError("The given CRC did not match the computed CRC.")
        # The 6th field of the sentence is the payload, the 7th the fill bits
        payload, fillbits = msg.split(",")[5:7]
        # Reverse the 6-bit ascii armoring back to integer code points...
        dec = []
        for c in payload:
            dec.append(re_encodingchars[c])
        # ...and expand each code point to its 6-bit binary string
        bits = []
        for i in range(len(dec)):
            bits.append(int2bin6(dec[i]))
        bitstream = "".join(bits)
        # Drop the fill bits that padded the payload to a 6-bit boundary
        if fillbits[0] != '0':
            bitstream = bitstream[:-int(fillbits[0])]
        msgId = bitstream[0:6]
        if msgId == '000001':
            aismsg = AISPositionReportMessage()
        elif msgId == '011000' and bitstream[38] == '0':
            aismsg = AISStaticDataReportAMessage()
        elif msgId == '011000' and bitstream[38] == '1':
            aismsg = AISStaticDataReportBMessage()
        elif msgId == '000101':
            aismsg = AISStaticAndVoyageReportMessage()
        else:
            # Bug fix: previously fell through and raised UnboundLocalError
            # on 'aismsg' for any unsupported message type.
            raise ValueError("Unsupported AIS message type: %s" % msgId)
        aismsg.unpack(bitstream)
        return aismsg

    def crc(self, msg):
        """
        Generates the NMEA checksum (XOR of all characters) for the given
        AIS NMEA formatted string.

        If the input is a full sentence ("!...*XX"), only the characters
        between the leading '!' and the final '*' are included; otherwise
        the whole string is checksummed.

        @param msg The message used to generate the CRC
        @return Integer checksum; format with "%02X" for the NMEA hex form
        """
        chksum = 0
        astk = msg.rfind("*")
        if msg[0] == "!" and astk != -1:
            msg = msg[1:astk]
        for c in msg:
            chksum = chksum ^ ord(c)
        return chksum
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Metadata flags consumed by Ansible's doc/build tooling (not used at runtime).
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation rendered by ansible-doc.  Typos fixed: "module let
# you" -> "lets you", duplicated "create create", "support" -> "supports".
DOCUMENTATION = '''
---
module: aos_rack_type
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS Rack Type
description:
  - Apstra AOS Rack Type module lets you manage your Rack Type easily.
    You can create and delete Rack Type by Name, ID or by using a JSON File.
    This module is idempotent and supports the I(check) mode.
    It's using the AOS REST API.
requirements:
  - "aos-pyez >= 0.6.0"
options:
  session:
    description:
      - An existing AOS session as obtained by M(aos_login) module.
    required: true
  name:
    description:
      - Name of the Rack Type to manage.
        Only one of I(name), I(id) or I(content) can be set.
  id:
    description:
      - AOS Id of the Rack Type to manage (can't be used to create a new Rack Type),
        Only one of I(name), I(id) or I(content) can be set.
  content:
    description:
      - Datastructure of the Rack Type to create. The data can be in YAML / JSON or
        directly a variable. It's the same datastructure that is returned
        on success in I(value).
  state:
    description:
      - Indicate what is the expected state of the Rack Type (present or not).
    default: present
    choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Delete a Rack Type by name"
aos_rack_type:
session: "{{ aos_session }}"
name: "my-rack-type"
state: absent
- name: "Delete a Rack Type by id"
aos_rack_type:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: absent
# Save a Rack Type to a file
- name: "Access Rack Type 1/3"
aos_rack_type:
session: "{{ aos_session }}"
name: "my-rack-type"
state: present
register: rack_type
- name: "Save Rack Type into a JSON file 2/3"
copy:
content: "{{ rack_type.value | to_nice_json }}"
dest: rack_type_saved.json
- name: "Save Rack Type into a YAML file 3/3"
copy:
content: "{{ rack_type.value | to_nice_yaml }}"
dest: rack_type_saved.yaml
- name: "Load Rack Type from a JSON file"
aos_rack_type:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/rack_type_saved.json') }}"
state: present
- name: "Load Rack Type from a YAML file"
aos_rack_type:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/rack_type_saved.yaml') }}"
state: present
'''
# Bug fix: Ansible's documentation tooling looks for a module-level RETURN
# string; this module defined it as RETURNS, so ansible-doc could not find
# it.  Define RETURN and keep RETURNS as a backward-compatible alias.
RETURN = '''
name:
  description: Name of the Rack Type
  returned: always
  type: str
  sample: AOS-1x25-1

id:
  description: AOS unique ID assigned to the Rack Type
  returned: always
  type: str
  sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06

value:
  description: Value of the object as returned by the AOS Server
  returned: always
  type: dict
  sample: {'...'}
'''

# Backward-compatible alias for anything that referenced the old name.
RETURNS = RETURN
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
#########################################################
# State Processing
#########################################################
def rack_type_absent(module, aos, my_rack_type):
    """
    Ensure the Rack Type is absent, then exit the module with the result.

    @param module AnsibleModule instance
    @param aos aos-pyez session (unused here; kept for signature parity
               with rack_type_present)
    @param my_rack_type collection item resolved by the caller
    """
    margs = module.params
    # If the item does not exist on the server, nothing to do
    if my_rack_type.exists is False:
        module.exit_json(changed=False,
                         name=margs['name'],
                         id=margs['id'],
                         value={})
    # If not in check mode, delete Rack Type
    if not module.check_mode:
        try:
            my_rack_type.delete()
        # Bug fix: a bare `except:` also swallowed SystemExit (raised by
        # exit_json/fail_json) and KeyboardInterrupt; catch real errors only.
        except Exception:
            module.fail_json(msg="An error occurred, while trying to delete the Rack Type")
    module.exit_json(changed=True,
                     name=my_rack_type.name,
                     id=my_rack_type.id,
                     value={})
def rack_type_present(module, aos, my_rack_type):
    """
    Ensure the Rack Type exists, then exit the module with the result.

    @param module AnsibleModule instance
    @param aos aos-pyez session
    @param my_rack_type collection item resolved by the caller
    """
    margs = module.params
    if margs['content'] is not None:
        if 'display_name' in module.params['content'].keys():
            do_load_resource(module, aos.RackTypes, module.params['content']['display_name'])
        else:
            module.fail_json(msg="Unable to find display_name in 'content', Mandatory")
    # Bug fix: the original tested `'content' not in margs.keys()`, which is
    # always False because the argument spec always defines the key (with
    # value None), so this error was never reported.  Test the value instead.
    if my_rack_type.exists is False and margs.get('content') is None:
        module.fail_json(msg="'content' is mandatory for module that don't exist currently")
    module.exit_json(changed=False,
                     name=my_rack_type.name,
                     id=my_rack_type.id,
                     value=my_rack_type.value)
#########################################################
# Main Function
#########################################################
def rack_type(module):
    """
    Resolve the target Rack Type from name / id / content and dispatch to
    the handler for the requested state.

    @param module AnsibleModule instance with validated parameters
    """
    margs = module.params
    try:
        aos = get_aos_session(module, margs['session'])
    # Bug fix: a bare `except:` would also swallow SystemExit raised by
    # fail_json/exit_json; catch real errors only.
    except Exception:
        module.fail_json(msg="Unable to login to the AOS server")
    item_name = False
    item_id = False
    if margs['content'] is not None:
        content = content_to_dict(module, margs['content'])
        if 'display_name' in content.keys():
            item_name = content['display_name']
        else:
            module.fail_json(msg="Unable to extract 'display_name' from 'content'")
    elif margs['name'] is not None:
        item_name = margs['name']
    elif margs['id'] is not None:
        item_id = margs['id']
    # ----------------------------------------------------
    # Find Object if available based on ID or Name
    # ----------------------------------------------------
    my_rack_type = find_collection_item(aos.RackTypes,
                                        item_name=item_name,
                                        item_id=item_id)
    # ----------------------------------------------------
    # Proceed based on State value
    # ----------------------------------------------------
    if margs['state'] == 'absent':
        rack_type_absent(module, aos, my_rack_type)
    elif margs['state'] == 'present':
        rack_type_present(module, aos, my_rack_type)
def main():
    """Ansible entry point: declare the argument spec and dispatch."""
    argument_spec = {
        'session': dict(required=True, type="dict"),
        'name': dict(required=False),
        'id': dict(required=False),
        'content': dict(required=False, type="json"),
        'state': dict(required=False,
                      choices=['present', 'absent'],
                      default="present"),
    }
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[('name', 'id', 'content')],
        required_one_of=[('name', 'id', 'content')],
        supports_check_mode=True
    )
    # aos-pyez is required; enforce the minimum supported version
    check_aos_version(module, '0.6.0')
    rack_type(module)


if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import maya.cmds as cmds
import RMUncategorized
def MirrorChildren(Objects):
    """
    Mirror the transforms of Objects (and, recursively, all of their
    transform children) onto their opposite-side counterparts.

    Objects are expected to be named "<prefix>_<side>_..." where the second
    "_"-separated token is the side ("R" or "L").  For each object the
    opposite-side name is built and, if that node exists in the scene, its
    transform is set to a mirrored copy of this object's transform
    (translateX negated, rotateY/rotateZ negated, per the Mirror* flags).
    """
    # First pass: recurse depth-first so every transform below each object
    # is mirrored as well.
    for eachObject in Objects:
        children = cmds.listRelatives(eachObject, children=True, type='transform')
        if (children):
            MirrorChildren(children)
    # Second pass: mirror each object onto its opposite-side counterpart.
    for eachObject in Objects:
        ObjectTransformDic = RMUncategorized.ObjectTransformDic([eachObject])
        SplitArray = eachObject.split("_")
        # NOTE(review): assumes the side token is always at index 1 of the
        # "_"-split name — objects named differently will misbehave here.
        Side = SplitArray[1]
        if Side == "R":
            SplitArray[1] = "L"
            OpositObject = "_".join(SplitArray)
            if cmds.objExists(OpositObject):
                RMUncategorized.SetObjectTransformDic({OpositObject: ObjectTransformDic[eachObject]}, MirrorTranslateX=-1, MirrorTranslateY=1, MirrorTranslateZ=1, MirrorRotateX=1, MirrorRotateY=-1, MirrorRotateZ=-1)
                if cmds.objectType(eachObject) == 'joint':
                    # Joints additionally carry jointOrient values; only the
                    # X component is negated on the opposite side.
                    X = cmds.getAttr("%s.jointOrientX" % (eachObject))
                    Y = cmds.getAttr("%s.jointOrientY" % (eachObject))
                    Z = cmds.getAttr("%s.jointOrientZ" % (eachObject))
                    cmds.setAttr("%s.jointOrientX" % (OpositObject), -X)
                    cmds.setAttr("%s.jointOrientY" % (OpositObject), Y)
                    cmds.setAttr("%s.jointOrientZ" % (OpositObject), Z)
        else:
            # NOTE(review): any non-"R" side token (including a center "M")
            # is rewritten to "R" here; presumably only "L" objects are
            # expected to reach this branch — confirm with callers.
            SplitArray[1] = "R"
            OpositObject = "_".join(SplitArray)
            if cmds.objExists(OpositObject):
                RMUncategorized.SetObjectTransformDic({OpositObject: ObjectTransformDic[eachObject]}, MirrorTranslateX=-1, MirrorTranslateY=1, MirrorTranslateZ=1, MirrorRotateX=1, MirrorRotateY=-1, MirrorRotateZ=-1)
                if cmds.objectType(eachObject) == 'joint':
                    X = cmds.getAttr("%s.jointOrientX" % (eachObject))
                    Y = cmds.getAttr("%s.jointOrientY" % (eachObject))
                    Z = cmds.getAttr("%s.jointOrientZ" % (eachObject))
                    cmds.setAttr("%s.jointOrientX" % (OpositObject), -X)
                    cmds.setAttr("%s.jointOrientY" % (OpositObject), Y)
                    cmds.setAttr("%s.jointOrientZ" % (OpositObject), Z)
# Script entry point: mirror whatever is currently selected in the Maya scene.
selection = cmds.ls(selection = True)
MirrorChildren(selection)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import Image, { ImageProps } from "next/image";
import ViewSource from "../../components/view-source";
import styles from "../../styles.module.css";
// Note: we cannot use `priority` or `loading="eager"
// because we depend on the default `loading="lazy"`
// behavior to wait for CSS to reveal the proper image.
// Props for ThemeImage: everything next/image accepts except `src`,
// `priority`, and `loading`, plus one source URL per color scheme.
type Props = Omit<ImageProps, "src" | "priority" | "loading"> & {
  srcLight: string;
  srcDark: string;
};
// Render both theme variants; the CSS classes (styles.imgLight /
// styles.imgDark) reveal exactly one of them based on the color scheme.
const ThemeImage = ({ srcLight, srcDark, ...shared }: Props) => (
  <>
    <Image {...shared} src={srcLight} className={styles.imgLight} />
    <Image {...shared} src={srcDark} className={styles.imgDark} />
  </>
);
const Page = () => (
<div>
<ViewSource pathname="app/theme/page.tsx" />
<h1>Image With Light/Dark Theme Detection</h1>
<ThemeImage
alt="Next.js Streaming"
srcLight="https://assets.vercel.com/image/upload/front/nextjs/streaming-light.png"
srcDark="https://assets.vercel.com/image/upload/front/nextjs/streaming-dark.png"
width={588}
height={387}
/>
</div>
);
export default Page;
|
typescript
|
github
|
https://github.com/vercel/next.js
|
examples/image-component/app/theme/page.tsx
|
from w3lib.http import headers_dict_to_raw
from scrapy.utils.datatypes import CaselessDict
class Headers(CaselessDict):
    """Case insensitive http headers dictionary.

    Values are stored internally as *lists* of byte strings so a header can
    carry multiple values; scalar access (__getitem__/get) returns only the
    most recently added value.  NOTE: relies on the `unicode` builtin, so
    this is Python 2 only.
    """

    def __init__(self, seq=None, encoding='utf-8'):
        # Charset used when encoding unicode keys/values to byte strings.
        self.encoding = encoding
        super(Headers, self).__init__(seq)

    def normkey(self, key):
        """Headers must not be unicode"""
        # Canonicalize the header name to Title-Case and encode unicode
        # names to bytes.
        if isinstance(key, unicode):
            return key.title().encode(self.encoding)
        return key.title()

    def normvalue(self, value):
        """Headers must not be unicode"""
        # Wrap scalars into a one-element list (on Python 2 a plain str has
        # no __iter__, so strings count as scalars here), then encode any
        # unicode entries to bytes.
        if not hasattr(value, '__iter__'):
            value = [value]
        return [x.encode(self.encoding) if isinstance(x, unicode) else x \
            for x in value]

    def __getitem__(self, key):
        # Return the *last* value stored for the header, or None when the
        # stored list is empty.  A missing key propagates KeyError
        # (presumably raised by CaselessDict — TODO confirm).
        try:
            return super(Headers, self).__getitem__(key)[-1]
        except IndexError:
            return None

    def get(self, key, def_val=None):
        # Like __getitem__ but with a default; the parent's get normalizes
        # def_val, so the [-1] also applies to the normalized default list.
        try:
            return super(Headers, self).get(key, def_val)[-1]
        except IndexError:
            return None

    def getlist(self, key, def_val=None):
        # Full list of values for the header; the normalized def_val (or an
        # empty list) when the header is absent.
        try:
            return super(Headers, self).__getitem__(key)
        except KeyError:
            if def_val is not None:
                return self.normvalue(def_val)
            return []

    def setlist(self, key, list_):
        # Replace all values for the header (normalized via __setitem__).
        self[key] = list_

    def setlistdefault(self, key, default_list=()):
        # Store default_list only if the header is not present yet.
        return self.setdefault(key, default_list)

    def appendlist(self, key, value):
        # Append value(s) to the header's existing list of values.
        lst = self.getlist(key)
        lst.extend(self.normvalue(value))
        self[key] = lst

    def items(self):
        # Materialized (name, value-list) pairs.
        return list(self.iteritems())

    def iteritems(self):
        # Lazily yield (name, full value list) pairs.
        return ((k, self.getlist(k)) for k in self.keys())

    def values(self):
        # Scalar view: the last value of each header.
        return [self[k] for k in self.keys()]

    def to_string(self):
        # Serialize to raw HTTP header bytes via w3lib.
        return headers_dict_to_raw(self)

    def __copy__(self):
        return self.__class__(self)
    copy = __copy__
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.symbols
import com.intellij.psi.PsiElement
import org.jetbrains.kotlin.analysis.api.KaInitializerValue
import org.jetbrains.kotlin.analysis.api.annotations.KaAnnotationList
import org.jetbrains.kotlin.analysis.api.fir.KaFirSession
import org.jetbrains.kotlin.analysis.api.fir.annotations.KaFirAnnotationListForDeclaration
import org.jetbrains.kotlin.analysis.api.fir.findPsi
import org.jetbrains.kotlin.analysis.api.fir.symbols.pointers.KaFirJavaSyntheticPropertySymbolPointer
import org.jetbrains.kotlin.analysis.api.fir.symbols.pointers.createOwnerPointer
import org.jetbrains.kotlin.analysis.api.lifetime.withValidityAssertion
import org.jetbrains.kotlin.analysis.api.symbols.*
import org.jetbrains.kotlin.analysis.api.symbols.pointers.KaSymbolPointer
import org.jetbrains.kotlin.analysis.api.types.KaType
import org.jetbrains.kotlin.descriptors.Visibility
import org.jetbrains.kotlin.fir.declarations.synthetic.FirSyntheticProperty
import org.jetbrains.kotlin.fir.declarations.utils.*
import org.jetbrains.kotlin.fir.symbols.SyntheticSymbol
import org.jetbrains.kotlin.fir.symbols.impl.FirSyntheticPropertySymbol
import org.jetbrains.kotlin.name.CallableId
import org.jetbrains.kotlin.name.Name
/**
 * [KaSyntheticJavaPropertySymbol] backed by a FIR [FirSyntheticPropertySymbol] —
 * the Kotlin-view property synthesized from a Java getter/setter pair.
 *
 * Most members delegate straight to [firSymbol]; every accessor is wrapped in
 * [withValidityAssertion] so the symbol cannot be read after its
 * [analysisSession] has been invalidated.  `builder` is presumably provided by
 * the [KaFirSymbol] supertype — defined outside this file, confirm there.
 */
internal class KaFirSyntheticJavaPropertySymbol(
    override val firSymbol: FirSyntheticPropertySymbol,
    override val analysisSession: KaFirSession,
) : KaSyntheticJavaPropertySymbol(), KaFirSymbol<FirSyntheticPropertySymbol> {
    override val psi: PsiElement? get() = withValidityAssertion { findPsi() }
    override val isVal: Boolean get() = withValidityAssertion { firSymbol.isVal }
    override val name: Name get() = withValidityAssertion { firSymbol.name }
    override val isActual: Boolean get() = withValidityAssertion { firSymbol.isActual }
    override val isExpect: Boolean get() = withValidityAssertion { firSymbol.isExpect }
    override val returnType: KaType get() = withValidityAssertion { firSymbol.returnType(builder) }
    // Passed a null FIR receiver: NOTE(review) — presumably yields null when the
    // property has no receiver; confirm against KaFirReceiverParameterSymbol.create.
    override val receiverParameter: KaReceiverParameterSymbol?
        get() = withValidityAssertion { KaFirReceiverParameterSymbol.create(null, analysisSession, this) }
    override val typeParameters: List<KaTypeParameterSymbol>
        get() = withValidityAssertion { firSymbol.createKtTypeParameters(builder) }
    // Hard-coded: this implementation never models the property as an extension.
    override val isExtension: Boolean
        get() = withValidityAssertion { false }
    // Deliberately taken from the supertype rather than derived from the FIR symbol.
    override val origin: KaSymbolOrigin
        get() = super<KaSyntheticJavaPropertySymbol>.origin
    override val initializer: KaInitializerValue? get() = withValidityAssertion { firSymbol.getKtConstantInitializer(builder) }
    override val modality: KaSymbolModality get() = withValidityAssertion { firSymbol.kaSymbolModality }
    override val compilerVisibility: Visibility get() = withValidityAssertion { firSymbol.visibility }
    override val annotations: KaAnnotationList
        get() = withValidityAssertion {
            KaFirAnnotationListForDeclaration.create(firSymbol, builder)
        }
    override val callableId: CallableId?
        get() = withValidityAssertion { firSymbol.getCallableId() }
    // A synthetic Java property always has a getter (it is built from one),
    // hence the non-null assertion.
    override val getter: KaPropertyGetterSymbol
        get() = withValidityAssertion {
            KaFirSyntheticPropertyGetterSymbol(firSymbol.getterSymbol!!, analysisSession)
        }
    // The underlying Java getter function the synthetic property delegates to.
    // `return` here is a non-local return through the inline withValidityAssertion.
    override val javaGetterSymbol: KaNamedFunctionSymbol
        get() = withValidityAssertion {
            val fir = firSymbol.fir as FirSyntheticProperty
            return builder.functionBuilder.buildNamedFunctionSymbol(fir.getter.delegate.symbol)
        }
    // Null when the Java property has no setter (read-only from Kotlin's view).
    override val javaSetterSymbol: KaNamedFunctionSymbol?
        get() = withValidityAssertion {
            val fir = firSymbol.fir as FirSyntheticProperty
            return fir.setter?.delegate?.let { builder.functionBuilder.buildNamedFunctionSymbol(it.symbol) }
        }
    override val setter: KaPropertySetterSymbol?
        get() = withValidityAssertion {
            firSymbol.setterSymbol?.let {
                KaFirSyntheticPropertySetterSymbol(it, analysisSession)
            }
        }
    override val isOverride: Boolean get() = withValidityAssertion { firSymbol.isOverride }
    override val isStatic: Boolean get() = withValidityAssertion { firSymbol.isStatic }
    override val isExternal: Boolean get() = withValidityAssertion { firSymbol.isEffectivelyExternal(analysisSession.firSession) }
    override val hasSetter: Boolean get() = withValidityAssertion { firSymbol.setterSymbol != null }
    // Pointer restores this symbol in a later analysis session by owner + name.
    override fun createPointer(): KaSymbolPointer<KaSyntheticJavaPropertySymbol> = withValidityAssertion {
        KaFirJavaSyntheticPropertySymbolPointer(
            ownerPointer = analysisSession.createOwnerPointer(this),
            propertyName = name,
            isSynthetic = firSymbol is SyntheticSymbol,
            originalSymbol = this
        )
    }
    override fun equals(other: Any?): Boolean = symbolEquals(other)
    override fun hashCode(): Int = symbolHashCode()
}
|
kotlin
|
github
|
https://github.com/JetBrains/kotlin
|
analysis/analysis-api-fir/src/org/jetbrains/kotlin/analysis/api/fir/symbols/KaFirSyntheticJavaPropertySymbol.kt
|
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_EXAMPLE_PROTO_HELPER_H_
#define TENSORFLOW_CORE_UTIL_EXAMPLE_PROTO_HELPER_H_
#include <cstddef>
#include <cstdint>
#include <unordered_set>
#include <vector>
#include "absl/status/status.h"
#include "xla/tsl/platform/errors.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
// This is a set of helper methods that will make it possible to share
// tensorflow::Example proto Tensor conversion code inside the ExampleParserOp
// OpKernel as well as in external code.
namespace tensorflow {
// "Dense" feature configuration.
struct FixedLenFeature {
  // Feature name looked up in the Example's feature map.
  std::string key;
  // Expected element type of the feature values.
  DataType dtype;
  // Fixed shape of one Example's dense feature.
  TensorShape shape;
  // Tensor substituted when the feature is absent from an Example.
  Tensor default_value;
  // Name of the op output tensor that receives the parsed values.
  std::string values_output_tensor_name;
};
// "Sparse" feature configuration.
struct VarLenFeature {
  // Feature name looked up in the Example's feature map.
  std::string key;
  // Expected element type of the feature values.
  DataType dtype;
  // Output tensor names for the three parts of the resulting SparseTensor.
  std::string values_output_tensor_name;
  std::string indices_output_tensor_name;
  std::string shapes_output_tensor_name;
};
// Given a single tensorflow::Example, with an optional example name
// at a particular index within a batch, and dense and sparse feature
// configurations from fixed_len_features, var_len_features, this method
// updates the dense value tensor and the sparse values temporary vector
// of tensors. The indexing of the output vectors correspond 1:1 to the
// indexing of the feature configuration vectors.
//
// The fixed_len_features and var_len_features configurations are assumed to
// have key fields disjoint from the Feature map in the tensorflow.Example
// proto.
//
// For each sparse feature, the sparse values temporary vector holds a
// tensor for each Example. Each tensor is either empty or filled, depending
// on if the sparse feature value is set for the Example. This
// temporary structure is needed because we need to know the total number
// of filled elements in the batch to get the proper final sparse tensor
// shapes allocated. After the entire batch is processed,
// GetSparseTensorShape can be used to calculate the final shapes and
// CopyIntoSparseTensor can be used to copy from the temporary vector
// into the final allocated tensors.
absl::Status SingleExampleProtoToTensors(
const Example& example, const std::string& name, int batch_index,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features,
std::vector<Tensor*>* output_dense_values_tensor,
std::vector<std::vector<Tensor>>* output_sparse_values_tmp);
// The shape of the indices and values tensors associated with a SparseTensor
// are dependent on the contents of the batch.
struct VarLenFeatureBatchShapes {
TensorShape indices_shape;
TensorShape values_shape;
int max_num_features;
};
// Get the shape of the sparse values and indices tensors for the batch,
// given how many of the tensors in the temporary sparse values vector
// are actually filled.
absl::Status GetSparseTensorShapes(const VarLenFeature& var_len_feature,
const std::vector<Tensor>& sparse_values_tmp,
int batch_size,
VarLenFeatureBatchShapes* output_shapes);
// A method to convert a batch of tensorflow::Example protos into output
// tensors. This method is useful if there already is a batch of deserialized
// Example protos in memory (such as a serving use-case) and we do not wish
// to incur an extraneous serialize/deserialize. It is intended
// as an outside of OpKernel compatible replacement for the functionality of
// ExampleParserOp. In a serving setting, this method could be used to produce
// a feed_dict of Tensors that could bypass the ExampleParserOp.
//
// Note that unlike SingleExampleProtoToTensors, output tensors are
// allocated using a provided Allocator within this method.
absl::Status BatchExampleProtoToTensors(
const std::vector<const Example*>& examples,
const std::vector<std::string>& names,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features, Allocator* allocator,
std::vector<Tensor>* output_dense_values_tensor,
std::vector<Tensor>* output_sparse_indices_tensor,
std::vector<Tensor>* output_sparse_values_tensor,
std::vector<Tensor>* output_sparse_shapes_tensor);
// Check that the given dtype is one that is compatible with
// tensorflow::Example protocol buffer feature values.
absl::Status CheckValidType(const DataType& dtype);
// Check that the provided Feature proto message's oneof value
// matches that of the provided dtype.
absl::Status CheckTypesMatch(const Feature& feature, const DataType& dtype,
bool* match);
// For a single Example, copy a dense feature value into an output
// dense value tensor Out at the provided out_index offset.
absl::Status FeatureDenseCopy(std::size_t out_index, const std::string& name,
const std::string& key, const DataType& dtype,
const TensorShape& shape, const Feature& feature,
Tensor* out);
// Copy the value a provided Tensor into an output dense_value tensor Out
// at the provided out_index offset.
void RowDenseCopy(const std::size_t& out_index, const DataType& dtype,
const Tensor& in, Tensor* out);
// For a single Example, and given sparse feature return a temporary output
// Tensor suitable for being collected in the temporary sparse value vector.
Tensor FeatureSparseCopy(std::size_t batch, const std::string& key,
const DataType& dtype, const Feature& feature);
// Copy a temporary Tensor into the final sparse indices and values
// tensor at a given batch index and element offset. This method
// assumes that the indices/values Tensors have been properly allocated
// for the batch.
int64_t CopyIntoSparseTensor(const Tensor& in, int batch, int64_t offset,
Tensor* indices, Tensor* values);
// Check that each dense_shape has known rank and inner dimensions; and
// update variable_length (whether the outer dimension is None) and
// elements_per_stride for each dense_shape.
absl::Status GetDenseShapes(const std::vector<PartialTensorShape>& dense_shapes,
std::vector<bool>* variable_length,
std::vector<std::size_t>* elements_per_stride);
// Parses the attributes passed to ParseExample.
// REQUIRES: Init must be called after construction.
struct ParseExampleAttrs {
 public:
  // Reads op attributes from *ctx (any type providing GetAttr, e.g. an
  // OpKernelConstruction — TODO confirm callers).  op_version selects the
  // attribute set: 1 = ParseExample, 2 = ParseExampleV2 (adds ragged types).
  // Returns the first GetAttr/validation error, so attribute order matters
  // for error reporting.
  template <typename ContextType>
  absl::Status Init(ContextType* ctx, int op_version = 1) {
    TF_RETURN_IF_ERROR(ctx->GetAttr("sparse_types", &sparse_types));
    TF_RETURN_IF_ERROR(ctx->GetAttr("Tdense", &dense_types));
    TF_RETURN_IF_ERROR(ctx->GetAttr("dense_shapes", &dense_shapes));
    // Derive variable_length / elements_per_stride from the dense shapes.
    TF_RETURN_IF_ERROR(
        GetDenseShapes(dense_shapes, &variable_length, &elements_per_stride));
    switch (op_version) {
      case 1:
        TF_RETURN_IF_ERROR(ctx->GetAttr("Nsparse", &num_sparse));
        TF_RETURN_IF_ERROR(ctx->GetAttr("Ndense", &num_dense));
        break;
      case 2:
        TF_RETURN_IF_ERROR(
            ctx->GetAttr("ragged_value_types", &ragged_value_types));
        TF_RETURN_IF_ERROR(ctx->GetAttr("num_sparse", &num_sparse));
        TF_RETURN_IF_ERROR(
            ctx->GetAttr("ragged_split_types", &ragged_split_types));
        break;
      default:
        return errors::InvalidArgument("Unexpected op_version", op_version);
    }
    return FinishInit(op_version);
  }
  absl::Status UpdateDenseShapes(const std::vector<size_t>& got_dims);
  int64_t num_sparse;
  int64_t num_dense;
  int64_t num_ragged;
  std::vector<DataType> sparse_types;
  std::vector<DataType> dense_types;
  std::vector<DataType> ragged_value_types;
  std::vector<DataType> ragged_split_types;
  std::vector<PartialTensorShape> dense_shapes;
  std::vector<bool> variable_length;
  std::vector<std::size_t> elements_per_stride;
 private:
  absl::Status FinishInit(
      int op_version);  // for context-independent parts of Init.
};
// Parses the attributes passed to ParseSingleExample.
// REQUIRES: Init must be called after construction.
struct ParseSingleExampleAttrs {
 public:
  // Reads op attributes from *ctx (any type providing GetAttr) and validates
  // that num_sparse agrees with the parallel sparse_keys / sparse_types lists.
  template <typename ContextType>
  absl::Status Init(ContextType* ctx) {
    TF_RETURN_IF_ERROR(ctx->GetAttr("sparse_keys", &sparse_keys));
    TF_RETURN_IF_ERROR(ctx->GetAttr("sparse_types", &sparse_types));
    TF_RETURN_IF_ERROR(ctx->GetAttr("dense_keys", &dense_keys));
    TF_RETURN_IF_ERROR(ctx->GetAttr("Tdense", &dense_types));
    TF_RETURN_IF_ERROR(ctx->GetAttr("dense_shapes", &dense_shapes));
    // num_sparse is only used for this consistency check; the member lists
    // below carry the authoritative sizes.
    int num_sparse;
    TF_RETURN_IF_ERROR(ctx->GetAttr("num_sparse", &num_sparse));
    if (num_sparse != sparse_keys.size() || num_sparse != sparse_types.size()) {
      return errors::InvalidArgument(
          "num_sparse (", num_sparse, ") must match the size of sparse_keys (",
          sparse_keys.size(), ") and sparse_types (", sparse_types.size(), ")");
    }
    // Derive variable_length / elements_per_stride from the dense shapes.
    TF_RETURN_IF_ERROR(
        GetDenseShapes(dense_shapes, &variable_length, &elements_per_stride));
    return FinishInit();
  }
  std::vector<tstring> sparse_keys;
  std::vector<DataType> sparse_types;
  std::vector<tstring> dense_keys;
  std::vector<DataType> dense_types;
  std::vector<PartialTensorShape> dense_shapes;
  std::vector<bool> variable_length;
  std::vector<std::size_t> elements_per_stride;
 private:
  absl::Status FinishInit();  // for context-independent parts of Init.
};
// Parses the attributes passed to ParseSequenceExample.
// REQUIRES: Init must be called after construction.
struct ParseSequenceExampleAttrs {
 public:
  // Reads op attributes from *ctx (any type providing GetAttr).  op_version
  // selects version-specific attributes (1 = ParseSequenceExample,
  // 2 = V2 with ragged types); the attributes after the switch are common
  // to both versions.
  template <typename ContextType>
  absl::Status Init(ContextType* ctx, int op_version = 1) {
    switch (op_version) {
      case 1: {
        // V1 lists the feature_list keys that may be absent; collect them
        // into a set for O(1) membership tests during parsing.
        std::vector<std::string> missing_empty_vector;
        TF_RETURN_IF_ERROR(ctx->GetAttr(
            "feature_list_dense_missing_assumed_empty", &missing_empty_vector));
        for (const std::string& feature : missing_empty_vector) {
          feature_list_dense_missing_assumed_empty.insert(feature);
        }
      }
        TF_RETURN_IF_ERROR(
            ctx->GetAttr("context_sparse_keys", &context_sparse_keys));
        TF_RETURN_IF_ERROR(
            ctx->GetAttr("context_dense_keys", &context_dense_keys));
        TF_RETURN_IF_ERROR(ctx->GetAttr("feature_list_sparse_keys",
                                        &feature_list_sparse_keys));
        TF_RETURN_IF_ERROR(
            ctx->GetAttr("feature_list_dense_keys", &feature_list_dense_keys));
        TF_RETURN_IF_ERROR(ctx->GetAttr("Ncontext_dense", &num_context_dense));
        break;
      case 2:
        TF_RETURN_IF_ERROR(ctx->GetAttr("context_ragged_value_types",
                                        &context_ragged_value_types));
        TF_RETURN_IF_ERROR(ctx->GetAttr("context_ragged_split_types",
                                        &context_ragged_split_types));
        TF_RETURN_IF_ERROR(ctx->GetAttr("feature_list_ragged_value_types",
                                        &feature_list_ragged_value_types));
        TF_RETURN_IF_ERROR(ctx->GetAttr("feature_list_ragged_split_types",
                                        &feature_list_ragged_split_types));
        break;
      default:
        return errors::InvalidArgument("Unexpected op_version", op_version);
    }
    // Attributes shared by both op versions.
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("context_sparse_types", &context_sparse_types));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("Nfeature_list_dense", &num_feature_list_dense));
    TF_RETURN_IF_ERROR(ctx->GetAttr("Ncontext_sparse", &num_context_sparse));
    TF_RETURN_IF_ERROR(ctx->GetAttr("Tcontext_dense", &context_dense_types));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("feature_list_sparse_types", &feature_list_sparse_types));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("feature_list_dense_types", &feature_list_dense_types));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("Nfeature_list_sparse", &num_feature_list_sparse));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("context_dense_shapes", &context_dense_shapes));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("feature_list_dense_shapes", &feature_list_dense_shapes));
    return FinishInit(op_version);
  }
  std::unordered_set<std::string> feature_list_dense_missing_assumed_empty;
  int64_t num_context_sparse;
  int64_t num_context_dense;
  int64_t num_context_ragged;
  int64_t num_feature_list_sparse;
  int64_t num_feature_list_dense;
  int64_t num_feature_list_ragged;
  std::vector<tstring> context_sparse_keys;
  std::vector<tstring> context_dense_keys;
  std::vector<tstring> feature_list_sparse_keys;
  std::vector<tstring> feature_list_dense_keys;
  std::vector<DataType> context_sparse_types;
  std::vector<DataType> context_dense_types;
  std::vector<TensorShape> context_dense_shapes;
  std::vector<DataType> feature_list_sparse_types;
  std::vector<DataType> feature_list_dense_types;
  std::vector<TensorShape> feature_list_dense_shapes;
  std::vector<DataType> context_ragged_value_types;
  std::vector<DataType> context_ragged_split_types;
  std::vector<DataType> feature_list_ragged_value_types;
  std::vector<DataType> feature_list_ragged_split_types;
 private:
  absl::Status FinishInit(
      int op_version);  // for context-independent parts of Init.
};
// Parses the attributes passed to ParseSingleSequenceExample.
// REQUIRES: Init must be called after construction.
struct ParseSingleSequenceExampleAttrs {
 public:
  // Reads op attributes from *ctx (any type providing GetAttr).  Unlike the
  // batched variant there is no op_version switch and no
  // missing-assumed-empty set.
  template <typename ContextType>
  absl::Status Init(ContextType* ctx) {
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("context_sparse_types", &context_sparse_types));
    TF_RETURN_IF_ERROR(ctx->GetAttr("Ncontext_dense", &num_context_dense))
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("Nfeature_list_dense", &num_feature_list_dense));
    TF_RETURN_IF_ERROR(ctx->GetAttr("Ncontext_sparse", &num_context_sparse));
    TF_RETURN_IF_ERROR(ctx->GetAttr("Tcontext_dense", &context_dense_types));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("feature_list_sparse_types", &feature_list_sparse_types));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("feature_list_dense_types", &feature_list_dense_types));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("Nfeature_list_sparse", &num_feature_list_sparse));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("context_dense_shapes", &context_dense_shapes));
    TF_RETURN_IF_ERROR(
        ctx->GetAttr("feature_list_dense_shapes", &feature_list_dense_shapes));
    return FinishInit();
  }
  int64_t num_context_sparse;
  int64_t num_context_dense;
  int64_t num_feature_list_sparse;
  int64_t num_feature_list_dense;
  std::vector<DataType> context_sparse_types;
  std::vector<DataType> context_dense_types;
  std::vector<TensorShape> context_dense_shapes;
  std::vector<DataType> feature_list_sparse_types;
  std::vector<DataType> feature_list_dense_types;
  std::vector<TensorShape> feature_list_dense_shapes;
 private:
  absl::Status FinishInit();  // for context-independent parts of Init.
};
} // namespace tensorflow
#endif // TENSORFLOW_CORE_UTIL_EXAMPLE_PROTO_HELPER_H_
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/core/util/example_proto_helper.h
|
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# See golang.org/s/go15bootstrap for an overview of the build process.
# Environment variables that control make.bash:
#
# GOHOSTARCH: The architecture for host tools (compilers and
# binaries). Binaries of this type must be executable on the current
# system, so the only common reason to set this is to set
# GOHOSTARCH=386 on an amd64 machine.
#
# GOARCH: The target architecture for installed packages and tools.
#
# GOOS: The target operating system for installed packages and tools.
#
# GO_GCFLAGS: Additional go tool compile arguments to use when
# building the packages and commands.
#
# GO_LDFLAGS: Additional go tool link arguments to use when
# building the commands.
#
# CGO_ENABLED: Controls cgo usage during the build. Set it to 1
# to include all cgo related files, .c and .go file with "cgo"
# build directive, in the build. Set it to 0 to ignore them.
#
# GO_EXTLINK_ENABLED: Set to 1 to invoke the host linker when building
# packages that use cgo. Set to 0 to do all linking internally. This
# controls the default behavior of the linker's -linkmode option. The
# default value depends on the system.
#
# GO_LDSO: Sets the default dynamic linker/loader (ld.so) to be used
# by the internal linker.
#
# CC: Command line to run to compile C code for GOHOSTARCH.
# Default is "gcc". Also supported: "clang".
#
# CC_FOR_TARGET: Command line to run to compile C code for GOARCH.
# This is used by cgo. Default is CC.
#
# CC_FOR_${GOOS}_${GOARCH}: Command line to run to compile C code for specified ${GOOS} and ${GOARCH}.
# (for example, CC_FOR_linux_arm)
# If this is not set, the build will use CC_FOR_TARGET if appropriate, or CC.
#
# CXX_FOR_TARGET: Command line to run to compile C++ code for GOARCH.
# This is used by cgo. Default is CXX, or, if that is not set,
# "g++" or "clang++".
#
# CXX_FOR_${GOOS}_${GOARCH}: Command line to run to compile C++ code for specified ${GOOS} and ${GOARCH}.
# (for example, CXX_FOR_linux_arm)
# If this is not set, the build will use CXX_FOR_TARGET if appropriate, or CXX.
#
# FC: Command line to run to compile Fortran code for GOARCH.
# This is used by cgo. Default is "gfortran".
#
# PKG_CONFIG: Path to pkg-config tool. Default is "pkg-config".
#
# GO_DISTFLAGS: extra flags to provide to "dist bootstrap".
# (Or just pass them to the make.bash command line.)
#
# GOBUILDTIMELOGFILE: If set, make.bash and all.bash write
# timing information to this file. Useful for profiling where the
# time goes when these scripts run.
#
# GOROOT_BOOTSTRAP: A working Go tree >= Go 1.24.6 for bootstrap.
# If $GOROOT_BOOTSTRAP/bin/go is missing, $(go env GOROOT) is
# tried for all "go" in $PATH. By default, one of $HOME/go1.24.6,
# $HOME/sdk/go1.24.6, or $HOME/go1.4, whichever exists, in that order.
# We still check $HOME/go1.4 to allow for build scripts that still hard-code
# that name even though they put newer Go toolchains there.
bootgo=1.24.6
set -e
if [[ ! -f run.bash ]]; then
echo 'make.bash must be run from $GOROOT/src' 1>&2
exit 1
fi
if [[ "$GOBUILDTIMELOGFILE" != "" ]]; then
echo $(LC_TIME=C date) start make.bash >"$GOBUILDTIMELOGFILE"
fi
# Test for Windows.
case "$(uname)" in
*MINGW* | *WIN32* | *CYGWIN*)
echo 'ERROR: Do not use make.bash to build on Windows.'
echo 'Use make.bat instead.'
echo
exit 1
;;
esac
# Test for bad ld.
if ld --version 2>&1 | grep 'gold.* 2\.20' >/dev/null; then
echo 'ERROR: Your system has gold 2.20 installed.'
echo 'This version is shipped by Ubuntu even though'
echo 'it is known not to work on Ubuntu.'
echo 'Binaries built with this linker are likely to fail in mysterious ways.'
echo
echo 'Run sudo apt-get remove binutils-gold.'
echo
exit 1
fi
# Test for bad SELinux.
# On Fedora 16 the selinux filesystem is mounted at /sys/fs/selinux,
# so loop through the possible selinux mount points.
for se_mount in /selinux /sys/fs/selinux
do
if [[ -d $se_mount && -f $se_mount/booleans/allow_execstack && -x /usr/sbin/selinuxenabled ]] && /usr/sbin/selinuxenabled; then
if ! cat $se_mount/booleans/allow_execstack | grep -c '^1 1$' >> /dev/null ; then
echo "WARNING: the default SELinux policy on, at least, Fedora 12 breaks "
echo "Go. You can enable the features that Go needs via the following "
echo "command (as root):"
echo " # setsebool -P allow_execstack 1"
echo
echo "Note that this affects your system globally! "
echo
echo "The build will continue in five seconds in case we "
echo "misdiagnosed the issue..."
sleep 5
fi
fi
done
# Clean old generated file that will cause problems in the build.
rm -f ./runtime/runtime_defs.go
# Finally! Run the build.
verbose=false
vflag=""
if [[ "$1" == "-v" ]]; then
verbose=true
vflag=-v
shift
fi
goroot_bootstrap_set=${GOROOT_BOOTSTRAP+"true"}
if [[ -z "$GOROOT_BOOTSTRAP" ]]; then
GOROOT_BOOTSTRAP="$HOME/go1.4"
for d in sdk/go$bootgo go$bootgo; do
if [[ -d "$HOME/$d" ]]; then
GOROOT_BOOTSTRAP="$HOME/$d"
fi
done
fi
export GOROOT_BOOTSTRAP
# Run the given command against the bootstrap toolchain: GOROOT is pointed at
# $GOROOT_BOOTSTRAP and the Go configuration knobs (modules, env file, target
# OS/arch, experiments, flags) are cleared so the bootstrap go command behaves
# predictably regardless of the user's environment.
bootstrapenv() {
	GOROOT="$GOROOT_BOOTSTRAP" GO111MODULE=off GOENV=off GOOS= GOARCH= GOEXPERIMENT= GOFLAGS= "$@"
}
export GOROOT="$(cd .. && pwd)"
IFS=$'\n'; for go_exe in $(type -ap go); do
if [[ ! -x "$GOROOT_BOOTSTRAP/bin/go" ]]; then
goroot_bootstrap=$GOROOT_BOOTSTRAP
GOROOT_BOOTSTRAP=""
goroot=$(bootstrapenv "$go_exe" env GOROOT)
GOROOT_BOOTSTRAP=$goroot_bootstrap
if [[ "$goroot" != "$GOROOT" ]]; then
if [[ "$goroot_bootstrap_set" == "true" ]]; then
printf 'WARNING: %s does not exist, found %s from env\n' "$GOROOT_BOOTSTRAP/bin/go" "$go_exe" >&2
printf 'WARNING: set %s as GOROOT_BOOTSTRAP\n' "$goroot" >&2
fi
GOROOT_BOOTSTRAP="$goroot"
fi
fi
done; unset IFS
if [[ ! -x "$GOROOT_BOOTSTRAP/bin/go" ]]; then
echo "ERROR: Cannot find $GOROOT_BOOTSTRAP/bin/go." >&2
echo "Set \$GOROOT_BOOTSTRAP to a working Go tree >= Go $bootgo." >&2
exit 1
fi
# Get the exact bootstrap toolchain version to help with debugging.
# We clear GOOS and GOARCH to avoid an ominous but harmless warning if
# the bootstrap doesn't support them.
GOROOT_BOOTSTRAP_VERSION=$(bootstrapenv "$GOROOT_BOOTSTRAP/bin/go" version | sed 's/go version //')
echo "Building Go cmd/dist using $GOROOT_BOOTSTRAP. ($GOROOT_BOOTSTRAP_VERSION)"
if $verbose; then
echo cmd/dist
fi
if [[ "$GOROOT_BOOTSTRAP" == "$GOROOT" ]]; then
echo "ERROR: \$GOROOT_BOOTSTRAP must not be set to \$GOROOT" >&2
echo "Set \$GOROOT_BOOTSTRAP to a working Go tree >= Go $bootgo." >&2
exit 1
fi
rm -f cmd/dist/dist
bootstrapenv "$GOROOT_BOOTSTRAP/bin/go" build -o cmd/dist/dist ./cmd/dist
# -e doesn't propagate out of eval, so check success by hand.
eval $(./cmd/dist/dist env -p || echo FAIL=true)
if [[ "$FAIL" == true ]]; then
exit 1
fi
if $verbose; then
echo
fi
if [[ "$1" == "--dist-tool" ]]; then
# Stop after building dist tool.
mkdir -p "$GOTOOLDIR"
if [[ "$2" != "" ]]; then
cp cmd/dist/dist "$2"
fi
mv cmd/dist/dist "$GOTOOLDIR"/dist
exit 0
fi
# Run dist bootstrap to complete make.bash.
# Bootstrap installs a proper cmd/dist, built with the new toolchain.
# Throw ours, built with the bootstrap toolchain, away after bootstrap.
./cmd/dist/dist bootstrap -a $vflag $GO_DISTFLAGS "$@"
rm -f ./cmd/dist/dist
# DO NOT ADD ANY NEW CODE HERE.
# The bootstrap+rm above are the final step of make.bash.
# If something must be added, add it to cmd/dist's cmdbootstrap,
# to avoid needing three copies in three different shell languages
# (make.bash, make.bat, make.rc).
|
unknown
|
github
|
https://github.com/golang/go
|
src/make.bash
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.test.rest.transform;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.SequenceWriter;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.fasterxml.jackson.dataformat.yaml.YAMLParser;
import org.elasticsearch.gradle.internal.test.rest.transform.headers.InjectHeaders;
import org.hamcrest.CoreMatchers;
import org.hamcrest.Matchers;
import org.hamcrest.core.IsCollectionContaining;
import org.junit.Before;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.LongAdder;
import java.util.stream.Collectors;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public abstract class TransformTests {
private static final YAMLFactory YAML_FACTORY = new YAMLFactory();
private static final ObjectMapper MAPPER = new ObjectMapper(YAML_FACTORY);
private static final ObjectReader READER = MAPPER.readerFor(ObjectNode.class);
private static final Map<String, String> headers1 = Map.of("foo", "bar");
private static final Map<String, String> headers2 = Map.of("abc", "xyz");
RestTestTransformer transformer;
@Before
public void setup() {
    // Fresh transformer per test so state never leaks across test methods.
    transformer = new RestTestTransformer();
}
/**
 * Asserts exactly one transformed test document carries a "setup" section,
 * then runs the skip-node assertions over every setup and teardown section.
 */
protected void validateSetupAndTearDown(List<ObjectNode> transformedTests) {
    assertThat(transformedTests.stream().filter(node -> node.get("setup") != null).count(), CoreMatchers.equalTo(1L));
    transformedTests.stream().filter(node -> node.get("setup") != null).forEach(this::assertSetup);
    transformedTests.stream().filter(node -> node.get("teardown") != null).forEach(this::assertTeardown);
}
/**
 * Asserts that exactly one "setup" section across the given test documents
 * contains a skip node, and returns that node.
 */
protected ObjectNode validateSkipNodesExist(List<ObjectNode> tests) {
    List<ObjectNode> foundSkipNodes = new ArrayList<>();
    for (ObjectNode test : tests) {
        JsonNode setupSection = test.get("setup");
        if (setupSection == null) {
            continue;
        }
        ObjectNode skipNode = getSkipNode((ArrayNode) setupSection);
        if (skipNode != null) {
            foundSkipNodes.add(skipNode);
        }
    }
    assertThat(foundSkipNodes.size(), CoreMatchers.equalTo(1));
    return foundSkipNodes.get(0);
}
/** Asserts that no "setup" section across the given test documents contains a skip node. */
protected void validateSkipNodesDoesNotExist(List<ObjectNode> tests) {
    int skipNodeCount = 0;
    for (ObjectNode test : tests) {
        JsonNode setupSection = test.get("setup");
        if (setupSection != null && getSkipNode((ArrayNode) setupSection) != null) {
            skipNodeCount++;
        }
    }
    assertThat(skipNodeCount, CoreMatchers.equalTo(0));
}
/** Asserts the single setup skip node exposes a "features" entry of any kind. */
protected void validatePreExistingFeatureExist(List<ObjectNode> tests) {
    assertThat(validateSkipNodesExist(tests).get("features"), CoreMatchers.notNullValue());
}
/** Asserts that none of the given test documents carries a "setup" section. */
protected void validateSetupDoesNotExist(List<ObjectNode> tests) {
    assertThat(tests.stream().filter(node -> node.get("setup") != null).count(), CoreMatchers.equalTo(0L));
}
/**
 * Asserts that the setup skip node lists {@code featureName} among its
 * "features", which may be serialized either as an array or a single string.
 */
protected void validateFeatureNameExists(List<ObjectNode> tests, String featureName) {
    ObjectNode skipNode = validateSkipNodesExist(tests);
    JsonNode featureValues = skipNode.get("features");
    assertNotNull(featureValues);
    List<String> features = new ArrayList<>(1);
    if (featureValues.isArray()) {
        // JsonNode is Iterable over its elements.
        for (JsonNode feature : featureValues) {
            features.add(feature.asText());
        }
    } else if (featureValues.isTextual()) {
        features.add(featureValues.asText());
    }
    assertThat(features, IsCollectionContaining.hasItem(featureName));
}
/** Asserts that exactly one of the given test documents carries a "setup" section. */
protected void validateSetupExist(List<ObjectNode> tests) {
    assertThat(tests.stream().filter(node -> node.get("setup") != null).count(), CoreMatchers.equalTo(1L));
}
/** Convenience overload: transforms with this test class's default transformations. */
protected List<ObjectNode> transformTests(List<ObjectNode> tests) {
    return transformTests(tests, getTransformations());
}
/**
 * Applies the given transforms to a copy of the tests and verifies every
 * known feature name survived into the transformed output.
 */
protected List<ObjectNode> transformTests(List<ObjectNode> tests, List<RestTestTransform<?>> transforms) {
    List<ObjectNode> transformed = transformer.transformRestTests(new LinkedList<>(tests), transforms);
    for (String knownFeature : getKnownFeatures()) {
        validateFeatureNameExists(transformed, knownFeature);
    }
    return transformed;
}
/** Feature names every transformed output must contain; subclasses override. */
protected List<String> getKnownFeatures() {
    return Collections.emptyList();
}
/** Default transformations under test: two header injections; subclasses override. */
protected List<RestTestTransform<?>> getTransformations() {
    List<RestTestTransform<?>> transformations = new ArrayList<>();
    transformations.add(new InjectHeaders(headers1, Collections.emptySet()));
    transformations.add(new InjectHeaders(headers2, Collections.emptySet()));
    return transformations;
}
/**
 * Loads every YAML document from the classpath resource at {@code relativePath}
 * as an {@link ObjectNode}. Performs file I/O; throws if the resource is missing.
 */
protected List<ObjectNode> getTests(String relativePath) throws Exception {
    File testFile = new File(getClass().getResource(relativePath).toURI());
    YAMLParser yamlParser = YAML_FACTORY.createParser(testFile);
    return READER.<ObjectNode>readValues(yamlParser).readAll();
}
/** Asserts the "teardown" section is an array containing a well-formed skip node. */
protected void assertTeardown(ObjectNode teardownNode) {
    assertThat(teardownNode.get("teardown"), CoreMatchers.instanceOf(ArrayNode.class));
    ObjectNode skipNode = getSkipNode((ArrayNode) teardownNode.get("teardown"));
    assertSkipNode(skipNode);
}
/** Asserts the "setup" section is an array containing a well-formed skip node. */
protected void assertSetup(ObjectNode setupNode) {
    assertThat(setupNode.get("setup"), CoreMatchers.instanceOf(ArrayNode.class));
    ObjectNode skipNode = getSkipNode((ArrayNode) setupNode.get("setup"));
    assertSkipNode(skipNode);
}
protected void assertSkipNode(ObjectNode skipNode) {
assertThat(skipNode, CoreMatchers.notNullValue());
List<String> featureValues = new ArrayList<>();
if (skipNode.get("features").isArray()) {
assertThat(skipNode.get("features"), CoreMatchers.instanceOf(ArrayNode.class));
ArrayNode features = (ArrayNode) skipNode.get("features");
features.forEach(x -> {
if (x.isTextual()) {
featureValues.add(x.asText());
}
});
} else {
featureValues.add(skipNode.get("features").asText());
}
assertEquals(featureValues.stream().distinct().count(), featureValues.size());
}
protected ObjectNode getSkipNode(ArrayNode setupNodeValue) {
Iterator<JsonNode> setupIt = setupNodeValue.elements();
while (setupIt.hasNext()) {
JsonNode arrayEntry = setupIt.next();
if (arrayEntry.isObject()) {
ObjectNode skipCandidate = (ObjectNode) arrayEntry;
if (skipCandidate.get("skip") != null) {
ObjectNode skipNode = (ObjectNode) skipCandidate.get("skip");
return skipNode;
}
}
}
return null;
}
// Convenience overload: checks every test (no test-name filter).
protected void validateBodyHasWarnings(String featureName, List<ObjectNode> tests, Collection<String> expectedWarnings) {
    validateBodyHasWarnings(featureName, null, tests, expectedWarnings);
}

// For each matching test (or all tests when testName is null), asserts that every
// "do" section carries exactly the expected warnings under featureName, in any order.
// Fails if no "do" section was actually inspected (guards against vacuous passes).
protected void validateBodyHasWarnings(
    String featureName,
    String testName,
    List<ObjectNode> tests,
    Collection<String> expectedWarnings
) {
    AtomicBoolean actuallyDidSomething = new AtomicBoolean(false);
    tests.forEach(test -> {
        Iterator<Map.Entry<String, JsonNode>> testsIterator = test.fields();
        while (testsIterator.hasNext()) {
            Map.Entry<String, JsonNode> testObject = testsIterator.next();
            assertThat(testObject.getValue(), CoreMatchers.instanceOf(ArrayNode.class));
            if (testName == null || testName.equals(testObject.getKey())) {
                ArrayNode testBody = (ArrayNode) testObject.getValue();
                testBody.forEach(arrayObject -> {
                    assertThat(arrayObject, CoreMatchers.instanceOf(ObjectNode.class));
                    ObjectNode testSection = (ObjectNode) arrayObject;
                    if (testSection.get("do") != null) {
                        ObjectNode doSection = (ObjectNode) testSection.get("do");
                        assertThat(doSection.get(featureName), CoreMatchers.notNullValue());
                        ArrayNode warningsNode = (ArrayNode) doSection.get(featureName);
                        List<String> actual = new ArrayList<>();
                        warningsNode.forEach(node -> actual.add(node.asText()));
                        String[] expected = expectedWarnings.toArray(new String[] {});
                        assertThat(actual, Matchers.containsInAnyOrder(expected));
                        actuallyDidSomething.set(true);
                    }
                });
            }
        }
    });
    assertTrue(actuallyDidSomething.get());
}
// Convenience overload: checks every test (no test-name filter).
protected void validateBodyHasNoWarnings(String featureName, List<ObjectNode> tests) {
    validateBodyHasNoWarnings(featureName, null, tests);
}

// Asserts that no "do" section of the matching tests carries featureName at all.
// Fails if no "do" section was inspected (guards against vacuous passes).
protected void validateBodyHasNoWarnings(String featureName, String testName, List<ObjectNode> tests) {
    AtomicBoolean actuallyDidSomething = new AtomicBoolean(false);
    tests.forEach(test -> {
        Iterator<Map.Entry<String, JsonNode>> testsIterator = test.fields();
        while (testsIterator.hasNext()) {
            Map.Entry<String, JsonNode> testObject = testsIterator.next();
            if (testName == null || testName.equals(testObject.getKey())) {
                assertThat(testObject.getValue(), CoreMatchers.instanceOf(ArrayNode.class));
                ArrayNode testBody = (ArrayNode) testObject.getValue();
                testBody.forEach(arrayObject -> {
                    assertThat(arrayObject, CoreMatchers.instanceOf(ObjectNode.class));
                    ObjectNode testSection = (ObjectNode) arrayObject;
                    if (testSection.get("do") != null) {
                        ObjectNode doSection = (ObjectNode) testSection.get("do");
                        assertThat(doSection.get(featureName), CoreMatchers.nullValue());
                        actuallyDidSomething.set(true);
                    }
                });
            }
        }
    });
    assertTrue(actuallyDidSomething.get());
}

// Asserts that every "do" section carries featureName, but with an EMPTY value
// (present-but-empty, as opposed to absent in validateBodyHasNoWarnings above).
protected void validateBodyHasEmptyNoWarnings(String featureName, List<ObjectNode> tests) {
    tests.forEach(test -> {
        Iterator<Map.Entry<String, JsonNode>> testsIterator = test.fields();
        while (testsIterator.hasNext()) {
            Map.Entry<String, JsonNode> testObject = testsIterator.next();
            assertThat(testObject.getValue(), CoreMatchers.instanceOf(ArrayNode.class));
            ArrayNode testBody = (ArrayNode) testObject.getValue();
            testBody.forEach(arrayObject -> {
                assertThat(arrayObject, CoreMatchers.instanceOf(ObjectNode.class));
                ObjectNode testSection = (ObjectNode) arrayObject;
                if (testSection.get("do") != null) {
                    ObjectNode doSection = (ObjectNode) testSection.get("do");
                    assertThat(doSection.get(featureName), CoreMatchers.notNullValue());
                    ArrayNode warningsNode = (ArrayNode) doSection.get(featureName);
                    assertTrue(warningsNode.isEmpty());
                }
            });
        }
    });
}
// Asserts that every "do" section has a "headers" object containing exactly the
// expected key/value pairs (a LongAdder counts assertions to prove all ran).
protected void validateBodyHasHeaders(List<ObjectNode> tests, Map<String, String> expectedHeaders) {
    tests.forEach(test -> {
        Iterator<Map.Entry<String, JsonNode>> testsIterator = test.fields();
        while (testsIterator.hasNext()) {
            Map.Entry<String, JsonNode> testObject = testsIterator.next();
            assertThat(testObject.getValue(), CoreMatchers.instanceOf(ArrayNode.class));
            ArrayNode testBody = (ArrayNode) testObject.getValue();
            testBody.forEach(arrayObject -> {
                assertThat(arrayObject, CoreMatchers.instanceOf(ObjectNode.class));
                ObjectNode testSection = (ObjectNode) arrayObject;
                if (testSection.get("do") != null) {
                    ObjectNode doSection = (ObjectNode) testSection.get("do");
                    assertThat(doSection.get("headers"), CoreMatchers.notNullValue());
                    ObjectNode headersNode = (ObjectNode) doSection.get("headers");
                    LongAdder assertions = new LongAdder();
                    expectedHeaders.forEach((k, v) -> {
                        assertThat(headersNode.get(k), CoreMatchers.notNullValue());
                        TextNode textNode = (TextNode) headersNode.get(k);
                        assertThat(textNode.asText(), CoreMatchers.equalTo(v));
                        assertions.increment();
                    });
                    assertThat(assertions.intValue(), CoreMatchers.equalTo(expectedHeaders.size()));
                }
            });
        }
    });
}
// Verifies the expected document layout (setup, teardown, "First test" ...
// "Last test") and that neither setup nor teardown contains a "match" object.
protected void validateSetupAndTearDownForMatchTests(List<ObjectNode> tests) {
    ObjectNode setUp = tests.get(0);
    assertThat(setUp.get("setup"), CoreMatchers.notNullValue());
    ObjectNode tearDown = tests.get(1);
    assertThat(tearDown.get("teardown"), CoreMatchers.notNullValue());
    ObjectNode firstTest = tests.get(2);
    assertThat(firstTest.get("First test"), CoreMatchers.notNullValue());
    ObjectNode lastTest = tests.get(tests.size() - 1);
    assertThat(lastTest.get("Last test"), CoreMatchers.notNullValue());
    // setup: must not contain any "match" assertion
    JsonNode setup = setUp.get("setup");
    assertThat(setup, CoreMatchers.instanceOf(ArrayNode.class));
    ArrayNode setupParentArray = (ArrayNode) setup;
    AtomicBoolean setUpHasMatchObject = new AtomicBoolean(false);
    setupParentArray.elements().forEachRemaining(node -> {
        assertThat(node, CoreMatchers.instanceOf(ObjectNode.class));
        ObjectNode childObject = (ObjectNode) node;
        JsonNode matchObject = childObject.get("match");
        if (matchObject != null) {
            setUpHasMatchObject.set(true);
        }
    });
    assertFalse(setUpHasMatchObject.get());
    // teardown: must not contain any "match" assertion either
    JsonNode teardown = tearDown.get("teardown");
    assertThat(teardown, CoreMatchers.instanceOf(ArrayNode.class));
    ArrayNode teardownParentArray = (ArrayNode) teardown;
    AtomicBoolean teardownHasMatchObject = new AtomicBoolean(false);
    teardownParentArray.elements().forEachRemaining(node -> {
        assertThat(node, CoreMatchers.instanceOf(ObjectNode.class));
        ObjectNode childObject = (ObjectNode) node;
        JsonNode matchObject = childObject.get("match");
        if (matchObject != null) {
            teardownHasMatchObject.set(true);
        }
    });
    assertFalse(teardownHasMatchObject.get());
}
// Toggle for the manual-debug printer below; subclasses flip this locally.
protected boolean getHumanDebug() {
    return false;
}

// only to help manually debug
// Dumps the (transformed) tests to stdout as a YAML/JSON sequence when
// getHumanDebug() is true; IOExceptions are printed, not propagated.
protected void printTest(String testName, List<ObjectNode> tests) {
    if (getHumanDebug()) {
        System.out.println("\n************* " + testName + " *************");
        try (SequenceWriter sequenceWriter = MAPPER.writer().writeValues(System.out)) {
            for (ObjectNode transformedTest : tests) {
                sequenceWriter.write(transformedTest);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
}
|
java
|
github
|
https://github.com/elastic/elasticsearch
|
build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/test/rest/transform/TransformTests.java
|
#!/usr/bin/env python
''' Python DB API 2.0 driver compliance unit test suite.
This software is Public Domain and may be used without restrictions.
"Now we have booze and barflies entering the discussion, plus rumours of
DBAs on drugs... and I won't tell you what flashes through my mind each
time I read the subject line with 'Anal Compliance' in it. All around
this is turning out to be a thoroughly unwholesome unit test."
-- Ian Bicking
'''
from __future__ import absolute_import
__rcs_id__ = '$Id: dbapi20.py,v 1.11 2005/01/02 02:41:01 zenzen Exp $'
__version__ = '$Revision: 1.12 $'[11:-2]
__author__ = 'Stuart Bishop <stuart@stuartbishop.net>'
import time
import sys
from six.moves import range
from impala.tests.compat import unittest
# Revision 1.12 2009/02/06 03:35:11 kf7xm
# Tested okay with Python 3.0, includes last minute patches from Mark H.
#
# Revision 1.1.1.1.2.1 2008/09/20 19:54:59 rupole
# Include latest changes from main branch
# Updates for py3k
#
# Revision 1.11 2005/01/02 02:41:01 zenzen
# Update author email address
#
# Revision 1.10 2003/10/09 03:14:14 zenzen
# Add test for DB API 2.0 optional extension, where database exceptions
# are exposed as attributes on the Connection object.
#
# Revision 1.9 2003/08/13 01:16:36 zenzen
# Minor tweak from Stefan Fleiter
#
# Revision 1.8 2003/04/10 00:13:25 zenzen
# Changes, as per suggestions by M.-A. Lemburg
# - Add a table prefix, to ensure namespace collisions can always be avoided
#
# Revision 1.7 2003/02/26 23:33:37 zenzen
# Break out DDL into helper functions, as per request by David Rushby
#
# Revision 1.6 2003/02/21 03:04:33 zenzen
# Stuff from Henrik Ekelund:
# added test_None
# added test_nextset & hooks
#
# Revision 1.5 2003/02/17 22:08:43 zenzen
# Implement suggestions and code from Henrik Eklund - test that cursor.arraysize
# defaults to 1 & generic cursor.callproc test added
#
# Revision 1.4 2003/02/15 00:16:33 zenzen
# Changes, as per suggestions and bug reports by M.-A. Lemburg,
# Matthew T. Kromer, Federico Di Gregorio and Daniel Dittmar
# - Class renamed
# - Now a subclass of TestCase, to avoid requiring the driver stub
# to use multiple inheritance
# - Reversed the polarity of buggy test in test_description
# - Test exception hierarchy correctly
# - self.populate is now self._populate(), so if a driver stub
#   overrides self.ddl1 this change propagates
# - VARCHAR columns now have a width, which will hopefully make the
#   DDL even more portable (this will be reversed if it causes more problems)
# - cursor.rowcount being checked after various execute and fetchXXX methods
# - Check for fetchall and fetchmany returning empty lists after results
# are exhausted (already checking for empty lists if select retrieved
# nothing
# - Fix bugs in test_setoutputsize_basic and test_setinputsizes
#
def str2bytes(sval):
    """Return *sval* encoded as latin-1 bytes, on both Python 2 and 3.

    On Python 2 a byte-oriented ``str`` is first decoded via latin-1 so
    the subsequent ``encode`` is applied to a unicode object.
    """
    needs_decode = sys.version_info < (3, 0) and isinstance(sval, str)
    text = sval.decode("latin1") if needs_decode else sval
    return text.encode("latin1")
class DatabaseAPI20Test(unittest.TestCase):
    ''' Test a database self.driver for DB API 2.0 compatibility.
        This implementation tests Gadfly, but the TestCase
        is structured so that other self.drivers can subclass this
        test case to ensure compiliance with the DB-API. It is
        expected that this TestCase may be expanded in the future
        if ambiguities or edge conditions are discovered.
        The 'Optional Extensions' are not yet being tested.
        self.drivers should subclass this test, overriding setUp, tearDown,
        self.driver, connect_args and connect_kw_args. Class specification
        should be as follows:
        import dbapi20
        class mytest(dbapi20.DatabaseAPI20Test):
           [...]
        Don't 'import DatabaseAPI20Test from dbapi20', or you will
        confuse the unit tester - just 'import dbapi20'.
    '''

    # The self.driver module. This should be the module where the 'connect'
    # method is to be found
    driver = None
    connect_args = () # List of arguments to pass to connect
    connect_kw_args = {} # Keyword arguments for connect
    table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables

    # DDL/cleanup statements derived from the table prefix above.
    ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
    ddl2 = 'create table %sbarflys (name varchar(20))' % table_prefix
    xddl1 = 'drop table %sbooze' % table_prefix
    xddl2 = 'drop table %sbarflys' % table_prefix

    lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase

    # Some drivers may need to override these helpers, for example adding
    # a 'commit' after the execute.
    def executeDDL1(self, cursor):
        # Create the "booze" test table.
        cursor.execute(self.ddl1)

    def executeDDL2(self, cursor):
        # Create the "barflys" test table.
        cursor.execute(self.ddl2)
    def setUp(self):
        ''' self.drivers should override this method to perform required setup
            if any is necessary, such as creating the database.
        '''
        pass

    def tearDown(self):
        ''' self.drivers should override this method to perform required cleanup
            if any is necessary, such as deleting the test database.
            The default drops the tables that may be created.
        '''
        con = self._connect()
        try:
            cur = con.cursor()
            for ddl in (self.xddl1,self.xddl2):
                try:
                    cur.execute(ddl)
                    con.commit()
                except self.driver.Error:
                    # Assume table didn't exist. Other tests will check if
                    # execute is busted.
                    pass
        finally:
            con.close()

    def _connect(self):
        # Open a connection with the configured driver/args; fail with a clear
        # message if the driver module lacks a connect() callable.
        try:
            return self.driver.connect(
                *self.connect_args,**self.connect_kw_args
                )
        except AttributeError:
            self.fail("No connect method found in self.driver module")
    def test_connect(self):
        # Smoke test: a connection can be opened and closed.
        con = self._connect()
        con.close()

    def test_apilevel(self):
        try:
            # Must exist
            apilevel = self.driver.apilevel
            # Must equal 2.0
            self.assertEqual(apilevel,'2.0')
        except AttributeError:
            self.fail("Driver doesn't define apilevel")

    def test_threadsafety(self):
        try:
            # Must exist
            threadsafety = self.driver.threadsafety
            # Must be a valid value (see PEP 249 threadsafety levels 0-3)
            self.failUnless(threadsafety in (0,1,2,3))
        except AttributeError:
            self.fail("Driver doesn't define threadsafety")

    def test_paramstyle(self):
        try:
            # Must exist
            paramstyle = self.driver.paramstyle
            # Must be a valid value
            self.failUnless(paramstyle in (
                'qmark','numeric','named','format','pyformat'
                ))
        except AttributeError:
            self.fail("Driver doesn't define paramstyle")
def test_Exceptions(self):
# Make sure required exceptions exist, and are in the
# defined heirarchy.
if sys.version[0] == '3': #under Python 3 StardardError no longer exists
self.failUnless(issubclass(self.driver.Warning,Exception))
self.failUnless(issubclass(self.driver.Error,Exception))
else:
self.failUnless(issubclass(self.driver.Warning,Exception))
self.failUnless(issubclass(self.driver.Error,Exception))
self.failUnless(
issubclass(self.driver.InterfaceError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.DatabaseError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.OperationalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.IntegrityError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.InternalError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.ProgrammingError,self.driver.Error)
)
self.failUnless(
issubclass(self.driver.NotSupportedError,self.driver.Error)
)
    def test_ExceptionsAsConnectionAttributes(self):
        # OPTIONAL EXTENSION
        # Test for the optional DB API 2.0 extension, where the exceptions
        # are exposed as attributes on the Connection object
        # I figure this optional extension will be implemented by any
        # driver author who is using this test suite, so it is enabled
        # by default.
        # NOTE(review): this connection is never closed; presumably relies
        # on garbage collection — confirm that is acceptable for the driver.
        con = self._connect()
        drv = self.driver
        self.failUnless(con.Warning is drv.Warning)
        self.failUnless(con.Error is drv.Error)
        self.failUnless(con.InterfaceError is drv.InterfaceError)
        self.failUnless(con.DatabaseError is drv.DatabaseError)
        self.failUnless(con.OperationalError is drv.OperationalError)
        self.failUnless(con.IntegrityError is drv.IntegrityError)
        self.failUnless(con.InternalError is drv.InternalError)
        self.failUnless(con.ProgrammingError is drv.ProgrammingError)
        self.failUnless(con.NotSupportedError is drv.NotSupportedError)

    def test_commit(self):
        con = self._connect()
        try:
            # Commit must work, even if it doesn't do anything
            con.commit()
        finally:
            con.close()

    def test_rollback(self):
        con = self._connect()
        # If rollback is defined, it should either work or throw
        # the documented exception
        if hasattr(con,'rollback'):
            try:
                con.rollback()
            except self.driver.NotSupportedError:
                pass
    def test_cursor(self):
        # A cursor can be created from an open connection.
        con = self._connect()
        try:
            cur = con.cursor()
        finally:
            con.close()

    def test_cursor_isolation(self):
        con = self._connect()
        try:
            # Make sure cursors created from the same connection have
            # the documented transaction isolation level
            cur1 = con.cursor()
            cur2 = con.cursor()
            self.executeDDL1(cur1)
            cur1.execute("insert into %sbooze values ('Victoria Bitter')" % (
                self.table_prefix
                ))
            cur2.execute("select name from %sbooze" % self.table_prefix)
            booze = cur2.fetchall()
            self.assertEqual(len(booze),1)
            self.assertEqual(len(booze[0]),1)
            self.assertEqual(booze[0][0],'Victoria Bitter')
        finally:
            con.close()

    def test_description(self):
        # cursor.description must be None after DDL and a list of 7-tuples
        # (name, type_code, ...) after a select.
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            self.assertEqual(cur.description,None,
                'cursor.description should be none after executing a '
                'statement that can return no rows (such as DDL)'
                )
            cur.execute('select name from %sbooze' % self.table_prefix)
            self.assertEqual(len(cur.description),1,
                'cursor.description describes too many columns'
                )
            self.assertEqual(len(cur.description[0]),7,
                'cursor.description[x] tuples must have 7 elements'
                )
            self.assertEqual(cur.description[0][0].lower(),'name',
                'cursor.description[x][0] must return column name'
                )
            self.assertEqual(cur.description[0][1],self.driver.STRING,
                'cursor.description[x][1] must return column type. Got %r'
                    % cur.description[0][1]
                )
            # Make sure self.description gets reset
            self.executeDDL2(cur)
            self.assertEqual(cur.description,None,
                'cursor.description not being set to None when executing '
                'no-result statements (eg. DDL)'
                )
        finally:
            con.close()
    def test_rowcount(self):
        # cursor.rowcount must be -1 after no-result statements and either
        # -1 or the affected/returned row count after insert/select.
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            self.assertEqual(cur.rowcount,-1,
                'cursor.rowcount should be -1 after executing no-result '
                'statements'
                )
            cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
                self.table_prefix
                ))
            self.failUnless(cur.rowcount in (-1,1),
                'cursor.rowcount should == number or rows inserted, or '
                'set to -1 after executing an insert statement'
                )
            cur.execute("select name from %sbooze" % self.table_prefix)
            self.failUnless(cur.rowcount in (-1,1),
                'cursor.rowcount should == number of rows returned, or '
                'set to -1 after executing a select statement'
                )
            self.executeDDL2(cur)
            self.assertEqual(cur.rowcount,-1,
                'cursor.rowcount not being reset to -1 after executing '
                'no-result statements'
                )
        finally:
            con.close()

    # Stored procedure used by test_callproc (duplicates 'lowerfunc' above).
    lower_func = 'lower'

    def test_callproc(self):
        # callproc is optional; only exercised when the cursor provides it.
        con = self._connect()
        try:
            cur = con.cursor()
            if self.lower_func and hasattr(cur,'callproc'):
                r = cur.callproc(self.lower_func,('FOO',))
                self.assertEqual(len(r),1)
                self.assertEqual(r[0],'FOO')
                r = cur.fetchall()
                self.assertEqual(len(r),1,'callproc produced no result set')
                self.assertEqual(len(r[0]),1,
                    'callproc produced invalid result set'
                    )
                self.assertEqual(r[0][0],'foo',
                    'callproc produced invalid results'
                    )
        finally:
            con.close()
    def test_close(self):
        con = self._connect()
        try:
            cur = con.cursor()
        finally:
            con.close()
        # cursor.execute should raise an Error if called after connection
        # closed
        self.assertRaises(self.driver.Error,self.executeDDL1,cur)
        # laserson note: the next to assertions are not clear to me from PEP 249
        # so I am leaving them out
        # connection.commit should raise an Error if called after connection'
        # closed.'
        # self.assertRaises(self.driver.Error,con.commit)
        # connection.close should raise an Error if called more than once
        # self.assertRaises(self.driver.Error,con.close)

    def test_execute(self):
        con = self._connect()
        try:
            cur = con.cursor()
            self._paraminsert(cur)
        finally:
            con.close()

    def _paraminsert(self,cur):
        # Inserts one literal row and one parameterized row using whichever
        # paramstyle the driver declares, then verifies both come back.
        self.executeDDL1(cur)
        cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
            self.table_prefix
            ))
        self.failUnless(cur.rowcount in (-1,1))
        if self.driver.paramstyle == 'qmark':
            cur.execute(
                'insert into %sbooze values (?)' % self.table_prefix,
                ("Cooper's",)
                )
        elif self.driver.paramstyle == 'numeric':
            cur.execute(
                'insert into %sbooze values (:1)' % self.table_prefix,
                ("Cooper's",)
                )
        elif self.driver.paramstyle == 'named':
            cur.execute(
                'insert into %sbooze values (:beer)' % self.table_prefix,
                {'beer':"Cooper's"}
                )
        elif self.driver.paramstyle == 'format':
            cur.execute(
                'insert into %sbooze values (%%s)' % self.table_prefix,
                ("Cooper's",)
                )
        elif self.driver.paramstyle == 'pyformat':
            cur.execute(
                'insert into %sbooze values (%%(beer)s)' % self.table_prefix,
                {'beer':"Cooper's"}
                )
        else:
            self.fail('Invalid paramstyle')
        self.failUnless(cur.rowcount in (-1,1))
        cur.execute('select name from %sbooze' % self.table_prefix)
        res = cur.fetchall()
        self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
        beers = [res[0][0],res[1][0]]
        beers.sort()
        self.assertEqual(beers[0],"Cooper's",
            'cursor.fetchall retrieved incorrect data, or data inserted '
            'incorrectly'
            )
        self.assertEqual(beers[1],"Victoria Bitter",
            'cursor.fetchall retrieved incorrect data, or data inserted '
            'incorrectly'
            )
    def test_executemany(self):
        # executemany with two parameter rows, in the driver's paramstyle.
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            largs = [ ("Cooper's",) , ("Boag's",) ]
            margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
            if self.driver.paramstyle == 'qmark':
                cur.executemany(
                    'insert into %sbooze values (?)' % self.table_prefix,
                    largs
                    )
            elif self.driver.paramstyle == 'numeric':
                cur.executemany(
                    'insert into %sbooze values (:1)' % self.table_prefix,
                    largs
                    )
            elif self.driver.paramstyle == 'named':
                cur.executemany(
                    'insert into %sbooze values (:beer)' % self.table_prefix,
                    margs
                    )
            elif self.driver.paramstyle == 'format':
                cur.executemany(
                    'insert into %sbooze values (%%s)' % self.table_prefix,
                    largs
                    )
            elif self.driver.paramstyle == 'pyformat':
                cur.executemany(
                    'insert into %sbooze values (%%(beer)s)' % (
                        self.table_prefix
                        ),
                    margs
                    )
            else:
                self.fail('Unknown paramstyle')
            self.failUnless(cur.rowcount in (-1,2),
                'insert using cursor.executemany set cursor.rowcount to '
                'incorrect value %r' % cur.rowcount
                )
            cur.execute('select name from %sbooze' % self.table_prefix)
            res = cur.fetchall()
            self.assertEqual(len(res),2,
                'cursor.fetchall retrieved incorrect number of rows'
                )
            beers = [res[0][0],res[1][0]]
            beers.sort()
            self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
            self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
        finally:
            con.close()

    def test_fetchone(self):
        con = self._connect()
        try:
            cur = con.cursor()
            # cursor.fetchone should raise an Error if called before
            # executing a select-type query
            self.assertRaises(self.driver.Error,cur.fetchone)
            # cursor.fetchone should raise an Error if called after
            # executing a query that cannnot return rows
            self.executeDDL1(cur)
            self.assertRaises(self.driver.Error,cur.fetchone)
            cur.execute('select name from %sbooze' % self.table_prefix)
            self.assertEqual(cur.fetchone(),None,
                'cursor.fetchone should return None if a query retrieves '
                'no rows'
                )
            self.failUnless(cur.rowcount in (-1,0))
            # cursor.fetchone should raise an Error if called after
            # executing a query that cannnot return rows
            cur.execute("insert into %sbooze values ('Victoria Bitter')" % (
                self.table_prefix
                ))
            self.assertRaises(self.driver.Error,cur.fetchone)
            cur.execute('select name from %sbooze' % self.table_prefix)
            r = cur.fetchone()
            self.assertEqual(len(r),1,
                'cursor.fetchone should have retrieved a single row'
                )
            self.assertEqual(r[0],'Victoria Bitter',
                'cursor.fetchone retrieved incorrect data'
                )
            self.assertEqual(cur.fetchone(),None,
                'cursor.fetchone should return None if no more rows available'
                )
            self.failUnless(cur.rowcount in (-1,1))
        finally:
            con.close()
    # Fixture rows (already sorted) shared by the fetch tests below.
    samples = [
        'Carlton Cold',
        'Carlton Draft',
        'Mountain Goat',
        'Redback',
        'Victoria Bitter',
        'XXXX'
        ]

    def _populate(self):
        ''' Return a list of sql commands to setup the DB for the fetch
            tests.
        '''
        populate = [
            "insert into %sbooze values ('%s')" % (self.table_prefix,s)
            for s in self.samples
            ]
        return populate
def test_fetchmany(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchmany should raise an Error if called without
#issuing a query
self.assertRaises(self.driver.Error,cur.fetchmany,4)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany()
self.assertEqual(len(r),1,
'cursor.fetchmany retrieved incorrect number of rows, '
'default of arraysize is one.'
)
cur.arraysize=10
r = cur.fetchmany(3) # Should get 3 rows
self.assertEqual(len(r),3,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should get 2 more
self.assertEqual(len(r),2,
'cursor.fetchmany retrieved incorrect number of rows'
)
r = cur.fetchmany(4) # Should be an empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence after '
'results are exhausted'
)
self.failUnless(cur.rowcount in (-1,6))
# Same as above, using cursor.arraysize
cur.arraysize=4
cur.execute('select name from %sbooze' % self.table_prefix)
r = cur.fetchmany() # Should get 4 rows
self.assertEqual(len(r),4,
'cursor.arraysize not being honoured by fetchmany'
)
r = cur.fetchmany() # Should get 2 more
self.assertEqual(len(r),2)
r = cur.fetchmany() # Should be an empty sequence
self.assertEqual(len(r),0)
self.failUnless(cur.rowcount in (-1,6))
cur.arraysize=6
cur.execute('select name from %sbooze' % self.table_prefix)
rows = cur.fetchmany() # Should get all rows
self.failUnless(cur.rowcount in (-1,6))
self.assertEqual(len(rows),6)
self.assertEqual(len(rows),6)
rows = [r[0] for r in rows]
rows.sort()
# Make sure we get the right data back out
for i in range(0,6):
self.assertEqual(rows[i],self.samples[i],
'incorrect data retrieved by cursor.fetchmany'
)
rows = cur.fetchmany() # Should return an empty list
self.assertEqual(len(rows),0,
'cursor.fetchmany should return an empty sequence if '
'called after the whole result set has been fetched'
)
self.failUnless(cur.rowcount in (-1,6))
self.executeDDL2(cur)
cur.execute('select name from %sbarflys' % self.table_prefix)
r = cur.fetchmany() # Should get empty sequence
self.assertEqual(len(r),0,
'cursor.fetchmany should return an empty sequence if '
'query retrieved no rows'
)
self.failUnless(cur.rowcount in (-1,0))
finally:
con.close()
    def test_fetchall(self):
        con = self._connect()
        try:
            cur = con.cursor()
            # cursor.fetchall should raise an Error if called
            # without executing a query that may return rows (such
            # as a select)
            self.assertRaises(self.driver.Error, cur.fetchall)
            self.executeDDL1(cur)
            for sql in self._populate():
                cur.execute(sql)
            # cursor.fetchall should raise an Error if called
            # after executing a a statement that cannot return rows
            self.assertRaises(self.driver.Error,cur.fetchall)
            cur.execute('select name from %sbooze' % self.table_prefix)
            rows = cur.fetchall()
            self.failUnless(cur.rowcount in (-1,len(self.samples)))
            self.assertEqual(len(rows),len(self.samples),
                'cursor.fetchall did not retrieve all rows'
                )
            rows = [r[0] for r in rows]
            rows.sort()
            for i in range(0,len(self.samples)):
                self.assertEqual(rows[i],self.samples[i],
                    'cursor.fetchall retrieved incorrect rows'
                    )
            rows = cur.fetchall()
            self.assertEqual(
                len(rows),0,
                'cursor.fetchall should return an empty list if called '
                'after the whole result set has been fetched'
                )
            self.failUnless(cur.rowcount in (-1,len(self.samples)))
            self.executeDDL2(cur)
            cur.execute('select name from %sbarflys' % self.table_prefix)
            rows = cur.fetchall()
            self.failUnless(cur.rowcount in (-1,0))
            self.assertEqual(len(rows),0,
                'cursor.fetchall should return an empty list if '
                'a select query returns no rows'
                )
        finally:
            con.close()

    def test_mixedfetch(self):
        # Interleave fetchone/fetchmany/fetchall over one result set and
        # verify all six sample rows are retrieved exactly once.
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            for sql in self._populate():
                cur.execute(sql)
            cur.execute('select name from %sbooze' % self.table_prefix)
            rows1 = cur.fetchone()
            rows23 = cur.fetchmany(2)
            rows4 = cur.fetchone()
            rows56 = cur.fetchall()
            self.failUnless(cur.rowcount in (-1,6))
            self.assertEqual(len(rows23),2,
                'fetchmany returned incorrect number of rows'
                )
            self.assertEqual(len(rows56),2,
                'fetchall returned incorrect number of rows'
                )
            rows = [rows1[0]]
            rows.extend([rows23[0][0],rows23[1][0]])
            rows.append(rows4[0])
            rows.extend([rows56[0][0],rows56[1][0]])
            rows.sort()
            for i in range(0,len(self.samples)):
                self.assertEqual(rows[i],self.samples[i],
                    'incorrect data retrieved or inserted'
                    )
        finally:
            con.close()
    def help_nextset_setUp(self,cur):
        ''' Should create a procedure called deleteme
            that returns two result sets, first the
            number of rows in booze then "name from booze"
        '''
        raise NotImplementedError('Helper not implemented')
        #sql="""
        #    create procedure deleteme as
        #    begin
        #        select count(*) from booze
        #        select name from booze
        #    end
        #"""
        #cur.execute(sql)

    def help_nextset_tearDown(self,cur):
        'If cleaning up is needed after nextSetTest'
        raise NotImplementedError('Helper not implemented')
        #cur.execute("drop procedure deleteme")

    # NOTE(review): this test_nextset is shadowed by the second definition of
    # the same name below, so this body never runs as-is; drivers supporting
    # nextset() should override test_nextset with an implementation like this.
    def test_nextset(self):
        con = self._connect()
        try:
            cur = con.cursor()
            if not hasattr(cur,'nextset'):
                return
            try:
                self.executeDDL1(cur)
                sql=self._populate()
                for sql in self._populate():
                    cur.execute(sql)
                self.help_nextset_setUp(cur)
                cur.callproc('deleteme')
                numberofrows=cur.fetchone()
                assert numberofrows[0]== len(self.samples)
                assert cur.nextset()
                names=cur.fetchall()
                assert len(names) == len(self.samples)
                s=cur.nextset()
                assert s == None,'No more return sets, should return None'
            finally:
                self.help_nextset_tearDown(cur)
        finally:
            con.close()

    def test_nextset(self):
        # Replaces the implementation above; drivers must supply their own.
        raise NotImplementedError('Drivers need to override this test')
    def test_arraysize(self):
        # Not much here - rest of the tests for this are in test_fetchmany
        con = self._connect()
        try:
            cur = con.cursor()
            self.failUnless(hasattr(cur,'arraysize'),
                'cursor.arraysize must be defined'
                )
        finally:
            con.close()

    def test_setinputsizes(self):
        con = self._connect()
        try:
            cur = con.cursor()
            cur.setinputsizes( (25,) )
            self._paraminsert(cur) # Make sure cursor still works
        finally:
            con.close()

    def test_setoutputsize_basic(self):
        # Basic test is to make sure setoutputsize doesn't blow up
        con = self._connect()
        try:
            cur = con.cursor()
            cur.setoutputsize(1000)
            cur.setoutputsize(2000,0)
            self._paraminsert(cur) # Make sure the cursor still works
        finally:
            con.close()

    def test_setoutputsize(self):
        # Real test for setoutputsize is driver dependant
        raise NotImplementedError('Driver needed to override this test')

    def test_None(self):
        # NULL round-trip: an inserted NULL must come back as Python None.
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            cur.execute('insert into %sbooze values (NULL)' % self.table_prefix)
            cur.execute('select name from %sbooze' % self.table_prefix)
            r = cur.fetchall()
            self.assertEqual(len(r),1)
            self.assertEqual(len(r[0]),1)
            self.assertEqual(r[0][0],None,'NULL value not returned as None')
        finally:
            con.close()
    # The following only verify that the module-level type constructors and
    # type objects required by the DB-API exist and accept the documented
    # arguments; equality of the constructed values is not mandated by PEP 249.
    def test_Date(self):
        d1 = self.driver.Date(2002,12,25)
        d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
        # Can we assume this? API doesn't specify, but it seems implied
        # self.assertEqual(str(d1),str(d2))

    def test_Time(self):
        t1 = self.driver.Time(13,45,30)
        t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
        # Can we assume this? API doesn't specify, but it seems implied
        # self.assertEqual(str(t1),str(t2))

    def test_Timestamp(self):
        t1 = self.driver.Timestamp(2002,12,25,13,45,30)
        t2 = self.driver.TimestampFromTicks(
            time.mktime((2002,12,25,13,45,30,0,0,0))
            )
        # Can we assume this? API doesn't specify, but it seems implied
        # self.assertEqual(str(t1),str(t2))

    def test_Binary(self):
        # Binary() must accept both non-empty and empty byte strings.
        b = self.driver.Binary(str2bytes('Something'))
        b = self.driver.Binary(str2bytes(''))

    def test_STRING(self):
        self.failUnless(hasattr(self.driver,'STRING'),
            'module.STRING must be defined'
            )

    def test_BINARY(self):
        self.failUnless(hasattr(self.driver,'BINARY'),
            'module.BINARY must be defined.'
            )

    def test_NUMBER(self):
        self.failUnless(hasattr(self.driver,'NUMBER'),
            'module.NUMBER must be defined.'
            )

    def test_DATETIME(self):
        self.failUnless(hasattr(self.driver,'DATETIME'),
            'module.DATETIME must be defined.'
            )

    def test_ROWID(self):
        self.failUnless(hasattr(self.driver,'ROWID'),
            'module.ROWID must be defined.'
            )
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/mediatek,mt8196-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Functional Clock Controller for MT8196
maintainers:
- Guangjie Song <guangjie.song@mediatek.com>
- Laura Nao <laura.nao@collabora.com>
description: |
The clock architecture in MediaTek SoCs is structured like below:
PLLs -->
dividers -->
muxes
-->
clock gate
The device nodes provide clock gate control in different IP blocks.
properties:
compatible:
items:
- enum:
- mediatek,mt8196-imp-iic-wrap-c
- mediatek,mt8196-imp-iic-wrap-e
- mediatek,mt8196-imp-iic-wrap-n
- mediatek,mt8196-imp-iic-wrap-w
- mediatek,mt8196-mdpsys0
- mediatek,mt8196-mdpsys1
- mediatek,mt8196-pericfg-ao
- mediatek,mt8196-pextp0cfg-ao
- mediatek,mt8196-pextp1cfg-ao
- mediatek,mt8196-ufscfg-ao
- mediatek,mt8196-vencsys
- mediatek,mt8196-vencsys-c1
- mediatek,mt8196-vencsys-c2
- mediatek,mt8196-vdecsys
- mediatek,mt8196-vdecsys-soc
- mediatek,mt8196-vdisp-ao
- const: syscon
reg:
maxItems: 1
'#clock-cells':
const: 1
'#reset-cells':
const: 1
description:
Reset lines for PEXTP0/1 and UFS blocks.
mediatek,hardware-voter:
$ref: /schemas/types.yaml#/definitions/phandle
description: |
Phandle to the "Hardware Voter" (HWV), as named in the vendor
documentation for MT8196/MT6991.
The HWV is a SoC-internal fixed-function MCU used to collect votes from
both the Application Processor and other remote processors within the SoC.
It is intended to transparently enable or disable hardware resources (such
as power domains or clocks) based on internal vote aggregation handled by
the MCU's internal state machine.
However, in practice, this design is incomplete. While the HWV performs
      some internal vote aggregation, software is still required to:
- Manually enable power supplies externally, if present and if required
- Manually enable parent clocks via direct MMIO writes to clock controllers
- Enable the FENC after the clock has been ungated via direct MMIO
writes to clock controllers
As such, the HWV behaves more like a hardware-managed clock reference
counter than a true voter. Furthermore, it is not a separate
controller. It merely serves as an alternative interface to the same
underlying clock or power controller. Actual control still requires
direct access to the controller's own MMIO register space, in
addition to writing to the HWV's MMIO region.
For this reason, a custom phandle is used here - drivers need to directly
access the HWV MMIO region in a syscon-like fashion, due to how the
hardware is wired. This differs from true hardware voting systems, which
typically do not require custom phandles and rely instead on generic APIs
(clocks, power domains, interconnects).
The name "hardware-voter" is retained to match vendor documentation, but
this should not be reused or misunderstood as a proper voting mechanism.
required:
- compatible
- reg
- '#clock-cells'
additionalProperties: false
examples:
- |
pericfg_ao: clock-controller@16640000 {
compatible = "mediatek,mt8196-pericfg-ao", "syscon";
reg = <0x16640000 0x1000>;
mediatek,hardware-voter = <&scp_hwv>;
#clock-cells = <1>;
};
- |
pextp0cfg_ao: clock-controller@169b0000 {
compatible = "mediatek,mt8196-pextp0cfg-ao", "syscon";
reg = <0x169b0000 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/clock/mediatek,mt8196-clock.yaml
|
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package local
import (
"log"
"sync"
"time"
"github.com/hashicorp/terraform/internal/schemarepo"
"github.com/hashicorp/terraform/internal/states"
"github.com/hashicorp/terraform/internal/states/statemgr"
"github.com/hashicorp/terraform/internal/terraform"
)
// StateHook is a hook that continuously updates the state by calling
// WriteState on a statemgr.Full.
type StateHook struct {
	terraform.NilHook
	// Mutex serializes PostStateUpdate/Stopping, which mutate
	// intermediatePersist and call into StateMgr.
	sync.Mutex

	// StateMgr receives every new state snapshot via WriteState.
	StateMgr statemgr.Writer

	// If PersistInterval is nonzero then for any new state update after
	// the duration has elapsed we'll try to persist a state snapshot
	// to the persistent backend too.
	// That's only possible if field Schemas is valid, because the
	// StateMgr.PersistState function for some backends needs schemas.
	PersistInterval time.Duration

	// Schemas are the schemas to use when persisting state due to
	// PersistInterval. This is ignored if PersistInterval is zero,
	// and PersistInterval is ignored if this is nil.
	Schemas *schemarepo.Schemas

	// intermediatePersist tracks when we last persisted and whether a
	// forced persist has been requested (see Stopping).
	intermediatePersist statemgr.IntermediateStatePersistInfo
}
var _ terraform.Hook = (*StateHook)(nil)
// PostStateUpdate writes every new state snapshot through to StateMgr and,
// when PersistInterval and Schemas are both configured, periodically
// persists a snapshot to the backing store as well.
func (h *StateHook) PostStateUpdate(new *states.State) (terraform.HookAction, error) {
	h.Lock()
	defer h.Unlock()

	h.intermediatePersist.RequestedPersistInterval = h.PersistInterval

	if h.intermediatePersist.LastPersist.IsZero() {
		// The first PostStateUpdate starts the clock for intermediate
		// calls to PersistState.
		h.intermediatePersist.LastPersist = time.Now()
	}

	if h.StateMgr != nil {
		// A failed write halts the walk: continuing could lose state.
		if err := h.StateMgr.WriteState(new); err != nil {
			return terraform.HookActionHalt, err
		}
		// Intermediate persistence only applies when the manager supports
		// it and we have both a nonzero interval and schemas to persist with.
		if mgrPersist, ok := h.StateMgr.(statemgr.Persister); ok && h.PersistInterval != 0 && h.Schemas != nil {
			if h.shouldPersist() {
				err := mgrPersist.PersistState(h.Schemas)
				if err != nil {
					return terraform.HookActionHalt, err
				}
				h.intermediatePersist.LastPersist = time.Now()
			} else {
				log.Printf("[DEBUG] State storage %T declined to persist a state snapshot", h.StateMgr)
			}
		}
	}

	return terraform.HookActionContinue, nil
}
// Stopping is called when Terraform has been asked to stop. It makes a
// best-effort attempt to persist the latest state snapshot immediately, in
// case a hard kill follows before the next scheduled persist.
func (h *StateHook) Stopping() {
	h.Lock()
	defer h.Unlock()

	// If Terraform has been asked to stop then that might mean that a hard
	// kill signal will follow shortly in case Terraform doesn't stop
	// quickly enough, and so we'll try to persist the latest state
	// snapshot in the hope that it'll give the user less recovery work to
	// do if they _do_ subsequently hard-kill Terraform during an apply.

	if mgrPersist, ok := h.StateMgr.(statemgr.Persister); ok && h.Schemas != nil {
		// While we're in the stopping phase we'll try to persist every
		// new state update to maximize every opportunity we get to avoid
		// losing track of objects that have been created or updated.
		// Terraform Core won't start any new operations after it's been
		// stopped, so at most we should see one more PostStateUpdate
		// call per already-active request.
		h.intermediatePersist.ForcePersist = true

		if h.shouldPersist() {
			err := mgrPersist.PersistState(h.Schemas)
			if err != nil {
				// This hook can't affect Terraform Core's ongoing behavior,
				// but it's a best effort thing anyway so we'll just emit a
				// log to aid with debugging.
				log.Printf("[ERROR] Failed to persist state after interruption: %s", err)
			}
		} else {
			log.Printf("[DEBUG] State storage %T declined to persist a state snapshot", h.StateMgr)
		}
	}
}
// shouldPersist reports whether an intermediate state snapshot should be
// persisted right now: the state manager's own policy wins when it
// implements IntermediateStateConditionalPersister, otherwise the
// package-default rule applies.
func (h *StateHook) shouldPersist() bool {
	info := &h.intermediatePersist
	cond, hasPolicy := h.StateMgr.(statemgr.IntermediateStateConditionalPersister)
	if !hasPolicy {
		return statemgr.DefaultIntermediateStatePersistRule(info)
	}
	return cond.ShouldPersistIntermediateState(info)
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/backend/local/hook_state.go
|
#!/usr/bin/env python
"""Adds whitelisted Friends and Sends Snaps to Story
Usage:
updateStory.py -u <username> [-p <password> -d <tmpdir> -sv] WHITELIST
Options:
-h Show usage
-u=<username> Username
  -p=<password>  Password (optional, will prompt if omitted)
-d=<tmpdir> Where to save the snaps [default: ./]
  -s             Save the snaps permanently in tmpdir
-v Verbose
"""
import os.path
import sys
from getpass import getpass
from docopt import docopt
from snapchat_republisher import sendSnapToStory, addFriends
from pysnap import Snapchat
def main():
    """Log in to Snapchat, befriend everyone on the whitelist, then
    republish each pending snap to the account's story.

    Exits with status 1 on a missing snap directory or failed login, and
    with status 0 once every pending snap has been processed.
    """
    arguments = docopt(__doc__)
    username = arguments['-u']
    # Prompt interactively rather than requiring the password on the
    # command line (where it would leak into shell history / ps output).
    if arguments['-p'] is None:
        password = getpass('Password:')
    else:
        password = arguments['-p']
    path = arguments['-d']
    save = arguments['-s']
    verbose = arguments['-v']
    whiteListFile = arguments['WHITELIST']
    # Validate the snap directory before logging in, so a bad -d fails fast.
    if not os.path.isdir(path):
        print('No such directory: {0}'.format(path))
        sys.exit(1)
    s = Snapchat()
    if verbose:
        print('Attempting to log in as {0}.'.format(username))
    if not s.login(username, password).get('logged'):
        # Fixed typo in user-facing message ("pasword" -> "password").
        print('Invalid username or password')
        sys.exit(1)
    if verbose:
        print('Attempting to open whitelist file at {0}.'.format(whiteListFile))
    with open(whiteListFile, 'r') as f:
        whitelist = [line.rstrip() for line in f]
    if verbose:
        # Fixed typo in user-facing message ("Succesfully").
        print('Successfully read whitelist and extracted {0} lines. Attempting to handle friends'.format(len(whitelist)))
    addFriends(s, whitelist, verbose)
    for snap in s.get_snaps():
        if verbose:
            print('Working with snap')
        sendSnapToStory(s, snap, path, save, verbose)
    sys.exit(0)
if __name__ == '__main__':
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import unittest
import apache_beam as beam
from apache_beam.testing import test_pipeline
class DirectPipelineResultTest(unittest.TestCase):

  def test_waiting_on_result_stops_executor_threads(self):
    # Snapshot the live threads, run a trivial pipeline to completion,
    # and verify the direct runner left no executor threads behind.
    threads_before = {t.ident for t in threading.enumerate()}
    pipeline = test_pipeline.TestPipeline()
    _ = pipeline | beam.Create([{'foo': 'bar'}])
    pipeline.run().wait_until_finish()
    threads_after = {t.ident for t in threading.enumerate()}
    self.assertEqual(len(threads_after - threads_before), 0)
if __name__ == '__main__':
unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
from cinderclient.tests.unit.fixture_data import base
# FIXME(jamielennox): use timeutils from oslo
FORMAT = '%Y-%m-%d %H:%M:%S'
class Fixture(base.Fixture):
    """Canned responses for the os-availability-zone API endpoints."""

    base_url = 'os-availability-zone'

    def setUp(self):
        """Register stubbed GET responses for the zone list and detail views."""
        super(Fixture, self).setUp()
        # Summary view: zone names and availability only, no host details.
        get_availability = {
            "availabilityZoneInfo": [
                {
                    "zoneName": "zone-1",
                    "zoneState": {"available": True},
                    "hosts": None,
                },
                {
                    "zoneName": "zone-2",
                    "zoneState": {"available": False},
                    "hosts": None,
                },
            ]
        }
        self.requests.register_uri('GET', self.url(), json=get_availability)

        # Timestamps differ by one second so tests can distinguish services.
        updated_1 = datetime(2012, 12, 26, 14, 45, 25, 0).strftime(FORMAT)
        updated_2 = datetime(2012, 12, 26, 14, 45, 24, 0).strftime(FORMAT)

        # Detail view: adds per-host service status and update timestamps,
        # plus the "internal" zone that only appears in detailed output.
        get_detail = {
            "availabilityZoneInfo": [
                {
                    "zoneName": "zone-1",
                    "zoneState": {"available": True},
                    "hosts": {
                        "fake_host-1": {
                            "cinder-volume": {
                                "active": True,
                                "available": True,
                                "updated_at": updated_1,
                            }
                        }
                    }
                },
                {
                    "zoneName": "internal",
                    "zoneState": {"available": True},
                    "hosts": {
                        "fake_host-1": {
                            "cinder-sched": {
                                "active": True,
                                "available": True,
                                "updated_at": updated_2,
                            }
                        }
                    }
                },
                {
                    "zoneName": "zone-2",
                    "zoneState": {"available": False},
                    "hosts": None,
                },
            ]
        }
        self.requests.register_uri('GET', self.url('detail'), json=get_detail)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env ruby
# frozen_string_literal: true
require "forwardable"
require "colorator"
require "liquid"
require "benchmark/ips"
require "memory_profiler"
# Set up (memory) profiler
# Set up (memory) profiler
class Profiler
  # Yields a Profiler configured from ARGV[0] (default 10,000 iterations).
  def self.run
    yield new(ARGV[0] || 10_000)
  end

  def initialize(count)
    @count = count.to_i
  end

  # Runs the block @count times under MemoryProfiler and prints a colored
  # allocation/retention summary tagged with +label+.
  def report(label, color, &block)
    prof_report = MemoryProfiler.report { @count.to_i.times(&block) }
    allocated_memory = prof_report.scale_bytes(prof_report.total_allocated_memsize)
    allocated_objects = prof_report.total_allocated
    retained_memory = prof_report.scale_bytes(prof_report.total_retained_memsize)
    retained_objects = prof_report.total_retained
    puts <<~MSG.send(color)
      With #{label} calls
        Total allocated: #{allocated_memory} (#{allocated_objects} objects)
        Total retained: #{retained_memory} (#{retained_objects} objects)
    MSG
  end
end
# Set up stage
# Base drop: wraps an arbitrary object so subclasses can expose
# selected methods of it to Liquid templates.
class Drop < Liquid::Drop
  def initialize(wrapped)
    @obj = wrapped
  end
end
# Exposes the wrapped object's +name+ via Forwardable delegation.
class ForwardDrop < Drop
  extend Forwardable
  def_delegators :@obj, :name
end
# Exposes the wrapped object's +name+ via a hand-written method, for
# comparison against the Forwardable-based variant.
class StaticDrop < Drop
  def name
    @obj.name
  end
end
# Stand-in content object whose +name+ both drop flavors expose.
class Document
  def name
    "lipsum"
  end
end
# Set up actors
document = Document.new
alpha = ForwardDrop.new(document)
beta = StaticDrop.new(document)
count = ARGV[0] || 10_000

# Run profilers
# Liquid resolves drop keys through #[] / #invoke_drop, so alpha["name"]
# exercises exactly the path templates use.
puts "\nMemory profiles for #{count} calls to invoke drop key:"
Profiler.run do |x|
  x.report("forwarded", :cyan) { alpha["name"] }
  x.report("static", :green) { beta["name"] }
end

# Benchmark
puts "\nBenchmarking the two scenarios..."
Benchmark.ips do |x|
  x.report("forwarded".cyan) { alpha["name"] }
  x.report("static".green) { beta["name"] }
  x.compare!
end
|
ruby
|
github
|
https://github.com/jekyll/jekyll
|
benchmark/static-drop-vs-forwarded.rb
|
"""
Tests for bulk operations in Split Modulestore.
"""
# pylint: disable=protected-access
import copy
import unittest
import six
import ddt
from bson.objectid import ObjectId
from mock import MagicMock, Mock, call
from opaque_keys.edx.locator import CourseLocator
from six.moves import range
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection
from xmodule.modulestore.split_mongo.split import SplitBulkWriteMixin
# Representative version GUIDs in each accepted input format: a hex string,
# a unicode hex string, and a raw BSON ObjectId.
VERSION_GUID_DICT = {
    'SAMPLE_VERSION_GUID': 'deadbeef1234' * 2,
    'SAMPLE_UNICODE_VERSION_GUID': u'deadbeef1234' * 2,
    'BSON_OBJECTID': ObjectId()
}
# Keys of VERSION_GUID_DICT, used as ddt data for GUID-format test variants.
SAMPLE_GUIDS_LIST = ['SAMPLE_VERSION_GUID', 'SAMPLE_UNICODE_VERSION_GUID', 'BSON_OBJECTID']
class TestBulkWriteMixin(unittest.TestCase):  # lint-amnesty, pylint: disable=missing-class-docstring
    # Base harness: wires a SplitBulkWriteMixin to a MagicMock MongoConnection
    # so subclasses can assert exactly which db calls each operation makes.
    def setUp(self):
        super(TestBulkWriteMixin, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
        self.bulk = SplitBulkWriteMixin()
        self.bulk.SCHEMA_VERSION = 1
        # Spy on cache clearing so tests can assert it did (not) happen.
        self.clear_cache = self.bulk._clear_cache = Mock(name='_clear_cache')
        self.conn = self.bulk.db_connection = MagicMock(name='db_connection', spec=MongoConnection)
        self.conn.get_course_index.return_value = {'initial': 'index'}
        self.course_key = CourseLocator('org', 'course', 'run-a', branch='test')
        self.course_key_b = CourseLocator('org', 'course', 'run-b', branch='test')
        self.structure = {'this': 'is', 'a': 'structure', '_id': ObjectId()}
        self.definition = {'this': 'is', 'a': 'definition', '_id': ObjectId()}
        self.index_entry = {'this': 'is', 'an': 'index'}

    def assertConnCalls(self, *calls):
        # Assert the mocked db connection saw exactly these calls, in order.
        assert list(calls) == self.conn.mock_calls

    def assertCacheNotCleared(self):
        # Assert that no operation invalidated the system cache.
        assert not self.clear_cache.called
class TestBulkWriteMixinPreviousTransaction(TestBulkWriteMixin):
    """
    Verify that opening and closing a transaction doesn't affect later behaviour.
    """
    def setUp(self):
        super(TestBulkWriteMixinPreviousTransaction, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
        # Run one complete bulk operation end-to-end...
        self.bulk._begin_bulk_operation(self.course_key)
        self.bulk.insert_course_index(self.course_key, MagicMock('prev-index-entry'))
        self.bulk.update_structure(self.course_key, {'this': 'is', 'the': 'previous structure', '_id': ObjectId()})
        self.bulk._end_bulk_operation(self.course_key)
        # ...then forget the calls it made, so each test starts from a
        # clean mock while the mixin keeps any internal leftover state.
        self.conn.reset_mock()
        self.clear_cache.reset_mock()
@ddt.ddt
class TestBulkWriteMixinClosed(TestBulkWriteMixin):
    """
    Tests of the bulk write mixin when bulk operations aren't active.
    """
    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_no_bulk_read_structure(self, version_guid_name):
        # Reading a structure when no bulk operation is active should just call
        # through to the db_connection
        version_guid = VERSION_GUID_DICT[version_guid_name]
        result = self.bulk.get_structure(self.course_key, version_guid)
        self.assertConnCalls(
            call.get_structure(self.course_key.as_object_id(version_guid), self.course_key)
        )
        assert result == self.conn.get_structure.return_value
        self.assertCacheNotCleared()

    def test_no_bulk_write_structure(self):
        # Writing a structure when no bulk operation is active should just
        # call through to the db_connection. It should also clear the
        # system cache
        self.bulk.update_structure(self.course_key, self.structure)
        self.assertConnCalls(call.insert_structure(self.structure, self.course_key))
        self.clear_cache.assert_called_once_with(self.structure['_id'])

    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_no_bulk_read_definition(self, version_guid_name):
        # Reading a definition when no bulk operation is active should just call
        # through to the db_connection
        version_guid = VERSION_GUID_DICT[version_guid_name]
        result = self.bulk.get_definition(self.course_key, version_guid)
        self.assertConnCalls(
            call.get_definition(
                self.course_key.as_object_id(version_guid),
                self.course_key
            )
        )
        assert result == self.conn.get_definition.return_value

    def test_no_bulk_write_definition(self):
        # Writing a definition when no bulk operation is active should just
        # call through to the db_connection.
        self.bulk.update_definition(self.course_key, self.definition)
        self.assertConnCalls(call.insert_definition(self.definition, self.course_key))

    @ddt.data(True, False)
    def test_no_bulk_read_index(self, ignore_case):
        # Reading a course index when no bulk operation is active should just call
        # through to the db_connection
        result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
        self.assertConnCalls(call.get_course_index(self.course_key, ignore_case))
        assert result == self.conn.get_course_index.return_value
        self.assertCacheNotCleared()

    def test_no_bulk_write_index(self):
        # Writing a course index when no bulk operation is active should just call
        # through to the db_connection
        self.bulk.insert_course_index(self.course_key, self.index_entry)
        self.assertConnCalls(call.insert_course_index(self.index_entry, self.course_key))
        self.assertCacheNotCleared()

    def test_out_of_order_end(self):
        # Calling _end_bulk_operation without a corresponding _begin...
        # is a noop
        self.bulk._end_bulk_operation(self.course_key)

    # The tests below verify that writes made during a bulk operation are
    # buffered (no db calls while the operation is open) and then flushed
    # exactly once when the operation ends.

    def test_write_new_index_on_close(self):
        # A course index that didn't exist before the bulk operation is
        # inserted (not updated) on close.
        self.conn.get_course_index.return_value = None
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.insert_course_index(self.course_key, self.index_entry)
        self.assertConnCalls()
        self.bulk._end_bulk_operation(self.course_key)
        self.conn.insert_course_index.assert_called_once_with(self.index_entry, self.course_key)

    def test_write_updated_index_on_close(self):
        # A pre-existing course index is updated on close, passing the old
        # index so the update can detect concurrent modification.
        old_index = {'this': 'is', 'an': 'old index'}
        self.conn.get_course_index.return_value = old_index
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.insert_course_index(self.course_key, self.index_entry)
        self.assertConnCalls()
        self.bulk._end_bulk_operation(self.course_key)
        self.conn.update_course_index.assert_called_once_with(
            self.index_entry,
            from_index=old_index,
            course_context=self.course_key,
        )

    def test_write_structure_on_close(self):
        self.conn.get_course_index.return_value = None
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.update_structure(self.course_key, self.structure)
        self.assertConnCalls()
        self.bulk._end_bulk_operation(self.course_key)
        self.assertConnCalls(call.insert_structure(self.structure, self.course_key))

    def test_write_multiple_structures_on_close(self):
        self.conn.get_course_index.return_value = None
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.update_structure(self.course_key.replace(branch='a'), self.structure)
        other_structure = {'another': 'structure', '_id': ObjectId()}
        self.bulk.update_structure(self.course_key.replace(branch='b'), other_structure)
        self.assertConnCalls()
        self.bulk._end_bulk_operation(self.course_key)
        six.assertCountEqual(
            self,
            [
                call.insert_structure(self.structure, self.course_key),
                call.insert_structure(other_structure, self.course_key)
            ],
            self.conn.mock_calls
        )

    def test_write_index_and_definition_on_close(self):
        # Definitions must be flushed before the index that references them.
        original_index = {'versions': {}}
        self.conn.get_course_index.return_value = copy.deepcopy(original_index)
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.update_definition(self.course_key, self.definition)
        self.bulk.insert_course_index(self.course_key, {'versions': {self.course_key.branch: self.definition['_id']}})  # lint-amnesty, pylint: disable=no-member
        self.assertConnCalls()
        self.bulk._end_bulk_operation(self.course_key)
        self.assertConnCalls(
            call.insert_definition(self.definition, self.course_key),
            call.update_course_index(
                {'versions': {self.course_key.branch: self.definition['_id']}},  # lint-amnesty, pylint: disable=no-member
                from_index=original_index,
                course_context=self.course_key
            )
        )

    def test_write_index_and_multiple_definitions_on_close(self):
        original_index = {'versions': {'a': ObjectId(), 'b': ObjectId()}}
        self.conn.get_course_index.return_value = copy.deepcopy(original_index)
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
        other_definition = {'another': 'definition', '_id': ObjectId()}
        self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
        self.bulk.insert_course_index(self.course_key, {'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}})  # lint-amnesty, pylint: disable=line-too-long
        self.bulk._end_bulk_operation(self.course_key)
        six.assertCountEqual(
            self,
            [
                call.insert_definition(self.definition, self.course_key),
                call.insert_definition(other_definition, self.course_key),
                call.update_course_index(
                    {'versions': {'a': self.definition['_id'], 'b': other_definition['_id']}},
                    from_index=original_index,
                    course_context=self.course_key,
                )
            ],
            self.conn.mock_calls
        )

    def test_write_definition_on_close(self):
        self.conn.get_course_index.return_value = None
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.update_definition(self.course_key, self.definition)
        self.assertConnCalls()
        self.bulk._end_bulk_operation(self.course_key)
        self.assertConnCalls(call.insert_definition(self.definition, self.course_key))

    def test_write_multiple_definitions_on_close(self):
        self.conn.get_course_index.return_value = None
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.update_definition(self.course_key.replace(branch='a'), self.definition)
        other_definition = {'another': 'definition', '_id': ObjectId()}
        self.bulk.update_definition(self.course_key.replace(branch='b'), other_definition)
        self.assertConnCalls()
        self.bulk._end_bulk_operation(self.course_key)
        six.assertCountEqual(
            self,
            [
                call.insert_definition(self.definition, self.course_key),
                call.insert_definition(other_definition, self.course_key)
            ],
            self.conn.mock_calls
        )

    def test_write_index_and_structure_on_close(self):
        # Structures must be flushed before the index that references them.
        original_index = {'versions': {}}
        self.conn.get_course_index.return_value = copy.deepcopy(original_index)
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.update_structure(self.course_key, self.structure)
        self.bulk.insert_course_index(self.course_key, {'versions': {self.course_key.branch: self.structure['_id']}})  # lint-amnesty, pylint: disable=no-member
        self.assertConnCalls()
        self.bulk._end_bulk_operation(self.course_key)
        self.assertConnCalls(
            call.insert_structure(self.structure, self.course_key),
            call.update_course_index(
                {'versions': {self.course_key.branch: self.structure['_id']}},  # lint-amnesty, pylint: disable=no-member
                from_index=original_index,
                course_context=self.course_key,
            )
        )

    def test_write_index_and_multiple_structures_on_close(self):
        original_index = {'versions': {'a': ObjectId(), 'b': ObjectId()}}
        self.conn.get_course_index.return_value = copy.deepcopy(original_index)
        self.bulk._begin_bulk_operation(self.course_key)
        self.conn.reset_mock()
        self.bulk.update_structure(self.course_key.replace(branch='a'), self.structure)
        other_structure = {'another': 'structure', '_id': ObjectId()}
        self.bulk.update_structure(self.course_key.replace(branch='b'), other_structure)
        self.bulk.insert_course_index(self.course_key, {'versions': {'a': self.structure['_id'], 'b': other_structure['_id']}})  # lint-amnesty, pylint: disable=line-too-long
        self.bulk._end_bulk_operation(self.course_key)
        six.assertCountEqual(
            self,
            [
                call.insert_structure(self.structure, self.course_key),
                call.insert_structure(other_structure, self.course_key),
                call.update_course_index(
                    {'versions': {'a': self.structure['_id'], 'b': other_structure['_id']}},
                    from_index=original_index,
                    course_context=self.course_key,
                )
            ],
            self.conn.mock_calls
        )

    def test_version_structure_creates_new_version(self):
        # version_structure must hand back a copy with a fresh _id rather
        # than mutating the structure it was given.
        assert self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'] != self.structure['_id']

    def test_version_structure_new_course(self):
        # A structure versioned inside a bulk operation for a brand-new
        # course must be readable back through the bulk cache.
        self.conn.get_course_index.return_value = None
        self.bulk._begin_bulk_operation(self.course_key)
        version_result = self.bulk.version_structure(self.course_key, self.structure, 'user_id')
        get_result = self.bulk.get_structure(self.course_key, version_result['_id'])
        assert version_result == get_result
class TestBulkWriteMixinClosedAfterPrevTransaction(TestBulkWriteMixinClosed, TestBulkWriteMixinPreviousTransaction):  # lint-amnesty, pylint: disable=test-inherits-tests
    """
    Test that operations with a closed transaction aren't affected by a previously executed transaction.
    """
    # Re-runs every TestBulkWriteMixinClosed test on top of the setUp from
    # TestBulkWriteMixinPreviousTransaction; no extra test bodies needed.
    pass  # lint-amnesty, pylint: disable=unnecessary-pass
@ddt.ddt
class TestBulkWriteMixinFindMethods(TestBulkWriteMixin):
"""
Tests of BulkWriteMixin methods for finding many structures or indexes
"""
def test_no_bulk_find_matching_course_indexes(self):
branch = Mock(name='branch')
search_targets = MagicMock(name='search_targets')
org_targets = None
self.conn.find_matching_course_indexes.return_value = [Mock(name='result')]
result = self.bulk.find_matching_course_indexes(branch, search_targets)
self.assertConnCalls(call.find_matching_course_indexes(
branch,
search_targets,
org_targets,
course_keys=None
)
)
assert result == self.conn.find_matching_course_indexes.return_value
self.assertCacheNotCleared()
@ddt.data(
(None, None, [], []),
(
'draft',
None,
[{'versions': {'draft': '123'}}],
[
{'versions': {'published': '123'}},
{}
],
),
(
'draft',
{'f1': 'v1'},
[{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}}],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'value2'}},
{'versions': {'published': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
{'versions': {'draft': '123'}},
],
),
(
None,
{'f1': 'v1'},
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}},
{'versions': {'published': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v2'}},
{'versions': {'draft': '123'}, 'search_targets': {'f2': 'v1'}},
{'versions': {'draft': '123'}},
],
),
(
None,
{'f1': 'v1', 'f2': 2},
[
{'search_targets': {'f1': 'v1', 'f2': 2}},
{'search_targets': {'f1': 'v1', 'f2': 2}},
],
[
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v1'}},
{'search_targets': {'f1': 'v1'}},
{'versions': {'draft': '123'}, 'search_targets': {'f1': 'v2'}},
{'versions': {'draft': '123'}},
],
),
)
@ddt.unpack
def test_find_matching_course_indexes(self, branch, search_targets, matching, unmatching):
db_indexes = [{'org': 'what', 'course': 'this', 'run': 'needs'}]
for n, index in enumerate(matching + unmatching):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
for attr in ['org', 'course', 'run']:
index[attr] = getattr(course_key, attr)
self.bulk.insert_course_index(course_key, index)
expected = matching + db_indexes
self.conn.find_matching_course_indexes.return_value = db_indexes
result = self.bulk.find_matching_course_indexes(branch, search_targets)
six.assertCountEqual(self, result, expected)
for item in unmatching:
assert item not in result
def test_no_bulk_find_structures_by_id(self):
ids = [Mock(name='id')]
self.conn.find_structures_by_id.return_value = [MagicMock(name='result')]
result = self.bulk.find_structures_by_id(ids)
self.assertConnCalls(call.find_structures_by_id(ids))
assert result == self.conn.find_structures_by_id.return_value
self.assertCacheNotCleared()
@ddt.data(
([], [], []),
([1, 2, 3], [1, 2], [1, 2]),
([1, 2, 3], [1], [1, 2]),
([1, 2, 3], [], [1, 2]),
)
@ddt.unpack
def test_find_structures_by_id(self, search_ids, active_ids, db_ids):
db_structure = lambda _id: {'db': 'structure', '_id': _id}
active_structure = lambda _id: {'active': 'structure', '_id': _id}
db_structures = [db_structure(_id) for _id in db_ids if _id not in active_ids]
for n, _id in enumerate(active_ids):
course_key = CourseLocator('org', 'course', 'run{}'.format(n))
self.bulk._begin_bulk_operation(course_key)
self.bulk.update_structure(course_key, active_structure(_id))
self.conn.find_structures_by_id.return_value = db_structures
results = self.bulk.find_structures_by_id(search_ids)
self.conn.find_structures_by_id.assert_called_once_with(list(set(search_ids) - set(active_ids)))
for _id in active_ids:
if _id in search_ids:
assert active_structure(_id) in results
else:
assert active_structure(_id) not in results
for _id in db_ids:
if _id in search_ids and _id not in active_ids:
assert db_structure(_id) in results
else:
assert db_structure(_id) not in results
@ddt.data(
([], [], []),
([1, 2, 3], [1, 2], [1, 2]),
([1, 2, 3], [1], [1, 2]),
([1, 2, 3], [], [1, 2]),
)
@ddt.unpack
def test_get_definitions(self, search_ids, active_ids, db_ids):
db_definition = lambda _id: {'db': 'definition', '_id': _id}
active_definition = lambda _id: {'active': 'definition', '_id': _id}
db_definitions = [db_definition(_id) for _id in db_ids if _id not in active_ids]
self.bulk._begin_bulk_operation(self.course_key)
for _id in active_ids:
self.bulk.update_definition(self.course_key, active_definition(_id))
self.conn.get_definitions.return_value = db_definitions
results = self.bulk.get_definitions(self.course_key, search_ids)
definitions_gotten = list(set(search_ids) - set(active_ids))
if len(definitions_gotten) > 0:
self.conn.get_definitions.assert_called_once_with(definitions_gotten, self.course_key)
else:
# If no definitions to get, then get_definitions() should *not* have been called.
assert self.conn.get_definitions.call_count == 0
for _id in active_ids:
if _id in search_ids:
assert active_definition(_id) in results
else:
assert active_definition(_id) not in results
for _id in db_ids:
if _id in search_ids and _id not in active_ids:
assert db_definition(_id) in results
else:
assert db_definition(_id) not in results
def test_get_definitions_doesnt_update_db(self):
test_ids = [1, 2]
db_definition = lambda _id: {'db': 'definition', '_id': _id}
db_definitions = [db_definition(_id) for _id in test_ids]
self.conn.get_definitions.return_value = db_definitions
self.bulk._begin_bulk_operation(self.course_key)
self.bulk.get_definitions(self.course_key, test_ids)
self.bulk._end_bulk_operation(self.course_key)
assert not self.conn.insert_definition.called
def test_no_bulk_find_structures_derived_from(self):
ids = [Mock(name='id')]
self.conn.find_structures_derived_from.return_value = [MagicMock(name='result')]
result = self.bulk.find_structures_derived_from(ids)
self.assertConnCalls(call.find_structures_derived_from(ids))
assert result == self.conn.find_structures_derived_from.return_value
self.assertCacheNotCleared()
    @ddt.data(
        # Test values are:
        #   - previous_versions to search for
        #   - documents in the cache with $previous_version.$_id
        #   - documents in the db with $previous_version.$_id
        ([], [], []),
        (['1', '2', '3'], ['1.a', '1.b', '2.c'], ['1.a', '2.c']),
        (['1', '2', '3'], ['1.a'], ['1.a', '2.c']),
        (['1', '2', '3'], [], ['1.a', '2.c']),
        (['1', '2', '3'], ['4.d'], ['1.a', '2.c']),
    )
    @ddt.unpack
    def test_find_structures_derived_from(self, search_ids, active_ids, db_ids):
        """Active (cached) structures must shadow db structures with the same _id."""
        def db_structure(_id):
            # Ids are encoded as "$previous_version.$_id" strings; split them apart.
            previous, _, current = _id.partition('.')
            return {'db': 'structure', 'previous_version': previous, '_id': current}
        def active_structure(_id):
            previous, _, current = _id.partition('.')
            return {'active': 'structure', 'previous_version': previous, '_id': current}
        db_structures = [db_structure(_id) for _id in db_ids]
        active_structures = []
        # Each active structure gets its own course-scoped bulk operation.
        for n, _id in enumerate(active_ids):
            course_key = CourseLocator('org', 'course', 'run{}'.format(n))
            self.bulk._begin_bulk_operation(course_key)
            structure = active_structure(_id)
            self.bulk.update_structure(course_key, structure)
            active_structures.append(structure)
        self.conn.find_structures_derived_from.return_value = db_structures
        results = self.bulk.find_structures_derived_from(search_ids)
        self.conn.find_structures_derived_from.assert_called_once_with(search_ids)
        for structure in active_structures:
            if structure['previous_version'] in search_ids:
                assert structure in results
            else:
                assert structure not in results
        for structure in db_structures:
            if (
                structure['previous_version'] in search_ids and  # We're searching for this document
                not any(active.endswith(structure['_id']) for active in active_ids)  # Not shadowed by an active _id
            ):
                assert structure in results
            else:
                assert structure not in results
    def test_no_bulk_find_ancestor_structures(self):
        # Outside of a bulk operation the call must pass straight through to the db.
        original_version = Mock(name='original_version')
        block_id = Mock(name='block_id')
        self.conn.find_ancestor_structures.return_value = [MagicMock(name='result')]
        result = self.bulk.find_ancestor_structures(original_version, block_id)
        self.assertConnCalls(call.find_ancestor_structures(original_version, block_id))
        assert result == self.conn.find_ancestor_structures.return_value
        self.assertCacheNotCleared()
    @ddt.data(
        # Test values are:
        #   - original_version
        #   - block_id
        #   - matching documents in the cache
        #   - non-matching documents in the cache
        #   - expected documents returned from the db
        #   - unexpected documents returned from the db
        ('ov', 'bi', [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], [], [], []),
        ('ov', 'bi', [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}, '_id': 'foo'}], [], [], [{'_id': 'foo'}]),
        ('ov', 'bi', [], [{'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], [], []),
        ('ov', 'bi', [], [{'original_version': 'ov'}], [], []),
        ('ov', 'bi', [], [], [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}], []),
        (
            'ov',
            'bi',
            [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'foo'}}}}],
            [],
            [{'original_version': 'ov', 'blocks': {'bi': {'edit_info': {'update_version': 'bar'}}}}],
            []
        ),
    )
    @ddt.unpack
    def test_find_ancestor_structures(self, original_version, block_id, active_match, active_unmatch, db_match, db_unmatch):
        """Results merge matching active (cached) and db structures, deduped by _id."""
        # Give every document a unique _id unless the case pinned one deliberately.
        for structure in active_match + active_unmatch + db_match + db_unmatch:
            structure.setdefault('_id', ObjectId())
        # Each active structure lives in its own course-scoped bulk operation.
        for n, structure in enumerate(active_match + active_unmatch):
            course_key = CourseLocator('org', 'course', 'run{}'.format(n))
            self.bulk._begin_bulk_operation(course_key)
            self.bulk.update_structure(course_key, structure)
        self.conn.find_ancestor_structures.return_value = db_match + db_unmatch
        results = self.bulk.find_ancestor_structures(original_version, block_id)
        self.conn.find_ancestor_structures.assert_called_once_with(original_version, block_id)
        six.assertCountEqual(self, active_match + db_match, results)
@ddt.ddt
class TestBulkWriteMixinOpen(TestBulkWriteMixin):
    """
    Tests of the bulk write mixin when bulk write operations are open
    """
    def setUp(self):
        # Every test in this class runs inside an already-open bulk operation.
        super(TestBulkWriteMixinOpen, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
        self.bulk._begin_bulk_operation(self.course_key)
    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_read_structure_without_write_from_db(self, version_guid_name):
        # Reading a structure before it's been written (while in bulk operation mode)
        # returns the structure from the database
        version_guid = VERSION_GUID_DICT[version_guid_name]
        result = self.bulk.get_structure(self.course_key, version_guid)
        assert self.conn.get_structure.call_count == 1
        assert result == self.conn.get_structure.return_value
        self.assertCacheNotCleared()
    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_read_structure_without_write_only_reads_once(self, version_guid_name):
        # Reading the same structure multiple times shouldn't hit the database
        # more than once
        version_guid = VERSION_GUID_DICT[version_guid_name]
        for _ in range(2):
            result = self.bulk.get_structure(self.course_key, version_guid)
            assert self.conn.get_structure.call_count == 1
            assert result == self.conn.get_structure.return_value
            self.assertCacheNotCleared()
    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_read_structure_after_write_no_db(self, version_guid_name):
        # Reading a structure that's already been written shouldn't hit the db at all
        version_guid = VERSION_GUID_DICT[version_guid_name]
        self.structure['_id'] = version_guid
        self.bulk.update_structure(self.course_key, self.structure)
        result = self.bulk.get_structure(self.course_key, version_guid)
        assert self.conn.get_structure.call_count == 0
        assert result == self.structure
    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_read_structure_after_write_after_read(self, version_guid_name):
        # Reading a structure that's been updated after being pulled from the db should
        # still get the updated value
        version_guid = VERSION_GUID_DICT[version_guid_name]
        self.structure['_id'] = version_guid
        self.bulk.get_structure(self.course_key, version_guid)
        self.bulk.update_structure(self.course_key, self.structure)
        result = self.bulk.get_structure(self.course_key, version_guid)
        assert self.conn.get_structure.call_count == 1
        assert result == self.structure
    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_read_definition_without_write_from_db(self, version_guid_name):
        # Reading a definition before it's been written (while in bulk operation mode)
        # returns the definition from the database
        version_guid = VERSION_GUID_DICT[version_guid_name]
        result = self.bulk.get_definition(self.course_key, version_guid)
        assert self.conn.get_definition.call_count == 1
        assert result == self.conn.get_definition.return_value
        self.assertCacheNotCleared()
    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_read_definition_without_write_only_reads_once(self, version_guid_name):
        # Reading the same definition multiple times shouldn't hit the database
        # more than once
        version_guid = VERSION_GUID_DICT[version_guid_name]
        for _ in range(2):
            result = self.bulk.get_definition(self.course_key, version_guid)
            assert self.conn.get_definition.call_count == 1
            assert result == self.conn.get_definition.return_value
            self.assertCacheNotCleared()
    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_read_definition_after_write_no_db(self, version_guid_name):
        # Reading a definition that's already been written shouldn't hit the db at all
        version_guid = VERSION_GUID_DICT[version_guid_name]
        self.definition['_id'] = version_guid
        self.bulk.update_definition(self.course_key, self.definition)
        result = self.bulk.get_definition(self.course_key, version_guid)
        assert self.conn.get_definition.call_count == 0
        assert result == self.definition
    @ddt.data(*SAMPLE_GUIDS_LIST)
    def test_read_definition_after_write_after_read(self, version_guid_name):
        # Reading a definition that's been updated after being pulled from the db should
        # still get the updated value
        version_guid = VERSION_GUID_DICT[version_guid_name]
        self.definition['_id'] = version_guid
        self.bulk.get_definition(self.course_key, version_guid)
        self.bulk.update_definition(self.course_key, self.definition)
        result = self.bulk.get_definition(self.course_key, version_guid)
        assert self.conn.get_definition.call_count == 1
        assert result == self.definition
    @ddt.data(True, False)
    def test_read_index_without_write_from_db(self, ignore_case):
        # Reading the index without writing to it should pull from the database
        result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
        assert self.conn.get_course_index.call_count == 1
        assert self.conn.get_course_index.return_value == result
    @ddt.data(True, False)
    def test_read_index_without_write_only_reads_once(self, ignore_case):
        # Reading the index multiple times should only result in one read from
        # the database
        for _ in range(2):
            result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
            assert self.conn.get_course_index.call_count == 1
            assert self.conn.get_course_index.return_value == result
    @ddt.data(True, False)
    def test_read_index_after_write(self, ignore_case):
        # Reading the index after a write still should hit the database once to fetch the
        # initial index, and should return the written index_entry
        self.bulk.insert_course_index(self.course_key, self.index_entry)
        result = self.bulk.get_course_index(self.course_key, ignore_case=ignore_case)
        assert self.conn.get_course_index.call_count == 1
        assert self.index_entry == result
    def test_read_index_ignore_case(self):
        # Reading using ignore case should find an already written entry with a different case
        self.bulk.insert_course_index(self.course_key, self.index_entry)
        result = self.bulk.get_course_index(
            self.course_key.replace(
                org=self.course_key.org.upper(),
                course=self.course_key.course.title(),
                run=self.course_key.run.upper()
            ),
            ignore_case=True
        )
        assert self.conn.get_course_index.call_count == 1
        assert self.index_entry == result
    def test_version_structure_creates_new_version_before_read(self):
        # Versioning must mint a fresh _id even if the original was never read.
        assert self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'] != self.structure['_id']
    def test_version_structure_creates_new_version_after_read(self):
        # Versioning after a read must still mint a fresh _id.
        self.conn.get_structure.return_value = copy.deepcopy(self.structure)
        self.bulk.get_structure(self.course_key, self.structure['_id'])
        assert self.bulk.version_structure(self.course_key, self.structure, 'user_id')['_id'] != self.structure['_id']
    def test_copy_branch_versions(self):
        # Directly updating an index so that the draft branch points to the published index
        # version should work, and should only persist a single structure
        self.maxDiff = None
        published_structure = {'published': 'structure', '_id': ObjectId()}
        self.bulk.update_structure(self.course_key, published_structure)
        index = {'versions': {'published': published_structure['_id']}}
        self.bulk.insert_course_index(self.course_key, index)
        index_copy = copy.deepcopy(index)
        index_copy['versions']['draft'] = index['versions']['published']
        self.bulk.update_course_index(self.course_key, index_copy)
        self.bulk._end_bulk_operation(self.course_key)
        # Both branches share one structure, so exactly one insert is expected.
        self.conn.insert_structure.assert_called_once_with(published_structure, self.course_key)
        self.conn.update_course_index.assert_called_once_with(
            index_copy,
            from_index=self.conn.get_course_index.return_value,
            course_context=self.course_key,
        )
        self.conn.get_course_index.assert_called_once_with(self.course_key, ignore_case=False)
class TestBulkWriteMixinOpenAfterPrevTransaction(TestBulkWriteMixinOpen, TestBulkWriteMixinPreviousTransaction):  # lint-amnesty, pylint: disable=test-inherits-tests
    """
    Test that operations on with an open transaction aren't affected by a previously executed transaction
    """
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
### BEGIN INIT INFO
# Provides: eth0.1.up
# Required-Start: network-manager
# Required-Stop:
# Default-Start:
# Default-Stop:
# Short-Description: Raise eth0:1 network interface.
### END INIT INFO
# usage:
# -daemon (or --daemon): run in background
# Static alias addresses to assign per interface once NetworkManager
# reports the device as activated.
LAN_SETTINGS = {
    'eth0': {
        'address': "10.1.2.3",
        'netmask': "255.255.255.0"
    }
}
# Shell template for raising the ":1" alias sub-interface:
# (interface, address, netmask).
ETH_V1_UP="/sbin/ifconfig %s:1 inet %s netmask %s up"
import os,sys
import dbus, gobject
from dbus.mainloop.glib import DBusGMainLoop
if len(sys.argv) > 1 and sys.argv[1].endswith("-daemon"):
    # run this program in background.
    # NOTE(review): single fork only -- no setsid/second fork/fd redirection,
    # so this is a minimal daemonization; confirm that is sufficient here.
    try:
        pid = os.fork()
    except OSError:
        sys.exit(1)
    if pid > 0:
        # Parent exits; the child continues as the background process.
        sys.exit(0)
# Well-known D-Bus names/paths/interfaces for NetworkManager.
NetworkManagerServiceName = "org.freedesktop.NetworkManager"
NetworkManagerObjectPath = "/org/freedesktop/NetworkManager"
NetworkManagerInterface = "org.freedesktop.NetworkManager"
NetworkManagerDeviceInterface = "org.freedesktop.NetworkManager.Device"
NetworkManagerDeviceWiredInterface = "org.freedesktop.NetworkManager.Device.Wired"
DBusPropertiesInterface="org.freedesktop.DBus.Properties"
# Device state value meaning "activated" (NM_DEVICE_STATE_ACTIVATED).
NM_DEVICE_STATE_ACTIVATED=8
class ActivatedHandler(object):
    """Watches one NM device and raises its alias interface on activation."""
    def __init__(self, device_instance):
        self.device_instance = device_instance
        # Resolve the device's interface name (e.g. "eth0") via D-Bus properties.
        props = device_instance.GetAll(NetworkManagerDeviceInterface,
                dbus_interface=DBusPropertiesInterface)
        self.name = props['Interface']
        self.device_instance.connect_to_signal("StateChanged",
                self.handler,
                dbus_interface=NetworkManagerDeviceInterface)
    def handler(self, new_state, old_state, reason):
        # StateChanged callback: when the device becomes activated, bring up
        # the configured ":1" alias. Assumes self.name is a LAN_SETTINGS key.
        if new_state == NM_DEVICE_STATE_ACTIVATED:
            #print "%s activated" % self.name
            os.system(ETH_V1_UP % (self.name,
                LAN_SETTINGS[self.name]['address'],
                LAN_SETTINGS[self.name]['netmask']))
# Wire dbus into the GLib main loop and connect to the system bus.
DBusGMainLoop(set_as_default=True)
loop = gobject.MainLoop()
bus = dbus.SystemBus()
try:
    nm_instance = bus.get_object(NetworkManagerServiceName, NetworkManagerObjectPath)
except dbus.DBusException:
    print "connect to NetworkManager error."
    sys.exit(1)
# device name -> ActivatedHandler (populated by the commented-out code below).
handlers = {}
def IntToDottedQuad(n):
    """Convert a 32-bit integer to a dotted-quad string.

    NOTE(review): the octets are reversed before joining, which matches
    NetworkManager's Ip4Address encoding on little-endian hosts (network
    byte order read back as a host-order integer) -- confirm before reusing
    this for plain big-endian addresses.
    """
    d = 256 * 256 * 256
    q = []
    while d > 0:
        m, n = divmod(n, d)
        q.append(str(m))
        # Floor division keeps d an integer on both Python 2 and 3; the
        # original "d = d/256" becomes a float (and misbehaves) under Python 3.
        d //= 256
    q.reverse()
    return '.'.join(q)
def IntToNetmaskAddr(keep_bits):
    """Convert a CIDR prefix length (e.g. 24) to a dotted-quad netmask string."""
    n = (0xffffffff >> (32 - keep_bits)) << (32 - keep_bits)
    d = 256 * 256 * 256
    q = []
    while d > 0:
        m, n = divmod(n, d)
        q.append(str(m))
        # Floor division keeps d an integer on both Python 2 and 3; the
        # original "d = d/256" becomes a float (and misbehaves) under Python 3.
        d //= 256
    # No reversal here: octets are appended most-significant first, which is
    # already the conventional netmask order.
    return '.'.join(q)
# Enumerate every NM device and collect [name, ip, netmask, mac] tuples.
address_list = []
devices = nm_instance.GetDevices(dbus_interface=NetworkManagerInterface)
for device in devices:
    device_instance = bus.get_object(NetworkManagerServiceName, device)
    device_props = device_instance.GetAll(NetworkManagerDeviceInterface,
            dbus_interface=DBusPropertiesInterface)
    wired_props = device_instance.GetAll(NetworkManagerDeviceWiredInterface,
            dbus_interface=DBusPropertiesInterface)
    device_name = str(device_props['Interface']) # eg. "eth0"
    ipv4_address = int(device_props['Ip4Address'])
    mac_address = str(wired_props['HwAddress'])
    # Each device exposes an IP4Config object with an Addresses property:
    # a list of (address, prefix-length, gateway) triples.
    ipv4config_instance = bus.get_object(NetworkManagerServiceName, device_props['Ip4Config'])
    ipv4_props = ipv4config_instance.GetAll('org.freedesktop.NetworkManager.IP4Config',
            dbus_interface=DBusPropertiesInterface)
    # Debug output of the raw and decoded address tuples.
    print("%s %s %s" % (device_name, IntToDottedQuad(ipv4_address), mac_address))
    print(ipv4_props['Addresses'][0])
    for addr in ipv4_props['Addresses']:
        print(IntToDottedQuad(addr[0]))
        print(IntToDottedQuad(addr[1]))
        print(IntToDottedQuad(addr[2]))
    print(IntToDottedQuad(ipv4_props['Addresses'][0][2]))
    # addr[1] is a prefix length (e.g. 24), which IntToNetmaskAddr expands.
    netmask = int(ipv4_props['Addresses'][0][1])
    #netmask = IntToNetmaskAddr(kb)
    address_list.append(
        [device_name, IntToDottedQuad(ipv4_address), IntToNetmaskAddr(netmask), mac_address])
    #if device_name not in LAN_SETTINGS:
    #    continue
    #if props['State'] == NM_DEVICE_STATE_ACTIVATED:
    #    #print "%s activated" % device_name
    #    os.system(ETH_V1_UP % (device_name,
    #        LAN_SETTINGS[device_name]['address'],
    #        LAN_SETTINGS[device_name]['netmask']))
    #handlers[device_name] = ActivatedHandler(device_instance)
#devices = False
#print "Working..."
#loop.run()
print address_list
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
#
# Copyright (c) 2019 Liu Qingyi, (@smile37773)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: community-supported, preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_galleryimage_info
version_added: '2.9'
short_description: Get Azure SIG Image info
description:
- Get info of Azure SIG Image.
options:
resource_group:
description:
- The name of the resource group.
type: str
required: true
gallery_name:
description:
- The name of the shared image gallery from which the image definitions are to be retrieved.
type: str
required: true
name:
description:
- Resource name.
type: str
extends_documentation_fragment:
- azure
author:
- Liu Qingyi (@smile37773)
'''
EXAMPLES = '''
- name: List gallery images in a gallery.
azure_rm_galleryimage_info:
resource_group: myResourceGroup
gallery_name: myGallery
- name: Get a gallery image.
azure_rm_galleryimage_info:
resource_group: myResourceGroup
gallery_name: myGallery
name: myImage
'''
RETURN = '''
images:
description:
- A list of dict results where the key is the name of the image and the values are the info for that image.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup
/providers/Microsoft.Compute/galleries/myGallery/images/myImage"
name:
description:
- Resource name.
returned: always
type: str
sample: myImage
location:
description:
- Resource location.
returned: always
type: str
sample: "eastus"
tags:
description:
- Resource tags.
returned: always
type: dict
sample: { "tag": "value" }
os_state:
description:
- The allowed values for OS State are C(generalized).
type: OperatingSystemStateTypes
sample: "Generalized"
os_type:
description:
- This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.
type: OperatingSystemTypes
sample: "linux/windows"
identifier:
description:
- This is the gallery image definition identifier.
type: dict
contains:
offer:
description:
- The name of the gallery image definition offer.
type: str
sample: "myOfferName"
publisher:
description:
- The name of the gallery image definition publisher.
type: str
sample: "myPublisherName"
sku:
description:
- The name of the gallery image definition sku.
type: str
sample: "mySkuName"
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.azure_rm_common_rest import GenericRestClient
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# handled in azure_rm_common
pass
class AzureRMGalleryImagesInfo(AzureRMModuleBase):
    """
    Info module for Azure Shared Image Gallery image definitions.

    Returns a single formatted image when ``name`` is supplied, otherwise a
    list of every image definition in the gallery, queried directly through
    the Azure REST API via GenericRestClient.
    """

    def __init__(self):
        # Module options; mirrors the DOCUMENTATION block above.
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            gallery_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str'
            )
        )
        self.resource_group = None
        self.gallery_name = None
        self.name = None
        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.url = None
        self.status_code = [200]
        # Fixed API version and headers for every request this module makes.
        self.query_parameters = {}
        self.query_parameters['api-version'] = '2019-03-01'
        self.header_parameters = {}
        self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        super(AzureRMGalleryImagesInfo, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Entry point: bind options, then dispatch to get() or listbygallery()."""
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)
        if (self.resource_group is not None and
                self.gallery_name is not None and
                self.name is not None):
            self.results['images'] = self.get()
        elif (self.resource_group is not None and
                self.gallery_name is not None):
            self.results['images'] = self.listbygallery()
        return self.results

    def _build_url(self, image_name=None):
        """Build the gallery-images resource URL, optionally scoped to one image."""
        url = ('/subscriptions/' + self.subscription_id +
               '/resourceGroups/' + self.resource_group +
               '/providers/Microsoft.Compute' +
               '/galleries/' + self.gallery_name +
               '/images')
        if image_name is not None:
            url = url + '/' + image_name
        return url

    def get(self):
        """GET a single gallery image; return the formatted item, or {} on error."""
        response = None
        results = {}
        self.url = self._build_url(self.name)
        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            results = json.loads(response.text)
        except CloudError as e:
            # Bug fix: the original logged a codegen placeholder message and
            # then crashed in format_item() on the empty dict.
            self.log('Could not get gallery image info: {0}'.format(str(e)))
            return {}
        return self.format_item(results)

    def listbygallery(self):
        """GET all images in the gallery; return a list of formatted items."""
        response = None
        results = {}
        self.url = self._build_url()
        try:
            response = self.mgmt_client.query(self.url,
                                              'GET',
                                              self.query_parameters,
                                              self.header_parameters,
                                              None,
                                              self.status_code,
                                              600,
                                              30)
            results = json.loads(response.text)
        except CloudError as e:
            # Bug fix: the original raised KeyError on results['value'] after
            # a failed request instead of reporting the error.
            self.log('Could not list gallery images: {0}'.format(str(e)))
            return []
        return [self.format_item(x) for x in results.get('value') or []]

    def format_item(self, item):
        """Project a raw REST response body into the documented return shape."""
        d = {
            'id': item['id'],
            'name': item['name'],
            'location': item['location'],
            'tags': item.get('tags'),
            'os_state': item['properties']['osState'],
            'os_type': item['properties']['osType'],
            'identifier': item['properties']['identifier']
        }
        return d
def main():
    # Instantiating the module runs it (AzureRMModuleBase drives exec_module).
    AzureRMGalleryImagesInfo()
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
from test import test_support
from test.test_support import bigmemtest, _1G, _2G, _4G, precisionbigmemtest
import unittest
import operator
import string
import sys
# Bigmem testing houserules:
#
# - Try not to allocate too many large objects. It's okay to rely on
# refcounting semantics, but don't forget that 's = create_largestring()'
# doesn't release the old 's' (if it exists) until well after its new
# value has been created. Use 'del s' before the create_largestring call.
#
# - Do *not* compare large objects using assertEqual or similar. It's a
# lengty operation and the errormessage will be utterly useless due to
# its size. To make sure whether a result has the right contents, better
# to use the strip or count methods, or compare meaningful slices.
#
# - Don't forget to test for large indices, offsets and results and such,
# in addition to large sizes.
#
# - When repeating an object (say, a substring, or a small list) to create
# a large object, make the subobject of a length that is not a power of
# 2. That way, int-wrapping problems are more easily detected.
#
# - While the bigmemtest decorator speaks of 'minsize', all tests will
# actually be called with a much smaller number too, in the normal
# test run (5Kb currently.) This is so the tests themselves get frequent
# testing. Consequently, always make all large allocations based on the
# passed-in 'size', and don't rely on the size being very large. Also,
# memuse-per-size should remain sane (less than a few thousand); if your
# test uses more, adjust 'size' upward, instead.
class StrTest(unittest.TestCase):
    @bigmemtest(minsize=_2G, memuse=2)
    def test_capitalize(self, size):
        # Capitalizing a huge dash-padded string must leave the tail unchanged.
        SUBSTR = ' abc def ghi'
        s = '-' * size + SUBSTR
        caps = s.capitalize()
        self.assertEqual(caps[-len(SUBSTR):],
                         SUBSTR.capitalize())
        self.assertEqual(caps.lstrip('-'), SUBSTR)
    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_center(self, size):
        # Centering pads both sides; the extra pad char goes on the left.
        SUBSTR = ' abc def ghi'
        s = SUBSTR.center(size)
        self.assertEqual(len(s), size)
        lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
        if len(s) % 2:
            lpadsize += 1
        self.assertEqual(s[lpadsize:-rpadsize], SUBSTR)
        self.assertEqual(s.strip(), SUBSTR.strip())
    @precisionbigmemtest(size=_2G - 1, memuse=1)
    def test_center_unicode(self, size):
        # Same as test_center, but on a unicode string; OverflowError is
        # acceptable on 32-bit builds.
        SUBSTR = u' abc def ghi'
        try:
            s = SUBSTR.center(size)
        except OverflowError:
            pass # acceptable on 32-bit
        else:
            self.assertEqual(len(s), size)
            lpadsize = rpadsize = (len(s) - len(SUBSTR)) // 2
            if len(s) % 2:
                lpadsize += 1
            self.assertEqual(s[lpadsize:-rpadsize], SUBSTR)
            self.assertEqual(s.strip(), SUBSTR.strip())
            del s
    @bigmemtest(minsize=_2G, memuse=2)
    def test_count(self, size):
        # Counting must be exact for huge strings, including after appending.
        SUBSTR = ' abc def ghi'
        s = '.' * size + SUBSTR
        self.assertEqual(s.count('.'), size)
        s += '.'
        self.assertEqual(s.count('.'), size + 1)
        self.assertEqual(s.count(' '), 3)
        self.assertEqual(s.count('i'), 1)
        self.assertEqual(s.count('j'), 0)
    @bigmemtest(minsize=_2G + 2, memuse=3)
    def test_decode(self, size):
        # Python 2 str.decode: ASCII dots decode 1:1 under UTF-8.
        s = '.' * size
        self.assertEqual(len(s.decode('utf-8')), size)
    def basic_encode_test(self, size, enc, c=u'.', expectedsize=None):
        # Shared helper: encode `size` copies of `c` and check the byte length.
        if expectedsize is None:
            expectedsize = size
        s = c * size
        self.assertEqual(len(s.encode(enc)), expectedsize)
    @bigmemtest(minsize=_2G + 2, memuse=3)
    def test_encode(self, size):
        # UTF-8 encoding of ASCII is 1 byte per character.
        return self.basic_encode_test(size, 'utf-8')
    @precisionbigmemtest(size=_4G // 6 + 2, memuse=2)
    def test_encode_raw_unicode_escape(self, size):
        # MemoryError is acceptable on 32-bit builds.
        try:
            return self.basic_encode_test(size, 'raw_unicode_escape')
        except MemoryError:
            pass # acceptable on 32-bit
    @precisionbigmemtest(size=_4G // 5 + 70, memuse=3)
    def test_encode_utf7(self, size):
        # MemoryError is acceptable on 32-bit builds.
        try:
            return self.basic_encode_test(size, 'utf7')
        except MemoryError:
            pass # acceptable on 32-bit
    @precisionbigmemtest(size=_4G // 4 + 5, memuse=6)
    def test_encode_utf32(self, size):
        # UTF-32: 4 bytes per char plus a 4-byte BOM.
        try:
            return self.basic_encode_test(size, 'utf32', expectedsize=4*size+4)
        except MemoryError:
            pass # acceptable on 32-bit
    @precisionbigmemtest(size=_2G-1, memuse=4)
    def test_decodeascii(self, size):
        # NOTE(review): despite the name, this exercises *encoding* to ASCII
        # via basic_encode_test; the name is kept for test-runner compatibility.
        return self.basic_encode_test(size, 'ascii', c='A')
    @precisionbigmemtest(size=_4G // 5, memuse=6+2)
    def test_unicode_repr_oflw(self, size):
        # Skipped unconditionally: repr of a huge unicode string crashed the
        # interpreter (CPython issue #14904).
        self.skipTest("test crashes - see issue #14904")
        try:
            s = u"\uAAAA"*size
            r = repr(s)
        except MemoryError:
            pass # acceptable on 32-bit
        else:
            self.assertTrue(s == eval(r))
    @bigmemtest(minsize=_2G, memuse=2)
    def test_endswith(self, size):
        # endswith must work with both short suffixes and huge ones.
        SUBSTR = ' abc def ghi'
        s = '-' * size + SUBSTR
        self.assertTrue(s.endswith(SUBSTR))
        self.assertTrue(s.endswith(s))
        s2 = '...' + s
        self.assertTrue(s2.endswith(s))
        self.assertFalse(s.endswith('a' + SUBSTR))
        self.assertFalse(SUBSTR.endswith(s))
    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_expandtabs(self, size):
        # Each ' \t' pair expands to a full tab stop (tabsize chars).
        s = '-' * size
        tabsize = 8
        self.assertEqual(s.expandtabs(), s)
        del s
        slen, remainder = divmod(size, tabsize)
        s = '        \t' * slen
        s = s.expandtabs(tabsize)
        self.assertEqual(len(s), size - remainder)
        self.assertEqual(len(s.strip(' ')), 0)
    @bigmemtest(minsize=_2G, memuse=2)
    def test_find(self, size):
        # find with start offsets beyond 2G must return correct indices.
        SUBSTR = ' abc def ghi'
        sublen = len(SUBSTR)
        s = ''.join([SUBSTR, '-' * size, SUBSTR])
        self.assertEqual(s.find(' '), 0)
        self.assertEqual(s.find(SUBSTR), 0)
        self.assertEqual(s.find(' ', sublen), sublen + size)
        self.assertEqual(s.find(SUBSTR, len(SUBSTR)), sublen + size)
        self.assertEqual(s.find('i'), SUBSTR.find('i'))
        self.assertEqual(s.find('i', sublen),
                         sublen + size + SUBSTR.find('i'))
        self.assertEqual(s.find('i', size),
                         sublen + size + SUBSTR.find('i'))
        self.assertEqual(s.find('j'), -1)
    @bigmemtest(minsize=_2G, memuse=2)
    def test_index(self, size):
        # Same as test_find, but index raises ValueError when not found.
        SUBSTR = ' abc def ghi'
        sublen = len(SUBSTR)
        s = ''.join([SUBSTR, '-' * size, SUBSTR])
        self.assertEqual(s.index(' '), 0)
        self.assertEqual(s.index(SUBSTR), 0)
        self.assertEqual(s.index(' ', sublen), sublen + size)
        self.assertEqual(s.index(SUBSTR, sublen), sublen + size)
        self.assertEqual(s.index('i'), SUBSTR.index('i'))
        self.assertEqual(s.index('i', sublen),
                         sublen + size + SUBSTR.index('i'))
        self.assertEqual(s.index('i', size),
                         sublen + size + SUBSTR.index('i'))
        self.assertRaises(ValueError, s.index, 'j')
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isalnum(self, size):
        # One non-alphanumeric char anywhere flips the result.
        SUBSTR = '123456'
        s = 'a' * size + SUBSTR
        self.assertTrue(s.isalnum())
        s += '.'
        self.assertFalse(s.isalnum())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isalpha(self, size):
        # One non-alphabetic char anywhere flips the result.
        SUBSTR = 'zzzzzzz'
        s = 'a' * size + SUBSTR
        self.assertTrue(s.isalpha())
        s += '.'
        self.assertFalse(s.isalpha())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isdigit(self, size):
        # One non-digit char anywhere flips the result.
        SUBSTR = '123456'
        s = '9' * size + SUBSTR
        self.assertTrue(s.isdigit())
        s += 'z'
        self.assertFalse(s.isdigit())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_islower(self, size):
        # Build from every non-uppercase byte; one uppercase char flips it.
        chars = ''.join([ chr(c) for c in range(255) if not chr(c).isupper() ])
        repeats = size // len(chars) + 2
        s = chars * repeats
        self.assertTrue(s.islower())
        s += 'A'
        self.assertFalse(s.islower())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isspace(self, size):
        # All whitespace until a single non-space char is appended.
        whitespace = ' \f\n\r\t\v'
        repeats = size // len(whitespace) + 2
        s = whitespace * repeats
        self.assertTrue(s.isspace())
        s += 'j'
        self.assertFalse(s.isspace())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_istitle(self, size):
        # 'A' + lowercase run is titlecased; a cased char after a cased char breaks it.
        SUBSTR = '123456'
        s = ''.join(['A', 'a' * size, SUBSTR])
        self.assertTrue(s.istitle())
        s += 'A'
        self.assertTrue(s.istitle())
        s += 'aA'
        self.assertFalse(s.istitle())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_isupper(self, size):
        # Build from every non-lowercase byte; one lowercase char flips it.
        chars = ''.join([ chr(c) for c in range(255) if not chr(c).islower() ])
        repeats = size // len(chars) + 2
        s = chars * repeats
        self.assertTrue(s.isupper())
        s += 'a'
        self.assertFalse(s.isupper())
    @bigmemtest(minsize=_2G, memuse=2)
    def test_join(self, size):
        # Joining with a huge separator must keep both pieces intact.
        s = 'A' * size
        x = s.join(['aaaaa', 'bbbbb'])
        self.assertEqual(x.count('a'), 5)
        self.assertEqual(x.count('b'), 5)
        self.assertTrue(x.startswith('aaaaaA'))
        self.assertTrue(x.endswith('Abbbbb'))
    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_ljust(self, size):
        # ljust pads on the right, so the string must start with SUBSTR.
        SUBSTR = ' abc def ghi'
        s = SUBSTR.ljust(size)
        self.assertTrue(s.startswith(SUBSTR + '  '))
        self.assertEqual(len(s), size)
        self.assertEqual(s.strip(), SUBSTR.strip())
    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_lower(self, size):
        # Every character must be lowered, none dropped.
        s = 'A' * size
        s = s.lower()
        self.assertEqual(len(s), size)
        self.assertEqual(s.count('a'), size)
    @bigmemtest(minsize=_2G + 10, memuse=1)
    def test_lstrip(self, size):
        # Left-padded input strips down to SUBSTR; already-stripped input
        # must be returned unchanged (same object, CPython optimization).
        SUBSTR = 'abc def ghi'
        s = SUBSTR.rjust(size)
        self.assertEqual(len(s), size)
        self.assertEqual(s.lstrip(), SUBSTR.lstrip())
        del s
        s = SUBSTR.ljust(size)
        self.assertEqual(len(s), size)
        stripped = s.lstrip()
        self.assertTrue(stripped is s)
    @bigmemtest(minsize=_2G + 10, memuse=2)
    def test_replace(self, size):
        # Full replace, then a count-limited replace leaving 4 at the end.
        replacement = 'a'
        s = ' ' * size
        s = s.replace(' ', replacement)
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(replacement), size)
        s = s.replace(replacement, ' ', size - 4)
        self.assertEqual(len(s), size)
        self.assertEqual(s.count(replacement), 4)
        self.assertEqual(s[-10:], '      aaaa')
    @bigmemtest(minsize=_2G, memuse=2)
    def test_rfind(self, size):
        # rfind with end offsets beyond 2G must return correct indices.
        SUBSTR = ' abc def ghi'
        sublen = len(SUBSTR)
        s = ''.join([SUBSTR, '-' * size, SUBSTR])
        self.assertEqual(s.rfind(' '), sublen + size + SUBSTR.rfind(' '))
        self.assertEqual(s.rfind(SUBSTR), sublen + size)
        self.assertEqual(s.rfind(' ', 0, size), SUBSTR.rfind(' '))
        self.assertEqual(s.rfind(SUBSTR, 0, sublen + size), 0)
        self.assertEqual(s.rfind('i'), sublen + size + SUBSTR.rfind('i'))
        self.assertEqual(s.rfind('i', 0, sublen), SUBSTR.rfind('i'))
        self.assertEqual(s.rfind('i', 0, sublen + size),
                         SUBSTR.rfind('i'))
        self.assertEqual(s.rfind('j'), -1)
    @bigmemtest(minsize=_2G, memuse=2)
    def test_rindex(self, size):
        # Same as test_rfind, but rindex raises ValueError when not found.
        SUBSTR = ' abc def ghi'
        sublen = len(SUBSTR)
        s = ''.join([SUBSTR, '-' * size, SUBSTR])
        self.assertEqual(s.rindex(' '),
                         sublen + size + SUBSTR.rindex(' '))
        self.assertEqual(s.rindex(SUBSTR), sublen + size)
        self.assertEqual(s.rindex(' ', 0, sublen + size - 1),
                         SUBSTR.rindex(' '))
        self.assertEqual(s.rindex(SUBSTR, 0, sublen + size), 0)
        self.assertEqual(s.rindex('i'),
                         sublen + size + SUBSTR.rindex('i'))
        self.assertEqual(s.rindex('i', 0, sublen), SUBSTR.rindex('i'))
        self.assertEqual(s.rindex('i', 0, sublen + size),
                         SUBSTR.rindex('i'))
        self.assertRaises(ValueError, s.rindex, 'j')
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_rjust(self, size):
    # NOTE: deliberately uses ljust to build the fixture (mirrors the
    # companion test); only the strip comparison matters.
    SUBSTR = ' abc def ghi'
    padded = SUBSTR.ljust(size)
    self.assertTrue(padded.startswith(SUBSTR + ' '))
    self.assertEqual(len(padded), size)
    self.assertEqual(padded.strip(), SUBSTR.strip())
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_rstrip(self, size):
    # Mirror of test_lstrip: right-strip a left-justified giant; a
    # right-justified one is returned unchanged (identical object).
    SUBSTR = ' abc def ghi'
    padded = SUBSTR.ljust(size)
    self.assertEqual(len(padded), size)
    self.assertEqual(padded.rstrip(), SUBSTR.rstrip())
    del padded
    padded = SUBSTR.rjust(size)
    self.assertEqual(len(padded), size)
    stripped = padded.rstrip()
    self.assertTrue(stripped is padded)
# The test takes about size bytes to build a string, and then about
# sqrt(size) substrings of sqrt(size) in size and a list to
# hold sqrt(size) items. It's close but just over 2x size.
@bigmemtest(minsize=_2G, memuse=2.1)
def test_split_small(self, size):
    # Crudely estimate a chunk size so the split result won't take an
    # inordinate amount of memory.
    chunksize = int(size ** 0.5 + 2)
    SUBSTR = 'a' + ' ' * chunksize
    text = SUBSTR * chunksize
    pieces = text.split()
    self.assertEqual(len(pieces), chunksize)
    self.assertEqual(set(pieces), set(['a']))
    del pieces
    pieces = text.split('a')
    self.assertEqual(len(pieces), chunksize + 1)
    self.assertEqual(set(pieces), set(['', ' ' * chunksize]))
# Allocates a string of twice size (and briefly two) and a list of
# size. Because of internal affairs, the s.split() call produces a
# list of size times the same one-character string, so we only
# suffer for the list size. (Otherwise, it'd cost another 48 times
# size in bytes!) Nevertheless, a list of size takes
# 8*size bytes.
@bigmemtest(minsize=_2G + 5, memuse=10)
def test_split_large(self, size):
    text = ' a' * size + ' '
    pieces = text.split()
    self.assertEqual(len(pieces), size)
    self.assertEqual(set(pieces), set(['a']))
    del pieces
    pieces = text.split('a')
    self.assertEqual(len(pieces), size + 1)
    self.assertEqual(set(pieces), set([' ']))
@bigmemtest(minsize=_2G, memuse=2.1)
def test_splitlines(self, size):
    # Crudely estimate a chunk size so the result list stays modest.
    # Each SUBSTR repetition contributes two lines ('\n' and '\r\n').
    chunksize = int(size ** 0.5 + 2) // 2
    SUBSTR = ' ' * chunksize + '\n' + ' ' * chunksize + '\r\n'
    text = SUBSTR * chunksize
    lines = text.splitlines()
    self.assertEqual(len(lines), chunksize * 2)
    self.assertEqual(set(lines), set([' ' * chunksize]))
@bigmemtest(minsize=_2G, memuse=2)
def test_startswith(self, size):
    # A string starts with itself and its own prefix, but not with a
    # substring that only appears at its end.
    SUBSTR = ' abc def ghi'
    text = '-' * size + SUBSTR
    self.assertTrue(text.startswith(text))
    self.assertTrue(text.startswith('-' * size))
    self.assertFalse(text.startswith(SUBSTR))
@bigmemtest(minsize=_2G, memuse=1)
def test_strip(self, size):
    # Strip giant padding from either side down to the bare substring.
    SUBSTR = ' abc def ghi '
    padded = SUBSTR.rjust(size)
    self.assertEqual(len(padded), size)
    self.assertEqual(padded.strip(), SUBSTR.strip())
    del padded
    padded = SUBSTR.ljust(size)
    self.assertEqual(len(padded), size)
    self.assertEqual(padded.strip(), SUBSTR.strip())
@bigmemtest(minsize=_2G, memuse=2)
def test_swapcase(self, size):
    # Pattern mixes cases, digits, punctuation and non-ASCII/NUL bytes.
    SUBSTR = "aBcDeFG12.'\xa9\x00"
    sublen = len(SUBSTR)
    repeats = size // sublen + 2
    text = (SUBSTR * repeats).swapcase()
    self.assertEqual(len(text), sublen * repeats)
    # Both ends must show the case-swapped pattern.
    self.assertEqual(text[:sublen * 3], SUBSTR.swapcase() * 3)
    self.assertEqual(text[-sublen * 3:], SUBSTR.swapcase() * 3)
@bigmemtest(minsize=_2G, memuse=2)
def test_title(self, size):
    # title() capitalises after non-alpha boundaries; the repeated word
    # is one long alpha run, so only the very first copy keeps capitals.
    SUBSTR = 'SpaaHAaaAaham'
    text = SUBSTR * (size // len(SUBSTR) + 2)
    text = text.title()
    self.assertTrue(text.startswith((SUBSTR * 3).title()))
    self.assertTrue(text.endswith(SUBSTR.lower() * 3))
@bigmemtest(minsize=_2G, memuse=2)
def test_translate(self, size):
    # Map '.' -> '-', 'a' -> '!', 'Z' -> '$' over a giant repetition.
    trans = string.maketrans('.aZ', '-!$')
    SUBSTR = 'aZz.z.Aaz.'
    sublen = len(SUBSTR)
    repeats = size // sublen + 2
    text = (SUBSTR * repeats).translate(trans)
    self.assertEqual(len(text), repeats * sublen)
    self.assertEqual(text[:sublen], SUBSTR.translate(trans))
    self.assertEqual(text[-sublen:], SUBSTR.translate(trans))
    self.assertEqual(text.count('.'), 0)
    self.assertEqual(text.count('!'), repeats * 2)
    self.assertEqual(text.count('z'), repeats * 3)
@bigmemtest(minsize=_2G + 5, memuse=2)
def test_upper(self, size):
    # Every character must be upper-cased; the length is unchanged.
    text = 'a' * size
    text = text.upper()
    self.assertEqual(len(text), size)
    self.assertEqual(text.count('A'), size)
@bigmemtest(minsize=_2G + 20, memuse=1)
def test_zfill(self, size):
    # zfill must keep the sign in front and pad with zeros after it.
    SUBSTR = '-568324723598234'
    text = SUBSTR.zfill(size)
    self.assertTrue(text.endswith('0' + SUBSTR[1:]))
    self.assertTrue(text.startswith('-0'))
    self.assertEqual(len(text), size)
    self.assertEqual(text.count('0'), size - len(SUBSTR))
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_format(self, size):
    # %-formatting with a huge value, then with a huge format string.
    text = '-' * size
    formatted = '%s' % (text,)
    self.assertTrue(text == formatted)
    del formatted
    formatted = '..%s..' % (text,)
    self.assertEqual(len(formatted), len(text) + 4)
    self.assertTrue(formatted.startswith('..-'))
    self.assertTrue(formatted.endswith('-..'))
    del text, formatted
    size //= 2
    edge = '-' * size
    text = ''.join([edge, '%s', edge])
    del edge
    text = text % '...'
    self.assertEqual(len(text), size * 2 + 3)
    self.assertEqual(text.count('.'), 3)
    self.assertEqual(text.count('-'), size * 2)
@bigmemtest(minsize=_2G + 10, memuse=5)
def test_repr_small(self, size):
    # Printable characters repr one-to-one, plus the two quotes.
    text = '-' * size
    text = repr(text)
    self.assertEqual(len(text), size + 2)
    self.assertEqual(text[0], "'")
    self.assertEqual(text[-1], "'")
    self.assertEqual(text.count('-'), size)
    del text
    # repr() will create a string four times as large as this 'binary
    # string', but we don't want to allocate much more than twice
    # size in total. (We do extra testing in test_repr_large())
    text = '\x00' * size
    text = repr(text)
    self.assertEqual(len(text), size * 4 + 2)
    self.assertEqual(text[0], "'")
    self.assertEqual(text[-1], "'")
    self.assertEqual(text.count('\\'), size)
    self.assertEqual(text.count('0'), size * 2)
@bigmemtest(minsize=_2G + 10, memuse=5)
def test_repr_large(self, size):
    # Each NUL byte reprs as the four characters "\x00".
    text = '\x00' * size
    text = repr(text)
    self.assertEqual(len(text), size * 4 + 2)
    self.assertEqual(text[0], "'")
    self.assertEqual(text[-1], "'")
    self.assertEqual(text.count('\\'), size)
    self.assertEqual(text.count('0'), size * 2)
@bigmemtest(minsize=2**32 // 5, memuse=6+2)
def test_unicode_repr(self, size):
    # Non-ASCII code points expand in repr, so it must exceed the input.
    text = u"\uAAAA" * size
    self.assertTrue(len(repr(text)) > size)
# This test is meaningful even with size < 2G, as long as the
# doubled string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(minsize=_1G + 2, memuse=3)
def test_concat(self, size):
    text = '.' * size
    self.assertEqual(len(text), size)
    text = text + text
    self.assertEqual(len(text), size * 2)
    self.assertEqual(text.count('.'), size * 2)
# This test is meaningful even with size < 2G, as long as the
# repeated string is > 2G (but it tests more if both are > 2G :)
@bigmemtest(minsize=_1G + 2, memuse=3)
def test_repeat(self, size):
    text = '.' * size
    self.assertEqual(len(text), size)
    text = text * 2
    self.assertEqual(len(text), size * 2)
    self.assertEqual(text.count('.'), size * 2)
@bigmemtest(minsize=_2G + 20, memuse=2)
def test_slice_and_getitem(self, size):
    SUBSTR = '0123456789'
    sublen = len(SUBSTR)
    text = SUBSTR * (size // sublen)
    # Probe ~100 evenly spaced, pattern-aligned positions.
    stepsize = len(text) // 100
    stepsize = stepsize - (stepsize % sublen)
    for i in range(0, len(text) - stepsize, stepsize):
        self.assertEqual(text[i], SUBSTR[0])
        self.assertEqual(text[i:i + sublen], SUBSTR)
        self.assertEqual(text[i:i + sublen:2], SUBSTR[::2])
        if i > 0:
            self.assertEqual(text[i + sublen - 1:i - 1:-3],
                             SUBSTR[sublen::-3])
    # Make sure we do some slicing and indexing near the end of the
    # string, too.
    self.assertEqual(text[len(text) - 1], SUBSTR[-1])
    self.assertEqual(text[-1], SUBSTR[-1])
    self.assertEqual(text[len(text) - 10], SUBSTR[0])
    self.assertEqual(text[-sublen], SUBSTR[0])
    self.assertEqual(text[len(text):], '')
    self.assertEqual(text[len(text) - 1:], SUBSTR[-1])
    self.assertEqual(text[-1:], SUBSTR[-1])
    self.assertEqual(text[len(text) - sublen:], SUBSTR)
    self.assertEqual(text[-sublen:], SUBSTR)
    self.assertEqual(len(text[:]), len(text))
    self.assertEqual(len(text[:len(text) - 5]), len(text) - 5)
    self.assertEqual(len(text[5:-5]), len(text) - 10)
    self.assertRaises(IndexError, operator.getitem, text, len(text))
    self.assertRaises(IndexError, operator.getitem, text, len(text) + 1)
    self.assertRaises(IndexError, operator.getitem, text, len(text) + 1<<31)
@bigmemtest(minsize=_2G, memuse=2)
def test_contains(self, size):
    # Membership tests against a needle buried mid-string.
    SUBSTR = '0123456789'
    edge = '-' * (size // 2)
    text = ''.join([edge, SUBSTR, edge])
    del edge
    self.assertIn(SUBSTR, text)
    self.assertNotIn(SUBSTR * 2, text)
    self.assertIn('-', text)
    self.assertNotIn('a', text)
    text += 'a'
    self.assertIn('a', text)
@bigmemtest(minsize=_2G + 10, memuse=2)
def test_compare(self, size):
    # Equality across equal, longer, and differing giant strings.
    left = '-' * size
    right = '-' * size
    self.assertTrue(left == right)
    del right
    right = left + 'a'
    self.assertFalse(left == right)
    del right
    right = '.' * size
    self.assertFalse(left == right)
@bigmemtest(minsize=_2G + 10, memuse=1)
def test_hash(self, size):
    # Not sure if we can do any meaningful tests here... Even if we
    # start relying on the exact algorithm used, the result will be
    # different depending on the size of the C 'long int'. Even this
    # test is dodgy (there's no *guarantee* that the two things should
    # have a different hash, even if they, in the current
    # implementation, almost always do.)
    text = '\x00' * size
    h1 = hash(text)
    del text
    text = '\x00' * (size + 1)
    self.assertFalse(h1 == hash(text))
class TupleTest(unittest.TestCase):
    # Tuples have a small, fixed-sized head and an array of pointers to
    # data. Since we're testing 64-bit addressing, we can assume that the
    # pointers are 8 bytes, and that thus that the tuples take up 8 bytes
    # per size.

    # As a side-effect of testing long tuples, these tests happen to test
    # having more than 2<<31 references to any given object. Hence the
    # use of different types of objects as contents in different tests.

    @bigmemtest(minsize=_2G + 2, memuse=16)
    def test_compare(self, size):
        t1 = (u'',) * size
        t2 = (u'',) * size
        self.assertTrue(t1 == t2)
        del t2
        t2 = (u'',) * (size + 1)
        self.assertFalse(t1 == t2)
        del t2
        t2 = (1,) * size
        self.assertFalse(t1 == t2)

    # Test concatenating into a single tuple of more than 2G in length,
    # and concatenating a tuple of more than 2G in length separately, so
    # the smaller test still gets run even if there isn't memory for the
    # larger test (but we still let the tester know the larger test is
    # skipped, in verbose mode.)
    def basic_concat_test(self, size):
        t = ((),) * size
        self.assertEqual(len(t), size)
        t = t + t
        self.assertEqual(len(t), size * 2)

    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_concat_small(self, size):
        return self.basic_concat_test(size)

    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_concat_large(self, size):
        return self.basic_concat_test(size)

    @bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
    def test_contains(self, size):
        t = (1, 2, 3, 4, 5) * size
        self.assertEqual(len(t), size * 5)
        self.assertIn(5, t)
        self.assertNotIn((1, 2, 3, 4, 5), t)
        self.assertNotIn(0, t)

    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_hash(self, size):
        t1 = (0,) * size
        h1 = hash(t1)
        del t1
        t2 = (0,) * (size + 1)
        self.assertFalse(h1 == hash(t2))

    @bigmemtest(minsize=_2G + 10, memuse=8)
    def test_index_and_slice(self, size):
        t = (None,) * size
        self.assertEqual(len(t), size)
        self.assertEqual(t[-1], None)
        self.assertEqual(t[5], None)
        self.assertEqual(t[size - 1], None)
        self.assertRaises(IndexError, operator.getitem, t, size)
        self.assertEqual(t[:5], (None,) * 5)
        self.assertEqual(t[-5:], (None,) * 5)
        self.assertEqual(t[20:25], (None,) * 5)
        self.assertEqual(t[-25:-20], (None,) * 5)
        self.assertEqual(t[size - 5:], (None,) * 5)
        self.assertEqual(t[size - 5:size], (None,) * 5)
        self.assertEqual(t[size - 6:size - 2], (None,) * 4)
        self.assertEqual(t[size:size], ())
        self.assertEqual(t[size:size + 5], ())

    # Like test_concat, split in two.
    def basic_test_repeat(self, size):
        t = ('',) * size
        self.assertEqual(len(t), size)
        t = t * 2
        self.assertEqual(len(t), size * 2)

    @bigmemtest(minsize=_2G // 2 + 2, memuse=24)
    def test_repeat_small(self, size):
        return self.basic_test_repeat(size)

    @bigmemtest(minsize=_2G + 2, memuse=24)
    def test_repeat_large(self, size):
        return self.basic_test_repeat(size)

    @bigmemtest(minsize=_1G - 1, memuse=12)
    def test_repeat_large_2(self, size):
        return self.basic_test_repeat(size)

    @precisionbigmemtest(size=_1G - 1, memuse=9)
    def test_from_2G_generator(self, size):
        try:
            t = tuple(xrange(size))
        except MemoryError:
            pass  # acceptable on 32-bit
        else:
            count = 0
            for item in t:
                self.assertEqual(item, count)
                count += 1
            self.assertEqual(count, size)

    @precisionbigmemtest(size=_1G - 25, memuse=9)
    def test_from_almost_2G_generator(self, size):
        try:
            t = tuple(xrange(size))
            count = 0
            for item in t:
                self.assertEqual(item, count)
                count += 1
            self.assertEqual(count, size)
        except MemoryError:
            pass  # acceptable, expected on 32-bit

    # Like test_concat, split in two.
    def basic_test_repr(self, size):
        t = (0,) * size
        s = repr(t)
        # The repr of a tuple of 0's is exactly three times the tuple length.
        self.assertEqual(len(s), size * 3)
        self.assertEqual(s[:5], '(0, 0')
        self.assertEqual(s[-5:], '0, 0)')
        self.assertEqual(s.count('0'), size)

    @bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
    def test_repr_small(self, size):
        return self.basic_test_repr(size)

    @bigmemtest(minsize=_2G + 2, memuse=8 + 3)
    def test_repr_large(self, size):
        return self.basic_test_repr(size)
class ListTest(unittest.TestCase):
# Like tuples, lists have a small, fixed-sized head and an array of
# pointers to data, so 8 bytes per size. Also like tuples, we make the
# lists hold references to various objects to test their refcount
# limits.
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_compare(self, size):
l1 = [u''] * size
l2 = [u''] * size
self.assertTrue(l1 == l2)
del l2
l2 = [u''] * (size + 1)
self.assertFalse(l1 == l2)
del l2
l2 = [2] * size
self.assertFalse(l1 == l2)
# Test concatenating into a single list of more than 2G in length,
# and concatenating a list of more than 2G in length separately, so
# the smaller test still gets run even if there isn't memory for the
# larger test (but we still let the tester know the larger test is
# skipped, in verbose mode.)
def basic_test_concat(self, size):
l = [[]] * size
self.assertEqual(len(l), size)
l = l + l
self.assertEqual(len(l), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_concat_small(self, size):
return self.basic_test_concat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_concat_large(self, size):
return self.basic_test_concat(size)
def basic_test_inplace_concat(self, size):
l = [sys.stdout] * size
l += l
self.assertEqual(len(l), size * 2)
self.assertTrue(l[0] is l[-1])
self.assertTrue(l[size - 1] is l[size + 1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_inplace_concat_small(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_inplace_concat_large(self, size):
return self.basic_test_inplace_concat(size)
@bigmemtest(minsize=_2G // 5 + 10, memuse=8 * 5)
def test_contains(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEqual(len(l), size * 5)
self.assertIn(5, l)
self.assertNotIn([1, 2, 3, 4, 5], l)
self.assertNotIn(0, l)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_hash(self, size):
l = [0] * size
self.assertRaises(TypeError, hash, l)
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_index_and_slice(self, size):
l = [None] * size
self.assertEqual(len(l), size)
self.assertEqual(l[-1], None)
self.assertEqual(l[5], None)
self.assertEqual(l[size - 1], None)
self.assertRaises(IndexError, operator.getitem, l, size)
self.assertEqual(l[:5], [None] * 5)
self.assertEqual(l[-5:], [None] * 5)
self.assertEqual(l[20:25], [None] * 5)
self.assertEqual(l[-25:-20], [None] * 5)
self.assertEqual(l[size - 5:], [None] * 5)
self.assertEqual(l[size - 5:size], [None] * 5)
self.assertEqual(l[size - 6:size - 2], [None] * 4)
self.assertEqual(l[size:size], [])
self.assertEqual(l[size:size+5], [])
l[size - 2] = 5
self.assertEqual(len(l), size)
self.assertEqual(l[-3:], [None, 5, None])
self.assertEqual(l.count(5), 1)
self.assertRaises(IndexError, operator.setitem, l, size, 6)
self.assertEqual(len(l), size)
l[size - 7:] = [1, 2, 3, 4, 5]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[-7:], [None, None, 1, 2, 3, 4, 5])
l[:7] = [1, 2, 3, 4, 5]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[:7], [1, 2, 3, 4, 5, None, None])
del l[size - 1]
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(l[-1], 4)
del l[-2:]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[-1], 2)
del l[0]
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(l[0], 2)
del l[:2]
size -= 2
self.assertEqual(len(l), size)
self.assertEqual(l[0], 4)
# Like test_concat, split in two.
def basic_test_repeat(self, size):
l = [] * size
self.assertFalse(l)
l = [''] * size
self.assertEqual(len(l), size)
l = l * 2
self.assertEqual(len(l), size * 2)
@bigmemtest(minsize=_2G // 2 + 2, memuse=24)
def test_repeat_small(self, size):
return self.basic_test_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=24)
def test_repeat_large(self, size):
return self.basic_test_repeat(size)
def basic_test_inplace_repeat(self, size):
l = ['']
l *= size
self.assertEqual(len(l), size)
self.assertTrue(l[0] is l[-1])
del l
l = [''] * size
l *= 2
self.assertEqual(len(l), size * 2)
self.assertTrue(l[size - 1] is l[-1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=16)
def test_inplace_repeat_small(self, size):
return self.basic_test_inplace_repeat(size)
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_inplace_repeat_large(self, size):
return self.basic_test_inplace_repeat(size)
def basic_test_repr(self, size):
l = [0] * size
s = repr(l)
# The repr of a list of 0's is exactly three times the list length.
self.assertEqual(len(s), size * 3)
self.assertEqual(s[:5], '[0, 0')
self.assertEqual(s[-5:], '0, 0]')
self.assertEqual(s.count('0'), size)
@bigmemtest(minsize=_2G // 3 + 2, memuse=8 + 3)
def test_repr_small(self, size):
return self.basic_test_repr(size)
@bigmemtest(minsize=_2G + 2, memuse=8 + 3)
def test_repr_large(self, size):
return self.basic_test_repr(size)
# list overallocates ~1/8th of the total size (on first expansion) so
# the single list.append call puts memuse at 9 bytes per size.
@bigmemtest(minsize=_2G, memuse=9)
def test_append(self, size):
l = [object()] * size
l.append(object())
self.assertEqual(len(l), size+1)
self.assertTrue(l[-3] is l[-2])
self.assertFalse(l[-2] is l[-1])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_count(self, size):
l = [1, 2, 3, 4, 5] * size
self.assertEqual(l.count(1), size)
self.assertEqual(l.count("1"), 0)
def basic_test_extend(self, size):
l = [file] * size
l.extend(l)
self.assertEqual(len(l), size * 2)
self.assertTrue(l[0] is l[-1])
self.assertTrue(l[size - 1] is l[size + 1])
@bigmemtest(minsize=_2G // 2 + 2, memuse=16)
def test_extend_small(self, size):
return self.basic_test_extend(size)
@bigmemtest(minsize=_2G + 2, memuse=16)
def test_extend_large(self, size):
return self.basic_test_extend(size)
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_index(self, size):
l = [1L, 2L, 3L, 4L, 5L] * size
size *= 5
self.assertEqual(l.index(1), 0)
self.assertEqual(l.index(5, size - 5), size - 1)
self.assertEqual(l.index(5, size - 5, size), size - 1)
self.assertRaises(ValueError, l.index, 1, size - 4, size)
self.assertRaises(ValueError, l.index, 6L)
# This tests suffers from overallocation, just like test_append.
@bigmemtest(minsize=_2G + 10, memuse=9)
def test_insert(self, size):
l = [1.0] * size
l.insert(size - 1, "A")
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[-3:], [1.0, "A", 1.0])
l.insert(size + 1, "B")
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[-3:], ["A", 1.0, "B"])
l.insert(1, "C")
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[:3], [1.0, "C", 1.0])
self.assertEqual(l[size - 3:], ["A", 1.0, "B"])
@bigmemtest(minsize=_2G // 5 + 4, memuse=8 * 5)
def test_pop(self, size):
l = [u"a", u"b", u"c", u"d", u"e"] * size
size *= 5
self.assertEqual(len(l), size)
item = l.pop()
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(item, u"e")
self.assertEqual(l[-2:], [u"c", u"d"])
item = l.pop(0)
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(item, u"a")
self.assertEqual(l[:2], [u"b", u"c"])
item = l.pop(size - 2)
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(item, u"c")
self.assertEqual(l[-2:], [u"b", u"d"])
@bigmemtest(minsize=_2G + 10, memuse=8)
def test_remove(self, size):
l = [10] * size
self.assertEqual(len(l), size)
l.remove(10)
size -= 1
self.assertEqual(len(l), size)
# Because of the earlier l.remove(), this append doesn't trigger
# a resize.
l.append(5)
size += 1
self.assertEqual(len(l), size)
self.assertEqual(l[-2:], [10, 5])
l.remove(5)
size -= 1
self.assertEqual(len(l), size)
self.assertEqual(l[-2:], [10, 10])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_reverse(self, size):
l = [1, 2, 3, 4, 5] * size
l.reverse()
self.assertEqual(len(l), size * 5)
self.assertEqual(l[-5:], [5, 4, 3, 2, 1])
self.assertEqual(l[:5], [5, 4, 3, 2, 1])
@bigmemtest(minsize=_2G // 5 + 2, memuse=8 * 5)
def test_sort(self, size):
l = [1, 2, 3, 4, 5] * size
l.sort()
self.assertEqual(len(l), size * 5)
self.assertEqual(l.count(1), size)
self.assertEqual(l[:10], [1] * 10)
self.assertEqual(l[-10:], [5] * 10)
class BufferTest(unittest.TestCase):
    @precisionbigmemtest(size=_1G, memuse=4)
    def test_repeat(self, size):
        # buffer() is gone in py3k, so silence the deprecation warning.
        try:
            with test_support.check_py3k_warnings():
                b = buffer("AAAA") * size
        except MemoryError:
            pass  # acceptable on 32-bit
        else:
            count = 0
            for c in b:
                self.assertEqual(c, 'A')
                count += 1
            self.assertEqual(count, size * 4)
def test_main():
    """Run all four bigmem suites via the regrtest helper."""
    test_support.run_unittest(StrTest, TupleTest, ListTest, BufferTest)
if __name__ == '__main__':
    # An optional CLI argument sets the memory limit consumed by the
    # bigmem decorators (e.g. "4G") before the suites run.
    if len(sys.argv) > 1:
        test_support.set_memlimit(sys.argv[1])
    test_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# pylint: skip-file
import json
import logging
from collections import defaultdict
from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.db import connection
from django.http import HttpResponse
from django.urls import reverse
from django.utils.deprecation import MiddlewareMixin
from opaque_keys.edx.keys import CourseKey, UsageKey, i4xEncoder
from pytz import UTC
from common.djangoapps.student.models import get_user_by_username_or_email
from common.djangoapps.student.roles import GlobalStaff
from lms.djangoapps.courseware.access import has_access
from lms.djangoapps.discussion.django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY
from lms.djangoapps.discussion.django_comment_client.permissions import (
check_permissions_by_view,
get_team,
has_permission
)
from lms.djangoapps.discussion.django_comment_client.settings import MAX_COMMENT_DEPTH
from openedx.core.djangoapps.course_groups.cohorts import get_cohort_id, get_cohort_names, is_course_cohorted
from openedx.core.djangoapps.django_comment_common.models import (
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_STUDENT,
CourseDiscussionSettings,
DiscussionsIdMapping,
Role
)
from openedx.core.lib.cache_utils import request_cached
from openedx.core.lib.courses import get_course_by_id
from xmodule.modulestore.django import modulestore
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID
from xmodule.partitions.partitions_service import PartitionService
log = logging.getLogger(__name__)
def extract(dic, keys):
    """
    Return a new dict holding only *keys*, each looked up in *dic*.

    Keys missing from *dic* still appear in the result, mapped to None
    (plain dict.get semantics).
    """
    subset = {}
    for key in keys:
        subset[key] = dic.get(key)
    return subset
def strip_none(dic):
    """
    Return a copy of *dic* with every None-valued entry removed.

    Falsy-but-not-None values (0, '', [], False) are retained.
    """
    cleaned = {}
    for key, value in dic.items():
        if value is not None:
            cleaned[key] = value
    return cleaned
def strip_blank(dic):
    """
    Return a copy of *dic* without entries whose value is a blank string.

    A value is "blank" when it is a str consisting solely of whitespace
    (including the empty string). Non-string values are always kept.
    """
    def _is_blank(v):
        # Only strings can be blank; strip() covers whitespace-only.
        return isinstance(v, str) and not v.strip()
    return {key: value for key, value in dic.items() if not _is_blank(value)}
# TODO should we be checking if d1 and d2 have the same keys with different values?
def get_role_ids(course_id):
    """
    Return {role_name: [user_id, ...]} for every forum role defined in the
    course, excluding the base Student role.
    """
    privileged_roles = Role.objects.filter(course_id=course_id).exclude(name=FORUM_ROLE_STUDENT)
    return {
        role.name: list(role.users.values_list('id', flat=True))
        for role in privileged_roles
    }
def has_discussion_privileges(user, course_id):
    """
    Return True if the user is privileged in teams discussions for
    this course, i.e. is a Discussion Admin, Moderator, or Community TA.

    Args:
        user (User): The user to check privileges for.
        course_id (CourseKey): A key for the course to check privileges for.

    Returns:
        bool
    """
    # get_role_ids returns only admin/moderator/community-TA memberships,
    # so belonging to any returned role implies privilege.
    role_members = get_role_ids(course_id)
    return any(user.id in member_ids for member_ids in role_members.values())
def has_forum_access(uname, course_id, rolename):
    """
    Return True when the user with username *uname* holds *rolename* in
    the given course; False when the role does not exist there or the
    user is not among its members. (Role-based; not forums-specific.)
    """
    try:
        role = Role.objects.get(name=rolename, course_id=course_id)
    except Role.DoesNotExist:
        # No such role configured for this course at all.
        return False
    return role.users.filter(username=uname).exists()
def is_user_community_ta(user, course_id):
    """
    Boolean operation to check whether a user's role is Community TA or not
    """
    # NOTE(review): has_forum_access expects a username ("uname") but is
    # handed the User object itself here; this relies on the ORM coercing
    # the instance in the username lookup -- confirm against callers.
    return has_forum_access(user, course_id, FORUM_ROLE_COMMUNITY_TA)
def has_required_keys(xblock):
    """
    Return True iff *xblock* carries every attribute needed to build a
    get_discussion_id_map_entry() record.

    A missing (or None) 'discussion_id', 'discussion_category' or
    'discussion_target' disqualifies the block; the first offender is
    logged at debug level.
    """
    required = ('discussion_id', 'discussion_category', 'discussion_target')
    for attr in required:
        if getattr(xblock, attr, None) is None:
            log.debug(
                "Required key '%s' not in discussion %s, leaving out of category map",
                attr,
                xblock.location
            )
            return False
    return True
def get_accessible_discussion_xblocks(course, user, include_all=False):
    """
    Return a list of all valid discussion xblocks in this course that
    are accessible to the given user.

    Args:
        course: The course whose discussion xblocks to collect.
        user: The user whose access is checked.
        include_all (bool): If True, skip the per-xblock access check.
            Community TAs always get everything regardless of this flag.
    """
    # Bug fix: the caller's include_all used to be unconditionally
    # overwritten by the community-TA check, silently discarding the
    # argument. Honor it, and additionally include all for community TAs.
    include_all = include_all or getattr(user, 'is_community_ta', False)
    return get_accessible_discussion_xblocks_by_course_id(course.id, user, include_all=include_all)
@request_cached()
def get_accessible_discussion_xblocks_by_course_id(course_id, user=None, include_all=False):  # pylint: disable=invalid-name
    """
    Return a list of all valid discussion xblocks in this course.
    Checks for the given user's access if include_all is False.
    """
    candidates = modulestore().get_items(course_id, qualifiers={'category': 'discussion'}, include_orphans=False)
    accessible = []
    for xblock in candidates:
        # Blocks missing required metadata are never returned.
        if not has_required_keys(xblock):
            continue
        if include_all or has_access(user, 'load', xblock, course_id):
            accessible.append(xblock)
    return accessible
def get_discussion_id_map_entry(xblock):
    """
    Return a (discussion_id, metadata) pair suitable for inclusion in the
    results of get_discussion_id_map().

    The title is the last path component of the discussion category,
    with the discussion target appended as " / <target>" when non-empty.
    """
    category_leaf = xblock.discussion_category.split("/")[-1].strip()
    if xblock.discussion_target:
        title = category_leaf + " / " + xblock.discussion_target
    else:
        title = category_leaf
    return (
        xblock.discussion_id,
        {
            "location": xblock.location,
            "title": title
        }
    )
class DiscussionIdMapIsNotCached(Exception):
    """Thrown when the discussion id map is not cached for this course, but an attempt was made to access it."""
@request_cached()
def get_cached_discussion_key(course_id, discussion_id):
    """
    Returns the usage key of the discussion xblock associated with discussion_id if it is cached. If the discussion id
    map is cached but does not contain discussion_id, returns None. If the discussion id map is not cached for course,
    raises a DiscussionIdMapIsNotCached exception.
    """
    try:
        mapping = DiscussionsIdMapping.objects.get(course_id=course_id).mapping
        # An empty mapping is treated the same as a missing row: not cached.
        if not mapping:
            raise DiscussionIdMapIsNotCached()
        usage_key_string = mapping.get(discussion_id)
        if usage_key_string:
            # Re-anchor the stored usage key into the requested course run.
            return UsageKey.from_string(usage_key_string).map_into_course(course_id)
        else:
            return None
    except DiscussionsIdMapping.DoesNotExist:
        raise DiscussionIdMapIsNotCached()
def get_cached_discussion_id_map(course, discussion_ids, user):
    """
    Returns a dict mapping discussion_ids to respective discussion xblock metadata if it is cached and visible to the
    user. If not, returns the result of get_discussion_id_map
    """
    # Thin convenience wrapper keyed off the course object.
    return get_cached_discussion_id_map_by_course_id(course.id, discussion_ids, user)
def get_cached_discussion_id_map_by_course_id(course_id, discussion_ids, user):
    """
    Returns a dict mapping discussion_ids to respective discussion xblock metadata if it is cached and visible to the
    user. If not, returns the result of get_discussion_id_map
    """
    # Community TAs see every discussion regardless of the access check.
    include_all = getattr(user, 'is_community_ta', False)
    try:
        entries = []
        for discussion_id in discussion_ids:
            key = get_cached_discussion_key(course_id, discussion_id)
            # Ids missing from the cached map are silently skipped.
            if not key:
                continue
            xblock = _get_item_from_modulestore(key)
            # Drop blocks lacking metadata or not visible to this user.
            if not (has_required_keys(xblock) and (include_all or has_access(user, 'load', xblock, course_id))):
                continue
            entries.append(get_discussion_id_map_entry(xblock))
        return dict(entries)
    except DiscussionIdMapIsNotCached:
        # Whole-course cache miss: fall back to a full modulestore scan.
        return get_discussion_id_map_by_course_id(course_id, user)
def get_discussion_id_map(course, user):
    """
    Transform the list of this course's discussion xblocks (visible to a given user) into a dictionary of metadata keyed
    by discussion_id.
    """
    # Delegate to the course-id based variant.
    return get_discussion_id_map_by_course_id(course.id, user)
def get_discussion_id_map_by_course_id(course_id, user):
    """
    Transform the list of this course's discussion xblocks (visible to a given user) into a dictionary of metadata keyed
    by discussion_id.
    """
    xblocks = get_accessible_discussion_xblocks_by_course_id(course_id, user)
    # dict(list(map(...))) built a throwaway intermediate list; dict()
    # consumes the map iterator of (id, metadata) pairs directly.
    return dict(map(get_discussion_id_map_entry, xblocks))
@request_cached()
def _get_item_from_modulestore(key):
    # Request-scoped cache around a single modulestore item fetch.
    return modulestore().get_item(key)
def _filter_unstarted_categories(category_map, course):
    """
    Returns a subset of categories from the provided map which have not yet met the start date
    Includes information about category children, subcategories (different), and entries
    """
    now = datetime.now(UTC)
    result_map = {}
    # Walk the (sub)category tree with two queues kept in lockstep: each
    # unfiltered node popped is mirrored by its filtered twin in result_map.
    unfiltered_queue = [category_map]
    filtered_queue = [result_map]
    while unfiltered_queue:
        unfiltered_map = unfiltered_queue.pop()
        filtered_map = filtered_queue.pop()
        filtered_map["children"] = []
        filtered_map["entries"] = {}
        filtered_map["subcategories"] = {}
        for child, c_type in unfiltered_map["children"]:
            if child in unfiltered_map["entries"] and c_type == TYPE_ENTRY:
                # Self-paced courses ignore start dates entirely.
                if course.self_paced or unfiltered_map["entries"][child]["start_date"] <= now:
                    filtered_map["children"].append((child, c_type))
                    filtered_map["entries"][child] = {}
                    # Copy entry metadata, dropping the internal start_date.
                    for key in unfiltered_map["entries"][child]:
                        if key != "start_date":
                            filtered_map["entries"][child][key] = unfiltered_map["entries"][child][key]
                else:
                    log.debug(
                        "Filtering out:%s with start_date: %s", child, unfiltered_map["entries"][child]["start_date"]
                    )
            else:
                # NOTE(review): entries above compare with `<= now` but
                # subcategories use `< now` -- presumably unintentional
                # asymmetry; confirm before unifying.
                if course.self_paced or unfiltered_map["subcategories"][child]["start_date"] < now:
                    filtered_map["children"].append((child, c_type))
                    filtered_map["subcategories"][child] = {}
                    # Enqueue the matching nodes so the subtree is walked.
                    unfiltered_queue.append(unfiltered_map["subcategories"][child])
                    filtered_queue.append(filtered_map["subcategories"][child])
    return result_map
def _sort_map_entries(category_map, sort_alpha):
    """
    Recursively order the "children" list of every node in *category_map*.

    Entries with no explicit sort_key get their title as the key when
    sort_alpha is set; otherwise missing keys sort as the empty string.
    sorted() is stable, so equal keys retain their mapping order.
    """
    def _sort_key(item):
        # item is (title, node, type); order by the node's sort_key.
        node_key = item[1]["sort_key"]
        return node_key if node_key is not None else ''

    candidates = []
    for title, entry in category_map["entries"].items():
        if entry["sort_key"] is None and sort_alpha:
            entry["sort_key"] = title
        candidates.append((title, entry, TYPE_ENTRY))
    for title, subcategory in category_map["subcategories"].items():
        candidates.append((title, subcategory, TYPE_SUBCATEGORY))
        # Order each subtree independently of the current level.
        _sort_map_entries(subcategory, sort_alpha)
    category_map["children"] = [(title, c_type) for title, _node, c_type in sorted(candidates, key=_sort_key)]
def get_discussion_category_map(course, user, divided_only_if_explicit=False, exclude_unstarted=True):
    """
    Transform the list of this course's discussion xblocks into a recursive dictionary structure. This is used
    to render the discussion category map in the discussion tab sidebar for a given user.

    Args:
        course: Course for which to get the ids.
        user: User to check for access.
        divided_only_if_explicit (bool): If True, inline topics are marked is_divided only if they are
            explicitly listed in CourseDiscussionSettings.discussion_topics.
        exclude_unstarted (bool): If True, filter out categories/entries whose start date has not
            yet passed (ignored for self-paced courses by the filter itself).

    Example:
        >>> example = {
        >>>     "entries": {
        >>>         "General": {
        >>>             "sort_key": "General",
        >>>             "is_divided": True,
        >>>             "id": "i4x-edx-eiorguegnru-course-foobarbaz"
        >>>         }
        >>>     },
        >>>     "children": [
        >>>         ["General", "entry"],
        >>>         ["Getting Started", "subcategory"]
        >>>     ],
        >>>     "subcategories": {
        >>>         "Getting Started": {
        >>>             "subcategories": {},
        >>>             "children": [
        >>>                 ["Working with Videos", "entry"],
        >>>                 ["Videos on edX", "entry"]
        >>>             ],
        >>>             "entries": {
        >>>                 "Working with Videos": {
        >>>                     "sort_key": None,
        >>>                     "is_divided": False,
        >>>                     "id": "d9f970a42067413cbb633f81cfb12604"
        >>>                 },
        >>>                 "Videos on edX": {
        >>>                     "sort_key": None,
        >>>                     "is_divided": False,
        >>>                     "id": "98d8feb5971041a085512ae22b398613"
        >>>                 }
        >>>             }
        >>>         }
        >>>     }
        >>> }
    """
    # Group accessible discussion xblocks by their "A / B / C" category path.
    unexpanded_category_map = defaultdict(list)
    xblocks = get_accessible_discussion_xblocks(course, user)
    discussion_settings = CourseDiscussionSettings.get(course.id)
    discussion_division_enabled = course_discussion_division_enabled(discussion_settings)
    divided_discussion_ids = discussion_settings.divided_discussions
    for xblock in xblocks:
        discussion_id = xblock.discussion_id
        title = xblock.discussion_target
        sort_key = xblock.sort_key
        category = " / ".join([x.strip() for x in xblock.discussion_category.split("/")])
        # Handle case where xblock.start is None
        entry_start_date = xblock.start if xblock.start else datetime.max.replace(tzinfo=UTC)
        unexpanded_category_map[category].append({"title": title,
                                                  "id": discussion_id,
                                                  "sort_key": sort_key,
                                                  "start_date": entry_start_date})
    # Expand the flat path map into the nested subcategories/entries tree.
    category_map = {"entries": defaultdict(dict), "subcategories": defaultdict(dict)}
    for category_path, entries in unexpanded_category_map.items():
        node = category_map["subcategories"]
        path = [x.strip() for x in category_path.split("/")]
        # Find the earliest start date for the entries in this category
        category_start_date = None
        for entry in entries:
            if category_start_date is None or entry["start_date"] < category_start_date:
                category_start_date = entry["start_date"]
        # Walk (creating as needed) the intermediate levels of the path,
        # propagating the earliest start date seen to each ancestor node.
        for level in path[:-1]:
            if level not in node:
                node[level] = {"subcategories": defaultdict(dict),
                               "entries": defaultdict(dict),
                               "sort_key": level,
                               "start_date": category_start_date}
            else:
                if node[level]["start_date"] > category_start_date:
                    node[level]["start_date"] = category_start_date
            node = node[level]["subcategories"]
        level = path[-1]
        if level not in node:
            node[level] = {"subcategories": defaultdict(dict),
                           "entries": defaultdict(dict),
                           "sort_key": level,
                           "start_date": category_start_date}
        else:
            if node[level]["start_date"] > category_start_date:
                node[level]["start_date"] = category_start_date
        divide_all_inline_discussions = (
            not divided_only_if_explicit and discussion_settings.always_divide_inline_discussions
        )
        dupe_counters = defaultdict(lambda: 0)  # counts the number of times we see each title
        for entry in entries:
            is_entry_divided = (
                discussion_division_enabled and (
                    divide_all_inline_discussions or entry["id"] in divided_discussion_ids
                )
            )
            title = entry["title"]
            if node[level]["entries"][title]:
                # If we've already seen this title, append an incrementing number to disambiguate
                # the category from other categories sharing the same title in the course discussion UI.
                dupe_counters[title] += 1
                title = f"{title} ({dupe_counters[title]})"
            node[level]["entries"][title] = {"id": entry["id"],
                                             "sort_key": entry["sort_key"],
                                             "start_date": entry["start_date"],
                                             "is_divided": is_entry_divided}
    # TODO. BUG! : course location is not unique across multiple course runs!
    # (I think Kevin already noticed this) Need to send course_id with requests, store it
    # in the backend.
    # Course-wide (top-level) topics configured on the course itself.
    for topic, entry in course.discussion_topics.items():
        category_map['entries'][topic] = {
            "id": entry["id"],
            "sort_key": entry.get("sort_key", topic),
            "start_date": datetime.now(UTC),
            "is_divided": (
                discussion_division_enabled and entry["id"] in divided_discussion_ids
            )
        }
    _sort_map_entries(category_map, course.discussion_sort_alpha)
    return _filter_unstarted_categories(category_map, course) if exclude_unstarted else category_map
def discussion_category_id_access(course, user, discussion_id, xblock=None):
    """
    Return True iff ``discussion_id`` is accessible for ``user`` in ``course``.

    Assumes that the commentable identified by ``discussion_id`` has a null or
    'course' context. Uses the discussion id cache when available, falling
    back to ``get_discussion_categories_ids`` when there is no cache.
    """
    # Top-level topics are visible to everyone enrolled in the course.
    if discussion_id in course.top_level_discussion_topic_ids:
        return True
    is_community_ta = getattr(user, 'is_community_ta', False)
    try:
        if not xblock:
            key = get_cached_discussion_key(course.id, discussion_id)
            if not key:
                return False
            xblock = _get_item_from_modulestore(key)
        if not has_required_keys(xblock):
            return False
        # Community TAs see everything; others need load access.
        return is_community_ta or has_access(user, 'load', xblock, course.id)
    except DiscussionIdMapIsNotCached:
        # No cache available: fall back to enumerating accessible ids.
        return discussion_id in get_discussion_categories_ids(course, user)
def get_discussion_categories_ids(course, user, include_all=False):
    """
    Return the ids of the course's discussion categories accessible to ``user``.

    Args:
        course: Course for which to get the ids.
        user: User to check for access.
        include_all (bool): If True, return all ids. Used by configuration views.
    """
    xblock_ids = []
    for xblock in get_accessible_discussion_xblocks(course, user, include_all=include_all):
        xblock_ids.append(xblock.discussion_id)
    # Top-level topic ids are always included.
    return course.top_level_discussion_topic_ids + xblock_ids
class JsonResponse(HttpResponse):
    """
    Django response object delivering JSON representations
    """

    def __init__(self, data=None):
        """
        Serialize ``data`` (if any) with i4xEncoder and emit it as JSON.
        """
        body = json.dumps(data, cls=i4xEncoder)
        super().__init__(body, content_type='application/json; charset=utf-8')
class JsonError(HttpResponse):
    """
    Django response object delivering JSON exceptions
    """

    def __init__(self, error_messages=None, status=400):
        """
        Object constructor, returns an error response containing the provided
        exception messages.

        Args:
            error_messages: a single message string or a list of messages;
                defaults to an empty list.
            status (int): HTTP status code of the response (default 400).
        """
        # Fix: the previous default argument ([]) was a mutable object shared
        # across all calls; use None as the sentinel and normalize here.
        if error_messages is None:
            error_messages = []
        elif isinstance(error_messages, str):
            error_messages = [error_messages]
        content = json.dumps({'errors': error_messages}, indent=2, ensure_ascii=False)
        super().__init__(content, content_type='application/json; charset=utf-8', status=status)
class HtmlResponse(HttpResponse):
    """
    Django response object delivering HTML representations
    """
    def __init__(self, html=''):
        """
        Object constructor, brokers provided HTML to caller
        """
        # NOTE(review): despite the class name, the response is sent with
        # content_type 'text/plain', not 'text/html' — confirm whether callers
        # rely on the browser not rendering the markup before changing this.
        super().__init__(html, content_type='text/plain')
class ViewNameMiddleware(MiddlewareMixin):
    """
    Django middleware object to inject view name into request context
    """
    def process_view(self, request, view_func, view_args, view_kwargs):
        """
        Injects the view name value into the request context
        """
        # Stash the resolved view function's name on the request so later
        # pipeline stages (templates, logging) can reference it.
        request.view_name = view_func.__name__
class QueryCountDebugMiddleware(MiddlewareMixin):
    """
    This middleware will log the number of queries run
    and the total time taken for each request (with a
    status code of 200). It does not currently support
    multi-db setups.
    """

    def process_response(self, request, response):
        """
        Log query count and cumulative query time for 200 OK responses.
        """
        if response.status_code != 200:
            return response
        total_time = 0
        for query in connection.queries:
            elapsed = query.get('time')
            if elapsed is None:
                # django-debug-toolbar monkeypatches the connection
                # cursor wrapper and adds extra information in each
                # item in connection.queries. The query time is stored
                # under the key "duration" rather than "time" and is
                # in milliseconds, not seconds.
                elapsed = query.get('duration', 0) / 1000
            total_time += float(elapsed)
        log.info('%s queries run, total %s seconds', len(connection.queries), total_time)
        return response
def get_ability(course_id, content, user):
    """
    Return a dictionary of forums-oriented actions and the user's permission to perform them

    The permission name checked depends on whether `content` is a thread or a comment.
    """
    (user_group_id, content_user_group_id) = get_user_group_ids(course_id, content, user)
    return {
        'editable': check_permissions_by_view(
            user,
            course_id,
            content,
            "update_thread" if content['type'] == 'thread' else "update_comment",
            user_group_id,
            content_user_group_id
        ),
        'can_reply': check_permissions_by_view(
            user, course_id, content, "create_comment" if content['type'] == 'thread' else "create_sub_comment",
        ),
        'can_delete': check_permissions_by_view(
            user,
            course_id,
            content,
            "delete_thread" if content['type'] == 'thread' else "delete_comment",
            user_group_id,
            content_user_group_id
        ),
        # Only threads can be opened/closed; for comments this checks a falsy view name.
        'can_openclose': check_permissions_by_view(
            user,
            course_id,
            content,
            "openclose_thread" if content['type'] == 'thread' else False,
            user_group_id,
            content_user_group_id
        ),
        # Users may not vote on their own content.
        'can_vote': not is_content_authored_by(content, user) and check_permissions_by_view(
            user,
            course_id,
            content,
            "vote_for_thread" if content['type'] == 'thread' else "vote_for_comment"
        ),
        # Users may not report their own content; global staff may always report.
        'can_report': not is_content_authored_by(content, user) and (check_permissions_by_view(
            user,
            course_id,
            content,
            "flag_abuse_for_thread" if content['type'] == 'thread' else "flag_abuse_for_comment"
        ) or GlobalStaff().has_user(user))
    }
# TODO: RENAME
def get_user_group_ids(course_id, content, user=None):
    """
    Given a user, course ID, and the content of the thread or comment, return
    the group ids for the requesting user and for the author of the content.

    Returns:
        tuple: (user_group_id, content_user_group_id); either element is None
        when no group applies.
    """
    if course_id is None:
        return None, None
    content_user_group_id = None
    author_username = content.get('username')
    if author_username:
        try:
            author = get_user_by_username_or_email(author_username)
            content_user_group_id = get_group_id_for_user_from_cache(author, course_id)
        except User.DoesNotExist:
            # Author no longer exists; treat as ungrouped.
            content_user_group_id = None
    user_group_id = get_group_id_for_user_from_cache(user, course_id) if user else None
    return user_group_id, content_user_group_id
def get_annotated_content_info(course_id, content, user, user_info):
    """
    Get metadata for an individual content (thread or comment).
    """
    content_id = content['id']
    if content_id in user_info['upvoted_ids']:
        voted = 'up'
    elif content_id in user_info['downvoted_ids']:
        voted = 'down'
    else:
        voted = ''
    return {
        'voted': voted,
        'subscribed': content_id in user_info['subscribed_thread_ids'],
        'ability': get_ability(course_id, content, user),
    }
# TODO: RENAME
def get_annotated_content_infos(course_id, thread, user, user_info):
    """
    Get metadata for a thread and every response/comment nested beneath it.
    """
    infos = {}

    def annotate(content):
        # Record this node, then recurse into every kind of child list.
        infos[str(content['id'])] = get_annotated_content_info(course_id, content, user, user_info)
        for child_key in ('children', 'endorsed_responses', 'non_endorsed_responses'):
            for child in content.get(child_key, []):
                annotate(child)

    annotate(thread)
    return infos
def get_metadata_for_threads(course_id, threads, user, user_info):
    """
    Returns annotated content information for the specified course, threads,
    and user information.
    """
    metadata = {}
    for thread in threads:
        # Merge each thread's annotations into one flat id -> info map.
        metadata.update(get_annotated_content_infos(course_id, thread, user, user_info))
    return metadata
def permalink(content):
    """
    Build the single-thread URL for a thread, or the same URL with an anchor
    to the specific comment for a comment.
    """
    raw_course_id = content['course_id']
    course_id = str(raw_course_id) if isinstance(raw_course_id, CourseKey) else raw_course_id
    if content['type'] == 'thread':
        return reverse('single_thread', args=[course_id, content['commentable_id'], content['id']])
    thread_url = reverse(
        'single_thread',
        args=[course_id, content['commentable_id'], content['thread_id']]
    )
    return thread_url + '#' + content['id']
def extend_content(content):
    """
    Augment a thread/comment dict in place with display-oriented fields:
    displayed title/body (preferring search highlights), permalink, author
    roles, and whether the content was edited after creation.
    """
    roles = {}
    if content.get('user_id'):
        try:
            user = User.objects.get(pk=content['user_id'])
            # NOTE(review): the constant key 'name' is overwritten on every
            # iteration, so only one role survives in the dict — confirm
            # whether a collection of role names was intended here.
            roles = {'name': role.name.lower() for role in user.roles.filter(course_id=content['course_id'])}
        except User.DoesNotExist:
            log.error(
                'User ID %s in comment content %s but not in our DB.',
                content.get('user_id'),
                content.get('id')
            )
    content_info = {
        'displayed_title': content.get('highlighted_title') or content.get('title', ''),
        'displayed_body': content.get('highlighted_body') or content.get('body', ''),
        'permalink': permalink(content),
        'roles': roles,
        # True when the content was edited after creation.
        'updated': content['created_at'] != content['updated_at'],
    }
    content.update(content_info)
    return content
def add_courseware_context(content_list, course, user, id_map=None):
    """
    Decorates `content_list` with courseware metadata using the discussion id map cache if available.

    Each content dict whose commentable_id resolves in the id map gains
    'courseware_url' and 'courseware_title' keys; others are left untouched.
    """
    if id_map is None:
        id_map = get_cached_discussion_id_map(
            course,
            [content['commentable_id'] for content in content_list],
            user
        )
    for content in content_list:
        commentable_id = content['commentable_id']
        if commentable_id in id_map:
            location = str(id_map[commentable_id]["location"])
            title = id_map[commentable_id]["title"]
            url = reverse('jump_to', kwargs={"course_id": str(course.id),
                                             "location": location})
            content.update({"courseware_url": url, "courseware_title": title})
def prepare_content(content, course_key, is_staff=False, discussion_division_enabled=None, group_names_by_id=None):
    """
    This function is used to pre-process thread and comment models in various
    ways before adding them to the HTTP response. This includes fixing empty
    attribute fields, enforcing author anonymity, and enriching metadata around
    group ownership and response endorsement.

    @TODO: not all response pre-processing steps are currently integrated into
    this function.

    Arguments:
        content (dict): A thread or comment.
        course_key (CourseKey): The course key of the course.
        is_staff (bool): Whether the user is a staff member.
        discussion_division_enabled (bool): Whether division of course discussions is enabled.
            Note that callers of this method do not need to provide this value (it defaults to None)--
            it is calculated and then passed to recursive calls of this method.
        group_names_by_id (dict): Optional precomputed group_id -> name map used to
            annotate divided threads without repeated lookups.
    """
    # Whitelist of keys kept in the outgoing representation.
    fields = [
        'id', 'title', 'body', 'course_id', 'anonymous', 'anonymous_to_peers',
        'endorsed', 'parent_id', 'thread_id', 'votes', 'closed', 'created_at',
        'updated_at', 'depth', 'type', 'commentable_id', 'comments_count',
        'at_position_list', 'children', 'highlighted_title', 'highlighted_body',
        'courseware_title', 'courseware_url', 'unread_comments_count',
        'read', 'group_id', 'group_name', 'pinned', 'abuse_flaggers',
        'stats', 'resp_skip', 'resp_limit', 'resp_total', 'thread_type',
        'endorsed_responses', 'non_endorsed_responses', 'non_endorsed_resp_total',
        'endorsement', 'context', 'last_activity_at'
    ]
    # Author identity is only exposed for non-anonymous posts (staff may see
    # anonymous-to-peers authors).
    if (content.get('anonymous') is False) and ((content.get('anonymous_to_peers') is False) or is_staff):
        fields += ['username', 'user_id']
    content = strip_none(extract(content, fields))
    if content.get("endorsement"):
        endorsement = content["endorsement"]
        endorser = None
        if endorsement["user_id"]:
            try:
                endorser = User.objects.get(pk=endorsement["user_id"])
            except User.DoesNotExist:
                log.error(
                    "User ID %s in endorsement for comment %s but not in our DB.",
                    content.get('user_id'),
                    content.get('id')
                )
        # Only reveal endorser if requester can see author or if endorser is staff
        if (
            endorser and
            ("username" in fields or has_permission(endorser, "endorse_comment", course_key))
        ):
            endorsement["username"] = endorser.username
        else:
            del endorsement["user_id"]
    if discussion_division_enabled is None:
        # Computed once on the top-level call, then threaded through recursion.
        discussion_division_enabled = course_discussion_division_enabled(CourseDiscussionSettings.get(course_key))
    # Recursively prepare every kind of child content list.
    for child_content_key in ["children", "endorsed_responses", "non_endorsed_responses"]:
        if child_content_key in content:
            children = [
                prepare_content(
                    child,
                    course_key,
                    is_staff,
                    discussion_division_enabled=discussion_division_enabled,
                    group_names_by_id=group_names_by_id
                )
                for child in content[child_content_key]
            ]
            content[child_content_key] = children
    if discussion_division_enabled:
        # Augment the specified thread info to include the group name if a group id is present.
        if content.get('group_id') is not None:
            course_discussion_settings = CourseDiscussionSettings.get(course_key)
            if group_names_by_id:
                content['group_name'] = group_names_by_id.get(content.get('group_id'))
            else:
                content['group_name'] = get_group_name(content.get('group_id'), course_discussion_settings)
            content['is_commentable_divided'] = is_commentable_divided(
                course_key, content['commentable_id'], course_discussion_settings
            )
    else:
        # Remove any group information that might remain if the course had previously been divided.
        content.pop('group_id', None)
    return content
def get_group_id_for_comments_service(request, course_key, commentable_id=None):
    """
    Given a user requesting content within a `commentable_id`, determine the
    group_id which should be passed to the comments service.

    Returns:
        int: the group_id to pass to the comments service or None if nothing
        should be passed

    Raises:
        ValueError if the requested group_id is invalid
    """
    course_discussion_settings = CourseDiscussionSettings.get(course_key)
    if commentable_id is not None and not is_commentable_divided(course_key, commentable_id, course_discussion_settings):
        # Never pass a group_id to the comments service for a non-divided
        # commentable
        return None
    # Fix: previously only GET/POST branches assigned requested_group_id, so
    # any other HTTP method raised NameError for privileged users. Default to
    # None (no specific group requested).
    requested_group_id = None
    if request.method == "GET":
        requested_group_id = request.GET.get('group_id')
    elif request.method == "POST":
        requested_group_id = request.POST.get('group_id')
    if has_permission(request.user, "see_all_cohorts", course_key):
        # Privileged users may query any group (or all groups when omitted).
        if not requested_group_id:
            return None
        group_id = int(requested_group_id)
        _verify_group_exists(group_id, course_discussion_settings)
    else:
        # regular users always query with their own id.
        group_id = get_group_id_for_user_from_cache(request.user, course_key)
    return group_id
@request_cached()
def get_group_id_for_user_from_cache(user, course_id):
    """
    Caches the results of get_group_id_for_user, but serializes the course_id
    instead of the course_discussions_settings object as cache keys.
    """
    # request_cached memoizes per request; course_id (not the settings
    # object) forms the cache key.
    return get_group_id_for_user(user, CourseDiscussionSettings.get(course_id))
def get_group_id_for_user(user, course_discussion_settings):
    """
    Given a user, return the group_id for that user according to the
    course_discussion_settings.

    Returns None when discussions are not divided, or when the user belongs to
    no group within the active division scheme.
    """
    scheme = _get_course_division_scheme(course_discussion_settings)
    if scheme == CourseDiscussionSettings.COHORT:
        return get_cohort_id(user, course_discussion_settings.course_id)
    if scheme == CourseDiscussionSettings.ENROLLMENT_TRACK:
        partition_service = PartitionService(course_discussion_settings.course_id)
        group_id = partition_service.get_user_group_id_for_partition(user, ENROLLMENT_TRACK_PARTITION_ID)
        if group_id is None:
            return None
        # We negate the group_ids from dynamic partitions so that they will not conflict
        # with cohort IDs (which are an auto-incrementing integer field, starting at 1).
        return -group_id
    return None
def is_comment_too_deep(parent):
    """
    Determine whether a comment with the given parent violates MAX_COMMENT_DEPTH

    parent can be None to determine whether root comments are allowed
    """
    # A None MAX_COMMENT_DEPTH disables the limit; a negative one forbids all
    # comments. Note: when parent is falsy, the chained boolean expression
    # yields a falsy non-bool (the parent value itself) rather than False, so
    # treat the result as truthy/falsy only.
    return (
        MAX_COMMENT_DEPTH is not None and (
            MAX_COMMENT_DEPTH < 0 or
            (parent and (parent["depth"] or 0) >= MAX_COMMENT_DEPTH)
        )
    )
def is_commentable_divided(course_key, commentable_id, course_discussion_settings=None):
    """
    Report whether the given commentable is divided, meaning learners are
    split into groups (Cohorts or Enrollment Tracks) and only see posts from
    their own group.

    Args:
        course_key: CourseKey
        commentable_id: string
        course_discussion_settings: CourseDiscussionSettings model instance (optional). If not
            supplied, it will be retrieved via the course_key.

    Raises:
        Http404 if the course doesn't exist.
    """
    if not course_discussion_settings:
        course_discussion_settings = CourseDiscussionSettings.get(course_key)
    course = get_course_by_id(course_key)

    def _decide():
        # Division disabled course-wide, or a team discussion: never divided.
        if not course_discussion_division_enabled(course_discussion_settings) or get_team(commentable_id):
            return False
        # Top-level discussions must be explicitly configured as divided; the
        # same applies to inline discussions when the always-divide default is
        # explicitly set to False.
        if (
            commentable_id in course.top_level_discussion_topic_ids or
            course_discussion_settings.always_divide_inline_discussions is False
        ):
            return commentable_id in course_discussion_settings.divided_discussions
        # Inline discussions are divided by default.
        return True

    ans = _decide()
    log.debug("is_commentable_divided(%s, %s) = {%s}", course_key, commentable_id, ans)
    return ans
def course_discussion_division_enabled(course_discussion_settings):
    """
    Report whether discussion division is in effect for the course behind
    ``course_discussion_settings``. Considers both the configured
    division_scheme and course state (e.g. are cohorts enabled, and are there
    multiple enrollment tracks?).

    Args:
        course_discussion_settings: CourseDiscussionSettings model instance

    Returns: True if discussion division is enabled for the course, else False
    """
    effective_scheme = _get_course_division_scheme(course_discussion_settings)
    return effective_scheme != CourseDiscussionSettings.NONE
def available_division_schemes(course_key):
    """
    List the discussion division schemes usable in this course: cohorts when
    cohorting is enabled, enrollment tracks when more than one track exists.
    Returns an empty list if neither applies.

    Args:
        course_key: CourseKey

    Returns: list of possible division schemes (for example, CourseDiscussionSettings.COHORT)
    """
    schemes = []
    if is_course_cohorted(course_key):
        schemes.append(CourseDiscussionSettings.COHORT)
    if enrollment_track_group_count(course_key) > 1:
        schemes.append(CourseDiscussionSettings.ENROLLMENT_TRACK)
    return schemes
def enrollment_track_group_count(course_key):
    """
    Count the enrollment-track groups defined for this course.

    Args:
        course_key: CourseKey

    Returns:
        Count of enrollment track division scheme
    """
    groups = _get_enrollment_track_groups(course_key)
    return len(groups)
def _get_course_division_scheme(course_discussion_settings):
    """
    Resolve the effective division scheme, downgrading to NONE when the
    configured scheme's prerequisites are not met (cohorting must be enabled
    for COHORT; more than one track for ENROLLMENT_TRACK).
    """
    scheme = course_discussion_settings.division_scheme
    course_id = course_discussion_settings.course_id
    if scheme == CourseDiscussionSettings.COHORT and not is_course_cohorted(course_id):
        return CourseDiscussionSettings.NONE
    if scheme == CourseDiscussionSettings.ENROLLMENT_TRACK and enrollment_track_group_count(course_id) <= 1:
        return CourseDiscussionSettings.NONE
    return scheme
def get_group_name(group_id, course_discussion_settings):
    """
    Given a specified comments_service group_id, returns the learner-facing
    name of the Group. If no such Group exists for the specified group_id
    (taking into account the division_scheme and course specified by course_discussion_settings),
    returns None.

    Args:
        group_id: the group_id as used by the comments_service code
        course_discussion_settings: CourseDiscussionSettings model instance

    Returns: learner-facing name of the Group, or None if no such group exists
    """
    # dict.get performs the membership test and lookup in one step (the
    # previous `d[k] if k in d else None` did two lookups).
    return get_group_names_by_id(course_discussion_settings).get(group_id)
def get_group_names_by_id(course_discussion_settings):
    """
    Build a dict of group_id to learner-facing group name for the
    division_scheme in use as specified by course_discussion_settings.

    Args:
        course_discussion_settings: CourseDiscussionSettings model instance

    Returns: dict of group_id to learner-facing group names. If no
        division_scheme is in use, returns an empty dict.
    """
    scheme = _get_course_division_scheme(course_discussion_settings)
    course_key = course_discussion_settings.course_id
    if scheme == CourseDiscussionSettings.COHORT:
        return get_cohort_names(get_course_by_id(course_key))
    if scheme == CourseDiscussionSettings.ENROLLMENT_TRACK:
        # We negate the group_ids from dynamic partitions so that they will not conflict
        # with cohort IDs (which are an auto-incrementing integer field, starting at 1).
        return {-group.id: group.name for group in _get_enrollment_track_groups(course_key)}
    return {}
def _get_enrollment_track_groups(course_key):
    """
    Return the Groups of the course's EnrollmentTrackUserPartition, or an
    empty list when the course has no such partition.
    """
    partition = PartitionService(course_key).get_user_partition(ENROLLMENT_TRACK_PARTITION_ID)
    if not partition:
        return []
    return partition.groups
def _verify_group_exists(group_id, course_discussion_settings):
    """
    Helper method that verifies the given group_id corresponds to a Group in the
    division scheme being used. If it does not, a ValueError will be raised.
    """
    if get_group_name(group_id, course_discussion_settings) is None:
        # Include the offending id in the message to ease debugging; callers
        # catching ValueError are unaffected.
        raise ValueError(f"group_id {group_id} does not exist in the current division scheme")
def is_discussion_enabled(course_id):
    """
    Return True if discussions are enabled; else False

    Note: the feature flag is global, so ``course_id`` does not currently
    affect the result; the parameter is kept for interface compatibility.
    """
    # bool() guarantees the documented True/False contract even when the
    # feature flag is absent (dict.get would otherwise return None).
    return bool(settings.FEATURES.get('ENABLE_DISCUSSION_SERVICE'))
def is_content_authored_by(content, user):
    """
    Return True if the passed user authored this content, else False.
    """
    author_id = content.get('user_id')
    try:
        return int(author_id) == user.id
    except (ValueError, TypeError):
        # Missing or non-numeric user_id: not authored by this user.
        return False
|
unknown
|
codeparrot/codeparrot-clean
| ||
//! Peephole optimizations that can be performed while creating clif ir.
use cranelift_codegen::ir::condcodes::IntCC;
use cranelift_codegen::ir::{InstructionData, Opcode, Value, ValueDef};
use cranelift_frontend::FunctionBuilder;
/// If the given value was produced by the lowering of `Rvalue::Not` return the input and true,
/// otherwise return the given value and false.
pub(crate) fn maybe_unwrap_bool_not(bcx: &mut FunctionBuilder<'_>, arg: Value) -> (Value, bool) {
    // Only values that are the first result of an instruction are candidates;
    // anything else (e.g. block parameters) is returned unchanged.
    if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
        match bcx.func.dfg.insts[arg_inst] {
            // This is the lowering of `Rvalue::Not`
            // (an `icmp_imm eq x, 0` compares the boolean against zero).
            InstructionData::IntCompareImm {
                opcode: Opcode::IcmpImm,
                cond: IntCC::Equal,
                arg,
                imm,
            } if imm.bits() == 0 => (arg, true),
            _ => (arg, false),
        }
    } else {
        (arg, false)
    }
}
/// Returns whether the branch is statically known to be taken or `None` if it isn't statically known.
pub(crate) fn maybe_known_branch_taken(
    bcx: &FunctionBuilder<'_>,
    arg: Value,
    test_zero: bool,
) -> Option<bool> {
    // The condition is only statically known when it is the direct result of
    // an `iconst` instruction.
    let ValueDef::Result(cond_inst, 0) = bcx.func.dfg.value_def(arg) else { return None };
    let InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } = bcx.func.dfg.insts[cond_inst]
    else {
        return None;
    };
    let is_zero = imm.bits() == 0;
    // `test_zero` selects whether the branch fires on zero or on non-zero.
    Some(if test_zero { is_zero } else { !is_zero })
}
|
rust
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
import logging
import base64
import httplib2
import os
from django.contrib.auth import authenticate
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.http import require_POST
from django.shortcuts import render_to_response
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.translation import ugettext as _
from guardian.shortcuts import get_objects_for_user
from geonode.base.models import ResourceBase
from geonode.layers.forms import LayerStyleUploadForm
from geonode.layers.models import Layer, Style
from geonode.layers.views import _resolve_layer, _PERMISSION_MSG_MODIFY
from geonode.geoserver.signals import gs_catalog
from geonode.tasks.update import geoserver_update_layers
from geonode.utils import json_response, _get_basic_auth_info
from geoserver.catalog import FailedRequestError, ConflictingDataError
from lxml import etree
from .helpers import get_stores, ogc_server_settings, set_styles, style_update, create_gs_thumbnail
logger = logging.getLogger(__name__)
def stores(request, store_type=None):
    # Serialize the store listing (optionally filtered by type) as JSON.
    store_list = get_stores(store_type)
    return HttpResponse(json.dumps(store_list))
@user_passes_test(lambda u: u.is_superuser)
def updatelayers(request):
    """
    Kick off the async task that (re)builds GeoNode layers from GeoServer,
    then redirect to the layer browse page. Superusers only.
    """
    # NOTE(review): request.REQUEST merges GET and POST and is removed in
    # newer Django versions — confirm the target Django before upgrading.
    params = request.REQUEST
    # Get the owner specified in the request if any, otherwise used the logged
    # user
    owner = params.get('owner', None)
    owner = get_user_model().objects.get(
        username=owner) if owner is not None else request.user
    workspace = params.get('workspace', None)
    store = params.get('store', None)
    # Renamed local from `filter` to avoid shadowing the builtin; the task
    # keyword argument name is unchanged.
    layer_filter = params.get('filter', None)
    geoserver_update_layers.delay(ignore_errors=False, owner=owner, workspace=workspace,
                                  store=store, filter=layer_filter)
    return HttpResponseRedirect(reverse('layer_browse'))
@login_required
@require_POST
def layer_style(request, layername):
    """
    Switch the default style of a layer to the style named in the
    ``defaultStyle`` POST parameter. The previous default is kept in the
    layer's alternate style list.
    """
    layer = _resolve_layer(
        request,
        layername,
        'base.change_resourcebase',
        _PERMISSION_MSG_MODIFY)
    style_name = request.POST.get('defaultStyle')
    # would be nice to implement
    # better handling of default style switching
    # in layer model or deeper (gsconfig.py, REST API)
    old_default = layer.default_style
    if old_default.name == style_name:
        return HttpResponse(
            "Default style for %s remains %s" %
            (layer.name, style_name), status=200)
    # Fix: look up the requested style with a default instead of calling the
    # Python 2-only generator ``.next()``, which raised an unhandled
    # StopIteration (HTTP 500) when the style name was unknown.
    new_style = next(
        (style for style in layer.styles if style.name == style_name), None)
    if new_style is None:
        return HttpResponse(
            "No style named %s found for %s" %
            (style_name, layer.name), status=400)
    # Does this change this in geoserver??
    layer.default_style = new_style
    layer.styles = [
        s for s in layer.styles if s.name != style_name] + [old_default]
    layer.save()
    return HttpResponse(
        "Default style for %s changed to %s" %
        (layer.name, style_name), status=200)
@login_required
def layer_style_upload(request, layername):
    """
    Handle an SLD style upload for a layer: either update an existing style's
    body in place, or create a new style in the GeoServer catalog and attach
    it to the layer. Responds with JSON (rendered as text/html).
    """
    def respond(*args, **kw):
        # JSON body with a text/html content type (expected by the browser
        # file-upload widget consuming this endpoint).
        kw['content_type'] = 'text/html'
        return json_response(*args, **kw)
    form = LayerStyleUploadForm(request.POST, request.FILES)
    if not form.is_valid():
        return respond(errors="Please provide an SLD file.")
    data = form.cleaned_data
    layer = _resolve_layer(
        request,
        layername,
        'base.change_resourcebase',
        _PERMISSION_MSG_MODIFY)
    sld = request.FILES['sld'].read()
    try:
        dom = etree.XML(sld)
    except Exception:
        return respond(errors="The uploaded SLD file is not valid XML")
    # Style name: the explicit form value wins, else it is taken from the
    # SLD's NamedLayer/Name element.
    el = dom.findall(
        "{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}Name")
    if len(el) == 0 and not data.get('name'):
        return respond(
            errors="Please provide a name, unable to extract one from the SLD.")
    name = data.get('name') or el[0].text
    if data['update']:
        # Update mode: find the matching known style and replace its body.
        match = None
        styles = list(layer.styles) + [layer.default_style]
        for style in styles:
            if style.sld_name == name:
                match = style
                break
        if match is None:
            return respond(errors="Cannot locate style : " + name)
        match.update_body(sld)
    else:
        try:
            cat = gs_catalog
            cat.create_style(name, sld)
            # Attach a lightweight style stub; GeoServer holds the real body.
            layer.styles = layer.styles + \
                [type('style', (object,), {'name': name})]
            cat.save(layer.publishing)
        except ConflictingDataError:
            return respond(errors="""A layer with this name exists. Select
            the update option if you want to update.""")
    return respond(
        body={
            'success': True,
            'style': name,
            'updated': data['update']})
@login_required
def layer_style_manage(request, layername):
    """
    Manage the set of styles associated with a layer.

    GET renders a form listing every style GeoServer knows about next to
    the styles currently attached to the layer; POST writes the selection
    (including the chosen default style) to GeoServer and mirrors the
    result into Django.
    """
    layer = _resolve_layer(
        request,
        layername,
        'layers.change_layer_style',
        _PERMISSION_MSG_MODIFY)
    if request.method == 'GET':
        try:
            cat = gs_catalog
            # First update the layer style info from GS to GeoNode's DB.
            # The try/except is a best-effort guard: a failure to sync must
            # not prevent the management form from rendering.
            try:
                set_styles(layer, cat)
            except AttributeError:
                logger.warn(
                    'Unable to set the default style. Ensure Geoserver is running and that this layer exists.')
            # (name, title) pairs for every style available in GeoServer.
            all_available_gs_styles = cat.get_styles()
            gs_styles = []
            for style in all_available_gs_styles:
                gs_styles.append((style.name, style.sld_title))
            # (name, title) pairs for the styles already on this layer.
            current_layer_styles = layer.styles.all()
            layer_styles = []
            for style in current_layer_styles:
                layer_styles.append((style.name, style.sld_title))
            # Render the form
            return render_to_response(
                'layers/layer_style_manage.html',
                RequestContext(request, {
                    "layer": layer,
                    "gs_styles": gs_styles,
                    "layer_styles": layer_styles,
                    "default_style": (layer.default_style.name, layer.default_style.sld_title)
                }
                )
            )
        except (FailedRequestError, EnvironmentError) as e:
            msg = ('Could not connect to geoserver at "%s"'
                   'to manage style information for layer "%s"' % (
                       ogc_server_settings.LOCATION, layer.name)
                   )
            logger.warn(msg, e)
            # If geoserver is not online, return an error
            return render_to_response(
                'layers/layer_style_manage.html',
                RequestContext(request, {
                    "layer": layer,
                    "error": msg
                }
                )
            )
    elif request.method == 'POST':
        try:
            selected_styles = request.POST.getlist('style-select')
            # MultiValueDictKeyError from this lookup is handled below.
            default_style = request.POST['default_style']
            # Save to GeoServer
            cat = gs_catalog
            gs_layer = cat.get_layer(layer.name)
            gs_layer.default_style = cat.get_style(default_style)
            styles = []
            for style in selected_styles:
                styles.append(cat.get_style(style))
            gs_layer.styles = styles
            cat.save(gs_layer)
            # Save to Django
            layer = set_styles(layer, cat)
            layer.save()
            # Back to the layer detail page on success.
            return HttpResponseRedirect(
                reverse(
                    'layer_detail',
                    args=(
                        layer.service_typename,
                    )))
        except (FailedRequestError, EnvironmentError, MultiValueDictKeyError) as e:
            msg = ('Error Saving Styles for Layer "%s"' % (layer.name)
                   )
            logger.warn(msg, e)
            return render_to_response(
                'layers/layer_style_manage.html',
                RequestContext(request, {
                    "layer": layer,
                    "error": msg
                }
                )
            )
def feature_edit_check(request, layername):
    """
    Report whether the current user may edit the layer's features.

    Answers with JSON ``{"authorized": true|false}``; editing requires
    change_layer_data permission, a vector (dataStore) layer and a
    configured feature-edit datastore.
    """
    layer = _resolve_layer(request, layername)
    datastore = ogc_server_settings.DATASTORE
    feature_edit = getattr(settings, "GEOGIG_DATASTORE", None) or datastore
    can_edit = bool(
        request.user.has_perm('change_layer_data', obj=layer) and
        layer.storeType == 'dataStore' and
        feature_edit)
    return HttpResponse(
        json.dumps({'authorized': can_edit}),
        content_type="application/json")
def style_change_check(request, path):
    """
    Decide whether the current user may perform the attempted style change.

    Returns True when the request is authorized, False otherwise.
    """
    # a new style is created with a POST and then a PUT,
    # a style is updated with a PUT
    # a layer is updated with a style with a PUT
    # in both cases we need to check permissions here
    # for PUT path is /gs/rest/styles/san_andres_y_providencia_water_a452004b.xml
    # or /ge/rest/layers/geonode:san_andres_y_providencia_coastline.json
    # for POST path is /gs/rest/styles
    # we will suppose that a user can create a new style only if he is
    # authenticated (we need to discuss about it)
    authorized = True
    if request.method == 'POST':
        # New style: only authenticated users may create one.
        # NOTE: is_authenticated must be *called* (as everywhere else in
        # this module); the bare bound method is always truthy, which
        # silently authorized anonymous POSTs.
        if not request.user.is_authenticated():
            authorized = False
    if request.method == 'PUT':
        if path == 'rest/layers':
            # layer update, should be safe to always authorize it
            authorized = True
        else:
            # Style update: iterate all layers (should be just one if not
            # using GS) to which the posted style is associated and check
            # change_layer_style on each of them.
            style_name = os.path.splitext(request.path)[0].split('/')[-1]
            try:
                style = Style.objects.get(name=style_name)
                for layer in style.layer_styles.all():
                    if not request.user.has_perm('change_layer_style', obj=layer):
                        authorized = False
            except Style.DoesNotExist:
                # Unknown style name: deny rather than silently allow.
                authorized = False
                logger.warn(
                    'There is not a style with such a name: %s.' % style_name)
    return authorized
def geoserver_rest_proxy(request, proxy_path, downstream_path):
    """
    Proxy an authenticated request through to the GeoServer REST API.

    Strips ``proxy_path`` from the requested URL, forwards the request to
    ``downstream_path`` on the configured OGC server with the server's
    admin credentials, and relays the response.  Style/layer changes are
    permission-checked first, and affected layer thumbnails are refreshed.
    """
    if not request.user.is_authenticated():
        return HttpResponse(
            "You must be logged in to access GeoServer",
            content_type="text/plain",
            status=401)
    def strip_prefix(path, prefix):
        # Remove the proxy mount point from the requested path.
        assert path.startswith(prefix)
        return path[len(prefix):]
    path = strip_prefix(request.get_full_path(), proxy_path)
    url = str("".join([ogc_server_settings.LOCATION, downstream_path, path]))
    http = httplib2.Http()
    username, password = ogc_server_settings.credentials
    # Use b64encode, NOT the legacy encodestring: encodestring appends a
    # trailing newline (and wraps every 76 chars), which corrupts the
    # Authorization header value below.
    auth = base64.b64encode(username + ':' + password)
    # http.add_credentials(*(ogc_server_settings.credentials))
    headers = dict()
    affected_layers = None
    if request.method in ("POST", "PUT") and "CONTENT_TYPE" in request.META:
        headers["Content-Type"] = request.META["CONTENT_TYPE"]
    headers["Authorization"] = "Basic " + auth
    # if user is not authorized, we must stop him
    # we need to sync django here and check if some object (styles) can
    # be edited by the user
    # we should remove this geonode dependency calling layers.views straight
    # from GXP, bypassing the proxy
    if downstream_path in ('rest/styles', 'rest/layers') and len(request.body) > 0:
        if not style_change_check(request, downstream_path):
            return HttpResponse(
                _("You don't have permissions to change style for this layer"),
                content_type="text/plain",
                status=401)
        if downstream_path == 'rest/styles':
            affected_layers = style_update(request, url)
    response, content = http.request(
        url, request.method,
        body=request.body or None,
        headers=headers)
    # update thumbnails
    if affected_layers:
        for layer in affected_layers:
            logger.debug('Updating thumbnail for layer with uuid %s' % layer.uuid)
            create_gs_thumbnail(layer, True)
    return HttpResponse(
        content=content,
        status=response.status,
        content_type=response.get("content-type", "text/plain"))
def layer_batch_download(request):
    """
    batch download a set of layers
    POST - begin download
    GET?id=<download_id> monitor status
    """
    from geonode.utils import http_client
    # currently this just piggy-backs on the map download backend
    # by specifying an ad hoc map that contains all layers requested
    # for download. assumes all layers are hosted locally.
    # status monitoring is handled slightly differently.
    if request.method == 'POST':
        requested = request.POST.getlist("layer")
        matching = Layer.objects.filter(typename__in=list(requested))
        def describe(lyr):
            # Minimal layer descriptor understood by the batch backend.
            return {
                "name": lyr.typename,
                "service": lyr.service_type,
                "metadataURL": "",
                "serviceURL": ""
            }
        lines = ["""This data is provided by GeoNode.\n\nContents:"""]
        for lyr in matching:
            lines.append("%s - %s.*" % (lyr.title, lyr.name))
        readme = "\n".join(lines)
        fake_map = {
            "map": {"readme": readme},
            "layers": [describe(lyr) for lyr in matching]
        }
        url = "%srest/process/batchDownload/launch/" % ogc_server_settings.LOCATION
        resp, content = http_client.request(
            url, 'POST', body=json.dumps(fake_map))
        return HttpResponse(content, status=resp.status)
    if request.method == 'GET':
        # essentially, this just proxies back to geoserver
        download_id = request.GET.get('id', None)
        if download_id is None:
            return HttpResponse(status=404)
        url = "%srest/process/batchDownload/status/%s" % (
            ogc_server_settings.LOCATION, download_id)
        resp, content = http_client.request(url, 'GET')
        return HttpResponse(content, status=resp.status)
def resolve_user(request):
    """
    Identify the requesting user for GeoServer.

    Answers with a JSON summary: username, whether the caller presented
    the GeoServer admin credentials, and the superuser flag (plus full
    name and e-mail when a real account is authenticated).
    """
    user = None
    geoserver = False
    superuser = False
    acl_user = request.user
    if 'HTTP_AUTHORIZATION' in request.META:
        username, password = _get_basic_auth_info(request)
        acl_user = authenticate(username=username, password=password)
        if acl_user:
            # A Django account authenticated via basic auth.
            user = acl_user.username
            superuser = acl_user.is_superuser
        elif _get_basic_auth_info(request) == ogc_server_settings.credentials:
            # Not a Django account, but the credentials match the
            # configured GeoServer administrator.
            geoserver = True
            superuser = True
        else:
            return HttpResponse(_("Bad HTTP Authorization Credentials."),
                                status=401,
                                content_type="text/plain")
    # No basic-auth identity established: fall back to the session user,
    # provided one is actually logged in.
    no_identity = not any([user, geoserver, superuser])
    if no_identity and not request.user.is_anonymous():
        user = request.user.username
        superuser = request.user.is_superuser
    resp = {
        'user': user,
        'geoserver': geoserver,
        'superuser': superuser,
    }
    if acl_user and acl_user.is_authenticated():
        resp['fullname'] = acl_user.get_full_name()
        resp['email'] = acl_user.email
    return HttpResponse(json.dumps(resp), content_type="application/json")
def layer_acls(request):
    """
    returns json-encoded lists of layer identifiers that
    represent the sets of read-write and read-only layers
    for the currently authenticated user.
    """
    # the layer_acls view supports basic auth, and a special
    # user which represents the geoserver administrator that
    # is not present in django.
    acl_user = request.user
    if 'HTTP_AUTHORIZATION' in request.META:
        try:
            username, password = _get_basic_auth_info(request)
            acl_user = authenticate(username=username, password=password)
            # Nope, is it the special geoserver user?
            if (acl_user is None and
                username == ogc_server_settings.USER and
                    password == ogc_server_settings.PASSWORD):
                # great, tell geoserver it's an admin.
                result = {
                    'rw': [],
                    'ro': [],
                    'name': username,
                    'is_superuser': True,
                    'is_anonymous': False
                }
                return HttpResponse(
                    json.dumps(result),
                    content_type="application/json")
        except Exception:
            # Malformed/undecodable auth header: fall through and let the
            # acl_user check below reject the request.
            pass
    if acl_user is None:
        return HttpResponse(_("Bad HTTP Authorization Credentials."),
                            status=401,
                            content_type="text/plain")
    # Include permissions on the anonymous user
    # use of polymorphic selectors/functions to optimize performances
    resources_readable = get_objects_for_user(acl_user, 'view_resourcebase',
                                              ResourceBase.objects.instance_of(Layer)).values_list('id', flat=True)
    layer_writable = get_objects_for_user(acl_user, 'change_layer_data',
                                          Layer.objects.all())
    _read = set(Layer.objects.filter(id__in=resources_readable).values_list('typename', flat=True))
    _write = set(layer_writable.values_list('typename', flat=True))
    # Symmetric difference: layers in exactly one of the two sets are
    # reported read-only; layers in both are read-write.  NOTE(review):
    # this also labels writable-but-not-readable layers as 'ro'.
    read_only = _read ^ _write
    read_write = _read & _write
    result = {
        'rw': list(read_write),
        'ro': list(read_only),
        'name': acl_user.username,
        'is_superuser': acl_user.is_superuser,
        'is_anonymous': acl_user.is_anonymous(),
    }
    if acl_user.is_authenticated():
        result['fullname'] = acl_user.get_full_name()
        result['email'] = acl_user.email
    return HttpResponse(json.dumps(result), content_type="application/json")
|
unknown
|
codeparrot/codeparrot-clean
| ||
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" Utilities for writing plugins.
This is different from bokeh.pluginutils because these are ways of
patching routes and objects directly into the bokeh server. You
would run this type of code using the --script option
"""
from __future__ import absolute_import
import uuid
from bokeh.exceptions import DataIntegrityException
from bokeh.resources import Resources
from flask import abort, render_template
from ..app import bokeh_app
from ..views.backbone import init_bokeh
from ..views.main import _makedoc
def object_page(prefix):
    """ Decorator for a function which turns an object into a web page.

    Usage::

        from bokeh.server.app import bokeh_app

        @bokeh_app.route("/myapp")
        @object_page("mypage")
        def make_object():
            # make some bokeh object here
            return obj

    This decorator will

    - create a randomized title for a bokeh document using the prefix
    - initialize bokeh plotting libraries to use that document
    - call the function you pass in, add that object to the plot context
    - render that object in a web page
    """
    import functools
    def decorator(func):
        # functools.wraps preserves __name__, __doc__, __module__ and
        # __wrapped__ (the bare __name__ copy used previously dropped all
        # but the name, hurting routing introspection and debugging).
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # setup the randomly titled document
            docname = prefix + str(uuid.uuid4())
            bokehuser = bokeh_app.current_user()
            try:
                doc = _makedoc(bokeh_app.servermodel_storage, bokehuser, docname)
                doc.published = True
                doc.save(bokeh_app.servermodel_storage)
            except DataIntegrityException as e:
                # e.g. a document with this name already exists
                return abort(409, e.message)
            docid = doc.docid
            clientdoc = bokeh_app.backbone_storage.get_document(docid)
            # initialize our plotting APIs to use that document
            init_bokeh(clientdoc)
            obj = func(*args, **kwargs)
            clientdoc.add(obj)
            bokeh_app.backbone_storage.store_document(clientdoc)
            if hasattr(obj, 'extra_generated_classes'):
                extra_generated_classes = obj.extra_generated_classes
            else:
                extra_generated_classes = []
            resources = Resources()
            return render_template("oneobj.html",
                                   elementid=str(uuid.uuid4()),
                                   docid=docid,
                                   objid=obj._id,
                                   hide_navbar=True,
                                   extra_generated_classes=extra_generated_classes,
                                   public='true',
                                   loglevel=resources.log_level)
        return wrapper
    return decorator
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
This tokenizer has been copied from the ``tokenize.py`` standard library
tokenizer. The reason was simple: The standard library tokenizer fails
if the indentation is not right. The fast parser of jedi however requires
"wrong" indentation.
Basically this is a stripped down version of the standard library module, so
you can read the documentation there. Additionally we included some speed and
memory optimizations here.
"""
from __future__ import absolute_import
import string
import re
from collections import namedtuple
import itertools as _itertools
from jedi.parser.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER, opmap,
NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT)
from jedi._compatibility import is_py3, py_version, u
from jedi.common import splitlines
# Regex recognizing a PEP 263 source-encoding declaration comment.
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
if is_py3:
    # Python 3 has str.isidentifier() to check if a char is a valid identifier
    is_identifier = str.isidentifier
else:
    # Python 2 fallback: single-character test against ASCII letters + '_'.
    namechars = string.ascii_letters + '_'
    is_identifier = lambda s: s in namechars
# Extra token type for comments, appended after the standard token range.
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
def group(*choices, **kwargs):
    """Join regex alternatives into a single (optionally capturing) group."""
    # Keyword-only argument emulation for Python 2.
    capture = kwargs.pop('capture', False)
    assert not kwargs
    opener = '(' if capture else '(?:'
    return opener + '|'.join(choices) + ')'
def any(*choices):
    # Zero-or-more occurrences of any alternative.  NOTE: intentionally
    # shadows the builtin ``any`` — inherited from the stdlib ``tokenize``
    # module this file is based on.
    return group(*choices) + '*'
def maybe(*choices):
    # Zero-or-one occurrence of any alternative.
    return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Name = r'\w+'
# Numeric-literal regexes.  Python 3.6+ (PEP 515) allows underscore
# separators inside numbers, hence the two variants below.
if py_version >= 36:
    Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
    Binnumber = r'0[bB](?:_?[01])+'
    Octnumber = r'0[oO](?:_?[0-7])+'
    Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
    Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
    Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
    Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
                       r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
    Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
    Floatnumber = group(Pointfloat, Expfloat)
    Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
else:
    Hexnumber = r'0[xX][0-9a-fA-F]+'
    Binnumber = r'0[bB][01]+'
    if is_py3:
        Octnumber = r'0[oO][0-7]+'
    else:
        # Python 2 also accepts bare leading-zero octals like 0755.
        Octnumber = '0[0-7]+'
    Decnumber = r'(?:0+|[1-9][0-9]*)'
    Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
    Exponent = r'[eE][-+]?[0-9]+'
    Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
    Expfloat = r'[0-9]+' + Exponent
    Floatnumber = group(Pointfloat, Expfloat)
    Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
# Imaginary first: leftmost-then-longest matching must see the 'j' suffix.
Number = group(Imagnumber, Floatnumber, Intnumber)
def _all_string_prefixes():
    """Return the empty string plus every valid string-literal prefix.

    Only the lower-case base prefixes are listed; every ordering and every
    upper/lower-case combination is generated from them (include 'br', and
    'rb' falls out of the permutations).
    """
    base_prefixes = ['b', 'r', 'u', 'br']
    if py_version >= 36:
        base_prefixes += ['f', 'fr']
    if py_version <= 27:
        # TODO this is actually not 100% valid. ur is valid in Python 2.7,
        # while ru is not.
        base_prefixes.append('ur')
    # if we add binary f-strings, add: ['fb', 'fbr']
    prefixes = set([''])
    for base in base_prefixes:
        for ordering in _itertools.permutations(base):
            # every upper/lower-case combination of this ordering
            case_choices = [(ch, ch.upper()) for ch in ordering]
            for combo in _itertools.product(*case_choices):
                prefixes.add(''.join(combo))
    return prefixes
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, Name, capture=True)
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
# Group 1 captures the whitespace prefix, group 2 the token itself.
PseudoToken = group(Whitespace, capture=True) + \
    group(PseudoExtras, Number, Funny, ContStr, Name, capture=True)
# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = _compile(Single)
    endpats[_prefix + '"'] = _compile(Double)
    endpats[_prefix + "'''"] = _compile(Single3)
    endpats[_prefix + '"""'] = _compile(Double3)
# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for p in (t + '"', t + "'"):
        single_quoted.add(p)
    for p in (t + '"""', t + "'''"):
        triple_quoted.add(p)
# TODO add with?
# Keywords/tokens after which the tokenizer resets its paren/indent
# bookkeeping (error recovery used by the fast parser).
ALWAYS_BREAK_TOKENS = (';', 'import', 'class', 'def', 'try', 'except',
                       'finally', 'while', 'return')
pseudo_token_compiled = _compile(PseudoToken)
class TokenInfo(namedtuple('Token', ['type', 'string', 'start_pos', 'prefix'])):
    """One token: numeric type, text, (line, column) start and its prefix.

    The prefix is the whitespace/comment text preceding the token that is
    irrelevant to the parser.
    """
    def __repr__(self):
        # _replace returns a new namedtuple (a plain tuple of 4 fields),
        # so the 4-slot format string consumes it positionally.
        return ('TokenInfo(type=%s, string=%r, start=%r, prefix=%r)' %
                self._replace(type=self.get_type_name()))
    def get_type_name(self, exact=True):
        # Human-readable token type name; with exact=True a generic OP is
        # resolved to the specific operator token where possible.
        if exact:
            typ = self.exact_type
        else:
            typ = self.type
        return tok_name[typ]
    @property
    def exact_type(self):
        # Map generic OP tokens to their precise operator type via opmap.
        if self.type == OP and self.string in opmap:
            return opmap[self.string]
        else:
            return self.type
    @property
    def end_pos(self):
        # (line, column) position just after this token; multi-line tokens
        # end at column 0 of the line following their last newline.
        lines = splitlines(self.string)
        if len(lines) > 1:
            return self.start_pos[0] + len(lines) - 1, 0
        else:
            return self.start_pos[0], self.start_pos[1] + len(self.string)
def source_tokens(source, use_exact_op_types=False):
    """Generate tokens from the source code (string)."""
    source_lines = splitlines(source, keepends=True)
    return generate_tokens(source_lines, use_exact_op_types)
def generate_tokens(lines, use_exact_op_types=False):
    """
    A heavily modified Python standard library tokenizer.

    Additionally to the default information, yields also the prefix of each
    token. This idea comes from lib2to3. The prefix contains all information
    that is irrelevant for the parser like newlines in parentheses or comments.

    :param lines: the source split into lines (newlines kept).
    :param use_exact_op_types: yield specific operator token types instead
        of the generic OP type.
    """
    paren_level = 0  # count parentheses
    indents = [0]  # stack of active indentation widths
    max = 0  # NOTE: shadows the builtin; end-of-current-line position
    numchars = '0123456789'
    contstr = ''  # accumulated text of a string continuing across lines
    contline = None
    # We start with a newline. This makes indent at the first position
    # possible. It's not valid Python, but still better than an INDENT in the
    # second line (and not in the first). This makes quite a few things in
    # Jedi's fast parser possible.
    new_line = True
    prefix = ''  # Should never be required, but here for safety
    additional_prefix = ''  # prefix text carried over from skipped tokens
    for lnum, line in enumerate(lines, 1):  # loop over lines in stream
        pos, max = 0, len(line)
        if contstr:  # continued string
            endmatch = endprog.match(line)
            if endmatch:
                pos = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:pos], contstr_start, prefix)
                contstr = ''
                contline = None
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        while pos < max:
            pseudomatch = pseudo_token_compiled.match(line, pos)
            if not pseudomatch:  # scan for tokens
                # Nothing the pseudo-regex recognizes: emit the rest of the
                # line as an error token instead of failing.
                txt = line[pos:]
                if txt.endswith('\n'):
                    new_line = True
                yield TokenInfo(ERRORTOKEN, txt, (lnum, pos), prefix)
                break
            prefix = additional_prefix + pseudomatch.group(1)
            additional_prefix = ''
            start, pos = pseudomatch.span(2)
            spos = (lnum, start)
            token = pseudomatch.group(2)
            initial = token[0]
            if new_line and initial not in '\r\n#':
                # First real token on a line: emit INDENT/DEDENT bookkeeping
                # (only outside parentheses).
                new_line = False
                if paren_level == 0:
                    i = 0
                    # Leading formfeeds do not count towards indentation.
                    while line[i] == '\f':
                        i += 1
                        start -= 1
                    if start > indents[-1]:
                        yield TokenInfo(INDENT, '', spos, '')
                        indents.append(start)
                    while start < indents[-1]:
                        yield TokenInfo(DEDENT, '', spos, '')
                        indents.pop()
            if (initial in numchars or  # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                yield TokenInfo(NUMBER, token, spos, prefix)
            elif initial in '\r\n':
                if not new_line and paren_level == 0:
                    yield TokenInfo(NEWLINE, token, spos, prefix)
                else:
                    # Newlines inside parentheses (or repeated blank lines)
                    # are parser-irrelevant: stash them in the prefix.
                    additional_prefix = prefix + token
                new_line = True
            elif initial == '#':  # Comments
                assert not token.endswith("\n")
                additional_prefix = prefix + token
            elif token in triple_quoted:
                endprog = endpats[token]
                endmatch = endprog.match(line, pos)
                if endmatch:  # all on one line
                    pos = endmatch.end(0)
                    token = line[start:pos]
                    yield TokenInfo(STRING, token, spos, prefix)
                else:
                    contstr_start = (lnum, start)  # multiple lines
                    contstr = line[start:]
                    contline = line
                    break
            elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                if token[-1] == '\n':  # continued string
                    contstr_start = lnum, start
                    endprog = (endpats.get(initial) or endpats.get(token[1])
                               or endpats.get(token[2]))
                    contstr = line[start:]
                    contline = line
                    break
                else:  # ordinary string
                    yield TokenInfo(STRING, token, spos, prefix)
            elif is_identifier(initial):  # ordinary name
                if token in ALWAYS_BREAK_TOKENS:
                    # Error recovery: these keywords reset parenthesis depth
                    # and pop every indent deeper than the current column.
                    paren_level = 0
                    while True:
                        indent = indents.pop()
                        if indent > start:
                            yield TokenInfo(DEDENT, '', spos, '')
                        else:
                            indents.append(indent)
                            break
                yield TokenInfo(NAME, token, spos, prefix)
            elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n'):  # continued stmt
                additional_prefix += prefix + line[start:]
                break
            else:
                if token in '([{':
                    paren_level += 1
                elif token in ')]}':
                    paren_level -= 1
                try:
                    # This check is needed in any case to check if it's a valid
                    # operator or just some random unicode character.
                    exact_type = opmap[token]
                except KeyError:
                    exact_type = typ = ERRORTOKEN
                if use_exact_op_types:
                    typ = exact_type
                else:
                    typ = OP
                yield TokenInfo(typ, token, spos, prefix)
    if contstr:
        # EOF inside an unterminated string: surface it as an error token.
        yield TokenInfo(ERRORTOKEN, contstr, contstr_start, prefix)
        if contstr.endswith('\n'):
            new_line = True
    # NOTE(review): `lnum`/`max` come from the last loop iteration, so an
    # empty `lines` input would raise NameError here — confirm callers
    # always pass at least one line.
    end_pos = lnum, max
    # As the last position we just take the maximally possible position. We
    # remove -1 for the last new line.
    for indent in indents[1:]:
        yield TokenInfo(DEDENT, '', end_pos, '')
    yield TokenInfo(ENDMARKER, '', end_pos, additional_prefix)
if __name__ == "__main__":
    # Ad-hoc driver: tokenize the file named on the command line (or stdin
    # when no path is given) and print each token with its exact operator
    # type.
    import sys
    if len(sys.argv) >= 2:
        path = sys.argv[1]
        with open(path) as f:
            code = u(f.read())
    else:
        code = u(sys.stdin.read())
    for token in source_tokens(code, use_exact_op_types=True):
        print(token)
|
unknown
|
codeparrot/codeparrot-clean
| ||
// RUN: clang-tidy -dump-config | FileCheck %s
// RUN: clang-tidy -dump-config -use-color | FileCheck -check-prefix=CHECK-CONFIG-COLOR %s
// RUN: clang-tidy -dump-config -use-color=false | FileCheck -check-prefix=CHECK-CONFIG-NO-COLOR %s
// RUN: clang-tidy -config='UseColor: true' -dump-config | FileCheck -check-prefix=CHECK-CONFIG-COLOR %s
// RUN: clang-tidy -config='UseColor: false' -dump-config | FileCheck -check-prefix=CHECK-CONFIG-NO-COLOR %s
// RUN: clang-tidy -help | FileCheck -check-prefix=CHECK-OPT-PRESENT %s
// RUN: clang-tidy -checks='-*, modernize-use-override' -use-color=false %s -- -std=c++11 | FileCheck -check-prefix=CHECK-NO-COLOR %s
// RUN: clang-tidy -checks='-*, modernize-use-override' %s -- -std=c++11 | FileCheck -check-prefix=CHECK-NO-COLOR %s
// RUN: clang-tidy -checks='-*, modernize-use-override' -use-color %s -- -std=c++11 | FileCheck -check-prefix=CHECK-COLOR %s
// CHECK-NOT: UseColor
// CHECK-CONFIG-NO-COLOR: UseColor: false
// CHECK-CONFIG-COLOR: UseColor: true
// CHECK-OPT-PRESENT: --use-color
// Abstract base class whose virtual destructor lets the derived class
// below trigger the modernize-use-override diagnostic under test.
class Base {
public:
  virtual ~Base() = 0;
};
class Delivered : public Base {
public:
  // 'virtual' without 'override' on an overriding destructor — exactly the
  // construct modernize-use-override warns about (colored vs. plain output
  // is what the RUN lines above compare).
  virtual ~Delivered() = default;
// CHECK-NO-COLOR: warning: prefer using 'override' or (rarely) 'final' instead of 'virtual' [modernize-use-override]
// CHECK-COLOR: {{.\[0;1;35m}}warning: {{.\[0m}}{{.\[1m}}prefer using 'override' or (rarely) 'final' instead of 'virtual' [modernize-use-override]
};
|
cpp
|
github
|
https://github.com/llvm/llvm-project
|
clang-tools-extra/test/clang-tidy/infrastructure/use-color.cpp
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Tools to help define network clients and servers.
* Other ASF projects use this package, often with their own shaded/unshaded
* versions of protobuf messages.
* Changes to the API signatures will break things, especially changes to
* {@link org.apache.hadoop.ipc.RPC} and {@link org.apache.hadoop.ipc.RpcEngine}.
*/
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce", "YARN", "Hive", "Ozone"})
@InterfaceStability.Evolving
package org.apache.hadoop.ipc;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/package-info.java
|
import docutils.frontend
import docutils.parsers.rst
import docutils.writers.html4css1
import docutils.utils
from sphinx_testing.path import path
from sphinx_testing import with_app as with_sphinx_testing
# Locations of the Sphinx test projects that live next to this module;
# tests copy them to a temporary directory before building.
test_root = path(__file__).parent.joinpath('root').abspath()
test_roots = path(__file__).parent.joinpath('roots').abspath()
def with_app(*args, **kwargs):
    """Decorator for passing a test Sphinx app to a function.

    Extends sphinx_testing's version by defaulting to a base test directory
    if none is specified. The test directory will be copied to a temporary
    directory before calling the function.
    """
    source_given = 'srcdir' in kwargs or 'create_new_srcdir' in kwargs
    if not source_given:
        # Fall back to the shared test project, working on a throwaway copy.
        kwargs['srcdir'] = test_root
        kwargs['copy_srcdir_to_tmpdir'] = True
    return with_sphinx_testing(*args, **kwargs)
def make_document(source_name, contents):
    """Parse ``contents`` (reST source) into a new docutils document."""
    rst_parser = docutils.parsers.rst.Parser()
    # Default settings for the reader/writer pair the tests exercise.
    settings = docutils.frontend.OptionParser(
        components=(
            docutils.parsers.rst.Parser,
            docutils.writers.html4css1.Writer,
        ),
    ).get_default_values()
    document = docutils.utils.new_document(source_name, settings)
    rst_parser.parse(contents, document)
    return document
|
unknown
|
codeparrot/codeparrot-clean
| ||
import logging
from autotest.client.shared import utils, error
from virttest import virsh, virt_vm
from virttest.libvirt_xml import vm_xml
def run_virsh_setmaxmem(test, params, env):
"""
Test command: virsh setmaxmem.
1) Prepare vm environment.
2) Handle params
3) Run test command and get vm started then get maxmem.
4) Recover environment.
5) Check result.
TODO: support more options:--live,--config,--current.
"""
def vmxml_max_mem(vm_name):
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
return int(vmxml.max_mem)
def make_domref(domarg, vm_ref, domid, vm_name, domuuid):
# Specify domain as argument or parameter
if domarg == "yes":
dom_darg_key = "domainarg"
else:
dom_darg_key = "domain"
# How to reference domain
if vm_ref == "domid":
dom_darg_value = domid
elif vm_ref == "domname":
dom_darg_value = vm_name
elif vm_ref == "domuuid":
dom_darg_value = domuuid
elif vm_ref == "none":
dom_darg_value = None
elif vm_ref == "emptystring":
dom_darg_value = '""'
else: # stick in value directly
dom_darg_value = vm_ref
return {dom_darg_key:dom_darg_value}
def make_sizeref(sizearg, mem_ref, original_mem):
if sizearg == "yes":
size_darg_key = "sizearg"
else:
size_darg_key = "size"
if mem_ref == "halfless":
size_darg_value = "%d" % (original_mem / 2)
elif mem_ref == "halfmore":
size_darg_value = "%d" % int(original_mem * 1.5)
elif mem_ref == "same":
size_darg_value = "%d" % original_mem
elif mem_ref == "emptystring":
size_darg_value = '""'
elif mem_ref == "zero":
size_darg_value = "0"
elif mem_ref == "toosmall":
size_darg_value = "1024"
elif mem_ref == "toobig":
size_darg_value = "1099511627776" # (KiB) One Petabyte
elif mem_ref == "none":
size_darg_value = None
else: # stick in value directly
size_darg_value = mem_ref
return {size_darg_key:size_darg_value}
def is_old_libvirt():
regex = r'\s+\[--size\]\s+'
return bool( not virsh.has_command_help_match('setmaxmem', regex) )
def is_xen_host():
check_cmd = "ls /dev/kvm"
return utils.run(check_cmd, ignore_status=True).exit_status
def is_in_range(actual, expected, error_percent):
deviation = 100 - (100 * (float(actual) / float(expected)))
logging.debug("Deviation: %0.2f%%" % float(deviation))
return float(deviation) <= float(error_percent)
def print_debug_stats(original_vmxml_mem, original_dominfo_mem,
expected_mem, test_vmxml_mem, test_dominfo_mem):
dbgmsg = ("Original vmxml mem : %d KiB\n"
"Original dominfo mem : %d KiB\n"
"Expected max mem : %d KiB\n"
"Actual vmxml mem : %d KiB\n"
"Actual dominfo mem : %d KiB\n" % (
original_vmxml_mem,
original_dominfo_mem,
expected_mem,
test_vmxml_mem,
test_dominfo_mem))
for dbgline in dbgmsg.splitlines():
logging.debug(dbgline)
    ### MAIN TEST CODE ###
    # Process cartesian parameters
    vm_ref = params.get("setmaxmem_vm_ref", "")
    mem_ref = params.get("setmaxmem_mem_ref", "")
    status_error = "yes" == params.get("status_error", "no")
    flags = params.get("setmaxmem_flags", "")
    domarg = params.get("setmaxmem_domarg", "no")
    sizearg = params.get("setmaxmem_sizearg", "no")
    delta_per = params.get("setmaxmem_delta_per", "10")
    vm_name = params.get("main_vm")
    # Gather environment parameters
    vm = env.get_vm(vm_name)
    original_vmxml_mem = vmxml_max_mem(vm_name)
    original_dominfo_mem = vm.get_max_mem()
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    uri = vm.connect_uri
    # Older libvirt's setmaxmem lacks [--size]; sizes are given in kilobytes.
    old_libvirt = is_old_libvirt()
    if old_libvirt:
        logging.info("Running test on older libvirt")
        use_kilobytes = True
    else:
        logging.info("Running test on newer libvirt")
        use_kilobytes = False
    # On (assumed) xen hosts the reported max mem may deviate; the comparison
    # below then uses a percentage tolerance instead of exact equality.
    xen_host = is_xen_host()
    if xen_host:
        logging.info("Running on xen host, %s offset is allowed.", delta_per)
    # Argument pattern is complex, build with dargs
    dargs = {'flagstr':flags,
             'use_kilobytes':use_kilobytes,
             'uri':uri, 'ignore_status':True, "debug":True}
    dargs.update( make_domref(domarg, vm_ref, domid, vm_name, domuuid) )
    dargs.update( make_sizeref(sizearg, mem_ref, original_dominfo_mem) )
    if status_error:
        logging.info("Error Test: Expecting an error to occur!")
    # Run the command under test and capture its exit status.
    result = virsh.setmaxmem(**dargs)
    status = result.exit_status
    # Gather status if not running error test
    start_status = 0 # Check can guest be started after maxmem-modified.
    if not status_error:
        if vm.state() == "shut off":
            try:
                vm.start()
            # NOTE(review): Python 2 'except E, name' syntax; Python 3 would
            # require 'except virt_vm.VMStartError as detail'.
            except virt_vm.VMStartError, detail:
                start_status = 1
                logging.error("Start after VM's max mem modified failed:%s",
                              detail)
        # Actual results
        test_vmxml_mem = vmxml_max_mem(vm_name)
        test_dominfo_mem = vm.get_max_mem()
        # Expected results for both vmxml and dominfo
        if sizearg == "yes":
            expected_mem = int(dargs["sizearg"])
        else:
            expected_mem = int(dargs["size"])
        print_debug_stats(original_vmxml_mem, original_dominfo_mem,
                          expected_mem, test_vmxml_mem, test_dominfo_mem)
    else:
        if vm.state() == "paused":
            vm.resume()
    # Restore need vm to be shut off.
    if vm.state() != "shut off":
        vm.destroy()
    # NOTE(review): 'status is 0' relies on CPython small-int caching;
    # 'status == 0' would be the safe comparison.
    if status is 0: # Restore original memory
        restore_status = virsh.setmaxmem(domainarg=vm_name,
                                         sizearg=original_dominfo_mem,
                                         ignore_status=True).exit_status
        if restore_status:
            logging.warning("Failed to restore VM's original memory to "
                            "%s KiB", original_dominfo_mem)
    else:
        # virsh setmaxmem failed, no need to restore
        pass
    # Don't care about memory comparison on error test
    if status_error:
        if status is 0:
            raise error.TestFail("Error test did not result in an error.")
    else:
        vmxml_match = (test_vmxml_mem == expected_mem)
        if xen_host:
            dominfo_match = is_in_range(test_dominfo_mem, expected_mem,
                                        delta_per)
        else:
            dominfo_match = (test_dominfo_mem == expected_mem)
        # Aggregate every failure condition into one message before failing.
        if (status or start_status or not vmxml_match or not dominfo_match):
            msg = "test conditions not met: "
            if status:
                msg += "Non-zero virsh setmaxmem exit code. "
            if not vmxml_match:
                msg += "Max memory in VM's xml is not matched. "
            if not dominfo_match:
                msg += "Max memory in dominfo's output is not matched. "
            if start_status:
                msg += "Start after VM's max mem is modified failed."
            raise error.TestFail(msg)
    logging.info("Test end normally.")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.fixers
~~~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module includes various helpers that fix bugs in web servers. They may
be necessary for some versions of a buggy web server but not others. We try
to stay updated with the status of the bugs as good as possible but you have
to make sure whether they fix the problem you encounter.
If you notice bugs in webservers not fixed in this module consider
contributing a patch.
:copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
from werkzeug.http import parse_options_header, parse_cache_control_header, \
parse_set_header
from werkzeug.useragents import UserAgent
from werkzeug.datastructures import Headers, ResponseCacheControl
class CGIRootFix(object):
    """Middleware correcting the application root under CGI/FastCGI.

    Some servers set the app root to the cgi script's own path instead of
    the path users visit; this fixer rebuilds ``PATH_INFO`` and
    ``SCRIPT_NAME`` accordingly.

    .. versionchanged:: 0.9
       Added `app_root` parameter and renamed from `LighttpdCGIRootFix`.

    :param app: the WSGI application
    :param app_root: Defaulting to ``'/'``, you can set this to something else
        if your app is mounted somewhere else.
    """

    def __init__(self, app, app_root='/'):
        self.app = app
        self.app_root = app_root

    def __call__(self, environ, start_response):
        # Only prepend SCRIPT_NAME to PATH_INFO for older Lighty versions or
        # when no server software is advertised at all; newer Werkzeug added
        # the test and we must not break environs that omit SERVER_SOFTWARE.
        if ('SERVER_SOFTWARE' not in environ
                or environ['SERVER_SOFTWARE'] < 'lighttpd/1.4.28'):
            script_name = environ.get('SCRIPT_NAME', '')
            path_info = environ.get('PATH_INFO', '')
            environ['PATH_INFO'] = script_name + path_info
        environ['SCRIPT_NAME'] = self.app_root.strip('/')
        return self.app(environ, start_response)
# backwards compatibility
LighttpdCGIRootFix = CGIRootFix
class PathInfoFromRequestUriFix(object):
    """Recreate ``PATH_INFO`` from ``REQUEST_URI``-style server variables.

    On Windows, environment variables are limited to the system charset, so
    ``PATH_INFO`` can lose information (e.g. CGI on a Windows Apache).  This
    fixer rebuilds it from ``REQUEST_URI``, ``REQUEST_URL`` or
    ``UNENCODED_URL``, whichever the webserver provides.

    :param app: the WSGI application
    """

    #: server variables probed, in order, for the raw request URI
    _SOURCES = ('REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL')

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        script_name = unquote(environ.get('SCRIPT_NAME', ''))
        for source in self._SOURCES:
            try:
                raw = environ[source]
            except KeyError:
                continue
            request_uri = unquote(raw)
            if request_uri.startswith(script_name):
                # strip the script prefix and any query string
                tail = request_uri[len(script_name):]
                environ['PATH_INFO'] = tail.split('?', 1)[0]
                break
        return self.app(environ, start_response)
class ProxyFix(object):
    """Add HTTP-proxy awareness to an app not written for proxies.

    Sets ``REMOTE_ADDR``, ``HTTP_HOST`` and ``wsgi.url_scheme`` from the
    ``X-Forwarded-*`` headers.  If you have more than one proxy server in
    front of your app, set `num_proxies` accordingly.

    Do not use this middleware in non-proxy setups for security reasons:
    the headers are client-controlled.

    The original values are kept in the WSGI environment as
    ``werkzeug.proxy_fix.orig_remote_addr``,
    ``werkzeug.proxy_fix.orig_http_host`` and
    ``werkzeug.proxy_fix.orig_wsgi_url_scheme``.

    :param app: the WSGI application
    :param num_proxies: the number of proxy servers in front of the app.
    """

    def __init__(self, app, num_proxies=1):
        self.app = app
        self.num_proxies = num_proxies

    def get_remote_addr(self, forwarded_for):
        """Pick the client address from the X-Forwarded-For chain.

        Returns the entry contributed by the `num_proxies`-th proxy from the
        end, or ``None`` when the chain is too short.  Before 0.9 the first
        entry was always picked.

        .. versionadded:: 0.8
        """
        if len(forwarded_for) < self.num_proxies:
            return None
        return forwarded_for[-self.num_proxies]

    def __call__(self, environ, start_response):
        forwarded_proto = environ.get('HTTP_X_FORWARDED_PROTO', '')
        forwarded_host = environ.get('HTTP_X_FORWARDED_HOST', '')
        raw_chain = environ.get('HTTP_X_FORWARDED_FOR', '').split(',')
        # preserve the originals for applications that need them
        environ['werkzeug.proxy_fix.orig_wsgi_url_scheme'] = \
            environ.get('wsgi.url_scheme')
        environ['werkzeug.proxy_fix.orig_remote_addr'] = \
            environ.get('REMOTE_ADDR')
        environ['werkzeug.proxy_fix.orig_http_host'] = \
            environ.get('HTTP_HOST')
        chain = [entry.strip() for entry in raw_chain]
        chain = [entry for entry in chain if entry]
        remote_addr = self.get_remote_addr(chain)
        if remote_addr is not None:
            environ['REMOTE_ADDR'] = remote_addr
        if forwarded_host:
            environ['HTTP_HOST'] = forwarded_host
        if forwarded_proto:
            environ['wsgi.url_scheme'] = forwarded_proto
        return self.app(environ, start_response)
class HeaderRewriterFix(object):
    """Remove and/or add response headers.

    Useful, for example, to drop the `Date` header a server adds
    unconditionally, or to add `X-Powered-By` headers::

        app = HeaderRewriterFix(app, remove_headers=['Date'],
                                add_headers=[('X-Powered-By', 'WSGI')])

    :param app: the WSGI application
    :param remove_headers: a sequence of header keys that should be removed.
    :param add_headers: a sequence of ``(key, value)`` tuples to add.
    """

    def __init__(self, app, remove_headers=None, add_headers=None):
        self.app = app
        # lower-cased for case-insensitive removal
        self.remove_headers = {name.lower() for name in (remove_headers or ())}
        self.add_headers = list(add_headers or ())

    def __call__(self, environ, start_response):
        def rewriting_start_response(status, headers, exc_info=None):
            kept = [
                (key, value)
                for key, value in headers
                if key.lower() not in self.remove_headers
            ]
            kept.extend(self.add_headers)
            return start_response(status, kept, exc_info)
        return self.app(environ, rewriting_start_response)
class InternetExplorerFix(object):
    """This middleware fixes a couple of bugs with Microsoft Internet
    Explorer.  Currently the following fixes are applied:

    -   removing of `Vary` headers for unsupported mimetypes which
        causes troubles with caching.  Can be disabled by passing
        ``fix_vary=False`` to the constructor.
        see: http://support.microsoft.com/kb/824847/en-us

    -   removes offending headers to work around caching bugs in
        Internet Explorer if `Content-Disposition` is set.  Can be
        disabled by passing ``fix_attach=False`` to the constructor.

    If it does not detect affected Internet Explorer versions it won't touch
    the request / response.
    """
    # This code was inspired by Django fixers for the same bugs. The
    # fix_vary and fix_attach fixers were originally implemented in Django
    # by Michael Axiak and is available as part of the Django project:
    # http://code.djangoproject.com/ticket/4148
    def __init__(self, app, fix_vary=True, fix_attach=True):
        self.app = app
        self.fix_vary = fix_vary
        self.fix_attach = fix_attach
    def fix_headers(self, environ, headers, status=None):
        """Mutate *headers* in place to work around the IE bugs above."""
        # Drop 'Vary' unless the mimetype is one of the few IE caches safely.
        if self.fix_vary:
            header = headers.get('content-type', '')
            mimetype, options = parse_options_header(header)
            if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
                headers.pop('vary', None)
        # For attachments, strip 'no-cache'/'no-store' hints that break IE's
        # download handling; keep whatever other directives remain.
        if self.fix_attach and 'content-disposition' in headers:
            pragma = parse_set_header(headers.get('pragma', ''))
            pragma.discard('no-cache')
            header = pragma.to_header()
            if not header:
                headers.pop('pragma', '')
            else:
                headers['Pragma'] = header
            header = headers.get('cache-control', '')
            if header:
                cc = parse_cache_control_header(header,
                                                cls=ResponseCacheControl)
                cc.no_cache = None
                cc.no_store = False
                header = cc.to_header()
                if not header:
                    headers.pop('cache-control', '')
                else:
                    headers['Cache-Control'] = header
    def run_fixed(self, environ, start_response):
        """Run the app with a start_response wrapper that rewrites headers."""
        def fixing_start_response(status, headers, exc_info=None):
            headers = Headers(headers)
            self.fix_headers(environ, headers, status)
            return start_response(status, headers.to_wsgi_list(), exc_info)
        return self.app(environ, fixing_start_response)
    def __call__(self, environ, start_response):
        # Only rewrite for MSIE user agents; everything else passes through.
        ua = UserAgent(environ)
        if ua.browser != 'msie':
            return self.app(environ, start_response)
        return self.run_fixed(environ, start_response)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* deflate_medium.c -- The deflate_medium deflate strategy
*
* Copyright (C) 2013 Intel Corporation. All rights reserved.
* Authors:
* Arjan van de Ven <arjan@linux.intel.com>
*
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#ifndef NO_MEDIUM_STRATEGY
#include "zbuild.h"
#include "deflate.h"
#include "deflate_p.h"
#include "functable.h"
/* A (possibly pending) match found in the sliding window. */
struct match {
    uint16_t match_start;   /* window offset where the matched data begins */
    uint16_t match_length;  /* number of matching bytes */
    uint16_t strstart;      /* window position the match was found at */
    uint16_t orgstart;      /* strstart before any shifting by
                               insert_match()/fizzle_matches() */
};
/* Emit one match to the tally buffers; bytes of a too-short match are
   emitted as individual literals.  Returns nonzero when the pending
   block should be flushed. */
static int emit_match(deflate_state *s, struct match match) {
    int flush_needed = 0;

    if (match.match_length < WANT_MIN_MATCH) {
        /* Too short for a length/distance pair: tally each byte. */
        for (; match.match_length; match.match_length--) {
            flush_needed += zng_tr_tally_lit(s, s->window[match.strstart]);
            s->lookahead--;
            match.strstart++;
        }
        return flush_needed;
    }

    check_match(s, match.strstart, match.match_start, match.match_length);

    flush_needed += zng_tr_tally_dist(s, match.strstart - match.match_start,
                                      match.match_length - STD_MIN_MATCH);
    s->lookahead -= match.match_length;
    return flush_needed;
}
/* Insert the strings covered by *match* into the hash table, skipping
   positions already inserted (everything before match.orgstart) and
   bailing out when the lookahead is too small to hash safely. */
static void insert_match(deflate_state *s, struct match match) {
    if (UNLIKELY(s->lookahead <= (unsigned int)(match.match_length + WANT_MIN_MATCH)))
        return;
    /* string at strstart already in table */
    match.strstart++;
    match.match_length--;
    /* matches that are not long enough we need to emit as literals */
    if (LIKELY(match.match_length < WANT_MIN_MATCH - 1)) {
        if (UNLIKELY(match.match_length > 0)) {
            if (match.strstart >= match.orgstart) {
                /* only hash the span not already inserted before orgstart */
                if (match.strstart + match.match_length - 1 >= match.orgstart) {
                    insert_string(s, match.strstart, match.match_length);
                } else {
                    insert_string(s, match.strstart, match.orgstart - match.strstart + 1);
                }
                match.strstart += match.match_length;
                match.match_length = 0;
            }
        }
        return;
    }
    /* Insert into hash table. */
    if (LIKELY(match.strstart >= match.orgstart)) {
        if (LIKELY(match.strstart + match.match_length - 1 >= match.orgstart)) {
            insert_string(s, match.strstart, match.match_length);
        } else {
            insert_string(s, match.strstart, match.orgstart - match.strstart + 1);
        }
    } else if (match.orgstart < match.strstart + match.match_length) {
        /* match was shifted left of orgstart; insert only the new tail */
        insert_string(s, match.orgstart, match.strstart + match.match_length - match.orgstart);
    }
    match.strstart += match.match_length;
    match.match_length = 0;
}
/* Try to trade bytes between *current and *next: when the byte before
   next's match also matches, next can start one byte earlier and grow
   while current shrinks.  The pair is only committed when current is
   fully consumed and next does not end up as a length-2 match. */
static void fizzle_matches(deflate_state *s, struct match *current, struct match *next) {
    Pos limit;
    unsigned char *match, *orig;
    int changed = 0;
    struct match c, n;
    /* step zero: sanity checks */
    if (current->match_length <= 1)
        return;
    if (UNLIKELY(current->match_length > 1 + next->match_start))
        return;
    if (UNLIKELY(current->match_length > 1 + next->strstart))
        return;
    match = s->window - current->match_length + 1 + next->match_start;
    orig = s->window - current->match_length + 1 + next->strstart;
    /* quick exit check.. if this fails then don't bother with anything else */
    if (LIKELY(*match != *orig))
        return;
    /* work on copies; only written back when the trade pays off */
    c = *current;
    n = *next;
    /* step one: try to move the "next" match to the left as much as possible */
    limit = next->strstart > MAX_DIST(s) ? next->strstart - (Pos)MAX_DIST(s) : 0;
    match = s->window + n.match_start - 1;
    orig = s->window + n.strstart - 1;
    while (*match == *orig) {
        if (UNLIKELY(c.match_length < 1))
            break;
        if (UNLIKELY(n.strstart <= limit))
            break;
        if (UNLIKELY(n.match_length >= 256))
            break;
        if (UNLIKELY(n.match_start <= 1))
            break;
        n.strstart--;
        n.match_start--;
        n.match_length++;
        c.match_length--;
        match--;
        orig--;
        changed++;
    }
    if (!changed)
        return;
    if (c.match_length <= 1 && n.match_length != 2) {
        n.orgstart++;
        *current = c;
        *next = n;
    } else {
        return;
    }
}
/* Medium deflate strategy: find a match at the current position, look one
 * position ahead for a second match, and let fizzle_matches() trade bytes
 * between the two before emitting.
 *
 * Fix: "&current_match" had been mangled into "¤t_match" by an HTML
 * entity round-trip ("&curren;" + "t_match"); the two address-of
 * expressions are restored so the function compiles again. */
Z_INTERNAL block_state deflate_medium(deflate_state *s, int flush) {
    /* Align the first struct to start on a new cacheline, this allows us to fit both structs in one cacheline */
    ALIGNED_(16) struct match current_match;
    struct match next_match;
    /* For levels below 5, don't check the next position for a better match */
    int early_exit = s->level < 5;
    memset(&current_match, 0, sizeof(struct match));
    memset(&next_match, 0, sizeof(struct match));
    for (;;) {
        Pos hash_head = 0;   /* head of the hash chain */
        int bflush = 0;      /* set if current block must be flushed */
        int64_t dist;
        /* Make sure that we always have enough lookahead, except
         * at the end of the input file. We need STD_MAX_MATCH bytes
         * for the next match, plus WANT_MIN_MATCH bytes to insert the
         * string following the next current_match.
         */
        if (s->lookahead < MIN_LOOKAHEAD) {
            PREFIX(fill_window)(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (UNLIKELY(s->lookahead == 0))
                break; /* flush the current block */
            next_match.match_length = 0;
        }
        /* Insert the string window[strstart .. strstart+2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        /* If we already have a future match from a previous round, just use that */
        if (!early_exit && next_match.match_length > 0) {
            current_match = next_match;
            next_match.match_length = 0;
        } else {
            hash_head = 0;
            if (s->lookahead >= WANT_MIN_MATCH) {
                hash_head = quick_insert_string(s, s->strstart);
            }
            current_match.strstart = (uint16_t)s->strstart;
            current_match.orgstart = current_match.strstart;
            /* Find the longest match, discarding those <= prev_length.
             * At this point we have always match_length < WANT_MIN_MATCH
             */
            dist = (int64_t)s->strstart - hash_head;
            if (dist <= MAX_DIST(s) && dist > 0 && hash_head != 0) {
                /* To simplify the code, we prevent matches with the string
                 * of window index 0 (in particular we have to avoid a match
                 * of the string with itself at the start of the input file).
                 */
                current_match.match_length = (uint16_t)FUNCTABLE_CALL(longest_match)(s, hash_head);
                current_match.match_start = (uint16_t)s->match_start;
                if (UNLIKELY(current_match.match_length < WANT_MIN_MATCH))
                    current_match.match_length = 1;
                if (UNLIKELY(current_match.match_start >= current_match.strstart)) {
                    /* this can happen due to some restarts */
                    current_match.match_length = 1;
                }
            } else {
                /* Set up the match to be a 1 byte literal */
                current_match.match_start = 0;
                current_match.match_length = 1;
            }
        }
        insert_match(s, current_match);
        /* now, look ahead one */
        if (LIKELY(!early_exit && s->lookahead > MIN_LOOKAHEAD && (uint32_t)(current_match.strstart + current_match.match_length) < (s->window_size - MIN_LOOKAHEAD))) {
            s->strstart = current_match.strstart + current_match.match_length;
            hash_head = quick_insert_string(s, s->strstart);
            next_match.strstart = (uint16_t)s->strstart;
            next_match.orgstart = next_match.strstart;
            /* Find the longest match, discarding those <= prev_length.
             * At this point we have always match_length < WANT_MIN_MATCH
             */
            dist = (int64_t)s->strstart - hash_head;
            if (dist <= MAX_DIST(s) && dist > 0 && hash_head != 0) {
                /* To simplify the code, we prevent matches with the string
                 * of window index 0 (in particular we have to avoid a match
                 * of the string with itself at the start of the input file).
                 */
                next_match.match_length = (uint16_t)FUNCTABLE_CALL(longest_match)(s, hash_head);
                next_match.match_start = (uint16_t)s->match_start;
                if (UNLIKELY(next_match.match_start >= next_match.strstart)) {
                    /* this can happen due to some restarts */
                    next_match.match_length = 1;
                }
                if (next_match.match_length < WANT_MIN_MATCH)
                    next_match.match_length = 1;
                else
                    fizzle_matches(s, &current_match, &next_match);
            } else {
                /* Set up the match to be a 1 byte literal */
                next_match.match_start = 0;
                next_match.match_length = 1;
            }
            s->strstart = current_match.strstart;
        } else {
            next_match.match_length = 0;
        }
        /* now emit the current match */
        bflush = emit_match(s, current_match);
        /* move the "cursor" forward */
        s->strstart += current_match.match_length;
        if (UNLIKELY(bflush))
            FLUSH_BLOCK(s, 0);
    }
    s->insert = s->strstart < (STD_MIN_MATCH - 1) ? s->strstart : (STD_MIN_MATCH - 1);
    if (flush == Z_FINISH) {
        FLUSH_BLOCK(s, 1);
        return finish_done;
    }
    if (UNLIKELY(s->sym_next))
        FLUSH_BLOCK(s, 0);
    return block_done;
}
#endif
|
c
|
github
|
https://github.com/opencv/opencv
|
3rdparty/zlib-ng/deflate_medium.c
|
# -*- coding: UTF-8 -*-
## Copyright 2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
"""
"""
from __future__ import unicode_literals
#~ try:
from lino.projects.std.settings import *
#~ from django.utils.translation import ugettext_lazy as _
class Site(Site):
    # Subclasses the Site imported (via *) from lino.projects.std.settings
    # to configure the "Lino Events" demo application.
    title = "Lino Events"
    verbose_name = "Lino Events"
    #~ verbose_name = "Lino Cosi"
    #~ description = _("a Lino application to make Belgian accounting simple.")
    #~ version = "0.1"
    #~ url = "http://www.lino-framework.org/autodoc/lino.projects.cosi"
    #~ author = 'Luc Saffre'
    #~ author_email = 'luc.saffre@gmail.com'
    # Fixtures loaded into the demo database.
    demo_fixtures = 'std few_countries few_cities vor'.split()
    # Space-separated list of site languages.
    languages = 'de fr nl'
    #~ languages = ['de','fr','nl']
    #~ languages = 'de fr et en'.split()
    def get_installed_apps(self):
        """Yield the inherited apps, then the extra modules this site needs."""
        for a in super(Site,self).get_installed_apps():
            yield a
        yield 'lino.modlib.system'
        yield 'lino.modlib.countries'
        yield 'lino.modlib.events'
# Instantiate the site; Lino injects settings into this module's globals().
SITE = Site(globals())
#~ except Exception as e:
#~ import traceback
#~ traceback.print_exc(e)
#~ sys.exit(1)
#~
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2007-2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# policygentool is a tool for the initial generation of SELinux policy
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA
#
#
########################### cache Template File #############################
########################### Type Enforcement File #############################
# Type-enforcement declaration: a private cache type for the generated
# domain, marked as a generic file type.  TEMPLATETYPE is substituted by
# the policy generator.
te_types="""
type TEMPLATETYPE_cache_t;
files_type(TEMPLATETYPE_cache_t)
"""
# TE rules granting the domain full management of its cache dirs, files
# and symlinks, plus the file-type transition under /var.
te_rules="""
manage_dirs_pattern(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
manage_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
manage_lnk_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
files_var_filetrans(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, { dir file lnk_file })
"""
# Extra TE rules emitted when the application uses unix stream sockets.
te_stream_rules="""\
manage_sock_files_pattern(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
files_var_filetrans(TEMPLATETYPE_t, TEMPLATETYPE_cache_t, sock_file)
"""
########################### Interface File #############################
if_rules="""
########################################
## <summary>
## Search TEMPLATETYPE cache directories.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_search_cache',`
gen_require(`
type TEMPLATETYPE_cache_t;
')
allow $1 TEMPLATETYPE_cache_t:dir search_dir_perms;
files_search_var($1)
')
########################################
## <summary>
## Read TEMPLATETYPE cache files.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_read_cache_files',`
gen_require(`
type TEMPLATETYPE_cache_t;
')
files_search_var($1)
read_files_pattern($1, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
')
########################################
## <summary>
## Create, read, write, and delete
## TEMPLATETYPE cache files.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_manage_cache_files',`
gen_require(`
type TEMPLATETYPE_cache_t;
')
files_search_var($1)
manage_files_pattern($1, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
')
########################################
## <summary>
## Manage TEMPLATETYPE cache dirs.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_manage_cache_dirs',`
gen_require(`
type TEMPLATETYPE_cache_t;
')
files_search_var($1)
manage_dirs_pattern($1, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
')
"""
if_stream_rules="""
########################################
## <summary>
## Connect to TEMPLATETYPE over a unix stream socket.
## </summary>
## <param name="domain">
## <summary>
## Domain allowed access.
## </summary>
## </param>
#
interface(`TEMPLATETYPE_stream_connect',`
gen_require(`
type TEMPLATETYPE_t, TEMPLATETYPE_cache_t;
')
stream_connect_pattern($1, TEMPLATETYPE_cache_t, TEMPLATETYPE_cache_t)
')
"""
if_admin_types="""
type TEMPLATETYPE_cache_t;"""
if_admin_rules="""
files_search_var($1)
admin_pattern($1, TEMPLATETYPE_cache_t)
"""
########################### File Context ##################################
fc_file="""\
FILENAME -- gen_context(system_u:object_r:TEMPLATETYPE_cache_t,s0)
"""
fc_dir="""\
FILENAME(/.*)? gen_context(system_u:object_r:TEMPLATETYPE_cache_t,s0)
"""
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.xmpproutertap}.
"""
from twisted.application import internet
from twisted.trial import unittest
from twisted.words import xmpproutertap as tap
from twisted.words.protocols.jabber import component
class XMPPRouterTapTest(unittest.TestCase):
    """
    Tests for the XMPP router twistd plugin: option parsing and the
    service built by L{tap.makeService}.
    """
    def test_port(self):
        """
        The port option is recognised as a parameter.
        """
        options = tap.Options()
        options.parseOptions(['--port', '7001'])
        self.assertEquals(options['port'], '7001')
    def test_portDefault(self):
        """
        The port option has '5347' as default value
        """
        options = tap.Options()
        options.parseOptions([])
        self.assertEquals(options['port'], 'tcp:5347:interface=127.0.0.1')
    def test_secret(self):
        """
        The secret option is recognised as a parameter.
        """
        options = tap.Options()
        options.parseOptions(['--secret', 'hushhush'])
        self.assertEquals(options['secret'], 'hushhush')
    def test_secretDefault(self):
        """
        The secret option has 'secret' as default value
        """
        options = tap.Options()
        options.parseOptions([])
        self.assertEquals(options['secret'], 'secret')
    def test_verbose(self):
        """
        The verbose option is recognised as a flag.
        """
        options = tap.Options()
        options.parseOptions(['--verbose'])
        self.assertTrue(options['verbose'])
    def test_makeService(self):
        """
        The service gets set up with a router and factory.
        """
        options = tap.Options()
        options.parseOptions([])
        service = tap.makeService(options)
        self.assertIsInstance(service, internet.TCPServer)
        self.assertEquals('127.0.0.1', service.kwargs['interface'])
        self.assertEquals(2, len(service.args))
        self.assertEquals(5347, service.args[0])
        factory = service.args[1]
        self.assertIsInstance(factory, component.XMPPComponentServerFactory)
        self.assertIsInstance(factory.router, component.Router)
        self.assertEquals('secret', factory.secret)
        self.assertFalse(factory.logTraffic)
    def test_makeServiceVerbose(self):
        """
        The verbose flag enables traffic logging.
        """
        options = tap.Options()
        options.parseOptions(['--verbose'])
        service = tap.makeService(options)
        factory = service.args[1]
        self.assertTrue(factory.logTraffic)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#
# Tkinter interface to PyInstaller.
#
import sys
import subprocess
from Tkinter import *
import tkFileDialog
import FileDialog
class PyInstallerGUI:
    """Minimal Tkinter front-end (Python 2) that collects bundling options
    and shells out to pyinstaller.py."""
    def make_checkbutton(self, frame, text):
        """Add a Checkbutton to *frame* and return its backing IntVar."""
        var = IntVar()
        widget = Checkbutton(frame, text=text, variable=var)
        widget.grid(sticky="NW")
        return var
    def __init__(self):
        # Build the window and all widgets, then block in the Tk event loop
        # until the window is closed (or a button handler exits the process).
        root = Tk()
        root.title("PyInstaller GUI")
        fr1 = Frame(root, width=300, height=100)
        fr1.pack(side="top")
        fr2 = Frame(root, width=300, height=300,
                    borderwidth=2, relief="ridge")
        fr2.pack(ipadx=10, ipady=10)
        fr4 = Frame(root, width=300, height=100)
        fr4.pack(side="bottom", pady=10)
        getFileButton = Button(fr1, text="Script to bundle ...")
        getFileButton.bind("<Button>", self.GetFile)
        getFileButton.pack(side="left")
        self.filein = Entry(fr1)
        self.filein.pack(side="right")
        self.filetype = self.make_checkbutton(fr2, "One File Package")
        self.ascii = self.make_checkbutton(fr2, "Do NOT include decodings")
        self.debug = self.make_checkbutton(fr2, "Use debug versions")
        # Platform-specific options fall back to plain IntVars (always 0)
        # so makePackage can .get() them unconditionally.
        if sys.platform.startswith('win'):
            self.noconsole = self.make_checkbutton(fr2, "No console (Windows only)")
        else:
            self.noconsole = IntVar()
        if not sys.platform.startswith('win'):
            self.strip = self.make_checkbutton(fr2, "Strip the exe and shared libs")
        else:
            self.strip = IntVar()
        okaybutton = Button(fr4, text="Okay ")
        okaybutton.bind("<Button>", self.makePackage)
        okaybutton.pack(side="left")
        cancelbutton = Button(fr4, text="Cancel")
        cancelbutton.bind("<Button>", self.killapp)
        cancelbutton.pack(side="right")
        self.fin = ''
        self.fout = ''
        # Center a fixed 400x250 window on the screen.
        ws = root.winfo_screenwidth()
        hs = root.winfo_screenheight()
        x = (ws/2) - (400/2)
        y = (hs/2) - (250/2)
        root.geometry('%dx%d+%d+%d' % (400, 250, x, y))
        root.mainloop()
    def killapp(self, event):
        """Cancel-button handler: exit immediately."""
        sys.exit(0)
    def makePackage(self, event):
        """Okay-button handler: build the pyinstaller command line from the
        checked options, run it, and exit with its return code."""
        commands = ['python', 'pyinstaller.py']
        if self.filetype.get():
            commands.append('--onefile')
        if self.ascii.get():
            commands.append('--ascii')
        if self.debug.get():
            commands.append('--debug')
        if self.noconsole.get():
            commands.append('--noconsole')
        if self.strip.get():
            commands.append('--strip')
        commands.append(self.fin)
        retcode = subprocess.call(commands)
        sys.exit(retcode)
    def GetFile(self, event):
        """Ask for the script to bundle and mirror the choice in the entry box."""
        self.fin = tkFileDialog.askopenfilename()
        self.filein.insert(0, self.fin)
if __name__ == "__main__":
    # The GUI is intentionally disabled: this raise makes everything below
    # unreachable.  Use pyinstaller.py directly instead.
    raise SystemExit("Please use just 'pyinstaller.py'. Gui is not maintained.")
    try:
        app = PyInstallerGUI()
    except KeyboardInterrupt:
        raise SystemExit("Aborted by user request.")
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**********************************************************************
windows_1257.c - Oniguruma (regular expression library)
**********************************************************************/
/*-
* Copyright (c) 2002-2007 K.Kosako <sndgk393 AT ybb DOT ne DOT jp>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "regenc.h"
#include "iso_8859.h"
/*
* Name: windows-1257
* MIBenum: 2257
* Link: http://www.iana.org/assignments/character-sets
* Link: http://www.microsoft.com/globaldev/reference/sbcs/1257.mspx
* Link: https://en.wikipedia.org/wiki/Windows-1257
*/
/* NOTE(review): the macro/table names say "CP1252" although this file
   implements windows-1257; the names appear to be copy-paste legacy from
   another single-byte encoding module -- the table *data* below is what
   matters.  Confirm against the windows-1257 code chart. */
#define ENC_CP1252_TO_LOWER_CASE(c) EncCP1252_ToLowerCaseTable[c]
#define ENC_IS_CP1252_CTYPE(code,ctype) \
  ((EncCP1252_CtypeTable[code] & CTYPE_TO_BIT(ctype)) != 0)
/* Byte -> lower-cased byte for every code point 0x00..0xFF of this code
   page.  Entries with no case mapping map to themselves (identity).
   ASCII 'A'-'Z' map to 'a'-'z'; rows for 0xA8/0xAA/0xAF and 0xC0..0xDE
   map to their lower-case counterparts per the table data below. */
static const UChar EncCP1252_ToLowerCaseTable[256] = {
  '\000', '\001', '\002', '\003', '\004', '\005', '\006', '\007',
  '\010', '\011', '\012', '\013', '\014', '\015', '\016', '\017',
  '\020', '\021', '\022', '\023', '\024', '\025', '\026', '\027',
  '\030', '\031', '\032', '\033', '\034', '\035', '\036', '\037',
  '\040', '\041', '\042', '\043', '\044', '\045', '\046', '\047',
  '\050', '\051', '\052', '\053', '\054', '\055', '\056', '\057',
  '\060', '\061', '\062', '\063', '\064', '\065', '\066', '\067',
  '\070', '\071', '\072', '\073', '\074', '\075', '\076', '\077',
  '\100', '\141', '\142', '\143', '\144', '\145', '\146', '\147',
  '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157',
  '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167',
  '\170', '\171', '\172', '\133', '\134', '\135', '\136', '\137',
  '\140', '\141', '\142', '\143', '\144', '\145', '\146', '\147',
  '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157',
  '\160', '\161', '\162', '\163', '\164', '\165', '\166', '\167',
  '\170', '\171', '\172', '\173', '\174', '\175', '\176', '\177',
  '\200', '\201', '\202', '\203', '\204', '\205', '\206', '\207',
  '\210', '\211', '\212', '\213', '\214', '\215', '\216', '\217',
  '\220', '\221', '\222', '\223', '\224', '\225', '\226', '\227',
  '\230', '\231', '\232', '\233', '\234', '\235', '\236', '\237',
  '\240', '\241', '\242', '\243', '\244', '\245', '\246', '\247',
  '\270', '\251', '\272', '\253', '\254', '\255', '\256', '\277',
  '\260', '\261', '\262', '\263', '\264', '\265', '\266', '\267',
  '\270', '\271', '\272', '\273', '\274', '\275', '\276', '\277',
  '\340', '\341', '\342', '\343', '\344', '\345', '\346', '\347',
  '\350', '\351', '\352', '\353', '\354', '\355', '\356', '\357',
  '\360', '\361', '\362', '\363', '\364', '\365', '\366', '\327',
  '\370', '\371', '\372', '\373', '\374', '\375', '\376', '\337',
  '\340', '\341', '\342', '\343', '\344', '\345', '\346', '\347',
  '\350', '\351', '\352', '\353', '\354', '\355', '\356', '\357',
  '\360', '\361', '\362', '\363', '\364', '\365', '\366', '\367',
  '\370', '\371', '\372', '\373', '\374', '\375', '\376', '\377'
};
/* Character-class bit masks for code points 0x00..0xFF, queried through
   ENC_IS_CP1252_CTYPE via CTYPE_TO_BIT.  Each entry OR-combines the
   BIT_CTYPE_* flags (alpha, digit, upper, lower, space, print, ...)
   defined in regenc.h; the exact bit layout is established there, not
   in this file. */
static const unsigned short EncCP1252_CtypeTable[256] = {
  0x4008, 0x4008, 0x4008, 0x4008, 0x4008, 0x4008, 0x4008, 0x4008,
  0x4008, 0x420c, 0x4209, 0x4208, 0x4208, 0x4208, 0x4008, 0x4008,
  0x4008, 0x4008, 0x4008, 0x4008, 0x4008, 0x4008, 0x4008, 0x4008,
  0x4008, 0x4008, 0x4008, 0x4008, 0x4008, 0x4008, 0x4008, 0x4008,
  0x4284, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x41a0,
  0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x41a0,
  0x78b0, 0x78b0, 0x78b0, 0x78b0, 0x78b0, 0x78b0, 0x78b0, 0x78b0,
  0x78b0, 0x78b0, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x41a0,
  0x41a0, 0x7ca2, 0x7ca2, 0x7ca2, 0x7ca2, 0x7ca2, 0x7ca2, 0x74a2,
  0x74a2, 0x74a2, 0x74a2, 0x74a2, 0x74a2, 0x74a2, 0x74a2, 0x74a2,
  0x74a2, 0x74a2, 0x74a2, 0x74a2, 0x74a2, 0x74a2, 0x74a2, 0x74a2,
  0x74a2, 0x74a2, 0x74a2, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x51a0,
  0x41a0, 0x78e2, 0x78e2, 0x78e2, 0x78e2, 0x78e2, 0x78e2, 0x70e2,
  0x70e2, 0x70e2, 0x70e2, 0x70e2, 0x70e2, 0x70e2, 0x70e2, 0x70e2,
  0x70e2, 0x70e2, 0x70e2, 0x70e2, 0x70e2, 0x70e2, 0x70e2, 0x70e2,
  0x70e2, 0x70e2, 0x70e2, 0x41a0, 0x41a0, 0x41a0, 0x41a0, 0x4008,
  0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008,
  0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008,
  0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008,
  0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008, 0x0008,
  0x0284, 0x01a0, 0x00a0, 0x00a0, 0x00a0, 0x01a0, 0x00a0, 0x00a0,
  0x34a2, 0x00a0, 0x34a2, 0x01a0, 0x00a0, 0x01a0, 0x00a0, 0x34a2,
  0x00a0, 0x00a0, 0x10a0, 0x10a0, 0x01a0, 0x30e2, 0x00a0, 0x01a0,
  0x30e2, 0x10a0, 0x30e2, 0x01a0, 0x10a0, 0x10a0, 0x10a0, 0x30e2,
  0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2,
  0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2,
  0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x00a0,
  0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x34a2, 0x30e2,
  0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2,
  0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2,
  0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x00a0,
  0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x30e2, 0x01a0
};
/* Case-fold the single byte at *pp into `lower`, advance *pp, and return
   the number of bytes written.  SHARP_s (eszett) expands to "ss" when
   multi-character folding is enabled; every other byte folds through the
   lower-case table (1 byte out). */
static int
mbc_case_fold(OnigCaseFoldType flag,
              const UChar** pp, const UChar* end ARG_UNUSED,
              UChar* lower, OnigEncoding enc ARG_UNUSED)
{
  const UChar* p = *pp;

  if (*p == SHARP_s && (flag & INTERNAL_ONIGENC_CASE_FOLD_MULTI_CHAR) != 0) {
    /* one input byte folds to two output bytes */
    *lower++ = 's';
    *lower   = 's';
    (*pp)++;
    return 2;
  }

  *lower = ENC_CP1252_TO_LOWER_CASE(*p);
  (*pp)++;
  return 1;
}
#if 0
/* Compiled out; kept for reference.  Returns TRUE when the byte at *pp
   participates in case folding (i.e. may match another byte
   case-insensitively).  Always consumes one byte from *pp. */
static int
is_mbc_ambiguous(OnigCaseFoldType flag, const UChar** pp, const UChar* end)
{
  int v;
  const UChar* p = *pp;

  /* eszett folds to "ss" under multi-char folding, hence ambiguous */
  if (*p == SHARP_s && (flag & INTERNAL_ONIGENC_CASE_FOLD_MULTI_CHAR) != 0) {
    (*pp)++;
    return TRUE;
  }

  (*pp)++;
  v = (EncCP1252_CtypeTable[*p] & (BIT_CTYPE_UPPER | BIT_CTYPE_LOWER));
  /* BUG FIX: original tested (v | BIT_CTYPE_LOWER) != 0, which is always
     true since BIT_CTYPE_LOWER is nonzero; bitwise AND is needed to test
     whether the LOWER bit is actually set in v. */
  if ((v & BIT_CTYPE_LOWER) != 0) {
    /* 0xdf, 0xb5 are lower case letter, but can't convert. */
    if (*p == 0xb5)
      return FALSE;
    else
      return TRUE;
  }

  return (v != 0 ? TRUE : FALSE);
}
#endif
/* Single-byte encoding: only code points 0..255 exist, so anything above
   that range belongs to no character class. */
static int
is_code_ctype(OnigCodePoint code, unsigned int ctype, OnigEncoding enc ARG_UNUSED)
{
  if (code >= 256)
    return FALSE;
  return ENC_IS_CP1252_CTYPE(code, ctype);
}
/* Upper/lower code-point pairs used for case-insensitive matching:
   three punctuation-range pairs (0xa8/0xb8, 0xaa/0xba, 0xaf/0xbf) plus
   the contiguous 0xc0..0xde -> 0xe0..0xfe letter range (0xd7 excluded --
   it is absent from the list below). */
static const OnigPairCaseFoldCodes CaseFoldMap[] = {
 { 0xa8, 0xb8 },
 { 0xaa, 0xba },
 { 0xaf, 0xbf },

 { 0xc0, 0xe0 },
 { 0xc1, 0xe1 },
 { 0xc2, 0xe2 },
 { 0xc3, 0xe3 },
 { 0xc4, 0xe4 },
 { 0xc5, 0xe5 },
 { 0xc6, 0xe6 },
 { 0xc7, 0xe7 },
 { 0xc8, 0xe8 },
 { 0xc9, 0xe9 },
 { 0xca, 0xea },
 { 0xcb, 0xeb },
 { 0xcc, 0xec },
 { 0xcd, 0xed },
 { 0xce, 0xee },
 { 0xcf, 0xef },
 { 0xd0, 0xf0 },
 { 0xd1, 0xf1 },
 { 0xd2, 0xf2 },
 { 0xd3, 0xf3 },
 { 0xd4, 0xf4 },
 { 0xd5, 0xf5 },
 { 0xd6, 0xf6 },
 { 0xd8, 0xf8 },
 { 0xd9, 0xf9 },
 { 0xda, 0xfa },
 { 0xdb, 0xfb },
 { 0xdc, 0xfc },
 { 0xdd, 0xfd },
 { 0xde, 0xfe }
};
/* Invoke callback `f` for every case-fold pair of this encoding,
   delegating to the generic map-driven helper with CaseFoldMap. */
static int
apply_all_case_fold(OnigCaseFoldType flag,
                    OnigApplyAllCaseFoldFunc f, void* arg,
                    OnigEncoding enc ARG_UNUSED)
{
  return onigenc_apply_all_case_fold_with_map(
            numberof(CaseFoldMap), CaseFoldMap, 1,
            flag, f, arg);
}
/* Fill `items` with the case-fold alternatives for the string at [p, end),
   delegating to the generic map-driven helper with CaseFoldMap. */
static int
get_case_fold_codes_by_str(OnigCaseFoldType flag,
                           const OnigUChar* p, const OnigUChar* end,
                           OnigCaseFoldCodeItem items[],
                           OnigEncoding enc ARG_UNUSED)
{
  return onigenc_get_case_fold_codes_by_str_with_map(
            numberof(CaseFoldMap), CaseFoldMap, 1,
            flag, p, end, items);
}
#ifdef USE_CASE_MAP_API

/* Code points of the dotless-i / capital-I-with-dot pair in this code
   page, used for Turkish/Azeri folding below.
   NOTE(review): values copied from this file as-is -- confirm 0xB9/0xA9
   against the windows-1257 code chart. */
#define DOTLESS_i (0xB9)
#define I_WITH_DOT_ABOVE (0xA9)

/* Case-map the bytes [*pp, end) into [to, to_end) according to *flagP
   (upcase / downcase / titlecase / fold), updating *flagP with
   ONIGENC_CASE_MODIFIED when anything changed.  Returns the number of
   bytes written. */
static int
case_map(OnigCaseFoldType* flagP, const OnigUChar** pp,
         const OnigUChar* end, OnigUChar* to, OnigUChar* to_end,
         const struct OnigEncodingTypeST* enc)
{
  OnigCodePoint code;
  OnigUChar *to_start = to;
  OnigCaseFoldType flags = *flagP;

  while (*pp < end && to < to_end) {
    code = *(*pp)++;
    if (code == SHARP_s) {
      /* eszett has no single-byte upper case: emit "SS" (or "Ss" when
         title-casing); the first letter is written here, the second
         falls through to the common "*to++ = code" below */
      if (flags & ONIGENC_CASE_UPCASE) {
        flags |= ONIGENC_CASE_MODIFIED;
        *to++ = 'S';
        code = (flags & ONIGENC_CASE_TITLECASE) ? 's' : 'S';
      }
      else if (flags & ONIGENC_CASE_FOLD) {
        flags |= ONIGENC_CASE_MODIFIED;
        *to++ = 's';
        code = 's';
      }
    }
    else if (code == 0xB5)
      /* deliberately untouched (lower-case letter with no counterpart
         in this table) */
      ;
    else if ((EncCP1252_CtypeTable[code] & BIT_CTYPE_UPPER)
             && (flags & (ONIGENC_CASE_DOWNCASE | ONIGENC_CASE_FOLD))) {
      flags |= ONIGENC_CASE_MODIFIED;
      if (code == 'I')
        /* Turkish/Azeri: 'I' lowers to dotless i, not 'i' */
        code = flags & ONIGENC_CASE_FOLD_TURKISH_AZERI ? DOTLESS_i : 'i';
      else
        code = ENC_CP1252_TO_LOWER_CASE(code);
    }
    else if ((EncCP1252_CtypeTable[code]&BIT_CTYPE_LOWER)
             && (flags & ONIGENC_CASE_UPCASE)) {
      flags |= ONIGENC_CASE_MODIFIED;
      if (code == 'i')
        code = flags & ONIGENC_CASE_FOLD_TURKISH_AZERI ? I_WITH_DOT_ABOVE : 'I';
      else if (code == DOTLESS_i)
        code = 'I';
      else if (code >= 0xB0 && code <= 0xBF)
        /* punctuation-range lower-case letters sit 0x10 above their
           upper-case partners (cf. CaseFoldMap) */
        code -= 0x10;
      else
        /* ordinary letters sit 0x20 above their upper-case partners */
        code -= 0x20;
    }
    *to++ = code;
    if (flags & ONIGENC_CASE_TITLECASE)  /* switch from titlecase to lowercase for capitalize */
      flags ^= (ONIGENC_CASE_UPCASE | ONIGENC_CASE_DOWNCASE | ONIGENC_CASE_TITLECASE);
  }
  *flagP = flags;
  return (int )(to - to_start);
}
#endif   /* USE_CASE_MAP_API */
/* Encoding descriptor: wires the callbacks above into the Oniguruma
   single-byte encoding framework (slot order is fixed by
   OnigEncodingType in regenc.h). */
OnigEncodingDefine(windows_1257, Windows_1257) = {
  onigenc_single_byte_mbc_enc_len,
  "Windows-1257",  /* name */
  1,               /* max enc length */
  1,               /* min enc length */
  onigenc_is_mbc_newline_0x0a,
  onigenc_single_byte_mbc_to_code,
  onigenc_single_byte_code_to_mbclen,
  onigenc_single_byte_code_to_mbc,
  mbc_case_fold,
  apply_all_case_fold,
  get_case_fold_codes_by_str,
  onigenc_minimum_property_name_to_ctype,
  is_code_ctype,
  onigenc_not_support_get_ctype_code_range,
  onigenc_single_byte_left_adjust_char_head,
  onigenc_always_true_is_allowed_reverse_match,
#ifdef USE_CASE_MAP_API
  case_map,
#else
  NULL,
#endif
  0,                 /* ruby encoding index (filled in elsewhere) */
  ONIGENC_FLAG_NONE,
};
/* register the common alias name */
ENC_ALIAS("CP1257", "Windows-1257")
|
c
|
github
|
https://github.com/ruby/ruby
|
enc/windows_1257.c
|
""" Utility functions for sparse matrix module
"""
from __future__ import division, print_function, absolute_import
__all__ = ['upcast','getdtype','isscalarlike','isintlike',
'isshape','issequence','isdense','ismatrix']
import warnings
import numpy as np
from scipy._lib._version import NumpyVersion
# keep this list syncronized with sparsetools
#supported_dtypes = ['bool', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
# 'int64', 'uint64', 'float32', 'float64',
# 'complex64', 'complex128']
supported_dtypes = ['bool', 'int8','uint8','short','ushort','intc','uintc',
'longlong','ulonglong','single','double','longdouble',
'csingle','cdouble','clongdouble']
supported_dtypes = [np.typeDict[x] for x in supported_dtypes]
_upcast_memo = {}
def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype

    Examples
    --------
    >>> upcast('int32')
    <type 'numpy.int32'>
    >>> upcast('bool')
    <type 'numpy.bool_'>
    >>> upcast('int32','float32')
    <type 'numpy.float64'>
    >>> upcast('bool',complex,float)
    <type 'numpy.complex128'>

    Raises
    ------
    TypeError
        If the combined dtype cannot be cast to any supported dtype.
    """
    # Key the memo on the argument tuple itself, not on hash(args): two
    # distinct tuples can share a hash, and keying on the hash would then
    # silently return the wrong dtype for one of them.
    t = _upcast_memo.get(args)
    if t is not None:
        return t

    # np.result_type replaces np.find_common_type, which was removed in
    # numpy 2.0.  It also returns bool for all-bool inputs, so the old
    # numpy-1.5 bool workaround is no longer needed.
    upcast = np.result_type(*args)

    for t in supported_dtypes:
        if np.can_cast(upcast, t):
            _upcast_memo[args] = t
            return t

    raise TypeError('no supported conversion for types: %r' % (args,))
def upcast_char(*args):
    """Same as `upcast` but taking dtype.char as input (faster)."""
    # Char tuples get their own memo entries, so the dtype conversion
    # below runs at most once per distinct combination.
    cached = _upcast_memo.get(args)
    if cached is not None:
        return cached
    result = upcast(*map(np.dtype, args))
    _upcast_memo[args] = result
    return result
def upcast_scalar(dtype, scalar):
    """Determine data type for binary operation between an array of
    type `dtype` and a scalar.
    """
    # Let numpy's own promotion rules decide, by probing with a
    # one-element array of the requested dtype.
    probe = np.array([0], dtype=dtype)
    return (probe * scalar).dtype
def downcast_intp_index(arr):
    """
    Down-cast index array to np.intp dtype if it is of a larger dtype.

    Raise an error if the array contains a value that is too large for
    intp.
    """
    # Nothing to do when the index dtype already fits in a machine word.
    if arr.dtype.itemsize <= np.dtype(np.intp).itemsize:
        return arr
    # Empty arrays carry no values, so the cast is always safe.
    if arr.size == 0:
        return arr.astype(np.intp)
    bounds = np.iinfo(np.intp)
    if arr.max() > bounds.max or arr.min() < bounds.min:
        raise ValueError("Cannot deal with arrays with indices larger "
                         "than the machine maximum address size "
                         "(e.g. 64-bit indices on 32-bit machine).")
    return arr.astype(np.intp)
def to_native(A):
    """Return *A* as an array whose dtype uses native byte order."""
    native_dtype = A.dtype.newbyteorder('native')
    return np.asarray(A, dtype=native_dtype)
def getdtype(dtype, a=None, default=None):
    """Function used to simplify argument processing.  If 'dtype' is not
    specified (is None), returns a.dtype; otherwise returns a np.dtype
    object created from the specified dtype argument.  If 'dtype' and 'a'
    are both None, construct a data type out of the 'default' parameter.

    Raises
    ------
    TypeError
        If dtype and default are both None and `a` has no dtype attribute.
    """
    if dtype is not None:
        newdtype = np.dtype(dtype)
    else:
        try:
            newdtype = a.dtype
        except AttributeError:
            if default is None:
                raise TypeError("could not interpret data type")
            newdtype = np.dtype(default)
    # Sparse code cannot handle object arrays; warn but pass through.
    if newdtype == np.object_:
        warnings.warn("object dtype is not supported by sparse matrices")
    return newdtype
def get_index_dtype(arrays=(), maxval=None, check_contents=False):
    """
    Based on input (integer) arrays `a`, determine a suitable index data
    type that can hold the data in the arrays.

    Parameters
    ----------
    arrays : tuple of array_like
        Input arrays whose types/contents to check
    maxval : float, optional
        Maximum value needed
    check_contents : bool, optional
        Whether to check the values in the arrays and not just their types.
        Default: False (check only the types)

    Returns
    -------
    dtype : dtype
        Suitable index data type (int32 or int64)
    """
    int32_info = np.iinfo(np.int32)
    dtype = np.intc
    if maxval is not None and maxval > int32_info.max:
        dtype = np.int64

    # Allow a single array to be passed bare.
    if isinstance(arrays, np.ndarray):
        arrays = (arrays,)

    for arr in arrays:
        arr = np.asarray(arr)
        if not (arr.dtype > np.int32):
            continue
        if check_contents:
            if arr.size == 0:
                # a bigger type not needed
                continue
            if np.issubdtype(arr.dtype, np.integer):
                if (arr.min() >= int32_info.min
                        and arr.max() <= int32_info.max):
                    # contents fit in int32: a bigger type not needed
                    continue
        # Wide dtype (and, when checked, wide contents): need 64-bit.
        dtype = np.int64
        break

    return dtype
def isscalarlike(x):
    """Is x either a scalar, an array scalar, or a 0-dim array?"""
    if np.isscalar(x):
        return True
    # 0-dim ndarrays behave like scalars for indexing purposes.
    return isinstance(x, np.ndarray) and x.ndim == 0
def isintlike(x):
    """Is x appropriate as an index into a sparse matrix?  Returns True
    if it can be cast safely to a machine int.
    """
    # Sequences (lists/tuples/1-d arrays) are never scalar indices.
    if issequence(x):
        return False
    try:
        # int() raises TypeError for unconvertible objects and ValueError
        # for non-numeric strings (e.g. 'abc'); both mean "not int-like".
        # The original caught only TypeError, letting ValueError escape.
        if int(x) == x:
            return True
        else:
            return False
    except (TypeError, ValueError):
        return False
def isshape(x):
    """Is x a valid 2-tuple of dimensions?
    """
    try:
        # Assume it's a tuple of matrix dimensions (M, N)
        (M, N) = x
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Unpacking a non-iterable raises
        # TypeError; a wrong-length iterable raises ValueError.
        return False
    else:
        if isintlike(M) and isintlike(N):
            if np.ndim(M) == 0 and np.ndim(N) == 0:
                return True
        return False
def issequence(t):
    """True for a flat Python list/tuple (empty or of scalars) or a
    one-dimensional ndarray."""
    if isinstance(t, (list, tuple)):
        return len(t) == 0 or np.isscalar(t[0])
    return isinstance(t, np.ndarray) and t.ndim == 1
def ismatrix(t):
    """True for a non-empty list/tuple whose first element is a flat
    sequence, or for a two-dimensional ndarray."""
    if isinstance(t, np.ndarray):
        return t.ndim == 2
    if isinstance(t, (list, tuple)) and len(t) > 0:
        # First row must itself look like a flat sequence
        # (inlined from issequence()).
        row = t[0]
        if isinstance(row, (list, tuple)):
            return len(row) == 0 or np.isscalar(row[0])
        return isinstance(row, np.ndarray) and row.ndim == 1
    return False
def isdense(x):
    """True when *x* is a dense numpy ndarray."""
    return isinstance(x, np.ndarray)
class IndexMixin(object):
    """
    This class simply exists to hold the methods necessary for fancy indexing.
    """

    def _slicetoarange(self, j, shape):
        """ Given a slice object, use numpy arange to change it to a 1D
        array.

        `shape` is the axis length used to resolve negative/open-ended
        slice bounds via slice.indices().
        """
        start, stop, step = j.indices(shape)
        return np.arange(start, stop, step)

    def _unpack_index(self, index):
        """ Parse index. Always return a tuple of the form (row, col).
        Where row/col is a integer, slice, or array of integers.
        """
        # First, check if indexing with single boolean matrix.
        from .base import spmatrix  # This feels dirty but...
        if (isinstance(index, (spmatrix, np.ndarray)) and
                (index.ndim == 2) and index.dtype.kind == 'b'):
            # 2-D boolean mask: the selected coordinates ARE the index.
            return index.nonzero()

        # Parse any ellipses.
        index = self._check_ellipsis(index)

        # Next, parse the tuple or object
        if isinstance(index, tuple):
            if len(index) == 2:
                row, col = index
            elif len(index) == 1:
                # A[(i,)] is A[i, :]
                row, col = index[0], slice(None)
            else:
                raise IndexError('invalid number of indices')
        else:
            # Bare index selects whole rows.
            row, col = index, slice(None)

        # Next, check for validity, or transform the index as needed.
        row, col = self._check_boolean(row, col)
        return row, col

    def _check_ellipsis(self, index):
        """Process indices with Ellipsis. Returns modified index.

        Only the FIRST ellipsis is expanded; any later ones are kept
        verbatim (except in the explicit 2-tuple shortcut below).
        """
        if index is Ellipsis:
            # A[...] is the whole matrix.
            return (slice(None), slice(None))
        elif isinstance(index, tuple):
            # Find first ellipsis
            for j, v in enumerate(index):
                if v is Ellipsis:
                    first_ellipsis = j
                    break
            else:
                first_ellipsis = None

            # Expand the first one
            if first_ellipsis is not None:
                # Shortcuts
                if len(index) == 1:
                    return (slice(None), slice(None))
                elif len(index) == 2:
                    if first_ellipsis == 0:
                        if index[1] is Ellipsis:
                            return (slice(None), slice(None))
                        else:
                            return (slice(None), index[1])
                    else:
                        return (index[0], slice(None))

                # General case: drop ellipses after the first, then pad the
                # first ellipsis out with as many full slices as needed to
                # reach 2 dimensions.
                tail = ()
                for v in index[first_ellipsis+1:]:
                    if v is not Ellipsis:
                        tail = tail + (v,)
                nd = first_ellipsis + len(tail)
                nslice = max(0, 2 - nd)
                return index[:first_ellipsis] + (slice(None),)*nslice + tail

        return index

    def _check_boolean(self, row, col):
        """Convert 1-D boolean row/col indices to integer arrays; reject
        sparse-matrix indices outright."""
        from .base import isspmatrix  # ew...
        # Supporting sparse boolean indexing with both row and col does
        # not work because spmatrix.ndim is always 2.
        if isspmatrix(row) or isspmatrix(col):
            raise IndexError("Indexing with sparse matrices is not supported"
                    " except boolean indexing where matrix and index are equal"
                    " shapes.")
        if isinstance(row, np.ndarray) and row.dtype.kind == 'b':
            row = self._boolean_index_to_array(row)
        if isinstance(col, np.ndarray) and col.dtype.kind == 'b':
            col = self._boolean_index_to_array(col)
        return row, col

    def _boolean_index_to_array(self, i):
        """Turn a 1-D boolean mask into the array of its True positions."""
        if i.ndim > 1:
            raise IndexError('invalid index shape')
        return i.nonzero()[0]

    def _index_to_arrays(self, i, j):
        """Convert any (row, col) index pair to a broadcast-compatible pair
        of integer ndarrays suitable for element-wise fancy indexing."""
        i, j = self._check_boolean(i, j)

        i_slice = isinstance(i, slice)
        if i_slice:
            # Row slice becomes a column vector so it broadcasts against j.
            i = self._slicetoarange(i, self.shape[0])[:,None]
        else:
            i = np.atleast_1d(i)

        if isinstance(j, slice):
            # Column slice becomes a row vector.
            j = self._slicetoarange(j, self.shape[1])[None,:]
            if i.ndim == 1:
                i = i[:,None]
            elif not i_slice:
                raise IndexError('index returns 3-dim structure')
        elif isscalarlike(j):
            # row vector special case
            j = np.atleast_1d(j)
            if i.ndim == 1:
                i, j = np.broadcast_arrays(i, j)
                i = i[:, None]
                j = j[:, None]
            return i, j
        else:
            j = np.atleast_1d(j)
            if i_slice and j.ndim > 1:
                raise IndexError('index returns 3-dim structure')

        i, j = np.broadcast_arrays(i, j)

        if i.ndim == 1:
            # return column vectors for 1-D indexing
            i = i[None,:]
            j = j[None,:]
        elif i.ndim > 2:
            raise IndexError("Index dimension must be <= 2")

        return i, j
def _compat_unique_impl(ar, return_index=False, return_inverse=False):
    """
    Copy of numpy.unique() from Numpy 1.7.1.

    Earlier versions have bugs in how return_index behaves.
    """
    try:
        ar = ar.flatten()
    except AttributeError:
        # Not an ndarray: fall back to plain Python for the simple case,
        # otherwise coerce to a flat array and continue below.
        if not return_inverse and not return_index:
            items = sorted(set(ar))
            return np.asarray(items)
        else:
            ar = np.asanyarray(ar).flatten()

    if ar.size == 0:
        # NOTE(review): the empty index/inverse arrays use bool dtype,
        # mirroring the numpy 1.7 source this was copied from.
        if return_inverse and return_index:
            return ar, np.empty(0, bool), np.empty(0, bool)
        elif return_inverse or return_index:
            return ar, np.empty(0, bool)
        else:
            return ar

    if return_inverse or return_index:
        if return_index:
            # mergesort is stable, so the FIRST occurrence's index is kept.
            perm = ar.argsort(kind='mergesort')
        else:
            perm = ar.argsort()
        aux = ar[perm]
        # flag marks the first element of each run of equal values.
        flag = np.concatenate(([True], aux[1:] != aux[:-1]))
        if return_inverse:
            # iflag numbers each run; iperm maps back to original order.
            iflag = np.cumsum(flag) - 1
            iperm = perm.argsort()
            if return_index:
                return aux[flag], perm[flag], iflag[iperm]
            else:
                return aux[flag], iflag[iperm]
        else:
            return aux[flag], perm[flag]
    else:
        # No extra outputs requested: sort in place and drop duplicates.
        ar.sort()
        flag = np.concatenate(([True], ar[1:] != ar[:-1]))
        return ar[flag]
# Use numpy's own unique() when it is recent enough to have the corrected
# return_index behavior; otherwise fall back to the local 1.7.1 copy above.
if NumpyVersion(np.__version__) > '1.7.0-dev':
    _compat_unique = np.unique
else:
    _compat_unique = _compat_unique_impl
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
DefaultHeaders downloader middleware
See documentation in docs/topics/downloader-middleware.rst
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.python import without_none_values
if TYPE_CHECKING:
from collections.abc import Iterable
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request, Spider
from scrapy.crawler import Crawler
from scrapy.http import Response
class DefaultHeadersMiddleware:
    """Downloader middleware that fills in missing request headers from the
    ``DEFAULT_REQUEST_HEADERS`` setting.  Headers already present on a
    request are never overwritten (``setdefault`` semantics)."""

    def __init__(self, headers: Iterable[tuple[str, str]]):
        # (name, value) pairs applied to every outgoing request.
        self._headers: Iterable[tuple[str, str]] = headers

    @classmethod
    def from_crawler(cls, crawler: Crawler) -> Self:
        """Build the middleware from crawler settings; ``None``-valued
        entries are dropped so they act as "remove this default"."""
        headers = without_none_values(crawler.settings["DEFAULT_REQUEST_HEADERS"])
        return cls(headers.items())

    @_warn_spider_arg
    def process_request(
        self, request: Request, spider: Spider | None = None
    ) -> Request | Response | None:
        # setdefault: only fill headers the request does not already carry.
        for k, v in self._headers:
            request.headers.setdefault(k, v)
        # Returning None lets the request continue through the chain.
        return None
|
python
|
github
|
https://github.com/scrapy/scrapy
|
scrapy/downloadermiddlewares/defaultheaders.py
|
/**
 * Represents the completion of an asynchronous operation.
 * (ES2018 lib augmentation: merges `finally` into the global Promise interface.)
 */
interface Promise<T> {
    /**
     * Attaches a callback that is invoked when the Promise is settled (fulfilled or rejected). The
     * resolved value cannot be modified from the callback.
     * @param onfinally The callback to execute when the Promise is settled (fulfilled or rejected).
     * @returns A Promise for the completion of the callback.
     */
    finally(onfinally?: (() => void) | undefined | null): Promise<T>;
}
|
typescript
|
github
|
https://github.com/microsoft/TypeScript
|
src/lib/es2018.promise.d.ts
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io;
import java.io.*;
import java.lang.reflect.Array;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/** A Writable for 2D arrays containing a matrix of instances of a class. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class TwoDArrayWritable implements Writable {
  /** Element type; every cell of {@link #values} is (de)serialized as this class. */
  private Class valueClass;
  /** The matrix payload; rows may have differing lengths (jagged array). */
  private Writable[][] values;

  public TwoDArrayWritable(Class valueClass) {
    this.valueClass = valueClass;
  }

  public TwoDArrayWritable(Class valueClass, Writable[][] values) {
    this(valueClass);
    this.values = values;
  }

  /**
   * Copy the matrix into a freshly allocated 2-D array of {@code valueClass}.
   * The outer array is created with zero-length rows first and each row is
   * then replaced by one sized to the actual row length, which keeps jagged
   * matrices intact.
   */
  public Object toArray() {
    int dimensions[] = {values.length, 0};
    Object result = Array.newInstance(valueClass, dimensions);
    for (int i = 0; i < values.length; i++) {
      Object resultRow = Array.newInstance(valueClass, values[i].length);
      Array.set(result, i, resultRow);
      for (int j = 0; j < values[i].length; j++) {
        Array.set(resultRow, j, values[i][j]);
      }
    }
    return result;
  }

  public void set(Writable[][] values) { this.values = values; }

  public Writable[][] get() { return values; }

  /**
   * Deserialize: read the row count, then each row's length, then every cell
   * in row-major order — the mirror image of {@link #write(DataOutput)}.
   * Cells are instantiated reflectively, so {@code valueClass} needs a
   * public no-arg constructor.
   */
  @Override
  public void readFields(DataInput in) throws IOException {
    // construct matrix
    values = new Writable[in.readInt()][];
    for (int i = 0; i < values.length; i++) {
      values[i] = new Writable[in.readInt()];
    }

    // construct values
    for (int i = 0; i < values.length; i++) {
      for (int j = 0; j < values[i].length; j++) {
        Writable value;                             // construct value
        try {
          // NOTE(review): Class.newInstance() is deprecated since Java 9;
          // kept for wire/behavior compatibility.
          value = (Writable)valueClass.newInstance();
        } catch (InstantiationException e) {
          throw new RuntimeException(e.toString());
        } catch (IllegalAccessException e) {
          throw new RuntimeException(e.toString());
        }
        value.readFields(in);                       // read a value
        values[i][j] = value;                       // store it in values
      }
    }
  }

  /**
   * Serialize: row count, then all row lengths, then every cell in
   * row-major order.
   */
  @Override
  public void write(DataOutput out) throws IOException {
    out.writeInt(values.length);                 // write values
    for (int i = 0; i < values.length; i++) {
      out.writeInt(values[i].length);
    }
    for (int i = 0; i < values.length; i++) {
      for (int j = 0; j < values[i].length; j++) {
        values[i][j].write(out);
      }
    }
  }
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/TwoDArrayWritable.java
|
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ReplaceDisallowCopyAndAssignMacroCheck.h"
#include "../utils/LexerUtils.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/FormatVariadic.h"
#include <optional>
namespace clang::tidy::modernize {
namespace {
/// Preprocessor hook that rewrites expansions of the configured
/// DISALLOW_COPY_AND_ASSIGN-style macro into explicit `= delete`d copy
/// operations, emitting a diagnostic with a fix-it.
class ReplaceDisallowCopyAndAssignMacroCallbacks : public PPCallbacks {
public:
  explicit ReplaceDisallowCopyAndAssignMacroCallbacks(
      ReplaceDisallowCopyAndAssignMacroCheck &Check, Preprocessor &PP)
      : Check(Check), PP(PP) {}

  void MacroExpands(const Token &MacroNameTok, const MacroDefinition &MD,
                    SourceRange Range, const MacroArgs *Args) override {
    // Only fire on single-argument macros whose name matches the option.
    const IdentifierInfo *Info = MacroNameTok.getIdentifierInfo();
    if (!Info || !Args || Args->getNumMacroArguments() != 1)
      return;
    if (Info->getName() != Check.getMacroName())
      return;
    // The first argument to the DISALLOW_COPY_AND_ASSIGN macro is expected to
    // be the class name.
    const Token *ClassNameTok = Args->getUnexpArgument(0);
    if (Args->ArgNeedsPreexpansion(ClassNameTok, PP))
      // For now we only support simple argument that don't need to be
      // pre-expanded.
      return;
    const clang::IdentifierInfo *ClassIdent = ClassNameTok->getIdentifierInfo();
    if (!ClassIdent)
      return;

    // Build the deleted copy-ctor/copy-assign pair; append a ';' only when
    // the macro use itself was not followed by one.
    const std::string Replacement = llvm::formatv(
        R"cpp({0}(const {0} &) = delete;
const {0} &operator=(const {0} &) = delete{1})cpp",
        ClassIdent->getName(), shouldAppendSemi(Range) ? ";" : "");

    Check.diag(MacroNameTok.getLocation(),
               "prefer deleting copy constructor and assignment operator over "
               "using macro '%0'")
        << Check.getMacroName()
        << FixItHint::CreateReplacement(
               PP.getSourceManager().getExpansionRange(Range), Replacement);
  }

private:
  /// \returns \c true if the next token after the given \p MacroLoc is \b not a
  /// semicolon.
  bool shouldAppendSemi(SourceRange MacroLoc) {
    std::optional<Token> Next = utils::lexer::findNextTokenSkippingComments(
        MacroLoc.getEnd(), PP.getSourceManager(), PP.getLangOpts());
    return !(Next && Next->is(tok::semi));
  }

  ReplaceDisallowCopyAndAssignMacroCheck &Check;
  Preprocessor &PP;
};
} // namespace
// The macro name to match is configurable; it defaults to the common
// Google-style DISALLOW_COPY_AND_ASSIGN spelling.
ReplaceDisallowCopyAndAssignMacroCheck::ReplaceDisallowCopyAndAssignMacroCheck(
    StringRef Name, ClangTidyContext *Context)
    : ClangTidyCheck(Name, Context),
      MacroName(Options.get("MacroName", "DISALLOW_COPY_AND_ASSIGN")) {}
void ReplaceDisallowCopyAndAssignMacroCheck::registerPPCallbacks(
    const SourceManager &SM, Preprocessor *PP, Preprocessor *ModuleExpanderPP) {
  // Register on ModuleExpanderPP (not PP) so macro expansions are also seen
  // when building modules.
  PP->addPPCallbacks(
      ::std::make_unique<ReplaceDisallowCopyAndAssignMacroCallbacks>(
          *this, *ModuleExpanderPP));
}
// Persist the configurable macro name so it round-trips through
// .clang-tidy configuration dumps.
void ReplaceDisallowCopyAndAssignMacroCheck::storeOptions(
    ClangTidyOptions::OptionMap &Opts) {
  Options.store(Opts, "MacroName", MacroName);
}
} // namespace clang::tidy::modernize
|
cpp
|
github
|
https://github.com/llvm/llvm-project
|
clang-tools-extra/clang-tidy/modernize/ReplaceDisallowCopyAndAssignMacroCheck.cpp
|
#include <acl/acl.h>
#include <unistd.h> // fork()
#include <iostream>
// CMake feature probe for the Ascend CANN toolkit: always exits 0, but
// reports runtime init/teardown failures on stderr so the build log shows
// whether the ACL runtime is actually usable.
int main(int /*argc*/, char** /*argv*/)
{
    const int initStatus = aclInit(NULL);
    if (initStatus != 0)
    {
        std::cerr << "Failed to initialize Ascend, ret = " << initStatus;
    }

    const int finalizeStatus = aclFinalize();
    if (finalizeStatus != 0)
    {
        std::cerr << "Failed to de-initialize Ascend, ret = " << finalizeStatus;
    }

    return 0;
}
|
cpp
|
github
|
https://github.com/opencv/opencv
|
cmake/checks/cann.cpp
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.producer.internals;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.RecordBatchTooLargeException;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.record.internal.AbstractRecords;
import org.apache.kafka.common.record.internal.CompressionRatioEstimator;
import org.apache.kafka.common.record.internal.CompressionType;
import org.apache.kafka.common.record.internal.MemoryRecords;
import org.apache.kafka.common.record.internal.MemoryRecordsBuilder;
import org.apache.kafka.common.record.internal.MutableRecordBatch;
import org.apache.kafka.common.record.internal.Record;
import org.apache.kafka.common.record.internal.RecordBatch;
import org.apache.kafka.common.requests.ProduceResponse;
import org.apache.kafka.common.utils.ProducerIdAndEpoch;
import org.apache.kafka.common.utils.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.OptionalInt;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import static org.apache.kafka.common.record.internal.RecordBatch.MAGIC_VALUE_V2;
import static org.apache.kafka.common.record.internal.RecordBatch.NO_TIMESTAMP;
/**
* A batch of records that is or will be sent.
*
* This class is not thread safe and external synchronization must be used when modifying it
*/
public final class ProducerBatch {
    private static final Logger log = LoggerFactory.getLogger(ProducerBatch.class);

    /** Terminal states a batch can reach exactly once. */
    private enum FinalState { ABORTED, FAILED, SUCCEEDED }

    final long createdMs;
    final TopicPartition topicPartition;
    // Future completed when the whole batch is acked/failed/aborted.
    final ProduceRequestResult produceFuture;

    // One Thunk per appended record: carries the user callback and the
    // per-record future.
    private final List<Thunk> thunks = new ArrayList<>();
    private final MemoryRecordsBuilder recordsBuilder;
    private final AtomicInteger attempts = new AtomicInteger(0);
    private final boolean isSplitBatch;
    // Set at most once; null until the batch reaches a terminal state.
    private final AtomicReference<FinalState> finalState = new AtomicReference<>(null);
    private boolean bufferDeallocated = false;

    // Tracks if the batch has been sent to the NetworkClient
    private boolean inflight = false;

    int recordCount;
    int maxRecordSize;
    private long lastAttemptMs;
    private long lastAppendTime;
    private long drainedMs;
    private boolean retry;
    private boolean reopened;

    // Epoch of the partition leader to which this batch will be sent in the
    // current attempt.
    private OptionalInt currentLeaderEpoch;
    // Tracks the attempt in which leader was changed to currentLeaderEpoch for the 1st time.
    private int attemptsWhenLeaderLastChanged;
    /** Convenience constructor for a regular (non-split) batch. */
    public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long createdMs) {
        this(tp, recordsBuilder, createdMs, false);
    }
    /**
     * @param tp             destination partition for every record in the batch
     * @param recordsBuilder builder owning the serialized record buffer
     * @param createdMs      creation timestamp; also seeds last-attempt/append times
     * @param isSplitBatch   true when this batch resulted from splitting an
     *                       oversized batch
     */
    public ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long createdMs, boolean isSplitBatch) {
        this.createdMs = createdMs;
        this.lastAttemptMs = createdMs;
        this.recordsBuilder = recordsBuilder;
        this.topicPartition = tp;
        this.lastAppendTime = createdMs;
        this.produceFuture = new ProduceRequestResult(topicPartition);
        this.retry = false;
        this.isSplitBatch = isSplitBatch;
        // Seed the builder with the historical compression ratio observed for
        // this topic/compression-type pair so size estimates start out sane.
        float compressionRatioEstimation = CompressionRatioEstimator.estimation(topicPartition.topic(),
                                                                                recordsBuilder.compression().type());
        this.currentLeaderEpoch = OptionalInt.empty();
        this.attemptsWhenLeaderLastChanged = 0;
        recordsBuilder.setEstimatedCompressionRatio(compressionRatioEstimation);
    }
    /**
     * Update the leader epoch to which this batch will be produced for the
     * ongoing attempt, but only if a strictly newer leader epoch is known.
     * Also records the attempt number at which the change happened.
     *
     * @param latestLeaderEpoch latest leader's epoch.
     */
    void maybeUpdateLeaderEpoch(OptionalInt latestLeaderEpoch) {
        if (latestLeaderEpoch.isPresent()
            && (currentLeaderEpoch.isEmpty() || currentLeaderEpoch.getAsInt() < latestLeaderEpoch.getAsInt())) {
            log.trace("For {}, leader will be updated, currentLeaderEpoch: {}, attemptsWhenLeaderLastChanged:{}, latestLeaderEpoch: {}, current attempt: {}",
                this, currentLeaderEpoch, attemptsWhenLeaderLastChanged, latestLeaderEpoch, attempts);
            attemptsWhenLeaderLastChanged = attempts();
            currentLeaderEpoch = latestLeaderEpoch;
        } else {
            log.trace("For {}, leader wasn't updated, currentLeaderEpoch: {}, attemptsWhenLeaderLastChanged:{}, latestLeaderEpoch: {}, current attempt: {}",
                this, currentLeaderEpoch, attemptsWhenLeaderLastChanged, latestLeaderEpoch, attempts);
        }
    }
/**
 * @return true when the batch is being retried and the leader was updated for this very
 * attempt, i.e. the ongoing retry goes to a newer leader than the previous attempt did.
 */
boolean hasLeaderChangedForTheOngoingRetry() {
    final int currentAttempt = attempts();
    // Attempt 0 is the very first send, so there is no retry whose leader could have changed.
    if (currentAttempt < 1)
        return false;
    return currentAttempt == attemptsWhenLeaderLastChanged;
}
/**
 * Append the record to the current record set and return the relative offset within that record set
 *
 * @return The RecordSend corresponding to this record or null if there isn't sufficient room.
 */
public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) {
    if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) {
        // Caller is expected to close this batch and open a new one.
        return null;
    } else {
        this.recordsBuilder.append(timestamp, key, value, headers);
        // Track an upper bound of the largest single record seen in this batch.
        this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(),
                recordsBuilder.compression().type(), key, value, headers));
        this.lastAppendTime = now;
        FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount,
                                                               timestamp,
                                                               key == null ? -1 : key.length,
                                                               value == null ? -1 : value.length,
                                                               Time.SYSTEM);
        // we have to keep every future returned to the users in case the batch needs to be
        // split to several new batches and resent.
        thunks.add(new Thunk(callback, future));
        this.recordCount++;
        return future;
    }
}
/**
 * This method is only used by {@link #split(int)} when splitting a large batch to smaller ones.
 * Unlike {@link #tryAppend}, it reuses the caller-supplied thunk (preserving the user's
 * original callback/future) and chains the new future onto it.
 *
 * @return true if the record has been successfully appended, false otherwise.
 */
private boolean tryAppendForSplit(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers, Thunk thunk) {
    if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) {
        return false;
    } else {
        // No need to get the CRC.
        this.recordsBuilder.append(timestamp, key, value, headers);
        this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(),
                recordsBuilder.compression().type(), key, value, headers));
        FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount,
                                                               timestamp,
                                                               key == null ? -1 : key.remaining(),
                                                               value == null ? -1 : value.remaining(),
                                                               Time.SYSTEM);
        // Chain the future to the original thunk.
        thunk.future.chain(future);
        this.thunks.add(thunk);
        this.recordCount++;
        return true;
    }
}
/**
 * Abort the batch and complete the future and callbacks.
 * Fails with IllegalStateException if the batch already reached a terminal state.
 *
 * @param exception The exception to use to complete the future and awaiting callbacks.
 */
public void abort(RuntimeException exception) {
    // CAS guards against racing with done(); only the first terminal transition wins.
    if (!finalState.compareAndSet(null, FinalState.ABORTED))
        throw new IllegalStateException("Batch has already been completed in final state " + finalState.get());
    log.trace("Aborting batch for partition {}", topicPartition, exception);
    completeFutureAndFireCallbacks(ProduceResponse.INVALID_OFFSET, RecordBatch.NO_TIMESTAMP, index -> exception);
}
/**
 * Check if the batch has been completed (either successfully or exceptionally).
 * @return `true` if the batch has been completed, `false` otherwise.
 */
public boolean isDone() {
    // A non-null final state means done() or abort() has already fired.
    return finalState.get() != null;
}
/**
 * Complete the batch successfully.
 * @param baseOffset The base offset of the messages assigned by the server
 * @param logAppendTime The log append time or -1 if CreateTime is being used
 * @return true if the batch was completed as a result of this call, and false
 *   if it had been completed previously
 */
public boolean complete(long baseOffset, long logAppendTime) {
    // Null exception/recordExceptions signal success to done().
    return done(baseOffset, logAppendTime, null, null);
}
/**
 * Complete the batch exceptionally. The provided top-level exception will be used
 * for each record future contained in the batch.
 *
 * @param topLevelException top-level partition error
 * @param recordExceptions Record exception function mapping batchIndex to the respective record exception
 * @return true if the batch was completed as a result of this call, and false
 *   if it had been completed previously
 */
public boolean completeExceptionally(
    RuntimeException topLevelException,
    Function<Integer, RuntimeException> recordExceptions
) {
    // Both arguments are mandatory: done() relies on a non-null exception to pick FAILED.
    Objects.requireNonNull(topLevelException);
    Objects.requireNonNull(recordExceptions);
    return done(ProduceResponse.INVALID_OFFSET, RecordBatch.NO_TIMESTAMP, topLevelException, recordExceptions);
}
/**
 * Finalize the state of a batch. Final state, once set, is immutable. This function may be called
 * once or twice on a batch. It may be called twice if
 * 1. An inflight batch expires before a response from the broker is received. The batch's final
 * state is set to FAILED. But it could succeed on the broker and second time around batch.done() may
 * try to set SUCCEEDED final state.
 * 2. If a transaction abortion happens or if the producer is closed forcefully, the final state is
 * ABORTED but again it could succeed if broker responds with a success.
 *
 * Attempted transitions from [FAILED | ABORTED] --> SUCCEEDED are logged.
 * Attempted transitions from one failure state to the same or a different failed state are ignored.
 * Attempted transitions from SUCCEEDED to the same or a failed state throw an exception.
 *
 * @param baseOffset The base offset of the messages assigned by the server
 * @param logAppendTime The log append time or -1 if CreateTime is being used
 * @param topLevelException The exception that occurred (or null if the request was successful)
 * @param recordExceptions Record exception function mapping batchIndex to the respective record exception
 * @return true if the batch was completed successfully and false if the batch was previously aborted
 */
private boolean done(
    long baseOffset,
    long logAppendTime,
    RuntimeException topLevelException,
    Function<Integer, RuntimeException> recordExceptions
) {
    final FinalState tryFinalState = (topLevelException == null) ? FinalState.SUCCEEDED : FinalState.FAILED;
    if (tryFinalState == FinalState.SUCCEEDED) {
        log.trace("Successfully produced messages to {} with base offset {}.", topicPartition, baseOffset);
    } else {
        log.trace("Failed to produce messages to {} with base offset {}.", topicPartition, baseOffset, topLevelException);
    }
    // First terminal transition wins; callbacks fire exactly once.
    if (this.finalState.compareAndSet(null, tryFinalState)) {
        completeFutureAndFireCallbacks(baseOffset, logAppendTime, recordExceptions);
        return true;
    }
    if (this.finalState.get() != FinalState.SUCCEEDED) {
        if (tryFinalState == FinalState.SUCCEEDED) {
            // Log if a previously unsuccessful batch succeeded later on.
            log.debug("ProduceResponse returned {} for {} after batch with base offset {} had already been {}.",
                tryFinalState, topicPartition, baseOffset, this.finalState.get());
        } else {
            // FAILED --> FAILED and ABORTED --> FAILED transitions are ignored.
            log.debug("Ignored state transition {} -> {} for {} batch with base offset {}",
                this.finalState.get(), tryFinalState, topicPartition, baseOffset);
        }
    } else {
        // A SUCCESSFUL batch must not attempt another state change.
        throw new IllegalStateException("A " + this.finalState.get() + " batch must not attempt another state change to " + tryFinalState);
    }
    return false;
}
/**
 * Complete the batch-level future and invoke every per-record callback.
 * A null {@code recordExceptions} signals success; otherwise each callback
 * receives the exception mapped from its batch index.
 * Exceptions thrown by user callbacks are logged and swallowed so one bad
 * callback cannot prevent the remaining ones (or the future) from completing.
 */
private void completeFutureAndFireCallbacks(
    long baseOffset,
    long logAppendTime,
    Function<Integer, RuntimeException> recordExceptions
) {
    // Set the future before invoking the callbacks as we rely on its state for the `onCompletion` call
    produceFuture.set(baseOffset, logAppendTime, recordExceptions);
    // execute callbacks
    for (int i = 0; i < thunks.size(); i++) {
        try {
            Thunk thunk = thunks.get(i);
            if (thunk.callback != null) {
                if (recordExceptions == null) {
                    RecordMetadata metadata = thunk.future.value();
                    thunk.callback.onCompletion(metadata, null);
                } else {
                    RuntimeException exception = recordExceptions.apply(i);
                    thunk.callback.onCompletion(null, exception);
                }
            }
        } catch (Exception e) {
            log.error("Error executing user-provided callback on message for topic-partition '{}'", topicPartition, e);
        }
    }
    produceFuture.done();
}
/**
 * Split an oversized batch into smaller batches of at most {@code splitBatchSize} bytes,
 * preserving per-record callbacks/futures and chaining the new batches' results to this
 * batch's future.
 *
 * @param splitBatchSize target size in bytes for each split batch
 * @return the split batches, in record order
 */
public Deque<ProducerBatch> split(int splitBatchSize) {
    RecordBatch recordBatch = validateAndGetRecordBatch();
    Deque<ProducerBatch> batches = splitRecordsIntoBatches(recordBatch, splitBatchSize);
    finalizeSplitBatches(batches);
    return batches;
}
/**
 * Build the records and verify the batch is splittable: it must be non-empty, contain
 * exactly one record batch, and (for magic < v2) be compressed.
 *
 * @return the single underlying record batch
 */
private RecordBatch validateAndGetRecordBatch() {
    MemoryRecords memoryRecords = recordsBuilder.build();
    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");
    RecordBatch recordBatch = recordBatchIter.next();
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages " +
                "with version v0 and v1");
    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");
    return recordBatch;
}
/**
 * Re-append every record of {@code recordBatch} into new batches of at most
 * {@code splitBatchSize} bytes, reusing the original thunks so user callbacks survive
 * the split. Records and thunks are consumed in lockstep (same append order).
 */
private Deque<ProducerBatch> splitRecordsIntoBatches(RecordBatch recordBatch, int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch.
    // And we also Retain the create time of the original batch.
    ProducerBatch batch = null;
    for (Record record : recordBatch) {
        assert thunkIter.hasNext();
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            // Current batch is full: seal it and start a fresh one for this record.
            batches.add(batch);
            batch.closeForRecordAppends();
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }
    // Close the last batch and add it to the batch list after split.
    if (batch != null) {
        batches.add(batch);
        batch.closeForRecordAppends();
    }
    return batches;
}
/**
 * Wire the split batches back to the original batch's future and complete the original
 * future with a RecordBatchTooLargeException, then propagate producer state.
 */
private void finalizeSplitBatches(Deque<ProducerBatch> batches) {
    // Chain all split batch ProduceRequestResults to the original batch's produceFuture
    // Ensures the original batch's future doesn't complete until all split batches complete
    for (ProducerBatch splitBatch : batches) {
        produceFuture.addDependent(splitBatch.produceFuture);
    }
    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, index -> new RecordBatchTooLargeException());
    produceFuture.done();
    assignProducerStateToBatches(batches);
}
/**
 * When the original batch had a sequence assigned, carry the producer id/epoch over to the
 * split batches and hand out consecutive sequence ranges in record order.
 */
private void assignProducerStateToBatches(Deque<ProducerBatch> batches) {
    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            // Next batch starts where this one's records end.
            sequence += newBatch.recordCount;
        }
    }
}
/**
 * Allocate a new split batch sized to hold at least {@code record} (upper-bound estimate)
 * or {@code batchSize}, whichever is larger. The buffer is allocated directly, not taken
 * from the accumulator's pool.
 */
private ProducerBatch createBatchOffAccumulatorForRecord(Record record, int batchSize) {
    int initialSize = Math.max(AbstractRecords.estimateSizeInBytesUpperBound(magic(),
            recordsBuilder.compression().type(), record.key(), record.value(), record.headers()), batchSize);
    ByteBuffer buffer = ByteBuffer.allocate(initialSize);
    // Note that we intentionally do not set producer state (producerId, epoch, sequence, and isTransactional)
    // for the newly created batch. This will be set when the batch is dequeued for sending (which is consistent
    // with how normal batches are handled).
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic(), recordsBuilder.compression(),
            TimestampType.CREATE_TIME, 0L);
    return new ProducerBatch(topicPartition, builder, this.createdMs, true);
}
/** @return true when this batch's records use a compression codec (i.e. the type is not NONE). */
public boolean isCompressed() {
    final CompressionType type = recordsBuilder.compression().type();
    return type != CompressionType.NONE;
}
/**
 * A callback and the associated FutureRecordMetadata argument to pass to it.
 */
private static final class Thunk {
    // May be null when the user supplied no callback for the record.
    final Callback callback;
    final FutureRecordMetadata future;

    Thunk(Callback callback, FutureRecordMetadata future) {
        this.callback = callback;
        this.future = future;
    }
}
/** Human-readable summary used in log lines: partition plus record count. */
@Override
public String toString() {
    return String.format("ProducerBatch(topicPartition=%s, recordCount=%d)", topicPartition, recordCount);
}
/** @return true once at least {@code deliveryTimeoutMs} ms have elapsed since batch creation. */
boolean hasReachedDeliveryTimeout(long deliveryTimeoutMs, long now) {
    final long age = now - this.createdMs;
    return age >= deliveryTimeoutMs;
}
/** @return the terminal state of the batch, or null while it is still pending. */
public FinalState finalState() {
    return this.finalState.get();
}

/** @return how many send attempts have been made for this batch. */
int attempts() {
    return attempts.get();
}

/**
 * Record that the batch was put back on the accumulator queue for a retry:
 * bumps the attempt counter and advances the attempt/append timestamps monotonically.
 */
void reenqueued(long now) {
    attempts.getAndIncrement();
    lastAttemptMs = Math.max(lastAppendTime, now);
    lastAppendTime = Math.max(lastAppendTime, now);
    retry = true;
}

/** @return ms the batch spent queued in the accumulator before being drained. */
long queueTimeMs() {
    return drainedMs - createdMs;
}

/** @return ms elapsed since the last attempt; never negative. */
long waitedTimeMs(long nowMs) {
    return Math.max(0, nowMs - lastAttemptMs);
}

/** Record when the batch was drained from the accumulator (monotonically increasing). */
void drained(long nowMs) {
    this.drainedMs = Math.max(drainedMs, nowMs);
}

/** @return true if this batch was created by splitting an oversized batch. */
boolean isSplitBatch() {
    return isSplitBatch;
}
/**
 * Returns whether the batch is being retried for sending to Kafka.
 */
public boolean inRetry() {
    return this.retry;
}

/** @return the built records for this batch. */
public MemoryRecords records() {
    return recordsBuilder.build();
}

/** @return the builder's estimated size of this batch in bytes. */
public int estimatedSizeInBytes() {
    return recordsBuilder.estimatedSizeInBytes();
}

/** @return the compression ratio reported by the builder. */
public double compressionRatio() {
    return recordsBuilder.compressionRatio();
}

/** @return true if the underlying builder cannot accept more records. */
public boolean isFull() {
    return recordsBuilder.isFull();
}
/** Assign producer id/epoch, base sequence and transactional flag to the underlying builder. */
public void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional) {
    recordsBuilder.setProducerState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch, baseSequence, isTransactional);
}

/**
 * Reopen the builder and rewrite the producer state with a new base sequence.
 * Marks the batch as reopened so {@code sequenceHasBeenReset()} reports true.
 */
public void resetProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence) {
    log.info("Resetting sequence number of batch with current sequence {} for partition {} to {}",
            this.baseSequence(), this.topicPartition, baseSequence);
    reopened = true;
    recordsBuilder.reopenAndRewriteProducerState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch, baseSequence, isTransactional());
}

/**
 * Release resources required for record appends (e.g. compression buffers). Once this method is called, it's only
 * possible to update the RecordBatch header.
 */
public void closeForRecordAppends() {
    recordsBuilder.closeForRecordAppends();
}

/**
 * Fully close the builder and feed the observed compression ratio back into the
 * per-topic estimator (skipped for control batches).
 */
public void close() {
    recordsBuilder.close();
    if (!recordsBuilder.isControlBatch()) {
        CompressionRatioEstimator.updateEstimation(topicPartition.topic(),
                                                   recordsBuilder.compression().type(),
                                                   (float) recordsBuilder.compressionRatio());
    }
    reopened = false;
}
/**
 * Abort the record builder and reset the state of the underlying buffer. This is used prior to aborting
 * the batch with {@link #abort(RuntimeException)} and ensures that no record previously appended can be
 * read. This is used in scenarios where we want to ensure a batch ultimately gets aborted, but in which
 * it is not safe to invoke the completion callbacks (e.g. because we are holding a lock, such as
 * when aborting batches in {@link RecordAccumulator}).
 */
public void abortRecordAppends() {
    recordsBuilder.abort();
}

/** @return true if the underlying builder has been closed. */
public boolean isClosed() {
    return recordsBuilder.isClosed();
}

/** @return the backing buffer of the underlying builder. */
public ByteBuffer buffer() {
    return recordsBuilder.buffer();
}

/** @return the capacity the backing buffer was created with. */
public int initialCapacity() {
    return recordsBuilder.initialCapacity();
}

/** @return true while the builder still accepts appends (i.e. it is not closed). */
public boolean isWritable() {
    return !recordsBuilder.isClosed();
}
/** @return the record format (magic) version used by the builder. */
public byte magic() {
    return recordsBuilder.magic();
}

/** @return the producer id written into the batch. */
public long producerId() {
    return recordsBuilder.producerId();
}

/** @return the producer epoch written into the batch. */
public short producerEpoch() {
    return recordsBuilder.producerEpoch();
}

/** @return the base sequence assigned to the batch. */
public int baseSequence() {
    return recordsBuilder.baseSequence();
}

/** @return the sequence number of the last record in the batch. */
public int lastSequence() {
    return recordsBuilder.baseSequence() + recordsBuilder.numRecords() - 1;
}

/** @return true once a real sequence (not NO_SEQUENCE) has been assigned. */
public boolean hasSequence() {
    return baseSequence() != RecordBatch.NO_SEQUENCE;
}

/** @return true if the batch is part of a transaction. */
public boolean isTransactional() {
    return recordsBuilder.isTransactional();
}

/** @return true if {@code resetProducerState} reopened this batch. */
public boolean sequenceHasBeenReset() {
    return reopened;
}
/** @return true once the batch's buffer has been returned to the pool. */
public boolean isBufferDeallocated() {
    return bufferDeallocated;
}

/** Mark the batch's buffer as returned to the pool. */
public void markBufferDeallocated() {
    bufferDeallocated = true;
}

/** @return true while the batch has been handed to the NetworkClient. */
public boolean isInflight() {
    return inflight;
}

/** Set whether the batch is currently in flight to the NetworkClient. */
public void setInflight(boolean inflight) {
    this.inflight = inflight;
}

// VisibleForTesting
OptionalInt currentLeaderEpoch() {
    return currentLeaderEpoch;
}

// VisibleForTesting
int attemptsWhenLeaderLastChanged() {
    return attemptsWhenLeaderLastChanged;
}
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerBatch.java
|
#!/usr/bin/env python3
# Copyright (C) 2017 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
# Expected `ps` shell output, one regex pattern per row (matched with
# pexpect's `child.expect`, which treats these strings as regular
# expressions, so `\t` and `\d` are regex escapes).
# Raw strings are used so that sequences like `\d` are not parsed as
# (invalid) string escape sequences — those raise SyntaxWarning on modern
# Python and are slated to become syntax errors.
PS_EXPECTED = (
    (r'\tpid | name | state Q | pri | stack ( used) | '
     r'base addr | current | runtime | switches'),
    (r'\t - | isr_stack | - - | - | \d+ ( -?\d+) | '
     r'0x\d+ | 0x\d+'),
    (r'\t 1 | idle | pending Q | 15 | \d+ ( -?\d+) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 2 | main | running Q | 7 | \d+ ( -?\d+) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 3 | thread | bl rx _ | 6 | \d+ ( -?\d+) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 4 | thread | bl rx _ | 6 | \d+ ( -?\d+) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 5 | thread | bl rx _ | 6 | \d+ ( -?\d+) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 6 | thread | bl mutex _ | 6 | \d+ ( -?\d+) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t 7 | thread | bl rx _ | 6 | \d+ ( -?\d+) | '
     r'0x\d+ | 0x\d+ | \d+\.\d+% | \d+'),
    (r'\t | SUM | | | \d+ (\d+)')
)
def _check_startup(child):
for i in range(5):
child.expect_exact('Creating thread #{}, next={}'
.format(i, (i + 1) % 5))
def _check_help(child):
child.sendline('')
child.expect('>')
child.sendline('help')
child.expect_exact('Command Description')
child.expect_exact('---------------------------------------')
child.expect_exact('reboot Reboot the node')
child.expect_exact('ps Prints information about '
'running threads.')
def _check_ps(child):
    """Run `ps` and match every expected output row (regex) in order."""
    child.sendline('ps')
    for pattern in PS_EXPECTED:
        child.expect(pattern)
def testfunc(child):
    # Full smoke test: boot messages, then `help` output, then `ps` output.
    _check_startup(child)
    _check_help(child)
    _check_ps(child)
if __name__ == "__main__":
sys.path.append(os.path.join(os.environ['RIOTTOOLS'], 'testrunner'))
from testrunner import run
sys.exit(run(testfunc))
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import absolute_import
from __future__ import with_statement
from mock import patch
from celery import current_app
from celery import states
from celery.exceptions import RetryTaskError
from celery.task.trace import TraceInfo, eager_trace_task, trace_task
from celery.tests.utils import Case, Mock
@current_app.task
def add(x, y):
    """Trivial task fixture; returns ``x + y`` so tracing success can be asserted."""
    return x + y
@current_app.task(ignore_result=True)
def add_cast(x, y):
    """Like ``add`` but with ``ignore_result=True``, for error-handling tests."""
    return x + y
@current_app.task
def raises(exc):
    """Task fixture that raises whatever exception instance it is given."""
    raise exc
def trace(task, args=(), kwargs=None, propagate=False):
    """Eagerly trace ``task`` with a fixed task id.

    ``kwargs`` defaults to ``None`` rather than a shared mutable ``{}`` so a
    callee that mutates the mapping cannot leak state between calls
    (classic mutable-default-argument pitfall).
    """
    if kwargs is None:
        kwargs = {}
    return eager_trace_task(task, 'id-1', args, kwargs,
                            propagate=propagate)
class test_trace(Case):
    """Exercise ``eager_trace_task``/``trace_task`` state handling."""

    def test_trace_successful(self):
        # A successful trace yields (return value, no TraceInfo).
        retval, info = trace(add, (2, 2), {})
        self.assertIsNone(info)
        self.assertEqual(retval, 4)

    def test_trace_SystemExit(self):
        # SystemExit must propagate out of the tracer, not be swallowed.
        with self.assertRaises(SystemExit):
            trace(raises, (SystemExit(), ), {})

    def test_trace_RetryTaskError(self):
        exc = RetryTaskError('foo', 'bar')
        _, info = trace(raises, (exc, ), {})
        self.assertEqual(info.state, states.RETRY)
        self.assertIs(info.retval, exc)

    def test_trace_exception(self):
        exc = KeyError('foo')
        _, info = trace(raises, (exc, ), {})
        self.assertEqual(info.state, states.FAILURE)
        self.assertIs(info.retval, exc)

    def test_trace_exception_propagate(self):
        # With propagate=True the original exception is re-raised.
        with self.assertRaises(KeyError):
            trace(raises, (KeyError('foo'), ), {}, propagate=True)

    @patch('celery.task.trace.build_tracer')
    @patch('celery.task.trace.report_internal_error')
    def test_outside_body_error(self, report_internal_error, build_tracer):
        # An error raised by the tracer itself (outside the task body) must be
        # reported via report_internal_error rather than crash trace_task.
        tracer = Mock()
        tracer.side_effect = KeyError('foo')
        build_tracer.return_value = tracer

        @current_app.task
        def xtask():
            pass

        trace_task(xtask, 'uuid', (), {})
        self.assertTrue(report_internal_error.call_count)
        self.assertIs(xtask.__trace__, tracer)
class test_TraceInfo(Case):
    """Tests for ``TraceInfo.handle_error_state`` dispatch."""

    class TI(TraceInfo):
        # Re-add __dict__ so attributes (e.g. a mocked handle_failure)
        # can be set despite TraceInfo using __slots__.
        __slots__ = TraceInfo.__slots__ + ('__dict__', )

    def test_handle_error_state(self):
        x = self.TI(states.FAILURE)
        x.handle_failure = Mock()
        x.handle_error_state(add_cast)
        # FAILURE state must route to handle_failure, honouring the task's
        # store_errors_even_if_ignored setting.
        x.handle_failure.assert_called_with(
            add_cast,
            store_errors=add_cast.store_errors_even_if_ignored,
        )
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import annotations
from collections import deque
from typing import TYPE_CHECKING
from twisted.internet import defer
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.web.client import (
URI,
BrowserLikePolicyForHTTPS,
ResponseFailed,
_StandardEndpointFactory,
)
from twisted.web.error import SchemeNotSupported
from scrapy.core.downloader.contextfactory import AcceptableProtocolsContextFactory
from scrapy.core.http2.protocol import H2ClientFactory, H2ClientProtocol
if TYPE_CHECKING:
from twisted.internet.base import ReactorBase
from twisted.internet.endpoints import HostnameEndpoint
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
# Key identifying a pooled HTTP/2 connection: (scheme, hostname, port).
ConnectionKeyT = tuple[bytes, bytes, int]
class H2ConnectionPool:
    """Pool of HTTP/2 connections keyed by ``(scheme, hostname, port)``.

    Requests that arrive while a connection is still being established are
    queued as deferreds and fired once the connection is ready (or fails).
    """

    def __init__(self, reactor: ReactorBase, settings: Settings) -> None:
        self._reactor = reactor
        self.settings = settings
        # Store a dictionary which is used to get the respective
        # H2ClientProtocolInstance using the key as Tuple(scheme, hostname, port)
        self._connections: dict[ConnectionKeyT, H2ClientProtocol] = {}
        # Save all requests that arrive before the connection is established
        self._pending_requests: dict[
            ConnectionKeyT, deque[Deferred[H2ClientProtocol]]
        ] = {}

    def get_connection(
        self, key: ConnectionKeyT, uri: URI, endpoint: HostnameEndpoint
    ) -> Deferred[H2ClientProtocol]:
        """Return a deferred firing with a ready protocol for ``key``."""
        if key in self._pending_requests:
            # Received a request while connecting to remote
            # Create a deferred which will fire with the H2ClientProtocol
            # instance
            d: Deferred[H2ClientProtocol] = Deferred()
            self._pending_requests[key].append(d)
            return d

        # Check if we already have a connection to the remote
        conn = self._connections.get(key, None)
        if conn:
            # Return this connection instance wrapped inside a deferred
            return defer.succeed(conn)

        # No connection is established for the given URI
        return self._new_connection(key, uri, endpoint)

    def _new_connection(
        self, key: ConnectionKeyT, uri: URI, endpoint: HostnameEndpoint
    ) -> Deferred[H2ClientProtocol]:
        # Registering the pending queue first marks this key as "connecting"
        # for any request arriving before endpoint.connect() completes.
        self._pending_requests[key] = deque()

        conn_lost_deferred: Deferred[list[BaseException]] = Deferred()
        conn_lost_deferred.addCallback(self._remove_connection, key)

        factory = H2ClientFactory(uri, self.settings, conn_lost_deferred)
        conn_d = endpoint.connect(factory)
        conn_d.addCallback(self.put_connection, key)

        d: Deferred[H2ClientProtocol] = Deferred()
        self._pending_requests[key].append(d)
        return d

    def put_connection(
        self, conn: H2ClientProtocol, key: ConnectionKeyT
    ) -> H2ClientProtocol:
        """Register ``conn`` under ``key`` and flush the pending queue."""
        self._connections[key] = conn

        # Now as we have established a proper HTTP/2 connection
        # we fire all the deferred's with the connection instance
        pending_requests = self._pending_requests.pop(key, None)
        while pending_requests:
            d = pending_requests.popleft()
            d.callback(conn)

        return conn

    def _remove_connection(
        self, errors: list[BaseException], key: ConnectionKeyT
    ) -> None:
        # Invoked via conn_lost_deferred when the connection is lost.
        self._connections.pop(key)

        # Call the errback of all the pending requests for this connection
        pending_requests = self._pending_requests.pop(key, None)
        while pending_requests:
            d = pending_requests.popleft()
            d.errback(ResponseFailed(errors))

    def close_connections(self) -> None:
        """Abort every pooled HTTP/2 connection's transport.

        Note: this returns ``None``; connection teardown completes
        asynchronously via each connection's lost-connection deferred.
        """
        for conn in self._connections.values():
            assert conn.transport is not None  # typing
            conn.transport.abortConnection()
class H2Agent:
    """HTTP/2 agent that issues requests over pooled connections."""

    def __init__(
        self,
        reactor: ReactorBase,
        pool: H2ConnectionPool,
        # NOTE(review): this default instance is created once at import time and
        # shared by every H2Agent that does not pass its own factory — presumably
        # intentional (the policy object looks stateless); confirm before changing.
        context_factory: BrowserLikePolicyForHTTPS = BrowserLikePolicyForHTTPS(),
        connect_timeout: float | None = None,
        bind_address: bytes | None = None,
    ) -> None:
        self._reactor = reactor
        self._pool = pool
        # Restrict TLS ALPN negotiation to HTTP/2 only.
        self._context_factory = AcceptableProtocolsContextFactory(
            context_factory, acceptable_protocols=[b"h2"]
        )
        self.endpoint_factory = _StandardEndpointFactory(
            self._reactor, self._context_factory, connect_timeout, bind_address
        )

    def get_endpoint(self, uri: URI) -> HostnameEndpoint:
        """Build the client endpoint used to reach ``uri``."""
        return self.endpoint_factory.endpointForURI(uri)

    def get_key(self, uri: URI) -> ConnectionKeyT:
        """
        Arguments:
            uri - URI obtained directly from request URL
        """
        return uri.scheme, uri.host, uri.port

    def request(self, request: Request, spider: Spider) -> Deferred[Response]:
        """Send ``request`` over a pooled connection; errback on unsupported schemes."""
        uri = URI.fromBytes(bytes(request.url, encoding="utf-8"))
        try:
            endpoint = self.get_endpoint(uri)
        except SchemeNotSupported:
            return defer.fail(Failure())

        key = self.get_key(uri)
        d: Deferred[H2ClientProtocol] = self._pool.get_connection(key, uri, endpoint)
        d2: Deferred[Response] = d.addCallback(
            lambda conn: conn.request(request, spider)
        )
        return d2
class ScrapyProxyH2Agent(H2Agent):
    """H2Agent variant that routes every request through a fixed HTTP proxy."""

    def __init__(
        self,
        reactor: ReactorBase,
        proxy_uri: URI,
        pool: H2ConnectionPool,
        context_factory: BrowserLikePolicyForHTTPS = BrowserLikePolicyForHTTPS(),
        connect_timeout: float | None = None,
        bind_address: bytes | None = None,
    ) -> None:
        super().__init__(
            reactor=reactor,
            pool=pool,
            context_factory=context_factory,
            connect_timeout=connect_timeout,
            bind_address=bind_address,
        )
        self._proxy_uri = proxy_uri

    def get_endpoint(self, uri: URI) -> HostnameEndpoint:
        # Always connect to the proxy, regardless of the request URI.
        return self.endpoint_factory.endpointForURI(self._proxy_uri)

    def get_key(self, uri: URI) -> ConnectionKeyT:
        """We use the proxy uri instead of uri obtained from request url"""
        return b"http-proxy", self._proxy_uri.host, self._proxy_uri.port
|
python
|
github
|
https://github.com/scrapy/scrapy
|
scrapy/core/http2/agent.py
|
"""Agent factory for creating agents with middleware support."""
from __future__ import annotations
import itertools
from dataclasses import dataclass, field
from typing import (
TYPE_CHECKING,
Annotated,
Any,
Generic,
cast,
get_args,
get_origin,
get_type_hints,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, AnyMessage, SystemMessage, ToolMessage
from langchain_core.tools import BaseTool
from langgraph._internal._runnable import RunnableCallable
from langgraph.constants import END, START
from langgraph.graph.state import StateGraph
from langgraph.prebuilt.tool_node import ToolCallWithContext, ToolNode
from langgraph.types import Command, Send
from typing_extensions import NotRequired, Required, TypedDict
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ExtendedModelResponse,
JumpTo,
ModelRequest,
ModelResponse,
OmitFromSchema,
ResponseT,
StateT_co,
_InputAgentState,
_OutputAgentState,
)
from langchain.agents.structured_output import (
AutoStrategy,
MultipleStructuredOutputsError,
OutputToolBinding,
ProviderStrategy,
ProviderStrategyBinding,
ResponseFormat,
StructuredOutputError,
StructuredOutputValidationError,
ToolStrategy,
)
from langchain.chat_models import init_chat_model
@dataclass
class _ComposedExtendedModelResponse(Generic[ResponseT]):
    """Internal result from composed ``wrap_model_call`` middleware.

    Unlike ``ExtendedModelResponse`` (user-facing, single command), this holds the
    full list of commands accumulated across all middleware layers during
    composition. Produced by the handler-composition helpers (see
    ``_chain_model_call_handlers``).
    """

    model_response: ModelResponse[ResponseT]
    """The underlying model response."""

    commands: list[Command[Any]] = field(default_factory=list)
    """Commands accumulated from all middleware layers (inner-first, then outer)."""
if TYPE_CHECKING:
from collections.abc import Awaitable, Callable, Sequence
from langchain_core.runnables import Runnable, RunnableConfig
from langgraph.cache.base import BaseCache
from langgraph.graph.state import CompiledStateGraph
from langgraph.runtime import Runtime
from langgraph.store.base import BaseStore
from langgraph.types import Checkpointer
from langchain.agents.middleware.types import ToolCallRequest, ToolCallWrapper
# User-facing handler shape: (request, next_handler) -> response; may return a
# bare AIMessage or an ExtendedModelResponse in addition to ModelResponse.
_ModelCallHandler = Callable[
    [ModelRequest[ContextT], Callable[[ModelRequest[ContextT]], ModelResponse]],
    ModelResponse | AIMessage | ExtendedModelResponse,
]

# Composed (internal) handler shape: always normalized to
# _ComposedExtendedModelResponse by the composition helpers.
_ComposedModelCallHandler = Callable[
    [ModelRequest[ContextT], Callable[[ModelRequest[ContextT]], ModelResponse]],
    _ComposedExtendedModelResponse,
]

# Async counterpart of _ModelCallHandler.
_AsyncModelCallHandler = Callable[
    [ModelRequest[ContextT], Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse]]],
    Awaitable[ModelResponse | AIMessage | ExtendedModelResponse],
]

# Async counterpart of _ComposedModelCallHandler.
_ComposedAsyncModelCallHandler = Callable[
    [ModelRequest[ContextT], Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse]]],
    Awaitable[_ComposedExtendedModelResponse],
]
# ToolMessage content sent back to the model when structured-output parsing fails.
STRUCTURED_OUTPUT_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."

# Error raised when middleware injects tools the agent has no executor for.
DYNAMIC_TOOL_ERROR_TEMPLATE = """
Middleware added tools that the agent doesn't know how to execute.
Unknown tools: {unknown_tool_names}
Registered tools: {available_tool_names}
This happens when middleware modifies `request.tools` in `wrap_model_call` to include
tools that weren't passed to `create_agent()`.
How to fix this:
Option 1: Register tools at agent creation (recommended for most cases)
Pass the tools to `create_agent(tools=[...])` or set them on `middleware.tools`.
This makes tools available for every agent invocation.
Option 2: Handle dynamic tools in middleware (for tools created at runtime)
Implement `wrap_tool_call` to execute tools that are added dynamically:
class MyMiddleware(AgentMiddleware):
def wrap_tool_call(self, request, handler):
if request.tool_call["name"] == "dynamic_tool":
# Execute the dynamic tool yourself or override with tool instance
return handler(request.override(tool=my_dynamic_tool))
return handler(request)
""".strip()

FALLBACK_MODELS_WITH_STRUCTURED_OUTPUT = [
    # if model profile data are not available, these models are assumed to support
    # structured output
    "grok",
    "gpt-5",
    "gpt-4.1",
    "gpt-4o",
    "gpt-oss",
    "o3-pro",
    "o3-mini",
]
def _normalize_to_model_response(
    result: ModelResponse | AIMessage | ExtendedModelResponse,
) -> ModelResponse:
    """Normalize middleware return value to ModelResponse.

    At inner composition boundaries, ``ExtendedModelResponse`` is unwrapped to its
    underlying ``ModelResponse`` so that inner middleware always sees ``ModelResponse``
    from the handler.
    """
    # A bare AIMessage is wrapped as a single-message response with no structured output.
    if isinstance(result, AIMessage):
        return ModelResponse(result=[result], structured_response=None)
    if isinstance(result, ExtendedModelResponse):
        return result.model_response
    return result
def _build_commands(
    model_response: ModelResponse,
    middleware_commands: list[Command[Any]] | None = None,
) -> list[Command[Any]]:
    """Build a list of Commands from a model response and middleware commands.

    The first Command contains the model response state (messages and optional
    structured_response). Middleware commands are appended as-is.

    Args:
        model_response: The model response containing messages and optional
            structured output.
        middleware_commands: Commands accumulated from middleware layers during
            composition (inner-first ordering).

    Returns:
        List of ``Command`` objects ready to be returned from a model node.

    Raises:
        NotImplementedError: if any middleware command uses ``goto``, ``resume``
            or ``graph``, which are not yet supported in ``wrap_model_call``.
    """
    state: dict[str, Any] = {"messages": model_response.result}
    if model_response.structured_response is not None:
        state["structured_response"] = model_response.structured_response
    # Validate middleware commands before emitting anything.
    for cmd in middleware_commands or []:
        if cmd.goto:
            msg = (
                "Command goto is not yet supported in wrap_model_call middleware. "
                "Use the jump_to state field with before_model/after_model hooks instead."
            )
            raise NotImplementedError(msg)
        if cmd.resume:
            msg = "Command resume is not yet supported in wrap_model_call middleware."
            raise NotImplementedError(msg)
        if cmd.graph:
            msg = "Command graph is not yet supported in wrap_model_call middleware."
            raise NotImplementedError(msg)
    commands: list[Command[Any]] = [Command(update=state)]
    commands.extend(middleware_commands or [])
    return commands
def _chain_model_call_handlers(
    handlers: Sequence[_ModelCallHandler[ContextT]],
) -> _ComposedModelCallHandler[ContextT] | None:
    """Compose multiple ``wrap_model_call`` handlers into single middleware stack.

    Composes handlers so first in list becomes outermost layer. Each handler receives a
    handler callback to execute inner layers. Commands from each layer are accumulated
    into a list (inner-first, then outer) without merging.

    Args:
        handlers: List of handlers.
            First handler wraps all others.

    Returns:
        Composed handler returning ``_ComposedExtendedModelResponse``,
        or ``None`` if handlers empty.
    """
    if not handlers:
        return None

    def _to_composed_result(
        result: ModelResponse | AIMessage | ExtendedModelResponse | _ComposedExtendedModelResponse,
        extra_commands: list[Command[Any]] | None = None,
    ) -> _ComposedExtendedModelResponse:
        """Normalize any handler result to _ComposedExtendedModelResponse."""
        # extra_commands (from inner layers) come first so ordering stays inner-first.
        commands: list[Command[Any]] = list(extra_commands or [])
        if isinstance(result, _ComposedExtendedModelResponse):
            commands.extend(result.commands)
            model_response = result.model_response
        elif isinstance(result, ExtendedModelResponse):
            model_response = result.model_response
            if result.command is not None:
                commands.append(result.command)
        else:
            # Plain ModelResponse or bare AIMessage — no commands to collect.
            model_response = _normalize_to_model_response(result)
        return _ComposedExtendedModelResponse(model_response=model_response, commands=commands)

    if len(handlers) == 1:
        # Single handler: no chaining needed, only result normalization.
        single_handler = handlers[0]

        def normalized_single(
            request: ModelRequest[ContextT],
            handler: Callable[[ModelRequest[ContextT]], ModelResponse],
        ) -> _ComposedExtendedModelResponse:
            return _to_composed_result(single_handler(request, handler))

        return normalized_single

    def compose_two(
        outer: _ModelCallHandler[ContextT] | _ComposedModelCallHandler[ContextT],
        inner: _ModelCallHandler[ContextT] | _ComposedModelCallHandler[ContextT],
    ) -> _ComposedModelCallHandler[ContextT]:
        """Compose two handlers where outer wraps inner."""

        def composed(
            request: ModelRequest[ContextT],
            handler: Callable[[ModelRequest[ContextT]], ModelResponse],
        ) -> _ComposedExtendedModelResponse:
            # Closure variable to capture inner's commands before normalizing
            accumulated_commands: list[Command[Any]] = []

            def inner_handler(req: ModelRequest[ContextT]) -> ModelResponse:
                # Clear on each call for retry safety
                # (outer may invoke inner_handler multiple times; only the
                # commands from the final attempt should survive).
                accumulated_commands.clear()
                inner_result = inner(req, handler)
                if isinstance(inner_result, _ComposedExtendedModelResponse):
                    accumulated_commands.extend(inner_result.commands)
                    return inner_result.model_response
                if isinstance(inner_result, ExtendedModelResponse):
                    if inner_result.command is not None:
                        accumulated_commands.append(inner_result.command)
                    return inner_result.model_response
                # Inner layers always see a plain ModelResponse from the handler.
                return _normalize_to_model_response(inner_result)

            outer_result = outer(request, inner_handler)
            # Merge inner commands (accumulated via the closure) with outer's own.
            return _to_composed_result(
                outer_result,
                extra_commands=accumulated_commands or None,
            )

        return composed

    # Compose right-to-left: outer(inner(innermost(handler)))
    composed_handler = compose_two(handlers[-2], handlers[-1])
    for h in reversed(handlers[:-2]):
        composed_handler = compose_two(h, composed_handler)
    return composed_handler
def _chain_async_model_call_handlers(
    handlers: Sequence[_AsyncModelCallHandler[ContextT]],
) -> _ComposedAsyncModelCallHandler[ContextT] | None:
    """Compose multiple async ``wrap_model_call`` handlers into single middleware stack.

    Commands from each layer are accumulated into a list (inner-first, then outer)
    without merging.

    Args:
        handlers: List of async handlers.
            First handler wraps all others.

    Returns:
        Composed async handler returning ``_ComposedExtendedModelResponse``,
        or ``None`` if handlers empty.
    """
    if not handlers:
        return None

    def _to_composed_result(
        result: ModelResponse | AIMessage | ExtendedModelResponse | _ComposedExtendedModelResponse,
        extra_commands: list[Command[Any]] | None = None,
    ) -> _ComposedExtendedModelResponse:
        """Normalize any handler result to _ComposedExtendedModelResponse."""
        # extra_commands (from inner layers) come first so ordering stays inner-first.
        commands: list[Command[Any]] = list(extra_commands or [])
        if isinstance(result, _ComposedExtendedModelResponse):
            commands.extend(result.commands)
            model_response = result.model_response
        elif isinstance(result, ExtendedModelResponse):
            model_response = result.model_response
            if result.command is not None:
                commands.append(result.command)
        else:
            # Plain ModelResponse or bare AIMessage — no commands to collect.
            model_response = _normalize_to_model_response(result)
        return _ComposedExtendedModelResponse(model_response=model_response, commands=commands)

    if len(handlers) == 1:
        # Single handler: no chaining needed, only result normalization.
        single_handler = handlers[0]

        async def normalized_single(
            request: ModelRequest[ContextT],
            handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse]],
        ) -> _ComposedExtendedModelResponse:
            return _to_composed_result(await single_handler(request, handler))

        return normalized_single

    def compose_two(
        outer: _AsyncModelCallHandler[ContextT] | _ComposedAsyncModelCallHandler[ContextT],
        inner: _AsyncModelCallHandler[ContextT] | _ComposedAsyncModelCallHandler[ContextT],
    ) -> _ComposedAsyncModelCallHandler[ContextT]:
        """Compose two async handlers where outer wraps inner."""

        async def composed(
            request: ModelRequest[ContextT],
            handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse]],
        ) -> _ComposedExtendedModelResponse:
            # Closure variable to capture inner's commands before normalizing
            accumulated_commands: list[Command[Any]] = []

            async def inner_handler(req: ModelRequest[ContextT]) -> ModelResponse:
                # Clear on each call for retry safety
                # (outer may await inner_handler multiple times; only the
                # commands from the final attempt should survive).
                accumulated_commands.clear()
                inner_result = await inner(req, handler)
                if isinstance(inner_result, _ComposedExtendedModelResponse):
                    accumulated_commands.extend(inner_result.commands)
                    return inner_result.model_response
                if isinstance(inner_result, ExtendedModelResponse):
                    if inner_result.command is not None:
                        accumulated_commands.append(inner_result.command)
                    return inner_result.model_response
                # Inner layers always see a plain ModelResponse from the handler.
                return _normalize_to_model_response(inner_result)

            outer_result = await outer(request, inner_handler)
            # Merge inner commands (accumulated via the closure) with outer's own.
            return _to_composed_result(
                outer_result,
                extra_commands=accumulated_commands or None,
            )

        return composed

    # Compose right-to-left: outer(inner(innermost(handler)))
    composed_handler = compose_two(handlers[-2], handlers[-1])
    for h in reversed(handlers[:-2]):
        composed_handler = compose_two(h, composed_handler)
    return composed_handler
def _resolve_schema(schemas: set[type], schema_name: str, omit_flag: str | None = None) -> type:
"""Resolve schema by merging schemas and optionally respecting `OmitFromSchema` annotations.
Args:
schemas: List of schema types to merge
schema_name: Name for the generated `TypedDict`
omit_flag: If specified, omit fields with this flag set (`'input'` or
`'output'`)
Returns:
Merged schema as `TypedDict`
"""
all_annotations = {}
for schema in schemas:
hints = get_type_hints(schema, include_extras=True)
for field_name, field_type in hints.items():
should_omit = False
if omit_flag:
# Check for omission in the annotation metadata
metadata = _extract_metadata(field_type)
for meta in metadata:
if isinstance(meta, OmitFromSchema) and getattr(meta, omit_flag) is True:
should_omit = True
break
if not should_omit:
all_annotations[field_name] = field_type
return TypedDict(schema_name, all_annotations) # type: ignore[operator]
def _extract_metadata(type_: type) -> list[Any]:
"""Extract metadata from a field type, handling `Required`/`NotRequired` and `Annotated` wrappers.""" # noqa: E501
# Handle Required[Annotated[...]] or NotRequired[Annotated[...]]
if get_origin(type_) in {Required, NotRequired}:
inner_type = get_args(type_)[0]
if get_origin(inner_type) is Annotated:
return list(get_args(inner_type)[1:])
# Handle direct Annotated[...]
elif get_origin(type_) is Annotated:
return list(get_args(type_)[1:])
return []
def _get_can_jump_to(middleware: AgentMiddleware[Any, Any], hook_name: str) -> list[JumpTo]:
    """Get the `can_jump_to` list from either sync or async hook methods.

    Args:
        middleware: The middleware instance to inspect.
        hook_name: The name of the hook (`'before_model'` or `'after_model'`).

    Returns:
        List of jump destinations, or empty list if not configured.
    """
    middleware_cls = middleware.__class__
    # Check the sync hook first, then the async variant (prefixed with "a").
    for method_name in (hook_name, f"a{hook_name}"):
        base_method = getattr(AgentMiddleware, method_name, None)
        method = getattr(middleware_cls, method_name, None)
        # Only honor __can_jump_to__ on methods the subclass actually overrides;
        # the base-class defaults never declare jump targets.
        if method and method is not base_method and hasattr(method, "__can_jump_to__"):
            return method.__can_jump_to__
    return []
def _supports_provider_strategy(
    model: str | BaseChatModel, tools: list[BaseTool | dict[str, Any]] | None = None
) -> bool:
    """Check if a model supports provider-specific structured output.

    Args:
        model: Model name string or `BaseChatModel` instance.
        tools: Optional list of tools provided to the agent.
            Needed because some models don't support structured output together with tool calling.

    Returns:
        `True` if the model supports provider-specific structured output, `False` otherwise.
    """
    model_name: str | None = None
    if isinstance(model, str):
        model_name = model
    elif isinstance(model, BaseChatModel):
        # Providers expose the identifier under different attribute names;
        # try each in turn (may still be None/"" if none are set).
        model_name = (
            getattr(model, "model_name", None)
            or getattr(model, "model", None)
            or getattr(model, "model_id", "")
        )
        model_profile = model.profile
        if (
            model_profile is not None
            and model_profile.get("structured_output")
            # We make an exception for Gemini models, which currently do not support
            # simultaneous tool use with structured output
            and not (tools and isinstance(model_name, str) and "gemini" in model_name.lower())
        ):
            return True
    # Fallback: substring match against a known-good model-name list
    # (used when no profile information is available, e.g. string models).
    return (
        any(part in model_name.lower() for part in FALLBACK_MODELS_WITH_STRUCTURED_OUTPUT)
        if model_name
        else False
    )
def _handle_structured_output_error(
    exception: Exception,
    response_format: ResponseFormat[Any],
) -> tuple[bool, str]:
    """Decide how to react to a structured-output failure.

    Returns `(should_retry, retry_tool_message)`. Retry behavior is driven by the
    `handle_errors` setting on a `ToolStrategy`; any other strategy never retries.
    """
    no_retry: tuple[bool, str] = (False, "")

    if not isinstance(response_format, ToolStrategy):
        return no_retry

    handle_errors = response_format.handle_errors

    # Booleans: blanket on/off switch with the default error template.
    if handle_errors is True:
        return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
    if handle_errors is False:
        return no_retry

    # A string is used verbatim as the retry message.
    if isinstance(handle_errors, str):
        return True, handle_errors

    # A single exception class: retry only for matching exceptions.
    if isinstance(handle_errors, type):
        if issubclass(handle_errors, Exception) and isinstance(exception, handle_errors):
            return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
        return no_retry

    # A tuple of exception classes: retry if any of them matches.
    if isinstance(handle_errors, tuple):
        if any(isinstance(exception, exc_type) for exc_type in handle_errors):
            return True, STRUCTURED_OUTPUT_ERROR_TEMPLATE.format(error=str(exception))
        return no_retry

    # Otherwise it's a callable mapping the exception to a retry message.
    return True, handle_errors(exception)
def _chain_tool_call_wrappers(
wrappers: Sequence[ToolCallWrapper],
) -> ToolCallWrapper | None:
"""Compose wrappers into middleware stack (first = outermost).
Args:
wrappers: Wrappers in middleware order.
Returns:
Composed wrapper, or `None` if empty.
Example:
```python
wrapper = _chain_tool_call_wrappers([auth, cache, retry])
# Request flows: auth -> cache -> retry -> tool
# Response flows: tool -> retry -> cache -> auth
```
"""
if not wrappers:
return None
if len(wrappers) == 1:
return wrappers[0]
def compose_two(outer: ToolCallWrapper, inner: ToolCallWrapper) -> ToolCallWrapper:
"""Compose two wrappers where outer wraps inner."""
def composed(
request: ToolCallRequest,
execute: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
) -> ToolMessage | Command[Any]:
# Create a callable that invokes inner with the original execute
def call_inner(req: ToolCallRequest) -> ToolMessage | Command[Any]:
return inner(req, execute)
# Outer can call call_inner multiple times
return outer(request, call_inner)
return composed
# Chain all wrappers: first -> second -> ... -> last
result = wrappers[-1]
for wrapper in reversed(wrappers[:-1]):
result = compose_two(wrapper, result)
return result
def _chain_async_tool_call_wrappers(
wrappers: Sequence[
Callable[
[ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
Awaitable[ToolMessage | Command[Any]],
]
],
) -> (
Callable[
[ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
Awaitable[ToolMessage | Command[Any]],
]
| None
):
"""Compose async wrappers into middleware stack (first = outermost).
Args:
wrappers: Async wrappers in middleware order.
Returns:
Composed async wrapper, or `None` if empty.
"""
if not wrappers:
return None
if len(wrappers) == 1:
return wrappers[0]
def compose_two(
outer: Callable[
[ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
Awaitable[ToolMessage | Command[Any]],
],
inner: Callable[
[ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
Awaitable[ToolMessage | Command[Any]],
],
) -> Callable[
[ToolCallRequest, Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]]],
Awaitable[ToolMessage | Command[Any]],
]:
"""Compose two async wrappers where outer wraps inner."""
async def composed(
request: ToolCallRequest,
execute: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
) -> ToolMessage | Command[Any]:
# Create an async callable that invokes inner with the original execute
async def call_inner(req: ToolCallRequest) -> ToolMessage | Command[Any]:
return await inner(req, execute)
# Outer can call call_inner multiple times
return await outer(request, call_inner)
return composed
# Chain all wrappers: first -> second -> ... -> last
result = wrappers[-1]
for wrapper in reversed(wrappers[:-1]):
result = compose_two(wrapper, result)
return result
def create_agent(
model: str | BaseChatModel,
tools: Sequence[BaseTool | Callable[..., Any] | dict[str, Any]] | None = None,
*,
system_prompt: str | SystemMessage | None = None,
middleware: Sequence[AgentMiddleware[StateT_co, ContextT]] = (),
response_format: ResponseFormat[ResponseT] | type[ResponseT] | dict[str, Any] | None = None,
state_schema: type[AgentState[ResponseT]] | None = None,
context_schema: type[ContextT] | None = None,
checkpointer: Checkpointer | None = None,
store: BaseStore | None = None,
interrupt_before: list[str] | None = None,
interrupt_after: list[str] | None = None,
debug: bool = False,
name: str | None = None,
cache: BaseCache[Any] | None = None,
) -> CompiledStateGraph[
AgentState[ResponseT], ContextT, _InputAgentState, _OutputAgentState[ResponseT]
]:
"""Creates an agent graph that calls tools in a loop until a stopping condition is met.
For more details on using `create_agent`,
visit the [Agents](https://docs.langchain.com/oss/python/langchain/agents) docs.
Args:
model: The language model for the agent.
Can be a string identifier (e.g., `"openai:gpt-4"`) or a direct chat model
instance (e.g., [`ChatOpenAI`][langchain_openai.ChatOpenAI] or other another
[LangChain chat model](https://docs.langchain.com/oss/python/integrations/chat)).
For a full list of supported model strings, see
[`init_chat_model`][langchain.chat_models.init_chat_model(model_provider)].
!!! tip ""
See the [Models](https://docs.langchain.com/oss/python/langchain/models)
docs for more information.
tools: A list of tools, `dict`, or `Callable`.
If `None` or an empty list, the agent will consist of a model node without a
tool calling loop.
!!! tip ""
See the [Tools](https://docs.langchain.com/oss/python/langchain/tools)
docs for more information.
system_prompt: An optional system prompt for the LLM.
Can be a `str` (which will be converted to a `SystemMessage`) or a
`SystemMessage` instance directly. The system message is added to the
beginning of the message list when calling the model.
middleware: A sequence of middleware instances to apply to the agent.
Middleware can intercept and modify agent behavior at various stages.
!!! tip ""
See the [Middleware](https://docs.langchain.com/oss/python/langchain/middleware)
docs for more information.
response_format: An optional configuration for structured responses.
Can be a `ToolStrategy`, `ProviderStrategy`, or a Pydantic model class.
If provided, the agent will handle structured output during the
conversation flow.
Raw schemas will be wrapped in an appropriate strategy based on model
capabilities.
!!! tip ""
See the [Structured output](https://docs.langchain.com/oss/python/langchain/structured-output)
docs for more information.
state_schema: An optional `TypedDict` schema that extends `AgentState`.
When provided, this schema is used instead of `AgentState` as the base
schema for merging with middleware state schemas. This allows users to
add custom state fields without needing to create custom middleware.
Generally, it's recommended to use `state_schema` extensions via middleware
to keep relevant extensions scoped to corresponding hooks / tools.
context_schema: An optional schema for runtime context.
checkpointer: An optional checkpoint saver object.
Used for persisting the state of the graph (e.g., as chat memory) for a
single thread (e.g., a single conversation).
store: An optional store object.
Used for persisting data across multiple threads (e.g., multiple
conversations / users).
interrupt_before: An optional list of node names to interrupt before.
Useful if you want to add a user confirmation or other interrupt
before taking an action.
interrupt_after: An optional list of node names to interrupt after.
Useful if you want to return directly or run additional processing
on an output.
debug: Whether to enable verbose logging for graph execution.
When enabled, prints detailed information about each node execution, state
updates, and transitions during agent runtime. Useful for debugging
middleware behavior and understanding agent execution flow.
name: An optional name for the `CompiledStateGraph`.
This name will be automatically used when adding the agent graph to
another graph as a subgraph node - particularly useful for building
multi-agent systems.
cache: An optional `BaseCache` instance to enable caching of graph execution.
Returns:
A compiled `StateGraph` that can be used for chat interactions.
Raises:
AssertionError: If duplicate middleware instances are provided.
The agent node calls the language model with the messages list (after applying
the system prompt). If the resulting [`AIMessage`][langchain.messages.AIMessage]
contains `tool_calls`, the graph will then call the tools. The tools node executes
the tools and adds the responses to the messages list as
[`ToolMessage`][langchain.messages.ToolMessage] objects. The agent node then calls
the language model again. The process repeats until no more `tool_calls` are present
in the response. The agent then returns the full list of messages.
Example:
```python
from langchain.agents import create_agent
def check_weather(location: str) -> str:
'''Return the weather forecast for the specified location.'''
return f"It's always sunny in {location}"
graph = create_agent(
model="anthropic:claude-sonnet-4-5-20250929",
tools=[check_weather],
system_prompt="You are a helpful assistant",
)
inputs = {"messages": [{"role": "user", "content": "what is the weather in sf"}]}
for chunk in graph.stream(inputs, stream_mode="updates"):
print(chunk)
```
"""
# init chat model
if isinstance(model, str):
model = init_chat_model(model)
# Convert system_prompt to SystemMessage if needed
system_message: SystemMessage | None = None
if system_prompt is not None:
if isinstance(system_prompt, SystemMessage):
system_message = system_prompt
else:
system_message = SystemMessage(content=system_prompt)
# Handle tools being None or empty
if tools is None:
tools = []
# Convert response format and setup structured output tools
# Raw schemas are wrapped in AutoStrategy to preserve auto-detection intent.
# AutoStrategy is converted to ToolStrategy upfront to calculate tools during agent creation,
# but may be replaced with ProviderStrategy later based on model capabilities.
initial_response_format: ToolStrategy[Any] | ProviderStrategy[Any] | AutoStrategy[Any] | None
if response_format is None:
initial_response_format = None
elif isinstance(response_format, (ToolStrategy, ProviderStrategy)):
# Preserve explicitly requested strategies
initial_response_format = response_format
elif isinstance(response_format, AutoStrategy):
# AutoStrategy provided - preserve it for later auto-detection
initial_response_format = response_format
else:
# Raw schema - wrap in AutoStrategy to enable auto-detection
initial_response_format = AutoStrategy(schema=response_format)
# For AutoStrategy, convert to ToolStrategy to setup tools upfront
# (may be replaced with ProviderStrategy later based on model)
tool_strategy_for_setup: ToolStrategy[Any] | None = None
if isinstance(initial_response_format, AutoStrategy):
tool_strategy_for_setup = ToolStrategy(schema=initial_response_format.schema)
elif isinstance(initial_response_format, ToolStrategy):
tool_strategy_for_setup = initial_response_format
structured_output_tools: dict[str, OutputToolBinding[Any]] = {}
if tool_strategy_for_setup:
for response_schema in tool_strategy_for_setup.schema_specs:
structured_tool_info = OutputToolBinding.from_schema_spec(response_schema)
structured_output_tools[structured_tool_info.tool.name] = structured_tool_info
middleware_tools = [t for m in middleware for t in getattr(m, "tools", [])]
# Collect middleware with wrap_tool_call or awrap_tool_call hooks
# Include middleware with either implementation to ensure NotImplementedError is raised
# when middleware doesn't support the execution path
middleware_w_wrap_tool_call = [
m
for m in middleware
if m.__class__.wrap_tool_call is not AgentMiddleware.wrap_tool_call
or m.__class__.awrap_tool_call is not AgentMiddleware.awrap_tool_call
]
# Chain all wrap_tool_call handlers into a single composed handler
wrap_tool_call_wrapper = None
if middleware_w_wrap_tool_call:
wrappers = [m.wrap_tool_call for m in middleware_w_wrap_tool_call]
wrap_tool_call_wrapper = _chain_tool_call_wrappers(wrappers)
# Collect middleware with awrap_tool_call or wrap_tool_call hooks
# Include middleware with either implementation to ensure NotImplementedError is raised
# when middleware doesn't support the execution path
middleware_w_awrap_tool_call = [
m
for m in middleware
if m.__class__.awrap_tool_call is not AgentMiddleware.awrap_tool_call
or m.__class__.wrap_tool_call is not AgentMiddleware.wrap_tool_call
]
# Chain all awrap_tool_call handlers into a single composed async handler
awrap_tool_call_wrapper = None
if middleware_w_awrap_tool_call:
async_wrappers = [m.awrap_tool_call for m in middleware_w_awrap_tool_call]
awrap_tool_call_wrapper = _chain_async_tool_call_wrappers(async_wrappers)
# Setup tools
tool_node: ToolNode | None = None
# Extract built-in provider tools (dict format) and regular tools (BaseTool/callables)
built_in_tools = [t for t in tools if isinstance(t, dict)]
regular_tools = [t for t in tools if not isinstance(t, dict)]
# Tools that require client-side execution (must be in ToolNode)
available_tools = middleware_tools + regular_tools
# Create ToolNode if we have client-side tools OR if middleware defines wrap_tool_call
# (which may handle dynamically registered tools)
tool_node = (
ToolNode(
tools=available_tools,
wrap_tool_call=wrap_tool_call_wrapper,
awrap_tool_call=awrap_tool_call_wrapper,
)
if available_tools or wrap_tool_call_wrapper or awrap_tool_call_wrapper
else None
)
# Default tools for ModelRequest initialization
# Use converted BaseTool instances from ToolNode (not raw callables)
# Include built-ins and converted tools (can be changed dynamically by middleware)
# Structured tools are NOT included - they're added dynamically based on response_format
if tool_node:
default_tools = list(tool_node.tools_by_name.values()) + built_in_tools
else:
default_tools = list(built_in_tools)
# validate middleware
if len({m.name for m in middleware}) != len(middleware):
msg = "Please remove duplicate middleware instances."
raise AssertionError(msg)
middleware_w_before_agent = [
m
for m in middleware
if m.__class__.before_agent is not AgentMiddleware.before_agent
or m.__class__.abefore_agent is not AgentMiddleware.abefore_agent
]
middleware_w_before_model = [
m
for m in middleware
if m.__class__.before_model is not AgentMiddleware.before_model
or m.__class__.abefore_model is not AgentMiddleware.abefore_model
]
middleware_w_after_model = [
m
for m in middleware
if m.__class__.after_model is not AgentMiddleware.after_model
or m.__class__.aafter_model is not AgentMiddleware.aafter_model
]
middleware_w_after_agent = [
m
for m in middleware
if m.__class__.after_agent is not AgentMiddleware.after_agent
or m.__class__.aafter_agent is not AgentMiddleware.aafter_agent
]
# Collect middleware with wrap_model_call or awrap_model_call hooks
# Include middleware with either implementation to ensure NotImplementedError is raised
# when middleware doesn't support the execution path
middleware_w_wrap_model_call = [
m
for m in middleware
if m.__class__.wrap_model_call is not AgentMiddleware.wrap_model_call
or m.__class__.awrap_model_call is not AgentMiddleware.awrap_model_call
]
# Collect middleware with awrap_model_call or wrap_model_call hooks
# Include middleware with either implementation to ensure NotImplementedError is raised
# when middleware doesn't support the execution path
middleware_w_awrap_model_call = [
m
for m in middleware
if m.__class__.awrap_model_call is not AgentMiddleware.awrap_model_call
or m.__class__.wrap_model_call is not AgentMiddleware.wrap_model_call
]
# Compose wrap_model_call handlers into a single middleware stack (sync)
wrap_model_call_handler = None
if middleware_w_wrap_model_call:
sync_handlers = [m.wrap_model_call for m in middleware_w_wrap_model_call]
wrap_model_call_handler = _chain_model_call_handlers(sync_handlers)
# Compose awrap_model_call handlers into a single middleware stack (async)
awrap_model_call_handler = None
if middleware_w_awrap_model_call:
async_handlers = [m.awrap_model_call for m in middleware_w_awrap_model_call]
awrap_model_call_handler = _chain_async_model_call_handlers(async_handlers)
state_schemas: set[type] = {m.state_schema for m in middleware}
# Use provided state_schema if available, otherwise use base AgentState
base_state = state_schema if state_schema is not None else AgentState
state_schemas.add(base_state)
resolved_state_schema = _resolve_schema(state_schemas, "StateSchema", None)
input_schema = _resolve_schema(state_schemas, "InputSchema", "input")
output_schema = _resolve_schema(state_schemas, "OutputSchema", "output")
# create graph, add nodes
graph: StateGraph[
AgentState[ResponseT], ContextT, _InputAgentState, _OutputAgentState[ResponseT]
] = StateGraph(
state_schema=resolved_state_schema,
input_schema=input_schema,
output_schema=output_schema,
context_schema=context_schema,
)
def _handle_model_output(
output: AIMessage, effective_response_format: ResponseFormat[Any] | None
) -> dict[str, Any]:
"""Handle model output including structured responses.
Args:
output: The AI message output from the model.
effective_response_format: The actual strategy used (may differ from initial
if auto-detected).
"""
# Handle structured output with provider strategy
if isinstance(effective_response_format, ProviderStrategy):
if not output.tool_calls:
provider_strategy_binding = ProviderStrategyBinding.from_schema_spec(
effective_response_format.schema_spec
)
try:
structured_response = provider_strategy_binding.parse(output)
except Exception as exc:
schema_name = getattr(
effective_response_format.schema_spec.schema, "__name__", "response_format"
)
validation_error = StructuredOutputValidationError(schema_name, exc, output)
raise validation_error from exc
else:
return {"messages": [output], "structured_response": structured_response}
return {"messages": [output]}
# Handle structured output with tool strategy
if (
isinstance(effective_response_format, ToolStrategy)
and isinstance(output, AIMessage)
and output.tool_calls
):
structured_tool_calls = [
tc for tc in output.tool_calls if tc["name"] in structured_output_tools
]
if structured_tool_calls:
exception: StructuredOutputError | None = None
if len(structured_tool_calls) > 1:
# Handle multiple structured outputs error
tool_names = [tc["name"] for tc in structured_tool_calls]
exception = MultipleStructuredOutputsError(tool_names, output)
should_retry, error_message = _handle_structured_output_error(
exception, effective_response_format
)
if not should_retry:
raise exception
# Add error messages and retry
tool_messages = [
ToolMessage(
content=error_message,
tool_call_id=tc["id"],
name=tc["name"],
)
for tc in structured_tool_calls
]
return {"messages": [output, *tool_messages]}
# Handle single structured output
tool_call = structured_tool_calls[0]
try:
structured_tool_binding = structured_output_tools[tool_call["name"]]
structured_response = structured_tool_binding.parse(tool_call["args"])
tool_message_content = (
effective_response_format.tool_message_content
or f"Returning structured response: {structured_response}"
)
return {
"messages": [
output,
ToolMessage(
content=tool_message_content,
tool_call_id=tool_call["id"],
name=tool_call["name"],
),
],
"structured_response": structured_response,
}
except Exception as exc:
exception = StructuredOutputValidationError(tool_call["name"], exc, output)
should_retry, error_message = _handle_structured_output_error(
exception, effective_response_format
)
if not should_retry:
raise exception from exc
return {
"messages": [
output,
ToolMessage(
content=error_message,
tool_call_id=tool_call["id"],
name=tool_call["name"],
),
],
}
return {"messages": [output]}
def _get_bound_model(
    request: ModelRequest[ContextT],
) -> tuple[Runnable[Any, Any], ResponseFormat[Any] | None]:
    """Get the model with appropriate tool bindings.

    Performs auto-detection of strategy if needed based on model capabilities.

    Args:
        request: The model request containing model, tools, and response format.

    Returns:
        Tuple of `(bound_model, effective_response_format)` where
        `effective_response_format` is the actual strategy used (may differ from
        initial if auto-detected).

    Raises:
        ValueError: If middleware returned unknown client-side tool names.
        ValueError: If `ToolStrategy` specifies tools not declared upfront.
    """
    # NOTE: names such as tool_node, structured_output_tools,
    # initial_response_format, tool_strategy_for_setup and the wrap_tool_call
    # wrappers are closure variables from the enclosing create_agent scope.

    # Validate ONLY client-side tools that need to exist in tool_node
    # Skip validation when wrap_tool_call is defined, as middleware may handle
    # dynamic tools that are added at runtime via wrap_model_call
    has_wrap_tool_call = wrap_tool_call_wrapper or awrap_tool_call_wrapper

    # Build map of available client-side tools from the ToolNode
    # (which has already converted callables)
    available_tools_by_name = {}
    if tool_node:
        available_tools_by_name = tool_node.tools_by_name.copy()

    # Check if any requested tools are unknown CLIENT-SIDE tools
    # Only validate if wrap_tool_call is NOT defined (no dynamic tool handling)
    if not has_wrap_tool_call:
        unknown_tool_names = []
        for t in request.tools:
            # Only validate BaseTool instances (skip built-in dict tools)
            if isinstance(t, dict):
                continue
            if isinstance(t, BaseTool) and t.name not in available_tools_by_name:
                unknown_tool_names.append(t.name)
        if unknown_tool_names:
            available_tool_names = sorted(available_tools_by_name.keys())
            msg = DYNAMIC_TOOL_ERROR_TEMPLATE.format(
                unknown_tool_names=unknown_tool_names,
                available_tool_names=available_tool_names,
            )
            raise ValueError(msg)

    # Normalize raw schemas to AutoStrategy
    # (handles middleware override with raw Pydantic classes)
    response_format: ResponseFormat[Any] | Any | None = request.response_format
    if response_format is not None and not isinstance(
        response_format, (AutoStrategy, ToolStrategy, ProviderStrategy)
    ):
        response_format = AutoStrategy(schema=response_format)

    # Determine effective response format (auto-detect if needed)
    effective_response_format: ResponseFormat[Any] | None
    if isinstance(response_format, AutoStrategy):
        # User provided raw schema via AutoStrategy - auto-detect best strategy based on model
        if _supports_provider_strategy(request.model, tools=request.tools):
            # Model supports provider strategy - use it
            effective_response_format = ProviderStrategy(schema=response_format.schema)
        elif response_format is initial_response_format and tool_strategy_for_setup is not None:
            # Model doesn't support provider strategy - use ToolStrategy
            # Reuse the strategy from setup if possible to preserve tool names
            effective_response_format = tool_strategy_for_setup
        else:
            effective_response_format = ToolStrategy(schema=response_format.schema)
    else:
        # User explicitly specified a strategy - preserve it
        effective_response_format = response_format

    # Build final tools list including structured output tools
    # request.tools now only contains BaseTool instances (converted from callables)
    # and dicts (built-ins)
    final_tools = list(request.tools)
    if isinstance(effective_response_format, ToolStrategy):
        # Add structured output tools to final tools list
        structured_tools = [info.tool for info in structured_output_tools.values()]
        final_tools.extend(structured_tools)

    # Bind model based on effective response format
    if isinstance(effective_response_format, ProviderStrategy):
        # (Backward compatibility) Use OpenAI format structured output
        kwargs = effective_response_format.to_model_kwargs()
        return (
            request.model.bind_tools(
                final_tools, strict=True, **kwargs, **request.model_settings
            ),
            effective_response_format,
        )

    if isinstance(effective_response_format, ToolStrategy):
        # Current implementation requires that tools used for structured output
        # have to be declared upfront when creating the agent as part of the
        # response format. Middleware is allowed to change the response format
        # to a subset of the original structured tools when using ToolStrategy,
        # but not to add new structured tools that weren't declared upfront.
        # Compute output binding
        for tc in effective_response_format.schema_specs:
            if tc.name not in structured_output_tools:
                msg = (
                    f"ToolStrategy specifies tool '{tc.name}' "
                    "which wasn't declared in the original "
                    "response format when creating the agent."
                )
                raise ValueError(msg)
        # Force tool use if we have structured output tools
        tool_choice = "any" if structured_output_tools else request.tool_choice
        return (
            request.model.bind_tools(
                final_tools, tool_choice=tool_choice, **request.model_settings
            ),
            effective_response_format,
        )

    # No structured output - standard model binding
    if final_tools:
        return (
            request.model.bind_tools(
                final_tools, tool_choice=request.tool_choice, **request.model_settings
            ),
            None,
        )
    return request.model.bind(**request.model_settings), None
def _execute_model_sync(request: ModelRequest[ContextT]) -> ModelResponse:
    """Run the model synchronously and package its output.

    This is the innermost handler that `wrap_model_call` middleware wraps.
    Any exception raised during model invocation propagates to the caller.
    """
    # Bind tools / structured-output strategy (auto-detected if needed).
    bound_model, effective_response_format = _get_bound_model(request)

    conversation = list(request.messages)
    if request.system_message:
        conversation.insert(0, request.system_message)

    output = bound_model.invoke(conversation)
    if name:
        # Stamp the agent name onto the output message for attribution.
        output.name = name

    # Convert the raw model output into messages + optional structured response.
    handled = _handle_model_output(output, effective_response_format)
    return ModelResponse(
        result=handled["messages"],
        structured_response=handled.get("structured_response"),
    )
def model_node(state: AgentState[Any], runtime: Runtime[ContextT]) -> list[Command[Any]]:
    """Sync model request handler with sequential middleware processing."""
    request = ModelRequest(
        model=model,
        tools=default_tools,
        system_message=system_message,
        response_format=initial_response_format,
        messages=state["messages"],
        tool_choice=None,
        state=state,
        runtime=runtime,
    )
    # With a wrap_model_call handler, let middleware drive execution;
    # otherwise invoke the model directly.
    if wrap_model_call_handler is not None:
        wrapped = wrap_model_call_handler(request, _execute_model_sync)
        return _build_commands(wrapped.model_response, wrapped.commands)
    return _build_commands(_execute_model_sync(request))
async def _execute_model_async(request: ModelRequest[ContextT]) -> ModelResponse:
    """Run the model asynchronously and package its output.

    Innermost async handler wrapped by `wrap_model_call` middleware.
    Any exception raised during model invocation propagates to the caller.
    """
    # Bind tools / structured-output strategy (auto-detected if needed).
    bound_model, effective_response_format = _get_bound_model(request)

    conversation = list(request.messages)
    if request.system_message:
        conversation.insert(0, request.system_message)

    output = await bound_model.ainvoke(conversation)
    if name:
        # Stamp the agent name onto the output message for attribution.
        output.name = name

    # Convert the raw model output into messages + optional structured response.
    handled = _handle_model_output(output, effective_response_format)
    return ModelResponse(
        result=handled["messages"],
        structured_response=handled.get("structured_response"),
    )
async def amodel_node(state: AgentState[Any], runtime: Runtime[ContextT]) -> list[Command[Any]]:
    """Async model request handler with sequential middleware processing."""
    request = ModelRequest(
        model=model,
        tools=default_tools,
        system_message=system_message,
        response_format=initial_response_format,
        messages=state["messages"],
        tool_choice=None,
        state=state,
        runtime=runtime,
    )
    # With an async wrap_model_call handler, let middleware drive execution;
    # otherwise invoke the model directly.
    if awrap_model_call_handler is not None:
        wrapped = await awrap_model_call_handler(request, _execute_model_async)
        return _build_commands(wrapped.model_response, wrapped.commands)
    return _build_commands(await _execute_model_async(request))
# --- Graph node registration -------------------------------------------------
# Use sync or async based on model capabilities
graph.add_node("model", RunnableCallable(model_node, amodel_node, trace=False))

# Only add tools node if we have tools
if tool_node is not None:
    graph.add_node("tools", tool_node)

# Add middleware nodes: one graph node per hook that the middleware actually
# overrides (before_agent / before_model / after_model / after_agent).
for m in middleware:
    if (
        m.__class__.before_agent is not AgentMiddleware.before_agent
        or m.__class__.abefore_agent is not AgentMiddleware.abefore_agent
    ):
        # Use RunnableCallable to support both sync and async
        # Pass None for sync if not overridden to avoid signature conflicts
        sync_before_agent = (
            m.before_agent
            if m.__class__.before_agent is not AgentMiddleware.before_agent
            else None
        )
        async_before_agent = (
            m.abefore_agent
            if m.__class__.abefore_agent is not AgentMiddleware.abefore_agent
            else None
        )
        before_agent_node = RunnableCallable(sync_before_agent, async_before_agent, trace=False)
        graph.add_node(
            f"{m.name}.before_agent", before_agent_node, input_schema=resolved_state_schema
        )

    if (
        m.__class__.before_model is not AgentMiddleware.before_model
        or m.__class__.abefore_model is not AgentMiddleware.abefore_model
    ):
        # Use RunnableCallable to support both sync and async
        # Pass None for sync if not overridden to avoid signature conflicts
        sync_before = (
            m.before_model
            if m.__class__.before_model is not AgentMiddleware.before_model
            else None
        )
        async_before = (
            m.abefore_model
            if m.__class__.abefore_model is not AgentMiddleware.abefore_model
            else None
        )
        before_node = RunnableCallable(sync_before, async_before, trace=False)
        graph.add_node(
            f"{m.name}.before_model", before_node, input_schema=resolved_state_schema
        )

    if (
        m.__class__.after_model is not AgentMiddleware.after_model
        or m.__class__.aafter_model is not AgentMiddleware.aafter_model
    ):
        # Use RunnableCallable to support both sync and async
        # Pass None for sync if not overridden to avoid signature conflicts
        sync_after = (
            m.after_model
            if m.__class__.after_model is not AgentMiddleware.after_model
            else None
        )
        async_after = (
            m.aafter_model
            if m.__class__.aafter_model is not AgentMiddleware.aafter_model
            else None
        )
        after_node = RunnableCallable(sync_after, async_after, trace=False)
        graph.add_node(f"{m.name}.after_model", after_node, input_schema=resolved_state_schema)

    if (
        m.__class__.after_agent is not AgentMiddleware.after_agent
        or m.__class__.aafter_agent is not AgentMiddleware.aafter_agent
    ):
        # Use RunnableCallable to support both sync and async
        # Pass None for sync if not overridden to avoid signature conflicts
        sync_after_agent = (
            m.after_agent
            if m.__class__.after_agent is not AgentMiddleware.after_agent
            else None
        )
        async_after_agent = (
            m.aafter_agent
            if m.__class__.aafter_agent is not AgentMiddleware.aafter_agent
            else None
        )
        after_agent_node = RunnableCallable(sync_after_agent, async_after_agent, trace=False)
        graph.add_node(
            f"{m.name}.after_agent", after_agent_node, input_schema=resolved_state_schema
        )
# --- Entry / exit node selection ---------------------------------------------
# Determine the entry node (runs once at start): before_agent -> before_model -> model
if middleware_w_before_agent:
    entry_node = f"{middleware_w_before_agent[0].name}.before_agent"
elif middleware_w_before_model:
    entry_node = f"{middleware_w_before_model[0].name}.before_model"
else:
    entry_node = "model"

# Determine the loop entry node (beginning of agent loop, excludes before_agent)
# This is where tools will loop back to for the next iteration
if middleware_w_before_model:
    loop_entry_node = f"{middleware_w_before_model[0].name}.before_model"
else:
    loop_entry_node = "model"

# Determine the loop exit node (end of each iteration, can run multiple times)
# This is after_model or model, but NOT after_agent
if middleware_w_after_model:
    loop_exit_node = f"{middleware_w_after_model[0].name}.after_model"
else:
    loop_exit_node = "model"

# Determine the exit node (runs once at end): after_agent or END
if middleware_w_after_agent:
    exit_node = f"{middleware_w_after_agent[-1].name}.after_agent"
else:
    exit_node = END
# --- Core edge wiring --------------------------------------------------------
graph.add_edge(START, entry_node)
# add conditional edges only if tools exist
if tool_node is not None:
    # Only include exit_node in destinations if any tool has return_direct=True
    # or if there are structured output tools
    tools_to_model_destinations = [loop_entry_node]
    if (
        any(tool.return_direct for tool in tool_node.tools_by_name.values())
        or structured_output_tools
    ):
        tools_to_model_destinations.append(exit_node)
    graph.add_conditional_edges(
        "tools",
        RunnableCallable(
            _make_tools_to_model_edge(
                tool_node=tool_node,
                model_destination=loop_entry_node,
                structured_output_tools=structured_output_tools,
                end_destination=exit_node,
            ),
            trace=False,
        ),
        tools_to_model_destinations,
    )
    # base destinations are tools and exit_node
    # we add the loop_entry node to edge destinations if:
    # - there is an after model hook(s) -- allows jump_to to model
    #   potentially artificially injected tool messages, ex HITL
    # - there is a response format -- to allow for jumping to model to handle
    #   regenerating structured output tool calls
    model_to_tools_destinations = ["tools", exit_node]
    if response_format or loop_exit_node != "model":
        model_to_tools_destinations.append(loop_entry_node)
    graph.add_conditional_edges(
        loop_exit_node,
        RunnableCallable(
            _make_model_to_tools_edge(
                model_destination=loop_entry_node,
                structured_output_tools=structured_output_tools,
                end_destination=exit_node,
            ),
            trace=False,
        ),
        model_to_tools_destinations,
    )
elif len(structured_output_tools) > 0:
    # No client-side tools, but structured output via tool calls: the model may
    # need to loop back to itself to regenerate a structured response.
    graph.add_conditional_edges(
        loop_exit_node,
        RunnableCallable(
            _make_model_to_model_edge(
                model_destination=loop_entry_node,
                end_destination=exit_node,
            ),
            trace=False,
        ),
        [loop_entry_node, exit_node],
    )
elif loop_exit_node == "model":
    # If no tools and no after_model, go directly to exit_node
    graph.add_edge(loop_exit_node, exit_node)
# No tools but we have after_model - connect after_model to exit_node
else:
    _add_middleware_edge(
        graph,
        name=f"{middleware_w_after_model[0].name}.after_model",
        default_destination=exit_node,
        model_destination=loop_entry_node,
        end_destination=exit_node,
        can_jump_to=_get_can_jump_to(middleware_w_after_model[0], "after_model"),
    )
# --- Middleware chain wiring and compilation ---------------------------------
# Add before_agent middleware edges
if middleware_w_before_agent:
    for m1, m2 in itertools.pairwise(middleware_w_before_agent):
        _add_middleware_edge(
            graph,
            name=f"{m1.name}.before_agent",
            default_destination=f"{m2.name}.before_agent",
            model_destination=loop_entry_node,
            end_destination=exit_node,
            can_jump_to=_get_can_jump_to(m1, "before_agent"),
        )
    # Connect last before_agent to loop_entry_node (before_model or model)
    _add_middleware_edge(
        graph,
        name=f"{middleware_w_before_agent[-1].name}.before_agent",
        default_destination=loop_entry_node,
        model_destination=loop_entry_node,
        end_destination=exit_node,
        can_jump_to=_get_can_jump_to(middleware_w_before_agent[-1], "before_agent"),
    )

# Add before_model middleware edges
if middleware_w_before_model:
    for m1, m2 in itertools.pairwise(middleware_w_before_model):
        _add_middleware_edge(
            graph,
            name=f"{m1.name}.before_model",
            default_destination=f"{m2.name}.before_model",
            model_destination=loop_entry_node,
            end_destination=exit_node,
            can_jump_to=_get_can_jump_to(m1, "before_model"),
        )
    # Go directly to model after the last before_model
    _add_middleware_edge(
        graph,
        name=f"{middleware_w_before_model[-1].name}.before_model",
        default_destination="model",
        model_destination=loop_entry_node,
        end_destination=exit_node,
        can_jump_to=_get_can_jump_to(middleware_w_before_model[-1], "before_model"),
    )

# Add after_model middleware edges
# after_model hooks chain in REVERSE registration order: the model feeds the
# last-registered after_model, which chains back toward the first.
if middleware_w_after_model:
    graph.add_edge("model", f"{middleware_w_after_model[-1].name}.after_model")
    for idx in range(len(middleware_w_after_model) - 1, 0, -1):
        m1 = middleware_w_after_model[idx]
        m2 = middleware_w_after_model[idx - 1]
        _add_middleware_edge(
            graph,
            name=f"{m1.name}.after_model",
            default_destination=f"{m2.name}.after_model",
            model_destination=loop_entry_node,
            end_destination=exit_node,
            can_jump_to=_get_can_jump_to(m1, "after_model"),
        )
    # Note: Connection from after_model to after_agent/END is handled above
    # in the conditional edges section

# Add after_agent middleware edges
if middleware_w_after_agent:
    # Chain after_agent middleware (runs once at the very end, before END)
    for idx in range(len(middleware_w_after_agent) - 1, 0, -1):
        m1 = middleware_w_after_agent[idx]
        m2 = middleware_w_after_agent[idx - 1]
        _add_middleware_edge(
            graph,
            name=f"{m1.name}.after_agent",
            default_destination=f"{m2.name}.after_agent",
            model_destination=loop_entry_node,
            end_destination=exit_node,
            can_jump_to=_get_can_jump_to(m1, "after_agent"),
        )
    # Connect the last after_agent to END
    _add_middleware_edge(
        graph,
        name=f"{middleware_w_after_agent[0].name}.after_agent",
        default_destination=END,
        model_destination=loop_entry_node,
        end_destination=exit_node,
        can_jump_to=_get_can_jump_to(middleware_w_after_agent[0], "after_agent"),
    )

# High recursion limit: the agent loop may legitimately iterate many times.
config: RunnableConfig = {"recursion_limit": 10_000}
if name:
    config["metadata"] = {"lc_agent_name": name}
return graph.compile(
    checkpointer=checkpointer,
    store=store,
    interrupt_before=interrupt_before,
    interrupt_after=interrupt_after,
    debug=debug,
    name=name,
    cache=cache,
).with_config(config)
def _resolve_jump(
    jump_to: JumpTo | None,
    *,
    model_destination: str,
    end_destination: str,
) -> str | None:
    """Translate a middleware ``jump_to`` directive into a concrete node name.

    Returns ``None`` when there is no directive or it is unrecognized.
    """
    targets = {
        "model": model_destination,
        "end": end_destination,
        "tools": "tools",
    }
    return targets.get(jump_to)
def _fetch_last_ai_and_tool_messages(
    messages: list[AnyMessage],
) -> tuple[AIMessage | None, list[ToolMessage]]:
    """Locate the most recent `AIMessage` and the `ToolMessage`s following it.

    Args:
        messages: List of messages to search through.

    Returns:
        A tuple of `(last_ai_message, tool_messages)`. If no `AIMessage` is
        found, returns `(None, [])`. Callers must handle the `None` case.
    """
    # Walk backwards; the first AIMessage found is the most recent one.
    for offset, candidate in enumerate(reversed(messages)):
        if not isinstance(candidate, AIMessage):
            continue
        split = len(messages) - offset
        trailing_tools = [m for m in messages[split:] if isinstance(m, ToolMessage)]
        return cast("AIMessage", candidate), trailing_tools
    return None, []
def _make_model_to_tools_edge(
    *,
    model_destination: str,
    structured_output_tools: dict[str, OutputToolBinding[Any]],
    end_destination: str,
) -> Callable[[dict[str, Any]], str | list[Send] | None]:
    """Build the router that runs after the model step when a tool node exists.

    The returned callable inspects state and returns one of: an explicit jump
    target, the exit node, `Send` packets for the tool node, or the model node.
    The numbered checks below are order-sensitive.
    """

    def model_to_tools(
        state: dict[str, Any],
    ) -> str | list[Send] | None:
        # 1. If there's an explicit jump_to in the state, use it
        if jump_to := state.get("jump_to"):
            return _resolve_jump(
                jump_to,
                model_destination=model_destination,
                end_destination=end_destination,
            )

        last_ai_message, tool_messages = _fetch_last_ai_and_tool_messages(state["messages"])

        # 2. if no AIMessage exists (e.g., messages were cleared), exit the loop
        if last_ai_message is None:
            return end_destination

        tool_message_ids = [m.tool_call_id for m in tool_messages]

        # 3. If the model hasn't called any tools, exit the loop
        #    this is the classic exit condition for an agent loop
        if len(last_ai_message.tool_calls) == 0:
            return end_destination

        # Tool calls not yet answered by a ToolMessage, excluding
        # structured-output tools (those never route to the tool node).
        pending_tool_calls = [
            c
            for c in last_ai_message.tool_calls
            if c["id"] not in tool_message_ids and c["name"] not in structured_output_tools
        ]

        # 4. If there are pending tool calls, jump to the tool node
        if pending_tool_calls:
            return [
                Send(
                    "tools",
                    ToolCallWithContext(
                        __type="tool_call_with_context",
                        tool_call=tool_call,
                        state=state,
                    ),
                )
                for tool_call in pending_tool_calls
            ]

        # 5. If there is a structured response, exit the loop
        if "structured_response" in state:
            return end_destination

        # 6. AIMessage has tool calls, but there are no pending tool calls which suggests
        #    the injection of artificial tool messages. Jump to the model node
        return model_destination

    return model_to_tools
def _make_model_to_model_edge(
    *,
    model_destination: str,
    end_destination: str,
) -> Callable[[dict[str, Any]], str | list[Send] | None]:
    """Build the router used when structured output is configured but no tool
    node exists: loop the model until a structured response is produced."""

    def model_to_model(
        state: dict[str, Any],
    ) -> str | list[Send] | None:
        # An explicit middleware directive takes priority over everything else.
        directive = state.get("jump_to")
        if directive:
            return _resolve_jump(
                directive,
                model_destination=model_destination,
                end_destination=end_destination,
            )
        # A structured response means the agent is finished.
        if "structured_response" in state:
            return end_destination
        # Otherwise loop back: structured output generation may have failed
        # and needs a retry.
        return model_destination

    return model_to_model
def _make_tools_to_model_edge(
    *,
    tool_node: ToolNode,
    model_destination: str,
    structured_output_tools: dict[str, OutputToolBinding[Any]],
    end_destination: str,
) -> Callable[[dict[str, Any]], str | None]:
    """Build the router that decides where to go after tool execution."""

    def tools_to_model(state: dict[str, Any]) -> str | None:
        last_ai_message, tool_messages = _fetch_last_ai_and_tool_messages(state["messages"])

        # No AIMessage (e.g. messages were cleared): restart at the model.
        if last_ai_message is None:
            return model_destination

        # Exit when every executed client-side tool is return_direct.
        # Provider-side tools are absent from tool_node and are ignored here.
        known = tool_node.tools_by_name
        client_side_calls = [c for c in last_ai_message.tool_calls if c["name"] in known]
        if client_side_calls and all(
            known[c["name"]].return_direct for c in client_side_calls
        ):
            return end_destination

        # Exit when a structured-output tool has produced a message.
        if any(msg.name in structured_output_tools for msg in tool_messages):
            return end_destination

        # Default: hand the tool results back to the model for the next step.
        return model_destination

    return tools_to_model
def _add_middleware_edge(
    graph: StateGraph[
        AgentState[ResponseT], ContextT, _InputAgentState, _OutputAgentState[ResponseT]
    ],
    *,
    name: str,
    default_destination: str,
    model_destination: str,
    end_destination: str,
    can_jump_to: list[JumpTo] | None,
) -> None:
    """Add an edge to the graph for a middleware node.

    Args:
        graph: The graph to add the edge to.
        name: The name of the middleware node.
        default_destination: The default destination for the edge.
        model_destination: The destination for the edge to the model.
        end_destination: The destination for the edge to the end.
        can_jump_to: The conditionally jumpable destinations for the edge.
    """
    if not can_jump_to:
        # No conditional jumps declared: a plain static edge suffices.
        graph.add_edge(name, default_destination)
        return

    def jump_edge(state: dict[str, Any]) -> str:
        # Resolve an explicit jump_to directive, falling back to the default.
        return (
            _resolve_jump(
                state.get("jump_to"),
                model_destination=model_destination,
                end_destination=end_destination,
            )
            or default_destination
        )

    destinations = [default_destination]
    for target, node in (("end", end_destination), ("tools", "tools"), ("model", model_destination)):
        # A node never jumps to itself, hence the extra check for "model".
        if target in can_jump_to and (target != "model" or name != model_destination):
            destinations.append(node)

    graph.add_conditional_edges(name, RunnableCallable(jump_edge, trace=False), destinations)
# Public API of this module.
__all__ = [
    "create_agent",
]
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain_v1/langchain/agents/factory.py
|
package daemon
import (
"context"
"errors"
"fmt"
"io"
"github.com/containerd/log"
"github.com/moby/go-archive"
"github.com/moby/go-archive/chrootarchive"
"github.com/moby/go-archive/compression"
"github.com/moby/moby/api/types/events"
"github.com/moby/moby/v2/daemon/container"
"github.com/moby/moby/v2/errdefs"
)
// ContainerExport writes the contents of the container to the given
// writer. An error is returned if the container cannot be found.
func (daemon *Daemon) ContainerExport(ctx context.Context, name string, out io.Writer) error {
	ctr, err := daemon.GetContainer(name)
	if err != nil {
		return err
	}

	// Exporting Windows containers is not supported by the Windows daemon.
	if isWindows && ctr.ImagePlatform.OS == "windows" {
		return errors.New("the daemon on this operating system does not support exporting Windows containers")
	}

	// Refuse to export containers in transient states; wrapped as a conflict
	// error so API callers can distinguish it.
	if ctr.State.IsDead() {
		err := fmt.Errorf("You cannot export container %s which is Dead", ctr.ID)
		return errdefs.Conflict(err)
	}

	if ctr.State.IsRemovalInProgress() {
		err := fmt.Errorf("You cannot export container %s which is being removed", ctr.ID)
		return errdefs.Conflict(err)
	}

	err = daemon.containerExport(ctx, ctr, out)
	if err != nil {
		return fmt.Errorf("Error exporting container %s: %v", name, err)
	}
	return nil
}
// containerExport streams a tar archive of the container's root filesystem
// to out. The RWLayer is mounted for the duration of the copy and unmounted
// on return; cancelling ctx closes the archive to abort an in-flight copy.
func (daemon *Daemon) containerExport(ctx context.Context, ctr *container.Container, out io.Writer) error {
	rwl := ctr.RWLayer
	if rwl == nil {
		return fmt.Errorf("container %s has no rootfs", ctr.ID)
	}
	// Bail out early if the request was already cancelled before mounting.
	if err := ctx.Err(); err != nil {
		return err
	}
	basefs, err := rwl.Mount(ctr.GetMountLabel())
	if err != nil {
		return err
	}
	// Always unmount, even on error paths; a failed unmount is logged, not fatal.
	defer func() {
		if err := rwl.Unmount(); err != nil {
			log.G(ctx).WithFields(log.Fields{"error": err, "container": ctr.ID}).Warn("Failed to unmount container RWLayer after export")
		}
	}()
	archv, err := chrootarchive.Tar(basefs, &archive.TarOptions{
		Compression: compression.None,
		IDMap:       daemon.idMapping,
	}, basefs)
	if err != nil {
		return err
	}
	// Close the archive when ctx is cancelled so the io.Copy below unblocks.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	context.AfterFunc(ctx, func() {
		_ = archv.Close()
	})
	// Stream the entire contents of the container (basically a volatile snapshot)
	if _, err := io.Copy(out, archv); err != nil {
		// Prefer reporting cancellation over the (likely secondary) copy error.
		if err := ctx.Err(); err != nil {
			return errdefs.Cancelled(err)
		}
		return err
	}
	daemon.LogContainerEvent(ctr, events.ActionExport)
	return nil
}
|
go
|
github
|
https://github.com/moby/moby
|
daemon/export.go
|
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal;
import org.elasticsearch.gradle.Version;
import org.elasticsearch.gradle.internal.info.DevelopmentBranch;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import static java.util.Collections.reverseOrder;
import static java.util.Collections.unmodifiableList;
import static java.util.Comparator.comparing;
/**
* A container for elasticsearch supported version information used in BWC testing.
* <p>
* Parse the Java source file containing the versions declarations and use the known rules to figure out which are all
* the version the current one is wire and index compatible with.
* On top of this, figure out which of these are unreleased and provide the branch they can be built from.
* <p>
* Note that in this context, currentVersion is the unreleased version this build operates on.
* At any point in time there will be at least three such versions and potentially four in the case of a staged release.
* <p>
* <ul>
* <li>the current version on the `main` branch</li>
* <li>the staged next <b>minor</b> on the `M.N` branch</li>
* <li>the unreleased <b>bugfix</b>, `M.N-1` branch</li>
* <li>the unreleased <b>maintenance</b>, M-1.d.e ( d > 0, e > 0) on the `(M-1).d` branch</li>
* </ul>
* <p>
* Each build is only concerned with versions before it, as those are the ones that need to be tested
* for backwards compatibility. We never look forward, and don't add forward facing version number to branches of previous
* version.
* <p>
* Each branch has a current version, and expected compatible versions are parsed from the server code's Version` class.
* We can reliably figure out which the unreleased versions are due to the convention of always adding the next unreleased
* version number to server in all branches when a version is released.
* E.x when M.N.c is released M.N.c+1 is added to the Version class mentioned above in all the following branches:
* `M.N`, and `main` so we can reliably assume that the leafs of the version tree are unreleased.
* This convention is enforced by checking the versions we consider to be unreleased against an
* authoritative source (maven central).
* We are then able to map the unreleased version to branches in git and Gradle projects that are capable of checking
* out and building them, so we can include these in the testing plan as well.
*/
public class BwcVersions implements Serializable {
// Env var name for the glibc version used in BWC builds.
// NOTE(review): not referenced in the visible portion of this class — confirm usage elsewhere.
private static final String GLIBC_VERSION_ENV_VAR = "GLIBC_VERSION";

// The unreleased version this build operates on (last entry of the parsed version list).
private final Version currentVersion;
// All versions parsed from the server Version class; transient, so excluded from serialization.
private final transient List<Version> versions;
// Unreleased versions mapped to their branch/Gradle-project info.
private final Map<Version, UnreleasedVersionInfo> unreleased;
/**
 * Creates the BWC version container.
 *
 * @param currentVersionProperty the current version as configured in build properties;
 *        must match the newest parsed version
 * @param allVersions            all versions parsed from the server Version class (ordered,
 *        newest last); must not be empty
 * @param developmentBranches    the active development branches used to locate unreleased versions
 */
public BwcVersions(Version currentVersionProperty, List<Version> allVersions, List<DevelopmentBranch> developmentBranches) {
    if (allVersions.isEmpty()) {
        throw new IllegalArgumentException("Could not parse any versions");
    }
    this.versions = allVersions;
    // By convention the parsed list is ordered, so the last element is the current version.
    this.currentVersion = allVersions.get(allVersions.size() - 1);
    assertCurrentVersionMatchesParsed(currentVersionProperty);
    this.unreleased = computeUnreleased(developmentBranches);
}
/**
 * Guard: the version configured in build properties must equal the newest parsed version.
 *
 * @throws IllegalStateException on mismatch
 */
private void assertCurrentVersionMatchesParsed(Version currentVersionProperty) {
    if (currentVersionProperty.equals(currentVersion)) {
        return;
    }
    throw new IllegalStateException(
        "Parsed versions latest version does not match the one configured in build properties. "
            + "Parsed latest version is "
            + currentVersion
            + " but the build has "
            + currentVersionProperty
    );
}
/**
 * Returns info about the unreleased version, or {@code null} if the version is released.
 *
 * @param version the version to look up
 * @return branch/project info for the unreleased version, or {@code null} when released
 */
public UnreleasedVersionInfo unreleasedInfo(Version version) {
    return unreleased.get(version);
}
/**
 * Invokes the consumer for every unreleased version except the current one,
 * in ascending version order.
 */
public void forPreviousUnreleased(Consumer<UnreleasedVersionInfo> consumer) {
    for (Version version : getUnreleased()) {
        if (version.equals(currentVersion) == false) {
            consumer.accept(unreleased.get(version));
        }
    }
}
/**
 * Builds the map of unreleased versions to {@link UnreleasedVersionInfo}, keyed and
 * naturally ordered by version, derived from the configured development branches.
 */
private Map<Version, UnreleasedVersionInfo> computeUnreleased(List<DevelopmentBranch> developmentBranches) {
    Map<Version, UnreleasedVersionInfo> result = new TreeMap<>();
    // Group branches older than the current version by distance of their major:
    // same major -> "minor", one major back -> "major", anything else -> "older".
    // NOTE(review): the "older" group is computed but never read below.
    Map<String, List<DevelopmentBranch>> bwcBranches = developmentBranches.stream()
        .filter(developmentBranch -> developmentBranch.version().before(currentVersion))
        .sorted(reverseOrder(comparing(DevelopmentBranch::version)))
        .collect(Collectors.groupingBy(branch -> {
            if (branch.version().getMajor() == currentVersion.getMajor()) {
                return "minor";
            } else if (branch.version().getMajor() == currentVersion.getMajor() - 1) {
                return "major";
            }
            return "older";
        }));

    // The current version itself is built from ":distribution" on its own branch.
    developmentBranches.stream()
        .filter(branch -> branch.version().equals(currentVersion))
        .findFirst()
        .ifPresent(
            developmentBranch -> result.put(
                currentVersion,
                new UnreleasedVersionInfo(currentVersion, developmentBranch.name(), ":distribution")
            )
        );

    // Previous minors map to ":distribution:bwc:minor1", ":distribution:bwc:minor2", ...
    // (index order follows the descending version sort above).
    List<DevelopmentBranch> previousMinorBranches = bwcBranches.getOrDefault("minor", Collections.emptyList());
    for (int i = 0; i < previousMinorBranches.size(); i++) {
        DevelopmentBranch previousMinorBranch = previousMinorBranches.get(i);
        result.put(
            previousMinorBranch.version(),
            new UnreleasedVersionInfo(previousMinorBranch.version(), previousMinorBranch.name(), ":distribution:bwc:minor" + (i + 1))
        );
    }

    // Previous majors map to ":distribution:bwc:major1", ":distribution:bwc:major2", ...
    List<DevelopmentBranch> previousMajorBranches = bwcBranches.getOrDefault("major", Collections.emptyList());
    for (int i = 0; i < previousMajorBranches.size(); i++) {
        DevelopmentBranch previousMajorBranch = previousMajorBranches.get(i);
        result.put(
            previousMajorBranch.version(),
            new UnreleasedVersionInfo(previousMajorBranch.version(), previousMajorBranch.name(), ":distribution:bwc:major" + (i + 1))
        );
    }
    return Collections.unmodifiableMap(result);
}
/**
 * Returns all unreleased versions, in ascending order, as an unmodifiable list.
 */
public List<Version> getUnreleased() {
    List<Version> ascending = new ArrayList<>(unreleased.keySet());
    Collections.sort(ascending);
    return unmodifiableList(ascending);
}
/**
 * If any previous-major version is still unreleased, returns empty (the previous major is
 * still maintained). Otherwise returns info for the latest <b>released</b> previous-major
 * version, built from ":distribution:bwc:major1" on its "M.N" branch.
 */
public Optional<UnreleasedVersionInfo> getUnmaintainedPreviousMajor() {
    return getUnreleased().stream()
        .filter(v -> v.getMajor() == currentVersion.getMajor() - 1)
        // min over a reversed comparator == the maximum, i.e. the latest such version
        .min(Comparator.reverseOrder())
        .isPresent()
            ? Optional.empty()
            : getReleased().stream()
                .filter(v -> v.getMajor() == currentVersion.getMajor() - 1)
                .min(Comparator.reverseOrder())
                .map(
                    version -> new UnreleasedVersionInfo(
                        version,
                        String.format("%d.%d", version.getMajor(), version.getMinor()),
                        ":distribution:bwc:major1"
                    )
                );
}
/**
 * Validates this instance's released/unreleased classification against an
 * authoritative list of released versions.
 *
 * @throws IllegalStateException when either side disagrees with the other
 */
public void compareToAuthoritative(List<Version> authoritativeReleasedVersions) {
    // Versions the build treats as released that the authoritative source does not know.
    Set<Version> falselyReleased = new HashSet<>(getReleased());
    authoritativeReleasedVersions.forEach(falselyReleased::remove);
    if (falselyReleased.isEmpty() == false) {
        throw new IllegalStateException(
            "out-of-date released versions"
                + "\nFollowing versions are not really released, but the build thinks they are: "
                + falselyReleased
        );
    }
    // Versions the build treats as unreleased that are in fact already released.
    Set<Version> falselyUnreleased = new HashSet<>(authoritativeReleasedVersions);
    falselyUnreleased.retainAll(getUnreleased());
    if (falselyUnreleased.isEmpty() == false) {
        throw new IllegalStateException(
            "out-of-date released versions"
                + "\nBuild considers versions unreleased, "
                + "but they are released according to an authoritative source: "
                + falselyUnreleased
                + "\nThe next versions probably needs to be added to Version.java (CURRENT doesn't count)."
        );
    }
}
/** Versions of the current and previous major that have already been released. */
public List<Version> getReleased() {
    return versions.stream()
        .filter(version -> version.getMajor() >= currentVersion.getMajor() - 1)
        .filter(version -> unreleased.containsKey(version) == false)
        .toList();
}

/**
 * Versions whose indices the current version can still open read-only.
 * Lucene can read indices in version N-2.
 */
public List<Version> getReadOnlyIndexCompatible() {
    final int readOnlyMajor = currentVersion.getMajor() - 2;
    return versions.stream()
        .filter(version -> version.getMajor() == readOnlyMajor)
        .sorted(Comparator.naturalOrder())
        .toList();
}
/**
 * Applies {@code versionAction} to the newest read-only index compatible version.
 *
 * @throws IllegalStateException when no read-only compatible version exists
 */
public void withLatestReadOnlyIndexCompatible(Consumer<Version> versionAction) {
    List<Version> readOnlyCompatible = getReadOnlyIndexCompatible();
    if (readOnlyCompatible == null || readOnlyCompatible.isEmpty()) {
        throw new IllegalStateException("No read-only compatible version found.");
    }
    versionAction.accept(readOnlyCompatible.getLast());
}
/**
 * Return versions of Elasticsearch which are index compatible with the current version.
 */
public List<Version> getIndexCompatible() {
    final int minimumMajor = currentVersion.getMajor() - 1;
    return versions.stream().filter(version -> version.getMajor() >= minimumMajor).toList();
}

/** Applies {@code versionAction} to every index compatible version, paired with its "v"-prefixed label. */
public void withIndexCompatible(BiConsumer<Version, String> versionAction) {
    for (Version version : getIndexCompatible()) {
        versionAction.accept(version, "v" + version.toString());
    }
}

/** Applies {@code versionAction} to the index compatible versions matching {@code filter}. */
public void withIndexCompatible(Predicate<Version> filter, BiConsumer<Version, String> versionAction) {
    for (Version version : getIndexCompatible()) {
        if (filter.test(version)) {
            versionAction.accept(version, "v" + version.toString());
        }
    }
}
/** Versions at or above the minimum wire compatible version. */
public List<Version> getWireCompatible() {
    final Version minimum = getMinimumWireCompatibleVersion();
    return versions.stream().filter(version -> version.compareTo(minimum) >= 0).toList();
}

/** Applies {@code versionAction} to every wire compatible version, paired with its "v"-prefixed label. */
public void withWireCompatible(BiConsumer<Version, String> versionAction) {
    for (Version version : getWireCompatible()) {
        versionAction.accept(version, "v" + version.toString());
    }
}

/** Applies {@code versionAction} to the wire compatible versions matching {@code filter}. */
public void withWireCompatible(Predicate<Version> filter, BiConsumer<Version, String> versionAction) {
    for (Version version : getWireCompatible()) {
        if (filter.test(version)) {
            versionAction.accept(version, "v" + version.toString());
        }
    }
}
/** Index compatible versions that have not yet been released. */
public List<Version> getUnreleasedIndexCompatible() {
    List<Version> result = new ArrayList<>(getIndexCompatible());
    result.retainAll(getUnreleased());
    return unmodifiableList(result);
}

/** Wire compatible versions that have not yet been released. */
public List<Version> getUnreleasedWireCompatible() {
    List<Version> result = new ArrayList<>(getWireCompatible());
    result.retainAll(getUnreleased());
    return unmodifiableList(result);
}
/**
 * Determine minimum wire compatible version from the list of known versions.
 * Current BWC policy states the minimum wire compatible version is the last minor release of the previous major version.
 *
 * @throws IllegalStateException when no such version can be determined
 */
public Version getMinimumWireCompatibleVersion() {
    final int previousMajor = currentVersion.getMajor() - 1;
    return versions.stream()
        // revision 0 == a ".0" release, i.e. the first release of a minor
        .filter(version -> version.getRevision() == 0 && version.getMajor() == previousMajor)
        .max(Comparator.naturalOrder())
        .orElseThrow(() -> new IllegalStateException("Unable to determine minimum wire compatible version."));
}
/** The version of Elasticsearch currently being built. */
public Version getCurrentVersion() {
    return currentVersion;
}

/** Describes an unreleased version: its number, the branch it lives on, and the Gradle project that builds it. */
public record UnreleasedVersionInfo(Version version, String branch, String gradleProjectPath) {}
/**
 * Determine whether the given version of Elasticsearch is compatible with ML features on the host system.
 *
 * @see <a href="https://github.com/elastic/elasticsearch/issues/86877">https://github.com/elastic/elasticsearch/issues/86877</a>
 */
public static boolean isMlCompatible(Version version) {
    Version glibcVersion = Optional.ofNullable(System.getenv(GLIBC_VERSION_ENV_VAR))
        .map(v -> Version.fromString(v, Version.Mode.RELAXED))
        .orElse(null);
    // glibc version 2.34 introduced incompatibilities in ML syscall filters that were fixed in 7.17.5+ and 8.2.2+
    if (glibcVersion == null || glibcVersion.before(Version.fromString("2.34", Version.Mode.RELAXED))) {
        return true;
    }
    if (version.before(Version.fromString("7.17.5"))) {
        return false;
    }
    boolean brokenEightRelease = version.getMajor() > 7 && version.before(Version.fromString("8.2.2"));
    return brokenEightRelease == false;
}
}
|
java
|
github
|
https://github.com/elastic/elasticsearch
|
build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java
|
#encoding=utf8
import os
import sys
import math
import pdb
import random
class BinNode:
    """A node of a binomial tree (left-child / next-sibling representation)."""

    def __init__(self, e):
        self.element = e          # key stored in this node
        self.left_child = None    # highest-rank subtree of this node
        self.next_sibling = None  # next subtree in the parent's child list


class BinQueue:
    """A binomial queue: a forest of min-heap-ordered binomial trees.

    Insert is O(1) amortized (O(log N) worst case); delete_min and merge
    are O(log N).  The queue holds at most ``max_trees`` trees, so it can
    store fewer than 2**max_trees elements; inserting beyond that is
    unsupported.

    Ported to Python 3: ``sys.maxint`` no longer exists, ``sys.maxsize``
    is used as the sentinel instead (works on Python 2 as well).
    """

    MAX = sys.maxsize    # sentinel greater than any stored key
    MIN = -sys.maxsize   # sentinel smaller than any stored key

    def __init__(self, max_trees=32):
        self._max_trees = max_trees
        self._trees = [None] * max_trees  # _trees[k] holds the rank-k binomial tree, or None
        self._size = 0

    def trees(self):
        """Return the maximum number of trees this queue can hold."""
        return self._max_trees

    def empty(self):
        """Return True when the queue holds no elements."""
        return self._size == 0

    def size(self):
        """Return the number of stored elements."""
        return self._size

    def insert(self, x):
        """Insert key ``x`` by merging a one-element queue into self."""
        node = BinNode(x)
        H = BinQueue(1)
        H._size = 1
        H._trees[0] = node
        self.__merge(self, H)
        return True

    def __combine_trees(self, T1, T2):
        """Link two rank-k trees into one rank-(k+1) tree.

        The tree with the larger root becomes the first child of the other,
        preserving min-heap order.  Implemented with a swap rather than
        recursion so equal keys cannot cause an infinite loop.
        """
        if T1.element > T2.element:
            T1, T2 = T2, T1
        # T1 now has the smaller root; prepend T2 to its child list.
        T2.next_sibling = T1.left_child
        T1.left_child = T2
        return T1

    def __merge(self, H1, H2):
        """Merge H2 into H1 in place and return H1.

        Works like a binary ripple-carry adder: two trees of equal rank
        combine into a single "carry" tree of the next rank.
        """
        H1._size += H2._size
        carry = None  # carry tree produced by combining two equal-rank trees
        i = 0
        j = 1  # j == 2**i; loop while a rank-i tree could still exist
        while j <= H1.size():
            T1 = H1._trees[i] if i < len(H1._trees) else None
            T2 = H2._trees[i] if i < len(H2._trees) else None
            # Encode presence of (carry, T2, T1) as a 3-bit case number.
            k = (not not carry) << 2 | (not not T2) << 1 | (not not T1)
            if k in (0, 1):  # no trees, or only H1's tree: already in place
                pass
            elif k == 2:  # only H2
                H1._trees[i] = T2
                H2._trees[i] = None
            elif k == 3:  # H1 and H2
                carry = self.__combine_trees(T1, T2)
                H1._trees[i] = H2._trees[i] = None
            elif k == 4:  # only carry
                H1._trees[i] = carry
                carry = None
            elif k == 5:  # H1 and carry
                carry = self.__combine_trees(T1, carry)
                H1._trees[i] = None
            elif k == 6:  # H2 and carry
                carry = self.__combine_trees(T2, carry)
                H2._trees[i] = None
            elif k == 7:  # H1, H2 and carry
                H1._trees[i] = carry
                carry = self.__combine_trees(T1, T2)
                H2._trees[i] = None
            i += 1
            j *= 2
        return H1

    def delete_min(self):
        """Remove and return the smallest key.

        Let Bk be the tree whose root is the minimum.  H' = H - Bk, and
        H'' = Bk with its root removed (its children form a smaller
        binomial queue); the new queue is H' merged with H''.
        """
        assert not self.empty()
        min_item = self.MAX
        min_tree_idx = None
        for i in range(self.trees()):
            if self._trees[i] and self._trees[i].element < min_item:
                min_item = self._trees[i].element
                min_tree_idx = i
        # Children of the removed root are trees of ranks min_tree_idx-1..0,
        # linked largest-first through next_sibling.
        deleted_tree = self._trees[min_tree_idx].left_child
        deleted_queue = BinQueue(min_tree_idx)
        # A rank-k tree holds 2**k nodes; one (the root) has been removed.
        deleted_queue._size = 2 ** min_tree_idx - 1
        for i in range(min_tree_idx - 1, -1, -1):
            deleted_queue._trees[i] = deleted_tree
            deleted_tree = deleted_tree.next_sibling
            deleted_queue._trees[i].next_sibling = None
        self._trees[min_tree_idx] = None  # H'
        self._size -= deleted_queue.size() + 1
        self.__merge(self, deleted_queue)  # H' + H''
        return min_item
if __name__ == '__main__':
    # Self-test: push random keys through the queue and verify that
    # repeated delete_min yields them in sorted order.
    sample_count = 5000
    queue = BinQueue(13)
    expected = []
    for _ in range(sample_count):
        value = random.randint(0, 10000)
        expected.append(value)
        queue.insert(value)
    expected = sorted(expected)
    drained = [queue.delete_min() for _ in range(sample_count)]
    for want, got in zip(expected, drained):
        assert want == got, '%s vs %s' % (want, got)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from .xmlreader import InputSource
from .handler import ContentHandler, ErrorHandler
from ._exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
    """Parse ``source`` with a default parser, reporting to ``handler``."""
    parser = make_parser()
    parser.setErrorHandler(errorHandler)
    parser.setContentHandler(handler)
    parser.parse(source)
def parseString(string, handler, errorHandler=None):
    """Parse ``string`` (a bytes object) with a default parser.

    A fresh ErrorHandler is created when ``errorHandler`` is None.  The
    default was previously a single ErrorHandler() instance created at
    function-definition time and shared by every call (the mutable-default
    pitfall); defaulting to None keeps the same behavior without sharing.
    """
    from io import BytesIO

    if errorHandler is None:
        errorHandler = ErrorHandler()
    parser = make_parser()
    parser.setContentHandler(handler)
    parser.setErrorHandler(errorHandler)

    inpsrc = InputSource()
    inpsrc.setByteStream(BytesIO(string))
    parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.expatreader"]

# tell modulefinder that importing sax potentially imports expatreader
# (the import is never executed; _false is always 0)
_false = 0
if _false:
    import xml.sax.expatreader

import os, sys
#if "PY_SAX_PARSER" in os.environ:
#    default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os

# On Jython, allow the registry to override the default parser list.
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
    default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list=()):
    """Creates and returns a SAX parser.

    Creates the first parser it is able to instantiate of the ones
    given in the list created by doing parser_list +
    default_parser_list.  The lists must contain the names of Python
    modules containing both a SAX parser and a create_parser function.

    ``parser_list`` now defaults to an immutable tuple instead of a
    shared mutable list (mutable-default pitfall); callers may still
    pass a list as before.
    """
    for parser_name in list(parser_list) + default_parser_list:
        try:
            return _create_parser(parser_name)
        except ImportError:
            import sys
            if parser_name in sys.modules:
                # The parser module was found, but importing it
                # failed unexpectedly, pass this exception through
                raise
        except SAXReaderNotAvailable:
            # The parser module detected that it won't work properly,
            # so try the next one
            pass

    raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser

if sys.platform[ : 4] == "java":
    # Jython: import the driver module through the Java-side import machinery.
    def _create_parser(parser_name):
        from org.python.core import imp
        drv_module = imp.importName(parser_name, 0, globals())
        return drv_module.create_parser()

else:
    # CPython: a plain __import__ of the driver module by dotted name.
    def _create_parser(parser_name):
        drv_module = __import__(parser_name,{},{},['create_parser'])
        return drv_module.create_parser()

# sys was only needed for the platform checks above; keep the module
# namespace clean.
del sys
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# In the Django date-format syntax, \d and \e escape the literal letters so
# "de" renders as text rather than as format characters.
DATE_FORMAT = r'j \d\e F \d\e Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'
YEAR_MONTH_FORMAT = r'F \d\e Y'
MONTH_DAY_FORMAT = r'j \d\e F'
SHORT_DATE_FORMAT = 'd/m/Y'
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'
FIRST_DAY_OF_WEEK = 0  # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y', # '2006-10-25', '25/10/2006', '25/10/06'
    # '%d de %b de %Y', '%d de %b, %Y', # '25 de Out de 2006', '25 Out, 2006'
    # '%d de %B de %Y', '%d de %B, %Y', # '25 de Outubro de 2006', '25 de Outubro, 2006'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M', # '25/10/2006 14:30'
    '%d/%m/%Y', # '25/10/2006'
    '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M', # '25/10/06 14:30'
    '%d/%m/%y', # '25/10/06'
)
# Number formatting: comma decimal mark, dot thousands mark, groups of 3.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from appengine_wrappers import GetAppVersion
from compiled_file_system import CompiledFileSystem
from copy import deepcopy
from file_system import FileNotFoundError
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_object_store import TestObjectStore
import unittest
# In-memory file tree backing every test: keys are path components, nested
# dicts are directories, string values are file contents.
_TEST_DATA = {
  '404.html': '404.html contents',
  'apps': {
    'a11y.html': 'a11y.html contents',
    'about_apps.html': 'about_apps.html contents',
    'fakedir': {
      'file.html': 'file.html contents'
    }
  },
  'extensions': {
    'activeTab.html': 'activeTab.html contents',
    'alarms.html': 'alarms.html contents'
  }
}

# Pass-through compile function: ignores the path argument and returns the
# data unchanged.
identity = lambda _, x: x
def _CreateFactory():
  # Builds a fresh factory per test.  _TEST_DATA is deep-copied so tests that
  # mutate the backing dict (e.g. testCaching) cannot leak state into others.
  return CompiledFileSystem.Factory(
      TestFileSystem(deepcopy(_TEST_DATA)),
      ObjectStoreCreator(start_empty=False,
                         store_type=TestObjectStore,
                         disable_wrappers=True))
class CompiledFileSystemTest(unittest.TestCase):
  """Tests for CompiledFileSystem over an in-memory TestFileSystem."""

  def testPopulateNamespace(self):
    """Object-store namespaces encode class, category path, and app version."""
    def CheckNamespace(expected_file, expected_list, fs):
      self.assertEqual(expected_file, fs._file_object_store.namespace)
      self.assertEqual(expected_list, fs._list_object_store.namespace)
    factory = _CreateFactory()
    f = lambda x: x
    CheckNamespace(
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/file&'
        'app_version=%s' % GetAppVersion(),
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/list&'
        'app_version=%s' % GetAppVersion(),
        factory.Create(f, CompiledFileSystemTest))
    # An explicit category is inserted into the namespace path.
    CheckNamespace(
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/foo/file&'
        'app_version=%s' % GetAppVersion(),
        'class=CompiledFileSystem&'
        'category=CompiledFileSystemTest/TestFileSystem/foo/list&'
        'app_version=%s' % GetAppVersion(),
        factory.Create(f, CompiledFileSystemTest, category='foo'))

  def testPopulateFromFile(self):
    """GetFromFile runs the compile function over the file's key and contents."""
    def Sleepy(key, val):
      return '%s%s' % ('Z' * len(key), 'z' * len(val))
    compiled_fs = _CreateFactory().Create(Sleepy, CompiledFileSystemTest)
    self.assertEqual('ZZZZZZZZzzzzzzzzzzzzzzzzz',
                     compiled_fs.GetFromFile('404.html'))
    self.assertEqual('ZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
                     compiled_fs.GetFromFile('apps/a11y.html'))
    self.assertEqual('ZZZZZZZZZZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
                     compiled_fs.GetFromFile('/apps/fakedir/file.html'))

  def testCaching(self):
    """Results are cached until the file system's stat is incremented."""
    compiled_fs = _CreateFactory().Create(identity, CompiledFileSystemTest)
    self.assertEqual('404.html contents', compiled_fs.GetFromFile('404.html'))
    self.assertEqual(set(('file.html',)),
                     set(compiled_fs.GetFromFileListing('apps/fakedir')))
    # Mutate the underlying data; cached values must still be served.
    compiled_fs._file_system._obj['404.html'] = 'boom'
    compiled_fs._file_system._obj['apps']['fakedir']['boom.html'] = 'blam'
    self.assertEqual('404.html contents', compiled_fs.GetFromFile('404.html'))
    self.assertEqual(set(('file.html',)),
                     set(compiled_fs.GetFromFileListing('apps/fakedir')))
    # Bumping the stat invalidates the cache and exposes the new data.
    compiled_fs._file_system.IncrementStat()
    self.assertEqual('boom', compiled_fs.GetFromFile('404.html'))
    self.assertEqual(set(('file.html', 'boom.html')),
                     set(compiled_fs.GetFromFileListing('apps/fakedir')))

  def testFailures(self):
    """Missing paths raise FileNotFoundError (with known TODO exceptions)."""
    compiled_fs = _CreateFactory().Create(identity, CompiledFileSystemTest)
    self.assertRaises(FileNotFoundError, compiled_fs.GetFromFile, '405.html')
    # TODO(kalman): would be nice to test this fails since apps/ is a dir.
    compiled_fs.GetFromFile('apps/')
    #self.assertRaises(SomeError, compiled_fs.GetFromFile, 'apps/')
    self.assertRaises(FileNotFoundError,
                      compiled_fs.GetFromFileListing, 'nodir/')
    # TODO(kalman): likewise, not a FileNotFoundError.
    self.assertRaises(FileNotFoundError,
                      compiled_fs.GetFromFileListing, '404.html')


if __name__ == '__main__':
  unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/i2c/i2c-arb-gpio-challenge.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: GPIO-based I2C Arbitration Using a Challenge & Response Mechanism
maintainers:
- Doug Anderson <dianders@chromium.org>
- Peter Rosin <peda@axentia.se>
description: |
This uses GPIO lines and a challenge & response mechanism to arbitrate who is
the master of an I2C bus in a multimaster situation.
In many cases using GPIOs to arbitrate is not needed and a design can use the
standard I2C multi-master rules. Using GPIOs is generally useful in the case
where there is a device on the bus that has errata and/or bugs that makes
standard multimaster mode not feasible.
Note that this scheme works well enough but has some downsides:
* It is nonstandard (not using standard I2C multimaster)
* Having two masters on a bus in general makes it relatively hard to debug
problems (hard to tell if i2c issues were caused by one master, another,
or some device on the bus).
Algorithm:
All masters on the bus have a 'bus claim' line which is an output that the
others can see. These are all active low with pull-ups enabled. We'll
describe these lines as:
* OUR_CLAIM: output from us signaling to other hosts that we want the bus
* THEIR_CLAIMS: output from others signaling that they want the bus
The basic algorithm is to assert your line when you want the bus, then make
sure that the other side doesn't want it also. A detailed explanation is
best done with an example.
Let's say we want to claim the bus. We:
1. Assert OUR_CLAIM.
  2. Wait a little bit for the other sides to notice (slew time, say 10
     microseconds).
  3. Check THEIR_CLAIMS. If none are asserted then we have the bus and we
     are done.
4. Otherwise, wait for a few milliseconds and see if THEIR_CLAIMS are released.
5. If not, back off, release the claim and wait for a few more milliseconds.
6. Go back to 1 (until retry time has expired).
properties:
compatible:
const: i2c-arb-gpio-challenge
i2c-parent:
$ref: /schemas/types.yaml#/definitions/phandle
description:
The I2C bus that this multiplexer's master-side port is connected to.
our-claim-gpios:
maxItems: 1
description:
The GPIO that we use to claim the bus.
slew-delay-us:
default: 10
description:
Time to wait for a GPIO to go high.
their-claim-gpios:
minItems: 1
maxItems: 8
description:
The GPIOs that the other sides use to claim the bus. Note that some
implementations may only support a single other master.
wait-free-us:
default: 50000
description:
We'll give up after this many microseconds.
wait-retry-us:
default: 3000
description:
We'll attempt another claim after this many microseconds.
i2c-arb:
type: object
$ref: /schemas/i2c/i2c-controller.yaml
unevaluatedProperties: false
description:
I2C arbitration bus node.
required:
- compatible
- i2c-arb
- our-claim-gpios
- their-claim-gpios
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
i2c-arbitrator {
compatible = "i2c-arb-gpio-challenge";
i2c-parent = <&i2c_4>;
our-claim-gpios = <&gpf0 3 GPIO_ACTIVE_LOW>;
their-claim-gpios = <&gpe0 4 GPIO_ACTIVE_LOW>;
slew-delay-us = <10>;
wait-retry-us = <3000>;
wait-free-us = <50000>;
i2c-arb {
#address-cells = <1>;
#size-cells = <0>;
sbs-battery@b {
compatible = "sbs,sbs-battery";
reg = <0xb>;
sbs,poll-retry-count = <1>;
};
embedded-controller@1e {
compatible = "google,cros-ec-i2c";
reg = <0x1e>;
interrupts = <6 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&gpx1>;
pinctrl-names = "default";
pinctrl-0 = <&ec_irq>;
wakeup-source;
};
};
};
|
unknown
|
github
|
https://github.com/torvalds/linux
|
Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.yaml
|
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
def _LicenseHeader(input_api):
"""Returns the license header regexp."""
# Accept any year number from 2003 to the current year
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2003, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
license_header = (
r'.*? Copyright( \(c\))? %(year)s The WebRTC [Pp]roject [Aa]uthors\. '
r'All [Rr]ights [Rr]eserved\.\n'
r'.*?\n'
r'.*? Use of this source code is governed by a BSD-style license\n'
r'.*? that can be found in the LICENSE file in the root of the source\n'
r'.*? tree\. An additional intellectual property rights grant can be '
r'found\n'
r'.*? in the file PATENTS\. All contributing project authors may\n'
r'.*? be found in the AUTHORS file in the root of the source tree\.\n'
) % {
'year': years_re,
}
return license_header
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  license_re = _LicenseHeader(input_api)
  return list(
      input_api.canned_checks.CheckLicense(input_api, output_api, license_re))
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point for upload: runs the common checks."""
  return list(_CommonChecks(input_api, output_api))


def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point for commit: runs the common checks."""
  return list(_CommonChecks(input_api, output_api))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the cert manager RPC API.
"""
from nova.openstack.common import cfg
import nova.openstack.common.rpc.proxy
# Configuration options for the cert RPC client; registered globally so the
# topic can be overridden in nova's config file.
rpcapi_opts = [
    cfg.StrOpt('cert_topic',
               default='cert',
               help='the topic cert nodes listen on'),
]

CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
class CertAPI(nova.openstack.common.rpc.proxy.RpcProxy):
    '''Client side of the cert rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Added get_backdoor_port()
    '''

    #
    # NOTE(russellb): This is the default minimum version that the server
    # (manager) side must implement unless otherwise specified using a version
    # argument to self.call()/cast()/etc. here.  It should be left as X.0 where
    # X is the current major API version (1.0, 2.0, ...).  For more information
    # about rpc API versioning, see the docs in
    # openstack/common/rpc/dispatcher.py.
    #
    BASE_RPC_API_VERSION = '1.0'

    def __init__(self):
        super(CertAPI, self).__init__(
                topic=CONF.cert_topic,
                default_version=self.BASE_RPC_API_VERSION)

    def revoke_certs_by_user(self, ctxt, user_id):
        """Synchronously revoke all certificates belonging to a user."""
        return self.call(ctxt, self.make_msg('revoke_certs_by_user',
                                             user_id=user_id))

    def revoke_certs_by_project(self, ctxt, project_id):
        """Synchronously revoke all certificates belonging to a project."""
        return self.call(ctxt, self.make_msg('revoke_certs_by_project',
                                             project_id=project_id))

    def revoke_certs_by_user_and_project(self, ctxt, user_id, project_id):
        """Synchronously revoke certificates for a (user, project) pair."""
        return self.call(ctxt,
                         self.make_msg('revoke_certs_by_user_and_project',
                                       user_id=user_id, project_id=project_id))

    def generate_x509_cert(self, ctxt, user_id, project_id):
        """Ask the cert service to generate an x509 cert; returns the result."""
        return self.call(ctxt, self.make_msg('generate_x509_cert',
                                             user_id=user_id,
                                             project_id=project_id))

    def fetch_ca(self, ctxt, project_id):
        """Fetch the CA certificate for a project."""
        return self.call(ctxt, self.make_msg('fetch_ca',
                                             project_id=project_id))

    def fetch_crl(self, ctxt, project_id):
        """Fetch the certificate revocation list for a project."""
        return self.call(ctxt, self.make_msg('fetch_crl',
                                             project_id=project_id))

    def decrypt_text(self, ctxt, project_id, text):
        """Decrypt ``text`` with the project's private key on the cert node."""
        return self.call(ctxt, self.make_msg('decrypt_text',
                                             project_id=project_id,
                                             text=text))

    def get_backdoor_port(self, context, host):
        # Requires server-side API >= 1.1; note ``host`` is accepted but not
        # forwarded in the message.
        return self.call(context, self.make_msg('get_backdoor_port'),
                         version='1.1')
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* 'OpenSSL for Ruby' project
* Copyright (C) 2001-2002 Michal Rokos <m.rokos@sh.cvut.cz>
* All rights reserved.
*/
/*
* This program is licensed under the same licence as Ruby.
* (See the file 'COPYING'.)
*/
#if !defined(_OSSL_H_)
#define _OSSL_H_
#include RUBY_EXTCONF_H
#include <assert.h>
#include <ruby.h>
#include <errno.h>
#include <ruby/io.h>
#include <ruby/thread.h>
#ifdef HAVE_RUBY_RACTOR_H
#include <ruby/ractor.h>
#else
#define RUBY_TYPED_FROZEN_SHAREABLE 0
#endif
#include <openssl/opensslv.h>
#include <openssl/err.h>
#include <openssl/asn1.h>
#include <openssl/x509v3.h>
#include <openssl/ssl.h>
#include <openssl/pkcs12.h>
#include <openssl/pkcs7.h>
#include <openssl/rand.h>
#include <openssl/conf.h>
#ifndef OPENSSL_NO_TS
#include <openssl/ts.h>
#endif
#include <openssl/crypto.h>
#if !defined(OPENSSL_NO_OCSP)
# include <openssl/ocsp.h>
#endif
#include <openssl/bn.h>
#include <openssl/rsa.h>
#include <openssl/dsa.h>
#include <openssl/evp.h>
#include <openssl/dh.h>
#include "openssl_missing.h"
/*
 * Library version probes: exactly one of OSSL_OPENSSL_PREREQ /
 * OSSL_LIBRESSL_PREREQ is functional, depending on which library the
 * extension is built against; the other always evaluates to 0.
 */
#ifndef LIBRESSL_VERSION_NUMBER
# define OSSL_IS_LIBRESSL 0
# define OSSL_OPENSSL_PREREQ(maj, min, pat) \
      (OPENSSL_VERSION_NUMBER >= ((maj << 28) | (min << 20) | (pat << 12)))
# define OSSL_LIBRESSL_PREREQ(maj, min, pat) 0
#else
# define OSSL_IS_LIBRESSL 1
# define OSSL_OPENSSL_PREREQ(maj, min, pat) 0
# define OSSL_LIBRESSL_PREREQ(maj, min, pat) \
      (LIBRESSL_VERSION_NUMBER >= ((maj << 28) | (min << 20) | (pat << 12)))
#endif

/* OpenSSL 3.0 made many getter return values const. */
#if OSSL_OPENSSL_PREREQ(3, 0, 0)
# define OSSL_3_const const
#else
# define OSSL_3_const /* const */
#endif

/* Feature switches keyed on the OpenSSL 3.0 API split. */
#if !defined(OPENSSL_NO_ENGINE) && !OSSL_OPENSSL_PREREQ(3, 0, 0)
# define OSSL_USE_ENGINE
#endif

#if OSSL_OPENSSL_PREREQ(3, 0, 0)
# define OSSL_USE_PROVIDER
# include <openssl/provider.h>
#endif

#if OSSL_OPENSSL_PREREQ(3, 0, 0)
# define OSSL_HAVE_IMMUTABLE_PKEY
#endif
/*
* Common Module
*/
extern VALUE mOSSL;
/*
* Common Error Class
*/
extern VALUE eOSSLError;
/*
* CheckTypes
*/
/* Raise TypeError unless obj is a kind of klass (via rb_obj_is_kind_of). */
#define OSSL_Check_Kind(obj, klass) do {\
  if (!rb_obj_is_kind_of((obj), (klass))) {\
    ossl_raise(rb_eTypeError, "wrong argument (%"PRIsVALUE")! (Expected kind of %"PRIsVALUE")",\
               rb_obj_class(obj), (klass));\
  }\
} while (0)
/*
* Type conversions
*/
/* Convert a Ruby Numeric to uint64_t via whichever native conversion
 * matches an 8-byte integer type on this platform. */
#if !defined(NUM2UINT64T) /* in case Ruby starts to provide */
# if SIZEOF_LONG == 8
#  define NUM2UINT64T(x) ((uint64_t)NUM2ULONG(x))
# elif defined(HAVE_LONG_LONG) && SIZEOF_LONG_LONG == 8
#  define NUM2UINT64T(x) ((uint64_t)NUM2ULL(x))
# else
#  error "unknown platform; no 64-bit width integer"
# endif
#endif
/*
* Data Conversion
*/
STACK_OF(X509) *ossl_x509_ary2sk(VALUE);
STACK_OF(X509) *ossl_protect_x509_ary2sk(VALUE,int*);
VALUE ossl_x509_sk2ary(const STACK_OF(X509) *certs);
VALUE ossl_x509crl_sk2ary(const STACK_OF(X509_CRL) *crl);
VALUE ossl_x509name_sk2ary(const STACK_OF(X509_NAME) *names);
VALUE ossl_buf2str(char *buf, int len);
VALUE ossl_str_new(const char *, long, int *);
/* Shrink str so it ends exactly at pointer p, which must point into str's
 * buffer (asserted to be within the current length). */
#define ossl_str_adjust(str, p) \
do{\
    long newlen = (long)((p) - (unsigned char*)RSTRING_PTR(str));\
    assert(newlen <= RSTRING_LEN(str));\
    rb_str_set_len((str), newlen);\
}while(0)
/*
* Convert binary string to hex string. The caller is responsible for
* ensuring out has (2 * len) bytes of capacity.
*/
void ossl_bin2hex(const unsigned char *in, char *out, size_t len);
/*
* Our default PEM callback
*/
/* Convert the argument to String and validate the length. Note this may raise. */
VALUE ossl_pem_passwd_value(VALUE);
/* Can be casted to pem_password_cb. If a password (String) is passed as the
* "arbitrary data" (typically the last parameter of PEM_{read,write}_
* functions), uses the value. If not, but a block is given, yields to it.
* If not either, fallbacks to PEM_def_callback() which reads from stdin. */
int ossl_pem_passwd_cb(char *, int, int, void *);
/*
* Clear BIO* with this in PEM/DER fallback scenarios to avoid decoding
* errors piling up in OpenSSL::Errors
*/
/* Rewind the BIO and discard any queued OpenSSL errors in one step. */
#define OSSL_BIO_reset(bio) do { \
    (void)BIO_reset((bio)); \
    ossl_clear_error(); \
} while (0)
/*
* ERRor messages
*/
PRINTF_ARGS(NORETURN(void ossl_raise(VALUE, const char *, ...)), 2, 3);
/* Make exception instance from str and OpenSSL error reason string. */
VALUE ossl_make_error(VALUE exc, VALUE str);
/* Clear OpenSSL error queue. If dOSSL is set, rb_warn() them. */
void ossl_clear_error(void);
/*
* String to DER String
*/
VALUE ossl_to_der(VALUE);
VALUE ossl_to_der_if_possible(VALUE);
/*
* Debug
*/
extern VALUE dOSSL;
/* printf-style debug output to stderr, emitted only while dOSSL is Qtrue;
 * appends the source file and line of the call site. */
#define OSSL_Debug(...) do { \
  if (dOSSL == Qtrue) { \
    fprintf(stderr, "OSSL_DEBUG: "); \
    fprintf(stderr, __VA_ARGS__); \
    fprintf(stderr, " [%s:%d]\n", __FILE__, __LINE__); \
  } \
} while (0)
/*
* Include all parts
*/
#include "ossl_asn1.h"
#include "ossl_bio.h"
#include "ossl_bn.h"
#include "ossl_cipher.h"
#include "ossl_config.h"
#include "ossl_digest.h"
#include "ossl_engine.h"
#include "ossl_hmac.h"
#include "ossl_kdf.h"
#include "ossl_ns_spki.h"
#include "ossl_ocsp.h"
#include "ossl_pkcs12.h"
#include "ossl_pkcs7.h"
#include "ossl_pkey.h"
#include "ossl_provider.h"
#include "ossl_rand.h"
#include "ossl_ssl.h"
#include "ossl_ts.h"
#include "ossl_x509.h"
void Init_openssl(void);
#endif /* _OSSL_H_ */
|
c
|
github
|
https://github.com/ruby/ruby
|
ext/openssl/ossl.h
|
use super::MAX_SAFE_MILLIS_DURATION;
use crate::time::{Clock, Duration, Instant};
/// A structure which handles conversion from Instants to `u64` timestamps.
#[derive(Debug)]
pub(crate) struct TimeSource {
    /// Reference instant captured at construction; all ticks are measured
    /// in whole milliseconds elapsed since this point.
    start_time: Instant,
}
impl TimeSource {
    /// Captures `clock.now()` as the zero point for all tick conversions.
    pub(crate) fn new(clock: &Clock) -> Self {
        Self {
            start_time: clock.now(),
        }
    }

    /// Converts a deadline to a tick, rounding up so the tick covers the
    /// full millisecond containing `t` (999_999 ns is added before the
    /// truncating conversion below).
    pub(crate) fn deadline_to_tick(&self, t: Instant) -> u64 {
        // Round up to the end of a ms
        self.instant_to_tick(t + Duration::from_nanos(999_999))
    }

    /// Converts an instant to whole milliseconds since `start_time`,
    /// saturating: instants before `start_time` map to 0, and the result
    /// is clamped to `MAX_SAFE_MILLIS_DURATION`.
    pub(crate) fn instant_to_tick(&self, t: Instant) -> u64 {
        // round up
        let dur: Duration = t.saturating_duration_since(self.start_time);
        let ms = dur
            .as_millis()
            .try_into()
            .unwrap_or(MAX_SAFE_MILLIS_DURATION);
        ms.min(MAX_SAFE_MILLIS_DURATION)
    }

    /// Interprets a tick count as a millisecond `Duration`.
    pub(crate) fn tick_to_duration(&self, t: u64) -> Duration {
        Duration::from_millis(t)
    }

    /// The current tick according to `clock`.
    pub(crate) fn now(&self, clock: &Clock) -> u64 {
        self.instant_to_tick(clock.now())
    }

    #[cfg(test)]
    #[allow(dead_code)]
    pub(super) fn start_time(&self) -> Instant {
        self.start_time
    }
}
|
rust
|
github
|
https://github.com/tokio-rs/tokio
|
tokio/src/runtime/time/source.rs
|
#! /usr/bin/env python
# Copyright (c) 2009, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Code Aurora nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
# Identification string reported by --version.
version = 'build-all.py, version 0.01'
# All per-target build output goes under this directory (sibling of the tree).
build_dir = '../all-kernels'
# Default make goals; replaced with ["oldconfig"] when --oldconfig is given,
# and extended with -j/-l flags in main().
make_command = ["vmlinux", "modules"]
# Build environment: force ARM cross-compilation.
# NOTE: os.environ is aliased and mutated in place here, not copied.
make_env = os.environ
make_env.update({
        'ARCH': 'arm',
        'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
        'KCONFIG_NOTIMESTAMP': 'true' })
# Parsed command-line options; replaced with the OptionParser result in main().
all_options = {}
def error(msg):
    """Write a prefixed error message to stderr without terminating."""
    sys.stderr.write("error: " + str(msg) + "\n")
def fail(msg):
    """Fail with a user-printed message"""
    # Report via error() so the message is prefixed and goes to stderr,
    # then terminate the script with a non-zero exit status.
    error(msg)
    sys.exit(1)
def check_kernel():
    """Ensure that PWD is a kernel directory"""
    # A top-level MAINTAINERS file plus the MSM Kconfig is taken as
    # sufficient evidence that we are inside an MSM kernel tree.
    if (not os.path.isfile('MAINTAINERS') or
            not os.path.isfile('arch/arm/mach-msm/Kconfig')):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed.

    Creation can race with another process; a concurrent mkdir producing
    EEXIST is tolerated, any other OSError is re-raised.
    """
    # Local import: the original code referenced errno.EEXIST without
    # ever importing errno, so the race-tolerant path raised NameError.
    import errno
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # Directory appeared between the isdir() check and makedirs().
            if exc.errno != errno.EEXIST:
                raise
def update_config(file, str):
    """Append a single config option line to the given defconfig file.

    NOTE: the parameter names shadow the builtins `file`/`str`; they are
    kept unchanged for backward compatibility with keyword callers.
    """
    # print(...) with a single argument behaves identically under
    # Python 2 (parenthesized expression) and Python 3.
    print('Updating %s with \'%s\'\n' % (file, str))
    # Context manager guarantees the handle is closed even if write fails.
    with open(file, 'a') as defconfig:
        defconfig.write(str + '\n')
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    patterns = (
        'arch/arm/configs/msm[0-9]*_defconfig',
        'arch/arm/configs/qsd*_defconfig',
    )
    names = {}
    for pattern in patterns:
        for path in glob.glob(pattern):
            # Strip the trailing '_defconfig' (10 chars) to get the target name.
            names[os.path.basename(path)[:-10]] = path
    return names
class Builder:
    """Runs a build subprocess, teeing its output into a log file.

    In verbose mode each output chunk is echoed to stdout as well;
    otherwise a '.' progress marker is printed per output line, with a
    line break after every 64 markers.
    """
    def __init__(self, logname):
        # Log file stays open for the lifetime of the Builder; it is
        # closed at the end of run().
        self.logname = logname
        self.fd = open(logname, 'w')
    def run(self, args):
        """Run `args` as a subprocess; return its exit status."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Reads raw 1024-byte chunks from the pipe fd instead of iterating
        # proc.stdout, so output is forwarded as soon as it is produced.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            # all_options is replaced by the parsed OptionParser options
            # in main(); attribute access assumes that has happened.
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One dot per newline in the chunk; wrap after 64 dots.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
# Targets that failed to build while --keep-going is in effect.
failed_targets = []

def build(target):
    """Configure and build a single defconfig target out-of-tree.

    Output goes to <build_dir>/<target>/ with a log at
    <build_dir>/log-<target>.log.  On failure either records the target
    (with --keep-going) or aborts the whole run.
    """
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    shutil.copyfile(defconfig, dotconfig)
    # Regenerate the .config via 'make <target>_defconfig' in the
    # out-of-tree build directory.
    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
            '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()
    if not all_options.updateconfigs:
        # Full build; skipped when we are only regenerating defconfigs.
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)
        if result != 0:
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))
    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        shutil.copyfile(dotconfig, defconfig)
def build_many(allconf, targets):
    """Build every target in `targets`, optionally updating its defconfig.

    `allconf` maps target names to defconfig paths.  If any target failed
    under --keep-going, aborts at the end with a summary of failures.
    """
    # print(...) with a single argument behaves identically under
    # Python 2 (parenthesized expression) and Python 3.
    print("Building %d target(s)" % len(targets))
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    """Parse command-line arguments and build the requested targets."""
    check_kernel()
    check_build()
    configs = scan_configs()

    usage = ("""
    %prog [options] all -- Build all targets
    %prog [options] target target ... -- List specific targets
    %prog [options] perf -- Build all perf targets
    %prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
            "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    (options, args) = parser.parse_args()

    # Make the parsed options visible to build()/build_many()/Builder.
    global all_options
    all_options = options

    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)

    if options.oldconfig:
        # Replace the default goals entirely; only regenerate .config files.
        global make_command
        make_command = ["oldconfig"]

    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)

    # 'perf'/'noperf' select targets by substring of the defconfig name.
    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        # Explicit target list: validate every name before building any.
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")

if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
# Name of the bundled jar, expected to live next to this wrapper script.
jar_file = 'NP-Likeness-2.1.jar'
# Arguments passed to the jar when the user supplies none.
default_jvm_pass_args = ['-help']
def real_dirname(path):
    """Return the symlink-resolved, canonicalized directory-portion of path."""
    resolved = os.path.realpath(path)
    return os.path.dirname(resolved)
def java_executable():
    """Return the executable name of the Java interpreter."""
    java_home = getenv('JAVA_HOME')
    java_bin = os.path.join('bin', 'java')
    if not java_home:
        # No JAVA_HOME: rely on the PATH lookup done by the OS.
        return 'java'
    candidate = os.path.join(java_home, java_bin)
    return candidate if access(candidate, X_OK) else 'java'
def jvm_opts(argv):
    """
    Construct list of Java arguments based on our argument list.
    """
    # Materialize the caller's arguments into a fresh list; fall back to
    # the module-level defaults when nothing was passed.
    pass_args = list(argv)
    if not pass_args:
        pass_args = default_jvm_pass_args
    return pass_args
def main():
    """
    Calling of the tool ensuring that the java runtime is invoked with the right options.
    """
    java = java_executable()
    pass_args = jvm_opts(sys.argv[1:])
    # The jar is expected to sit in the same (resolved) directory as this script.
    jar_dir = real_dirname(sys.argv[0])
    jar_arg = '-jar'
    jar_path = os.path.join(jar_dir, jar_file)
    java_args = [java] + [jar_arg] + [jar_path] + pass_args
    # Exit with the JVM's own status so callers observe tool failures.
    sys.exit(subprocess.call(java_args))

if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// SPDX-License-Identifier: GPL-2.0-only
/*
* async.c: Asynchronous function calls for boot performance
*
* (C) Copyright 2009 Intel Corporation
* Author: Arjan van de Ven <arjan@linux.intel.com>
*/
/*
Goals and Theory of Operation
The primary goal of this feature is to reduce the kernel boot time,
by doing various independent hardware delays and discovery operations
decoupled and not strictly serialized.
More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)
Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).
The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.
The asynchronously called function should before doing a globally visible
operation, such as registering device numbers, call the
async_synchronize_cookie() function and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.
Subsystem/driver initialization code that scheduled asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, need to do a full
synchronization with the async_synchronize_full() function, before returning
from their init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.
*/
#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/ktime.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "workqueue_internal.h"
/* Next cookie to hand out; incremented under async_lock. */
static async_cookie_t next_cookie = 1;

/* Above this many queued entries, new work is executed synchronously. */
#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
/* Protects next_cookie and both pending lists. */
static DEFINE_SPINLOCK(async_lock);
static struct workqueue_struct *async_wq;

/* One scheduled asynchronous function call. */
struct async_entry {
	struct list_head	domain_list;	/* link in domain->pending */
	struct list_head	global_list;	/* link in async_global_pending */
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

/* Woken after each entry completes; waited on by the synchronize calls. */
static DECLARE_WAIT_QUEUE_HEAD(async_done);

/* Number of entries currently queued or running. */
static atomic_t entry_count;
/*
 * Elapsed time since @start in approximate microseconds: the ns delta is
 * divided by 1024 via a shift rather than by 1000.  The ~2% error is
 * acceptable because the result is only used in pr_debug() output.
 */
static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();
	return ktime_to_ns(ktime_sub(now, start)) >> 10;
}
/*
 * Return the cookie of the oldest still-pending entry in @domain, or in
 * the global pending list when @domain is NULL.  Entries are appended in
 * cookie order, so the first list entry carries the lowest cookie.
 * Returns ASYNC_COOKIE_MAX when nothing is pending.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}
/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry (while still holding async_lock) */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}
/*
 * Initialize @entry, assign it the next cookie, queue it on @domain (and
 * on the global pending list when the domain is registered), then hand
 * it to the async workqueue near @node.  Returns the new cookie.
 */
static async_cookie_t __async_schedule_node_domain(async_func_t func,
						   void *data, int node,
						   struct async_domain *domain,
						   struct async_entry *entry)
{
	async_cookie_t newcookie;
	unsigned long flags;

	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, async_wq, &entry->work);

	return newcookie;
}
/**
* async_schedule_node_domain - NUMA specific version of async_schedule_domain
* @func: function to execute asynchronously
* @data: data pointer to pass to the function
* @node: NUMA node that we want to schedule this on or close to
* @domain: the domain
*
* Returns an async_cookie_t that may be used for checkpointing later.
* @domain may be used in the async_synchronize_*_domain() functions to
* wait within a certain synchronization domain rather than globally.
*
* Note: This function may be called from atomic or non-atomic contexts.
*
* The node requested will be honored on a best effort basis. If the node
* has no CPUs associated with it then the work is distributed among all
* available CPUs.
*/
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		/* The synchronous fallback still consumes a cookie. */
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		func(data, newcookie);
		return newcookie;
	}

	return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
/**
* async_schedule_node - NUMA specific version of async_schedule
* @func: function to execute asynchronously
* @data: data pointer to pass to the function
* @node: NUMA node that we want to schedule this on or close to
*
* Returns an async_cookie_t that may be used for checkpointing later.
* Note: This function may be called from atomic or non-atomic contexts.
*
* The node requested will be honored on a best effort basis. If the node
* has no CPUs associated with it then the work is distributed among all
* available CPUs.
*/
/* Convenience wrapper: schedule on the default domain. */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);
/**
* async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
* @func: function to execute asynchronously
* @dev: device argument to be passed to function
*
* @dev is used as both the argument for the function and to provide NUMA
* context for where to run the function.
*
* If the asynchronous execution of @func is scheduled successfully, return
* true. Otherwise, do nothing and return false, unlike async_schedule_dev()
* that will run the function synchronously then.
*/
bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
{
	struct async_entry *entry;

	/*
	 * GFP_KERNEL allocation (may sleep), unlike the GFP_ATOMIC path
	 * in async_schedule_node_domain().
	 */
	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);

	/* Give up if there is no memory or too much work. */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		return false;
	}

	__async_schedule_node_domain(func, dev, dev_to_node(dev),
				     &async_dfl_domain, entry);
	return true;
}
/**
* async_synchronize_full - synchronize all asynchronous function calls
*
* This function waits until all asynchronous function calls have been done.
*/
void async_synchronize_full(void)
{
	/* NULL domain means: wait on the global pending list. */
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
/**
* async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
* @domain: the domain to synchronize
*
* This function waits until all asynchronous function calls for the
* synchronization domain specified by @domain have been done.
*/
void async_synchronize_full_domain(struct async_domain *domain)
{
	/* ASYNC_COOKIE_MAX waits for everything scheduled so far. */
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
/**
* async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
* @cookie: async_cookie_t to use as checkpoint
* @domain: the domain to synchronize (%NULL for all registered domains)
*
* This function waits until all asynchronous function calls for the
* synchronization domain specified by @domain submitted prior to @cookie
* have been done.
*/
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	/*
	 * Sleep until every entry with a lower cookie has completed;
	 * async_run_entry_fn() wakes async_done after each completion.
	 */
	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
/**
* async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
* @cookie: async_cookie_t to use as checkpoint
*
* This function waits until all asynchronous function calls prior to @cookie
* have been done.
*/
/* Convenience wrapper: synchronize against the default domain. */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);
/**
* current_is_async - is %current an async worker task?
*
* Returns %true if %current is an async worker task.
*/
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	/* Async work items all use async_run_entry_fn as their work func. */
	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
/* Create the dedicated async workqueue; called once during early boot. */
void __init async_init(void)
{
	/*
	 * Async can schedule a number of interdependent work items. However,
	 * unbound workqueues can handle only upto min_active interdependent
	 * work items. The default min_active of 8 isn't sufficient for async
	 * and can lead to stalls. Let's use a dedicated workqueue with raised
	 * min_active.
	 */
	async_wq = alloc_workqueue("async", WQ_UNBOUND, 0);
	BUG_ON(!async_wq);

	workqueue_set_min_active(async_wq, WQ_DFL_ACTIVE);
}
|
c
|
github
|
https://github.com/torvalds/linux
|
kernel/async.c
|
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool import steps
class RollChromiumDEPS(AbstractSequencedCommand):
    """Sequenced command that rolls the Chromium DEPS revision.

    Runs four steps in order: update DEPS, prepare the ChangeLog for the
    roll, confirm the diff interactively, then commit.
    """
    name = "roll-chromium-deps"
    help_text = "Updates Chromium DEPS (defaults to the last-known good revision of Chromium)"
    argument_names = "[CHROMIUM_REVISION]"
    steps = [
        steps.UpdateChromiumDEPS,
        steps.PrepareChangeLogForDEPSRoll,
        steps.ConfirmDiff,
        steps.Commit,
    ]

    def _prepare_state(self, options, args, tool):
        # First positional argument if given, otherwise a falsy value;
        # per help_text the steps then fall back to the last-known good
        # Chromium revision.
        return {
            "chromium_revision": (args and args[0]),
        }
|
unknown
|
codeparrot/codeparrot-clean
| ||
/******************************************************************************
* Remote Debugging Module - Asyncio Functions
*
* This file contains functions for parsing asyncio tasks, coroutines,
* and awaited_by relationships from remote process memory.
******************************************************************************/
#include "_remote_debugging.h"
/* ============================================================================
* ASYNCIO DEBUG ADDRESS FUNCTIONS
* ============================================================================ */
/*
 * Locate the AsyncioDebug section in the remote process.  Returns its
 * address, or 0 with a RuntimeError set (chained onto any pre-existing
 * exception) when it cannot be found.
 */
uintptr_t
_Py_RemoteDebug_GetAsyncioDebugAddress(proc_handle_t* handle)
{
    uintptr_t address;

#ifdef MS_WINDOWS
    // On Windows, search for asyncio debug in executable or DLL.
    // "AsyncioD" because PE section names are truncated to 8 characters;
    // the section lives in the _asyncio extension module.
    address = search_windows_map_for_section(handle, "AsyncioD", L"_asyncio",
        NULL);
    if (address == 0) {
        PyObject *exc = PyErr_GetRaisedException();
        PyErr_SetString(PyExc_RuntimeError, "Failed to find the AsyncioDebug section in the process.");
        _PyErr_ChainExceptions1(exc);
    }
#elif defined(__linux__) && HAVE_PROCESS_VM_READV
    // On Linux, search for asyncio debug in executable or DLL
    address = search_linux_map_for_section(handle, "AsyncioDebug", "python",
        NULL);
    if (address == 0) {
        // Error out: 'python' substring covers both executable and DLL
        PyObject *exc = PyErr_GetRaisedException();
        PyErr_SetString(PyExc_RuntimeError, "Failed to find the AsyncioDebug section in the process.");
        _PyErr_ChainExceptions1(exc);
    }
#elif defined(__APPLE__) && TARGET_OS_OSX
    // On macOS, try libpython first, then fall back to python
    address = search_map_for_section(handle, "AsyncioDebug", "libpython",
        NULL);
    if (address == 0) {
        PyErr_Clear();
        address = search_map_for_section(handle, "AsyncioDebug", "python",
            NULL);
    }
    if (address == 0) {
        // Error out: 'python' substring covers both executable and DLL
        PyObject *exc = PyErr_GetRaisedException();
        PyErr_SetString(PyExc_RuntimeError, "Failed to find the AsyncioDebug section in the process.");
        _PyErr_ChainExceptions1(exc);
    }
#else
    Py_UNREACHABLE();
#endif
    return address;
}
/*
 * Read the remote _Py_AsyncioModuleDebugOffsets structure into
 * unwinder->async_debug_offsets.  Returns 0 on success, -1 with an
 * exception set on failure.
 */
int
read_async_debug(RemoteUnwinderObject *unwinder)
{
    uintptr_t async_debug_addr = _Py_RemoteDebug_GetAsyncioDebugAddress(&unwinder->handle);
    if (!async_debug_addr) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to get AsyncioDebug address");
        return -1;
    }
    size_t size = sizeof(struct _Py_AsyncioModuleDebugOffsets);
    int result = _Py_RemoteDebug_PagedReadRemoteMemory(&unwinder->handle, async_debug_addr, size, &unwinder->async_debug_offsets);
    if (result < 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read AsyncioDebug offsets");
    }
    return result;
}
/*
 * Lazily load the asyncio debug offsets, caching success in
 * unwinder->async_debug_offsets_available.  Returns 0 when the offsets
 * are available, -1 with a RuntimeError when they are not.
 */
int
ensure_async_debug_offsets(RemoteUnwinderObject *unwinder)
{
    // If already available, nothing to do
    if (unwinder->async_debug_offsets_available) {
        return 0;
    }

    // Try to load async debug offsets (the target process may have
    // loaded asyncio since we last checked)
    if (read_async_debug(unwinder) < 0) {
        PyErr_Clear();
        PyErr_SetString(PyExc_RuntimeError, "AsyncioDebug section not available");
        set_exception_cause(unwinder, PyExc_RuntimeError,
            "AsyncioDebug section unavailable - asyncio module may not be loaded in target process");
        return -1;
    }

    unwinder->async_debug_offsets_available = 1;
    return 0;
}
/* ============================================================================
* SET ITERATION FUNCTIONS
* ============================================================================ */
/*
 * Walk the entries of a remote set object at @set_addr, invoking
 * @processor(unwinder, key_addr, context) for each live entry.  The
 * table is scanned as pairs of pointer-sized slots (advancing by
 * 2 * sizeof(void*) per entry); entries whose key slot is NULL or whose
 * second read yields zero are skipped as unused/dummy slots.
 * Returns 0 on success, -1 with an exception set on failure.
 */
int
iterate_set_entries(
    RemoteUnwinderObject *unwinder,
    uintptr_t set_addr,
    set_entry_processor_func processor,
    void *context
) {
    char set_object[SIZEOF_SET_OBJ];
    if (_Py_RemoteDebug_PagedReadRemoteMemory(&unwinder->handle, set_addr,
            SIZEOF_SET_OBJ, set_object) < 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read set object");
        return -1;
    }

    Py_ssize_t num_els = GET_MEMBER(Py_ssize_t, set_object, unwinder->debug_offsets.set_object.used);
    Py_ssize_t mask = GET_MEMBER(Py_ssize_t, set_object, unwinder->debug_offsets.set_object.mask);
    uintptr_t table_ptr = GET_MEMBER(uintptr_t, set_object, unwinder->debug_offsets.set_object.table);

    // Validate mask and num_els to prevent huge loop iterations from garbage data
    if (mask < 0 || mask >= MAX_SET_TABLE_SIZE || num_els < 0 || num_els > mask + 1) {
        set_exception_cause(unwinder, PyExc_RuntimeError,
                            "Invalid set object (corrupted remote memory)");
        return -1;
    }

    Py_ssize_t set_len = mask + 1;
    Py_ssize_t i = 0;
    Py_ssize_t els = 0;
    // Stop early once all 'used' elements have been seen.
    while (i < set_len && els < num_els) {
        uintptr_t key_addr;
        if (read_py_ptr(unwinder, table_ptr, &key_addr) < 0) {
            set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read set entry key");
            return -1;
        }

        if ((void*)key_addr != NULL) {
            Py_ssize_t ref_cnt;
            if (read_Py_ssize_t(unwinder, table_ptr, &ref_cnt) < 0) {
                set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read set entry ref count");
                return -1;
            }

            if (ref_cnt) {
                // Process this valid set entry
                if (processor(unwinder, key_addr, context) < 0) {
                    return -1;
                }
                els++;
            }
        }
        // Each table entry occupies two pointer-sized slots.
        table_ptr += sizeof(void*) * 2;
        i++;
    }
    return 0;
}
/* ============================================================================
* TASK NAME PARSING
* ============================================================================ */
/*
 * Build a Python string naming the task at @task_address in the remote
 * process.  asyncio stores default task names as a PyLong N (rendered
 * here as "Task-N") and explicit names as a unicode object.
 * Returns a new reference, or NULL with an exception set.
 */
PyObject *
parse_task_name(
    RemoteUnwinderObject *unwinder,
    uintptr_t task_address
) {
    // Read the entire TaskObj at once.
    // NOTE(review): the read length comes from the remote offsets table
    // while the buffer is SIZEOF_TASK_OBJ — assumes size <= SIZEOF_TASK_OBJ.
    char task_obj[SIZEOF_TASK_OBJ];
    int err = _Py_RemoteDebug_PagedReadRemoteMemory(
        &unwinder->handle,
        task_address,
        (size_t)unwinder->async_debug_offsets.asyncio_task_object.size,
        task_obj);
    if (err < 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read task object");
        return NULL;
    }

    uintptr_t task_name_addr = GET_MEMBER_NO_TAG(uintptr_t, task_obj, unwinder->async_debug_offsets.asyncio_task_object.task_name);

    // The task name can be a long or a string so we need to check the type
    char task_name_obj[SIZEOF_PYOBJECT];
    err = _Py_RemoteDebug_PagedReadRemoteMemory(
        &unwinder->handle,
        task_name_addr,
        SIZEOF_PYOBJECT,
        task_name_obj);
    if (err < 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read task name object");
        return NULL;
    }

    // Now read the type object to get the flags
    char type_obj[SIZEOF_TYPE_OBJ];
    err = _Py_RemoteDebug_PagedReadRemoteMemory(
        &unwinder->handle,
        GET_MEMBER(uintptr_t, task_name_obj, unwinder->debug_offsets.pyobject.ob_type),
        SIZEOF_TYPE_OBJ,
        type_obj);
    if (err < 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read task name type object");
        return NULL;
    }

    if ((GET_MEMBER(unsigned long, type_obj, unwinder->debug_offsets.type_object.tp_flags) & Py_TPFLAGS_LONG_SUBCLASS)) {
        long res = read_py_long(unwinder, task_name_addr);
        if (res == -1) {
            set_exception_cause(unwinder, PyExc_RuntimeError, "Task name PyLong parsing failed");
            return NULL;
        }
        // %ld matches the 'long' argument; the previous %d format unit
        // expects an int and is wrong for long on LP64 platforms.
        return PyUnicode_FromFormat("Task-%ld", res);
    }

    if(!(GET_MEMBER(unsigned long, type_obj, unwinder->debug_offsets.type_object.tp_flags) & Py_TPFLAGS_UNICODE_SUBCLASS)) {
        PyErr_SetString(PyExc_RuntimeError, "Invalid task name object");
        set_exception_cause(unwinder, PyExc_RuntimeError, "Task name object is neither long nor unicode");
        return NULL;
    }

    // Explicit unicode name; capped at 255 characters.
    return read_py_str(
        unwinder,
        task_name_addr,
        255
    );
}
/* ============================================================================
* COROUTINE CHAIN PARSING
* ============================================================================ */
/*
 * For a generator frame suspended in yield-from/await: follow the object
 * on top of its value stack (the awaited object) and, when it has the
 * same type as the current generator, continue rendering the coroutine
 * chain into @render_to.  Returns 0 on success (including "nothing to
 * follow"), -1 with an exception set on failure.
 */
static int
handle_yield_from_frame(
    RemoteUnwinderObject *unwinder,
    uintptr_t gi_iframe_addr,
    uintptr_t gen_type_addr,
    PyObject *render_to
) {
    // Read the entire interpreter frame at once
    char iframe[SIZEOF_INTERP_FRAME];
    int err = _Py_RemoteDebug_PagedReadRemoteMemory(
        &unwinder->handle,
        gi_iframe_addr,
        SIZEOF_INTERP_FRAME,
        iframe);
    if (err < 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read interpreter frame in yield_from handler");
        return -1;
    }

    if (GET_MEMBER(char, iframe, unwinder->debug_offsets.interpreter_frame.owner) != FRAME_OWNED_BY_GENERATOR) {
        PyErr_SetString(
            PyExc_RuntimeError,
            "generator doesn't own its frame \\_o_/");
        set_exception_cause(unwinder, PyExc_RuntimeError, "Frame ownership mismatch in yield_from");
        return -1;
    }

    uintptr_t stackpointer_addr = GET_MEMBER_NO_TAG(uintptr_t, iframe, unwinder->debug_offsets.interpreter_frame.stackpointer);

    if ((void*)stackpointer_addr != NULL) {
        uintptr_t gi_await_addr;
        // The awaited object sits just below the stack pointer.
        err = read_py_ptr(
            unwinder,
            stackpointer_addr - sizeof(void*),
            &gi_await_addr);
        if (err) {
            set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read gi_await address");
            return -1;
        }

        if ((void*)gi_await_addr != NULL) {
            uintptr_t gi_await_addr_type_addr;
            err = read_ptr(
                unwinder,
                gi_await_addr + (uintptr_t)unwinder->debug_offsets.pyobject.ob_type,
                &gi_await_addr_type_addr);
            if (err) {
                set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read gi_await type address");
                return -1;
            }

            if (gen_type_addr == gi_await_addr_type_addr) {
                /* This needs an explanation. We always start with parsing
                   native coroutine / generator frames. Ultimately they
                   are awaiting on something. That something can be
                   a native coroutine frame or... an iterator.
                   If it's the latter -- we can't continue building
                   our chain. So the condition to bail out of this is
                   to do that when the type of the current coroutine
                   doesn't match the type of whatever it points to
                   in its cr_await.
                */
                err = parse_coro_chain(unwinder, gi_await_addr, render_to);
                if (err) {
                    set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to parse coroutine chain in yield_from");
                    return -1;
                }
            }
        }
    }

    return 0;
}
/*
 * Append the frame name of the coroutine/generator at @coro_address to
 * @render_to and, when it is suspended in a yield-from/await, recurse
 * into the awaited object.  Cleared frames contribute nothing.
 * Returns 0 on success, -1 with an exception set on failure.
 */
int
parse_coro_chain(
    RemoteUnwinderObject *unwinder,
    uintptr_t coro_address,
    PyObject *render_to
) {
    assert((void*)coro_address != NULL);

    // Read the entire generator object at once
    char gen_object[SIZEOF_GEN_OBJ];
    int err = _Py_RemoteDebug_PagedReadRemoteMemory(
        &unwinder->handle,
        coro_address,
        SIZEOF_GEN_OBJ,
        gen_object);
    if (err < 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read generator object in coro chain");
        return -1;
    }

    int8_t frame_state = GET_MEMBER(int8_t, gen_object, unwinder->debug_offsets.gen_object.gi_frame_state);
    if (frame_state == FRAME_CLEARED) {
        return 0;
    }

    uintptr_t gen_type_addr = GET_MEMBER(uintptr_t, gen_object, unwinder->debug_offsets.pyobject.ob_type);

    PyObject* name = NULL;

    // Parse the previous frame using the gi_iframe from local copy
    uintptr_t prev_frame;
    uintptr_t gi_iframe_addr = coro_address + (uintptr_t)unwinder->debug_offsets.gen_object.gi_iframe;
    uintptr_t address_of_code_object = 0;
    if (parse_frame_object(unwinder, &name, gi_iframe_addr, &address_of_code_object, &prev_frame) < 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to parse frame object in coro chain");
        return -1;
    }

    // parse_frame_object() can legitimately yield no name; not an error.
    if (!name) {
        return 0;
    }

    if (PyList_Append(render_to, name)) {
        Py_DECREF(name);
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to append frame to coro chain");
        return -1;
    }
    Py_DECREF(name);

    if (frame_state == FRAME_SUSPENDED_YIELD_FROM) {
        return handle_yield_from_frame(unwinder, gi_iframe_addr, gen_type_addr, render_to);
    }

    return 0;
}
/* ============================================================================
* TASK PARSING FUNCTIONS
* ============================================================================ */
/*
 * Build a CoroInfo struct sequence for the task at @task_address:
 * element 0 is the list of frame names of its coroutine chain (reversed
 * after parsing), element 1 is the task address as a PyLong.
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject*
create_task_result(
    RemoteUnwinderObject *unwinder,
    uintptr_t task_address
) {
    PyObject* result = NULL;
    PyObject *call_stack = NULL;
    PyObject *tn = NULL;
    char task_obj[SIZEOF_TASK_OBJ];
    uintptr_t coro_addr;

    // Create call_stack first since it's the first tuple element
    call_stack = PyList_New(0);
    if (call_stack == NULL) {
        set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create call stack list");
        goto error;
    }

    // Create task name/address for second tuple element
    tn = PyLong_FromUnsignedLongLong(task_address);
    if (tn == NULL) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to create task name/address");
        goto error;
    }

    // Parse coroutine chain
    if (_Py_RemoteDebug_PagedReadRemoteMemory(&unwinder->handle, task_address,
                                              (size_t)unwinder->async_debug_offsets.asyncio_task_object.size,
                                              task_obj) < 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read task object for coro chain");
        goto error;
    }

    coro_addr = GET_MEMBER_NO_TAG(uintptr_t, task_obj, unwinder->async_debug_offsets.asyncio_task_object.task_coro);

    if ((void*)coro_addr != NULL) {
        if (parse_coro_chain(unwinder, coro_addr, call_stack) < 0) {
            set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to parse coroutine chain");
            goto error;
        }

        // Chain is parsed innermost-first; reverse it in place.
        if (PyList_Reverse(call_stack)) {
            set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to reverse call stack");
            goto error;
        }
    }

    // Create final CoroInfo result
    RemoteDebuggingState *state = RemoteDebugging_GetStateFromObject((PyObject*)unwinder);
    result = PyStructSequence_New(state->CoroInfo_Type);
    if (result == NULL) {
        set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create CoroInfo");
        goto error;
    }

    // PyStructSequence_SetItem steals references, so we don't need to DECREF on success
    PyStructSequence_SetItem(result, 0, call_stack);  // This steals the reference
    PyStructSequence_SetItem(result, 1, tn);  // This steals the reference

    return result;

error:
    Py_XDECREF(result);
    Py_XDECREF(call_stack);
    Py_XDECREF(tn);
    return NULL;
}
/*
 * Append one CoroInfo entry describing the object at task_address to
 * render_to.
 *
 * If the object's is_task flag is set, the full coroutine chain is
 * rendered via create_task_result(); otherwise an empty CoroInfo carrying
 * only the address is appended.  Returns 0 on success, -1 with an
 * exception set on failure.
 */
int
parse_task(
RemoteUnwinderObject *unwinder,
uintptr_t task_address,
PyObject *render_to
) {
char is_task;
PyObject* result = NULL;
int err;
err = read_char(
unwinder,
task_address + (uintptr_t)unwinder->async_debug_offsets.asyncio_task_object.task_is_task,
&is_task);
if (err) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read is_task flag");
goto error;
}
if (is_task) {
result = create_task_result(unwinder, task_address);
if (!result) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to create task result");
goto error;
}
} else {
// Create an empty CoroInfo for non-task objects
RemoteDebuggingState *state = RemoteDebugging_GetStateFromObject((PyObject*)unwinder);
result = PyStructSequence_New(state->CoroInfo_Type);
if (result == NULL) {
set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create empty CoroInfo");
goto error;
}
PyObject *empty_list = PyList_New(0);
if (empty_list == NULL) {
set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create empty list");
goto error;
}
PyObject *task_name = PyLong_FromUnsignedLongLong(task_address);
if (task_name == NULL) {
// empty_list is not yet owned by result, so release it here.
Py_DECREF(empty_list);
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to create task name");
goto error;
}
PyStructSequence_SetItem(result, 0, empty_list); // This steals the reference
PyStructSequence_SetItem(result, 1, task_name); // This steals the reference
}
if (PyList_Append(render_to, result)) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to append task result to render list");
goto error;
}
Py_DECREF(result);
return 0;
error:
Py_XDECREF(result);
return -1;
}
/* ============================================================================
* TASK AWAITED_BY PROCESSING
* ============================================================================ */
// Forward declaration for mutual recursion
static int process_waiter_task(RemoteUnwinderObject *unwinder, uintptr_t key_addr, void *context);
// set_entry_processor_func adapter: renders the task at key_addr into the
// result list passed through the opaque context pointer.
static int
process_task_parser(
    RemoteUnwinderObject *unwinder,
    uintptr_t key_addr,
    void *context
)
{
    return parse_task(unwinder, key_addr, (PyObject *)context);
}
// Append one CoroInfo entry to *awaited_by* for every task that is
// currently waiting on the task at task_address.
static int
parse_task_awaited_by(
    RemoteUnwinderObject *unwinder,
    uintptr_t task_address,
    PyObject *awaited_by
)
{
    return process_task_awaited_by(
        unwinder, task_address, process_task_parser, (void *)awaited_by);
}
/*
 * Invoke *processor* once for every task registered as waiting on the
 * task at task_address.
 *
 * The remote task's awaited_by field is either a set (iterated entry by
 * entry) or a single task pointer; task_awaited_by_is_set tells which.
 * Returns 0 on success (including "nobody is waiting"), -1 on failure.
 */
int
process_task_awaited_by(
RemoteUnwinderObject *unwinder,
uintptr_t task_address,
set_entry_processor_func processor,
void *context
) {
// Read the entire TaskObj at once
char task_obj[SIZEOF_TASK_OBJ];
if (_Py_RemoteDebug_PagedReadRemoteMemory(&unwinder->handle, task_address,
(size_t)unwinder->async_debug_offsets.asyncio_task_object.size,
task_obj) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read task object");
return -1;
}
uintptr_t task_ab_addr = GET_MEMBER_NO_TAG(uintptr_t, task_obj, unwinder->async_debug_offsets.asyncio_task_object.task_awaited_by);
if ((void*)task_ab_addr == NULL) {
return 0; // No tasks waiting for this one
}
char awaited_by_is_a_set = GET_MEMBER(char, task_obj, unwinder->async_debug_offsets.asyncio_task_object.task_awaited_by_is_set);
if (awaited_by_is_a_set) {
return iterate_set_entries(unwinder, task_ab_addr, processor, context);
} else {
// Single task waiting
return processor(unwinder, task_ab_addr, context);
}
}
/*
 * Render one task as a TaskInfo struct sequence --
 * (task_id, task_name, coroutine_stack, awaited_by) -- and append it to
 * *result*.
 *
 * On success, *task_info (if non-NULL) receives a BORROWED reference to
 * the appended TaskInfo; it stays alive only because the result list
 * holds it.  Returns 0 on success, -1 with an exception set on failure.
 */
int
process_single_task_node(
RemoteUnwinderObject *unwinder,
uintptr_t task_addr,
PyObject **task_info,
PyObject *result
) {
PyObject *tn = NULL;
PyObject *current_awaited_by = NULL;
PyObject *task_id = NULL;
PyObject *result_item = NULL;
PyObject *coroutine_stack = NULL;
tn = parse_task_name(unwinder, task_addr);
if (tn == NULL) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to parse task name in single task node");
goto error;
}
current_awaited_by = PyList_New(0);
if (current_awaited_by == NULL) {
set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create awaited_by list in single task node");
goto error;
}
// Extract the coroutine stack for this task
coroutine_stack = PyList_New(0);
if (coroutine_stack == NULL) {
set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create coroutine stack list in single task node");
goto error;
}
if (parse_task(unwinder, task_addr, coroutine_stack) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to parse task coroutine stack in single task node");
goto error;
}
task_id = PyLong_FromUnsignedLongLong(task_addr);
if (task_id == NULL) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to create task ID in single task node");
goto error;
}
RemoteDebuggingState *state = RemoteDebugging_GetStateFromObject((PyObject*)unwinder);
result_item = PyStructSequence_New(state->TaskInfo_Type);
if (result_item == NULL) {
set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create TaskInfo in single task node");
goto error;
}
PyStructSequence_SetItem(result_item, 0, task_id); // steals ref
PyStructSequence_SetItem(result_item, 1, tn); // steals ref
PyStructSequence_SetItem(result_item, 2, coroutine_stack); // steals ref
PyStructSequence_SetItem(result_item, 3, current_awaited_by); // steals ref
// References transferred to tuple
task_id = NULL;
tn = NULL;
coroutine_stack = NULL;
current_awaited_by = NULL;
if (PyList_Append(result, result_item)) {
Py_DECREF(result_item);
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to append result item in single task node");
return -1;
}
// From here on result_item is a borrowed reference, kept alive by the
// result list that now owns it.
if (task_info != NULL) {
*task_info = result_item;
}
Py_DECREF(result_item);
// Get back current_awaited_by reference for parse_task_awaited_by
// (borrowed from result_item, which the result list keeps alive).
current_awaited_by = PyStructSequence_GetItem(result_item, 3);
if (parse_task_awaited_by(unwinder, task_addr, current_awaited_by) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to parse awaited_by in single task node");
// No cleanup needed here since all references were transferred to result_item
// and result_item was already added to result list and decreffed
return -1;
}
return 0;
error:
Py_XDECREF(tn);
Py_XDECREF(current_awaited_by);
Py_XDECREF(task_id);
Py_XDECREF(result_item);
Py_XDECREF(coroutine_stack);
return -1;
}
// Render the task at task_addr into *result*, then recursively render
// every task that is waiting on it (mutual recursion through
// process_waiter_task).  Returns 0 on success, -1 on failure.
int
process_task_and_waiters(
    RemoteUnwinderObject *unwinder,
    uintptr_t task_addr,
    PyObject *result
)
{
    int rc = process_single_task_node(unwinder, task_addr, NULL, result);
    if (rc == 0) {
        // Walk the awaited_by set of this task and recurse into each waiter.
        rc = process_task_awaited_by(unwinder, task_addr,
                                     process_waiter_task, result);
    }
    return rc;
}
// set_entry_processor_func adapter used while walking awaited_by sets:
// recurses into process_task_and_waiters for each waiter address found.
static int
process_waiter_task(
    RemoteUnwinderObject *unwinder,
    uintptr_t key_addr,
    void *context
)
{
    return process_task_and_waiters(unwinder, key_addr, (PyObject *)context);
}
/* ============================================================================
* RUNNING TASK FUNCTIONS
* ============================================================================ */
// Locate the asyncio task currently running in the given remote thread
// state.  On success *running_task_addr holds the task address, or 0 when
// no event loop is running in that thread.  Returns 0 on success, -1 with
// an exception set on failure.
int
find_running_task_in_thread(
    RemoteUnwinderObject *unwinder,
    uintptr_t thread_state_addr,
    uintptr_t *running_task_addr
)
{
    *running_task_addr = (uintptr_t)NULL;

    uintptr_t loop_addr;
    if (read_py_ptr(
            unwinder,
            thread_state_addr
                + (uintptr_t)unwinder->async_debug_offsets.asyncio_thread_state.asyncio_running_loop,
            &loop_addr) == -1) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read running loop address");
        return -1;
    }

    // A NULL loop pointer means no asyncio loop is running right now.
    if (loop_addr == 0) {
        return 0;
    }

    if (read_ptr(
            unwinder,
            thread_state_addr
                + (uintptr_t)unwinder->async_debug_offsets.asyncio_thread_state.asyncio_running_task,
            running_task_addr) != 0) {
        set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read running task address");
        return -1;
    }
    return 0;
}
/*
 * Resolve the address of the code object for the coroutine that the task
 * at task_addr is running, writing it to *code_obj_addr.
 *
 * Returns 0 on success, -1 with an exception set when either the coro
 * pointer or the code object pointer is unreadable or NULL.
 */
int
get_task_code_object(RemoteUnwinderObject *unwinder, uintptr_t task_addr, uintptr_t *code_obj_addr) {
uintptr_t running_coro_addr = 0;
if(read_py_ptr(
unwinder,
task_addr + (uintptr_t)unwinder->async_debug_offsets.asyncio_task_object.task_coro,
&running_coro_addr) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Running task coro read failed");
return -1;
}
if (running_coro_addr == 0) {
PyErr_SetString(PyExc_RuntimeError, "Running task coro is NULL");
set_exception_cause(unwinder, PyExc_RuntimeError, "Running task coro address is NULL");
return -1;
}
// note: genobject's gi_iframe is an embedded struct so the address to
// the offset leads directly to its first field: f_executable
if (read_py_ptr(
unwinder,
running_coro_addr + (uintptr_t)unwinder->debug_offsets.gen_object.gi_iframe, code_obj_addr) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read running task code object");
return -1;
}
if (*code_obj_addr == 0) {
PyErr_SetString(PyExc_RuntimeError, "Running task code object is NULL");
set_exception_cause(unwinder, PyExc_RuntimeError, "Running task code object address is NULL");
return -1;
}
return 0;
}
/* ============================================================================
* ASYNC FRAME CHAIN PARSING
* ============================================================================ */
/*
 * Walk the live frame chain of the remote thread and append each frame's
 * info to *calls*, stopping after appending the frame whose code object
 * matches running_task_code_obj (the running task's entry frame).
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
int
parse_async_frame_chain(
RemoteUnwinderObject *unwinder,
PyObject *calls,
uintptr_t address_of_thread,
uintptr_t running_task_code_obj
) {
uintptr_t address_of_current_frame;
if (find_running_frame(unwinder, address_of_thread, &address_of_current_frame) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Running frame search failed in async chain");
return -1;
}
while ((void*)address_of_current_frame != NULL) {
PyObject* frame_info = NULL;
uintptr_t address_of_code_object;
// parse_frame_object also advances address_of_current_frame to the
// previous frame via its last out-parameter.
int res = parse_frame_object(
unwinder,
&frame_info,
address_of_current_frame,
&address_of_code_object,
&address_of_current_frame
);
if (res < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Async frame object parsing failed in chain");
return -1;
}
// NOTE(review): this `continue` assumes parse_frame_object updated
// address_of_current_frame even when it produced no frame_info;
// otherwise the loop would not terminate -- confirm against
// parse_frame_object.
if (!frame_info) {
continue;
}
if (PyList_Append(calls, frame_info) == -1) {
Py_DECREF(frame_info);
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to append frame info to async chain");
return -1;
}
Py_DECREF(frame_info);
// Stop once the running task's own code object has been reached.
if (address_of_code_object == running_task_code_obj) {
break;
}
}
return 0;
}
/* ============================================================================
* AWAITED BY PARSING FUNCTIONS
* ============================================================================ */
/*
 * Walk the circular linked list of tasks anchored at head_addr in the
 * remote process and render each task into *result* via
 * process_single_task_node.
 *
 * Iteration is bounded by MAX_ITERATIONS so that corrupted or torn remote
 * memory cannot hang the caller.  Returns 0 on success, -1 on failure.
 */
static int
append_awaited_by_for_thread(
RemoteUnwinderObject *unwinder,
uintptr_t head_addr,
PyObject *result
) {
char task_node[SIZEOF_LLIST_NODE];
if (_Py_RemoteDebug_PagedReadRemoteMemory(&unwinder->handle, head_addr,
sizeof(task_node), task_node) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read task node head");
return -1;
}
size_t iteration_count = 0;
const size_t MAX_ITERATIONS = 2 << 15; // A reasonable upper bound
// The list is circular: we are back at the head when next == head_addr.
while (GET_MEMBER(uintptr_t, task_node, unwinder->debug_offsets.llist_node.next) != head_addr) {
if (++iteration_count > MAX_ITERATIONS) {
PyErr_SetString(PyExc_RuntimeError, "Task list appears corrupted");
set_exception_cause(unwinder, PyExc_RuntimeError, "Task list iteration limit exceeded");
return -1;
}
uintptr_t next_node = GET_MEMBER(uintptr_t, task_node, unwinder->debug_offsets.llist_node.next);
if (next_node == 0) {
PyErr_SetString(PyExc_RuntimeError,
"Invalid linked list structure reading remote memory");
set_exception_cause(unwinder, PyExc_RuntimeError, "NULL pointer in task linked list");
return -1;
}
// The llist_node is embedded in the TaskObj at offset task_node;
// subtract it to recover the base address of the task object.
uintptr_t task_addr = next_node
- (uintptr_t)unwinder->async_debug_offsets.asyncio_task_object.task_node;
if (process_single_task_node(unwinder, task_addr, NULL, result) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to process task node in awaited_by");
return -1;
}
// Read next node
if (_Py_RemoteDebug_PagedReadRemoteMemory(
&unwinder->handle,
next_node,
sizeof(task_node),
task_node) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to read next task node in awaited_by");
return -1;
}
}
return 0;
}
/*
 * Append an AwaitedInfo entry -- (thread id, [TaskInfo, ...]) -- for one
 * thread to *result*, then populate its task list from the thread's task
 * linked list anchored at head_addr.
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
int
append_awaited_by(
RemoteUnwinderObject *unwinder,
unsigned long tid,
uintptr_t head_addr,
PyObject *result)
{
PyObject *tid_py = PyLong_FromUnsignedLong(tid);
if (tid_py == NULL) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to create thread ID object");
return -1;
}
PyObject* awaited_by_for_thread = PyList_New(0);
if (awaited_by_for_thread == NULL) {
Py_DECREF(tid_py);
set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create awaited_by thread list");
return -1;
}
RemoteDebuggingState *state = RemoteDebugging_GetStateFromObject((PyObject*)unwinder);
PyObject *result_item = PyStructSequence_New(state->AwaitedInfo_Type);
if (result_item == NULL) {
Py_DECREF(tid_py);
Py_DECREF(awaited_by_for_thread);
set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create AwaitedInfo");
return -1;
}
PyStructSequence_SetItem(result_item, 0, tid_py); // steals ref
PyStructSequence_SetItem(result_item, 1, awaited_by_for_thread); // steals ref
if (PyList_Append(result, result_item)) {
Py_DECREF(result_item);
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to append awaited_by result item");
return -1;
}
Py_DECREF(result_item);
// awaited_by_for_thread is now a borrowed reference: it is owned by
// result_item, which the result list keeps alive.
if (append_awaited_by_for_thread(unwinder, head_addr, awaited_by_for_thread))
{
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to append awaited_by for thread");
return -1;
}
return 0;
}
/* ============================================================================
* THREAD PROCESSOR FUNCTIONS FOR ASYNCIO
* ============================================================================ */
// thread_processor_func: collect the awaited_by task tree rooted at this
// thread's asyncio_tasks_head into the result list passed via *context*.
int
process_thread_for_awaited_by(
    RemoteUnwinderObject *unwinder,
    uintptr_t thread_state_addr,
    unsigned long tid,
    void *context
)
{
    uintptr_t head = thread_state_addr
        + (uintptr_t)unwinder->async_debug_offsets.asyncio_thread_state.asyncio_tasks_head;
    return append_awaited_by(unwinder, tid, head, (PyObject *)context);
}
/*
 * Render the running task of a thread into *result*: first its TaskInfo,
 * then replace the task's coroutine frame list with the real frame chain
 * read from the live thread state, and finally every task transitively
 * waiting on it.
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int
process_running_task_chain(
RemoteUnwinderObject *unwinder,
uintptr_t running_task_addr,
uintptr_t thread_state_addr,
PyObject *result
) {
uintptr_t running_task_code_obj = 0;
if(get_task_code_object(unwinder, running_task_addr, &running_task_code_obj) < 0) {
return -1;
}
// First, add this task to the result
// task_info is a borrowed reference, kept alive by the result list.
PyObject *task_info = NULL;
if (process_single_task_node(unwinder, running_task_addr, &task_info, result) < 0) {
return -1;
}
// Get the chain from the current frame to this task
PyObject *coro_chain = PyStructSequence_GET_ITEM(task_info, 2);
assert(coro_chain != NULL);
// A running task is expected to carry exactly one CoroInfo entry.
if (PyList_GET_SIZE(coro_chain) != 1) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Coro chain is not a single item");
return -1;
}
PyObject *coro_info = PyList_GET_ITEM(coro_chain, 0);
assert(coro_info != NULL);
PyObject *frame_chain = PyStructSequence_GET_ITEM(coro_info, 0);
assert(frame_chain != NULL);
// Clear the coro_chain
if (PyList_Clear(frame_chain) < 0) {
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to clear coroutine chain");
return -1;
}
// Add the chain from the current frame to this task
if (parse_async_frame_chain(unwinder, frame_chain, thread_state_addr, running_task_code_obj) < 0) {
return -1;
}
// Now find all tasks that are waiting for this task and process them
if (process_task_awaited_by(unwinder, running_task_addr, process_waiter_task, result) < 0) {
return -1;
}
return 0;
}
/*
 * thread_processor_func: if the thread has a running asyncio task, build
 * an AwaitedInfo entry -- (tid, [TaskInfo, ...]) -- for it and append it
 * to the result list passed via *context*.  Threads with no running task
 * contribute nothing.
 */
int
process_thread_for_async_stack_trace(
RemoteUnwinderObject *unwinder,
uintptr_t thread_state_addr,
unsigned long tid,
void *context
) {
PyObject *result = (PyObject *)context;
// Find running task in this thread
uintptr_t running_task_addr;
// NOTE(review): a lookup failure is treated as "no task" and returns 0
// here even though find_running_task_in_thread sets an exception --
// confirm the caller tolerates a pending exception on a 0 return.
if (find_running_task_in_thread(unwinder, thread_state_addr, &running_task_addr) < 0) {
return 0;
}
// If we found a running task, process it and its waiters
if ((void*)running_task_addr != NULL) {
PyObject *task_list = PyList_New(0);
if (task_list == NULL) {
set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create task list for thread");
return -1;
}
if (process_running_task_chain(unwinder, running_task_addr, thread_state_addr, task_list) < 0) {
Py_DECREF(task_list);
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to process running task chain");
return -1;
}
// Create AwaitedInfo structure for this thread
PyObject *tid_py = PyLong_FromUnsignedLong(tid);
if (tid_py == NULL) {
Py_DECREF(task_list);
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to create thread ID");
return -1;
}
RemoteDebuggingState *state = RemoteDebugging_GetStateFromObject((PyObject*)unwinder);
PyObject *awaited_info = PyStructSequence_New(state->AwaitedInfo_Type);
if (awaited_info == NULL) {
Py_DECREF(tid_py);
Py_DECREF(task_list);
set_exception_cause(unwinder, PyExc_MemoryError, "Failed to create AwaitedInfo");
return -1;
}
PyStructSequence_SetItem(awaited_info, 0, tid_py); // steals ref
PyStructSequence_SetItem(awaited_info, 1, task_list); // steals ref
if (PyList_Append(result, awaited_info)) {
Py_DECREF(awaited_info);
set_exception_cause(unwinder, PyExc_RuntimeError, "Failed to append AwaitedInfo to result");
return -1;
}
Py_DECREF(awaited_info);
}
return 0;
}
|
c
|
github
|
https://github.com/python/cpython
|
Modules/_remote_debugging/asyncio.c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, Christoph Gohlke
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and metadata can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
SGI, ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG and CCITT compressed image data or EXIF, IPTC, GPS,
and XMP metadata is not implemented.
Only primary info records are read for STK, FluoView, MicroManager, and
NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SGI, SEQ, GEL,
and OME-TIFF, are custom extensions defined by Molecular Devices (Universal
Imaging Corporation), Carl Zeiss MicroImaging, Olympus, Silicon Graphics
International, Media Cybernetics, Molecular Dynamics, and the Open Microscopy
Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.08.24
Requirements
------------
* `CPython 2.7 or 3.4 <http://www.python.org>`_
* `Numpy 1.8.2 <http://www.numpy.org>`_
* `Matplotlib 1.4 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.11.05 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_ (same as fabio.TiffIO)
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats
<https://github.com/CellProfiler/python-bioformats>`_
Acknowledgements
----------------
* Egor Zindy, University of Manchester, for cz_lsm_scan_info specifics.
* Wim Lewis for a bug fix and some read_cz_lsm functions.
* Hadrien Mary for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) Image File Format Description LSM 5/7 Release 6.0 (ZEN 2010).
Carl Zeiss MicroImaging GmbH. BioSciences. May 10, 2011
(5) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(8) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
(9) Tags for TIFF and Related Specifications. Digital Preservation.
http://www.digitalpreservation.gov/formats/content/tiff_tags.shtml
Examples
--------
>>> data = numpy.random.rand(5, 301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> numpy.testing.assert_array_equal(image, data)
>>> with TiffFile('temp.tif') as tif:
... images = tif.asarray()
... for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import tempfile
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as etree
import numpy
try:
import _tifffile
except ImportError:
warnings.warn(
"failed to import the optional _tifffile C extension module.\n"
"Loading of some compressed images will be slow.\n"
"Tifffile.c can be obtained at http://www.lfd.uci.edu/~gohlke/")
__version__ = '2014.08.24'
__docformat__ = 'restructuredtext en'
__all__ = ('imsave', 'imread', 'imshow', 'TiffFile', 'TiffWriter',
'TiffSequence')
def imsave(filename, data, **kwargs):
    """Write image data to TIFF file.

    Refer to the TiffWriter class and member functions for documentation.

    Parameters
    ----------
    filename : str
        Name of file to write.
    data : array_like
        Input image. The last dimensions are assumed to be image depth,
        height, width, and samples.
    kwargs : dict
        Parameters 'byteorder', 'bigtiff', and 'software' are passed to
        the TiffWriter class.
        Parameters 'photometric', 'planarconfig', 'resolution',
        'description', 'compress', 'volume', and 'extratags' are passed to
        the TiffWriter.save function.

    Examples
    --------
    >>> data = numpy.random.rand(2, 5, 3, 301, 219)
    >>> description = u'{"shape": %s}' % str(list(data.shape))
    >>> imsave('temp.tif', data, compress=6,
    ...        extratags=[(270, 's', 0, description, True)])

    Save tiles with compression enabled

    >>> data = numpy.random.rand(400, 300)
    >>> imsave('temp.tif', data, compress=6, tile_width=150, tile_length=100)
    >>> with TiffFile('temp.tif') as tif:
    ...     image = tif.asarray()
    ...     page = tif[0]
    >>> numpy.testing.assert_array_equal(image, data)
    >>> page.tags['tile_width'].value
    150
    >>> page.tags['tile_length'].value
    100

    Save tiles with compression disabled

    >>> data = numpy.random.rand(400, 300)
    >>> imsave('temp.tif', data, compress=0, tile_width=150, tile_length=100)
    >>> with TiffFile('temp.tif') as tif:
    ...     image = tif.asarray()
    ...     page = tif[0]
    >>> numpy.testing.assert_array_equal(image, data)
    >>> page.tags['tile_width'].value
    150
    >>> page.tags['tile_length'].value
    100

    Save tiles with compression enabled, 3 samples per pixel

    >>> data = numpy.random.rand(3, 400, 300)
    >>> imsave('temp.tif', data, compress=6, tile_width=150, tile_length=100)
    >>> with TiffFile('temp.tif') as tif:
    ...     image = tif.asarray()
    ...     page = tif[0]
    >>> numpy.testing.assert_array_equal(image, data)
    >>> page.tags['tile_width'].value
    150
    >>> page.tags['tile_length'].value
    100

    Save colormap

    >>> data = (numpy.random.rand(400, 300)*250).astype(numpy.uint8)
    >>> cmap1ch = [x*256 for x in range(256)]
    >>> cmap = cmap1ch + cmap1ch + cmap1ch
    >>> data_colored = numpy.take(cmap1ch, data)
    >>> data_colored = numpy.dstack((data_colored, data_colored, data_colored))
    >>> data_colored = numpy.swapaxes(numpy.swapaxes(data_colored,0,2),1,2)
    >>> imsave('temp.tif', data, photometric='palette', colormap = cmap)
    >>> with TiffFile('temp.tif') as tif:
    ...     image = tif.asarray()
    ...     page = tif[0]
    >>> numpy.testing.assert_array_equal(image, data_colored)
    >>> numpy.testing.assert_array_equal(page.tags['color_map'].value, cmap)
    """
    # Split kwargs between the TiffWriter constructor and TiffWriter.save.
    # BUGFIX: 'writeshape' must NOT be moved into the constructor kwargs --
    # TiffWriter.__init__ does not accept it, so imsave(..., writeshape=X)
    # used to raise TypeError; it belongs to TiffWriter.save.
    tifargs = {}
    for key in ('byteorder', 'bigtiff', 'software'):
        if key in kwargs:
            tifargs[key] = kwargs.pop(key)
    # Preserve the data shape in the image description unless the caller
    # explicitly opted out.
    kwargs.setdefault('writeshape', True)
    # Files over ~2 GB of pixel data need the BigTIFF container.
    if 'bigtiff' not in tifargs and data.size*data.dtype.itemsize > 2000*2**20:
        tifargs['bigtiff'] = True
    with TiffWriter(filename, **tifargs) as tif:
        tif.save(data, **kwargs)
class TiffWriter(object):
"""Write image data to TIFF file.
TiffWriter instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Examples
--------
>>> data = numpy.random.rand(2, 5, 3, 301, 219)
>>> with TiffWriter('temp.tif', bigtiff=True) as tif:
... for i in range(data.shape[0]):
... tif.save(data[i], compress=6)
"""
TYPES = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
TAGS = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'tile_width': 322, 'tile_length': 323, 'tile_offsets': 324,
'tile_byte_counts': 325, 'extra_samples': 338, 'sample_format': 339,
'image_depth': 32997, 'tile_depth': 32998}
def __init__(self, filename, bigtiff=False, byteorder=None,
             software='tifffile.py'):
    """Open *filename* for writing and emit the TIFF file header.

    Use bigtiff=True when creating files greater than 2 GB.

    Parameters
    ----------
    filename : str
        Name of file to write.
    bigtiff : bool
        If True, the BigTIFF format is used.
    byteorder : {'<', '>'}
        The endianness of the data in the file.
        By default this is the system's native byte order.
    software : str
        Name of the software used to create the image.
        Saved with the first page only.
    """
    if byteorder not in (None, '<', '>'):
        raise ValueError("invalid byteorder %s" % byteorder)
    if byteorder is None:
        byteorder = '<' if sys.byteorder == 'little' else '>'

    self._byteorder = byteorder
    self._software = software

    # Byte-order mark: 'II' for little endian, 'MM' for big endian.
    self._fh = open(filename, 'wb')
    self._fh.write({'<': b'II', '>': b'MM'}[byteorder])

    self._bigtiff = bool(bigtiff)
    if self._bigtiff:
        # BigTIFF: version 43, 8-byte offsets.
        (self._offset_size, self._tag_size, self._numtag_format,
         self._offset_format, self._val_format) = 8, 20, 'Q', 'Q', '8s'
        self._fh.write(struct.pack(byteorder + 'HHH', 43, 8, 0))
    else:
        # Classic TIFF: version 42, 4-byte offsets.
        (self._offset_size, self._tag_size, self._numtag_format,
         self._offset_format, self._val_format) = 4, 12, 'H', 'I', '4s'
        self._fh.write(struct.pack(byteorder + 'H', 42))

    # Remember where the offset to the first IFD lives; a zero placeholder
    # is written for now.
    self._ifd_offset = self._fh.tell()
    self._fh.write(struct.pack(byteorder + self._offset_format, 0))
def save(self, data, photometric=None, planarconfig=None, resolution=None,
description=None, volume=False, writeshape=False, compress=0,
colormap=None, extrasamples_type=1, tile_width=None,
tile_length=None, extratags=()):
"""Write image data to TIFF file.
Image data are written in one stripe per plane.
Dimensions larger than 2 to 4 (depending on photometric mode, planar
configuration, and SGI mode) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
the data type.
Parameters
----------
data : array_like
Input image. The last dimensions are assumed to be image depth,
height, width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb', 'palette'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
Few software can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
colormap : list of uint16's (3 concatenated lists for RGB)
Individual RGB arrays describing the color value for the
corresponding data value. For example, image data with a data
type of unsigned 8-bit integers have 256 possible values (0-255).
So the colormap will have 3*256 values ranging from 0 to
65535 (2**16 - 1).
tile_width : int
If not none, data is stored in tiles of size
(tile_length, tile_width). Only in conjunction with
defined tile_length (default : None)
tile_length : int
If not none, data is stored in tiles of size
(tile_length, tile_width). Only in conjunction with
defined tile_width (default : None)
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
if photometric not in (None, 'minisblack', 'miniswhite', 'rgb', 'palette'):
raise ValueError("invalid photometric %s" % photometric)
if planarconfig not in (None, 'contig', 'planar'):
raise ValueError("invalid planarconfig %s" % planarconfig)
if not 0 <= compress <= 9:
raise ValueError("invalid compression level %s" % compress)
fh = self._fh
byteorder = self._byteorder
numtag_format = self._numtag_format
val_format = self._val_format
offset_format = self._offset_format
offset_size = self._offset_size
tag_size = self._tag_size
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
# enable tile writing if tile width and length specified
if tile_length is not None and tile_width is not None:
write_tiles = 1
else:
write_tiles = 0
# normalize shape of data
samplesperpixel = 1
extrasamples = 0
if volume and data.ndim < 3:
volume = False
if photometric is None:
if planarconfig:
photometric = 'rgb'
elif data.ndim > 2 and shape[-1] in (3, 4):
photometric = 'rgb'
elif volume and data.ndim > 3 and shape[-4] in (3, 4):
photometric = 'rgb'
elif data.ndim > 2 and shape[-3] in (3, 4):
photometric = 'rgb'
else:
photometric = 'minisblack'
if planarconfig and len(shape) <= (3 if volume else 2) and (
photometric != 'palette'):
planarconfig = None
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if len(shape) < 4:
volume = False
if planarconfig is None:
if shape[-1] in (3, 4):
planarconfig = 'contig'
elif shape[-4 if volume else -3] in (3, 4):
planarconfig = 'planar'
elif shape[-1] > shape[-4 if volume else -3]:
planarconfig = 'planar'
else:
planarconfig = 'contig'
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
if samplesperpixel > 3:
extrasamples = samplesperpixel - 3
elif photometric == 'palette':
if len(shape) > 2:
raise ValueError("not a 1-channel image")
samplesperpixel = 1
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
elif planarconfig and len(shape) > (3 if volume else 2):
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
samplesperpixel = data.shape[-1]
else:
data = data.reshape(
(-1,) + shape[(-4 if volume else -3):] + (1,))
samplesperpixel = data.shape[1]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
if len(shape) < 3:
volume = False
if False and (
len(shape) > (3 if volume else 2) and shape[-1] < 5 and
all(shape[-1] < i
for i in shape[(-4 if volume else -3):-1])):
# DISABLED: non-standard TIFF, e.g. (220, 320, 2)
planarconfig = 'contig'
samplesperpixel = shape[-1]
data = data.reshape((-1, 1) + shape[(-4 if volume else -3):])
else:
data = data.reshape(
(-1, 1) + shape[(-3 if volume else -2):] + (1,))
if samplesperpixel == 2:
warnings.warn("writing non-standard TIFF (samplesperpixel 2)")
if volume and (data.shape[-2] % 16 or data.shape[-3] % 16):
warnings.warn("volume width or length are not multiple of 16")
volume = False
data = numpy.swapaxes(data, 1, 2)
data = data.reshape(
(data.shape[0] * data.shape[1],) + data.shape[2:])
# data.shape is now normalized 5D or 6D, depending on volume
# (pages, planar_samples, (depth,) height, width, contig_samples)
assert len(data.shape) in (5, 6)
shape = data.shape
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
if volume or write_tiles:
# use tiles to save volume data or explicitly requests
tag_byte_counts = TiffWriter.TAGS['tile_byte_counts']
tag_offsets = TiffWriter.TAGS['tile_offsets']
else:
# else use strips
tag_byte_counts = TiffWriter.TAGS['strip_byte_counts']
tag_offsets = TiffWriter.TAGS['strip_offsets']
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# Compute ifdentry & ifdvalue bytes from code, dtype, count, value.
# Append (code, ifdentry, ifdvalue, writeonce) to tags list.
code = int(TiffWriter.TAGS.get(code, code))
try:
tifftype = TiffWriter.TYPES[dtype]
except KeyError:
raise ValueError("unknown dtype %s" % dtype)
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format,
pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
def rational(arg, max_denominator=1000000):
# return nominator and denominator from float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
if self._software:
addtag('software', 's', 0, self._software, writeonce=True)
self._software = None # only save to first page
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif writeshape and shape[0] > 1 and shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
if volume:
addtag('image_depth', 'I', 1, shape[-4])
addtag('tile_depth', 'I', 1, shape[-4])
addtag('tile_width', 'I', 1, shape[-2])
addtag('tile_length', 'I', 1, shape[-3])
elif write_tiles:
addtag('tile_width', 'I', 1, tile_width)
addtag('tile_length', 'I', 1, tile_length)
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
# addtag('sample_format', 'H', 1,
# {'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('sample_format', 'H', samplesperpixel,
({'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind],) * samplesperpixel)
addtag('photometric', 'H', 1,
{'miniswhite': 0,
'minisblack': 1,
'rgb': 2,
'palette': 3}[photometric])
if photometric == 'palette':
if colormap == None:
raise ValueError(
"photometric 'palette' specified but colormap missing")
else:
addtag('color_map', 'H',
3 * (2 ** (data.dtype.itemsize * 8 * samplesperpixel)),
colormap)
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig and samplesperpixel > 1:
addtag('planar_configuration', 'H', 1, 1
if planarconfig == 'contig' else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb' and extrasamples == 1:
addtag('extra_samples', 'H', 1, extrasamples_type) # associated alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0,) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
if not write_tiles:
addtag('rows_per_strip', 'I', 1,
shape[-3] * (shape[-4] if volume else 1))
if write_tiles:
# use multiple tiles per plane
tiles_x = (shape[3] + tile_width - 1) // tile_width
tiles_y = (shape[2] + tile_length - 1) // tile_length
strip_byte_counts = \
(tile_width * tile_length * shape[-1] * data.dtype.itemsize,) \
* shape[1] * tiles_x * tiles_y
else:
# use one strip or tile per plane
tiles_x = tiles_y = 1
strip_byte_counts = \
(data[0, 0].size * data.dtype.itemsize,) * shape[1]
addtag(tag_byte_counts,
offset_format, shape[1] * tiles_x * tiles_y, strip_byte_counts)
addtag(tag_offsets,
offset_format, shape[1] * tiles_x * tiles_y,
(0, ) * shape[1] * tiles_x * tiles_y)
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
if not self._bigtiff and (fh.tell() + data.size*data.dtype.itemsize
> 2**31-1):
raise ValueError("data too large for non-bigtiff file")
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = fh.tell()
fh.seek(self._ifd_offset)
fh.write(pack(offset_format, pos))
fh.seek(pos)
# write ifdentries
fh.write(pack(numtag_format, len(tags)))
tag_offset = fh.tell()
fh.write(b''.join(t[1] for t in tags))
self._ifd_offset = fh.tell()
fh.write(pack(offset_format, 0)) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = fh.tell()
fh.seek(tag_offset + tagindex*tag_size + offset_size + 4)
fh.write(pack(offset_format, pos))
fh.seek(pos)
if tag[0] == tag_offsets:
strip_offsets_offset = pos
elif tag[0] == tag_byte_counts:
strip_byte_counts_offset = pos
fh.write(tag[2])
# write image data
data_offset = fh.tell()
if write_tiles:
# multiple tiles per page
if compress:
# reset and use compress sizes
strip_byte_counts = []
for plane in data[pageindex]:
for ty in xrange(0, tiles_y):
for tx in xrange(0, tiles_x):
# allocate fixed size tile filled with zeros
tile = numpy.zeros((tile_width * tile_length,
shape[-1]), data.dtype)
# clipping right and bottom if necessary
# tile length filled with image data
itl = min(tile_length,
shape[2] - ty*tile_length)
# tile width filled with image data
itw = min(tile_width,
shape[3] - tx*tile_width)
ioffs = tx*tile_width
for tl in xrange(0, itl):
# copy data to tile line
ir = ty*tile_length+tl
tile[tl*tile_width:tl*tile_width+itw] \
= plane[ir, ioffs:ioffs+itw]
if compress:
tile = zlib.compress(tile, compress)
strip_byte_counts.append(len(tile))
fh.write(tile)
else:
tile.tofile(fh)
fh.flush()
else:
# one strip/tile per page
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
# if this fails try update Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip and tile offsets and byte_counts if necessary
pos = fh.tell()
for tagindex, tag in enumerate(tags):
if tag[0] == tag_offsets: # strip or tile offsets
if tag[2]:
fh.seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
fh.write(pack(offset_format, strip_offset))
strip_offset += size
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, data_offset))
elif tag[0] == tag_byte_counts: # strip or tile byte_counts
if compress:
if tag[2]:
fh.seek(strip_byte_counts_offset)
for size in strip_byte_counts:
fh.write(pack(offset_format, size))
else:
fh.seek(tag_offset + tagindex*tag_size +
offset_size + 4)
fh.write(pack(offset_format, strip_byte_counts[0]))
break
fh.seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
def close(self):
self._fh.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def imread(files, **kwargs):
    """Return image data from TIFF file(s) as numpy array.
    The first image series is returned if no arguments are provided.
    Parameters
    ----------
    files : str or list
        File name, glob pattern, or list of file names.
    key : int, slice, or sequence of page indices
        Defines which pages to return as array.
    series : int
        Defines which series of pages in file to return as array.
    multifile : bool
        If True (default), OME-TIFF data may include pages from multiple files.
    pattern : str
        Regular expression pattern that matches axes names and indices in
        file names.
    kwargs : dict
        Additional parameters passed to the TiffFile or TiffSequence asarray
        function.
    Examples
    --------
    >>> im = imread('test.tif', key=0)
    >>> im.shape
    (256, 256, 4)
    >>> ims = imread(['test.tif', 'test.tif'])
    >>> ims.shape
    (2, 256, 256, 4)
    """
    # Route keyword arguments to their consumers: 'multifile' belongs to
    # TiffFile, 'pattern' to TiffSequence, everything else to asarray().
    # dict.pop with a default replaces the membership-test/del dance.
    kwargs_file = {'multifile': kwargs.pop('multifile', True)}
    kwargs_seq = {}
    if 'pattern' in kwargs:
        kwargs_seq['pattern'] = kwargs.pop('pattern')
    if isinstance(files, basestring) and any(i in files for i in '?*'):
        files = glob.glob(files)
        if not files:
            raise ValueError('no files found')
        if len(files) == 1:
            files = files[0]
    if isinstance(files, basestring):
        with TiffFile(files, **kwargs_file) as tif:
            return tif.asarray(**kwargs)
    else:
        with TiffSequence(files, **kwargs_seq) as imseq:
            return imseq.asarray(**kwargs)
class lazyattr(object):
    """Descriptor computing an attribute value on first access.

    The computed result replaces the descriptor in the instance
    dictionary, so every later lookup is a plain attribute read.
    """
    __slots__ = ('func', )

    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        # Accessed on the class itself: expose the descriptor object.
        if instance is None:
            return self
        result = self.func(instance)
        if result is NotImplemented:
            # Defer to an implementation further up the MRO.
            return getattr(super(owner, instance), self.func.__name__)
        # Cache the value: shadows this non-data descriptor from now on.
        setattr(instance, self.func.__name__, result)
        return result
class TiffFile(object):
    """Read image and metadata from TIFF, STK, LSM, and FluoView files.
    TiffFile instances must be closed using the close method, which is
    automatically called when using the 'with' statement.
    Attributes
    ----------
    pages : list
        All TIFF pages in file.
    series : list of Records(shape, dtype, axes, TiffPages)
        TIFF pages with compatible shapes and types.
    micromanager_metadata: dict
        Extra MicroManager non-TIFF metadata in the file, if exists.
    All attributes are read-only.
    Examples
    --------
    >>> with TiffFile('test.tif') as tif:
    ...     data = tif.asarray()
    ...     data.shape
    (256, 256, 4)
    """
    def __init__(self, arg, name=None, offset=None, size=None,
                 multifile=True, multifile_close=True):
        """Initialize instance from file.
        Parameters
        ----------
        arg : str or open file
            Name of file or open file object.
            The file objects are closed in TiffFile.close().
        name : str
            Optional name of file in case 'arg' is a file handle.
        offset : int
            Optional start position of embedded file. By default this is
            the current file position.
        size : int
            Optional size of embedded file. By default this is the number
            of bytes from the 'offset' to the end of the file.
        multifile : bool
            If True (default), series may include pages from multiple files.
            Currently applies to OME-TIFF only.
        multifile_close : bool
            If True (default), keep the handles of other files in multifile
            series closed. This is inefficient when few files refer to
            many pages. If False, the C runtime may run out of resources.
        """
        self._fh = FileHandle(arg, name=name, offset=offset, size=size)
        self.offset_size = None
        self.pages = []
        self._multifile = bool(multifile)
        self._multifile_close = bool(multifile_close)
        self._files = {self._fh.name: self}  # cache of TiffFiles
        try:
            self._fromfile()
        except Exception:
            # release the handle if header/page parsing fails, then re-raise
            self._fh.close()
            raise
    @property
    def filehandle(self):
        """Return file handle."""
        return self._fh
    @property
    def filename(self):
        """Return name of file handle."""
        return self._fh.name
    def close(self):
        """Close open file handle(s)."""
        # closes every file opened for a multifile series, not just this one
        for tif in self._files.values():
            tif._fh.close()
        self._files = {}
    def _fromfile(self):
        """Read TIFF header and all page records from file."""
        self._fh.seek(0)
        try:
            # first two header bytes select byte order: b'II' little-endian,
            # b'MM' big-endian
            self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
        except KeyError:
            raise ValueError("not a valid TIFF file")
        version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
        if version == 43:  # BigTiff
            # BigTIFF header continues with the offset size (must be 8)
            # followed by a reserved word that must be zero
            self.offset_size, zero = struct.unpack(self.byteorder+'HH',
                                                   self._fh.read(4))
            if zero or self.offset_size != 8:
                raise ValueError("not a valid BigTIFF file")
        elif version == 42:
            self.offset_size = 4
        else:
            raise ValueError("not a TIFF file")
        self.pages = []
        while True:
            try:
                page = TiffPage(self)
                self.pages.append(page)
            except StopIteration:
                # TiffPage raises StopIteration when the next-IFD offset is 0
                break
        if not self.pages:
            raise ValueError("empty TIFF file")
        if self.is_micromanager:
            # MicroManager files contain metadata not stored in TIFF tags.
            self.micromanager_metadata = read_micromanager_metadata(self._fh)
        if self.is_lsm:
            self._fix_lsm_strip_offsets()
            self._fix_lsm_strip_byte_counts()
    def _fix_lsm_strip_offsets(self):
        """Unwrap strip offsets for LSM files greater than 4 GB."""
        for series in self.series:
            wrap = 0
            previous_offset = 0
            for page in series.pages:
                strip_offsets = []
                for current_offset in page.strip_offsets:
                    if current_offset < previous_offset:
                        # 32-bit offset wrapped around; add another 4 GB
                        wrap += 2**32
                    strip_offsets.append(current_offset + wrap)
                    previous_offset = current_offset
                page.strip_offsets = tuple(strip_offsets)
    def _fix_lsm_strip_byte_counts(self):
        """Set strip_byte_counts to size of compressed data.
        The strip_byte_counts tag in LSM files contains the number of bytes
        for the uncompressed data.
        """
        if not self.pages:
            return
        strips = {}
        for page in self.pages:
            assert len(page.strip_offsets) == len(page.strip_byte_counts)
            for offset, bytecount in zip(page.strip_offsets,
                                         page.strip_byte_counts):
                strips[offset] = bytecount
        offsets = sorted(strips.keys())
        # append an end-of-data sentinel, clipped to the file size
        offsets.append(min(offsets[-1] + strips[offsets[-1]], self._fh.size))
        for i, offset in enumerate(offsets[:-1]):
            # a compressed strip can be no larger than the gap to the next one
            strips[offset] = min(strips[offset], offsets[i+1] - offset)
        for page in self.pages:
            if page.compression:
                page.strip_byte_counts = tuple(
                    strips[offset] for offset in page.strip_offsets)
    @lazyattr
    def series(self):
        """Return series of TiffPage with compatible shape and properties."""
        if not self.pages:
            return []
        series = []
        page0 = self.pages[0]
        if self.is_ome:
            series = self._omeseries()
        elif self.is_fluoview:
            # map FluoView dimension names to single-letter axes codes
            dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
                    b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
                    b'EVENT': 'V', b'EXPOSURE': 'L'}
            mmhd = list(reversed(page0.mm_header.dimensions))
            series = [Record(
                axes=''.join(dims.get(i[0].strip().upper(), 'Q')
                             for i in mmhd if i[1] > 1),
                shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
                pages=self.pages, dtype=numpy.dtype(page0.dtype))]
        elif self.is_lsm:
            lsmi = page0.cz_lsm_info
            axes = CZ_SCAN_TYPES[lsmi.scan_type]
            if page0.is_rgb:
                axes = axes.replace('C', '').replace('XY', 'XYC')
            axes = axes[::-1]
            shape = tuple(getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes)
            pages = [p for p in self.pages if not p.is_reduced]
            series = [Record(axes=axes, shape=shape, pages=pages,
                             dtype=numpy.dtype(pages[0].dtype))]
            if len(pages) != len(self.pages):  # reduced RGB pages
                pages = [p for p in self.pages if p.is_reduced]
                cp = 1
                i = 0
                # count how many leading axes the reduced pages span
                while cp < len(pages) and i < len(shape)-2:
                    cp *= shape[i]
                    i += 1
                shape = shape[:i] + pages[0].shape
                axes = axes[:i] + 'CYX'
                series.append(Record(axes=axes, shape=shape, pages=pages,
                                     dtype=numpy.dtype(pages[0].dtype)))
        elif self.is_imagej:
            shape = []
            axes = []
            ij = page0.imagej_tags
            if 'frames' in ij:
                shape.append(ij['frames'])
                axes.append('T')
            if 'slices' in ij:
                shape.append(ij['slices'])
                axes.append('Z')
            if 'channels' in ij and not self.is_rgb:
                shape.append(ij['channels'])
                axes.append('C')
            # pages not accounted for by the ImageJ metadata
            remain = len(self.pages) // (product(shape) if shape else 1)
            if remain > 1:
                shape.append(remain)
                axes.append('I')
            shape.extend(page0.shape)
            axes.extend(page0.axes)
            axes = ''.join(axes)
            series = [Record(pages=self.pages, shape=tuple(shape), axes=axes,
                             dtype=numpy.dtype(page0.dtype))]
        elif self.is_nih:
            if len(self.pages) == 1:
                shape = page0.shape
                axes = page0.axes
            else:
                shape = (len(self.pages),) + page0.shape
                axes = 'I' + page0.axes
            series = [Record(pages=self.pages, shape=shape, axes=axes,
                             dtype=numpy.dtype(page0.dtype))]
        elif page0.is_shaped:
            # TODO: shaped files can contain multiple series
            # parse the "shape=(...)" prefix written by TiffWriter
            shape = page0.tags['image_description'].value[7:-1]
            shape = tuple(int(i) for i in shape.split(b','))
            series = [Record(pages=self.pages, shape=shape,
                             axes='Q' * len(shape),
                             dtype=numpy.dtype(page0.dtype))]
        # generic detection of series
        if not series:
            # group pages by (shape, axes, decodable-compression) signature
            shapes = []
            pages = {}
            for page in self.pages:
                if not page.shape:
                    continue
                shape = page.shape + (page.axes,
                                      page.compression in TIFF_DECOMPESSORS)
                if shape not in pages:
                    shapes.append(shape)
                    pages[shape] = [page]
                else:
                    pages[shape].append(page)
            series = [Record(pages=pages[s],
                             axes=(('I' + s[-2])
                                   if len(pages[s]) > 1 else s[-2]),
                             dtype=numpy.dtype(pages[s][0].dtype),
                             shape=((len(pages[s]), ) + s[:-2]
                                    if len(pages[s]) > 1 else s[:-2]))
                      for s in shapes]
        # remove empty series, e.g. in MD Gel files
        series = [s for s in series if sum(s.shape) > 0]
        return series
    def asarray(self, key=None, series=None, memmap=False):
        """Return image data from multiple TIFF pages as numpy array.
        By default the first image series is returned.
        Parameters
        ----------
        key : int, slice, or sequence of page indices
            Defines which pages to return as array.
        series : int
            Defines which series of pages to return as array.
        memmap : bool
            If True, return an array stored in a binary file on disk
            if possible.
        """
        if key is None and series is None:
            series = 0
        if series is not None:
            pages = self.series[series].pages
        else:
            pages = self.pages
        if key is None:
            pass
        elif isinstance(key, int):
            pages = [pages[key]]
        elif isinstance(key, slice):
            pages = pages[key]
        # NOTE(review): collections.Iterable was removed in Python 3.10;
        # collections.abc.Iterable is the forward-compatible spelling —
        # confirm the targeted Python versions.
        elif isinstance(key, collections.Iterable):
            pages = [pages[k] for k in key]
        else:
            raise TypeError("key must be an int, slice, or sequence")
        if not len(pages):
            raise ValueError("no pages selected")
        if self.is_nih:
            if pages[0].is_palette:
                # apply the NIH color map across all pages at once
                result = stack_pages(pages, colormapped=False, squeeze=False)
                result = numpy.take(pages[0].color_map, result, axis=1)
                result = numpy.swapaxes(result, 0, 1)
            else:
                result = stack_pages(pages, memmap=memmap,
                                     colormapped=False, squeeze=False)
        elif len(pages) == 1:
            return pages[0].asarray(memmap=memmap)
        elif self.is_ome:
            assert not self.is_palette, "color mapping disabled for ome-tiff"
            if any(p is None for p in pages):
                # zero out missing pages
                firstpage = next(p for p in pages if p)
                nopage = numpy.zeros_like(
                    firstpage.asarray(memmap=False))
            s = self.series[series]
            if memmap:
                # NOTE(review): the temporary file is deleted when this
                # 'with' block exits while the memmap is still in use —
                # verify this is intentional and portable.
                with tempfile.NamedTemporaryFile() as fh:
                    result = numpy.memmap(fh, dtype=s.dtype, shape=s.shape)
                    result = result.reshape(-1)
            else:
                result = numpy.empty(s.shape, s.dtype).reshape(-1)
            index = 0
            class KeepOpen:
                # keep Tiff files open between consecutive pages
                def __init__(self, parent, close):
                    self.master = parent
                    self.parent = parent
                    self._close = close
                def open(self, page):
                    if self._close and page and page.parent != self.parent:
                        if self.parent != self.master:
                            self.parent.filehandle.close()
                        self.parent = page.parent
                        self.parent.filehandle.open()
                def close(self):
                    if self._close and self.parent != self.master:
                        self.parent.filehandle.close()
            keep = KeepOpen(self, self._multifile_close)
            for page in pages:
                keep.open(page)
                if page:
                    a = page.asarray(memmap=False, colormapped=False,
                                     reopen=False)
                else:
                    # substitute zeros for a missing page
                    a = nopage
                try:
                    result[index:index + a.size] = a.reshape(-1)
                except ValueError as e:
                    warnings.warn("ome-tiff: %s" % e)
                    break
                index += a.size
            keep.close()
        else:
            result = stack_pages(pages, memmap=memmap)
        if key is None:
            try:
                result.shape = self.series[series].shape
            except ValueError:
                try:
                    warnings.warn("failed to reshape %s to %s" % (
                        result.shape, self.series[series].shape))
                    # try series of expected shapes
                    result.shape = (-1,) + self.series[series].shape
                except ValueError:
                    # revert to generic shape
                    result.shape = (-1,) + pages[0].shape
        else:
            result.shape = (-1,) + pages[0].shape
        return result
    def _omeseries(self):
        """Return image series in OME-TIFF file(s)."""
        root = etree.fromstring(self.pages[0].tags['image_description'].value)
        uuid = root.attrib.get('UUID', None)
        self._files = {uuid: self}
        dirname = self._fh.dirname
        modulo = {}
        result = []
        for element in root:
            if element.tag.endswith('BinaryOnly'):
                warnings.warn("ome-xml: not an ome-tiff master file")
                break
            if element.tag.endswith('StructuredAnnotations'):
                # collect OME 'modulo' annotations: extra axes folded into
                # an existing dimension
                for annot in element:
                    if not annot.attrib.get('Namespace',
                                            '').endswith('modulo'):
                        continue
                    for value in annot:
                        for modul in value:
                            for along in modul:
                                if not along.tag[:-1].endswith('Along'):
                                    continue
                                axis = along.tag[-1]
                                newaxis = along.attrib.get('Type', 'other')
                                newaxis = AXES_LABELS[newaxis]
                                if 'Start' in along.attrib:
                                    labels = range(
                                        int(along.attrib['Start']),
                                        int(along.attrib['End']) + 1,
                                        int(along.attrib.get('Step', 1)))
                                else:
                                    labels = [label.text for label in along
                                              if label.tag.endswith('Label')]
                                modulo[axis] = (newaxis, labels)
            if not element.tag.endswith('Image'):
                continue
            for pixels in element:
                if not pixels.tag.endswith('Pixels'):
                    continue
                atr = pixels.attrib
                dtype = atr.get('Type', None)
                axes = ''.join(reversed(atr['DimensionOrder']))
                shape = list(int(atr['Size'+ax]) for ax in axes)
                size = product(shape[:-2])
                ifds = [None] * size
                for data in pixels:
                    if not data.tag.endswith('TiffData'):
                        continue
                    atr = data.attrib
                    ifd = int(atr.get('IFD', 0))
                    num = int(atr.get('NumPlanes', 1 if 'IFD' in atr else 0))
                    num = int(atr.get('PlaneCount', num))
                    idx = [int(atr.get('First'+ax, 0)) for ax in axes[:-2]]
                    try:
                        idx = numpy.ravel_multi_index(idx, shape[:-2])
                    except ValueError:
                        # ImageJ produces invalid ome-xml when cropping
                        warnings.warn("ome-xml: invalid TiffData index")
                        continue
                    for uuid in data:
                        if not uuid.tag.endswith('UUID'):
                            continue
                        if uuid.text not in self._files:
                            if not self._multifile:
                                # abort reading multifile OME series
                                # and fall back to generic series
                                return []
                            fname = uuid.attrib['FileName']
                            try:
                                tif = TiffFile(os.path.join(dirname, fname))
                            except (IOError, ValueError):
                                # NOTE(review): 'tif' is unbound when the
                                # TiffFile constructor itself raised, so this
                                # close() can raise NameError or close a file
                                # from a previous iteration — confirm intent.
                                tif.close()
                                warnings.warn(
                                    "ome-xml: failed to read '%s'" % fname)
                                break
                            self._files[uuid.text] = tif
                            if self._multifile_close:
                                tif.close()
                        pages = self._files[uuid.text].pages
                        try:
                            for i in range(num if num else len(pages)):
                                ifds[idx + i] = pages[ifd + i]
                        except IndexError:
                            warnings.warn("ome-xml: index out of range")
                        # only process first uuid
                        break
                    else:
                        pages = self.pages
                        try:
                            for i in range(num if num else len(pages)):
                                ifds[idx + i] = pages[ifd + i]
                        except IndexError:
                            warnings.warn("ome-xml: index out of range")
                if all(i is None for i in ifds):
                    # skip images without data
                    continue
                dtype = next(i for i in ifds if i).dtype
                result.append(Record(axes=axes, shape=shape, pages=ifds,
                                     dtype=numpy.dtype(dtype)))
        for record in result:
            # apply the collected modulo annotations: split or relabel axes
            for axis, (newaxis, labels) in modulo.items():
                i = record.axes.index(axis)
                size = len(labels)
                if record.shape[i] == size:
                    record.axes = record.axes.replace(axis, newaxis, 1)
                else:
                    record.shape[i] //= size
                    record.shape.insert(i+1, size)
                    record.axes = record.axes.replace(axis, axis+newaxis, 1)
            record.shape = tuple(record.shape)
        # squeeze dimensions
        for record in result:
            record.shape, record.axes = squeeze_axes(record.shape, record.axes)
        return result
    def __len__(self):
        """Return number of image pages in file."""
        return len(self.pages)
    def __getitem__(self, key):
        """Return specified page."""
        return self.pages[key]
    def __iter__(self):
        """Return iterator over pages."""
        return iter(self.pages)
    def __str__(self):
        """Return string containing information about file."""
        result = [
            self._fh.name.capitalize(),
            format_size(self._fh.size),
            {'<': 'little endian', '>': 'big endian'}[self.byteorder]]
        if self.is_bigtiff:
            result.append("bigtiff")
        if len(self.pages) > 1:
            result.append("%i pages" % len(self.pages))
        if len(self.series) > 1:
            result.append("%i series" % len(self.series))
        if len(self._files) > 1:
            result.append("%i files" % (len(self._files)))
        return ", ".join(result)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    @lazyattr
    def fstat(self):
        # stat of the underlying OS file, or None for file-like objects
        try:
            return os.fstat(self._fh.fileno())
        except Exception:  # io.UnsupportedOperation
            return None
    @lazyattr
    def is_bigtiff(self):
        return self.offset_size != 4
    @lazyattr
    def is_rgb(self):
        return all(p.is_rgb for p in self.pages)
    @lazyattr
    def is_palette(self):
        return all(p.is_palette for p in self.pages)
    @lazyattr
    def is_mdgel(self):
        return any(p.is_mdgel for p in self.pages)
    @lazyattr
    def is_mediacy(self):
        return any(p.is_mediacy for p in self.pages)
    @lazyattr
    def is_stk(self):
        return all(p.is_stk for p in self.pages)
    @lazyattr
    def is_lsm(self):
        return self.pages[0].is_lsm
    @lazyattr
    def is_imagej(self):
        return self.pages[0].is_imagej
    @lazyattr
    def is_micromanager(self):
        return self.pages[0].is_micromanager
    @lazyattr
    def is_nih(self):
        return self.pages[0].is_nih
    @lazyattr
    def is_fluoview(self):
        return self.pages[0].is_fluoview
    @lazyattr
    def is_ome(self):
        return self.pages[0].is_ome
class TiffPage(object):
"""A TIFF image file directory (IFD).
Attributes
----------
index : int
Index of page in file.
dtype : str {TIFF_SAMPLE_DTYPES}
Data type of image, colormapped if applicable.
shape : tuple
Dimensions of the image array in TIFF page,
colormapped and with one alpha channel if applicable.
axes : str
Axes label codes:
'X' width, 'Y' height, 'S' sample, 'I' image series|page|plane,
'Z' depth, 'C' color|em-wavelength|channel, 'E' ex-wavelength|lambda,
'T' time, 'R' region|tile, 'A' angle, 'P' phase, 'H' lifetime,
'L' exposure, 'V' event, 'Q' unknown, '_' missing
tags : TiffTags
Dictionary of tags in page.
Tag values are also directly accessible as attributes.
color_map : numpy array
Color look up table, if exists.
cz_lsm_scan_info: Record(dict)
LSM scan info attributes, if exists.
imagej_tags: Record(dict)
Consolidated ImageJ description and metadata tags, if exists.
uic_tags: Record(dict)
Consolidated MetaMorph STK/UIC tags, if exists.
All attributes are read-only.
Notes
-----
The internal, normalized '_shape' attribute is 6 dimensional:
0. number planes (stk)
1. planar samples_per_pixel
2. image_depth Z (sgi)
3. image_length Y
4. image_width X
5. contig samples_per_pixel
"""
def __init__(self, parent):
"""Initialize instance from file."""
self.parent = parent
self.index = len(parent.pages)
self.shape = self._shape = ()
self.dtype = self._dtype = None
self.axes = ""
self.tags = TiffTags()
self._fromfile()
self._process_tags()
def _fromfile(self):
"""Read TIFF IFD structure and its tags from file.
File cursor must be at storage position of IFD offset and is left at
offset to next IFD.
Raises StopIteration if offset (first bytes read) is 0.
"""
fh = self.parent.filehandle
byteorder = self.parent.byteorder
offset_size = self.parent.offset_size
fmt = {4: 'I', 8: 'Q'}[offset_size]
offset = struct.unpack(byteorder + fmt, fh.read(offset_size))[0]
if not offset:
raise StopIteration()
# read standard tags
tags = self.tags
fh.seek(offset)
fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size]
try:
numtags = struct.unpack(byteorder + fmt, fh.read(size))[0]
except Exception:
warnings.warn("corrupted page list")
raise StopIteration()
tagcode = 0
for _ in range(numtags):
try:
tag = TiffTag(self.parent)
# print(tag)
except TiffTag.Error as e:
warnings.warn(str(e))
continue
if tagcode > tag.code:
# expected for early LSM and tifffile versions
warnings.warn("tags are not ordered by code")
tagcode = tag.code
if tag.name not in tags:
tags[tag.name] = tag
else:
# some files contain multiple IFD with same code
# e.g. MicroManager files contain two image_description
i = 1
while True:
name = "%s_%i" % (tag.name, i)
if name not in tags:
tags[name] = tag
break
pos = fh.tell()
if self.is_lsm or (self.index and self.parent.is_lsm):
# correct non standard LSM bitspersample tags
self.tags['bits_per_sample']._correct_lsm_bitspersample(self)
if self.is_lsm:
# read LSM info subrecords
for name, reader in CZ_LSM_INFO_READERS.items():
try:
offset = self.cz_lsm_info['offset_'+name]
except KeyError:
continue
if offset < 8:
# older LSM revision
continue
fh.seek(offset)
try:
setattr(self, 'cz_lsm_'+name, reader(fh))
except ValueError:
pass
elif self.is_stk and 'uic1tag' in tags and not tags['uic1tag'].value:
# read uic1tag now that plane count is known
uic1tag = tags['uic1tag']
fh.seek(uic1tag.value_offset)
tags['uic1tag'].value = Record(
read_uic1tag(fh, byteorder, uic1tag.dtype, uic1tag.count,
tags['uic2tag'].count))
fh.seek(pos)
    def _process_tags(self):
        """Validate standard tags and initialize attributes.

        Fills in defaults from TIFF_TAGS, derives bits_per_sample,
        sample_format, dtype, and the logical data shape/axes of the page.

        Raise ValueError if tag values are not supported.

        """
        tags = self.tags
        # apply defaults and validate enumerated tag values
        for code, (name, default, dtype, count, validate) in TIFF_TAGS.items():
            if not (name in tags or default is None):
                tags[name] = TiffTag(code, dtype=dtype, count=count,
                                     value=default, name=name)
            if name in tags and validate:
                try:
                    if tags[name].count == 1:
                        setattr(self, name, validate[tags[name].value])
                    else:
                        setattr(self, name, tuple(
                            validate[value] for value in tags[name].value))
                except KeyError:
                    raise ValueError("%s.value (%s) not supported" %
                                     (name, tags[name].value))

        # collapse per-sample bits_per_sample to a scalar when uniform
        tag = tags['bits_per_sample']
        if tag.count == 1:
            self.bits_per_sample = tag.value
        else:
            # LSM might list more items than samples_per_pixel
            value = tag.value[:self.samples_per_pixel]
            if any((v-value[0] for v in value)):
                self.bits_per_sample = value
            else:
                self.bits_per_sample = value[0]

        # same collapsing for sample_format
        tag = tags['sample_format']
        if tag.count == 1:
            self.sample_format = TIFF_SAMPLE_FORMATS[tag.value]
        else:
            value = tag.value[:self.samples_per_pixel]
            if any((v-value[0] for v in value)):
                self.sample_format = [TIFF_SAMPLE_FORMATS[v] for v in value]
            else:
                self.sample_format = TIFF_SAMPLE_FORMATS[value[0]]

        if 'photometric' not in tags:
            self.photometric = None

        if 'image_depth' not in tags:
            self.image_depth = 1

        if 'image_length' in tags:
            # ceil(image_length / rows_per_strip)
            self.strips_per_image = int(math.floor(
                float(self.image_length + self.rows_per_strip - 1) /
                self.rows_per_strip))
        else:
            self.strips_per_image = 0

        key = (self.sample_format, self.bits_per_sample)
        self.dtype = self._dtype = TIFF_SAMPLE_DTYPES.get(key, None)

        if 'image_length' not in self.tags or 'image_width' not in self.tags:
            # some GEL file pages are missing image data
            self.image_length = 0
            self.image_width = 0
            self.image_depth = 0
            self.strip_offsets = 0
            self._shape = ()
            self.shape = ()
            self.axes = ''

        if self.is_palette:
            # normalize the color map; narrow to uint8 when values fit
            self.dtype = self.tags['color_map'].dtype[1]
            self.color_map = numpy.array(self.color_map, self.dtype)
            dmax = self.color_map.max()
            if dmax < 256:
                self.dtype = numpy.uint8
                self.color_map = self.color_map.astype(self.dtype)
            #else:
            #    self.dtype = numpy.uint8
            #    self.color_map >>= 8
            #    self.color_map = self.color_map.astype(self.dtype)
            self.color_map.shape = (3, -1)

        # determine shape of data
        # _shape is always a normalized 6-D (planes, samples-planar, depth,
        # length, width, samples-contig) shape; shape/axes are user facing.
        image_length = self.image_length
        image_width = self.image_width
        image_depth = self.image_depth
        samples_per_pixel = self.samples_per_pixel

        if self.is_stk:
            # MetaMorph STK: plane count comes from the UIC2 tag
            assert self.image_depth == 1
            planes = self.tags['uic2tag'].count
            if self.is_contig:
                self._shape = (planes, 1, 1, image_length, image_width,
                               samples_per_pixel)
                if samples_per_pixel == 1:
                    self.shape = (planes, image_length, image_width)
                    self.axes = 'YX'
                else:
                    self.shape = (planes, image_length, image_width,
                                  samples_per_pixel)
                    self.axes = 'YXS'
            else:
                self._shape = (planes, samples_per_pixel, 1, image_length,
                               image_width, 1)
                if samples_per_pixel == 1:
                    self.shape = (planes, image_length, image_width)
                    self.axes = 'YX'
                else:
                    self.shape = (planes, samples_per_pixel, image_length,
                                  image_width)
                    self.axes = 'SYX'
            # detect type of series
            if planes == 1:
                self.shape = self.shape[1:]
            elif numpy.all(self.uic2tag.z_distance != 0):
                self.axes = 'Z' + self.axes
            elif numpy.all(numpy.diff(self.uic2tag.time_created) != 0):
                self.axes = 'T' + self.axes
            else:
                self.axes = 'I' + self.axes
            # DISABLED
            if self.is_palette:
                assert False, "color mapping disabled for stk"
                if self.color_map.shape[1] >= 2**self.bits_per_sample:
                    if image_depth == 1:
                        self.shape = (3, planes, image_length, image_width)
                    else:
                        self.shape = (3, planes, image_depth, image_length,
                                      image_width)
                    self.axes = 'C' + self.axes
                else:
                    warnings.warn("palette cannot be applied")
                    self.is_palette = False
        elif self.is_palette:
            # palette-indexed image; shape gains the RGB channel axis only
            # if the color map covers the full sample value range
            samples = 1
            if 'extra_samples' in self.tags:
                samples += len(self.extra_samples)
            if self.is_contig:
                self._shape = (1, 1, image_depth, image_length, image_width,
                               samples)
            else:
                self._shape = (1, samples, image_depth, image_length,
                               image_width, 1)
            if self.color_map.shape[1] >= 2**self.bits_per_sample:
                if image_depth == 1:
                    self.shape = (3, image_length, image_width)
                    self.axes = 'CYX'
                else:
                    self.shape = (3, image_depth, image_length, image_width)
                    self.axes = 'CZYX'
            else:
                warnings.warn("palette cannot be applied")
                self.is_palette = False
                if image_depth == 1:
                    self.shape = (image_length, image_width)
                    self.axes = 'YX'
                else:
                    self.shape = (image_depth, image_length, image_width)
                    self.axes = 'ZYX'
        elif self.is_rgb or samples_per_pixel > 1:
            # multi-sample (e.g. RGB) image
            if self.is_contig:
                self._shape = (1, 1, image_depth, image_length, image_width,
                               samples_per_pixel)
                if image_depth == 1:
                    self.shape = (image_length, image_width, samples_per_pixel)
                    self.axes = 'YXS'
                else:
                    self.shape = (image_depth, image_length, image_width,
                                  samples_per_pixel)
                    self.axes = 'ZYXS'
            else:
                self._shape = (1, samples_per_pixel, image_depth,
                               image_length, image_width, 1)
                if image_depth == 1:
                    self.shape = (samples_per_pixel, image_length, image_width)
                    self.axes = 'SYX'
                else:
                    self.shape = (samples_per_pixel, image_depth,
                                  image_length, image_width)
                    self.axes = 'SZYX'
            if False and self.is_rgb and 'extra_samples' in self.tags:
                # DISABLED: only use RGB and first alpha channel if exists
                extra_samples = self.extra_samples
                if self.tags['extra_samples'].count == 1:
                    extra_samples = (extra_samples, )
                for exs in extra_samples:
                    if exs in ('unassalpha', 'assocalpha', 'unspecified'):
                        if self.is_contig:
                            self.shape = self.shape[:-1] + (4,)
                        else:
                            self.shape = (4,) + self.shape[1:]
                        break
        else:
            # plain grayscale image
            self._shape = (1, 1, image_depth, image_length, image_width, 1)
            if image_depth == 1:
                self.shape = (image_length, image_width)
                self.axes = 'YX'
            else:
                self.shape = (image_depth, image_length, image_width)
                self.axes = 'ZYX'

        if not self.compression and 'strip_byte_counts' not in tags:
            # uncompressed data without byte counts: derive from shape
            self.strip_byte_counts = (
                product(self.shape) * (self.bits_per_sample // 8), )

        assert len(self.shape) == len(self.axes)
    def asarray(self, squeeze=True, colormapped=True, rgbonly=False,
                scale_mdgel=False, memmap=False, reopen=True):
        """Read image data from file and return as numpy array.

        Raise ValueError if format is unsupported.
        If any of 'squeeze', 'colormapped', or 'rgbonly' are not the default,
        the shape of the returned array might be different from the page shape.

        Parameters
        ----------
        squeeze : bool
            If True, all length-1 dimensions (except X and Y) are
            squeezed out from result.
        colormapped : bool
            If True, color mapping is applied for palette-indexed images.
        rgbonly : bool
            If True, return RGB(A) image without additional extra samples.
        memmap : bool
            If True, use numpy.memmap to read arrays from file if possible.
            For use on 64 bit systems and files with few huge contiguous data.
        reopen : bool
            If True and the parent file handle is closed, the file is
            temporarily re-opened (and closed if no exception occurs).
        scale_mdgel : bool
            If True, MD Gel data will be scaled according to the private
            metadata in the second TIFF page. The dtype will be float32.

        """
        if not self._shape:
            return

        if self.dtype is None:
            raise ValueError("data type not supported: %s%i" % (
                self.sample_format, self.bits_per_sample))
        if self.compression not in TIFF_DECOMPESSORS:
            raise ValueError("cannot decompress %s" % self.compression)
        tag = self.tags['sample_format']
        if tag.count != 1 and any((i-tag.value[0] for i in tag.value)):
            raise ValueError("sample formats don't match %s" % str(tag.value))

        fh = self.parent.filehandle
        closed = fh.closed
        if closed:
            if reopen:
                fh.open()
            else:
                raise IOError("file handle is closed")

        dtype = self._dtype
        shape = self._shape
        image_width = self.image_width
        image_length = self.image_length
        image_depth = self.image_depth
        typecode = self.parent.byteorder + dtype
        bits_per_sample = self.bits_per_sample

        if self.is_tiled:
            # tiled layout: pad shape up to whole tiles for assembly
            if 'tile_offsets' in self.tags:
                byte_counts = self.tile_byte_counts
                offsets = self.tile_offsets
            else:
                byte_counts = self.strip_byte_counts
                offsets = self.strip_offsets
            tile_width = self.tile_width
            tile_length = self.tile_length
            tile_depth = self.tile_depth if 'tile_depth' in self.tags else 1
            tw = (image_width + tile_width - 1) // tile_width
            tl = (image_length + tile_length - 1) // tile_length
            td = (image_depth + tile_depth - 1) // tile_depth
            shape = (shape[0], shape[1],
                     td*tile_depth, tl*tile_length, tw*tile_width, shape[-1])
            tile_shape = (tile_depth, tile_length, tile_width, shape[-1])
            runlen = tile_width
        else:
            byte_counts = self.strip_byte_counts
            offsets = self.strip_offsets
            runlen = image_width

        if any(o < 2 for o in offsets):
            raise ValueError("corrupted page")

        if memmap and self._is_memmappable(rgbonly, colormapped):
            # data is raw and contiguous: map it straight from the file
            result = fh.memmap_array(typecode, shape, offset=offsets[0])
        elif self.is_contiguous:
            fh.seek(offsets[0])
            result = fh.read_array(typecode, product(shape))
            result = result.astype('=' + dtype)
        else:
            # general path: decompress and unpack strip/tile-wise
            if self.is_contig:
                runlen *= self.samples_per_pixel
            if bits_per_sample in (8, 16, 32, 64, 128):
                if (bits_per_sample * runlen) % 8:
                    raise ValueError("data and sample size mismatch")

                def unpack(x):
                    try:
                        return numpy.fromstring(x, typecode)
                    except ValueError as e:
                        # strips may be missing EOI
                        warnings.warn("unpack: %s" % e)
                        xlen = ((len(x) // (bits_per_sample // 8))
                                * (bits_per_sample // 8))
                        return numpy.fromstring(x[:xlen], typecode)
            elif isinstance(bits_per_sample, tuple):
                # e.g. packed RGB with different bit widths per channel
                def unpack(x):
                    return unpackrgb(x, typecode, bits_per_sample)
            else:
                # non byte-aligned sample sizes
                def unpack(x):
                    return unpackints(x, typecode, bits_per_sample, runlen)

            decompress = TIFF_DECOMPESSORS[self.compression]
            if self.compression == 'jpeg':
                table = self.jpeg_tables if 'jpeg_tables' in self.tags else b''
                decompress = lambda x: decodejpg(x, table, self.photometric)

            if self.is_tiled:
                result = numpy.empty(shape, dtype)
                tw, tl, td, pl = 0, 0, 0, 0
                for offset, bytecount in zip(offsets, byte_counts):
                    fh.seek(offset)
                    tile = unpack(decompress(fh.read(bytecount)))
                    tile.shape = tile_shape
                    if self.predictor == 'horizontal':
                        numpy.cumsum(tile, axis=-2, dtype=dtype, out=tile)
                    result[0, pl, td:td+tile_depth,
                           tl:tl+tile_length, tw:tw+tile_width, :] = tile
                    del tile
                    # advance tile cursor in width, length, depth, plane order
                    tw += tile_width
                    if tw >= shape[4]:
                        tw, tl = 0, tl + tile_length
                        if tl >= shape[3]:
                            tl, td = 0, td + tile_depth
                            if td >= shape[2]:
                                td, pl = 0, pl + 1
                # crop tile padding back to the true image extent
                result = result[...,
                                :image_depth, :image_length, :image_width, :]
            else:
                strip_size = (self.rows_per_strip * self.image_width *
                              self.samples_per_pixel)
                result = numpy.empty(shape, dtype).reshape(-1)
                index = 0
                for offset, bytecount in zip(offsets, byte_counts):
                    fh.seek(offset)
                    strip = fh.read(bytecount)
                    strip = decompress(strip)
                    strip = unpack(strip)
                    # guard against short/overlong strips
                    size = min(result.size, strip.size, strip_size,
                               result.size - index)
                    result[index:index+size] = strip[:size]
                    del strip
                    index += size

        result.shape = self._shape

        if self.predictor == 'horizontal' and not (self.is_tiled and not
                                                   self.is_contiguous):
            # work around bug in LSM510 software
            if not (self.parent.is_lsm and not self.compression):
                numpy.cumsum(result, axis=-2, dtype=dtype, out=result)

        if colormapped and self.is_palette:
            if self.color_map.shape[1] >= 2**bits_per_sample:
                # FluoView and LSM might fail here
                result = numpy.take(self.color_map,
                                    result[:, 0, :, :, :, 0], axis=1)
        elif rgbonly and self.is_rgb and 'extra_samples' in self.tags:
            # return only RGB and first alpha channel if exists
            extra_samples = self.extra_samples
            if self.tags['extra_samples'].count == 1:
                extra_samples = (extra_samples, )
            for i, exs in enumerate(extra_samples):
                if exs in ('unassalpha', 'assocalpha', 'unspecified'):
                    if self.is_contig:
                        result = result[..., [0, 1, 2, 3+i]]
                    else:
                        result = result[:, [0, 1, 2, 3+i]]
                    break
            else:
                # no alpha channel found: drop all extra samples
                if self.is_contig:
                    result = result[..., :3]
                else:
                    result = result[:, :3]

        if squeeze:
            try:
                result.shape = self.shape
            except ValueError:
                warnings.warn("failed to reshape from %s to %s" % (
                    str(result.shape), str(self.shape)))

        if scale_mdgel and self.parent.is_mdgel:
            # MD Gel stores private metadata in the second page
            tags = self.parent.pages[1]
            if tags.md_file_tag in (2, 128):
                scale = tags.md_scale_pixel
                scale = scale[0] / scale[1]  # rational
                result = result.astype('float32')
                if tags.md_file_tag == 2:
                    result **= 2  # squary root data format
                result *= scale

        if closed:
            # TODO: file remains open if an exception occurred above
            fh.close()
        return result
def _is_memmappable(self, rgbonly, colormapped):
"""Return if image data in file can be memory mapped."""
if not self.parent.filehandle.is_file or not self.is_contiguous:
return False
return not (self.predictor or
(rgbonly and 'extra_samples' in self.tags) or
(colormapped and self.is_palette) or
({'big': '>', 'little': '<'}[sys.byteorder] !=
self.parent.byteorder))
    @lazyattr
    def is_contiguous(self):
        """Return offset and size of contiguous data, else None.

        Excludes prediction and colormapping.

        """
        # only whole-byte, uncompressed samples can be contiguous
        if self.compression or self.bits_per_sample not in (8, 16, 32, 64):
            return
        if self.is_tiled:
            # tiles must exactly cover the image width and divide its length
            if (self.image_width != self.tile_width or
                    self.image_length % self.tile_length or
                    self.tile_width % 16 or self.tile_length % 16):
                return
            if ('image_depth' in self.tags and 'tile_depth' in self.tags and
                    (self.image_length != self.tile_length or
                     self.image_depth % self.tile_depth)):
                return
            offsets = self.tile_offsets
            byte_counts = self.tile_byte_counts
        else:
            offsets = self.strip_offsets
            byte_counts = self.strip_byte_counts
        if len(offsets) == 1:
            return offsets[0], byte_counts[0]
        # contiguous if each segment starts where the previous one ends
        if self.is_stk or all(offsets[i] + byte_counts[i] == offsets[i+1]
                              or byte_counts[i+1] == 0  # no data/ignore offset
                              for i in range(len(offsets)-1)):
            return offsets[0], sum(byte_counts)
def __str__(self):
"""Return string containing information about page."""
s = ', '.join(s for s in (
' x '.join(str(i) for i in self.shape),
str(numpy.dtype(self.dtype)),
'%s bit' % str(self.bits_per_sample),
self.photometric if 'photometric' in self.tags else '',
self.compression if self.compression else 'raw',
'|'.join(t[3:] for t in (
'is_stk', 'is_lsm', 'is_nih', 'is_ome', 'is_imagej',
'is_micromanager', 'is_fluoview', 'is_mdgel', 'is_mediacy',
'is_sgi', 'is_reduced', 'is_tiled',
'is_contiguous') if getattr(self, t))) if s)
return "Page %i: %s" % (self.index, s)
def __getattr__(self, name):
"""Return tag value."""
if name in self.tags:
value = self.tags[name].value
setattr(self, name, value)
return value
raise AttributeError(name)
    @lazyattr
    def uic_tags(self):
        """Consolidate UIC tags.

        Merge the values of the uic1/3/4 tags and the uic2 plane records
        into a single Record. Raise AttributeError if page is not STK.

        """
        if not self.is_stk:
            raise AttributeError("uic_tags")
        tags = self.tags
        result = Record()
        result.number_planes = tags['uic2tag'].count
        if 'image_description' in tags:
            result.plane_descriptions = self.image_description.split(b'\x00')
        if 'uic1tag' in tags:
            result.update(tags['uic1tag'].value)
        if 'uic3tag' in tags:
            result.update(tags['uic3tag'].value)  # wavelengths
        if 'uic4tag' in tags:
            result.update(tags['uic4tag'].value)  # override uic1 tags
        uic2tag = tags['uic2tag'].value
        result.z_distance = uic2tag.z_distance
        result.time_created = uic2tag.time_created
        result.time_modified = uic2tag.time_modified
        try:
            # convert Julian date/time pairs to datetime objects
            result.datetime_created = [
                julian_datetime(*dt) for dt in
                zip(uic2tag.date_created, uic2tag.time_created)]
            result.datetime_modified = [
                julian_datetime(*dt) for dt in
                zip(uic2tag.date_modified, uic2tag.time_modified)]
        except ValueError as e:
            warnings.warn("uic_tags: %s" % e)
        return result
    @lazyattr
    def imagej_tags(self):
        """Consolidate ImageJ metadata.

        Parse the ImageJ description string and, if present, the binary
        imagej_metadata tag. Raise AttributeError if page is not ImageJ.

        """
        if not self.is_imagej:
            raise AttributeError("imagej_tags")
        tags = self.tags
        if 'image_description_1' in tags:
            # MicroManager
            result = imagej_description(tags['image_description_1'].value)
        else:
            result = imagej_description(tags['image_description'].value)
        if 'imagej_metadata' in tags:
            try:
                result.update(imagej_metadata(
                    tags['imagej_metadata'].value,
                    tags['imagej_byte_counts'].value,
                    self.parent.byteorder))
            except Exception as e:
                # metadata is best effort; keep the description result
                warnings.warn(str(e))
        return Record(result)
@lazyattr
def is_rgb(self):
"""True if page contains a RGB image."""
return ('photometric' in self.tags and
self.tags['photometric'].value == 2)
@lazyattr
def is_contig(self):
"""True if page contains a contiguous image."""
return ('planar_configuration' in self.tags and
self.tags['planar_configuration'].value == 1)
@lazyattr
def is_palette(self):
"""True if page contains a palette-colored image and not OME or STK."""
try:
# turn off color mapping for OME-TIFF and STK
if self.is_stk or self.is_ome or self.parent.is_ome:
return False
except IndexError:
pass # OME-XML not found in first page
return ('photometric' in self.tags and
self.tags['photometric'].value == 3)
@lazyattr
def is_tiled(self):
"""True if page contains tiled image."""
return 'tile_width' in self.tags
@lazyattr
def is_reduced(self):
"""True if page is a reduced image of another image."""
return bool(self.tags['new_subfile_type'].value & 1)
@lazyattr
def is_mdgel(self):
"""True if page contains md_file_tag tag."""
return 'md_file_tag' in self.tags
@lazyattr
def is_mediacy(self):
"""True if page contains Media Cybernetics Id tag."""
return ('mc_id' in self.tags and
self.tags['mc_id'].value.startswith(b'MC TIFF'))
@lazyattr
def is_stk(self):
"""True if page contains UIC2Tag tag."""
return 'uic2tag' in self.tags
@lazyattr
def is_lsm(self):
"""True if page contains LSM CZ_LSM_INFO tag."""
return 'cz_lsm_info' in self.tags
@lazyattr
def is_fluoview(self):
"""True if page contains FluoView MM_STAMP tag."""
return 'mm_stamp' in self.tags
@lazyattr
def is_nih(self):
"""True if page contains NIH image header."""
return 'nih_image_header' in self.tags
@lazyattr
def is_sgi(self):
"""True if page contains SGI image and tile depth tags."""
return 'image_depth' in self.tags and 'tile_depth' in self.tags
@lazyattr
def is_ome(self):
"""True if page contains OME-XML in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'<?xml version='))
@lazyattr
def is_shaped(self):
"""True if page contains shape in image_description tag."""
return ('image_description' in self.tags and self.tags[
'image_description'].value.startswith(b'shape=('))
@lazyattr
def is_imagej(self):
"""True if page contains ImageJ description."""
return (
('image_description' in self.tags and
self.tags['image_description'].value.startswith(b'ImageJ=')) or
('image_description_1' in self.tags and # Micromanager
self.tags['image_description_1'].value.startswith(b'ImageJ=')))
@lazyattr
def is_micromanager(self):
"""True if page contains Micro-Manager metadata."""
return 'micromanager_metadata' in self.tags
class TiffTag(object):
    """A TIFF tag structure.

    Attributes
    ----------
    name : string
        Attribute name of tag.
    code : int
        Decimal code of tag.
    dtype : str
        Datatype of tag data. One of TIFF_DATA_TYPES.
    count : int
        Number of values.
    value : various types
        Tag data as Python object.
    value_offset : int
        Location of value in file, if any.

    All attributes are read-only.

    """
    __slots__ = ('code', 'name', 'count', 'dtype', 'value', 'value_offset',
                 '_offset', '_value', '_type')

    class Error(Exception):
        # raised on unknown data types or corrupt tag structures
        pass

    def __init__(self, arg, **kwargs):
        """Initialize instance from file or arguments.

        A file-like argument (anything with a '_fh' attribute, i.e. a
        TiffFile parent) is read from; anything else is treated as a
        tag code with keyword arguments.

        """
        self._offset = None
        if hasattr(arg, '_fh'):
            self._fromfile(arg, **kwargs)
        else:
            self._fromdata(arg, **kwargs)

    def _fromdata(self, code, dtype, count, value, name=None):
        """Initialize instance from arguments."""
        self.code = int(code)
        self.name = name if name else str(code)
        self.dtype = TIFF_DATA_TYPES[dtype]
        self.count = int(count)
        self.value = value
        self._value = value
        self._type = dtype

    def _fromfile(self, parent):
        """Read tag structure from open file. Advance file cursor.

        Raises TiffTag.Error on unknown data types or corrupt offsets.

        """
        fh = parent.filehandle
        byteorder = parent.byteorder
        self._offset = fh.tell()
        # default: value stored inline, after the code/dtype/count fields
        self.value_offset = self._offset + parent.offset_size + 4
        fmt, size = {4: ('HHI4s', 12), 8: ('HHQ8s', 20)}[parent.offset_size]
        data = fh.read(size)
        code, dtype = struct.unpack(byteorder + fmt[:2], data[:4])
        count, value = struct.unpack(byteorder + fmt[2:], data[4:])
        self._value = value
        self._type = dtype

        if code in TIFF_TAGS:
            name = TIFF_TAGS[code][0]
        elif code in CUSTOM_TAGS:
            name = CUSTOM_TAGS[code][0]
        else:
            name = str(code)

        try:
            dtype = TIFF_DATA_TYPES[self._type]
        except KeyError:
            raise TiffTag.Error("unknown tag data type %i" % self._type)

        fmt = '%s%i%s' % (byteorder, count*int(dtype[0]), dtype[1])
        size = struct.calcsize(fmt)
        if size > parent.offset_size or code in CUSTOM_TAGS:
            # value does not fit inline; the inline field holds an offset
            pos = fh.tell()
            tof = {4: 'I', 8: 'Q'}[parent.offset_size]
            self.value_offset = offset = struct.unpack(byteorder+tof, value)[0]
            if offset < 0 or offset > parent.filehandle.size:
                raise TiffTag.Error("corrupt file - invalid tag value offset")
            elif offset < 4:
                raise TiffTag.Error("corrupt value offset for tag %i" % code)
            fh.seek(offset)
            if code in CUSTOM_TAGS:
                readfunc = CUSTOM_TAGS[code][1]
                value = readfunc(fh, byteorder, dtype, count)
                if isinstance(value, dict):  # numpy.core.records.record
                    value = Record(value)
            elif code in TIFF_TAGS or dtype[-1] == 's':
                value = struct.unpack(fmt, fh.read(size))
            else:
                value = read_numpy(fh, byteorder, dtype, count)
            fh.seek(pos)
        else:
            value = struct.unpack(fmt, value[:size])

        if code not in CUSTOM_TAGS and code not in (273, 279, 324, 325):
            # scalar value if not strip/tile offsets/byte_counts
            if len(value) == 1:
                value = value[0]

        if (dtype.endswith('s') and isinstance(value, bytes)
                and self._type != 7):
            # TIFF ASCII fields can contain multiple strings,
            # each terminated with a NUL
            value = stripascii(value)

        self.code = code
        self.name = name
        self.dtype = dtype
        self.count = count
        self.value = value

    def _correct_lsm_bitspersample(self, parent):
        """Correct LSM bitspersample tag.

        Old LSM writers may use a separate region for two 16-bit values,
        although they fit into the tag value element of the tag.

        """
        if self.code == 258 and self.count == 2:
            # TODO: test this. Need example file.
            warnings.warn("correcting LSM bitspersample tag")
            fh = parent.filehandle
            tof = {4: '<I', 8: '<Q'}[parent.offset_size]
            self.value_offset = struct.unpack(tof, self._value)[0]
            fh.seek(self.value_offset)
            self.value = struct.unpack("<HH", fh.read(4))

    def as_str(self):
        """Return value as human readable string."""
        # type 7 is TIFF UNDEFINED (raw bytes); don't try to print it
        return ((str(self.value).split('\n', 1)[0]) if (self._type != 7)
                else '<undefined>')

    def __str__(self):
        """Return string containing information about tag."""
        return ' '.join(str(getattr(self, s)) for s in self.__slots__)
class TiffSequence(object):
    """Sequence of image files.

    The data shape and dtype of all files must match.

    Properties
    ----------
    files : list
        List of file names.
    shape : tuple
        Shape of image sequence.
    axes : str
        Labels of axes in shape.

    Examples
    --------
    >>> tifs = TiffSequence("test.oif.files/*.tif")
    >>> tifs.shape, tifs.axes
    ((2, 100), 'CT')
    >>> data = tifs.asarray()
    >>> data.shape
    (2, 100, 256, 256)

    """
    # named file-name patterns; each group pair is (axis letter(s), index)
    _patterns = {
        'axes': r"""
            # matches Olympus OIF and Leica TIFF series
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            _?(?:(q|l|p|a|c|t|x|y|z|ch|tp)(\d{1,4}))?
            """}

    class ParseError(Exception):
        # raised when file names do not match the axes pattern
        pass

    def __init__(self, files, imread=TiffFile, pattern='axes',
                 *args, **kwargs):
        """Initialize instance from multiple files.

        Parameters
        ----------
        files : str, or sequence of str
            Glob pattern or sequence of file names.
        imread : function or class
            Image read function or class with asarray function returning numpy
            array from single file.
        pattern : str
            Regular expression pattern that matches axes names and sequence
            indices in file names.
            By default this matches Olympus OIF and Leica TIFF series.

        """
        if isinstance(files, basestring):
            files = natural_sorted(glob.glob(files))
        files = list(files)
        if not files:
            raise ValueError("no files found")
        #if not os.path.isfile(files[0]):
        #    raise ValueError("file not found")
        self.files = files

        if hasattr(imread, 'asarray'):
            # redefine imread to open/read/close via the given class
            _imread = imread

            def imread(fname, *args, **kwargs):
                with _imread(fname) as im:
                    return im.asarray(*args, **kwargs)

        self.imread = imread

        self.pattern = self._patterns.get(pattern, pattern)
        try:
            self._parse()
            if not self.axes:
                self.axes = 'I'
        except self.ParseError:
            # fall back to a flat, implicitly indexed sequence
            self.axes = 'I'
            self.shape = (len(files),)
            self._start_index = (0,)
            self._indices = tuple((i,) for i in range(len(files)))

    def __str__(self):
        """Return string with information about image sequence."""
        return "\n".join([
            self.files[0],
            '* files: %i' % len(self.files),
            '* axes: %s' % self.axes,
            '* shape: %s' % str(self.shape)])

    def __len__(self):
        return len(self.files)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        # files are opened and closed per read; nothing to release here
        pass

    def asarray(self, memmap=False, *args, **kwargs):
        """Read image data from all files and return as single numpy array.

        If memmap is True, return an array stored in a binary file on disk.
        The args and kwargs parameters are passed to the imread function.

        Raise IndexError or ValueError if image shapes don't match.

        """
        # read first file to determine per-image shape and dtype
        im = self.imread(self.files[0], *args, **kwargs)
        shape = self.shape + im.shape
        if memmap:
            with tempfile.NamedTemporaryFile() as fh:
                result = numpy.memmap(fh, dtype=im.dtype, shape=shape)
        else:
            result = numpy.zeros(shape, dtype=im.dtype)
        result = result.reshape(-1, *im.shape)
        for index, fname in zip(self._indices, self.files):
            # map the per-axis indices to a flat position in result
            index = [i-j for i, j in zip(index, self._start_index)]
            index = numpy.ravel_multi_index(index, self.shape)
            im = self.imread(fname, *args, **kwargs)
            result[index] = im
        result.shape = shape
        return result

    def _parse(self):
        """Get axes and shape from file names."""
        if not self.pattern:
            raise self.ParseError("invalid pattern")
        pattern = re.compile(self.pattern, re.IGNORECASE | re.VERBOSE)
        matches = pattern.findall(self.files[0])
        if not matches:
            raise self.ParseError("pattern doesn't match file names")
        matches = matches[-1]
        if len(matches) % 2:
            raise self.ParseError("pattern doesn't match axis name and index")
        # even positions are axis names, odd positions are indices
        axes = ''.join(m for m in matches[::2] if m)
        if not axes:
            raise self.ParseError("pattern doesn't match file names")

        indices = []
        for fname in self.files:
            matches = pattern.findall(fname)[-1]
            if axes != ''.join(m for m in matches[::2] if m):
                raise ValueError("axes don't match within the image sequence")
            indices.append([int(m) for m in matches[1::2] if m])
        shape = tuple(numpy.max(indices, axis=0))
        start_index = tuple(numpy.min(indices, axis=0))
        shape = tuple(i-j+1 for i, j in zip(shape, start_index))
        if product(shape) != len(self.files):
            warnings.warn("files are missing. Missing data are zeroed")

        self.axes = axes.upper()
        self.shape = shape
        self._indices = indices
        self._start_index = start_index
class Record(dict):
    """Dictionary with attribute access.

    Can also be initialized with numpy.core.records.record.

    """
    __slots__ = ()

    def __init__(self, arg=None, **kwargs):
        if kwargs:
            arg = kwargs
        elif arg is None:
            arg = {}
        try:
            dict.__init__(self, arg)
        except (TypeError, ValueError):
            # numpy record: copy fields by position, stripping NULs
            # from byte-string values
            for i, name in enumerate(arg.dtype.names):
                v = arg[i]
                self[name] = v if v.dtype.char != 'S' else stripnull(v)

    def __getattr__(self, name):
        return self[name]

    def __setattr__(self, name, value):
        self.__setitem__(name, value)

    def __str__(self):
        """Pretty print Record."""
        s = []
        lists = []
        for k in sorted(self):
            try:
                if k.startswith('_'):  # does not work with byte
                    continue
            except AttributeError:
                pass
            v = self[k]
            if isinstance(v, (list, tuple)) and len(v):
                if isinstance(v[0], Record):
                    # nested records are printed after the scalar entries
                    lists.append((k, v))
                    continue
                elif isinstance(v[0], TiffPage):
                    v = [i.index for i in v if i]
            # one truncated line per entry
            s.append(
                ("* %s: %s" % (k, str(v))).split("\n", 1)[0]
                [:PRINT_LINE_LEN].rstrip())
        for k, v in lists:
            l = []
            for i, w in enumerate(v):
                l.append("* %s[%i]\n  %s" % (k, i,
                                             str(w).replace("\n", "\n  ")))
            s.append('\n'.join(l))
        return '\n'.join(s)
class TiffTags(Record):
    """Dictionary of TiffTag with attribute access."""

    def __str__(self):
        """Return string with information about all tags."""
        def describe(tag):
            # e.g. "* 256 image_width (1H) 512"
            typecode = "%i%s" % (tag.count * int(tag.dtype[0]), tag.dtype[1])
            line = "* %i %s (%s) %s" % (tag.code, tag.name, typecode,
                                        tag.as_str())
            return line[:PRINT_LINE_LEN].lstrip()

        ordered = sorted(self.values(), key=lambda tag: tag.code)
        return '\n'.join(describe(tag) for tag in ordered)
class FileHandle(object):
"""Binary file handle.
* Handle embedded files (for CZI within CZI files).
* Allow to re-open closed files (for multi file formats such as OME-TIFF).
* Read numpy arrays and records from file like objects.
Only binary read, seek, tell, and close are supported on embedded files.
When initialized from another file handle, do not use it unless this
FileHandle is closed.
Attributes
----------
name : str
Name of the file.
path : str
Absolute path to file.
size : int
Size of file in bytes.
is_file : bool
If True, file has a filno and can be memory mapped.
All attributes are read-only.
"""
__slots__ = ('_fh', '_arg', '_mode', '_name', '_dir',
'_offset', '_size', '_close', 'is_file')
def __init__(self, arg, mode='rb', name=None, offset=None, size=None):
"""Initialize file handle from file name or another file handle.
Parameters
----------
arg : str, File, or FileHandle
File name or open file handle.
mode : str
File open mode in case 'arg' is a file name.
name : str
Optional name of file in case 'arg' is a file handle.
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
"""
self._fh = None
self._arg = arg
self._mode = mode
self._name = name
self._dir = ''
self._offset = offset
self._size = size
self._close = True
self.is_file = False
self.open()
def open(self):
"""Open or re-open file."""
if self._fh:
return # file is open
if isinstance(self._arg, basestring):
# file name
self._arg = os.path.abspath(self._arg)
self._dir, self._name = os.path.split(self._arg)
self._fh = open(self._arg, self._mode)
self._close = True
if self._offset is None:
self._offset = 0
elif isinstance(self._arg, FileHandle):
# FileHandle
self._fh = self._arg._fh
if self._offset is None:
self._offset = 0
self._offset += self._arg._offset
self._close = False
if not self._name:
if self._offset:
name, ext = os.path.splitext(self._arg._name)
self._name = "%s@%i%s" % (name, self._offset, ext)
else:
self._name = self._arg._name
self._dir = self._arg._dir
else:
# open file object
self._fh = self._arg
if self._offset is None:
self._offset = self._arg.tell()
self._close = False
if not self._name:
try:
self._dir, self._name = os.path.split(self._fh.name)
except AttributeError:
self._name = "Unnamed stream"
if self._offset:
self._fh.seek(self._offset)
if self._size is None:
pos = self._fh.tell()
self._fh.seek(self._offset, 2)
self._size = self._fh.tell()
self._fh.seek(pos)
try:
self._fh.fileno()
self.is_file = True
except Exception:
self.is_file = False
def read(self, size=-1):
"""Read 'size' bytes from file, or until EOF is reached."""
if size < 0 and self._offset:
size = self._size
return self._fh.read(size)
def memmap_array(self, dtype, shape, offset=0, mode='r', order='C'):
"""Return numpy.memmap of data stored in file."""
if not self.is_file:
raise ValueError("Can not memory map file without fileno.")
return numpy.memmap(self._fh, dtype=dtype, mode=mode,
offset=self._offset + offset,
shape=shape, order=order)
def read_array(self, dtype, count=-1, sep=""):
"""Return numpy array from file.
Work around numpy issue #2230, "numpy.fromfile does not accept
StringIO object" https://github.com/numpy/numpy/issues/2230.
"""
try:
return numpy.fromfile(self._fh, dtype, count, sep)
except IOError:
if count < 0:
size = self._size
else:
size = count * numpy.dtype(dtype).itemsize
data = self._fh.read(size)
return numpy.fromstring(data, dtype, count, sep)
def read_record(self, dtype, shape=1, byteorder=None):
"""Return numpy record from file."""
try:
rec = numpy.rec.fromfile(self._fh, dtype, shape,
byteorder=byteorder)
except Exception:
dtype = numpy.dtype(dtype)
if shape is None:
shape = self._size // dtype.itemsize
size = product(sequence(shape)) * dtype.itemsize
data = self._fh.read(size)
return numpy.rec.fromstring(data, dtype, shape,
byteorder=byteorder)
return rec[0] if shape == 1 else rec
def tell(self):
"""Return file's current position."""
return self._fh.tell() - self._offset
    def seek(self, offset, whence=0):
        """Set file's current position, relative to the embedded offset."""
        if self._offset:
            if whence == 0:
                # absolute positions are relative to the region start
                self._fh.seek(self._offset + offset, whence)
                return
            elif whence == 2:
                # seek from the end of the embedded region, not the container
                self._fh.seek(self._offset + self._size + offset, 0)
                return
        # whence == 1 (relative) and non-embedded files need no adjustment
        self._fh.seek(offset, whence)
    def close(self):
        """Close file."""
        # only close handles this object opened itself (_close flag)
        if self._close and self._fh:
            self._fh.close()
        self._fh = None
        self.is_file = False
    def __enter__(self):
        # context manager support: `with FileHandle(...) as fh:`
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # always release the underlying handle on scope exit
        self.close()
    def __getattr__(self, name):
        """Return attribute from underlying file object.

        Delegated methods are unaware of the embedded offset/size, hence
        the warning for embedded files.
        """
        if self._offset:
            warnings.warn(
                "FileHandle: '%s' not implemented for embedded files" % name)
        return getattr(self._fh, name)
    @property
    def name(self):
        # file name without directory (or "Unnamed stream")
        return self._name
    @property
    def dirname(self):
        # directory part of the file path
        return self._dir
    @property
    def path(self):
        # full path of the underlying file
        return os.path.join(self._dir, self._name)
    @property
    def size(self):
        # size in bytes of the (embedded) file
        return self._size
    @property
    def closed(self):
        # True once close() has been called
        return self._fh is None
def read_bytes(fh, byteorder, dtype, count):
    """Read tag data from file and return as byte string.

    Parameters follow the TIFF tag-reader convention: an open FileHandle,
    the file's byte order ('<' or '>'), the tag's dtype string, and the
    number of values to read.
    """
    dtype = 'b' if dtype[-1] == 's' else byteorder+dtype[-1]
    # tobytes() replaces the deprecated ndarray.tostring(); identical output
    return fh.read_array(dtype, count).tobytes()
def read_numpy(fh, byteorder, dtype, count):
    """Read tag data from file and return as numpy array."""
    if dtype[-1] == 's':
        # byte-string tags are read as raw signed bytes
        kind = 'b'
    else:
        kind = byteorder + dtype[-1]
    return fh.read_array(kind, count)
def read_json(fh, byteorder, dtype, count):
    """Read JSON tag data from file and return as object.

    Returns None (with a warning) if the data is not valid JSON.
    """
    data = fh.read(count)
    try:
        # .decode() works on both Python 2 str and Python 3 bytes;
        # the previous unicode() builtin exists only on Python 2
        return json.loads(stripnull(data).decode('utf-8'))
    except ValueError:
        warnings.warn("invalid JSON `%s`" % data)
def read_mm_header(fh, byteorder, dtype, count):
    """Read MM_HEADER tag from file and return as numpy.rec.array."""
    # record layout is defined by the module-level MM_HEADER structure
    return fh.read_record(MM_HEADER, byteorder=byteorder)
def read_mm_stamp(fh, byteorder, dtype, count):
    """Read MM_STAMP tag from file and return as numpy.array."""
    # fixed-size block of 8 float64 values
    return fh.read_array(byteorder+'f8', 8)
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
    """Read MetaMorph STK UIC1Tag from file and return as dictionary.
    Return empty dictionary if plane_count is unknown.
    """
    assert dtype in ('2I', '1I') and byteorder == '<'
    result = {}
    if dtype == '2I':
        # pre MetaMorph 2.5 (not tested)
        values = fh.read_array('<u4', 2*count).reshape(count, 2)
        result = {'z_distance': values[:, 0] / values[:, 1]}
    elif plane_count:
        # sequence of (tagid, value-or-offset) entries; values are resolved
        # through their offsets by read_uic_tag(offset=True)
        for i in range(count):
            tagid = struct.unpack('<I', fh.read(4))[0]
            if tagid in (28, 29, 37, 40, 41):
                # silently skip unexpected tags
                fh.read(4)
                continue
            name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
            result[name] = value
    return result
def read_uic2tag(fh, byteorder, dtype, plane_count):
    """Read MetaMorph STK UIC2Tag from file and return as dictionary.

    Six uint32 values per plane: a z-distance fraction and creation /
    modification timestamps.
    """
    assert dtype == '2I' and byteorder == '<'
    values = fh.read_array('<u4', 6*plane_count).reshape(plane_count, 6)
    return {
        'z_distance': values[:, 0] / values[:, 1],
        'date_created': values[:, 2],  # julian days
        'time_created': values[:, 3],  # milliseconds
        'date_modified': values[:, 4],  # julian days
        'time_modified': values[:, 5],  # milliseconds
    }
def read_uic3tag(fh, byteorder, dtype, plane_count):
    """Read MetaMorph STK UIC3Tag from file and return as dictionary.

    One wavelength fraction (numerator, denominator) per plane.
    """
    assert dtype == '2I' and byteorder == '<'
    values = fh.read_array('<u4', 2*plane_count).reshape(plane_count, 2)
    return {'wavelengths': values[:, 0] / values[:, 1]}
def read_uic4tag(fh, byteorder, dtype, plane_count):
    """Read MetaMorph STK UIC4Tag from file and return as dictionary.

    Sequence of (uint16 tagid, inline value) entries terminated by a
    zero tag id.
    """
    assert dtype == '1I' and byteorder == '<'
    result = {}
    while True:
        tagid = struct.unpack('<H', fh.read(2))[0]
        if tagid == 0:
            # zero tag id marks the end of the tag list
            break
        name, value = read_uic_tag(fh, tagid, plane_count, offset=False)
        result[name] = value
    return result
def read_uic_tag(fh, tagid, plane_count, offset):
    """Read a single UIC tag value from file and return tag name and value.
    UIC1Tags use an offset.

    Tag names and value types come from the module-level UIC_TAGS map;
    unknown tag ids are returned as '_tagid_<N>' with their raw uint32.
    """
    def read_int(count=1):
        # read `count` little-endian uint32 values
        value = struct.unpack('<%iI' % count, fh.read(4*count))
        return value[0] if count == 1 else value
    try:
        name, dtype = UIC_TAGS[tagid]
    except KeyError:
        # unknown tag
        return '_tagid_%i' % tagid, read_int()
    if offset:
        # value is stored out-of-line; remember position, follow offset
        pos = fh.tell()
        if dtype not in (int, None):
            off = read_int()
            if off < 8:
                warnings.warn("invalid offset for uic tag '%s': %i"
                              % (name, off))
                return name, off
            fh.seek(off)
    if dtype is None:
        # skip
        name = '_' + name
        value = read_int()
    elif dtype is int:
        # int
        value = read_int()
    elif dtype is Fraction:
        # fraction
        value = read_int(2)
        value = value[0] / value[1]
    elif dtype is julian_datetime:
        # datetime
        value = julian_datetime(*read_int(2))
    elif dtype is read_uic_image_property:
        # ImagePropertyEx
        value = read_uic_image_property(fh)
    elif dtype is str:
        # pascal string
        size = read_int()
        if 0 <= size < 2**10:
            value = struct.unpack('%is' % size, fh.read(size))[0][:-1]
            value = stripnull(value)
        elif offset:
            value = ''
            warnings.warn("corrupt string in uic tag '%s'" % name)
        else:
            raise ValueError("invalid string size %i" % size)
    elif dtype == '%ip':
        # sequence of pascal strings, one per plane
        value = []
        for i in range(plane_count):
            size = read_int()
            if 0 <= size < 2**10:
                string = struct.unpack('%is' % size, fh.read(size))[0][:-1]
                string = stripnull(string)
                value.append(string)
            elif offset:
                warnings.warn("corrupt string in uic tag '%s'" % name)
            else:
                raise ValueError("invalid string size %i" % size)
    else:
        # struct or numpy type
        dtype = '<' + dtype
        if '%i' in dtype:
            dtype = dtype % plane_count
        if '(' in dtype:
            # numpy type
            value = fh.read_array(dtype, 1)[0]
            if value.shape[-1] == 2:
                # assume fractions
                value = value[..., 0] / value[..., 1]
        else:
            # struct format
            value = struct.unpack(dtype, fh.read(struct.calcsize(dtype)))
            if len(value) == 1:
                value = value[0]
    if offset:
        # restore position just past the 4-byte offset field
        fh.seek(pos + 4)
    return name, value
def read_uic_image_property(fh):
    """Read UIC ImagePropertyEx tag from file and return as dict.

    Layout: pascal name string, uint32 flags, uint8 property kind;
    kind 1 is a fraction, anything else a pascal string value.
    """
    # TODO: test this
    size = struct.unpack('B', fh.read(1))[0]
    name = struct.unpack('%is' % size, fh.read(size))[0][:-1]
    flags, prop = struct.unpack('<IB', fh.read(5))
    if prop == 1:
        value = struct.unpack('II', fh.read(8))
        value = value[0] / value[1]
    else:
        size = struct.unpack('B', fh.read(1))[0]
        value = struct.unpack('%is' % size, fh.read(size))[0]
    return dict(name=name, flags=flags, value=value)
def read_cz_lsm_info(fh, byteorder, dtype, count):
    """Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
    assert byteorder == '<'
    magic_number, structure_size = struct.unpack('<II', fh.read(8))
    if magic_number not in (50350412, 67127628):
        raise ValueError("not a valid CS_LSM_INFO structure")
    # rewind over the peeked magic/size; the record read includes them
    fh.seek(-8, 1)
    if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
        # adjust structure according to structure_size:
        # keep only the leading fields that fit in the stored structure
        cz_lsm_info = []
        size = 0
        for name, dtype in CZ_LSM_INFO:
            size += numpy.dtype(dtype).itemsize
            if size > structure_size:
                break
            cz_lsm_info.append((name, dtype))
    else:
        cz_lsm_info = CZ_LSM_INFO
    return fh.read_record(cz_lsm_info, byteorder=byteorder)
def read_cz_lsm_floatpairs(fh):
    """Read LSM sequence of float pairs from file and return as list."""
    # a signed 32-bit pair count precedes the float64 pairs
    pair_count = struct.unpack('<i', fh.read(4))[0]
    return fh.read_array('<2f8', count=pair_count)
def read_cz_lsm_positions(fh):
    """Read LSM positions from file and return as list.

    Same layout as read_cz_lsm_floatpairs but with an unsigned count.
    """
    size = struct.unpack('<I', fh.read(4))[0]
    return fh.read_array('<2f8', count=size)
def read_cz_lsm_time_stamps(fh):
    """Read LSM time stamps from file and return as list."""
    # header: block size in bytes, then number of float64 stamps
    block_size, stamp_count = struct.unpack('<ii', fh.read(8))
    if block_size != (8 + 8 * stamp_count):
        raise ValueError("lsm_time_stamps block is too short")
    return fh.read_array('<f8', count=stamp_count)
def read_cz_lsm_event_list(fh):
    """Read LSM events from file and return as list of (time, type, text)."""
    count = struct.unpack('<II', fh.read(8))[1]
    events = []
    while count > 0:
        # each event: uint32 entry size, float64 time, uint32 type,
        # followed by a null-terminated text of (esize - 16) bytes
        esize, etime, etype = struct.unpack('<IdI', fh.read(16))
        etext = stripnull(fh.read(esize - 16))
        events.append((etime, etype, etext))
        count -= 1
    return events
def read_cz_lsm_scan_info(fh):
    """Read LSM scan information from file and return as Record.

    Walks the nested (entry, dtype, size) sub-block structure, pushing
    onto a block stack for array/struct entries and popping at the
    0xffffffff end marker.
    """
    block = Record()
    blocks = [block]
    unpack = struct.unpack
    if 0x10000000 != struct.unpack('<I', fh.read(4))[0]:
        # not a Recording sub block
        raise ValueError("not a lsm_scan_info structure")
    fh.read(8)
    while True:
        entry, dtype, size = unpack('<III', fh.read(12))
        if dtype == 2:
            # ascii
            value = stripnull(fh.read(size))
        elif dtype == 4:
            # long
            value = unpack('<i', fh.read(4))[0]
        elif dtype == 5:
            # rational
            value = unpack('<d', fh.read(8))[0]
        else:
            value = 0
        if entry in CZ_LSM_SCAN_INFO_ARRAYS:
            # start a list-valued sub block
            blocks.append(block)
            name = CZ_LSM_SCAN_INFO_ARRAYS[entry]
            newobj = []
            setattr(block, name, newobj)
            block = newobj
        elif entry in CZ_LSM_SCAN_INFO_STRUCTS:
            # start a record-valued sub block appended to current list
            blocks.append(block)
            newobj = Record()
            block.append(newobj)
            block = newobj
        elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES:
            name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry]
            setattr(block, name, value)
        elif entry == 0xffffffff:
            # end sub block
            block = blocks.pop()
        else:
            # unknown entry
            setattr(block, "entry_0x%x" % entry, value)
        if not blocks:
            break
    return block
def read_nih_image_header(fh, byteorder, dtype, count):
    """Read NIH_IMAGE_HEADER tag from file and return as numpy.rec.array."""
    a = fh.read_record(NIH_IMAGE_HEADER, byteorder=byteorder)
    a = a.newbyteorder(byteorder)
    # truncate the fixed-size pascal string fields to their stored lengths
    a.xunit = a.xunit[:a._xunit_len]
    a.um = a.um[:a._um_len]
    return a
def read_micromanager_metadata(fh):
    """Read MicroManager non-TIFF settings from open file and return as dict.
    The settings can be used to read image data without parsing the TIFF file.
    Raise ValueError if file does not contain valid MicroManager metadata.
    """
    fh.seek(0)
    try:
        byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except KeyError:
        # a failed dict lookup raises KeyError, not IndexError (the
        # previous handler could never fire); short or foreign magic
        # bytes are reported as an invalid file
        raise ValueError("not a MicroManager TIFF file")
    results = {}
    fh.seek(8)
    # fixed 32-byte index header at offset 8
    (index_header, index_offset, display_header, display_offset,
     comments_header, comments_offset, summary_header, summary_length
     ) = struct.unpack(byteorder + "IIIIIIII", fh.read(32))
    if summary_header != 2355492:
        raise ValueError("invalid MicroManager summary_header")
    results['summary'] = read_json(fh, byteorder, None, summary_length)
    if index_header != 54773648:
        raise ValueError("invalid MicroManager index_header")
    fh.seek(index_offset)
    header, count = struct.unpack(byteorder + "II", fh.read(8))
    if header != 3453623:
        raise ValueError("invalid MicroManager index_header")
    # index map: 5 uint32 per entry (channel, slice, frame, position, offset)
    data = struct.unpack(byteorder + "IIIII"*count, fh.read(20*count))
    results['index_map'] = {
        'channel': data[::5], 'slice': data[1::5], 'frame': data[2::5],
        'position': data[3::5], 'offset': data[4::5]}
    if display_header != 483765892:
        raise ValueError("invalid MicroManager display_header")
    fh.seek(display_offset)
    header, count = struct.unpack(byteorder + "II", fh.read(8))
    if header != 347834724:
        raise ValueError("invalid MicroManager display_header")
    results['display_settings'] = read_json(fh, byteorder, None, count)
    if comments_header != 99384722:
        raise ValueError("invalid MicroManager comments_header")
    fh.seek(comments_offset)
    header, count = struct.unpack(byteorder + "II", fh.read(8))
    if header != 84720485:
        raise ValueError("invalid MicroManager comments_header")
    results['comments'] = read_json(fh, byteorder, None, count)
    return results
def imagej_metadata(data, bytecounts, byteorder):
    """Return dict from ImageJ metadata tag value.

    'data' is the raw IJMetadata tag value; 'bytecounts' the matching
    IJMetadataByteCounts values (first entry is the header size).
    """
    _str = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
    def read_string(data, byteorder):
        # UTF-16 with the byte order of the file; keep every other byte
        return _str(stripnull(data[0 if byteorder == '<' else 1::2]))
    def read_double(data, byteorder):
        return struct.unpack(byteorder+('d' * (len(data) // 8)), data)
    def read_bytes(data, byteorder):
        #return struct.unpack('b' * len(data), data)
        return numpy.fromstring(data, 'uint8')
    metadata_types = {  # big endian
        b'info': ('info', read_string),
        b'labl': ('labels', read_string),
        b'rang': ('ranges', read_double),
        b'luts': ('luts', read_bytes),
        b'roi ': ('roi', read_bytes),
        b'over': ('overlays', read_bytes)}
    metadata_types.update(  # little endian
        dict((k[::-1], v) for k, v in metadata_types.items()))
    if not bytecounts:
        raise ValueError("no ImageJ metadata")
    if not data[:4] in (b'IJIJ', b'JIJI'):
        raise ValueError("invalid ImageJ metadata")
    header_size = bytecounts[0]
    if header_size < 12 or header_size > 804:
        raise ValueError("invalid ImageJ metadata header size")
    ntypes = (header_size - 4) // 8
    # header: sequence of (4-byte type, uint32 count) pairs
    header = struct.unpack(byteorder+'4sI'*ntypes, data[4:4+ntypes*8])
    pos = 4 + ntypes * 8
    counter = 0
    result = {}
    for mtype, count in zip(header[::2], header[1::2]):
        values = []
        name, func = metadata_types.get(mtype, (_str(mtype), read_bytes))
        for _ in range(count):
            counter += 1
            pos1 = pos + bytecounts[counter]
            values.append(func(data[pos:pos1], byteorder))
            pos = pos1
        result[name.strip()] = values[0] if count == 1 else values
    return result
def imagej_description(description):
    """Return dict from ImageJ image_description tag.

    Each 'key=value' line is parsed; values are coerced to int, float,
    bool, or text, in that order of preference.
    """
    def parse_bool(raw):
        return {b'true': True, b'false': False}[raw.lower()]
    to_text = str if sys.version_info[0] < 3 else lambda x: str(x, 'cp1252')
    parsed = {}
    for line in description.splitlines():
        try:
            key, val = line.split(b'=')
        except Exception:
            # skip lines without exactly one '=' separator
            continue
        key = key.strip()
        val = val.strip()
        for converter in (int, float, parse_bool, to_text):
            try:
                val = converter(val)
            except Exception:
                continue
            break
        parsed[to_text(key)] = val
    return parsed
def _replace_by(module_function, package=None, warn=False):
    """Try replace decorated function by module.function.

    Used to substitute pure-Python codecs with the C extension versions
    when available; the original function is kept in globals() under
    '__old_<name>'. On any import failure the decorated function is
    returned unchanged.
    """
    try:
        from importlib import import_module
    except ImportError:
        warnings.warn('could not import module importlib')
        return lambda func: func
    def decorate(func, module_function=module_function, warn=warn):
        try:
            module, function = module_function.split('.')
            if not package:
                module = import_module(module)
            else:
                module = import_module('.' + module, package=package)
            func, oldfunc = getattr(module, function), func
            globals()['__old_' + func.__name__] = oldfunc
        except Exception:
            if warn:
                warnings.warn("failed to import %s" % module_function)
        return func
    return decorate
def decodejpg(encoded, tables=b'', photometric=None,
              ycbcr_subsampling=None, ycbcr_positioning=None):
    """Decode JPEG encoded byte string (using _czifile extension module).

    Requires the optional _czifile C extension; raises ImportError
    otherwise.
    """
    import _czifile
    image = _czifile.decodejpg(encoded, tables)
    if photometric == 'rgb' and ycbcr_subsampling and ycbcr_positioning:
        # TODO: convert YCbCr to RGB
        pass
    return image.tostring()
@_replace_by('_tifffile.decodepackbits')
def decodepackbits(encoded):
    """Decompress PackBits encoded byte string.
    PackBits is a simple byte-oriented run-length compression scheme.
    """
    # on Python 2, bytes indexing yields str; convert to int with ord
    func = ord if sys.version[0] == '2' else lambda x: x
    result = []
    result_extend = result.extend
    i = 0
    try:
        while True:
            n = func(encoded[i]) + 1
            i += 1
            if n < 129:
                # literal run of n bytes
                result_extend(encoded[i:i+n])
                i += n
            elif n > 129:
                # replicate the next byte (258 - n) times
                result_extend(encoded[i:i+1] * (258-n))
                i += 1
            # n == 129 (flag byte 0x80) is a no-op
    except IndexError:
        # running off the end of 'encoded' terminates the stream
        pass
    return b''.join(result) if sys.version[0] == '2' else bytes(result)
@_replace_by('_tifffile.decodelzw')
def decodelzw(encoded):
    """Decompress LZW (Lempel-Ziv-Welch) encoded TIFF strip (byte string).
    The strip must begin with a CLEAR code and end with an EOI code.
    This is an implementation of the LZW decoding algorithm described in (1).
    It is not compatible with old style LZW compressed files like quad-lzw.tif.
    """
    len_encoded = len(encoded)
    bitcount_max = len_encoded * 8
    unpack = struct.unpack
    if sys.version[0] == '2':
        newtable = [chr(i) for i in range(256)]
    else:
        newtable = [bytes([i]) for i in range(256)]
    # entries 256 (CLEAR) and 257 (EOI) are placeholders, never dereferenced
    newtable.extend((0, 0))
    def next_code():
        """Return integer of `bitw` bits at `bitcount` position in encoded."""
        start = bitcount // 8
        s = encoded[start:start+4]
        try:
            code = unpack('>I', s)[0]
        except Exception:
            # near the end of the strip, pad to 4 bytes
            code = unpack('>I', s + b'\x00'*(4-len(s)))[0]
        code <<= bitcount % 8
        code &= mask
        return code >> shr
    switchbitch = {  # code: bit-width, shr-bits, bit-mask
        255: (9, 23, int(9*'1'+'0'*23, 2)),
        511: (10, 22, int(10*'1'+'0'*22, 2)),
        1023: (11, 21, int(11*'1'+'0'*21, 2)),
        2047: (12, 20, int(12*'1'+'0'*20, 2)), }
    bitw, shr, mask = switchbitch[255]
    bitcount = 0
    if len_encoded < 4:
        raise ValueError("strip must be at least 4 characters long")
    if next_code() != 256:
        raise ValueError("strip must begin with CLEAR code")
    code = 0
    oldcode = 0
    result = []
    result_append = result.append
    while True:
        code = next_code()  # ~5% faster when inlining this function
        bitcount += bitw
        if code == 257 or bitcount >= bitcount_max:  # EOI
            break
        if code == 256:  # CLEAR
            # reset the string table and code width
            table = newtable[:]
            table_append = table.append
            lentable = 258
            bitw, shr, mask = switchbitch[255]
            code = next_code()
            bitcount += bitw
            if code == 257:  # EOI
                break
            result_append(table[code])
        else:
            if code < lentable:
                decoded = table[code]
                newcode = table[oldcode] + decoded[:1]
            else:
                # KwKwK case: code not yet in table
                newcode = table[oldcode]
                newcode += newcode[:1]
                decoded = newcode
            result_append(decoded)
            table_append(newcode)
            lentable += 1
        oldcode = code
        if lentable in switchbitch:
            # table grew past a power-of-two boundary: widen the code size
            bitw, shr, mask = switchbitch[lentable]
    if code != 257:
        warnings.warn("unexpected end of lzw stream (code %i)" % code)
    return b''.join(result)
@_replace_by('_tifffile.unpackints')
def unpackints(data, dtype, itemsize, runlen=0):
    """Decompress byte string to array of integers of any bit size <= 32.
    Parameters
    ----------
    data : byte str
        Data to decompress.
    dtype : numpy.dtype or str
        A numpy boolean or integer type.
    itemsize : int
        Number of bits per integer.
    runlen : int
        Number of consecutive integers, after which to start at next byte.
    """
    if itemsize == 1:  # bitarray
        data = numpy.fromstring(data, '|B')
        data = numpy.unpackbits(data)
        if runlen % 8:
            # drop the padding bits at the end of each row
            data = data.reshape(-1, runlen + (8 - runlen % 8))
            data = data[:, :runlen].reshape(-1)
        return data.astype(dtype)
    dtype = numpy.dtype(dtype)
    if itemsize in (8, 16, 32, 64):
        # byte-aligned sizes need no bit shuffling
        return numpy.fromstring(data, dtype)
    if itemsize < 1 or itemsize > 32:
        raise ValueError("itemsize out of range: %i" % itemsize)
    if dtype.kind not in "biu":
        raise ValueError("invalid dtype")
    itembytes = next(i for i in (1, 2, 4, 8) if 8 * i >= itemsize)
    if itembytes != dtype.itemsize:
        raise ValueError("dtype.itemsize too small")
    if runlen == 0:
        runlen = len(data) // itembytes
    # bits of padding at the end of each run, to realign to a byte boundary
    skipbits = runlen*itemsize % 8
    if skipbits:
        skipbits = 8 - skipbits
    shrbits = itembytes*8 - itemsize
    bitmask = int(itemsize*'1'+'0'*shrbits, 2)
    dtypestr = '>' + dtype.char  # dtype always big endian?
    unpack = struct.unpack
    l = runlen * (len(data)*8 // (runlen*itemsize + skipbits))
    result = numpy.empty((l, ), dtype)
    bitcount = 0
    for i in range(len(result)):
        start = bitcount // 8
        s = data[start:start+itembytes]
        try:
            code = unpack(dtypestr, s)[0]
        except Exception:
            # pad a short read at the end of the buffer
            code = unpack(dtypestr, s + b'\x00'*(itembytes-len(s)))[0]
        code <<= bitcount % 8
        code &= bitmask
        result[i] = code >> shrbits
        bitcount += itemsize
        if (i+1) % runlen == 0:
            bitcount += skipbits
    return result
def unpackrgb(data, dtype='<B', bitspersample=(5, 6, 5), rescale=True):
    """Return array from byte string containing packed samples.
    Use to unpack RGB565 or RGB555 to RGB888 format.
    Parameters
    ----------
    data : byte str
        The data to be decoded. Samples in each pixel are stored consecutively.
        Pixels are aligned to 8, 16, or 32 bit boundaries.
    dtype : numpy.dtype
        The sample data type. The byteorder applies also to the data stream.
    bitspersample : tuple
        Number of bits for each sample in a pixel.
    rescale : bool
        Upscale samples to the number of bits in dtype.
    Returns
    -------
    result : ndarray
        Flattened array of unpacked samples of native dtype.
    Examples
    --------
    >>> data = struct.pack('BBBB', 0x21, 0x08, 0xff, 0xff)
    >>> print(unpackrgb(data, '<B', (5, 6, 5), False))
    [ 1  1  1 31 63 31]
    >>> print(unpackrgb(data, '<B', (5, 6, 5)))
    [  8   4   8 255 255 255]
    >>> print(unpackrgb(data, '<B', (5, 5, 5)))
    [ 16   8   8 255 255 255]
    """
    dtype = numpy.dtype(dtype)
    bits = int(numpy.sum(bitspersample))
    if not (bits <= 32 and all(i <= dtype.itemsize*8 for i in bitspersample)):
        raise ValueError("sample size not supported %s" % str(bitspersample))
    # smallest unsigned integer type that can hold one packed pixel
    dt = next(i for i in 'BHI' if numpy.dtype(i).itemsize*8 >= bits)
    # frombuffer replaces deprecated fromstring; the read-only view is
    # fine because all operations below allocate new arrays
    data = numpy.frombuffer(data, dtype.byteorder+dt)
    result = numpy.empty((data.size, len(bitspersample)), dtype.char)
    for i, bps in enumerate(bitspersample):
        # shift the sample down to bit 0 and mask it out
        t = data >> int(numpy.sum(bitspersample[i+1:]))
        t &= int('0b'+'1'*bps, 2)
        if rescale:
            # replicate the sample bits to fill the output bit width
            o = ((dtype.itemsize * 8) // bps + 1) * bps
            if o > data.dtype.itemsize * 8:
                t = t.astype('I')
            t *= (2**o - 1) // (2**bps - 1)
            t //= 2**(o - (dtype.itemsize * 8))
        result[:, i] = t
    return result.reshape(-1)
def reorient(image, orientation):
    """Return reoriented view of image array.
    Parameters
    ----------
    image : numpy array
        Non-squeezed output of asarray() functions.
        Axes -3 and -2 must be image length and width respectively.
    orientation : int or str
        One of TIFF_ORIENTATIONS keys or values.
    """
    flips = {
        'top_left': lambda im: im,
        'top_right': lambda im: im[..., ::-1, :],
        'bottom_left': lambda im: im[..., ::-1, :, :],
        'bottom_right': lambda im: im[..., ::-1, ::-1, :],
        'left_top': lambda im: numpy.swapaxes(im, -3, -2),
        'right_top': lambda im: numpy.swapaxes(im, -3, -2)[..., ::-1, :],
        'left_bottom': lambda im: numpy.swapaxes(im, -3, -2)[..., ::-1, :, :],
        'right_bottom': lambda im: numpy.swapaxes(
            im, -3, -2)[..., ::-1, ::-1, :],
    }
    key = TIFF_ORIENTATIONS.get(orientation, orientation)
    transform = flips.get(key)
    if transform is not None:
        return transform(image)
    # unknown orientations fall through and return None, as before
def squeeze_axes(shape, axes, skip='XY'):
    """Return shape and axes with single-dimensional entries removed.
    Remove unused dimensions unless their axes are listed in 'skip'.
    >>> squeeze_axes((5, 1, 2, 1, 1), 'TZYXC')
    ((5, 2, 1), 'TYX')
    """
    if len(shape) != len(axes):
        raise ValueError("dimensions of axes and shape don't match")
    kept = [(size, axis) for size, axis in zip(shape, axes)
            if size > 1 or axis in skip]
    sizes, labels = zip(*kept)
    return sizes, ''.join(labels)
def transpose_axes(data, axes, asaxes='CTZYX'):
    """Return data with its axes permuted to match specified axes.
    A view is returned if possible.
    >>> transpose_axes(numpy.zeros((2, 3, 4, 5)), 'TYXC', asaxes='CTZYX').shape
    (5, 2, 1, 3, 4)
    """
    unknown = [ax for ax in axes if ax not in asaxes]
    if unknown:
        raise ValueError("unknown axis %s" % unknown[0])
    # prepend any axes missing from 'axes' as size-1 dimensions
    expanded_shape = data.shape
    for ax in reversed(asaxes):
        if ax not in axes:
            axes = ax + axes
            expanded_shape = (1,) + expanded_shape
    data = data.reshape(expanded_shape)
    # permute to the requested axis order
    return data.transpose([axes.index(ax) for ax in asaxes])
def stack_pages(pages, memmap=False, *args, **kwargs):
    """Read data from sequence of TiffPage and stack them vertically.
    If memmap is True, return an array stored in a binary file on disk.
    Additional parameters are passsed to the page asarray function.
    """
    if len(pages) == 0:
        raise ValueError("no pages")
    if len(pages) == 1:
        return pages[0].asarray(memmap=memmap, *args, **kwargs)
    # read the first page to determine dtype and per-page shape
    result = pages[0].asarray(*args, **kwargs)
    shape = (len(pages),) + result.shape
    if memmap:
        # NOTE(review): the temporary file is deleted when the 'with'
        # block exits while the memmap is still in use; this relies on
        # the OS keeping the mapping alive — confirm on all platforms
        with tempfile.NamedTemporaryFile() as fh:
            result = numpy.memmap(fh, dtype=result.dtype, shape=shape)
    else:
        result = numpy.empty(shape, dtype=result.dtype)
    for i, page in enumerate(pages):
        result[i] = page.asarray(*args, **kwargs)
    return result
def stripnull(string):
    """Return string truncated at first null character.
    Clean NULL terminated C strings.
    >>> stripnull(b'string\\x00')
    b'string'
    """
    null_pos = string.find(b'\x00')
    if null_pos < 0:
        return string
    return string[:null_pos]
def stripascii(string):
    """Return string truncated at last byte that is 7bit ASCII.
    Clean NULL separated and terminated TIFF strings.
    >>> stripascii(b'string\\x00string\\n\\x01\\x00')
    b'string\\x00string\\n'
    >>> stripascii(b'\\x00')
    b''
    """
    # on Python 2 bytes index to str; convert to int for comparison
    to_int = ord if sys.version_info[0] < 3 else lambda c: c
    # scan backwards for the last printable/whitespace ASCII byte
    for end in reversed(range(len(string))):
        if 8 < to_int(string[end]) < 127:
            return string[:end + 1]
    return string[:0]
def format_size(size):
    """Return file size as string from byte size.

    Values below 2048 of a unit are printed in that unit; anything
    larger than 2048 TB is expressed in PB.
    """
    for unit in ('B', 'KB', 'MB', 'GB', 'TB'):
        if size < 2048:
            return "%.f %s" % (size, unit)
        size /= 1024.0
    # previously fell off the loop and returned None for sizes >= 2048 TB
    return "%.f PB" % size
def sequence(value):
    """Return tuple containing value if value is not a sequence.
    >>> sequence(1)
    (1,)
    >>> sequence([1])
    [1]
    """
    # anything with a length is passed through unchanged
    try:
        len(value)
    except TypeError:
        return (value, )
    return value
def product(iterable):
    """Return product of sequence of numbers.
    Equivalent of functools.reduce(operator.mul, iterable, 1).
    >>> product([2**8, 2**30])
    274877906944
    >>> product([])
    1
    """
    result = 1
    for factor in iterable:
        result = result * factor
    return result
def natural_sorted(iterable):
    """Return human sorted list of strings.
    E.g. for sorting file names.
    >>> natural_sorted(['f1', 'f2', 'f10'])
    ['f1', 'f2', 'f10']
    """
    digit_runs = re.compile(r'(\d+)')
    def natural_key(value):
        # split into digit and non-digit runs; compare digits numerically
        return [int(part) if part.isdigit() else part
                for part in re.split(digit_runs, value)]
    return sorted(iterable, key=natural_key)
def excel_datetime(timestamp, epoch=datetime.datetime.fromordinal(693594)):
    """Return datetime object from timestamp in Excel serial format.
    Convert LSM time stamps.
    >>> excel_datetime(40237.029999999795)
    datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
    """
    # ordinal 693594 is the Excel serial-date epoch (1899-12-30)
    days_since_epoch = datetime.timedelta(timestamp)
    return epoch + days_since_epoch
def julian_datetime(julianday, milisecond=0):
    """Return datetime from days since 1/1/4713 BC and ms since midnight.
    Convert Julian dates according to MetaMorph.
    >>> julian_datetime(2451576, 54362783)
    datetime.datetime(2000, 2, 2, 15, 6, 2, 783)
    """
    if julianday <= 1721423:
        # no datetime before year 1
        return None
    jd = julianday + 1
    if jd > 2299160:
        # Gregorian calendar correction
        century = math.trunc((jd - 1867216.25) / 36524.25)
        jd += 1 + century - century // 4
    adjusted = jd + (1524 if jd > 1721423 else 1158)
    year_count = math.trunc((adjusted - 122.1) / 365.25)
    day_count = math.trunc(365.25 * year_count)
    month_count = math.trunc((adjusted - day_count) / 30.6001)
    day = adjusted - day_count - math.trunc(30.6001 * month_count)
    month = month_count - (1 if month_count < 13.5 else 13)
    year = year_count - (4716 if month > 2.5 else 4715)
    # split the milliseconds-since-midnight into time-of-day fields
    hour, milisecond = divmod(milisecond, 1000 * 60 * 60)
    minute, milisecond = divmod(milisecond, 1000 * 60)
    second, milisecond = divmod(milisecond, 1000)
    return datetime.datetime(year, month, day,
                             hour, minute, second, milisecond)
def test_tifffile(directory='testimages', verbose=True):
    """Read all images in directory.
    Print error message on failure.

    Smoke test: opens every file in 'directory' with TiffFile, reads its
    array data, and prints per-file timing plus a final summary.
    >>> test_tifffile(verbose=False)
    """
    successful = 0
    failed = 0
    start = time.time()
    for f in glob.glob(os.path.join(directory, '*.*')):
        if verbose:
            print("\n%s>\n" % f.lower(), end='')
        t0 = time.time()
        try:
            tif = TiffFile(f, multifile=True)
        except Exception as e:
            if not verbose:
                print(f, end=' ')
            print("ERROR:", e)
            failed += 1
            continue
        try:
            img = tif.asarray()
        except ValueError:
            try:
                # fall back to reading only the first page
                img = tif[0].asarray()
            except Exception as e:
                if not verbose:
                    print(f, end=' ')
                print("ERROR:", e)
                failed += 1
                continue
        finally:
            tif.close()
        successful += 1
        if verbose:
            print("%s, %s %s, %s, %.0f ms" % (
                str(tif), str(img.shape), img.dtype, tif[0].compression,
                (time.time()-t0) * 1e3))
    if verbose:
        print("\nSuccessfully read %i of %i files in %.3f s\n" % (
            successful, successful+failed, time.time()-start))
class TIFF_SUBFILE_TYPES(object):
    """Decode a NewSubfileType bit mask into a tuple of type names."""
    def __getitem__(self, key):
        flag_names = ((1, 'reduced_image'), (2, 'page'), (4, 'mask'))
        return tuple(name for bit, name in flag_names if key & bit)
TIFF_PHOTOMETRICS = {
0: 'miniswhite',
1: 'minisblack',
2: 'rgb',
3: 'palette',
4: 'mask',
5: 'separated', # CMYK
6: 'ycbcr',
8: 'cielab',
9: 'icclab',
10: 'itulab',
32803: 'cfa', # Color Filter Array
32844: 'logl',
32845: 'logluv',
34892: 'linear_raw'
}
TIFF_COMPESSIONS = {
1: None,
2: 'ccittrle',
3: 'ccittfax3',
4: 'ccittfax4',
5: 'lzw',
6: 'ojpeg',
7: 'jpeg',
8: 'adobe_deflate',
9: 't85',
10: 't43',
32766: 'next',
32771: 'ccittrlew',
32773: 'packbits',
32809: 'thunderscan',
32895: 'it8ctpad',
32896: 'it8lw',
32897: 'it8mp',
32898: 'it8bl',
32908: 'pixarfilm',
32909: 'pixarlog',
32946: 'deflate',
32947: 'dcs',
34661: 'jbig',
34676: 'sgilog',
34677: 'sgilog24',
34712: 'jp2000',
34713: 'nef',
}
TIFF_DECOMPESSORS = {
None: lambda x: x,
'adobe_deflate': zlib.decompress,
'deflate': zlib.decompress,
'packbits': decodepackbits,
'lzw': decodelzw,
# 'jpeg': decodejpg
}
TIFF_DATA_TYPES = {
1: '1B', # BYTE 8-bit unsigned integer.
2: '1s', # ASCII 8-bit byte that contains a 7-bit ASCII code;
# the last byte must be NULL (binary zero).
3: '1H', # SHORT 16-bit (2-byte) unsigned integer
4: '1I', # LONG 32-bit (4-byte) unsigned integer.
5: '2I', # RATIONAL Two LONGs: the first represents the numerator of
# a fraction; the second, the denominator.
6: '1b', # SBYTE An 8-bit signed (twos-complement) integer.
7: '1s', # UNDEFINED An 8-bit byte that may contain anything,
# depending on the definition of the field.
8: '1h', # SSHORT A 16-bit (2-byte) signed (twos-complement) integer.
9: '1i', # SLONG A 32-bit (4-byte) signed (twos-complement) integer.
10: '2i', # SRATIONAL Two SLONGs: the first represents the numerator
# of a fraction, the second the denominator.
11: '1f', # FLOAT Single precision (4-byte) IEEE format.
12: '1d', # DOUBLE Double precision (8-byte) IEEE format.
13: '1I', # IFD unsigned 4 byte IFD offset.
#14: '', # UNICODE
#15: '', # COMPLEX
16: '1Q', # LONG8 unsigned 8 byte integer (BigTiff)
17: '1q', # SLONG8 signed 8 byte integer (BigTiff)
18: '1Q', # IFD8 unsigned 8 byte IFD offset (BigTiff)
}
TIFF_SAMPLE_FORMATS = {
1: 'uint',
2: 'int',
3: 'float',
#4: 'void',
#5: 'complex_int',
6: 'complex',
}
TIFF_SAMPLE_DTYPES = {
('uint', 1): '?', # bitmap
('uint', 2): 'B',
('uint', 3): 'B',
('uint', 4): 'B',
('uint', 5): 'B',
('uint', 6): 'B',
('uint', 7): 'B',
('uint', 8): 'B',
('uint', 9): 'H',
('uint', 10): 'H',
('uint', 11): 'H',
('uint', 12): 'H',
('uint', 13): 'H',
('uint', 14): 'H',
('uint', 15): 'H',
('uint', 16): 'H',
('uint', 17): 'I',
('uint', 18): 'I',
('uint', 19): 'I',
('uint', 20): 'I',
('uint', 21): 'I',
('uint', 22): 'I',
('uint', 23): 'I',
('uint', 24): 'I',
('uint', 25): 'I',
('uint', 26): 'I',
('uint', 27): 'I',
('uint', 28): 'I',
('uint', 29): 'I',
('uint', 30): 'I',
('uint', 31): 'I',
('uint', 32): 'I',
('uint', 64): 'Q',
('int', 8): 'b',
('int', 16): 'h',
('int', 32): 'i',
('int', 64): 'q',
('float', 16): 'e',
('float', 32): 'f',
('float', 64): 'd',
('complex', 64): 'F',
('complex', 128): 'D',
('uint', (5, 6, 5)): 'B',
}
TIFF_ORIENTATIONS = {
1: 'top_left',
2: 'top_right',
3: 'bottom_right',
4: 'bottom_left',
5: 'left_top',
6: 'right_top',
7: 'right_bottom',
8: 'left_bottom',
}
# TODO: is there a standard for character axes labels?
AXES_LABELS = {
'X': 'width',
'Y': 'height',
'Z': 'depth',
'S': 'sample', # rgb(a)
'I': 'series', # general sequence, plane, page, IFD
'T': 'time',
'C': 'channel', # color, emission wavelength
'A': 'angle',
'P': 'phase', # formerly F # P is Position in LSM!
'R': 'tile', # region, point, mosaic
'H': 'lifetime', # histogram
'E': 'lambda', # excitation wavelength
'L': 'exposure', # lux
'V': 'event',
'Q': 'other',
#'M': 'mosaic', # LSM 6
}
AXES_LABELS.update(dict((v, k) for k, v in AXES_LABELS.items()))
# Map OME pixel types to numpy dtype
OME_PIXEL_TYPES = {
'int8': 'i1',
'int16': 'i2',
'int32': 'i4',
'uint8': 'u1',
'uint16': 'u2',
'uint32': 'u4',
'float': 'f4',
# 'bit': 'bit',
'double': 'f8',
'complex': 'c8',
'double-complex': 'c16',
}
# NIH Image PicHeader v1.63
NIH_IMAGE_HEADER = [
('fileid', 'a8'),
('nlines', 'i2'),
('pixelsperline', 'i2'),
('version', 'i2'),
('oldlutmode', 'i2'),
('oldncolors', 'i2'),
('colors', 'u1', (3, 32)),
('oldcolorstart', 'i2'),
('colorwidth', 'i2'),
('extracolors', 'u2', (6, 3)),
('nextracolors', 'i2'),
('foregroundindex', 'i2'),
('backgroundindex', 'i2'),
('xscale', 'f8'),
('_x0', 'i2'),
('_x1', 'i2'),
('units_t', 'i2'), # NIH_UNITS_TYPE
('p1', [('x', 'i2'), ('y', 'i2')]),
('p2', [('x', 'i2'), ('y', 'i2')]),
('curvefit_t', 'i2'), # NIH_CURVEFIT_TYPE
('ncoefficients', 'i2'),
('coeff', 'f8', 6),
('_um_len', 'u1'),
('um', 'a15'),
('_x2', 'u1'),
('binarypic', 'b1'),
('slicestart', 'i2'),
('sliceend', 'i2'),
('scalemagnification', 'f4'),
('nslices', 'i2'),
('slicespacing', 'f4'),
('currentslice', 'i2'),
('frameinterval', 'f4'),
('pixelaspectratio', 'f4'),
('colorstart', 'i2'),
('colorend', 'i2'),
('ncolors', 'i2'),
('fill1', '3u2'),
('fill2', '3u2'),
('colortable_t', 'u1'), # NIH_COLORTABLE_TYPE
('lutmode_t', 'u1'), # NIH_LUTMODE_TYPE
('invertedtable', 'b1'),
('zeroclip', 'b1'),
('_xunit_len', 'u1'),
('xunit', 'a11'),
('stacktype_t', 'i2'), # NIH_STACKTYPE_TYPE
]
NIH_COLORTABLE_TYPE = (
'CustomTable', 'AppleDefault', 'Pseudo20', 'Pseudo32', 'Rainbow',
'Fire1', 'Fire2', 'Ice', 'Grays', 'Spectrum')
NIH_LUTMODE_TYPE = (
'PseudoColor', 'OldAppleDefault', 'OldSpectrum', 'GrayScale',
'ColorLut', 'CustomGrayscale')
NIH_CURVEFIT_TYPE = (
'StraightLine', 'Poly2', 'Poly3', 'Poly4', 'Poly5', 'ExpoFit',
'PowerFit', 'LogFit', 'RodbardFit', 'SpareFit1', 'Uncalibrated',
'UncalibratedOD')
NIH_UNITS_TYPE = (
'Nanometers', 'Micrometers', 'Millimeters', 'Centimeters', 'Meters',
'Kilometers', 'Inches', 'Feet', 'Miles', 'Pixels', 'OtherUnits')
NIH_STACKTYPE_TYPE = (
'VolumeStack', 'RGBStack', 'MovieStack', 'HSVStack')
# Map Universal Imaging Corporation MetaMorph internal tag ids to name and type
UIC_TAGS = {
0: ('auto_scale', int),
1: ('min_scale', int),
2: ('max_scale', int),
3: ('spatial_calibration', int),
4: ('x_calibration', Fraction),
5: ('y_calibration', Fraction),
6: ('calibration_units', str),
7: ('name', str),
8: ('thresh_state', int),
9: ('thresh_state_red', int),
10: ('tagid_10', None), # undefined
11: ('thresh_state_green', int),
12: ('thresh_state_blue', int),
13: ('thresh_state_lo', int),
14: ('thresh_state_hi', int),
15: ('zoom', int),
16: ('create_time', julian_datetime),
17: ('last_saved_time', julian_datetime),
18: ('current_buffer', int),
19: ('gray_fit', None),
20: ('gray_point_count', None),
21: ('gray_x', Fraction),
22: ('gray_y', Fraction),
23: ('gray_min', Fraction),
24: ('gray_max', Fraction),
25: ('gray_unit_name', str),
26: ('standard_lut', int),
27: ('wavelength', int),
28: ('stage_position', '(%i,2,2)u4'), # N xy positions as fractions
29: ('camera_chip_offset', '(%i,2,2)u4'), # N xy offsets as fractions
30: ('overlay_mask', None),
31: ('overlay_compress', None),
32: ('overlay', None),
33: ('special_overlay_mask', None),
34: ('special_overlay_compress', None),
35: ('special_overlay', None),
36: ('image_property', read_uic_image_property),
37: ('stage_label', '%ip'), # N str
38: ('autoscale_lo_info', Fraction),
39: ('autoscale_hi_info', Fraction),
40: ('absolute_z', '(%i,2)u4'), # N fractions
41: ('absolute_z_valid', '(%i,)u4'), # N long
42: ('gamma', int),
43: ('gamma_red', int),
44: ('gamma_green', int),
45: ('gamma_blue', int),
46: ('camera_bin', int),
47: ('new_lut', int),
48: ('image_property_ex', None),
49: ('plane_property', int),
50: ('user_lut_table', '(256,3)u1'),
51: ('red_autoscale_info', int),
52: ('red_autoscale_lo_info', Fraction),
53: ('red_autoscale_hi_info', Fraction),
54: ('red_minscale_info', int),
55: ('red_maxscale_info', int),
56: ('green_autoscale_info', int),
57: ('green_autoscale_lo_info', Fraction),
58: ('green_autoscale_hi_info', Fraction),
59: ('green_minscale_info', int),
60: ('green_maxscale_info', int),
61: ('blue_autoscale_info', int),
62: ('blue_autoscale_lo_info', Fraction),
63: ('blue_autoscale_hi_info', Fraction),
64: ('blue_min_scale_info', int),
65: ('blue_max_scale_info', int),
#66: ('overlay_plane_color', read_uic_overlay_plane_color),
}
# Olympus FluoView
# Layout of one axis record in an MM header: (field name, numpy dtype string).
# 'aNN' fields are fixed-length byte strings.
MM_DIMENSION = [
    ('name', 'a16'),
    ('size', 'i4'),
    ('origin', 'f8'),
    ('resolution', 'f8'),
    ('unit', 'a64'),
]
# Binary layout of the FluoView MM_HEADER structure stored in tag 34361.
# Entries are (field name, dtype) or (field name, sub-struct, count).
MM_HEADER = [
    ('header_flag', 'i2'),
    ('image_type', 'u1'),
    ('image_name', 'a257'),
    ('offset_data', 'u4'),
    ('palette_size', 'i4'),
    ('offset_palette0', 'u4'),
    ('offset_palette1', 'u4'),
    ('comment_size', 'i4'),
    ('offset_comment', 'u4'),
    # up to 10 dimension records, each described by MM_DIMENSION above
    ('dimensions', MM_DIMENSION, 10),
    ('offset_position', 'u4'),
    ('map_type', 'i2'),
    ('map_min', 'f8'),
    ('map_max', 'f8'),
    ('min_value', 'f8'),
    ('max_value', 'f8'),
    ('offset_map', 'u4'),
    ('gamma', 'f8'),
    ('offset', 'f8'),
    ('gray_channel', MM_DIMENSION),
    ('offset_thumbnail', 'u4'),
    ('voice_field', 'i4'),
    ('offset_voice_field', 'u4'),
]
# Carl Zeiss LSM
# Binary layout of the CZ_LSMINFO structure (private tag 34412):
# (field name, numpy dtype string). The offset_* fields point to optional
# sub-records; some of them are decoded via CZ_LSM_INFO_READERS.
CZ_LSM_INFO = [
    ('magic_number', 'u4'),
    ('structure_size', 'i4'),
    ('dimension_x', 'i4'),
    ('dimension_y', 'i4'),
    ('dimension_z', 'i4'),
    ('dimension_channels', 'i4'),
    ('dimension_time', 'i4'),
    ('data_type', 'i4'), # CZ_DATA_TYPES
    ('thumbnail_x', 'i4'),
    ('thumbnail_y', 'i4'),
    ('voxel_size_x', 'f8'),
    ('voxel_size_y', 'f8'),
    ('voxel_size_z', 'f8'),
    ('origin_x', 'f8'),
    ('origin_y', 'f8'),
    ('origin_z', 'f8'),
    ('scan_type', 'u2'),
    ('spectral_scan', 'u2'),
    ('type_of_data', 'u4'), # CZ_TYPE_OF_DATA
    ('offset_vector_overlay', 'u4'),
    ('offset_input_lut', 'u4'),
    ('offset_output_lut', 'u4'),
    ('offset_channel_colors', 'u4'),
    ('time_interval', 'f8'),
    ('offset_channel_data_types', 'u4'),
    ('offset_scan_info', 'u4'), # CZ_LSM_SCAN_INFO
    ('offset_ks_data', 'u4'),
    ('offset_time_stamps', 'u4'),
    ('offset_event_list', 'u4'),
    ('offset_roi', 'u4'),
    ('offset_bleach_roi', 'u4'),
    ('offset_next_recording', 'u4'),
    # LSM 2.0 ends here
    ('display_aspect_x', 'f8'),
    ('display_aspect_y', 'f8'),
    ('display_aspect_z', 'f8'),
    ('display_aspect_time', 'f8'),
    ('offset_mean_of_roi_overlay', 'u4'),
    ('offset_topo_isoline_overlay', 'u4'),
    ('offset_topo_profile_overlay', 'u4'),
    ('offset_linescan_overlay', 'u4'),
    ('offset_toolbar_flags', 'u4'),
    ('offset_channel_wavelength', 'u4'),
    ('offset_channel_factors', 'u4'),
    ('objective_sphere_correction', 'f8'),
    ('offset_unmix_parameters', 'u4'),
    # LSM 3.2, 4.0 end here
    ('offset_acquisition_parameters', 'u4'),
    ('offset_characteristics', 'u4'),
    ('offset_palette', 'u4'),
    ('time_difference_x', 'f8'),
    ('time_difference_y', 'f8'),
    ('time_difference_z', 'f8'),
    ('internal_use_1', 'u4'),
    ('dimension_p', 'i4'),
    ('dimension_m', 'i4'),
    ('dimensions_reserved', '16i4'),
    ('offset_tile_positions', 'u4'),
    ('reserved_1', '9u4'),
    ('offset_positions', 'u4'),
    ('reserved_2', '21u4'), # must be 0
]
# Import functions for LSM_INFO sub-records
# Keys correspond to the 'offset_<key>' fields of CZ_LSM_INFO; the reader
# functions are defined earlier in this module.
CZ_LSM_INFO_READERS = {
    'scan_info': read_cz_lsm_scan_info,
    'time_stamps': read_cz_lsm_time_stamps,
    'event_list': read_cz_lsm_event_list,
    'channel_colors': read_cz_lsm_floatpairs,
    'positions': read_cz_lsm_floatpairs,
    'tile_positions': read_cz_lsm_floatpairs,
}
# Map cz_lsm_info.scan_type to dimension order
CZ_SCAN_TYPES = {
    0: 'XYZCT', # x-y-z scan
    1: 'XYZCT', # z scan (x-z plane)
    2: 'XYZCT', # line scan
    3: 'XYTCZ', # time series x-y
    4: 'XYZTC', # time series x-z
    5: 'XYTCZ', # time series 'Mean of ROIs'
    6: 'XYZTC', # time series x-y-z
    7: 'XYCTZ', # spline scan
    8: 'XYCZT', # spline scan x-z
    9: 'XYTCZ', # time series spline plane x-z
    10: 'XYZCT', # point mode
}
# Map dimension codes to cz_lsm_info attribute
CZ_DIMENSIONS = {
    'X': 'dimension_x',
    'Y': 'dimension_y',
    'Z': 'dimension_z',
    'C': 'dimension_channels',
    'T': 'dimension_time',
}
# Description of cz_lsm_info.data_type
CZ_DATA_TYPES = {
    0: 'varying data types',
    1: '8 bit unsigned integer',
    2: '12 bit unsigned integer',
    5: '32 bit float',
}
# Description of cz_lsm_info.type_of_data
CZ_TYPE_OF_DATA = {
    0: 'Original scan data',
    1: 'Calculated data',
    2: '3D reconstruction',
    3: 'Topography height map',
}
# Scan-info block IDs that introduce a repeated group (array) of sub-blocks.
CZ_LSM_SCAN_INFO_ARRAYS = {
    0x20000000: "tracks",
    0x30000000: "lasers",
    0x60000000: "detection_channels",
    0x80000000: "illumination_channels",
    0xa0000000: "beam_splitters",
    0xc0000000: "data_channels",
    0x11000000: "timers",
    0x13000000: "markers",
}
# Scan-info block IDs that introduce a single struct entry within an array.
CZ_LSM_SCAN_INFO_STRUCTS = {
    # 0x10000000: "recording",
    0x40000000: "track",
    0x50000000: "laser",
    0x70000000: "detection_channel",
    0x90000000: "illumination_channel",
    0xb0000000: "beam_splitter",
    0xd0000000: "data_channel",
    0x12000000: "timer",
    0x14000000: "marker",
}
# Map scan-info attribute block IDs to attribute names. The high byte of the
# ID selects the owning struct (see CZ_LSM_SCAN_INFO_STRUCTS); names may
# repeat across structs (e.g. "name", "acquire").
CZ_LSM_SCAN_INFO_ATTRIBUTES = {
    # recording
    0x10000001: "name",
    0x10000002: "description",
    0x10000003: "notes",
    0x10000004: "objective",
    0x10000005: "processing_summary",
    0x10000006: "special_scan_mode",
    0x10000007: "scan_type",
    0x10000008: "scan_mode",
    0x10000009: "number_of_stacks",
    0x1000000a: "lines_per_plane",
    0x1000000b: "samples_per_line",
    0x1000000c: "planes_per_volume",
    0x1000000d: "images_width",
    0x1000000e: "images_height",
    0x1000000f: "images_number_planes",
    0x10000010: "images_number_stacks",
    0x10000011: "images_number_channels",
    0x10000012: "linscan_xy_size",
    0x10000013: "scan_direction",
    0x10000014: "time_series",
    0x10000015: "original_scan_data",
    0x10000016: "zoom_x",
    0x10000017: "zoom_y",
    0x10000018: "zoom_z",
    0x10000019: "sample_0x",
    0x1000001a: "sample_0y",
    0x1000001b: "sample_0z",
    0x1000001c: "sample_spacing",
    0x1000001d: "line_spacing",
    0x1000001e: "plane_spacing",
    0x1000001f: "plane_width",
    0x10000020: "plane_height",
    0x10000021: "volume_depth",
    0x10000023: "nutation",
    0x10000034: "rotation",
    0x10000035: "precession",
    0x10000036: "sample_0time",
    0x10000037: "start_scan_trigger_in",
    0x10000038: "start_scan_trigger_out",
    0x10000039: "start_scan_event",
    0x10000040: "start_scan_time",
    0x10000041: "stop_scan_trigger_in",
    0x10000042: "stop_scan_trigger_out",
    0x10000043: "stop_scan_event",
    0x10000044: "stop_scan_time",
    0x10000045: "use_rois",
    0x10000046: "use_reduced_memory_rois",
    0x10000047: "user",
    0x10000048: "use_bc_correction",
    0x10000049: "position_bc_correction1",
    0x10000050: "position_bc_correction2",
    0x10000051: "interpolation_y",
    0x10000052: "camera_binning",
    0x10000053: "camera_supersampling",
    0x10000054: "camera_frame_width",
    0x10000055: "camera_frame_height",
    0x10000056: "camera_offset_x",
    0x10000057: "camera_offset_y",
    0x10000059: "rt_binning",
    0x1000005a: "rt_frame_width",
    0x1000005b: "rt_frame_height",
    0x1000005c: "rt_region_width",
    0x1000005d: "rt_region_height",
    0x1000005e: "rt_offset_x",
    0x1000005f: "rt_offset_y",
    0x10000060: "rt_zoom",
    0x10000061: "rt_line_period",
    0x10000062: "prescan",
    0x10000063: "scan_direction_z",
    # track
    0x40000001: "multiplex_type", # 0 after line; 1 after frame
    0x40000002: "multiplex_order",
    0x40000003: "sampling_mode", # 0 sample; 1 line average; 2 frame average
    0x40000004: "sampling_method", # 1 mean; 2 sum
    0x40000005: "sampling_number",
    0x40000006: "acquire",
    0x40000007: "sample_observation_time",
    0x4000000b: "time_between_stacks",
    0x4000000c: "name",
    0x4000000d: "collimator1_name",
    0x4000000e: "collimator1_position",
    0x4000000f: "collimator2_name",
    0x40000010: "collimator2_position",
    0x40000011: "is_bleach_track",
    0x40000012: "is_bleach_after_scan_number",
    0x40000013: "bleach_scan_number",
    0x40000014: "trigger_in",
    0x40000015: "trigger_out",
    0x40000016: "is_ratio_track",
    0x40000017: "bleach_count",
    0x40000018: "spi_center_wavelength",
    0x40000019: "pixel_time",
    0x40000021: "condensor_frontlens",
    0x40000023: "field_stop_value",
    0x40000024: "id_condensor_aperture",
    0x40000025: "condensor_aperture",
    0x40000026: "id_condensor_revolver",
    0x40000027: "condensor_filter",
    0x40000028: "id_transmission_filter1",
    0x40000029: "id_transmission1",
    0x40000030: "id_transmission_filter2",
    0x40000031: "id_transmission2",
    0x40000032: "repeat_bleach",
    0x40000033: "enable_spot_bleach_pos",
    0x40000034: "spot_bleach_posx",
    0x40000035: "spot_bleach_posy",
    0x40000036: "spot_bleach_posz",
    0x40000037: "id_tubelens",
    0x40000038: "id_tubelens_position",
    0x40000039: "transmitted_light",
    0x4000003a: "reflected_light",
    0x4000003b: "simultan_grab_and_bleach",
    0x4000003c: "bleach_pixel_time",
    # laser
    0x50000001: "name",
    0x50000002: "acquire",
    0x50000003: "power",
    # detection_channel
    0x70000001: "integration_mode",
    0x70000002: "special_mode",
    0x70000003: "detector_gain_first",
    0x70000004: "detector_gain_last",
    0x70000005: "amplifier_gain_first",
    0x70000006: "amplifier_gain_last",
    0x70000007: "amplifier_offs_first",
    0x70000008: "amplifier_offs_last",
    0x70000009: "pinhole_diameter",
    0x7000000a: "counting_trigger",
    0x7000000b: "acquire",
    0x7000000c: "point_detector_name",
    0x7000000d: "amplifier_name",
    0x7000000e: "pinhole_name",
    0x7000000f: "filter_set_name",
    0x70000010: "filter_name",
    0x70000013: "integrator_name",
    0x70000014: "channel_name",
    0x70000015: "detector_gain_bc1",
    0x70000016: "detector_gain_bc2",
    0x70000017: "amplifier_gain_bc1",
    0x70000018: "amplifier_gain_bc2",
    0x70000019: "amplifier_offset_bc1",
    0x70000020: "amplifier_offset_bc2",
    0x70000021: "spectral_scan_channels",
    0x70000022: "spi_wavelength_start",
    0x70000023: "spi_wavelength_stop",
    0x70000026: "dye_name",
    0x70000027: "dye_folder",
    # illumination_channel
    0x90000001: "name",
    0x90000002: "power",
    0x90000003: "wavelength",
    0x90000004: "aquire",
    0x90000005: "detchannel_name",
    0x90000006: "power_bc1",
    0x90000007: "power_bc2",
    # beam_splitter
    0xb0000001: "filter_set",
    0xb0000002: "filter",
    0xb0000003: "name",
    # data_channel
    0xd0000001: "name",
    0xd0000003: "acquire",
    0xd0000004: "color",
    0xd0000005: "sample_type",
    0xd0000006: "bits_per_sample",
    0xd0000007: "ratio_type",
    0xd0000008: "ratio_track1",
    0xd0000009: "ratio_track2",
    0xd000000a: "ratio_channel1",
    0xd000000b: "ratio_channel2",
    0xd000000c: "ratio_const1",
    0xd000000d: "ratio_const2",
    0xd000000e: "ratio_const3",
    0xd000000f: "ratio_const4",
    0xd0000010: "ratio_const5",
    0xd0000011: "ratio_const6",
    0xd0000012: "ratio_first_images1",
    0xd0000013: "ratio_first_images2",
    0xd0000014: "dye_name",
    0xd0000015: "dye_folder",
    0xd0000016: "spectrum",
    0xd0000017: "acquire",
    # timer
    0x12000001: "name",
    0x12000002: "description",
    0x12000003: "interval",
    0x12000004: "trigger_in",
    0x12000005: "trigger_out",
    0x12000006: "activation_time",
    0x12000007: "activation_number",
    # marker
    0x14000001: "name",
    0x14000002: "description",
    0x14000003: "trigger_in",
    0x14000004: "trigger_out",
}
# Map TIFF tag code to attribute name, default value, type, count, validator
TIFF_TAGS = {
    254: ('new_subfile_type', 0, 4, 1, TIFF_SUBFILE_TYPES()),
    255: ('subfile_type', None, 3, 1,
        {0: 'undefined', 1: 'image', 2: 'reduced_image', 3: 'page'}),
    256: ('image_width', None, 4, 1, None),
    257: ('image_length', None, 4, 1, None),
    258: ('bits_per_sample', 1, 3, 1, None),
    259: ('compression', 1, 3, 1, TIFF_COMPESSIONS),
    262: ('photometric', None, 3, 1, TIFF_PHOTOMETRICS),
    266: ('fill_order', 1, 3, 1, {1: 'msb2lsb', 2: 'lsb2msb'}),
    269: ('document_name', None, 2, None, None),
    270: ('image_description', None, 2, None, None),
    271: ('make', None, 2, None, None),
    272: ('model', None, 2, None, None),
    273: ('strip_offsets', None, 4, None, None),
    274: ('orientation', 1, 3, 1, TIFF_ORIENTATIONS),
    277: ('samples_per_pixel', 1, 3, 1, None),
    278: ('rows_per_strip', 2**32-1, 4, 1, None),
    279: ('strip_byte_counts', None, 4, None, None),
    280: ('min_sample_value', None, 3, None, None),
    281: ('max_sample_value', None, 3, None, None), # 2**bits_per_sample
    282: ('x_resolution', None, 5, 1, None),
    283: ('y_resolution', None, 5, 1, None),
    284: ('planar_configuration', 1, 3, 1, {1: 'contig', 2: 'separate'}),
    285: ('page_name', None, 2, None, None),
    286: ('x_position', None, 5, 1, None),
    287: ('y_position', None, 5, 1, None),
    296: ('resolution_unit', 2, 4, 1, {1: 'none', 2: 'inch', 3: 'centimeter'}),
    297: ('page_number', None, 3, 2, None),
    305: ('software', None, 2, None, None),
    306: ('datetime', None, 2, None, None),
    315: ('artist', None, 2, None, None),
    316: ('host_computer', None, 2, None, None),
    317: ('predictor', 1, 3, 1, {1: None, 2: 'horizontal'}),
    318: ('white_point', None, 5, 2, None),
    319: ('primary_chromaticities', None, 5, 6, None),
    320: ('color_map', None, 3, None, None),
    322: ('tile_width', None, 4, 1, None),
    323: ('tile_length', None, 4, 1, None),
    324: ('tile_offsets', None, 4, None, None),
    325: ('tile_byte_counts', None, 4, None, None),
    338: ('extra_samples', None, 3, None,
        {0: 'unspecified', 1: 'assocalpha', 2: 'unassalpha'}),
    339: ('sample_format', 1, 3, 1, TIFF_SAMPLE_FORMATS),
    340: ('smin_sample_value', None, None, None, None),
    341: ('smax_sample_value', None, None, None, None),
    347: ('jpeg_tables', None, 7, None, None),
    530: ('ycbcr_subsampling', 1, 3, 2, None),
    531: ('ycbcr_positioning', 1, 3, 1, None),
    # FIX: 'sgi_matteing' previously used key 32996, duplicating
    # 'sgi_datatype' so the dict literal silently dropped it.  Per libtiff,
    # TIFFTAG_MATTEING is 32995 and TIFFTAG_DATATYPE is 32996.
    32995: ('sgi_matteing', None, None, 1, None), # use extra_samples
    32996: ('sgi_datatype', None, None, 1, None), # use sample_format
    32997: ('image_depth', None, 4, 1, None),
    32998: ('tile_depth', None, 4, 1, None),
    # NOTE(review): tag 33432 (Copyright) is declared as type 1 (BYTE);
    # the TIFF 6.0 specification defines it as ASCII (type 2) -- confirm
    # how the type field is consumed before changing it.
    33432: ('copyright', None, 1, None, None),
    33445: ('md_file_tag', None, 4, 1, None),
    33446: ('md_scale_pixel', None, 5, 1, None),
    33447: ('md_color_table', None, 3, None, None),
    33448: ('md_lab_name', None, 2, None, None),
    33449: ('md_sample_info', None, 2, None, None),
    33450: ('md_prep_date', None, 2, None, None),
    33451: ('md_prep_time', None, 2, None, None),
    33452: ('md_file_units', None, 2, None, None),
    33550: ('model_pixel_scale', None, 12, 3, None),
    33922: ('model_tie_point', None, 12, None, None),
    34665: ('exif_ifd', None, None, 1, None),
    34735: ('geo_key_directory', None, 3, None, None),
    34736: ('geo_double_params', None, 12, None, None),
    34737: ('geo_ascii_params', None, 2, None, None),
    34853: ('gps_ifd', None, None, 1, None),
    37510: ('user_comment', None, None, None, None),
    42112: ('gdal_metadata', None, 2, None, None),
    42113: ('gdal_nodata', None, 2, None, None),
    50289: ('mc_xy_position', None, 12, 2, None),
    50290: ('mc_z_position', None, 12, 1, None),
    50291: ('mc_xy_calibration', None, 12, 3, None),
    50292: ('mc_lens_lem_na_n', None, 12, 3, None),
    50293: ('mc_channel_name', None, 1, None, None),
    50294: ('mc_ex_wavelength', None, 12, 1, None),
    50295: ('mc_time_stamp', None, 12, 1, None),
    50838: ('imagej_byte_counts', None, None, None, None),
    65200: ('flex_xml', None, 2, None, None),
    # code: (attribute name, default value, type, count, validator)
}
# Map custom TIFF tag codes to attribute names and import functions
# Each value is (attribute name, reader); the reader functions are defined
# earlier in this module and decode the raw tag value into Python objects.
CUSTOM_TAGS = {
    700: ('xmp', read_bytes),
    34377: ('photoshop', read_numpy),
    33723: ('iptc', read_bytes),
    34675: ('icc_profile', read_bytes),
    33628: ('uic1tag', read_uic1tag), # Universal Imaging Corporation STK
    33629: ('uic2tag', read_uic2tag),
    33630: ('uic3tag', read_uic3tag),
    33631: ('uic4tag', read_uic4tag),
    34361: ('mm_header', read_mm_header), # Olympus FluoView
    34362: ('mm_stamp', read_mm_stamp),
    34386: ('mm_user_block', read_bytes),
    34412: ('cz_lsm_info', read_cz_lsm_info), # Carl Zeiss LSM
    43314: ('nih_image_header', read_nih_image_header),
    # 40001: ('mc_ipwinscal', read_bytes),
    40100: ('mc_id_old', read_bytes),
    50288: ('mc_id', read_bytes),
    50296: ('mc_frame_properties', read_bytes),
    50839: ('imagej_metadata', read_bytes),
    51123: ('micromanager_metadata', read_json),
}
# Max line length of printed output
PRINT_LINE_LEN = 79
def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
           bitspersample=None, photometric='rgb', interpolation='nearest',
           dpi=96, figure=None, subplot=111, maxdim=8192, **kwargs):
    """Plot n-dimensional images using matplotlib.pyplot.
    Return figure, subplot and plot axis.
    Requires pyplot already imported ``from matplotlib import pyplot``.
    Parameters
    ----------
    bitspersample : int or None
        Number of bits per channel in integer RGB images.
    photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
        The color space of the image data.
    title : str
        Window and subplot title.
    figure : matplotlib.figure.Figure (optional).
        Matplotlib to use for plotting.
    subplot : int
        A matplotlib.pyplot.subplot axis.
    maxdim : int
        maximum image size in any dimension.
    kwargs : optional
        Arguments for matplotlib.pyplot.imshow.
    """
    #if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
    #    raise ValueError("Can't handle %s photometrics" % photometric)
    # TODO: handle photometric == 'separated' (CMYK)
    # palette-indexed data is treated like RGB for axis handling below
    isrgb = photometric in ('rgb', 'palette')
    # clip every dimension to maxdim so huge images stay plottable
    data = numpy.atleast_2d(data.squeeze())
    data = data[(slice(0, maxdim), ) * len(data.shape)]
    dims = data.ndim
    if dims < 2:
        raise ValueError("not an image")
    elif dims == 2:
        # plain 2-D image: nothing to scroll through
        dims = 0
        isrgb = False
    else:
        # move a 3/4-element sample axis to the end: (..., rows, cols, samples)
        if isrgb and data.shape[-3] in (3, 4):
            data = numpy.swapaxes(data, -3, -2)
            data = numpy.swapaxes(data, -2, -1)
        elif not isrgb and (data.shape[-1] < data.shape[-2] // 16 and
                            data.shape[-1] < data.shape[-3] // 16 and
                            data.shape[-1] < 5):
            data = numpy.swapaxes(data, -3, -1)
            data = numpy.swapaxes(data, -2, -1)
        isrgb = isrgb and data.shape[-1] in (3, 4)
        # number of leading dimensions that get a scroll slider each
        dims -= 3 if isrgb else 2
    # normalize sample values to displayable 8-bit / [0, 1] ranges
    if photometric == 'palette' and isrgb:
        datamax = data.max()
        if datamax > 255:
            data >>= 8 # possible precision loss
            data = data.astype('B')
    elif data.dtype.kind in 'ui':
        if not (isrgb and data.dtype.itemsize <= 1) or bitspersample is None:
            try:
                bitspersample = int(math.ceil(math.log(data.max(), 2)))
            except Exception:
                bitspersample = data.dtype.itemsize * 8
        elif not isinstance(bitspersample, int):
            # bitspersample can be tuple, e.g. (5, 6, 5)
            bitspersample = data.dtype.itemsize * 8
        datamax = 2**bitspersample
        if isrgb:
            if bitspersample < 8:
                data <<= 8 - bitspersample
            elif bitspersample > 8:
                data >>= bitspersample - 8 # precision loss
                data = data.astype('B')
    elif data.dtype.kind == 'f':
        datamax = data.max()
        if isrgb and datamax > 1.0:
            if data.dtype.char == 'd':
                data = data.astype('f')
            data /= datamax
    elif data.dtype.kind == 'b':
        datamax = 1
    elif data.dtype.kind == 'c':
        raise NotImplementedError("complex type") # TODO: handle complex types
    if not isrgb:
        if vmax is None:
            vmax = datamax
        if vmin is None:
            if data.dtype.kind == 'i':
                dtmin = numpy.iinfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    # NOTE(review): numpy.min(data > dtmin) takes the min of a
                    # boolean mask (0 or 1); presumably
                    # numpy.min(data[data > dtmin]) was intended -- confirm.
                    vmin = numpy.min(data > dtmin)
            # NOTE(review): this 'if' is not 'elif', so for integer data the
            # 'else' branch below resets vmin to 0 -- confirm intent.
            if data.dtype.kind == 'f':
                dtmin = numpy.finfo(data.dtype).min
                vmin = numpy.min(data)
                if vmin == dtmin:
                    vmin = numpy.min(data > dtmin)
            else:
                vmin = 0
    pyplot = sys.modules['matplotlib.pyplot']
    if figure is None:
        pyplot.rc('font', family='sans-serif', weight='normal', size=8)
        figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
                               facecolor='1.0', edgecolor='w')
        try:
            # not every backend exposes a Tk-style window title
            figure.canvas.manager.window.title(title)
        except Exception:
            pass
        pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
                               left=0.1, right=0.95, hspace=0.05, wspace=0.0)
    subplot = pyplot.subplot(subplot)
    if title:
        try:
            # Py2: decode byte-string titles; on Py3 'unicode' is aliased to
            # str at module bottom, so this raises TypeError and is skipped.
            title = unicode(title, 'Windows-1252')
        except TypeError:
            pass
        pyplot.title(title, size=11)
    if cmap is None:
        if data.dtype.kind in 'ubf' or vmin == 0:
            cmap = 'cubehelix'
        else:
            cmap = 'coolwarm'
        if photometric == 'miniswhite':
            cmap += '_r'
    # show the first 2-D (or RGB) slice; sliders below change the indices
    image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
                          cmap=cmap, interpolation=interpolation, **kwargs)
    if not isrgb:
        pyplot.colorbar() # panchor=(0.55, 0.5), fraction=0.05
    def format_coord(x, y):
        # callback function to format coordinate display in toolbar
        x = int(x + 0.5)
        y = int(y + 0.5)
        try:
            if dims:
                return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
                                               current, x, y)
            else:
                return "%s @ [%4i, %4i]" % (data[y, x], x, y)
        except IndexError:
            return ""
    pyplot.gca().format_coord = format_coord
    if dims:
        # one slider per leading dimension; 'current' holds the shown indices
        current = list((0, ) * dims)
        cur_ax_dat = [0, data[tuple(current)].squeeze()]
        sliders = [pyplot.Slider(
            pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
            'Dimension %i' % axis, 0, data.shape[axis]-1, 0, facecolor='0.5',
            valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
        for slider in sliders:
            slider.drawon = False
        def set_image(current, sliders=sliders, data=data):
            # change image and redraw canvas
            cur_ax_dat[1] = data[tuple(current)].squeeze()
            image.set_data(cur_ax_dat[1])
            for ctrl, index in zip(sliders, current):
                ctrl.eventson = False
                ctrl.set_val(index)
                ctrl.eventson = True
            figure.canvas.draw()
        def on_changed(index, axis, data=data, current=current):
            # callback function for slider change event
            index = int(round(index))
            cur_ax_dat[0] = axis
            if index == current[axis]:
                return
            # wrap around at both ends of the axis
            if index >= data.shape[axis]:
                index = 0
            elif index < 0:
                index = data.shape[axis] - 1
            current[axis] = index
            set_image(current)
        def on_keypressed(event, data=data, current=current):
            # callback function for key press event
            key = event.key
            axis = cur_ax_dat[0]
            if str(key) in '0123456789':
                on_changed(key, axis)
            elif key == 'right':
                on_changed(current[axis] + 1, axis)
            elif key == 'left':
                on_changed(current[axis] - 1, axis)
            elif key == 'up':
                cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
            elif key == 'down':
                cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
            elif key == 'end':
                on_changed(data.shape[axis] - 1, axis)
            elif key == 'home':
                on_changed(0, axis)
        figure.canvas.mpl_connect('key_press_event', on_keypressed)
        for axis, ctrl in enumerate(sliders):
            # bind 'axis' as a default to avoid the late-binding closure trap
            ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
    return figure, subplot, image
def _app_show():
"""Block the GUI. For use as skimage plugin."""
pyplot = sys.modules['matplotlib.pyplot']
pyplot.show()
def main(argv=None):
    """Command line usage main function."""
    # NOTE(review): float(sys.version[0:3]) mis-parses two-digit minor
    # versions ("3.10" -> 3.1); harmless for this >= 2.6 gate, but prefer
    # sys.version_info for real version checks.
    if float(sys.version[0:3]) < 2.6:
        print("This script requires Python version 2.6 or better.")
        print("This is Python version %s" % sys.version)
        return 0
    if argv is None:
        argv = sys.argv
    import optparse
    parser = optparse.OptionParser(
        usage="usage: %prog [options] path",
        description="Display image data in TIFF files.",
        version="%%prog %s" % __version__)
    opt = parser.add_option
    opt('-p', '--page', dest='page', type='int', default=-1,
        help="display single page")
    opt('-s', '--series', dest='series', type='int', default=-1,
        help="display series of pages of same shape")
    opt('--nomultifile', dest='nomultifile', action='store_true',
        default=False, help="don't read OME series from multiple files")
    opt('--noplot', dest='noplot', action='store_true', default=False,
        help="don't display images")
    opt('--interpol', dest='interpol', metavar='INTERPOL', default='bilinear',
        help="image interpolation method")
    opt('--dpi', dest='dpi', type='int', default=96,
        help="set plot resolution")
    opt('--debug', dest='debug', action='store_true', default=False,
        help="raise exception on failures")
    opt('--test', dest='test', action='store_true', default=False,
        help="try read all images in path")
    opt('--doctest', dest='doctest', action='store_true', default=False,
        help="runs the docstring examples")
    opt('-v', '--verbose', dest='verbose', action='store_true', default=True)
    opt('-q', '--quiet', dest='verbose', action='store_false')
    # NOTE(review): parse_args() is called with no arguments, so it always
    # reads sys.argv and the 'argv' parameter above has no effect -- confirm
    # intent before relying on main(argv).
    settings, path = parser.parse_args()
    # positional args may be a path containing spaces; rejoin them
    path = ' '.join(path)
    if settings.doctest:
        import doctest
        doctest.testmod()
        return 0
    if not path:
        parser.error("No file specified")
    if settings.test:
        test_tifffile(path, settings.verbose)
        return 0
    # expand glob patterns; only the first match is displayed
    if any(i in path for i in '?*'):
        path = glob.glob(path)
        if not path:
            print('no files match the pattern')
            return 0
        # TODO: handle image sequences
        #if len(path) == 1:
        path = path[0]
    print("Reading file structure...", end=' ')
    start = time.time()
    try:
        tif = TiffFile(path, multifile=not settings.nomultifile)
    except Exception as e:
        if settings.debug:
            raise
        else:
            print("\n", e)
            sys.exit(0)
    print("%.3f ms" % ((time.time()-start) * 1e3))
    if tif.is_ome:
        settings.norgb = True
    # default selection: first page (or the requested one), no pixel data yet
    images = [(None, tif[0 if settings.page < 0 else settings.page])]
    if not settings.noplot:
        print("Reading image data... ", end=' ')
        def notnone(x):
            # first non-None element of an iterable
            return next(i for i in x if i is not None)
        start = time.time()
        try:
            if settings.page >= 0:
                images = [(tif.asarray(key=settings.page),
                           tif[settings.page])]
            elif settings.series >= 0:
                images = [(tif.asarray(series=settings.series),
                           notnone(tif.series[settings.series].pages))]
            else:
                # read every series; failures are recorded with img=None
                images = []
                for i, s in enumerate(tif.series):
                    try:
                        images.append(
                            (tif.asarray(series=i), notnone(s.pages)))
                    except ValueError as e:
                        images.append((None, notnone(s.pages)))
                        if settings.debug:
                            raise
                        else:
                            print("\n* series %i failed: %s... " % (i, e),
                                  end='')
            print("%.3f ms" % ((time.time()-start) * 1e3))
        except Exception as e:
            if settings.debug:
                raise
            else:
                print(e)
    tif.close()
    # print file, series and per-page metadata summaries
    print("\nTIFF file:", tif)
    print()
    for i, s in enumerate(tif.series):
        print ("Series %i" % i)
        print(s)
        print()
    for i, page in images:
        print(page)
        print(page.tags)
        if page.is_palette:
            print("\nColor Map:", page.color_map.shape, page.color_map.dtype)
        for attr in ('cz_lsm_info', 'cz_lsm_scan_info', 'uic_tags',
                     'mm_header', 'imagej_tags', 'micromanager_metadata',
                     'nih_image_header'):
            if hasattr(page, attr):
                print("", attr.upper(), Record(getattr(page, attr)), sep="\n")
        print()
        if page.is_micromanager:
            print('MICROMANAGER_FILE_METADATA')
            print(Record(tif.micromanager_metadata))
    if images and not settings.noplot:
        try:
            import matplotlib
            matplotlib.use('TkAgg')
            from matplotlib import pyplot
        except ImportError as e:
            warnings.warn("failed to import matplotlib.\n%s" % e)
        else:
            for img, page in images:
                if img is None:
                    continue
                vmin, vmax = None, None
                # honor GDAL nodata: scale from the smallest valid value
                if 'gdal_nodata' in page.tags:
                    try:
                        vmin = numpy.min(img[img > float(page.gdal_nodata)])
                    except ValueError:
                        pass
                # MetaMorph STK files carry explicit display scaling
                if page.is_stk:
                    try:
                        vmin = page.uic_tags['min_scale']
                        vmax = page.uic_tags['max_scale']
                    except KeyError:
                        pass
                    else:
                        if vmax <= vmin:
                            vmin, vmax = None, None
                title = "%s\n %s" % (str(tif), str(page))
                imshow(img, title=title, vmin=vmin, vmax=vmax,
                       bitspersample=page.bits_per_sample,
                       photometric=page.photometric,
                       interpolation=settings.interpol,
                       dpi=settings.dpi)
            pyplot.show()
TIFFfile = TiffFile # backwards compatibility
# Py2 names referenced elsewhere in this module (e.g. imshow's unicode(...)):
# alias them to their Py3 equivalents.
if sys.version_info[0] > 2:
    basestring = str, bytes
    unicode = str
if __name__ == "__main__":
    sys.exit(main())
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
---
# Composite action: install breeze, restore a previously stashed docker image
# tarball for the requested CI/PROD image and "docker load" it.
name: 'Prepare breeze && current image (CI or PROD)'
description: 'Installs breeze and recreates current python image from artifact'
inputs:
  python:
    description: 'Python version for image to prepare'
    required: true
  image-type:
    description: 'Which image type to prepare (ci/prod)'
    default: "ci"
  platform:
    description: 'Platform for the build - linux/amd64 or linux/arm64'
    required: true
  # NOTE(review): 'use-uv' is declared but not referenced by any step in this
  # action -- confirm whether it is consumed elsewhere or can be dropped.
  use-uv:
    description: 'Whether to use uv'
    required: true
  make-mnt-writeable-and-cleanup:
    description: 'Whether to cleanup /mnt'
    required: true
outputs:
  host-python-version:
    description: Python version used in host
    value: ${{ steps.breeze.outputs.host-python-version }}
runs:
  using: "composite"
  steps:
    - name: "Make /mnt writeable and cleanup"
      shell: bash
      run: ./scripts/ci/make_mnt_writeable.sh
      if: inputs.make-mnt-writeable-and-cleanup == 'true'
    - name: "Install Breeze"
      uses: ./.github/actions/breeze
      id: breeze
    - name: "Check free space"
      shell: bash
      run: |
        echo "Checking free space!"
        df -H
    # The stash key must match the one used by the image-build workflow.
    - name: "Restore ${{ inputs.image-type }} docker image ${{ inputs.platform }}:${{ inputs.python }}"
      uses: apache/infrastructure-actions/stash/restore@1c35b5ccf8fba5d4c3fdf25a045ca91aa0cbc468
      with:
        key: ${{ inputs.image-type }}-image-save-v3-${{ inputs.platform }}-${{ inputs.python }}
        path: "/mnt/"
        only-current-branch: 'true'
    - name: "Load ${{ inputs.image-type }} image ${{ inputs.platform }}:${{ inputs.python }}"
      env:
        PLATFORM: ${{ inputs.platform }}
        PYTHON: ${{ inputs.python }}
        IMAGE_TYPE: ${{ inputs.image-type }}
      run: >
        breeze ${IMAGE_TYPE}-image load --platform "${PLATFORM}" --python "${PYTHON}" --image-file-dir "/mnt"
      shell: bash
|
unknown
|
github
|
https://github.com/apache/airflow
|
.github/actions/prepare_breeze_and_image/action.yml
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.chapel
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Chapel language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['ChapelLexer']
class ChapelLexer(RegexLexer):
    """
    For `Chapel <http://chapel.cray.com/>`_ source.
    .. versionadded:: 2.0
    """
    name = 'Chapel'
    filenames = ['*.chpl']
    aliases = ['chapel', 'chpl']
    # mimetypes = ['text/x-chapel']
    # Rule order matters: earlier patterns win, so declaration/constant/type
    # keywords are matched before the generic keyword list and identifiers.
    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),
            (r'//(.*?)\n', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'(config|const|in|inout|out|param|ref|type|var)\b',
             Keyword.Declaration),
            (r'(false|nil|true)\b', Keyword.Constant),
            (r'(bool|complex|imag|int|opaque|range|real|string|uint)\b',
             Keyword.Type),
            (words((
                'align', 'atomic', 'begin', 'break', 'by', 'cobegin', 'coforall',
                'continue', 'delete', 'dmapped', 'do', 'domain', 'else', 'enum',
                'export', 'extern', 'for', 'forall', 'if', 'index', 'inline',
                'iter', 'label', 'lambda', 'let', 'local', 'new', 'noinit', 'on',
                'otherwise', 'pragma', 'reduce', 'return', 'scan', 'select',
                'serial', 'single', 'sparse', 'subdomain', 'sync', 'then', 'use',
                'when', 'where', 'while', 'with', 'yield', 'zip'), suffix=r'\b'),
             Keyword),
            # 'proc'/'class' switch to sub-states that highlight the name
            (r'(proc)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'procname'),
            (r'(class|module|record|union)(\s+)', bygroups(Keyword, Text),
             'classname'),
            # imaginary integers
            (r'\d+i', Number),
            (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
            (r'\.\d+([Ee][-+]\d+)?i', Number),
            (r'\d+[Ee][-+]\d+i', Number),
            # reals cannot end with a period due to lexical ambiguity with
            # .. operator. See reference for rationale.
            (r'(\d*\.\d+)([eE][+-]?[0-9]+)?i?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+i?', Number.Float),
            # integer literals
            # -- binary
            (r'0[bB][01]+', Number.Bin),
            # -- hex
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # -- octal
            (r'0[oO][0-7]+', Number.Oct),
            # -- decimal
            (r'[0-9]+', Number.Integer),
            # strings
            # NOTE(review): this pattern accepts mismatched delimiters
            # (e.g. "abc') -- presumably a deliberate simplification.
            (r'["\'](\\\\|\\"|[^"\'])*["\']', String),
            # tokens
            (r'(=|\+=|-=|\*=|/=|\*\*=|%=|&=|\|=|\^=|&&=|\|\|=|<<=|>>=|'
             r'<=>|<~>|\.\.|by|#|\.\.\.|'
             r'&&|\|\||!|&|\||\^|~|<<|>>|'
             r'==|!=|<=|>=|<|>|'
             r'[+\-*/%]|\*\*)', Operator),
            (r'[:;,.?()\[\]{}]', Punctuation),
            # identifiers
            (r'[a-zA-Z_][\w$]*', Name.Other),
        ],
        'classname': [
            (r'[a-zA-Z_][\w$]*', Name.Class, '#pop'),
        ],
        'procname': [
            (r'[a-zA-Z_][\w$]*', Name.Function, '#pop'),
        ],
    }
|
unknown
|
codeparrot/codeparrot-clean
| ||
import sys
import logging
from os.path import exists
from functools import lru_cache
from contextlib import contextmanager
from importlib import invalidate_caches
from importlib.machinery import FileFinder, SourceFileLoader
import lib2to3
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
from . import disable_logging
@lru_cache()
def get_rt():
    """Build once and return a RefactoringTool loaded with all lib2to3 fixers."""
    with disable_logging():
        return RefactoringTool(get_fixers_from_package('lib2to3.fixes'))
class MyLoader(SourceFileLoader):
    """Source loader that runs lib2to3 over ``arcrest`` modules on import.

    Converted sources are cached beside the originals with a ``.py3`` suffix
    so the refactoring cost is paid only once per file.
    """
    def get_data(self, filename):
        # Only arcrest itself needs conversion; arcrest.compat and all other
        # packages are loaded unmodified through the base class.
        if 'arcrest' not in self.name or 'arcrest.compat' in self.name:
            return super().get_data(filename)
        # Pretend bytecode does not exist so the .py source is always used.
        if filename.endswith('.pyc'):
            raise FileNotFoundError()
        py3 = filename + '.py3'
        # Reuse a previously converted copy when available.
        if exists(py3):
            with open(py3) as fh:
                return fh.read()
        with open(filename, encoding='utf8') as fh:
            contents = fh.read()
        rt = get_rt()
        # Refactor doctests first, then the module source itself.
        contents = rt.refactor_docstring(contents, filename)
        contents = rt.refactor_string(contents, filename)
        contents = str(contents)
        # Cache the converted source for subsequent imports.
        with open(py3, 'w') as fh:
            fh.write(contents)
        # NOTE(review): returns str here while the base loader returns bytes;
        # importlib accepts both for source data, but verify no caller
        # expects bytes.
        return contents
@contextmanager
def with_hook():
    """Temporarily install MyLoader as the import hook for ``.py`` files."""
    finder_hook = FileFinder.path_hook((MyLoader, ['.py']))
    sys.path_hooks.insert(0, finder_hook)
    # Drop cached finders/bytecode lookups that would bypass the new hook.
    sys.path_importer_cache.clear()
    invalidate_caches()
    try:
        yield
    finally:
        sys.path_hooks.remove(finder_hook)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpoint_converter.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.python.feature_column import feature_column
from tensorflow_estimator.python.estimator.canned import dnn
from tensorflow_estimator.python.estimator.canned import dnn_linear_combined
from tensorflow_estimator.python.estimator.canned import head as head_lib
from tensorflow_estimator.python.estimator.canned import linear
from tensorflow_estimator.python.estimator.head import regression_head
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.tools import checkpoint_converter
class DNNCheckpointConverterTest(tf.test.TestCase):
  """End-to-end test: convert a v1 DNNEstimator checkpoint and resume in v2."""

  def setUp(self):
    # Separate directories for the v1 (source) and v2 (target) checkpoints.
    self._old_ckpt_dir = os.path.join(self.get_temp_dir(), 'source_ckpt')
    self._new_ckpt_dir = os.path.join(self.get_temp_dir(), 'target_ckpt')

  def tearDown(self):
    # Clear the FileWriter cache before deleting the directories; it holds
    # open writers on them.
    if os.path.exists(self._old_ckpt_dir):
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._old_ckpt_dir)
    if os.path.exists(self._new_ckpt_dir):
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._new_ckpt_dir)

  def _test_ckpt_converter(self, train_input_fn, eval_input_fn,
                           predict_input_fn, input_dimension, label_dimension,
                           batch_size, optimizer):
    """Trains a v1 estimator, converts its checkpoint, then resumes in v2."""
    # Create checkpoint in CannedEstimator v1.
    feature_columns_v1 = [
        feature_column._numeric_column('x', shape=(input_dimension,))
    ]
    est_v1 = dnn.DNNEstimator(
        head=head_lib._regression_head(label_dimension=label_dimension),
        hidden_units=(2, 2),
        feature_columns=feature_columns_v1,
        model_dir=self._old_ckpt_dir,
        optimizer=optimizer)
    # Train
    num_steps = 10
    est_v1.train(train_input_fn, steps=num_steps)
    self.assertIsNotNone(est_v1.latest_checkpoint())
    self.assertTrue(est_v1.latest_checkpoint().startswith(self._old_ckpt_dir))
    # Convert checkpoint from v1 to v2.
    source_checkpoint = os.path.join(self._old_ckpt_dir, 'model.ckpt-10')
    source_graph = os.path.join(self._old_ckpt_dir, 'graph.pbtxt')
    target_checkpoint = os.path.join(self._new_ckpt_dir, 'model.ckpt-10')
    checkpoint_converter.convert_checkpoint('dnn', source_checkpoint,
                                            source_graph, target_checkpoint)
    # Create CannedEstimator V2 and restore from the converted checkpoint.
    feature_columns_v2 = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    est_v2 = dnn.DNNEstimatorV2(
        head=regression_head.RegressionHead(label_dimension=label_dimension),
        hidden_units=(2, 2),
        feature_columns=feature_columns_v2,
        model_dir=self._new_ckpt_dir,
        optimizer=optimizer)
    # Train
    extra_steps = 10
    est_v2.train(train_input_fn, steps=extra_steps)
    self.assertIsNotNone(est_v2.latest_checkpoint())
    self.assertTrue(est_v2.latest_checkpoint().startswith(self._new_ckpt_dir))
    # Make sure estimator v2 restores from the converted checkpoint, and
    # continues training extra steps.
    self.assertEqual(
        num_steps + extra_steps,
        est_v2.get_variable_value(tf.compat.v1.GraphKeys.GLOBAL_STEP))

  def _create_input_fn(self, label_dimension, batch_size):
    """Creates input_fn for integration test."""
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, batch_size=batch_size, shuffle=False)
    return train_input_fn, eval_input_fn, predict_input_fn

  def _test_ckpt_converter_with_an_optimizer(self, opt):
    """Tests checkpoint converter with an optimizer."""
    label_dimension = 2
    batch_size = 10
    train_input_fn, eval_input_fn, predict_input_fn = self._create_input_fn(
        label_dimension, batch_size)
    self._test_ckpt_converter(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size,
        optimizer=opt)

  def test_ckpt_converter_with_adagrad(self):
    """Tests checkpoint converter with Adagrad."""
    self._test_ckpt_converter_with_an_optimizer('Adagrad')

  def test_ckpt_converter_with_rmsprop(self):
    """Tests checkpoint converter with RMSProp."""
    self._test_ckpt_converter_with_an_optimizer('RMSProp')

  def test_ckpt_converter_with_ftrl(self):
    """Tests checkpoint converter with Ftrl."""
    self._test_ckpt_converter_with_an_optimizer('Ftrl')

  def test_ckpt_converter_with_adam(self):
    """Tests checkpoint converter with Adam."""
    self._test_ckpt_converter_with_an_optimizer('Adam')

  def test_ckpt_converter_with_sgd(self):
    """Tests checkpoint converter with SGD."""
    self._test_ckpt_converter_with_an_optimizer('SGD')
class LinearCheckpointConverterTest(tf.test.TestCase):
  """End-to-end test: convert a v1 LinearEstimator checkpoint and resume in v2."""

  def setUp(self):
    # Separate directories for the v1 (source) and v2 (target) checkpoints.
    self._old_ckpt_dir = os.path.join(self.get_temp_dir(), 'source_ckpt')
    self._new_ckpt_dir = os.path.join(self.get_temp_dir(), 'target_ckpt')

  def tearDown(self):
    # Clear the FileWriter cache before deleting the directories; it holds
    # open writers on them.
    if os.path.exists(self._old_ckpt_dir):
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._old_ckpt_dir)
    if os.path.exists(self._new_ckpt_dir):
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._new_ckpt_dir)

  def _test_ckpt_converter(self, train_input_fn, eval_input_fn,
                           predict_input_fn, input_dimension, label_dimension,
                           batch_size, optimizer):
    """Trains a v1 estimator, converts its checkpoint, then resumes in v2."""
    # Create checkpoint in CannedEstimator v1.
    feature_columns_v1 = [
        feature_column._numeric_column('x', shape=(input_dimension,))
    ]
    est_v1 = linear.LinearEstimator(
        head=head_lib._regression_head(label_dimension=label_dimension),
        feature_columns=feature_columns_v1,
        model_dir=self._old_ckpt_dir,
        optimizer=optimizer)
    # Train
    num_steps = 10
    est_v1.train(train_input_fn, steps=num_steps)
    self.assertIsNotNone(est_v1.latest_checkpoint())
    self.assertTrue(est_v1.latest_checkpoint().startswith(self._old_ckpt_dir))
    # Convert checkpoint from v1 to v2.
    source_checkpoint = os.path.join(self._old_ckpt_dir, 'model.ckpt-10')
    source_graph = os.path.join(self._old_ckpt_dir, 'graph.pbtxt')
    target_checkpoint = os.path.join(self._new_ckpt_dir, 'model.ckpt-10')
    checkpoint_converter.convert_checkpoint('linear', source_checkpoint,
                                            source_graph, target_checkpoint)
    # Create CannedEstimator V2 and restore from the converted checkpoint.
    feature_columns_v2 = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    est_v2 = linear.LinearEstimatorV2(
        head=regression_head.RegressionHead(label_dimension=label_dimension),
        feature_columns=feature_columns_v2,
        model_dir=self._new_ckpt_dir,
        optimizer=optimizer)
    # Train
    extra_steps = 10
    est_v2.train(train_input_fn, steps=extra_steps)
    self.assertIsNotNone(est_v2.latest_checkpoint())
    self.assertTrue(est_v2.latest_checkpoint().startswith(self._new_ckpt_dir))
    # Make sure estimator v2 restores from the converted checkpoint, and
    # continues training extra steps.
    self.assertEqual(
        num_steps + extra_steps,
        est_v2.get_variable_value(tf.compat.v1.GraphKeys.GLOBAL_STEP))

  def _create_input_fn(self, label_dimension, batch_size):
    """Creates input_fn for integration test."""
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, batch_size=batch_size, shuffle=False)
    return train_input_fn, eval_input_fn, predict_input_fn

  def _test_ckpt_converter_with_an_optimizer(self, opt):
    """Tests checkpoint converter with an optimizer."""
    label_dimension = 2
    batch_size = 10
    train_input_fn, eval_input_fn, predict_input_fn = self._create_input_fn(
        label_dimension, batch_size)
    self._test_ckpt_converter(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size,
        optimizer=opt)

  def test_ckpt_converter_with_adagrad(self):
    """Tests checkpoint converter with Adagrad."""
    self._test_ckpt_converter_with_an_optimizer('Adagrad')

  def test_ckpt_converter_with_rmsprop(self):
    """Tests checkpoint converter with RMSProp."""
    self._test_ckpt_converter_with_an_optimizer('RMSProp')

  def test_ckpt_converter_with_ftrl(self):
    """Tests checkpoint converter with Ftrl."""
    self._test_ckpt_converter_with_an_optimizer('Ftrl')

  def test_ckpt_converter_with_adam(self):
    """Tests checkpoint converter with Adam."""
    self._test_ckpt_converter_with_an_optimizer('Adam')

  def test_ckpt_converter_with_sgd(self):
    """Tests checkpoint converter with SGD."""
    self._test_ckpt_converter_with_an_optimizer('SGD')
class DNNLinearCombinedCheckpointConverterTest(tf.test.TestCase):
  """End-to-end test: convert a v1 DNNLinearCombined checkpoint, resume in v2."""

  def setUp(self):
    # Separate directories for the v1 (source) and v2 (target) checkpoints.
    self._old_ckpt_dir = os.path.join(self.get_temp_dir(), 'source_ckpt')
    self._new_ckpt_dir = os.path.join(self.get_temp_dir(), 'target_ckpt')

  def tearDown(self):
    # Clear the FileWriter cache before deleting the directories; it holds
    # open writers on them.
    if os.path.exists(self._old_ckpt_dir):
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._old_ckpt_dir)
    if os.path.exists(self._new_ckpt_dir):
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._new_ckpt_dir)

  def _test_ckpt_converter(self, train_input_fn, eval_input_fn,
                           predict_input_fn, input_dimension, label_dimension,
                           batch_size, dnn_optimizer, linear_optimizer):
    """Trains a v1 combined estimator, converts its checkpoint, resumes in v2."""
    # Create checkpoint in CannedEstimator v1.
    linear_feature_columns_v1 = [
        feature_column._numeric_column('x', shape=(input_dimension,))
    ]
    dnn_feature_columns_v1 = [
        feature_column._numeric_column('x', shape=(input_dimension,))
    ]
    est_v1 = dnn_linear_combined.DNNLinearCombinedEstimator(
        head=head_lib._regression_head(label_dimension=label_dimension),
        linear_feature_columns=linear_feature_columns_v1,
        dnn_feature_columns=dnn_feature_columns_v1,
        dnn_hidden_units=(2, 2),
        model_dir=self._old_ckpt_dir,
        dnn_optimizer=dnn_optimizer,
        linear_optimizer=linear_optimizer)
    # Train
    num_steps = 10
    est_v1.train(train_input_fn, steps=num_steps)
    self.assertIsNotNone(est_v1.latest_checkpoint())
    self.assertTrue(est_v1.latest_checkpoint().startswith(self._old_ckpt_dir))
    # Convert checkpoint from v1 to v2.
    source_checkpoint = os.path.join(self._old_ckpt_dir, 'model.ckpt-10')
    source_graph = os.path.join(self._old_ckpt_dir, 'graph.pbtxt')
    target_checkpoint = os.path.join(self._new_ckpt_dir, 'model.ckpt-10')
    checkpoint_converter.convert_checkpoint('combined', source_checkpoint,
                                            source_graph, target_checkpoint)
    # Create CannedEstimator V2 and restore from the converted checkpoint.
    linear_feature_columns_v2 = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    dnn_feature_columns_v2 = [
        tf.feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    est_v2 = dnn_linear_combined.DNNLinearCombinedEstimatorV2(
        head=regression_head.RegressionHead(label_dimension=label_dimension),
        linear_feature_columns=linear_feature_columns_v2,
        dnn_feature_columns=dnn_feature_columns_v2,
        dnn_hidden_units=(2, 2),
        model_dir=self._new_ckpt_dir,
        dnn_optimizer=dnn_optimizer,
        linear_optimizer=linear_optimizer)
    # Train
    extra_steps = 10
    est_v2.train(train_input_fn, steps=extra_steps)
    self.assertIsNotNone(est_v2.latest_checkpoint())
    self.assertTrue(est_v2.latest_checkpoint().startswith(self._new_ckpt_dir))
    # Make sure estimator v2 restores from the converted checkpoint, and
    # continues training extra steps.
    self.assertEqual(
        num_steps + extra_steps,
        est_v2.get_variable_value(tf.compat.v1.GraphKeys.GLOBAL_STEP))

  def _create_input_fn(self, label_dimension, batch_size):
    """Creates input_fn for integration test."""
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # learn y = x
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, y=data, batch_size=batch_size, shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, batch_size=batch_size, shuffle=False)
    return train_input_fn, eval_input_fn, predict_input_fn

  def _test_ckpt_converter_with_an_optimizer(self, dnn_opt, linear_opt):
    """Tests checkpoint converter with an optimizer."""
    label_dimension = 2
    batch_size = 10
    train_input_fn, eval_input_fn, predict_input_fn = self._create_input_fn(
        label_dimension, batch_size)
    self._test_ckpt_converter(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=label_dimension,
        label_dimension=label_dimension,
        batch_size=batch_size,
        dnn_optimizer=dnn_opt,
        linear_optimizer=linear_opt)

  def test_ckpt_converter_with_adagrad(self):
    """Tests checkpoint converter with Adagrad."""
    self._test_ckpt_converter_with_an_optimizer('Adagrad', 'RMSProp')

  def test_ckpt_converter_with_rmsprop(self):
    """Tests checkpoint converter with RMSProp."""
    self._test_ckpt_converter_with_an_optimizer('RMSProp', 'Ftrl')

  def test_ckpt_converter_with_ftrl(self):
    """Tests checkpoint converter with Ftrl."""
    self._test_ckpt_converter_with_an_optimizer('Ftrl', 'Adam')

  def test_ckpt_converter_with_adam(self):
    """Tests checkpoint converter with Adam."""
    self._test_ckpt_converter_with_an_optimizer('Adam', 'SGD')

  def test_ckpt_converter_with_sgd(self):
    """Tests checkpoint converter with SGD."""
    self._test_ckpt_converter_with_an_optimizer('SGD', 'Adagrad')
if __name__ == '__main__':
  # Run all checkpoint-converter test cases in this module.
  tf.test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
import sys
from itertools import takewhile
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.db.migrations import Migration
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.questioner import (
InteractiveMigrationQuestioner, MigrationQuestioner,
NonInteractiveMigrationQuestioner,
)
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from django.utils.six import iteritems
from django.utils.six.moves import zip
class Command(BaseCommand):
    """``makemigrations`` management command: detect model changes and write
    migration files, optionally merging conflicting migration branches."""

    help = "Creates new migration(s) for apps."

    def add_arguments(self, parser):
        """Register command-line options for makemigrations."""
        parser.add_argument('args', metavar='app_label', nargs='*',
            help='Specify the app label(s) to create migrations for.')
        parser.add_argument('--dry-run', action='store_true', dest='dry_run', default=False,
            help="Just show what migrations would be made; don't actually write them.")
        parser.add_argument('--merge', action='store_true', dest='merge', default=False,
            help="Enable fixing of migration conflicts.")
        parser.add_argument('--empty', action='store_true', dest='empty', default=False,
            help="Create an empty migration.")
        parser.add_argument('--noinput', '--no-input',
            action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.')
        parser.add_argument('-n', '--name', action='store', dest='name', default=None,
            help="Use this name for migration file(s).")
        parser.add_argument('-e', '--exit', action='store_true', dest='exit_code', default=False,
            help='Exit with error code 1 if no changes needing migrations are found.')

    def handle(self, *app_labels, **options):
        """Validate app labels, detect changes, and write migration files."""
        self.verbosity = options.get('verbosity')
        self.interactive = options.get('interactive')
        self.dry_run = options.get('dry_run', False)
        self.merge = options.get('merge', False)
        self.empty = options.get('empty', False)
        self.migration_name = options.get('name')
        self.exit_code = options.get('exit_code', False)
        # Make sure the app they asked for exists
        app_labels = set(app_labels)
        bad_app_labels = set()
        for app_label in app_labels:
            try:
                apps.get_app_config(app_label)
            except LookupError:
                bad_app_labels.add(app_label)
        if bad_app_labels:
            for app_label in bad_app_labels:
                self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
            sys.exit(2)
        # Load the current graph state. Pass in None for the connection so
        # the loader doesn't try to resolve replaced migrations from DB.
        loader = MigrationLoader(None, ignore_no_migrations=True)
        # Before anything else, see if there's conflicting apps and drop out
        # hard if there are any and they don't want to merge
        conflicts = loader.detect_conflicts()
        # If app_labels is specified, filter out conflicting migrations for unspecified apps
        if app_labels:
            conflicts = {
                app_label: conflict for app_label, conflict in iteritems(conflicts)
                if app_label in app_labels
            }
        if conflicts and not self.merge:
            name_str = "; ".join(
                "%s in %s" % (", ".join(names), app)
                for app, names in conflicts.items()
            )
            raise CommandError(
                "Conflicting migrations detected; multiple leaf nodes in the "
                "migration graph: (%s).\nTo fix them run "
                "'python manage.py makemigrations --merge'" % name_str
            )
        # If they want to merge and there's nothing to merge, then politely exit
        if self.merge and not conflicts:
            self.stdout.write("No conflicts detected to merge.")
            return
        # If they want to merge and there is something to merge, then
        # divert into the merge code
        if self.merge and conflicts:
            return self.handle_merge(loader, conflicts)
        if self.interactive:
            questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
        else:
            questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run)
        # Set up autodetector
        autodetector = MigrationAutodetector(
            loader.project_state(),
            ProjectState.from_apps(apps),
            questioner,
        )
        # If they want to make an empty migration, make one for each app
        if self.empty:
            if not app_labels:
                raise CommandError("You must supply at least one app label when using --empty.")
            # Make a fake changes() result we can pass to arrange_for_graph
            changes = {
                app: [Migration("custom", app)]
                for app in app_labels
            }
            changes = autodetector.arrange_for_graph(
                changes=changes,
                graph=loader.graph,
                migration_name=self.migration_name,
            )
            self.write_migration_files(changes)
            return
        # Detect changes
        changes = autodetector.changes(
            graph=loader.graph,
            trim_to_apps=app_labels or None,
            convert_apps=app_labels or None,
            migration_name=self.migration_name,
        )
        if not changes:
            # No changes? Tell them.
            if self.verbosity >= 1:
                if len(app_labels) == 1:
                    self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
                elif len(app_labels) > 1:
                    self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
                else:
                    self.stdout.write("No changes detected")
            # --exit requests a non-zero status when nothing needs migrating.
            if self.exit_code:
                sys.exit(1)
            else:
                return
        self.write_migration_files(changes)

    def write_migration_files(self, changes):
        """
        Takes a changes dict and writes them out as migration files.
        """
        directory_created = {}
        for app_label, app_migrations in changes.items():
            if self.verbosity >= 1:
                self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
            for migration in app_migrations:
                # Describe the migration
                writer = MigrationWriter(migration)
                if self.verbosity >= 1:
                    self.stdout.write("  %s:\n" % (self.style.MIGRATE_LABEL(writer.filename),))
                    for operation in migration.operations:
                        self.stdout.write("    - %s\n" % operation.describe())
                if not self.dry_run:
                    # Write the migrations file to the disk.
                    migrations_directory = os.path.dirname(writer.path)
                    if not directory_created.get(app_label):
                        if not os.path.isdir(migrations_directory):
                            os.mkdir(migrations_directory)
                        init_path = os.path.join(migrations_directory, "__init__.py")
                        if not os.path.isfile(init_path):
                            open(init_path, "w").close()
                        # We just do this once per app
                        directory_created[app_label] = True
                    migration_string = writer.as_string()
                    # as_string() returns bytes under six, hence the "wb" mode.
                    with open(writer.path, "wb") as fh:
                        fh.write(migration_string)
                elif self.verbosity == 3:
                    # Alternatively, makemigrations --dry-run --verbosity 3
                    # will output the migrations to stdout rather than saving
                    # the file to the disk.
                    self.stdout.write(self.style.MIGRATE_HEADING(
                        "Full migrations file '%s':" % writer.filename) + "\n"
                    )
                    self.stdout.write("%s\n" % writer.as_string())

    def handle_merge(self, loader, conflicts):
        """
        Handles merging together conflicted migrations interactively,
        if it's safe; otherwise, advises on how to fix it.
        """
        if self.interactive:
            questioner = InteractiveMigrationQuestioner()
        else:
            questioner = MigrationQuestioner(defaults={'ask_merge': True})
        for app_label, migration_names in conflicts.items():
            # Grab out the migrations in question, and work out their
            # common ancestor.
            merge_migrations = []
            for migration_name in migration_names:
                migration = loader.get_migration(app_label, migration_name)
                migration.ancestry = [
                    mig for mig in loader.graph.forwards_plan((app_label, migration_name))
                    if mig[0] == migration.app_label
                ]
                merge_migrations.append(migration)
            # Count how many leading ancestry entries all branches share.
            all_items_equal = lambda seq: all(item == seq[0] for item in seq[1:])
            merge_migrations_generations = zip(*[m.ancestry for m in merge_migrations])
            common_ancestor_count = sum(1 for common_ancestor_generation
                                        in takewhile(all_items_equal, merge_migrations_generations))
            if not common_ancestor_count:
                raise ValueError("Could not find common ancestor of %s" % migration_names)
            # Now work out the operations along each divergent branch
            for migration in merge_migrations:
                migration.branch = migration.ancestry[common_ancestor_count:]
                migrations_ops = (loader.get_migration(node_app, node_name).operations
                                  for node_app, node_name in migration.branch)
                migration.merged_operations = sum(migrations_ops, [])
            # In future, this could use some of the Optimizer code
            # (can_optimize_through) to automatically see if they're
            # mergeable. For now, we always just prompt the user.
            if self.verbosity > 0:
                self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label))
                for migration in merge_migrations:
                    self.stdout.write(self.style.MIGRATE_LABEL("  Branch %s" % migration.name))
                    for operation in migration.merged_operations:
                        self.stdout.write("    - %s\n" % operation.describe())
            if questioner.ask_merge(app_label):
                # If they still want to merge it, then write out an empty
                # file depending on the migrations needing merging.
                numbers = [
                    MigrationAutodetector.parse_number(migration.name)
                    for migration in merge_migrations
                ]
                try:
                    biggest_number = max(x for x in numbers if x is not None)
                except ValueError:
                    biggest_number = 1
                subclass = type("Migration", (Migration, ), {
                    "dependencies": [(app_label, migration.name) for migration in merge_migrations],
                })
                new_migration = subclass("%04i_merge" % (biggest_number + 1), app_label)
                writer = MigrationWriter(new_migration)
                if not self.dry_run:
                    # Write the merge migrations file to the disk
                    with open(writer.path, "wb") as fh:
                        fh.write(writer.as_string())
                    if self.verbosity > 0:
                        self.stdout.write("\nCreated new merge migration %s" % writer.path)
                elif self.verbosity == 3:
                    # Alternatively, makemigrations --merge --dry-run --verbosity 3
                    # will output the merge migrations to stdout rather than saving
                    # the file to the disk.
                    self.stdout.write(self.style.MIGRATE_HEADING(
                        "Full merge migrations file '%s':" % writer.filename) + "\n"
                    )
                    self.stdout.write("%s\n" % writer.as_string())
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* global QUnit, RelatedObjectLookups */
'use strict';
// QUnit tests for window.dismissRelatedLookupPopup: closing the popup,
// bookkeeping in window.relatedWindows, and change-event dispatch on the
// target input (single-value vs. many-to-many raw-id fields).
QUnit.module('admin.RelatedObjectLookups', {
    beforeEach: function() {
        const $ = django.jQuery;
        // Fixture inputs: a plain field and a many-to-many raw-id field.
        $('#qunit-fixture').append(`
            <input type="text" id="test_id" name="test" />
            <input type="text" id="many_test_id" name="many_test" class="vManyToManyRawIdAdminField" />
        `);
    }
});

QUnit.test('dismissRelatedLookupPopup closes popup window', function(assert) {
    const testId = 'test_id';
    let windowClosed = false;
    // Fake popup: its name ties it to the #test_id input.
    const mockWin = {
        name: testId,
        close: function() {
            windowClosed = true;
        }
    };
    window.dismissRelatedLookupPopup(mockWin, '123');
    assert.true(windowClosed, 'Popup window should be closed');
});

QUnit.test('dismissRelatedLookupPopup removes window from relatedWindows array', function(assert) {
    const testId = 'test_id';
    const mockWin = {
        name: testId,
        close: function() {}
    };
    window.relatedWindows.push(mockWin);
    assert.equal(window.relatedWindows.indexOf(mockWin), 0, 'Window should be in relatedWindows array');
    window.dismissRelatedLookupPopup(mockWin, '123');
    assert.equal(window.relatedWindows.indexOf(mockWin), -1, 'Window should be removed from relatedWindows array');
});

QUnit.test('dismissRelatedLookupPopup triggers change event for single value field', function(assert) {
    assert.timeout(1000);
    const done = assert.async();
    const $ = django.jQuery;
    const testId = 'test_id';
    const newValue = '123';
    const mockWin = {
        name: testId,
        close: function() {}
    };
    let changeTriggered = false;
    // The handler must observe the new value already set on the input.
    $('#test_id').on('change', function() {
        changeTriggered = true;
        assert.equal(this.value, newValue, 'Value should be updated');
        done();
    });
    window.dismissRelatedLookupPopup(mockWin, newValue);
    assert.true(changeTriggered, 'Change event should be triggered');
});

QUnit.test('dismissRelatedLookupPopup triggers change event for many-to-many field', function(assert) {
    assert.timeout(1000);
    const $ = django.jQuery;
    const testId = 'many_test_id';
    const existingValue = '1,2';
    const newValue = '3';
    $('#many_test_id').val(existingValue);
    const mockWin = {
        name: testId,
        close: function() {}
    };
    let changeTriggered = false;
    // Many-to-many raw-id fields append the new id, comma-separated.
    $('#many_test_id').on('change', function() {
        changeTriggered = true;
        assert.equal(this.value, existingValue + ',' + newValue, 'Value should be appended for many-to-many fields');
    });
    window.dismissRelatedLookupPopup(mockWin, newValue);
    assert.true(changeTriggered, 'Change event should be triggered');
});
|
javascript
|
github
|
https://github.com/django/django
|
js_tests/admin/RelatedObjectLookups.test.js
|
import io
from unittest import mock
import pytest
from mitmproxy.test import tflow, tutils
import mitmproxy.io
from mitmproxy import flowfilter
from mitmproxy import options
from mitmproxy.io import tnetstring
from mitmproxy.exceptions import FlowReadException, ReplayException, ControlException
from mitmproxy import flow
from mitmproxy import http
from mitmproxy.net import http as net_http
from mitmproxy import master
from . import tservers
class TestSerialize:
    """Round-trip, filtering, and error tests for mitmproxy flow serialization."""

    def test_roundtrip(self):
        """A flow written with FlowWriter reads back with identical state."""
        sio = io.BytesIO()
        f = tflow.tflow()
        f.marked = True
        # All 256 byte values exercise binary-safe content serialization.
        f.request.content = bytes(range(256))
        w = mitmproxy.io.FlowWriter(sio)
        w.add(f)
        sio.seek(0)
        r = mitmproxy.io.FlowReader(sio)
        l = list(r.stream())
        assert len(l) == 1
        f2 = l[0]
        assert f2.get_state() == f.get_state()
        assert f2.request == f.request
        assert f2.marked

    def test_filter(self):
        """FilteredFlowWriter only persists flows matching the filter."""
        sio = io.BytesIO()
        # "~c 200" matches flows whose response status code is 200.
        flt = flowfilter.parse("~c 200")
        w = mitmproxy.io.FilteredFlowWriter(sio, flt)
        f = tflow.tflow(resp=True)
        f.response.status_code = 200
        w.add(f)
        f = tflow.tflow(resp=True)
        f.response.status_code = 201
        w.add(f)
        sio.seek(0)
        r = mitmproxy.io.FlowReader(sio)
        assert len(list(r.stream()))

    def test_error(self):
        """Corrupt data and unknown flow types raise FlowReadException."""
        sio = io.BytesIO()
        sio.write(b"bogus")
        sio.seek(0)
        r = mitmproxy.io.FlowReader(sio)
        with pytest.raises(FlowReadException, match='Invalid data format'):
            list(r.stream())
        sio = io.BytesIO()
        # tdummyflow serializes, but its type is unknown to the reader.
        f = tflow.tdummyflow()
        w = mitmproxy.io.FlowWriter(sio)
        w.add(f)
        sio.seek(0)
        r = mitmproxy.io.FlowReader(sio)
        with pytest.raises(FlowReadException, match='Unknown flow type'):
            list(r.stream())
        f = FlowReadException("foo")
        assert str(f) == "foo"

    def test_versioncheck(self):
        """A flow saved with an incompatible version is rejected on read."""
        f = tflow.tflow()
        d = f.get_state()
        d["version"] = (0, 0)
        sio = io.BytesIO()
        tnetstring.dump(d, sio)
        sio.seek(0)
        r = mitmproxy.io.FlowReader(sio)
        with pytest.raises(Exception, match="version"):
            list(r.stream())

    def test_copy(self):
        """
        _backup may be shared across instances. That should not raise errors.
        """
        f = tflow.tflow()
        f.backup()
        f.request.path = "/foo"
        f2 = f.copy()
        f2.revert()
        f.revert()
class TestFlowMaster:
    """Tests for master.Master flow loading, replay validation, and lifecycle."""

    def test_load_flow_reverse(self):
        """Flows loaded in reverse-proxy mode are rewritten to the upstream host."""
        s = tservers.TestState()
        opts = options.Options(
            mode="reverse:https://use-this-domain"
        )
        fm = master.Master(opts)
        fm.addons.add(s)
        f = tflow.tflow(resp=True)
        fm.load_flow(f)
        assert s.flows[0].request.host == "use-this-domain"

    def test_replay(self):
        """replay_request rejects unreplayable flows and downgrades HTTP/2."""
        opts = options.Options()
        fm = master.Master(opts)
        f = tflow.tflow(resp=True)
        # Each mutation below makes the flow unreplayable in a different way.
        f.request.content = None
        with pytest.raises(ReplayException, match="missing"):
            fm.replay_request(f)
        f.request = None
        with pytest.raises(ReplayException, match="request"):
            fm.replay_request(f)
        f.intercepted = True
        with pytest.raises(ReplayException, match="intercepted"):
            fm.replay_request(f)
        f.live = True
        with pytest.raises(ReplayException, match="live"):
            fm.replay_request(f)
        req = tutils.treq(headers=net_http.Headers(((b":authority", b"foo"), (b"header", b"qvalue"), (b"content-length", b"7"))))
        f = tflow.tflow(req=req)
        f.request.http_version = "HTTP/2.0"
        # Patch the replay thread so no network activity actually happens.
        with mock.patch('mitmproxy.proxy.protocol.http_replay.RequestReplayThread.run'):
            rt = fm.replay_request(f)
            assert rt.f.request.http_version == "HTTP/1.1"
            assert ":authority" not in rt.f.request.headers

    def test_all(self):
        """Drives a flow through the full addon lifecycle, then shuts down."""
        s = tservers.TestState()
        fm = master.Master(None)
        fm.addons.add(s)
        f = tflow.tflow(req=None)
        fm.addons.handle_lifecycle("clientconnect", f.client_conn)
        f.request = http.HTTPRequest.wrap(mitmproxy.test.tutils.treq())
        fm.addons.handle_lifecycle("request", f)
        assert len(s.flows) == 1
        f.response = http.HTTPResponse.wrap(mitmproxy.test.tutils.tresp())
        fm.addons.handle_lifecycle("response", f)
        assert len(s.flows) == 1
        fm.addons.handle_lifecycle("clientdisconnect", f.client_conn)
        f.error = flow.Error("msg")
        fm.addons.handle_lifecycle("error", f)
        # "foo" is not a valid command, so the next tick must raise.
        fm.tell("foo", f)
        with pytest.raises(ControlException):
            fm.tick(timeout=1)
        fm.shutdown()
class TestError:
    """Tests for flow.Error state round-tripping and string representations."""

    def test_getset_state(self):
        err = flow.Error("Error")
        snapshot = err.get_state()
        # Rebuilding from a state snapshot must reproduce that exact state.
        assert flow.Error.from_state(snapshot).get_state() == err.get_state()
        assert err.copy()
        other = flow.Error("bar")
        assert not err == other
        # Adopting another error's state makes the two states compare equal.
        err.set_state(other.get_state())
        assert err.get_state() == other.get_state()
        clone = err.copy()
        assert clone.get_state() == err.get_state()

    def test_repr(self):
        err = flow.Error("yay")
        assert repr(err)
        assert str(err)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import generators
import unittest
# tests for deeply nested try/except/finally's
class FinallyTests(unittest.TestCase):
    """Regression tests for yield inside deeply nested try/finally blocks."""

    def gen1(self):
        # Simplest case: yield from a finally clause.
        try:
            pass
        finally:
            yield 1

    def genContinue(self):
        # continue in the try still runs the finally, which yields.
        for i in range(3):
            try:
                continue
            finally:
                yield i

    def genPass(self):
        for i in range(3):
            try:
                pass
            finally:
                yield i

    def genLocal(self):
        # Local variable visible inside the finally clause.
        x = 1
        try:
            pass
        finally:
            yield x

    def genConditional(self):
        # x stays 0 when continue is taken, so the yielded range shifts.
        for i in range(3):
            x = 0
            try:
                if i == 2:
                    continue
                x = 1
            finally:
                for j in range(x, x + 2):
                    yield j

    def genTryExceptAroundFinally(self):
        # ZeroDivisionError raised inside try/finally, caught at two levels.
        try:
            for i in range(1):
                try:
                    for i in range(3):
                        try:
                            try:
                                1//0
                            finally:
                                yield i
                        except:
                            pass
                    1//0
                except:
                    yield 3
        except:
            pass

    def genNested(self):
        for i in range(2):
            try:
                continue
            finally:
                for j in range(2):
                    try:
                        pass
                    finally:
                        yield (i, j)

    def genNestedReversed(self):
        # Same as genNested but continue/pass swapped between the levels.
        for i in range(2):
            try:
                pass
            finally:
                for j in range(2):
                    try:
                        continue
                    finally:
                        yield (i, j)

    def genNestedDeeply(self):
        # Three nested finally clauses; inner ranges depend on outer indices.
        for i in range(4):
            try:
                continue
            finally:
                for j in range(i):
                    try:
                        pass
                    finally:
                        for k in range(j):
                            try:
                                try:
                                    1//0
                                finally:
                                    yield (i, j, k)
                            except:
                                pass

    def genNestedTryExcept(self):
        # The inner 1//0 propagates after its finally yields, ending the
        # k loop at k == 0 each time.
        for j in range(3):
            try:
                try:
                    1//0
                finally:
                    for k in range(3):
                        try:
                            1//0
                        finally:
                            yield (j, k)
            except:
                pass

    def genNestedDeeplyTryExcept(self):
        for i in range(3):
            try:
                try:
                    1//0
                finally:
                    for j in range(3):
                        try:
                            1//0
                        finally:
                            for k in range(3):
                                try:
                                    1//0
                                finally:
                                    yield (i, j, k)
            except:
                pass

    def testFinally(self):
        # Each assertion pins the exact yield sequence of one generator above.
        self.assertEquals([1], list(self.gen1()))
        self.assertEquals([0, 1, 2], list(self.genContinue()))
        self.assertEquals([0, 1, 2], list(self.genPass()))
        self.assertEquals([1], list(self.genLocal()))
        self.assertEquals(
            [1, 2, 1, 2, 0, 1],
            list(self.genConditional()))
        self.assertEquals([0, 1, 2, 3], list(self.genTryExceptAroundFinally()))
        self.assertEquals(
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            list(self.genNested()))
        self.assertEquals(
            [(0, 0), (0, 1), (1, 0), (1, 1)],
            list(self.genNestedReversed()))
        self.assertEquals(
            [(2, 1, 0), (3, 1, 0), (3, 2, 0), (3, 2, 1)],
            list(self.genNestedDeeply()))
        self.assertEquals(
            [(0, 0), (1, 0), (2, 0)],
            list(self.genNestedTryExcept()))
        self.assertEquals(
            [(0, 0, 0), (1, 0, 0), (2, 0, 0)],
            list(self.genNestedDeeplyTryExcept()))
class TryExceptTests(unittest.TestCase):
    """Yield from inside an except handler, then re-raise the exception."""

    def genNestedExcept(self):
        # NOTE: "except ZeroDivisionError, e" is Python 2 only syntax.
        for j in range(3):
            try:
                try:
                    1//0
                except ZeroDivisionError, e:
                    yield 1
                    raise e
            except ZeroDivisionError:
                pass

    def testExcept(self):
        self.assertEquals([1, 1, 1], list(self.genNestedExcept()))
class TestThrowTestCase(unittest.TestCase):
    """throw() into a generator that has not started executing."""
    def test_just_started_throw(self):
        # Throwing into a just-started generator propagates the exception
        # and finalizes the generator: its frame is gone and further
        # iteration raises StopIteration.  (.next is Python 2 spelling.)
        genexp = (i for i in range(2))
        self.assertRaises(IOError, genexp.throw, IOError)
        self.assertEqual(genexp.gi_frame, None)
        self.assertRaises(StopIteration, genexp.next)
# Run all test cases when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
'use strict'
/**
* Module dependencies.
*/
var express = require('../../..');
var fs = require('node:fs');
var path = require('node:path');
// Auto-mount every controller under ../controllers as an Express sub-app.
//
// Each controller module may export:
//   - name / prefix / engine / before : configuration ("reserved" exports)
//   - show / list / edit / update / create / index : REST-style handlers,
//     mapped below to conventional method + URL pairs.
module.exports = function(parent, options){
  var dir = path.join(__dirname, '..', 'controllers');
  var verbose = options.verbose;
  fs.readdirSync(dir).forEach(function(name){
    var file = path.join(dir, name)
    // skip plain files; only controller directories are mounted
    if (!fs.statSync(file).isDirectory()) return;
    verbose && console.log('\n %s:', name);
    var obj = require(file);
    // NOTE: redeclares the callback parameter via var hoisting;
    // obj.name wins when set, otherwise the directory name is kept.
    var name = obj.name || name;
    var prefix = obj.prefix || '';
    var app = express();
    var handler;
    var method;
    var url;
    // allow specifying the view engine
    if (obj.engine) app.set('view engine', obj.engine);
    app.set('views', path.join(__dirname, '..', 'controllers', name, 'views'));
    // generate routes based
    // on the exported methods
    for (var key in obj) {
      // "reserved" exports
      if (~['name', 'prefix', 'engine', 'before'].indexOf(key)) continue;
      // route exports
      switch (key) {
        case 'show':
          method = 'get';
          url = '/' + name + '/:' + name + '_id';
          break;
        case 'list':
          method = 'get';
          url = '/' + name + 's';
          break;
        case 'edit':
          method = 'get';
          url = '/' + name + '/:' + name + '_id/edit';
          break;
        case 'update':
          method = 'put';
          url = '/' + name + '/:' + name + '_id';
          break;
        case 'create':
          method = 'post';
          url = '/' + name;
          break;
        case 'index':
          method = 'get';
          url = '/';
          break;
        default:
          /* istanbul ignore next */
          throw new Error('unrecognized route: ' + name + '.' + key);
      }
      // setup
      handler = obj[key];
      url = prefix + url;
      // before middleware support
      if (obj.before) {
        app[method](url, obj.before, handler);
        verbose && console.log(' %s %s -> before -> %s', method.toUpperCase(), url, key);
      } else {
        app[method](url, handler);
        verbose && console.log(' %s %s -> %s', method.toUpperCase(), url, key);
      }
    }
    // mount the app
    parent.use(app);
  });
};
|
javascript
|
github
|
https://github.com/expressjs/express
|
examples/mvc/lib/boot.js
|
"""
Copyright 2009-2015 Olivier Belanger
This file is part of pyo, a python module to help digital signal
processing script creation.
pyo is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
pyo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with pyo. If not, see <http://www.gnu.org/licenses/>.
"""
from types import ListType, FloatType, IntType
import math, sys, os
from Tkinter import *
try:
from PIL import Image, ImageDraw, ImageTk
except:
pass
# constants for platform displays with Tk
if sys.platform == 'linux2':
Y_OFFSET = 0
VM_OFFSET = 2
elif sys.platform == 'win32':
Y_OFFSET = 3
VM_OFFSET = 1
else:
Y_OFFSET = 4
VM_OFFSET = 0
######################################################################
### Multisliders
######################################################################
class MultiSlider(Frame):
    """Tk widget drawing one horizontal value bar per channel.

    Dragging with button-1 over a row sets that channel's value in the
    0..1 range; every change is reported via command(key, values).
    """
    def __init__(self, master, init, key, command):
        Frame.__init__(self, master, bd=0, relief=FLAT)
        self._values = init        # current per-channel values (0..1)
        self._nchnls = len(init)
        self._key = key            # attribute name passed back to command
        self._command = command    # callback: command(key, values)
        self._lines = []           # canvas rectangle ids, one per channel
        self._height = 16          # pixel height of one channel row
        self.canvas = Canvas(self, height=self._height*self._nchnls+1,
                            width=225, relief=FLAT, bd=0, bg="#BCBCAA")
        w = self.canvas.winfo_width()
        for i in range(self._nchnls):
            x = int(self._values[i] * w)
            y = self._height * i + Y_OFFSET
            self._lines.append(self.canvas.create_rectangle(0, y, x,
                                y+self._height-1, width=0, fill="#121212"))
        self.canvas.bind("<Button-1>", self.clicked)
        self.canvas.bind("<Motion>", self.move)
        self.canvas.bind("<Configure>", self.size)
        self.canvas.grid(sticky=E+W)
        self.columnconfigure(0, weight=1)
        self.grid()
    def size(self, event):
        # Rescale every bar to the new canvas width.
        w = self.canvas.winfo_width()
        for i in range(len(self._lines)):
            y = self._height * i + Y_OFFSET
            x = self._values[i] * w
            self.canvas.coords(self._lines[i], 0, y, x, y+self._height-1)
    def clicked(self, event):
        self.update(event)
    def move(self, event):
        # 0x0100 is the Tk state mask for button-1 held down.
        if event.state == 0x0100:
            # Python 2 integer division maps the y pixel to a row index.
            slide = (event.y - Y_OFFSET) / self._height
            if 0 <= slide < len(self._lines):
                self.update(event)
    def update(self, event):
        # Convert the pointer position to (channel, value) and redraw.
        w = self.canvas.winfo_width()
        slide = (event.y - Y_OFFSET) / self._height
        val = event.x / float(w)
        self._values[slide] = val
        y = self._height * slide + Y_OFFSET
        self.canvas.coords(self._lines[slide], 0, y, event.x, y+self._height-1)
        self._command(self._key, self._values)
######################################################################
### Control window for PyoObject
######################################################################
class Command:
    """Callable binding a fixed key as first argument of *func*.

    Calling the instance with a value invokes func(key, value); used as
    a Tk widget callback that only receives the new value.
    """
    def __init__(self, func, key):
        self.func = func
        self.key = key
    def __call__(self, value):
        self.func(self.key, value)
class PyoObjectControl(Frame):
    """Auto-generated control panel for a PyoObject.

    For each SLMap in *map_list* whose initial value is numeric, builds a
    labelled slider (Scale for scalars, MultiSlider for lists) plus a
    numeric display, and drives the object's attribute through a SigTo
    ramp so changes glide over 25 ms.
    """
    def __init__(self, master=None, obj=None, map_list=None):
        Frame.__init__(self, master, bd=1, relief=GROOVE)
        from controls import SigTo
        self.bind('<Destroy>', self._destroy)
        self._obj = obj
        self._map_list = map_list
        self._sliders = []
        self._excluded = []   # map names skipped (non-numeric init)
        self._values = {}     # last value set per attribute name
        self._displays = {}   # StringVar per attribute for the readout
        self._maps = {}       # SLMap per attribute (value scaling)
        self._sigs = {}       # SigTo per attribute (smoothed signal)
        for i, m in enumerate(self._map_list):
            key, init = m.name, m.init
            # filters PyoObjects
            if type(init) not in [ListType, FloatType, IntType]:
                self._excluded.append(key)
            else:
                self._maps[key] = m
                # label (param name)
                label = Label(self, height=1, width=10, highlightthickness=0, text=key)
                label.grid(row=i, column=0)
                # create and pack slider
                if type(init) != ListType:
                    self._sliders.append(Scale(self, command=Command(self.setval, key),
                                orient=HORIZONTAL, relief=GROOVE, from_=0., to=1., showvalue=False,
                                resolution=.0001, bd=1, length=225, troughcolor="#BCBCAA", width=12))
                    self._sliders[-1].set(m.set(init))
                    disp_height = 1
                else:
                    self._sliders.append(MultiSlider(self, [m.set(x) for x in init], key, self.setval))
                    disp_height = len(init)
                self._sliders[-1].grid(row=i, column=1, sticky=E+W)
                # display of numeric values
                textvar = StringVar(self)
                display = Label(self, height=disp_height, width=10, highlightthickness=0, textvariable=textvar)
                display.grid(row=i, column=2)
                self._displays[key] = textvar
                if type(init) != ListType:
                    self._displays[key].set("%.4f" % init)
                else:
                    self._displays[key].set("\n".join(["%.4f" % i for i in init]))
                # set obj attribute to PyoObject SigTo
                self._sigs[key] = SigTo(init, .025, init)
                # align the SigTo streams on the controlled object's stream
                refStream = self._obj.getBaseObjects()[0]._getStream()
                server = self._obj.getBaseObjects()[0].getServer()
                for k in range(len(self._sigs[key].getBaseObjects())):
                    curStream = self._sigs[key].getBaseObjects()[k]._getStream()
                    server.changeStreamPosition(refStream, curStream)
                setattr(self._obj, key, self._sigs[key])
        # padding
        top = self.winfo_toplevel()
        top.rowconfigure(0, weight=1)
        top.columnconfigure(0, weight=1)
        self.columnconfigure(1, weight=1)
        self.grid(ipadx=5, ipady=5, sticky=E+W)
    def _destroy(self, event):
        # Freeze each controlled attribute at its last value and drop the
        # SigTo objects when the window goes away.
        for m in self._map_list:
            key = m.name
            if key not in self._excluded:
                setattr(self._obj, key, self._values[key])
                del self._sigs[key]
    def setval(self, key, x):
        # Slider callback: rescale through the SLMap, update the readout,
        # and feed the smoothed signal.
        if type(x) != ListType:
            value = self._maps[key].get(float(x))
            self._displays[key].set("%.4f" % value)
        else:
            value = [self._maps[key].get(float(y)) for y in x]
            self._displays[key].set("\n".join(["%.4f" % i for i in value]))
        self._values[key] = value
        setattr(self._sigs[key], "value", value)
######################################################################
### View window for PyoTableObject
######################################################################
class ViewTable_withPIL(Frame):
    """Waveform view for a PyoTableObject, rendered with PIL.

    Draws *samples* (a flat sequence of x,y line coordinates) into a
    greyscale PIL image shown on a Tk canvas, with a dashed grey
    mid-line marking zero amplitude.
    """
    def __init__(self, master=None, samples=None):
        Frame.__init__(self, master, bd=1, relief=GROOVE)
        self.width = 500
        self.height = 200
        self.half_height = self.height / 2
        self.canvas = Canvas(self, height=self.height, width=self.width, relief=SUNKEN, bd=1, bg="#EFEFEF")
        # Render the waveform into a greyscale ("L") image, white background.
        # (A leftover debug "print Image" statement was removed here.)
        im = Image.new("L", (self.width, self.height), 255)
        draw = ImageDraw.Draw(im)
        draw.line(samples, fill=0, width=1)
        # Keep the PhotoImage referenced on self so Tk does not lose it.
        self.img = ImageTk.PhotoImage(im)
        self.canvas.create_image(self.width/2,self.height/2,image=self.img)
        self.canvas.create_line(0, self.half_height+2, self.width, self.half_height+2, fill='grey', dash=(4,2))
        self.canvas.grid()
        self.grid(ipadx=10, ipady=10)
class ViewTable_withoutPIL(Frame):
    """Fallback waveform view used when PIL is not installed.

    Draws *samples* directly as a canvas polyline instead of rendering
    an image; otherwise mirrors ViewTable_withPIL.
    """
    def __init__(self, master=None, samples=None):
        Frame.__init__(self, master, bd=1, relief=GROOVE)
        self.width = 500
        self.height = 200
        self.half_height = self.height / 2
        self.canvas = Canvas(self, height=self.height, width=self.width, relief=SUNKEN, bd=1, bg="#EFEFEF")
        # Dashed grey mid-line marks zero amplitude.
        self.canvas.create_line(0, self.half_height+Y_OFFSET, self.width, self.half_height+Y_OFFSET, fill='grey', dash=(4,2))
        self.canvas.create_line(*samples)
        self.canvas.grid()
        self.grid(ipadx=10, ipady=10)
######################################################################
## View window for PyoMatrixObject
#####################################################################
class ViewMatrix_withPIL(Frame):
    """Greyscale view of a PyoMatrixObject rendered with PIL.

    *samples* is the flat pixel data, *size* is the (width, height) of
    the matrix.
    """
    def __init__(self, master=None, samples=None, size=None):
        Frame.__init__(self, master, bd=1, relief=GROOVE)
        self.canvas = Canvas(self, width=size[0], height=size[1], relief=SUNKEN, bd=1, bg="#EFEFEF")
        im = Image.new("L", size, None)
        im.putdata(samples)
        # Keep the PhotoImage referenced on self so Tk does not lose it.
        self.img = ImageTk.PhotoImage(im)
        self.canvas.create_image(size[0]/2+Y_OFFSET,size[1]/2+Y_OFFSET,image=self.img)
        self.canvas.grid()
        self.grid(ipadx=0, ipady=0)
class ViewMatrix_withoutPIL(Frame):
    """Fallback matrix view used when PIL is not installed.

    Plots every sample as a one-pixel canvas line whose grey level is
    the sample value; much slower than the PIL version.
    """
    def __init__(self, master=None, samples=None, size=None):
        Frame.__init__(self, master, bd=1, relief=GROOVE)
        self.width = size[0]
        self.height = size[1]
        self.canvas = Canvas(self, width=self.width, height=self.height, relief=SUNKEN, bd=1, bg="#EFEFEF")
        for i in range(self.width*self.height):
            # Python 2 integer division recovers (x, y) from the flat index.
            x = i % self.width
            y = i / self.width
            x1 = x+Y_OFFSET
            y1 = y+Y_OFFSET
            x2 = x+Y_OFFSET+1
            y2 = y+Y_OFFSET+1
            # Convert the 0-255 sample to a "#rrggbb" grey hex colour.
            amp = int(samples[i])
            amp = hex(amp).replace('0x', '')
            if len(amp) == 1:
                amp = "0%s" % amp
            amp = "#%s%s%s" % (amp, amp, amp)
            self.canvas.create_line(x1, y1, x2, y2, fill=amp)
        self.canvas.grid()
        self.grid(ipadx=0, ipady=0)
######################################################################
### Server Object User Interface (Tk)
######################################################################
class ServerGUI(Frame):
    """Tk control panel for a pyo Server.

    Provides start/stop and record buttons, a dB amplitude slider,
    optional per-channel VU meter and elapsed-time display, and an
    optional one-line Python interpreter with command history.  The
    callbacks (startf, stopf, recstartf, recstopf, ampf, shutdown) are
    supplied by the Server.
    """
    def __init__(self, master=None, nchnls=2, startf=None, stopf=None, recstartf=None,
                recstopf=None, ampf=None, started=0, locals=None, shutdown=None, meter=True, timer=True, amp=1.):
        Frame.__init__(self, master, padx=10, pady=10, bd=2, relief=GROOVE)
        self.shutdown = shutdown
        self.locals = locals        # namespace for the mini interpreter (or None)
        self.meter = meter
        self.timer = timer
        self.nchnls = nchnls
        self.startf = startf
        self.stopf = stopf
        self.recstartf = recstartf
        self.recstopf = recstopf
        self.ampf = ampf
        self.amp = amp
        self._started = False
        self._recstarted = False
        # VU meter pixel boundaries between green/yellow and yellow/red.
        self.B1, self.B2 = 193 - VM_OFFSET, 244 - VM_OFFSET
        self._history = []          # interpreter command history
        self._histo_count = 0
        self.grid(ipadx=5)
        self.rowconfigure(0, pad=20)
        self.rowconfigure(1, pad=10)
        self.rowconfigure(2, pad=10)
        self.createWidgets()
        if started == 1:
            # Server already running: sync the GUI without calling startf.
            self.start(True)
    def createWidgets(self):
        # Build the button row, amplitude slider and optional meter/timer/
        # interpreter widgets, stacking them by row index.
        row = 0
        self.startStringVar = StringVar(self)
        self.startStringVar.set('Start')
        self.startButton = Button(self, textvariable=self.startStringVar, command=self.start)
        self.startButton.grid(ipadx=5)
        self.recStringVar = StringVar(self)
        self.recStringVar.set('Rec Start')
        self.recButton = Button(self, textvariable=self.recStringVar, command=self.record)
        self.recButton.grid(ipadx=5, row=row, column=1)
        self.quitButton = Button(self, text='Quit', command=self.on_quit)
        self.quitButton.grid(ipadx=5, row=row, column=2)
        row += 1
        self.ampScale = Scale(self, command=self.setAmp, digits=4, label='Amplitude (dB)',
                    orient=HORIZONTAL, relief=GROOVE, from_=-60.0, to=18.0,
                    resolution=.01, bd=1, length=250, troughcolor="#BCBCAA", width=10)
        self.ampScale.set(20.0 * math.log10(self.amp))
        self.ampScale.grid(ipadx=5, ipady=5, row=row, column=0, columnspan=3)
        row += 1
        if self.meter:
            self.vumeter = Canvas(self, height=5*self.nchnls+1, width=250, relief=FLAT, bd=0, bg="#323232")
            self.green = []
            self.yellow = []
            self.red = []
            for i in range(self.nchnls):
                y = 5 * (i + 1) + 1 - VM_OFFSET
                self.green.append(self.vumeter.create_line(0, y, 1, y, width=4, fill='green', dash=(9,1), dashoff=6+VM_OFFSET))
                self.yellow.append(self.vumeter.create_line(self.B1, y, self.B1, y, width=4, fill='yellow', dash=(9,1), dashoff=9))
                self.red.append(self.vumeter.create_line(self.B2, y, self.B2, y, width=4, fill='red', dash=(9,1), dashoff=0))
            self.vumeter.grid(ipadx=5, row=row, column=0, columnspan=3)
            row += 1
        if self.timer:
            self.timer_label = Label(self, text='Elapsed time (h:m:s:ms)')
            self.timer_label.grid(ipadx=0, row=row, column=0, columnspan=3)
            row += 1
            self.timer_strvar = StringVar(self, " 00 : 00 : 00 : 000")
            self.timetext = Label(self, textvariable=self.timer_strvar)
            self.timetext.grid(ipadx=5, row=row, column=0, columnspan=3)
            row += 1
        if self.locals != None:
            self.interp_label = Label(self, text='Interpreter')
            self.interp_label.grid(ipadx=0, row=row, column=0, columnspan=3)
            row += 1
            self.text = Text(self, height=1, width=33, bd=1, relief=RIDGE, highlightthickness=0,
                            spacing1=2, spacing3=2)
            self.text.grid(ipadx=5, row=row, column=0, columnspan=3)
            self.text.bind("<Return>", self.getText)
            self.text.bind("<Up>", self.getPrev)
            self.text.bind("<Down>", self.getNext)
    def on_quit(self):
        self.shutdown()
        self.quit()
    def getPrev(self, event):
        # Up arrow: recall the previous interpreter command.
        self.text.delete("1.0", END)
        self._histo_count -= 1
        if self._histo_count < 0:
            self._histo_count = 0
        self.text.insert("1.0", self._history[self._histo_count])
        return "break"
    def setTime(self, *args):
        # args are (hours, minutes, seconds, milliseconds).
        self.timer_strvar.set(" %02d : %02d : %02d : %03d" % (args[0], args[1], args[2], args[3]))
    def getNext(self, event):
        # Down arrow: recall the next interpreter command (blank at end).
        self.text.delete("1.0", END)
        self._histo_count += 1
        if self._histo_count >= len(self._history):
            self._histo_count = len(self._history)
        else:
            self.text.insert("1.0", self._history[self._histo_count])
        return "break"
    def getText(self, event):
        # Return key: execute the entered line in the server's namespace
        # (Python 2 exec statement) and append it to the history.
        source = self.text.get("1.0", END)
        self.text.delete("1.0", END)
        exec source in self.locals
        self._history.append(source)
        self._histo_count = len(self._history)
        return "break"
    def start(self, justSet=False):
        # Toggle the server; justSet=True only updates the GUI state.
        if self._started == False:
            if not justSet:
                self.startf()
            self._started = True
            self.startStringVar.set('Stop')
            self.quitButton.configure(state = DISABLED)
        else:
            self.stopf()
            self._started = False
            self.startStringVar.set('Start')
            self.quitButton.configure(state = NORMAL)
    def record(self):
        # Toggle recording to disk.
        if self._recstarted == False:
            self.recstartf()
            self._recstarted = True
            self.recStringVar.set('Rec Stop')
        else:
            self.recstopf()
            self._recstarted = False
            self.recStringVar.set('Rec Start')
    def setAmp(self, value):
        # Slider callback: convert dB to linear gain (10^(dB/20)).
        self.ampf(math.pow(10.0, float(value) * 0.05))
    def setRms(self, *args):
        # Update the VU meter: one RMS value per channel in args, mapped
        # to a pixel length and split across green/yellow/red segments.
        for i in range(self.nchnls):
            y = 5 * (i + 1) + 1 - VM_OFFSET
            db = math.log10(args[i]+0.00001) * 0.2 + 1.
            amp = int(db*250)
            if amp <= self.B1:
                self.vumeter.coords(self.green[i], 0, y, amp, y)
                self.vumeter.coords(self.yellow[i], self.B1, y, self.B1, y)
                self.vumeter.coords(self.red[i], self.B2, y, self.B2, y)
            elif amp <= self.B2:
                self.vumeter.coords(self.green[i], 0, y, self.B1, y)
                self.vumeter.coords(self.yellow[i], self.B1, y, amp, y)
                self.vumeter.coords(self.red[i], self.B2, y, self.B2, y)
            else:
                self.vumeter.coords(self.green[i], 0, y, self.B1, y)
                self.vumeter.coords(self.yellow[i], self.B1, y, self.B2, y)
                self.vumeter.coords(self.red[i], self.B2, y, amp, y)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import numpy as np
from scipy.stats import norm
def measure_from_means(sample_mean, pop_mean, pop_std_dev):
    """Cohen's d effect size computed from raw means.

    Keyword arguments:
    sample_mean -- mean of the sample
    pop_mean -- mean of the population
    pop_std_dev -- population standard deviation

    Returns 0 when the standard deviation is 0 (effect size undefined).
    """
    if pop_std_dev == 0:
        return 0
    return (sample_mean - pop_mean) / pop_std_dev
def measure_from_statistic(test_statistic, sample_size):
    """Cohen's d derived from a Z statistic: z / sqrt(n).

    Keyword arguments:
    test_statistic -- Z statistic
    sample_size -- sample size

    Returns 0 when the sample size is 0 (measure undefined).
    """
    if sample_size == 0:
        return 0
    return test_statistic / np.sqrt(sample_size)
def measures_interval(measure, pop_std_dev, alpha):
    """Two-sided (1 - alpha) confidence interval around *measure*.

    Keyword arguments:
    measure -- value of Cohen's d
    pop_std_dev -- population standard deviation
    alpha -- significance level

    The half-width is the normal critical value z_{1-alpha/2} scaled by
    pop_std_dev; returns the (low, high) tuple.
    """
    half_width = norm.ppf(1 - alpha / 2.0) * pop_std_dev
    return (measure - half_width, measure + half_width)
def test_statistic(sample_mean, pop_mean, pop_std_err):
"""Calculate the value of the Z statistic from the specified means.
Keyword arguments:
sample_mean -- sample mean
pop_mean -- population mean
pop_std_err -- population standard error
"""
result = 0
if pop_std_err != 0:
result = (sample_mean - pop_mean) / pop_std_err
return result
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.collect.Iterators.singletonIterator;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import com.google.common.base.Function;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Iterator;
import java.util.Queue;
import org.jspecify.annotations.Nullable;
/**
* Views elements of a type {@code T} as nodes in a tree, and provides methods to traverse the trees
* induced by this traverser.
*
* <p>For example, the tree
*
* {@snippet :
* h
* / | \
* / e \
* d g
* /|\ |
* / | \ f
* a b c
* }
*
* <p>can be iterated over in preorder (hdabcegf), postorder (abcdefgh), or breadth-first order
* (hdegabcf).
*
* <p>Null nodes are strictly forbidden.
*
* <p>Because this is an abstract class, not an interface, you can't use a lambda expression to
* implement it:
*
* {@snippet :
* // won't work
* TreeTraverser<NodeType> traverser = node -> node.getChildNodes();
* }
*
* Instead, you can pass a lambda expression to the {@code using} factory method:
*
* {@snippet :
* TreeTraverser<NodeType> traverser = TreeTraverser.using(node -> node.getChildNodes());
* }
*
* @author Louis Wasserman
* @since 15.0
* @deprecated Use {@link com.google.common.graph.Traverser} instead. All instance methods have
* their equivalent on the result of {@code Traverser.forTree(tree)} where {@code tree}
* implements {@code SuccessorsFunction}, which has a similar API as {@link #children} or can be
* the same lambda function as passed into {@link #using(Function)}.
*/
// This class is now only available externally for backwards compatibility; it should not be used
// internally (hence the package-private visibility and @Deprecated annotation).
@Deprecated
@Beta
@GwtCompatible
public
abstract class TreeTraverser<T> {
  /** Constructor for use by subclasses. */
  public TreeTraverser() {}
  /**
   * Returns a tree traverser that uses the given function to navigate from a node to its children.
   * This is useful if the function instance already exists, or so that you can supply a lambda
   * expressions. If those circumstances don't apply, you probably don't need to use this; subclass
   * {@code TreeTraverser} and implement its {@link #children} method directly.
   *
   * @since 20.0
   * @deprecated Use {@link com.google.common.graph.Traverser#forTree} instead. If you are using a
   *     lambda, these methods have exactly the same signature.
   */
  @Deprecated
  public static <T> TreeTraverser<T> using(
      Function<T, ? extends Iterable<T>> nodeToChildrenFunction) {
    checkNotNull(nodeToChildrenFunction);
    return new TreeTraverser<T>() {
      @Override
      public Iterable<T> children(T root) {
        return nodeToChildrenFunction.apply(root);
      }
    };
  }
  /** Returns the children of the specified node. Must not contain null. */
  public abstract Iterable<T> children(T root);
  /**
   * Returns an unmodifiable iterable over the nodes in a tree structure, using pre-order traversal.
   * That is, each node's subtrees are traversed after the node itself is returned.
   *
   * <p>No guarantees are made about the behavior of the traversal when nodes change while iteration
   * is in progress or when the iterators generated by {@link #children} are advanced.
   *
   * @deprecated Use {@link com.google.common.graph.Traverser#depthFirstPreOrder} instead, which has
   *     the same behavior.
   */
  @Deprecated
  public final FluentIterable<T> preOrderTraversal(T root) {
    checkNotNull(root);
    // The iterable is lazy: a fresh iterator is created per iteration.
    return new FluentIterable<T>() {
      @Override
      public UnmodifiableIterator<T> iterator() {
        return preOrderIterator(root);
      }
    };
  }
  // Overridable hook so subclasses (e.g. binary trees) can supply a specialized iterator.
  UnmodifiableIterator<T> preOrderIterator(T root) {
    return new PreOrderIterator(root);
  }
  private final class PreOrderIterator extends UnmodifiableIterator<T> {
    // Invariant: every iterator on the stack has at least one element remaining.
    private final Deque<Iterator<T>> stack;
    PreOrderIterator(T root) {
      this.stack = new ArrayDeque<>();
      stack.addLast(singletonIterator(checkNotNull(root)));
    }
    @Override
    public boolean hasNext() {
      return !stack.isEmpty();
    }
    @Override
    public T next() {
      Iterator<T> itr = stack.getLast(); // throws NSEE if empty
      T result = checkNotNull(itr.next());
      // Pop exhausted iterators eagerly to preserve the stack invariant.
      if (!itr.hasNext()) {
        stack.removeLast();
      }
      Iterator<T> childItr = children(result).iterator();
      if (childItr.hasNext()) {
        stack.addLast(childItr);
      }
      return result;
    }
  }
  /**
   * Returns an unmodifiable iterable over the nodes in a tree structure, using post-order
   * traversal. That is, each node's subtrees are traversed before the node itself is returned.
   *
   * <p>No guarantees are made about the behavior of the traversal when nodes change while iteration
   * is in progress or when the iterators generated by {@link #children} are advanced.
   *
   * @deprecated Use {@link com.google.common.graph.Traverser#depthFirstPostOrder} instead, which
   *     has the same behavior.
   */
  @Deprecated
  public final FluentIterable<T> postOrderTraversal(T root) {
    checkNotNull(root);
    return new FluentIterable<T>() {
      @Override
      public UnmodifiableIterator<T> iterator() {
        return postOrderIterator(root);
      }
    };
  }
  // Overridable hook so subclasses can supply a specialized iterator.
  UnmodifiableIterator<T> postOrderIterator(T root) {
    return new PostOrderIterator(root);
  }
  // Stack frame pairing a node with its partially-consumed child iterator.
  private static final class PostOrderNode<T> {
    final T root;
    final Iterator<T> childIterator;
    PostOrderNode(T root, Iterator<T> childIterator) {
      this.root = checkNotNull(root);
      this.childIterator = checkNotNull(childIterator);
    }
  }
  private final class PostOrderIterator extends AbstractIterator<T> {
    private final ArrayDeque<PostOrderNode<T>> stack;
    PostOrderIterator(T root) {
      this.stack = new ArrayDeque<>();
      stack.addLast(expand(root));
    }
    @Override
    protected @Nullable T computeNext() {
      // Descend to the deepest unvisited child, then emit nodes on the way back up.
      while (!stack.isEmpty()) {
        PostOrderNode<T> top = stack.getLast();
        if (top.childIterator.hasNext()) {
          T child = top.childIterator.next();
          stack.addLast(expand(child));
        } else {
          stack.removeLast();
          return top.root;
        }
      }
      return endOfData();
    }
    private PostOrderNode<T> expand(T t) {
      return new PostOrderNode<>(t, children(t).iterator());
    }
  }
  /**
   * Returns an unmodifiable iterable over the nodes in a tree structure, using breadth-first
   * traversal. That is, all the nodes of depth 0 are returned, then depth 1, then 2, and so on.
   *
   * <p>No guarantees are made about the behavior of the traversal when nodes change while iteration
   * is in progress or when the iterators generated by {@link #children} are advanced.
   *
   * @deprecated Use {@link com.google.common.graph.Traverser#breadthFirst} instead, which has the
   *     same behavior.
   */
  @Deprecated
  public final FluentIterable<T> breadthFirstTraversal(T root) {
    checkNotNull(root);
    return new FluentIterable<T>() {
      @Override
      public UnmodifiableIterator<T> iterator() {
        return new BreadthFirstIterator(root);
      }
    };
  }
  private final class BreadthFirstIterator extends UnmodifiableIterator<T>
      implements PeekingIterator<T> {
    // FIFO queue drives the level-order traversal.
    private final Queue<T> queue;
    BreadthFirstIterator(T root) {
      this.queue = new ArrayDeque<>();
      queue.add(root);
    }
    @Override
    public boolean hasNext() {
      return !queue.isEmpty();
    }
    @Override
    public T peek() {
      return queue.element();
    }
    @Override
    public T next() {
      T result = queue.remove();
      Iterables.addAll(queue, children(result));
      return result;
    }
  }
}
|
java
|
github
|
https://github.com/google/guava
|
android/guava/src/com/google/common/collect/TreeTraverser.java
|
<?php
namespace Illuminate\Contracts\Container;
use Closure;
use Psr\Container\ContainerInterface;
/**
 * The service container contract.
 *
 * Extends PSR-11's ContainerInterface with binding registration,
 * tagging, contextual binding, method invocation, and resolving
 * lifecycle callbacks.
 */
interface Container extends ContainerInterface
{
    /**
     * {@inheritdoc}
     *
     * @template TClass of object
     *
     * @param string|class-string<TClass> $id
     * @return ($id is class-string<TClass> ? TClass : mixed)
     */
    public function get(string $id);
    /**
     * Determine if the given abstract type has been bound.
     *
     * @param string $abstract
     * @return bool
     */
    public function bound($abstract);
    /**
     * Alias a type to a different name.
     *
     * @param string $abstract
     * @param string $alias
     * @return void
     *
     * @throws \LogicException
     */
    public function alias($abstract, $alias);
    /**
     * Assign a set of tags to a given binding.
     *
     * @param array|string $abstracts
     * @param mixed ...$tags
     * @return void
     */
    public function tag($abstracts, $tags);
    /**
     * Resolve all of the bindings for a given tag.
     *
     * @param string $tag
     * @return iterable
     */
    public function tagged($tag);
    /**
     * Register a binding with the container.
     *
     * @param \Closure|string $abstract
     * @param \Closure|string|null $concrete
     * @param bool $shared
     * @return void
     */
    public function bind($abstract, $concrete = null, $shared = false);
    /**
     * Bind a callback to resolve with Container::call.
     *
     * @param array|string $method
     * @param \Closure $callback
     * @return void
     */
    public function bindMethod($method, $callback);
    /**
     * Register a binding if it hasn't already been registered.
     *
     * @param \Closure|string $abstract
     * @param \Closure|string|null $concrete
     * @param bool $shared
     * @return void
     */
    public function bindIf($abstract, $concrete = null, $shared = false);
    /**
     * Register a shared binding in the container.
     *
     * @param \Closure|string $abstract
     * @param \Closure|string|null $concrete
     * @return void
     */
    public function singleton($abstract, $concrete = null);
    /**
     * Register a shared binding if it hasn't already been registered.
     *
     * @param \Closure|string $abstract
     * @param \Closure|string|null $concrete
     * @return void
     */
    public function singletonIf($abstract, $concrete = null);
    /**
     * Register a scoped binding in the container.
     *
     * @param \Closure|string $abstract
     * @param \Closure|string|null $concrete
     * @return void
     */
    public function scoped($abstract, $concrete = null);
    /**
     * Register a scoped binding if it hasn't already been registered.
     *
     * @param \Closure|string $abstract
     * @param \Closure|string|null $concrete
     * @return void
     */
    public function scopedIf($abstract, $concrete = null);
    /**
     * "Extend" an abstract type in the container.
     *
     * @param \Closure|string $abstract
     * @param \Closure $closure
     * @return void
     *
     * @throws \InvalidArgumentException
     */
    public function extend($abstract, Closure $closure);
    /**
     * Register an existing instance as shared in the container.
     *
     * @template TInstance of mixed
     *
     * @param \Closure|string $abstract
     * @param TInstance $instance
     * @return TInstance
     */
    public function instance($abstract, $instance);
    /**
     * Add a contextual binding to the container.
     *
     * @param string $concrete
     * @param \Closure|string $abstract
     * @param \Closure|string $implementation
     * @return void
     */
    public function addContextualBinding($concrete, $abstract, $implementation);
    /**
     * Define a contextual binding.
     *
     * @param string|array $concrete
     * @return \Illuminate\Contracts\Container\ContextualBindingBuilder
     */
    public function when($concrete);
    /**
     * Get a closure to resolve the given type from the container.
     *
     * @template TClass of object
     *
     * @param string|class-string<TClass> $abstract
     * @return ($abstract is class-string<TClass> ? \Closure(): TClass : \Closure(): mixed)
     */
    public function factory($abstract);
    /**
     * Flush the container of all bindings and resolved instances.
     *
     * @return void
     */
    public function flush();
    /**
     * Resolve the given type from the container.
     *
     * @template TClass of object
     *
     * @param string|class-string<TClass> $abstract
     * @param array $parameters
     * @return ($abstract is class-string<TClass> ? TClass : mixed)
     *
     * @throws \Illuminate\Contracts\Container\BindingResolutionException
     */
    public function make($abstract, array $parameters = []);
    /**
     * Call the given Closure / class@method and inject its dependencies.
     *
     * @param callable|string $callback
     * @param array $parameters
     * @param string|null $defaultMethod
     * @return mixed
     */
    public function call($callback, array $parameters = [], $defaultMethod = null);
    /**
     * Determine if the given abstract type has been resolved.
     *
     * @param string $abstract
     * @return bool
     */
    public function resolved($abstract);
    /**
     * Register a new before resolving callback.
     *
     * @param \Closure|string $abstract
     * @param \Closure|null $callback
     * @return void
     */
    public function beforeResolving($abstract, ?Closure $callback = null);
    /**
     * Register a new resolving callback.
     *
     * @param \Closure|string $abstract
     * @param \Closure|null $callback
     * @return void
     */
    public function resolving($abstract, ?Closure $callback = null);
    /**
     * Register a new after resolving callback.
     *
     * @param \Closure|string $abstract
     * @param \Closure|null $callback
     * @return void
     */
    public function afterResolving($abstract, ?Closure $callback = null);
}
|
php
|
github
|
https://github.com/laravel/framework
|
src/Illuminate/Contracts/Container/Container.php
|
#! /usr/bin/env python
"""Stupid little script to automate generation of MANIFEST and po/POTFILES.in
Really this should have been handled by using distutils, but oh well,
distutils is a hoary beast and I can't fault people for not wanting to
spend days spelunking around inside it to find the solutions...
"""
from distutils.filelist import FileList
import os
def fileList( template ):
    """Expand a MANIFEST.in-style *template* into a newline-joined file list.

    Each non-empty line of the template is fed to
    distutils.filelist.FileList.process_template_line; the matched file
    names are then joined with newlines and returned as one string.
    """
    files = FileList()
    for line in template.splitlines():
        if not line:
            continue
        files.process_template_line( line )
    return '\n'.join( files.files )
def main( ):
    """Regenerate MANIFEST and po/POTFILES.in from their .in templates."""
    # Expand MANIFEST.in into MANIFEST.
    content = fileList( open('MANIFEST.in').read() )
    open( 'MANIFEST','w').write( content )
    # Expand the top-level POTFILES.in and write it under po/.
    content = fileList( open('POTFILES.in').read() )
    try:
        os.makedirs( 'po' )
    except OSError, err:
        # Directory already exists (Python 2 except syntax).
        pass
    open( os.path.join('po','POTFILES.in'), 'w').write( content )
# Run the generation when executed as a script.
if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import threading
import select
import time
import utils
import global_var as g
from xlog import getLogger
xlog = getLogger("smart_router")
class Buf(object):
    """FIFO of data chunks tracking total byte size and chunk count."""
    def __init__(self):
        self.buf = []   # queued chunks, oldest first
        self.size = 0   # total bytes currently queued
        self.num = 0    # number of chunks currently queued

    def add(self, data):
        """Queue a chunk at the tail."""
        self.buf.append(data)
        self.size += len(data)
        self.num += 1

    def get(self):
        """Dequeue and return the oldest chunk, or "" when empty."""
        try:
            dat = self.buf.pop(0)
        except IndexError:
            return ""
        self.size -= len(dat)
        self.num -= 1
        return dat

    def restore(self, dat):
        """Push a chunk back to the head (undo a get)."""
        self.buf.insert(0, dat)
        self.size += len(dat)
        self.num += 1
class PipeSocks(object):
    """Bidirectional relay between paired sockets using a select() loop.

    Sockets are registered in pairs via add_socks(); data read from one
    is queued in a Buf and written to its peer.  Per-direction
    backpressure: when a peer's send buffer exceeds buf_size, the
    reading socket is taken out of the read set until the buffer
    drains.  Sockets are expected to carry extra attributes set by the
    caller (ip, host, port, create_time, recved_data, recved_times,
    is_closed) -- assumed from usage below; confirm against the caller.
    """
    def __init__(self, buf_size=16*1024):
        self.buf_size = buf_size      # per-socket backpressure threshold (bytes)
        self.sock_dict = {}           # sock -> its paired peer sock
        self.read_set = []
        self.write_set = []
        self.error_set = []           # every registered sock; also the liveness set
        # sock => Buf
        self.send_buf = {}
        self.running = True
    def __str__(self):
        # Debug dump of all tracked sets and buffer sizes.
        outs = ["Pipe Sockets:"]
        outs.append("buf_size=%d" % self.buf_size)
        outs.append("running=%d" % self.running)
        outs.append("")
        outs.append("socket dict:")
        for s in self.sock_dict:
            outs.append(" %s =%s" % (s, self.sock_dict[s]))
        outs.append("read dict:")
        for s in self.read_set:
            outs.append(" %s" % s)
        outs.append("write dict:")
        for s in self.write_set:
            outs.append(" %s" % s)
        outs.append("error dict:")
        for s in self.error_set:
            outs.append(" %s" % s)
        outs.append("send buf:")
        for s in self.send_buf:
            buf = self.send_buf[s]
            outs.append(" %s size=%d num=%d" % (s, buf.size, buf.num))
        return "\n".join(outs)
    def run(self):
        # Start the relay loop on a background thread.
        self.down_th = threading.Thread(target=self.pipe)
        self.down_th.start()
    def stop(self):
        # Signals pipe() to exit; the loop then closes all sockets.
        self.running = False
    def add_socks(self, s1, s2):
        """Register s1<->s2 as a relay pair (both become non-blocking)."""
        s1.setblocking(0)
        s2.setblocking(0)
        self.read_set.append(s1)
        self.read_set.append(s2)
        self.error_set.append(s1)
        self.error_set.append(s2)
        self.sock_dict[s1] = s2
        self.sock_dict[s2] = s1
    def try_remove(self, l, s):
        """Remove s from list l, ignoring it when absent."""
        try:
            l.remove(s)
        except:
            pass
    def close(self, s1, e):
        """Tear down s1 (reason tag *e*); close its peer too unless the
        peer still has buffered data to flush (then an empty-string
        sentinel is queued so the write path closes it later)."""
        if s1 not in self.sock_dict:
            # xlog.warn("sock not in dict")
            return
        s2 = self.sock_dict[s1]
        if s1 in self.send_buf:
            left1 = self.send_buf[s1].size
        else:
            left1 = 0
        # Orient the pair: the private-IP side is the local client.
        if utils.is_private_ip(s1.ip):
            local_sock = s1
            remote_sock = s2
        else:
            local_sock = s2
            remote_sock = s1
        create_time = time.time() - remote_sock.create_time
        xlog.debug("pipe close %s->%s run_time:%d upload:%d,%d download:%d,%d, by remote:%d, left:%d e:%r",
                   local_sock, remote_sock, create_time,
                   local_sock.recved_data, local_sock.recved_times,
                   remote_sock.recved_data, remote_sock.recved_times,
                   s1==remote_sock, left1, e)
        # Heuristic: a single client burst on port 443 that died looks
        # like an SNI-based reset (rule update is currently disabled).
        if local_sock.recved_data > 0 and local_sock.recved_times == 1 and remote_sock.port == 443 and \
                ((s1 == local_sock and create_time > 30) or (s1 == remote_sock)):
            host = remote_sock.host
            xlog.debug("SNI:%s fail.", host)
            #g.domain_cache.update_rule(host, 443, "gae")
        del self.sock_dict[s1]
        self.try_remove(self.read_set, s1)
        self.try_remove(self.write_set, s1)
        self.try_remove(self.error_set, s1)
        if s1 in self.send_buf:
            del self.send_buf[s1]
        s1.close()
        if s2 in self.send_buf and self.send_buf[s2].size:
            xlog.debug("pipe close %s e:%s, but s2:%s have data(%d) to send",
                       s1, e, s2, self.send_buf[s2].size)
            # Empty chunk = "close after flush" sentinel for the writer.
            self.send_buf[s2].add("")
            return
        if s2 in self.sock_dict:
            self.try_remove(self.read_set, s2)
            self.try_remove(self.write_set, s2)
            self.try_remove(self.error_set, s2)
            del self.sock_dict[s2]
            if s2 in self.send_buf:
                del self.send_buf[s2]
            s2.close()
    def pipe(self):
        """Main relay loop: select() over all registered sockets, moving
        data between pairs until stop() is called."""
        def flush_send_s(s2, d1):
            # Temporarily switch to blocking mode to push d1 out whole.
            s2.setblocking(1)
            s2.settimeout(1)
            s2.sendall(d1)
            s2.setblocking(0)
        while self.running:
            if not self.error_set:
                time.sleep(1)
                continue
            try:
                r, w, e = select.select(self.read_set, self.write_set, self.error_set, 0.1)
                for s1 in list(r):
                    if s1 not in self.read_set:
                        # Removed by an earlier close() in this pass.
                        continue
                    try:
                        d = s1.recv(65535)
                    except Exception as e:
                        self.close(s1, "r")
                        continue
                    if not d:
                        # socket closed by peer.
                        self.close(s1, "r")
                        continue
                    s1.recved_data += len(d)
                    s1.recved_times += 1
                    s2 = self.sock_dict[s1]
                    if s2.is_closed():
                        continue
                    if s2 not in self.send_buf:
                        self.send_buf[s2] = Buf()
                    # First client packet of a TLS handshake (0x16) to a
                    # GFW-listed host: split the ClientHello so the SNI
                    # spans two segments.
                    if g.config.direct_split_SNI and\
                            s1.recved_times == 1 and \
                            s2.port == 443 and \
                            d[0] == '\x16' and \
                            g.gfwlist.check(s2.host):
                        p1 = d.find(s2.host)
                        if p1 > 1:
                            if "google" in s2.host:
                                p2 = d.find("google") + 3
                            else:
                                p2 = p1 + len(s2.host) - 6
                            d1 = d[:p2]
                            d2 = d[p2:]
                            try:
                                flush_send_s(s2, d1)
                            except Exception as e:
                                xlog.warn("send split SNI:%s fail:%r", s2.host, e)
                                self.close(s2, "w")
                                continue
                            self.send_buf[s2].add(d2)
                            d = ""
                            xlog.debug("pipe send split SNI:%s", s2.host)
                    if d:
                        self.send_buf[s2].add(d)
                        if s2 not in self.write_set:
                            self.write_set.append(s2)
                        # Backpressure: stop reading s1 until s2 drains.
                        if self.send_buf[s2].size > self.buf_size:
                            self.read_set.remove(s1)
                for s1 in list(w):
                    if s1 not in self.write_set:
                        continue
                    if s1 not in self.send_buf or self.send_buf[s1].num == 0:
                        self.write_set.remove(s1)
                        continue
                    dat = self.send_buf[s1].get()
                    if not dat:
                        # Empty sentinel queued by close(): finish teardown.
                        self.close(s1, "n")
                        continue
                    try:
                        sended = s1.send(dat)
                    except Exception as e:
                        self.close(s1, "w")
                        continue
                    if len(dat) - sended > 0:
                        # Partial send: requeue the unsent tail at the head.
                        self.send_buf[s1].restore(dat[sended:])
                        continue
                    if self.send_buf[s1].size == 0:
                        self.write_set.remove(s1)
                    # Buffer drained below threshold: resume reading the peer.
                    if self.send_buf[s1].size < self.buf_size:
                        s2 = self.sock_dict[s1]
                        if s2 not in self.read_set and s2 in self.sock_dict:
                            self.read_set.append(s2)
                for s1 in list(e):
                    self.close(s1, "e")
            except Exception as e:
                xlog.exception("pipe except:%r", e)
        # stop() requested: tear down every remaining socket.
        for s in list(self.error_set):
            self.close(s, "stop")
        xlog.info("pipe stopped.")
|
unknown
|
codeparrot/codeparrot-clean
| ||
#ifndef RUBY_BACKWARD2_ASSUME_H /*-*-C++-*-vi:se ft=cpp:*/
#define RUBY_BACKWARD2_ASSUME_H
/**
* @file
* @author Ruby developers <ruby-core@ruby-lang.org>
* @copyright This file is a part of the programming language Ruby.
* Permission is hereby granted, to either redistribute and/or
* modify this file, provided that the conditions mentioned in the
* file COPYING are met. Consult the file for details.
* @warning Symbols prefixed with either `RBIMPL` or `rbimpl` are
* implementation details. Don't take them as canon. They could
* rapidly appear then vanish. The name (path) of this header file
* is also an implementation detail. Do not expect it to persist
* at the place it is now. Developers are free to move it anywhere
* anytime at will.
* @note To ruby-core: remember that this header can be possibly
* recursively included from extension libraries written in C++.
* Do not expect for instance `__VA_ARGS__` is always available.
* We assume C99 for ruby itself but we don't assume languages of
* extension libraries. They could be written in C++98.
* @brief Defines #ASSUME / #RB_LIKELY / #UNREACHABLE
*/
#include "ruby/internal/config.h"
#include "ruby/internal/assume.h"
#include "ruby/internal/has/builtin.h"
#define ASSUME RBIMPL_ASSUME /**< @old{RBIMPL_ASSUME} */
#define UNREACHABLE RBIMPL_UNREACHABLE() /**< @old{RBIMPL_UNREACHABLE} */
#define UNREACHABLE_RETURN RBIMPL_UNREACHABLE_RETURN /**< @old{RBIMPL_UNREACHABLE_RETURN} */
/* likely */
#if RBIMPL_HAS_BUILTIN(__builtin_expect)
/**
* Asserts that the given Boolean expression likely holds.
*
* @param x An expression that likely holds.
*
* @note Consider this macro carefully. It has been here since when CPUs were
* like babies, but contemporary processors are beasts. They are
 * smarter than mere mortals like us today. Their branch predictions
* highly expectedly outperform your use of this macro.
*/
# define RB_LIKELY(x) (__builtin_expect(!!(x), 1))
/**
* Asserts that the given Boolean expression likely doesn't hold.
*
* @param x An expression that likely doesn't hold.
*/
# define RB_UNLIKELY(x) (__builtin_expect(!!(x), 0))
#else
# define RB_LIKELY(x) (x)
# define RB_UNLIKELY(x) (x)
#endif
#endif /* RUBY_BACKWARD2_ASSUME_H */
|
c
|
github
|
https://github.com/ruby/ruby
|
include/ruby/backward/2/assume.h
|
#/u/GoldenSights
import traceback
from dateutil.parser import parse as dateparse
import string
import datetime
import time
import praw
import sqlite3
import re
""" USER CONFIG """
USERAGENT = ""
#Describe the bot and what it does. Include your username
USERNAME = "GoldenSights"
#This is the bot's username
PASSWORD = ""
#This is the bot's password
SUBREDDIT = "Goldtesting"
#This is the subreddit where the bot finds the schedules
#It should be private with only the team of moderators
TITLESEPARATOR = "||"
#This is what demarcates the timestamp from the sub from the title
#This should not be a naturally occuring part of any title
#Example: "15 December 2014 ||| GoldTesting ||| Welcome to the subreddit"
# ^Time to post ^Sub to post ^Title of post
IGNORE_FLAG = "#"
#If this character is THE FIRST CHARACTER IN THE TITLE,
#The bot will ignore that post. Used for meta / discussion.
SCHEDULEDFLAIR_TEXT = "Scheduled!"
SCHEDULEDFLAIR_CSS = "scheduled"
#This flair will be assigned to the source when the source is scheduled
POSTEDFLAIR_TEXT = "Post made!"
POSTEDFLAIR_CSS = "posted"
#This flair will be assigned to the source when the post is made
MAXPOSTS = 3
#The number of items you want to get from /new. Recommended 100
ALLOWOTHEREDITS = False
#Are users allowed to edit other peoples' post schedules?
WAIT = 30
#How many seconds in between loop cycles. Completely inactive during this time.
ADMINS = ["ApexRedditr", "GoldenSights"]
#These are the people who will get tracebacks when the bot has problems.
TRACEBACK_SUBJECT = "SchedulizerM Error traceback"
POSTEDCOMMENT = "Your post to /r/%s has been created. %s"
#Made in the source when the post is made
FOOTER = """
_____
If any information is incorrect, reply to this comment with the incorrect key,
a colon, and new value. See the
[Bot code](https://github.com/voussoir/reddit/tree/master/Schedulizer-ModTeam)
page for examples. Only make 1 edit per line.
A foolproof time format is
"DD Monthname YYYY HH:MM". All times are in UTC
([Timezone map](http://www.timeanddate.com/time/map/))
Deleting your post will cause it to be removed from the schedule.
If you think the bot is down, send it
[this message](http://www.reddit.com/message/compose?to=%s&subject=Ping&message=Ping).
"""%USERNAME
SCHEDULECOMMENT = """
Your post has been scheduled. Please check that this information is correct:
"""
#Made in the source when the source is made
ERRORCOMMENT = """
Encountered the following errors:
%s
The post will use placeholder values until you correct the information
_______
"""
ERRORDISTINGUISHFAIL = "Attempted to distinguish post and failed."
ERRORSTICKYFAIL = "Attempted to sticky post and failed."
ERRORDATETIME = '!! DateTime: Could not understand time format, or date is invalid. You entered: `%s`'
ERRORTOOEARLY = '!! DateTime: The time you have entered is before present time. You entered `%s`'
ERRORTITLEFORM = '!! Title: Title expected 3 attributes separated by `' + TITLESEPARATOR + '`'
ERRORLONGTITLE = "!! Title: Your title is too long. Max 300 characters, you have %d"
ERRORSUBREDDIT = '!! Reddit: Subbreddit /r/%s could not be found'
ERRORNOTALLOWED = "!! Reddit: Bot is not allowed to submit to /r/%s."
ERRORUNKNOWNCOMMAND = "Did not understand the command: `%s`"
ERRORCRITICAL = '\n\nBecause of a critical post error, your chosen timestamp has been forfeited. You will need to edit it along with the other keys.\n\n'
IMPOSSIBLETIME = 2147483646
""" All done! """
# Optional local `bot` module overrides the placeholder credentials above.
try:
    import bot
    #USERNAME = bot.uG
    PASSWORD = bot.pG
    USERAGENT = bot.aG
except ImportError:
    pass
print('Loading database')
# Schedule rows: one per source post; POST holds the created post's id,
# or "None" while pending / "meta" for ignored posts.
sql = sqlite3.connect('sql.db')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS schedules(ID TEXT, TIME INT, REDDIT TEXT, TITLE TEXT, DIST INT, STICKY INT, FLAIR TEXT, FLCSS TEXT, POST TEXT)')
#                                                 0        1         2            3           4         5           6           7           8
sql.commit()
print('Logging in')
r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)
def getTime(as_unix):
    """Return the current UTC time.

    as_unix: when truthy, return the POSIX timestamp (float);
             otherwise return a timezone-aware datetime.

    The parameter was previously named ``bool``, shadowing the builtin;
    every call site in this file passes it positionally, so the rename
    is backward-compatible.
    """
    now = datetime.datetime.now(datetime.timezone.utc)
    return now.timestamp() if as_unix else now
def processpost(inputpost):
    """Parse a source post's title into a schedule row and store it.

    *inputpost* is a praw submission (or a post id string, which is
    fetched first).  The title format is
    ``<time> || <subreddit + option tags> || <title>``; tags [d]/[s]
    request distinguish/sticky, [f:..]/[fc:..] set flair text/css.
    Parse failures are collected into an error comment; a critical
    failure parks the row at IMPOSSIBLETIME so it never fires.
    """
    if isinstance(inputpost, str):
        if 't3_' not in inputpost:
            inputpost = 't3_' + inputpost
        inputpost = r.get_info(thing_id=inputpost)
    sourceid = inputpost.id
    print('Schedulizing post ' + sourceid)
    nowstamp = getTime(True)
    sourcetitle = inputpost.title
    sourcesplit = sourcetitle.split(TITLESEPARATOR)
    errors = []
    critical = False
    dosticky = 0
    dodist = 0
    try:
        # Placeholders so the error comment can still render something
        # when the title has fewer than 3 segments.
        posttime = "?"
        postsub = "?"
        posttitle = "?"
        postflair = ""
        postflcss = ""
        posttime = sourcesplit[0]
        postsub = sourcesplit[1]
        postsub = postsub.replace('/r/', '')
        if '[d]' in postsub.lower():
            dodist = 1
        if '[s]' in postsub.lower():
            dosticky = 1
        regex = re.search("\[f:[^\]]*\]", postsub, re.IGNORECASE)
        if regex:
            postflair = regex.group(0)
            postflair = postflair[3:-1]
        regex = re.search("\[fc:[^\]]*\]", postsub, re.IGNORECASE)
        if regex:
            postflcss = regex.group(0)
            postflcss = postflcss[4:-1]
        elif postflair != "":
            # No explicit css class: derive one from the flair text.
            postflcss = removespecial(postflair)
        postsubsplit = postsub.split(' ')
        while '' in postsubsplit:
            postsubsplit.remove('')
        postsub = postsubsplit[0]
        # Re-join in case the title itself contained the separator.
        posttitle = '||'.join(sourcesplit[2:])
    except IndexError:
        errors.append(ERRORTITLEFORM)
        critical = True
    try:
        posttimerender = dateparse(posttime)
        posttimerender = posttimerender.replace(tzinfo=datetime.timezone.utc)
        posttimestamp = posttimerender.timestamp()
        if posttimestamp < nowstamp:
            errors.append(ERRORTOOEARLY % posttime)
            critical = True
    except:
        #December 31, 2500
        posttimestamp = IMPOSSIBLETIME
        errors.append(ERRORDATETIME % posttime)
        critical = True
    try:
        validatesubreddit(postsub)
    except:
        errors.append(ERRORSUBREDDIT % postsub)
        critical = True
    #ID TEXT, TIME INT, REDDIT TEXT, TITLE TEXT, DIST INT, STICKY INT, FLAIR TEXT, FLCSS TEXT, POST TEXT
    #  0        1         2            3           4         5           6           7           8
    if critical:
        posttimestamp = IMPOSSIBLETIME
    datalist = [sourceid, posttimestamp, postsub, posttitle, dodist, dosticky, postflair, postflcss, "None"]
    cur.execute('SELECT * FROM schedules WHERE ID=?', [sourceid])
    fetch = cur.fetchone()
    if not fetch:
        cur.execute('INSERT INTO schedules VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', datalist)
        sql.commit()
    schedulecomment = buildcomment(datalist, errors, critical)
    print('Writing comment')
    inputpost.add_comment(schedulecomment)
    inputpost.set_flair(flair_text=SCHEDULEDFLAIR_TEXT, flair_css_class=SCHEDULEDFLAIR_CSS)
def updatepost(comment):
    """Apply "key: value" edit commands from *comment* to its post's schedule row.

    Each non-empty line of the comment body is parsed as ``key: value``
    and mapped onto a column of the schedules table.  The updated row
    is re-validated with ispostvalid(), written back, and a
    confirmation (or error) table is posted as a reply.  Edits are
    allowed for the post author, for everyone when ALLOWOTHEREDITS is
    set, or for admins.
    """
    source = comment.submission
    print('Updating schedule for ' + source.id + ' via comment ' + comment.id)
    pauthor = source.author.name
    cauthor = comment.author.name
    # NOTE(review): the admin clause compares the POST author against
    # ADMINS; to let admins edit other people's schedules it should
    # probably test cauthor instead -- confirm intent before changing.
    if ALLOWOTHEREDITS or (pauthor == cauthor) or any(pauthor.lower() == admin.lower() for admin in ADMINS):
        cur.execute('SELECT * FROM schedules WHERE ID=?', [source.id])
        data = cur.fetchone()
        if data:
            data = list(data)
            errors = []
            commentsplit = comment.body.split('\n')
            while '' in commentsplit:
                commentsplit.remove('')
            for line in commentsplit:
                line = line.split(':')
                line[0] = line[0].replace(' ', '')
                command = line[0].lower()
                # Re-join so values containing ':' survive the split.
                arg = ':'.join(line[1:])
                if command in ['time', 'timestamp']:
                    try:
                        posttimerender = dateparse(arg)
                        posttimerender = posttimerender.replace(tzinfo=datetime.timezone.utc)
                        posttimestamp = posttimerender.timestamp()
                    except:
                        #December 31, 2500
                        posttimestamp = IMPOSSIBLETIME
                        # Fixed: previously formatted the undefined name
                        # `posttime`, raising NameError instead of
                        # reporting the bad date string.
                        errors.append(ERRORDATETIME % arg)
                    data[1] = posttimestamp
                elif command in ['reddit', 'subreddit', 'sr']:
                    try:
                        arg = arg.replace(' ', '')
                        arg = arg.replace('/r/', '')
                        validatesubreddit(arg)
                    except:
                        #This will be errored in the upcoming `ispostvalid` line
                        pass
                    data[2] = arg
                elif command in ['title']:
                    data[3] = arg
                elif command in ['distinguish', 'dist', 'd']:
                    # Fixed: the second test must be `elif` -- once arg
                    # becomes int 0, arg.lower() raised AttributeError.
                    if arg.lower() in ['0', 'no', 'false', 'off']:
                        arg = 0
                    elif arg.lower() in ['1', 'yes', 'true', 'on']:
                        arg = 1
                    data[4] = arg
                elif command in ['sticky', 's']:
                    if arg.lower() in ['0', 'no', 'false', 'off']:
                        arg = 0
                    elif arg.lower() in ['1', 'yes', 'true', 'on']:
                        arg = 1
                    data[5] = arg
                elif command in ['flair-text', 'flairtext', 'flair_text']:
                    data[6] = arg
                elif command in ['flair-css', 'flaircss', 'flair_css']:
                    data[7] = removespecial(arg)
                else:
                    errors.append(ERRORUNKNOWNCOMMAND % command)
            print('\tChecking schedule validity')
            status = ispostvalid(data, errors)
            if status[0] == False:
                data[1] = IMPOSSIBLETIME
                critical = True
            else:
                critical = False
            # Pass a copy: buildcomment/buildtable must not clobber the
            # row we are about to write back.
            schedulecomment = buildcomment(data[:], errors, critical)
            print('\tWriting comment')
            comment.reply(schedulecomment)
            cur.execute('DELETE FROM schedules WHERE ID=?', [source.id])
            cur.execute('INSERT INTO schedules VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', data)
            sql.commit()
            print('\tDone.')
    else:
        print(cauthor + ' may not edit ' + pauthor + "'s post")
def validatesubreddit(sr):
    """Raise (via praw) when subreddit *sr* cannot be fetched.

    The exception is intentionally allowed to propagate: callers wrap
    this in try/except to detect bad subreddit names.
    """
    for prefix in ('/r/', 'r/', '/'):
        sr = sr.replace(prefix, '')
    r.get_subreddit(sr, fetch=True)
def ispostvalid(inputdata, errors):
    """Validate a schedule row, appending messages to *errors*.

    Checks that the scheduled time is in the future, that the target
    subreddit exists, and that the title fits reddit's 300-character
    limit.  Returns ``[ok, errors]`` where ok is False when any check
    failed.
    """
    now = getTime(True)
    ok = True
    if inputdata[1] < now:
        # Render the stale timestamp in a readable form for the report.
        when = datetime.datetime.utcfromtimestamp(inputdata[1])
        errors.append(ERRORTOOEARLY % datetime.datetime.strftime(when, "%B %d %Y %H:%M"))
        ok = False
    try:
        validatesubreddit(inputdata[2])
    except:
        print('\tBad subreddit: ' + inputdata[2])
        errors.append(ERRORSUBREDDIT % inputdata[2])
        ok = False
    if len(inputdata[3]) > 300:
        errors.append(ERRORLONGTITLE % len(inputdata[3]))
        ok = False
    return [ok, errors]
def buildcomment(datalist, errors, critical=False):
    """Assemble the bot's reply comment for a schedule row.

    Starts from the normal confirmation text, switches to the error
    template when *errors* is non-empty, optionally appends the
    critical-failure notice, then the rendered data table and footer.
    """
    if errors:
        body = ERRORCOMMENT % "\n\n".join(errors)
    else:
        body = SCHEDULECOMMENT
    if critical:
        body += ERRORCRITICAL
    return body + buildtable(datalist) + FOOTER
#ID TEXT, TIME INT, REDDIT TEXT, TITLE TEXT, DIST INT, STICKY INT, FLAIR TEXT, FLCSS TEXT, POST TEXT
#  0        1         2            3           4         5           6           7           8
def buildtable(inputdata):
    """Render a schedule row as a markdown key/value table.

    *inputdata* is a 9-item row in the schedules-table column order
    (see comment above).  Works on a copy: the original mutated the
    caller's list in place, silently corrupting the row for any later
    use by the caller.
    """
    row = list(inputdata)
    print(row[1], type(row[1]))  # Troubleshooting with Apex
    timeobj = datetime.datetime.utcfromtimestamp(row[1])
    row[1] = datetime.datetime.strftime(timeobj, "%B %d %Y %H:%M UTC")
    row[2] = '/r/' + row[2]
    row[3] = '`' + row[3] + '`'
    row[4] = "True" if row[4] == 1 else "False"
    row[5] = "True" if row[5] == 1 else "False"
    # Drop the ID (row 0) and POST (last) columns from the display.
    row = row[1:-1]
    table = """
Key | Value
:- | :-
Time | {0}
Subreddit | {1}
Title | {2}
Distinguish | {3}
Sticky | {4}
Flair-text | {5}
Flair-CSS | {6}
""".format(*row)
    return table
def removespecial(inputstr):
    """Strip every character that is not an ASCII letter or digit."""
    allowed = set(string.ascii_letters + string.digits)
    return "".join(ch for ch in inputstr if ch in allowed)
def manage_new():
    """Scan SUBREDDIT/new for unscheduled posts and schedule them.

    Posts whose title starts with IGNORE_FLAG are stored as "meta"
    rows so they are skipped on later passes without re-fetching.
    """
    print('Managing ' + SUBREDDIT + '/new')
    subreddit = r.get_subreddit(SUBREDDIT)
    new = list(subreddit.get_new(limit=MAXPOSTS))
    for post in new:
        pid = post.id
        cur.execute('SELECT * FROM schedules WHERE ID=?', [pid])
        if not cur.fetchone():
            # Only posts we have never seen before.
            if post.title[0] != IGNORE_FLAG:
                processpost(post)
            else:
                # Remember meta posts so they are never processed.
                data = [post.id, 1, "", "", 0, 0, "", "", "meta"]
                cur.execute('INSERT INTO schedules VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)', data)
                sql.commit()
def manage_unread():
    """Process the bot's inbox: pings, admin kill switch, edit comments.

    Private messages: "ping" gets a "Pong" reply; "kill" from an admin
    shuts the bot down.  Comment replies made in SUBREDDIT are treated
    as schedule-edit commands and passed to updatepost().
    """
    print('Managing inbox')
    inbox = list(r.get_unread(limit=100))
    for message in inbox:
        if isinstance(message, praw.objects.Message):
            if "ping" in message.subject.lower():
                message.reply("Pong")
                print('Responding to ping')
            try:
                mauthor = message.author.name
                if any(mauthor.lower() == admin.lower() for admin in ADMINS):
                    if "kill" in message.subject.lower():
                        alertadmins("Hard shutdown", "The bot is being killed by " + mauthor)
                        quit()
            except AttributeError:
                # System messages have no author.
                pass
        elif isinstance(message, praw.objects.Comment):
            commentsub = message.subreddit.display_name
            if commentsub.lower() == SUBREDDIT.lower():
                updatepost(message)
        message.mark_as_read()
def manage_schedule():
    """Fire due schedules: prune deleted sources, then create posts.

    Pending rows (POST == "None") are fetched oldest-first; sources
    that were deleted or removed are dropped.  For each row whose time
    has passed, the post is submitted to its target subreddit and the
    optional distinguish/sticky/flair steps are applied best-effort.
    """
    print('Managing schedules')
    cur.execute('SELECT * FROM schedules WHERE POST =?', ['None'])
    fetch = cur.fetchall()
    fetch = list(fetch)
    fetch.sort(key=lambda x: x[1])
    reread = False
    idlist = ['t3_'+i[0] for i in fetch]
    submissionlist = []
    print('Checking for deletions')
    # get_info accepts at most 100 fullnames per call.
    while len(idlist) > 0:
        submissionlist += r.get_info(thing_id=idlist[:100])
        idlist = idlist[100:]
    for item in submissionlist:
        if (not item.author) or (item.banned_by):
            # Author deleted it, or a moderator removed it.
            print('\t' + item.id + ' has been deleted')
            cur.execute('DELETE FROM schedules WHERE ID=?', [item.id])
            sql.commit()
            reread = True
    if reread:
        # Re-fetch so pruned rows are not considered below.
        cur.execute('SELECT * FROM schedules WHERE POST =?', ['None'])
        fetch = cur.fetchall()
        fetch = list(fetch)
        fetch.sort(key=lambda x: x[1])
    nowstamp = getTime(True)
    for schedule in fetch:
        postid = schedule[0]
        print('Checking schedule ' + postid, end="")
        posttime = int(schedule[1])
        if posttime < nowstamp:
            print()
            print('\tPreparing to post')
            post = r.get_info(thing_id="t3_" + postid)
            ptitle = schedule[3]
            psub = schedule[2]
            print('\tSubmitting post')
            try:
                if post.is_self:
                    pbody = post.selftext
                    newpost = r.submit(psub, ptitle, text=pbody)
                else:
                    purl = post.url
                    newpost = r.submit(psub, ptitle, url=purl, resubmit=True)
                errors = []
                # Distinguish/sticky/flair are best-effort: failures are
                # reported back to the source post but don't abort.
                if schedule[4] == 1:
                    try:
                        print('\tDistinguishing')
                        newpost.distinguish()
                    except:
                        print('\tDistinguish failed')
                        errors.append(ERRORDISTINGUISHFAIL)
                if schedule[5] == 1:
                    try:
                        print('\tStickying')
                        newpost.sticky()
                    except:
                        print('\tSticky failed')
                        errors.append(ERRORSTICKYFAIL)
                if schedule[6] != "" or schedule[7] != "":
                    try:
                        print('\tFlairing')
                        newpost.set_flair(flair_text=schedule[6], flair_css_class=schedule[7])
                    except:
                        print('\tFlair failed')
                newsub = newpost.subreddit.display_name
                newlink = newpost.short_link
                newid = newpost.id
                newcomment = POSTEDCOMMENT % (newsub, newlink)
                newcomment += '\n\n'.join(errors)
                # Mark the row as done by storing the created post's id.
                cur.execute('UPDATE schedules SET POST=? WHERE ID=?', [newid, postid])
                sql.commit()
                print('Flairing source.')
                post.add_comment(newcomment)
                post.set_flair(flair_text=POSTEDFLAIR_TEXT, flair_css_class=POSTEDFLAIR_CSS)
            except praw.errors.APIException as error:
                if error.error_type == "SUBREDDIT_NOTALLOWED":
                    # Park the row so it stops retrying, and tell the author.
                    print("\tNOT ALLOWED IN SUBREDDIT!")
                    cur.execute('UPDATE schedules SET TIME=? WHERE ID=?', [IMPOSSIBLETIME, postid])
                    sql.commit()
                    scheduledata = list(schedule)
                    scheduledata[1] = IMPOSSIBLETIME
                    comment=buildcomment(scheduledata, [ERRORNOTALLOWED%psub], critical=True)
                    post.add_comment(comment)
        else:
            # Not due yet; show the remaining seconds.
            print(" : T-" + str(round(posttime - nowstamp)))
def alertadmins(messagesubject, messagetext):
    """Send a private message to every configured admin.

    Delivery failures are reported on stdout and swallowed so that one
    unreachable admin does not block the others.
    """
    for recipient in ADMINS:
        print('Messaging ' + recipient)
        try:
            r.send_message(recipient, messagesubject, messagetext)
        except:
            print('COULD NOT MESSAGE ADMIN')
# Main loop: run the three managers forever; on any error, message the
# admins a timestamped, indented traceback and keep going.
while True:
    try:
        manage_new()
        manage_unread()
        manage_schedule()
    except Exception as e:
        error_message = traceback.format_exc()
        print(error_message)
        now = getTime(False)
        now = datetime.datetime.strftime(now, "%B %d %H:%M:%S UTC")
        # Indent the traceback so it renders as a code block on reddit.
        error_message = ' ' + error_message
        error_message = error_message.replace('\n', '\n ')
        error_message += '\n' + str(now)
        alertadmins(TRACEBACK_SUBJECT, error_message)
    print("Sleeping\n")
    time.sleep(WAIT)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
APVER='basen sauna 07.10.2015' # for olinuxino
# get_conf kasutusse ka mac jaoks
# get_ip lisatud ip jalgimiseks, sh tun olemasolu puhul
# molemad uniscada.py sisse, mis neid vajavad (votmesonade mac ja ip kontroll!)
#####################################################
#### functions #######
def comm_doall():
    ''' Handle the communication with io channels via modbus and the monitoring server '''
    ############### conn up or down issues ################
    global udpup
    #if udp.sk.get_state()[3] == 1: # restore now the variables from the server - with 2 queries the rising edge gets missed
    if udp.sk.get_state()[0] == 1 and udpup == 0: # link just came up: restore variables from the server
        udpup = 1
        try:
            hw = hex(mb[0].read(1,257,1)[0]) # assuming it5888, mba 1!
        except:
            hw = 'n/a'
        #send via buffer only!
        # use udp.send(sta_reg,status,val_reg,value) # only status is int, the rest are str
        sendstring='AVV:HW '+hw+', APP '+APVER+'\nAVS:'
        if 'rescue' in os.path.basename(__file__):
            udp.send(['AVS',2,'AVV','HW '+hw+', APP '+APVER])
            sendstring += '2\n' # critical service status
        else:
            udp.send(['AVS',0,'AVV','HW '+hw+', APP '+APVER])
            sendstring += '0\n'
        udp.send(['TCS',1,'TCW','?']) # restore via buffer
        #sendstring += '\nTCW:?\n' # traffic counter variable to be restored
        for i in range(3):
            udp.send(['H'+str(i+1)+'CS',1,'H'+str(i+1)+'CW','?']) # cumulative heat energy restoration via buffer
            #sendstring += '\nTCW:?\n' # cumulative traffic to be restored
        ac.ask_counters()
        log.info('******* uniscada connectivity up, sent AVV and tried to restore counters and some variables ********')
        udp.udpsend(sendstring) # AVV only, the rest go via buffer
    if udp.sk.get_state()[0] == 0: # link is down
        udpup = 0
        if udp.sk.get_state()[1] > 300 + udp.sk.get_state()[2] * 300: # total 10 min down, cold reboot needed
            # age and neverup taken into account from udp.sk statekeeper instance
            msg = '**** going to cut power NOW (at '+str(int(time.time()))+') via 0xFEED in attempt to restore connectivity ***'
            log.warning(msg)
            udp.dump_buffer() # save unsent messages as file
            with open("/root/d4c/appd.log", "a") as logfile:
                logfile.write(msg)
            time.sleep(1)
            mb[0].write(1, 999,value = 0xFEED) # ioboard ver > 2.35 cuts power to start cold reboot (see reg 277)
            #if that does not work, appd and python main* must be stopped, to cause 5V reset without 0xFEED functionality
            try:
                p.subexec('/root/d4c/killapp',0) # to make sure power will be cut in the end
            except:
                log.warning('executing /root/d4c/killapp failed!')
    #######################
    udp.unsent() # drop stale leftovers from the send buffer
    d.doall() # digital channels: reads often, reports on changes and periodically
    ac.doall() # analogue channels: reads and occasionally reports
    for mbi in range(len(mb)): # check modbus connectivity
        mberr=mb[mbi].get_errorcount()
        if mberr > 0: # errors
            print('### mb['+str(mbi)+'] problem, errorcount '+str(mberr)+' ####')
            time.sleep(2) # not to reach the errorcount 30 too fast!
    r.regular_svc() # UPW,UTW, ipV, baV, cpV. mfV are default services.
    got = udp.comm() # reads and sends UDP, with a ~0.1 s delay inside; returns a dict of received key:value pairs
    got_parse(got) # see next def
    # once again to keep up server comm despite possible extra communication
    got = udp.udpread() # read only!
    got_parse(got) # see next def
def got_parse(got):
    ''' Dispatch acks/commands received from the monitoring server.

    *got* is the key:value dict returned by udp.comm()/udpread(); it is
    offered to the analogue, digital and command parsers in turn, and a
    couple of critical commands are double-handled here as a safety net.
    '''
    if got != {} and got != None: # got something from monitoring server
        ac.parse_udp(got) # chk if setup or counters need to be changed
        d.parse_udp(got) # chk if setup ot toggle for di
        todo = p.parse_udp(got) # any commands or setup variables from server?
        # a few command to make sure they are executed even in case of udp_commands failure
        if todo == 'REBOOT':
            # NOTE(review): `stop` is a local here -- assigning it has no
            # effect beyond the print below; confirm whether a global
            # stop flag was intended.
            stop = 1 # in case sys.exit inside p does not take effect for some reason
            print('emergency stopping by main loop, stop=',stop)
            udp.dump_buffer()
        if todo == 'FULLREBOOT':
            print('emergency rebooting by main loop')
            udp.dump_buffer()
            p.subexec('reboot',0) # no []
        # end making sure
        #print('main: todo',todo) # debug
        p.todo_proc(todo) # execute other possible commands
def app_doall():
    ''' Application rules and logic for energy metering and consumption limiting, via services if possible '''
    # Sauna controller pass: toggle LED strips from push buttons, drive
    # panel power, update the Seneca S401 display rows (temperatures,
    # battery, feet/door sensor), manage battery charging and GPS.
    global ts, ts_app, self_di, self_ledstates, self_chlevel, self_fdvalue, self_panelpower, self_ts_gps
    ts_app = time.time()
    res= 0
    footwarning = 0
    shvalue = None
    fdvalue = 999
    values = None
    voltage = None
    chlevel = 0
    di = d.get_divalues('DIW')
    do = d.get_divalues('DOW')
    if self_di != None and di != self_di:
        log.info('di changed: '+str(di)+', do: '+str(do)) ##
    # switch on panelpower if any of led strips is on
    # switch off panelpower if all led strips are off
    try:
        if di != self_di: # only changes
            ledsum = 0
            for i in range(4):
                if self_di[i] != None and di[i] != None and di[i] != self_di[i] and di[i] == 0: # change, press start
                    led[i].toggle()
                ledstate = led[i].get_state()
                if ledstate[0] != self_ledstates[i]:
                    log.info('light '+str(i + 1)+' new state '+str(ledstate[0]))
                    self_ledstates[i] = ledstate[0]
                    d.set_dovalue('LTW', i+1, ledstate[0]) # actual output service, fixed in dchannels.py 13.9.2015
                ledsum += ledstate[0] << i
            if ledsum > 0:
                panelpower = 1
            else:
                panelpower = 0
            if panelpower != panel.get_power():
                log.info('NEW panelpower '+str(panelpower))
                panel.set_power(panelpower)
                d.set_dovalue('PPS',1,panelpower)
            else:
                log.info('no change in panelpower '+str(panelpower))
            ## DATA FOR seneca S401 panel rows / via aochannels! panel update ##
            # rows: temp temp temp temp battery feet door
            for i in range(7): # panel values 7 rows
                if i == 0: # sauna temp
                    aivalue = ac.get_aivalue('T1W', 1)[0] # can be None!
                elif i == 1: # bath
                    aivalue = ac.get_aivalue('T2W', 1)[0] # panel row 2
                elif i == 2: # outdoor
                    aivalue = ac.get_aivalue('T3W', 1)[0] # panel row 3
                ##elif i == 3: # hotwater
                ##    aivalue = ac.get_aivalue('T4W', 1)[0] # panel row 4
                elif i == 4: # battery
                    batt_presence = ac.get_aivalues('BPW') # car and batt voltage presence
                    voltage = ac.get_aivalues('BTW') # panel row 5, sauna battery
                    shvalue = voltage[1] # sauna batt
                    #if voltage != None and voltage[0] != None and voltage[1] != None and voltage[0] > voltage[1] + 10 and voltage[0] > 13200: # recharging possible
                    if voltage != None and voltage[0] != None and voltage[1] != None and voltage[0] > 13300: # recharging possible
                        chlevel = 1 # FIXME
                        #if voltage[0] > voltage[1] + 1000: # low current initially
                        #    chlevel = 1
                        #elif voltage[0] < voltage[1] + 500: # directly together for faster charging ???? CHK if allowed, current and voltage
                        #    chlevel = 2
                        # possible battery charge stop
                        # NOTE(review): self_ts_batt is read/assigned here but
                        # is neither in the global statement above nor
                        # initialised at module level -- this branch raises
                        # UnboundLocalError (swallowed by the broad except
                        # below); confirm and fix the declaration.
                        if ts_app > self_ts_batt + 60: # move that to ioloop timers
                            self_ts_batt = ts_app
                            chlevel = 0 # disconnnect for a while once a minute, will reconnect if needed
                    else:
                        chlevel= 0 # no car voltage present or engine stopped
                    #log.info('batt charging level '+str(chlevel)+', voltages '+str(voltage)) ##
                    if chlevel != self_chlevel:
                        log.info('NEW batt charging level '+str(chlevel)+', voltages '+str(voltage))
                        d.set_dovalue('BCW', 1, (chlevel & 1)) # via resistor
                        d.set_dovalue('BCW', 2, (chlevel & 2) >> 1)
                        self_chlevel = chlevel
                elif i == 5: # feet and door chk via AI1. values 3600, 3150, 2580
                    aivalue = ac.get_aivalue('A1V',1)[0] # ai1 voltage 0..4095 mV, pullup 1 k on
                    if aivalue != None:
                        # Voltage windows encode a 2-bit feet/door state.
                        if aivalue > 3700:
                            ##log.warning('feet/door line cut!')
                            fdvalue = 999
                        elif aivalue > 2400 and aivalue < 2800:
                            fdvalue = 1
                        elif aivalue > 2800 and aivalue < 3300:
                            fdvalue = 2
                        elif aivalue > 3300 and aivalue < 3700:
                            fdvalue = 3
                        elif aivalue < 1000:
                            fdvalue = 0 # ok
                        #log.info('feet/door aivalue '+str(aivalue)+', shvalue '+str(fdvalue)) ##
                        if fdvalue != 999 and fdvalue != self_fdvalue:
                            d.set_divalue('FDW',1,(fdvalue & 1))
                            d.set_divalue('FDW',2,(fdvalue & 2) >> 1)
                            log.info('NEW feet/door aivalue '+str(aivalue)+', fdvalue '+str(fdvalue))
                            self_fdvalue = fdvalue
                        shvalue = (fdvalue & 1)
                elif i == 6: # door
                    shvalue = (self_fdvalue & 2) >> 1 # door bit in self_dvalue
                #######
                if i < 4: # temperatures, i = 0..3
                    if aivalue != None:
                        shvalue = int(round(aivalue / 10.0, 0))
                    else:
                        shvalue = 9999 # sensor disconnected
                linereg = sorted(list(panel.get_data().keys()))[i]
                panel.send(linereg, shvalue) ## sending to panel row with correct reg address
                log.debug('sent to panel '+str((linereg, shvalue))) ##
                ac.set_aivalue('PNW', i + 1, shvalue) # to report only
                #ac.set_aosvc('PNW', i + 1, shvalue) # panel row register write in aochannels
                #log.debug('PNW.'+str(i + 1)+' '+str(shvalue))
            d.sync_do() # actual output writing
            self_di = di
            #ac.sync_ao() # no need with panel instance in use
            #print('app panelpower '+str(panel.get_power)) ##
        ## end panel update ##
    except:
        print('main app ERROR')
        traceback.print_exc()
    # GPS coordinates at most every 30 s.
    if gps and ts_app > self_ts_gps + 30:
        self_ts_gps = ts_app
        try:
            coord = gps.get_coordinates()
            if coord != None and coord[0] != None and coord[1] != None:
                ac.set_airaw('G1V',1,int(coord[0] * 1000000)) # lat
                ac.set_airaw('G2V',1,int(coord[1] * 1000000)) # lng
            else:
                log.warning('NO coordinates from GPS device, coord '+str(coord))
        except:
            print('gps ERROR')
            traceback.print_exc()
    #print('### main app end ###'+str(self_panelpower)) ####### app end ####
################ MAIN #################
# Top-level setup: logging, host identity, server communication channels,
# IO channel instances, GPS and solar-panel readers, then the endless loop.
import logging, os, sys
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
#logging.getLogger('acchannels').setLevel(logging.DEBUG) # bring acchannels logging forward
#logging.getLogger('dchannels').setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

# env variable HOSTNAME should be set before starting python
try:
    print('HOSTNAME is',os.environ['HOSTNAME'])
    # FIXME set OSTYPE
except:
    os.environ['HOSTNAME']='olinuxino' # to make sure it exists on background of npe too
    print('set HOSTNAME to '+os.environ['HOSTNAME'])
OSTYPE='archlinux'
print('OSTYPE',OSTYPE)

from droidcontroller.udp_commands import * # start with this; it also launches SQLgeneral
from droidcontroller.loadlimit import * # load limitation level 0..3 to be calculation
p = Commands(OSTYPE) # setup and commands from server
r = RegularComm(interval=120) # variables like uptime and traffic, not io channels
mac = udp.get_conf('mac', 'host_id.conf') # uniscada applies this for itself too
#ip = udp.get_ip() # also sets it for uniscada itself; taken automatically at initialization!
#print('mac ip', mac, ip)
# mac=mac_ip[0]
#r.set_host_ip(ip)
#print('mac, ip', mac, ip)
udp.setID(mac) # perhaps better via an env variable?
tcp.setID(mac) #
udp.setIP('46.183.73.35') # '195.222.15.51') # ('46.183.73.35') # mon server ip. only 195.222.15.51 has access to starman
udp.setPort(44445)

from droidcontroller.acchannels import *
from droidcontroller.dchannels import *
# the following instances are subclasses of SQLgeneral.
d = Dchannels(readperiod = 0, sendperiod = 120) # di and do. immediate notification, read as often as possible.
ac = ACchannels(in_sql = 'aicochannels.sql', readperiod = 5, sendperiod = 30) # counters, power. also 32 bit ai! trigger in aichannels
s.check_setup('aicochannels')
#s.check_setup('dichannels')
#s.check_setup('counters')
s.set_apver(APVER) # set version
##from droidcontroller.pic_update import *
##pic = PicUpdate(mb) # to replace ioboard fw should it be needed. use pic,update(mba, file)

from droidcontroller.read_gps import * #
gps = ReadGps(speed = 4800) # USB
from droidcontroller.panel_seneca import *
panel = PanelSeneca(mb, mba = 3, mbi = 0, power = 0) # actual. no negative!
#panel = PanelSeneca(mb, mba = 1, mbi = 0, linedict={400:-999,401:-999, 403:-999,404:-999, 406:-999,407:-999,409:-999}, power = 0) # test
####
#from droidcontroller.nagios import NagiosMessage # parallel messages directly to starman
#nagios = NagiosMessage(mac, 'service_energy_ee', nagios_ip='62.65.192.33', nagios_port=50000)
#udp.set_copynotifier(nagios.output_and_send) # what to use for sending message copies to nagios; if missing, no copies are sent.
####

ts = time.time() # needed for manual function testing
ts_app = ts
# last-seen states for 4 digital inputs / 4 LED outputs, plus misc app state
self_di = [None, None, None, None]
self_ledstates = [0, 0, 0, 0]
self_chlevel = 0
self_fdvalue = 0
self_panelpower = 0
self_ts_gps = time.time()
led = [] # lighting instances
from droidcontroller.statekeeper import *
for i in range(4): # button triggers
    led.append(StateKeeper(off_tout = 3600, on_tout = 0))
#from droidcontroller.statekeeper import * # compared to global variables this falls down by itself
#hp1fail = StateKeeper(off_tout=100) # heat pump 1 failure, permits enabling backup cooling
#hp2 = StateKeeper(off_tout=100) # heat pump 1 state; if up, backup cooling is not allowed
##udp.setID(udp.get_conf('mac', 'mac.conf')) # host id

if __name__ == '__main__':
    # check the states of the energy meters. all counters automatically?
    msg=''
    stop=0
    while stop == 0: # endless loop
        ts = time.time() # global for functions
        comm_doall() # communication with io and server
        app_doall() # application rules and logic, via services if possible
        #crosscheck() # check for phase consumption failures
        # #########################################
        if len(msg)>0:
            print(msg)
            udp.syslog(msg)
            msg=''
        #time.sleep(1) # main loop tick 0.1, larger for debugging
        sys.stdout.write('.') # dot without newline for main loop
        sys.stdout.flush()
    # main loop end, exit from application
|
unknown
|
codeparrot/codeparrot-clean
| ||
#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H
#define JEMALLOC_INTERNAL_EXTENT_DSS_H

/*
 * Precedence of the DSS (sbrk-managed data segment) relative to mmap when
 * allocating extents.
 */
typedef enum {
	dss_prec_disabled = 0,
	dss_prec_primary = 1,
	dss_prec_secondary = 2,
	dss_prec_limit = 3 /* One past the last valid precedence value. */
} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"

/* Printable names for dss_prec_t values; runtime option string. */
extern const char *dss_prec_names[];
extern const char *opt_dss;

/* Get/set the current DSS precedence. */
dss_prec_t extent_dss_prec_get(void);
bool extent_dss_prec_set(dss_prec_t dss_prec);
/* Allocate an extent from the DSS (implementation in extent_dss.c). */
void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit);
/* Address queries against the DSS range. */
bool extent_in_dss(void *addr);
bool extent_dss_mergeable(void *addr_a, void *addr_b);
void extent_dss_boot(void);
#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */
|
c
|
github
|
https://github.com/redis/redis
|
deps/jemalloc/include/jemalloc/internal/extent_dss.h
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import eventlet
from oslo_config import cfg
from eventlet import wsgi
from st2common import log as logging
from st2common.service_setup import setup as common_setup
from st2common.service_setup import teardown as common_teardown
from st2common.util.wsgi import shutdown_server_kill_pending_requests
from st2api.signal_handlers import register_api_signal_handlers
from st2api.listener import get_listener_if_set
from st2api import config
# Register config options before importing the app module (ordering appears
# deliberate so the options exist when `app` is imported).
config.register_opts()

from st2api import app

__all__ = [
    'main'
]

# Monkey-patch the stdlib for eventlet green threads. Real threads are left
# un-patched when running under a debugger (--use-debugger flag).
eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=False if '--use-debugger' in sys.argv else True,
    time=True)

LOG = logging.getLogger(__name__)

# How much time to give to the request in progress to finish in seconds before killing them
WSGI_SERVER_REQUEST_SHUTDOWN_TIME = 2
def _setup():
    """Run the common StackStorm service setup for the 'api' service
    (config parsing, DB, MQ exchanges, signal handlers, trigger types)."""
    common_setup(service='api', config=config, setup_db=True, register_mq_exchanges=True,
                 register_signal_handlers=True, register_internal_trigger_types=True)
def _run_server():
    """Bind the configured host/port and serve the WSGI app until shutdown.

    :return: 0 once ``wsgi.server`` exits.
    """
    host = cfg.CONF.api.host
    port = cfg.CONF.api.port

    LOG.info('(PID=%s) ST2 API is serving on http://%s:%s.', os.getpid(), host, port)

    max_pool_size = eventlet.wsgi.DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    worker_pool = eventlet.GreenPool(max_pool_size)
    sock = eventlet.listen((host, port))

    def queue_shutdown(signal_number, stack_frame):
        # Spawn the kill in a green thread so the signal handler returns fast.
        eventlet.spawn_n(shutdown_server_kill_pending_requests, sock=sock,
                         worker_pool=worker_pool, wait_time=WSGI_SERVER_REQUEST_SHUTDOWN_TIME)

    # We register a custom SIGINT handler which allows us to kill long running active requests.
    # Note: Eventually we will support draining (waiting for short-running requests), but we
    # will still want to kill long running stream requests.
    register_api_signal_handlers(handler_func=queue_shutdown)

    wsgi.server(sock, app.setup_app(), custom_pool=worker_pool)
    return 0
def _teardown():
    """Run the common StackStorm service teardown (counterpart of _setup)."""
    common_teardown()
def main():
    """Entry point: set up the service, run the WSGI server, always tear down.

    :return: process exit code (0 on clean shutdown, 1 on unexpected error).
    """
    try:
        _setup()
        return _run_server()
    except SystemExit as exit_code:
        # Propagate the original numeric exit status. Passing the exception
        # object itself to sys.exit() would print it to stderr and force
        # status 1 even for a clean SystemExit(0).
        sys.exit(exit_code.code)
    except KeyboardInterrupt:
        # Ctrl-C: shut down the TCP listener (if one was created) and report
        # a clean exit explicitly instead of falling through with None.
        listener = get_listener_if_set()

        if listener:
            listener.shutdown()
        return 0
    except Exception:
        LOG.exception('(PID=%s) ST2 API quit due to exception.', os.getpid())
        return 1
    finally:
        _teardown()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
XX. Proxy model inheritance
Proxy model inheritance across apps can result in syncdb not creating the table
for the proxied model (as described in #12286). This test creates two dummy
apps and calls syncdb, then verifies that the table has been created.
"""
import os
import sys
from django.conf import settings, Settings
from django.core.management import call_command
from django.db.models.loading import load_app
from django.test import TransactionTestCase
class ProxyModelInheritanceTests(TransactionTestCase):
    """Regression test for #12286: syncdb must create the table for a model
    proxied from another app.

    setUp installs two dummy apps ('app1', 'app2') located next to this file,
    loads them, runs syncdb, and imports their models; tearDown restores
    sys.path and INSTALLED_APPS.
    """

    def setUp(self):
        self.old_sys_path = sys.path[:]
        sys.path.append(os.path.dirname(os.path.abspath(__file__)))
        self.old_installed_apps = settings.INSTALLED_APPS
        settings.INSTALLED_APPS = ('app1', 'app2')
        # Use an explicit loop instead of map(): under Python 3 map() is lazy,
        # so the apps would never actually be loaded.
        for app_label in settings.INSTALLED_APPS:
            load_app(app_label)
        call_command('syncdb', verbosity=0)
        global ProxyModel, NiceModel
        from app1.models import ProxyModel
        from app2.models import NiceModel

    def tearDown(self):
        settings.INSTALLED_APPS = self.old_installed_apps
        sys.path = self.old_sys_path

    def test_table_exists(self):
        # Both tables must exist; querying raises if syncdb skipped one.
        self.assertEqual(NiceModel.objects.all().count(), 0)
        self.assertEqual(ProxyModel.objects.all().count(), 0)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
from googleapiclient import errors
import json
class MLEngine:
    """Thin wrapper around the Google Cloud ML Engine discovery API client."""

    def __init__(self, projectID='cloudml-demo', service='ml', version='v1'):
        # projectID: GCP project; service/version select the discovery API.
        self.projectID = projectID
        self.service=service
        self.version=version
        self.svc = self.make_svc()

    def make_svc(self):
        """Build and return the Cloud ML API client object."""
        # Get application default credentials (possible only if the gcloud tool is
        # configured on your machine).
        # gcloud auth application-default login
        credentials = GoogleCredentials.get_application_default()
        # Build a representation of the Cloud ML API.
        ml = discovery.build(self.service, self.version, credentials=credentials)
        return ml

    def models_list(self):
        """List the project's models, printing the response or the error."""
        print('models.list')
        request = self.svc.projects().models().list(
            parent='projects/{}'.format(self.projectID)) #, body=requestDict)
        # Make the call.
        try:
            response = request.execute()
            print(response)
        except errors.HttpError as err:
            # Something went wrong, print out some information.
            print('There was an error listing the model. Details:')
            print(err._get_reason())
            print(err)

    def model_predict(self, model, version):
        """Send the JSON instances from ./test.json to *model* for prediction.

        NOTE(review): model_id is assigned twice below — the second assignment
        discards the version-qualified id, so the *version* argument is
        effectively unused. Confirm whether this override is intentional.
        """
        print('models.predict')
        instances = []
        model_id = 'projects/{}/models/{}/versions/{}'.format(self.projectID, model, version)
        model_id = 'projects/{}/models/{}'.format(self.projectID, model)
        print(model_id)
        # One JSON instance per line of test.json.
        with open('test.json') as infile:
            for line in infile:
                instances.append(json.loads(line))
        request_body = {'instances': instances}
        request = self.svc.projects().predict(
            # parent=self.projectID,
            name=model_id,
            body=request_body
        ) #, body=requestDict)
        # Make the call.
        try:
            response = request.execute()
            print(response)
        except errors.HttpError as err:
            # Something went wrong, print out some information.
            print('There was an error listing the model. Details:')
            print(err._get_reason())
            print(err)
def make_models():
    """Ad-hoc driver: build an MLEngine client and run a sample prediction."""
    ml = MLEngine()
    # ml.models_list()
    ml.model_predict('cloudwnd', 'v1')
    return

if __name__ == "__main__":
    make_models()
# Create a dictionary with the fields from the request body.
# requestDict = {'name': 'api_model1', 'description': 'a model from the python api'}
# Create a request to call projects.models.list.
# request = ml.svc.projects().models().list(
# parent=ml.projectID) #, body=requestDict)
# # Make the call.
# try:
# response = request.execute()
# print(response)
# except errors.HttpError as err:
# # Something went wrong, print out some information.
# print('There was an error creating the model. Check the details:')
# print(err._get_reason())
# print(err)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import re
from ..game.structure_utils import StructureBase
class ParsignError(Exception):
    """Base error for this C/pal parsing module."""
    pass

class DefinitionNotFoundError(ParsignError):
    """Raised when a requested C definition is not present in the text."""
    pass
def hex_format(num, digits=2):
    """Return *num* as a lowercase hex string, zero-padded to *digits* chars."""
    body = hex(num)[2:]
    return body.rjust(digits, '0')
class CParser:
    """Regex-based helpers to read and rewrite C array/INCBIN initializations.

    *definition* arguments are CDefinition-like objects exposing to_regex()
    and to_c_format().
    """

    @staticmethod
    def get_custom_initialization_pattern(definition, start_pattern, end_pattern, unlimited=True):
        """Build a regex capturing the text between *start_pattern* and
        *end_pattern* in 'definition = <start>...<end>;'.

        With *unlimited* the pattern is padded with '.*' on both sides so it
        can be used with re.match over a whole file (under re.DOTALL).
        """
        pattern = ('', r'.*')[unlimited] + definition.to_regex() \
            + r'\s*=\s*' + start_pattern + r'(.*?)' + end_pattern \
            + r'\s*;' + ('', r'.*')[unlimited]
        return pattern

    @classmethod
    def get_initialization_pattern(cls, definition, group_curly_brackets=True, unlimited=True):
        """Pattern for a brace initialization; when *group_curly_brackets*
        the braces end up inside the capture group."""
        pattern = cls.get_custom_initialization_pattern(
            definition,
            (r'\{\s*', '')[group_curly_brackets],
            (r'\s*\}', '')[group_curly_brackets],
            unlimited
        )
        return pattern

    @staticmethod
    def remove_comments(txt):
        """Strip C comments from *txt*.

        Block comments may span lines (hence re.DOTALL); line comments must
        stop at end of line, so no DOTALL there — with DOTALL the first '//'
        would swallow everything to the end of the text.
        """
        no_block_comments = re.sub(r'/\*.*?\*/', r'', txt, flags=re.DOTALL)
        no_line_comments = re.sub(r'//.*', r'', no_block_comments)
        return no_line_comments

    @classmethod
    def get_array_contents_text(cls, txt, definition):
        """Return the text inside the braces of *definition*'s initialization,
        normalized to end with a trailing comma.

        :raises DefinitionNotFoundError: if the definition is absent
        """
        txt = cls.remove_comments(txt)
        pattern = cls.get_initialization_pattern(definition, group_curly_brackets=False)
        m = re.match(pattern, txt, re.DOTALL)
        if m is None:
            raise DefinitionNotFoundError('Array definition specified not found.')
        array_contents_txt = m.group(1)
        if array_contents_txt and array_contents_txt[-1] != ',':
            array_contents_txt += ','
        return array_contents_txt

    @classmethod
    def parse_struct_array(cls, txt, definition):
        """Split an array-of-structs initialization into its '{...}' entries."""
        array_contents_txt = cls.get_array_contents_text(txt, definition)
        array_contents = []
        it = re.finditer(r'\s*(\{.*?\})\s*,', array_contents_txt, flags=re.DOTALL)
        for matcher in it:
            array_contents.append(matcher.group(1))
        return array_contents

    @staticmethod
    def parse_comma_separated_num(txt):
        """Parse comma-terminated items into ints (base auto-detected via
        int(..., base=0)); items that fail to parse are kept as raw text,
        empty items become 0."""
        ret = []
        it = re.finditer(r'\s*(.+?)\s*,', txt, re.DOTALL)
        for matcher in it:
            value = matcher.group(1)
            try:
                value = int(value, base=0)
            except ValueError:
                if not value:
                    value = 0
            ret.append(value)
        return ret

    @classmethod
    def parse_number_array(cls, txt, definition):
        """Parse a numeric array initialization into a list of ints."""
        array_contents_txt = cls.get_array_contents_text(txt, definition)
        return cls.parse_comma_separated_num(array_contents_txt)

    @classmethod
    def change_initialization(cls, old_txt, definition, new_initialization):
        """Replace *definition*'s initialization in *old_txt*, or append the
        new initialization if the definition is not present yet."""
        pattern = cls.get_initialization_pattern(definition, unlimited=False)
        if cls.is_definition_in_text(definition, old_txt):
            new_txt = re.sub(pattern, new_initialization, old_txt, flags=re.DOTALL)
        else:
            new_txt = old_txt + '\n\n' + new_initialization
        return new_txt

    @staticmethod
    def is_definition_in_text(definition, text):
        """True if *definition* occurs anywhere in *text*."""
        pattern = r'.*' + definition.to_regex() + r'.*'
        m = re.match(pattern, text, re.DOTALL)
        return m is not None

    @staticmethod
    def is_prototype_declared(definition, text):
        """True if *definition* appears followed by ';' (a declaration)."""
        pattern = r'.*' + definition.to_regex() + r'\s*;.*'
        m = re.match(pattern, text, re.DOTALL)
        return m is not None

    @staticmethod
    def format_initialization(definition, contents):
        """Render 'definition = {<contents>};' as C source text."""
        return definition.to_c_format() + ' = {\n' + contents + '};\n\n'

    @staticmethod
    def format_array_contents(array):
        """Render array entries one per line; int entries are written as hex."""
        contents = ''
        for entry in array:
            if isinstance(entry, int):
                entry = hex(entry)
            contents += '\t{0},\n'.format(entry)
        return contents

    @staticmethod
    def format_incbin(definition, filename):
        """Render an INCBIN_U8 binary-inclusion statement."""
        return '{0} = INCBIN_U8("{1}");'.format(definition, filename)

    @staticmethod
    def format_incbin_array(definition, filenames):
        """Render an array of INCBIN_U8 inclusions."""
        ret = definition.to_c_format() + ' = {\n'
        for filename in filenames:
            ret += '\tINCBIN_U8("{0}"),\n'.format(filename)
        return ret + '};'

    @classmethod
    def get_filename_from_incbin(cls, txt, definition):
        """Extract the filename from *definition*'s INCBIN_U8 initialization.

        :raises DefinitionNotFoundError: if no such inclusion exists
        """
        pattern = cls.get_custom_initialization_pattern(
            definition,
            r'INCBIN_U8\(\s*\"',
            r'\"\)\s*'
        )
        m = re.match(pattern, txt, re.DOTALL)
        if m is None:
            raise DefinitionNotFoundError('Binary inclusion not found.')
        return m.group(1)
class CDefinition:
    """Describes a C variable/array definition and renders it as source text
    or as a whitespace-tolerant regular expression."""

    STATIC = 1
    EXTERN = 2

    def __init__(self, type, label, base_format='{0}', visibility=None):
        # base_format wraps the label, e.g. '{0}[10]' for an array.
        self.type = type
        self.label = label
        self.base_format = base_format
        self.visibility = visibility

    def copy(self):
        """Return an independent clone of this definition."""
        return CDefinition(self.type, self.label, self.base_format, self.visibility)

    def as_extern(self):
        """Return a copy of this definition with 'extern' visibility."""
        clone = self.copy()
        clone.visibility = self.EXTERN
        return clone

    def as_prototype(self):
        """Render as a declaration (definition text plus ';')."""
        return '{0};'.format(self.to_c_format())

    def get_label(self):
        return self.label

    def _visibility_prefix(self, static_txt, extern_txt):
        # Shared dispatch for the C and regex renderers.
        if self.visibility == self.STATIC:
            return static_txt
        if self.visibility == self.EXTERN:
            return extern_txt
        return ''

    def to_c_format(self):
        """Render as C source, with all whitespace in the label collapsed."""
        formatted_label = self.base_format.format(self.label)
        return (self._visibility_prefix('static ', 'extern ')
                + self.type + ' '
                + ''.join(formatted_label.split()))

    def to_regex(self):
        """Render as a regex matching the definition with arbitrary spacing."""
        type_part = r'\s+'.join(self.type.strip().split()) + r'\s+'
        label_pieces = self.base_format.format(self.label).split()
        label_part = r'\s*'.join(re.escape(piece) for piece in label_pieces)
        return self._visibility_prefix(r'static\s+', r'extern\s+') + type_part + label_part

    def __repr__(self):
        return self.to_c_format()

    def format_label(self, *args):
        """Substitute positional arguments into a templated label."""
        self.label = self.label.format(*args)
class AsmParser:
    """Helpers for generating assembly patch snippets."""

    @staticmethod
    def format_repoints(label, addresses):
        """Return .org/.word directive pairs pointing each address at *label*."""
        snippets = ['\n.org {0}\n\t.word {1}\n'.format(hex(addr), label)
                    for addr in addresses]
        return ''.join(snippets)
class ParseableStructBase(StructureBase):
    """A structure that round-trips between Python attributes and C '{...}'
    initializer text.

    Subclasses define FORMAT as a sequence of (attr_name, attr_size) pairs;
    ATTR_STR_MASK optionally maps attribute names to a format string applied
    when rendering that attribute.
    """

    ATTR_STR_MASK = {}

    def __init__(self):
        # modified: dirty flag; dependencies: related CDefinition objects.
        self.modified = False
        self.dependencies = []

    def to_c_format(self):
        """Render the structure as a C initializer: '{ a, b, ... }'.
        Int values are written as hex."""
        data = '{ '
        for attr, attr_size in self.FORMAT:
            if data != '{ ':
                data += ', '
            value = getattr(self, attr)
            if isinstance(value, int):
                value = hex(value)
            if attr in self.ATTR_STR_MASK:
                value = self.ATTR_STR_MASK[attr].format(value)
            data += value
        return data + ' }'

    def load_from_c_format(self, txt):
        """Populate attributes from '{...}' text; attributes beyond the
        parsed values are zeroed.

        :raises ParsignError: if *txt* is not a braced initializer
        """
        m = re.match(r'\s*\{\s*(.*?)\s*\}\s*', txt, re.DOTALL)
        if m is None:
            raise ParsignError('Could not parse "{}" into a structure.'.format(txt))
        no_brackets_txt = m.group(1)
        # Normalize to a trailing comma so the item regex catches the last value.
        if no_brackets_txt and no_brackets_txt[-1] != ',':
            no_brackets_txt += ','
        values = CParser.parse_comma_separated_num(no_brackets_txt)
        for i in range(len(self.FORMAT)):
            if i < len(values):
                setattr(self, self.FORMAT[i][0], values[i])
            else:
                setattr(self, self.FORMAT[i][0], 0)

    def __repr__(self):
        return self.to_c_format()

    def was_modified(self):
        # True once something marked this structure dirty.
        return self.modified

    def add_dependency(self, definition):
        # Record a definition this structure's output depends on.
        self.dependencies.append(definition)
def get_16_color_palette_from_pal_file_format(file_contents):
    """Parse a JASC-PAL 16-color palette file into a flat list of 48 ints.

    Accepts both CRLF and LF line endings: the format is traditionally CRLF,
    but files edited on Unix often lose the carriage returns.

    :param file_contents: full text of the .pal file
    :return: list of 48 ints (16 colors x R, G, B), each 0..255
    :raises ParsignError: if the header/shape is wrong or a value exceeds 255
    """
    pattern = r'JASC-PAL\r?\n0100\r?\n16\r?\n(\d{1,3} \d{1,3} \d{1,3}\r?\n){16}'
    m = re.match(pattern, file_contents)
    if m is None:
        raise ParsignError('Not a valid pal file')
    lines = file_contents.split('\n')
    palette = []
    # Lines 0-2 are the header; lines 3-18 hold one "R G B" triple each.
    # Any trailing '\r' is stripped by split() inside the inner loop.
    for i in range(3, 19):
        for value in lines[i].split():
            value = int(value)
            if value > 255:
                raise ParsignError('Not a valid pal file')
            palette.append(value)
    return palette
def convert_16_color_palette_to_pal_file_format(palette):
    """Serialize a flat 48-int palette into JASC-PAL text (LF line endings)."""
    rows = ['{0} {1} {2}'.format(*palette[i * 3:i * 3 + 3]) for i in range(16)]
    return 'JASC-PAL\n0100\n16\n' + '\n'.join(rows) + '\n'
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Global table styles */
@use '@angular/material' as mat;
@use './typography';

/* Shared typography for Material tables and plain .ng-table tables. */
table[mat-table],
table.ng-table {
  width: 100%;

  tr {
    th {
      @extend %body-bold-01; /* bold body style for header cells */
      margin: 0;
    }

    td {
      @extend %body-01;
    }
  }
}

/* Material tables only: slightly smaller row label text. */
table[mat-table] {
  @include mat.table-overrides(
    (
      row-item-label-text-size: 0.8rem,
    )
  );
}

/* Plain tables: collapsed borders, ellipsized overflowing cells. */
table.ng-table {
  border-collapse: collapse;
  text-align: left;

  th,
  td {
    border-bottom: 1px solid var(--senary-contrast);
    overflow: hidden;
    text-overflow: ellipsis;
    padding: 0.625rem 0.375rem;
  }
}
|
unknown
|
github
|
https://github.com/angular/angular
|
devtools/projects/ng-devtools/src/styles/_tables.scss
|
from django import forms
from django.test import TestCase
from django.core.exceptions import NON_FIELD_ERRORS
from modeltests.validation import ValidationTestCase
from modeltests.validation.models import Author, Article, ModelToValidate
# Import other tests for this package.
from modeltests.validation.validators import TestModelsWithValidators
from modeltests.validation.test_unique import GetUniqueCheckTests, PerformUniqueChecksTest
from modeltests.validation.test_custom_messages import CustomMessagesTest
class BaseModelValidationTests(ValidationTestCase):
    """Exercise Model.full_clean(): required fields, the custom validate
    method, FK constraints, email/URL fields and CharField max_length.

    NOTE(review): several method names carry typos ('limitted', 'eaises
    erros'); kept as-is because renaming would change the reported test ids.
    """

    def test_missing_required_field_raises_error(self):
        mtv = ModelToValidate(f_with_custom_validator=42)
        self.assertFailsValidation(mtv.full_clean, ['name', 'number'])

    def test_with_correct_value_model_validates(self):
        mtv = ModelToValidate(number=10, name='Some Name')
        self.assertEqual(None, mtv.full_clean())

    def test_custom_validate_method(self):
        mtv = ModelToValidate(number=11)
        self.assertFailsValidation(mtv.full_clean, [NON_FIELD_ERRORS, 'name'])

    def test_wrong_FK_value_raises_error(self):
        # parent_id=3 points at a row that does not exist.
        mtv=ModelToValidate(number=10, name='Some Name', parent_id=3)
        self.assertFailsValidation(mtv.full_clean, ['parent'])

    def test_correct_FK_value_validates(self):
        parent = ModelToValidate.objects.create(number=10, name='Some Name')
        mtv = ModelToValidate(number=10, name='Some Name', parent_id=parent.pk)
        self.assertEqual(None, mtv.full_clean())

    def test_limitted_FK_raises_error(self):
        # The limit_choices_to on the parent field says that a parent object's
        # number attribute must be 10, so this should fail validation.
        parent = ModelToValidate.objects.create(number=11, name='Other Name')
        mtv = ModelToValidate(number=10, name='Some Name', parent_id=parent.pk)
        self.assertFailsValidation(mtv.full_clean, ['parent'])

    def test_wrong_email_value_raises_error(self):
        mtv = ModelToValidate(number=10, name='Some Name', email='not-an-email')
        self.assertFailsValidation(mtv.full_clean, ['email'])

    def test_correct_email_value_passes(self):
        mtv = ModelToValidate(number=10, name='Some Name', email='valid@email.com')
        self.assertEqual(None, mtv.full_clean())

    def test_wrong_url_value_raises_error(self):
        mtv = ModelToValidate(number=10, name='Some Name', url='not a url')
        self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'Enter a valid value.'])

    def test_correct_url_but_nonexisting_gives_404(self):
        # Well-formed URL whose target 404s (verify_exists-style check).
        mtv = ModelToValidate(number=10, name='Some Name', url='http://google.com/we-love-microsoft.html')
        self.assertFieldFailsValidationWithMessage(mtv.full_clean, 'url', [u'This URL appears to be a broken link.'])

    def test_correct_url_value_passes(self):
        mtv = ModelToValidate(number=10, name='Some Name', url='http://www.djangoproject.com/')
        self.assertEqual(None, mtv.full_clean()) # This will fail if there's no Internet connection

    def test_text_greater_that_charfields_max_length_eaises_erros(self):
        mtv = ModelToValidate(number=10, name='Some Name'*100)
        self.assertFailsValidation(mtv.full_clean, ['name',])
class ArticleForm(forms.ModelForm):
    """ModelForm over Article; 'author' is assigned in code, not by the form."""
    class Meta:
        model = Article
        exclude = ['author']
class ModelFormsTests(TestCase):
    """ModelForm + model-validation interaction around Article.pub_date.

    NOTE(review): form.errors.keys() is compared directly to a list, which is
    Python 2 specific (dict_keys != list under Python 3).
    """

    def setUp(self):
        self.author = Author.objects.create(name='Joseph Kocherhans')

    def test_partial_validation(self):
        # Make sure the "commit=False and set field values later" idiom still
        # works with model validation.
        data = {
            'title': 'The state of model validation',
            'pub_date': '2010-1-10 14:49:00'
        }
        form = ArticleForm(data)
        self.assertEqual(form.errors.keys(), [])
        article = form.save(commit=False)
        article.author = self.author
        article.save()

    def test_validation_with_empty_blank_field(self):
        # Since a value for pub_date wasn't provided and the field is
        # blank=True, model-validation should pass.
        # Also, Article.clean() should be run, so pub_date will be filled after
        # validation, so the form should save cleanly even though pub_date is
        # not allowed to be null.
        data = {
            'title': 'The state of model validation',
        }
        article = Article(author_id=self.author.id)
        form = ArticleForm(data, instance=article)
        self.assertEqual(form.errors.keys(), [])
        self.assertNotEqual(form.instance.pub_date, None)
        article = form.save()

    def test_validation_with_invalid_blank_field(self):
        # Even though pub_date is set to blank=True, an invalid value was
        # provided, so it should fail validation.
        data = {
            'title': 'The state of model validation',
            'pub_date': 'never'
        }
        article = Article(author_id=self.author.id)
        form = ArticleForm(data, instance=article)
        self.assertEqual(form.errors.keys(), ['pub_date'])
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Generated from:
//
// GOARCH=ppc64 go tool cgo -godefs defs_openbsd.go
//
// Then converted to the form used by the runtime.
package runtime
import "unsafe"
const (
_EINTR = 0x4
_EFAULT = 0xe
_EAGAIN = 0x23
_ETIMEDOUT = 0x3c
_O_WRONLY = 0x1
_O_NONBLOCK = 0x4
_O_CREAT = 0x200
_O_TRUNC = 0x400
_O_CLOEXEC = 0x10000
_PROT_NONE = 0x0
_PROT_READ = 0x1
_PROT_WRITE = 0x2
_PROT_EXEC = 0x4
_MAP_ANON = 0x1000
_MAP_PRIVATE = 0x2
_MAP_FIXED = 0x10
_MAP_STACK = 0x4000
_MADV_DONTNEED = 0x4
_MADV_FREE = 0x6
_SA_SIGINFO = 0x40
_SA_RESTART = 0x2
_SA_ONSTACK = 0x1
_PTHREAD_CREATE_DETACHED = 0x1
_SIGHUP = 0x1
_SIGINT = 0x2
_SIGQUIT = 0x3
_SIGILL = 0x4
_SIGTRAP = 0x5
_SIGABRT = 0x6
_SIGEMT = 0x7
_SIGFPE = 0x8
_SIGKILL = 0x9
_SIGBUS = 0xa
_SIGSEGV = 0xb
_SIGSYS = 0xc
_SIGPIPE = 0xd
_SIGALRM = 0xe
_SIGTERM = 0xf
_SIGURG = 0x10
_SIGSTOP = 0x11
_SIGTSTP = 0x12
_SIGCONT = 0x13
_SIGCHLD = 0x14
_SIGTTIN = 0x15
_SIGTTOU = 0x16
_SIGIO = 0x17
_SIGXCPU = 0x18
_SIGXFSZ = 0x19
_SIGVTALRM = 0x1a
_SIGPROF = 0x1b
_SIGWINCH = 0x1c
_SIGINFO = 0x1d
_SIGUSR1 = 0x1e
_SIGUSR2 = 0x1f
_FPE_INTDIV = 0x1
_FPE_INTOVF = 0x2
_FPE_FLTDIV = 0x3
_FPE_FLTOVF = 0x4
_FPE_FLTUND = 0x5
_FPE_FLTRES = 0x6
_FPE_FLTINV = 0x7
_FPE_FLTSUB = 0x8
_BUS_ADRALN = 0x1
_BUS_ADRERR = 0x2
_BUS_OBJERR = 0x3
_SEGV_MAPERR = 0x1
_SEGV_ACCERR = 0x2
_ITIMER_REAL = 0x0
_ITIMER_VIRTUAL = 0x1
_ITIMER_PROF = 0x2
_EV_ADD = 0x1
_EV_DELETE = 0x2
_EV_CLEAR = 0x20
_EV_ERROR = 0x4000
_EV_EOF = 0x8000
_EVFILT_READ = -0x1
_EVFILT_WRITE = -0x2
)
type tforkt struct {
tf_tcb unsafe.Pointer
tf_tid *int32
tf_stack uintptr
}
type sigcontext struct {
sc_cookie uint64
sc_mask int32
sc_reg [32]uint64
sc_lr uint64
sc_cr uint64
sc_xer uint64
sc_ctr uint64
sc_pc uint64
sc_ps uint64
sc_vrsave uint64
pad_cgo_0 [8]byte
sc_vsx [64][16]uint8
sc_fpscr uint64
sc_vscr uint64
}
type siginfo struct {
si_signo int32
si_code int32
si_errno int32
pad_cgo_0 [4]byte
_data [120]byte
}
type stackt struct {
ss_sp uintptr
ss_size uintptr
ss_flags int32
pad_cgo_0 [4]byte
}
type timespec struct {
tv_sec int64
tv_nsec int64
}
//go:nosplit
func (ts *timespec) setNsec(ns int64) {
	// Split the nanosecond count into whole seconds plus leftover nanoseconds.
	ts.tv_sec = ns / 1e9
	ts.tv_nsec = ns % 1e9
}
type timeval struct {
tv_sec int64
tv_usec int64
}
func (tv *timeval) set_usec(x int32) {
	// Widen to the struct's 64-bit microsecond field.
	tv.tv_usec = int64(x)
}
type itimerval struct {
it_interval timeval
it_value timeval
}
type keventt struct {
ident uint64
filter int16
flags uint16
fflags uint32
data int64
udata *byte
}
type pthread uintptr
type pthreadattr uintptr
type pthreadcond uintptr
type pthreadcondattr uintptr
type pthreadmutex uintptr
type pthreadmutexattr uintptr
|
go
|
github
|
https://github.com/golang/go
|
src/runtime/defs_openbsd_ppc64.go
|
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add access level
Revision ID: 211836bf835c
Revises: 162a3e673105
Create Date: 2014-12-19 05:34:06.790159
"""
# revision identifiers, used by Alembic.
revision = '211836bf835c'        # this migration
down_revision = '162a3e673105'   # the migration this one follows
from alembic import op
import sqlalchemy as sa
from manila.common import constants
def upgrade():
    """Add the 'access_level' column (2-char code, defaulting to read-write)
    to the share_access_map table."""
    op.add_column('share_access_map',
                  sa.Column('access_level', sa.String(2),
                            default=constants.ACCESS_LEVEL_RW))
def downgrade():
    """Drop the 'access_level' column added by upgrade()."""
    op.drop_column('share_access_map', 'access_level')
|
unknown
|
codeparrot/codeparrot-clean
| ||
import json
from httpretty import HTTPretty
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test
class BitbucketOAuth1Test(OAuth1Test):
    """OAuth1 login-flow test for the Bitbucket backend, using canned HTTP
    responses registered via HTTPretty."""

    backend_path = 'social.backends.bitbucket.BitbucketOAuth'
    user_data_url = 'https://bitbucket.org/api/1.0/users/foo@bar.com'
    expected_username = 'foobar'
    # Fake token-exchange response bodies.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    # Fake /emails/ endpoint payload.
    emails_body = json.dumps([{
        'active': True,
        'email': 'foo@bar.com',
        'primary': True
    }])
    # Fake user-profile payload returned from user_data_url.
    user_data_body = json.dumps({
        'user': {
            'username': 'foobar',
            'first_name': 'Foo',
            'last_name': 'Bar',
            'display_name': 'Foo Bar',
            'is_team': False,
            'avatar': 'https://secure.gravatar.com/avatar/'
                      '5280f15cedf540b544eecc30fcf3027c?'
                      'd=https%3A%2F%2Fd3oaxc4q5k2d6q.cloudfront.net%2Fm%2F'
                      '9e262ba34f96%2Fimg%2Fdefault_avatar%2F32%2F'
                      'user_blue.png&s=32',
            'resource_uri': '/1.0/users/foobar'
        }
    })

    def test_login(self):
        # The backend fetches the e-mail list during login; stub it out here.
        HTTPretty.register_uri(HTTPretty.GET,
                               'https://bitbucket.org/api/1.0/emails/',
                               status=200, body=self.emails_body)
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
|
unknown
|
codeparrot/codeparrot-clean
| ||
@import "@sass/abstracts/vars";

/* Horizontal page indent. */
.indent {
  margin: 0 $extralarge-margin;
}

/* Vertical spacing helpers. */
.indent-top {
  margin-top: $middle-margin;
}

.indent-bottom {
  margin-bottom: $middle-margin;
}
|
unknown
|
github
|
https://github.com/vercel/next.js
|
examples/cms-sitecore-xmcloud/src/assets/sass/components/spacing/_indent.scss
|
# Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The bulk write operations interface.
.. versionadded:: 2.7
"""
from bson.objectid import ObjectId
from bson.py3compat import u
from bson.son import SON
from pymongo.common import (validate_is_mapping,
validate_is_mutable_mapping,
validate_ok_for_replace,
validate_ok_for_update)
from pymongo.errors import (BulkWriteError,
DocumentTooLarge,
InvalidOperation,
OperationFailure)
from pymongo.message import (_INSERT, _UPDATE, _DELETE,
_do_batched_write_command,
_randint,
_BulkWriteContext)
from pymongo.write_concern import WriteConcern
# Delete "limit" values: remove all matching documents vs. only the first.
_DELETE_ALL = 0
_DELETE_ONE = 1

# For backwards compatibility. See MongoDB src/mongo/base/error_codes.err
_BAD_VALUE = 2
_UNKNOWN_ERROR = 8
_WRITE_CONCERN_ERROR = 64

# Write command names, in _INSERT/_UPDATE/_DELETE order.
_COMMANDS = ('insert', 'update', 'delete')

# These string literals are used when we create fake server return
# documents client side. We use unicode literals in python 2.x to
# match the actual return values from the server.
_UID = u("_id")
_UCODE = u("code")
_UERRMSG = u("errmsg")
_UINDEX = u("index")
_UOP = u("op")
class _Run(object):
"""Represents a batch of write operations.
"""
def __init__(self, op_type):
"""Initialize a new Run object.
"""
self.op_type = op_type
self.index_map = []
self.ops = []
def index(self, idx):
"""Get the original index of an operation in this run.
:Parameters:
- `idx`: The Run index that maps to the original index.
"""
return self.index_map[idx]
def add(self, original_index, operation):
"""Add an operation to this Run instance.
:Parameters:
- `original_index`: The original index of this operation
within a larger bulk operation.
- `operation`: The operation document.
"""
self.index_map.append(original_index)
self.ops.append(operation)
def _make_error(index, code, errmsg, operation):
    """Build a writeError document in the shape the server would return."""
    error = {}
    error[_UINDEX] = index
    error[_UCODE] = code
    error[_UERRMSG] = errmsg
    error[_UOP] = operation
    return error
def _merge_legacy(run, full_result, result, index):
    """Merge a result from a legacy opcode into the full results.

    :Parameters:
      - `run`: the _Run the operation belonged to
      - `full_result`: the accumulated bulk-result document (mutated in place)
      - `result`: the getlasterror-style reply for a single operation
      - `index`: the operation's position within *run*
    """
    affected = result.get('n', 0)

    errmsg = result.get("errmsg", result.get("err", ""))
    if errmsg:
        # wtimeout is not considered a hard failure in
        # MongoDB 2.6 so don't treat it like one here.
        if result.get("wtimeout"):
            error_doc = {'errmsg': errmsg, 'code': _WRITE_CONCERN_ERROR}
            full_result['writeConcernErrors'].append(error_doc)
        else:
            code = result.get("code", _UNKNOWN_ERROR)
            # Report the error against the caller's original bulk index.
            error = _make_error(run.index(index), code, errmsg, run.ops[index])
            if "errInfo" in result:
                error["errInfo"] = result["errInfo"]
            full_result["writeErrors"].append(error)
        return

    if run.op_type == _INSERT:
        full_result['nInserted'] += 1
    elif run.op_type == _UPDATE:
        if "upserted" in result:
            doc = {_UINDEX: run.index(index), _UID: result["upserted"]}
            full_result["upserted"].append(doc)
            full_result['nUpserted'] += affected
        # Versions of MongoDB before 2.6 don't return the _id for an
        # upsert if _id is not an ObjectId.
        elif result.get("updatedExisting") is False and affected == 1:
            op = run.ops[index]
            # If _id is in both the update document *and* the query spec
            # the update document _id takes precedence.
            _id = op['u'].get('_id', op['q'].get('_id'))
            doc = {_UINDEX: run.index(index), _UID: _id}
            full_result["upserted"].append(doc)
            full_result['nUpserted'] += affected
        else:
            full_result['nMatched'] += affected
    elif run.op_type == _DELETE:
        full_result['nRemoved'] += affected
def _merge_command(run, full_result, results):
    """Merge a group of results from write commands into the full result.

    :Parameters:
      - `run`: The :class:`_Run` these results belong to.
      - `full_result`: The aggregate bulk result document being built.
      - `results`: Iterable of ``(offset, result)`` pairs, where
        `offset` is the position of the first operation of that batch
        within ``run.ops``.
    """
    for offset, result in results:
        affected = result.get("n", 0)
        if run.op_type == _INSERT:
            full_result["nInserted"] += affected
        elif run.op_type == _DELETE:
            full_result["nRemoved"] += affected
        elif run.op_type == _UPDATE:
            upserted = result.get("upserted")
            if upserted:
                if isinstance(upserted, list):
                    n_upserted = len(upserted)
                    # Remap each batch-relative index to the index in
                    # the caller's original request.
                    for doc in upserted:
                        doc["index"] = run.index(doc["index"] + offset)
                    full_result["upserted"].extend(upserted)
                else:
                    # Single upsert: the server returned just the _id.
                    n_upserted = 1
                    index = run.index(offset)
                    doc = {_UINDEX: index, _UID: upserted}
                    full_result["upserted"].append(doc)
                full_result["nUpserted"] += n_upserted
                # Upserted documents are counted separately, not as
                # matches.
                full_result["nMatched"] += (affected - n_upserted)
            else:
                full_result["nMatched"] += affected
            n_modified = result.get("nModified")
            # SERVER-13001 - in a mixed sharded cluster a call to
            # update could return nModified (>= 2.6) or not (<= 2.4).
            # If any call does not return nModified we can't report
            # a valid final count so omit the field completely.
            if n_modified is not None and "nModified" in full_result:
                full_result["nModified"] += n_modified
            else:
                full_result.pop("nModified", None)
        write_errors = result.get("writeErrors")
        if write_errors:
            for doc in write_errors:
                # Leave the server response intact for APM.
                replacement = doc.copy()
                idx = doc["index"] + offset
                replacement["index"] = run.index(idx)
                # Add the failed operation to the error document.
                replacement[_UOP] = run.ops[idx]
                full_result["writeErrors"].append(replacement)
        wc_error = result.get("writeConcernError")
        if wc_error:
            full_result["writeConcernErrors"].append(wc_error)
class _Bulk(object):
    """The private guts of the bulk write API.

    Accumulates operations via the ``add_*`` methods, then sends them in
    batched runs via :meth:`execute`, choosing write commands, legacy
    opcodes, or fire-and-forget depending on server version and write
    concern.
    """
    def __init__(self, collection, ordered):
        """Initialize a _Bulk instance.

        :Parameters:
          - `collection`: The target Collection.
          - `ordered`: If True, stop at the first error; otherwise
            attempt all operations.
        """
        self.collection = collection
        self.ordered = ordered
        # List of (op_type, operation document) tuples, in caller order.
        self.ops = []
        self.name = "%s.%s" % (collection.database.name, collection.name)
        # Write commands are sent to the database's $cmd namespace.
        self.namespace = collection.database.name + '.$cmd'
        # Guards against re-executing the same batch (see execute()).
        self.executed = False
    def add_insert(self, document):
        """Add an insert document to the list of ops.
        """
        validate_is_mutable_mapping("document", document)
        # Generate ObjectId client side.
        if '_id' not in document:
            document['_id'] = ObjectId()
        self.ops.append((_INSERT, document))
    def add_update(self, selector, update, multi=False, upsert=False):
        """Create an update document and add it to the list of ops.
        """
        validate_ok_for_update(update)
        cmd = SON([('q', selector), ('u', update),
                   ('multi', multi), ('upsert', upsert)])
        self.ops.append((_UPDATE, cmd))
    def add_replace(self, selector, replacement, upsert=False):
        """Create a replace document and add it to the list of ops.
        """
        validate_ok_for_replace(replacement)
        # A replace is an update with multi=False and a full document
        # (no $-operators) as 'u'.
        cmd = SON([('q', selector), ('u', replacement),
                   ('multi', False), ('upsert', upsert)])
        self.ops.append((_UPDATE, cmd))
    def add_delete(self, selector, limit):
        """Create a delete document and add it to the list of ops.
        """
        cmd = SON([('q', selector), ('limit', limit)])
        self.ops.append((_DELETE, cmd))
    def gen_ordered(self):
        """Generate batches of operations, batched by type of
        operation, in the order **provided**.

        A new _Run is started every time the op type changes, so the
        caller's ordering is preserved across runs.
        """
        run = None
        for idx, (op_type, operation) in enumerate(self.ops):
            if run is None:
                run = _Run(op_type)
            elif run.op_type != op_type:
                yield run
                run = _Run(op_type)
            run.add(idx, operation)
        yield run
    def gen_unordered(self):
        """Generate batches of operations, batched by type of
        operation, in arbitrary order.

        At most three runs are produced (inserts, updates, deletes).
        Indexing ``operations[op_type]`` relies on _INSERT, _UPDATE and
        _DELETE being the ints 0, 1 and 2 respectively.
        """
        operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)]
        for idx, (op_type, operation) in enumerate(self.ops):
            operations[op_type].add(idx, operation)
        for run in operations:
            if run.ops:
                yield run
    def execute_command(self, sock_info, generator, write_concern):
        """Execute using write commands.

        Raises BulkWriteError if any write or write concern errors were
        reported; otherwise returns the aggregate result document.
        """
        # nModified is only reported for write commands, not legacy ops.
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nModified": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        db_name = self.collection.database.name
        for run in generator:
            cmd = SON([(_COMMANDS[run.op_type], self.collection.name),
                       ('ordered', self.ordered)])
            if write_concern.document:
                cmd['writeConcern'] = write_concern.document
            bwc = _BulkWriteContext(db_name, cmd, sock_info, op_id)
            # True => check_keys.
            results = _do_batched_write_command(
                self.namespace, run.op_type, cmd,
                run.ops, True, self.collection.codec_options, bwc)
            _merge_command(run, full_result, results)
            # We're supposed to continue if errors are
            # at the write concern level (e.g. wtimeout)
            if self.ordered and full_result['writeErrors']:
                break
        if full_result["writeErrors"] or full_result["writeConcernErrors"]:
            if full_result['writeErrors']:
                # Report errors in the caller's original request order.
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
    def execute_no_results(self, sock_info, generator):
        """Execute all operations, returning no results (w=0).
        """
        coll = self.collection
        # If ordered is True we have to send GLE or use write
        # commands so we can abort on the first error.
        write_concern = WriteConcern(w=int(self.ordered))
        op_id = _randint()
        for run in generator:
            try:
                if run.op_type == _INSERT:
                    coll._insert(sock_info,
                                 run.ops,
                                 self.ordered,
                                 write_concern=write_concern,
                                 op_id=op_id)
                elif run.op_type == _UPDATE:
                    for operation in run.ops:
                        doc = operation['u']
                        # Update documents whose first key starts with
                        # '$' are operator updates, so skip client-side
                        # key validation for them.
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        coll._update(sock_info,
                                     operation['q'],
                                     doc,
                                     operation['upsert'],
                                     check_keys,
                                     operation['multi'],
                                     write_concern=write_concern,
                                     op_id=op_id,
                                     ordered=self.ordered)
                else:
                    for operation in run.ops:
                        # limit is 1 (single) or 0 (all); _delete takes
                        # a "multi" flag, hence the negation.
                        coll._delete(sock_info,
                                     operation['q'],
                                     not operation['limit'],
                                     write_concern,
                                     op_id,
                                     self.ordered)
            except OperationFailure:
                if self.ordered:
                    break
    def execute_legacy(self, sock_info, generator, write_concern):
        """Execute using legacy wire protocol ops.
        """
        coll = self.collection
        full_result = {
            "writeErrors": [],
            "writeConcernErrors": [],
            "nInserted": 0,
            "nUpserted": 0,
            "nMatched": 0,
            "nRemoved": 0,
            "upserted": [],
        }
        op_id = _randint()
        stop = False
        for run in generator:
            for idx, operation in enumerate(run.ops):
                try:
                    # To do per-operation reporting we have to do ops one
                    # at a time. That means the performance of bulk insert
                    # will be slower here than calling Collection.insert()
                    if run.op_type == _INSERT:
                        coll._insert(sock_info,
                                     operation,
                                     self.ordered,
                                     write_concern=write_concern,
                                     op_id=op_id)
                        result = {}
                    elif run.op_type == _UPDATE:
                        doc = operation['u']
                        # See execute_no_results: '$'-prefixed first key
                        # marks an operator update.
                        check_keys = True
                        if doc and next(iter(doc)).startswith('$'):
                            check_keys = False
                        result = coll._update(sock_info,
                                              operation['q'],
                                              doc,
                                              operation['upsert'],
                                              check_keys,
                                              operation['multi'],
                                              write_concern=write_concern,
                                              op_id=op_id,
                                              ordered=self.ordered)
                    else:
                        result = coll._delete(sock_info,
                                              operation['q'],
                                              not operation['limit'],
                                              write_concern,
                                              op_id,
                                              self.ordered)
                    _merge_legacy(run, full_result, result, idx)
                except DocumentTooLarge as exc:
                    # MongoDB 2.6 uses error code 2 for "too large".
                    error = _make_error(
                        run.index(idx), _BAD_VALUE, str(exc), operation)
                    full_result['writeErrors'].append(error)
                    if self.ordered:
                        stop = True
                        break
                except OperationFailure as exc:
                    if not exc.details:
                        # Some error not related to the write operation
                        # (e.g. kerberos failure). Re-raise immediately.
                        raise
                    _merge_legacy(run, full_result, exc.details, idx)
                    # We're supposed to continue if errors are
                    # at the write concern level (e.g. wtimeout)
                    if self.ordered and full_result["writeErrors"]:
                        stop = True
                        break
            if stop:
                break
        if full_result["writeErrors"] or full_result['writeConcernErrors']:
            if full_result['writeErrors']:
                full_result['writeErrors'].sort(
                    key=lambda error: error['index'])
            raise BulkWriteError(full_result)
        return full_result
    def execute(self, write_concern):
        """Execute operations.

        Dispatches to one of the execute_* strategies based on write
        concern and the server's wire version.
        """
        if not self.ops:
            raise InvalidOperation('No operations to execute')
        if self.executed:
            raise InvalidOperation('Bulk operations can '
                                   'only be executed once.')
        self.executed = True
        write_concern = (WriteConcern(**write_concern) if
                         write_concern else self.collection.write_concern)
        if self.ordered:
            generator = self.gen_ordered()
        else:
            generator = self.gen_unordered()
        client = self.collection.database.client
        with client._socket_for_writes() as sock_info:
            if not write_concern.acknowledged:
                # w=0: fire and forget, no result document.
                self.execute_no_results(sock_info, generator)
            elif sock_info.max_wire_version > 1:
                # Server supports write commands.
                return self.execute_command(sock_info, generator, write_concern)
            else:
                return self.execute_legacy(sock_info, generator, write_concern)
class BulkUpsertOperation(object):
    """An interface for adding upsert operations.

    Every update queued through this object carries ``upsert=True``.
    """
    __slots__ = ('__selector', '__bulk')
    def __init__(self, selector, bulk):
        self.__selector = selector
        self.__bulk = bulk
    def update_one(self, update):
        """Update (or insert) a single document matching the selector.

        :Parameters:
          - `update` (dict): the update operations to apply
        """
        selector = self.__selector
        self.__bulk.add_update(selector, update, multi=False, upsert=True)
    def update(self, update):
        """Update (or insert) every document matching the selector.

        :Parameters:
          - `update` (dict): the update operations to apply
        """
        selector = self.__selector
        self.__bulk.add_update(selector, update, multi=True, upsert=True)
    def replace_one(self, replacement):
        """Replace (or insert) one entire document matching the selector.

        :Parameters:
          - `replacement` (dict): the replacement document
        """
        self.__bulk.add_replace(self.__selector, replacement, upsert=True)
class BulkWriteOperation(object):
    """An interface for adding update or remove operations.

    Instances are produced by :meth:`BulkOperationBuilder.find` and
    forward every call to the underlying :class:`_Bulk` with the
    selector captured at construction time.
    """
    __slots__ = ('__selector', '__bulk')
    def __init__(self, selector, bulk):
        self.__selector = selector
        self.__bulk = bulk
    def update_one(self, update):
        """Apply `update` to a single document matching the selector.

        :Parameters:
          - `update` (dict): the update operations to apply
        """
        self.__bulk.add_update(self.__selector, update, multi=False)
    def update(self, update):
        """Apply `update` to every document matching the selector.

        :Parameters:
          - `update` (dict): the update operations to apply
        """
        self.__bulk.add_update(self.__selector, update, multi=True)
    def replace_one(self, replacement):
        """Replace one entire document matching the selector criteria.

        :Parameters:
          - `replacement` (dict): the replacement document
        """
        self.__bulk.add_replace(self.__selector, replacement)
    def remove_one(self):
        """Queue removal of a single document matching the selector.
        """
        self.__bulk.add_delete(self.__selector, _DELETE_ONE)
    def remove(self):
        """Queue removal of every document matching the selector.
        """
        self.__bulk.add_delete(self.__selector, _DELETE_ALL)
    def upsert(self):
        """Mark all further chained update operations as upserts.

        :Returns:
          - A :class:`BulkUpsertOperation` instance, used to add
            update operations to this bulk operation.
        """
        return BulkUpsertOperation(self.__selector, self.__bulk)
class BulkOperationBuilder(object):
    """An interface for executing a batch of write operations.
    """
    __slots__ = '__bulk'
    def __init__(self, collection, ordered=True):
        """Initialize a new BulkOperationBuilder instance.

        :Parameters:
          - `collection`: A :class:`~pymongo.collection.Collection` instance.
          - `ordered` (optional): If ``True`` all operations will be executed
            serially, in the order provided, and the entire execution will
            abort on the first error. If ``False`` operations will be executed
            in arbitrary order (possibly in parallel on the server), reporting
            any errors that occurred after attempting all operations. Defaults
            to ``True``.
        """
        self.__bulk = _Bulk(collection, ordered)
    def find(self, selector):
        """Specify selection criteria for bulk operations.

        :Parameters:
          - `selector` (dict): the selection criteria for update
            and remove operations.

        :Returns:
          - A :class:`BulkWriteOperation` instance, used to add
            update and remove operations to this bulk operation.
        """
        validate_is_mapping("selector", selector)
        return BulkWriteOperation(selector, self.__bulk)
    def insert(self, document):
        """Queue a single document for insertion.

        :Parameters:
          - `document` (dict): the document to insert
        """
        self.__bulk.add_insert(document)
    def execute(self, write_concern=None):
        """Run every operation queued so far.

        :Parameters:
          - write_concern (optional): the write concern for this bulk
            execution.
        """
        if write_concern is not None:
            validate_is_mapping("write_concern", write_concern)
        return self.__bulk.execute(write_concern)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* contrib/sslinfo/sslinfo--1.2.sql */
-- SQL-level wrappers for the sslinfo C functions; each exposes one
-- attribute of the current connection's SSL state.
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "CREATE EXTENSION sslinfo" to load this file. \quit
-- Serial number of the client's certificate.
CREATE FUNCTION ssl_client_serial() RETURNS numeric
AS 'MODULE_PATHNAME', 'ssl_client_serial'
LANGUAGE C STRICT PARALLEL RESTRICTED;
-- True if the current connection uses SSL.
CREATE FUNCTION ssl_is_used() RETURNS boolean
AS 'MODULE_PATHNAME', 'ssl_is_used'
LANGUAGE C STRICT PARALLEL RESTRICTED;
-- Protocol version in use (e.g. the TLS version string).
CREATE FUNCTION ssl_version() RETURNS text
AS 'MODULE_PATHNAME', 'ssl_version'
LANGUAGE C STRICT PARALLEL RESTRICTED;
-- Cipher suite in use for the current connection.
CREATE FUNCTION ssl_cipher() RETURNS text
AS 'MODULE_PATHNAME', 'ssl_cipher'
LANGUAGE C STRICT PARALLEL RESTRICTED;
-- True if the client presented a certificate.
CREATE FUNCTION ssl_client_cert_present() RETURNS boolean
AS 'MODULE_PATHNAME', 'ssl_client_cert_present'
LANGUAGE C STRICT PARALLEL RESTRICTED;
-- A single named field from the client certificate's subject DN.
CREATE FUNCTION ssl_client_dn_field(text) RETURNS text
AS 'MODULE_PATHNAME', 'ssl_client_dn_field'
LANGUAGE C STRICT PARALLEL RESTRICTED;
-- A single named field from the client certificate's issuer DN.
CREATE FUNCTION ssl_issuer_field(text) RETURNS text
AS 'MODULE_PATHNAME', 'ssl_issuer_field'
LANGUAGE C STRICT PARALLEL RESTRICTED;
-- Full subject distinguished name of the client certificate.
CREATE FUNCTION ssl_client_dn() RETURNS text
AS 'MODULE_PATHNAME', 'ssl_client_dn'
LANGUAGE C STRICT PARALLEL RESTRICTED;
-- Full issuer distinguished name of the client certificate.
CREATE FUNCTION ssl_issuer_dn() RETURNS text
AS 'MODULE_PATHNAME', 'ssl_issuer_dn'
LANGUAGE C STRICT PARALLEL RESTRICTED;
-- Set-returning function: one row per extension present in the
-- client certificate (name, value, critical flag).
CREATE FUNCTION
ssl_extension_info(OUT name text,
    OUT value text,
    OUT critical boolean
) RETURNS SETOF record
AS 'MODULE_PATHNAME', 'ssl_extension_info'
LANGUAGE C STRICT PARALLEL RESTRICTED;
|
sql
|
github
|
https://github.com/postgres/postgres
|
contrib/sslinfo/sslinfo--1.2.sql
|
# A binary morphology add-on for the Python Imaging Library
#
# History:
# 2014-06-04 Initial version.
#
# Copyright (c) 2014 Dov Grobgeld <dov.grobgeld@gmail.com>
from __future__ import print_function
from . import Image, _imagingmorph
import re
LUT_SIZE = 1 << 9
class LutBuilder(object):
    """A class for building a MorphLut from a descriptive language

    The input patterns is a list of a strings sequences like these::

        4:(...
           .1.
           111)->1

    (whitespaces including linebreaks are ignored). The option 4
    describes a series of symmetry operations (in this case a
    4-rotation), the pattern is described by:

    - . or X - Ignore
    - 1 - Pixel is on
    - 0 - Pixel is off

    The result of the operation is described after "->" string.

    The default is to return the current pixel value, which is
    returned if no other match is found.

    Operations:

    - 4 - 4 way rotation
    - N - Negate
    - 1 - Dummy op for no other operation (an op must always be given)
    - M - Mirroring

    Example::

        lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
        lut = lb.build_lut()
    """
    def __init__(self, patterns=None, op_name=None):
        """Create a builder from explicit `patterns`, or from a named
        built-in operator `op_name` (one of corner, dilation4,
        dilation8, erosion4, erosion8, edge).

        Raises Exception for an unknown `op_name`.
        """
        if patterns is not None:
            self.patterns = patterns
        else:
            self.patterns = []
        self.lut = None
        if op_name is not None:
            known_patterns = {
                'corner': ['1:(... ... ...)->0',
                           '4:(00. 01. ...)->1'],
                'dilation4': ['4:(... .0. .1.)->1'],
                'dilation8': ['4:(... .0. .1.)->1',
                              '4:(... .0. ..1)->1'],
                'erosion4': ['4:(... .1. .0.)->0'],
                'erosion8': ['4:(... .1. .0.)->0',
                             '4:(... .1. ..0)->0'],
                'edge': ['1:(... ... ...)->0',
                         '4:(.0. .1. ...)->1',
                         '4:(01. .1. ...)->1']
            }
            if op_name not in known_patterns:
                raise Exception('Unknown pattern '+op_name+'!')
            self.patterns = known_patterns[op_name]
    def add_patterns(self, patterns):
        """Append more pattern strings to this builder."""
        self.patterns += patterns
    def build_default_lut(self):
        """Initialize the lut to the identity operator: each 9-bit
        neighborhood maps to the value of its center pixel (bit 4)."""
        symbols = [0, 1]
        m = 1 << 4  # pos of current pixel
        self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE))
    def get_lut(self):
        """Return the most recently built lut (or None)."""
        return self.lut
    def _string_permute(self, pattern, permutation):
        """string_permute takes a pattern and a permutation and returns the
        string permuted according to the permutation list.
        """
        assert len(permutation) == 9
        return ''.join(pattern[p] for p in permutation)
    def _pattern_permute(self, basic_pattern, options, basic_result):
        """pattern_permute takes a basic pattern and its result and clones
        the pattern according to the modifications described in the $options
        parameter. It returns a list of all cloned patterns."""
        patterns = [(basic_pattern, basic_result)]
        # rotations
        if '4' in options:
            res = patterns[-1][1]
            for i in range(4):
                patterns.append(
                    (self._string_permute(patterns[-1][0], [6, 3, 0,
                                                            7, 4, 1,
                                                            8, 5, 2]), res))
        # mirror
        if 'M' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                patterns.append(
                    (self._string_permute(pattern, [2, 1, 0,
                                                    5, 4, 3,
                                                    8, 7, 6]), res))
        # negate
        if 'N' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                # Swap 0 and 1
                pattern = (pattern
                           .replace('0', 'Z')
                           .replace('1', '0')
                           .replace('Z', '1'))
                # Bug fix: keep the result an int.  The original code set
                # res = '%d' % (1-int(res)), a *string*, which later made
                # build_lut's `[0, 1][r]` list indexing raise TypeError
                # whenever a negated pattern matched.
                res = 1 - int(res)
                patterns.append((pattern, res))
        return patterns
    def build_lut(self):
        """Compile all patterns into a morphology lut.

        TBD :Build based on (file) morphlut:modify_lut
        """
        self.build_default_lut()
        patterns = []
        # Parse and create symmetries of the patterns strings
        for p in self.patterns:
            m = re.search(
                r'(\w*):?\s*\((.+?)\)\s*->\s*(\d)', p.replace('\n', ''))
            if not m:
                raise Exception('Syntax error in pattern "'+p+'"')
            options = m.group(1)
            pattern = m.group(2)
            result = int(m.group(3))
            # Get rid of spaces
            pattern = pattern.replace(' ', '').replace('\n', '')
            patterns += self._pattern_permute(pattern, options, result)
        # compile the patterns into regular expressions for speed
        # ('.' and 'X' are both wildcards matching either pixel state)
        for i, pattern in enumerate(patterns):
            p = pattern[0].replace('.', 'X').replace('X', '[01]')
            p = re.compile(p)
            patterns[i] = (p, pattern[1])
        # Step through table and find patterns that match.
        # Note that all the patterns are searched. The last one
        # caught overrides
        for i in range(LUT_SIZE):
            # Build the bit pattern
            bitpattern = bin(i)[2:]
            bitpattern = ('0'*(9-len(bitpattern)) + bitpattern)[::-1]
            for p, r in patterns:
                if p.match(bitpattern):
                    self.lut[i] = [0, 1][r]
        return self.lut
class MorphOp(object):
    """A class for binary morphological operators

    Wraps a 512-entry lookup table (one entry per 3x3 binary
    neighborhood) and applies it to mode "L" images via the
    _imagingmorph C module.
    """
    def __init__(self,
                 lut=None,
                 op_name=None,
                 patterns=None):
        """Create a binary morphological operator

        :Parameters:
          - `lut`: a prebuilt lookup table, or None.
          - `op_name`: name of a built-in operator known to LutBuilder.
          - `patterns`: pattern strings understood by LutBuilder.
        """
        self.lut = lut
        if op_name is not None:
            self.lut = LutBuilder(op_name=op_name).build_lut()
        elif patterns is not None:
            self.lut = LutBuilder(patterns=patterns).build_lut()
    def apply(self, image):
        """Run a single morphological operation on an image

        Returns a tuple of the number of changed pixels and the
        morphed image"""
        if self.lut is None:
            raise Exception('No operator loaded')
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')
        outimage = Image.new(image.mode, image.size, None)
        count = _imagingmorph.apply(
            bytes(self.lut), image.im.id, outimage.im.id)
        return count, outimage
    def match(self, image):
        """Get a list of coordinates matching the morphological operation on
        an image.

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        if self.lut is None:
            raise Exception('No operator loaded')
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')
        return _imagingmorph.match(bytes(self.lut), image.im.id)
    def get_on_pixels(self, image):
        """Get a list of all turned on pixels in a binary image

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')
        return _imagingmorph.get_on_pixels(image.im.id)
    def load_lut(self, filename):
        """Load an operator from an mrl file"""
        with open(filename, 'rb') as f:
            self.lut = bytearray(f.read())
        # Bug fix: the original compared against 8192, but every lut this
        # module itself produces and consumes (build_default_lut, apply,
        # match) has LUT_SIZE (512) entries, so a correctly sized file was
        # always rejected.
        if len(self.lut) != LUT_SIZE:
            self.lut = None
            raise Exception('Wrong size operator file!')
    def save_lut(self, filename):
        """Save an operator to an mrl file"""
        if self.lut is None:
            raise Exception('No operator loaded')
        with open(filename, 'wb') as f:
            f.write(self.lut)
    def set_lut(self, lut):
        """Set the lut from an external source"""
        self.lut = lut
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2

# Defaults chosen from a search over `num_beams`, `length_penalty` and
# `early_stopping` against the wmt19 test data for the best BLEU scores:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower,
#   can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so the best value is assigned per model below
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}

# Map each model name to the organization that published it.
org_names = {
    model: "facebook"
    for model in ("wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en")
}
org_names.update(
    {
        model: "allenai"
        for model in (
            "wmt16-en-de-dist-12-1",
            "wmt16-en-de-dist-6-1",
            "wmt16-en-de-12-1",
            "wmt19-de-en-6-6-base",
            "wmt19-de-en-6-6-big",
        )
    }
)
def rewrite_dict_keys(d):
    """Normalize fairseq BPE vocab keys for the FSMT tokenizer.

    Drop the word-continuation marker (the "@@" suffix) and append the
    word-end marker "</w>" to keys that are not broken up,
    e.g.: {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}.
    The special tokens keep their original spelling (no "</w>").
    """
    d2 = {}
    for k, v in d.items():
        if k.endswith("@@"):
            d2[re.sub(r"@@$", "", k)] = v
        else:
            d2[re.sub(r"$", "</w>", k)] = v
    # restore the special tokens: drop the "</w>"-suffixed entries and
    # re-add them under their original keys
    for token in ["<s>", "<pad>", "</s>", "<unk>"]:
        del d2[f"{token}</w>"]
        d2[token] = d[token]
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq wmt transformer checkpoint into a transformers
    FSMT dump: writes config.json, vocab-src.json, vocab-tgt.json, the
    merges file, tokenizer_config.json and the model weights under
    `pytorch_dump_folder_path`.

    :param fsmt_checkpoint_path: path to the fairseq checkpoint file; its
        directory must also contain the dict.*.txt and bpecodes files.
    :param pytorch_dump_folder_path: output directory (created if needed).
    """
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )
    args = vars(chkpt["args"]["model"])
    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]
    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")
    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    # NOTE(review): str.islower() is also False for tokens with no cased
    # characters (digits, "<s>", punctuation), so most vocabs end up with
    # do_lower_case=False — confirm this is the intended heuristic.
    do_lower_case = True
    for k in src_vocab:
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    # bug fix: this message previously reported "bpe=" for the tokenizer check
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"
    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }
    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0
    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }
    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers upload {model_dir}")
if __name__ == "__main__":
    # Command line interface: both arguments are required.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
|
python
|
github
|
https://github.com/huggingface/transformers
|
src/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py
|
"""
Django settings for api project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=pe$t0a2dgf%ghj(b$suu2=4vi0x^uq6=l82qn1fx=fe52uym5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'api.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'api.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp.tools.translate import _
from openerp.osv import orm, fields
logger = logging.getLogger(__name__)
class credit_control_policy_changer(orm.TransientModel):
    """Wizard run from customer invoices to manually apply a credit policy.

    The selected policy/level is applied to the related move lines
    available in the selection widget.
    """
    _name = "credit.control.policy.changer"

    _columns = {
        'new_policy_id': fields.many2one('credit.control.policy',
                                         'New Policy to Apply',
                                         required=True),
        'new_policy_level_id': fields.many2one('credit.control.policy.level',
                                               'New level to apply',
                                               required=True),
        # Only used to provide dynamic filtering on form
        'do_nothing': fields.boolean('No follow policy'),
        'move_line_ids': fields.many2many('account.move.line',
                                          rel='credit_changer_ml_rel',
                                          string='Move line to change'),
    }

    def _get_default_lines(self, cr, uid, context=None):
        """Return default ids for the ``move_line_ids`` field.

        Only lines that are on the same account and move as the invoice
        and that are not reconciled are taken.

        :return: list of compliant move line ids, or False when the wizard
                 is opened without ``active_ids`` in context
        :raises orm.except_orm: when called on a non-customer invoice
        """
        if context is None:
            context = {}
        active_ids = context.get('active_ids')
        selected_line_ids = []
        inv_model = self.pool['account.invoice']
        move_line_model = self.pool['account.move.line']
        if not active_ids:
            return False
        for invoice in inv_model.browse(cr, uid, active_ids, context=context):
            # Credit control only applies to customer invoices.
            if invoice.type in ('in_invoice', 'in_refund', 'out_refund'):
                raise orm.except_orm(
                    _('User error'),
                    # fixed typo in user-facing message ("cutomer")
                    _('Please use wizard on customer invoices')
                )
            domain = [('account_id', '=', invoice.account_id.id),
                      ('move_id', '=', invoice.move_id.id),
                      ('reconcile_id', '=', False)]
            move_ids = move_line_model.search(cr, uid, domain,
                                              context=context)
            selected_line_ids.extend(move_ids)
        return selected_line_ids

    _defaults = {'move_line_ids': _get_default_lines}

    def onchange_policy_id(self, cr, uid, ids, new_policy_id, context=None):
        """Mirror the chosen policy's ``do_nothing`` flag onto the form."""
        if not new_policy_id:
            return {}
        policy = self.pool['credit.control.policy'].browse(cr, uid,
                                                           new_policy_id,
                                                           context=context)
        return {'value': {'do_nothing': policy.do_nothing}}

    def _mark_as_overridden(self, cr, uid, move_lines, context=None):
        """Mark the credit control lines of ``move_lines`` as overridden.

        This is done by setting the ``manually_overridden`` field to True.

        :param move_lines: move lines (browse records) to mark as overridden
        :return: list of credit line ids that were marked as overridden
        """
        credit_model = self.pool['credit.control.line']
        domain = [('move_line_id', 'in', [x.id for x in move_lines])]
        credits_ids = credit_model.search(cr, uid, domain, context=context)
        credit_model.write(cr, uid,
                           credits_ids,
                           {'manually_overridden': True},
                           context=context)
        return credits_ids

    def _set_invoice_policy(self, cr, uid, move_line_ids, policy,
                            context=None):
        """Force ``policy`` on the invoices related to ``move_line_ids``."""
        invoice_model = self.pool['account.invoice']
        invoice_ids = set([x.invoice.id for x in move_line_ids if x.invoice])
        invoice_model.write(cr, uid, list(invoice_ids),
                            {'credit_policy_id': policy.id},
                            context=context)

    def _check_accounts_policies(self, cr, uid, lines, policy, context=None):
        """Validate that ``policy`` is allowed on every line's account."""
        policy_obj = self.pool['credit.control.policy']
        for line in lines:
            policy_obj.check_policy_against_account(
                cr, uid,
                line.account_id.id,
                policy.id,
                context=context
            )
        return True

    def set_new_policy(self, cr, uid, wizard_id, context=None):
        """Set the new policy on the selected invoices.

        A credit control line is created (or updated) for each related
        move line using the policy level chosen on the wizard form.

        :return: ir.actions.act_window dict showing the generated credit
                 lines, or an empty dict when nothing was generated
        """
        assert len(wizard_id) == 1, "Only one id expected"
        wizard_id = wizard_id[0]
        credit_line_model = self.pool['credit.control.line']
        ir_model = self.pool['ir.model.data']
        ui_act_model = self.pool['ir.actions.act_window']
        wizard = self.browse(cr, uid, wizard_id, context=context)
        controlling_date = fields.date.today()
        self._check_accounts_policies(
            cr,
            uid,
            wizard.move_line_ids,
            wizard.new_policy_id,
            context=context)  # bug fix: context was not forwarded
        self._mark_as_overridden(
            cr,
            uid,
            wizard.move_line_ids,
            context=context
        )
        # As discussed with the business expert, draft lines should be
        # passed to "ignored" if they are on the same level as the new one.
        # As this is a manual action, the rounding tolerance is also ignored.
        generated_ids = credit_line_model.create_or_update_from_mv_lines(
            cr, uid, [],
            [x.id for x in wizard.move_line_ids],
            wizard.new_policy_level_id.id,
            controlling_date,
            check_tolerance=False,
            context=context  # bug fix: context was dropped (passed as None)
        )
        self._set_invoice_policy(cr, uid,
                                 wizard.move_line_ids,
                                 wizard.new_policy_id,
                                 context=context)
        if not generated_ids:
            return {}
        view_id = ir_model.get_object_reference(cr, uid,
                                                "account_credit_control",
                                                "credit_control_line_action")
        assert view_id, 'No view found'
        action = ui_act_model.read(cr, uid, view_id[1], context=context)
        action['domain'] = [('id', 'in', generated_ids)]
        return action
|
unknown
|
codeparrot/codeparrot-clean
| ||
// run
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test the 'for range' construct.
package main

// test range over channels

// gen sends the integers lo..hi (inclusive) on c, then closes it.
func gen(c chan int, lo, hi int) {
	for i := lo; i <= hi; i++ {
		c <- i
	}
	close(c)
}

// seq returns a channel that yields lo..hi from a background goroutine.
func seq(lo, hi int) chan int {
	c := make(chan int)
	go gen(c, lo, hi)
	return c
}
const alphabet = "abcdefghijklmnopqrstuvwxyz"

// testblankvars exercises every blank-variable form of 'for range' over a
// string: bare range, `_ =`, `_, _ =`, `i, _ :=`, and `_, v :=`.
func testblankvars() {
	n := 0
	for range alphabet {
		n++
	}
	if n != 26 {
		println("for range: wrong count", n, "want 26")
		panic("fail")
	}
	n = 0
	for _ = range alphabet {
		n++
	}
	if n != 26 {
		println("for _ = range: wrong count", n, "want 26")
		panic("fail")
	}
	n = 0
	for _, _ = range alphabet {
		n++
	}
	if n != 26 {
		println("for _, _ = range: wrong count", n, "want 26")
		panic("fail")
	}
	s := 0
	for i, _ := range alphabet {
		s += i
	}
	// 0+1+...+25 == 325
	if s != 325 {
		println("for i, _ := range: wrong sum", s, "want 325")
		panic("fail")
	}
	r := rune(0)
	for _, v := range alphabet {
		r += v
	}
	// sum of rune values 'a'..'z' == 2847
	if r != 2847 {
		println("for _, v := range: wrong sum", r, "want 2847")
		panic("fail")
	}
}
// testchan checks 'for range' over a channel, both with a received-value
// variable and in the bare (count-only) form.
func testchan() {
	s := ""
	for i := range seq('a', 'z') {
		s += string(i)
	}
	if s != alphabet {
		println("Wanted lowercase alphabet; got", s)
		panic("fail")
	}
	n := 0
	for range seq('a', 'z') {
		n++
	}
	if n != 26 {
		println("testchan wrong count", n, "want 26")
		panic("fail")
	}
}
// test that range over slice only evaluates
// the expression after "range" once.

// nmake counts how many times the make* helpers below are invoked, so the
// tests can assert single evaluation of the range expression.
var nmake = 0

func makeslice() []int {
	nmake++
	return []int{1, 2, 3, 4, 5}
}

// testslice: index+value range over a slice, plus the
// `for i, x[i] = range y` parallel-assignment form.
func testslice() {
	s := 0
	nmake = 0
	for _, v := range makeslice() {
		s += v
	}
	if nmake != 1 {
		println("range called makeslice", nmake, "times")
		panic("fail")
	}
	if s != 15 {
		println("wrong sum ranging over makeslice", s)
		panic("fail")
	}
	x := []int{10, 20}
	y := []int{99}
	i := 1
	// Parallel assignment: x[i] uses the OLD i (1) on the first iteration,
	// then i becomes 0 — so x ends up {10, 99} and i == 0.
	for i, x[i] = range y {
		break
	}
	if i != 0 || x[0] != 10 || x[1] != 99 {
		println("wrong parallel assignment", i, x[0], x[1])
		panic("fail")
	}
}

// testslice1: index-only range over a slice.
func testslice1() {
	s := 0
	nmake = 0
	for i := range makeslice() {
		s += i
	}
	if nmake != 1 {
		println("range called makeslice", nmake, "times")
		panic("fail")
	}
	if s != 10 {
		println("wrong sum ranging over makeslice", s)
		panic("fail")
	}
}

// testslice2: bare range over a slice, counting iterations only.
func testslice2() {
	n := 0
	nmake = 0
	for range makeslice() {
		n++
	}
	if nmake != 1 {
		println("range called makeslice", nmake, "times")
		panic("fail")
	}
	if n != 5 {
		println("wrong count ranging over makeslice", n)
		panic("fail")
	}
}
// test that range over []byte(string) only evaluates
// the expression after "range" once.
func makenumstring() string {
	nmake++
	return "\x01\x02\x03\x04\x05"
}

// testslice3: range over a []byte conversion of a string expression;
// the conversion source must be evaluated exactly once.
func testslice3() {
	s := byte(0)
	nmake = 0
	for _, v := range []byte(makenumstring()) {
		s += v
	}
	if nmake != 1 {
		println("range called makenumstring", nmake, "times")
		panic("fail")
	}
	if s != 15 {
		println("wrong sum ranging over []byte(makenumstring)", s)
		panic("fail")
	}
}
// test that range over array only evaluates
// the expression after "range" once.
func makearray() [5]int {
	nmake++
	return [5]int{1, 2, 3, 4, 5}
}

// testarray: index+value range over an array value.
func testarray() {
	s := 0
	nmake = 0
	for _, v := range makearray() {
		s += v
	}
	if nmake != 1 {
		println("range called makearray", nmake, "times")
		panic("fail")
	}
	if s != 15 {
		println("wrong sum ranging over makearray", s)
		panic("fail")
	}
}

// testarray1: index-only range over an array value.
func testarray1() {
	s := 0
	nmake = 0
	for i := range makearray() {
		s += i
	}
	if nmake != 1 {
		println("range called makearray", nmake, "times")
		panic("fail")
	}
	if s != 10 {
		println("wrong sum ranging over makearray", s)
		panic("fail")
	}
}

// testarray2: bare range over an array value, counting iterations.
func testarray2() {
	n := 0
	nmake = 0
	for range makearray() {
		n++
	}
	if nmake != 1 {
		println("range called makearray", nmake, "times")
		panic("fail")
	}
	if n != 5 {
		println("wrong count ranging over makearray", n)
		panic("fail")
	}
}
func makearrayptr() *[5]int {
	nmake++
	return &[5]int{1, 2, 3, 4, 5}
}

// testarrayptr: len/cap/range over a *array expression must evaluate the
// expression exactly once (no constant-folding away the call).
func testarrayptr() {
	nmake = 0
	x := len(makearrayptr())
	if x != 5 || nmake != 1 {
		println("len called makearrayptr", nmake, "times and got len", x)
		panic("fail")
	}
	nmake = 0
	x = cap(makearrayptr())
	if x != 5 || nmake != 1 {
		println("cap called makearrayptr", nmake, "times and got len", x)
		panic("fail")
	}
	s := 0
	nmake = 0
	for _, v := range makearrayptr() {
		s += v
	}
	if nmake != 1 {
		println("range called makearrayptr", nmake, "times")
		panic("fail")
	}
	if s != 15 {
		println("wrong sum ranging over makearrayptr", s)
		panic("fail")
	}
}

// testarrayptr1: index-only range over a *array expression.
func testarrayptr1() {
	s := 0
	nmake = 0
	for i := range makearrayptr() {
		s += i
	}
	if nmake != 1 {
		println("range called makearrayptr", nmake, "times")
		panic("fail")
	}
	if s != 10 {
		println("wrong sum ranging over makearrayptr", s)
		panic("fail")
	}
}

// testarrayptr2: bare range over a *array expression.
func testarrayptr2() {
	n := 0
	nmake = 0
	for range makearrayptr() {
		n++
	}
	if nmake != 1 {
		println("range called makearrayptr", nmake, "times")
		panic("fail")
	}
	if n != 5 {
		println("wrong count ranging over makearrayptr", n)
		panic("fail")
	}
}
// test that range over string only evaluates
// the expression after "range" once.
func makestring() string {
	nmake++
	return "abcd☺"
}

// teststring: rune-value range over a string (UTF-8 decoding), plus the
// parallel-assignment forms with string range.
func teststring() {
	var s rune
	nmake = 0
	for _, v := range makestring() {
		s += v
	}
	if nmake != 1 {
		println("range called makestring", nmake, "times")
		panic("fail")
	}
	if s != 'a'+'b'+'c'+'d'+'☺' {
		println("wrong sum ranging over makestring", s)
		panic("fail")
	}
	x := []rune{'a', 'b'}
	i := 1
	// x[i] is assigned using the OLD i, then i becomes 0.
	for i, x[i] = range "c" {
		break
	}
	if i != 0 || x[0] != 'a' || x[1] != 'c' {
		println("wrong parallel assignment", i, x[0], x[1])
		panic("fail")
	}
	y := []int{1, 2, 3}
	r := rune(1)
	// y[r] indexes with the OLD r (1), which then receives the rune '\x02'.
	for y[r], r = range "\x02" {
		break
	}
	if r != 2 || y[0] != 1 || y[1] != 0 || y[2] != 3 {
		println("wrong parallel assignment", r, y[0], y[1], y[2])
		panic("fail")
	}
}

// teststring1: byte-index-only range over a string.
func teststring1() {
	s := 0
	nmake = 0
	for i := range makestring() {
		s += i
	}
	if nmake != 1 {
		println("range called makestring", nmake, "times")
		panic("fail")
	}
	if s != 10 {
		println("wrong sum ranging over makestring", s)
		panic("fail")
	}
}

// teststring2: bare range over a string; "abcd☺" decodes to 5 runes.
func teststring2() {
	n := 0
	nmake = 0
	for range makestring() {
		n++
	}
	if nmake != 1 {
		println("range called makestring", nmake, "times")
		panic("fail")
	}
	if n != 5 {
		println("wrong count ranging over makestring", n)
		panic("fail")
	}
}
// test that range over map only evaluates
// the expression after "range" once.
func makemap() map[int]int {
	nmake++
	return map[int]int{0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: '☺'}
}

// testmap: value-only range over a map expression.
func testmap() {
	s := 0
	nmake = 0
	for _, v := range makemap() {
		s += v
	}
	if nmake != 1 {
		println("range called makemap", nmake, "times")
		panic("fail")
	}
	if s != 'a'+'b'+'c'+'d'+'☺' {
		println("wrong sum ranging over makemap", s)
		panic("fail")
	}
}

// testmap1: key-only range over a map expression.
func testmap1() {
	s := 0
	nmake = 0
	for i := range makemap() {
		s += i
	}
	if nmake != 1 {
		println("range called makemap", nmake, "times")
		panic("fail")
	}
	if s != 10 {
		println("wrong sum ranging over makemap", s)
		panic("fail")
	}
}

// testmap2: bare range over a map expression, counting iterations.
func testmap2() {
	n := 0
	nmake = 0
	for range makemap() {
		n++
	}
	if nmake != 1 {
		println("range called makemap", nmake, "times")
		panic("fail")
	}
	if n != 5 {
		println("wrong count ranging over makemap", n)
		panic("fail")
	}
}
// test that range evaluates the index and value expressions
// exactly once per iteration.

// ncalls counts invocations of getvar.
var ncalls = 0

func getvar(p *int) *int {
	ncalls++
	return p
}

// testcalls assigns through *getvar(...) on each side of the range
// assignment: 2 iterations x 2 expressions => exactly 4 calls; an empty
// array must produce zero calls and zero iterations.
func testcalls() {
	var i, v int
	si := 0
	sv := 0
	for *getvar(&i), *getvar(&v) = range [2]int{1, 2} {
		si += i
		sv += v
	}
	if ncalls != 4 {
		println("wrong number of calls:", ncalls, "!= 4")
		panic("fail")
	}
	if si != 1 || sv != 3 {
		println("wrong sum in testcalls", si, sv)
		panic("fail")
	}
	ncalls = 0
	for *getvar(&i), *getvar(&v) = range [0]int{} {
		println("loop ran on empty array")
		panic("fail")
	}
	if ncalls != 0 {
		println("wrong number of calls:", ncalls, "!= 0")
		panic("fail")
	}
}
// main runs every range test; each one panics with "fail" on error.
func main() {
	testblankvars()
	testchan()
	testarray()
	testarray1()
	testarray2()
	testarrayptr()
	testarrayptr1()
	testarrayptr2()
	testslice()
	testslice1()
	testslice2()
	testslice3()
	teststring()
	teststring1()
	teststring2()
	testmap()
	testmap1()
	testmap2()
	testcalls()
}
|
go
|
github
|
https://github.com/golang/go
|
test/range.go
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.