max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
ida_export/lib/shovel/ida/decode.py | RUB-SysSec/VPS | 7 | 6624751 | <filename>ida_export/lib/shovel/ida/decode.py<gh_stars>1-10
from idc import NextNotTail, PrevHead, NextHead, GetMnem, isCode, GetFlags, GetOpnd, BADADDR
from idaapi import FlowChart, get_func, o_void, is_ret_insn
from idautils import CodeRefsFrom, DecodeInstruction
from copy import deepcopy
from .. import block, operands, instruction
from ..operands import AccessType
from ida_interface import *
from ..arch import RegistersTricore, RegistersX64
import __builtin__
__all__ = ['decode_function']
def normalize_access(operand):
    """Split a read-write operand into a (write, read) pair.

    A ReadWrite operand becomes two operands: a deep copy marked as
    Write, followed by the original object mutated in place to Read.
    Operands with any other access type are returned unchanged as a
    one-element list.
    """
    if operand.access != AccessType.ReadWrite:
        return [operand]
    write_part = deepcopy(operand)
    write_part._access = AccessType.Write
    # Reuse the original object for the read half (mutated in place,
    # matching the original implementation's side effect).
    operand._access = AccessType.Read
    return [write_part, operand]
def normalize_operand(operand):
"""Expand *operand* into a flat list of single-access operands.

On Tricore, extended (64-bit) registers are first split into their two
32-bit data registers; every resulting operand is then split by access
type via normalize_access().
"""
ops = []
# Only Tricore has extended registers that need splitting; all other
# architectures pass the operand through unchanged.
if isinstance(__builtin__.REGISTERS, RegistersTricore):
ops = tricore_normalize_extended_registers(operand)
else:
ops.append(operand)
# Flatten: each operand may itself expand into a (write, read) pair.
return [x for o in ops for x in normalize_access(o)]
def tricore_normalize_extended_registers(operand):
"""Split a Tricore extended-register operand (e0..e14) into operands for
its two underlying 32-bit data registers (high word appended first).
Non-register operands and plain registers are returned unchanged.
"""
ops = []
# Extended register eN maps onto the data register pair starting at
# d(2N): the low word is d(2N), the high word is d(2N)+1.
if isinstance(operand.operand, operands.Register) and \
__builtin__.REGISTERS.e0 <= operand.operand.index and \
operand.operand.index <= __builtin__.REGISTERS.e14:
data_lo = ((operand.operand.index - __builtin__.REGISTERS.e0) *
2 + __builtin__.REGISTERS.d0)
data_hi = data_lo + 1
hi = deepcopy(operand)
hi.operand._index = data_hi
ops.append(hi)
lo = deepcopy(operand)
lo.operand._index = data_lo
ops.append(lo)
else:
ops.append(operand)
return ops
def decode_instruction(address):
    """Decode the instruction at *address* into a shovel Instruction.

    Operands are normalized (extended registers split, read/write access
    split) via normalize_operand(). On x64, operands of x87 FPU
    instructions are ignored because the "st" register index overlaps
    the general-purpose register indices.

    Bug fix: the FPU mnemonic list contained "fabs," (stray comma inside
    the string, so "fabs" never matched) and "fld," "fld1" (missing list
    comma, implicitly concatenated to "fld,fld1", so neither "fld" nor
    "fld1" matched). Both entries are corrected below.
    """
    i = DecodeInstruction(address)
    mnemonic = i.get_canon_mnem()
    ops = []
    ignore_ops = False
    if isinstance(__builtin__.REGISTERS, RegistersX64):
        # uses floating point register "st" which has
        # overlapping index with "rax" and so on
        ignore_ops = mnemonic in ["fabs", "fadd", "faddp", "fbld", "fbstp",
            "fchs", "fclex", "fcmov", "fcmovb", "fcmovbe", "fcmove", "fcmovnb",
            "fcmovnbe", "fcmovne", "fcmovnu", "fcmovu", "fcom", "fcomi",
            "fcomip", "fcomp", "fcompp", "fdecstp", "fdiv", "fdivp",
            "fdivr", "fdivrp", "ffree", "fiadd", "ficom", "ficomp",
            "fidiv", "fidivr", "fild", "fimul", "fincstp", "finit", "fist",
            "fistp", "fisub", "fisubr", "fld", "fld1", "fldcw", "fldenv",
            "fldenvw", "fldl2e", "fldl2t", "fldlg2", "fldln2", "fldpi",
            "fldz", "fmul", "fmulp", "fnclex", "fndisi", "fneni", "fninit",
            "fnop", "fnsave", "fnsavew", "fnstcw", "fnstenv", "fnstenvw",
            "fnstsw", "fpatan", "fprem", "fptan", "frndint", "frstor",
            "frstorw", "fsave", "fsavew", "fscale", "fsqrt", "fst",
            "fstcw", "fstenv", "fstenvw", "fstp", "fstsw", "fsub",
            "fsubp", "fsubr", "fsubrp", "ftst", "fucomi", "fucomip",
            "fwait", "fxam", "fxch", "fxtract", "fyl2x", "fyl2xp1"]
    if not ignore_ops:
        # IDA exposes at most 6 operand slots per instruction.
        for o in range(6):
            if i[o].type == o_void:
                break
            # NOTE: for x64 we only consider 64 bit granularity at the moment.
            ida_operand_str = GetOpnd(address, o)
            # Some instructions like "stosq" do not have string operands
            # => ignore for now.
            if ida_operand_str == "":
                break
            operand = instruction.Operand(get_operand_access_type(i, o),
                                          i[o],
                                          ida_operand_str=ida_operand_str,
                                          address=address,
                                          op_num=o)
            ops.extend(normalize_operand(operand))
    # An instruction terminates control flow when it returns or has more
    # than one outgoing code reference (e.g. a conditional branch).
    is_control_flow = is_ret_insn(address) or \
        len(list(CodeRefsFrom(address, 1))) > 1
    return instruction.Instruction(address, mnemonic, ops, is_control_flow)
def decode_block(start, end):
    """Dispatch decoding of the block [start, end) to the routine for the
    architecture selected by the global __builtin__.REGISTERS object.
    """
    regs = __builtin__.REGISTERS
    if isinstance(regs, RegistersTricore):
        return tricore_decode_block(start, end)
    if isinstance(regs, RegistersX64):
        return x64_decode_block(start, end)
    raise NotImplementedError("Do not know how to decode block.")
def decode_function(function):
"""Decode *function* in place using IDA's flow chart.

IDA blocks are split further so that every 'call' ends a strict basic
block; each strict block is then decoded and successor edges between
blocks of this function are recorded.
"""
ida_function = get_func(function._address)
assert ida_function, 'Cannot match given address to existing function.'
ida_blocks = list(FlowChart(ida_function))
# Make strict basic blocks.
block_tuples = list()
for b in ida_blocks:
address = b.startEA
block_start = b.startEA
block_end = b.endEA
# DEBUG
#print("Block start: 0x%x" % block_start)
#print("Block end: 0x%x" % block_end)
while address != BADADDR and address < block_end:
# Maybe use CodeRefsTo, any edge cases?
# A call terminates the current strict block right after the call.
if GetMnem(address).startswith('call'):
address = NextHead(address)
block_tuples.append((block_start, address))
block_start = address
else:
address = NextHead(address)
# The remainder of the IDA block (if non-empty) is the final strict block.
if block_start != block_end:
block_tuples.append((block_start, block_end))
# Decode basic blocks.
block_vas = set([b[0] for b in block_tuples])
for b in block_tuples:
block_ = decode_block(b[0], b[1])
function._blocks[b[0]] = block_
# Successors: code refs from the block's last instruction that land on
# a known block start of this function.
for c in filter(lambda x: x in block_vas,
CodeRefsFrom(PrevHead(b[1]), 1)):
function._successors[b[0]].add(c)
block_.add_successor(c)
def tricore_decode_block(start, end):
"""Decode the Tricore basic block [start, end) into a shovel Block.

Returns get reads of the result registers a2/d2 appended; calls get a
write of the retval register inserted plus reads of the parameter
registers a4/d4, followed by pseudo CallingConvention instructions
that model the callee's result flowing into a2/d2.
"""
b = block.Block(start, end)
# Reads from/writes to return value register retv.
ret_r = instruction.Operand(AccessType.Read,
operand=operands.Register(
index=__builtin__.REGISTERS.retval))
ret_w = instruction.Operand(AccessType.Write,
operand=operands.Register(
index=__builtin__.REGISTERS.retval))
a2_w = instruction.Operand(AccessType.Write,
operand=operands.Register(index=__builtin__.REGISTERS.a2))
d2_w = instruction.Operand(AccessType.Write,
operand=operands.Register(index=__builtin__.REGISTERS.d2))
a2_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.a2))
d2_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.d2))
# Parameters.
a4_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.a4))
d4_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.d4))
current = start
while current < end:
instr = decode_instruction(current)
if instr._mnemonic.startswith('ret'):
# Returns consume the result registers.
instr._operands.append(deepcopy(a2_r))
instr._operands.append(deepcopy(d2_r))
b.add_instruction(instr)
elif instr._mnemonic.startswith('call'):
instr._operands.insert(0, deepcopy(ret_w))
instr._operands.extend([deepcopy(a4_r), deepcopy(d4_r)])
b.add_instruction(instr)
# Pseudo instructions: the callee's result flows into a2/d2.
return_a2 = instruction.CallingConvention(instr.address,
[deepcopy(a2_w),
deepcopy(ret_r)])
return_d2 = instruction.CallingConvention(instr.address,
[deepcopy(d2_w),
deepcopy(ret_r)])
b.add_instruction(return_a2)
b.add_instruction(return_d2)
else:
b.add_instruction(instr)
current = NextNotTail(current)
return b
def x64_decode_block(start, end):
"""Decode the x64 basic block [start, end) into a shovel Block.

Calls get a write of rax inserted plus reads of the System V AMD64
argument registers appended; returns get a read of rax appended, so
calling-convention data flow is visible to later analyses. Non-code
bytes inside the block are skipped with a warning.
"""
# DEBUG
#print("Decode block start: 0x%x" % start)
#print("Decode block end: 0x%x" % end)
b = block.Block(start, end)
# Pre-built calling-convention operands (deep-copied per instruction).
rax_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rax))
rax_w = instruction.Operand(AccessType.Write,
operand=operands.Register(index=__builtin__.REGISTERS.rax))
rdi_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rdi))
rsi_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rsi))
rdx_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rdx))
rcx_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rcx))
r8_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.r8))
r9_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.r9))
current = start
while current < end:
# Skip embedded data bytes inside the block.
if not isCode(GetFlags(current)):
print("WARNING: 0x%x not considered as code " % current
+ "but inside basic block. Skipping.")
current = NextNotTail(current)
continue
instr = decode_instruction(current)
if instr._mnemonic.startswith('ret'):
instr._operands.append(deepcopy(rax_r))
b.add_instruction(instr)
elif instr._mnemonic.startswith('call'):
instr._operands.insert(0, deepcopy(rax_w))
# TODO consider only System V AMD64 ABI at the moment.
instr._operands.extend([deepcopy(rdi_r), deepcopy(rsi_r),
deepcopy(rdx_r), deepcopy(rcx_r), deepcopy(r8_r),
deepcopy(r9_r)])
b.add_instruction(instr)
else:
b.add_instruction(instr)
current = NextNotTail(current)
return b | <filename>ida_export/lib/shovel/ida/decode.py<gh_stars>1-10
from idc import NextNotTail, PrevHead, NextHead, GetMnem, isCode, GetFlags, GetOpnd, BADADDR
from idaapi import FlowChart, get_func, o_void, is_ret_insn
from idautils import CodeRefsFrom, DecodeInstruction
from copy import deepcopy
from .. import block, operands, instruction
from ..operands import AccessType
from ida_interface import *
from ..arch import RegistersTricore, RegistersX64
import __builtin__
__all__ = ['decode_function']
def normalize_access(operand):
ops = []
if operand.access == AccessType.ReadWrite:
target = deepcopy(operand)
target._access = AccessType.Write
ops.append(target)
operand._access = AccessType.Read
ops.append(operand)
else:
ops.append(operand)
return ops
def normalize_operand(operand):
ops = []
if isinstance(__builtin__.REGISTERS, RegistersTricore):
ops = tricore_normalize_extended_registers(operand)
else:
ops.append(operand)
return [x for o in ops for x in normalize_access(o)]
def tricore_normalize_extended_registers(operand):
ops = []
if isinstance(operand.operand, operands.Register) and \
__builtin__.REGISTERS.e0 <= operand.operand.index and \
operand.operand.index <= __builtin__.REGISTERS.e14:
data_lo = ((operand.operand.index - __builtin__.REGISTERS.e0) *
2 + __builtin__.REGISTERS.d0)
data_hi = data_lo + 1
hi = deepcopy(operand)
hi.operand._index = data_hi
ops.append(hi)
lo = deepcopy(operand)
lo.operand._index = data_lo
ops.append(lo)
else:
ops.append(operand)
return ops
def decode_instruction(address):
# DEBUG
#print("Decoding instruction: 0x%x" % address)
i = DecodeInstruction(address)
mnemonic = i.get_canon_mnem()
ops = []
ignore_ops = False
if isinstance(__builtin__.REGISTERS, RegistersX64):
# uses floating point register "st" which has
# overlapping index with "rax" and so on
ignore_ops = mnemonic in ["fabs,", "fadd", "faddp", "fbld", "fbstp",
"fchs", "fclex", "fcmov", "fcmovb", "fcmovbe", "fcmove", "fcmovnb",
"fcmovnbe", "fcmovne", "fcmovnu", "fcmovu", "fcom", "fcomi",
"fcomip", "fcomp", "fcompp", "fdecstp", "fdiv", "fdivp",
"fdivr", "fdivrp", "ffree", "fiadd", "ficom", "ficomp",
"fidiv", "fidivr", "fild", "fimul", "fincstp", "finit", "fist",
"fistp", "fisub", "fisubr", "fld," "fld1", "fldcw", "fldenv",
"fldenvw", "fldl2e", "fldl2t", "fldlg2", "fldln2", "fldpi",
"fldz", "fmul", "fmulp", "fnclex", "fndisi", "fneni", "fninit",
"fnop", "fnsave", "fnsavew", "fnstcw", "fnstenv", "fnstenvw",
"fnstsw", "fpatan", "fprem", "fptan", "frndint", "frstor",
"frstorw", "fsave", "fsavew", "fscale", "fsqrt", "fst",
"fstcw", "fstenv", "fstenvw", "fstp", "fstsw", "fsub",
"fsubp", "fsubr", "fsubrp", "ftst", "fucomi", "fucomip",
"fwait", "fxam", "fxch", "fxtract", "fyl2x", "fyl2xp1"]
if not ignore_ops:
for o in range(6):
if i[o].type == o_void:
break
# NOTE: for x64 we only consider 64 bit granularity at the moment.
ida_operand_str = GetOpnd(address, o)
# Some instructions like "stosq" do not have string operands
# => ignore for now.
if ida_operand_str == "":
break
# DEBUG
#print(ida_operand_str)
operand = instruction.Operand(get_operand_access_type(i, o),
i[o],
ida_operand_str=ida_operand_str,
address=address,
op_num=o)
normalized = normalize_operand(operand)
ops.extend(normalized)
is_control_flow = is_ret_insn(address) or \
len(list(CodeRefsFrom(address, 1))) > 1
return instruction.Instruction(address, mnemonic, ops, is_control_flow)
def decode_block(start, end):
if isinstance(__builtin__.REGISTERS, RegistersTricore):
return tricore_decode_block(start, end)
elif isinstance(__builtin__.REGISTERS, RegistersX64):
return x64_decode_block(start, end)
else:
raise NotImplementedError("Do not know how to decode block.")
def decode_function(function):
ida_function = get_func(function._address)
assert ida_function, 'Cannot match given address to existing function.'
ida_blocks = list(FlowChart(ida_function))
# Make strict basic blocks.
block_tuples = list()
for b in ida_blocks:
address = b.startEA
block_start = b.startEA
block_end = b.endEA
# DEBUG
#print("Block start: 0x%x" % block_start)
#print("Block end: 0x%x" % block_end)
while address != BADADDR and address < block_end:
# Maybe use CodeRefsTo, any edge cases?
if GetMnem(address).startswith('call'):
address = NextHead(address)
block_tuples.append((block_start, address))
block_start = address
else:
address = NextHead(address)
if block_start != block_end:
block_tuples.append((block_start, block_end))
# Decode basic blocks.
block_vas = set([b[0] for b in block_tuples])
for b in block_tuples:
block_ = decode_block(b[0], b[1])
function._blocks[b[0]] = block_
for c in filter(lambda x: x in block_vas,
CodeRefsFrom(PrevHead(b[1]), 1)):
function._successors[b[0]].add(c)
block_.add_successor(c)
def tricore_decode_block(start, end):
b = block.Block(start, end)
# Reads from/writes to return value register retv.
ret_r = instruction.Operand(AccessType.Read,
operand=operands.Register(
index=__builtin__.REGISTERS.retval))
ret_w = instruction.Operand(AccessType.Write,
operand=operands.Register(
index=__builtin__.REGISTERS.retval))
a2_w = instruction.Operand(AccessType.Write,
operand=operands.Register(index=__builtin__.REGISTERS.a2))
d2_w = instruction.Operand(AccessType.Write,
operand=operands.Register(index=__builtin__.REGISTERS.d2))
a2_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.a2))
d2_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.d2))
# Parameters.
a4_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.a4))
d4_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.d4))
current = start
while current < end:
instr = decode_instruction(current)
if instr._mnemonic.startswith('ret'):
instr._operands.append(deepcopy(a2_r))
instr._operands.append(deepcopy(d2_r))
b.add_instruction(instr)
elif instr._mnemonic.startswith('call'):
instr._operands.insert(0, deepcopy(ret_w))
instr._operands.extend([deepcopy(a4_r), deepcopy(d4_r)])
b.add_instruction(instr)
return_a2 = instruction.CallingConvention(instr.address,
[deepcopy(a2_w),
deepcopy(ret_r)])
return_d2 = instruction.CallingConvention(instr.address,
[deepcopy(d2_w),
deepcopy(ret_r)])
b.add_instruction(return_a2)
b.add_instruction(return_d2)
else:
b.add_instruction(instr)
current = NextNotTail(current)
return b
def x64_decode_block(start, end):
# DEBUG
#print("Decode block start: 0x%x" % start)
#print("Decode block end: 0x%x" % end)
b = block.Block(start, end)
rax_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rax))
rax_w = instruction.Operand(AccessType.Write,
operand=operands.Register(index=__builtin__.REGISTERS.rax))
rdi_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rdi))
rsi_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rsi))
rdx_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rdx))
rcx_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.rcx))
r8_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.r8))
r9_r = instruction.Operand(AccessType.Read,
operand=operands.Register(index=__builtin__.REGISTERS.r9))
current = start
while current < end:
if not isCode(GetFlags(current)):
print("WARNING: 0x%x not considered as code " % current
+ "but inside basic block. Skipping.")
current = NextNotTail(current)
continue
instr = decode_instruction(current)
if instr._mnemonic.startswith('ret'):
instr._operands.append(deepcopy(rax_r))
b.add_instruction(instr)
elif instr._mnemonic.startswith('call'):
instr._operands.insert(0, deepcopy(rax_w))
# TODO consider only System V AMD64 ABI at the moment.
instr._operands.extend([deepcopy(rdi_r), deepcopy(rsi_r),
deepcopy(rdx_r), deepcopy(rcx_r), deepcopy(r8_r),
deepcopy(r9_r)])
b.add_instruction(instr)
else:
b.add_instruction(instr)
current = NextNotTail(current)
return b | en | 0.680721 | # DEBUG #print("Decoding instruction: 0x%x" % address) # uses floating point register "st" which has # overlapping index with "rax" and so on # NOTE: for x64 we only consider 64 bit granularity at the moment. # Some instructions like "stosq" do not have string operands # => ignore for now. # DEBUG #print(ida_operand_str) # Make strict basic blocks. # DEBUG #print("Block start: 0x%x" % block_start) #print("Block end: 0x%x" % block_end) # Maybe use CodeRefsTo, any edge cases? # Decode basic blocks. # Reads from/writes to return value register retv. # Parameters. # DEBUG #print("Decode block start: 0x%x" % start) #print("Decode block end: 0x%x" % end) # TODO consider only System V AMD64 ABI at the moment. | 2.098245 | 2 |
filer/admin/imageadmin.py | vstoykov/django-filer | 1 | 6624752 | #-*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from filer import settings as filer_settings
from filer.admin.fileadmin import FileAdmin
from filer.models import Image
class ImageAdminForm(forms.ModelForm):
"""Admin form for filer Image objects adding an optional free-text
subject-location field and the focal-point JavaScript assets.
"""
subject_location = forms.CharField(
max_length=64, required=False,
label=_('Subject location'),
help_text=_('Location of the main subject of the scene.'))
def sidebar_image_ratio(self):
"""Return the bound image's sidebar ratio as a dot-separated string
('%.6F'), or '' when the form has no instance.
"""
if self.instance:
# this is very important. It forces the value to be returned as a
# string and always with a "." as separator. If the conversion
# from float to string is done in the template, the locale will
# be used and in some cases there would be a "," instead of ".".
# javascript would parse that to an integer.
return '%.6F' % self.instance.sidebar_image_ratio()
else:
return ''
class Meta:
model = Image
exclude = ()
class Media:
css = {
#'all': (settings.MEDIA_URL + 'filer/css/focal_point.css',)
}
js = (
filer_settings.FILER_STATICMEDIA_PREFIX + 'js/raphael.js',
filer_settings.FILER_STATICMEDIA_PREFIX + 'js/focal_point.js',
)
class ImageAdmin(FileAdmin):
"""FileAdmin specialised for images via ImageAdminForm."""
form = ImageAdminForm
# Fieldsets are built outside the class body so FileAdmin's helper can
# append image-specific fields plus a collapsible subject-location section.
ImageAdmin.fieldsets = ImageAdmin.build_fieldsets(
extra_main_fields=('default_alt_text', 'default_caption',),
extra_fieldsets=(
('Subject Location', {
'fields': ('subject_location',),
'classes': ('collapse',),
}),
)
)
| #-*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from filer import settings as filer_settings
from filer.admin.fileadmin import FileAdmin
from filer.models import Image
class ImageAdminForm(forms.ModelForm):
subject_location = forms.CharField(
max_length=64, required=False,
label=_('Subject location'),
help_text=_('Location of the main subject of the scene.'))
def sidebar_image_ratio(self):
if self.instance:
# this is very important. It forces the value to be returned as a
# string and always with a "." as seperator. If the conversion
# from float to string is done in the template, the locale will
# be used and in some cases there would be a "," instead of ".".
# javascript would parse that to an integer.
return '%.6F' % self.instance.sidebar_image_ratio()
else:
return ''
class Meta:
model = Image
exclude = ()
class Media:
css = {
#'all': (settings.MEDIA_URL + 'filer/css/focal_point.css',)
}
js = (
filer_settings.FILER_STATICMEDIA_PREFIX + 'js/raphael.js',
filer_settings.FILER_STATICMEDIA_PREFIX + 'js/focal_point.js',
)
class ImageAdmin(FileAdmin):
form = ImageAdminForm
ImageAdmin.fieldsets = ImageAdmin.build_fieldsets(
extra_main_fields=('default_alt_text', 'default_caption',),
extra_fieldsets=(
('Subject Location', {
'fields': ('subject_location',),
'classes': ('collapse',),
}),
)
)
| en | 0.895428 | #-*- coding: utf-8 -*- # this is very important. It forces the value to be returned as a # string and always with a "." as seperator. If the conversion # from float to string is done in the template, the locale will # be used and in some cases there would be a "," instead of ".". # javascript would parse that to an integer. #'all': (settings.MEDIA_URL + 'filer/css/focal_point.css',) | 1.909762 | 2 |
yocto/poky/scripts/lib/wic/3rdparty/pykickstart/version.py | jxtxinbing/ops-build | 2 | 6624753 | #
# <NAME> <<EMAIL>>
#
# Copyright 2006, 2007, 2008, 2009, 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Methods for working with kickstart versions.
This module defines several symbolic constants that specify kickstart syntax
versions. Each version corresponds roughly to one release of Red Hat Linux,
Red Hat Enterprise Linux, or Fedora Core as these are where most syntax
changes take place.
This module also exports several functions:
makeVersion - Given a version number, return an instance of the
matching handler class.
returnClassForVersion - Given a version number, return the matching
handler class. This does not return an
instance of that class, however.
stringToVersion - Convert a string representation of a version number
into the symbolic constant.
versionToString - Perform the reverse mapping.
versionFromFile - Read a kickstart file and determine the version of
syntax it uses. This requires the kickstart file to
have a version= comment in it.
"""
import imputil, re, sys
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
from pykickstart.errors import KickstartVersionError
# Symbolic names for internal version numbers.
# The numeric values are ordered so that chronological comparisons work
# (e.g. RHEL5 > FC6); the gaps leave room for intermediate releases.
RHEL3 = 900
FC3 = 1000
RHEL4 = 1100
FC4 = 2000
FC5 = 3000
FC6 = 4000
RHEL5 = 4100
F7 = 5000
F8 = 6000
F9 = 7000
F10 = 8000
F11 = 9000
F12 = 10000
F13 = 11000
RHEL6 = 11100
F14 = 12000
F15 = 13000
F16 = 14000
# This always points at the latest version and is the default.
DEVEL = F16
# A one-to-one mapping from string representations to version numbers.
versionMap = {
"DEVEL": DEVEL,
"FC3": FC3, "FC4": FC4, "FC5": FC5, "FC6": FC6, "F7": F7, "F8": F8,
"F9": F9, "F10": F10, "F11": F11, "F12": F12, "F13": F13,
"F14": F14, "F15": F15, "F16": F16,
"RHEL3": RHEL3, "RHEL4": RHEL4, "RHEL5": RHEL5, "RHEL6": RHEL6
}
def stringToVersion(s):
    """Convert string into one of the provided version constants. Raises
    KickstartVersionError if string does not match anything.

    Accepts short symbolic names ("F16", "RHEL6", "DEVEL"), long Fedora
    names ("Fedora Core 6", "Fedora 16") and long RHEL names
    ("Red Hat Enterprise Linux 6.2"), case-insensitively.

    Fix: replaces the Python-2-only dict.has_key() with the `in`
    operator (works on both Python 2 and 3); regexes use raw strings.
    """
    # First try these short forms.
    try:
        return versionMap[s.upper()]
    except KeyError:
        pass
    # Now try the Fedora versions.
    m = re.match(r"^fedora.* (\d+)$", s, re.I)
    if m and m.group(1):
        if "FC" + m.group(1) in versionMap:
            return versionMap["FC" + m.group(1)]
        elif "F" + m.group(1) in versionMap:
            return versionMap["F" + m.group(1)]
        else:
            raise KickstartVersionError(_("Unsupported version specified: %s") % s)
    # Now try the RHEL versions.
    m = re.match(r"^red hat enterprise linux.* (\d+)([\.\d]*)$", s, re.I)
    if m and m.group(1):
        if "RHEL" + m.group(1) in versionMap:
            return versionMap["RHEL" + m.group(1)]
        else:
            raise KickstartVersionError(_("Unsupported version specified: %s") % s)
    # If nothing else worked, we're out of options.
    raise KickstartVersionError(_("Unsupported version specified: %s") % s)
def versionToString(version, skipDevel=False):
    """Convert version into a string representation of the version number.
    This is the reverse operation of stringToVersion. Raises
    KickstartVersionError if version does not match anything.

    Fix: replaces the Python-2-only dict.iteritems() with items(), which
    behaves identically here and also works on Python 3.
    """
    if not skipDevel and version == versionMap["DEVEL"]:
        return "DEVEL"
    # Skip the DEVEL alias so the concrete release name is returned.
    for (key, val) in versionMap.items():
        if key == "DEVEL":
            continue
        elif val == version:
            return key
    raise KickstartVersionError(_("Unsupported version specified: %s") % version)
def returnClassForVersion(version=DEVEL):
"""Return the class of the syntax handler for version. version can be
either a string or the matching constant. Raises KickstartValueError
if version does not match anything.
"""
# Accept either an int constant or a version string; both paths leave
# `module` holding the handler module name (e.g. "f16") and `version`
# holding the numeric constant.
try:
version = int(version)
module = "%s" % versionToString(version, skipDevel=True)
except ValueError:
module = "%s" % version
version = stringToVersion(version)
module = module.lower()
try:
import pykickstart.handlers
sys.path.extend(pykickstart.handlers.__path__)
found = imputil.imp.find_module(module)
loaded = imputil.imp.load_module(module, found[0], found[1], found[2])
# The handler module is expected to define a class whose lowercased
# name ends in "<module>handler"; return the first such class.
for (k, v) in loaded.__dict__.iteritems():
if k.lower().endswith("%shandler" % module):
return v
# NOTE(review): the bare except below reports every failure (including
# genuine import/syntax errors in the handler module) as a version
# error; consider narrowing it -- confirm before changing behavior.
except:
raise KickstartVersionError(_("Unsupported version specified: %s") % version)
def makeVersion(version=DEVEL):
    """Return a new instance of the syntax handler for version. version can be
    either a string or the matching constant. This function is useful for
    standalone programs which just need to handle a specific version of
    kickstart syntax (as provided by a command line argument, for example)
    and need to instantiate the correct object.
    """
    handler_class = returnClassForVersion(version)
    return handler_class()
| #
# <NAME> <<EMAIL>>
#
# Copyright 2006, 2007, 2008, 2009, 2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
"""
Methods for working with kickstart versions.
This module defines several symbolic constants that specify kickstart syntax
versions. Each version corresponds roughly to one release of Red Hat Linux,
Red Hat Enterprise Linux, or Fedora Core as these are where most syntax
changes take place.
This module also exports several functions:
makeVersion - Given a version number, return an instance of the
matching handler class.
returnClassForVersion - Given a version number, return the matching
handler class. This does not return an
instance of that class, however.
stringToVersion - Convert a string representation of a version number
into the symbolic constant.
versionToString - Perform the reverse mapping.
versionFromFile - Read a kickstart file and determine the version of
syntax it uses. This requires the kickstart file to
have a version= comment in it.
"""
import imputil, re, sys
import gettext
_ = lambda x: gettext.ldgettext("pykickstart", x)
from pykickstart.errors import KickstartVersionError
# Symbolic names for internal version numbers.
RHEL3 = 900
FC3 = 1000
RHEL4 = 1100
FC4 = 2000
FC5 = 3000
FC6 = 4000
RHEL5 = 4100
F7 = 5000
F8 = 6000
F9 = 7000
F10 = 8000
F11 = 9000
F12 = 10000
F13 = 11000
RHEL6 = 11100
F14 = 12000
F15 = 13000
F16 = 14000
# This always points at the latest version and is the default.
DEVEL = F16
# A one-to-one mapping from string representations to version numbers.
versionMap = {
"DEVEL": DEVEL,
"FC3": FC3, "FC4": FC4, "FC5": FC5, "FC6": FC6, "F7": F7, "F8": F8,
"F9": F9, "F10": F10, "F11": F11, "F12": F12, "F13": F13,
"F14": F14, "F15": F15, "F16": F16,
"RHEL3": RHEL3, "RHEL4": RHEL4, "RHEL5": RHEL5, "RHEL6": RHEL6
}
def stringToVersion(s):
"""Convert string into one of the provided version constants. Raises
KickstartVersionError if string does not match anything.
"""
# First try these short forms.
try:
return versionMap[s.upper()]
except KeyError:
pass
# Now try the Fedora versions.
m = re.match("^fedora.* (\d+)$", s, re.I)
if m and m.group(1):
if versionMap.has_key("FC" + m.group(1)):
return versionMap["FC" + m.group(1)]
elif versionMap.has_key("F" + m.group(1)):
return versionMap["F" + m.group(1)]
else:
raise KickstartVersionError(_("Unsupported version specified: %s") % s)
# Now try the RHEL versions.
m = re.match("^red hat enterprise linux.* (\d+)([\.\d]*)$", s, re.I)
if m and m.group(1):
if versionMap.has_key("RHEL" + m.group(1)):
return versionMap["RHEL" + m.group(1)]
else:
raise KickstartVersionError(_("Unsupported version specified: %s") % s)
# If nothing else worked, we're out of options.
raise KickstartVersionError(_("Unsupported version specified: %s") % s)
def versionToString(version, skipDevel=False):
"""Convert version into a string representation of the version number.
This is the reverse operation of stringToVersion. Raises
KickstartVersionError if version does not match anything.
"""
if not skipDevel and version == versionMap["DEVEL"]:
return "DEVEL"
for (key, val) in versionMap.iteritems():
if key == "DEVEL":
continue
elif val == version:
return key
raise KickstartVersionError(_("Unsupported version specified: %s") % version)
def returnClassForVersion(version=DEVEL):
"""Return the class of the syntax handler for version. version can be
either a string or the matching constant. Raises KickstartValueError
if version does not match anything.
"""
try:
version = int(version)
module = "%s" % versionToString(version, skipDevel=True)
except ValueError:
module = "%s" % version
version = stringToVersion(version)
module = module.lower()
try:
import pykickstart.handlers
sys.path.extend(pykickstart.handlers.__path__)
found = imputil.imp.find_module(module)
loaded = imputil.imp.load_module(module, found[0], found[1], found[2])
for (k, v) in loaded.__dict__.iteritems():
if k.lower().endswith("%shandler" % module):
return v
except:
raise KickstartVersionError(_("Unsupported version specified: %s") % version)
def makeVersion(version=DEVEL):
"""Return a new instance of the syntax handler for version. version can be
either a string or the matching constant. This function is useful for
standalone programs which just need to handle a specific version of
kickstart syntax (as provided by a command line argument, for example)
and need to instantiate the correct object.
"""
cl = returnClassForVersion(version)
return cl()
| en | 0.809495 | # # <NAME> <<EMAIL>> # # Copyright 2006, 2007, 2008, 2009, 2010 Red Hat, Inc. # # This copyrighted material is made available to anyone wishing to use, modify, # copy, or redistribute it subject to the terms and conditions of the GNU # General Public License v.2. This program is distributed in the hope that it # will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the # implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat # trademarks that are incorporated in the source code or documentation are not # subject to the GNU General Public License and may only be used or replicated # with the express permission of Red Hat, Inc. # Methods for working with kickstart versions. This module defines several symbolic constants that specify kickstart syntax versions. Each version corresponds roughly to one release of Red Hat Linux, Red Hat Enterprise Linux, or Fedora Core as these are where most syntax changes take place. This module also exports several functions: makeVersion - Given a version number, return an instance of the matching handler class. returnClassForVersion - Given a version number, return the matching handler class. This does not return an instance of that class, however. stringToVersion - Convert a string representation of a version number into the symbolic constant. versionToString - Perform the reverse mapping. versionFromFile - Read a kickstart file and determine the version of syntax it uses. This requires the kickstart file to have a version= comment in it. # Symbolic names for internal version numbers. # This always points at the latest version and is the default. 
# A one-to-one mapping from string representations to version numbers. Convert string into one of the provided version constants. Raises KickstartVersionError if string does not match anything. # First try these short forms. # Now try the Fedora versions. # Now try the RHEL versions. # If nothing else worked, we're out of options. Convert version into a string representation of the version number. This is the reverse operation of stringToVersion. Raises KickstartVersionError if version does not match anything. Return the class of the syntax handler for version. version can be either a string or the matching constant. Raises KickstartValueError if version does not match anything. Return a new instance of the syntax handler for version. version can be either a string or the matching constant. This function is useful for standalone programs which just need to handle a specific version of kickstart syntax (as provided by a command line argument, for example) and need to instantiate the correct object. | 1.979695 | 2 |
algo.py | VRaviTheja/SDN-policy | 0 | 6624754 | #!/usr/bin/python
import time
import pytricia
import python3_reading_file_to_dict
import sys
import pprint
import csv
import p_trie
import excluding_ip
import excluding_port
import add_all_rules_after_excluding
import ipaddress
import copy
import os
from operator import itemgetter
final_device_values = []
se_number = 1001
def WriteDictToCSV(csv_file,csv_columns,dict_data):
    """Write *dict_data* (a list of row dicts) to *csv_file* with a header row.

    Args:
        csv_file: destination file path.
        csv_columns: field names, in the desired column order.
        dict_data: iterable of dicts keyed by csv_columns.

    I/O errors are reported on stdout instead of being raised, matching the
    original best-effort behaviour.
    """
    try:
        # Bug fix: the csv module requires newline='' on Python 3; without it
        # every data row is followed by a blank line on Windows.
        with open(csv_file, 'w', newline='') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
            writer.writeheader()
            for data in dict_data:
                writer.writerow(data)
    except IOError as err:
        print("I/O error{0}: ".format(err))
    return
def creating_dict():
    """Load flow rules from the CSV file named on the command line and return
    them sorted by numeric priority.

    The 'priority' field is temporarily converted to int so that '10' sorts
    after '2', then restored to string form for the rest of the pipeline.
    """
    rules = python3_reading_file_to_dict.csv_dict_list(sys.argv[1])
    for rule in rules:
        rule['priority'] = int(rule['priority'])
    rules.sort(key=itemgetter('priority'))
    for rule in rules:
        rule['priority'] = str(rule['priority'])
    return rules
def check_layer2_layer4(a):
    """Return True when rule *a* wildcards both IPs (0.0.0.0/0 each way) but
    still constrains a layer-2 (MAC) or layer-4 (port) field."""
    if a['src_ip'] != '0.0.0.0/0' or a['dst_ip'] != '0.0.0.0/0':
        return False
    # NOTE(review): this pairs src_start with dst_end (not dst_start) --
    # mirrors the original tuple comparison; confirm the field choice.
    l2_l4_fields = (a['src_mac'], a['dst_mac'], a['src_start'], a['dst_end'])
    return l2_l4_fields != ('00:00:00:00:00:00', '00:00:00:00:00:00', '0', '0')
def find_all_parents(pyt,ip):
    """Collect every ancestor prefix of *ip* stored in trie *pyt*, nearest
    ancestor first; empty list when *ip* has no parent."""
    ancestors = []
    current = pyt.parent(ip)
    while current is not None:
        ancestors.append(current)
        current = pyt.parent(current)
    return ancestors
def check_tcp_udp(flow_rule):
    """Return the rule's IP protocol number as int (e.g. 6=TCP, 17=UDP)."""
    proto = flow_rule["nw_proto"]
    return int(proto)
def check_ingress(flow_rule):
    """Return the rule's ingress field unchanged."""
    ingress = flow_rule["ingress"]
    return ingress
"""def add_rule_to_newft(flow_rule): #Adding rule to flow
with open("new_table99", "a") as myfile:
myfile.write(str(flow_rule))
"""
def finding_patricia_empty(pyt): # Checking whether patricia tree is empty or not
    """Return True when the patricia trie *pyt* holds no prefixes."""
    return len(pyt) == 0
def check_and_delete_in_final_device_values(flow_rule):
    """Remove the first entry in the module-global final_device_values whose
    'aasno' matches *flow_rule*'s; no-op when nothing matches.

    Bug fix: the original matched on the stored element *x* but then removed
    *flow_rule* itself, which raises ValueError (or removes the wrong dict)
    when an equal-'aasno' but non-identical dict is stored.
    """
    for entry in final_device_values:
        if entry['aasno'] == flow_rule['aasno']:
            final_device_values.remove(entry)
            break
def add_rule_to_patricia(pyt_src,pyt_dst,flow_rule): # Adding rules to patricia and final_device_values
    """Register *flow_rule* in the module-global final_device_values (deduped
    by 'aasno') and record its rule number under its src/dst prefixes in the
    two patricia tries.

    Each trie maps an IP prefix to the list of rule numbers ('aasno' as int)
    stored at that prefix; a number is never appended twice.
    """
    if not any(entry['aasno'] == flow_rule['aasno'] for entry in final_device_values):
        final_device_values.append(flow_rule)
    rule_no = int(flow_rule['aasno'])
    for trie, ip_key in ((pyt_src, 'src_ip'), (pyt_dst, 'dst_ip')):
        prefix = flow_rule[ip_key]
        if trie.has_key(prefix):
            numbers = trie.get(prefix)
            if rule_no not in numbers:
                numbers.append(rule_no)
                trie.insert(prefix, numbers)
        else:
            trie.insert(prefix, [rule_no])
    return None
def subset_for_port(src_a_start, src_a_end, dst_a_start, dst_a_end, src_b_start, src_b_end, dst_b_start, dst_b_end):
    """Classify how rule A's src/dst port ranges relate to rule B's.

    Returns:
        (relation, src_overlap, dst_overlap) where relation is one of
        'exact'      -- identical ranges on both sides,
        'equal'      -- A lies inside B on both sides,
        'reverse'    -- B lies inside A on both sides,
        'intersect'  -- both sides overlap partially,
        'completely' -- disjoint on at least one side,
        and the overlaps are sorted lists of the common port numbers
        (empty when disjoint).

    Performance fix: the original materialised both full ranges plus two sets
    per side (heavy for 0-65535 spans); the overlap of two integer intervals
    is simply [max(starts), min(ends)], which yields the identical list.
    """
    sa0, sa1 = int(src_a_start), int(src_a_end)
    da0, da1 = int(dst_a_start), int(dst_a_end)
    sb0, sb1 = int(src_b_start), int(src_b_end)
    db0, db1 = int(dst_b_start), int(dst_b_end)
    # Interval intersection; range() is empty when the intervals are disjoint.
    src_port_intersection_part = list(range(max(sa0, sb0), min(sa1, sb1) + 1))
    dst_port_intersection_part = list(range(max(da0, db0), min(da1, db1) + 1))
    if (sa0, sa1, da0, da1) == (sb0, sb1, db0, db1):
        var2 = "exact"
    elif sa0 >= sb0 and sa1 <= sb1 and da0 >= db0 and da1 <= db1:
        var2 = "equal"
    elif sa0 <= sb0 and sa1 >= sb1 and da0 <= db0 and da1 >= db1:
        var2 = "reverse"
    elif src_port_intersection_part and dst_port_intersection_part:
        var2 = "intersect"
    else:
        var2 = "completely"
    return var2, src_port_intersection_part, dst_port_intersection_part
def subset_for_ip(pyt_src, pyt_dst, gamma, mydict ,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules):
    """Combine the IP-prefix relation and the port-range relation between the
    installed rule *gamma* and the incoming rule *mydict* into one verdict.

    The six *_conflict_rules lists come from check_rule_for_similars and hold
    the rule numbers ('aasno' as int) found at the same prefix, at child
    prefixes, and at parent prefixes of mydict's src/dst IPs.

    Returns:
        (final, src_ip_overlap, dst_ip_overlap, src_port_overlap,
         dst_port_overlap) with final in {'exact', 'equal', 'reverse',
        'intersect', 'different'}.
    """
    compare = int(gamma['aasno'])
    # --- IP dimension: where does gamma sit relative to mydict's prefixes? ---
    if (compare in src_same_conflict_rules) and (compare in dst_same_conflict_rules):
        # Identical prefixes on both sides.
        var1 = "exact"
        src_intersection_part = mydict['src_ip']
        dst_intersection_part = mydict['dst_ip']
    elif (((compare in src_paren_conflict_rules) or (compare in src_same_conflict_rules)) and ((compare in dst_paren_conflict_rules) or (compare in dst_same_conflict_rules))):
        # gamma covers mydict on both sides -> the overlap is mydict's prefixes.
        var1 = "equal"
        src_intersection_part = mydict['src_ip']
        dst_intersection_part = mydict['dst_ip']
    elif (((compare in src_child_conflict_rules) or (compare in src_same_conflict_rules)) and ((compare in dst_child_conflict_rules) or (compare in dst_same_conflict_rules))):
        # mydict covers gamma on both sides -> the overlap is gamma's prefixes.
        var1 = "reverse"
        src_intersection_part = gamma['src_ip']
        dst_intersection_part = gamma['dst_ip']
    elif ((compare in src_child_conflict_rules) and (compare in dst_paren_conflict_rules)):
        # Mixed containment: gamma narrower on src, wider on dst.
        var1 = "intersect"
        src_intersection_part = gamma['src_ip']
        dst_intersection_part = mydict['dst_ip']
    elif ((compare in src_paren_conflict_rules) and (compare in dst_child_conflict_rules)):
        # Mixed containment: gamma wider on src, narrower on dst.
        var1 = "intersect"
        src_intersection_part = mydict['src_ip']
        dst_intersection_part = gamma['dst_ip']
    # NOTE(review): if none of the branches above matches, var1 and the
    # *_intersection_part names stay unbound and the return below raises
    # UnboundLocalError.  This appears unreachable because callers only pass
    # rule numbers that conflict on both tries -- confirm.
    var2,src_port_intersection_part,dst_port_intersection_part = subset_for_port(mydict['src_start'], mydict['src_end'], mydict['dst_start'], mydict['dst_end'], gamma['src_start'], gamma['src_end'], gamma['dst_start'], gamma['dst_end']) # Now calling subset_for port
    # --- Merge the IP verdict (var1) with the port verdict (var2). ---
    if var1 == "exact" and var2 == "exact":
        final = "exact"
    elif var1 == "equal" and var2 == "equal":
        final = var1
    elif var1 == "reverse" and var2 == "reverse":
        final = var1
    elif var1 == "reverse" and var2 == "exact":
        final = "reverse"
    elif var1 == "exact" and var2 == "reverse":
        final = "reverse"
    elif var1 == "reverse" and var2 == "equal":
        final = "intersect"
    elif var1 == "equal" and var2 == "reverse":
        final = "intersect"
    elif var1 == "equal" and var2 == "exact":
        final = "equal"
    elif var1 == "exact" and var2 == "equal":
        final = "equal"
    elif var1 == "intersect" or var2 == "intersect":
        final = "intersect"
    elif var2 == "completely":
        # Disjoint port ranges: the two rules never match the same packet.
        final = "different"
    else :
        final = "intersect"
    return final,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part
def check_rule_for_similars(pyt_src,pyt_dst,mydict):
    """Find every installed rule whose src AND dst prefixes overlap *mydict*'s.

    *mydict* is inserted into both tries temporarily so that the exact-match,
    children() and parent() lookups all see a consistent view; it is removed
    again (via delete_rule_from_pt_ft) before returning.

    Returns a 7-tuple: the conflicting rule numbers, plus the same/child/
    parent rule-number lists for the src and dst tries (these are consumed
    later by subset_for_ip).
    """
    src_conflict_rules = []
    dst_conflict_rules = []
    src_same_conflict_rules = []
    dst_same_conflict_rules = []
    # Rule numbers already stored under exactly the same prefixes
    # (collected BEFORE the temporary insert below, so mydict's own
    # number is excluded).
    if pyt_src.has_key(mydict['src_ip']):
        src_same_conflict_rules = src_same_conflict_rules + pyt_src.get(mydict['src_ip'])
    if pyt_dst.has_key(mydict['dst_ip']):
        dst_same_conflict_rules = dst_same_conflict_rules + pyt_dst.get(mydict['dst_ip'])
    add_rule_to_patricia(pyt_src, pyt_dst, mydict) # Temporary insert so trie lookups below work
    src_child = pyt_src.children(mydict["src_ip"])
    src_paren = find_all_parents(pyt_src, mydict['src_ip'])
    dst_child = pyt_dst.children(mydict['dst_ip'])
    dst_paren = find_all_parents(pyt_dst, mydict['dst_ip'])
    src_child_conflict_rules = []
    dst_child_conflict_rules = []
    src_paren_conflict_rules = []
    dst_paren_conflict_rules = []
    # Flatten the rule-number lists stored under each related prefix.
    if src_child != None :
        for i in src_child:
            src_child_conflict_rules = src_child_conflict_rules + pyt_src.get(i)
    if dst_child != None :
        for i in dst_child:
            dst_child_conflict_rules = dst_child_conflict_rules + pyt_dst.get(i)
    if src_paren != None :
        for i in src_paren:
            src_paren_conflict_rules = src_paren_conflict_rules + pyt_src.get(i)
    if dst_paren != None :
        for i in dst_paren:
            dst_paren_conflict_rules = dst_paren_conflict_rules + pyt_dst.get(i)
    # NOTE(review): this concatenation raises TypeError if children() ever
    # returns None, which the '!= None' guards above imply is possible --
    # confirm pytricia's children() contract.
    src_all = src_child + src_paren
    dst_all = dst_child + dst_paren
    if src_all != None :
        for i in src_all:
            src_conflict_rules = src_conflict_rules + pyt_src.get(i)
    if dst_all != None :
        for i in dst_all:
            dst_conflict_rules = dst_conflict_rules + pyt_dst.get(i)
    src_conflict_rules = src_conflict_rules + src_same_conflict_rules
    dst_conflict_rules = dst_conflict_rules + dst_same_conflict_rules
    # A real conflict needs overlap on BOTH the src and the dst dimension.
    final_conflict_rules = list(set(src_conflict_rules) & set(dst_conflict_rules))
    delete_rule_from_pt_ft(pyt_src, pyt_dst, mydict) # Undo the temporary insert
    return final_conflict_rules,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules
def detection_algorithm(gamma,mydict,pyt_src,pyt_dst,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules,rap):
    """Classify the conflict between installed rule *gamma* and incoming rule
    *mydict* and dispatch the matching resolution to conflict_resolver.

    The geometric relation of the two match spaces comes from subset_for_ip
    ('exact'/'equal'/'reverse'/'intersect'/'different'); the chosen strategy
    further depends on whether actions and priorities agree.  Returns the rap
    flag produced by conflict_resolver (200 tells the caller that mydict
    still has to be installed).

    NOTE(review): the action field is read with key "action " (trailing
    space), which matches the CSV header as parsed -- confirm upstream.
    """
    final,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part = subset_for_ip(pyt_src, pyt_dst, gamma, mydict,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules)
    # Different protocol, disjoint match space, or different ingress:
    # the rules can never match the same packet, so just install mydict.
    if((check_tcp_udp(mydict) != check_tcp_udp(gamma)) or (final == "different") or (check_ingress(mydict) != check_ingress(gamma))):
        add_rule_to_patricia(pyt_src,pyt_dst,mydict)
    elif(final == "exact"):
        # Identical match space.
        if(mydict["action "]==gamma["action "]):
            # Same action: mydict is fully redundant.
            rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"redundancy",rap)
        else:
            if(mydict["priority"]==gamma["priority"]):
                # Same priority, different action: ambiguous -- split gamma.
                rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation_prompt",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
            else:
                # Different priority: the lower-priority rule is shadowed.
                rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"shadowing",rap)
    elif(final == "equal"): # mydict's match space lies inside gamma's
        if(mydict["action "]==gamma["action "]):
            rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"redundancy",rap)
        else:
            if(mydict["priority"]==gamma["priority"]):
                rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation_prompt",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
            else:
                # gamma generalizes mydict: carve the overlap out of gamma.
                rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"generalization",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
    elif(final == "reverse"): # gamma's match space lies inside mydict's
        if(mydict["action "]==gamma["action "]):
            # gamma becomes redundant once mydict is installed.
            rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"redundancy_gamma_removing",rap)
        else:
            if(mydict["priority"]==gamma["priority"]):
                rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation_prompt",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
            else:
                rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"shadowing",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
    elif(final == "intersect"):
        # Partial overlap of the two match spaces.
        if(mydict["action "]==gamma["action "]):
            rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"overlap",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
        else:
            if(mydict["priority"]==gamma["priority"]):
                rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation_prompt",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
            else:
                rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
    return rap
def delete_rule_from_pt_ft(pyt_src, pyt_dst, gamma):
    """Remove rule *gamma* from final_device_values and from both tries.

    Bug fix: the original returned early when the rule number was missing
    from the SOURCE trie's list, silently skipping destination-side cleanup;
    the two sides are now cleaned up independently.  (Dead commented-out
    flow-table file rewriting code was also dropped.)
    """
    check_and_delete_in_final_device_values(gamma)
    rule_no = int(gamma['aasno'])
    _remove_rule_number(pyt_src, gamma['src_ip'], rule_no)
    _remove_rule_number(pyt_dst, gamma['dst_ip'], rule_no)


def _remove_rule_number(trie, prefix, rule_no):
    """Drop *rule_no* from the list stored under *prefix* in *trie*; delete
    the prefix entry entirely when it held only that one rule."""
    numbers = trie.get(prefix)
    if numbers is None or rule_no not in numbers:
        return
    if len(numbers) > 1:
        numbers.remove(rule_no)
        trie.insert(prefix, numbers)
    else:
        trie.delete(prefix)
def conflict_resolver(pyt_src, pyt_dst, mydict, gamma, conflict_type,rap,src_intersection_part = None,dst_intersection_part = None,src_port_intersection_part = None,dst_port_intersection_part = None):
    """Resolve one detected conflict between the incoming rule *mydict* and
    the installed rule *gamma*.

    Strategies:
      - 'shadowing':                 remove gamma; signal rap=200 so the
                                     caller installs mydict.
      - 'redundancy_gamma_removing': replace gamma with mydict immediately.
      - 'redundancy':                drop mydict (gamma already covers it).
      - 'generalization' / 'overlap' / 'correlation' / 'correlation_prompt':
                                     split gamma around its overlap with
                                     mydict and signal rap=200.

    Returns the (possibly updated) *rap* flag.

    The four split cases had byte-identical 10-line bodies in the original;
    that shared logic now lives in _split_gamma_around_overlap.
    """
    if conflict_type == "shadowing":
        delete_rule_from_pt_ft(pyt_src, pyt_dst, gamma)
        rap = 200
    elif conflict_type == "redundancy_gamma_removing":
        delete_rule_from_pt_ft(pyt_src, pyt_dst, gamma)
        add_rule_to_patricia(pyt_src, pyt_dst, mydict)
    elif conflict_type == "redundancy":
        pass  # mydict duplicates gamma; install nothing
    elif conflict_type in ("generalization", "overlap", "correlation_prompt", "correlation"):
        rap = 200
        _split_gamma_around_overlap(pyt_src, pyt_dst, mydict, gamma,
                                    src_intersection_part, dst_intersection_part,
                                    src_port_intersection_part,
                                    dst_port_intersection_part)
    return rap


def _split_gamma_around_overlap(pyt_src, pyt_dst, mydict, gamma,
                                src_intersection_part, dst_intersection_part,
                                src_port_intersection_part,
                                dst_port_intersection_part):
    """Carve the overlap with *mydict* out of *gamma*: compute the leftover
    IP prefixes and port ranges, install the resulting fragment rules, then
    remove the original gamma."""
    src_ip_list = excluding_ip.func_exclude_ip(gamma["src_ip"], src_intersection_part)
    dst_ip_list = excluding_ip.func_exclude_ip(gamma["dst_ip"], dst_intersection_part)
    src_port_list = excluding_port.func_exclude_port(
        list(range(int(gamma["src_start"]), int(gamma["src_end"]) + 1)),
        src_port_intersection_part)
    dst_port_list = excluding_port.func_exclude_port(
        list(range(int(gamma["dst_start"]), int(gamma["dst_end"]) + 1)),
        dst_port_intersection_part)
    fragments = add_all_rules_after_excluding.add_all_rules(
        src_ip_list, dst_ip_list, src_port_list, dst_port_list,
        mydict, gamma, pyt_src, pyt_dst)
    for fragment in fragments:
        add_rule_to_patricia(pyt_src, pyt_dst, fragment)
    delete_rule_from_pt_ft(pyt_src, pyt_dst, gamma)
def detection(device_values,pyt_src,pyt_dst): # Main Detection
    """Run the full detection pass: feed each rule through conflict lookup,
    classification and resolution, building the final rule set in the tries
    and in the module-global final_device_values.

    Bug fix: rap1 was only assigned inside the loop when a conflicting rule
    object was actually found in final_device_values; when every lookup
    missed, the 'if rap1 == 200' check raised NameError.  It is now
    initialised to the neutral value before the loop.
    """
    print("Detection starts from here")
    for mydict in device_values :
        if check_layer2_layer4(mydict) == True :
            # Pure layer-2/layer-4 rule over wildcard IPs: nothing to
            # reconcile against the IP tries.
            pass
        else :
            conflict_rule_numbers,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules = check_rule_for_similars(pyt_src,pyt_dst,mydict)
            if len(conflict_rule_numbers) == 0 :
                # No overlap with anything installed: add directly.
                add_rule_to_patricia(pyt_src,pyt_dst,mydict)
            else :
                fd = final_device_values
                rap = 100
                rap1 = rap  # neutral default; 200 means "install mydict below"
                for i in conflict_rule_numbers:
                    it = str(i)
                    # Locate the stored rule dict for this conflicting number.
                    my_item = 100
                    for item in fd:
                        if item['aasno'] == it:
                            my_item = item
                            break
                    if my_item != 100:
                        gamma = my_item
                        rap1 = detection_algorithm(gamma, mydict, pyt_src, pyt_dst,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules,rap)
                if rap1 == 200:
                    add_rule_to_patricia(pyt_src,pyt_dst,mydict)
    print("DETECTION COMPLETE:")
if __name__ == "__main__" :
    # Load the rules, run conflict detection, then dump the resolved set.
    device_values = creating_dict()
    pyt_src,pyt_dst = p_trie.patricia()
    start_time = time.time()
    detection(device_values,pyt_src,pyt_dst)
    Total_time = time.time() - start_time
    print(len(final_device_values))
    pprint.pprint(final_device_values)
    if final_device_values:  # bug fix: avoid IndexError when nothing survived
        csv_columns = final_device_values[0].keys()
        out_dir = os.path.join(os.getcwd(), "csv")
        os.makedirs(out_dir, exist_ok=True)  # bug fix: ./csv may not exist yet
        csv_file = os.path.join(out_dir, "Outputflows.csv")
        WriteDictToCSV(csv_file,csv_columns,final_device_values)
    print("Time taken: --- %s seconds ---" % (Total_time))
| #!/usr/bin/python
import time
import pytricia
import python3_reading_file_to_dict
import sys
import pprint
import csv
import p_trie
import excluding_ip
import excluding_port
import add_all_rules_after_excluding
import ipaddress
import copy
import os
from operator import itemgetter
final_device_values = []
se_number = 1001
def WriteDictToCSV(csv_file,csv_columns,dict_data):
try:
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in dict_data:
writer.writerow(data)
except IOError as err:
print("I/O error{0}: ".format(err))
return
def creating_dict():
device_values = python3_reading_file_to_dict.csv_dict_list(sys.argv[1]) # Calls the csv_dict_list function, passing the named csv
i = 0
for x in device_values:
x['priority'] = int(x['priority'])
device_values[i] = x
i = i+1
device_values = sorted(device_values, key=itemgetter('priority')) # device_values = sorted(device_values, key=itemgetter('priority'))
# pprint.pprint(device_values)
i = 0 # Prints the results nice and pretty like
for x in device_values:
x['priority'] = str(x['priority'])
device_values[i] = x
i = i+1
temp = []
"""for x in device_values:
temp.append(int(x['priority']))
print(temp)"""
return device_values
def check_layer2_layer4(a):
if (a['src_ip'],a['dst_ip']) == ('0.0.0.0/0','0.0.0.0/0'):
if (a['src_mac'],a['dst_mac'],a['src_start'],a['dst_end']) != ('00:00:00:00:00:00','00:00:00:00:00:00','0','0'):
return True
else:
return False
else :
return False
def find_all_parents(pyt,ip): # Finding list of all parents
parent_all = []
ip = pyt.parent(ip)
while ip != None :
parent_all.append(ip)
ip = pyt.parent(ip)
return parent_all
def check_tcp_udp(flow_rule): # checking whether tcp or udp
return int(flow_rule["nw_proto"])
def check_ingress(flow_rule):
return (flow_rule["ingress"])
"""def add_rule_to_newft(flow_rule): #Adding rule to flow
with open("new_table99", "a") as myfile:
myfile.write(str(flow_rule))
"""
def finding_patricia_empty(pyt): #Checking whether patricia tree is empty or not
if(len(pyt)==0):
return True
else :
return False
def check_and_delete_in_final_device_values(flow_rule):
for x in final_device_values:
if x['aasno'] == flow_rule['aasno']:
final_device_values.remove(flow_rule)
break
else:
continue
def add_rule_to_patricia(pyt_src,pyt_dst,flow_rule): #Adding rules to patricia and final_device values
temp = []
isthere = 0
if len(final_device_values) == 0:
final_device_values.append(flow_rule)
else:
for x in final_device_values:
if x['aasno'] == flow_rule['aasno']:
isthere = 1
break
if isthere != 1:
final_device_values.append(flow_rule)
isthere = 0
if pyt_src.has_key(flow_rule['src_ip']):
temp = pyt_src.get(flow_rule['src_ip'])
if int(flow_rule['aasno']) not in temp:
temp.append(int(flow_rule['aasno']))
pyt_src.insert(flow_rule['src_ip'],temp)
else :
pyt_src.insert(flow_rule['src_ip'],[int(flow_rule['aasno'])])
temp1 = []
if pyt_dst.has_key(flow_rule['dst_ip']):
temp1 = pyt_dst.get(flow_rule['dst_ip'])
if int(flow_rule['aasno']) not in temp1:
temp1.append(int(flow_rule['aasno']))
pyt_dst.insert(flow_rule['dst_ip'],temp1)
else:
pyt_dst.insert(flow_rule['dst_ip'],[int(flow_rule['aasno'])])
return None
def subset_for_port(src_a_start, src_a_end, dst_a_start, dst_a_end, src_b_start, src_b_end, dst_b_start, dst_b_end):
src_a = list(range(int(src_a_start), int(src_a_end)+1))
dst_a = list(range(int(dst_a_start), int(dst_a_end)+1))
src_b = list(range(int(src_b_start), int(src_b_end)+1))
dst_b = list(range(int(dst_b_start), int(dst_b_end)+1))
src_inter = list(set(src_a) & set(src_b))
dst_inter = list(set(dst_a) & set(dst_b))
if ((int(src_a_start) == int(src_b_start)) and (int(src_a_end) == int(src_b_end))) and ((int(dst_a_start) == int(dst_b_start)) and (int(dst_a_end) == int(dst_b_end))):
var2 = "exact"
elif ((int(src_a_start) >= int(src_b_start) and int(src_a_end) <= int(src_b_end)) and (int(dst_a_start) >= int(dst_b_start) and int(dst_a_end) <= int(dst_b_end))):
var2 = "equal"
elif ((int(src_a_start) <= int(src_b_start) and int(src_a_end) >= int(src_b_end)) and (int(dst_a_start) <= int(dst_b_start) and int(dst_a_end) >= int(dst_b_end))):
var2 = "reverse"
elif src_inter and dst_inter:
var2 = "intersect"
else :
var2 = "completely"
"""
temp = []
src_port_intersection = []
dst_port_intersection = []
for x in src_inter:
if temp :
temp.append(x)
continue
if x-1 == temp[-1]:
temp.append(x)
continue
else:
src_port_intersection = [temp]
temp = []
"""
src_inter.sort()
dst_inter.sort()
src_port_intersection_part = src_inter
dst_port_intersection_part = dst_inter
# print("Length of Source port Intersection: ",len(src_port_intersection_part),"|| Length of Source port Intersection: ",len(dst_port_intersection_part))
return var2,src_port_intersection_part,dst_port_intersection_part
def subset_for_ip(pyt_src, pyt_dst, gamma, mydict ,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules):
compare = int(gamma['aasno'])
if (compare in src_same_conflict_rules) and (compare in dst_same_conflict_rules):
var1 = "exact"
src_intersection_part = mydict['src_ip']
dst_intersection_part = mydict['dst_ip']
elif (((compare in src_paren_conflict_rules) or (compare in src_same_conflict_rules)) and ((compare in dst_paren_conflict_rules) or (compare in dst_same_conflict_rules))):
var1 = "equal"
src_intersection_part = mydict['src_ip']
dst_intersection_part = mydict['dst_ip']
elif (((compare in src_child_conflict_rules) or (compare in src_same_conflict_rules)) and ((compare in dst_child_conflict_rules) or (compare in dst_same_conflict_rules))):
var1 = "reverse"
src_intersection_part = gamma['src_ip']
dst_intersection_part = gamma['dst_ip']
elif ((compare in src_child_conflict_rules) and (compare in dst_paren_conflict_rules)):
var1 = "intersect"
src_intersection_part = gamma['src_ip']
dst_intersection_part = mydict['dst_ip']
elif ((compare in src_paren_conflict_rules) and (compare in dst_child_conflict_rules)):
var1 = "intersect"
src_intersection_part = mydict['src_ip']
dst_intersection_part = gamma['dst_ip']
var2,src_port_intersection_part,dst_port_intersection_part = subset_for_port(mydict['src_start'], mydict['src_end'], mydict['dst_start'], mydict['dst_end'], gamma['src_start'], gamma['src_end'], gamma['dst_start'], gamma['dst_end']) # Now calling subset_for port
# print("Conflict_type in IPs: ", var1, "|| Conflict_type in PORTs: ", var2)
if var1 == "exact" and var2 == "exact":
final = "exact"
elif var1 == "equal" and var2 == "equal":
final = var1
elif var1 == "reverse" and var2 == "reverse":
final = var1
elif var1 == "reverse" and var2 == "exact":
final = "reverse"
elif var1 == "exact" and var2 == "reverse":
final = "reverse"
elif var1 == "reverse" and var2 == "equal":
final = "intersect"
elif var1 == "equal" and var2 == "reverse":
final = "intersect"
elif var1 == "equal" and var2 == "exact":
final = "equal"
elif var1 == "exact" and var2 == "equal":
final = "equal"
elif var1 == "intersect" or var2 == "intersect":
final = "intersect"
elif var2 == "completely":
final = "different"
else :
final = "intersect"
return final,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part
def check_rule_for_similars(pyt_src,pyt_dst,mydict):
src_conflict_rules = []
dst_conflict_rules = []
src_same_conflict_rules = []
dst_same_conflict_rules = []
if pyt_src.has_key(mydict['src_ip']):
src_same_conflict_rules = src_same_conflict_rules + pyt_src.get(mydict['src_ip'])
if pyt_dst.has_key(mydict['dst_ip']):
dst_same_conflict_rules = dst_same_conflict_rules + pyt_dst.get(mydict['dst_ip'])
add_rule_to_patricia(pyt_src, pyt_dst, mydict) #Adding trule to patricia
src_child = pyt_src.children(mydict["src_ip"])
src_paren = find_all_parents(pyt_src, mydict['src_ip'])
dst_child = pyt_dst.children(mydict['dst_ip'])
dst_paren = find_all_parents(pyt_dst, mydict['dst_ip'])
src_child_conflict_rules = []
dst_child_conflict_rules = []
src_paren_conflict_rules = []
dst_paren_conflict_rules = []
if src_child != None :
for i in src_child:
src_child_conflict_rules = src_child_conflict_rules + pyt_src.get(i)
if dst_child != None :
for i in dst_child:
dst_child_conflict_rules = dst_child_conflict_rules + pyt_dst.get(i)
if src_paren != None :
for i in src_paren:
src_paren_conflict_rules = src_paren_conflict_rules + pyt_src.get(i)
if dst_paren != None :
for i in dst_paren:
dst_paren_conflict_rules = dst_paren_conflict_rules + pyt_dst.get(i)
src_all = src_child + src_paren
dst_all = dst_child + dst_paren
if src_all != None :
for i in src_all:
src_conflict_rules = src_conflict_rules + pyt_src.get(i)
if dst_all != None :
for i in dst_all:
dst_conflict_rules = dst_conflict_rules + pyt_dst.get(i)
src_conflict_rules = src_conflict_rules + src_same_conflict_rules
dst_conflict_rules = dst_conflict_rules + dst_same_conflict_rules
final_conflict_rules = list(set(src_conflict_rules) & set(dst_conflict_rules))
delete_rule_from_pt_ft(pyt_src, pyt_dst, mydict)
return final_conflict_rules,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules
def detection_algorithm(gamma,mydict,pyt_src,pyt_dst,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules,rap):
final,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part = subset_for_ip(pyt_src, pyt_dst, gamma, mydict,src_same_conflict_rules,src_child_conflict_rules,src_paren_conflict_rules,dst_same_conflict_rules,dst_child_conflict_rules,dst_paren_conflict_rules)
# print("Final_conflict_type: ",final)
if((check_tcp_udp(mydict) != check_tcp_udp(gamma)) or (final == "different") or (check_ingress(mydict) != check_ingress(gamma))):
add_rule_to_patricia(pyt_src,pyt_dst,mydict)
# add_rule_to_newft(mydict)
# print("Just added")
elif(final == "exact"):
if(mydict["action "]==gamma["action "]):
# print("Conflict is Redundancy : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"redundancy",rap)
else:
if(mydict["priority"]==gamma["priority"]):
# print("Conflict is Intersection_different_action_prompt : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation_prompt",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
else:
# print("Conflict is Shielding : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"shadowing",rap)
elif(final == "equal"): #do subset here
if(mydict["action "]==gamma["action "]):
# print("Conflict is Redundancy : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"redundancy",rap)
else:
if(mydict["priority"]==gamma["priority"]):
# print("Conflict is Intersection_different_action_prompt : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation_prompt",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
else:
# print("Conflict is Abstraction : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"generalization",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
elif(final == "reverse"): # find Reverse subset here
if(mydict["action "]==gamma["action "]):
# print("Conflict is Redundancy_gamma_Removing : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"redundancy_gamma_removing",rap)
else:
if(mydict["priority"]==gamma["priority"]):
# print("Conflict is Intersection_different_action_prompt : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation_prompt",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
else:
# print("Conflict is Shielding : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"shadowing",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
elif(final == "intersect"):
if(mydict["action "]==gamma["action "]):
# print("Conflict is Intersection_same_action : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"overlap",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
else:
if(mydict["priority"]==gamma["priority"]):
# print("Conflict is Intersection_different_action_prompt : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation_prompt",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
else:
# print("Conflict is Intersection_different_action : Sent to resolving")
rap = conflict_resolver(pyt_src, pyt_dst, mydict,gamma,"correlation",rap,src_intersection_part,dst_intersection_part,src_port_intersection_part,dst_port_intersection_part)
# print("---------------------------")
return rap
def delete_rule_from_pt_ft(pyt_src, pyt_dst, gamma):
    """Removes rule `gamma` from both patricia tries (keyed by src and dst IP).
    The rule's priority number ('aasno') is removed from the list stored under
    gamma's source IP in `pyt_src` and under its destination IP in `pyt_dst`;
    when the list would become empty, the trie key is deleted entirely.
    Also drops gamma from the global final_device_values bookkeeping.
    Returns None in all cases.
    """
    check_and_delete_in_final_device_values(gamma) # Calling to check and delete final_device_values
    temp = []
    Ips = gamma['src_ip']
    prio = int(gamma['aasno'])
    # --- Source-IP trie cleanup ---
    temp = pyt_src.get(Ips)
    if temp is not None:
        if (prio not in temp):
            # NOTE(review): returning here skips the destination-trie cleanup
            # below, so a rule missing on the src side is never removed from
            # pyt_dst either — confirm this asymmetry is intentional.
            return None
        else:
            if len(temp) > 1 :
                # Other rules share this IP: keep the key, drop only this priority.
                temp.remove(prio)
                pyt_src.insert(Ips,temp)
            else:
                # Last rule under this IP: remove the trie entry altogether.
                pyt_src.delete(Ips)
    temp = [] # For Destination insertion
    # --- Destination-IP trie cleanup (mirrors the source-side logic) ---
    Ipd = gamma['dst_ip']
    temp = pyt_dst.get(Ipd)
    if temp is not None:
        if (prio not in temp):
            return None
        else:
            if len(temp) > 1 :
                temp.remove(prio)
                pyt_dst.insert(Ipd,temp)
            else:
                pyt_dst.delete(Ipd)
    # The triple-quoted string below is dead code kept by the original author:
    # it used to rewrite the on-disk flow table ('new_table99') without the
    # deleted rule. It is never executed.
    """bad_words = ["'aasno': '"+str(prio)+"',"] # deleting a flow fro flow table
    with open('new_table99') as oldfile, open('new_table22', 'w') as newfile:
        for line in oldfile:
            if not any(bad_word in line for bad_word in bad_words):
                newfile.write(line)
    with open('new_table99', 'w+') as output, open('new_table22', 'r') as input1:
        while True:
            data = input1.read(100000)
            if data == '': # end of file reached
                break
            output.write(data)
    """
def _split_gamma_around_intersection(pyt_src, pyt_dst, mydict, gamma,
                                     src_intersection_part, dst_intersection_part,
                                     src_port_intersection_part, dst_port_intersection_part):
    """Carves the intersecting region out of `gamma` and re-inserts the rest.
    Computes the IP and port ranges of gamma that do NOT intersect the new rule,
    expands them into concrete replacement rules, inserts those into the tries,
    and finally deletes the original gamma rule.
    """
    src_ip_list = excluding_ip.func_exclude_ip(gamma["src_ip"], src_intersection_part)
    dst_ip_list = excluding_ip.func_exclude_ip(gamma["dst_ip"], dst_intersection_part)
    src_port_list = excluding_port.func_exclude_port(
        list(range(int(gamma["src_start"]), int(gamma["src_end"]) + 1)),
        src_port_intersection_part)
    dst_port_list = excluding_port.func_exclude_port(
        list(range(int(gamma["dst_start"]), int(gamma["dst_end"]) + 1)),
        dst_port_intersection_part)
    f_list = add_all_rules_after_excluding.add_all_rules(
        src_ip_list, dst_ip_list, src_port_list, dst_port_list,
        mydict, gamma, pyt_src, pyt_dst)
    for replacement_rule in f_list:
        add_rule_to_patricia(pyt_src, pyt_dst, replacement_rule)
    delete_rule_from_pt_ft(pyt_src, pyt_dst, gamma)
def conflict_resolver(pyt_src, pyt_dst, mydict, gamma, conflict_type, rap,
                      src_intersection_part=None, dst_intersection_part=None,
                      src_port_intersection_part=None, dst_port_intersection_part=None):
    """Resolves a detected conflict between new rule `mydict` and old rule `gamma`.
    Args:
        pyt_src, pyt_dst: patricia tries keyed by source / destination IP.
        mydict: the incoming rule being inserted.
        gamma: the already-installed conflicting rule.
        conflict_type: one of 'shadowing', 'redundancy_gamma_removing',
            'redundancy', 'generalization', 'overlap', 'correlation_prompt',
            'correlation'.
        rap: caller's status code; set to 200 when the caller should still
            insert `mydict` afterwards.
        *_intersection_part: intersecting IP / port ranges, required for the
            splitting conflict types.
    Returns:
        The (possibly updated) `rap` status code.
    """
    if conflict_type == "shadowing":
        # Old rule is fully hidden by the new one: drop gamma, caller adds R.
        delete_rule_from_pt_ft(pyt_src, pyt_dst, gamma)
        rap = 200
    elif conflict_type == "redundancy_gamma_removing":
        # gamma is redundant w.r.t. the new rule: swap gamma for mydict here.
        delete_rule_from_pt_ft(pyt_src, pyt_dst, gamma)
        add_rule_to_patricia(pyt_src, pyt_dst, mydict)
    elif conflict_type == "redundancy":
        # New rule is redundant: keep gamma, do not add mydict.
        pass
    elif conflict_type in ("generalization", "overlap",
                           "correlation_prompt", "correlation"):
        # All four cases share the same resolution: split gamma around the
        # intersection and let the caller insert mydict (rap == 200).
        # (The original code had four byte-identical branches here.)
        rap = 200
        _split_gamma_around_intersection(
            pyt_src, pyt_dst, mydict, gamma,
            src_intersection_part, dst_intersection_part,
            src_port_intersection_part, dst_port_intersection_part)
    return rap
def detection(device_values, pyt_src, pyt_dst): # Main Detection
    """Inserts every rule in `device_values`, resolving conflicts on the way.
    For each rule: layer-2/layer-4 rules are skipped (reconciled elsewhere);
    otherwise the patricia tries are probed for conflicting rule numbers and
    each conflict is dispatched to detection_algorithm(). The rule itself is
    inserted when there is no conflict, or when the last resolution returned
    status 200.
    """
    print("Detection starts from here")
    for mydict in device_values:
        if check_layer2_layer4(mydict) == True:
            # Layer-2/layer-4 rule: nothing to detect here.
            continue
        (conflict_rule_numbers, src_same_conflict_rules, src_child_conflict_rules,
         src_paren_conflict_rules, dst_same_conflict_rules, dst_child_conflict_rules,
         dst_paren_conflict_rules) = check_rule_for_similars(pyt_src, pyt_dst, mydict)
        if len(conflict_rule_numbers) == 0:
            # No conflicts: install the rule directly.
            add_rule_to_patricia(pyt_src, pyt_dst, mydict)
            continue
        rap = 100
        # BUG FIX: rap1 was previously assigned only inside the loop when a
        # matching rule was found, raising NameError when no conflict number
        # matched any entry in final_device_values. Default it to the initial
        # status so the rule is simply not added in that case.
        rap1 = rap
        for conflict_number in conflict_rule_numbers:
            wanted = str(conflict_number)
            gamma = None
            for item in final_device_values:
                if item['aasno'] == wanted:
                    gamma = item
                    break
            if gamma is not None:
                rap1 = detection_algorithm(
                    gamma, mydict, pyt_src, pyt_dst,
                    src_same_conflict_rules, src_child_conflict_rules,
                    src_paren_conflict_rules, dst_same_conflict_rules,
                    dst_child_conflict_rules, dst_paren_conflict_rules, rap)
        if rap1 == 200:
            # Last resolution asked the caller to install the new rule.
            add_rule_to_patricia(pyt_src, pyt_dst, mydict)
    print("DETECTION COMPLETE:")
if __name__ == "__main__" :
    # Load the input rules, build the two patricia tries and run detection,
    # timing the detection phase only.
    device_values = creating_dict()
    pyt_src,pyt_dst = p_trie.patricia()
    start_time = time.time()
    detection(device_values,pyt_src,pyt_dst)
    Total_time = time.time() - start_time
    # Dump the resulting conflict-free rule set to stdout and to CSV.
    print(len(final_device_values))
    pprint.pprint(final_device_values)
    csv_columns = final_device_values[0].keys()
    currentPath = os.getcwd()
    csv_file = currentPath + "/csv/Outputflows.csv"
    WriteDictToCSV(csv_file,csv_columns,final_device_values)
    print("Time taken: --- %s seconds ---" % (Total_time))
| en | 0.645634 | #!/usr/bin/python # Calls the csv_dict_list function, passing the named csv # device_values = sorted(device_values, key=itemgetter('priority')) # pprint.pprint(device_values) # Prints the results nice and pretty like for x in device_values: temp.append(int(x['priority'])) print(temp) # Finding list of all parents # checking whether tcp or udp def add_rule_to_newft(flow_rule): #Adding rule to flow with open("new_table99", "a") as myfile: myfile.write(str(flow_rule)) #Checking whether patricia tree is empty or not #Adding rules to patricia and final_device values temp = [] src_port_intersection = [] dst_port_intersection = [] for x in src_inter: if temp : temp.append(x) continue if x-1 == temp[-1]: temp.append(x) continue else: src_port_intersection = [temp] temp = [] # print("Length of Source port Intersection: ",len(src_port_intersection_part),"|| Length of Source port Intersection: ",len(dst_port_intersection_part)) # Now calling subset_for port # print("Conflict_type in IPs: ", var1, "|| Conflict_type in PORTs: ", var2) #Adding trule to patricia # print("Final_conflict_type: ",final) # add_rule_to_newft(mydict) # print("Just added") # print("Conflict is Redundancy : Sent to resolving") # print("Conflict is Intersection_different_action_prompt : Sent to resolving") # print("Conflict is Shielding : Sent to resolving") #do subset here # print("Conflict is Redundancy : Sent to resolving") # print("Conflict is Intersection_different_action_prompt : Sent to resolving") # print("Conflict is Abstraction : Sent to resolving") # find Reverse subset here # print("Conflict is Redundancy_gamma_Removing : Sent to resolving") # print("Conflict is Intersection_different_action_prompt : Sent to resolving") # print("Conflict is Shielding : Sent to resolving") # print("Conflict is Intersection_same_action : Sent to resolving") # print("Conflict is Intersection_different_action_prompt : Sent to resolving") # print("Conflict is Intersection_different_action : Sent 
to resolving") # print("---------------------------") # Calling to check and delete final_device_values # For Destination insertion bad_words = ["'aasno': '"+str(prio)+"',"] # deleting a flow fro flow table with open('new_table99') as oldfile, open('new_table22', 'w') as newfile: for line in oldfile: if not any(bad_word in line for bad_word in bad_words): newfile.write(line) with open('new_table99', 'w+') as output, open('new_table22', 'r') as input1: while True: data = input1.read(100000) if data == '': # end of file reached break output.write(data) # print("Removed gamma R Holded") # add_rule_to_newft(mydict) # print("Gamma Removed R adds") # print("No adding of R") # add_rule_to_newft(x) # print("gamma Splitted") # add_rule_to_newft(x) # print("gamma Splitted") # add_rule_to_newft(x) # print("gamma Splitted:") # add_rule_to_newft(x) # print("gamma Splitted") # Main Detection # print(mydict['priority']) # print(("\nReconcile %s" %mydict['aasno'])) # print(("\nNO Reconc %s" %mydict['aasno'])) #Gives list of conflict ru # print("Conflicted_numbers: ",conflict_rule_numbers) # add_rule_to_newft(mydict) # print("\n",it) # add_rule_to_newft(mydict) | 3.202468 | 3 |
source code and resource files/open_circuit_window.py | SongyanLiCS/Solar-Panel-Equivalent-Circuit-Parameters-Extractor | 0 | 6624755 | <filename>source code and resource files/open_circuit_window.py
# Define the window class for approximating the slope di/dv near the open circuit condition
# using a graphical approximation method.
from short_circuit_window import ShortCircuitWindow
import tkinter as tk
class OpenCircuitWindow(ShortCircuitWindow):
    """Dialog for graphically estimating the I-V tangent slope near open circuit.
    Inherits all widgets and cursor handling from ShortCircuitWindow; only the
    captions and the callback target on the main window differ.
    """
    def __init__(self, main_window, **kwargs):
        super().__init__(main_window, **kwargs)
        self.title('Approximating the tangent slope near the open circuit condition...')
        self.image_frame.configure(text='Load the I-V curve (for STC) near I=0:')
    def calculate_slope(self):
        """Derives dI/dV from the two cursors and the four axis-scale entries."""
        scale_keys = ('delta i', 'delta y', 'delta v', 'delta x')
        if any(self.is_a_number(self.string_vars[key].get()) is False
               for key in scale_keys):
            tk.messagebox.showinfo(parent=self, title='Invalid input',
                                   message='Please provide valid numbers in the entries for |ΔI|, |Δy|, |ΔV|, and |Δx|.')
            return
        point_a = self.cursor_1.get_coordinate()
        point_b = self.cursor_2.get_coordinate()
        # Convert the pixel deltas into physical current / voltage spans.
        rise = (point_b[1] - point_a[1]) * float(self.string_vars['delta i'].get())/float(self.string_vars['delta y'].get())
        run = (point_b[0] - point_a[0]) * float(self.string_vars['delta v'].get())/float(self.string_vars['delta x'].get())
        self.slop_i_v = -rise / run
        if self.slop_i_v > 0 or self.slop_i_v < -1e6:
            # Clamp implausible results to the sentinel slope of -1e6 A/V.
            tk.messagebox.showinfo(parent=self, title='The I-V curve seems too steep or the slop appears positive',
                                   message='The I-V curve seems too steep or the slop appears positive. To make the calculation numerically robust and the case valid, the slope is assumed to be -1e6 (A/V). Please retry if necessary.')
            self.slop_i_v = -1e6
        self.string_vars['tangent slope'].set('{0:.5f}'.format(self.slop_i_v))
    def apply(self):
        """Hands the accepted dI/dV to the main window and closes this dialog."""
        slope_text = self.string_vars['tangent slope'].get()
        if not self.is_a_number(slope_text):
            tk.messagebox.showinfo(parent=self, title='dI/dV not obtained yet.',
                                   message='Please get a valid dI/dV first.')
            return
        self.main_window.set_di_dv_oc(slope_text)
        self.grab_release()
        self.destroy()
# Define the window class for approximating the slope di/dv near the open circuit condition
# using a graphical approximation method.
from short_circuit_window import ShortCircuitWindow
import tkinter as tk
class OpenCircuitWindow(ShortCircuitWindow):
# Just inherit from the short circuit window class and make appropriate modifications.
def __init__(self, main_window, **kwargs):
super().__init__(main_window, **kwargs)
self.title('Approximating the tangent slope near the open circuit condition...')
self.image_frame.configure(text='Load the I-V curve (for STC) near I=0:')
def calculate_slope(self):
entries_to_check = ['delta i', 'delta y', 'delta v', 'delta x']
all_good = True
for item in entries_to_check:
if self.is_a_number(self.string_vars[item].get()) is False:
all_good = False
if all_good:
x1 = self.cursor_1.get_coordinate()[0]
y1 = self.cursor_1.get_coordinate()[1]
x2 = self.cursor_2.get_coordinate()[0]
y2 = self.cursor_2.get_coordinate()[1]
self.slop_i_v = - (y2 - y1) * float(self.string_vars['delta i'].get())/float(self.string_vars['delta y'].get()) / ((x2 - x1) * float(self.string_vars['delta v'].get())/float(self.string_vars['delta x'].get()))
if self.slop_i_v > 0 or self.slop_i_v < -1e6:
tk.messagebox.showinfo(parent=self, title='The I-V curve seems too steep or the slop appears positive',
message='The I-V curve seems too steep or the slop appears positive. To make the calculation numerically robust and the case valid, the slope is assumed to be -1e6 (A/V). Please retry if necessary.')
self.slop_i_v = -1e6
self.string_vars['tangent slope'].set('{0:.5f}'.format(self.slop_i_v))
else:
tk.messagebox.showinfo(parent=self, title='Invalid input',
message='Please provide valid numbers in the entries for |ΔI|, |Δy|, |ΔV|, and |Δx|.')
def apply(self):
if self.is_a_number(self.string_vars['tangent slope'].get()):
self.main_window.set_di_dv_oc(self.string_vars['tangent slope'].get())
self.grab_release()
self.destroy()
else:
tk.messagebox.showinfo(parent=self, title='dI/dV not obtained yet.',
message='Please get a valid dI/dV first.') | en | 0.851181 | # Define the window class for approximating the slope di/dv near the open circuit condition # using a graphical approximation method. # Just inherit from the short circuit window class and make appropriate modifications. | 3.210062 | 3 |
tests/isolateserver_smoke_test.py | webrtc-lizp/infra-luci-client-py | 0 | 6624756 | #!/usr/bin/env vpython
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import hashlib
import os
import subprocess
import sys
import tempfile
import time
import unittest
# Mutates sys.path.
import test_env
import isolated_format
from utils import file_path
# Ensure that the testing machine has access to this server.
ISOLATE_SERVER = 'https://isolateserver.appspot.com/'
# Test fixture: file name -> file content; materialized on disk by
# test_env.make_tree() in setUp().
CONTENTS = {
  'empty_file.txt': '',
  'small_file.txt': 'small file\n',
  # TODO(maruel): symlinks.
}
class IsolateServerArchiveSmokeTest(unittest.TestCase):
  """Smoke test that archives files to a live isolate server and reads them back.
  Python 2 only: relies on the `long` builtin. Each test uploads into a unique
  throwaway namespace created in setUp().
  """
  # This test is touching the live infrastructure. It's a pain if your IP
  # is not whitelisted so do not run them for now. They should use a local fake
  # web service instead.
  no_run = 1
  def setUp(self):
    """Creates a unique upload namespace and a temp tree with CONTENTS."""
    super(IsolateServerArchiveSmokeTest, self).setUp()
    # The namespace must end in '-gzip' since all files are now compressed
    # before being uploaded.
    # TODO(maruel): This should not be leaked to the client. It's a
    # transport/storage detail.
    self.namespace = ('temporary' + str(long(time.time())).split('.', 1)[0]
                      + '-gzip')
    self.tempdir = tempfile.mkdtemp(prefix=u'isolateserver')
    self.rootdir = os.path.join(self.tempdir, 'rootdir')
    self.test_data = os.path.join(self.tempdir, 'test_data')
    test_env.make_tree(self.test_data, CONTENTS)
  def tearDown(self):
    """Deletes the temp tree even if the base tearDown raises."""
    try:
      file_path.rmtree(self.tempdir)
    finally:
      super(IsolateServerArchiveSmokeTest, self).tearDown()
  def _run(self, args):
    """Runs isolateserver.py."""
    cmd = [
        sys.executable, os.path.join(test_env.CLIENT_DIR, 'isolateserver.py'),
    ]
    cmd.extend(args)
    cmd.extend(
        [
          '--isolate-server', ISOLATE_SERVER,
          '--namespace', self.namespace,
        ])
    if '-v' in sys.argv:
      cmd.append('--verbose')
      # Verbose mode: stream child output to the console.
      subprocess.check_call(cmd)
    else:
      # Quiet mode: swallow child output unless it fails.
      subprocess.check_output(cmd)
  def _archive_given_files(self, files):
    """Given a list of files, call isolateserver.py with them. Then
    verify they are all on the server."""
    files = [os.path.join(self.test_data, filename) for filename in files]
    self._run(['archive'] + files)
    self._download_given_files(files)
  def _download_given_files(self, files):
    """Tries to download the files from the server."""
    args = [
      'download',
      '--cache', os.path.join(self.tempdir, 'cache'),
      '--target', self.rootdir,
    ]
    # Files are addressed on the server by their SHA-1 content hash.
    file_hashes = [isolated_format.hash_file(f, hashlib.sha1) for f in files]
    for f in file_hashes:
      args.extend(['--file', f, f])
    self._run(args)
    # Assert the files are present.
    actual = [
      isolated_format.hash_file(os.path.join(self.rootdir, f), hashlib.sha1)
      for f in os.listdir(self.rootdir)
    ]
    self.assertEqual(sorted(file_hashes), sorted(actual))
  def test_archive_empty_file(self):
    self._archive_given_files(['empty_file.txt'])
  def test_archive_small_file(self):
    self._archive_given_files(['small_file.txt'])
  def test_archive_huge_file(self):
    # Create a file over 2gbs.
    name = '2.1gb.7z'
    with open(os.path.join(self.test_data, name), 'wb') as f:
      # Write 2.1gb.
      data = os.urandom(1024)
      for _ in range(2150 * 1024):
        f.write(data)
    self._archive_given_files([name])
  if sys.maxsize == (2**31) - 1:
    # Only defined on 32-bit Python, where virtual address space is scarce.
    def test_archive_multiple_huge_file(self):
      # Create multiple files over 2.5gb. This test exists to stress the virtual
      # address space on 32 bits systems
      files = []
      for i in range(5):
        name = '512mb_%d.7z' % i
        files.append(name)
        with open(os.path.join(self.test_data, name), 'wb') as f:
          # Write 512mb.
          data = os.urandom(1024)
          for _ in range(512 * 1024):
            f.write(data)
      self._archive_given_files(files)
# Entry point when run directly; test_env supplies the test runner.
if __name__ == '__main__':
  test_env.main()
| #!/usr/bin/env vpython
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import hashlib
import os
import subprocess
import sys
import tempfile
import time
import unittest
# Mutates sys.path.
import test_env
import isolated_format
from utils import file_path
# Ensure that the testing machine has access to this server.
ISOLATE_SERVER = 'https://isolateserver.appspot.com/'
# Test fixture: file name -> file content; materialized on disk by
# test_env.make_tree() in setUp().
CONTENTS = {
  'empty_file.txt': '',
  'small_file.txt': 'small file\n',
  # TODO(maruel): symlinks.
}
class IsolateServerArchiveSmokeTest(unittest.TestCase):
  """Smoke test that archives files to a live isolate server and reads them back.
  Python 2 only: relies on the `long` builtin. Each test uploads into a unique
  throwaway namespace created in setUp().
  """
  # This test is touching the live infrastructure. It's a pain if your IP
  # is not whitelisted so do not run them for now. They should use a local fake
  # web service instead.
  no_run = 1
  def setUp(self):
    """Creates a unique upload namespace and a temp tree with CONTENTS."""
    super(IsolateServerArchiveSmokeTest, self).setUp()
    # The namespace must end in '-gzip' since all files are now compressed
    # before being uploaded.
    # TODO(maruel): This should not be leaked to the client. It's a
    # transport/storage detail.
    self.namespace = ('temporary' + str(long(time.time())).split('.', 1)[0]
                      + '-gzip')
    self.tempdir = tempfile.mkdtemp(prefix=u'isolateserver')
    self.rootdir = os.path.join(self.tempdir, 'rootdir')
    self.test_data = os.path.join(self.tempdir, 'test_data')
    test_env.make_tree(self.test_data, CONTENTS)
  def tearDown(self):
    """Deletes the temp tree even if the base tearDown raises."""
    try:
      file_path.rmtree(self.tempdir)
    finally:
      super(IsolateServerArchiveSmokeTest, self).tearDown()
  def _run(self, args):
    """Runs isolateserver.py."""
    cmd = [
        sys.executable, os.path.join(test_env.CLIENT_DIR, 'isolateserver.py'),
    ]
    cmd.extend(args)
    cmd.extend(
        [
          '--isolate-server', ISOLATE_SERVER,
          '--namespace', self.namespace,
        ])
    if '-v' in sys.argv:
      cmd.append('--verbose')
      # Verbose mode: stream child output to the console.
      subprocess.check_call(cmd)
    else:
      # Quiet mode: swallow child output unless it fails.
      subprocess.check_output(cmd)
  def _archive_given_files(self, files):
    """Given a list of files, call isolateserver.py with them. Then
    verify they are all on the server."""
    files = [os.path.join(self.test_data, filename) for filename in files]
    self._run(['archive'] + files)
    self._download_given_files(files)
  def _download_given_files(self, files):
    """Tries to download the files from the server."""
    args = [
      'download',
      '--cache', os.path.join(self.tempdir, 'cache'),
      '--target', self.rootdir,
    ]
    # Files are addressed on the server by their SHA-1 content hash.
    file_hashes = [isolated_format.hash_file(f, hashlib.sha1) for f in files]
    for f in file_hashes:
      args.extend(['--file', f, f])
    self._run(args)
    # Assert the files are present.
    actual = [
      isolated_format.hash_file(os.path.join(self.rootdir, f), hashlib.sha1)
      for f in os.listdir(self.rootdir)
    ]
    self.assertEqual(sorted(file_hashes), sorted(actual))
  def test_archive_empty_file(self):
    self._archive_given_files(['empty_file.txt'])
  def test_archive_small_file(self):
    self._archive_given_files(['small_file.txt'])
  def test_archive_huge_file(self):
    # Create a file over 2gbs.
    name = '2.1gb.7z'
    with open(os.path.join(self.test_data, name), 'wb') as f:
      # Write 2.1gb.
      data = os.urandom(1024)
      for _ in range(2150 * 1024):
        f.write(data)
    self._archive_given_files([name])
  if sys.maxsize == (2**31) - 1:
    # Only defined on 32-bit Python, where virtual address space is scarce.
    def test_archive_multiple_huge_file(self):
      # Create multiple files over 2.5gb. This test exists to stress the virtual
      # address space on 32 bits systems
      files = []
      for i in range(5):
        name = '512mb_%d.7z' % i
        files.append(name)
        with open(os.path.join(self.test_data, name), 'wb') as f:
          # Write 512mb.
          data = os.urandom(1024)
          for _ in range(512 * 1024):
            f.write(data)
      self._archive_given_files(files)
# Entry point when run directly; test_env supplies the test runner.
if __name__ == '__main__':
  test_env.main()
| en | 0.864428 | #!/usr/bin/env vpython # Copyright 2013 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. # Mutates sys.path. # Ensure that the testing machine has access to this server. # TODO(maruel): symlinks. # This test is touching the live infrastructure. It's a pain if your IP # is not whitelisted so do not run them for now. They should use a local fake # web service instead. # The namespace must end in '-gzip' since all files are now compressed # before being uploaded. # TODO(maruel): This should not be leaked to the client. It's a # transport/storage detail. Runs isolateserver.py. Given a list of files, call isolateserver.py with them. Then verify they are all on the server. Tries to download the files from the server. # Assert the files are present. # Create a file over 2gbs. # Write 2.1gb. # Create multiple files over 2.5gb. This test exists to stress the virtual # address space on 32 bits systems # Write 512mb. | 2.176495 | 2 |
recsys/mf/bpr.py | Danielto1404/ML-ALGO | 1 | 6624757 | import numpy as np
import scipy.sparse as sp
from tqdm import tqdm
from recsys.mf.core import CoreMF
class BPR(CoreMF):
    """Matrix factorization trained with Bayesian Personalized Ranking-style
    pairwise updates: for each observed (user, positive item) pair, a random
    unobserved item is sampled and the factors are pushed to rank the positive
    item above the negative one.
    """
    def __init__(self, iterations, factors, learning_rate, alpha, seed):
        # beta (second regularizer) is unused by BPR; loss tracking is disabled.
        super().__init__(iterations, factors, learning_rate, alpha, seed=seed, beta=0, calculate_loss=False)
        # user id -> array of interacted / non-interacted item ids (built in fit()).
        self.positives = {}
        self.negatives = {}
    def negative_choice(self, user):
        """Samples one item the user has not interacted with, uniformly."""
        return np.random.choice(self.negatives[user])
    def fit(self, user_to_item: sp.csr_matrix):
        """Trains the factor matrices on an implicit-feedback user-item matrix."""
        self.__fit_preparation__(user_to_item)
        implicit_values = user_to_item.toarray()
        n_users, n_items = user_to_item.shape
        items_range = np.arange(n_items)
        users_range = np.unique(self.user_indices)
        # Split every user's item universe into positives (any interaction)
        # and negatives (no interaction).
        for user in np.arange(n_users):
            values = implicit_values[user]
            self.positives[user] = items_range[values > 0]
            self.negatives[user] = items_range[values == 0]
        def anti_gradient_step(m, gradient, latent):
            # exp(-m) / (1 + exp(-m)) == sigmoid(-m): the weight shrinks as the
            # margin m between positive and negative scores grows. The alpha
            # term is L2 regularization on the latent vector being updated.
            exp = np.exp(-m)
            return self.learning_rate * ((exp / (1 + exp)) * gradient - self.alpha * latent)
        for it in np.arange(self.iterations):
            for user in tqdm(users_range, desc='Epoch {}'.format(it + 1), colour='green'):
                for positive in self.positives[user]:
                    negative = self.negative_choice(user)
                    positive_item = self.item_factors[positive]
                    negative_item = self.item_factors[negative]
                    user_factors = self.user_factors[user]
                    delta = positive_item - negative_item
                    # Margin: how much higher the positive item scores than the
                    # sampled negative for this user.
                    margin = user_factors @ delta.T
                    self.user_factors[user] += anti_gradient_step(margin, delta, user_factors)
                    self.item_factors[positive] += anti_gradient_step(margin, user_factors, positive_item)
                    self.item_factors[negative] += anti_gradient_step(margin, -user_factors, negative_item)
| import numpy as np
import scipy.sparse as sp
from tqdm import tqdm
from recsys.mf.core import CoreMF
class BPR(CoreMF):
def __init__(self, iterations, factors, learning_rate, alpha, seed):
super().__init__(iterations, factors, learning_rate, alpha, seed=seed, beta=0, calculate_loss=False)
self.positives = {}
self.negatives = {}
def negative_choice(self, user):
return np.random.choice(self.negatives[user])
def fit(self, user_to_item: sp.csr_matrix):
self.__fit_preparation__(user_to_item)
implicit_values = user_to_item.toarray()
n_users, n_items = user_to_item.shape
items_range = np.arange(n_items)
users_range = np.unique(self.user_indices)
for user in np.arange(n_users):
values = implicit_values[user]
self.positives[user] = items_range[values > 0]
self.negatives[user] = items_range[values == 0]
def anti_gradient_step(m, gradient, latent):
exp = np.exp(-m)
return self.learning_rate * ((exp / (1 + exp)) * gradient - self.alpha * latent)
for it in np.arange(self.iterations):
for user in tqdm(users_range, desc='Epoch {}'.format(it + 1), colour='green'):
for positive in self.positives[user]:
negative = self.negative_choice(user)
positive_item = self.item_factors[positive]
negative_item = self.item_factors[negative]
user_factors = self.user_factors[user]
delta = positive_item - negative_item
margin = user_factors @ delta.T
self.user_factors[user] += anti_gradient_step(margin, delta, user_factors)
self.item_factors[positive] += anti_gradient_step(margin, user_factors, positive_item)
self.item_factors[negative] += anti_gradient_step(margin, -user_factors, negative_item)
| none | 1 | 2.130399 | 2 | |
lingvo/core/learner.py | xsppp/gpipe_with_Mnist | 1 | 6624758 | <filename>lingvo/core/learner.py
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A learner optimizes a subset of variables according to a loss.
It consists of a learning rate schedule, an optimizer, and gradient clipping
mechanisms. A BaseTask can have multiple learners, each optimizing a (usually
disjoint) subset of variables.
"""
import re
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import optimizer
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.core import summary_utils
class Learner(base_layer.BaseLayer):
  """A training program layer.
  Bundles a learning rate schedule, an optimizer, and gradient
  clipping/masking for one subset of a task's variables. The layer takes a
  loss tensor as input and outputs a trainer op.
  """
  @classmethod
  def Params(cls):
    """Returns the layer params, extended with this learner's hyperparams."""
    p = super().Params()
    # --- Regularization (None disables the corresponding term) ---
    p.Define(
        'l2_regularizer_weight', None,
        'If not None, L2 regularization to apply to the weights. '
        'Otherwise, disable L2 regularization.')
    # --- Loss selection / multi-loss combination ---
    p.Define(
        'loss_name', None, 'Name(s) of the loss(es) this learner to optimize. '
        'If not set, use learner name directly. '
        'If given as a list, the gradients will be combined via a '
        'GradientCombiner created from p.gradient_combiner, which must be '
        'specified as well.')
    p.Define(
        'gradient_combiner', None,
        'Params of a gradient_combiner.GradientCombiner used to combine '
        'gradients from multiple losses.')
    p.Define(
        'l1_regularizer_weight', None,
        'If not None, L1 regularization to apply to the weights. '
        'Otherwise, disable L1 regularization.')
    p.Define('learning_rate', 0.0, 'learning rate to use.')
    # --- Gradient clipping (the two clip_*_to_value options are mutually
    # exclusive, see their help strings) ---
    p.Define(
        'clip_gradient_norm_to_value', 0.0,
        'Clip gradient by global norm to this value. This is similar to '
        'the bahaviour of tf.clip_by_global_norm, if you are looking for '
        'tf.clip_by_norm refer to clip_gradient_single_norm_to_value. Note '
        'these are mutually exclusive.')
    p.Define(
        'clip_gradient_single_norm_to_value', 0.0,
        'Clip gradient by single tensor norm to this value. This is '
        'similar to the bahaviour of tf.clip_by_norm. Note this is mutually '
        'exlusive to using clip_gradient_norm_to_value.')
    p.Define('grad_norm_to_clip_to_zero', 0.0,
             'Clip gradient to 0 if its norm exceeds this value.')
    p.Define('grad_norm_tracker', None, 'Params for GradNormTracker.')
    # --- Optimizer and learning rate schedule (instantiated as children) ---
    p.Define('optimizer', optimizer.Adam.Params(), 'Params for the optimizer.')
    p.Define('lr_schedule', schedule.ContinuousSchedule.Params(),
             'Learning rate decay schedule.')
    # --- Variable selection for backprop (regexps, see GetTrainableVariables) ---
    p.Define(
        'bprop_variable_filter', None,
        'If set, only backprop variables whose names partially match '
        'this regexp (re.search).')
    p.Define(
        'bprop_variable_exclusion', None,
        'If set, do not backprop variables whose names partially match '
        'this regexp (re.search).')
    # --- Gradient computation details ---
    p.Define(
        'grad_aggregation_method', tf.AggregationMethod.EXPERIMENTAL_TREE,
        'Specifies the method used to combine gradient terms. Accepted '
        'values are constants defined in the class AggregationMethod.')
    p.Define(
        'gate_gradients', False,
        'If True, add a tuple around the gradients returned for an '
        'operations. This avoids some race conditions.')
    p.Define('colocate_gradients_with_ops', True,
             'If True, try colocating gradients with the corresponding op.')
    p.Define(
        'skip_zero_gradients', None,
        'If set, skips aggregating zero gradients while computing gradients.'
        'This helps in case where some weights may not be used in forward '
        'computation, e.g., sparsely activated networks or switchable layers '
        'in neural architectural search. '
        'Possible values are: '
        'None: do not skip zero gradients; '
        '"variable": skip if the entire variable gradients are almost zero; '
        '"weight": skip if the individual weight gradients are almost zero.')
    p.Define('scale_gradients', True,
             'Whether to apply gradients adjustment and scaling.')
    return p
def __init__(self, params):
super().__init__(params)
p = self.params
self._var_grads = None
self._eval_metrics = {}
if p.grad_norm_tracker:
self.CreateChild('grad_norm_tracker', p.grad_norm_tracker)
self.CreateChild('lr_schedule', p.lr_schedule)
self.CreateChild('optimizer', p.optimizer)
if isinstance(p.loss_name, (list, tuple)):
assert p.gradient_combiner
self.CreateChild('gradient_combiner', p.gradient_combiner)
else:
assert p.gradient_combiner is None
  def _CreateChildrenVariables(self):
    """Instantiates the child layers' variables before the default scoping."""
    # Backwards compatibility: manually call child.InstantiateVariables()
    # outside of tf.variable_scope(p.name).
    if self.params.grad_norm_tracker:
      self.grad_norm_tracker.InstantiateVariables()
    self.lr_schedule.InstantiateVariables()
    self.optimizer.InstantiateVariables()
    super()._CreateChildrenVariables()
  def GetVarGrads(self):
    """Returns the var/grad pairs computed by the most recent Apply() call,
    or None if Apply() has not run yet."""
    return self._var_grads
def GetTrainableVariables(self, vmap):
p = self.params
pos = re.compile(
p.bprop_variable_filter) if p.bprop_variable_filter else None
neg = re.compile(
p.bprop_variable_exclusion) if p.bprop_variable_exclusion else None
def VariableFilter(v):
"""Returns True if variable v should be optimized by this learner."""
if not v.trainable:
return False
if pos and not pos.search(v.name):
tf.logging.info('%s: disabled by bprop_variable_filter: %s', p.name,
v.name)
return False
if neg and neg.search(v.name):
tf.logging.info('%s: disabled by bprop_variable_exclusion: %s', p.name,
v.name)
return False
return True
return vmap.Filter(VariableFilter)
  def ApplyPostTrainingLoop(self, global_step):
    """Applies any computation to run after each tpu training loop.
    Delegates entirely to the child optimizer.
    Args:
      global_step: Global step variable.
    Returns:
      Ops to run after training loop ends.
    """
    return self.optimizer.ApplyPostTrainingLoop(global_step)
def LearningRate(self, step):
  """Returns the scalar learning rate to use at `step`.

  The value is p.learning_rate scaled by the lr_schedule's value at `step`;
  the schedule value is also recorded as the 'lr_schedule' eval metric.
  """
  p = self.params
  schedule_value = self.lr_schedule.Value(step)
  schedule_value.set_shape([])
  self._AddEvalMetric('lr_schedule', schedule_value, tf.constant(1.0))
  return p.learning_rate * schedule_value
def Apply(self, metrics, vmap, gradient_mask=None, gradient_adjuster=None):
  """Builds the variable-update op that optimizes this learner's loss(es).

  TODO(rpang): explore merging gradient_mask and gradient_adjuster.

  Args:
    metrics: A Dict[str, (value, weight)], from which loss can be extracted
      according to p.loss_name.
    vmap: A `.NestedMap` object containing variables to optimize.
    gradient_mask: if not None, a dict mapping variable names to a 0/1 scalar.
    gradient_adjuster: if not None, a function that mutates a given var_grads.

  Returns:
    (losses, op, eval_metrics), where losses is a list of scalar tensors, op
    is a tf.Operation to update variables, and eval_metrics is a
    Dict[str, (value, weight)] of scalar tensors.
  """
  # Gradients are applied outside the name_scope to remain backwards
  # compatible with variables created by self.optimizer.Apply().
  losses, raw_var_grads, eval_metrics = self._ComputeLossesAndGradients(
      metrics, vmap)
  adjusted_var_grads, adjustment_stats = self.AdjustGradients(
      raw_var_grads,
      gradient_mask=gradient_mask,
      gradient_adjuster=gradient_adjuster)
  eval_metrics.update(adjustment_stats)
  self._var_grads = adjusted_var_grads
  assert self.theta.global_step is not None, self.theta
  learning_rate = self.LearningRate(self.theta.global_step)
  var_update_op = self.optimizer.Apply(learning_rate, adjusted_var_grads)
  return losses, var_update_op, eval_metrics
def ComputeActivationGradients(self, activations, activations_grad, vmap):
  """Computes variable gradients from given activation gradients.

  Args:
    activations: activation tensors whose upstream gradients are supplied.
    activations_grad: gradients of the loss w.r.t. `activations`.
    vmap: a `.NestedMap` of candidate variables; filtered to this learner's
      trainable subset before differentiation.

  Returns:
    The result of self.optimizer.ComputeGradients() in activations mode.
  """
  p = self.params
  vmap = self.GetTrainableVariables(vmap)
  for v in vmap.Flatten():
    tf.logging.info('%s: bprop variable: %s', p.name, v.name)
  return self.optimizer.ComputeGradients(
      activations,
      vmap,
      p.grad_aggregation_method,
      p.colocate_gradients_with_ops,
      p.gate_gradients,
      compute_gradients_fn=self._CustomComputeGradientsFn(),
      skip_zero_gradients=p.skip_zero_gradients,
      skip_none_gradients=False,
      activations_grad=activations_grad,
      is_activations=True)
def ComputeLosses(self, metrics):
  """Extracts the loss tensor(s) this learner optimizes from `metrics`.

  Args:
    metrics: A Dict[str, (value, weight)] keyed by metric name.

  Returns:
    A list of loss values, one per configured loss name (p.loss_name, or
    p.name when p.loss_name is unset).

  Raises:
    ValueError: If a configured loss name is missing from `metrics`.
  """
  p = self.params

  def _Loss(metric_name):
    """Returns the (value, weight) metric registered under `metric_name`."""
    metric = metrics.get(metric_name, None)
    if metric is None:
      raise ValueError('Loss %s not found in metrics %s' %
                       (metric_name, list(metrics.keys())))
    return metric

  loss_name = p.loss_name or p.name
  # Normalize to a list so single- and multi-loss configs share one path.
  if not isinstance(loss_name, (list, tuple)):
    loss_name = [loss_name]
  # Each metric is a (value, weight) pair; the loss is the value.
  return [_Loss(name)[0] for name in loss_name]
def _CustomComputeGradientsFn(self):
  """Returns the compute_gradients_fn to use for py_utils.ComputeGradients.

  Subclasses may override this to customize gradient computation.
  """
  return None  # use the default function
def _ComputeLossesAndGradients(self, metrics, vmap):
  """Computes the configured loss value(s) and their variable gradients.

  Args:
    metrics: A Dict[str, (value, weight)] from which loss(es) are extracted
      by name (p.loss_name, or p.name when unset).
    vmap: A `.NestedMap` of candidate variables; filtered to this learner's
      trainable subset before gradient computation.

  Returns:
    (losses, var_grads, eval_metrics), where losses is a list of loss
    tensors, var_grads is a `.NestedMap` of py_utils.VarGrad with None
    gradients dropped, and eval_metrics is a Dict[str, (value, weight)].
  """
  p = self.params
  vmap = self.GetTrainableVariables(vmap)
  for v in vmap.Flatten():
    tf.logging.info('%s: bprop variable: %s', p.name, v.name)

  def LossAndGradients(metric_name):
    """Returns (metric, var_grads) computed from metrics[metric_name]."""
    metric = metrics.get(metric_name, None)
    if metric is None:
      raise ValueError('Loss %s not found in metrics %s' %
                       (metric_name, list(metrics.keys())))
    # TODO(b/154785713): pass (loss, loss_weight) to ComputeGradients().
    loss = metric[0]
    return metric, self.optimizer.ComputeGradients(
        loss,
        vmap,
        p.grad_aggregation_method,
        p.colocate_gradients_with_ops,
        p.gate_gradients,
        compute_gradients_fn=self._CustomComputeGradientsFn(),
        skip_zero_gradients=p.skip_zero_gradients,
        skip_none_gradients=False)

  loss_name = p.loss_name or p.name
  losses = []
  eval_metrics = {}
  if isinstance(loss_name, (list, tuple)):
    # Multi-loss: compute per-loss gradients, then merge them via the
    # configured gradient_combiner over a shared variable structure.
    losses_and_grads = {}
    variables = None
    for metric_name in loss_name:
      loss_metric, var_grads = LossAndGradients(metric_name)
      losses_and_grads[metric_name] = py_utils.NestedMap(
          loss_metric=loss_metric,
          grads=tf.nest.map_structure(lambda vg: vg.grad, var_grads))
      current_vars = tf.nest.map_structure(lambda vg: vg.var, var_grads)
      if variables is None:
        variables = current_vars
      else:
        # Every loss must produce gradients over the same variable structure.
        tf.nest.assert_same_structure(variables, current_vars)
      losses.append(loss_metric[0])
    grads, eval_metrics = self.gradient_combiner.Combine(
        variables, losses_and_grads)
    var_grads = tf.nest.map_structure(
        lambda v, g: py_utils.VarGrad(var=v, grad=g), variables, grads)
  else:
    loss_metric, var_grads = LossAndGradients(loss_name)
    losses.append(loss_metric[0])
  return losses, py_utils.SkipNoneGradients(var_grads), eval_metrics
def AdjustGradients(self,
                    var_grads,
                    gradient_mask=None,
                    gradient_adjuster=None):
  """Adjusts gradients according to learner params.

  Applies, in order: Lp regularization, gradient masking, and gradient
  scaling/clipping, then records a histogram summary of the result.

  Args:
    var_grads: a `.NestedMap` whose values are (var, grad) pairs.
    gradient_mask: if not None, a dict mapping variable names to a 0/1 scalar.
    gradient_adjuster: if not None, a function that mutates a given var_grads.

  Returns:
    (var_grads, eval_metrics): the adjusted `.NestedMap` of (var, grad)
    pairs and this learner's eval-metric dict.
  """
  p = self.params
  # Lp regularizers: each returns (reg_loss, updated var_grads).
  if p.l2_regularizer_weight is not None:
    l2_loss, var_grads = py_utils.AdjustGradientsWithLpLoss(
        var_grads, p.l2_regularizer_weight, p=2.0)
    self._AddEvalMetric('l2_loss', l2_loss, tf.constant(1.0))
  if p.l1_regularizer_weight is not None:
    l1_loss, var_grads = py_utils.AdjustGradientsWithLpLoss(
        var_grads, p.l1_regularizer_weight, p=1.0)
    self._AddEvalMetric('l1_loss', l1_loss, tf.constant(1.0))
  # Zero out gradients of masked-off variables, but only if a mask is given.
  if gradient_mask:
    var_grads = py_utils.MaskGradients(var_grads, gradient_mask)
  # Gradient scaling / clipping (plus any external adjuster).
  if p.scale_gradients:
    var_grads = self.ScaleGradients(
        var_grads, gradient_adjuster=gradient_adjuster).final_var_grads
  summary_utils.CollectVarHistogram(var_grads)
  return var_grads, self._eval_metrics
def _GetGlobalGradScale(self, all_grad_norm, has_nan_or_inf):
  """Returns a scaling factor for all gradients according to their norm.

  In case there are NaN or Inf values the function will return 0.0.

  Args:
    all_grad_norm: A scalar representing the total norm of all vars.
    has_nan_or_inf: A scalar of 0 or 1, indicating whether there is any NaN or
      Inf in input gradients.

  Returns:
    The gradient scale. 0 if gradient updates should be skipped for the step.
  """
  p = self.params
  # Computes gradient's scale.
  grad_scale = tf.constant(1.0, all_grad_norm.dtype)
  if p.clip_gradient_norm_to_value:
    # If all_grad_norm > p.clip_gradient_norm_to_value, scales
    # all_grads so that the norm is 1.0.
    grad_scale = tf.minimum(
        tf.constant(1.0, all_grad_norm.dtype),
        p.clip_gradient_norm_to_value / all_grad_norm)
  if p.grad_norm_to_clip_to_zero:
    # If all_grad_norm > p.grad_norm_to_clip_to_zero, treats
    # grad_scale as 0. This way, we ignore this step.
    # NOTE(review): this cast uses p.dtype rather than all_grad_norm.dtype;
    # presumably the two always agree -- confirm under mixed precision.
    grad_scale *= tf.cast(all_grad_norm < p.grad_norm_to_clip_to_zero,
                          p.dtype)
  if p.grad_norm_tracker:
    # The tracker may further scale (or zero) the update based on norm history.
    grad_scale *= self.grad_norm_tracker.FPropDefaultTheta(
        all_grad_norm, has_nan_or_inf)
  # Force grad_scale to be 0 if there is any NaN or Inf in gradients.
  grad_scale = tf.where(has_nan_or_inf, tf.constant(0.0, grad_scale.dtype),
                        grad_scale)
  return grad_scale
def ScaleGradients(self, var_grads, gradient_adjuster=None):
  """Scales gradients according to training params.

  Args:
    var_grads: a `.NestedMap` whose values are (var, grad) pairs.
    gradient_adjuster: if not None, a function that mutates a given var_grads.

  Returns:
    A `.NestedMap` containing
    - final_var_grads: a `.NestedMap` whose values are (var, grad) pairs,
      where gradients have already been scaled.
    - grad_scale: the gradient scale. 0 if gradient updates should be skipped
      for the step. (Optional, only returned in case global norm clipping is
      used.)
  """
  p = self.params
  # Computes gradients' norm and adds their summaries. Note that all_grad_norm
  # may be nan, which may cause grad_scale to be nan.
  for name, vg in var_grads.FlattenItems():
    summary_utils.AddNormSummary(
        py_utils.SanitizeScopeKey(name) + '/' + p.name, vg)
  flatten = py_utils.Flatten(var_grads)
  all_grad_norm = tf.sqrt(py_utils.SumSquared([g for (_, g) in flatten]))
  all_var_norm = tf.sqrt(py_utils.SumSquared([v for (v, _) in flatten]))
  grad_norm_is_nan_or_inf = tf.math.logical_or(
      tf.math.is_nan(all_grad_norm), tf.math.is_inf(all_grad_norm))
  # Optional gradient adjustment. Note that this happens after computing
  # all_grad_norm.
  if gradient_adjuster is not None:
    tf.logging.info('gradient_adjuster=%s', gradient_adjuster)
    var_grads = gradient_adjuster(var_grads)
  # Handles NaN/Inf gradients.
  has_nan_or_inf = py_utils.HasNanOrInfGradient(var_grads)
  # Grad norm can still be inf even if none of the individual grad is inf.
  has_nan_or_inf = tf.math.logical_or(has_nan_or_inf, grad_norm_is_nan_or_inf)
  self._AddEvalMetric('has_nan_or_inf', has_nan_or_inf, tf.constant(1.0))
  return_values = py_utils.NestedMap()
  if p.clip_gradient_single_norm_to_value:
    # Currently using both types of clipping simultaneously is unsupported.
    if p.clip_gradient_norm_to_value:
      raise ValueError('Cannot use clip_gradient_single_norm_to_value=%f and '
                       'clip_gradient_norm_to_value=%f.' %
                       (p.clip_gradient_single_norm_to_value,
                        p.clip_gradient_norm_to_value))
    # Per-tensor norm clipping; no global grad_scale is produced.
    final_var_grads = py_utils.ApplyGradNormClipping(
        var_grads, p.clip_gradient_single_norm_to_value)
  else:
    # Global-norm scaling: a single scale factor applied to every gradient.
    grad_scale = self._GetGlobalGradScale(all_grad_norm, has_nan_or_inf)
    # grad_norm/all is both a eval metric(collected by trainer) and a summary
    # (collected by controller).
    summary_utils.scalar(f'grad_norm/all/{p.name}', all_grad_norm)
    self._AddEvalMetric('grad_norm/all', all_grad_norm, tf.constant(1.0))
    self._AddEvalMetric('var_norm/all', all_var_norm, tf.constant(1.0))
    self._AddEvalMetric('grad_scale_all', grad_scale, tf.constant(1.0))
    final_var_grads = py_utils.ApplyGradMultiplier(var_grads, grad_scale)
    return_values.grad_scale = grad_scale
  return_values.final_var_grads = final_var_grads
  return return_values
def _AddEvalMetric(self, key, value, weight):
  """Records (value, weight) under `key` in this learner's eval metrics."""
  self._eval_metrics[key] = (value, weight)
# Names of legacy BaseTask training params that map one-to-one onto Learner
# params; consumed by ExtractLearnerFromLegacyParams().
_LEGACY_LEARNER_PARAMS = [
    'bprop_variable_filter',
    'bprop_variable_exclusion',
    'clip_gradient_norm_to_value',
    'clip_gradient_single_norm_to_value',
    'colocate_gradients_with_ops',
    'gate_gradients',
    'scale_gradients',
    'grad_aggregation_method',
    'grad_norm_to_clip_to_zero',
    'grad_norm_tracker',
    'l1_regularizer_weight',
    'l2_regularizer_weight',
    'learning_rate',
    'lr_schedule',
    'optimizer',
]
def ExtractLearnerFromLegacyParams(tp, cls=Learner):
  """Extracts legacy learner params from 'tp' to a Learner params.

  Args:
    tp: BaseTask training params (p.train). Its legacy params will be cleared
      to be None after the conversion.
    cls: Learner class where we set the params.

  Returns:
    A params for Learner.
  """
  lp = cls.Params()
  lp.name = 'loss'
  for key, value in tp.IterParams():
    if key in _LEGACY_LEARNER_PARAMS:
      # Move the value over and clear it from the legacy location.
      setattr(lp, key, value)
      setattr(tp, key, None)
    else:
      tf.logging.info(
          'Ignoring legacy param %s=%s for optimization program', key, value)
  for line in lp.ToText().split('\n'):
    tf.logging.info('Learner params: %s', line)
  return lp
| <filename>lingvo/core/learner.py
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A learner optimizes a subset of variables according to a loss.
It consists of a learning rate schedule, an optimizer, and gradient clipping
mechanisms. A BaseTask can have multiple learners, each optimizing a (usually
disjoint) subset of variables.
"""
import re
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import optimizer
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.core import summary_utils
class Learner(base_layer.BaseLayer):
"""A training program layer.
The layer takes a loss tensor as input and outputs a trainer op.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'l2_regularizer_weight', None,
'If not None, L2 regularization to apply to the weights. '
'Otherwise, disable L2 regularization.')
p.Define(
'loss_name', None, 'Name(s) of the loss(es) this learner to optimize. '
'If not set, use learner name directly. '
'If given as a list, the gradients will be combined via a '
'GradientCombiner created from p.gradient_combiner, which must be '
'specified as well.')
p.Define(
'gradient_combiner', None,
'Params of a gradient_combiner.GradientCombiner used to combine '
'gradients from multiple losses.')
p.Define(
'l1_regularizer_weight', None,
'If not None, L1 regularization to apply to the weights. '
'Otherwise, disable L1 regularization.')
p.Define('learning_rate', 0.0, 'learning rate to use.')
p.Define(
'clip_gradient_norm_to_value', 0.0,
'Clip gradient by global norm to this value. This is similar to '
'the bahaviour of tf.clip_by_global_norm, if you are looking for '
'tf.clip_by_norm refer to clip_gradient_single_norm_to_value. Note '
'these are mutually exclusive.')
p.Define(
'clip_gradient_single_norm_to_value', 0.0,
'Clip gradient by single tensor norm to this value. This is '
'similar to the bahaviour of tf.clip_by_norm. Note this is mutually '
'exlusive to using clip_gradient_norm_to_value.')
p.Define('grad_norm_to_clip_to_zero', 0.0,
'Clip gradient to 0 if its norm exceeds this value.')
p.Define('grad_norm_tracker', None, 'Params for GradNormTracker.')
p.Define('optimizer', optimizer.Adam.Params(), 'Params for the optimizer.')
p.Define('lr_schedule', schedule.ContinuousSchedule.Params(),
'Learning rate decay schedule.')
p.Define(
'bprop_variable_filter', None,
'If set, only backprop variables whose names partially match '
'this regexp (re.search).')
p.Define(
'bprop_variable_exclusion', None,
'If set, do not backprop variables whose names partially match '
'this regexp (re.search).')
p.Define(
'grad_aggregation_method', tf.AggregationMethod.EXPERIMENTAL_TREE,
'Specifies the method used to combine gradient terms. Accepted '
'values are constants defined in the class AggregationMethod.')
p.Define(
'gate_gradients', False,
'If True, add a tuple around the gradients returned for an '
'operations. This avoids some race conditions.')
p.Define('colocate_gradients_with_ops', True,
'If True, try colocating gradients with the corresponding op.')
p.Define(
'skip_zero_gradients', None,
'If set, skips aggregating zero gradients while computing gradients.'
'This helps in case where some weights may not be used in forward '
'computation, e.g., sparsely activated networks or switchable layers '
'in neural architectural search. '
'Possible values are: '
'None: do not skip zero gradients; '
'"variable": skip if the entire variable gradients are almost zero; '
'"weight": skip if the individual weight gradients are almost zero.')
p.Define('scale_gradients', True,
'Whether to apply gradients adjustment and scaling.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
self._var_grads = None
self._eval_metrics = {}
if p.grad_norm_tracker:
self.CreateChild('grad_norm_tracker', p.grad_norm_tracker)
self.CreateChild('lr_schedule', p.lr_schedule)
self.CreateChild('optimizer', p.optimizer)
if isinstance(p.loss_name, (list, tuple)):
assert p.gradient_combiner
self.CreateChild('gradient_combiner', p.gradient_combiner)
else:
assert p.gradient_combiner is None
def _CreateChildrenVariables(self):
# Backwards compatibility: manually call child.InstantiateVariables()
# outside of tf.variable_scope(p.name).
if self.params.grad_norm_tracker:
self.grad_norm_tracker.InstantiateVariables()
self.lr_schedule.InstantiateVariables()
self.optimizer.InstantiateVariables()
super()._CreateChildrenVariables()
def GetVarGrads(self):
return self._var_grads
def GetTrainableVariables(self, vmap):
p = self.params
pos = re.compile(
p.bprop_variable_filter) if p.bprop_variable_filter else None
neg = re.compile(
p.bprop_variable_exclusion) if p.bprop_variable_exclusion else None
def VariableFilter(v):
"""Returns True if variable v should be optimized by this learner."""
if not v.trainable:
return False
if pos and not pos.search(v.name):
tf.logging.info('%s: disabled by bprop_variable_filter: %s', p.name,
v.name)
return False
if neg and neg.search(v.name):
tf.logging.info('%s: disabled by bprop_variable_exclusion: %s', p.name,
v.name)
return False
return True
return vmap.Filter(VariableFilter)
def ApplyPostTrainingLoop(self, global_step):
"""Applies any computation to run after each tpu trainining loop.
Args:
global_step: Global step variable.
Returns:
Ops to run after training loop ends.
"""
return self.optimizer.ApplyPostTrainingLoop(global_step)
def LearningRate(self, step):
p = self.params
lrs = self.lr_schedule.Value(step)
lrs.set_shape([])
self._AddEvalMetric('lr_schedule', lrs, tf.constant(1.0))
return p.learning_rate * lrs
def Apply(self, metrics, vmap, gradient_mask=None, gradient_adjuster=None):
"""Computes updates on 'vmap' to optimize 'loss'.
TODO(rpang): explore merging gradient_mask and gradient_adjuster.
Args:
metrics: A Dict[str, (value, weight)], from which loss can be extracted
according to p.loss_name.
vmap: A `.NestedMap` object containing variables to optimize.
gradient_mask: if not None, a dict mapping variable names to a 0/1 scalar.
gradient_adjuster: if not None, a function that mutates a given var_grads.
Returns:
(losses, op, eval_metrics), where
- losses is a list of scalar tensors;
- op is a tf.Operation to update variables;
- eval_metrics is a Dict[str, (value, weight)], where each value/weight
is a scalar tensor.
"""
# We apply gradients outside the name_scope to maintain backwards
# compatibility on variables created by self.optimizer.Apply().
losses, var_grads, eval_metrics = self._ComputeLossesAndGradients(
metrics, vmap)
var_grads, stats = self.AdjustGradients(
var_grads,
gradient_mask=gradient_mask,
gradient_adjuster=gradient_adjuster)
eval_metrics.update(stats)
self._var_grads = var_grads
assert self.theta.global_step is not None, self.theta
lr = self.LearningRate(self.theta.global_step)
var_update_op = self.optimizer.Apply(lr, var_grads)
return losses, var_update_op, eval_metrics
def ComputeActivationGradients(self, activations, activations_grad, vmap):
p = self.params
vmap = self.GetTrainableVariables(vmap)
for v in vmap.Flatten():
tf.logging.info('%s: bprop variable: %s', p.name, v.name)
return self.optimizer.ComputeGradients(
activations,
vmap,
p.grad_aggregation_method,
p.colocate_gradients_with_ops,
p.gate_gradients,
compute_gradients_fn=self._CustomComputeGradientsFn(),
skip_zero_gradients=p.skip_zero_gradients,
skip_none_gradients=False,
activations_grad=activations_grad,
is_activations=True)
def ComputeLosses(self, metrics):
p = self.params
def _Loss(metric_name):
"""Returns (loss, var_grads) computed from metrics[metric_name]."""
metric = metrics.get(metric_name, None)
if metric is None:
raise ValueError('Loss %s not found in metrics %s' %
(metric_name, list(metrics.keys())))
return metric
loss_name = p.loss_name or p.name
losses = []
if isinstance(loss_name, (list, tuple)):
for metric_name in loss_name:
loss_metric = _Loss(metric_name)
losses.append(loss_metric[0])
else:
loss_metric = _Loss(loss_name)
losses.append(loss_metric[0])
return losses
def _CustomComputeGradientsFn(self):
"""Returns the compute_gradients_fn to use for py_utils.ComputeGradients."""
return None # use the default function
def _ComputeLossesAndGradients(self, metrics, vmap):
p = self.params
vmap = self.GetTrainableVariables(vmap)
for v in vmap.Flatten():
tf.logging.info('%s: bprop variable: %s', p.name, v.name)
def LossAndGradients(metric_name):
"""Returns (loss, var_grads) computed from metrics[metric_name]."""
metric = metrics.get(metric_name, None)
if metric is None:
raise ValueError('Loss %s not found in metrics %s' %
(metric_name, list(metrics.keys())))
# TODO(b/154785713): pass (loss, loss_weight) to ComputeGradients().
loss = metric[0]
return metric, self.optimizer.ComputeGradients(
loss,
vmap,
p.grad_aggregation_method,
p.colocate_gradients_with_ops,
p.gate_gradients,
compute_gradients_fn=self._CustomComputeGradientsFn(),
skip_zero_gradients=p.skip_zero_gradients,
skip_none_gradients=False)
loss_name = p.loss_name or p.name
losses = []
eval_metrics = {}
if isinstance(loss_name, (list, tuple)):
losses_and_grads = {}
variables = None
for metric_name in loss_name:
loss_metric, var_grads = LossAndGradients(metric_name)
losses_and_grads[metric_name] = py_utils.NestedMap(
loss_metric=loss_metric,
grads=tf.nest.map_structure(lambda vg: vg.grad, var_grads))
current_vars = tf.nest.map_structure(lambda vg: vg.var, var_grads)
if variables is None:
variables = current_vars
else:
tf.nest.assert_same_structure(variables, current_vars)
losses.append(loss_metric[0])
grads, eval_metrics = self.gradient_combiner.Combine(
variables, losses_and_grads)
var_grads = tf.nest.map_structure(
lambda v, g: py_utils.VarGrad(var=v, grad=g), variables, grads)
else:
loss_metric, var_grads = LossAndGradients(loss_name)
losses.append(loss_metric[0])
return losses, py_utils.SkipNoneGradients(var_grads), eval_metrics
def AdjustGradients(self,
var_grads,
gradient_mask=None,
gradient_adjuster=None):
"""Adjusts gradients according to learner params.
Args:
var_grads: a `.NestedMap` whose values are (var, grad) pairs.
gradient_mask: if not None, a dict mapping variable names to a 0/1 scalar.
gradient_adjuster: if not None, a function that mutates a given var_grads.
Returns:
(var_grads, eval_metrics), where var_grads is a `.NestedMap` whose values
(var, grad) pairs representing adjusted gradients.
"""
p = self.params
# L2 regularizer.
if p.l2_regularizer_weight is not None:
l2_loss, var_grads = py_utils.AdjustGradientsWithLpLoss(
var_grads, p.l2_regularizer_weight, p=2.0)
self._AddEvalMetric('l2_loss', l2_loss, tf.constant(1.0))
# L1 regularizer.
if p.l1_regularizer_weight is not None:
l1_loss, var_grads = py_utils.AdjustGradientsWithLpLoss(
var_grads, p.l1_regularizer_weight, p=1.0)
self._AddEvalMetric('l1_loss', l1_loss, tf.constant(1.0))
# Mask gradients only if the mask is set.
if gradient_mask:
var_grads = py_utils.MaskGradients(var_grads, gradient_mask)
# Scale gradients, e.g., gradient clipping.
if p.scale_gradients:
scaled_vars = self.ScaleGradients(
var_grads, gradient_adjuster=gradient_adjuster)
var_grads = scaled_vars.final_var_grads
# Histogram summary.
summary_utils.CollectVarHistogram(var_grads)
return var_grads, self._eval_metrics
def _GetGlobalGradScale(self, all_grad_norm, has_nan_or_inf):
"""Returns a scaling factor for all gradients according to their norm.
In case there are NaN or Inf values the function will return 0.0.
Args:
all_grad_norm: A scalar represeting the total norm of all vars.
has_nan_or_inf: A scalar of 0 or 1, indicating whether there is any NaN or
Inf in input gradients.
Returns:
The gradient scale. 0 if gradient updates should be skipped for the step.
"""
p = self.params
# Computes gradient's scale.
grad_scale = tf.constant(1.0, all_grad_norm.dtype)
if p.clip_gradient_norm_to_value:
# If all_grad_norm > p.clip_gradient_norm_to_value, scales
# all_grads so that the norm is 1.0.
grad_scale = tf.minimum(
tf.constant(1.0, all_grad_norm.dtype),
p.clip_gradient_norm_to_value / all_grad_norm)
if p.grad_norm_to_clip_to_zero:
# If all_grad_norm > p.grad_norm_to_clip_to_zero, treats
# grad_scale as 0. This way, we ignore this step.
grad_scale *= tf.cast(all_grad_norm < p.grad_norm_to_clip_to_zero,
p.dtype)
if p.grad_norm_tracker:
grad_scale *= self.grad_norm_tracker.FPropDefaultTheta(
all_grad_norm, has_nan_or_inf)
# Force grad_scale to be 0 if there is any NaN or Inf in gradients.
grad_scale = tf.where(has_nan_or_inf, tf.constant(0.0, grad_scale.dtype),
grad_scale)
return grad_scale
def ScaleGradients(self, var_grads, gradient_adjuster=None):
"""Scales gradients according to training params.
Args:
var_grads: a `.NestedMap` whose values are (var, grad) pairs.
gradient_adjuster: if not None, a function that mutates a given var_grads.
Returns:
A `.NestedMap` containing
- final_var_grads: a `.NestedMap` whose values are (var, grad) pairs,
where gradients have already been scaled.
- grad_scale: the gradient scale. 0 if gradient updates should be skipped
for the step. (Optional, only returned in case global norm clipping is
used.)
"""
p = self.params
# Computes gradients' norm and adds their summaries. Note that all_grad_norm
# may be nan, which may cause grad_scale to be nan.
for name, vg in var_grads.FlattenItems():
summary_utils.AddNormSummary(
py_utils.SanitizeScopeKey(name) + '/' + p.name, vg)
flatten = py_utils.Flatten(var_grads)
all_grad_norm = tf.sqrt(py_utils.SumSquared([g for (_, g) in flatten]))
all_var_norm = tf.sqrt(py_utils.SumSquared([v for (v, _) in flatten]))
grad_norm_is_nan_or_inf = tf.math.logical_or(
tf.math.is_nan(all_grad_norm), tf.math.is_inf(all_grad_norm))
# Optional gradient adjustment. Note that this happens after computing
# all_grad_norm.
if gradient_adjuster is not None:
tf.logging.info('gradient_adjuster=%s', gradient_adjuster)
var_grads = gradient_adjuster(var_grads)
# Handles NaN/Inf gradients.
has_nan_or_inf = py_utils.HasNanOrInfGradient(var_grads)
# Grad norm can still be inf even if none of the individual grad is inf.
has_nan_or_inf = tf.math.logical_or(has_nan_or_inf, grad_norm_is_nan_or_inf)
self._AddEvalMetric('has_nan_or_inf', has_nan_or_inf, tf.constant(1.0))
return_values = py_utils.NestedMap()
if p.clip_gradient_single_norm_to_value:
# Currently using both types of clipping simultaneously is unsupported.
if p.clip_gradient_norm_to_value:
raise ValueError('Cannot use clip_gradient_single_norm_to_value=%f and '
'clip_gradient_norm_to_value=%f.' %
(p.clip_gradient_single_norm_to_value,
p.clip_gradient_norm_to_value))
final_var_grads = py_utils.ApplyGradNormClipping(
var_grads, p.clip_gradient_single_norm_to_value)
else:
grad_scale = self._GetGlobalGradScale(all_grad_norm, has_nan_or_inf)
# grad_norm/all is both a eval metric(collected by trainer) and a summary
# (collected by controller).
summary_utils.scalar(f'grad_norm/all/{p.name}', all_grad_norm)
self._AddEvalMetric('grad_norm/all', all_grad_norm, tf.constant(1.0))
self._AddEvalMetric('var_norm/all', all_var_norm, tf.constant(1.0))
self._AddEvalMetric('grad_scale_all', grad_scale, tf.constant(1.0))
final_var_grads = py_utils.ApplyGradMultiplier(var_grads, grad_scale)
return_values.grad_scale = grad_scale
return_values.final_var_grads = final_var_grads
return return_values
def _AddEvalMetric(self, key, value, weight):
self._eval_metrics[key] = (value, weight)
_LEGACY_LEARNER_PARAMS = [
'bprop_variable_filter',
'bprop_variable_exclusion',
'clip_gradient_norm_to_value',
'clip_gradient_single_norm_to_value',
'colocate_gradients_with_ops',
'gate_gradients',
'scale_gradients',
'grad_aggregation_method',
'grad_norm_to_clip_to_zero',
'grad_norm_tracker',
'l1_regularizer_weight',
'l2_regularizer_weight',
'learning_rate',
'lr_schedule',
'optimizer',
]
def ExtractLearnerFromLegacyParams(tp, cls=Learner):
"""Extracts legacy learner params from 'tp' to a Learner params.
Args:
tp: BaseTask training params (p.train). Its legacy params will be cleared to
be None after the conversion.
cls: Learner class where we set the params.
Returns:
A params for Learner.
"""
lp = cls.Params()
lp.name = 'loss'
for k, v in tp.IterParams():
if k not in _LEGACY_LEARNER_PARAMS:
tf.logging.info(
'Ignoring legacy param %s=%s for optimization program', k, v)
continue
setattr(lp, k, v)
setattr(tp, k, None)
for line in lp.ToText().split('\n'):
tf.logging.info('Learner params: %s', line)
return lp
| en | 0.71105 | # Lint as: python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== A learner optimizes a subset of variables according to a loss. It consists of a learning rate schedule, an optimizer, and gradient clipping mechanisms. A BaseTask can have multiple learners, each optimizing a (usually disjoint) subset of variables. A training program layer. The layer takes a loss tensor as input and outputs a trainer op. # Backwards compatibility: manually call child.InstantiateVariables() # outside of tf.variable_scope(p.name). Returns True if variable v should be optimized by this learner. Applies any computation to run after each tpu trainining loop. Args: global_step: Global step variable. Returns: Ops to run after training loop ends. Computes updates on 'vmap' to optimize 'loss'. TODO(rpang): explore merging gradient_mask and gradient_adjuster. Args: metrics: A Dict[str, (value, weight)], from which loss can be extracted according to p.loss_name. vmap: A `.NestedMap` object containing variables to optimize. gradient_mask: if not None, a dict mapping variable names to a 0/1 scalar. gradient_adjuster: if not None, a function that mutates a given var_grads. 
Returns: (losses, op, eval_metrics), where - losses is a list of scalar tensors; - op is a tf.Operation to update variables; - eval_metrics is a Dict[str, (value, weight)], where each value/weight is a scalar tensor. # We apply gradients outside the name_scope to maintain backwards # compatibility on variables created by self.optimizer.Apply(). Returns (loss, var_grads) computed from metrics[metric_name]. Returns the compute_gradients_fn to use for py_utils.ComputeGradients. # use the default function Returns (loss, var_grads) computed from metrics[metric_name]. # TODO(b/154785713): pass (loss, loss_weight) to ComputeGradients(). Adjusts gradients according to learner params. Args: var_grads: a `.NestedMap` whose values are (var, grad) pairs. gradient_mask: if not None, a dict mapping variable names to a 0/1 scalar. gradient_adjuster: if not None, a function that mutates a given var_grads. Returns: (var_grads, eval_metrics), where var_grads is a `.NestedMap` whose values (var, grad) pairs representing adjusted gradients. # L2 regularizer. # L1 regularizer. # Mask gradients only if the mask is set. # Scale gradients, e.g., gradient clipping. # Histogram summary. Returns a scaling factor for all gradients according to their norm. In case there are NaN or Inf values the function will return 0.0. Args: all_grad_norm: A scalar represeting the total norm of all vars. has_nan_or_inf: A scalar of 0 or 1, indicating whether there is any NaN or Inf in input gradients. Returns: The gradient scale. 0 if gradient updates should be skipped for the step. # Computes gradient's scale. # If all_grad_norm > p.clip_gradient_norm_to_value, scales # all_grads so that the norm is 1.0. # If all_grad_norm > p.grad_norm_to_clip_to_zero, treats # grad_scale as 0. This way, we ignore this step. # Force grad_scale to be 0 if there is any NaN or Inf in gradients. Scales gradients according to training params. Args: var_grads: a `.NestedMap` whose values are (var, grad) pairs. 
gradient_adjuster: if not None, a function that mutates a given var_grads. Returns: A `.NestedMap` containing - final_var_grads: a `.NestedMap` whose values are (var, grad) pairs, where gradients have already been scaled. - grad_scale: the gradient scale. 0 if gradient updates should be skipped for the step. (Optional, only returned in case global norm clipping is used.) # Computes gradients' norm and adds their summaries. Note that all_grad_norm # may be nan, which may cause grad_scale to be nan. # Optional gradient adjustment. Note that this happens after computing # all_grad_norm. # Handles NaN/Inf gradients. # Grad norm can still be inf even if none of the individual grad is inf. # Currently using both types of clipping simultaneously is unsupported. # grad_norm/all is both a eval metric(collected by trainer) and a summary # (collected by controller). Extracts legacy learner params from 'tp' to a Learner params. Args: tp: BaseTask training params (p.train). Its legacy params will be cleared to be None after the conversion. cls: Learner class where we set the params. Returns: A params for Learner. | 2.656918 | 3 |
src/eon_service/pipelines/data_science/__init__.py | InnovativeDigitalSolution/NASA_ML-airport-estimated-ON | 1 | 6624759 | from .pipeline import create_pipelines # NOQA | from .pipeline import create_pipelines # NOQA | none | 1 | 1.027917 | 1 | |
maskrcnn_benchmark/layers/sigmoid_dr_loss.py | banben/maskrcnn-benchmark | 114 | 6624760 | <gh_stars>100-1000
import torch
from torch import nn
import torch.nn.functional as F
import math
"""
PyTorch Implementation for DR Loss
Reference
CVPR'20: "DR Loss: Improving Object Detection by Distributional Ranking"
Copyright@Alibaba Group Holding Limited
"""
class SigmoidDRLoss(nn.Module):
    """Distributional Ranking (DR) loss on sigmoid classification scores.

    Reference: CVPR'20, "DR Loss: Improving Object Detection by
    Distributional Ranking" (Alibaba Group).

    The loss ranks the expected positive score above the expected negative
    score -- each expectation taken under a softmax-derived distribution over
    the candidate boxes -- by at least ``self.margin``, smoothed with a
    logistic (softplus) surrogate.

    Args:
        pos_lambda: temperature of the distribution over positive scores.
        neg_lambda: temperature of the distribution over negative scores.
        L: sharpness of the logistic ranking surrogate.
        tau: overall loss scale.
    """
    def __init__(self, pos_lambda=1, neg_lambda=0.1/math.log(3.5), L=6., tau=4.):
        super(SigmoidDRLoss, self).__init__()
        self.margin = 0.5
        self.pos_lambda = pos_lambda
        self.neg_lambda = neg_lambda
        self.L = L
        self.tau = tau
    def forward(self, logits, targets):
        """Compute the DR loss.

        Args:
            logits: (N, num_classes) raw classification scores.
            targets: (N,) integer labels; values 1..num_classes mark the
                foreground class, 0 is background (all-negative), and
                negative values are excluded from both masks.

        Returns:
            A scalar loss tensor.
        """
        num_classes = logits.shape[1]
        dtype = targets.dtype
        device = targets.device
        # class_range[c] == c + 1; comparing labels against it one-hot-expands them.
        class_range = torch.arange(1, num_classes + 1, dtype=dtype, device=device).unsqueeze(0)
        t = targets.unsqueeze(1)
        pos_ind = (t == class_range)
        # Use logical '&' to combine boolean masks (clearer than '*', and avoids
        # deprecated uint8 mask arithmetic on older PyTorch versions).
        neg_ind = (t != class_range) & (t >= 0)
        pos_prob = logits[pos_ind].sigmoid()
        neg_prob = logits[neg_ind].sigmoid()
        # Expected negative score under the ranking distribution (higher
        # negative scores receive more weight).
        neg_q = F.softmax(neg_prob/self.neg_lambda, dim=0)
        neg_dist = torch.sum(neg_q * neg_prob)
        if pos_prob.numel() > 0:
            # Expected positive score; the minus sign weights LOW positive
            # scores more heavily (worst-case ranking).
            pos_q = F.softmax(-pos_prob/self.pos_lambda, dim=0)
            pos_dist = torch.sum(pos_q * pos_prob)
            rank_gap = neg_dist - pos_dist + self.margin
        else:
            # No positives in the batch: rank negatives against the ideal
            # positive score of 1.
            rank_gap = neg_dist - 1. + self.margin
        # softplus(x) == log(1 + exp(x)), computed in a numerically stable way;
        # the explicit log/exp form can overflow to inf for large arguments.
        loss = self.tau * F.softplus(self.L * rank_gap) / self.L
        return loss
| import torch
from torch import nn
import torch.nn.functional as F
import math
"""
PyTorch Implementation for DR Loss
Reference
CVPR'20: "DR Loss: Improving Object Detection by Distributional Ranking"
Copyright@Alibaba Group Holding Limited
"""
class SigmoidDRLoss(nn.Module):
def __init__(self, pos_lambda=1, neg_lambda=0.1/math.log(3.5), L=6., tau=4.):
super(SigmoidDRLoss, self).__init__()
self.margin = 0.5
self.pos_lambda = pos_lambda
self.neg_lambda = neg_lambda
self.L = L
self.tau = tau
def forward(self, logits, targets):
num_classes = logits.shape[1]
dtype = targets.dtype
device = targets.device
class_range = torch.arange(1, num_classes + 1, dtype=dtype, device=device).unsqueeze(0)
t = targets.unsqueeze(1)
pos_ind = (t == class_range)
neg_ind = (t != class_range) * (t >= 0)
pos_prob = logits[pos_ind].sigmoid()
neg_prob = logits[neg_ind].sigmoid()
neg_q = F.softmax(neg_prob/self.neg_lambda, dim=0)
neg_dist = torch.sum(neg_q * neg_prob)
if pos_prob.numel() > 0:
pos_q = F.softmax(-pos_prob/self.pos_lambda, dim=0)
pos_dist = torch.sum(pos_q * pos_prob)
loss = self.tau*torch.log(1.+torch.exp(self.L*(neg_dist - pos_dist+self.margin)))/self.L
else:
loss = self.tau*torch.log(1.+torch.exp(self.L*(neg_dist - 1. + self.margin)))/self.L
return loss | en | 0.687481 | PyTorch Implementation for DR Loss Reference CVPR'20: "DR Loss: Improving Object Detection by Distributional Ranking" Copyright@Alibaba Group Holding Limited | 2.827134 | 3 |
mainapp/admin.py | singlasahil221/developers | 0 | 6624761 | <gh_stars>0
from django.contrib import admin
from .models import code_model
# Register code_model with the Django admin site so its records can be
# created/edited/deleted there (uses the default ModelAdmin options).
admin.site.register(code_model)
from .models import code_model
# Register your models here.
admin.site.register(code_model) | en | 0.968259 | # Register your models here. | 1.298661 | 1 |
BSSN/BSSN_RHSs.py | leowerneck/NRPyIGM | 0 | 6624762 | # As documented in the NRPy+ tutorial module
# Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb,
# this module will construct the right-hand sides (RHSs)
# expressions of the BSSN time evolution equations.
#
# Time-evolution equations for the BSSN gauge conditions are
# specified in the BSSN_gauge_RHSs module and documented in
# the Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb
# NRPy+ tutorial module.
# Author: <NAME>
# zachetie **at** gmail **dot* com
# Step 1.a: import all needed modules from NRPy+:
import sympy as sp
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
import reference_metric as rfm
# Module-level flag: set to True by BSSN_RHSs() so other modules can check
# whether the RHS expressions have already been generated.
have_already_called_BSSN_RHSs_function = False
# Step 1.b: Set the coordinate system for the numerical grid:
# DO NOT SET IN STANDALONE PYTHON MODULE
# par.set_parval_from_str("reference_metric::CoordSystem","Spherical")
def BSSN_RHSs():
    """Construct symbolic right-hand sides of the BSSN time-evolution equations.

    Builds SymPy expressions for the time derivatives of the rescaled BSSN
    evolution variables in the reference metric chosen via
    reference_metric::CoordSystem, following the step numbering of the
    Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb notebook.

    Returns nothing; results are published as module-level globals:
      * cf_rhs           -- RHS of the conformal-factor variable "cf"
                            (cf = phi, W, or chi per EvolvedConformalFactor_cf)
      * trK_rhs          -- RHS of trK (trace of the extrinsic curvature)
      * Lambdabar_rhsU, LambdabarU_dupD -- RHS of Lambdabar^i and its upwinded
                            derivative (also consumed by the gauge-RHS module)
      * h_rhsDD, a_rhsDD, lambda_rhsU   -- rescaled RHSs of gammabar_{ij},
                            Abar_{ij}, and Lambdabar^i (smooth across
                            coordinate singularities)
    Side effect: sets have_already_called_BSSN_RHSs_function = True.
    """
    # Step 1.c: Given the chosen coordinate system, set up
    #           corresponding reference metric and needed
    #           reference metric quantities
    # The following function call sets up the reference metric
    # and related quantities, including rescaling matrices ReDD,
    # ReU, and hatted quantities.
    rfm.reference_metric()
    global have_already_called_BSSN_RHSs_function # setting to global enables other modules to see updated value.
    have_already_called_BSSN_RHSs_function = True
    # Step 1.d: Set spatial dimension (must be 3 for BSSN, as BSSN is
    #           a 3+1-dimensional decomposition of the general
    #           relativistic field equations)
    DIM = 3
    # Step 1.e: Import all basic (unrescaled) BSSN scalars & tensors
    import BSSN.BSSN_quantities as Bq
    Bq.BSSN_basic_tensors()
    gammabarDD = Bq.gammabarDD
    AbarDD = Bq.AbarDD
    LambdabarU = Bq.LambdabarU
    trK = Bq.trK
    alpha = Bq.alpha
    betaU = Bq.betaU
    # Step 1.f: Import all neeeded rescaled BSSN tensors:
    # NOTE(review): aDD is bound here but never referenced later in this function.
    aDD = Bq.aDD
    cf = Bq.cf
    lambdaU = Bq.lambdaU
    # Step 2.a.i: Import derivative expressions for betaU defined in the BSSN.BSSN_quantities module:
    Bq.betaU_derivs()
    betaU_dD = Bq.betaU_dD
    betaU_dDD = Bq.betaU_dDD
    # Step 2.a.ii: Import derivative expression for gammabarDD
    Bq.gammabar__inverse_and_derivs()
    gammabarDD_dupD = Bq.gammabarDD_dupD
    # Step 2.a.iii: First term of \partial_t \bar{\gamma}_{i j} right-hand side:
    # \beta^k \bar{\gamma}_{ij,k} + \beta^k_{,i} \bar{\gamma}_{kj} + \beta^k_{,j} \bar{\gamma}_{ik}
    gammabar_rhsDD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                gammabar_rhsDD[i][j] += betaU[k] * gammabarDD_dupD[i][j][k] + betaU_dD[k][i] * gammabarDD[k][j] \
                                        + betaU_dD[k][j] * gammabarDD[i][k]
    # Step 2.b.i: First import \bar{A}_{ij} = AbarDD[i][j], and its contraction trAbar = \bar{A}^k_k
    #             from BSSN.BSSN_quantities
    Bq.AbarUU_AbarUD_trAbar_AbarDD_dD()
    trAbar = Bq.trAbar
    # Step 2.b.ii: Import detgammabar quantities from BSSN.BSSN_quantities:
    Bq.detgammabar_and_derivs()
    detgammabar = Bq.detgammabar
    detgammabar_dD = Bq.detgammabar_dD
    # Step 2.b.ii: Compute the contraction \bar{D}_k \beta^k = \beta^k_{,k} + \frac{\beta^k \bar{\gamma}_{,k}}{2 \bar{\gamma}}
    Dbarbetacontraction = sp.sympify(0)
    for k in range(DIM):
        Dbarbetacontraction += betaU_dD[k][k] + betaU[k] * detgammabar_dD[k] / (2 * detgammabar)
    # Step 2.b.iii: Second term of \partial_t \bar{\gamma}_{i j} right-hand side:
    # \frac{2}{3} \bar{\gamma}_{i j} \left (\alpha \bar{A}_{k}^{k} - \bar{D}_{k} \beta^{k}\right )
    for i in range(DIM):
        for j in range(DIM):
            gammabar_rhsDD[i][j] += sp.Rational(2, 3) * gammabarDD[i][j] * (alpha * trAbar - Dbarbetacontraction)
    # Step 2.c: Third term of \partial_t \bar{\gamma}_{i j} right-hand side:
    # -2 \alpha \bar{A}_{ij}
    for i in range(DIM):
        for j in range(DIM):
            gammabar_rhsDD[i][j] += -2 * alpha * AbarDD[i][j]
    # Step 3.a: First term of \partial_t \bar{A}_{i j}:
    # \beta^k \partial_k \bar{A}_{ij} + \partial_i \beta^k \bar{A}_{kj} + \partial_j \beta^k \bar{A}_{ik}
    # First define AbarDD_dupD:
    AbarDD_dupD = Bq.AbarDD_dupD # From Bq.AbarUU_AbarUD_trAbar_AbarDD_dD()
    Abar_rhsDD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                Abar_rhsDD[i][j] += betaU[k] * AbarDD_dupD[i][j][k] + betaU_dD[k][i] * AbarDD[k][j] \
                                    + betaU_dD[k][j] * AbarDD[i][k]
    # Step 3.b: Second term of \partial_t \bar{A}_{i j}:
    # - (2/3) \bar{A}_{i j} \bar{D}_{k} \beta^{k} - 2 \alpha \bar{A}_{i k} {\bar{A}^{k}}_{j} + \alpha \bar{A}_{i j} K
    gammabarUU = Bq.gammabarUU # From Bq.gammabar__inverse_and_derivs()
    AbarUD = Bq.AbarUD # From Bq.AbarUU_AbarUD_trAbar()
    for i in range(DIM):
        for j in range(DIM):
            Abar_rhsDD[i][j] += -sp.Rational(2, 3) * AbarDD[i][j] * Dbarbetacontraction + alpha * AbarDD[i][j] * trK
            for k in range(DIM):
                Abar_rhsDD[i][j] += -2 * alpha * AbarDD[i][k] * AbarUD[k][j]
    # Step 3.c.i: Define partial derivatives of \phi in terms of evolved quantity "cf":
    Bq.phi_and_derivs()
    phi_dD = Bq.phi_dD
    phi_dupD = Bq.phi_dupD
    # NOTE(review): phi_dDD is bound here but never referenced later in this function.
    phi_dDD = Bq.phi_dDD
    exp_m4phi = Bq.exp_m4phi
    phi_dBarD = Bq.phi_dBarD # phi_dBarD = Dbar_i phi = phi_dD (since phi is a scalar)
    phi_dBarDD = Bq.phi_dBarDD # phi_dBarDD = Dbar_i Dbar_j phi (covariant derivative)
    # Step 3.c.ii: Define RbarDD
    Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
    RbarDD = Bq.RbarDD
    # Step 3.c.iii: Define first and second derivatives of \alpha, as well as
    #               \bar{D}_i \bar{D}_j \alpha, which is defined just like phi
    alpha_dD = ixp.declarerank1("alpha_dD")
    alpha_dDD = ixp.declarerank2("alpha_dDD", "sym01")
    alpha_dBarD = alpha_dD
    alpha_dBarDD = ixp.zerorank2()
    GammabarUDD = Bq.GammabarUDD # Defined in Bq.gammabar__inverse_and_derivs()
    for i in range(DIM):
        for j in range(DIM):
            alpha_dBarDD[i][j] = alpha_dDD[i][j]
            for k in range(DIM):
                alpha_dBarDD[i][j] += - GammabarUDD[k][i][j] * alpha_dD[k]
    # Step 3.c.iv: Define the terms in curly braces:
    curlybrackettermsDD = ixp.zerorank2()
    for i in range(DIM):
        for j in range(DIM):
            curlybrackettermsDD[i][j] = -2 * alpha * phi_dBarDD[i][j] + 4 * alpha * phi_dBarD[i] * phi_dBarD[j] \
                                        + 2 * alpha_dBarD[i] * phi_dBarD[j] \
                                        + 2 * alpha_dBarD[j] * phi_dBarD[i] \
                                        - alpha_dBarDD[i][j] + alpha * RbarDD[i][j]
    # Step 3.c.v: Compute the trace:
    curlybracketterms_trace = sp.sympify(0)
    for i in range(DIM):
        for j in range(DIM):
            curlybracketterms_trace += gammabarUU[i][j] * curlybrackettermsDD[i][j]
    # Step 3.c.vi: Third and final term of Abar_rhsDD[i][j]:
    for i in range(DIM):
        for j in range(DIM):
            Abar_rhsDD[i][j] += exp_m4phi * (curlybrackettermsDD[i][j] -
                                             sp.Rational(1, 3) * gammabarDD[i][j] * curlybracketterms_trace)
    # Step 4: Right-hand side of conformal factor variable "cf". Supported
    #         options include: cf=phi, cf=W=e^(-2*phi) (default), and cf=chi=e^(-4*phi)
    # \partial_t phi = \left[\beta^k \partial_k \phi \right] <- TERM 1
    #                  + \frac{1}{6} \left (\bar{D}_{k} \beta^{k} - \alpha K \right ) <- TERM 2
    global cf_rhs
    cf_rhs = sp.Rational(1, 6) * (Dbarbetacontraction - alpha * trK) # Term 2
    for k in range(DIM):
        cf_rhs += betaU[k] * phi_dupD[k] # Term 1
    # Next multiply to convert phi_rhs to cf_rhs.
    if par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") == "phi":
        pass # do nothing; cf_rhs = phi_rhs
    elif par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") == "W":
        cf_rhs *= -2 * cf # cf_rhs = -2*cf*phi_rhs
    elif par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") == "chi":
        cf_rhs *= -4 * cf # cf_rhs = -4*cf*phi_rhs
    else:
        print("Error: EvolvedConformalFactor_cf == " +
              par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") + " unsupported!")
        exit(1)
    # Step 5: right-hand side of trK (trace of extrinsic curvature):
    # \partial_t K = \beta^k \partial_k K <- TERM 1
    #                + \frac{1}{3} \alpha K^{2} <- TERM 2
    #                + \alpha \bar{A}_{i j} \bar{A}^{i j} <- TERM 3
    #                - - e^{-4 \phi} (\bar{D}_{i} \bar{D}^{i} \alpha + 2 \bar{D}^{i} \alpha \bar{D}_{i} \phi ) <- TERM 4
    global trK_rhs
    # TERM 2:
    trK_rhs = sp.Rational(1, 3) * alpha * trK * trK
    trK_dupD = ixp.declarerank1("trK_dupD")
    for i in range(DIM):
        # TERM 1:
        trK_rhs += betaU[i] * trK_dupD[i]
    for i in range(DIM):
        for j in range(DIM):
            # TERM 4:
            trK_rhs += -exp_m4phi * gammabarUU[i][j] * (alpha_dBarDD[i][j] + 2 * alpha_dBarD[j] * phi_dBarD[i])
    AbarUU = Bq.AbarUU # From Bq.AbarUU_AbarUD_trAbar()
    for i in range(DIM):
        for j in range(DIM):
            # TERM 3:
            trK_rhs += alpha * AbarDD[i][j] * AbarUU[i][j]
    # Step 6: right-hand side of \partial_t \bar{\Lambda}^i:
    # \partial_t \bar{\Lambda}^i = \beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k <- TERM 1
    #                              + \bar{\gamma}^{j k} \hat{D}_{j} \hat{D}_{k} \beta^{i} <- TERM 2
    #                              + \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j} <- TERM 3
    #                              + \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j} <- TERM 4
    #                              - 2 \bar{A}^{i j} (\partial_{j} \alpha - 6 \partial_{j} \phi) <- TERM 5
    #                              + 2 \alpha \bar{A}^{j k} \Delta_{j k}^{i} <- TERM 6
    #                              - \frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K <- TERM 7
    # Step 6.a: Term 1 of \partial_t \bar{\Lambda}^i: \beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k
    # First we declare \bar{\Lambda}^i and \bar{\Lambda}^i_{,j} in terms of \lambda^i and \lambda^i_{,j}
    global LambdabarU_dupD # Used on the RHS of the Gamma-driving shift conditions
    LambdabarU_dupD = ixp.zerorank2()
    lambdaU_dupD = ixp.declarerank2("lambdaU_dupD", "nosym")
    for i in range(DIM):
        for j in range(DIM):
            LambdabarU_dupD[i][j] = lambdaU_dupD[i][j] * rfm.ReU[i] + lambdaU[i] * rfm.ReUdD[i][j]
    global Lambdabar_rhsU # Used on the RHS of the Gamma-driving shift conditions
    Lambdabar_rhsU = ixp.zerorank1()
    for i in range(DIM):
        for k in range(DIM):
            Lambdabar_rhsU[i] += betaU[k] * LambdabarU_dupD[i][k] - betaU_dD[i][k] * LambdabarU[k] # Term 1
    # Step 6.b: Term 2 of \partial_t \bar{\Lambda}^i = \bar{\gamma}^{jk} (Term 2a + Term 2b + Term 2c)
    # Term 2a: \bar{\gamma}^{jk} \beta^i_{,kj}
    Term2aUDD = ixp.zerorank3()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                Term2aUDD[i][j][k] += betaU_dDD[i][k][j]
    # Term 2b: \hat{\Gamma}^i_{mk,j} \beta^m + \hat{\Gamma}^i_{mk} \beta^m_{,j}
    #          + \hat{\Gamma}^i_{dj}\beta^d_{,k} - \hat{\Gamma}^d_{kj} \beta^i_{,d}
    Term2bUDD = ixp.zerorank3()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                for m in range(DIM):
                    Term2bUDD[i][j][k] += rfm.GammahatUDDdD[i][m][k][j] * betaU[m] \
                                          + rfm.GammahatUDD[i][m][k] * betaU_dD[m][j] \
                                          + rfm.GammahatUDD[i][m][j] * betaU_dD[m][k] \
                                          - rfm.GammahatUDD[m][k][j] * betaU_dD[i][m]
    # Term 2c: \hat{\Gamma}^i_{dj}\hat{\Gamma}^d_{mk} \beta^m - \hat{\Gamma}^d_{kj} \hat{\Gamma}^i_{md} \beta^m
    Term2cUDD = ixp.zerorank3()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                for m in range(DIM):
                    for d in range(DIM):
                        Term2cUDD[i][j][k] += (rfm.GammahatUDD[i][d][j] * rfm.GammahatUDD[d][m][k] \
                                               - rfm.GammahatUDD[d][k][j] * rfm.GammahatUDD[i][m][d]) * betaU[m]
    # NOTE(review): Lambdabar_rhsUpieceU is accumulated below but never read in
    # this function; presumably kept for inspection/debugging elsewhere -- TODO confirm.
    Lambdabar_rhsUpieceU = ixp.zerorank1()
    # Put it all together to get Term 2:
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                Lambdabar_rhsU[i] += gammabarUU[j][k] * (Term2aUDD[i][j][k] + Term2bUDD[i][j][k] + Term2cUDD[i][j][k])
                Lambdabar_rhsUpieceU[i] += gammabarUU[j][k] * (
                        Term2aUDD[i][j][k] + Term2bUDD[i][j][k] + Term2cUDD[i][j][k])
    # Step 6.c: Term 3 of \partial_t \bar{\Lambda}^i:
    # \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j}
    DGammaU = Bq.DGammaU # From Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
    for i in range(DIM):
        Lambdabar_rhsU[i] += sp.Rational(2, 3) * DGammaU[i] * Dbarbetacontraction # Term 3
    # Step 6.d: Term 4 of \partial_t \bar{\Lambda}^i:
    # \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j}
    detgammabar_dDD = Bq.detgammabar_dDD # From Bq.detgammabar_and_derivs()
    Dbarbetacontraction_dBarD = ixp.zerorank1()
    for k in range(DIM):
        for m in range(DIM):
            Dbarbetacontraction_dBarD[m] += betaU_dDD[k][k][m] + \
                                            (betaU_dD[k][m] * detgammabar_dD[k] +
                                             betaU[k] * detgammabar_dDD[k][m]) / (2 * detgammabar) \
                                            - betaU[k] * detgammabar_dD[k] * detgammabar_dD[m] / (
                                                    2 * detgammabar * detgammabar)
    for i in range(DIM):
        for m in range(DIM):
            Lambdabar_rhsU[i] += sp.Rational(1, 3) * gammabarUU[i][m] * Dbarbetacontraction_dBarD[m]
    # Step 6.e: Term 5 of \partial_t \bar{\Lambda}^i:
    # - 2 \bar{A}^{i j} (\partial_{j} \alpha - 6 \alpha \partial_{j} \phi)
    for i in range(DIM):
        for j in range(DIM):
            Lambdabar_rhsU[i] += -2 * AbarUU[i][j] * (alpha_dD[j] - 6 * alpha * phi_dD[j])
    # Step 6.f: Term 6 of \partial_t \bar{\Lambda}^i:
    # 2 \alpha \bar{A}^{j k} \Delta^{i}_{j k}
    DGammaUDD = Bq.DGammaUDD # From RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
    for i in range(DIM):
        for j in range(DIM):
            for k in range(DIM):
                Lambdabar_rhsU[i] += 2 * alpha * AbarUU[j][k] * DGammaUDD[i][j][k]
    # Step 6.g: Term 7 of \partial_t \bar{\Lambda}^i:
    # -\frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K
    trK_dD = ixp.declarerank1("trK_dD")
    for i in range(DIM):
        for j in range(DIM):
            Lambdabar_rhsU[i] += -sp.Rational(4, 3) * alpha * gammabarUU[i][j] * trK_dD[j]
    # Step 7: Rescale the RHS quantities so that the evolved
    #         variables are smooth across coord singularities
    global h_rhsDD,a_rhsDD,lambda_rhsU
    h_rhsDD = ixp.zerorank2()
    a_rhsDD = ixp.zerorank2()
    lambda_rhsU = ixp.zerorank1()
    for i in range(DIM):
        lambda_rhsU[i] = Lambdabar_rhsU[i] / rfm.ReU[i]
        for j in range(DIM):
            h_rhsDD[i][j] = gammabar_rhsDD[i][j] / rfm.ReDD[i][j]
            a_rhsDD[i][j] = Abar_rhsDD[i][j] / rfm.ReDD[i][j]
    # print(str(Abar_rhsDD[2][2]).replace("**","^").replace("_","").replace("xx","x").replace("sin(x2)","Sin[x2]").replace("sin(2*x2)","Sin[2*x2]").replace("cos(x2)","Cos[x2]").replace("detgbaroverdetghat","detg"))
    # print(str(Dbarbetacontraction).replace("**","^").replace("_","").replace("xx","x").replace("sin(x2)","Sin[x2]").replace("detgbaroverdetghat","detg"))
    # print(betaU_dD)
    # print(str(trK_rhs).replace("xx2","xx3").replace("xx1","xx2").replace("xx0","xx1").replace("**","^").replace("_","").replace("sin(xx2)","Sinx2").replace("xx","x").replace("sin(2*x2)","Sin2x2").replace("cos(x2)","Cosx2").replace("detgbaroverdetghat","detg"))
    # print(str(bet_rhsU[0]).replace("xx2","xx3").replace("xx1","xx2").replace("xx0","xx1").replace("**","^").replace("_","").replace("sin(xx2)","Sinx2").replace("xx","x").replace("sin(2*x2)","Sin2x2").replace("cos(x2)","Cosx2").replace("detgbaroverdetghat","detg"))
| # As documented in the NRPy+ tutorial module
# Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb,
# this module will construct the right-hand sides (RHSs)
# expressions of the BSSN time evolution equations.
#
# Time-evolution equations for the BSSN gauge conditions are
# specified in the BSSN_gauge_RHSs module and documented in
# the Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb
# NRPy+ tutorial module.
# Author: <NAME>
# zachetie **at** gmail **dot* com
# Step 1.a: import all needed modules from NRPy+:
import sympy as sp
import NRPy_param_funcs as par
import indexedexp as ixp
import grid as gri
import finite_difference as fin
import reference_metric as rfm
have_already_called_BSSN_RHSs_function = False
# Step 1.b: Set the coordinate system for the numerical grid:
# DO NOT SET IN STANDALONE PYTHON MODULE
# par.set_parval_from_str("reference_metric::CoordSystem","Spherical")
def BSSN_RHSs():
# Step 1.c: Given the chosen coordinate system, set up
# corresponding reference metric and needed
# reference metric quantities
# The following function call sets up the reference metric
# and related quantities, including rescaling matrices ReDD,
# ReU, and hatted quantities.
rfm.reference_metric()
global have_already_called_BSSN_RHSs_function # setting to global enables other modules to see updated value.
have_already_called_BSSN_RHSs_function = True
# Step 1.d: Set spatial dimension (must be 3 for BSSN, as BSSN is
# a 3+1-dimensional decomposition of the general
# relativistic field equations)
DIM = 3
# Step 1.e: Import all basic (unrescaled) BSSN scalars & tensors
import BSSN.BSSN_quantities as Bq
Bq.BSSN_basic_tensors()
gammabarDD = Bq.gammabarDD
AbarDD = Bq.AbarDD
LambdabarU = Bq.LambdabarU
trK = Bq.trK
alpha = Bq.alpha
betaU = Bq.betaU
# Step 1.f: Import all neeeded rescaled BSSN tensors:
aDD = Bq.aDD
cf = Bq.cf
lambdaU = Bq.lambdaU
# Step 2.a.i: Import derivative expressions for betaU defined in the BSSN.BSSN_quantities module:
Bq.betaU_derivs()
betaU_dD = Bq.betaU_dD
betaU_dDD = Bq.betaU_dDD
# Step 2.a.ii: Import derivative expression for gammabarDD
Bq.gammabar__inverse_and_derivs()
gammabarDD_dupD = Bq.gammabarDD_dupD
# Step 2.a.iii: First term of \partial_t \bar{\gamma}_{i j} right-hand side:
# \beta^k \bar{\gamma}_{ij,k} + \beta^k_{,i} \bar{\gamma}_{kj} + \beta^k_{,j} \bar{\gamma}_{ik}
gammabar_rhsDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
gammabar_rhsDD[i][j] += betaU[k] * gammabarDD_dupD[i][j][k] + betaU_dD[k][i] * gammabarDD[k][j] \
+ betaU_dD[k][j] * gammabarDD[i][k]
# Step 2.b.i: First import \bar{A}_{ij} = AbarDD[i][j], and its contraction trAbar = \bar{A}^k_k
# from BSSN.BSSN_quantities
Bq.AbarUU_AbarUD_trAbar_AbarDD_dD()
trAbar = Bq.trAbar
# Step 2.b.ii: Import detgammabar quantities from BSSN.BSSN_quantities:
Bq.detgammabar_and_derivs()
detgammabar = Bq.detgammabar
detgammabar_dD = Bq.detgammabar_dD
# Step 2.b.ii: Compute the contraction \bar{D}_k \beta^k = \beta^k_{,k} + \frac{\beta^k \bar{\gamma}_{,k}}{2 \bar{\gamma}}
Dbarbetacontraction = sp.sympify(0)
for k in range(DIM):
Dbarbetacontraction += betaU_dD[k][k] + betaU[k] * detgammabar_dD[k] / (2 * detgammabar)
# Step 2.b.iii: Second term of \partial_t \bar{\gamma}_{i j} right-hand side:
# \frac{2}{3} \bar{\gamma}_{i j} \left (\alpha \bar{A}_{k}^{k} - \bar{D}_{k} \beta^{k}\right )
for i in range(DIM):
for j in range(DIM):
gammabar_rhsDD[i][j] += sp.Rational(2, 3) * gammabarDD[i][j] * (alpha * trAbar - Dbarbetacontraction)
# Step 2.c: Third term of \partial_t \bar{\gamma}_{i j} right-hand side:
# -2 \alpha \bar{A}_{ij}
for i in range(DIM):
for j in range(DIM):
gammabar_rhsDD[i][j] += -2 * alpha * AbarDD[i][j]
# Step 3.a: First term of \partial_t \bar{A}_{i j}:
# \beta^k \partial_k \bar{A}_{ij} + \partial_i \beta^k \bar{A}_{kj} + \partial_j \beta^k \bar{A}_{ik}
# First define AbarDD_dupD:
AbarDD_dupD = Bq.AbarDD_dupD # From Bq.AbarUU_AbarUD_trAbar_AbarDD_dD()
Abar_rhsDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
Abar_rhsDD[i][j] += betaU[k] * AbarDD_dupD[i][j][k] + betaU_dD[k][i] * AbarDD[k][j] \
+ betaU_dD[k][j] * AbarDD[i][k]
# Step 3.b: Second term of \partial_t \bar{A}_{i j}:
# - (2/3) \bar{A}_{i j} \bar{D}_{k} \beta^{k} - 2 \alpha \bar{A}_{i k} {\bar{A}^{k}}_{j} + \alpha \bar{A}_{i j} K
gammabarUU = Bq.gammabarUU # From Bq.gammabar__inverse_and_derivs()
AbarUD = Bq.AbarUD # From Bq.AbarUU_AbarUD_trAbar()
for i in range(DIM):
for j in range(DIM):
Abar_rhsDD[i][j] += -sp.Rational(2, 3) * AbarDD[i][j] * Dbarbetacontraction + alpha * AbarDD[i][j] * trK
for k in range(DIM):
Abar_rhsDD[i][j] += -2 * alpha * AbarDD[i][k] * AbarUD[k][j]
# Step 3.c.i: Define partial derivatives of \phi in terms of evolved quantity "cf":
Bq.phi_and_derivs()
phi_dD = Bq.phi_dD
phi_dupD = Bq.phi_dupD
phi_dDD = Bq.phi_dDD
exp_m4phi = Bq.exp_m4phi
phi_dBarD = Bq.phi_dBarD # phi_dBarD = Dbar_i phi = phi_dD (since phi is a scalar)
phi_dBarDD = Bq.phi_dBarDD # phi_dBarDD = Dbar_i Dbar_j phi (covariant derivative)
# Step 3.c.ii: Define RbarDD
Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
RbarDD = Bq.RbarDD
# Step 3.c.iii: Define first and second derivatives of \alpha, as well as
# \bar{D}_i \bar{D}_j \alpha, which is defined just like phi
alpha_dD = ixp.declarerank1("alpha_dD")
alpha_dDD = ixp.declarerank2("alpha_dDD", "sym01")
alpha_dBarD = alpha_dD
alpha_dBarDD = ixp.zerorank2()
GammabarUDD = Bq.GammabarUDD # Defined in Bq.gammabar__inverse_and_derivs()
for i in range(DIM):
for j in range(DIM):
alpha_dBarDD[i][j] = alpha_dDD[i][j]
for k in range(DIM):
alpha_dBarDD[i][j] += - GammabarUDD[k][i][j] * alpha_dD[k]
# Step 3.c.iv: Define the terms in curly braces:
curlybrackettermsDD = ixp.zerorank2()
for i in range(DIM):
for j in range(DIM):
curlybrackettermsDD[i][j] = -2 * alpha * phi_dBarDD[i][j] + 4 * alpha * phi_dBarD[i] * phi_dBarD[j] \
+ 2 * alpha_dBarD[i] * phi_dBarD[j] \
+ 2 * alpha_dBarD[j] * phi_dBarD[i] \
- alpha_dBarDD[i][j] + alpha * RbarDD[i][j]
# Step 3.c.v: Compute the trace:
curlybracketterms_trace = sp.sympify(0)
for i in range(DIM):
for j in range(DIM):
curlybracketterms_trace += gammabarUU[i][j] * curlybrackettermsDD[i][j]
# Step 3.c.vi: Third and final term of Abar_rhsDD[i][j]:
for i in range(DIM):
for j in range(DIM):
Abar_rhsDD[i][j] += exp_m4phi * (curlybrackettermsDD[i][j] -
sp.Rational(1, 3) * gammabarDD[i][j] * curlybracketterms_trace)
# Step 4: Right-hand side of conformal factor variable "cf". Supported
# options include: cf=phi, cf=W=e^(-2*phi) (default), and cf=chi=e^(-4*phi)
# \partial_t phi = \left[\beta^k \partial_k \phi \right] <- TERM 1
# + \frac{1}{6} \left (\bar{D}_{k} \beta^{k} - \alpha K \right ) <- TERM 2
global cf_rhs
cf_rhs = sp.Rational(1, 6) * (Dbarbetacontraction - alpha * trK) # Term 2
for k in range(DIM):
cf_rhs += betaU[k] * phi_dupD[k] # Term 1
# Next multiply to convert phi_rhs to cf_rhs.
if par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") == "phi":
pass # do nothing; cf_rhs = phi_rhs
elif par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") == "W":
cf_rhs *= -2 * cf # cf_rhs = -2*cf*phi_rhs
elif par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") == "chi":
cf_rhs *= -4 * cf # cf_rhs = -4*cf*phi_rhs
else:
print("Error: EvolvedConformalFactor_cf == " +
par.parval_from_str("BSSN.BSSN_quantities::EvolvedConformalFactor_cf") + " unsupported!")
exit(1)
# Step 5: right-hand side of trK (trace of extrinsic curvature):
# \partial_t K = \beta^k \partial_k K <- TERM 1
# + \frac{1}{3} \alpha K^{2} <- TERM 2
# + \alpha \bar{A}_{i j} \bar{A}^{i j} <- TERM 3
# - - e^{-4 \phi} (\bar{D}_{i} \bar{D}^{i} \alpha + 2 \bar{D}^{i} \alpha \bar{D}_{i} \phi ) <- TERM 4
global trK_rhs
# TERM 2:
trK_rhs = sp.Rational(1, 3) * alpha * trK * trK
trK_dupD = ixp.declarerank1("trK_dupD")
for i in range(DIM):
# TERM 1:
trK_rhs += betaU[i] * trK_dupD[i]
for i in range(DIM):
for j in range(DIM):
# TERM 4:
trK_rhs += -exp_m4phi * gammabarUU[i][j] * (alpha_dBarDD[i][j] + 2 * alpha_dBarD[j] * phi_dBarD[i])
AbarUU = Bq.AbarUU # From Bq.AbarUU_AbarUD_trAbar()
for i in range(DIM):
for j in range(DIM):
# TERM 3:
trK_rhs += alpha * AbarDD[i][j] * AbarUU[i][j]
# Step 6: right-hand side of \partial_t \bar{\Lambda}^i:
# \partial_t \bar{\Lambda}^i = \beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k <- TERM 1
# + \bar{\gamma}^{j k} \hat{D}_{j} \hat{D}_{k} \beta^{i} <- TERM 2
# + \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j} <- TERM 3
# + \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j} <- TERM 4
# - 2 \bar{A}^{i j} (\partial_{j} \alpha - 6 \partial_{j} \phi) <- TERM 5
# + 2 \alpha \bar{A}^{j k} \Delta_{j k}^{i} <- TERM 6
# - \frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K <- TERM 7
# Step 6.a: Term 1 of \partial_t \bar{\Lambda}^i: \beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k
# First we declare \bar{\Lambda}^i and \bar{\Lambda}^i_{,j} in terms of \lambda^i and \lambda^i_{,j}
global LambdabarU_dupD # Used on the RHS of the Gamma-driving shift conditions
LambdabarU_dupD = ixp.zerorank2()
lambdaU_dupD = ixp.declarerank2("lambdaU_dupD", "nosym")
for i in range(DIM):
for j in range(DIM):
LambdabarU_dupD[i][j] = lambdaU_dupD[i][j] * rfm.ReU[i] + lambdaU[i] * rfm.ReUdD[i][j]
global Lambdabar_rhsU # Used on the RHS of the Gamma-driving shift conditions
Lambdabar_rhsU = ixp.zerorank1()
for i in range(DIM):
for k in range(DIM):
Lambdabar_rhsU[i] += betaU[k] * LambdabarU_dupD[i][k] - betaU_dD[i][k] * LambdabarU[k] # Term 1
# Step 6.b: Term 2 of \partial_t \bar{\Lambda}^i = \bar{\gamma}^{jk} (Term 2a + Term 2b + Term 2c)
# Term 2a: \bar{\gamma}^{jk} \beta^i_{,kj}
Term2aUDD = ixp.zerorank3()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
Term2aUDD[i][j][k] += betaU_dDD[i][k][j]
# Term 2b: \hat{\Gamma}^i_{mk,j} \beta^m + \hat{\Gamma}^i_{mk} \beta^m_{,j}
# + \hat{\Gamma}^i_{dj}\beta^d_{,k} - \hat{\Gamma}^d_{kj} \beta^i_{,d}
Term2bUDD = ixp.zerorank3()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
for m in range(DIM):
Term2bUDD[i][j][k] += rfm.GammahatUDDdD[i][m][k][j] * betaU[m] \
+ rfm.GammahatUDD[i][m][k] * betaU_dD[m][j] \
+ rfm.GammahatUDD[i][m][j] * betaU_dD[m][k] \
- rfm.GammahatUDD[m][k][j] * betaU_dD[i][m]
# Term 2c: \hat{\Gamma}^i_{dj}\hat{\Gamma}^d_{mk} \beta^m - \hat{\Gamma}^d_{kj} \hat{\Gamma}^i_{md} \beta^m
Term2cUDD = ixp.zerorank3()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
for m in range(DIM):
for d in range(DIM):
Term2cUDD[i][j][k] += (rfm.GammahatUDD[i][d][j] * rfm.GammahatUDD[d][m][k] \
- rfm.GammahatUDD[d][k][j] * rfm.GammahatUDD[i][m][d]) * betaU[m]
Lambdabar_rhsUpieceU = ixp.zerorank1()
# Put it all together to get Term 2:
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
Lambdabar_rhsU[i] += gammabarUU[j][k] * (Term2aUDD[i][j][k] + Term2bUDD[i][j][k] + Term2cUDD[i][j][k])
Lambdabar_rhsUpieceU[i] += gammabarUU[j][k] * (
Term2aUDD[i][j][k] + Term2bUDD[i][j][k] + Term2cUDD[i][j][k])
# Step 6.c: Term 3 of \partial_t \bar{\Lambda}^i:
# \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j}
DGammaU = Bq.DGammaU # From Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
for i in range(DIM):
Lambdabar_rhsU[i] += sp.Rational(2, 3) * DGammaU[i] * Dbarbetacontraction # Term 3
# Step 6.d: Term 4 of \partial_t \bar{\Lambda}^i:
# \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j}
detgammabar_dDD = Bq.detgammabar_dDD # From Bq.detgammabar_and_derivs()
Dbarbetacontraction_dBarD = ixp.zerorank1()
for k in range(DIM):
for m in range(DIM):
Dbarbetacontraction_dBarD[m] += betaU_dDD[k][k][m] + \
(betaU_dD[k][m] * detgammabar_dD[k] +
betaU[k] * detgammabar_dDD[k][m]) / (2 * detgammabar) \
- betaU[k] * detgammabar_dD[k] * detgammabar_dD[m] / (
2 * detgammabar * detgammabar)
for i in range(DIM):
for m in range(DIM):
Lambdabar_rhsU[i] += sp.Rational(1, 3) * gammabarUU[i][m] * Dbarbetacontraction_dBarD[m]
# Step 6.e: Term 5 of \partial_t \bar{\Lambda}^i:
# - 2 \bar{A}^{i j} (\partial_{j} \alpha - 6 \alpha \partial_{j} \phi)
for i in range(DIM):
for j in range(DIM):
Lambdabar_rhsU[i] += -2 * AbarUU[i][j] * (alpha_dD[j] - 6 * alpha * phi_dD[j])
# Step 6.f: Term 6 of \partial_t \bar{\Lambda}^i:
# 2 \alpha \bar{A}^{j k} \Delta^{i}_{j k}
DGammaUDD = Bq.DGammaUDD # From RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
Lambdabar_rhsU[i] += 2 * alpha * AbarUU[j][k] * DGammaUDD[i][j][k]
# Step 6.g: Term 7 of \partial_t \bar{\Lambda}^i:
# -\frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K
trK_dD = ixp.declarerank1("trK_dD")
for i in range(DIM):
for j in range(DIM):
Lambdabar_rhsU[i] += -sp.Rational(4, 3) * alpha * gammabarUU[i][j] * trK_dD[j]
# Step 7: Rescale the RHS quantities so that the evolved
# variables are smooth across coord singularities
global h_rhsDD,a_rhsDD,lambda_rhsU
h_rhsDD = ixp.zerorank2()
a_rhsDD = ixp.zerorank2()
lambda_rhsU = ixp.zerorank1()
for i in range(DIM):
lambda_rhsU[i] = Lambdabar_rhsU[i] / rfm.ReU[i]
for j in range(DIM):
h_rhsDD[i][j] = gammabar_rhsDD[i][j] / rfm.ReDD[i][j]
a_rhsDD[i][j] = Abar_rhsDD[i][j] / rfm.ReDD[i][j]
# print(str(Abar_rhsDD[2][2]).replace("**","^").replace("_","").replace("xx","x").replace("sin(x2)","Sin[x2]").replace("sin(2*x2)","Sin[2*x2]").replace("cos(x2)","Cos[x2]").replace("detgbaroverdetghat","detg"))
# print(str(Dbarbetacontraction).replace("**","^").replace("_","").replace("xx","x").replace("sin(x2)","Sin[x2]").replace("detgbaroverdetghat","detg"))
# print(betaU_dD)
# print(str(trK_rhs).replace("xx2","xx3").replace("xx1","xx2").replace("xx0","xx1").replace("**","^").replace("_","").replace("sin(xx2)","Sinx2").replace("xx","x").replace("sin(2*x2)","Sin2x2").replace("cos(x2)","Cosx2").replace("detgbaroverdetghat","detg"))
# print(str(bet_rhsU[0]).replace("xx2","xx3").replace("xx1","xx2").replace("xx0","xx1").replace("**","^").replace("_","").replace("sin(xx2)","Sinx2").replace("xx","x").replace("sin(2*x2)","Sin2x2").replace("cos(x2)","Cosx2").replace("detgbaroverdetghat","detg"))
| en | 0.519525 | # As documented in the NRPy+ tutorial module # Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb, # this module will construct the right-hand sides (RHSs) # expressions of the BSSN time evolution equations. # # Time-evolution equations for the BSSN gauge conditions are # specified in the BSSN_gauge_RHSs module and documented in # the Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb # NRPy+ tutorial module. # Author: <NAME> # zachetie **at** gmail **dot* com # Step 1.a: import all needed modules from NRPy+: # Step 1.b: Set the coordinate system for the numerical grid: # DO NOT SET IN STANDALONE PYTHON MODULE # par.set_parval_from_str("reference_metric::CoordSystem","Spherical") # Step 1.c: Given the chosen coordinate system, set up # corresponding reference metric and needed # reference metric quantities # The following function call sets up the reference metric # and related quantities, including rescaling matrices ReDD, # ReU, and hatted quantities. # setting to global enables other modules to see updated value. 
# Step 1.d: Set spatial dimension (must be 3 for BSSN, as BSSN is # a 3+1-dimensional decomposition of the general # relativistic field equations) # Step 1.e: Import all basic (unrescaled) BSSN scalars & tensors # Step 1.f: Import all neeeded rescaled BSSN tensors: # Step 2.a.i: Import derivative expressions for betaU defined in the BSSN.BSSN_quantities module: # Step 2.a.ii: Import derivative expression for gammabarDD # Step 2.a.iii: First term of \partial_t \bar{\gamma}_{i j} right-hand side: # \beta^k \bar{\gamma}_{ij,k} + \beta^k_{,i} \bar{\gamma}_{kj} + \beta^k_{,j} \bar{\gamma}_{ik} # Step 2.b.i: First import \bar{A}_{ij} = AbarDD[i][j], and its contraction trAbar = \bar{A}^k_k # from BSSN.BSSN_quantities # Step 2.b.ii: Import detgammabar quantities from BSSN.BSSN_quantities: # Step 2.b.ii: Compute the contraction \bar{D}_k \beta^k = \beta^k_{,k} + \frac{\beta^k \bar{\gamma}_{,k}}{2 \bar{\gamma}} # Step 2.b.iii: Second term of \partial_t \bar{\gamma}_{i j} right-hand side: # \frac{2}{3} \bar{\gamma}_{i j} \left (\alpha \bar{A}_{k}^{k} - \bar{D}_{k} \beta^{k}\right ) # Step 2.c: Third term of \partial_t \bar{\gamma}_{i j} right-hand side: # -2 \alpha \bar{A}_{ij} # Step 3.a: First term of \partial_t \bar{A}_{i j}: # \beta^k \partial_k \bar{A}_{ij} + \partial_i \beta^k \bar{A}_{kj} + \partial_j \beta^k \bar{A}_{ik} # First define AbarDD_dupD: # From Bq.AbarUU_AbarUD_trAbar_AbarDD_dD() # Step 3.b: Second term of \partial_t \bar{A}_{i j}: # - (2/3) \bar{A}_{i j} \bar{D}_{k} \beta^{k} - 2 \alpha \bar{A}_{i k} {\bar{A}^{k}}_{j} + \alpha \bar{A}_{i j} K # From Bq.gammabar__inverse_and_derivs() # From Bq.AbarUU_AbarUD_trAbar() # Step 3.c.i: Define partial derivatives of \phi in terms of evolved quantity "cf": # phi_dBarD = Dbar_i phi = phi_dD (since phi is a scalar) # phi_dBarDD = Dbar_i Dbar_j phi (covariant derivative) # Step 3.c.ii: Define RbarDD # Step 3.c.iii: Define first and second derivatives of \alpha, as well as # \bar{D}_i \bar{D}_j \alpha, which is 
defined just like phi # Defined in Bq.gammabar__inverse_and_derivs() # Step 3.c.iv: Define the terms in curly braces: # Step 3.c.v: Compute the trace: # Step 3.c.vi: Third and final term of Abar_rhsDD[i][j]: # Step 4: Right-hand side of conformal factor variable "cf". Supported # options include: cf=phi, cf=W=e^(-2*phi) (default), and cf=chi=e^(-4*phi) # \partial_t phi = \left[\beta^k \partial_k \phi \right] <- TERM 1 # + \frac{1}{6} \left (\bar{D}_{k} \beta^{k} - \alpha K \right ) <- TERM 2 # Term 2 # Term 1 # Next multiply to convert phi_rhs to cf_rhs. # do nothing; cf_rhs = phi_rhs # cf_rhs = -2*cf*phi_rhs # cf_rhs = -4*cf*phi_rhs # Step 5: right-hand side of trK (trace of extrinsic curvature): # \partial_t K = \beta^k \partial_k K <- TERM 1 # + \frac{1}{3} \alpha K^{2} <- TERM 2 # + \alpha \bar{A}_{i j} \bar{A}^{i j} <- TERM 3 # - - e^{-4 \phi} (\bar{D}_{i} \bar{D}^{i} \alpha + 2 \bar{D}^{i} \alpha \bar{D}_{i} \phi ) <- TERM 4 # TERM 2: # TERM 1: # TERM 4: # From Bq.AbarUU_AbarUD_trAbar() # TERM 3: # Step 6: right-hand side of \partial_t \bar{\Lambda}^i: # \partial_t \bar{\Lambda}^i = \beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k <- TERM 1 # + \bar{\gamma}^{j k} \hat{D}_{j} \hat{D}_{k} \beta^{i} <- TERM 2 # + \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j} <- TERM 3 # + \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j} <- TERM 4 # - 2 \bar{A}^{i j} (\partial_{j} \alpha - 6 \partial_{j} \phi) <- TERM 5 # + 2 \alpha \bar{A}^{j k} \Delta_{j k}^{i} <- TERM 6 # - \frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K <- TERM 7 # Step 6.a: Term 1 of \partial_t \bar{\Lambda}^i: \beta^k \partial_k \bar{\Lambda}^i - \partial_k \beta^i \bar{\Lambda}^k # First we declare \bar{\Lambda}^i and \bar{\Lambda}^i_{,j} in terms of \lambda^i and \lambda^i_{,j} # Used on the RHS of the Gamma-driving shift conditions # Used on the RHS of the Gamma-driving shift conditions # Term 1 # Step 6.b: Term 2 of \partial_t \bar{\Lambda}^i = \bar{\gamma}^{jk} (Term 2a + Term 
2b + Term 2c) # Term 2a: \bar{\gamma}^{jk} \beta^i_{,kj} # Term 2b: \hat{\Gamma}^i_{mk,j} \beta^m + \hat{\Gamma}^i_{mk} \beta^m_{,j} # + \hat{\Gamma}^i_{dj}\beta^d_{,k} - \hat{\Gamma}^d_{kj} \beta^i_{,d} # Term 2c: \hat{\Gamma}^i_{dj}\hat{\Gamma}^d_{mk} \beta^m - \hat{\Gamma}^d_{kj} \hat{\Gamma}^i_{md} \beta^m # Put it all together to get Term 2: # Step 6.c: Term 3 of \partial_t \bar{\Lambda}^i: # \frac{2}{3} \Delta^{i} \bar{D}_{j} \beta^{j} # From Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU() # Term 3 # Step 6.d: Term 4 of \partial_t \bar{\Lambda}^i: # \frac{1}{3} \bar{D}^{i} \bar{D}_{j} \beta^{j} # From Bq.detgammabar_and_derivs() # Step 6.e: Term 5 of \partial_t \bar{\Lambda}^i: # - 2 \bar{A}^{i j} (\partial_{j} \alpha - 6 \alpha \partial_{j} \phi) # Step 6.f: Term 6 of \partial_t \bar{\Lambda}^i: # 2 \alpha \bar{A}^{j k} \Delta^{i}_{j k} # From RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU() # Step 6.g: Term 7 of \partial_t \bar{\Lambda}^i: # -\frac{4}{3} \alpha \bar{\gamma}^{i j} \partial_{j} K # Step 7: Rescale the RHS quantities so that the evolved # variables are smooth across coord singularities # print(str(Abar_rhsDD[2][2]).replace("**","^").replace("_","").replace("xx","x").replace("sin(x2)","Sin[x2]").replace("sin(2*x2)","Sin[2*x2]").replace("cos(x2)","Cos[x2]").replace("detgbaroverdetghat","detg")) # print(str(Dbarbetacontraction).replace("**","^").replace("_","").replace("xx","x").replace("sin(x2)","Sin[x2]").replace("detgbaroverdetghat","detg")) # print(betaU_dD) # print(str(trK_rhs).replace("xx2","xx3").replace("xx1","xx2").replace("xx0","xx1").replace("**","^").replace("_","").replace("sin(xx2)","Sinx2").replace("xx","x").replace("sin(2*x2)","Sin2x2").replace("cos(x2)","Cosx2").replace("detgbaroverdetghat","detg")) # 
print(str(bet_rhsU[0]).replace("xx2","xx3").replace("xx1","xx2").replace("xx0","xx1").replace("**","^").replace("_","").replace("sin(xx2)","Sinx2").replace("xx","x").replace("sin(2*x2)","Sin2x2").replace("cos(x2)","Cosx2").replace("detgbaroverdetghat","detg")) | 2.362935 | 2 |
src/prefect/tasks/snowflake/snowflake.py | jhemmingsson/prefect | 0 | 6624763 | import snowflake.connector as sf
from prefect import Task
from prefect.utilities.tasks import defaults_from_attrs
class SnowflakeQuery(Task):
    """
    Task for executing a query against a Snowflake database.

    Args:
        - account (str): snowflake account name, see snowflake connector
            package documentation for details
        - user (str): user name used to authenticate
        - password (str, optional): password used to authenticate.
            password or private_key must be present
        - private_key (bytes, optional): pem to authenticate.
            password or private_key must be present
        - database (str, optional): name of the default database to use
        - schema (str, optional): name of the default schema to use
        - role (str, optional): name of the default role to use
        - warehouse (str, optional): name of the default warehouse to use
        - query (str, optional): query to execute against database
        - data (tuple, optional): values to use in query, must be specified using
            placeholders in the query string
        - autocommit (bool, optional): set to True to autocommit, defaults to None, which
            takes the snowflake AUTOCOMMIT parameter
        - **kwargs (dict, optional): additional keyword arguments to pass to the
            Task constructor
    """

    def __init__(
        self,
        account: str,
        user: str,
        password: str = None,
        private_key: bytes = None,
        database: str = None,
        schema: str = None,
        role: str = None,
        warehouse: str = None,
        query: str = None,
        data: tuple = None,
        autocommit: bool = None,
        **kwargs
    ):
        self.account = account
        self.user = user
        self.password = password
        self.private_key = private_key
        self.database = database
        self.schema = schema
        self.role = role
        self.warehouse = warehouse
        self.query = query
        self.data = data
        self.autocommit = autocommit
        super().__init__(**kwargs)

    @defaults_from_attrs("query", "data", "autocommit")
    def run(self, query: str = None, data: tuple = None, autocommit: bool = None):
        """
        Task run method. Executes a query against a Snowflake database.

        Args:
            - query (str, optional): query to execute against database
            - data (tuple, optional): values to use in query, must be specified using
                placeholders in the query string
            - autocommit (bool, optional): set to True to autocommit, defaults to None
                which takes the snowflake AUTOCOMMIT parameter

        Returns:
            - dict: the fetched rows under key ``"data"`` and the cursor column
                description under key ``"columns"``

        Raises:
            - ValueError: if query parameter is None or a blank string
            - DatabaseError: if exception occurs when executing the query
        """
        if not query:
            raise ValueError("A query string must be provided")
        # build the connection parameter dictionary
        # we will remove `None` values next
        connect_params = {
            "account": self.account,
            "user": self.user,
            # fix: this entry previously contained an unresolved placeholder
            # instead of the configured password
            "password": self.password,
            "private_key": self.private_key,
            "database": self.database,
            "schema": self.schema,
            "role": self.role,
            "warehouse": self.warehouse,
            "autocommit": self.autocommit,
        }
        # filter out unset values
        connect_params = {
            param: value
            for (param, value) in connect_params.items()
            if value is not None
        }
        # connect to database, open cursor
        conn = sf.connect(**connect_params)
        # try to execute query
        # context manager automatically rolls back failed transactions
        try:
            with conn:
                with conn.cursor() as cursor:
                    executed = cursor.execute(query, params=data).fetchall()
                    columns = cursor.description
            # close only after the `with` block has finalized the transaction;
            # the original closed inside it, so the context-manager exit ran
            # on an already-closed connection
            conn.close()
            return {"data": executed, "columns": columns}
        # pass through error, and ensure connection is closed
        except Exception as error:
            conn.close()
            raise error
| import snowflake.connector as sf
from prefect import Task
from prefect.utilities.tasks import defaults_from_attrs
class SnowflakeQuery(Task):
    """
    Task for executing a query against a Snowflake database.

    Args:
        - account (str): snowflake account name, see snowflake connector
            package documentation for details
        - user (str): user name used to authenticate
        - password (str, optional): password used to authenticate.
            password or private_key must be present
        - private_key (bytes, optional): pem to authenticate.
            password or private_key must be present
        - database (str, optional): name of the default database to use
        - schema (str, optional): name of the default schema to use
        - role (str, optional): name of the default role to use
        - warehouse (str, optional): name of the default warehouse to use
        - query (str, optional): query to execute against database
        - data (tuple, optional): values to use in query, must be specified using
            placeholders in the query string
        - autocommit (bool, optional): set to True to autocommit, defaults to None, which
            takes the snowflake AUTOCOMMIT parameter
        - **kwargs (dict, optional): additional keyword arguments to pass to the
            Task constructor
    """

    def __init__(
        self,
        account: str,
        user: str,
        password: str = None,
        private_key: bytes = None,
        database: str = None,
        schema: str = None,
        role: str = None,
        warehouse: str = None,
        query: str = None,
        data: tuple = None,
        autocommit: bool = None,
        **kwargs
    ):
        self.account = account
        self.user = user
        self.password = password
        self.private_key = private_key
        self.database = database
        self.schema = schema
        self.role = role
        self.warehouse = warehouse
        self.query = query
        self.data = data
        self.autocommit = autocommit
        super().__init__(**kwargs)

    @defaults_from_attrs("query", "data", "autocommit")
    def run(self, query: str = None, data: tuple = None, autocommit: bool = None):
        """
        Task run method. Executes a query against a Snowflake database.

        Args:
            - query (str, optional): query to execute against database
            - data (tuple, optional): values to use in query, must be specified using
                placeholders in the query string
            - autocommit (bool, optional): set to True to autocommit, defaults to None
                which takes the snowflake AUTOCOMMIT parameter

        Returns:
            - dict: the fetched rows under key ``"data"`` and the cursor column
                description under key ``"columns"``

        Raises:
            - ValueError: if query parameter is None or a blank string
            - DatabaseError: if exception occurs when executing the query
        """
        if not query:
            raise ValueError("A query string must be provided")
        # build the connection parameter dictionary
        # we will remove `None` values next
        connect_params = {
            "account": self.account,
            "user": self.user,
            # fix: this entry previously contained an unresolved placeholder
            # instead of the configured password
            "password": self.password,
            "private_key": self.private_key,
            "database": self.database,
            "schema": self.schema,
            "role": self.role,
            "warehouse": self.warehouse,
            "autocommit": self.autocommit,
        }
        # filter out unset values
        connect_params = {
            param: value
            for (param, value) in connect_params.items()
            if value is not None
        }
        # connect to database, open cursor
        conn = sf.connect(**connect_params)
        # try to execute query
        # context manager automatically rolls back failed transactions
        try:
            with conn:
                with conn.cursor() as cursor:
                    executed = cursor.execute(query, params=data).fetchall()
                    columns = cursor.description
            # close only after the `with` block has finalized the transaction;
            # the original closed inside it, so the context-manager exit ran
            # on an already-closed connection
            conn.close()
            return {"data": executed, "columns": columns}
        # pass through error, and ensure connection is closed
        except Exception as error:
            conn.close()
            raise error
| en | 0.625884 | Task for executing a query against a snowflake database. Args: - account (str): snowflake account name, see snowflake connector package documentation for details - user (str): user name used to authenticate - password (str, optional): password used to authenticate. password or private_lkey must be present - private_key (bytes, optional): pem to authenticate. password or private_key must be present - database (str, optional): name of the default database to use - schema (int, optional): name of the default schema to use - role (str, optional): name of the default role to use - warehouse (str, optional): name of the default warehouse to use - query (str, optional): query to execute against database - data (tuple, optional): values to use in query, must be specified using placeholder is query string - autocommit (bool, optional): set to True to autocommit, defaults to None, which takes snowflake AUTOCOMMIT parameter - **kwargs (dict, optional): additional keyword arguments to pass to the Task constructor Task run method. Executes a query against snowflake database. Args: - query (str, optional): query to execute against database - data (tuple, optional): values to use in query, must be specified using placeholder is query string - autocommit (bool, optional): set to True to autocommit, defaults to None which takes the snowflake AUTOCOMMIT parameter Returns: - None Raises: - ValueError: if query parameter is None or a blank string - DatabaseError: if exception occurs when executing the query # build the connection parameter dictionary # we will remove `None` values next # filter out unset values # connect to database, open cursor # try to execute query # context manager automatically rolls back failed transactions # pass through error, and ensure connection is closed | 2.904329 | 3 |
silx/gui/dialog/SafeFileSystemModel.py | payno/silx | 1 | 6624764 | <filename>silx/gui/dialog/SafeFileSystemModel.py
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
This module contains an :class:`SafeFileSystemModel`.
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "22/11/2017"
import sys
import os.path
import logging
import weakref
from silx.gui import qt
from silx.third_party import six
from .SafeFileIconProvider import SafeFileIconProvider
_logger = logging.getLogger(__name__)
class _Item(object):
    """Node of a lazily-populated file-system tree.

    Wraps a :class:`qt.QFileInfo` and caches its parent (weakly), its
    children and its absolute path, so that expensive or freeze-prone
    Qt file-info calls are done at most once per node.
    """

    def __init__(self, fileInfo):
        # Underlying Qt file info; an empty QFileInfo denotes the root
        # ("computer") node.
        self.__fileInfo = fileInfo
        # Weak reference to the parent _Item, set by _setParent; None for root
        self.__parent = None
        # Lazily-built list of child _Item, None until populate() runs
        self.__children = None
        # Cached result of absoluteFilePath()
        self.__absolutePath = None

    def isDrive(self):
        """Return True if this node is a Windows drive (direct child of root).

        On non-Windows platforms drives are not a distinct concept, so this
        always returns False there.
        """
        if sys.platform == "win32":
            return self.parent().parent() is None
        else:
            return False

    def isRoot(self):
        """Return True if this node is the virtual root ("computer") node."""
        return self.parent() is None

    def isFile(self):
        """
        Returns true if the path is a file.

        It avoid to access to the `Qt.QFileInfo` in case the file is a drive.
        """
        if self.isDrive():
            return False
        return self.__fileInfo.isFile()

    def isDir(self):
        """
        Returns true if the path is a directory.

        The default `qt.QFileInfo.isDir` can freeze the file system with
        network drives. This function avoid the freeze in case of browsing
        the root.
        """
        if self.isDrive():
            # A drive is a directory, we don't have to synchronize the
            # drive to know that
            return True
        return self.__fileInfo.isDir()

    def absoluteFilePath(self):
        """
        Returns an absolute path including the file name.

        This function uses in most cases the default
        `qt.QFileInfo.absoluteFilePath`. But it is known to freeze the file
        system with network drives.

        This function uses `qt.QFileInfo.filePath` in case of root drives, to
        avoid this kind of issues. In case of drive, the result is the same,
        while the file path is already absolute.

        :rtype: str
        """
        if self.__absolutePath is None:
            if self.isRoot():
                path = ""
            elif self.isDrive():
                path = self.__fileInfo.filePath()
            else:
                # Build the path from the cached parent path instead of
                # asking Qt, to avoid touching the file system
                path = os.path.join(self.parent().absoluteFilePath(), self.__fileInfo.fileName())
            if path == "":
                # Root case: returned directly, intentionally not cached
                return "/"
            self.__absolutePath = path
        return self.__absolutePath

    def child(self):
        """Return the (lazily populated) list of child items."""
        self.populate()
        return self.__children

    def childAt(self, position):
        """Return the child item at *position* (populates if needed)."""
        self.populate()
        return self.__children[position]

    def childCount(self):
        """Return the number of children (populates if needed)."""
        self.populate()
        return len(self.__children)

    def indexOf(self, item):
        """Return the row index of *item* among this node's children."""
        self.populate()
        return self.__children.index(item)

    def parent(self):
        """Return the parent _Item, or None for the root node or if the
        parent was garbage collected."""
        parent = self.__parent
        if parent is None:
            return None
        return parent()

    def filePath(self):
        """Return the path as stored in the underlying QFileInfo."""
        return self.__fileInfo.filePath()

    def fileName(self):
        """Return the display name of this node.

        For a drive the whole path (without trailing slash) is used, e.g.
        ``C:``; otherwise the base name of the absolute path.
        """
        if self.isDrive():
            name = self.absoluteFilePath()
            if name[-1] == "/":
                name = name[:-1]
            return name
        return os.path.basename(self.absoluteFilePath())

    def fileInfo(self):
        """
        Returns the Qt file info.

        :rtype: Qt.QFileInfo
        """
        return self.__fileInfo

    def _setParent(self, parent):
        # Weak reference to avoid a reference cycle parent <-> children
        self.__parent = weakref.ref(parent)

    def findChildrenByPath(self, path):
        """Walk the tree following *path* and return the matching _Item,
        or None when some path component cannot be found.

        Path components are compared case-(in)sensitively according to the
        file-system engine of *path*. An empty path returns this node.
        """
        if path == "":
            return self
        path = path.replace("\\", "/")
        if path[-1] == "/":
            path = path[:-1]
        names = path.split("/")
        caseSensitive = qt.QFSFileEngine(path).caseSensitive()
        count = len(names)
        cursor = self
        for name in names:
            for item in cursor.child():
                if caseSensitive:
                    same = item.fileName() == name
                else:
                    same = item.fileName().lower() == name.lower()
                if same:
                    cursor = item
                    count -= 1
                    break
            else:
                # No child matched this path component
                return None
            if count == 0:
                break
        else:
            return None
        return cursor

    def populate(self):
        """Build the children list once: drives for the root node, directory
        entries (including hidden and system) otherwise."""
        if self.__children is not None:
            return
        self.__children = []
        if self.isRoot():
            items = qt.QDir.drives()
        else:
            directory = qt.QDir(self.absoluteFilePath())
            filters = qt.QDir.AllEntries | qt.QDir.Hidden | qt.QDir.System
            items = directory.entryInfoList(filters)
        for fileInfo in items:
            i = _Item(fileInfo)
            self.__children.append(i)
            i._setParent(self)
class _RawFileSystemModel(qt.QAbstractItemModel):
    """
    This class implement a file system model and try to avoid freeze. On Qt4,
    :class:`qt.QFileSystemModel` is known to freeze the file system when
    network drives are available.

    To avoid this behaviour, this class does not use
    `qt.QFileInfo.absoluteFilePath` nor `qt.QFileInfo.canonicalPath` to reach
    information on drives.

    This model do not take care of sorting and filtering. This features are
    managed by another model, by composition.

    And because it is the end of life of Qt4, we do not implement asynchronous
    loading of files as it is done by :class:`qt.QFileSystemModel`, nor some
    useful features.
    """

    __directoryLoadedSync = qt.Signal(str)
    """This signal is connected asynchronously to a slot. It allows to
    emit directoryLoaded as an asynchronous signal."""

    directoryLoaded = qt.Signal(str)
    """This signal is emitted when the gatherer thread has finished to load the
    path."""

    rootPathChanged = qt.Signal(str)
    """This signal is emitted whenever the root path has been changed to a
    newPath."""

    NAME_COLUMN = 0
    SIZE_COLUMN = 1
    TYPE_COLUMN = 2
    LAST_MODIFIED_COLUMN = 3

    def __init__(self, parent=None):
        qt.QAbstractItemModel.__init__(self, parent)
        self.__computer = _Item(qt.QFileInfo())
        self.__header = "Name", "Size", "Type", "Last modification"
        self.__currentPath = ""
        # Fix: filter() used to read this attribute without it ever being
        # initialized, which raised AttributeError. Keep a sensible default
        # mirroring the proxy model's default (the raw model itself does not
        # filter; see class docstring).
        self.__filters = qt.QDir.AllEntries | qt.QDir.NoDotAndDotDot | qt.QDir.AllDirs
        self.__iconProvider = SafeFileIconProvider()
        self.__directoryLoadedSync.connect(self.__emitDirectoryLoaded, qt.Qt.QueuedConnection)

    def headerData(self, section, orientation, role=qt.Qt.DisplayRole):
        """Return column titles and alignment for the horizontal header."""
        if orientation == qt.Qt.Horizontal:
            if role == qt.Qt.DisplayRole:
                return self.__header[section]
            if role == qt.Qt.TextAlignmentRole:
                # Size column is right-aligned
                return qt.Qt.AlignRight if section == 1 else qt.Qt.AlignLeft
        return None

    def flags(self, index):
        """Items are enabled and selectable; invalid indexes carry no flags."""
        if not index.isValid():
            return 0
        return qt.Qt.ItemIsEnabled | qt.Qt.ItemIsSelectable

    def columnCount(self, parent=qt.QModelIndex()):
        """Return the fixed number of columns (name, size, type, mtime)."""
        return len(self.__header)

    def rowCount(self, parent=qt.QModelIndex()):
        """Return the number of children of *parent* (root when invalid)."""
        item = self.__getItem(parent)
        return item.childCount()

    def data(self, index, role=qt.Qt.DisplayRole):
        """Return display/decoration/alignment data for *index*.

        Also supports the QFileSystemModel-compatible custom roles
        FilePathRole, FileNameRole and FilePermissions.
        """
        if not index.isValid():
            return None
        column = index.column()
        if role in [qt.Qt.DisplayRole, qt.Qt.EditRole]:
            if column == self.NAME_COLUMN:
                return self.__displayName(index)
            elif column == self.SIZE_COLUMN:
                return self.size(index)
            elif column == self.TYPE_COLUMN:
                return self.type(index)
            elif column == self.LAST_MODIFIED_COLUMN:
                return self.lastModified(index)
            else:
                _logger.warning("data: invalid display value column %d", index.column())
        elif role == qt.QFileSystemModel.FilePathRole:
            return self.filePath(index)
        elif role == qt.QFileSystemModel.FileNameRole:
            return self.fileName(index)
        elif role == qt.Qt.DecorationRole:
            if column == self.NAME_COLUMN:
                icon = self.fileIcon(index)
                if icon is None or icon.isNull():
                    # Fix: the fallback icons were computed but never
                    # assigned, so a null icon was returned
                    if self.isDir(index):
                        icon = self.__iconProvider.icon(qt.QFileIconProvider.Folder)
                    else:
                        icon = self.__iconProvider.icon(qt.QFileIconProvider.File)
                return icon
        elif role == qt.Qt.TextAlignmentRole:
            if column == self.SIZE_COLUMN:
                return qt.Qt.AlignRight
        elif role == qt.QFileSystemModel.FilePermissions:
            return self.permissions(index)
        return None

    def index(self, *args, **kwargs):
        """Dispatch between the (row, column, parent) and the (path) C++
        overloads of index()."""
        path_api = False
        path_api |= len(args) >= 1 and isinstance(args[0], six.string_types)
        path_api |= "path" in kwargs
        if path_api:
            return self.__indexFromPath(*args, **kwargs)
        else:
            return self.__index(*args, **kwargs)

    def __index(self, row, column, parent=qt.QModelIndex()):
        if parent.isValid() and parent.column() != 0:
            # Fix: Qt expects an invalid QModelIndex here, not None
            return qt.QModelIndex()
        parentItem = self.__getItem(parent)
        item = parentItem.childAt(row)
        return self.createIndex(row, column, item)

    def __indexFromPath(self, path, column=0):
        """
        Uses the index(str) C++ API

        :rtype: qt.QModelIndex
        """
        if path == "":
            return qt.QModelIndex()
        item = self.__computer.findChildrenByPath(path)
        if item is None:
            return qt.QModelIndex()
        return self.createIndex(item.parent().indexOf(item), column, item)

    def parent(self, index):
        """Return the parent index of *index*, or an invalid index for
        top-level items."""
        if not index.isValid():
            return qt.QModelIndex()
        item = self.__getItem(index)
        # Fix: the original tested `index is None` which is never true here
        if item is None:
            return qt.QModelIndex()
        parent = item.parent()
        if parent is None or parent is self.__computer:
            return qt.QModelIndex()
        return self.createIndex(parent.parent().indexOf(parent), 0, parent)

    def __emitDirectoryLoaded(self, path):
        self.directoryLoaded.emit(path)

    def __emitRootPathChanged(self, path):
        self.rootPathChanged.emit(path)

    def __getItem(self, index):
        """Return the _Item behind *index*, or the root node when invalid."""
        if not index.isValid():
            return self.__computer
        item = index.internalPointer()
        return item

    def fileIcon(self, index):
        """Return the icon of *index*, from the icon provider when set,
        otherwise from the application style."""
        item = self.__getItem(index)
        if self.__iconProvider is not None:
            fileInfo = item.fileInfo()
            result = self.__iconProvider.icon(fileInfo)
        else:
            style = qt.QApplication.instance().style()
            if item.isRoot():
                result = style.standardIcon(qt.QStyle.SP_ComputerIcon)
            elif item.isDrive():
                result = style.standardIcon(qt.QStyle.SP_DriveHDIcon)
            elif item.isDir():
                result = style.standardIcon(qt.QStyle.SP_DirIcon)
            else:
                result = style.standardIcon(qt.QStyle.SP_FileIcon)
        return result

    def _item(self, index):
        """Expose the internal _Item of *index* (used by the proxy model)."""
        item = self.__getItem(index)
        return item

    def fileInfo(self, index):
        """Return the qt.QFileInfo of *index*."""
        item = self.__getItem(index)
        result = item.fileInfo()
        return result

    def __fileIcon(self, index):
        # NOTE(review): dead code — despite its name this returns the file
        # name, not an icon, and nothing in this file calls it. Kept
        # untouched for safety.
        item = self.__getItem(index)
        result = item.fileName()
        return result

    def __displayName(self, index):
        item = self.__getItem(index)
        result = item.fileName()
        return result

    def fileName(self, index):
        """Return the display file name of *index*."""
        item = self.__getItem(index)
        result = item.fileName()
        return result

    def filePath(self, index):
        """Return the (non-resolved) file path of *index*."""
        item = self.__getItem(index)
        result = item.fileInfo().filePath()
        return result

    def isDir(self, index):
        """Return True if *index* points to a directory (drives included)."""
        item = self.__getItem(index)
        result = item.isDir()
        return result

    def lastModified(self, index):
        """Return the qt.QDateTime of the last modification of *index*."""
        item = self.__getItem(index)
        result = item.fileInfo().lastModified()
        return result

    def permissions(self, index):
        """Return the qt.QFile.Permissions of *index*."""
        item = self.__getItem(index)
        result = item.fileInfo().permissions()
        return result

    def size(self, index):
        """Return the file size in bytes of *index*."""
        item = self.__getItem(index)
        result = item.fileInfo().size()
        return result

    def type(self, index):
        """Return a human-readable type string for *index*."""
        item = self.__getItem(index)
        if self.__iconProvider is not None:
            fileInfo = item.fileInfo()
            result = self.__iconProvider.type(fileInfo)
        else:
            if item.isRoot():
                result = "Computer"
            elif item.isDrive():
                result = "Drive"
            elif item.isDir():
                result = "Directory"
            else:
                fileInfo = item.fileInfo()
                result = fileInfo.suffix()
        return result

    # File manipulation
    # bool remove(const QModelIndex & index) const
    # bool rmdir(const QModelIndex & index) const
    # QModelIndex mkdir(const QModelIndex & parent, const QString & name)

    # Configuration

    def rootDirectory(self):
        """Return the root path as a qt.QDir."""
        return qt.QDir(self.rootPath())

    def rootPath(self):
        """Return the current root path."""
        return self.__currentPath

    def setRootPath(self, path):
        """Set the root path, emit rootPathChanged and (queued)
        directoryLoaded, and return the index of the new root."""
        if self.__currentPath == path:
            return
        self.__currentPath = path
        item = self.__computer.findChildrenByPath(path)
        self.__emitRootPathChanged(path)
        if item is None or item.parent() is None:
            return qt.QModelIndex()
        index = self.createIndex(item.parent().indexOf(item), 0, item)
        self.__directoryLoadedSync.emit(path)
        return index

    def iconProvider(self):
        # FIXME: invalidate the model
        return self.__iconProvider

    def setIconProvider(self, provider):
        # FIXME: invalidate the model
        self.__iconProvider = provider

    # bool resolveSymlinks() const
    # void setResolveSymlinks(bool enable)

    def setNameFilterDisables(self, enable):
        # Name filtering is handled by the proxy model; this is an API stub
        return None

    def nameFilterDisables(self):
        return None

    def myComputer(self, role=qt.Qt.DisplayRole):
        return None

    def setNameFilters(self, filters):
        # Name filtering is handled by the proxy model; this is an API stub
        return

    def nameFilters(self):
        return None

    def filter(self):
        """Return the stored qt.QDir filter flags (not applied by this
        model; filtering lives in the proxy)."""
        return self.__filters

    def setFilter(self, filters):
        # Store the flags so that filter() reflects the last value set; the
        # raw model still performs no filtering itself
        self.__filters = filters
        return

    def setReadOnly(self, enable):
        # Only read-only mode is supported
        assert(enable is True)

    def isReadOnly(self):
        return False
class SafeFileSystemModel(qt.QSortFilterProxyModel):
"""
This class implement a file system model and try to avoid freeze. On Qt4,
:class:`qt.QFileSystemModel` is known to freeze the file system when
network drives are available.
To avoid this behaviour, this class does not use
`qt.QFileInfo.absoluteFilePath` nor `qt.QFileInfo.canonicalPath` to reach
information on drives.
And because it is the end of life of Qt4, we do not implement asynchronous
loading of files as it is done by :class:`qt.QFileSystemModel`, nor some
useful features.
"""
    def __init__(self, parent=None):
        """Create the proxy model backed by a private _RawFileSystemModel.

        :param parent: Optional Qt parent object.
        """
        qt.QSortFilterProxyModel.__init__(self, parent=parent)
        # On macOS the convention is to disable (grey out) names rejected by
        # the name filters rather than hide them
        self.__nameFilterDisables = sys.platform == "darwin"
        # List of qt.QRegExp wildcard patterns, see setNameFilters()
        self.__nameFilters = []
        # qt.QDir filter flags applied by filterAcceptsRow()
        self.__filters = qt.QDir.AllEntries | qt.QDir.NoDotAndDotDot | qt.QDir.AllDirs
        sourceModel = _RawFileSystemModel(self)
        self.setSourceModel(sourceModel)
    @property
    def directoryLoaded(self):
        """Signal emitted when a path has been loaded (forwarded from the
        source model)."""
        return self.sourceModel().directoryLoaded
    @property
    def rootPathChanged(self):
        """Signal emitted when the root path changes (forwarded from the
        source model)."""
        return self.sourceModel().rootPathChanged
def index(self, *args, **kwargs):
path_api = False
path_api |= len(args) >= 1 and isinstance(args[0], six.string_types)
path_api |= "path" in kwargs
if path_api:
return self.__indexFromPath(*args, **kwargs)
else:
return self.__index(*args, **kwargs)
    def __index(self, row, column, parent=qt.QModelIndex()):
        # Standard (row, column, parent) overload: delegate to Qt
        return qt.QSortFilterProxyModel.index(self, row, column, parent)
def __indexFromPath(self, path, column=0):
"""
Uses the index(str) C++ API
:rtype: qt.QModelIndex
"""
if path == "":
return qt.QModelIndex()
index = self.sourceModel().index(path, column)
index = self.mapFromSource(index)
return index
    def lessThan(self, leftSourceIndex, rightSourceIndex):
        """Comparison used by the proxy to sort rows, per sort column.

        Name sorting is case-insensitive and (except on macOS) places
        directories before files.

        :rtype: bool
        """
        sourceModel = self.sourceModel()
        sortColumn = self.sortColumn()
        if sortColumn == _RawFileSystemModel.NAME_COLUMN:
            leftItem = sourceModel._item(leftSourceIndex)
            rightItem = sourceModel._item(rightSourceIndex)
            if sys.platform != "darwin":
                # Sort directories before files
                leftIsDir = leftItem.isDir()
                rightIsDir = rightItem.isDir()
                if leftIsDir ^ rightIsDir:
                    # Exactly one side is a directory: it sorts first
                    return leftIsDir
            return leftItem.fileName().lower() < rightItem.fileName().lower()
        elif sortColumn == _RawFileSystemModel.SIZE_COLUMN:
            left = sourceModel.fileInfo(leftSourceIndex)
            right = sourceModel.fileInfo(rightSourceIndex)
            return left.size() < right.size()
        elif sortColumn == _RawFileSystemModel.TYPE_COLUMN:
            left = sourceModel.type(leftSourceIndex)
            right = sourceModel.type(rightSourceIndex)
            return left < right
        elif sortColumn == _RawFileSystemModel.LAST_MODIFIED_COLUMN:
            left = sourceModel.fileInfo(leftSourceIndex)
            right = sourceModel.fileInfo(rightSourceIndex)
            return left.lastModified() < right.lastModified()
        else:
            _logger.warning("Unsupported sorted column %d", sortColumn)
        return False
    def __filtersAccepted(self, item, filters):
        """
        Check individual flag filters.

        Applies the qt.QDir.Filters *filters* to *item* and returns False
        as soon as one of them rejects it.

        :param _Item item: Item of the raw model to check
        :param filters: Combination of qt.QDir.Filter flags
        :rtype: bool
        """
        if not (filters & (qt.QDir.Dirs | qt.QDir.AllDirs)):
            # Hide dirs
            if item.isDir():
                return False
        if not (filters & qt.QDir.Files):
            # Hide files
            if item.isFile():
                return False
        if not (filters & qt.QDir.Drives):
            # Hide drives
            if item.isDrive():
                return False
        fileInfo = item.fileInfo()
        if fileInfo is None:
            return False
        filterPermissions = (filters & qt.QDir.PermissionMask) != 0
        if filterPermissions and (filters & (qt.QDir.Dirs | qt.QDir.Files)):
            if (filters & qt.QDir.Readable):
                # Hide unreadable
                if not fileInfo.isReadable():
                    return False
            if (filters & qt.QDir.Writable):
                # Hide unwritable
                if not fileInfo.isWritable():
                    return False
            if (filters & qt.QDir.Executable):
                # Hide unexecutable
                if not fileInfo.isExecutable():
                    return False
        if (filters & qt.QDir.NoSymLinks):
            # Hide sym links
            if fileInfo.isSymLink():
                return False
        if not (filters & qt.QDir.System):
            # Hide system entries (anything that is neither dir nor file)
            if not item.isDir() and not item.isFile():
                return False
        fileName = item.fileName()
        isDot = fileName == "."
        isDotDot = fileName == ".."
        if not (filters & qt.QDir.Hidden):
            # Hide hidden files, but never the "." / ".." references
            if not (isDot or isDotDot) and fileInfo.isHidden():
                return False
        if filters & (qt.QDir.NoDot | qt.QDir.NoDotDot | qt.QDir.NoDotAndDotDot):
            # Hide parent/self references
            if filters & qt.QDir.NoDot:
                if isDot:
                    return False
            if filters & qt.QDir.NoDotDot:
                if isDotDot:
                    return False
            if filters & qt.QDir.NoDotAndDotDot:
                if isDot or isDotDot:
                    return False
        return True
    def filterAcceptsRow(self, sourceRow, sourceParent):
        """Return True if the source row must be kept by the proxy.

        Applies first the qt.QDir flag filters, then the wildcard name
        filters (unless name-filtered items are only disabled, or the item
        is a directory and AllDirs is set).

        :rtype: bool
        """
        if not sourceParent.isValid():
            return True
        sourceModel = self.sourceModel()
        index = sourceModel.index(sourceRow, 0, sourceParent)
        if not index.isValid():
            return True
        item = sourceModel._item(index)
        filters = self.__filters
        if item.isDrive():
            # Assume the user always has access to a drive.
            # This avoids touching fileInfo, which can freeze the file
            # system on network drives.
            return True
        if not self.__filtersAccepted(item, filters):
            return False
        if self.__nameFilterDisables:
            # Rejected names are shown disabled (see flags()), not hidden
            return True
        if item.isDir() and (filters & qt.QDir.AllDirs):
            # don't apply the name filters to directory names
            return True
        return self.__nameFiltersAccepted(item)
def __nameFiltersAccepted(self, item):
if len(self.__nameFilters) == 0:
return True
fileName = item.fileName()
for reg in self.__nameFilters:
if reg.exactMatch(fileName):
return True
return False
    def setNameFilterDisables(self, enable):
        """If ``enable`` is True, items rejected by the name filters are shown
        disabled (greyed out by :meth:`flags`) instead of being hidden."""
        self.__nameFilterDisables = enable
        self.invalidate()
    def nameFilterDisables(self):
        """Return True if name-filter-rejected items are disabled rather than
        hidden (see :meth:`setNameFilterDisables`)."""
        return self.__nameFilterDisables
    def myComputer(self, role=qt.Qt.DisplayRole):
        """Delegate the "my computer" data for ``role`` to the source model."""
        return self.sourceModel().myComputer(role)
def setNameFilters(self, filters):
self.__nameFilters = []
isCaseSensitive = self.__filters & qt.QDir.CaseSensitive
caseSensitive = qt.Qt.CaseSensitive if isCaseSensitive else qt.Qt.CaseInsensitive
for f in filters:
reg = qt.QRegExp(f, caseSensitive, qt.QRegExp.Wildcard)
self.__nameFilters.append(reg)
self.invalidate()
    def nameFilters(self):
        """Return the name filters as a list of wildcard pattern strings."""
        return [f.pattern() for f in self.__nameFilters]
    def filter(self):
        """Return the current ``qt.QDir`` filter flags."""
        return self.__filters
def setFilter(self, filters):
self.__filters = filters
# In case of change of case sensitivity
self.setNameFilters(self.nameFilters())
self.invalidate()
def setReadOnly(self, enable):
assert(enable is True)
    def isReadOnly(self):
        """Always return False.

        NOTE(review): :meth:`setReadOnly` only accepts True while this
        returns False — looks inconsistent with ``QFileSystemModel``'s
        semantics; confirm the intended meaning before relying on it.
        """
        return False
    def rootPath(self):
        """Return the current root path of the underlying source model."""
        return self.sourceModel().rootPath()
    def setRootPath(self, path):
        """Set the source model's root path and return the corresponding
        proxy index (invalid if the path could not be resolved)."""
        index = self.sourceModel().setRootPath(path)
        index = self.mapFromSource(index)
        return index
    def flags(self, index):
        """Item flags from the source model; when ``nameFilterDisables`` mode
        is on, items rejected by the name filters are disabled (greyed out)
        instead of hidden."""
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        filters = sourceModel.flags(index)
        if self.__nameFilterDisables:
            item = sourceModel._item(index)
            if not self.__nameFiltersAccepted(item):
                # Grey out the item rather than hiding it.
                filters &= ~qt.Qt.ItemIsEnabled
        return filters
def fileIcon(self, index):
sourceModel = self.sourceModel()
index = self.mapToSource(index)
return sourceModel.fileIcon(index)
def fileInfo(self, index):
sourceModel = self.sourceModel()
index = self.mapToSource(index)
return sourceModel.fileInfo(index)
def fileName(self, index):
sourceModel = self.sourceModel()
index = self.mapToSource(index)
return sourceModel.fileName(index)
def filePath(self, index):
sourceModel = self.sourceModel()
index = self.mapToSource(index)
return sourceModel.filePath(index)
def isDir(self, index):
sourceModel = self.sourceModel()
index = self.mapToSource(index)
return sourceModel.isDir(index)
def lastModified(self, index):
sourceModel = self.sourceModel()
index = self.mapToSource(index)
return sourceModel.lastModified(index)
def permissions(self, index):
sourceModel = self.sourceModel()
index = self.mapToSource(index)
return sourceModel.permissions(index)
def size(self, index):
sourceModel = self.sourceModel()
index = self.mapToSource(index)
return sourceModel.size(index)
def type(self, index):
sourceModel = self.sourceModel()
index = self.mapToSource(index)
return sourceModel.type(index)
| <filename>silx/gui/dialog/SafeFileSystemModel.py
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""
This module contains an :class:`SafeFileSystemModel`.
"""
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "22/11/2017"
import sys
import os.path
import logging
import weakref
from silx.gui import qt
from silx.third_party import six
from .SafeFileIconProvider import SafeFileIconProvider
_logger = logging.getLogger(__name__)
class _Item(object):
    """Lazily-populated node of the file system tree.

    Children are only listed on first access (:meth:`populate`); the parent
    is held through a weakref so a subtree can be garbage collected.
    """
    def __init__(self, fileInfo):
        # Underlying qt.QFileInfo (an empty one for the root "computer" node).
        self.__fileInfo = fileInfo
        # Weakref to the parent _Item; None for the root.
        self.__parent = None
        # List of child _Item, or None while not yet populated.
        self.__children = None
        # Cached result of absoluteFilePath().
        self.__absolutePath = None
    def isDrive(self):
        if sys.platform == "win32":
            # On Windows, drives are the direct children of the root node.
            return self.parent().parent() is None
        else:
            return False
    def isRoot(self):
        return self.parent() is None
    def isFile(self):
        """
        Returns true if the path is a file.
        It avoids accessing the `Qt.QFileInfo` in case the file is a drive.
        """
        if self.isDrive():
            return False
        return self.__fileInfo.isFile()
    def isDir(self):
        """
        Returns true if the path is a directory.
        The default `qt.QFileInfo.isDir` can freeze the file system with
        network drives. This function avoids the freeze in case of browsing
        the root.
        """
        if self.isDrive():
            # A drive is a directory, we don't have to synchronize the
            # drive to know that
            return True
        return self.__fileInfo.isDir()
    def absoluteFilePath(self):
        """
        Returns an absolute path including the file name.
        This function uses in most cases the default
        `qt.QFileInfo.absoluteFilePath`. But it is known to freeze the file
        system with network drives.
        This function uses `qt.QFileInfo.filePath` in case of root drives, to
        avoid this kind of issues. In case of drive, the result is the same,
        while the file path is already absolute.
        :rtype: str
        """
        if self.__absolutePath is None:
            if self.isRoot():
                path = ""
            elif self.isDrive():
                path = self.__fileInfo.filePath()
            else:
                # Build the path from the cached parent path: never asks Qt
                # for an absolute path, which could hit the network.
                path = os.path.join(self.parent().absoluteFilePath(), self.__fileInfo.fileName())
            if path == "":
                # Root node: returned directly, intentionally not cached.
                return "/"
            self.__absolutePath = path
        return self.__absolutePath
    def child(self):
        self.populate()
        return self.__children
    def childAt(self, position):
        self.populate()
        return self.__children[position]
    def childCount(self):
        self.populate()
        return len(self.__children)
    def indexOf(self, item):
        self.populate()
        return self.__children.index(item)
    def parent(self):
        # Dereference the weakref; returns None for the root (and would also
        # return None if the parent had been garbage collected).
        parent = self.__parent
        if parent is None:
            return None
        return parent()
    def filePath(self):
        return self.__fileInfo.filePath()
    def fileName(self):
        if self.isDrive():
            # Use the drive path itself ("C:/" -> "C:") as its display name.
            name = self.absoluteFilePath()
            if name[-1] == "/":
                name = name[:-1]
            return name
        return os.path.basename(self.absoluteFilePath())
    def fileInfo(self):
        """
        Returns the Qt file info.
        :rtype: Qt.QFileInfo
        """
        return self.__fileInfo
    def _setParent(self, parent):
        self.__parent = weakref.ref(parent)
    def findChildrenByPath(self, path):
        """Walk the (lazily-populated) tree component by component and return
        the _Item matching ``path``, or None if any component is missing."""
        if path == "":
            return self
        path = path.replace("\\", "/")
        if path[-1] == "/":
            path = path[:-1]
        names = path.split("/")
        # Case sensitivity of the name comparison follows the file system.
        caseSensitive = qt.QFSFileEngine(path).caseSensitive()
        count = len(names)
        cursor = self
        for name in names:
            for item in cursor.child():
                if caseSensitive:
                    same = item.fileName() == name
                else:
                    same = item.fileName().lower() == name.lower()
                if same:
                    cursor = item
                    count -= 1
                    break
            else:
                # No child matched this path component.
                return None
            if count == 0:
                break
        else:
            return None
        return cursor
    def populate(self):
        """List the children once: drives for the root node, directory
        entries otherwise.  Subsequent calls are no-ops."""
        if self.__children is not None:
            return
        self.__children = []
        if self.isRoot():
            items = qt.QDir.drives()
        else:
            directory = qt.QDir(self.absoluteFilePath())
            filters = qt.QDir.AllEntries | qt.QDir.Hidden | qt.QDir.System
            items = directory.entryInfoList(filters)
        for fileInfo in items:
            i = _Item(fileInfo)
            self.__children.append(i)
            i._setParent(self)
class _RawFileSystemModel(qt.QAbstractItemModel):
    """
    This class implements a file system model and tries to avoid freezes. On
    Qt4, :class:`qt.QFileSystemModel` is known to freeze the file system when
    network drives are available.
    To avoid this behaviour, this class does not use
    `qt.QFileInfo.absoluteFilePath` nor `qt.QFileInfo.canonicalPath` to reach
    information on drives.
    This model does not take care of sorting and filtering. These features are
    managed by another model, by composition.
    And because it is the end of life of Qt4, we do not implement asynchronous
    loading of files as it is done by :class:`qt.QFileSystemModel`, nor some
    useful features.
    """
    __directoryLoadedSync = qt.Signal(str)
    """This signal is connected asynchronously to a slot. It allows to
    emit directoryLoaded as an asynchronous signal."""
    directoryLoaded = qt.Signal(str)
    """This signal is emitted when the gatherer thread has finished to load the
    path."""
    rootPathChanged = qt.Signal(str)
    """This signal is emitted whenever the root path has been changed to a
    newPath."""
    NAME_COLUMN = 0
    SIZE_COLUMN = 1
    TYPE_COLUMN = 2
    LAST_MODIFIED_COLUMN = 3
    def __init__(self, parent=None):
        qt.QAbstractItemModel.__init__(self, parent)
        # Root node of the lazily-populated tree ("my computer").
        self.__computer = _Item(qt.QFileInfo())
        self.__header = "Name", "Size", "Type", "Last modification"
        self.__currentPath = ""
        self.__iconProvider = SafeFileIconProvider()
        # Queued connection: directoryLoaded is re-emitted asynchronously.
        self.__directoryLoadedSync.connect(self.__emitDirectoryLoaded, qt.Qt.QueuedConnection)
    def headerData(self, section, orientation, role=qt.Qt.DisplayRole):
        if orientation == qt.Qt.Horizontal:
            if role == qt.Qt.DisplayRole:
                return self.__header[section]
            if role == qt.Qt.TextAlignmentRole:
                # Right-align the size column only.
                return qt.Qt.AlignRight if section == 1 else qt.Qt.AlignLeft
        return None
    def flags(self, index):
        if not index.isValid():
            return 0
        return qt.Qt.ItemIsEnabled | qt.Qt.ItemIsSelectable
    def columnCount(self, parent=qt.QModelIndex()):
        return len(self.__header)
    def rowCount(self, parent=qt.QModelIndex()):
        item = self.__getItem(parent)
        return item.childCount()
    def data(self, index, role=qt.Qt.DisplayRole):
        """Standard Qt data accessor covering display/edit, decoration,
        alignment and the QFileSystemModel-compatible custom roles."""
        if not index.isValid():
            return None
        column = index.column()
        if role in [qt.Qt.DisplayRole, qt.Qt.EditRole]:
            if column == self.NAME_COLUMN:
                return self.__displayName(index)
            elif column == self.SIZE_COLUMN:
                return self.size(index)
            elif column == self.TYPE_COLUMN:
                return self.type(index)
            elif column == self.LAST_MODIFIED_COLUMN:
                return self.lastModified(index)
            else:
                _logger.warning("data: invalid display value column %d", index.column())
        elif role == qt.QFileSystemModel.FilePathRole:
            return self.filePath(index)
        elif role == qt.QFileSystemModel.FileNameRole:
            return self.fileName(index)
        elif role == qt.Qt.DecorationRole:
            if column == self.NAME_COLUMN:
                icon = self.fileIcon(index)
                if icon is None or icon.isNull():
                    # BUG fix: the original computed the fallback icon but
                    # discarded the result, returning the null icon instead.
                    if self.isDir(index):
                        icon = self.__iconProvider.icon(qt.QFileIconProvider.Folder)
                    else:
                        icon = self.__iconProvider.icon(qt.QFileIconProvider.File)
                return icon
        elif role == qt.Qt.TextAlignmentRole:
            if column == self.SIZE_COLUMN:
                return qt.Qt.AlignRight
        elif role == qt.QFileSystemModel.FilePermissions:
            return self.permissions(index)
        return None
    def index(self, *args, **kwargs):
        # Mimic both C++ overloads: index(row, column, parent) and
        # index(path, column).
        path_api = False
        path_api |= len(args) >= 1 and isinstance(args[0], six.string_types)
        path_api |= "path" in kwargs
        if path_api:
            return self.__indexFromPath(*args, **kwargs)
        else:
            return self.__index(*args, **kwargs)
    def __index(self, row, column, parent=qt.QModelIndex()):
        if parent.isValid() and parent.column() != 0:
            # BUG fix: index() must return a QModelIndex; the original
            # returned None here, which is invalid for Qt views.
            return qt.QModelIndex()
        parentItem = self.__getItem(parent)
        item = parentItem.childAt(row)
        return self.createIndex(row, column, item)
    def __indexFromPath(self, path, column=0):
        """
        Uses the index(str) C++ API
        :rtype: qt.QModelIndex
        """
        if path == "":
            return qt.QModelIndex()
        item = self.__computer.findChildrenByPath(path)
        if item is None:
            return qt.QModelIndex()
        return self.createIndex(item.parent().indexOf(item), column, item)
    def parent(self, index):
        if not index.isValid():
            return qt.QModelIndex()
        item = self.__getItem(index)
        if item is None:
            # BUG fix: the original tested ``index is None``, which can
            # never be true at this point; the item was clearly intended.
            return qt.QModelIndex()
        parent = item.parent()
        if parent is None or parent is self.__computer:
            return qt.QModelIndex()
        return self.createIndex(parent.parent().indexOf(parent), 0, parent)
    def __emitDirectoryLoaded(self, path):
        self.directoryLoaded.emit(path)
    def __emitRootPathChanged(self, path):
        self.rootPathChanged.emit(path)
    def __getItem(self, index):
        """Return the _Item behind ``index`` (the computer root when the
        index is invalid)."""
        if not index.isValid():
            return self.__computer
        item = index.internalPointer()
        return item
    def fileIcon(self, index):
        item = self.__getItem(index)
        if self.__iconProvider is not None:
            fileInfo = item.fileInfo()
            result = self.__iconProvider.icon(fileInfo)
        else:
            # Fallback on generic style icons when no provider is set.
            style = qt.QApplication.instance().style()
            if item.isRoot():
                result = style.standardIcon(qt.QStyle.SP_ComputerIcon)
            elif item.isDrive():
                result = style.standardIcon(qt.QStyle.SP_DriveHDIcon)
            elif item.isDir():
                result = style.standardIcon(qt.QStyle.SP_DirIcon)
            else:
                result = style.standardIcon(qt.QStyle.SP_FileIcon)
        return result
    def _item(self, index):
        item = self.__getItem(index)
        return item
    def fileInfo(self, index):
        item = self.__getItem(index)
        result = item.fileInfo()
        return result
    def __fileIcon(self, index):
        # NOTE(review): dead private method — never called in this file and
        # returns the file *name*, not an icon.  Kept for safety; candidate
        # for removal.
        item = self.__getItem(index)
        result = item.fileName()
        return result
    def __displayName(self, index):
        item = self.__getItem(index)
        result = item.fileName()
        return result
    def fileName(self, index):
        item = self.__getItem(index)
        result = item.fileName()
        return result
    def filePath(self, index):
        item = self.__getItem(index)
        result = item.fileInfo().filePath()
        return result
    def isDir(self, index):
        item = self.__getItem(index)
        result = item.isDir()
        return result
    def lastModified(self, index):
        item = self.__getItem(index)
        result = item.fileInfo().lastModified()
        return result
    def permissions(self, index):
        item = self.__getItem(index)
        result = item.fileInfo().permissions()
        return result
    def size(self, index):
        item = self.__getItem(index)
        result = item.fileInfo().size()
        return result
    def type(self, index):
        item = self.__getItem(index)
        if self.__iconProvider is not None:
            fileInfo = item.fileInfo()
            result = self.__iconProvider.type(fileInfo)
        else:
            if item.isRoot():
                result = "Computer"
            elif item.isDrive():
                result = "Drive"
            elif item.isDir():
                result = "Directory"
            else:
                fileInfo = item.fileInfo()
                result = fileInfo.suffix()
        return result
    # File manipulation
    # bool remove(const QModelIndex & index) const
    # bool rmdir(const QModelIndex & index) const
    # QModelIndex mkdir(const QModelIndex & parent, const QString & name)
    # Configuration
    def rootDirectory(self):
        return qt.QDir(self.rootPath())
    def rootPath(self):
        return self.__currentPath
    def setRootPath(self, path):
        if self.__currentPath == path:
            return
        self.__currentPath = path
        item = self.__computer.findChildrenByPath(path)
        self.__emitRootPathChanged(path)
        if item is None or item.parent() is None:
            return qt.QModelIndex()
        index = self.createIndex(item.parent().indexOf(item), 0, item)
        # Emitted through the queued signal so directoryLoaded fires
        # asynchronously, after this call returns.
        self.__directoryLoadedSync.emit(path)
        return index
    def iconProvider(self):
        # FIXME: invalidate the model
        return self.__iconProvider
    def setIconProvider(self, provider):
        # FIXME: invalidate the model
        self.__iconProvider = provider
    # bool resolveSymlinks() const
    # void setResolveSymlinks(bool enable)
    def setNameFilterDisables(self, enable):
        # Filtering is handled by the proxy model; no-op here.
        return None
    def nameFilterDisables(self):
        return None
    def myComputer(self, role=qt.Qt.DisplayRole):
        return None
    def setNameFilters(self, filters):
        return
    def nameFilters(self):
        return None
    def filter(self):
        # BUG fix: ``self.__filters`` was never defined on this class
        # (setFilter is a no-op), so the original raised AttributeError.
        # Report the fixed filter set actually used by _Item.populate().
        return qt.QDir.AllEntries | qt.QDir.Hidden | qt.QDir.System
    def setFilter(self, filters):
        # Filtering is handled by the proxy model; no-op here.
        return
    def setReadOnly(self, enable):
        assert(enable is True)
    def isReadOnly(self):
        return False
class SafeFileSystemModel(qt.QSortFilterProxyModel):
    """
    This class implements a file system model and tries to avoid freezes. On
    Qt4, :class:`qt.QFileSystemModel` is known to freeze the file system when
    network drives are available.
    To avoid this behaviour, this class does not use
    `qt.QFileInfo.absoluteFilePath` nor `qt.QFileInfo.canonicalPath` to reach
    information on drives.
    And because it is the end of life of Qt4, we do not implement asynchronous
    loading of files as it is done by :class:`qt.QFileSystemModel`, nor some
    useful features.
    """
    def __init__(self, parent=None):
        qt.QSortFilterProxyModel.__init__(self, parent=parent)
        # On macOS rejected names are greyed out (Finder-like) by default.
        self.__nameFilterDisables = sys.platform == "darwin"
        self.__nameFilters = []
        self.__filters = qt.QDir.AllEntries | qt.QDir.NoDotAndDotDot | qt.QDir.AllDirs
        sourceModel = _RawFileSystemModel(self)
        self.setSourceModel(sourceModel)
    @property
    def directoryLoaded(self):
        return self.sourceModel().directoryLoaded
    @property
    def rootPathChanged(self):
        return self.sourceModel().rootPathChanged
    def index(self, *args, **kwargs):
        # Mimic both C++ overloads: index(row, column, parent) and
        # index(path, column).
        path_api = False
        path_api |= len(args) >= 1 and isinstance(args[0], six.string_types)
        path_api |= "path" in kwargs
        if path_api:
            return self.__indexFromPath(*args, **kwargs)
        else:
            return self.__index(*args, **kwargs)
    def __index(self, row, column, parent=qt.QModelIndex()):
        return qt.QSortFilterProxyModel.index(self, row, column, parent)
    def __indexFromPath(self, path, column=0):
        """
        Uses the index(str) C++ API
        :rtype: qt.QModelIndex
        """
        if path == "":
            return qt.QModelIndex()
        index = self.sourceModel().index(path, column)
        index = self.mapFromSource(index)
        return index
    def lessThan(self, leftSourceIndex, rightSourceIndex):
        """Sorting predicate: name (directories first except on macOS),
        size, type or modification time, depending on the sort column."""
        sourceModel = self.sourceModel()
        sortColumn = self.sortColumn()
        if sortColumn == _RawFileSystemModel.NAME_COLUMN:
            leftItem = sourceModel._item(leftSourceIndex)
            rightItem = sourceModel._item(rightSourceIndex)
            if sys.platform != "darwin":
                # Sort directories before files
                leftIsDir = leftItem.isDir()
                rightIsDir = rightItem.isDir()
                if leftIsDir ^ rightIsDir:
                    return leftIsDir
            return leftItem.fileName().lower() < rightItem.fileName().lower()
        elif sortColumn == _RawFileSystemModel.SIZE_COLUMN:
            left = sourceModel.fileInfo(leftSourceIndex)
            right = sourceModel.fileInfo(rightSourceIndex)
            return left.size() < right.size()
        elif sortColumn == _RawFileSystemModel.TYPE_COLUMN:
            left = sourceModel.type(leftSourceIndex)
            right = sourceModel.type(rightSourceIndex)
            return left < right
        elif sortColumn == _RawFileSystemModel.LAST_MODIFIED_COLUMN:
            left = sourceModel.fileInfo(leftSourceIndex)
            right = sourceModel.fileInfo(rightSourceIndex)
            return left.lastModified() < right.lastModified()
        else:
            _logger.warning("Unsupported sorted column %d", sortColumn)
        return False
    def __filtersAccepted(self, item, filters):
        """
        Check individual flag filters.
        """
        if not (filters & (qt.QDir.Dirs | qt.QDir.AllDirs)):
            # Hide dirs
            if item.isDir():
                return False
        if not (filters & qt.QDir.Files):
            # Hide files
            if item.isFile():
                return False
        if not (filters & qt.QDir.Drives):
            # Hide drives
            if item.isDrive():
                return False
        fileInfo = item.fileInfo()
        if fileInfo is None:
            return False
        filterPermissions = (filters & qt.QDir.PermissionMask) != 0
        if filterPermissions and (filters & (qt.QDir.Dirs | qt.QDir.Files)):
            if (filters & qt.QDir.Readable):
                # Hide unreadable
                if not fileInfo.isReadable():
                    return False
            if (filters & qt.QDir.Writable):
                # Hide unwritable
                if not fileInfo.isWritable():
                    return False
            if (filters & qt.QDir.Executable):
                # Hide unexecutable
                if not fileInfo.isExecutable():
                    return False
        if (filters & qt.QDir.NoSymLinks):
            # Hide sym links
            if fileInfo.isSymLink():
                return False
        if not (filters & qt.QDir.System):
            # Hide system entries (neither file nor directory).
            if not item.isDir() and not item.isFile():
                return False
        fileName = item.fileName()
        isDot = fileName == "."
        isDotDot = fileName == ".."
        if not (filters & qt.QDir.Hidden):
            # Hide hidden
            if not (isDot or isDotDot) and fileInfo.isHidden():
                return False
        if filters & (qt.QDir.NoDot | qt.QDir.NoDotDot | qt.QDir.NoDotAndDotDot):
            # Hide parent/self references
            if filters & qt.QDir.NoDot:
                if isDot:
                    return False
            if filters & qt.QDir.NoDotDot:
                if isDotDot:
                    return False
            if filters & qt.QDir.NoDotAndDotDot:
                if isDot or isDotDot:
                    return False
        return True
    def filterAcceptsRow(self, sourceRow, sourceParent):
        if not sourceParent.isValid():
            return True
        sourceModel = self.sourceModel()
        index = sourceModel.index(sourceRow, 0, sourceParent)
        if not index.isValid():
            return True
        item = sourceModel._item(index)
        filters = self.__filters
        if item.isDrive():
            # Assume the user always has access to a drive.  This avoids
            # accessing its fileInfo, which could freeze the file system.
            return True
        if not self.__filtersAccepted(item, filters):
            return False
        if self.__nameFilterDisables:
            # Rejected names are greyed out by flags(), not hidden here.
            return True
        if item.isDir() and (filters & qt.QDir.AllDirs):
            # Don't apply the name filters to directory names.
            return True
        return self.__nameFiltersAccepted(item)
    def __nameFiltersAccepted(self, item):
        """Return True when the item name matches a name filter, or when no
        name filter is configured."""
        if len(self.__nameFilters) == 0:
            return True
        fileName = item.fileName()
        for reg in self.__nameFilters:
            if reg.exactMatch(fileName):
                return True
        return False
    def setNameFilterDisables(self, enable):
        """If True, name-filter-rejected items are disabled instead of
        hidden."""
        self.__nameFilterDisables = enable
        self.invalidate()
    def nameFilterDisables(self):
        return self.__nameFilterDisables
    def myComputer(self, role=qt.Qt.DisplayRole):
        return self.sourceModel().myComputer(role)
    def setNameFilters(self, filters):
        """Compile wildcard strings into QRegExp name filters; the case
        sensitivity follows qt.QDir.CaseSensitive in the current flags."""
        self.__nameFilters = []
        isCaseSensitive = self.__filters & qt.QDir.CaseSensitive
        caseSensitive = qt.Qt.CaseSensitive if isCaseSensitive else qt.Qt.CaseInsensitive
        for f in filters:
            reg = qt.QRegExp(f, caseSensitive, qt.QRegExp.Wildcard)
            self.__nameFilters.append(reg)
        self.invalidate()
    def nameFilters(self):
        return [f.pattern() for f in self.__nameFilters]
    def filter(self):
        return self.__filters
    def setFilter(self, filters):
        """Set the qt.QDir filter flags and invalidate the proxy."""
        self.__filters = filters
        # In case of change of case sensitivity.  setNameFilters() already
        # calls invalidate(), so the original trailing invalidate() call was
        # redundant and has been removed.
        self.setNameFilters(self.nameFilters())
    def setReadOnly(self, enable):
        """The model is always read-only; only ``enable=True`` is accepted.

        :raises ValueError: If ``enable`` is not True.
        """
        # An ``assert`` would be stripped under ``python -O``; raise instead.
        if enable is not True:
            raise ValueError("SafeFileSystemModel is read-only; "
                             "setReadOnly(False) is not supported")
    def isReadOnly(self):
        return False
    def rootPath(self):
        return self.sourceModel().rootPath()
    def setRootPath(self, path):
        index = self.sourceModel().setRootPath(path)
        index = self.mapFromSource(index)
        return index
    def flags(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        filters = sourceModel.flags(index)
        if self.__nameFilterDisables:
            item = sourceModel._item(index)
            if not self.__nameFiltersAccepted(item):
                # Grey out the item rather than hiding it.
                filters &= ~qt.Qt.ItemIsEnabled
        return filters
    def fileIcon(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        return sourceModel.fileIcon(index)
    def fileInfo(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        return sourceModel.fileInfo(index)
    def fileName(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        return sourceModel.fileName(index)
    def filePath(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        return sourceModel.filePath(index)
    def isDir(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        return sourceModel.isDir(index)
    def lastModified(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        return sourceModel.lastModified(index)
    def permissions(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        return sourceModel.permissions(index)
    def size(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        return sourceModel.size(index)
    def type(self, index):
        sourceModel = self.sourceModel()
        index = self.mapToSource(index)
        return sourceModel.type(index)
| en | 0.752588 | # coding: utf-8 # /*########################################################################## # # Copyright (c) 2016 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ This module contains an :class:`SafeFileSystemModel`. Returns true if the path is a file. It avoid to access to the `Qt.QFileInfo` in case the file is a drive. Returns true if the path is a directory. The default `qt.QFileInfo.isDir` can freeze the file system with network drives. This function avoid the freeze in case of browsing the root. # A drive is a directory, we don't have to synchronize the # drive to know that Returns an absolute path including the file name. This function uses in most cases the default `qt.QFileInfo.absoluteFilePath`. But it is known to freeze the file system with network drives. 
This function uses `qt.QFileInfo.filePath` in case of root drives, to avoid this kind of issues. In case of drive, the result is the same, while the file path is already absolute. :rtype: str Returns the Qt file info. :rtype: Qt.QFileInfo This class implement a file system model and try to avoid freeze. On Qt4, :class:`qt.QFileSystemModel` is known to freeze the file system when network drives are available. To avoid this behaviour, this class does not use `qt.QFileInfo.absoluteFilePath` nor `qt.QFileInfo.canonicalPath` to reach information on drives. This model do not take care of sorting and filtering. This features are managed by another model, by composition. And because it is the end of life of Qt4, we do not implement asynchronous loading of files as it is done by :class:`qt.QFileSystemModel`, nor some useful features. This signal is connected asynchronously to a slot. It allows to emit directoryLoaded as an asynchronous signal. This signal is emitted when the gatherer thread has finished to load the path. This signal is emitted whenever the root path has been changed to a newPath. Uses the index(str) C++ API :rtype: qt.QModelIndex # File manipulation # bool remove(const QModelIndex & index) const # bool rmdir(const QModelIndex & index) const # QModelIndex mkdir(const QModelIndex & parent, const QString & name) # Configuration # FIXME: invalidate the model # FIXME: invalidate the model # bool resolveSymlinks() const # void setResolveSymlinks(bool enable) This class implement a file system model and try to avoid freeze. On Qt4, :class:`qt.QFileSystemModel` is known to freeze the file system when network drives are available. To avoid this behaviour, this class does not use `qt.QFileInfo.absoluteFilePath` nor `qt.QFileInfo.canonicalPath` to reach information on drives. And because it is the end of life of Qt4, we do not implement asynchronous loading of files as it is done by :class:`qt.QFileSystemModel`, nor some useful features. 
Uses the index(str) C++ API :rtype: qt.QModelIndex # Sort directories before files Check individual flag filters. # Hide dirs # Hide files # Hide drives # Hide unreadable # Hide unwritable # Hide unexecutable # Hide sym links # Hide system # Hide hidden # Hide parent/self references # Let say a user always have access to a drive # It avoid to access to fileInfo then avoid to freeze the file # system # dont apply the filters to directory names # In case of change of case sensitivity | 1.519853 | 2 |
data.py | mackstann/schedule_exercise | 0 | 6624765 | <reponame>mackstann/schedule_exercise<gh_stars>0
employer_schedules = [
{"id": 2, "start_time": "2019-02-28T16:00:00.000Z"},
{"id": 6, "start_time": "2019-02-28T21:00:00.000Z"},
{"id": 9, "start_time": "2019-02-28T14:00:00.000Z"},
]
| employer_schedules = [
{"id": 2, "start_time": "2019-02-28T16:00:00.000Z"},
{"id": 6, "start_time": "2019-02-28T21:00:00.000Z"},
{"id": 9, "start_time": "2019-02-28T14:00:00.000Z"},
] | none | 1 | 1.440876 | 1 | |
pixel_table/key_handler.py | Spooner/pixel-table | 0 | 6624766 | from __future__ import absolute_import, division, print_function, unicode_literals
from twisted.protocols import basic
from .sprites.touch_button import TouchButton
# https://stackoverflow.com/questions/23714006/twisted-queue-a-function-interactively
class KeyHandler(basic.LineReceiver):
    """Translate raw keyboard input into application event-queue entries.

    NOTE(review): ``str(data).upper()[0]`` only matches the bytes keys below
    on Python 2 (this file imports ``__future__`` and twisted, so Python 2
    is presumably the target) — confirm before porting to Python 3.
    """
    MODE_BUTTON = b'1'
    STATE_BUTTON = b'2'
    TOUCH_BUTTONS = {
        # Bottom side, player 0
        b'X': (0, TouchButton.LEFT),
        b'C': (0, TouchButton.RIGHT),
        b'V': (0, TouchButton.ACTION),
        # Top side, player 1
        b'R': (1, TouchButton.LEFT),
        b'E': (1, TouchButton.RIGHT),
        b'W': (1, TouchButton.ACTION),
        # Left side, player 2
        b'Q': (2, TouchButton.LEFT),
        b'A': (2, TouchButton.RIGHT),
        b'Z': (2, TouchButton.ACTION),
        # Right side, player 3
        b'B': (3, TouchButton.LEFT),
        b'G': (3, TouchButton.RIGHT),
        b'T': (3, TouchButton.ACTION),
    }
    def __init__(self, app):
        # Raw mode: receive whatever bytes arrive instead of whole lines.
        self.setRawMode()
        self._app = app
    def rawDataReceived(self, data):
        """Dispatch the first received key to the application event queue."""
        key = str(data).upper()[0]
        enqueue = self._app.add_to_event_queue
        if key == self.MODE_BUTTON:
            enqueue("panel_button_press", "mode")
            return
        if key == self.STATE_BUTTON:
            enqueue("panel_button_press", "state")
            return
        player_button = self.TOUCH_BUTTONS.get(key)
        if player_button is not None:
            enqueue("touch_button_press", *player_button)
            enqueue("touch_button_held", *player_button)
    def lineReceived(self, line):
        # Unused: the protocol runs in raw mode.
        pass
| from __future__ import absolute_import, division, print_function, unicode_literals
from twisted.protocols import basic
from .sprites.touch_button import TouchButton
# https://stackoverflow.com/questions/23714006/twisted-queue-a-function-interactively
class KeyHandler(basic.LineReceiver):
    """Map raw key presses onto the application's event queue.

    NOTE(review): ``str(data).upper()[0]`` only matches the bytes keys below
    on Python 2 — confirm before porting to Python 3.
    """
    MODE_BUTTON = b'1'
    STATE_BUTTON = b'2'
    TOUCH_BUTTONS = {
        # Bottom side, player 0
        b'X': (0, TouchButton.LEFT),
        b'C': (0, TouchButton.RIGHT),
        b'V': (0, TouchButton.ACTION),
        # Top side, player 1
        b'R': (1, TouchButton.LEFT),
        b'E': (1, TouchButton.RIGHT),
        b'W': (1, TouchButton.ACTION),
        # Left side, player 2
        b'Q': (2, TouchButton.LEFT),
        b'A': (2, TouchButton.RIGHT),
        b'Z': (2, TouchButton.ACTION),
        # Right side, player 3
        b'B': (3, TouchButton.LEFT),
        b'G': (3, TouchButton.RIGHT),
        b'T': (3, TouchButton.ACTION),
    }
    def __init__(self, app):
        self.setRawMode()  # byte-at-a-time input rather than line mode
        self._app = app
    def rawDataReceived(self, data):
        """Forward the first key of ``data`` as one or two queue events."""
        pressed = str(data).upper()[0]
        if pressed == self.MODE_BUTTON:
            self._app.add_to_event_queue("panel_button_press", "mode")
        elif pressed == self.STATE_BUTTON:
            self._app.add_to_event_queue("panel_button_press", "state")
        elif pressed in self.TOUCH_BUTTONS:
            target = self.TOUCH_BUTTONS[pressed]
            self._app.add_to_event_queue("touch_button_press", *target)
            self._app.add_to_event_queue("touch_button_held", *target)
    def lineReceived(self, line):
        # Unused: the protocol runs in raw mode.
        pass
| en | 0.891269 | # https://stackoverflow.com/questions/23714006/twisted-queue-a-function-interactively # Bottom side, player 0 # Top side, player 1 # Left side, player 2 # Right side, player 3 # Switch from line mode to "however much I got" mode | 2.857758 | 3 |
code/st_gcn/graph/hdm05.py | nvski/ST-TR | 0 | 6624767 | import numpy as np
from . import tools
# Edge format: (origin, neighbor) — joint indices of the HDM05 skeleton.
num_node = 31
# Every joint is linked to itself (used for the adjacency self-loops).
self_link = [(i, i) for i in range(num_node)]
inward = [
    ( 3, 2), ( 4, 3), ( 5, 4), ( 8, 7), ( 9, 8), (10, 9), (13,12),
    (14,13), (15,14), (17,16), ( 0,17), (18,14), (19,18), (20,19), (22,21), (25,14),
    (26,25), (27,26), (29,28)
]  # NOTE(review): labelled "legs" originally, but these chains appear to span the whole 31-joint body — confirm against the HDM05 joint layout
outward = [(j, i) for (i, j) in inward]
neighbor = inward + outward
class Graph():
    """Skeleton graph of the 31-joint HDM05 motion-capture dataset.

    Arguments:
        labeling_mode: one of the following partition strategies
            uniform: Uniform Labeling
            distance*: Distance Partitioning*
            distance: Distance Partitioning
            spatial: Spatial Configuration
            DAD: normalized graph adjacency matrix
            DLD: normalized graph laplacian matrix
    For more information, please refer to the section 'Partition Strategies'
    in our paper.
    """
    def __init__(self, labeling_mode='uniform'):
        self.A = self.get_adjacency_matrix(labeling_mode)
        self.num_node = num_node
        self.self_link = self_link
        self.inward = inward
        self.outward = outward
        self.neighbor = neighbor
    def get_adjacency_matrix(self, labeling_mode=None):
        """Build (or return the cached) adjacency tensor for the given
        partition strategy; ``None`` returns the cached ``self.A``."""
        if labeling_mode is None:
            return self.A
        if labeling_mode == 'uniform':
            return tools.get_uniform_graph(num_node, self_link, neighbor)
        if labeling_mode == 'distance*':
            return tools.get_uniform_distance_graph(num_node, self_link, neighbor)
        if labeling_mode == 'distance':
            return tools.get_distance_graph(num_node, self_link, neighbor)
        if labeling_mode == 'spatial':
            return tools.get_spatial_graph(num_node, self_link, inward, outward)
        if labeling_mode == 'DAD':
            return tools.get_DAD_graph(num_node, self_link, neighbor)
        if labeling_mode == 'DLD':
            return tools.get_DLD_graph(num_node, self_link, neighbor)
        raise ValueError()
def main():
mode = ['uniform', 'distance*', 'distance', 'spatial', 'DAD', 'DLD']
np.set_printoptions(threshold=np.nan)
for m in mode:
print('=' * 10 + m + '=' * 10)
print(Graph(m).get_adjacency_matrix())
if __name__ == '__main__':
main()
| import numpy as np
from . import tools
# Edge format: (origin, neighbor)
num_node = 31
self_link = [(i, i) for i in range(num_node)]
inward = [
( 3, 2), ( 4, 3), ( 5, 4), ( 8, 7), ( 9, 8), (10, 9), (13,12),
(14,13), (15,14), (17,16), ( 0,17), (18,14), (19,18), (20,19), (22,21), (25,14),
(26,25), (27,26), (29,28)
] # legs
outward = [(j, i) for (i, j) in inward]
neighbor = inward + outward
class Graph():
""" The Graph to model the skeletons extracted by the openpose
Arguments:
labeling_mode: must be one of the follow candidates
uniform: Uniform Labeling
dastance*: Distance Partitioning*
dastance: Distance Partitioning
spatial: Spatial Configuration
DAD: normalized graph adjacency matrix
DLD: normalized graph laplacian matrix
For more information, please refer to the section 'Partition Strategies' in our paper.
"""
def __init__(self, labeling_mode='uniform'):
self.A = self.get_adjacency_matrix(labeling_mode)
self.num_node = num_node
self.self_link = self_link
self.inward = inward
self.outward = outward
self.neighbor = neighbor
def get_adjacency_matrix(self, labeling_mode=None):
if labeling_mode is None:
return self.A
if labeling_mode == 'uniform':
A = tools.get_uniform_graph(num_node, self_link, neighbor)
elif labeling_mode == 'distance*':
A = tools.get_uniform_distance_graph(num_node, self_link, neighbor)
elif labeling_mode == 'distance':
A = tools.get_distance_graph(num_node, self_link, neighbor)
elif labeling_mode == 'spatial':
A = tools.get_spatial_graph(num_node, self_link, inward, outward)
elif labeling_mode == 'DAD':
A = tools.get_DAD_graph(num_node, self_link, neighbor)
elif labeling_mode == 'DLD':
A = tools.get_DLD_graph(num_node, self_link, neighbor)
# elif labeling_mode == 'customer_mode':
# pass
else:
raise ValueError()
return A
def main():
mode = ['uniform', 'distance*', 'distance', 'spatial', 'DAD', 'DLD']
np.set_printoptions(threshold=np.nan)
for m in mode:
print('=' * 10 + m + '=' * 10)
print(Graph(m).get_adjacency_matrix())
if __name__ == '__main__':
main()
| en | 0.785232 | # Edge format: (origin, neighbor) # legs The Graph to model the skeletons extracted by the openpose Arguments: labeling_mode: must be one of the follow candidates uniform: Uniform Labeling dastance*: Distance Partitioning* dastance: Distance Partitioning spatial: Spatial Configuration DAD: normalized graph adjacency matrix DLD: normalized graph laplacian matrix For more information, please refer to the section 'Partition Strategies' in our paper. # elif labeling_mode == 'customer_mode': # pass | 2.756573 | 3 |
setup_build.py | cklb/odes | 95 | 6624768 | <reponame>cklb/odes
import io
import os
from os.path import join
from distutils.log import info
import sys
from numpy.distutils.command.build_ext import build_ext as _build_ext
PKGCONFIG_CVODE = 'sundials-cvode-serial'
PKGCONFIG_IDA = 'sundials-ida-serial'
PKGCONFIG_CVODES = 'sundials-cvodes-serial'
PKGCONFIG_IDAS = 'sundials-idas-serial'
def write_pxi(filename, definitions):
"""
Write a cython include file (.pxi), `filename`, with the definitions in the
`definitions` mapping.
"""
with io.open(filename, mode='w', encoding='utf-8') as pxi_file:
for name, val in definitions.items():
pxi_file.write(u"DEF {name} = {val}\n".format(name=name, val=val))
return filename
def check_macro_def(cmd, symbol, headers=None, include_dirs=None):
"""
Based on numpy.distutils.command.config:config.check_macro_true, checks if
macro is defined or not
"""
cmd._check_compiler()
body = """
int main(void)
{
#ifdef %s
#else
#error undefined macro
#endif
;
return 0;
}""" % (symbol,)
return cmd.try_compile(body, headers, include_dirs)
def get_sundials_config_pxi(include_dirs, dist):
"""
Create pxi file containing some of sundials build config
Don't ask why this is a function, something crazy about
distutils/numpy not setting _setup_distribution at the right time or
something...
"""
SUNDIALS_CONFIG_H = "sundials/sundials_config.h"
BASE_PATH = join('scikits', 'odes', 'sundials')
config_cmd = dist.get_command_obj("config")
# Get float type
if config_cmd.check_macro_true(
"SUNDIALS_DOUBLE_PRECISION", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_FLOAT_TYPE = '"double"'
info("Found sundials built with double precision.")
elif config_cmd.check_macro_true(
"SUNDIALS_SINGLE_PRECISION", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_FLOAT_TYPE = '"single"'
info("Found sundials built with single precision.")
elif config_cmd.check_macro_true(
"SUNDIALS_EXTENDED_PRECISION", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_FLOAT_TYPE = '"extended"'
info("Found sundials built with extended precision.")
else:
# fall back to double
SUNDIALS_FLOAT_TYPE = '"double"'
info("Failed to find sundials precision, falling back to double...")
# Get index (int) type
if config_cmd.check_macro_true(
"SUNDIALS_INT32_T", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_INDEX_SIZE = '"int32"'
info("Found sundials built with int32.")
elif config_cmd.check_macro_true(
"SUNDIALS_INT64_T", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_INDEX_SIZE = '"64"'
info("Found sundials built with int64.")
else:
# fall back to int64
SUNDIALS_INDEX_SIZE = '"64"'
info("Failed to find sundials index type, falling back to int64...")
# Check for blas/lapack
if check_macro_def(
config_cmd,
"SUNDIALS_BLAS_LAPACK", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
has_lapack = True
else:
has_lapack = False
cfg = dict(
float_type = SUNDIALS_FLOAT_TYPE,
index_size = SUNDIALS_INDEX_SIZE,
has_lapack = has_lapack,
)
return write_pxi(join(BASE_PATH, "sundials_config.pxi"), dict(
SUNDIALS_FLOAT_TYPE=SUNDIALS_FLOAT_TYPE,
SUNDIALS_INDEX_SIZE=SUNDIALS_INDEX_SIZE,
SUNDIALS_BLAS_LAPACK=str(has_lapack),
)), cfg
class build_ext(_build_ext):
"""
Custom distutils command which encapsulates api_gen pre-building,
Cython building, and C compilation.
Also handles making the Extension modules, since we can't rely on
NumPy being present in the main body of the setup script.
"""
def _get_cython_ext(self):
from numpy.distutils.system_info import get_info
from setuptools import Extension
base_path = join('scikits', 'odes', 'sundials')
base_module = "scikits.odes.sundials"
SUNDIALS_LIBRARIES = []
CVODE_LIBRARIES = []
IDA_LIBRARIES = []
CVODES_LIBRARIES = []
IDAS_LIBRARIES = []
SUNDIALS_LIBRARY_DIRS = []
CVODE_LIBRARY_DIRS = []
IDA_LIBRARY_DIRS = []
CVODES_LIBRARY_DIRS = []
IDAS_LIBRARY_DIRS = []
SUNDIALS_INCLUDE_DIRS = []
CVODE_INCLUDE_DIRS = []
IDA_INCLUDE_DIRS = []
CVODES_INCLUDE_DIRS = []
IDAS_INCLUDE_DIRS = []
SUNDIALS_LIBDIR = os.environ.get("SUNDIALS_LIBDIR")
SUNDIALS_INCLUDEDIR = os.environ.get("SUNDIALS_INCLUDEDIR")
SUNDIALS_INST_PREFIX = os.environ.get("SUNDIALS_INST")
if SUNDIALS_LIBDIR or SUNDIALS_INCLUDEDIR:
SUNDIALS_INCLUDE_DIRS.extend(
[SUNDIALS_INCLUDEDIR] if SUNDIALS_INCLUDEDIR is not None else []
)
SUNDIALS_LIBRARY_DIRS.extend(
[SUNDIALS_LIBDIR] if SUNDIALS_LIBDIR is not None else []
)
elif SUNDIALS_INST_PREFIX is not None:
SUNDIALS_LIBRARY_DIRS.append(os.path.join(SUNDIALS_INST_PREFIX, "lib"))
SUNDIALS_INCLUDE_DIRS.append(os.path.join(SUNDIALS_INST_PREFIX, "include"))
info("SUNDIALS installation path set to `{}` via $SUNDIALS_INST.".format(
SUNDIALS_INST_PREFIX))
else:
info("Searching for SUNDIALS path...")
# use pkgconfig to find sundials
try:
import pkgconfig
from pkgconfig.pkgconfig import PackageNotFoundError
try:
cvode_pkgconf = pkgconfig.parse(PKGCONFIG_CVODE)
for d in cvode_pkgconf.get('library_dirs', []):
CVODE_LIBRARY_DIRS.append(str(d))
for d in cvode_pkgconf.get('include_dirs', []):
CVODE_INCLUDE_DIRS.append(str(d))
for lib in cvode_pkgconf.get('include_dirs', []):
CVODE_LIBRARIES.append(str(lib))
ida_pkgconf = pkgconfig.parse(PKGCONFIG_IDA)
for d in ida_pkgconf.get('library_dirs', []):
IDA_LIBRARY_DIRS.append(str(d))
for d in ida_pkgconf.get('include_dirs', []):
IDA_INCLUDE_DIRS.append(str(d))
for lib in ida_pkgconf.get('include_dirs', []):
IDA_LIBRARIES.append(str(lib))
cvodes_pkgconf = pkgconfig.parse(PKGCONFIG_CVODES)
for d in cvodes_pkgconf.get('library_dirs', []):
CVODES_LIBRARY_DIRS.append(str(d))
for d in cvodes_pkgconf.get('include_dirs', []):
CVODES_INCLUDE_DIRS.append(str(d))
for lib in cvodes_pkgconf.get('include_dirs', []):
CVODES_LIBRARIES.append(str(lib))
idas_pkgconf = pkgconfig.parse(PKGCONFIG_IDAS)
for d in idas_pkgconf.get('library_dirs', []):
IDAS_LIBRARY_DIRS.append(str(d))
for d in idas_pkgconf.get('include_dirs', []):
IDAS_INCLUDE_DIRS.append(str(d))
for lib in idas_pkgconf.get('include_dirs', []):
IDAS_LIBRARIES.append(str(lib))
except (EnvironmentError, PackageNotFoundError) as e:
pass
except ImportError:
info("pkgconfig module not found, using preset paths")
sundials_pxi, cfg = get_sundials_config_pxi(SUNDIALS_INCLUDE_DIRS,
self.distribution)
has_lapack = cfg['has_lapack']
if not SUNDIALS_LIBRARIES:
# This is where to put N_vector codes (currently only serial is
# supported)
SUNDIALS_LIBRARIES.append('sundials_nvecserial')
# SUNDIALS_LIBRARIES.append('sundials_nvecopenmp')
# SUNDIALS_LIBRARIES.append('sundials_nvecparallel')
# SUNDIALS_LIBRARIES.append('sundials_nvecparhyp')
# SUNDIALS_LIBRARIES.append('sundials_nvecpetsc')
# SUNDIALS_LIBRARIES.append('sundials_nvecpthreads')
# This is where to put SUNLinearSolver codes (klu not supported
# yet)
if has_lapack:
SUNDIALS_LIBRARIES.append('sundials_sunlinsollapackband')
SUNDIALS_LIBRARIES.append('sundials_sunlinsollapackdense')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolband')
SUNDIALS_LIBRARIES.append('sundials_sunlinsoldense')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolpcg')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolspbcgs')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolspfgmr')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolspgmr')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolsptfqmr')
# SUNDIALS_LIBRARIES.append('sundials_sunlinsolklu')
# This is where to put SUNMatrix codes
SUNDIALS_LIBRARIES.append('sundials_sunmatrixband')
SUNDIALS_LIBRARIES.append('sundials_sunmatrixdense')
SUNDIALS_LIBRARIES.append('sundials_sunmatrixsparse')
if not IDA_LIBRARIES:
IDA_LIBRARIES.append('sundials_ida')
if not CVODE_LIBRARIES:
CVODE_LIBRARIES.append('sundials_cvode')
if not IDAS_LIBRARIES:
IDAS_LIBRARIES.append('sundials_idas')
if not CVODES_LIBRARIES:
CVODES_LIBRARIES.append('sundials_cvodes')
if has_lapack:
lapack_opt = get_info('lapack_opt', notfound_action=2)
if lapack_opt:
SUNDIALS_INCLUDE_DIRS.extend(lapack_opt.get('include_dirs',[]))
SUNDIALS_LIBRARY_DIRS.extend(lapack_opt.get('library_dirs',[]))
SUNDIALS_LIBRARIES.extend(lapack_opt.get('libraries',[]))
info('Found LAPACK paths via lapack_opt ...')
else:
info('LAPACK was not found, but SUNDIALS compiled against '
'lapack, check your numpy installation'
)
CVODE_LIBRARIES.extend(SUNDIALS_LIBRARIES)
IDA_LIBRARIES.extend(SUNDIALS_LIBRARIES)
CVODES_LIBRARIES.extend(SUNDIALS_LIBRARIES)
IDAS_LIBRARIES.extend(SUNDIALS_LIBRARIES)
CVODE_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)
IDA_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)
CVODES_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)
IDAS_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)
CVODE_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)
IDA_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)
CVODES_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)
IDAS_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)
return [
Extension(
base_module + '.' + "common_defs",
sources = [join(base_path, 'common_defs.pyx')],
include_dirs=SUNDIALS_INCLUDE_DIRS,
library_dirs=SUNDIALS_LIBRARY_DIRS,
libraries=SUNDIALS_LIBRARIES,
),
Extension(
base_module + '.' + "cvode",
sources = [join(base_path, 'cvode.pyx')],
include_dirs=CVODE_INCLUDE_DIRS,
library_dirs=CVODE_LIBRARY_DIRS,
libraries=CVODE_LIBRARIES,
),
Extension(
base_module + '.' + "ida",
sources = [join(base_path, 'ida.pyx')],
include_dirs=IDA_INCLUDE_DIRS,
library_dirs=IDA_LIBRARY_DIRS,
libraries=IDA_LIBRARIES,
),
Extension(
base_module + '.' + "cvodes",
sources = [join(base_path, 'cvodes.pyx')],
include_dirs=CVODES_INCLUDE_DIRS,
library_dirs=CVODES_LIBRARY_DIRS,
libraries=CVODES_LIBRARIES,
),
Extension(
base_module + '.' + "idas",
sources = [join(base_path, 'idas.pyx')],
include_dirs=IDAS_INCLUDE_DIRS,
library_dirs=IDAS_LIBRARY_DIRS,
libraries=IDAS_LIBRARIES,
),
]
def run(self):
""" Distutils calls this method to run the command """
from Cython.Build import cythonize
self.extensions.extend(cythonize(
self._get_cython_ext(),
compiler_directives= {'language_level' : sys.version_info[0]})
)
_build_ext.run(self) # actually do the build
| import io
import os
from os.path import join
from distutils.log import info
import sys
from numpy.distutils.command.build_ext import build_ext as _build_ext
PKGCONFIG_CVODE = 'sundials-cvode-serial'
PKGCONFIG_IDA = 'sundials-ida-serial'
PKGCONFIG_CVODES = 'sundials-cvodes-serial'
PKGCONFIG_IDAS = 'sundials-idas-serial'
def write_pxi(filename, definitions):
"""
Write a cython include file (.pxi), `filename`, with the definitions in the
`definitions` mapping.
"""
with io.open(filename, mode='w', encoding='utf-8') as pxi_file:
for name, val in definitions.items():
pxi_file.write(u"DEF {name} = {val}\n".format(name=name, val=val))
return filename
def check_macro_def(cmd, symbol, headers=None, include_dirs=None):
"""
Based on numpy.distutils.command.config:config.check_macro_true, checks if
macro is defined or not
"""
cmd._check_compiler()
body = """
int main(void)
{
#ifdef %s
#else
#error undefined macro
#endif
;
return 0;
}""" % (symbol,)
return cmd.try_compile(body, headers, include_dirs)
def get_sundials_config_pxi(include_dirs, dist):
"""
Create pxi file containing some of sundials build config
Don't ask why this is a function, something crazy about
distutils/numpy not setting _setup_distribution at the right time or
something...
"""
SUNDIALS_CONFIG_H = "sundials/sundials_config.h"
BASE_PATH = join('scikits', 'odes', 'sundials')
config_cmd = dist.get_command_obj("config")
# Get float type
if config_cmd.check_macro_true(
"SUNDIALS_DOUBLE_PRECISION", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_FLOAT_TYPE = '"double"'
info("Found sundials built with double precision.")
elif config_cmd.check_macro_true(
"SUNDIALS_SINGLE_PRECISION", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_FLOAT_TYPE = '"single"'
info("Found sundials built with single precision.")
elif config_cmd.check_macro_true(
"SUNDIALS_EXTENDED_PRECISION", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_FLOAT_TYPE = '"extended"'
info("Found sundials built with extended precision.")
else:
# fall back to double
SUNDIALS_FLOAT_TYPE = '"double"'
info("Failed to find sundials precision, falling back to double...")
# Get index (int) type
if config_cmd.check_macro_true(
"SUNDIALS_INT32_T", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_INDEX_SIZE = '"int32"'
info("Found sundials built with int32.")
elif config_cmd.check_macro_true(
"SUNDIALS_INT64_T", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
SUNDIALS_INDEX_SIZE = '"64"'
info("Found sundials built with int64.")
else:
# fall back to int64
SUNDIALS_INDEX_SIZE = '"64"'
info("Failed to find sundials index type, falling back to int64...")
# Check for blas/lapack
if check_macro_def(
config_cmd,
"SUNDIALS_BLAS_LAPACK", headers=[SUNDIALS_CONFIG_H],
include_dirs=include_dirs
):
has_lapack = True
else:
has_lapack = False
cfg = dict(
float_type = SUNDIALS_FLOAT_TYPE,
index_size = SUNDIALS_INDEX_SIZE,
has_lapack = has_lapack,
)
return write_pxi(join(BASE_PATH, "sundials_config.pxi"), dict(
SUNDIALS_FLOAT_TYPE=SUNDIALS_FLOAT_TYPE,
SUNDIALS_INDEX_SIZE=SUNDIALS_INDEX_SIZE,
SUNDIALS_BLAS_LAPACK=str(has_lapack),
)), cfg
class build_ext(_build_ext):
"""
Custom distutils command which encapsulates api_gen pre-building,
Cython building, and C compilation.
Also handles making the Extension modules, since we can't rely on
NumPy being present in the main body of the setup script.
"""
def _get_cython_ext(self):
from numpy.distutils.system_info import get_info
from setuptools import Extension
base_path = join('scikits', 'odes', 'sundials')
base_module = "scikits.odes.sundials"
SUNDIALS_LIBRARIES = []
CVODE_LIBRARIES = []
IDA_LIBRARIES = []
CVODES_LIBRARIES = []
IDAS_LIBRARIES = []
SUNDIALS_LIBRARY_DIRS = []
CVODE_LIBRARY_DIRS = []
IDA_LIBRARY_DIRS = []
CVODES_LIBRARY_DIRS = []
IDAS_LIBRARY_DIRS = []
SUNDIALS_INCLUDE_DIRS = []
CVODE_INCLUDE_DIRS = []
IDA_INCLUDE_DIRS = []
CVODES_INCLUDE_DIRS = []
IDAS_INCLUDE_DIRS = []
SUNDIALS_LIBDIR = os.environ.get("SUNDIALS_LIBDIR")
SUNDIALS_INCLUDEDIR = os.environ.get("SUNDIALS_INCLUDEDIR")
SUNDIALS_INST_PREFIX = os.environ.get("SUNDIALS_INST")
if SUNDIALS_LIBDIR or SUNDIALS_INCLUDEDIR:
SUNDIALS_INCLUDE_DIRS.extend(
[SUNDIALS_INCLUDEDIR] if SUNDIALS_INCLUDEDIR is not None else []
)
SUNDIALS_LIBRARY_DIRS.extend(
[SUNDIALS_LIBDIR] if SUNDIALS_LIBDIR is not None else []
)
elif SUNDIALS_INST_PREFIX is not None:
SUNDIALS_LIBRARY_DIRS.append(os.path.join(SUNDIALS_INST_PREFIX, "lib"))
SUNDIALS_INCLUDE_DIRS.append(os.path.join(SUNDIALS_INST_PREFIX, "include"))
info("SUNDIALS installation path set to `{}` via $SUNDIALS_INST.".format(
SUNDIALS_INST_PREFIX))
else:
info("Searching for SUNDIALS path...")
# use pkgconfig to find sundials
try:
import pkgconfig
from pkgconfig.pkgconfig import PackageNotFoundError
try:
cvode_pkgconf = pkgconfig.parse(PKGCONFIG_CVODE)
for d in cvode_pkgconf.get('library_dirs', []):
CVODE_LIBRARY_DIRS.append(str(d))
for d in cvode_pkgconf.get('include_dirs', []):
CVODE_INCLUDE_DIRS.append(str(d))
for lib in cvode_pkgconf.get('include_dirs', []):
CVODE_LIBRARIES.append(str(lib))
ida_pkgconf = pkgconfig.parse(PKGCONFIG_IDA)
for d in ida_pkgconf.get('library_dirs', []):
IDA_LIBRARY_DIRS.append(str(d))
for d in ida_pkgconf.get('include_dirs', []):
IDA_INCLUDE_DIRS.append(str(d))
for lib in ida_pkgconf.get('include_dirs', []):
IDA_LIBRARIES.append(str(lib))
cvodes_pkgconf = pkgconfig.parse(PKGCONFIG_CVODES)
for d in cvodes_pkgconf.get('library_dirs', []):
CVODES_LIBRARY_DIRS.append(str(d))
for d in cvodes_pkgconf.get('include_dirs', []):
CVODES_INCLUDE_DIRS.append(str(d))
for lib in cvodes_pkgconf.get('include_dirs', []):
CVODES_LIBRARIES.append(str(lib))
idas_pkgconf = pkgconfig.parse(PKGCONFIG_IDAS)
for d in idas_pkgconf.get('library_dirs', []):
IDAS_LIBRARY_DIRS.append(str(d))
for d in idas_pkgconf.get('include_dirs', []):
IDAS_INCLUDE_DIRS.append(str(d))
for lib in idas_pkgconf.get('include_dirs', []):
IDAS_LIBRARIES.append(str(lib))
except (EnvironmentError, PackageNotFoundError) as e:
pass
except ImportError:
info("pkgconfig module not found, using preset paths")
sundials_pxi, cfg = get_sundials_config_pxi(SUNDIALS_INCLUDE_DIRS,
self.distribution)
has_lapack = cfg['has_lapack']
if not SUNDIALS_LIBRARIES:
# This is where to put N_vector codes (currently only serial is
# supported)
SUNDIALS_LIBRARIES.append('sundials_nvecserial')
# SUNDIALS_LIBRARIES.append('sundials_nvecopenmp')
# SUNDIALS_LIBRARIES.append('sundials_nvecparallel')
# SUNDIALS_LIBRARIES.append('sundials_nvecparhyp')
# SUNDIALS_LIBRARIES.append('sundials_nvecpetsc')
# SUNDIALS_LIBRARIES.append('sundials_nvecpthreads')
# This is where to put SUNLinearSolver codes (klu not supported
# yet)
if has_lapack:
SUNDIALS_LIBRARIES.append('sundials_sunlinsollapackband')
SUNDIALS_LIBRARIES.append('sundials_sunlinsollapackdense')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolband')
SUNDIALS_LIBRARIES.append('sundials_sunlinsoldense')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolpcg')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolspbcgs')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolspfgmr')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolspgmr')
SUNDIALS_LIBRARIES.append('sundials_sunlinsolsptfqmr')
# SUNDIALS_LIBRARIES.append('sundials_sunlinsolklu')
# This is where to put SUNMatrix codes
SUNDIALS_LIBRARIES.append('sundials_sunmatrixband')
SUNDIALS_LIBRARIES.append('sundials_sunmatrixdense')
SUNDIALS_LIBRARIES.append('sundials_sunmatrixsparse')
if not IDA_LIBRARIES:
IDA_LIBRARIES.append('sundials_ida')
if not CVODE_LIBRARIES:
CVODE_LIBRARIES.append('sundials_cvode')
if not IDAS_LIBRARIES:
IDAS_LIBRARIES.append('sundials_idas')
if not CVODES_LIBRARIES:
CVODES_LIBRARIES.append('sundials_cvodes')
if has_lapack:
lapack_opt = get_info('lapack_opt', notfound_action=2)
if lapack_opt:
SUNDIALS_INCLUDE_DIRS.extend(lapack_opt.get('include_dirs',[]))
SUNDIALS_LIBRARY_DIRS.extend(lapack_opt.get('library_dirs',[]))
SUNDIALS_LIBRARIES.extend(lapack_opt.get('libraries',[]))
info('Found LAPACK paths via lapack_opt ...')
else:
info('LAPACK was not found, but SUNDIALS compiled against '
'lapack, check your numpy installation'
)
CVODE_LIBRARIES.extend(SUNDIALS_LIBRARIES)
IDA_LIBRARIES.extend(SUNDIALS_LIBRARIES)
CVODES_LIBRARIES.extend(SUNDIALS_LIBRARIES)
IDAS_LIBRARIES.extend(SUNDIALS_LIBRARIES)
CVODE_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)
IDA_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)
CVODES_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)
IDAS_INCLUDE_DIRS.extend(SUNDIALS_INCLUDE_DIRS)
CVODE_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)
IDA_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)
CVODES_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)
IDAS_LIBRARY_DIRS.extend(SUNDIALS_LIBRARY_DIRS)
return [
Extension(
base_module + '.' + "common_defs",
sources = [join(base_path, 'common_defs.pyx')],
include_dirs=SUNDIALS_INCLUDE_DIRS,
library_dirs=SUNDIALS_LIBRARY_DIRS,
libraries=SUNDIALS_LIBRARIES,
),
Extension(
base_module + '.' + "cvode",
sources = [join(base_path, 'cvode.pyx')],
include_dirs=CVODE_INCLUDE_DIRS,
library_dirs=CVODE_LIBRARY_DIRS,
libraries=CVODE_LIBRARIES,
),
Extension(
base_module + '.' + "ida",
sources = [join(base_path, 'ida.pyx')],
include_dirs=IDA_INCLUDE_DIRS,
library_dirs=IDA_LIBRARY_DIRS,
libraries=IDA_LIBRARIES,
),
Extension(
base_module + '.' + "cvodes",
sources = [join(base_path, 'cvodes.pyx')],
include_dirs=CVODES_INCLUDE_DIRS,
library_dirs=CVODES_LIBRARY_DIRS,
libraries=CVODES_LIBRARIES,
),
Extension(
base_module + '.' + "idas",
sources = [join(base_path, 'idas.pyx')],
include_dirs=IDAS_INCLUDE_DIRS,
library_dirs=IDAS_LIBRARY_DIRS,
libraries=IDAS_LIBRARIES,
),
]
def run(self):
""" Distutils calls this method to run the command """
from Cython.Build import cythonize
self.extensions.extend(cythonize(
self._get_cython_ext(),
compiler_directives= {'language_level' : sys.version_info[0]})
)
_build_ext.run(self) # actually do the build | en | 0.696236 | Write a cython include file (.pxi), `filename`, with the definitions in the `definitions` mapping. Based on numpy.distutils.command.config:config.check_macro_true, checks if macro is defined or not int main(void) { #ifdef %s #else #error undefined macro #endif ; return 0; } Create pxi file containing some of sundials build config Don't ask why this is a function, something crazy about distutils/numpy not setting _setup_distribution at the right time or something... # Get float type # fall back to double # Get index (int) type # fall back to int64 # Check for blas/lapack Custom distutils command which encapsulates api_gen pre-building, Cython building, and C compilation. Also handles making the Extension modules, since we can't rely on NumPy being present in the main body of the setup script. # use pkgconfig to find sundials # This is where to put N_vector codes (currently only serial is # supported) # SUNDIALS_LIBRARIES.append('sundials_nvecopenmp') # SUNDIALS_LIBRARIES.append('sundials_nvecparallel') # SUNDIALS_LIBRARIES.append('sundials_nvecparhyp') # SUNDIALS_LIBRARIES.append('sundials_nvecpetsc') # SUNDIALS_LIBRARIES.append('sundials_nvecpthreads') # This is where to put SUNLinearSolver codes (klu not supported # yet) # SUNDIALS_LIBRARIES.append('sundials_sunlinsolklu') # This is where to put SUNMatrix codes Distutils calls this method to run the command # actually do the build | 1.993512 | 2 |
Lib/test/test_socket.py | praleena/newpython | 0 | 6624769 | import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = '<NAME> was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
    """Threadable Test class

    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:

        class NewClass (OldClass, ThreadableTest):
            pass

    This class defines two new fixture functions with obvious
    purposes for overriding:

        clientSetUp ()
        clientTearDown ()

    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:

        def testFoo(self):
            # Server portion

        def _testFoo(self):
            # Client portion

    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.

    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """

    def __init__(self):
        # Swap the true setup function
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown

    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()

    def _setUp(self):
        # Keep track of spawned threads so the test only finishes once
        # the client thread has exited.
        self.wait_threads = support.wait_threads_exit()
        self.wait_threads.__enter__()

        # Events coordinating server/client startup and shutdown.
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        # Single-slot queue used to hand a client-side exception back
        # to the main (server) thread.
        self.queue = queue.Queue(1)
        self.server_crashed = False

        # Do some munging to start the client test.
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        # The client half of testFoo is named _testFoo by convention.
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))

        try:
            self.__setUp()
        except:
            # Mark the crash so the client skips its test portion,
            # then let unittest see the original failure.
            self.server_crashed = True
            raise
        finally:
            # Unblock the waiting client thread whether or not the
            # server-side setup succeeded.
            self.server_ready.set()
        self.client_ready.wait()

    def _tearDown(self):
        self.__tearDown()
        # Wait for the client thread to finish before inspecting its
        # result and before the thread bookkeeping is closed.
        self.done.wait()
        self.wait_threads.__exit__(None, None, None)

        if self.queue.qsize():
            # Re-raise the exception captured in the client thread so
            # the failure is attributed to this test.
            exc = self.queue.get()
            raise exc

    def clientRun(self, test_func):
        # Entry point of the client thread: run clientSetUp() and the
        # matching _test* method, forwarding any failure via the queue.
        self.server_ready.wait()
        try:
            self.clientSetUp()
        except BaseException as e:
            self.queue.put(e)
            self.clientTearDown()
            return
        finally:
            # Always release the server, even if client setup failed.
            self.client_ready.set()
        if self.server_crashed:
            # Server setup failed; do not run the client portion.
            self.clientTearDown()
            return
        if not hasattr(test_func, '__call__'):
            raise TypeError("test_func must be a callable function")
        try:
            test_func()
        except BaseException as e:
            self.queue.put(e)
        finally:
            self.clientTearDown()

    def clientSetUp(self):
        raise NotImplementedError("clientSetUp must be implemented.")

    def clientTearDown(self):
        # Signal completion to _tearDown() and end the client thread.
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    """TCP server fixture (from SocketTCPTest) plus a client socket
    created in a separate client thread."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread: plain IPv4 TCP socket.
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    """UDP server fixture (from SocketUDPTest) plus a client socket
    created in a separate client thread."""

    def __init__(self, methodName='runTest'):
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread: plain IPv4 UDP socket.
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
    """CAN server fixture (from SocketCANTest) plus a client raw CAN
    socket created in a separate client thread."""

    def __init__(self, methodName='runTest'):
        SocketCANTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        try:
            # self.interface is provided by SocketCANTest.
            self.cli.bind((self.interface,))
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
    """RDS server fixture (from SocketRDSTest) plus a client RDS
    socket created in a separate client thread."""

    def __init__(self, methodName='runTest'):
        SocketRDSTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        try:
            # RDS sockets must be bound explicitly to send or receive data
            self.cli.bind((HOST, 0))
            self.cli_addr = self.cli.getsockname()
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
          'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
          "This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
    """Stream test over AF_VSOCK (virtual machine sockets)."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.serv.close)
        self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
        self.serv.listen()
        # accept() below blocks on the client thread, so the client
        # must be released before we call it.
        self.serverExplicitReady()
        self.conn, self.connaddr = self.serv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # Give the server a moment to reach accept().
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        cid = get_cid()
        self.cli.connect((cid, VSOCKPORT))

    def testStream(self):
        # Server portion: expect the fixed test payload.
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testStream(self):
        # Client portion: send the payload and close.
        self.cli.send(MSG)
        self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Socket tests for client-server connection.

    self.cli_conn is a client socket connected to the server.  The
    setUp() method guarantees that it is connected to the server.
    """

    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)

    def clientSetUp(self):
        ThreadedTCPSocketTest.clientSetUp(self)
        # Runs in the client thread: connect to the server and expose
        # the connected socket as self.serv_conn.
        self.cli.connect((HOST, self.port))
        self.serv_conn = self.cli

    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    """Tests over a pre-connected pair of sockets from socketpair();
    the second endpoint is used by the client thread."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        # Both endpoints are created already connected.
        self.serv, self.cli = socket.socketpair()

    def tearDown(self):
        self.serv.close()
        self.serv = None

    def clientSetUp(self):
        # Nothing to do: self.cli already exists and is connected.
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
    """A base class for socket tests.

    Subclasses must provide methods newSocket() to return a new socket
    and bindSock(sock) to bind it to an unused address.

    Creates a socket self.serv and sets self.serv_addr to its address.
    """

    def setUp(self):
        self.serv = self.newSocket()
        self.bindServer()

    def bindServer(self):
        """Bind server socket and set self.serv_addr to its address."""
        self.bindSock(self.serv)
        self.serv_addr = self.serv.getsockname()

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketListeningTestMixin(SocketTestBase):
    """Mixin to listen on the server socket."""

    def setUp(self):
        super().setUp()
        # Put the bound server socket into listening mode.
        self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
                              ThreadableTest):
    """Mixin to add client socket and allow client/server tests.

    Client socket is self.cli and its address is self.cli_addr.  See
    ThreadableTest for usage information.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # ThreadableTest is not cooperative, so it is initialized
        # explicitly in addition to the super() chain.
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread.
        self.cli = self.newClientSocket()
        self.bindClient()

    def newClientSocket(self):
        """Return a new socket for use as client."""
        return self.newSocket()

    def bindClient(self):
        """Bind client socket and set self.cli_addr to its address."""
        self.bindSock(self.cli)
        self.cli_addr = self.cli.getsockname()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
                               ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.

    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn.  (Based on
    SocketConnectedTest.)
    """

    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()

    def clientSetUp(self):
        super().clientSetUp()
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli

    def clientTearDown(self):
        try:
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            # serv_conn is only set once connect() in clientSetUp()
            # succeeded; tolerate a client that never got that far.
            pass
        super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
    """Base class for Unix-domain socket tests."""

    # This class is used for file descriptor passing tests, so we
    # create the sockets in a private directory so that other users
    # can't send anything that might be problematic for a privileged
    # user running the tests.

    def setUp(self):
        # Private directory to hold the socket files; removed on cleanup.
        self.dir_path = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, self.dir_path)
        super().setUp()

    def bindSock(self, sock):
        # Bind to a fresh path inside the private directory.
        path = tempfile.mktemp(dir=self.dir_path)
        support.bind_unix_socket(sock, path)
        self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
    """Base class for Unix-domain SOCK_STREAM tests."""

    def newSocket(self):
        return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
    """Base class for IPv4 socket tests."""

    host = HOST

    def setUp(self):
        super().setUp()
        # Convenience: the port number assigned by bindSock().
        self.port = self.serv_addr[1]

    def bindSock(self, sock):
        # Binds to an unused port on self.host.
        support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
    """Base class for TCP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
    """Base class for UDP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
    """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                             socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
    """Base class for IPv6 socket tests."""

    host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
    """Base class for UDP-over-IPv6 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
    """Skip decorated test if condition is true, add client_skip decorator.

    If the decorated object is not a class, sets its attribute
    "client_skip" to a decorator which will return an empty function
    if the test is to be skipped, or the original function if it is
    not. This can be used to avoid running the client part of a
    skipped test when using ThreadableTest.
    """
    def _client_noop(*args, **kwargs):
        # Stand-in for the client half of a skipped test.
        pass

    def _skipping(obj):
        # Apply the regular unittest skip; functions additionally get
        # a client_skip hook that discards the client portion.
        wrapped = unittest.skip(reason)(obj)
        if not isinstance(obj, type):
            wrapped.client_skip = lambda f: _client_noop
        return wrapped

    def _passthrough(obj):
        # Not skipping: client_skip is the identity, unless the object
        # is a class or already carries a client_skip attribute.
        if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
            obj.client_skip = lambda f: f
        return obj

    if condition:
        return _skipping
    return _passthrough
def requireAttrs(obj, *attributes):
    """Skip decorated test if obj is missing any of the given attributes.

    Sets client_skip attribute as skipWithClientIf() does.
    """
    # Any attribute absent from obj makes the list truthy, which in
    # turn makes skipWithClientIf() skip the test.
    absent = [attr for attr in attributes if not hasattr(obj, attr)]
    return skipWithClientIf(
        absent, "don't have " + ", ".join(absent))
def requireSocket(*args):
    """Skip decorated test if a socket cannot be created with given arguments.

    When an argument is given as a string, will use the value of that
    attribute of the socket module, or skip the test if it doesn't
    exist. Sets client_skip attribute as skipWithClientIf() does.
    """
    err = None
    # String arguments name socket-module constants; note any that this
    # build of the socket module does not define.
    unknown = [arg for arg in args
               if isinstance(arg, str) and not hasattr(socket, arg)]
    if unknown:
        err = "don't have " + ", ".join(str(name) for name in unknown)
    else:
        resolved = [getattr(socket, arg) if isinstance(arg, str) else arg
                    for arg in args]
        try:
            sock = socket.socket(*resolved)
        except OSError as e:
            # XXX: check errno?
            err = str(e)
        else:
            sock.close()
    return skipWithClientIf(
        err is not None,
        "can't create socket({0}): {1}".format(
            ", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
    """socket.SocketType is an alias for the C-level _socket.socket."""
    import _socket
    self.assertTrue(socket.SocketType is _socket.socket)
    s = socket.socket()
    self.assertIsInstance(s, socket.SocketType)
    s.close()
def test_repr(self):
    """repr() of a socket shows fd, family, type, proto and addresses."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with s:
        self.assertIn('fd=%i' % s.fileno(), repr(s))
        self.assertIn('family=%s' % socket.AF_INET, repr(s))
        self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
        self.assertIn('proto=0', repr(s))
        # Unconnected socket: no remote address in the repr yet.
        self.assertNotIn('raddr', repr(s))
        s.bind(('127.0.0.1', 0))
        # Once bound, the local address appears.
        self.assertIn('laddr', repr(s))
        self.assertIn(str(s.getsockname()), repr(s))
    # Leaving the with-block closed the socket.
    self.assertIn('[closed]', repr(s))
    self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
    """The raw _socket.socket repr lists fd/family/type/proto, with
    fd=-1 after the socket is closed."""
    s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
    try:
        expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
                    % (s.fileno(), s.family, s.type, s.proto))
        self.assertEqual(repr(s), expected)
    finally:
        s.close()
    expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
                % (s.family, s.type, s.proto))
    self.assertEqual(repr(s), expected)
def test_weakref(self):
    """A weak proxy to a socket dies once the socket is collected."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        p = proxy(s)
        self.assertEqual(p.fileno(), s.fileno())
    # Drop the last strong reference; the proxy must become dead.
    s = None
    try:
        p.fileno()
    except ReferenceError:
        pass
    else:
        self.fail('Socket proxy still exists')
def testSocketError(self):
    """socket.herror and socket.gaierror are raisable OSError subclasses."""
    msg = "Error raising socket exception (%s)."
    # Each exception must be raisable and caught as OSError.
    for label, exc_type in (('OSError', OSError),
                            ('socket.herror', socket.herror),
                            ('socket.gaierror', socket.gaierror)):
        with self.assertRaises(OSError, msg=msg % label):
            raise exc_type
def testSendtoErrors(self):
    """sendto() reports argument errors instead of masking them (#10169)."""
    # Testing that sendto doesn't mask failures. See #10169.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.addCleanup(s.close)
    s.bind(('', 0))
    sockname = s.getsockname()
    # 2 args
    with self.assertRaises(TypeError) as cm:
        s.sendto('\u2620', sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'str'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(5j, sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'complex'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', None)
    self.assertIn('not NoneType',str(cm.exception))
    # 3 args
    with self.assertRaises(TypeError) as cm:
        s.sendto('\u2620', 0, sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'str'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(5j, 0, sockname)
    self.assertEqual(str(cm.exception),
                     "a bytes-like object is required, not 'complex'")
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', 0, None)
    self.assertIn('not NoneType', str(cm.exception))
    # The flags argument must be an integer.
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', 'bar', sockname)
    self.assertIn('an integer is required', str(cm.exception))
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', None, None)
    self.assertIn('an integer is required', str(cm.exception))
    # wrong number of args
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo')
    self.assertIn('(1 given)', str(cm.exception))
    with self.assertRaises(TypeError) as cm:
        s.sendto(b'foo', 0, sockname, 4)
    self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
    """The socket module must expose the core address/type constants."""
    # A missing attribute raises AttributeError and fails the test,
    # exactly like the bare attribute references it replaces.
    names = ['AF_INET', 'SOCK_STREAM', 'SOCK_DGRAM', 'SOCK_RAW',
             'SOCK_RDM', 'SOCK_SEQPACKET', 'SOL_SOCKET', 'SO_REUSEADDR']
    if socket.has_ipv6:
        names.insert(1, 'AF_INET6')
    for name in names:
        getattr(socket, name)
def testCrucialIpProtoConstants(self):
    """Core IPPROTO_* constants must be present (IPV6 only with IPv6)."""
    for name in ('IPPROTO_TCP', 'IPPROTO_UDP'):
        getattr(socket, name)
    if socket.has_ipv6:
        getattr(socket, 'IPPROTO_IPV6')
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
    """Windows builds must expose the Windows-only IPPROTO_* constants."""
    for name in ('IPPROTO_ICLFXBM', 'IPPROTO_ST', 'IPPROTO_CBT',
                 'IPPROTO_IGP', 'IPPROTO_RDP', 'IPPROTO_PGM',
                 'IPPROTO_L2TP', 'IPPROTO_SCTP'):
        getattr(socket, name)
def testHostnameRes(self):
    """gethostbyname/gethostbyaddr/getfqdn agree about the local host."""
    # Testing hostname resolution mechanisms
    hostname = socket.gethostname()
    try:
        ip = socket.gethostbyname(hostname)
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
    try:
        hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
    except OSError:
        # Probably a similar problem as above; skip this test
        self.skipTest('name lookup failure')
    # The FQDN must be one of the names the resolver reported.
    all_host_names = [hostname, hname] + aliases
    fqhn = socket.getfqdn(ip)
    if not fqhn in all_host_names:
        self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
    """gethostbyname() is the identity on literal IPv4 addresses."""
    for literal in (support.HOSTv4, '10.0.0.1', '255.255.255.255'):
        self.assertEqual(socket.gethostbyname(literal), literal)
    # we don't test support.HOSTv6 because there's a chance it doesn't have
    # a matching name entry (e.g. 'ip6-localhost')
    self.assertIn(support.HOSTv4,
                  socket.gethostbyaddr(support.HOSTv4)[2])
def test_host_resolution_bad_address(self):
    """Malformed IP literals make gethostbyname/gethostbyaddr raise OSError."""
    # These are all malformed IP addresses and expected not to resolve to
    # any result.  But some ISPs, e.g. AWS, may successfully resolve these
    # IPs.
    explanation = (
        "resolving an invalid IP address did not raise OSError; "
        "can be caused by a broken DNS server"
    )
    for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
                 '1:1:1:1:1:1:1:1:1']:
        with self.assertRaises(OSError, msg=addr):
            socket.gethostbyname(addr)
        with self.assertRaises(OSError, msg=explanation):
            socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
    """sethostname() round-trips with gethostname() (requires root)."""
    oldhn = socket.gethostname()
    try:
        socket.sethostname('new')
    except OSError as e:
        if e.errno == errno.EPERM:
            # Changing the hostname is a privileged operation.
            self.skipTest("test should be run as root")
        else:
            raise
    try:
        # running test as root!
        self.assertEqual(socket.gethostname(), 'new')
        # Should work with bytes objects too
        socket.sethostname(b'bar')
        self.assertEqual(socket.gethostname(), 'bar')
    finally:
        # Always restore the machine's original hostname.
        socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                     'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
    """Every if_nameindex() entry round-trips through
    if_nametoindex() and if_indextoname()."""
    for idx, ifname in socket.if_nameindex():
        self.assertIsInstance(idx, int)
        self.assertIsInstance(ifname, str)
        # interface indices are non-zero integers
        self.assertGreater(idx, 0)
        roundtrip_idx = socket.if_nametoindex(ifname)
        self.assertIsInstance(roundtrip_idx, int)
        self.assertEqual(idx, roundtrip_idx)
        roundtrip_name = socket.if_indextoname(idx)
        self.assertIsInstance(roundtrip_name, str)
        self.assertEqual(ifname, roundtrip_name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                     'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
    """Nonexistent interfaces raise OSError; wrong argument types
    raise TypeError."""
    # test nonexistent interface index/name
    for func, missing in ((socket.if_indextoname, 0),
                          (socket.if_nametoindex, '_DEADBEEF')):
        self.assertRaises(OSError, func, missing)
    # test with invalid values
    for func, wrong_type in ((socket.if_nametoindex, 0),
                             (socket.if_indextoname, '_DEADBEEF')):
        self.assertRaises(TypeError, func, wrong_type)
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
                     'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
    """getnameinfo() must not leak a reference on a TypeError path."""
    # Testing reference count for getnameinfo
    try:
        # On some versions, this loses a reference
        orig = sys.getrefcount(__name__)
        # Passing a str instead of a tuple raises TypeError.
        socket.getnameinfo(__name__,0)
    except TypeError:
        if sys.getrefcount(__name__) != orig:
            self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
    """getnameinfo() with a bad 4-tuple must not crash the interpreter."""
    # Making sure getnameinfo doesn't crash the interpreter
    try:
        # On some versions, this crashes the interpreter.
        socket.getnameinfo(('x', 0, 0, 0), 0)
    except OSError:
        # An error is fine; only a crash would be a bug.
        pass
def testNtoH(self):
    """htons/ntohs and htonl/ntohl are self-inverse on the low 16/32
    bits, and overflow on values that do not fit in 32 bits."""
    widths = {socket.htonl: 32, socket.ntohl: 32,
              socket.htons: 16, socket.ntohs: 16}
    for conv, width in widths.items():
        low_mask = (1 << width) - 1
        for value in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
            masked = value & low_mask
            # Applying the conversion twice must restore the low bits.
            self.assertEqual(masked, conv(conv(masked)) & low_mask)
        # An all-ones pattern stays all-ones under byte swapping.
        self.assertEqual(conv(low_mask) & low_mask, low_mask)
        self.assertRaises(OverflowError, conv, 1 << 34)
@support.cpython_only
def testNtoHErrors(self):
    """ntohs/htons/ntohl/htonl accept in-range values, overflow on
    out-of-range ones, and warn on deprecated 16-bit overflow."""
    import _testcapi
    s_good_values = [0, 1, 2, 0xffff]
    l_good_values = s_good_values + [0xffffffff]
    l_bad_values = [-1, -2, 1<<32, 1<<1000]
    s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
                                   _testcapi.INT_MAX + 1]
    # Values that fit in a C int but not in 16 bits are deprecated
    # for the 16-bit conversions rather than an immediate error.
    s_deprecated_values = [1<<16, _testcapi.INT_MAX]
    for k in s_good_values:
        socket.ntohs(k)
        socket.htons(k)
    for k in l_good_values:
        socket.ntohl(k)
        socket.htonl(k)
    for k in s_bad_values:
        self.assertRaises(OverflowError, socket.ntohs, k)
        self.assertRaises(OverflowError, socket.htons, k)
    for k in l_bad_values:
        self.assertRaises(OverflowError, socket.ntohl, k)
        self.assertRaises(OverflowError, socket.htonl, k)
    for k in s_deprecated_values:
        self.assertWarns(DeprecationWarning, socket.ntohs, k)
        self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
    """getservbyname() and getservbyport() are consistent for a
    well-known service, for both tcp and udp."""
    eq = self.assertEqual
    # Find one service that exists, then check all the related interfaces.
    # I've ordered this by protocols that have both a tcp and udp
    # protocol, at least for modern Linuxes.
    if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
            or sys.platform in ('linux', 'darwin')):
        # avoid the 'echo' service on this platform, as there is an
        # assumption breaking non-standard port/protocol entry
        services = ('daytime', 'qotd', 'domain')
    else:
        services = ('echo', 'daytime', 'domain')
    for service in services:
        try:
            port = socket.getservbyname(service, 'tcp')
            break
        except OSError:
            pass
    else:
        # No service from the candidate list could be resolved.
        raise OSError
    # Try same call with optional protocol omitted
    # Issue #26936: Android getservbyname() was broken before API 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        port2 = socket.getservbyname(service)
        eq(port, port2)
    # Try udp, but don't barf if it doesn't exist
    try:
        udpport = socket.getservbyname(service, 'udp')
    except OSError:
        udpport = None
    else:
        eq(udpport, port)
    # Now make sure the lookup by port returns the same service name
    # Issue #26936: Android getservbyport() is broken.
    if not support.is_android:
        eq(socket.getservbyport(port2), service)
    eq(socket.getservbyport(port, 'tcp'), service)
    if udpport is not None:
        eq(socket.getservbyport(udpport, 'udp'), service)
    # Make sure getservbyport does not accept out of range ports.
    self.assertRaises(OverflowError, socket.getservbyport, -1)
    self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
    """setdefaulttimeout() propagates to new sockets and validates input."""
    # Testing default timeout
    # The default timeout should initially be None
    self.assertEqual(socket.getdefaulttimeout(), None)
    with socket.socket() as s:
        self.assertEqual(s.gettimeout(), None)
    # Set the default timeout to 10, and see if it propagates
    with socket_setdefaulttimeout(10):
        self.assertEqual(socket.getdefaulttimeout(), 10)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), 10)
        # Reset the default timeout to None, and see if it propagates
        socket.setdefaulttimeout(None)
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), None)
    # Check that setting it to an invalid value raises ValueError
    self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
    # Check that setting it to an invalid type raises TypeError
    self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
                     'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
    """inet_aton() always returns exactly four bytes.

    Regression test for issue1008086 and issue767150.
    """
    self.assertEqual(socket.inet_aton('0.0.0.0'), b'\x00' * 4)
    self.assertEqual(socket.inet_aton('255.255.255.255'), b'\xff' * 4)
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv4toString(self):
    """inet_aton() and inet_pton(AF_INET, ...) parse and reject the
    same IPv4 literals."""
    from socket import inet_aton as f, inet_pton, AF_INET
    g = lambda a: inet_pton(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )
    self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', f('192.168.127.12'))
    self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
    self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid(f, '0.0.0.')
        assertInvalid(f, '300.0.0.0')
        assertInvalid(f, 'a.0.0.0')
        assertInvalid(f, '1.2.3.4.5')
        assertInvalid(f, '::1')
    self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', g('192.168.127.12'))
    self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
    assertInvalid(g, '0.0.0.')
    assertInvalid(g, '300.0.0.0')
    assertInvalid(g, 'a.0.0.0')
    assertInvalid(g, '1.2.3.4.5')
    assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv6toString(self):
    """inet_pton(AF_INET6, ...) packs valid IPv6 literals (including
    embedded IPv4 forms) and rejects malformed ones."""
    try:
        from socket import inet_pton, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_pton(AF_INET6, '::')
        except OSError as e:
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_pton(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )
    self.assertEqual(b'\x00' * 16, f('::'))
    self.assertEqual(b'\x00' * 16, f('0::0'))
    self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
    self.assertEqual(
        b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
        f('fc00:e968:6179::de52:7100')
    )
    self.assertEqual(
        b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
        f('fc00:db20:35b:7399::5:0:254:2')
    )
    self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
    assertInvalid('0x20::')
    assertInvalid(':::')
    assertInvalid('::0::')
    assertInvalid('1::abc::')
    assertInvalid('1::abc::def')
    assertInvalid('1:2:3:4:5:6')
    assertInvalid('1:2:3:4:5:6:')
    assertInvalid('1:2:3:4:5:6:7:8:0')
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:')

    # Mixed IPv6/IPv4 notation.
    self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
        f('::254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
        f('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
        f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
    )
    assertInvalid('255.254.253.252')
    assertInvalid('1::260.2.3.0')
    assertInvalid('1::0.be.e.0')
    assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
    assertInvalid('::1.2.3.4:0')
    assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv4(self):
    """inet_ntoa() and inet_ntop(AF_INET, ...) format 4-byte packed
    addresses and reject wrong-sized input."""
    from socket import inet_ntoa as f, inet_ntop, AF_INET
    g = lambda a: inet_ntop(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )
    self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
    self.assertEqual('192.168.127.12', f(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
    self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
    # Anything other than exactly 4 bytes is rejected.
    assertInvalid(f, b'\x00' * 3)
    assertInvalid(f, b'\x00' * 5)
    assertInvalid(f, b'\x00' * 16)
    # bytes-like input (bytearray) is accepted too.
    self.assertEqual('192.168.127.12', f(bytearray(b'\xaa\x55\xaa\x55')))
    self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
    self.assertEqual('192.168.127.12', g(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
    assertInvalid(g, b'\x00' * 3)
    assertInvalid(g, b'\x00' * 5)
    assertInvalid(g, b'\x00' * 16)
    self.assertEqual('192.168.127.12', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv6(self):
    """inet_ntop(AF_INET6, ...) formats 16-byte packed addresses and
    rejects wrong-sized input."""
    try:
        from socket import inet_ntop, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_ntop(AF_INET6, b'\x00' * 16)
        except OSError as e:
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_ntop(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )
    self.assertEqual('::', f(b'\x00' * 16))
    self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
    self.assertEqual(
        'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
        f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
    )
    # bytes-like input (bytearray) is accepted too.
    self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
    # Anything other than exactly 16 bytes is rejected.
    assertInvalid(b'\x12' * 15)
    assertInvalid(b'\x12' * 17)
    assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
    """getsockname() reflects the bound address and port."""
    # Testing getsockname()
    port = support.find_unused_port()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.bind(("0.0.0.0", port))
    name = sock.getsockname()
    # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
    # it reasonable to get the host's addr in addition to 0.0.0.0.
    # At least for eCos.  This is required for the S/390 to pass.
    try:
        my_ip_addr = socket.gethostbyname(socket.gethostname())
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
    self.assertEqual(name[1], port)
def testGetSockOpt(self):
    """A freshly created socket starts with SO_REUSEADDR disabled."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    # assertEqual replaces the original double-negative
    # assertFalse(reuse != 0, ...): same pass/fail condition, but the
    # failure report now includes the actual option value.
    self.assertEqual(reuse, 0, "initial mode is reuse")
def testSetSockOpt(self):
    """setsockopt(SO_REUSEADDR, 1) is observable via getsockopt()."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    # assertNotEqual replaces the original double-negative
    # assertFalse(reuse == 0, ...): same pass/fail condition with a
    # clearer intent and a more informative failure message.
    self.assertNotEqual(reuse, 0, "failed to set reuse mode")
def testSendAfterClose(self):
    """send() on a closed socket (with a timeout set) raises OSError."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with sock:
        sock.settimeout(1)
    # The with-block closed the socket; sending must now fail.
    self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
    """close() raises EBADF/ENOTSOCK when the fd was already closed
    by another socket object sharing the descriptor."""
    sock = socket.socket()
    sock.bind((socket._LOCALHOST, 0))
    # Wrap the same fd in a second socket object and close it; the
    # original object now holds a stale descriptor.
    socket.socket(fileno=sock.fileno()).close()
    try:
        sock.close()
    except OSError as err:
        # Winsock apparently raises ENOTSOCK
        self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
    else:
        self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
    """The .family, .type and .proto attributes reflect constructor args."""
    # testing .family, .type and .protocol
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        self.assertEqual(sock.family, socket.AF_INET)
        if hasattr(socket, 'SOCK_CLOEXEC'):
            # Platforms with SOCK_CLOEXEC may fold the flag into .type.
            self.assertIn(sock.type,
                          (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                           socket.SOCK_STREAM))
        else:
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
    """bind() rejects out-of-range port numbers with OverflowError."""
    sock = socket.socket()
    self.addCleanup(sock.close)
    port = support.find_unused_port()
    big_port = port + 65536
    neg_port = port - 65536
    self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
    self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))

    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = support.find_unused_port()
        try:
            sock.bind((HOST, port))
        except OSError as e:
            # Retry on EADDRINUSE, but give up after several attempts.
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
    """Windows sockets expose ioctl() and the SIO_*/RCVALL_* constants."""
    self.assertTrue(hasattr(socket.socket, 'ioctl'))
    for const_name in ('SIO_RCVALL', 'RCVALL_ON', 'RCVALL_OFF',
                       'SIO_KEEPALIVE_VALS'):
        self.assertTrue(hasattr(socket, const_name))
    sock = socket.socket()
    self.addCleanup(sock.close)
    # An unknown control code is rejected; a valid one is accepted.
    self.assertRaises(ValueError, sock.ioctl, -1, None)
    sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                     'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
    """ioctl(SIO_LOOPBACK_FAST_PATH) accepts a bool and rejects None."""
    s = socket.socket()
    self.addCleanup(s.close)
    try:
        s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
    except OSError as exc:
        WSAEOPNOTSUPP = 10045
        if exc.winerror == WSAEOPNOTSUPP:
            # Fixed the ungrammatical skip reason ("doesn't implemented").
            self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                          "not implemented in this Windows version")
        raise
    self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
    def testGetaddrinfo(self):
        """Exercise getaddrinfo(): 5-tuple result shape, the accepted host
        and port argument forms, positional vs. keyword arguments, and a
        few historical bug regressions (issues 1282647, 6697, 17269,
        26936)."""
        try:
            socket.getaddrinfo('localhost', 80)
        except socket.gaierror as err:
            if err.errno == socket.EAI_SERVICE:
                # see http://bugs.python.org/issue1282647
                self.skipTest("buggy libc version")
            raise
        # len of every sequence is supposed to be == 5
        for info in socket.getaddrinfo(HOST, None):
            self.assertEqual(len(info), 5)
        # host can be a domain name, a string representation of an
        # IPv4/v6 address or None
        socket.getaddrinfo('localhost', 80)
        socket.getaddrinfo('127.0.0.1', 80)
        socket.getaddrinfo(None, 80)
        if support.IPV6_ENABLED:
            socket.getaddrinfo('::1', 80)
        # port can be a string service name such as "http", a numeric
        # port number or None
        # Issue #26936: Android getaddrinfo() was broken before API level 23.
        if (not hasattr(sys, 'getandroidapilevel') or
                sys.getandroidapilevel() >= 23):
            socket.getaddrinfo(HOST, "http")
        socket.getaddrinfo(HOST, 80)
        socket.getaddrinfo(HOST, None)
        # test family and socktype filters
        infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
        for family, type, _, _, _ in infos:
            self.assertEqual(family, socket.AF_INET)
            self.assertEqual(str(family), 'AddressFamily.AF_INET')
            self.assertEqual(type, socket.SOCK_STREAM)
            self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
        infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        for _, socktype, _, _, _ in infos:
            self.assertEqual(socktype, socket.SOCK_STREAM)
        # test proto and flags arguments
        socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        # a server willing to support both IPv4 and IPv6 will
        # usually do this
        socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
        # test keyword arguments
        a = socket.getaddrinfo(HOST, None)
        b = socket.getaddrinfo(host=HOST, port=None)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, socket.AF_INET)
        b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                               socket.AI_PASSIVE)
        b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                               type=socket.SOCK_STREAM, proto=0,
                               flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        # Issue #6697.
        self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        # Issue 17269: test workaround for OS X platform bug segfault
        if hasattr(socket, 'AI_NUMERICSERV'):
            try:
                # The arguments here are undefined and the call may succeed
                # or fail.  All we care here is that it doesn't segfault.
                socket.getaddrinfo("localhost", None, 0, 0, 0,
                                   socket.AI_NUMERICSERV)
            except socket.gaierror:
                pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
    @unittest.skipUnless(support.is_resource_enabled('network'),
                         'network is not enabled')
    def test_idna(self):
        # Resolve an internationalized (non-ASCII, IDNA-encoded) domain via
        # the name-resolution entry points.  Requires real DNS access.
        # Check for internet access before running test
        # (issue #12804, issue #25138).
        with support.transient_internet('python.org'):
            socket.gethostbyname('python.org')
        # these should all be successful
        domain = 'испытание.pythontest.net'
        socket.gethostbyname(domain)
        socket.gethostbyname_ex(domain)
        socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
        # have a reverse entry yet
        # socket.gethostbyaddr('испытание.python.org')
    def check_sendall_interrupted(self, with_timeout):
        """Check sendall() behaviour when interrupted by SIGALRM.

        Without a timeout, an exception raised by the signal handler must
        propagate out of sendall(); with a timeout set, the interrupted
        send must instead fail with socket.timeout.
        """
        # socketpair() is not strictly required, but it makes things easier.
        if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
            self.skipTest("signal.alarm and socket.socketpair required for this test")
        # Our signal handlers clobber the C errno by calling a math function
        # with an invalid domain value.
        def ok_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
        def raising_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
            1 // 0
        c, s = socket.socketpair()
        old_alarm = signal.signal(signal.SIGALRM, raising_handler)
        try:
            if with_timeout:
                # Just above the one second minimum for signal.alarm
                c.settimeout(1.5)
            with self.assertRaises(ZeroDivisionError):
                signal.alarm(1)
                c.sendall(b"x" * support.SOCK_MAX_SIZE)
            if with_timeout:
                # Second round: a non-raising handler, so the interrupted
                # sendall() must surface as socket.timeout instead.
                signal.signal(signal.SIGALRM, ok_handler)
                signal.alarm(1)
                self.assertRaises(socket.timeout, c.sendall,
                                  b"x" * support.SOCK_MAX_SIZE)
        finally:
            # Always cancel the alarm and restore the previous handler.
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old_alarm)
            c.close()
            s.close()
    def test_sendall_interrupted(self):
        # Blocking socket: the handler's exception must propagate.
        self.check_sendall_interrupted(False)
    def test_sendall_interrupted_with_timeout(self):
        # Socket with a timeout: the interruption surfaces as socket.timeout.
        self.check_sendall_interrupted(True)
    def test_dealloc_warn(self):
        # Dropping the last reference to an open socket must emit a
        # ResourceWarning mentioning the socket's repr.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r = repr(sock)
        with self.assertWarns(ResourceWarning) as cm:
            sock = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))
        # An open socket file object gets dereferenced after the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        f = sock.makefile('rb')
        r = repr(sock)
        sock = None
        support.gc_collect()
        with self.assertWarns(ResourceWarning):
            f = None
            support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
    def test_socket_close(self):
        # socket.close(fd) closes the descriptor out from under the socket
        # object: further use of the object fails, and so does its own
        # close().  Bad arguments are rejected as well.
        sock = socket.socket()
        try:
            sock.bind((HOST, 0))
            socket.close(sock.fileno())
            with self.assertRaises(OSError):
                sock.listen(1)
        finally:
            with self.assertRaises(OSError):
                # sock.close() fails with EBADF
                sock.close()
        with self.assertRaises(TypeError):
            socket.close(None)
        with self.assertRaises(OSError):
            socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
    @support.cpython_only
    def test_listen_backlog_overflow(self):
        # Issue 15989
        # A backlog larger than a C int must raise OverflowError rather
        # than being silently truncated.
        import _testcapi
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
            srv.bind((HOST, 0))
            self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    def test_flowinfo(self):
        # The IPv6 flowinfo field is range-checked: out-of-range values in
        # a sockaddr tuple raise OverflowError.
        self.assertRaises(OverflowError, socket.getnameinfo,
                          (support.HOSTv6, 0, 0xffffffff), 0)
        with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
            self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'fdf8:f53e:61e4::18:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('fdf8:f53e:61e4::18:c0:face:8d', 1234, 0, 0))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        hasattr(socket, 'if_nameindex'),
        'if_nameindex is not supported')
    @unittest.skipIf(AIX, 'Symbolic scope id does not work')
    def test_getaddrinfo_ipv6_scopeid_symbolic(self):
        # A symbolic '%interface' scope suffix is resolved to the interface
        # index and stripped from the returned numeric address.
        # Just pick up any network interface (Linux, Mac OS X)
        (ifindex, test_interface) = socket.if_nameindex()[0]
        ((*_, sockaddr),) = socket.getaddrinfo(
            'fdf8:f53e:61e4::18:c0:face:8D%' + test_interface,
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        # Note missing interface name part in IPv6 address
        self.assertEqual(sockaddr, ('fdf8:f53e:61e4::18:c0:face:8d', 1234, 0, ifindex))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or undocumented')
    def test_getaddrinfo_ipv6_scopeid_numeric(self):
        # A numeric '%N' scope suffix is carried through as the scope id.
        # Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
        ifindex = 42
        ((*_, sockaddr),) = socket.getaddrinfo(
            'fdf8:f53e:61e4::18:c0:face:8D%' + str(ifindex),
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        # Note missing interface name part in IPv6 address
        self.assertEqual(sockaddr, ('fdf8:f53e:61e4::18:c0:face:8d', 1234, 0, ifindex))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        hasattr(socket, 'if_nameindex'),
        'if_nameindex is not supported')
    @unittest.skipIf(AIX, 'Symbolic scope id does not work')
    def test_getnameinfo_ipv6_scopeid_symbolic(self):
        # getnameinfo() renders a numeric scope id back as '%interface'.
        # Just pick up any network interface.
        (ifindex, test_interface) = socket.if_nameindex()[0]
        sockaddr = ('fdf8:f53e:61e4::18:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
        nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
        self.assertEqual(nameinfo, ('fdf8:f53e:61e4::18:c0:face:8d%' + test_interface, '1234'))
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless( sys.platform == 'win32',
                          'Numeric scope id does not work or undocumented')
    def test_getnameinfo_ipv6_scopeid_numeric(self):
        # getnameinfo() renders the scope id numerically as '%N'.
        # Also works on Linux (undocumented), but does not work on Mac OS X
        # Windows and Linux allow nonexistent interface numbers here.
        ifindex = 42
        sockaddr = ('fdf8:f53e:61e4::18:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
        nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
        self.assertEqual(nameinfo, ('fdf8:f53e:61e4::18:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
    def test_unknown_socket_family_repr(self):
        # Test that when created with a family that's not one of the known
        # AF_*/SOCK_* constants, socket.family just returns the number.
        #
        # To do this we fool socket.socket into believing it already has an
        # open fd because on this path it doesn't actually verify the family and
        # type and populates the socket object.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        fd = sock.detach()
        # One past the largest known family/kind constant is guaranteed
        # not to map to an enum member.
        unknown_family = max(socket.AddressFamily.__members__.values()) + 1
        unknown_type = max(
            kind
            for name, kind in socket.SocketKind.__members__.items()
            if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
        ) + 1
        with socket.socket(
                family=unknown_family, type=unknown_type, proto=23,
                fileno=fd) as s:
            self.assertEqual(s.family, unknown_family)
            self.assertEqual(s.type, unknown_type)
            # some OS like macOS ignore proto
            self.assertIn(s.proto, {0, 23})
    @unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
    def test__sendfile_use_sendfile(self):
        # Error paths of the private _sendfile_use_sendfile() helper:
        # a closed fd, an absurdly large fd, and a non-integer fd.
        class File:
            # Minimal fileno-only stand-in for a file object.
            def __init__(self, fd):
                self.fd = fd
            def fileno(self):
                return self.fd
        with socket.socket() as sock:
            fd = os.open(os.curdir, os.O_RDONLY)
            os.close(fd)
            # Closed fd: the helper must give up (fall back to plain send).
            with self.assertRaises(socket._GiveupOnSendfile):
                sock._sendfile_use_sendfile(File(fd))
            with self.assertRaises(OverflowError):
                sock._sendfile_use_sendfile(File(2**1000))
            with self.assertRaises(TypeError):
                sock._sendfile_use_sendfile(File(None))
    def _test_socket_fileno(self, s, family, stype):
        """Helper: rewrap socket *s* via socket(fileno=...) and check the
        new object reports the same family, type and descriptor."""
        self.assertEqual(s.family, family)
        self.assertEqual(s.type, stype)
        fd = s.fileno()
        s2 = socket.socket(fileno=fd)
        self.addCleanup(s2.close)
        # detach old fd to avoid double close
        s.detach()
        self.assertEqual(s2.family, family)
        self.assertEqual(s2.type, stype)
        self.assertEqual(s2.fileno(), fd)
    def test_socket_fileno(self):
        # Run the fileno round-trip helper over every address family
        # available on this platform: INET stream/dgram, INET6, AF_UNIX.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind((support.HOST, 0))
        self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(socket, "SOCK_DGRAM"):
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.addCleanup(s.close)
            s.bind((support.HOST, 0))
            self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
        if support.IPV6_ENABLED:
            s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            self.addCleanup(s.close)
            s.bind((support.HOSTv6, 0, 0, 0))
            self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
        if hasattr(socket, "AF_UNIX"):
            tmpdir = tempfile.mkdtemp()
            self.addCleanup(shutil.rmtree, tmpdir)
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.addCleanup(s.close)
            s.bind(os.path.join(tmpdir, 'socket'))
            self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaisesRegex(TypeError, "integer argument expected"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaisesRegex(TypeError, "integer is required"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
    @unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
    def test_socket_fileno_rejects_negative(self):
        # On POSIX, any negative fileno (not just -1) must raise ValueError.
        with self.assertRaisesRegex(ValueError, "negative file descriptor"):
            socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
    def test_socket_fileno_requires_valid_fd(self):
        # A fileno that is not an open descriptor fails with EBADF
        # (WSAENOTSOCK on Windows), with or without explicit family/type.
        WSAENOTSOCK = 10038
        with self.assertRaises(OSError) as cm:
            socket.socket(fileno=support.make_bad_fd())
        self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
        with self.assertRaises(OSError) as cm:
            socket.socket(
                socket.AF_INET,
                socket.SOCK_STREAM,
                fileno=support.make_bad_fd())
        self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
    def test_socket_fileno_requires_socket_fd(self):
        # A fileno that is open but not a socket (a regular file here)
        # fails with ENOTSOCK.
        with tempfile.NamedTemporaryFile() as afile:
            with self.assertRaises(OSError):
                socket.socket(fileno=afile.fileno())
            with self.assertRaises(OSError) as cm:
                socket.socket(
                    socket.AF_INET,
                    socket.SOCK_STREAM,
                    fileno=afile.fileno())
            self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
    """Smoke tests for SocketCAN support: required constants exist,
    CAN_RAW/CAN_BCM sockets can be created and bound, and CAN_RAW
    socket options round-trip through setsockopt()/getsockopt()."""
    def testCrucialConstants(self):
        # These must exist on any SocketCAN-enabled build.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_RAW
    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCMConstants(self):
        socket.CAN_BCM
        # opcodes
        socket.CAN_BCM_TX_SETUP     # create (cyclic) transmission task
        socket.CAN_BCM_TX_DELETE    # remove (cyclic) transmission task
        socket.CAN_BCM_TX_READ      # read properties of (cyclic) transmission task
        socket.CAN_BCM_TX_SEND      # send one CAN frame
        socket.CAN_BCM_RX_SETUP     # create RX content filter subscription
        socket.CAN_BCM_RX_DELETE    # remove RX content filter subscription
        socket.CAN_BCM_RX_READ      # read properties of RX content filter subscription
        socket.CAN_BCM_TX_STATUS    # reply to TX_READ request
        socket.CAN_BCM_TX_EXPIRED   # notification on performed transmissions (count=0)
        socket.CAN_BCM_RX_STATUS    # reply to RX_READ request
        socket.CAN_BCM_RX_TIMEOUT   # cyclic message is absent
        socket.CAN_BCM_RX_CHANGED   # updated CAN frame (detected content change)
    def testCreateSocket(self):
        # A raw CAN socket can be created and closed cleanly.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass
    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testCreateBCMSocket(self):
        # A broadcast-manager (BCM) CAN socket can be created and closed.
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
            pass
    def testBindAny(self):
        # An empty interface name binds to "any" CAN interface.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.bind(('', ))
    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            self.assertRaisesRegex(OSError, 'interface name too long',
                                   s.bind, ('x' * 1024,))
    @unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
                         'socket.CAN_RAW_LOOPBACK required for this test.')
    def testLoopback(self):
        # CAN_RAW_LOOPBACK can be toggled and read back.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            for loopback in (0, 1):
                s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
                             loopback)
                self.assertEqual(loopback,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
    @unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
                         'socket.CAN_RAW_FILTER required for this test.')
    def testFilter(self):
        # A packed (can_id, can_mask) filter round-trips through the
        # CAN_RAW_FILTER option; a bytearray is accepted too.
        can_id, can_mask = 0x200, 0x700
        can_filter = struct.pack("=II", can_id, can_mask)
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
            self.assertEqual(can_filter,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
    """End-to-end SocketCAN tests over a real CAN interface.

    Each testX method checks the receiving side while its _testX
    counterpart (run by the threaded test harness) drives the sender.
    """
    def __init__(self, methodName='runTest'):
        ThreadedCANSocketTest.__init__(self, methodName=methodName)
    @classmethod
    def build_can_frame(cls, can_id, data):
        """Build a CAN frame."""
        # The data field is zero-padded to the fixed 8-byte frame payload;
        # can_dlc records the actual data length.
        can_dlc = len(data)
        data = data.ljust(8, b'\x00')
        return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
    @classmethod
    def dissect_can_frame(cls, frame):
        """Dissect a CAN frame."""
        # Inverse of build_can_frame(): trim the payload back to can_dlc.
        can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
        return (can_id, can_dlc, data[:can_dlc])
    def testSendFrame(self):
        # Receive one frame and check payload and source address.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        self.assertEqual(addr[0], self.interface)
        self.assertEqual(addr[1], socket.AF_CAN)
    def _testSendFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
        self.cli.send(self.cf)
    def testSendMaxFrame(self):
        # A frame with the full 8-byte payload arrives intact.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
    def _testSendMaxFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x07' * 8)
        self.cli.send(self.cf)
    def testSendMultiFrames(self):
        # Two frames sent back-to-back arrive in order.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf1, cf)
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf2, cf)
    def _testSendMultiFrames(self):
        self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
        self.cli.send(self.cf1)
        self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
        self.cli.send(self.cf2)
    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def _testBCM(self):
        # Client side of testBCM: receive the frame relayed by the
        # broadcast manager and check it matches what was submitted.
        cf, addr = self.cli.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        can_id, can_dlc, data = self.dissect_can_frame(cf)
        self.assertEqual(self.can_id, can_id)
        self.assertEqual(self.data, data)
    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCM(self):
        # Submit a one-shot TX_SEND job to the broadcast manager: a
        # bcm_msg_head header followed by a single CAN frame.
        bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
        self.addCleanup(bcm.close)
        bcm.connect((self.interface,))
        self.can_id = 0x123
        self.data = bytes([0xc0, 0xff, 0xee])
        self.cf = self.build_can_frame(self.can_id, self.data)
        opcode = socket.CAN_BCM_TX_SEND
        flags = 0
        count = 0
        ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
        bcm_can_id = 0x0222
        nframes = 1
        assert len(self.cf) == 16
        header = struct.pack(self.bcm_cmd_msg_fmt,
                    opcode,
                    flags,
                    count,
                    ival1_seconds,
                    ival1_usec,
                    ival2_seconds,
                    ival2_usec,
                    bcm_can_id,
                    nframes,
                    )
        header_plus_frame = header + self.cf
        bytes_sent = bcm.send(header_plus_frame)
        self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
    """Smoke tests for the CAN ISO-TP protocol: constants, socket
    creation, and bind() validation against the vcan0 interface."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tests bind against the virtual CAN interface "vcan0".
        self.interface = "vcan0"
    def testCrucialConstants(self):
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_ISOTP
        socket.SOCK_DGRAM
    def testCreateSocket(self):
        # A plain raw CAN socket still works on an ISOTP-enabled build.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass
    @unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
                         'socket.CAN_ISOTP required for this test.')
    def testCreateISOTPSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            pass
    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            with self.assertRaisesRegex(OSError, 'interface name too long'):
                s.bind(('x' * 1024, 1, 2))
    def testBind(self):
        # Bind with (interface, rx_addr, tx_addr); skip when vcan0 is absent.
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
                addr = self.interface, 0x123, 0x456
                s.bind(addr)
                self.assertEqual(s.getsockname(), addr)
        except OSError as e:
            if e.errno == errno.ENODEV:
                self.skipTest('network interface `%s` does not exist' %
                              self.interface)
            else:
                raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
    """Smoke tests for Reliable Datagram Sockets (RDS): constants,
    socket creation, and buffer-size socket options."""
    def testCrucialConstants(self):
        socket.AF_RDS
        socket.PF_RDS
    def testCreateSocket(self):
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            pass
    def testSocketBufferSize(self):
        # SO_RCVBUF/SO_SNDBUF must be settable on an RDS socket.
        bufsize = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
    """End-to-end RDS datagram tests.

    Each testX method checks the server-side receive while its _testX
    counterpart (run by the threaded test harness) drives the client.
    """
    def __init__(self, methodName='runTest'):
        ThreadedRDSSocketTest.__init__(self, methodName=methodName)
    def setUp(self):
        super().setUp()
        # Event available for cross-thread coordination between halves.
        self.evt = threading.Event()
    def testSendAndRecv(self):
        # A datagram arrives intact with the sender's address.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
        self.assertEqual(self.cli_addr, addr)
    def _testSendAndRecv(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))
    def testPeek(self):
        # MSG_PEEK returns the datagram without consuming it.
        data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
        self.assertEqual(self.data, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
    def _testPeek(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))
    @requireAttrs(socket.socket, 'recvmsg')
    def testSendAndRecvMsg(self):
        # sendmsg()/recvmsg() carry the payload end-to-end.
        data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
        self.assertEqual(self.data, data)
    @requireAttrs(socket.socket, 'sendmsg')
    def _testSendAndRecvMsg(self):
        self.data = b'hello ' * 10
        self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
    def testSendAndRecvMulti(self):
        # Two datagrams are delivered separately and in order.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data1, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data2, data)
    def _testSendAndRecvMulti(self):
        self.data1 = b'bacon'
        self.cli.sendto(self.data1, 0, (HOST, self.port))
        self.data2 = b'egg'
        self.cli.sendto(self.data2, 0, (HOST, self.port))
    def testSelect(self):
        # select() reports the server socket readable once data arrives.
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
    def _testSelect(self):
        self.data = b'select'
        self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
          'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
    """Smoke tests for Qualcomm IPC router (AF_QIPCRTR) sockets:
    creation, explicit bind, invalid bind, and auto-bind on connect."""
    def testCrucialConstants(self):
        socket.AF_QIPCRTR
    def testCreateSocket(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            pass
    def testUnbound(self):
        # Before bind/connect, the port part of the local name is 0.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertEqual(s.getsockname()[1], 0)
    def testBindSock(self):
        # Binding assigns a non-zero port.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            support.bind_port(s, host=s.getsockname()[0])
            self.assertNotEqual(s.getsockname()[1], 0)
    def testInvalidBindSock(self):
        # A negative node is rejected.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertRaises(OSError, support.bind_port, s, host=-2)
    def testAutoBindSock(self):
        # connect() implicitly binds to a non-zero port.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            s.connect((123, 123))
            self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
          'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
    """Smoke tests for AF_VSOCK sockets: constants, creation, and
    round-tripping the three SO_VM_SOCKETS_BUFFER_* options."""
    def testCrucialConstants(self):
        socket.AF_VSOCK
    def testVSOCKConstants(self):
        socket.SO_VM_SOCKETS_BUFFER_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
        socket.VMADDR_CID_ANY
        socket.VMADDR_PORT_ANY
        socket.VMADDR_CID_HOST
        socket.VM_SOCKETS_INVALID_VERSION
        socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
    def testCreateSocket(self):
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            pass
    def testSocketBufferSize(self):
        # Read the current min/default/max buffer sizes, double each,
        # and check the new values are reported back.
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            orig_max = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
            orig = s.getsockopt(socket.AF_VSOCK,
                                socket.SO_VM_SOCKETS_BUFFER_SIZE)
            orig_min = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
            self.assertEqual(orig_max * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
            self.assertEqual(orig * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_SIZE))
            self.assertEqual(orig_min * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
    """Basic TCP tests over a connected socket pair.

    Each testX method checks the server-side connection (cli_conn) while
    its _testX counterpart (run by the threaded test harness) drives the
    client side (serv_conn).
    """
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)
    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
    def _testRecv(self):
        self.serv_conn.send(MSG)
    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)
    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)
    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)
    def _testRecvFrom(self):
        self.serv_conn.send(MSG)
    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)
    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)
    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        msg = b''
        while 1:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read
        self.assertEqual(msg, b'f' * 2048)
    def _testSendAll(self):
        big_chunk = b'f' * 2048
        self.serv_conn.sendall(big_chunk)
    def testFromFd(self):
        # Testing fromfd()
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        self.assertIsInstance(sock, socket.socket)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)
    def _testFromFd(self):
        self.serv_conn.send(MSG)
    def testDup(self):
        # Testing dup()
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)
    def _testDup(self):
        self.serv_conn.send(MSG)
    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()
    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)
    # Server half for _testShutdown_overflow below: same receive logic as
    # testShutdown, but only on CPython (the overflow check is CPython-only).
    testShutdown_overflow = support.cpython_only(testShutdown)
    @support.cpython_only
    def _testShutdown_overflow(self):
        import _testcapi
        self.serv_conn.send(MSG)
        # Issue 15989
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          2 + (_testcapi.UINT_MAX + 1))
        self.serv_conn.shutdown(2)
    def testDetach(self):
        # Testing detach()
        fileno = self.cli_conn.fileno()
        f = self.cli_conn.detach()
        self.assertEqual(f, fileno)
        # cli_conn cannot be used anymore...
        self.assertTrue(self.cli_conn._closed)
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
        self.cli_conn.close()
        # ...but we can create another socket using the (still open)
        # file descriptor
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)
    def _testDetach(self):
        self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
    """Basic UDP tests.

    Each testX method checks the server socket while its _testX
    counterpart (run by the threaded test harness) drives the client.
    """
    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)
    def testSendtoAndRecv(self):
        # Testing sendto() and Recv() over UDP
        msg = self.serv.recv(len(MSG))
        self.assertEqual(msg, MSG)
    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))
    def testRecvFrom(self):
        # Testing recvfrom() over UDP
        msg, addr = self.serv.recvfrom(len(MSG))
        self.assertEqual(msg, MSG)
    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))
    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)
    def _testRecvFromNegative(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
    """Base class for sendmsg()/recvmsg() tests.

    Subclasses are expected to provide cli_sock/serv_sock (the two
    ends of the connection) and cli_addr/serv_addr; the generic tests
    here use only that interface.
    """
    # Time in seconds to wait before considering a test failed, or
    # None for no timeout.  Not all tests actually set a timeout.
    fail_timeout = 3.0
    def setUp(self):
        # Event used by some tests to synchronise the client and
        # server threads (e.g. around timeouts).
        self.misc_event = threading.Event()
        super().setUp()
    def sendToServer(self, msg):
        # Send msg to the server.
        return self.cli_sock.send(msg)
    # Tuple of alternative default arguments for sendmsg() when called
    # via sendmsgToServer() (e.g. to include a destination address).
    sendmsg_to_server_defaults = ()
    def sendmsgToServer(self, *args):
        # Call sendmsg() on self.cli_sock with the given arguments,
        # filling in any arguments which are not supplied with the
        # corresponding items of self.sendmsg_to_server_defaults, if
        # any.
        return self.cli_sock.sendmsg(
            *(args + self.sendmsg_to_server_defaults[len(args):]))
    def doRecvmsg(self, sock, bufsize, *args):
        # Call recvmsg() on sock with given arguments and return its
        # result.  Should be used for tests which can use either
        # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
        # this method with one which emulates it using recvmsg_into(),
        # thus allowing the same test to be used for both methods.
        result = sock.recvmsg(bufsize, *args)
        self.registerRecvmsgResult(result)
        return result
    def registerRecvmsgResult(self, result):
        # Called by doRecvmsg() with the return value of recvmsg() or
        # recvmsg_into().  Can be overridden to arrange cleanup based
        # on the returned ancillary data, for instance.
        pass
    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer.
        self.assertEqual(addr1, addr2)
    # Flags that are normally unset in msg_flags
    msg_flags_common_unset = 0
    for name in ("MSG_CTRUNC", "MSG_OOB"):
        # getattr() default 0: the flag may not exist on this platform.
        msg_flags_common_unset |= getattr(socket, name, 0)
    # Flags that are normally set
    msg_flags_common_set = 0
    # Flags set when a complete record has been received (e.g. MSG_EOR
    # for SCTP)
    msg_flags_eor_indicator = 0
    # Flags set when a complete record has not been received
    # (e.g. MSG_TRUNC for datagram sockets)
    msg_flags_non_eor_indicator = 0
    def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
        # Method to check the value of msg_flags returned by recvmsg[_into]().
        #
        # Checks that all bits in msg_flags_common_set attribute are
        # set in "flags" and all bits in msg_flags_common_unset are
        # unset.
        #
        # The "eor" argument specifies whether the flags should
        # indicate that a full record (or datagram) has been received.
        # If "eor" is None, no checks are done; otherwise, checks
        # that:
        #
        #  * if "eor" is true, all bits in msg_flags_eor_indicator are
        #    set and all bits in msg_flags_non_eor_indicator are unset
        #
        #  * if "eor" is false, all bits in msg_flags_non_eor_indicator
        #    are set and all bits in msg_flags_eor_indicator are unset
        #
        # If "checkset" and/or "checkunset" are supplied, they require
        # the given bits to be set or unset respectively, overriding
        # what the attributes require for those bits.
        #
        # If any bits are set in "ignore", they will not be checked,
        # regardless of the other inputs.
        #
        # Will raise Exception if the inputs require a bit to be both
        # set and unset, and it is not ignored.
        defaultset = self.msg_flags_common_set
        defaultunset = self.msg_flags_common_unset
        if eor:
            defaultset |= self.msg_flags_eor_indicator
            defaultunset |= self.msg_flags_non_eor_indicator
        elif eor is not None:
            defaultset |= self.msg_flags_non_eor_indicator
            defaultunset |= self.msg_flags_eor_indicator
        # Function arguments override defaults
        defaultset &= ~checkunset
        defaultunset &= ~checkset
        # Merge arguments with remaining defaults, and check for conflicts
        checkset |= defaultset
        checkunset |= defaultunset
        inboth = checkset & checkunset & ~ignore
        if inboth:
            raise Exception("contradictory set, unset requirements for flags "
                            "{0:#x}".format(inboth))
        # Compare with given msg_flags value
        mask = (checkset | checkunset) & ~ignore
        self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
    """Mixin that emulates recvmsg() using recvmsg_into().

    Overrides doRecvmsg() so that the generic recvmsg() tests also
    exercise recvmsg_into(), returning the same 4-tuple shape that
    recvmsg() would.
    """
    def doRecvmsg(self, sock, bufsize, *args):
        scratch = bytearray(bufsize)
        outcome = sock.recvmsg_into([scratch], *args)
        self.registerRecvmsgResult(outcome)
        nreceived = outcome[0]
        # Byte count must fit within the buffer we supplied.
        self.assertGreaterEqual(nreceived, 0)
        self.assertLessEqual(nreceived, bufsize)
        return (bytes(scratch[:nreceived]),) + outcome[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
    """Flag expectations for datagram sockets: an incompletely
    received datagram is signalled via MSG_TRUNC."""
    @property
    def msg_flags_non_eor_indicator(self):
        inherited = super().msg_flags_non_eor_indicator
        return inherited | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
    """Flag expectations for SCTP sockets: a completely received
    record is signalled via MSG_EOR."""
    @property
    def msg_flags_eor_indicator(self):
        inherited = super().msg_flags_eor_indicator
        return inherited | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
    # Base class for tests on connectionless-mode sockets.  Users must
    # supply sockets on attributes cli and serv to be mapped to
    # cli_sock and serv_sock respectively.
    @property
    def serv_sock(self):
        return self.serv
    @property
    def cli_sock(self):
        return self.cli
    @property
    def sendmsg_to_server_defaults(self):
        # Defaults for sendmsg(): no buffers, no ancillary data, no
        # flags, and the server's address as explicit destination
        # (required since the socket is not connected).
        return ([], [], 0, self.serv_addr)
    def sendToServer(self, msg):
        # Unconnected socket: sendto() needs the destination address.
        return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
    # Base class for tests on connected sockets.  Users must supply
    # sockets on attributes serv_conn and cli_conn (representing the
    # connections *to* the server and the client), to be mapped to
    # cli_sock and serv_sock respectively.
    @property
    def serv_sock(self):
        # The server's end is the connection leading to the client.
        return self.cli_conn
    @property
    def cli_sock(self):
        # The client's end is the connection leading to the server.
        return self.serv_conn
    def checkRecvmsgAddress(self, addr1, addr2):
        # Address is currently "unspecified" for a connected socket,
        # so we don't examine it
        pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
    # Base class to set a timeout on server's socket.
    def setUp(self):
        super().setUp()
        # Fail fast instead of hanging if the client never sends.
        self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
    """Tests for sendmsg() usable with any socket type.

    These do not involve recvmsg() or recvmsg_into(); the server side
    just checks what arrives using plain recv().
    """
    def testSendmsg(self):
        # Send a simple message with sendmsg().
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
    def testSendmsgDataGenerator(self):
        # Send from buffer obtained from a generator (not a sequence).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgDataGenerator(self):
        self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
                         len(MSG))
    def testSendmsgAncillaryGenerator(self):
        # Gather (empty) ancillary data from a generator.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgAncillaryGenerator(self):
        self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
                         len(MSG))
    def testSendmsgArray(self):
        # Send data from an array instead of the usual bytes object.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgArray(self):
        self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
                         len(MSG))
    def testSendmsgGather(self):
        # Send message data from more than one buffer (gather write).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgGather(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
    def testSendmsgBadArgs(self):
        # Check that sendmsg() rejects invalid arguments.
        self.assertEqual(self.serv_sock.recv(1000), b"done")
    def _testSendmsgBadArgs(self):
        self.assertRaises(TypeError, self.cli_sock.sendmsg)
        self.assertRaises(TypeError, self.sendmsgToServer,
                          b"not in an iterable")
        self.assertRaises(TypeError, self.sendmsgToServer,
                          object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG, object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], 0, object())
        # Tell the waiting server thread we're finished.
        self.sendToServer(b"done")
    def testSendmsgBadCmsg(self):
        # Check that invalid ancillary data items are rejected.
        self.assertEqual(self.serv_sock.recv(1000), b"done")
    def _testSendmsgBadCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(object(), 0, b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, object(), b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, object())])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0)])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b"data", 42)])
        self.sendToServer(b"done")
    @requireAttrs(socket, "CMSG_SPACE")
    def testSendmsgBadMultiCmsg(self):
        # Check that invalid ancillary data items are rejected when
        # more than one item is present.
        self.assertEqual(self.serv_sock.recv(1000), b"done")
    @testSendmsgBadMultiCmsg.client_skip
    def _testSendmsgBadMultiCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [0, 0, b""])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b""), object()])
        self.sendToServer(b"done")
    def testSendmsgExcessCmsgReject(self):
        # Check that sendmsg() rejects excess ancillary data items
        # when the number that can be sent is limited.
        self.assertEqual(self.serv_sock.recv(1000), b"done")
    def _testSendmsgExcessCmsgReject(self):
        if not hasattr(socket, "CMSG_SPACE"):
            # Can only send one item
            with self.assertRaises(OSError) as cm:
                self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
            # Rejected before reaching the system call, so no errno.
            self.assertIsNone(cm.exception.errno)
        self.sendToServer(b"done")
    def testSendmsgAfterClose(self):
        # Check that sendmsg() fails on a closed socket.
        pass
    def _testSendmsgAfterClose(self):
        self.cli_sock.close()
        self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
    """sendmsg() tests requiring a stream socket (no recvmsg[_into]())."""
    def testSendmsgExplicitNoneAddr(self):
        # Check that peer address can be specified as None.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
    def _testSendmsgExplicitNoneAddr(self):
        self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
    def testSendmsgTimeout(self):
        # Check that timeout works with sendmsg().
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    def _testSendmsgTimeout(self):
        try:
            self.cli_sock.settimeout(0.03)
            try:
                # Fill the send buffer until the timeout fires.
                while True:
                    self.sendmsgToServer([b"a"*512])
            except socket.timeout:
                pass
            except OSError as exc:
                if exc.errno != errno.ENOMEM:
                    raise
                # bpo-33937 the test randomly fails on Travis CI with
                # "OSError: [Errno 12] Cannot allocate memory"
            else:
                self.fail("socket.timeout not raised")
        finally:
            # Release the server, which waits on this event.
            self.misc_event.set()
    # XXX: would be nice to have more tests for sendmsg flags argument.
    # Linux supports MSG_DONTWAIT when sending, but in general, it
    # only works when receiving.  Could add other platforms if they
    # support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
                      "MSG_DONTWAIT not known to work on this platform when "
                      "sending")
    def testSendmsgDontWait(self):
        # Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    @testSendmsgDontWait.client_skip
    def _testSendmsgDontWait(self):
        try:
            with self.assertRaises(OSError) as cm:
                while True:
                    self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
            # bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
            # with "OSError: [Errno 12] Cannot allocate memory"
            self.assertIn(cm.exception.errno,
                          (errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
        finally:
            self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
    """sendmsg() tests needing a connectionless (e.g. datagram)
    socket; these do not involve recvmsg() or recvmsg_into()."""
    def testSendmsgNoDestAddr(self):
        # Server side has nothing to do; the client side verifies that
        # sendmsg() without a destination fails on an unconnected socket.
        pass
    def _testSendmsgNoDestAddr(self):
        with self.assertRaises(OSError):
            self.cli_sock.sendmsg([MSG])
        with self.assertRaises(OSError):
            self.cli_sock.sendmsg([MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
    """Tests for recvmsg() that can also run under the recvmsg_into()
    emulation (via RecvmsgIntoMixin) and work with any socket type."""
    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsg(self):
        self.sendToServer(MSG)
    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)
    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)
    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)
    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)
    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)
    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)
    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
    def _testRecvmsgAfterClose(self):
        pass
    def testRecvmsgTimeout(self):
        # Check that timeout works.
        try:
            self.serv_sock.settimeout(0.03)
            self.assertRaises(socket.timeout,
                              self.doRecvmsg, self.serv_sock, len(MSG))
        finally:
            # Release the client, which waits on this event.
            self.misc_event.set()
    def _testRecvmsgTimeout(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    @requireAttrs(socket, "MSG_PEEK")
    def testRecvmsgPeek(self):
        # Check that MSG_PEEK in flags enables examination of pending
        # data without consuming it.
        # Receive part of data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3, 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Ignoring MSG_TRUNC here (so this test is the same for stream
        # and datagram sockets).  Some wording in POSIX seems to
        # suggest that it needn't be set when peeking, but that may
        # just be a slip.
        self.checkFlags(flags, eor=False,
                        ignore=getattr(socket, "MSG_TRUNC", 0))
        # Receive all data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
        # Check that the same data can still be received normally.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    @testRecvmsgPeek.client_skip
    def _testRecvmsgPeek(self):
        self.sendToServer(MSG)
    @requireAttrs(socket.socket, "sendmsg")
    def testRecvmsgFromSendmsg(self):
        # Test receiving with recvmsg[_into]() when message is sent
        # using sendmsg().
        self.serv_sock.settimeout(self.fail_timeout)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    @testRecvmsgFromSendmsg.client_skip
    def _testRecvmsgFromSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    """Tests that require a stream socket; usable with either
    recvmsg() or recvmsg_into()."""
    def testRecvmsgEOF(self):
        # Peer has closed: expect the b"" end-of-stream indicator.
        data, anc, recv_flags, sender = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(data, b"")
        self.checkRecvmsgAddress(sender, self.cli_addr)
        self.assertEqual(anc, [])
        # Might not have end-of-record marker, so no EOR-bit checks.
        self.checkFlags(recv_flags, eor=None)
    def _testRecvmsgEOF(self):
        self.cli_sock.close()
    def testRecvmsgOverflow(self):
        # Read the message back in two pieces and reassemble it.
        head, anc, recv_flags, sender = self.doRecvmsg(self.serv_sock,
                                                       len(MSG) - 3)
        self.checkRecvmsgAddress(sender, self.cli_addr)
        self.assertEqual(anc, [])
        self.checkFlags(recv_flags, eor=False)
        tail, anc, recv_flags, sender = self.doRecvmsg(self.serv_sock, 1024)
        self.checkRecvmsgAddress(sender, self.cli_addr)
        self.assertEqual(anc, [])
        self.checkFlags(recv_flags, eor=True)
        self.assertEqual(head + tail, MSG)
    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
    """recvmsg()-specific tests usable with any socket type."""
    def testRecvmsgBadArgs(self):
        # Invalid argument combinations must raise without consuming
        # any data; the pending message is then received normally.
        with self.assertRaises(TypeError):
            self.serv_sock.recvmsg()
        with self.assertRaises(ValueError):
            self.serv_sock.recvmsg(-1, 0, 0)
        with self.assertRaises(ValueError):
            self.serv_sock.recvmsg(len(MSG), -1, 0)
        with self.assertRaises(TypeError):
            self.serv_sock.recvmsg([bytearray(10)], 0, 0)
        with self.assertRaises(TypeError):
            self.serv_sock.recvmsg(object(), 0, 0)
        with self.assertRaises(TypeError):
            self.serv_sock.recvmsg(len(MSG), object(), 0)
        with self.assertRaises(TypeError):
            self.serv_sock.recvmsg(len(MSG), 0, object())
        msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
    """recvmsg_into()-specific tests usable with any socket type."""
    def testRecvmsgIntoBadArgs(self):
        # Check that recvmsg_into() rejects invalid arguments.
        buf = bytearray(len(MSG))
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          len(MSG), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          buf, 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [object()], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [b"I'm not writable"], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf, object()], 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
                          [buf], -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], 0, object())
        # The pending message must still be receivable after the failures.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgIntoBadArgs(self):
        self.sendToServer(MSG)
    def testRecvmsgIntoGenerator(self):
        # Receive into buffer obtained from a generator (not a sequence).
        buf = bytearray(len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            (o for o in [buf]))
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgIntoGenerator(self):
        self.sendToServer(MSG)
    def testRecvmsgIntoArray(self):
        # Receive into an array rather than the usual bytearray.
        buf = array.array("B", [0] * len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf.tobytes(), MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgIntoArray(self):
        self.sendToServer(MSG)
    def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter write).
        b1 = bytearray(b"----")
        b2 = bytearray(b"0123456789")
        b3 = bytearray(b"--------------")
        # The memoryview slice limits how much lands in b2.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            [b1, memoryview(b2)[2:9], b3])
        self.assertEqual(nbytes, len(b"Mary had a little lamb"))
        self.assertEqual(b1, bytearray(b"Mary"))
        self.assertEqual(b2, bytearray(b"01 had a 9"))
        self.assertEqual(b3, bytearray(b"little lamb---"))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)
    def _testRecvmsgIntoScatter(self):
        self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
    """Tests for CMSG_LEN() and CMSG_SPACE().

    Checks the assumptions relied on by sendmsg() and
    recvmsg[_into](), which share code with these functions.
    """
    # Match the definition in socketmodule.c
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        # Probe small sizes and sizes just below the overflow
        # threshold, checking the assumptions used by recvmsg() and
        # sendmsg().
        toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
        values = list(range(257)) + list(range(toobig - 257, toobig))
        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
        for datalen in values:
            result = socket.CMSG_LEN(datalen)
            # This is how recvmsg() calculates the data size
            self.assertEqual(result - socket.CMSG_LEN(0), datalen)
            self.assertLessEqual(result, self.socklen_t_limit)
        self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
        self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        # Same probing as testCMSG_LEN, checking the assumptions used
        # by sendmsg().
        toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        values = list(range(257)) + list(range(toobig - 257, toobig))
        prev = socket.CMSG_SPACE(0)
        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(prev, array.array("i").itemsize * 2)
        for datalen in values:
            result = socket.CMSG_SPACE(datalen)
            # CMSG_SPACE() must be monotonic and at least CMSG_LEN()
            # (it may add trailing padding).
            self.assertGreaterEqual(result, prev)
            self.assertGreaterEqual(result, socket.CMSG_LEN(datalen))
            self.assertGreaterEqual(result, datalen + socket.CMSG_LEN(0))
            self.assertLessEqual(result, self.socklen_t_limit)
            prev = result
        self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
        self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
    def registerRecvmsgResult(self, result):
        # Make sure any FDs received in ancillary data get closed when
        # the test finishes.
        self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
    def createAndSendFDs(self, n):
        # Send n new file descriptors created by newFDs() to the
        # server, with the constant MSG as the non-ancillary data.
        # sendmsg() reports only the non-ancillary byte count.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.SOL_SOCKET,
                                   socket.SCM_RIGHTS,
                                   array.array("i", self.newFDs(n)))]),
            len(MSG))
    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers).  By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            # Each item must contain only whole ints (no partial FDs).
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)
        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)
    def testFDPassSimple(self):
        # Pass a single FD (array read from bytes object).
        # Server side: expect MSG plus exactly one descriptor.
        self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))
    def _testFDPassSimple(self):
        # Client side: send MSG with one FD as a raw-bytes SCM_RIGHTS item.
        self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))
    def testMultipleFDPass(self):
        # Pass multiple FDs in a single array.
        # Server side: expect all four descriptors in one receive.
        self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))
    def _testMultipleFDPass(self):
        # Client side: four new descriptors in a single SCM_RIGHTS item.
        self.createAndSendFDs(4)
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassCMSG_SPACE(self):
        # Test using CMSG_SPACE() to calculate ancillary buffer size.
        # The buffer is sized exactly for four ints of FD data.
        self.checkRecvmsgFDs(
            4, self.doRecvmsg(self.serv_sock, len(MSG),
                              socket.CMSG_SPACE(4 * SIZEOF_INT)))
    @testFDPassCMSG_SPACE.client_skip
    def _testFDPassCMSG_SPACE(self):
        # Client half of testFDPassCMSG_SPACE (skipped with the server).
        self.createAndSendFDs(4)
    def testFDPassCMSG_LEN(self):
        # Test using CMSG_LEN() to calculate ancillary buffer size.
        # Only one FD is sent, so the 4-int buffer is generous.
        self.checkRecvmsgFDs(1,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_LEN(4 * SIZEOF_INT)),
                             # RFC 3542 says implementations may set
                             # MSG_CTRUNC if there isn't enough space
                             # for trailing padding.
                             ignoreflags=socket.MSG_CTRUNC)
    def _testFDPassCMSG_LEN(self):
        # Client half of testFDPassCMSG_LEN.
        self.createAndSendFDs(1)
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparate(self):
        # Pass two FDs in two separate arrays.  Arrays may be combined
        # into a single control message by the OS.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                             maxcmsgs=2)
    @testFDPassSeparate.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparate(self):
        # Client side: two separate SCM_RIGHTS items, one FD each.
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparateMinSpace(self):
        # Pass two FDs in two separate arrays, receiving them into the
        # minimum space for two arrays.
        num_fds = 2
        self.checkRecvmsgFDs(num_fds,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_SPACE(SIZEOF_INT) +
                                            socket.CMSG_LEN(SIZEOF_INT * num_fds)),
                             maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
    @testFDPassSeparateMinSpace.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparateMinSpace(self):
        # Client side: two separate SCM_RIGHTS items, one FD each.
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
    @unittest.skipIf(sys.platform == "darwin", "see issue #24725")
    def testFDPassEmpty(self):
        # Try to pass an empty FD array.  Can receive either no array
        # or an empty array.
        self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240),
                             ignoreflags=socket.MSG_CTRUNC)
    def _testFDPassEmpty(self):
        # Client side: SCM_RIGHTS item with zero-length payload.
        self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                            socket.SCM_RIGHTS,
                                            b"")])
    def testFDPassPartialInt(self):
        # Try to pass a truncated FD array.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            # The truncated int must not arrive as a whole descriptor.
            self.assertLess(len(cmsg_data), SIZEOF_INT)
    def _testFDPassPartialInt(self):
        # Client side: an int array with its final byte sliced off.
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [self.badfd]).tobytes()[:-1])])
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassPartialIntInMiddle(self):
        # Try to pass two FD arrays, the first of which is truncated.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 2)
        fds = array.array("i")
        # Arrays may have been combined in a single control message
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        # At most the two complete descriptors may come through.
        self.assertLessEqual(len(fds), 2)
        self.checkFDs(fds)
    @testFDPassPartialIntInMiddle.client_skip
    def _testFDPassPartialIntInMiddle(self):
        # Client side: first item truncated mid-int, second item intact.
        fd0, fd1 = self.newFDs(2)
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
             (socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no buffer size
        # is specified.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                                  # BSD seems to set MSG_CTRUNC only
                                  # if an item has been partially
                                  # received.
                                  ignoreflags=socket.MSG_CTRUNC)
    def _testCmsgTruncNoBufSize(self):
        # Client: send one FD for the server-side truncation check.
        self.createAndSendFDs(1)
    def testCmsgTrunc0(self):
        # Check that no ancillary data is received when buffer size is 0.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                                  ignoreflags=socket.MSG_CTRUNC)
    def _testCmsgTrunc0(self):
        # Client: send one FD for the server-side truncation check.
        self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
    def testCmsgTrunc1(self):
        # 1-byte ancillary buffer: too small for a cmsghdr.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
    def _testCmsgTrunc1(self):
        # Client: send one FD for the server-side truncation check.
        self.createAndSendFDs(1)
    def testCmsgTrunc2Int(self):
        # The cmsghdr structure has at least three members, two of
        # which are ints, so we still shouldn't see any ancillary
        # data.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 SIZEOF_INT * 2))
    def _testCmsgTrunc2Int(self):
        # Client: send one FD for the server-side truncation check.
        self.createAndSendFDs(1)
    def testCmsgTruncLen0Minus1(self):
        # One byte short of the smallest possible control message.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 socket.CMSG_LEN(0) - 1))
    def _testCmsgTruncLen0Minus1(self):
        # Client: send one FD for the server-side truncation check.
        self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
    def testCmsgTruncLen0(self):
        # Room for a header but zero bytes of FD data.
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
    def _testCmsgTruncLen0(self):
        # Client: send one FD for the server-side truncation check.
        self.createAndSendFDs(1)
    def testCmsgTruncLen0Plus1(self):
        # Room for a header plus a single byte of FD data.
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
    def _testCmsgTruncLen0Plus1(self):
        # Client: send two FDs for the server-side truncation check.
        self.createAndSendFDs(2)
    def testCmsgTruncLen1(self):
        # Room for exactly one complete FD out of the two sent.
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                                 maxdata=SIZEOF_INT)
    def _testCmsgTruncLen1(self):
        # Client: send two FDs for the server-side truncation check.
        self.createAndSendFDs(2)
    def testCmsgTruncLen2Minus1(self):
        # One byte short of two complete FDs.
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                                 maxdata=(2 * SIZEOF_INT) - 1)
    def _testCmsgTruncLen2Minus1(self):
        # Client: send two FDs for the server-side truncation check.
        self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
    """Test sendmsg() and recvmsg[_into]() using the ancillary data
    features of the RFC 3542 Advanced Sockets API for IPv6.

    Currently we can only handle certain data items (e.g. traffic
    class, hop limit, MTU discovery and fragmentation settings)
    without resorting to unportable means such as the struct module,
    but the tests here are aimed at testing the ancillary data
    handling in sendmsg() and recvmsg() rather than the IPv6 API
    itself.
    """

    # Test value to use when setting hop limit of packet
    hop_limit = 2
    # Test value to use when setting traffic class of packet.
    # -1 means "use kernel default".
    traffic_class = -1

    def ancillaryMapping(self, ancdata):
        # Given ancillary data list ancdata, return a mapping from
        # pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
        # Check that no (level, type) pair appears more than once.
        d = {}
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertNotIn((cmsg_level, cmsg_type), d)
            d[(cmsg_level, cmsg_type)] = cmsg_data
        return d

    def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space.  Check that data is MSG, ancillary data is not
        # truncated (but ignore any flags in ignoreflags), and hop
        # limit is between 0 and maxhop inclusive.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        # Exactly one item: (IPPROTO_IPV6, IPV6_HOPLIMIT, <int bytes>).
        self.assertEqual(len(ancdata), 1)
        self.assertIsInstance(ancdata[0], tuple)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
        self.assertIsInstance(cmsg_data, bytes)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimit(self):
        # Test receiving the packet hop limit as ancillary data.
        self.checkHopLimit(ancbufsize=10240)

    @testRecvHopLimit.client_skip
    def _testRecvHopLimit(self):
        # Need to wait until server has asked to receive ancillary
        # data, as implementations are not required to buffer it
        # otherwise.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimitCMSG_SPACE(self):
        # Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
        self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))

    @testRecvHopLimitCMSG_SPACE.client_skip
    def _testRecvHopLimitCMSG_SPACE(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Could test receiving into buffer sized using CMSG_LEN, but RFC
    # 3542 says portable applications must provide space for trailing
    # padding.  Implementations may set MSG_CTRUNC if there isn't
    # enough space for the padding.

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSetHopLimit(self):
        # Test setting hop limit on outgoing packet and receiving it
        # at the other end.
        self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)

    @testSetHopLimit.client_skip
    def _testSetHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
                                     ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space.  Check that data is MSG, ancillary
        # data is not truncated (but ignore any flags in ignoreflags),
        # and traffic class and hop limit are in range (hop limit no
        # more than maxhop).
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 2)
        ancmap = self.ancillaryMapping(ancdata)
        # Traffic class: one int in [0, 255].
        tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
        self.assertEqual(len(tcdata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(tcdata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
        # Hop limit: one int in [0, maxhop].
        hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
        self.assertEqual(len(hldata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(hldata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimit(self):
        # Test receiving traffic class and hop limit as ancillary data.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240)

    @testRecvTrafficClassAndHopLimit.client_skip
    def _testRecvTrafficClassAndHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        # Test receiving traffic class and hop limit, using
        # CMSG_SPACE() to calculate buffer size.
        self.checkTrafficClassAndHopLimit(
            ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)

    @testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
    def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSetTrafficClassAndHopLimit(self):
        # Test setting traffic class and hop limit on outgoing packet,
        # and receiving them at the other end.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testSetTrafficClassAndHopLimit.client_skip
    def _testSetTrafficClassAndHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                                   array.array("i", [self.traffic_class])),
                                  (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testOddCmsgSize(self):
        # Try to send ancillary data with first item one byte too
        # long.  Fall back to sending with correct size if this fails,
        # and check that second item was handled correctly.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testOddCmsgSize.client_skip
    def _testOddCmsgSize(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        try:
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        except OSError as e:
            # Odd-sized item rejected: retry with correctly-sized data.
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class])),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        self.assertEqual(nbytes, len(MSG))

    # Tests for proper handling of truncated ancillary data

    def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space, which should be too small to contain the ancillary
        # data header (if ancbufsize is None, pass no second argument
        # to recvmsg()).  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and no ancillary data is
        # returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        args = () if ancbufsize is None else (ancbufsize,)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), *args)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no ancillary
        # buffer size is provided.
        self.checkHopLimitTruncatedHeader(ancbufsize=None,
                                          # BSD seems to set
                                          # MSG_CTRUNC only if an item
                                          # has been partially
                                          # received.
                                          ignoreflags=socket.MSG_CTRUNC)

    @testCmsgTruncNoBufSize.client_skip
    def _testCmsgTruncNoBufSize(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc0(self):
        # Check that no ancillary data is received when ancillary
        # buffer size is zero.
        self.checkHopLimitTruncatedHeader(ancbufsize=0,
                                          ignoreflags=socket.MSG_CTRUNC)

    @testSingleCmsgTrunc0.client_skip
    def _testSingleCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc1(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=1)

    @testSingleCmsgTrunc1.client_skip
    def _testSingleCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc2Int(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)

    @testSingleCmsgTrunc2Int.client_skip
    def _testSingleCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncLen0Minus1(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)

    @testSingleCmsgTruncLen0Minus1.client_skip
    def _testSingleCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncInData(self):
        # Test truncation of a control message inside its associated
        # data.  The message may be returned with its data truncated,
        # or not returned at all.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    @testSingleCmsgTruncInData.client_skip
    def _testSingleCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space, which should be large enough to
        # contain the first item, but too small to contain the header
        # of the second.  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and only one ancillary
        # data item is returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        # Either item may arrive first; just check it is well-formed.
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

    # Try the above test with various buffer sizes.

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc0(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
                                        ignoreflags=socket.MSG_CTRUNC)

    @testSecondCmsgTrunc0.client_skip
    def _testSecondCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc1(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)

    @testSecondCmsgTrunc1.client_skip
    def _testSecondCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc2Int(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        2 * SIZEOF_INT)

    @testSecondCmsgTrunc2Int.client_skip
    def _testSecondCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncLen0Minus1(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        socket.CMSG_LEN(0) - 1)

    @testSecondCmsgTruncLen0Minus1.client_skip
    def _testSecondCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # NOTE(review): "Secomd" below is a typo for "Second", but the
    # method names are part of the public test IDs (and the client
    # method is bound via the decorator), so they are left unchanged.
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecomdCmsgTruncInData(self):
        # Test truncation of the second of two control messages inside
        # its associated data.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG),
            socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        # First item must be complete; remove its type from the
        # expected set so the second (if any) must be the other type.
        cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
        cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        cmsg_types.remove(cmsg_type)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
        if ancdata:
            # Second item, if returned at all, must be truncated.
            cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            cmsg_types.remove(cmsg_type)
            self.assertLess(len(cmsg_data), SIZEOF_INT)
        self.assertEqual(ancdata, [])

    @testSecomdCmsgTruncInData.client_skip
    def _testSecomdCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
                             SendrecvmsgConnectionlessBase,
                             ThreadedSocketTestMixin, UDPTestBase):
    """Base class combining the sendmsg()/recvmsg() mixins for UDP."""
    pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
    """Run the connectionless sendmsg() tests over UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
    """Run the recvmsg() tests over UDP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
    """Run the recvmsg_into() tests over UDP."""
    pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
                              SendrecvmsgConnectionlessBase,
                              ThreadedSocketTestMixin, UDP6TestBase):
    """Base class combining the sendmsg()/recvmsg() mixins for UDPv6."""

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer, ignoring scope ID
        self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
    """Run the connectionless sendmsg() tests over UDPv6."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
    """Run the recvmsg() tests over UDPv6."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
    """Run the recvmsg_into() tests over UDPv6."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
                                      SendrecvmsgUDP6TestBase):
    """Run the RFC 3542 ancillary-data tests over UDPv6 with recvmsg()."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
                                          RFC3542AncillaryTest,
                                          SendrecvmsgUDP6TestBase):
    """Run the RFC 3542 ancillary-data tests over UDPv6 with
    recvmsg_into()."""
    pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
                             ConnectedStreamTestMixin, TCPTestBase):
    """Base class combining the sendmsg()/recvmsg() mixins for TCP."""
    pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
    """Run the stream sendmsg() tests over TCP."""
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
                     SendrecvmsgTCPTestBase):
    """Run the recvmsg() tests over TCP."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                         SendrecvmsgTCPTestBase):
    """Run the recvmsg_into() tests over TCP."""
    pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
                                    SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, SCTPStreamBase):
    """Base class combining the sendmsg()/recvmsg() mixins for SCTP
    stream sockets."""
    pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
    """Run the stream sendmsg() tests over an SCTP stream socket."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgSCTPStreamTestBase):
    """Run the recvmsg() tests over an SCTP stream socket."""

    def testRecvmsgEOF(self):
        # SCTP sporadically reports ENOTCONN instead of a clean EOF;
        # treat that as a skip rather than a failure.
        try:
            super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgSCTPStreamTestBase):
    """Run the recvmsg_into() tests over an SCTP stream socket."""

    def testRecvmsgEOF(self):
        # SCTP sporadically reports ENOTCONN instead of a clean EOF;
        # treat that as a skip rather than a failure.
        try:
            super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, UnixStreamBase):
    """Base class combining the sendmsg()/recvmsg() mixins for Unix
    stream sockets."""
    pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    """Run the stream sendmsg() tests over a Unix stream socket."""
    pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgUnixStreamTestBase):
    """Run the recvmsg() tests over a Unix stream socket."""
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgUnixStreamTestBase):
    """Run the recvmsg_into() tests over a Unix stream socket."""
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
    """Run the SCM_RIGHTS FD-passing tests with recvmsg()."""
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
                                     SendrecvmsgUnixStreamTestBase):
    """Run the SCM_RIGHTS FD-passing tests with recvmsg_into()."""
    pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    """Base class for interrupted send/receive tests.

    Installs a SIGALRM handler that raises ZeroDivisionError (via
    ``1 / 0``) and removes it on teardown, along with any scheduled
    alarms.  Subclasses use setAlarm() and assert on the propagated
    ZeroDivisionError to detect that the blocking call was interrupted.
    """

    def setUp(self):
        super().setUp()
        # The handler deliberately raises (1 / 0) so an interrupted
        # syscall surfaces as ZeroDivisionError in the test.
        orig_alrm_handler = signal.signal(signal.SIGALRM,
                                          lambda signum, frame: 1 / 0)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)

    # Timeout for socket operations
    timeout = 4.0

    # Provide setAlarm() method to schedule delivery of SIGALRM after
    # given number of seconds, or cancel it if zero, and an
    # appropriate time value to use.  Use setitimer() if available.
    if hasattr(signal, "setitimer"):
        alarm_time = 0.05

        def setAlarm(self, seconds):
            signal.setitimer(signal.ITIMER_REAL, seconds)
    else:
        # Old systems may deliver the alarm up to one second early
        alarm_time = 2

        def setAlarm(self, seconds):
            signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
    """Test interrupting the recv*() methods with signals when a
    timeout is set."""

    def setUp(self):
        super().setUp()
        self.serv.settimeout(self.timeout)

    def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError,
        # propagated from the SIGALRM handler installed by
        # InterruptedTimeoutBase, when interrupted by a signal.
        # (The previous comment claimed an OSError with EINTR, which
        # is stale: the handler raises, so its exception is what
        # surfaces from the interrupted call.)
        try:
            self.setAlarm(self.alarm_time)
            # The unused "as cm" binding has been removed.
            with self.assertRaises(ZeroDivisionError):
                func(*args, **kwargs)
        finally:
            self.setAlarm(0)

    def testInterruptedRecvTimeout(self):
        self.checkInterruptedRecv(self.serv.recv, 1024)

    def testInterruptedRecvIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))

    def testInterruptedRecvfromTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom, 1024)

    def testInterruptedRecvfromIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))

    @requireAttrs(socket.socket, "recvmsg")
    def testInterruptedRecvmsgTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg, 1024)

    @requireAttrs(socket.socket, "recvmsg_into")
    def testInterruptedRecvmsgIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
                                 ThreadSafeCleanupTestCase,
                                 SocketListeningTestMixin, TCPTestBase):
    """Test interrupting the interruptible send*() methods with signals
    when a timeout is set."""

    def setUp(self):
        super().setUp()
        self.serv_conn = self.newSocket()
        self.addCleanup(self.serv_conn.close)
        # Use a thread to complete the connection, but wait for it to
        # terminate before running the test, so that there is only one
        # thread to accept the signal.
        cli_thread = threading.Thread(target=self.doConnect)
        cli_thread.start()
        self.cli_conn, addr = self.serv.accept()
        self.addCleanup(self.cli_conn.close)
        cli_thread.join()
        self.serv_conn.settimeout(self.timeout)

    def doConnect(self):
        self.serv_conn.connect(self.serv_addr)

    def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError, propagated from the SIGALRM handler
        # installed by InterruptedTimeoutBase, when interrupted by a
        # signal.  (The previous comment claimed an OSError with
        # EINTR, which is stale: the handler raises, so its exception
        # is what surfaces.)  The loop keeps sending until the send
        # buffer fills and the call blocks long enough to be hit.
        try:
            # The unused "as cm" binding has been removed.
            with self.assertRaises(ZeroDivisionError):
                while True:
                    self.setAlarm(self.alarm_time)
                    func(*args, **kwargs)
        finally:
            self.setAlarm(0)

    # Issue #12958: The following tests have problems on OS X prior to 10.7
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendTimeout(self):
        self.checkInterruptedSend(self.serv_conn.send, b"a"*512)

    @support.requires_mac_ver(10, 7)
    def testInterruptedSendtoTimeout(self):
        # Passing an actual address here as Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires that the address is ignored since the socket is
        # connection-mode, however.
        self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
                                  self.serv_addr)

    @support.requires_mac_ver(10, 7)
    @requireAttrs(socket.socket, "sendmsg")
    def testInterruptedSendmsgTimeout(self):
        self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
    """Check behaviour of a TCP connection closed by the server."""

    def testClose(self):
        conn, addr = self.serv.accept()
        conn.close()
        sd = self.cli
        # The closed peer becomes readable and recv() returns b'' (EOF).
        read, write, err = select.select([sd], [], [], 1.0)
        self.assertEqual(read, [sd])
        self.assertEqual(sd.recv(1), b'')
        # Calling close() many times should be safe.
        conn.close()
        conn.close()

    def _testClose(self):
        self.cli.connect((HOST, self.port))
        time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
    """Basic checks for socket.socketpair(): default attributes and
    simple send/recv in both directions."""

    def __init__(self, methodName='runTest'):
        SocketPairTest.__init__(self, methodName=methodName)

    def _check_defaults(self, sock):
        # socketpair() uses AF_UNIX where available, AF_INET otherwise.
        self.assertIsInstance(sock, socket.socket)
        if hasattr(socket, 'AF_UNIX'):
            self.assertEqual(sock.family, socket.AF_UNIX)
        else:
            self.assertEqual(sock.family, socket.AF_INET)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)

    def _testDefaults(self):
        self._check_defaults(self.cli)

    def testDefaults(self):
        self._check_defaults(self.serv)

    def testRecv(self):
        msg = self.serv.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.cli.send(MSG)

    def testSend(self):
        self.serv.send(MSG)

    def _testSend(self):
        msg = self.cli.recv(1024)
        self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    def __init__(self, methodName='runTest'):
        # Event used to synchronise the client and server threads.
        self.event = threading.Event()
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)
    def assert_sock_timeout(self, sock, timeout):
        """Assert that the server socket reports *timeout* and that the
        Python-level (and, if fcntl is available, fd-level) blocking
        modes are consistent with it."""
        self.assertEqual(self.serv.gettimeout(), timeout)
        blocking = (timeout != 0.0)
        self.assertEqual(sock.getblocking(), blocking)
        if fcntl is not None:
            # When a Python socket has a non-zero timeout, it's switched
            # internally to a non-blocking mode. Later, sock.sendall(),
            # sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EGAIN on all socket operations. That's how
            # timeouts are enforced.
            fd_blocking = (timeout is None)
            flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
            self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
    def testSetBlocking(self):
        # Test setblocking() and settimeout() methods
        self.serv.setblocking(True)
        self.assert_sock_timeout(self.serv, None)
        self.serv.setblocking(False)
        self.assert_sock_timeout(self.serv, 0.0)
        self.serv.settimeout(None)
        self.assert_sock_timeout(self.serv, None)
        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)
        self.serv.settimeout(10)
        self.assert_sock_timeout(self.serv, 10)
        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)
    def _testSetBlocking(self):
        # Client side has nothing to do for this test.
        pass
    @support.cpython_only
    def testSetBlocking_overflow(self):
        # Issue 15989: a setblocking() argument too large for a C uint
        # must still be treated as true.
        import _testcapi
        if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
            self.skipTest('needs UINT_MAX < ULONG_MAX')
        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)
        self.serv.setblocking(_testcapi.UINT_MAX + 1)
        self.assertIsNone(self.serv.gettimeout())

    # Client counterpart, guarded the same way as the server test.
    _testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'test needs socket.SOCK_NONBLOCK')
    @support.requires_linux_version(2, 6, 28)
    def testInitNonBlocking(self):
        # create a socket with SOCK_NONBLOCK
        self.serv.close()
        self.serv = socket.socket(socket.AF_INET,
                                  socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        self.assert_sock_timeout(self.serv, 0)
    def _testInitNonBlocking(self):
        # Client side has nothing to do for this test.
        pass
    def testInheritFlagsBlocking(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must be blocking.
        with socket_setdefaulttimeout(None):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertIsNone(conn.gettimeout())
    def _testInheritFlagsBlocking(self):
        # Client: connect so the server's accept() succeeds.
        self.cli.connect((HOST, self.port))
    def testInheritFlagsTimeout(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must inherit
        # the default timeout.
        default_timeout = 20.0
        with socket_setdefaulttimeout(default_timeout):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            # The accepted socket gets the *default* timeout, not the
            # listening socket's own timeout.
            self.assertEqual(conn.gettimeout(), default_timeout)
    def _testInheritFlagsTimeout(self):
        # Client side: just connect so the server's accept() returns.
        self.cli.connect((HOST, self.port))
    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(0)
        # connect() didn't start: non-blocking accept() fails immediately
        # (and must not take anywhere near a second to do so).
        start_time = time.monotonic()
        with self.assertRaises(BlockingIOError):
            conn, addr = self.serv.accept()
        dt = time.monotonic() - start_time
        self.assertLess(dt, 1.0)
        # Tell the client thread (see _testAccept) it may now connect.
        self.event.set()
        read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
        if self.serv not in read:
            self.fail("Error trying to do accept after select.")
        # connect() completed: non-blocking accept() doesn't block
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        self.assertIsNone(conn.gettimeout())
    def _testAccept(self):
        # don't connect before event is set to check
        # that non-blocking accept() raises BlockingIOError
        self.event.wait()
        self.cli.connect((HOST, self.port))
    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        conn.setblocking(0)
        # the server didn't send data yet: non-blocking recv() fails
        with self.assertRaises(BlockingIOError):
            msg = conn.recv(len(MSG))
        # Tell the client thread (see _testRecv) to send its data.
        self.event.set()
        read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
        if conn not in read:
            self.fail("Error during select call to non-blocking socket.")
        # the client sent the data now: non-blocking recv() doesn't block
        msg = conn.recv(len(MSG))
        self.assertEqual(msg, MSG)
    def _testRecv(self):
        self.cli.connect((HOST, self.port))
        # don't send anything before event is set to check
        # that non-blocking recv() raises BlockingIOError
        self.event.wait()
        # send data: recv() will no longer block
        self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
    """Unit tests for the object returned by socket.makefile()
    self.read_file is the io object returned by makefile() on
    the client connection. You can read from this file to
    get output from the server.
    self.write_file is the io object returned by makefile() on the
    server connection. You can write to this file to send output
    to the client.
    """
    bufsize = -1  # Use default buffer size
    encoding = 'utf-8'
    errors = 'strict'
    newline = None
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'wb'
    write_msg = MSG
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)
    def setUp(self):
        # Four events used to synchronize the server and client threads
        # in the sub-tests below.
        self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
            threading.Event() for i in range(4)]
        SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def tearDown(self):
        # Signal the client thread we are done before closing our side.
        self.serv_finished.set()
        self.read_file.close()
        self.assertTrue(self.read_file.closed)
        self.read_file = None
        SocketConnectedTest.tearDown(self)
    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def clientTearDown(self):
        self.cli_finished.set()
        self.write_file.close()
        self.assertTrue(self.write_file.closed)
        self.write_file = None
        SocketConnectedTest.clientTearDown(self)
    def testReadAfterTimeout(self):
        # Issue #7322: A file object must disallow further reads
        # after a timeout has occurred.
        self.cli_conn.settimeout(1)
        self.read_file.read(3)
        # First read raises a timeout
        self.assertRaises(socket.timeout, self.read_file.read, 1)
        # Second read is disallowed
        with self.assertRaises(OSError) as ctx:
            self.read_file.read(1)
        self.assertIn("cannot read from timed out object", str(ctx.exception))
    def _testReadAfterTimeout(self):
        # Send only 3 bytes so the server's second read() times out.
        self.write_file.write(self.write_msg[0:3])
        self.write_file.flush()
        self.serv_finished.wait()
    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.read_file.read(len(self.read_msg)-3)
        second_seg = self.read_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, self.read_msg)
    def _testSmallRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testFullRead(self):
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testFullRead(self):
        # Closing the write side produces EOF for the reader.
        self.write_file.write(self.write_msg)
        self.write_file.close()
    def testUnbufferedRead(self):
        # Performing unbuffered file read test: one byte/char at a time.
        buf = type(self.read_msg)()
        while 1:
            char = self.read_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, self.read_msg)
    def _testUnbufferedRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testReadline(self):
        # Performing file readline test
        line = self.read_file.readline()
        self.assertEqual(line, self.read_msg)
    def _testReadline(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testCloseAfterMakefile(self):
        # The file returned by makefile should keep the socket open.
        self.cli_conn.close()
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testCloseAfterMakefile(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testMakefileAfterMakefileClose(self):
        # Closing the file object must not close the underlying socket.
        self.read_file.close()
        msg = self.cli_conn.recv(len(MSG))
        if isinstance(self.read_msg, str):
            msg = msg.decode()
        self.assertEqual(msg, self.read_msg)
    def _testMakefileAfterMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testClosedAttr(self):
        self.assertTrue(not self.read_file.closed)
    def _testClosedAttr(self):
        self.assertTrue(not self.write_file.closed)
    def testAttributes(self):
        # mode/name attributes mirror the makefile() mode and the fileno.
        self.assertEqual(self.read_file.mode, self.read_mode)
        self.assertEqual(self.read_file.name, self.cli_conn.fileno())
    def _testAttributes(self):
        self.assertEqual(self.write_file.mode, self.write_mode)
        self.assertEqual(self.write_file.name, self.serv_conn.fileno())
    def testRealClose(self):
        # After close(), fileno() must fail; after closing the socket,
        # socket operations must fail too.
        self.read_file.close()
        self.assertRaises(ValueError, self.read_file.fileno)
        self.cli_conn.close()
        self.assertRaises(OSError, self.cli_conn.getsockname)
    def _testRealClose(self):
        pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the tests from FileObjectClassTestCase with bufsize==0.
    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer. Note that http.client relies on this
    when reading multiple requests from the same socket."""
    bufsize = 0  # Use unbuffered mode
    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline() # first line
        self.assertEqual(line, b"A. " + self.write_msg) # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline() # second line
        self.assertEqual(line, b"B. " + self.write_msg) # second line
    def _testUnbufferedReadline(self):
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()
    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
    def _testMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testMakefileCloseSocketDestroy(self):
        # Closing the file object must drop its reference to the socket.
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)
    def _testMakefileCloseSocketDestroy(self):
        pass
    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        # No data sent yet: non-blocking reads return None, not block.
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        # Everything consumed: subsequent reads return None again.
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)
    def _testSmallReadNonBlocking(self):
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)
    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)
    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions. This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the file-object tests with line buffering (bufsize=1)."""
    bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the file-object tests with a tiny buffer (bufsize=2)."""
    bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Text-mode reads, binary-mode writes.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'wb'
    write_msg = MSG
    newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Binary-mode reads, text-mode writes.
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Text mode on both sides.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class NetworkConnectionTest(object):
    """Prove network connection."""
    def clientSetUp(self):
        # We're inherited below by BasicTCPTest2, which also inherits
        # BasicTCPTest, which defines self.port referenced below.
        self.cli = socket.create_connection((HOST, self.port))
        self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Tests that NetworkConnection does not break existing TCP functionality.
    """
class NetworkConnectionNoServer(unittest.TestCase):
    # Connection-failure behaviour when no server is listening.
    class MockSocket(socket.socket):
        # Socket subclass whose connect() always times out.
        def connect(self, *args):
            raise socket.timeout('timed out')
    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Return a socket which times out on connect"""
        old_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            # Always restore the real socket class.
            socket.socket = old_socket
    def test_connect(self):
        # Connecting to an unused local port must be refused.
        port = support.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(OSError) as cm:
            cli.connect((HOST, port))
        self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = support.find_unused_port()
        with self.assertRaises(OSError) as cm:
            socket.create_connection((HOST, port))
        # Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'. This may result in an IPV6 addr being returned
        # as well as an IPV4 one:
        # >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
        # >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
        # (26, 2, 0, '', ('::1', 41230, 0, 0))]
        #
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully bind to any of them, it propagates
        # the last exception it encountered.
        #
        # On Solaris, ENETUNREACH is returned in this circumstance instead
        # of ECONNREFUSED. So, if that errno exists, add it to our list of
        # expected errnos.
        expected_errnos = [ errno.ECONNREFUSED, ]
        if hasattr(errno, 'ENETUNREACH'):
            expected_errnos.append(errno.ENETUNREACH)
        if hasattr(errno, 'EADDRNOTAVAIL'):
            # bpo-31910: socket.create_connection() fails randomly
            # with EADDRNOTAVAIL on Travis CI
            expected_errnos.append(errno.EADDRNOTAVAIL)
        self.assertIn(cm.exception.errno, expected_errnos)
    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            with self.assertRaises(socket.timeout):
                socket.create_connection((HOST, 1234))
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    # Attributes (family, source address, timeout) of sockets created
    # with socket.create_connection().  The server side of each test is
    # just _justAccept(); all assertions run on the client thread.
    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.source_port = support.find_unused_port()
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
    def _justAccept(self):
        # Minimal server side: accept one connection and close it.
        conn, addr = self.serv.accept()
        conn.close()
    testFamily = _justAccept
    def _testFamily(self):
        # AF_INET is 2 on all supported platforms.
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.family, 2)
    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.
    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            # Restore the global default whatever happens.
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)
    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)
    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.assertEqual(self.cli.gettimeout(), 30)
    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        # Same as above, but with the timeout passed positionally.
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    # Behaviour of create_connection() sockets with and without timeouts:
    # the server deliberately waits 3 seconds before replying.
    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
    def testInsideTimeout(self):
        # Server side: accept, wait, then answer.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send(b"done!")
    testOutsideTimeout = testInsideTimeout
    def _testInsideTimeout(self):
        # No timeout: recv() blocks until the delayed reply arrives.
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, b"done!")
    def _testOutsideTimeout(self):
        # 1s timeout is shorter than the server's 3s delay: must time out.
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
    """Timeout behaviour of blocking TCP accept()."""

    def testTCPTimeout(self):
        # accept() with a 1s timeout and no client must raise socket.timeout.
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        # With a zero timeout, accept() must fail immediately with a plain
        # OSError (EWOULDBLOCK), not with socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except OSError:
            ok = True
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # are no longer swallowed and misreported as a test failure.
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    @unittest.skipUnless(hasattr(signal, 'alarm'),
                         'test needs signal.alarm()')
    def testInterruptedTimeout(self):
        # A signal arriving while accept() waits must surface as the
        # handler's exception (Alarm), not as socket.timeout.
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        self.serv.settimeout(5.0)   # must be longer than alarm

        class Alarm(Exception):
            pass

        def alarm_handler(signal, frame):
            raise Alarm

        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            try:
                signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
                foo = self.serv.accept()
            except socket.timeout:
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except Exception:
                # Narrowed from a bare "except:" (see testTimeoutZero).
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)  # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    """Timeout behaviour of blocking UDP recv()."""

    def testUDPTimeout(self):
        # recv() with a 1s timeout and no sender must raise socket.timeout.
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        # With a zero timeout, recv() must fail immediately with a plain
        # OSError (EWOULDBLOCK), not with socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except OSError:
            ok = True
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # are no longer swallowed and misreported as a test failure.
            self.fail("caught unexpected exception (UDP)")
        if not ok:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Sanity checks on the socket exception hierarchy and invalid fds."""

    def testExceptionTree(self):
        # Every socket-specific exception derives from OSError, and
        # OSError itself from Exception.
        hierarchy = (
            (OSError, Exception),
            (socket.herror, OSError),
            (socket.gaierror, OSError),
            (socket.timeout, OSError),
        )
        for derived, base in hierarchy:
            self.assertTrue(issubclass(derived, base))

    def test_setblocking_invalidfd(self):
        # Regression test for issue #28471
        owner = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        alias = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, 0, owner.fileno())
        owner.close()
        self.addCleanup(alias.detach)
        with self.assertRaises(OSError):
            alias.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
    # Binding AF_UNIX sockets to filesystem pathnames in various encodings.
    def setUp(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    def tearDown(self):
        self.sock.close()
    def encoded(self, path):
        # Return the given path encoded in the file system encoding,
        # or skip the test if this is not possible.
        try:
            return os.fsencode(path)
        except UnicodeEncodeError:
            self.skipTest(
                "Pathname {0!a} cannot be represented in file "
                "system encoding {1!r}".format(
                    path, sys.getfilesystemencoding()))
    def bind(self, sock, path):
        # Bind the socket
        try:
            support.bind_unix_socket(sock, path)
        except OSError as e:
            if str(e) == "AF_UNIX path too long":
                self.skipTest(
                    "Pathname {0!a} is too long to serve as an AF_UNIX path"
                    .format(path))
            else:
                raise
    def testUnbound(self):
        # Issue #30205 (note getsockname() can return None on OS X)
        self.assertIn(self.sock.getsockname(), ('', None))
    def testStrAddr(self):
        # Test binding to and retrieving a normal string pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
    def testBytesAddr(self):
        # Test binding to a bytes pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, self.encoded(path))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
    def testSurrogateescapeBind(self):
        # Test binding to a valid non-ASCII pathname, with the
        # non-ASCII bytes supplied using surrogateescape encoding.
        path = os.path.abspath(support.TESTFN_UNICODE)
        b = self.encoded(path)
        self.bind(self.sock, b.decode("ascii", "surrogateescape"))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
    def testUnencodableAddr(self):
        # Test binding to a pathname that cannot be encoded in the
        # file system encoding.
        if support.TESTFN_UNENCODABLE is None:
            self.skipTest("No unencodable filename available")
        path = os.path.abspath(support.TESTFN_UNENCODABLE)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send().
    """
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)
    def testRecvIntoArray(self):
        # recv_into() an array.array buffer.
        buf = array.array("B", [0] * len(MSG))
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    def _testRecvIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)
    def testRecvIntoBytearray(self):
        # recv_into() a bytearray buffer.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoBytearray = _testRecvIntoArray
    def testRecvIntoMemoryview(self):
        # recv_into() a memoryview over a bytearray.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoMemoryview = _testRecvIntoArray
    def testRecvFromIntoArray(self):
        # recvfrom_into() an array.array buffer.
        buf = array.array("B", [0] * len(MSG))
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    def _testRecvFromIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)
    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoBytearray = _testRecvFromIntoArray
    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoMemoryview = _testRecvFromIntoArray
    def testRecvFromIntoSmallBuffer(self):
        # See issue #20246.  Requesting more bytes than the buffer holds
        # must raise ValueError instead of overflowing the buffer.
        buf = bytearray(8)
        self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
    def _testRecvFromIntoSmallBuffer(self):
        self.serv_conn.send(MSG)
    def testRecvFromIntoEmptyBuffer(self):
        # An empty buffer is valid; both forms must not crash.
        buf = bytearray()
        self.cli_conn.recvfrom_into(buf)
        self.cli_conn.recvfrom_into(buf, 0)
    _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
# TIPC name-sequence parameters shared by the TIPC tests below.
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
    """Check if the TIPC module is loaded

    The TIPC module is not loaded automatically on Ubuntu and probably
    other Linux distros.
    """
    if not hasattr(socket, "AF_TIPC"):
        return False
    try:
        modules = open("/proc/modules")
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # have not the permission to read it.
        return False
    with modules:
        return any(line.startswith("tipc ") for line in modules)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
    # Basic datagram (SOCK_RDM) round trip over AF_TIPC.
    def testRDM(self):
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(srv.close)
        self.addCleanup(cli.close)
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)
        # Send to an instance in the middle of the bound name sequence.
        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        cli.sendto(MSG, sendaddr)
        msg, recvaddr = srv.recvfrom(1024)
        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    # Stream (SOCK_STREAM) round trip over AF_TIPC, server and client
    # running in separate threads via ThreadableTest.
    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)
    def setUp(self):
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.srv.close)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen()
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()
        self.addCleanup(self.conn.close)
    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()
    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)
    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
    # Sockets as context managers: leaving the "with" block closes them.
    def _testSocketClass(self):
        # base test
        with socket.socket() as sock:
            self.assertFalse(sock._closed)
        self.assertTrue(sock._closed)
        # close inside with block
        with socket.socket() as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # exception inside with block
        with socket.socket() as sock:
            self.assertRaises(OSError, sock.sendall, b'foo')
        self.assertTrue(sock._closed)
    def testCreateConnectionBase(self):
        # Server side: simple echo of whatever the client sends.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)
    def _testCreateConnectionBase(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            self.assertFalse(sock._closed)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')
        self.assertTrue(sock._closed)
    def testCreateConnectionClose(self):
        # Server side: echo again (the client closes early).
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)
    def _testCreateConnectionClose(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            sock.close()
        self.assertTrue(sock._closed)
        self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
    # File-descriptor inheritability (PEP 446) of sockets.
    @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
                         "SOCK_CLOEXEC not defined")
    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_CLOEXEC(self):
        # SOCK_CLOEXEC must not leak into .type and makes the fd
        # non-inheritable.
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertFalse(s.get_inheritable())
    def test_default_inheritable(self):
        # New sockets are non-inheritable by default (PEP 446).
        sock = socket.socket()
        with sock:
            self.assertEqual(sock.get_inheritable(), False)
    def test_dup(self):
        # dup() keeps the non-inheritable flag.
        sock = socket.socket()
        with sock:
            newsock = sock.dup()
            sock.close()
            with newsock:
                self.assertEqual(newsock.get_inheritable(), False)
    def test_set_inheritable(self):
        sock = socket.socket()
        with sock:
            sock.set_inheritable(True)
            self.assertEqual(sock.get_inheritable(), True)
            sock.set_inheritable(False)
            self.assertEqual(sock.get_inheritable(), False)
    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        # get_inheritable() reflects the FD_CLOEXEC flag set via fcntl.
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(sock.get_inheritable(), False)
            # clear FD_CLOEXEC flag
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags &= ~fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)
            self.assertEqual(sock.get_inheritable(), True)
    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        # set_inheritable() toggles the FD_CLOEXEC flag observable via fcntl.
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             fcntl.FD_CLOEXEC)
            sock.set_inheritable(True)
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             0)
    def test_socketpair(self):
        # Both ends of a socketpair are non-inheritable.
        s1, s2 = socket.socketpair()
        self.addCleanup(s1.close)
        self.addCleanup(s2.close)
        self.assertEqual(s1.get_inheritable(), False)
        self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    """Tests for socket.share()/socket.fromshare() (Windows only).

    A listening socket is serialized with share(), passed to a child
    process over a multiprocessing.Queue, and recreated there with
    fromshare().
    """
    # This must be classmethod and not staticmethod or multiprocessing
    # won't be able to bootstrap it.
    @classmethod
    def remoteProcessServer(cls, q):
        # Recreate socket from shared data
        sdata = q.get()
        message = q.get()
        s = socket.fromshare(sdata)
        s2, c = s.accept()
        # Send the message
        s2.sendall(message)
        s2.close()
        s.close()
    def testShare(self):
        # Transfer the listening server socket to another process
        # and service it from there.
        # Create process:
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
        p.start()
        # Get the shared socket data
        data = self.serv.share(p.pid)
        # Pass the shared socket to the other process
        addr = self.serv.getsockname()
        self.serv.close()
        q.put(data)
        # The data that the server will send us
        message = b"slapmahfro"
        q.put(message)
        # Connect
        s = socket.create_connection(addr)
        # listen for the data
        m = []
        while True:
            data = s.recv(100)
            if not data:
                break
            m.append(data)
        s.close()
        received = b"".join(m)
        self.assertEqual(received, message)
        p.join()
    def testShareLength(self):
        # Truncated or padded share() blobs must be rejected.
        data = self.serv.share(os.getpid())
        self.assertRaises(ValueError, socket.fromshare, data[:-1])
        self.assertRaises(ValueError, socket.fromshare, data+b"foo")
    def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking socket
        # since the internal python timeout value isn't transferred.
        self.assertEqual(org.gettimeout(), None)
        self.assertEqual(org.gettimeout(), other.gettimeout())
        self.assertEqual(org.family, other.family)
        self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally windows will have picked the correct value.
        # Python introspection on the socket however will still return
        # 0.  For the shared socket, the python value is recreated
        # from the actual value, so it may not compare correctly.
        if org.proto != 0:
            self.assertEqual(org.proto, other.proto)
    def testShareLocal(self):
        # Round-trip share()/fromshare() within the same process.
        data = self.serv.share(os.getpid())
        s = socket.fromshare(data)
        try:
            self.compareSockets(self.serv, s)
        finally:
            s.close()
    def testTypes(self):
        # Exercise sharing across the supported family/type combinations.
        families = [socket.AF_INET, socket.AF_INET6]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
        for f in families:
            for t in types:
                try:
                    source = socket.socket(f, t)
                except OSError:
                    continue # This combination is not supported
                try:
                    data = source.share(os.getpid())
                    shared = socket.fromshare(data)
                    try:
                        self.compareSockets(source, shared)
                    finally:
                        shared.close()
                finally:
                    source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
    """
    Test the send() implementation of socket.sendfile().

    Each test is a client/server pair (see ThreadableTest): the '_test*'
    method runs in the client thread and pushes a file through
    meth_from_sock(); the matching 'test*' method runs in the server,
    receives the bytes and checks them against the fixture file.
    """
    FILESIZE = (10 * 1024 * 1024)  # 10 MiB
    BUFSIZE = 8192
    FILEDATA = b""
    TIMEOUT = 2
    @classmethod
    def setUpClass(cls):
        # Build a FILESIZE-byte fixture file of random ASCII letters once
        # for the whole class, and keep a copy of its contents in memory.
        def chunks(total, step):
            assert total >= step
            while total > step:
                yield step
                total -= step
            if total:
                yield total
        chunk = b"".join([random.choice(string.ascii_letters).encode()
                          for i in range(cls.BUFSIZE)])
        with open(support.TESTFN, 'wb') as f:
            for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
                f.write(chunk)
        with open(support.TESTFN, 'rb') as f:
            cls.FILEDATA = f.read()
            assert len(cls.FILEDATA) == cls.FILESIZE
    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)
    def accept_conn(self):
        # Accept one client connection with a bounded wait.
        self.serv.settimeout(MAIN_TIMEOUT)
        conn, addr = self.serv.accept()
        conn.settimeout(self.TIMEOUT)
        self.addCleanup(conn.close)
        return conn
    def recv_data(self, conn):
        # Drain the connection until EOF and return everything received.
        received = []
        while True:
            chunk = conn.recv(self.BUFSIZE)
            if not chunk:
                break
            received.append(chunk)
        return b''.join(received)
    def meth_from_sock(self, sock):
        # Depending on the mixin class being run return either send()
        # or sendfile() method implementation.
        return getattr(sock, "_sendfile_use_send")
    # regular file
    def _testRegularFile(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)
    def testRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)
    # non regular file
    def _testNonRegularFile(self):
        # BytesIO has no fileno(), so sendfile() must fall back to send();
        # calling the sendfile-based path directly must give up explicitly.
        address = self.serv.getsockname()
        file = io.BytesIO(self.FILEDATA)
        with socket.create_connection(address) as sock, file as file:
            sent = sock.sendfile(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)
            self.assertRaises(socket._GiveupOnSendfile,
                              sock._sendfile_use_sendfile, file)
    def testNonRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)
    # empty file
    def _testEmptyFileSend(self):
        address = self.serv.getsockname()
        filename = support.TESTFN + "2"
        with open(filename, 'wb'):
            self.addCleanup(support.unlink, filename)
        file = open(filename, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, 0)
            self.assertEqual(file.tell(), 0)
    def testEmptyFileSend(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(data, b"")
    # offset
    def _testOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=5000)
            self.assertEqual(sent, self.FILESIZE - 5000)
            self.assertEqual(file.tell(), self.FILESIZE)
    def testOffset(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE - 5000)
        self.assertEqual(data, self.FILEDATA[5000:])
    # count
    def _testCount(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 5000007
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)
    def testCount(self):
        count = 5000007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])
    # count small
    def _testCountSmall(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 1
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)
    def testCountSmall(self):
        count = 1
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])
    # count + offset
    def _testCountWithOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 100007
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=2007, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count + 2007)
    def testCountWithOffset(self):
        count = 100007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[2007:count+2007])
    # non blocking sockets are not supposed to work
    def _testNonBlocking(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            sock.setblocking(False)
            meth = self.meth_from_sock(sock)
            self.assertRaises(ValueError, meth, file)
            self.assertRaises(ValueError, sock.sendfile, file)
    def testNonBlocking(self):
        conn = self.accept_conn()
        if conn.recv(8192):
            self.fail('was not supposed to receive any data')
    # timeout (non-triggered)
    def _testWithTimeout(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)
    def testWithTimeout(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)
    # timeout (triggered)
    def _testWithTimeoutTriggeredSend(self):
        # A very short timeout with a server that does not drain fast
        # enough must raise socket.timeout.
        address = self.serv.getsockname()
        with open(support.TESTFN, 'rb') as file:
            with socket.create_connection(address) as sock:
                sock.settimeout(0.01)
                meth = self.meth_from_sock(sock)
                self.assertRaises(socket.timeout, meth, file)
    def testWithTimeoutTriggeredSend(self):
        conn = self.accept_conn()
        conn.recv(88192)
    # errors
    def _test_errors(self):
        pass
    def test_errors(self):
        # Argument validation: wrong socket type, text-mode file, and
        # invalid 'count' values.
        with open(support.TESTFN, 'rb') as file:
            with socket.socket(type=socket.SOCK_DGRAM) as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "SOCK_STREAM", meth, file)
        with open(support.TESTFN, 'rt') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "binary mode", meth, file)
        with open(support.TESTFN, 'rb') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count='2')
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count=0.1)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=0)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
                     'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
    """
    Test the sendfile() implementation of socket.sendfile().

    Reuses every test from SendfileUsingSendTest, but routes the client
    side through the os.sendfile()-based implementation instead of send().
    """
    def meth_from_sock(self, sock):
        # Select the zero-copy sendfile() path for all inherited tests.
        return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
    # tests for AF_ALG
    def create_alg(self, typ, name):
        """Return an AF_ALG socket bound to (typ, name), or skip the test
        if the kernel does not provide that algorithm."""
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        try:
            sock.bind((typ, name))
        except FileNotFoundError as e:
            # type / algorithm is not available
            sock.close()
            raise unittest.SkipTest(str(e), typ, name)
        else:
            return sock
    # bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
    # at least on ppc64le architecture
    @support.requires_linux_version(4, 5)
    def test_sha256(self):
        # Known SHA-256 digest of b"abc" (FIPS 180 test vector).
        expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
                                 "177a9cb410ff61f20015ad")
        with self.create_alg('hash', 'sha256') as algo:
            op, _ = algo.accept()
            with op:
                op.sendall(b"abc")
                self.assertEqual(op.recv(512), expected)
            # Same digest when fed one byte at a time with MSG_MORE.
            op, _ = algo.accept()
            with op:
                op.send(b'a', socket.MSG_MORE)
                op.send(b'b', socket.MSG_MORE)
                op.send(b'c', socket.MSG_MORE)
                op.send(b'')
                self.assertEqual(op.recv(512), expected)
    def test_hmac_sha1(self):
        # RFC 2202 test case 2 (key b"Jefe").
        expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
        with self.create_alg('hash', 'hmac(sha1)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
            op, _ = algo.accept()
            with op:
                op.sendall(b"what do ya want for nothing?")
                self.assertEqual(op.recv(512), expected)
    # Although it should work with 3.19 and newer the test blocks on
    # Ubuntu 15.10 with Kernel 4.2.0-19.
    @support.requires_linux_version(4, 3)
    def test_aes_cbc(self):
        # RFC 3602 case 1: single-block AES-128-CBC encryption/decryption.
        key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
        iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
        msg = b"Single block msg"
        ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
        msglen = len(msg)
        with self.create_alg('skcipher', 'cbc(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 flags=socket.MSG_MORE)
                op.sendall(msg)
                self.assertEqual(op.recv(msglen), ciphertext)
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([ciphertext],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                self.assertEqual(op.recv(msglen), msg)
            # long message
            multiplier = 1024
            longmsg = [msg] * multiplier
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(longmsg,
                                 op=socket.ALG_OP_ENCRYPT, iv=iv)
                enc = op.recv(msglen * multiplier)
            self.assertEqual(len(enc), msglen * multiplier)
            self.assertEqual(enc[:msglen], ciphertext)
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([enc],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                dec = op.recv(msglen * multiplier)
            self.assertEqual(len(dec), msglen * multiplier)
            self.assertEqual(dec, msg * multiplier)
    @support.requires_linux_version(4, 9)  # see issue29324
    def test_aead_aes_gcm(self):
        # NOTE(review): '<KEY>' is a redacted placeholder — bytes.fromhex()
        # will raise ValueError on it; restore the original hex key from
        # upstream test_socket.py before running this test.
        key = bytes.fromhex('<KEY>')
        iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
        plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
        assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
        expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
        expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
        taglen = len(expected_tag)
        assoclen = len(assoc)
        with self.create_alg('aead', 'gcm(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
                            None, taglen)
            # send assoc, plain and tag buffer in separate steps
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen, flags=socket.MSG_MORE)
                op.sendall(assoc, socket.MSG_MORE)
                op.sendall(plain)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])
            # now with msg
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])
            # create anc data manually
            pack_uint32 = struct.Struct('I').pack
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg(
                    [msg],
                    ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
                     [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
                     [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
                    )
                )
                res = op.recv(len(msg) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])
            # decrypt and verify
            op, _ = algo.accept()
            with op:
                msg = assoc + expected_ct + expected_tag
                op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(len(msg) - taglen)
                self.assertEqual(plain, res[assoclen:])
    @support.requires_linux_version(4, 3)  # see test_aes_cbc
    def test_drbg_pr_sha256(self):
        # deterministic random bit generator, prediction resistance, sha256
        with self.create_alg('rng', 'drbg_pr_sha256') as algo:
            extra_seed = os.urandom(32)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
            op, _ = algo.accept()
            with op:
                rn = op.recv(32)
                self.assertEqual(len(rn), 32)
    def test_sendmsg_afalg_args(self):
        # sendmsg_afalg() argument validation on an unbound AF_ALG socket.
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        with sock:
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg()
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=None)
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(1)
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
    def test_length_restriction(self):
        # bpo-35050, off-by-one error in length check
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(sock.close)
        # salg_type[14]
        with self.assertRaises(FileNotFoundError):
            sock.bind(("t" * 13, "name"))
        with self.assertRaisesRegex(ValueError, "type too long"):
            sock.bind(("t" * 14, "name"))
        # salg_name[64]
        with self.assertRaises(FileNotFoundError):
            sock.bind(("type", "n" * 63))
        with self.assertRaisesRegex(ValueError, "name too long"):
            sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
    """Guard test: fail when Windows builds expose TCP_* constants that
    have not been reviewed and added to knownTCPFlags (bpo-32394)."""
    knownTCPFlags = {
                       # available since long time ago
                       'TCP_MAXSEG',
                       'TCP_NODELAY',
                       # available starting with Windows 10 1607
                       'TCP_FASTOPEN',
                       # available starting with Windows 10 1703
                       'TCP_KEEPCNT',
                       # available starting with Windows 10 1709
                       'TCP_KEEPIDLE',
                       'TCP_KEEPINTVL'
                       }
    def test_new_tcp_flags(self):
        # Any TCP_* name in the socket module that is not whitelisted
        # above is reported as "new".
        provided = [s for s in dir(socket) if s.startswith('TCP')]
        unknown = [s for s in provided if s not in self.knownTCPFlags]
        self.assertEqual([], unknown,
            "New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
    """Collect the test classes of this module and run them with
    threading setup/cleanup around the run."""
    tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
             TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
    tests.extend([
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        UnicodeReadFileObjectClassTestCase,
        UnicodeWriteFileObjectClassTestCase,
        UnicodeReadWriteFileObjectClassTestCase,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
        ContextManagersTest,
        InheritanceTest,
        NonblockConstantTest
    ])
    # NOTE(review): platform-specific suites (Unix domain, TIPC, CAN, RDS,
    # AF_ALG, QIPCRTR, VSOCK, Windows sharing/TCP flags) are appended
    # unconditionally here; this relies on each class carrying its own
    # skip decorator — confirm against upstream, which guards some of
    # these with hasattr()/HAVE_* checks instead.
    tests.append(BasicSocketPairTest)
    tests.append(TestUnixDomain)
    tests.append(TestLinuxAbstractNamespace)
    tests.extend([TIPCTest, TIPCThreadableTest])
    tests.extend([BasicCANTest, CANTest])
    tests.extend([BasicRDSTest, RDSTest])
    tests.append(LinuxKernelCryptoAPI)
    tests.append(BasicQIPCRTRTest)
    tests.extend([
        BasicVSOCKTest,
        ThreadedVSOCKSocketStreamTest,
    ])
    tests.extend([
        CmsgMacroTests,
        SendmsgUDPTest,
        RecvmsgUDPTest,
        RecvmsgIntoUDPTest,
        SendmsgUDP6Test,
        RecvmsgUDP6Test,
        RecvmsgRFC3542AncillaryUDP6Test,
        RecvmsgIntoRFC3542AncillaryUDP6Test,
        RecvmsgIntoUDP6Test,
        SendmsgTCPTest,
        RecvmsgTCPTest,
        RecvmsgIntoTCPTest,
        SendmsgSCTPStreamTest,
        RecvmsgSCTPStreamTest,
        RecvmsgIntoSCTPStreamTest,
        SendmsgUnixStreamTest,
        RecvmsgUnixStreamTest,
        RecvmsgIntoUnixStreamTest,
        RecvmsgSCMRightsStreamTest,
        RecvmsgIntoSCMRightsStreamTest,
        # These are slow when setitimer() is not available
        InterruptedRecvTimeoutTest,
        InterruptedSendTimeoutTest,
        TestSocketSharing,
        SendfileUsingSendTest,
        SendfileUsingSendfileTest,
    ])
    tests.append(TestMSWindowsTCPFlags)
    # Track dangling threads so the run fails loudly if tests leak them.
    thread_info = support.threading_setup()
    support.run_unittest(*tests)
    support.threading_cleanup(*thread_info)
# Allow running this test module directly: python test_socket.py
if __name__ == "__main__":
    test_main()
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
# Shared constants for the whole test module.
HOST = support.HOST
# test unicode string and carriage return
# NOTE(review): '<NAME>' looks like redacted text; the message only needs
# some non-ASCII plus CRLF, so the placeholder is harmless for the tests.
MSG = '<NAME> was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
    """Return this host's local VSOCK context ID (CID), or None.

    Returns None when fcntl is unavailable or the /dev/vsock ioctl fails
    (no VSOCK support).
    """
    if fcntl is None:
        return None
    try:
        with open("/dev/vsock", "rb") as f:
            # NOTE(review): the ioctl argument must be a 4-byte buffer for
            # the "I" unpack below — confirm the literal was not
            # whitespace-mangled in transit.
            r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
    except OSError:
        return None
    else:
        return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
    """Check whether AF_VSOCK sockets are supported on this host."""
    # A usable VSOCK stack implies get_cid() can read the local CID.
    return get_cid() is not None
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
    """Context manager that temporarily installs *timeout* as the
    module-wide default socket timeout, restoring the previous value
    on exit (even on error)."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(timeout)
        yield
    finally:
        socket.setdefaulttimeout(saved)
# Feature-detection flags, computed once at import time and used by the
# skip decorators on the platform-specific test classes below.
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
    """Fixture: a listening TCP server socket in self.serv on self.port."""
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # bind_port() picks an unused ephemeral port for us.
        self.port = support.bind_port(self.serv)
        self.serv.listen()
    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketUDPTest(unittest.TestCase):
    """Fixture: a bound UDP server socket in self.serv on self.port."""
    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = support.bind_port(self.serv)
    def tearDown(self):
        self.serv.close()
        self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
    """Subclass of unittest.TestCase with thread-safe cleanup methods.

    This subclass protects the addCleanup() and doCleanups() methods
    with a recursive lock.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # RLock so a cleanup callback may itself call addCleanup().
        self._cleanup_lock = threading.RLock()
    def addCleanup(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().addCleanup(*args, **kwargs)
    def doCleanups(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):

    """To be able to run this test, a `vcan0` CAN interface can be created with
    the following commands:
    # modprobe vcan
    # ip link add dev vcan0 type vcan
    # ifconfig vcan0 up
    """
    interface = 'vcan0'
    bufsize = 128

    """The CAN frame structure is defined in <linux/can.h>:

    struct can_frame {
        canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
        __u8    can_dlc; /* data length code: 0 .. 8 */
        __u8    data[8] __attribute__((aligned(8)));
    };
    """
    can_frame_fmt = "=IB3x8s"
    can_frame_size = struct.calcsize(can_frame_fmt)

    """The Broadcast Management Command frame structure is defined
    in <linux/can/bcm.h>:

    struct bcm_msg_head {
        __u32 opcode;
        __u32 flags;
        __u32 count;
        struct timeval ival1, ival2;
        canid_t can_id;
        __u32 nframes;
        struct can_frame frames[0];
    }

    `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
    `struct can_frame` definition). Must use native not standard types for packing.
    """
    bcm_cmd_msg_fmt = "@3I4l2I"
    # Pad the format so the struct size is a multiple of 8 (alignment of
    # the trailing can_frame array).
    bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)

    def setUp(self):
        self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        self.addCleanup(self.s.close)
        try:
            self.s.bind((self.interface,))
        except OSError:
            # Skip rather than fail when vcan0 has not been set up.
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
class SocketRDSTest(unittest.TestCase):

    """To be able to run this test, the `rds` kernel module must be loaded:
    # modprobe rds
    """
    bufsize = 8192

    def setUp(self):
        self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(self.serv.close)
        try:
            self.port = support.bind_port(self.serv)
        except OSError:
            # bind() fails when the rds module is not loaded.
            self.skipTest('unable to bind RDS socket')
class ThreadableTest:
    """Threadable Test class

    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:

        class NewClass (OldClass, ThreadableTest):
            pass

    This class defines two new fixture functions with obvious
    purposes for overriding:

        clientSetUp ()
        clientTearDown ()

    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:

        def testFoo(self):
            # Server portion

        def _testFoo(self):
            # Client portion

    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.

    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """

    def __init__(self):
        # Swap the true setup function
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown

    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()

    def _setUp(self):
        # Events coordinating the server/client handshake; the queue
        # carries at most one exception from the client back to the
        # main (server) thread.
        self.wait_threads = support.wait_threads_exit()
        self.wait_threads.__enter__()
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        self.queue = queue.Queue(1)
        self.server_crashed = False

        # Do some munging to start the client test.
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))

        try:
            self.__setUp()
        except:
            self.server_crashed = True
            raise
        finally:
            # Release the client even when server setup failed, so the
            # client thread can notice server_crashed and bail out.
            self.server_ready.set()
        self.client_ready.wait()

    def _tearDown(self):
        self.__tearDown()
        self.done.wait()
        self.wait_threads.__exit__(None, None, None)

        # Re-raise any exception the client thread queued.
        if self.queue.qsize():
            exc = self.queue.get()
            raise exc

    def clientRun(self, test_func):
        self.server_ready.wait()
        try:
            self.clientSetUp()
        except BaseException as e:
            self.queue.put(e)
            self.clientTearDown()
            return
        finally:
            self.client_ready.set()
        if self.server_crashed:
            self.clientTearDown()
            return
        if not hasattr(test_func, '__call__'):
            raise TypeError("test_func must be a callable function")
        try:
            test_func()
        except BaseException as e:
            self.queue.put(e)
        finally:
            self.clientTearDown()

    def clientSetUp(self):
        raise NotImplementedError("clientSetUp must be implemented.")

    def clientTearDown(self):
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    """TCP server fixture plus a client socket (self.cli) in a thread."""
    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    """UDP server fixture plus a client socket (self.cli) in a thread."""
    def __init__(self, methodName='runTest'):
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
    """CAN server fixture plus a client CAN socket in a thread."""
    def __init__(self, methodName='runTest'):
        SocketCANTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        try:
            self.cli.bind((self.interface,))
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
    """RDS server fixture plus a bound client RDS socket in a thread."""
    def __init__(self, methodName='runTest'):
        SocketRDSTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        try:
            # RDS sockets must be bound explicitly to send or receive data
            self.cli.bind((HOST, 0))
            self.cli_addr = self.cli.getsockname()
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
          'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
          "This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
    """Stream test over AF_VSOCK between host and a virtual guest."""
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def setUp(self):
        self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.serv.close)
        self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
        self.serv.listen()
        # accept() blocks, so release the client thread first.
        self.serverExplicitReady()
        self.conn, self.connaddr = self.serv.accept()
        self.addCleanup(self.conn.close)
    def clientSetUp(self):
        # Small delay to let the server reach accept().
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        cid = get_cid()
        self.cli.connect((cid, VSOCKPORT))
    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Socket tests for client-server connection.

    self.cli_conn is a client socket connected to the server.  The
    setUp() method guarantees that it is connected to the server.
    """

    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)

    def clientSetUp(self):
        ThreadedTCPSocketTest.clientSetUp(self)
        self.cli.connect((HOST, self.port))
        # From the client's point of view the same socket is its
        # connection to the server.
        self.serv_conn = self.cli

    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    """Fixture: a connected socketpair, server end self.serv and client
    end self.cli, with the client side driven from a thread."""
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)
    def setUp(self):
        self.serv, self.cli = socket.socketpair()
    def tearDown(self):
        self.serv.close()
        self.serv = None
    def clientSetUp(self):
        # Both ends already exist; nothing to create on the client side.
        pass
    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
    """A base class for socket tests.

    Subclasses must provide methods newSocket() to return a new socket
    and bindSock(sock) to bind it to an unused address.

    Creates a socket self.serv and sets self.serv_addr to its address.
    """

    def setUp(self):
        self.serv = self.newSocket()
        self.bindServer()

    def bindServer(self):
        """Bind server socket and set self.serv_addr to its address."""
        self.bindSock(self.serv)
        self.serv_addr = self.serv.getsockname()

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketListeningTestMixin(SocketTestBase):
    """Mixin to listen on the server socket."""

    def setUp(self):
        super().setUp()
        self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
                              ThreadableTest):
    """Mixin to add client socket and allow client/server tests.

    Client socket is self.cli and its address is self.cli_addr.  See
    ThreadableTest for usage information.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = self.newClientSocket()
        self.bindClient()

    def newClientSocket(self):
        """Return a new socket for use as client."""
        return self.newSocket()

    def bindClient(self):
        """Bind client socket and set self.cli_addr to its address."""
        self.bindSock(self.cli)
        self.cli_addr = self.cli.getsockname()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
                               ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.

    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn.  (Based on
    SocketConnectedTest.)
    """

    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()

    def clientSetUp(self):
        super().clientSetUp()
        # Connect to the server bound in the main thread; the same client
        # socket doubles as the connection object.
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli

    def clientTearDown(self):
        try:
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            # clientSetUp() may not have run if the server side failed early.
            pass
        super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
    """Base class for Unix-domain socket tests."""

    # This class is used for file descriptor passing tests, so we
    # create the sockets in a private directory so that other users
    # can't send anything that might be problematic for a privileged
    # user running the tests.

    def setUp(self):
        # Private temp directory holds the socket file; removed on cleanup.
        self.dir_path = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, self.dir_path)
        super().setUp()

    def bindSock(self, sock):
        # Bind to a fresh path inside the private directory.
        path = tempfile.mktemp(dir=self.dir_path)
        support.bind_unix_socket(sock, path)
        self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
    """Base class for Unix-domain SOCK_STREAM tests."""

    def newSocket(self):
        return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
    """Base class for IPv4 socket tests."""

    host = HOST

    def setUp(self):
        super().setUp()
        # Convenience: expose the OS-chosen port number separately.
        self.port = self.serv_addr[1]

    def bindSock(self, sock):
        support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
    """Base class for TCP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
    """Base class for UDP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
    """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                             socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
    """Base class for IPv6 socket tests."""

    host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
    """Base class for UDP-over-IPv6 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
    """Skip decorated test if condition is true, add client_skip decorator.

    If the decorated object is not a class, sets its attribute
    "client_skip" to a decorator which will return an empty function
    if the test is to be skipped, or the original function if it is
    not.  This can be used to avoid running the client part of a
    skipped test when using ThreadableTest.
    """
    def _noop_client(*args, **kwargs):
        # Stand-in for the client half of a skipped test.
        pass

    def _skipping(obj):
        # Mark the test skipped; functions also get a client_skip that
        # swaps the client routine for a no-op.
        decorated = unittest.skip(reason)(obj)
        if not isinstance(obj, type):
            decorated.client_skip = lambda f: _noop_client
        return decorated

    def _passthrough(obj):
        # Not skipped: only ensure functions expose an identity client_skip.
        if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
            obj.client_skip = lambda f: f
        return obj

    if condition:
        return _skipping
    return _passthrough
def requireAttrs(obj, *attributes):
    """Skip decorated test if obj is missing any of the given attributes.

    Sets client_skip attribute as skipWithClientIf() does.
    """
    absent = [attr for attr in attributes if not hasattr(obj, attr)]
    reason = "don't have " + ", ".join(absent)
    return skipWithClientIf(absent, reason)
def requireSocket(*args):
    """Skip decorated test if a socket cannot be created with given arguments.

    When an argument is given as a string, will use the value of that
    attribute of the socket module, or skip the test if it doesn't
    exist.  Sets client_skip attribute as skipWithClientIf() does.
    """
    err = None
    # String arguments name socket-module attributes; collect unknown ones.
    unknown = [a for a in args
               if isinstance(a, str) and not hasattr(socket, a)]
    if unknown:
        err = "don't have " + ", ".join(name for name in unknown)
    else:
        resolved = [getattr(socket, a) if isinstance(a, str) else a
                    for a in args]
        try:
            probe = socket.socket(*resolved)
        except OSError as e:
            # XXX: check errno?
            err = str(e)
        else:
            probe.close()
    return skipWithClientIf(
        err is not None,
        "can't create socket({0}): {1}".format(
            ", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
    def test_SocketType_is_socketobject(self):
        # socket.SocketType must alias the C-level socket class.
        import _socket
        self.assertTrue(socket.SocketType is _socket.socket)
        s = socket.socket()
        self.assertIsInstance(s, socket.SocketType)
        s.close()

    def test_repr(self):
        # repr() of an open socket must show fd/family/type/proto and,
        # once bound, the local address.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with s:
            self.assertIn('fd=%i' % s.fileno(), repr(s))
            self.assertIn('family=%s' % socket.AF_INET, repr(s))
            self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
            self.assertIn('proto=0', repr(s))
            self.assertNotIn('raddr', repr(s))
            s.bind(('127.0.0.1', 0))
            self.assertIn('laddr', repr(s))
            self.assertIn(str(s.getsockname()), repr(s))
        # After the with-block the socket is closed and the repr changes.
        self.assertIn('[closed]', repr(s))
        self.assertNotIn('laddr', repr(s))

    @unittest.skipUnless(_socket is not None, 'need _socket module')
    def test_csocket_repr(self):
        # The low-level _socket repr shows the live fd, then -1 when closed.
        s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
        try:
            expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
                        % (s.fileno(), s.family, s.type, s.proto))
            self.assertEqual(repr(s), expected)
        finally:
            s.close()
        expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
                    % (s.family, s.type, s.proto))
        self.assertEqual(repr(s), expected)

    def test_weakref(self):
        # A socket must be weak-referenceable, and the proxy must die
        # with the socket.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            p = proxy(s)
            self.assertEqual(p.fileno(), s.fileno())
        s = None
        try:
            p.fileno()
        except ReferenceError:
            pass
        else:
            self.fail('Socket proxy still exists')

    def testSocketError(self):
        # Testing socket module exceptions; herror/gaierror are OSError
        # subclasses.
        msg = "Error raising socket exception (%s)."
        with self.assertRaises(OSError, msg=msg % 'OSError'):
            raise OSError
        with self.assertRaises(OSError, msg=msg % 'socket.herror'):
            raise socket.herror
        with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
            raise socket.gaierror
    def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
        # Each bad-argument combination must raise TypeError with a
        # message that names the offending type.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind(('', 0))
        sockname = s.getsockname()
        # 2 args
        with self.assertRaises(TypeError) as cm:
            s.sendto('\u2620', sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'str'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'complex'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', None)
        self.assertIn('not NoneType',str(cm.exception))
        # 3 args
        with self.assertRaises(TypeError) as cm:
            s.sendto('\u2620', 0, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'str'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, 0, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'complex'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 0, None)
        self.assertIn('not NoneType', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 'bar', sockname)
        self.assertIn('an integer is required', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', None, None)
        self.assertIn('an integer is required', str(cm.exception))
        # wrong number of args
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo')
        self.assertIn('(1 given)', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 0, sockname, 4)
        self.assertIn('(4 given)', str(cm.exception))
    def testCrucialConstants(self):
        # Testing for mission critical constants; a missing attribute
        # raises AttributeError and fails the test.
        socket.AF_INET
        if socket.has_ipv6:
            socket.AF_INET6
        socket.SOCK_STREAM
        socket.SOCK_DGRAM
        socket.SOCK_RAW
        socket.SOCK_RDM
        socket.SOCK_SEQPACKET
        socket.SOL_SOCKET
        socket.SO_REUSEADDR

    def testCrucialIpProtoConstants(self):
        # Core IP protocol numbers must always be exported.
        socket.IPPROTO_TCP
        socket.IPPROTO_UDP
        if socket.has_ipv6:
            socket.IPPROTO_IPV6

    @unittest.skipUnless(os.name == "nt", "Windows specific")
    def testWindowsSpecificConstants(self):
        # Protocol constants only exposed on Windows builds.
        socket.IPPROTO_ICLFXBM
        socket.IPPROTO_ST
        socket.IPPROTO_CBT
        socket.IPPROTO_IGP
        socket.IPPROTO_RDP
        socket.IPPROTO_PGM
        socket.IPPROTO_L2TP
        socket.IPPROTO_SCTP
    def testHostnameRes(self):
        # Testing hostname resolution mechanisms
        hostname = socket.gethostname()
        try:
            ip = socket.gethostbyname(hostname)
        except OSError:
            # Probably name lookup wasn't set up right; skip this test
            self.skipTest('name lookup failure')
        self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
        try:
            hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
        except OSError:
            # Probably a similar problem as above; skip this test
            self.skipTest('name lookup failure')
        # The FQDN of our own ip must be among the names we resolved.
        all_host_names = [hostname, hname] + aliases
        fqhn = socket.getfqdn(ip)
        if not fqhn in all_host_names:
            self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))

    def test_host_resolution(self):
        # Numeric IPv4 addresses must resolve to themselves.
        for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
            self.assertEqual(socket.gethostbyname(addr), addr)
        # we don't test support.HOSTv6 because there's a chance it doesn't have
        # a matching name entry (e.g. 'ip6-localhost')
        for host in [support.HOSTv4]:
            self.assertIn(host, socket.gethostbyaddr(host)[2])

    def test_host_resolution_bad_address(self):
        # These are all malformed IP addresses and expected not to resolve to
        # any result.  But some ISPs, e.g. AWS, may successfully resolve these
        # IPs.
        explanation = (
            "resolving an invalid IP address did not raise OSError; "
            "can be caused by a broken DNS server"
        )
        for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
                     '1:1:1:1:1:1:1:1:1']:
            with self.assertRaises(OSError, msg=addr):
                socket.gethostbyname(addr)
            with self.assertRaises(OSError, msg=explanation):
                socket.gethostbyaddr(addr)

    @unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
    @unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
    def test_sethostname(self):
        # sethostname() requires privileges; restore the old name on exit.
        oldhn = socket.gethostname()
        try:
            socket.sethostname('new')
        except OSError as e:
            if e.errno == errno.EPERM:
                self.skipTest("test should be run as root")
            else:
                raise
        try:
            # running test as root!
            self.assertEqual(socket.gethostname(), 'new')
            # Should work with bytes objects too
            socket.sethostname(b'bar')
            self.assertEqual(socket.gethostname(), 'bar')
        finally:
            socket.sethostname(oldhn)
    @unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                         'socket.if_nameindex() not available.')
    def testInterfaceNameIndex(self):
        # if_nametoindex() and if_indextoname() must round-trip every
        # entry reported by if_nameindex().
        interfaces = socket.if_nameindex()
        for index, name in interfaces:
            self.assertIsInstance(index, int)
            self.assertIsInstance(name, str)
            # interface indices are non-zero integers
            self.assertGreater(index, 0)
            _index = socket.if_nametoindex(name)
            self.assertIsInstance(_index, int)
            self.assertEqual(index, _index)
            _name = socket.if_indextoname(index)
            self.assertIsInstance(_name, str)
            self.assertEqual(name, _name)

    @unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                         'socket.if_nameindex() not available.')
    def testInvalidInterfaceNameIndex(self):
        # test nonexistent interface index/name
        self.assertRaises(OSError, socket.if_indextoname, 0)
        self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
        # test with invalid values
        self.assertRaises(TypeError, socket.if_nametoindex, 0)
        self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')

    @unittest.skipUnless(hasattr(sys, 'getrefcount'),
                         'test needs sys.getrefcount()')
    def testRefCountGetNameInfo(self):
        # Testing reference count for getnameinfo
        try:
            # On some versions, this loses a reference
            orig = sys.getrefcount(__name__)
            socket.getnameinfo(__name__,0)
        except TypeError:
            if sys.getrefcount(__name__) != orig:
                self.fail("socket.getnameinfo loses a reference")

    def testInterpreterCrash(self):
        # Making sure getnameinfo doesn't crash the interpreter
        try:
            # On some versions, this crashes the interpreter.
            socket.getnameinfo(('x', 0, 0, 0), 0)
        except OSError:
            pass
    def testNtoH(self):
        # This just checks that htons etc. are their own inverse,
        # when looking at the lower 16 or 32 bits.
        sizes = {socket.htonl: 32, socket.ntohl: 32,
                 socket.htons: 16, socket.ntohs: 16}
        for func, size in sizes.items():
            mask = (1<<size) - 1
            for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
                self.assertEqual(i & mask, func(func(i&mask)) & mask)
            # An all-ones value is byte-order invariant.
            swapped = func(mask)
            self.assertEqual(swapped & mask, mask)
            self.assertRaises(OverflowError, func, 1<<34)

    @support.cpython_only
    def testNtoHErrors(self):
        # Out-of-range values must raise OverflowError; 16-bit overflow
        # that still fits an int is currently only deprecated.
        import _testcapi
        s_good_values = [0, 1, 2, 0xffff]
        l_good_values = s_good_values + [0xffffffff]
        l_bad_values = [-1, -2, 1<<32, 1<<1000]
        s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
                                       _testcapi.INT_MAX + 1]
        s_deprecated_values = [1<<16, _testcapi.INT_MAX]
        for k in s_good_values:
            socket.ntohs(k)
            socket.htons(k)
        for k in l_good_values:
            socket.ntohl(k)
            socket.htonl(k)
        for k in s_bad_values:
            self.assertRaises(OverflowError, socket.ntohs, k)
            self.assertRaises(OverflowError, socket.htons, k)
        for k in l_bad_values:
            self.assertRaises(OverflowError, socket.ntohl, k)
            self.assertRaises(OverflowError, socket.htonl, k)
        for k in s_deprecated_values:
            self.assertWarns(DeprecationWarning, socket.ntohs, k)
            self.assertWarns(DeprecationWarning, socket.htons, k)
    def testGetServBy(self):
        eq = self.assertEqual
        # Find one service that exists, then check all the related interfaces.
        # I've ordered this by protocols that have both a tcp and udp
        # protocol, at least for modern Linuxes.
        if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
            or sys.platform in ('linux', 'darwin')):
            # avoid the 'echo' service on this platform, as there is an
            # assumption breaking non-standard port/protocol entry
            services = ('daytime', 'qotd', 'domain')
        else:
            services = ('echo', 'daytime', 'domain')
        for service in services:
            try:
                port = socket.getservbyname(service, 'tcp')
                break
            except OSError:
                pass
        else:
            # None of the candidate services resolved at all.
            raise OSError
        # Try same call with optional protocol omitted
        # Issue #26936: Android getservbyname() was broken before API 23.
        if (not hasattr(sys, 'getandroidapilevel') or
                sys.getandroidapilevel() >= 23):
            port2 = socket.getservbyname(service)
            eq(port, port2)
        # Try udp, but don't barf if it doesn't exist
        try:
            udpport = socket.getservbyname(service, 'udp')
        except OSError:
            udpport = None
        else:
            eq(udpport, port)
        # Now make sure the lookup by port returns the same service name
        # Issue #26936: Android getservbyport() is broken.
        if not support.is_android:
            eq(socket.getservbyport(port2), service)
        eq(socket.getservbyport(port, 'tcp'), service)
        if udpport is not None:
            eq(socket.getservbyport(udpport, 'udp'), service)
        # Make sure getservbyport does not accept out of range ports.
        self.assertRaises(OverflowError, socket.getservbyport, -1)
        self.assertRaises(OverflowError, socket.getservbyport, 65536)
    def testDefaultTimeout(self):
        # Testing default timeout
        # The default timeout should initially be None
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as s:
            self.assertEqual(s.gettimeout(), None)
        # Set the default timeout to 10, and see if it propagates
        with socket_setdefaulttimeout(10):
            self.assertEqual(socket.getdefaulttimeout(), 10)
            with socket.socket() as sock:
                self.assertEqual(sock.gettimeout(), 10)
            # Reset the default timeout to None, and see if it propagates
            socket.setdefaulttimeout(None)
            self.assertEqual(socket.getdefaulttimeout(), None)
            with socket.socket() as sock:
                self.assertEqual(sock.gettimeout(), None)
        # Check that setting it to an invalid value raises ValueError
        self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
        # Check that setting it to an invalid type raises TypeError
        self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
    @unittest.skipUnless(hasattr(socket, 'inet_aton'),
                         'test needs socket.inet_aton()')
    def testIPv4_inet_aton_fourbytes(self):
        # Test that issue1008086 and issue767150 are fixed.
        # It must return 4 bytes.
        self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
        self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))

    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv4toString(self):
        # inet_aton and inet_pton(AF_INET, ...) must agree on valid
        # dotted-quad strings and reject malformed ones.
        from socket import inet_aton as f, inet_pton, AF_INET
        g = lambda a: inet_pton(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )

        self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', f('192.168.127.12'))
        self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
        self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not AIX:
            assertInvalid(f, '0.0.0.')
            assertInvalid(f, '300.0.0.0')
            assertInvalid(f, 'a.0.0.0')
            assertInvalid(f, '1.2.3.4.5')
            assertInvalid(f, '::1')

        self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', g('192.168.127.12'))
        self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
        assertInvalid(g, '0.0.0.')
        assertInvalid(g, '300.0.0.0')
        assertInvalid(g, 'a.0.0.0')
        assertInvalid(g, '1.2.3.4.5')
        assertInvalid(g, '::1')

    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv6toString(self):
        # inet_pton(AF_INET6, ...) round-trips a selection of valid
        # textual IPv6 addresses and rejects malformed ones.
        try:
            from socket import inet_pton, AF_INET6, has_ipv6
            if not has_ipv6:
                self.skipTest('IPv6 not available')
        except ImportError:
            self.skipTest('could not import needed symbols from socket')

        if sys.platform == "win32":
            try:
                inet_pton(AF_INET6, '::')
            except OSError as e:
                if e.winerror == 10022:
                    self.skipTest('IPv6 might not be supported')

        f = lambda a: inet_pton(AF_INET6, a)
        assertInvalid = lambda a: self.assertRaises(
            (OSError, ValueError), f, a
        )

        self.assertEqual(b'\x00' * 16, f('::'))
        self.assertEqual(b'\x00' * 16, f('0::0'))
        self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
        self.assertEqual(
            b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
            f('fc00:e968:6179::de52:7100')
        )
        self.assertEqual(
            b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
            f('fc00:db20:35b:7399::5:0:254:2')
        )
        self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
        assertInvalid('0x20::')
        assertInvalid(':::')
        assertInvalid('::0::')
        assertInvalid('1::abc::')
        assertInvalid('1::abc::def')
        assertInvalid('1:2:3:4:5:6')
        assertInvalid('1:2:3:4:5:6:')
        assertInvalid('1:2:3:4:5:6:7:8:0')
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not AIX:
            assertInvalid('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:')

        # Mixed IPv6/IPv4-suffix forms.
        self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
            f('::254.42.23.64')
        )
        self.assertEqual(
            b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
            f('fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:254.42.23.64')
        )
        self.assertEqual(
            b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
            f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
        )
        assertInvalid('255.254.253.252')
        assertInvalid('1::260.2.3.0')
        assertInvalid('1::0.be.e.0')
        assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
        assertInvalid('::1.2.3.4:0')
        assertInvalid('0.100.200.0:3:4:5:6:7:8')
    @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                         'test needs socket.inet_ntop()')
    def testStringToIPv4(self):
        # inet_ntoa and inet_ntop(AF_INET, ...) must agree on 4-byte
        # packed addresses and reject other lengths.
        from socket import inet_ntoa as f, inet_ntop, AF_INET
        g = lambda a: inet_ntop(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )

        self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
        self.assertEqual('192.168.127.12', f(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
        self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
        assertInvalid(f, b'\x00' * 3)
        assertInvalid(f, b'\x00' * 5)
        assertInvalid(f, b'\x00' * 16)
        # bytes-like objects such as bytearray are accepted too.
        self.assertEqual('192.168.127.12', f(bytearray(b'\xaa\x55\xaa\x55')))

        self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
        self.assertEqual('192.168.127.12', g(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
        assertInvalid(g, b'\x00' * 3)
        assertInvalid(g, b'\x00' * 5)
        assertInvalid(g, b'\x00' * 16)
        self.assertEqual('192.168.127.12', g(bytearray(b'\xaa\x55\xaa\x55')))

    @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                         'test needs socket.inet_ntop()')
    def testStringToIPv6(self):
        # inet_ntop(AF_INET6, ...) must render 16-byte packed addresses
        # and reject other lengths.
        try:
            from socket import inet_ntop, AF_INET6, has_ipv6
            if not has_ipv6:
                self.skipTest('IPv6 not available')
        except ImportError:
            self.skipTest('could not import needed symbols from socket')

        if sys.platform == "win32":
            try:
                inet_ntop(AF_INET6, b'\x00' * 16)
            except OSError as e:
                if e.winerror == 10022:
                    self.skipTest('IPv6 might not be supported')

        f = lambda a: inet_ntop(AF_INET6, a)
        assertInvalid = lambda a: self.assertRaises(
            (OSError, ValueError), f, a
        )

        self.assertEqual('::', f(b'\x00' * 16))
        self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
        self.assertEqual(
            'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b',
            f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
        )
        self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))

        assertInvalid(b'\x12' * 15)
        assertInvalid(b'\x12' * 17)
        assertInvalid(b'\x12' * 4)
    # XXX The following don't test module-level functionality...

    def testSockName(self):
        # Testing getsockname()
        port = support.find_unused_port()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        sock.bind(("0.0.0.0", port))
        name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it reasonable to get the host's addr in addition to 0.0.0.0.
        # At least for eCos.  This is required for the S/390 to pass.
        try:
            my_ip_addr = socket.gethostbyname(socket.gethostname())
        except OSError:
            # Probably name lookup wasn't set up right; skip this test
            self.skipTest('name lookup failure')
        self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
        self.assertEqual(name[1], port)

    def testGetSockOpt(self):
        # Testing getsockopt()
        # We know a socket should start without reuse==0
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertFalse(reuse != 0, "initial mode is reuse")

    def testSetSockOpt(self):
        # Testing setsockopt()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertFalse(reuse == 0, "failed to set reuse mode")

    def testSendAfterClose(self):
        # testing send() after close() with timeout
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(1)
        self.assertRaises(OSError, sock.send, b"spam")

    def testCloseException(self):
        # Closing a socket whose fd was already closed elsewhere must
        # surface the OS error rather than swallow it.
        sock = socket.socket()
        sock.bind((socket._LOCALHOST, 0))
        socket.socket(fileno=sock.fileno()).close()
        try:
            sock.close()
        except OSError as err:
            # Winsock apparently raises ENOTSOCK
            self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
        else:
            self.fail("close() should raise EBADF/ENOTSOCK")

    def testNewAttributes(self):
        # testing .family, .type and .protocol
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            self.assertEqual(sock.family, socket.AF_INET)
            if hasattr(socket, 'SOCK_CLOEXEC'):
                # Kernel may or may not fold SOCK_CLOEXEC into the type.
                self.assertIn(sock.type,
                              (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                               socket.SOCK_STREAM))
            else:
                self.assertEqual(sock.type, socket.SOCK_STREAM)
            self.assertEqual(sock.proto, 0)
    def test_getsockaddrarg(self):
        # Ports outside [0, 65535] must be rejected with OverflowError.
        sock = socket.socket()
        self.addCleanup(sock.close)
        port = support.find_unused_port()
        big_port = port + 65536
        neg_port = port - 65536
        self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
        self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
        # Since find_unused_port() is inherently subject to race conditions, we
        # call it a couple times if necessary.
        for i in itertools.count():
            port = support.find_unused_port()
            try:
                sock.bind((HOST, port))
            except OSError as e:
                if e.errno != errno.EADDRINUSE or i == 5:
                    raise
            else:
                break

    @unittest.skipUnless(os.name == "nt", "Windows specific")
    def test_sock_ioctl(self):
        # Windows-only ioctl surface must exist and validate arguments.
        self.assertTrue(hasattr(socket.socket, 'ioctl'))
        self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
        self.assertTrue(hasattr(socket, 'RCVALL_ON'))
        self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
        self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
        s = socket.socket()
        self.addCleanup(s.close)
        self.assertRaises(ValueError, s.ioctl, -1, None)
        s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
    def testGetaddrinfo(self):
        # Exercise getaddrinfo() argument handling: positional/keyword
        # equivalence, family/type filters, and error cases.
        try:
            socket.getaddrinfo('localhost', 80)
        except socket.gaierror as err:
            if err.errno == socket.EAI_SERVICE:
                # see http://bugs.python.org/issue1282647
                self.skipTest("buggy libc version")
            raise
        # len of every sequence is supposed to be == 5
        for info in socket.getaddrinfo(HOST, None):
            self.assertEqual(len(info), 5)
        # host can be a domain name, a string representation of an
        # IPv4/v6 address or None
        socket.getaddrinfo('localhost', 80)
        socket.getaddrinfo('127.0.0.1', 80)
        socket.getaddrinfo(None, 80)
        if support.IPV6_ENABLED:
            socket.getaddrinfo('::1', 80)
        # port can be a string service name such as "http", a numeric
        # port number or None
        # Issue #26936: Android getaddrinfo() was broken before API level 23.
        if (not hasattr(sys, 'getandroidapilevel') or
                sys.getandroidapilevel() >= 23):
            socket.getaddrinfo(HOST, "http")
        socket.getaddrinfo(HOST, 80)
        socket.getaddrinfo(HOST, None)
        # test family and socktype filters
        infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
        for family, type, _, _, _ in infos:
            self.assertEqual(family, socket.AF_INET)
            self.assertEqual(str(family), 'AddressFamily.AF_INET')
            self.assertEqual(type, socket.SOCK_STREAM)
            self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
        infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        for _, socktype, _, _, _ in infos:
            self.assertEqual(socktype, socket.SOCK_STREAM)
        # test proto and flags arguments
        socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        # a server willing to support both IPv4 and IPv6 will
        # usually do this
        socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
        # test keyword arguments
        a = socket.getaddrinfo(HOST, None)
        b = socket.getaddrinfo(host=HOST, port=None)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, socket.AF_INET)
        b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                               socket.AI_PASSIVE)
        b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                               type=socket.SOCK_STREAM, proto=0,
                               flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        # Issue #6697.
        self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        # Issue 17269: test workaround for OS X platform bug segfault
        if hasattr(socket, 'AI_NUMERICSERV'):
            try:
                # The arguments here are undefined and the call may succeed
                # or fail.  All we care here is that it doesn't segfault.
                socket.getaddrinfo("localhost", None, 0, 0, 0,
                                   socket.AI_NUMERICSERV)
            except socket.gaierror:
                pass

    def test_getnameinfo(self):
        # only IP addresses are allowed
        self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)

    @unittest.skipUnless(support.is_resource_enabled('network'),
                         'network is not enabled')
    def test_idna(self):
        # Check for internet access before running test
        # (issue #12804, issue #25138).
        with support.transient_internet('python.org'):
            socket.gethostbyname('python.org')

        # these should all be successful
        domain = 'испытание.pythontest.net'
        socket.gethostbyname(domain)
        socket.gethostbyname_ex(domain)
        socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
        # have a reverse entry yet
        # socket.gethostbyaddr('испытание.python.org')
    def check_sendall_interrupted(self, with_timeout):
        # Verify that a signal arriving during a blocking sendall() is
        # propagated (exception in handler) and doesn't clobber errno.
        # socketpair() is not strictly required, but it makes things easier.
        if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
            self.skipTest("signal.alarm and socket.socketpair required for this test")
        # Our signal handlers clobber the C errno by calling a math function
        # with an invalid domain value.
        def ok_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
        def raising_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
            1 // 0
        c, s = socket.socketpair()
        old_alarm = signal.signal(signal.SIGALRM, raising_handler)
        try:
            if with_timeout:
                # Just above the one second minimum for signal.alarm
                c.settimeout(1.5)
            with self.assertRaises(ZeroDivisionError):
                signal.alarm(1)
                c.sendall(b"x" * support.SOCK_MAX_SIZE)
            if with_timeout:
                signal.signal(signal.SIGALRM, ok_handler)
                signal.alarm(1)
                self.assertRaises(socket.timeout, c.sendall,
                                  b"x" * support.SOCK_MAX_SIZE)
        finally:
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old_alarm)
            c.close()
            s.close()

    def test_sendall_interrupted(self):
        self.check_sendall_interrupted(False)

    def test_sendall_interrupted_with_timeout(self):
        self.check_sendall_interrupted(True)

    def test_dealloc_warn(self):
        # Deallocating an open socket must emit a ResourceWarning that
        # names the socket.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r = repr(sock)
        with self.assertWarns(ResourceWarning) as cm:
            sock = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))
        # An open socket file object gets dereferenced after the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        f = sock.makefile('rb')
        r = repr(sock)
        sock = None
        support.gc_collect()
        with self.assertWarns(ResourceWarning):
            f = None
            support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
    """makefile() rejects mode strings it does not support."""
    for bad_mode in ('rt', 'x', '+', 'a'):
        with self.subTest(mode=bad_mode):
            with socket.socket() as sock:
                with self.assertRaisesRegex(ValueError, 'invalid mode'):
                    sock.makefile(bad_mode)
def test_pickle(self):
    """Sockets are unpicklable; AF_*/SOCK_* constants round-trip intact."""
    sock = socket.socket()
    with sock:
        # A live socket can never be serialized, at any protocol level.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertRaises(TypeError, pickle.dumps, sock, proto)
    # The IntEnum constants, by contrast, survive a pickle round trip.
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        family = pickle.loads(pickle.dumps(socket.AF_INET, proto))
        self.assertEqual(family, socket.AF_INET)
        kind = pickle.loads(pickle.dumps(socket.SOCK_STREAM, proto))
        self.assertEqual(kind, socket.SOCK_STREAM)
def test_listen_backlog(self):
    """listen() accepts zero and negative backlogs, and no argument at all."""
    for backlog in (0, -1):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
            srv.bind((HOST, 0))
            srv.listen(backlog)
    # The backlog argument became optional in Python 3.5.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
    # Issue 15989: a backlog that does not fit in a C int must raise
    # OverflowError instead of being silently truncated.
    import _testcapi
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
    """Out-of-range IPv6 flowinfo values must raise OverflowError."""
    # flowinfo is a 20-bit field; 0xffffffff is far too large.
    self.assertRaises(OverflowError, socket.getnameinfo,
                      (support.HOSTv6, 0, 0xffffffff), 0)
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        # Negative flowinfo is rejected on bind as well.
        self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
    """getaddrinfo() normalizes an IPv6 literal to lower-case hex."""
    ((*_, sockaddr),) = socket.getaddrinfo(
        'fdf8:f53e:61e4::18:c0:face:8D',  # Note capital letter `D`.
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Returned sockaddr is (host, port, flowinfo, scope_id).
    self.assertEqual(sockaddr, ('fdf8:f53e:61e4::18:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    hasattr(socket, 'if_nameindex'),
    'if_nameindex is not supported')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
    """A symbolic %interface scope suffix resolves to its numeric index."""
    # Just pick up any network interface (Linux, Mac OS X)
    (ifindex, test_interface) = socket.if_nameindex()[0]
    ((*_, sockaddr),) = socket.getaddrinfo(
        'fdf8:f53e:61e4::18:c0:face:8D%' + test_interface,
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('fdf8:f53e:61e4::18:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
    """A numeric %N scope suffix is parsed into the sockaddr scope_id."""
    # Also works on Linux and Mac OS X, but is not documented (?)
    # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
    ifindex = 42
    ((*_, sockaddr),) = socket.getaddrinfo(
        'fdf8:f53e:61e4::18:c0:face:8D%' + str(ifindex),
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('fdf8:f53e:61e4::18:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    hasattr(socket, 'if_nameindex'),
    'if_nameindex is not supported')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
    """getnameinfo() renders a nonzero scope_id as a %interface suffix."""
    # Just pick up any network interface.
    (ifindex, test_interface) = socket.if_nameindex()[0]
    sockaddr = ('fdf8:f53e:61e4::18:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('fdf8:f53e:61e4::18:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
    """getnameinfo() renders a numeric-only scope_id as a %N suffix."""
    # Also works on Linux (undocumented), but does not work on Mac OS X
    # Windows and Linux allow nonexistent interface numbers here.
    ifindex = 42
    sockaddr = ('fdf8:f53e:61e4::18:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('fdf8:f53e:61e4::18:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
    """family/type attributes of a socket stringify as enum members."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        family_repr = str(s.family)
        kind_repr = str(s.type)
        self.assertEqual(family_repr, 'AddressFamily.AF_INET')
        self.assertEqual(kind_repr, 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
    """socket.type stays SOCK_STREAM across timeout/blocking changes."""
    SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
    SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
    sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
    with socket.socket(socket.AF_INET, sock_type) as s:
        self.assertEqual(s.type, socket.SOCK_STREAM)
        # Toggling timeouts and blocking mode must not leak the
        # SOCK_NONBLOCK bit back into the reported type.
        for change_mode in (lambda: s.settimeout(1),
                            lambda: s.settimeout(0),
                            lambda: s.setblocking(True),
                            lambda: s.setblocking(False)):
            change_mode()
            self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
    # Test that when created with a family that's not one of the known
    # AF_*/SOCK_* constants, socket.family just returns the number.
    #
    # To do this we fool socket.socket into believing it already has an
    # open fd because on this path it doesn't actually verify the family and
    # type and populates the socket object.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fd = sock.detach()
    # Pick family/type values one past the largest known constants so they
    # cannot match any enum member.
    unknown_family = max(socket.AddressFamily.__members__.values()) + 1
    unknown_type = max(
        kind
        for name, kind in socket.SocketKind.__members__.items()
        if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
    ) + 1
    with socket.socket(
            family=unknown_family, type=unknown_type, proto=23,
            fileno=fd) as s:
        self.assertEqual(s.family, unknown_family)
        self.assertEqual(s.type, unknown_type)
        # some OS like macOS ignore proto
        self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
    """_sendfile_use_sendfile() error handling for bad file descriptors."""
    class File:
        # Minimal stand-in exposing only fileno(), which is all the
        # sendfile fast path inspects.
        def __init__(self, fd):
            self.fd = fd

        def fileno(self):
            return self.fd
    with socket.socket() as sock:
        fd = os.open(os.curdir, os.O_RDONLY)
        os.close(fd)
        # A stale (closed) fd makes the fast path give up gracefully.
        with self.assertRaises(socket._GiveupOnSendfile):
            sock._sendfile_use_sendfile(File(fd))
        # An fd too large for a C int overflows.
        with self.assertRaises(OverflowError):
            sock._sendfile_use_sendfile(File(2**1000))
        # A non-integer fd is a TypeError.
        with self.assertRaises(TypeError):
            sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
    """Helper: rebuild a socket from s's fd and check family/type carry over.

    Takes ownership of s's file descriptor (s is detached afterwards).
    """
    self.assertEqual(s.family, family)
    self.assertEqual(s.type, stype)
    fd = s.fileno()
    s2 = socket.socket(fileno=fd)
    self.addCleanup(s2.close)
    # detach old fd to avoid double close
    s.detach()
    self.assertEqual(s2.family, family)
    self.assertEqual(s2.type, stype)
    self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
    """socket(fileno=...) reconstruction works for every available family."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(s.close)
    s.bind((support.HOST, 0))
    self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)

    if hasattr(socket, "SOCK_DGRAM"):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind((support.HOST, 0))
        self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)

    if support.IPV6_ENABLED:
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind((support.HOSTv6, 0, 0, 0))
        self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)

    if hasattr(socket, "AF_UNIX"):
        # AF_UNIX needs a filesystem path to bind to.
        tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpdir)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind(os.path.join(tmpdir, 'socket'))
        self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
    """A float fileno raises TypeError instead of being truncated."""
    self.assertRaisesRegex(TypeError, "integer argument expected",
                           socket.socket, socket.AF_INET,
                           socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
    """A non-numeric fileno raises TypeError."""
    self.assertRaisesRegex(TypeError, "integer is required",
                           socket.socket, socket.AF_INET,
                           socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
    """fileno=-1 is rejected as a negative file descriptor."""
    self.assertRaisesRegex(ValueError, "negative file descriptor",
                           socket.socket, socket.AF_INET,
                           socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
    """Any negative fileno (not just -1) raises ValueError on POSIX."""
    with self.assertRaisesRegex(ValueError, "negative file descriptor"):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
    """Passing a dead fd to socket(fileno=...) raises EBADF/WSAENOTSOCK."""
    WSAENOTSOCK = 10038  # Windows equivalent of "not a socket"
    with self.assertRaises(OSError) as cm:
        socket.socket(fileno=support.make_bad_fd())
    self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))

    # Same failure when family/type are supplied explicitly.
    with self.assertRaises(OSError) as cm:
        socket.socket(
            socket.AF_INET,
            socket.SOCK_STREAM,
            fileno=support.make_bad_fd())
    self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
    """A valid fd that is not a socket is rejected with ENOTSOCK."""
    with tempfile.NamedTemporaryFile() as afile:
        # Without explicit family/type, detection of the fd fails.
        with self.assertRaises(OSError):
            socket.socket(fileno=afile.fileno())

        with self.assertRaises(OSError) as cm:
            socket.socket(
                socket.AF_INET,
                socket.SOCK_STREAM,
                fileno=afile.fileno())
        self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
    """Smoke tests for AF_CAN constants, raw/BCM socket creation and options."""

    def testCrucialConstants(self):
        # Merely referencing these raises AttributeError if the build
        # lacks SocketCAN support.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_RAW

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCMConstants(self):
        socket.CAN_BCM

        # opcodes
        socket.CAN_BCM_TX_SETUP     # create (cyclic) transmission task
        socket.CAN_BCM_TX_DELETE    # remove (cyclic) transmission task
        socket.CAN_BCM_TX_READ      # read properties of (cyclic) transmission task
        socket.CAN_BCM_TX_SEND      # send one CAN frame
        socket.CAN_BCM_RX_SETUP     # create RX content filter subscription
        socket.CAN_BCM_RX_DELETE    # remove RX content filter subscription
        socket.CAN_BCM_RX_READ      # read properties of RX content filter subscription
        socket.CAN_BCM_TX_STATUS    # reply to TX_READ request
        socket.CAN_BCM_TX_EXPIRED   # notification on performed transmissions (count=0)
        socket.CAN_BCM_RX_STATUS    # reply to RX_READ request
        socket.CAN_BCM_RX_TIMEOUT   # cyclic message is absent
        socket.CAN_BCM_RX_CHANGED   # updated CAN frame (detected content change)

    def testCreateSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testCreateBCMSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
            pass

    def testBindAny(self):
        # An empty interface name binds the raw socket to all interfaces.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.bind(('', ))

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            self.assertRaisesRegex(OSError, 'interface name too long',
                                   s.bind, ('x' * 1024,))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
                         'socket.CAN_RAW_LOOPBACK required for this test.')
    def testLoopback(self):
        # CAN_RAW_LOOPBACK is a boolean option; both states round-trip.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            for loopback in (0, 1):
                s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
                             loopback)
                self.assertEqual(loopback,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
                         'socket.CAN_RAW_FILTER required for this test.')
    def testFilter(self):
        # A filter entry is a packed (can_id, can_mask) pair.
        can_id, can_mask = 0x200, 0x700
        can_filter = struct.pack("=II", can_id, can_mask)
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
            self.assertEqual(can_filter,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
            # setsockopt must also accept any bytes-like object.
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
    """Client/server tests for raw CAN frames and the broadcast manager (BCM).

    Each testX method runs in the server thread paired with a _testX method
    in the client thread (the harness convention); do not rename one side.
    """

    def __init__(self, methodName='runTest'):
        ThreadedCANSocketTest.__init__(self, methodName=methodName)

    @classmethod
    def build_can_frame(cls, can_id, data):
        """Build a CAN frame."""
        can_dlc = len(data)
        # Payload is always padded to the full 8 bytes of a classic frame.
        data = data.ljust(8, b'\x00')
        return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)

    @classmethod
    def dissect_can_frame(cls, frame):
        """Dissect a CAN frame."""
        can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
        # Only the first can_dlc bytes of the payload are meaningful.
        return (can_id, can_dlc, data[:can_dlc])

    def testSendFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        self.assertEqual(addr[0], self.interface)
        self.assertEqual(addr[1], socket.AF_CAN)

    def _testSendFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
        self.cli.send(self.cf)

    def testSendMaxFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)

    def _testSendMaxFrame(self):
        # 8 data bytes is the maximum payload of a classic CAN frame.
        self.cf = self.build_can_frame(0x00, b'\x07' * 8)
        self.cli.send(self.cf)

    def testSendMultiFrames(self):
        # Frames must arrive in send order.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf1, cf)

        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf2, cf)

    def _testSendMultiFrames(self):
        self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
        self.cli.send(self.cf1)

        self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
        self.cli.send(self.cf2)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def _testBCM(self):
        # Client side receives the frame the BCM task (set up in testBCM)
        # transmitted on the bus.
        cf, addr = self.cli.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        can_id, can_dlc, data = self.dissect_can_frame(cf)
        self.assertEqual(self.can_id, can_id)
        self.assertEqual(self.data, data)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCM(self):
        bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
        self.addCleanup(bcm.close)
        bcm.connect((self.interface,))
        self.can_id = 0x123
        self.data = bytes([0xc0, 0xff, 0xee])
        self.cf = self.build_can_frame(self.can_id, self.data)
        # Build a bcm_msg_head requesting a single immediate transmission.
        opcode = socket.CAN_BCM_TX_SEND
        flags = 0
        count = 0
        ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
        bcm_can_id = 0x0222
        nframes = 1
        assert len(self.cf) == 16
        header = struct.pack(self.bcm_cmd_msg_fmt,
                             opcode,
                             flags,
                             count,
                             ival1_seconds,
                             ival1_usec,
                             ival2_seconds,
                             ival2_usec,
                             bcm_can_id,
                             nframes,
                             )
        header_plus_frame = header + self.cf
        bytes_sent = bcm.send(header_plus_frame)
        self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
    """Smoke tests for CAN ISO-TP (ISO 15765-2) socket support."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Virtual CAN interface used by the bind test; assumed to be
        # configured on the host (the bind test skips if it is missing).
        self.interface = "vcan0"

    def testCrucialConstants(self):
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_ISOTP
        socket.SOCK_DGRAM

    def testCreateSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
                         'socket.CAN_ISOTP required for this test.')
    def testCreateISOTPSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            pass

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            with self.assertRaisesRegex(OSError, 'interface name too long'):
                s.bind(('x' * 1024, 1, 2))

    def testBind(self):
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
                # ISO-TP addresses are (interface, rx_addr, tx_addr).
                addr = self.interface, 0x123, 0x456
                s.bind(addr)
                self.assertEqual(s.getsockname(), addr)
        except OSError as e:
            if e.errno == errno.ENODEV:
                self.skipTest('network interface `%s` does not exist' %
                           self.interface)
            else:
                raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
    """Smoke tests for Reliable Datagram Sockets (RDS) support."""

    def testCrucialConstants(self):
        socket.AF_RDS
        socket.PF_RDS

    def testCreateSocket(self):
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            pass

    def testSocketBufferSize(self):
        # RDS sockets accept the standard SO_RCVBUF/SO_SNDBUF options.
        bufsize = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
    """Client/server datagram tests over RDS sockets.

    Each testX method runs in the server thread paired with a _testX method
    in the client thread (the harness convention); do not rename one side.
    """

    def __init__(self, methodName='runTest'):
        ThreadedRDSSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        super().setUp()
        self.evt = threading.Event()

    def testSendAndRecv(self):
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
        self.assertEqual(self.cli_addr, addr)

    def _testSendAndRecv(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    def testPeek(self):
        # MSG_PEEK leaves the datagram queued, so it is received twice.
        data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
        self.assertEqual(self.data, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testPeek(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    @requireAttrs(socket.socket, 'recvmsg')
    def testSendAndRecvMsg(self):
        data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
        self.assertEqual(self.data, data)

    @requireAttrs(socket.socket, 'sendmsg')
    def _testSendAndRecvMsg(self):
        self.data = b'hello ' * 10
        self.cli.sendmsg([self.data], (), 0, (HOST, self.port))

    def testSendAndRecvMulti(self):
        # Datagrams must arrive in send order.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data1, data)

        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data2, data)

    def _testSendAndRecvMulti(self):
        self.data1 = b'bacon'
        self.cli.sendto(self.data1, 0, (HOST, self.port))

        self.data2 = b'egg'
        self.cli.sendto(self.data2, 0, (HOST, self.port))

    def testSelect(self):
        # The server socket becomes readable once the datagram is queued.
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testSelect(self):
        self.data = b'select'
        self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
          'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
    """Smoke tests for Qualcomm IPC router (AF_QIPCRTR) socket support."""

    def testCrucialConstants(self):
        socket.AF_QIPCRTR

    def testCreateSocket(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            pass

    def testUnbound(self):
        # A fresh socket has no port assigned yet.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertEqual(s.getsockname()[1], 0)

    def testBindSock(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            support.bind_port(s, host=s.getsockname()[0])
            self.assertNotEqual(s.getsockname()[1], 0)

    def testInvalidBindSock(self):
        # Negative node ids are rejected.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertRaises(OSError, support.bind_port, s, host=-2)

    def testAutoBindSock(self):
        # connect() auto-binds the socket, assigning it a port.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            s.connect((123, 123))
            self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
          'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
    """Smoke tests for AF_VSOCK (VM sockets) constants and buffer options."""

    def testCrucialConstants(self):
        socket.AF_VSOCK

    def testVSOCKConstants(self):
        socket.SO_VM_SOCKETS_BUFFER_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
        socket.VMADDR_CID_ANY
        socket.VMADDR_PORT_ANY
        socket.VMADDR_CID_HOST
        socket.VM_SOCKETS_INVALID_VERSION
        socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID

    def testCreateSocket(self):
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            pass

    def testSocketBufferSize(self):
        # Each buffer-size option should read back exactly what was set
        # (here: double the kernel defaults).
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            orig_max = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
            orig = s.getsockopt(socket.AF_VSOCK,
                                socket.SO_VM_SOCKETS_BUFFER_SIZE)
            orig_min = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)

            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)

            self.assertEqual(orig_max * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
            self.assertEqual(orig * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_SIZE))
            self.assertEqual(orig_min * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
    """Basic send/recv tests over a connected TCP pair.

    Each testX method runs in the server thread paired with a _testX method
    in the client thread (the harness convention); do not rename one side.
    """

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)

    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)

    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP.
        # Accumulate chunks in a list and join once, instead of
        # quadratic bytes concatenation.
        chunks = []
        while True:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            chunks.append(read)
        self.assertEqual(b''.join(chunks), b'f' * 2048)

    def _testSendAll(self):
        big_chunk = b'f' * 2048
        self.serv_conn.sendall(big_chunk)

    def testFromFd(self):
        # Testing fromfd()
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        self.assertIsInstance(sock, socket.socket)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testFromFd(self):
        self.serv_conn.send(MSG)

    def testDup(self):
        # Testing dup()
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDup(self):
        self.serv_conn.send(MSG)

    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()

    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)

    # Alias pairs testShutdown with the overflow client below.
    testShutdown_overflow = support.cpython_only(testShutdown)

    @support.cpython_only
    def _testShutdown_overflow(self):
        import _testcapi
        self.serv_conn.send(MSG)
        # Issue 15989
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          2 + (_testcapi.UINT_MAX + 1))
        self.serv_conn.shutdown(2)

    def testDetach(self):
        # Testing detach()
        fileno = self.cli_conn.fileno()
        f = self.cli_conn.detach()
        self.assertEqual(f, fileno)
        # cli_conn cannot be used anymore...
        self.assertTrue(self.cli_conn._closed)
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
        self.cli_conn.close()
        # ...but we can create another socket using the (still open)
        # file descriptor
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDetach(self):
        self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
    """Basic sendto/recvfrom tests over UDP.

    Each testX method runs in the server thread paired with a _testX method
    in the client thread (the harness convention); do not rename one side.
    """

    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # Testing sendto() and Recv() over UDP
        msg = self.serv.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # Testing recvfrom() over UDP
        msg, addr = self.serv.recvfrom(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)

    def _testRecvFromNegative(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
    # Base class for sendmsg()/recvmsg() tests.

    # Time in seconds to wait before considering a test failed, or
    # None for no timeout.  Not all tests actually set a timeout.
    fail_timeout = 3.0

    def setUp(self):
        self.misc_event = threading.Event()
        super().setUp()

    def sendToServer(self, msg):
        # Send msg to the server.
        return self.cli_sock.send(msg)

    # Tuple of alternative default arguments for sendmsg() when called
    # via sendmsgToServer() (e.g. to include a destination address).
    sendmsg_to_server_defaults = ()

    def sendmsgToServer(self, *args):
        # Call sendmsg() on self.cli_sock with the given arguments,
        # filling in any arguments which are not supplied with the
        # corresponding items of self.sendmsg_to_server_defaults, if
        # any.
        return self.cli_sock.sendmsg(
            *(args + self.sendmsg_to_server_defaults[len(args):]))

    def doRecvmsg(self, sock, bufsize, *args):
        # Call recvmsg() on sock with given arguments and return its
        # result.  Should be used for tests which can use either
        # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
        # this method with one which emulates it using recvmsg_into(),
        # thus allowing the same test to be used for both methods.
        result = sock.recvmsg(bufsize, *args)
        self.registerRecvmsgResult(result)
        return result

    def registerRecvmsgResult(self, result):
        # Called by doRecvmsg() with the return value of recvmsg() or
        # recvmsg_into().  Can be overridden to arrange cleanup based
        # on the returned ancillary data, for instance.
        pass

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer.
        self.assertEqual(addr1, addr2)

    # Flags that are normally unset in msg_flags
    # (computed at class-definition time from whichever flags the
    # platform's socket module actually provides).
    msg_flags_common_unset = 0
    for name in ("MSG_CTRUNC", "MSG_OOB"):
        msg_flags_common_unset |= getattr(socket, name, 0)

    # Flags that are normally set
    msg_flags_common_set = 0

    # Flags set when a complete record has been received (e.g. MSG_EOR
    # for SCTP)
    msg_flags_eor_indicator = 0

    # Flags set when a complete record has not been received
    # (e.g. MSG_TRUNC for datagram sockets)
    msg_flags_non_eor_indicator = 0

    def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
        # Method to check the value of msg_flags returned by recvmsg[_into]().
        #
        # Checks that all bits in msg_flags_common_set attribute are
        # set in "flags" and all bits in msg_flags_common_unset are
        # unset.
        #
        # The "eor" argument specifies whether the flags should
        # indicate that a full record (or datagram) has been received.
        # If "eor" is None, no checks are done; otherwise, checks
        # that:
        #
        #  * if "eor" is true, all bits in msg_flags_eor_indicator are
        #    set and all bits in msg_flags_non_eor_indicator are unset
        #
        #  * if "eor" is false, all bits in msg_flags_non_eor_indicator
        #    are set and all bits in msg_flags_eor_indicator are unset
        #
        # If "checkset" and/or "checkunset" are supplied, they require
        # the given bits to be set or unset respectively, overriding
        # what the attributes require for those bits.
        #
        # If any bits are set in "ignore", they will not be checked,
        # regardless of the other inputs.
        #
        # Will raise Exception if the inputs require a bit to be both
        # set and unset, and it is not ignored.

        defaultset = self.msg_flags_common_set
        defaultunset = self.msg_flags_common_unset

        if eor:
            defaultset |= self.msg_flags_eor_indicator
            defaultunset |= self.msg_flags_non_eor_indicator
        elif eor is not None:
            defaultset |= self.msg_flags_non_eor_indicator
            defaultunset |= self.msg_flags_eor_indicator

        # Function arguments override defaults
        defaultset &= ~checkunset
        defaultunset &= ~checkset

        # Merge arguments with remaining defaults, and check for conflicts
        checkset |= defaultset
        checkunset |= defaultunset
        inboth = checkset & checkunset & ~ignore
        if inboth:
            raise Exception("contradictory set, unset requirements for flags "
                            "{0:#x}".format(inboth))

        # Compare with given msg_flags value
        mask = (checkset | checkunset) & ~ignore
        self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
    # Mixin to implement doRecvmsg() using recvmsg_into().

    def doRecvmsg(self, sock, bufsize, *args):
        # Emulate recvmsg() by receiving into a single bytearray buffer
        # and converting the (nbytes, ...) result back to (data, ...).
        buf = bytearray(bufsize)
        result = sock.recvmsg_into([buf], *args)
        self.registerRecvmsgResult(result)
        self.assertGreaterEqual(result[0], 0)
        self.assertLessEqual(result[0], bufsize)
        return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
    # Defines flags to be checked in msg_flags for datagram sockets.

    @property
    def msg_flags_non_eor_indicator(self):
        # MSG_TRUNC marks a truncated (incomplete) datagram.
        return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
    # Defines flags to be checked in msg_flags for SCTP sockets.

    @property
    def msg_flags_eor_indicator(self):
        # MSG_EOR marks the end of an SCTP record.
        return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
    # Base class for tests on connectionless-mode sockets.  Users must
    # supply sockets on attributes cli and serv to be mapped to
    # cli_sock and serv_sock respectively.

    @property
    def serv_sock(self):
        return self.serv

    @property
    def cli_sock(self):
        return self.cli

    @property
    def sendmsg_to_server_defaults(self):
        # Connectionless sends need an explicit destination address.
        return ([], [], 0, self.serv_addr)

    def sendToServer(self, msg):
        return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
    # Base class for tests on connected sockets.  Users must supply
    # sockets on attributes serv_conn and cli_conn (representing the
    # connections *to* the server and the client), to be mapped to
    # cli_sock and serv_sock respectively.

    @property
    def serv_sock(self):
        return self.cli_conn

    @property
    def cli_sock(self):
        return self.serv_conn

    def checkRecvmsgAddress(self, addr1, addr2):
        # Address is currently "unspecified" for a connected socket,
        # so we don't examine it
        pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
    # Base class to set a timeout on server's socket.

    def setUp(self):
        super().setUp()
        # Bound the server's blocking calls so a lost message fails the
        # test instead of hanging it.
        self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
    # Tests for sendmsg() which can use any socket type and do not
    # involve recvmsg() or recvmsg_into().
    #
    # Each testX method runs in the server thread paired with a _testX
    # method in the client thread (the harness convention).

    def testSendmsg(self):
        # Send a simple message with sendmsg().
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))

    def testSendmsgDataGenerator(self):
        # Send from buffer obtained from a generator (not a sequence).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgDataGenerator(self):
        self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
                         len(MSG))

    def testSendmsgAncillaryGenerator(self):
        # Gather (empty) ancillary data from a generator.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgAncillaryGenerator(self):
        self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
                         len(MSG))

    def testSendmsgArray(self):
        # Send data from an array instead of the usual bytes object.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgArray(self):
        self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
                         len(MSG))

    def testSendmsgGather(self):
        # Send message data from more than one buffer (gather write).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgGather(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))

    def testSendmsgBadArgs(self):
        # Check that sendmsg() rejects invalid arguments.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadArgs(self):
        # Each call exercises one malformed positional argument; the
        # trailing sendToServer() unblocks the server-side recv().
        self.assertRaises(TypeError, self.cli_sock.sendmsg)
        self.assertRaises(TypeError, self.sendmsgToServer,
                          b"not in an iterable")
        self.assertRaises(TypeError, self.sendmsgToServer,
                          object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG, object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], 0, object())
        self.sendToServer(b"done")

    def testSendmsgBadCmsg(self):
        # Check that invalid ancillary data items are rejected.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadCmsg(self):
        # Ancillary items must be (level, type, data) triples of the
        # right types; anything else is a TypeError.
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(object(), 0, b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, object(), b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, object())])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0)])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b"data", 42)])
        self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
    # Tests for sendmsg() which require a stream socket and do not
    # involve recvmsg() or recvmsg_into().

    def testSendmsgExplicitNoneAddr(self):
        # Check that peer address can be specified as None.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgExplicitNoneAddr(self):
        self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))

    def testSendmsgTimeout(self):
        # Check that timeout works with sendmsg().
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    def _testSendmsgTimeout(self):
        try:
            self.cli_sock.settimeout(0.03)
            try:
                # Keep sending until the send buffer fills and the
                # timeout fires.
                while True:
                    self.sendmsgToServer([b"a"*512])
            except socket.timeout:
                pass
            except OSError as exc:
                if exc.errno != errno.ENOMEM:
                    raise
                # bpo-33937 the test randomly fails on Travis CI with
                # "OSError: [Errno 12] Cannot allocate memory"
            else:
                self.fail("socket.timeout not raised")
        finally:
            # Unblock the server side regardless of outcome.
            self.misc_event.set()

    # XXX: would be nice to have more tests for sendmsg flags argument.

    # Linux supports MSG_DONTWAIT when sending, but in general, it
    # only works when receiving.  Could add other platforms if they
    # support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
                      "MSG_DONTWAIT not known to work on this platform when "
                      "sending")
    def testSendmsgDontWait(self):
        # Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @testSendmsgDontWait.client_skip
    def _testSendmsgDontWait(self):
        try:
            with self.assertRaises(OSError) as cm:
                # Fill the send buffer; the non-blocking send must then
                # fail rather than block.
                while True:
                    self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
            # bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
            # with "OSError: [Errno 12] Cannot allocate memory"
            self.assertIn(cm.exception.errno,
                          (errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
        finally:
            self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
    """Tests for sendmsg() which require a connectionless-mode
    (e.g. datagram) socket, and do not involve recvmsg() or
    recvmsg_into()."""

    def testSendmsgNoDestAddr(self):
        """Server side: nothing to receive; the client only checks
        that its sends fail."""
        pass

    def _testSendmsgNoDestAddr(self):
        # sendmsg() on an unconnected socket must fail when no
        # destination address is given, whether the address argument
        # is omitted entirely or passed explicitly as None.
        self.assertRaises(OSError, self.cli_sock.sendmsg, [MSG])
        self.assertRaises(OSError, self.cli_sock.sendmsg, [MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
    # Tests for recvmsg() which can also be emulated using
    # recvmsg_into(), and can use any socket type.
    #
    # doRecvmsg() is supplied by a mixin (outside this chunk) and maps
    # to either recvmsg() or recvmsg_into(); each testFoo() receives on
    # the server while its _testFoo() counterpart sends from the client.

    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsg(self):
        self.sendToServer(MSG)

    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)

    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)

    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # eor=False: the record was not received in full.
        self.checkFlags(flags, eor=False)

    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)

    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)

    def _testRecvmsgAfterClose(self):
        pass

    def testRecvmsgTimeout(self):
        # Check that timeout works.
        try:
            self.serv_sock.settimeout(0.03)
            self.assertRaises(socket.timeout,
                              self.doRecvmsg, self.serv_sock, len(MSG))
        finally:
            # Release the client, which is waiting on misc_event.
            self.misc_event.set()

    def _testRecvmsgTimeout(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @requireAttrs(socket, "MSG_PEEK")
    def testRecvmsgPeek(self):
        # Check that MSG_PEEK in flags enables examination of pending
        # data without consuming it.

        # Receive part of data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3, 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Ignoring MSG_TRUNC here (so this test is the same for stream
        # and datagram sockets).  Some wording in POSIX seems to
        # suggest that it needn't be set when peeking, but that may
        # just be a slip.
        self.checkFlags(flags, eor=False,
                        ignore=getattr(socket, "MSG_TRUNC", 0))

        # Receive all data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # Check that the same data can still be received normally.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgPeek.client_skip
    def _testRecvmsgPeek(self):
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    def testRecvmsgFromSendmsg(self):
        # Test receiving with recvmsg[_into]() when message is sent
        # using sendmsg().
        self.serv_sock.settimeout(self.fail_timeout)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgFromSendmsg.client_skip
    def _testRecvmsgFromSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    # Tests which require a stream socket and can use either recvmsg()
    # or recvmsg_into().

    def testRecvmsgEOF(self):
        # Receive end-of-stream indicator (b"", peer socket closed).
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(msg, b"")
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=None)  # Might not have end-of-record marker

    def _testRecvmsgEOF(self):
        self.cli_sock.close()

    def testRecvmsgOverflow(self):
        # Receive a message in more than one chunk.
        seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                    len(MSG) - 3)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # First chunk: record not yet complete.
        self.checkFlags(flags, eor=False)

        seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # The concatenated chunks must reassemble the original message.
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
    # Tests for recvmsg() which can use any socket type.

    def testRecvmsgBadArgs(self):
        # Check that recvmsg() rejects invalid arguments.
        self.assertRaises(TypeError, self.serv_sock.recvmsg)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          -1, 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          len(MSG), -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          [bytearray(10)], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          object(), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), 0, object())
        # After all the rejected calls, a correct call must still work.
        msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
    # Tests for recvmsg_into() which can use any socket type.

    def testRecvmsgIntoBadArgs(self):
        # Check that recvmsg_into() rejects invalid arguments.
        buf = bytearray(len(MSG))
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          len(MSG), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          buf, 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [object()], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [b"I'm not writable"], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf, object()], 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
                          [buf], -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], 0, object())
        # After all the rejected calls, a correct call must still work.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoBadArgs(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoGenerator(self):
        # Receive into buffer obtained from a generator (not a sequence).
        buf = bytearray(len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            (o for o in [buf]))
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoGenerator(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoArray(self):
        # Receive into an array rather than the usual bytearray.
        buf = array.array("B", [0] * len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf.tobytes(), MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoArray(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter write).
        b1 = bytearray(b"----")
        b2 = bytearray(b"0123456789")
        b3 = bytearray(b"--------------")
        # The memoryview slice of b2 leaves its first two and last
        # bytes untouched, exercising partial-buffer fills.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            [b1, memoryview(b2)[2:9], b3])
        self.assertEqual(nbytes, len(b"Mary had a little lamb"))
        self.assertEqual(b1, bytearray(b"Mary"))
        self.assertEqual(b2, bytearray(b"01 had a 9"))
        self.assertEqual(b3, bytearray(b"little lamb---"))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoScatter(self):
        self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
    """Tests for the CMSG_LEN() and CMSG_SPACE() functions.

    These check the assumptions used by sendmsg() and
    recvmsg[_into](), which share code with these functions.
    """

    # Match the definition in socketmodule.c: cap the socklen_t limit
    # at the platform's INT_MAX when _testcapi is available, otherwise
    # assume a signed 32-bit limit.
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)

    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        """Exercise CMSG_LEN() over valid and overflowing values."""
        overhead = socket.CMSG_LEN(0)
        toobig = self.socklen_t_limit - overhead + 1
        # Sample the small end and the region just below the overflow
        # threshold rather than the whole (huge) range.
        values = [*range(257), *range(toobig - 257, toobig)]

        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(overhead, array.array("i").itemsize * 2)
        for datalen in values:
            total = socket.CMSG_LEN(datalen)
            # This is how recvmsg() calculates the data size.
            self.assertEqual(total - overhead, datalen)
            self.assertLessEqual(total, self.socklen_t_limit)

        self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
        self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)

    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        """Exercise CMSG_SPACE() over valid and overflowing values."""
        toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        values = [*range(257), *range(toobig - 257, toobig)]

        last = socket.CMSG_SPACE(0)
        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(last, array.array("i").itemsize * 2)
        for datalen in values:
            space = socket.CMSG_SPACE(datalen)
            # Space must be monotonic in the data length and must cover
            # both CMSG_LEN(datalen) and the header overhead.
            self.assertGreaterEqual(space, last)
            self.assertGreaterEqual(space, socket.CMSG_LEN(datalen))
            self.assertGreaterEqual(space, datalen + socket.CMSG_LEN(0))
            self.assertLessEqual(space, self.socklen_t_limit)
            last = space

        self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
        self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
    # Tests for file descriptor passing on Unix-domain sockets.

    # Invalid file descriptor value that's unlikely to evaluate to a
    # real FD even if one of its bytes is replaced with a different
    # value (which shouldn't actually happen).
    badfd = -0x5555

    def newFDs(self, n):
        # Return a list of n file descriptors for newly-created files
        # containing their list indices as ASCII numbers.  The files
        # and descriptors are cleaned up automatically at teardown.
        fds = []
        for i in range(n):
            fd, path = tempfile.mkstemp()
            self.addCleanup(os.unlink, path)
            self.addCleanup(os.close, fd)
            os.write(fd, str(i).encode())
            fds.append(fd)
        return fds

    def checkFDs(self, fds):
        # Check that the file descriptors in the given list contain
        # their correct list indices as ASCII numbers.
        for n, fd in enumerate(fds):
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(os.read(fd, 1024), str(n).encode())

    def registerRecvmsgResult(self, result):
        # Arrange for any FDs carried in a recvmsg[_into]() result to
        # be closed at teardown.
        self.addCleanup(self.closeRecvmsgFDs, result)

    def closeRecvmsgFDs(self, recvmsg_result):
        # Close all file descriptors specified in the ancillary data
        # of the given return value from recvmsg() or recvmsg_into().
        for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                fds = array.array("i")
                # Discard any trailing partial int before decoding.
                fds.frombytes(cmsg_data[:
                        len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
                for fd in fds:
                    os.close(fd)

    def createAndSendFDs(self, n):
        # Send n new file descriptors created by newFDs() to the
        # server, with the constant MSG as the non-ancillary data.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.SOL_SOCKET,
                                   socket.SCM_RIGHTS,
                                   array.array("i", self.newFDs(n)))]),
            len(MSG))

    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers).  By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            # Only whole ints may appear in the data.
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)

        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)

    def testFDPassSimple(self):
        # Pass a single FD (array read from bytes object).
        self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testFDPassSimple(self):
        self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))

    def testMultipleFDPass(self):
        # Pass multiple FDs in a single array.
        self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testMultipleFDPass(self):
        self.createAndSendFDs(4)

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassCMSG_SPACE(self):
        # Test using CMSG_SPACE() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(
            4, self.doRecvmsg(self.serv_sock, len(MSG),
                              socket.CMSG_SPACE(4 * SIZEOF_INT)))

    @testFDPassCMSG_SPACE.client_skip
    def _testFDPassCMSG_SPACE(self):
        self.createAndSendFDs(4)

    def testFDPassCMSG_LEN(self):
        # Test using CMSG_LEN() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(1,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_LEN(4 * SIZEOF_INT)),
                             # RFC 3542 says implementations may set
                             # MSG_CTRUNC if there isn't enough space
                             # for trailing padding.
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassCMSG_LEN(self):
        self.createAndSendFDs(1)

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparate(self):
        # Pass two FDs in two separate arrays.  Arrays may be combined
        # into a single control message by the OS.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                             maxcmsgs=2)

    @testFDPassSeparate.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparate(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparateMinSpace(self):
        # Pass two FDs in two separate arrays, receiving them into the
        # minimum space for two arrays.
        num_fds = 2
        self.checkRecvmsgFDs(num_fds,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_SPACE(SIZEOF_INT) +
                                            socket.CMSG_LEN(SIZEOF_INT * num_fds)),
                             maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)

    @testFDPassSeparateMinSpace.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparateMinSpace(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    def sendAncillaryIfPossible(self, msg, ancdata):
        # Try to send msg and ancdata to server, but if the system
        # call fails, just send msg with no ancillary data.
        try:
            nbytes = self.sendmsgToServer([msg], ancdata)
        except OSError as e:
            # Check that it was the system call that failed
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer([msg])
        self.assertEqual(nbytes, len(msg))

    @unittest.skipIf(sys.platform == "darwin", "see issue #24725")
    def testFDPassEmpty(self):
        # Try to pass an empty FD array.  Can receive either no array
        # or an empty array.
        self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240),
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassEmpty(self):
        self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                            socket.SCM_RIGHTS,
                                            b"")])

    def testFDPassPartialInt(self):
        # Try to pass a truncated FD array.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            # The truncated item must be shorter than a whole int.
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    def _testFDPassPartialInt(self):
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [self.badfd]).tobytes()[:-1])])

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassPartialIntInMiddle(self):
        # Try to pass two FD arrays, the first of which is truncated.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 2)
        fds = array.array("i")
        # Arrays may have been combined in a single control message
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            # Drop any trailing partial int before decoding.
            fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.assertLessEqual(len(fds), 2)
        self.checkFDs(fds)

    @testFDPassPartialIntInMiddle.client_skip
    def _testFDPassPartialIntInMiddle(self):
        fd0, fd1 = self.newFDs(2)
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
             (socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd1]))])

    def checkTruncatedHeader(self, result, ignoreflags=0):
        # Check that no ancillary data items are returned when data is
        # truncated inside the cmsghdr structure.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no buffer size
        # is specified.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                                  # BSD seems to set MSG_CTRUNC only
                                  # if an item has been partially
                                  # received.
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTruncNoBufSize(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc0(self):
        # Check that no ancillary data is received when buffer size is 0.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTrunc0(self):
        self.createAndSendFDs(1)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    def testCmsgTrunc1(self):
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))

    def _testCmsgTrunc1(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc2Int(self):
        # The cmsghdr structure has at least three members, two of
        # which are ints, so we still shouldn't see any ancillary
        # data.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 SIZEOF_INT * 2))

    def _testCmsgTrunc2Int(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Minus1(self):
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 socket.CMSG_LEN(0) - 1))

    def _testCmsgTruncLen0Minus1(self):
        self.createAndSendFDs(1)

    # The following tests try to truncate the control message in the
    # middle of the FD array.

    def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
        # Check that file descriptor data is truncated to between
        # mindata and maxdata bytes when received with buffer size
        # ancbuf, and that any complete file descriptor numbers are
        # valid.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbuf)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        if mindata == 0 and ancdata == []:
            return
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        self.assertGreaterEqual(len(cmsg_data), mindata)
        self.assertLessEqual(len(cmsg_data), maxdata)
        fds = array.array("i")
        # Decode only the whole ints that survived truncation.
        fds.frombytes(cmsg_data[:
                len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.checkFDs(fds)

    def testCmsgTruncLen0(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)

    def _testCmsgTruncLen0(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Plus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)

    def _testCmsgTruncLen0Plus1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                                 maxdata=SIZEOF_INT)

    def _testCmsgTruncLen1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen2Minus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                                 maxdata=(2 * SIZEOF_INT) - 1)

    def _testCmsgTruncLen2Minus1(self):
        self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
    """Given ancillary data list *ancdata*, return a mapping from
    pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.

    Asserts that no (level, type) pair appears more than once.
    """
    d = {}
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        self.assertNotIn((cmsg_level, cmsg_type), d)
        d[(cmsg_level, cmsg_type)] = cmsg_data
    return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
    # Receive hop limit into ancbufsize bytes of ancillary data
    # space.  Check that data is MSG, ancillary data is not
    # truncated (but ignore any flags in ignoreflags), and hop
    # limit is between 0 and maxhop inclusive.
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    # Signal the client that it may now send; the client-side _test
    # methods wait on misc_event before sending.
    self.misc_event.set()
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), ancbufsize)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)

    # Exactly one ancillary item: the hop limit, encoded as a C int.
    self.assertEqual(len(ancdata), 1)
    self.assertIsInstance(ancdata[0], tuple)
    cmsg_level, cmsg_type, cmsg_data = ancdata[0]
    self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
    self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
    self.assertIsInstance(cmsg_data, bytes)
    self.assertEqual(len(cmsg_data), SIZEOF_INT)
    a = array.array("i")
    a.frombytes(cmsg_data)
    self.assertGreaterEqual(a[0], 0)
    self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
    # Test receiving the packet hop limit as ancillary data.
    self.checkHopLimit(ancbufsize=10240)

@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
    # Need to wait until server has asked to receive ancillary
    # data, as implementations are not required to buffer it
    # otherwise.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
    # Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
    self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))

@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
    # Wait for the server to enable ancillary-data reception first.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
    # Test setting hop limit on outgoing packet and receiving it
    # at the other end.
    self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)

@testSetHopLimit.client_skip
def _testSetHopLimit(self):
    # Wait for the server to enable ancillary-data reception, then
    # send MSG with an explicit hop limit attached as ancillary data.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.assertEqual(
        self.sendmsgToServer([MSG],
                             [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                               array.array("i", [self.hop_limit]))]),
        len(MSG))
    def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
                                     ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space.  Check that data is MSG, ancillary
        # data is not truncated (but ignore any flags in ignoreflags),
        # and traffic class and hop limit are in range (hop limit no
        # more than maxhop).
        # Ask the kernel to deliver both ancillary items.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        # Signal the client that ancillary reception is enabled.
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        # Exactly two cmsgs expected; order is not guaranteed, so use
        # a (level, type) -> data mapping.
        self.assertEqual(len(ancdata), 2)
        ancmap = self.ancillaryMapping(ancdata)
        tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
        self.assertEqual(len(tcdata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(tcdata)
        # Traffic class is an 8-bit value carried in a native int.
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
        hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
        self.assertEqual(len(hldata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(hldata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimit(self):
        # Test receiving traffic class and hop limit as ancillary data.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240)
    @testRecvTrafficClassAndHopLimit.client_skip
    def _testRecvTrafficClassAndHopLimit(self):
        # Client: wait until the server has enabled ancillary reception.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        # Test receiving traffic class and hop limit, using
        # CMSG_SPACE() to calculate buffer size.
        self.checkTrafficClassAndHopLimit(
            ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
    @testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
    def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSetTrafficClassAndHopLimit(self):
        # Test setting traffic class and hop limit on outgoing packet,
        # and receiving them at the other end.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)
    @testSetTrafficClassAndHopLimit.client_skip
    def _testSetTrafficClassAndHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        # Attach both cmsgs to one sendmsg() call.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                                   array.array("i", [self.traffic_class])),
                                  (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))
    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testOddCmsgSize(self):
        # Try to send ancillary data with first item one byte too
        # long.  Fall back to sending with correct size if this fails,
        # and check that second item was handled correctly.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)
    @testOddCmsgSize.client_skip
    def _testOddCmsgSize(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        try:
            # First attempt: TCLASS payload padded with an extra byte
            # (SIZEOF_INT + 1 bytes) — some platforms accept this,
            # others raise OSError.
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        except OSError as e:
            self.assertIsInstance(e.errno, int)
            # Retry with correctly sized data.
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class])),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
    def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space, which should be too small to contain the ancillary
        # data header (if ancbufsize is None, pass no second argument
        # to recvmsg()).  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and no ancillary data is
        # returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        # ancbufsize=None means recvmsg() is called with no ancillary
        # buffer argument at all.
        args = () if ancbufsize is None else (ancbufsize,)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), *args)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no ancillary
        # buffer size is provided.
        self.checkHopLimitTruncatedHeader(ancbufsize=None,
                                          # BSD seems to set
                                          # MSG_CTRUNC only if an item
                                          # has been partially
                                          # received.
                                          ignoreflags=socket.MSG_CTRUNC)
    @testCmsgTruncNoBufSize.client_skip
    def _testCmsgTruncNoBufSize(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc0(self):
        # Check that no ancillary data is received when ancillary
        # buffer size is zero.
        self.checkHopLimitTruncatedHeader(ancbufsize=0,
                                          ignoreflags=socket.MSG_CTRUNC)
    @testSingleCmsgTrunc0.client_skip
    def _testSingleCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc1(self):
        # One byte of ancillary space: too small for a cmsg header.
        self.checkHopLimitTruncatedHeader(ancbufsize=1)
    @testSingleCmsgTrunc1.client_skip
    def _testSingleCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc2Int(self):
        # Two ints of space: still smaller than a full cmsg header.
        self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
    @testSingleCmsgTrunc2Int.client_skip
    def _testSingleCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncLen0Minus1(self):
        # One byte short of a zero-payload cmsg header.
        self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
    @testSingleCmsgTruncLen0Minus1.client_skip
    def _testSingleCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncInData(self):
        # Test truncation of a control message inside its associated
        # data.  The message may be returned with its data truncated,
        # or not returned at all.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        # Buffer holds the header but is one byte short of the payload.
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        # Either zero or one (truncated) cmsg may come back.
        self.assertLessEqual(len(ancdata), 1)
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
            self.assertLess(len(cmsg_data), SIZEOF_INT)
    @testSingleCmsgTruncInData.client_skip
    def _testSingleCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space, which should be large enough to
        # contain the first item, but too small to contain the header
        # of the second.  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and only one ancillary
        # data item is returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        # Delivery order of TCLASS vs HOPLIMIT is unspecified, so
        # accept either as the surviving item.
        self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc0(self):
        # Space for exactly one item; no room for the second at all.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
                                        ignoreflags=socket.MSG_CTRUNC)
    @testSecondCmsgTrunc0.client_skip
    def _testSecondCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc1(self):
        # One spare byte beyond the first item: second header truncated.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
    @testSecondCmsgTrunc1.client_skip
    def _testSecondCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc2Int(self):
        # Two spare ints: still shy of the second item's full header.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        2 * SIZEOF_INT)
    @testSecondCmsgTrunc2Int.client_skip
    def _testSecondCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncLen0Minus1(self):
        # One byte short of the second item's zero-payload header.
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        socket.CMSG_LEN(0) - 1)
    @testSecondCmsgTruncLen0Minus1.client_skip
    def _testSecondCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecomdCmsgTruncInData(self):
        # NOTE: "Secomd" is a long-standing typo for "Second"; kept
        # because renaming would change the unittest-discovered test id
        # and its paired client method.
        # Test truncation of the second of two control messages inside
        # its associated data.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        # Room for the first full item plus a header-but-not-payload
        # fragment of the second.
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG),
            socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
        # Track which of the two types has been seen; remove() raises
        # KeyError on an unexpected or duplicate type.
        cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
        cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        cmsg_types.remove(cmsg_type)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)
        # The second item, if present, must be the other type and
        # must have truncated data.
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            cmsg_types.remove(cmsg_type)
            self.assertLess(len(cmsg_data), SIZEOF_INT)
        self.assertEqual(ancdata, [])
    @testSecomdCmsgTruncInData.client_skip
    def _testSecomdCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
# Concrete sendmsg()/recvmsg() test classes over IPv4 UDP.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
                             SendrecvmsgConnectionlessBase,
                             ThreadedSocketTestMixin, UDPTestBase):
    pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
    pass
# Concrete sendmsg()/recvmsg() test classes over IPv6 UDP, including
# the RFC 3542 ancillary-data variants.
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
                              SendrecvmsgConnectionlessBase,
                              ThreadedSocketTestMixin, UDP6TestBase):
    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer, ignoring scope ID
        self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
                                      SendrecvmsgUDP6TestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
                                          RFC3542AncillaryTest,
                                          SendrecvmsgUDP6TestBase):
    pass
# Concrete sendmsg()/recvmsg() test classes over TCP.
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
                             ConnectedStreamTestMixin, TCPTestBase):
    pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
                     SendrecvmsgTCPTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                         SendrecvmsgTCPTestBase):
    pass
# Concrete sendmsg()/recvmsg() test classes over SCTP streams (skipped
# where SCTP is unavailable).
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
                                    SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, SCTPStreamBase):
    pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgSCTPStreamTestBase):
    def testRecvmsgEOF(self):
        # Override: SCTP sporadically raises ENOTCONN on EOF; skip the
        # test in that case instead of failing (issue #13876).
        try:
            # Zero-argument super() for consistency with the rest of
            # the file (e.g. InterruptedTimeoutBase.setUp).
            super().testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgSCTPStreamTestBase):
    def testRecvmsgEOF(self):
        # Override: SCTP sporadically raises ENOTCONN on EOF; skip the
        # test in that case instead of failing (issue #13876).
        try:
            # Zero-argument super() for consistency with the rest of
            # the file (e.g. InterruptedTimeoutBase.setUp).
            super().testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
# Concrete sendmsg()/recvmsg() test classes over Unix-domain stream
# sockets, including SCM_RIGHTS file-descriptor passing.
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, UnixStreamBase):
    pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgUnixStreamTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgUnixStreamTestBase):
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
    pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
                                     SendrecvmsgUnixStreamTestBase):
    pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs an
    # empty handler for SIGALRM and removes it on teardown, along with
    # any scheduled alarms.
    def setUp(self):
        super().setUp()
        # The handler raises ZeroDivisionError (1 / 0) so interrupted
        # socket calls surface as that exception in the tests.
        orig_alrm_handler = signal.signal(signal.SIGALRM,
                                          lambda signum, frame: 1 / 0)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
    # Timeout for socket operations
    timeout = 4.0
    # Provide setAlarm() method to schedule delivery of SIGALRM after
    # given number of seconds, or cancel it if zero, and an
    # appropriate time value to use.  Use setitimer() if available.
    if hasattr(signal, "setitimer"):
        alarm_time = 0.05
        def setAlarm(self, seconds):
            signal.setitimer(signal.ITIMER_REAL, seconds)
    else:
        # Old systems may deliver the alarm up to one second early
        alarm_time = 2
        def setAlarm(self, seconds):
            signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
    # Test interrupting the recv*() methods with signals when a
    # timeout is set.
    def setUp(self):
        super().setUp()
        self.serv.settimeout(self.timeout)
    def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (propagated from the SIGALRM handler installed in
        # InterruptedTimeoutBase.setUp) when interrupted by a signal.
        try:
            self.setAlarm(self.alarm_time)
            with self.assertRaises(ZeroDivisionError) as cm:
                func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)
    def testInterruptedRecvTimeout(self):
        self.checkInterruptedRecv(self.serv.recv, 1024)
    def testInterruptedRecvIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
    def testInterruptedRecvfromTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom, 1024)
    def testInterruptedRecvfromIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
    @requireAttrs(socket.socket, "recvmsg")
    def testInterruptedRecvmsgTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg, 1024)
    @requireAttrs(socket.socket, "recvmsg_into")
    def testInterruptedRecvmsgIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
                                 ThreadSafeCleanupTestCase,
                                 SocketListeningTestMixin, TCPTestBase):
    # Test interrupting the interruptible send*() methods with signals
    # when a timeout is set.
    def setUp(self):
        super().setUp()
        self.serv_conn = self.newSocket()
        self.addCleanup(self.serv_conn.close)
        # Use a thread to complete the connection, but wait for it to
        # terminate before running the test, so that there is only one
        # thread to accept the signal.
        cli_thread = threading.Thread(target=self.doConnect)
        cli_thread.start()
        self.cli_conn, addr = self.serv.accept()
        self.addCleanup(self.cli_conn.close)
        cli_thread.join()
        self.serv_conn.settimeout(self.timeout)
    def doConnect(self):
        # Runs in the helper thread: connect the client end.
        self.serv_conn.connect(self.serv_addr)
    def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (propagated from the SIGALRM handler
        # installed in InterruptedTimeoutBase.setUp) when interrupted
        # by a signal.  The loop is needed because small sends may
        # complete before the alarm fires.
        try:
            with self.assertRaises(ZeroDivisionError) as cm:
                while True:
                    self.setAlarm(self.alarm_time)
                    func(*args, **kwargs)
        finally:
            # Always cancel any pending alarm.
            self.setAlarm(0)
    # Issue #12958: The following tests have problems on OS X prior to 10.7
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendTimeout(self):
        self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendtoTimeout(self):
        # Passing an actual address here as Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires that the address is ignored since the socket is
        # connection-mode, however.
        self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
                                  self.serv_addr)
    @support.requires_mac_ver(10, 7)
    @requireAttrs(socket.socket, "sendmsg")
    def testInterruptedSendmsgTimeout(self):
        self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
    # Check that a close on one end is observed as EOF (b'') on the
    # other, and that close() is idempotent.
    def testClose(self):
        conn, addr = self.serv.accept()
        conn.close()
        sd = self.cli
        # The closed peer should make the client readable with EOF.
        read, write, err = select.select([sd], [], [], 1.0)
        self.assertEqual(read, [sd])
        self.assertEqual(sd.recv(1), b'')
        # Calling close() many times should be safe.
        conn.close()
        conn.close()
    def _testClose(self):
        self.cli.connect((HOST, self.port))
        time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
    # Basic checks for socket.socketpair(): default family/type/proto
    # and simple send/recv in both directions.
    def __init__(self, methodName='runTest'):
        SocketPairTest.__init__(self, methodName=methodName)
    def _check_defaults(self, sock):
        self.assertIsInstance(sock, socket.socket)
        # socketpair() defaults to AF_UNIX where available, else AF_INET.
        if hasattr(socket, 'AF_UNIX'):
            self.assertEqual(sock.family, socket.AF_UNIX)
        else:
            self.assertEqual(sock.family, socket.AF_INET)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
    def _testDefaults(self):
        self._check_defaults(self.cli)
    def testDefaults(self):
        self._check_defaults(self.serv)
    def testRecv(self):
        msg = self.serv.recv(1024)
        self.assertEqual(msg, MSG)
    def _testRecv(self):
        self.cli.send(MSG)
    def testSend(self):
        self.serv.send(MSG)
    def _testSend(self):
        msg = self.cli.recv(1024)
        self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    # Tests for setblocking()/settimeout() semantics and non-blocking
    # accept()/recv() behavior (BlockingIOError when no data/peer yet).
    def __init__(self, methodName='runTest'):
        # Event used to sequence the client thread against the server.
        self.event = threading.Event()
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)
    def assert_sock_timeout(self, sock, timeout):
        self.assertEqual(self.serv.gettimeout(), timeout)
        blocking = (timeout != 0.0)
        self.assertEqual(sock.getblocking(), blocking)
        if fcntl is not None:
            # When a Python socket has a non-zero timeout, it's switched
            # internally to a non-blocking mode.  Later, sock.sendall(),
            # sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EGAIN on all socket operations.  That's how
            # timeouts are enforced.
            fd_blocking = (timeout is None)
            flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
            self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
    def testSetBlocking(self):
        # Test setblocking() and settimeout() methods
        self.serv.setblocking(True)
        self.assert_sock_timeout(self.serv, None)
        self.serv.setblocking(False)
        self.assert_sock_timeout(self.serv, 0.0)
        self.serv.settimeout(None)
        self.assert_sock_timeout(self.serv, None)
        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)
        self.serv.settimeout(10)
        self.assert_sock_timeout(self.serv, 10)
        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)
    def _testSetBlocking(self):
        pass
    @support.cpython_only
    def testSetBlocking_overflow(self):
        # Issue 15989: an argument larger than UINT_MAX must not be
        # truncated by setblocking().
        import _testcapi
        if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
            self.skipTest('needs UINT_MAX < ULONG_MAX')
        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)
        self.serv.setblocking(_testcapi.UINT_MAX + 1)
        self.assertIsNone(self.serv.gettimeout())
    _testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'test needs socket.SOCK_NONBLOCK')
    @support.requires_linux_version(2, 6, 28)
    def testInitNonBlocking(self):
        # create a socket with SOCK_NONBLOCK
        self.serv.close()
        self.serv = socket.socket(socket.AF_INET,
                                  socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        self.assert_sock_timeout(self.serv, 0)
    def _testInitNonBlocking(self):
        pass
    def testInheritFlagsBlocking(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must be blocking.
        with socket_setdefaulttimeout(None):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertIsNone(conn.gettimeout())
    def _testInheritFlagsBlocking(self):
        self.cli.connect((HOST, self.port))
    def testInheritFlagsTimeout(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must inherit
        # the default timeout.
        default_timeout = 20.0
        with socket_setdefaulttimeout(default_timeout):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertEqual(conn.gettimeout(), default_timeout)
    def _testInheritFlagsTimeout(self):
        self.cli.connect((HOST, self.port))
    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(0)
        # connect() didn't start: non-blocking accept() fails
        start_time = time.monotonic()
        with self.assertRaises(BlockingIOError):
            conn, addr = self.serv.accept()
        dt = time.monotonic() - start_time
        # The failed accept() must return quickly, not block.
        self.assertLess(dt, 1.0)
        self.event.set()
        read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
        if self.serv not in read:
            self.fail("Error trying to do accept after select.")
        # connect() completed: non-blocking accept() doesn't block
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        self.assertIsNone(conn.gettimeout())
    def _testAccept(self):
        # don't connect before event is set to check
        # that non-blocking accept() raises BlockingIOError
        self.event.wait()
        self.cli.connect((HOST, self.port))
    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        conn.setblocking(0)
        # the client didn't send data yet: non-blocking recv() fails
        with self.assertRaises(BlockingIOError):
            msg = conn.recv(len(MSG))
        self.event.set()
        read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
        if conn not in read:
            self.fail("Error during select call to non-blocking socket.")
        # the client has now sent data: non-blocking recv() doesn't block
        msg = conn.recv(len(MSG))
        self.assertEqual(msg, MSG)
    def _testRecv(self):
        self.cli.connect((HOST, self.port))
        # don't send anything before event is set to check
        # that non-blocking recv() raises BlockingIOError
        self.event.wait()
        # send data: recv() will no longer block
        self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
    """Unit tests for the object returned by socket.makefile()
    self.read_file is the io object returned by makefile() on
    the client connection.  You can read from this file to
    get output from the server.
    self.write_file is the io object returned by makefile() on the
    server connection.  You can write to this file to send output
    to the client.
    """
    # Class attributes act as parameters; subclasses override them to
    # vary buffering, encoding, and text/binary mode.
    bufsize = -1 # Use default buffer size
    encoding = 'utf-8'
    errors = 'strict'
    newline = None
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'wb'
    write_msg = MSG
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)
    def setUp(self):
        # Events used to synchronize the server and client halves of
        # individual tests.
        self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
            threading.Event() for i in range(4)]
        SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def tearDown(self):
        self.serv_finished.set()
        self.read_file.close()
        self.assertTrue(self.read_file.closed)
        self.read_file = None
        SocketConnectedTest.tearDown(self)
    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)
    def clientTearDown(self):
        self.cli_finished.set()
        self.write_file.close()
        self.assertTrue(self.write_file.closed)
        self.write_file = None
        SocketConnectedTest.clientTearDown(self)
    def testReadAfterTimeout(self):
        # Issue #7322: A file object must disallow further reads
        # after a timeout has occurred.
        self.cli_conn.settimeout(1)
        self.read_file.read(3)
        # First read raises a timeout
        self.assertRaises(socket.timeout, self.read_file.read, 1)
        # Second read is disallowed
        with self.assertRaises(OSError) as ctx:
            self.read_file.read(1)
        self.assertIn("cannot read from timed out object", str(ctx.exception))
    def _testReadAfterTimeout(self):
        # Send only 3 bytes, then wait so the server's second read
        # times out.
        self.write_file.write(self.write_msg[0:3])
        self.write_file.flush()
        self.serv_finished.wait()
    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.read_file.read(len(self.read_msg)-3)
        second_seg = self.read_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, self.read_msg)
    def _testSmallRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testFullRead(self):
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testFullRead(self):
        # Close after writing so the server sees EOF.
        self.write_file.write(self.write_msg)
        self.write_file.close()
    def testUnbufferedRead(self):
        # Performing unbuffered file read test
        buf = type(self.read_msg)()
        while 1:
            char = self.read_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, self.read_msg)
    def _testUnbufferedRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testReadline(self):
        # Performing file readline test
        line = self.read_file.readline()
        self.assertEqual(line, self.read_msg)
    def _testReadline(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testCloseAfterMakefile(self):
        # The file returned by makefile should keep the socket open.
        self.cli_conn.close()
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)
    def _testCloseAfterMakefile(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testMakefileAfterMakefileClose(self):
        # Closing the file object must leave the socket usable.
        self.read_file.close()
        msg = self.cli_conn.recv(len(MSG))
        if isinstance(self.read_msg, str):
            msg = msg.decode()
        self.assertEqual(msg, self.read_msg)
    def _testMakefileAfterMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()
    def testClosedAttr(self):
        self.assertTrue(not self.read_file.closed)
    def _testClosedAttr(self):
        self.assertTrue(not self.write_file.closed)
    def testAttributes(self):
        self.assertEqual(self.read_file.mode, self.read_mode)
        self.assertEqual(self.read_file.name, self.cli_conn.fileno())
    def _testAttributes(self):
        self.assertEqual(self.write_file.mode, self.write_mode)
        self.assertEqual(self.write_file.name, self.serv_conn.fileno())
    def testRealClose(self):
        self.read_file.close()
        self.assertRaises(ValueError, self.read_file.fileno)
        self.cli_conn.close()
        self.assertRaises(OSError, self.cli_conn.getsockname)
    def _testRealClose(self):
        pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the tests from FileObjectClassTestCase with bufsize==0.

    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer.  Note that http.client relies on this
    when reading multiple requests from the same socket."""

    bufsize = 0 # Use unbuffered mode

    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline() # first line
        self.assertEqual(line, b"A. " + self.write_msg) # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline() # second line
        self.assertEqual(line, b"B. " + self.write_msg) # second line

    def _testUnbufferedReadline(self):
        # Client half: send two prefixed copies of the message.
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()

    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)

    def _testMakefileClose(self):
        # Client half of testMakefileClose: write the message and flush.
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testMakefileCloseSocketDestroy(self):
        # Closing the makefile() object must drop exactly one reference
        # to the underlying socket object.
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)

    def _testMakefileCloseSocketDestroy(self):
        # Nothing to do on the client side.
        pass

    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).

    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        # With no data pending, non-blocking reads return None rather
        # than block.
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        # Message fully consumed: further non-blocking reads return None.
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)

    def _testSmallReadNonBlocking(self):
        # Wait until the server has observed the empty socket, then send.
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)

    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)

    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions. This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    # Re-run the file-object tests with line buffering on the write side.
    bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    # Re-run the file-object tests with a tiny buffer to stress the
    # buffering code paths.
    bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() with a text-mode reader and a
    binary-mode writer."""
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'wb'
    write_msg = MSG
    newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() with a binary-mode reader and a
    text-mode writer."""
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() with text mode in both directions."""
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class NetworkConnectionTest(object):
    """Mixin that opens the client side via socket.create_connection()."""

    def clientSetUp(self):
        # We're inherited below by BasicTCPTest2, which also inherits
        # BasicTCPTest, which defines self.port referenced below.
        self.cli = socket.create_connection((HOST, self.port))
        self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Tests that NetworkConnection does not break existing TCP
    functionality (re-runs BasicTCPTest with create_connection())."""
class NetworkConnectionNoServer(unittest.TestCase):
    # Connection attempts with no server listening; every test expects
    # failure, so no helper thread is needed.

    class MockSocket(socket.socket):
        # A socket class whose connect() always times out.
        def connect(self, *args):
            raise socket.timeout('timed out')

    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Return a socket which times out on connect"""
        old_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            # Always restore the real socket class.
            socket.socket = old_socket

    def test_connect(self):
        # Direct connect() to an unused port must fail with ECONNREFUSED.
        port = support.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(OSError) as cm:
            cli.connect((HOST, port))
        self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)

    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = support.find_unused_port()
        with self.assertRaises(OSError) as cm:
            socket.create_connection((HOST, port))
        # Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'.  This may result in an IPV6 addr being returned
        # as well as an IPV4 one:
        #   >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
        #   >>> [(2,  2, 0, '', ('127.0.0.1', 41230)),
        #        (26, 2, 0, '', ('::1', 41230, 0, 0))]
        #
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully bind to any of them, it propagates
        # the last exception it encountered.
        #
        # On Solaris, ENETUNREACH is returned in this circumstance instead
        # of ECONNREFUSED.  So, if that errno exists, add it to our list of
        # expected errnos.
        expected_errnos = [ errno.ECONNREFUSED, ]
        if hasattr(errno, 'ENETUNREACH'):
            expected_errnos.append(errno.ENETUNREACH)
        if hasattr(errno, 'EADDRNOTAVAIL'):
            # bpo-31910: socket.create_connection() fails randomly
            # with EADDRNOTAVAIL on Travis CI
            expected_errnos.append(errno.EADDRNOTAVAIL)
        self.assertIn(cm.exception.errno, expected_errnos)

    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            with self.assertRaises(socket.timeout):
                socket.create_connection((HOST, 1234))
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    """Check the attributes (family, timeout, source address) of sockets
    produced by socket.create_connection()."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.source_port = support.find_unused_port()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def _justAccept(self):
        # Server side shared by every test here: accept one connection
        # and close it immediately.
        conn, addr = self.serv.accept()
        conn.close()

    testFamily = _justAccept
    def _testFamily(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.family, 2)

    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.

    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            # Always restore the global default for later tests.
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)

    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)

    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        # Register cleanup like every sibling test does; clientTearDown
        # closes self.cli as well (close() is idempotent), but this keeps
        # the methods consistent and robust if teardown ever changes.
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)

    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        # Same as above but with the timeout passed positionally.
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    """Timing-sensitive checks of create_connection() plus recv timeouts."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def testInsideTimeout(self):
        # Server: accept, stall for 3 seconds, then reply.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send(b"done!")
    testOutsideTimeout = testInsideTimeout

    def _testInsideTimeout(self):
        # Client without a timeout happily waits out the server's delay.
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, b"done!")

    def _testOutsideTimeout(self):
        # Client with a 1s timeout gives up before the server's 3s delay.
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
    """Timeout behaviour of a listening TCP socket."""

    def testTCPTimeout(self):
        # accept() on a 1s-timeout socket with no client connecting must
        # raise socket.timeout.
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        # A zero timeout means non-blocking mode: accept() with no
        # pending connection must raise a plain OSError, not
        # socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except OSError:
            ok = True
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit propagate instead of being reported as failures.
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    @unittest.skipUnless(hasattr(signal, 'alarm'),
                         'test needs signal.alarm()')
    def testInterruptedTimeout(self):
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        self.serv.settimeout(5.0)   # must be longer than alarm
        class Alarm(Exception):
            pass
        def alarm_handler(signal, frame):
            raise Alarm
        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            try:
                signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
                self.serv.accept()   # (return value unused on purpose)
            except socket.timeout:
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except Exception:
                # Narrowed from a bare `except:`; see testTimeoutZero.
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)         # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    """Timeout behaviour of a bound UDP socket."""

    def testUDPTimeout(self):
        # recv() on a 1s-timeout socket with no sender must raise
        # socket.timeout.
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        # A zero timeout means non-blocking mode: recv() with no datagram
        # pending must raise a plain OSError, not socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except OSError:
            ok = True
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit propagate instead of being reported as failures.
            self.fail("caught unexpected exception (UDP)")
        if not ok:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Sanity checks on the socket exception hierarchy and fd handling."""

    def testExceptionTree(self):
        # OSError is the root of the socket exception hierarchy.
        self.assertTrue(issubclass(OSError, Exception))
        for exc in (socket.herror, socket.gaierror, socket.timeout):
            self.assertTrue(issubclass(exc, OSError))

    def test_setblocking_invalidfd(self):
        # Regression test for issue #28471
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        sock = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, 0, listener.fileno())
        listener.close()
        self.addCleanup(sock.detach)
        with self.assertRaises(OSError):
            sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
    """Linux abstract-namespace AF_UNIX addresses (leading NUL byte)."""

    UNIX_PATH_MAX = 108

    def testLinuxAbstractNamespace(self):
        # A full connect/accept round trip over an abstract address.
        address = b"\x00python-test-hello\x00\xff"
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
            s1.bind(address)
            s1.listen()
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
                s2.connect(s1.getsockname())
                with s1.accept()[0] as s3:
                    self.assertEqual(s1.getsockname(), address)
                    self.assertEqual(s2.getpeername(), address)

    def testMaxName(self):
        # The longest representable abstract name binds successfully.
        address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(address)
            self.assertEqual(s.getsockname(), address)

    def testNameOverflow(self):
        # One byte over the limit must be rejected.
        address = "\x00" + "h" * self.UNIX_PATH_MAX
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            self.assertRaises(OSError, s.bind, address)

    def testStrName(self):
        # Check that an abstract name can be passed as a string.
        # Rewritten from try/finally to a `with` block for consistency
        # with the other tests in this class; `with` closes the socket
        # just as the finally clause did.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind("\x00python\x00test\x00")
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")

    def testBytearrayName(self):
        # Check that an abstract name can be passed as a bytearray.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(bytearray(b"\x00python\x00test\x00"))
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
    # Binding AF_UNIX sockets to filesystem pathnames in various encodings.

    def setUp(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def encoded(self, path):
        # Return the given path encoded in the file system encoding,
        # or skip the test if this is not possible.
        try:
            return os.fsencode(path)
        except UnicodeEncodeError:
            self.skipTest(
                "Pathname {0!a} cannot be represented in file "
                "system encoding {1!r}".format(
                    path, sys.getfilesystemencoding()))

    def bind(self, sock, path):
        # Bind the socket, skipping the test when the platform rejects
        # the path for being too long.
        try:
            support.bind_unix_socket(sock, path)
        except OSError as e:
            if str(e) == "AF_UNIX path too long":
                self.skipTest(
                    "Pathname {0!a} is too long to serve as an AF_UNIX path"
                    .format(path))
            else:
                raise

    def testUnbound(self):
        # Issue #30205 (note getsockname() can return None on OS X)
        self.assertIn(self.sock.getsockname(), ('', None))

    def testStrAddr(self):
        # Test binding to and retrieving a normal string pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testBytesAddr(self):
        # Test binding to a bytes pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, self.encoded(path))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testSurrogateescapeBind(self):
        # Test binding to a valid non-ASCII pathname, with the
        # non-ASCII bytes supplied using surrogateescape encoding.
        path = os.path.abspath(support.TESTFN_UNICODE)
        b = self.encoded(path)
        self.bind(self.sock, b.decode("ascii", "surrogateescape"))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testUnencodableAddr(self):
        # Test binding to a pathname that cannot be encoded in the
        # file system encoding.
        if support.TESTFN_UNENCODABLE is None:
            self.skipTest("No unencodable filename available")
        path = os.path.abspath(support.TESTFN_UNENCODABLE)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send():
    recv_into()/recvfrom_into() with array, bytearray and memoryview
    buffers.
    """
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecvIntoArray(self):
        # recv_into() an exactly-sized array.array buffer.
        buf = array.array("B", [0] * len(MSG))
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvIntoArray(self):
        # Client half shared by several tests: send one message.
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvIntoBytearray(self):
        # recv_into() an oversized bytearray buffer.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoBytearray = _testRecvIntoArray

    def testRecvIntoMemoryview(self):
        # recv_into() through a memoryview wrapper.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoMemoryview = _testRecvIntoArray

    def testRecvFromIntoArray(self):
        # Same checks for recvfrom_into().
        buf = array.array("B", [0] * len(MSG))
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvFromIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoBytearray = _testRecvFromIntoArray

    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoMemoryview = _testRecvFromIntoArray

    def testRecvFromIntoSmallBuffer(self):
        # See issue #20246: an nbytes larger than the buffer must be
        # rejected instead of overflowing.
        buf = bytearray(8)
        self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)

    def _testRecvFromIntoSmallBuffer(self):
        self.serv_conn.send(MSG)

    def testRecvFromIntoEmptyBuffer(self):
        # A zero-length buffer is accepted (reads nothing).
        buf = bytearray()
        self.cli_conn.recvfrom_into(buf)
        self.cli_conn.recvfrom_into(buf, 0)
    _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
# TIPC test parameters: the service type and the name-sequence port
# range used by the TIPC tests below.
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
    """Check if the TIPC module is loaded.

    The TIPC module is not loaded automatically on Ubuntu and probably
    other Linux distros.
    """
    if not hasattr(socket, "AF_TIPC"):
        return False
    try:
        modules = open("/proc/modules")
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # have not the permission to read it.
        return False
    with modules:
        return any(line.startswith("tipc ") for line in modules)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
    def testRDM(self):
        # One reliable-datagram (SOCK_RDM) round trip over TIPC.
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(srv.close)
        self.addCleanup(cli.close)
        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Server binds a name sequence; the client sends to a name
        # inside that range.
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)
        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        cli.sendto(MSG, sendaddr)
        msg, recvaddr = srv.recvfrom(1024)
        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    # Threaded TIPC SOCK_STREAM client/server exchange.

    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        # Server: bind a name sequence, signal readiness, then accept.
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.srv.close)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen()
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()

    def testStream(self):
        # Server: the message arrives and the peer address matches.
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
    # Sockets as context managers: `with` must close on normal exit,
    # on early close, and on exception.

    def _testSocketClass(self):
        # base test
        with socket.socket() as sock:
            self.assertFalse(sock._closed)
        self.assertTrue(sock._closed)
        # close inside with block
        with socket.socket() as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # exception inside with block
        with socket.socket() as sock:
            self.assertRaises(OSError, sock.sendall, b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionBase(self):
        # Server: echo one payload back to the client.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionBase(self):
        # create_connection() results also work as context managers.
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            self.assertFalse(sock._closed)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionClose(self):
        # Server: echo one payload back to the client.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionClose(self):
        # Closing inside the with block is fine; the socket stays closed.
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            sock.close()
        self.assertTrue(sock._closed)
        self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
    # fd-inheritance (close-on-exec) semantics of sockets.

    @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
                         "SOCK_CLOEXEC not defined")
    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_CLOEXEC(self):
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
            # The CLOEXEC bit must not leak into the reported type...
            self.assertEqual(s.type, socket.SOCK_STREAM)
            # ...and the fd must be non-inheritable.
            self.assertFalse(s.get_inheritable())

    def test_default_inheritable(self):
        # Sockets are non-inheritable by default.
        sock = socket.socket()
        with sock:
            self.assertEqual(sock.get_inheritable(), False)

    def test_dup(self):
        # dup() preserves the non-inheritable flag.
        sock = socket.socket()
        with sock:
            newsock = sock.dup()
            sock.close()
            with newsock:
                self.assertEqual(newsock.get_inheritable(), False)

    def test_set_inheritable(self):
        # set_inheritable() round-trips through get_inheritable().
        sock = socket.socket()
        with sock:
            sock.set_inheritable(True)
            self.assertEqual(sock.get_inheritable(), True)
            sock.set_inheritable(False)
            self.assertEqual(sock.get_inheritable(), False)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        # get_inheritable() reflects the fd's FD_CLOEXEC flag directly.
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(sock.get_inheritable(), False)
            # clear FD_CLOEXEC flag
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags &= ~fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)
            self.assertEqual(sock.get_inheritable(), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        # set_inheritable() updates the fd's FD_CLOEXEC flag.
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             fcntl.FD_CLOEXEC)
            sock.set_inheritable(True)
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             0)

    def test_socketpair(self):
        # Both ends of a socketpair are non-inheritable.
        s1, s2 = socket.socketpair()
        self.addCleanup(s1.close)
        self.addCleanup(s2.close)
        self.assertEqual(s1.get_inheritable(), False)
        self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
                     "SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
    # Interaction of SOCK_NONBLOCK / setblocking() / settimeout() with
    # the reported timeout, the O_NONBLOCK fd flag and getblocking().

    def checkNonblock(self, s, nonblock=True, timeout=0.0):
        if nonblock:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), timeout)
            self.assertTrue(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            if timeout == 0:
                # timeout == 0: means that getblocking() must be False.
                self.assertFalse(s.getblocking())
            else:
                # If timeout > 0, the socket will be in a "blocking" mode
                # from the standpoint of the Python API.  For Python socket
                # object, "blocking" means that operations like 'sock.recv()'
                # will block.  Internally, file descriptors for
                # "blocking" Python sockets *with timeouts* are in a
                # *non-blocking* mode, and 'sock.recv()' uses 'select()'
                # and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
                self.assertTrue(s.getblocking())
        else:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), None)
            self.assertFalse(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            self.assertTrue(s.getblocking())

    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_NONBLOCK(self):
        # a lot of it seems silly and redundant, but I wanted to test that
        # changing back and forth worked ok
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
            self.checkNonblock(s)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
            s.setblocking(0)
            self.checkNonblock(s)
            s.settimeout(None)
            self.checkNonblock(s, nonblock=False)
            s.settimeout(2.0)
            self.checkNonblock(s, timeout=2.0)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
        # defaulttimeout: new sockets pick up the global default timeout.
        t = socket.getdefaulttimeout()
        socket.setdefaulttimeout(0.0)
        with socket.socket() as s:
            self.checkNonblock(s)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(2.0)
        with socket.socket() as s:
            self.checkNonblock(s, timeout=2.0)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        # Restore whatever global default was in effect before the test.
        socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    # Windows-only socket.share() / socket.fromshare() round trips.

    # This must be classmethod and not staticmethod or multiprocessing
    # won't be able to bootstrap it.
    @classmethod
    def remoteProcessServer(cls, q):
        # Recreate socket from shared data
        sdata = q.get()
        message = q.get()
        s = socket.fromshare(sdata)
        s2, c = s.accept()
        # Send the message
        s2.sendall(message)
        s2.close()
        s.close()

    def testShare(self):
        # Transfer the listening server socket to another process
        # and service it from there.
        # Create process:
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
        p.start()
        # Get the shared socket data
        data = self.serv.share(p.pid)
        # Pass the shared socket to the other process
        addr = self.serv.getsockname()
        self.serv.close()
        q.put(data)
        # The data that the server will send us
        message = b"slapmahfro"
        q.put(message)
        # Connect
        s = socket.create_connection(addr)
        #  listen for the data
        m = []
        while True:
            data = s.recv(100)
            if not data:
                break
            m.append(data)
        s.close()
        received = b"".join(m)
        self.assertEqual(received, message)
        p.join()

    def testShareLength(self):
        # fromshare() must reject truncated or padded share blobs.
        data = self.serv.share(os.getpid())
        self.assertRaises(ValueError, socket.fromshare, data[:-1])
        self.assertRaises(ValueError, socket.fromshare, data+b"foo")

    def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking socket
        # since the internal python timeout value isn't transferred.
        self.assertEqual(org.gettimeout(), None)
        self.assertEqual(org.gettimeout(), other.gettimeout())
        self.assertEqual(org.family, other.family)
        self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally windows will have picked the correct value.
        # Python introspection on the socket however will still return
        # 0.  For the shared socket, the python value is recreated
        # from the actual value, so it may not compare correctly.
        if org.proto != 0:
            self.assertEqual(org.proto, other.proto)

    def testShareLocal(self):
        # Sharing with our own pid must yield an equivalent socket.
        data = self.serv.share(os.getpid())
        s = socket.fromshare(data)
        try:
            self.compareSockets(self.serv, s)
        finally:
            s.close()

    def testTypes(self):
        # Round-trip every supported family/type combination.
        families = [socket.AF_INET, socket.AF_INET6]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
        for f in families:
            for t in types:
                try:
                    source = socket.socket(f, t)
                except OSError:
                    continue                  # This combination is not supported
                try:
                    data = source.share(os.getpid())
                    shared = socket.fromshare(data)
                    try:
                        self.compareSockets(source, shared)
                    finally:
                        shared.close()
                finally:
                    source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
    """Create a FILESIZE-byte test file on disk and cache its bytes.

    The file is built from repeated random-letter chunks and read back
    into cls.FILEDATA so the receiving side can compare payloads.
    """
    def chunks(total, step):
        # Yield `step`-sized pieces covering `total`, plus the
        # remainder (if any) as the final, smaller piece.
        assert total >= step
        while total > step:
            yield step
            total -= step
        if total:
            yield total
    chunk = b"".join([random.choice(string.ascii_letters).encode()
                      for i in range(cls.BUFSIZE)])
    with open(support.TESTFN, 'wb') as f:
        for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
            # Write only `csize` bytes (the original wrote the full
            # BUFSIZE chunk, which would overshoot FILESIZE whenever
            # FILESIZE is not an exact multiple of BUFSIZE).
            f.write(chunk[:csize])
    with open(support.TESTFN, 'rb') as f:
        cls.FILEDATA = f.read()
    assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
    # Remove the big test file created by setUpClass.
    support.unlink(support.TESTFN)
def accept_conn(self):
    # Accept one client, using a generous timeout for accept() itself
    # and the per-test TIMEOUT on the accepted connection.
    self.serv.settimeout(MAIN_TIMEOUT)
    conn, addr = self.serv.accept()
    conn.settimeout(self.TIMEOUT)
    self.addCleanup(conn.close)
    return conn
def recv_data(self, conn):
    """Drain `conn` until EOF and return everything received as bytes."""
    pieces = []
    # recv() returns b'' exactly at EOF, which is iter()'s sentinel.
    for piece in iter(lambda: conn.recv(self.BUFSIZE), b''):
        pieces.append(piece)
    return b''.join(pieces)
def meth_from_sock(self, sock):
    # Depending on the mixin class being run return either send()
    # or sendfile() method implementation.
    return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
    # Client: send a regular on-disk file in full and verify both the
    # reported byte count and the final file position.
    address = self.serv.getsockname()
    file = open(support.TESTFN, 'rb')
    with socket.create_connection(address) as sock, file as file:
        meth = self.meth_from_sock(sock)
        sent = meth(file)
        self.assertEqual(sent, self.FILESIZE)
        self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
    # Server: the complete file must arrive intact.
    conn = self.accept_conn()
    data = self.recv_data(conn)
    self.assertEqual(len(data), self.FILESIZE)
    self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
    # Client: a BytesIO is not a regular file, so the os.sendfile()
    # fast path must give up on it, while sendfile() still succeeds by
    # falling back to send().
    address = self.serv.getsockname()
    file = io.BytesIO(self.FILEDATA)
    with socket.create_connection(address) as sock, file as file:
        sent = sock.sendfile(file)
        self.assertEqual(sent, self.FILESIZE)
        self.assertEqual(file.tell(), self.FILESIZE)
        self.assertRaises(socket._GiveupOnSendfile,
                          sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
    # Server: the complete payload must arrive intact.
    conn = self.accept_conn()
    data = self.recv_data(conn)
    self.assertEqual(len(data), self.FILESIZE)
    self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
    # Client: sending an empty file transfers zero bytes and leaves the
    # file position at 0.
    address = self.serv.getsockname()
    filename = support.TESTFN + "2"
    with open(filename, 'wb'):
        self.addCleanup(support.unlink, filename)
    file = open(filename, 'rb')
    with socket.create_connection(address) as sock, file as file:
        meth = self.meth_from_sock(sock)
        sent = meth(file)
        self.assertEqual(sent, 0)
        self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
    # Server: nothing must arrive for the empty file.
    conn = self.accept_conn()
    data = self.recv_data(conn)
    self.assertEqual(data, b"")
# offset
def _testOffset(self):
    # Client: start the transfer 5000 bytes into the file.
    address = self.serv.getsockname()
    file = open(support.TESTFN, 'rb')
    with socket.create_connection(address) as sock, file as file:
        meth = self.meth_from_sock(sock)
        sent = meth(file, offset=5000)
        self.assertEqual(sent, self.FILESIZE - 5000)
        self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
    # Server: everything after the 5000-byte offset must arrive.
    conn = self.accept_conn()
    data = self.recv_data(conn)
    self.assertEqual(len(data), self.FILESIZE - 5000)
    self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
    # Client: limit the transfer to `count` bytes.
    address = self.serv.getsockname()
    file = open(support.TESTFN, 'rb')
    with socket.create_connection(address, timeout=2) as sock, file as file:
        count = 5000007
        meth = self.meth_from_sock(sock)
        sent = meth(file, count=count)
        self.assertEqual(sent, count)
        self.assertEqual(file.tell(), count)
def testCount(self):
    # Server: exactly `count` bytes arrive, matching the file prefix.
    count = 5000007
    conn = self.accept_conn()
    data = self.recv_data(conn)
    self.assertEqual(len(data), count)
    self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
    # Client: the degenerate single-byte count also works.
    address = self.serv.getsockname()
    file = open(support.TESTFN, 'rb')
    with socket.create_connection(address, timeout=2) as sock, file as file:
        count = 1
        meth = self.meth_from_sock(sock)
        sent = meth(file, count=count)
        self.assertEqual(sent, count)
        self.assertEqual(file.tell(), count)
def testCountSmall(self):
    # Server side: a single byte must arrive, matching the file start.
    nbytes = 1
    connection = self.accept_conn()
    received = self.recv_data(connection)
    self.assertEqual(len(received), nbytes)
    self.assertEqual(received, self.FILEDATA[:nbytes])
# count + offset
def _testCountWithOffset(self):
    """Client side: combine an offset with a byte count."""
    srv_addr = self.serv.getsockname()
    src = open(support.TESTFN, 'rb')
    with socket.create_connection(srv_addr, timeout=2) as sock, src as src:
        nbytes = 100007
        sendfunc = self.meth_from_sock(sock)
        nsent = sendfunc(src, offset=2007, count=nbytes)
        # `count` bytes were sent and the file position advanced past
        # offset + count.
        self.assertEqual(nsent, nbytes)
        self.assertEqual(src.tell(), nbytes + 2007)
def testCountWithOffset(self):
    # Server side: `count` bytes starting at offset 2007 must arrive.
    nbytes = 100007
    connection = self.accept_conn()
    received = self.recv_data(connection)
    self.assertEqual(len(received), nbytes)
    self.assertEqual(received, self.FILEDATA[2007:nbytes + 2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
    """Client side: sendfile() must refuse non-blocking sockets."""
    srv_addr = self.serv.getsockname()
    src = open(support.TESTFN, 'rb')
    with socket.create_connection(srv_addr) as sock, src as src:
        sock.setblocking(False)
        # Both the selected implementation and the public wrapper are
        # expected to raise ValueError on a non-blocking socket.
        self.assertRaises(ValueError, self.meth_from_sock(sock), src)
        self.assertRaises(ValueError, sock.sendfile, src)
def testNonBlocking(self):
    # Server side: the client never manages to send anything.
    connection = self.accept_conn()
    if connection.recv(8192):
        self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
    """Client side: a generous timeout must not disturb the transfer."""
    srv_addr = self.serv.getsockname()
    src = open(support.TESTFN, 'rb')
    with socket.create_connection(srv_addr, timeout=2) as sock, src as src:
        sendfunc = self.meth_from_sock(sock)
        self.assertEqual(sendfunc(src), self.FILESIZE)
def testWithTimeout(self):
    # Server side: the complete file must arrive despite the timeout.
    connection = self.accept_conn()
    received = self.recv_data(connection)
    self.assertEqual(len(received), self.FILESIZE)
    self.assertEqual(received, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
    """Client side: a very short timeout must make sendfile() fail."""
    srv_addr = self.serv.getsockname()
    with open(support.TESTFN, 'rb') as src:
        with socket.create_connection(srv_addr) as sock:
            sock.settimeout(0.01)
            sendfunc = self.meth_from_sock(sock)
            self.assertRaises(socket.timeout, sendfunc, src)
def testWithTimeoutTriggeredSend(self):
    # Server side: drain whatever the client managed to push before
    # its timeout fired; no assertions here.
    connection = self.accept_conn()
    connection.recv(88192)
# errors
def _test_errors(self):
    # Client side: test_errors only exercises local argument
    # validation, so there is nothing for the peer thread to do.
    pass
def test_errors(self):
    """Verify sendfile() rejects bad socket types, file modes and counts."""
    # Non-stream sockets are rejected.
    with open(support.TESTFN, 'rb') as file:
        with socket.socket(type=socket.SOCK_DGRAM) as s:
            meth = self.meth_from_sock(s)
            self.assertRaisesRegex(
                ValueError, "SOCK_STREAM", meth, file)
    # Files opened in text mode are rejected.
    with open(support.TESTFN, 'rt') as file:
        with socket.socket() as s:
            meth = self.meth_from_sock(s)
            self.assertRaisesRegex(
                ValueError, "binary mode", meth, file)
    # Invalid `count` values are rejected: wrong type -> TypeError,
    # non-positive integer -> ValueError.
    with open(support.TESTFN, 'rb') as file:
        with socket.socket() as s:
            meth = self.meth_from_sock(s)
            for exc_type, bad_count in ((TypeError, '2'),
                                        (TypeError, 0.1),
                                        (ValueError, 0),
                                        (ValueError, -1)):
                self.assertRaisesRegex(exc_type, "positive integer",
                                       meth, file, count=bad_count)
@unittest.skipUnless(hasattr(os, "sendfile"),
                     'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
    """
    Test the sendfile() implementation of socket.sendfile().
    """
    def meth_from_sock(self, sock):
        # Resolve the zero-copy (os.sendfile-based) implementation
        # bound to this socket.
        return sock._sendfile_use_sendfile
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
    # tests for AF_ALG
    # AF_ALG is the Linux kernel crypto API's userspace interface: an
    # algorithm is selected by bind()ing a SOCK_SEQPACKET socket to a
    # (type, name) pair; accept() then yields an operation socket.

    def create_alg(self, typ, name):
        """Return an AF_ALG socket bound to (typ, name).

        Skips the running test if the kernel does not provide the
        requested algorithm (bind() raises FileNotFoundError).
        """
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        try:
            sock.bind((typ, name))
        except FileNotFoundError as e:
            # type / algorithm is not available
            sock.close()
            raise unittest.SkipTest(str(e), typ, name)
        else:
            return sock

    # bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
    # at least on ppc64le architecture
    @support.requires_linux_version(4, 5)
    def test_sha256(self):
        """SHA-256 digest via AF_ALG, in one shot and chunked."""
        expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
                                 "177a9cb410ff61f20015ad")
        with self.create_alg('hash', 'sha256') as algo:
            op, _ = algo.accept()
            with op:
                op.sendall(b"abc")
                self.assertEqual(op.recv(512), expected)

            # Same digest, but fed one byte at a time with MSG_MORE and
            # terminated by an empty send().
            op, _ = algo.accept()
            with op:
                op.send(b'a', socket.MSG_MORE)
                op.send(b'b', socket.MSG_MORE)
                op.send(b'c', socket.MSG_MORE)
                op.send(b'')
                self.assertEqual(op.recv(512), expected)

    def test_hmac_sha1(self):
        """HMAC-SHA1 with key b"Jefe" (RFC 2202 test case 2)."""
        expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
        with self.create_alg('hash', 'hmac(sha1)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
            op, _ = algo.accept()
            with op:
                op.sendall(b"what do ya want for nothing?")
                self.assertEqual(op.recv(512), expected)

    # Although it should work with 3.19 and newer the test blocks on
    # Ubuntu 15.10 with Kernel 4.2.0-19.
    @support.requires_linux_version(4, 3)
    def test_aes_cbc(self):
        """AES-128-CBC round trip for a single block and a long message."""
        key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
        iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
        msg = b"Single block msg"
        ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
        msglen = len(msg)
        with self.create_alg('skcipher', 'cbc(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            # Encrypt one block; the IV is passed per operation via
            # sendmsg_afalg, the payload follows with MSG_MORE.
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 flags=socket.MSG_MORE)
                op.sendall(msg)
                self.assertEqual(op.recv(msglen), ciphertext)

            # Decrypt it back.
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([ciphertext],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                self.assertEqual(op.recv(msglen), msg)

            # long message
            multiplier = 1024
            longmsg = [msg] * multiplier
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(longmsg,
                                 op=socket.ALG_OP_ENCRYPT, iv=iv)
                enc = op.recv(msglen * multiplier)
            self.assertEqual(len(enc), msglen * multiplier)
            # CBC: only the first block matches the single-block vector.
            self.assertEqual(enc[:msglen], ciphertext)

            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([enc],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                dec = op.recv(msglen * multiplier)
            self.assertEqual(len(dec), msglen * multiplier)
            self.assertEqual(dec, msg * multiplier)

    @support.requires_linux_version(4, 9)  # see issue29324
    def test_aead_aes_gcm(self):
        """AES-GCM AEAD: encrypt via three interfaces, then decrypt."""
        # NIST GCM test vector; the key was lost to redaction upstream
        # ('<KEY>') and is restored here to the value matching the
        # iv/plaintext/ciphertext/tag quadruple below.
        key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
        iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
        plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
        assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
        expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
        expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')

        taglen = len(expected_tag)
        assoclen = len(assoc)

        with self.create_alg('aead', 'gcm(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
                            None, taglen)

            # send assoc, plain and tag buffer in separate steps
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen, flags=socket.MSG_MORE)
                op.sendall(assoc, socket.MSG_MORE)
                op.sendall(plain)
                res = op.recv(assoclen + len(plain) + taglen)
                # Result layout: assoc data | ciphertext | auth tag.
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # now with msg
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # create anc data manually
            pack_uint32 = struct.Struct('I').pack
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg(
                    [msg],
                    ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
                     [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
                     [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
                    )
                )
                res = op.recv(len(msg) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # decrypt and verify
            op, _ = algo.accept()
            with op:
                msg = assoc + expected_ct + expected_tag
                op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(len(msg) - taglen)
                self.assertEqual(plain, res[assoclen:])

    @support.requires_linux_version(4, 3)  # see test_aes_cbc
    def test_drbg_pr_sha256(self):
        # deterministic random bit generator, prediction resistance, sha256
        with self.create_alg('rng', 'drbg_pr_sha256') as algo:
            extra_seed = os.urandom(32)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
            op, _ = algo.accept()
            with op:
                # Only the output length can be checked; the bytes
                # themselves are random.
                rn = op.recv(32)
                self.assertEqual(len(rn), 32)

    def test_sendmsg_afalg_args(self):
        """sendmsg_afalg() argument validation on an unbound socket."""
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        with sock:
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg()

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(1)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)

    def test_length_restriction(self):
        # bpo-35050, off-by-one error in length check
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(sock.close)

        # salg_type[14]: 13 chars fit (plus NUL), 14 must be rejected.
        with self.assertRaises(FileNotFoundError):
            sock.bind(("t" * 13, "name"))
        with self.assertRaisesRegex(ValueError, "type too long"):
            sock.bind(("t" * 14, "name"))

        # salg_name[64]: 63 chars fit (plus NUL), 64 must be rejected.
        with self.assertRaises(FileNotFoundError):
            sock.bind(("type", "n" * 63))
        with self.assertRaisesRegex(ValueError, "name too long"):
            sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
    """Guard against new, unreviewed TCP_* constants on Windows."""

    knownTCPFlags = {
        # available since long time ago
        'TCP_MAXSEG',
        'TCP_NODELAY',
        # available starting with Windows 10 1607
        'TCP_FASTOPEN',
        # available starting with Windows 10 1703
        'TCP_KEEPCNT',
        # available starting with Windows 10 1709
        'TCP_KEEPIDLE',
        'TCP_KEEPINTVL'
    }

    def test_new_tcp_flags(self):
        # Any TCP_* name exported by the socket module but missing from
        # the known set is "new" and needs review.
        unknown = [name for name in dir(socket)
                   if name.startswith('TCP') and
                   name not in self.knownTCPFlags]
        self.assertEqual([], unknown,
            "New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
    """Run the whole socket test suite with threading bookkeeping."""
    # One flat list, in the exact order the previous append/extend
    # chain produced, so the run order is unchanged.
    tests = [
        GeneralModuleTests,
        BasicTCPTest,
        TCPCloserTest,
        TCPTimeoutTest,
        TestExceptions,
        BufferIOTest,
        BasicTCPTest2,
        BasicUDPTest,
        UDPTimeoutTest,
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        UnicodeReadFileObjectClassTestCase,
        UnicodeWriteFileObjectClassTestCase,
        UnicodeReadWriteFileObjectClassTestCase,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
        ContextManagersTest,
        InheritanceTest,
        NonblockConstantTest,
        BasicSocketPairTest,
        TestUnixDomain,
        TestLinuxAbstractNamespace,
        TIPCTest,
        TIPCThreadableTest,
        BasicCANTest,
        CANTest,
        BasicRDSTest,
        RDSTest,
        LinuxKernelCryptoAPI,
        BasicQIPCRTRTest,
        BasicVSOCKTest,
        ThreadedVSOCKSocketStreamTest,
        CmsgMacroTests,
        SendmsgUDPTest,
        RecvmsgUDPTest,
        RecvmsgIntoUDPTest,
        SendmsgUDP6Test,
        RecvmsgUDP6Test,
        RecvmsgRFC3542AncillaryUDP6Test,
        RecvmsgIntoRFC3542AncillaryUDP6Test,
        RecvmsgIntoUDP6Test,
        SendmsgTCPTest,
        RecvmsgTCPTest,
        RecvmsgIntoTCPTest,
        SendmsgSCTPStreamTest,
        RecvmsgSCTPStreamTest,
        RecvmsgIntoSCTPStreamTest,
        SendmsgUnixStreamTest,
        RecvmsgUnixStreamTest,
        RecvmsgIntoUnixStreamTest,
        RecvmsgSCMRightsStreamTest,
        RecvmsgIntoSCMRightsStreamTest,
        # These are slow when setitimer() is not available
        InterruptedRecvTimeoutTest,
        InterruptedSendTimeoutTest,
        TestSocketSharing,
        SendfileUsingSendTest,
        SendfileUsingSendfileTest,
        TestMSWindowsTCPFlags,
    ]

    thread_info = support.threading_setup()
    support.run_unittest(*tests)
    support.threading_cleanup(*thread_info)
# Allow running this test module as a standalone script.
if __name__ == "__main__":
    test_main()
| en | 0.855463 | # test unicode string and carriage return Check whether CAN sockets are supported on this host. Check whether CAN ISOTP sockets are supported on this host. Check whether RDS sockets are supported on this host. Check whether AF_ALG sockets are supported on this host. Check whether AF_QIPCRTR sockets are supported on this host. Check whether AF_VSOCK sockets are supported on this host. # Size in bytes of the int type Subclass of unittest.TestCase with thread-safe cleanup methods. This subclass protects the addCleanup() and doCleanups() methods with a recursive lock. To be able to run this test, a `vcan0` CAN interface can be created with the following commands: # modprobe vcan # ip link add dev vcan0 type vcan # ifconfig vcan0 up The CAN frame structure is defined in <linux/can.h>: struct can_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ __u8 can_dlc; /* data length code: 0 .. 8 */ __u8 data[8] __attribute__((aligned(8))); }; The Broadcast Management Command frame structure is defined in <linux/can/bcm.h>: struct bcm_msg_head { __u32 opcode; __u32 flags; __u32 count; struct timeval ival1, ival2; canid_t can_id; __u32 nframes; struct can_frame frames[0]; } `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see `struct can_frame` definition). Must use native not standard types for packing. To be able to run this test, the `rds` kernel module must be loaded: # modprobe rds Threadable Test class The ThreadableTest class makes it easy to create a threaded client/server pair from an existing unit test. To create a new threaded class from an existing unit test, use multiple inheritance: class NewClass (OldClass, ThreadableTest): pass This class defines two new fixture functions with obvious purposes for overriding: clientSetUp () clientTearDown () Any new test functions within the class must then define tests in pairs, where the test name is preceded with a '_' to indicate the client portion of the test. 
Ex: def testFoo(self): # Server portion def _testFoo(self): # Client portion Any exceptions raised by the clients during their tests are caught and transferred to the main thread to alert the testing framework. Note, the server setup function cannot call any blocking functions that rely on the client thread during setup, unless serverExplicitReady() is called just before the blocking call (such as in setting up a client/server connection and performing the accept() in setUp(). # Swap the true setup function This method allows the server to explicitly indicate that it wants the client thread to proceed. This is useful if the server is about to execute a blocking routine that is dependent upon the client thread during its setup routine. # Do some munging to start the client test. # skipTest should not be called here, and will be called in the # server instead # RDS sockets must be bound explicitly to send or receive data # skipTest should not be called here, and will be called in the # server instead Socket tests for client-server connection. self.cli_conn is a client socket connected to the server. The setUp() method guarantees that it is connected to the server. # Indicate explicitly we're ready for the client thread to # proceed and then perform the blocking call to accept # The following classes are used by the sendmsg()/recvmsg() tests. # Combining, for instance, ConnectedStreamTestMixin and TCPTestBase # gives a drop-in replacement for SocketConnectedTest, but different # address families can be used, and the attributes serv_addr and # cli_addr will be set to the addresses of the endpoints. A base class for socket tests. Subclasses must provide methods newSocket() to return a new socket and bindSock(sock) to bind it to an unused address. Creates a socket self.serv and sets self.serv_addr to its address. Bind server socket and set self.serv_addr to its address. Mixin to listen on the server socket. Mixin to add client socket and allow client/server tests. 
Client socket is self.cli and its address is self.cli_addr. See ThreadableTest for usage information. Return a new socket for use as client. Bind client socket and set self.cli_addr to its address. Mixin to allow client/server stream tests with connected client. Server's socket representing connection to client is self.cli_conn and client's connection to server is self.serv_conn. (Based on SocketConnectedTest.) # Indicate explicitly we're ready for the client thread to # proceed and then perform the blocking call to accept Base class for Unix-domain socket tests. # This class is used for file descriptor passing tests, so we # create the sockets in a private directory so that other users # can't send anything that might be problematic for a privileged # user running the tests. Base class for Unix-domain SOCK_STREAM tests. Base class for IPv4 socket tests. Base class for TCP-over-IPv4 tests. Base class for UDP-over-IPv4 tests. Base class for SCTP tests in one-to-one (SOCK_STREAM) mode. Base class for IPv6 socket tests. Base class for UDP-over-IPv6 tests. # Test-skipping decorators for use with ThreadableTest. Skip decorated test if condition is true, add client_skip decorator. If the decorated object is not a class, sets its attribute "client_skip" to a decorator which will return an empty function if the test is to be skipped, or the original function if it is not. This can be used to avoid running the client part of a skipped test when using ThreadableTest. Skip decorated test if obj is missing any of the given attributes. Sets client_skip attribute as skipWithClientIf() does. Skip decorated test if a socket cannot be created with given arguments. When an argument is given as a string, will use the value of that attribute of the socket module, or skip the test if it doesn't exist. Sets client_skip attribute as skipWithClientIf() does. # XXX: check errno? 
####################################################################### ## Begin Tests # Testing socket module exceptions # Testing that sendto doesn't mask failures. See #10169. # 2 args # 3 args # wrong number of args # Testing for mission critical constants # Testing hostname resolution mechanisms # Probably name lookup wasn't set up right; skip this test # Probably a similar problem as above; skip this test # we don't test support.HOSTv6 because there's a chance it doesn't have # a matching name entry (e.g. 'ip6-localhost') # These are all malformed IP addresses and expected not to resolve to # any result. But some ISPs, e.g. AWS, may successfully resolve these # IPs. # running test as root! # Should work with bytes objects too # interface indices are non-zero integers # test nonexistent interface index/name # test with invalid values # Testing reference count for getnameinfo # On some versions, this loses a reference # Making sure getnameinfo doesn't crash the interpreter # On some versions, this crashes the interpreter. # This just checks that htons etc. are their own inverse, # when looking at the lower 16 or 32 bits. # Find one service that exists, then check all the related interfaces. # I've ordered this by protocols that have both a tcp and udp # protocol, at least for modern Linuxes. # avoid the 'echo' service on this platform, as there is an # assumption breaking non-standard port/protocol entry # Try same call with optional protocol omitted # Issue #26936: Android getservbyname() was broken before API 23. # Try udp, but don't barf if it doesn't exist # Now make sure the lookup by port returns the same service name # Issue #26936: Android getservbyport() is broken. # Make sure getservbyport does not accept out of range ports. 
# Testing default timeout # The default timeout should initially be None # Set the default timeout to 10, and see if it propagates # Reset the default timeout to None, and see if it propagates # Check that setting it to an invalid value raises ValueError # Check that setting it to an invalid type raises TypeError # Test that issue1008086 and issue767150 are fixed. # It must return 4 bytes. # bpo-29972: inet_pton() doesn't fail on AIX # bpo-29972: inet_pton() doesn't fail on AIX # XXX The following don't test module-level functionality... # Testing getsockname() # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate # it reasonable to get the host's addr in addition to 0.0.0.0. # At least for eCos. This is required for the S/390 to pass. # Probably name lookup wasn't set up right; skip this test # Testing getsockopt() # We know a socket should start without reuse==0 # Testing setsockopt() # testing send() after close() with timeout # Winsock apparently raises ENOTSOCK # testing .family, .type and .protocol # Since find_unused_port() is inherently subject to race conditions, we # call it a couple times if necessary. # see http://bugs.python.org/issue1282647 # len of every sequence is supposed to be == 5 # host can be a domain name, a string representation of an # IPv4/v6 address or None # port can be a string service name such as "http", a numeric # port number or None # Issue #26936: Android getaddrinfo() was broken before API level 23. # test family and socktype filters # test proto and flags arguments # a server willing to support both IPv4 and IPv6 will # usually do this # test keyword arguments # Issue #6697. # Issue 17269: test workaround for OS X platform bug segfault # The arguments here are undefined and the call may succeed # or fail. All we care here is that it doesn't segfault. # only IP addresses are allowed # Check for internet access before running test # (issue #12804, issue #25138). 
# these should all be successful # this may not work if the forward lookup chooses the IPv6 address, as that doesn't # have a reverse entry yet # socket.gethostbyaddr('испытание.python.org') # socketpair() is not strictly required, but it makes things easier. # Our signal handlers clobber the C errno by calling a math function # with an invalid domain value. # Just above the one second minimum for signal.alarm # An open socket file object gets dereferenced after the socket # sock.close() fails with EBADF # Issue 15989 # Note capital letter `D`. # Just pick up any network interface (Linux, Mac OS X) # Note missing interface name part in IPv6 address # Also works on Linux and Mac OS X, but is not documented (?) # Windows, Linux and Max OS X allow nonexistent interface numbers here. # Note missing interface name part in IPv6 address # Just pick up any network interface. # Note capital letter `D`. # Also works on Linux (undocumented), but does not work on Mac OS X # Windows and Linux allow nonexistent interface numbers here. # Note capital letter `D`. # Make sure that the AF_* and SOCK_* constants have enum-like string # reprs. # Test that when created with a family that's not one of the known # AF_*/SOCK_* constants, socket.family just returns the number. # # To do this we fool socket.socket into believing it already has an # open fd because on this path it doesn't actually verify the family and # type and populates the socket object. 
# some OS like macOS ignore proto # detach old fd to avoid double close # opcodes # create (cyclic) transmission task # remove (cyclic) transmission task # read properties of (cyclic) transmission task # send one CAN frame # create RX content filter subscription # remove RX content filter subscription # read properties of RX content filter subscription # reply to TX_READ request # notification on performed transmissions (count=0) # reply to RX_READ request # cyclic message is absent # updated CAN frame (detected content change) # most systems limit IFNAMSIZ to 16, take 1024 to be sure Build a CAN frame. Dissect a CAN frame. # most systems limit IFNAMSIZ to 16, take 1024 to be sure # Testing large receive over TCP # Testing receive in chunks over TCP # Testing large recvfrom() over TCP # Testing recvfrom() in chunks over TCP # Testing sendall() with a 2048 byte string over TCP # Testing fromfd() # Testing dup() # Testing shutdown() # wait for _testShutdown to finish: on OS X, when the server # closes the connection the client also becomes disconnected, # and the client's shutdown call will fail. (Issue #4397.) # Issue 15989 # Testing detach() # cli_conn cannot be used anymore... # ...but we can create another socket using the (still open) # file descriptor # Testing sendto() and Recv() over UDP # Testing recvfrom() over UDP # Negative lengths passed to recvfrom should give ValueError. # Tests for the sendmsg()/recvmsg() interface. Where possible, the # same test code is used with different families and types of socket # (e.g. stream, datagram), and tests using recvmsg() are repeated # using recvmsg_into(). # # The generic test classes such as SendmsgTests and # RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be # supplied with sockets cli_sock and serv_sock representing the # client's and the server's end of the connection respectively, and # attributes cli_addr and serv_addr holding their (numeric where # appropriate) addresses. 
# # The final concrete test classes combine these with subclasses of # SocketTestBase which set up client and server sockets of a specific # type, and with subclasses of SendrecvmsgBase such as # SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these # sockets to cli_sock and serv_sock and override the methods and # attributes of SendrecvmsgBase to fill in destination addresses if # needed when sending, check for specific flags in msg_flags, etc. # # RecvmsgIntoMixin provides a version of doRecvmsg() implemented using # recvmsg_into(). # XXX: like the other datagram (UDP) tests in this module, the code # here assumes that datagram delivery on the local machine will be # reliable. # Base class for sendmsg()/recvmsg() tests. # Time in seconds to wait before considering a test failed, or # None for no timeout. Not all tests actually set a timeout. # Send msg to the server. # Tuple of alternative default arguments for sendmsg() when called # via sendmsgToServer() (e.g. to include a destination address). # Call sendmsg() on self.cli_sock with the given arguments, # filling in any arguments which are not supplied with the # corresponding items of self.sendmsg_to_server_defaults, if # any. # Call recvmsg() on sock with given arguments and return its # result. Should be used for tests which can use either # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides # this method with one which emulates it using recvmsg_into(), # thus allowing the same test to be used for both methods. # Called by doRecvmsg() with the return value of recvmsg() or # recvmsg_into(). Can be overridden to arrange cleanup based # on the returned ancillary data, for instance. # Called to compare the received address with the address of # the peer. # Flags that are normally unset in msg_flags # Flags that are normally set # Flags set when a complete record has been received (e.g. MSG_EOR # for SCTP) # Flags set when a complete record has not been received # (e.g. 
MSG_TRUNC for datagram sockets) # Method to check the value of msg_flags returned by recvmsg[_into](). # # Checks that all bits in msg_flags_common_set attribute are # set in "flags" and all bits in msg_flags_common_unset are # unset. # # The "eor" argument specifies whether the flags should # indicate that a full record (or datagram) has been received. # If "eor" is None, no checks are done; otherwise, checks # that: # # * if "eor" is true, all bits in msg_flags_eor_indicator are # set and all bits in msg_flags_non_eor_indicator are unset # # * if "eor" is false, all bits in msg_flags_non_eor_indicator # are set and all bits in msg_flags_eor_indicator are unset # # If "checkset" and/or "checkunset" are supplied, they require # the given bits to be set or unset respectively, overriding # what the attributes require for those bits. # # If any bits are set in "ignore", they will not be checked, # regardless of the other inputs. # # Will raise Exception if the inputs require a bit to be both # set and unset, and it is not ignored. # Function arguments override defaults # Merge arguments with remaining defaults, and check for conflicts #x}".format(inboth)) # Compare with given msg_flags value # Mixin to implement doRecvmsg() using recvmsg_into(). # Defines flags to be checked in msg_flags for datagram sockets. # Defines flags to be checked in msg_flags for SCTP sockets. # Base class for tests on connectionless-mode sockets. Users must # supply sockets on attributes cli and serv to be mapped to # cli_sock and serv_sock respectively. # Base class for tests on connected sockets. Users must supply # sockets on attributes serv_conn and cli_conn (representing the # connections *to* the server and the client), to be mapped to # cli_sock and serv_sock respectively. # Address is currently "unspecified" for a connected socket, # so we don't examine it # Base class to set a timeout on server's socket. 
# Tests for sendmsg() which can use any socket type and do not # involve recvmsg() or recvmsg_into(). # Send a simple message with sendmsg(). # Send from buffer obtained from a generator (not a sequence). # Gather (empty) ancillary data from a generator. # Send data from an array instead of the usual bytes object. # Send message data from more than one buffer (gather write). # Check that sendmsg() rejects invalid arguments. # Check that invalid ancillary data items are rejected. # Check that invalid ancillary data items are rejected when # more than one item is present. # Check that sendmsg() rejects excess ancillary data items # when the number that can be sent is limited. # Can only send one item # Check that sendmsg() fails on a closed socket. # Tests for sendmsg() which require a stream socket and do not # involve recvmsg() or recvmsg_into(). # Check that peer address can be specified as None. # Check that timeout works with sendmsg(). # bpo-33937 the test randomly fails on Travis CI with # "OSError: [Errno 12] Cannot allocate memory" # XXX: would be nice to have more tests for sendmsg flags argument. # Linux supports MSG_DONTWAIT when sending, but in general, it # only works when receiving. Could add other platforms if they # support it too. # Check that MSG_DONTWAIT in flags causes non-blocking behaviour. # bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI # with "OSError: [Errno 12] Cannot allocate memory" # Tests for sendmsg() which require a connectionless-mode # (e.g. datagram) socket, and do not involve recvmsg() or # recvmsg_into(). # Check that sendmsg() fails when no destination address is # given for unconnected socket. # Tests for recvmsg() which can also be emulated using # recvmsg_into(), and can use any socket type. # Receive a simple message with recvmsg[_into](). # Test recvmsg[_into]() with default arguments provided explicitly. # Receive a message smaller than buffer. 
# Receive part of message, check for truncation indicators. # Test ancillary data buffer too small to hold any ancillary data. # Test large ancillary data buffer. # Check that recvmsg[_into]() fails on a closed socket. # Check that timeout works. # Check that MSG_PEEK in flags enables examination of pending # data without consuming it. # Receive part of data with MSG_PEEK. # Ignoring MSG_TRUNC here (so this test is the same for stream # and datagram sockets). Some wording in POSIX seems to # suggest that it needn't be set when peeking, but that may # just be a slip. # Receive all data with MSG_PEEK. # Check that the same data can still be received normally. # Test receiving with recvmsg[_into]() when message is sent # using sendmsg(). # Tests which require a stream socket and can use either recvmsg() # or recvmsg_into(). # Receive end-of-stream indicator (b"", peer socket closed). # Might not have end-of-record marker # Receive a message in more than one chunk. # Tests for recvmsg() which can use any socket type. # Check that recvmsg() rejects invalid arguments. # Tests for recvmsg_into() which can use any socket type. # Check that recvmsg_into() rejects invalid arguments. # Receive into buffer obtained from a generator (not a sequence). # Receive into an array rather than the usual bytearray. # Receive into multiple buffers (scatter write). # Test the functions CMSG_LEN() and CMSG_SPACE(). Tests # assumptions used by sendmsg() and recvmsg[_into](), which share # code with these functions. # Match the definition in socketmodule.c # Test CMSG_LEN() with various valid and invalid values, # checking the assumptions used by recvmsg() and sendmsg(). # struct cmsghdr has at least three members, two of which are ints # This is how recvmsg() calculates the data size # sendmsg() shares code with these functions, and requires # that it reject values over the limit. # Test CMSG_SPACE() with various valid and invalid values, # checking the assumptions used by sendmsg(). 
# struct cmsghdr has at least three members, two of which are ints # sendmsg() shares code with these functions, and requires # that it reject values over the limit. # Tests for file descriptor passing on Unix-domain sockets. # Invalid file descriptor value that's unlikely to evaluate to a # real FD even if one of its bytes is replaced with a different # value (which shouldn't actually happen). # Return a list of n file descriptors for newly-created files # containing their list indices as ASCII numbers. # Check that the file descriptors in the given list contain # their correct list indices as ASCII numbers. # Close all file descriptors specified in the ancillary data # of the given return value from recvmsg() or recvmsg_into(). # Send n new file descriptors created by newFDs() to the # server, with the constant MSG as the non-ancillary data. # Check that constant MSG was received with numfds file # descriptors in a maximum of maxcmsgs control messages (which # must contain only complete integers). By default, check # that MSG_CTRUNC is unset, but ignore any flags in # ignoreflags. # Pass a single FD (array read from bytes object). # Pass multiple FDs in a single array. # Test using CMSG_SPACE() to calculate ancillary buffer size. # Test using CMSG_LEN() to calculate ancillary buffer size. # RFC 3542 says implementations may set # MSG_CTRUNC if there isn't enough space # for trailing padding. #12958") #22397") # Pass two FDs in two separate arrays. Arrays may be combined # into a single control message by the OS. #12958") #22397") #12958") #22397") # Pass two FDs in two separate arrays, receiving them into the # minimum space for two arrays. #12958") #22397") # Try to send msg and ancdata to server, but if the system # call fails, just send msg with no ancillary data. # Check that it was the system call that failed #24725") # Try to pass an empty FD array. Can receive either no array # or an empty array. # Try to pass a truncated FD array. 
# Try to pass two FD arrays, the first of which is truncated. # Arrays may have been combined in a single control message # Check that no ancillary data items are returned when data is # truncated inside the cmsghdr structure. # Check that no ancillary data is received when no buffer size # is specified. # BSD seems to set MSG_CTRUNC only # if an item has been partially # received. # Check that no ancillary data is received when buffer size is 0. # Check that no ancillary data is returned for various non-zero # (but still too small) buffer sizes. # The cmsghdr structure has at least three members, two of # which are ints, so we still shouldn't see any ancillary # data. # The following tests try to truncate the control message in the # middle of the FD array. # Check that file descriptor data is truncated to between # mindata and maxdata bytes when received with buffer size # ancbuf, and that any complete file descriptor numbers are # valid. # Test sendmsg() and recvmsg[_into]() using the ancillary data # features of the RFC 3542 Advanced Sockets API for IPv6. # Currently we can only handle certain data items (e.g. traffic # class, hop limit, MTU discovery and fragmentation settings) # without resorting to unportable means such as the struct module, # but the tests here are aimed at testing the ancillary data # handling in sendmsg() and recvmsg() rather than the IPv6 API # itself. # Test value to use when setting hop limit of packet # Test value to use when setting traffic class of packet. # -1 means "use kernel default". # Given ancillary data list ancdata, return a mapping from # pairs (cmsg_level, cmsg_type) to corresponding cmsg_data. # Check that no (level, type) pair appears more than once. # Receive hop limit into ancbufsize bytes of ancillary data # space. Check that data is MSG, ancillary data is not # truncated (but ignore any flags in ignoreflags), and hop # limit is between 0 and maxhop inclusive. # Test receiving the packet hop limit as ancillary data. 
# Need to wait until server has asked to receive ancillary # data, as implementations are not required to buffer it # otherwise. # Test receiving hop limit, using CMSG_SPACE to calculate buffer size. # Could test receiving into buffer sized using CMSG_LEN, but RFC # 3542 says portable applications must provide space for trailing # padding. Implementations may set MSG_CTRUNC if there isn't # enough space for the padding. # Test setting hop limit on outgoing packet and receiving it # at the other end. # Receive traffic class and hop limit into ancbufsize bytes of # ancillary data space. Check that data is MSG, ancillary # data is not truncated (but ignore any flags in ignoreflags), # and traffic class and hop limit are in range (hop limit no # more than maxhop). # Test receiving traffic class and hop limit as ancillary data. # Test receiving traffic class and hop limit, using # CMSG_SPACE() to calculate buffer size. # Test setting traffic class and hop limit on outgoing packet, # and receiving them at the other end. # Try to send ancillary data with first item one byte too # long. Fall back to sending with correct size if this fails, # and check that second item was handled correctly. # Tests for proper handling of truncated ancillary data # Receive hop limit into ancbufsize bytes of ancillary data # space, which should be too small to contain the ancillary # data header (if ancbufsize is None, pass no second argument # to recvmsg()). Check that data is MSG, MSG_CTRUNC is set # (unless included in ignoreflags), and no ancillary data is # returned. # Check that no ancillary data is received when no ancillary # buffer size is provided. # BSD seems to set # MSG_CTRUNC only if an item # has been partially # received. # Check that no ancillary data is received when ancillary # buffer size is zero. # Check that no ancillary data is returned for various non-zero # (but still too small) buffer sizes. # Test truncation of a control message inside its associated # data. 
The message may be returned with its data truncated, # or not returned at all. # Receive traffic class and hop limit into ancbufsize bytes of # ancillary data space, which should be large enough to # contain the first item, but too small to contain the header # of the second. Check that data is MSG, MSG_CTRUNC is set # (unless included in ignoreflags), and only one ancillary # data item is returned. # Try the above test with various buffer sizes. # Test truncation of the second of two control messages inside # its associated data. # Derive concrete test classes for different socket types. # Called to compare the received address with the address of # the peer, ignoring scope ID #13876") #13876") # Test interrupting the interruptible send/receive methods with a # signal when a timeout is set. These tests avoid having multiple # threads alive during the test so that the OS cannot deliver the # signal to the wrong one. # Base class for interrupted send/receive tests. Installs an # empty handler for SIGALRM and removes it on teardown, along with # any scheduled alarms. # Timeout for socket operations # Provide setAlarm() method to schedule delivery of SIGALRM after # given number of seconds, or cancel it if zero, and an # appropriate time value to use. Use setitimer() if available. # Old systems may deliver the alarm up to one second early # Require siginterrupt() in order to ensure that system calls are # interrupted by default. # Test interrupting the recv*() methods with signals when a # timeout is set. # Check that func(*args, **kwargs) raises # errno of EINTR when interrupted by a signal. # Require siginterrupt() in order to ensure that system calls are # interrupted by default. # Test interrupting the interruptible send*() methods with signals # when a timeout is set. # Use a thread to complete the connection, but wait for it to # terminate before running the test, so that there is only one # thread to accept the signal. 
# Check that func(*args, **kwargs), run in a loop, raises # OSError with an errno of EINTR when interrupted by a # signal. # Issue #12958: The following tests have problems on OS X prior to 10.7 # Passing an actual address here as Python's wrapper for # sendto() doesn't allow passing a zero-length one; POSIX # requires that the address is ignored since the socket is # connection-mode, however. # Calling close() many times should be safe. # When a Python socket has a non-zero timeout, it's switched # internally to a non-blocking mode. Later, sock.sendall(), # sock.recv(), and other socket operations use a select() call and # handle EWOULDBLOCK/EGAIN on all socket operations. That's how # timeouts are enforced. # Test setblocking() and settimeout() methods # Issue 15989 # create a socket with SOCK_NONBLOCK # bpo-7995: accept() on a listening socket with a timeout and the # default timeout is None, the resulting socket must be blocking. # bpo-7995: accept() on a listening socket with a timeout and the # default timeout is None, the resulting socket must inherit # the default timeout. # Testing non-blocking accept # connect() didn't start: non-blocking accept() fails # connect() completed: non-blocking accept() doesn't block # don't connect before event is set to check # that non-blocking accept() raises BlockingIOError # Testing non-blocking recv # the server didn't send data yet: non-blocking recv() fails # the server sent data yet: non-blocking recv() doesn't block # don't send anything before event is set to check # that non-blocking recv() raises BlockingIOError # send data: recv() will no longer block Unit tests for the object returned by socket.makefile() self.read_file is the io object returned by makefile() on the client connection. You can read from this file to get output from the server. self.write_file is the io object returned by makefile() on the server connection. You can write to this file to send output to the client. 
# Use default buffer size # Issue #7322: A file object must disallow further reads # after a timeout has occurred. # First read raises a timeout # Second read is disallowed # Performing small file read test # read until EOF # Performing unbuffered file read test # Performing file readline test # The file returned by makefile should keep the socket open. # read until EOF Repeat the tests from FileObjectClassTestCase with bufsize==0. In this case (and in this case only), it should be possible to create a file object, read a line from it, create another file object, read another line from it, without loss of data in the first file object's buffer. Note that http.client relies on this when reading multiple requests from the same socket. # Use unbuffered mode # Read a line, create a new file object, read another line with it # first line # first line # second line # second line # The file returned by makefile should keep the socket open... # ...until the file is itself closed # Non-blocking ops # NOTE: to set `read_file` as non-blocking, we must call # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp). # Data not arrived (can happen under Windows), wait a bit # Avoid closing the socket before the server test has finished, # otherwise system recv() will return 0 instead of EWOULDBLOCK. # The client thread can't skip directly - the SkipTest exception # would appear as a failure. # Try to saturate the socket buffer pipe with repeated large writes. # The first write() succeeds since a chunk of data can be buffered # Succeeded # Let us know that this test didn't manage to establish # the expected conditions. This is not a failure in itself but, # if it happens repeatedly, the test should be fixed. 
# Default-buffered for reading; line-buffered for writing # Exercise the buffering code Tests for socket.makefile() in text mode (rather than binary) Tests for socket.makefile() in text mode (rather than binary) Tests for socket.makefile() in text mode (rather than binary) Prove network connection. # We're inherited below by BasicTCPTest2, which also inherits # BasicTCPTest, which defines self.port referenced below. Tests that NetworkConnection does not break existing TCP functionality. Return a socket which times out on connect # Issue #9792: errors raised by create_connection() should have # a proper errno attribute. # Issue #16257: create_connection() calls getaddrinfo() against # 'localhost'. This may result in an IPV6 addr being returned # as well as an IPV4 one: # >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM) # >>> [(2, 2, 0, '', ('127.0.0.1', 41230)), # (26, 2, 0, '', ('::1', 41230, 0, 0))] # # create_connection() enumerates through all the addresses returned # and if it doesn't successfully bind to any of them, it propagates # the last exception it encountered. # # On Solaris, ENETUNREACH is returned in this circumstance instead # of ECONNREFUSED. So, if that errno exists, add it to our list of # expected errnos. # bpo-31910: socket.create_connection() fails randomly # with EADDRNOTAVAIL on Travis CI # Issue #9792: create_connection() should not recast timeout errors # as generic socket errors. # The port number being used is sufficient to show that the bind() # call happened. # passing no explicit timeout uses socket's global default # None timeout means the same as sock.settimeout(None) # XXX I don't know how to do this test on MSWindows or any other # platform that doesn't support signal.alarm() or os.kill(), though # the bug should have existed on all platforms. # must be longer than alarm # POSIX allows alarm to be up to 1 second early # shut off alarm # no alarm can be pending. Safe to restore old handler. 
# Regression test for issue #28471 # Check that an abstract name can be passed as a string. # Check that an abstract name can be passed as a bytearray. # Return the given path encoded in the file system encoding, # or skip the test if this is not possible. # Bind the socket # Issue #30205 (note getsockname() can return None on OS X) # Test binding to and retrieving a normal string pathname. # Test binding to a bytes pathname. # Test binding to a valid non-ASCII pathname, with the # non-ASCII bytes supplied using surrogateescape encoding. # Test binding to a pathname that cannot be encoded in the # file system encoding. Test the buffer versions of socket.recv() and socket.send(). # See issue #20246. Check if the TIPC module is loaded The TIPC module is not loaded automatically on Ubuntu and probably other Linux distros. # It's ok if the file does not exist, is a directory or if we # have not the permission to read it. # There is a hittable race between serverExplicitReady() and the # accept() call; sleep a little while to avoid it, otherwise # we could get an exception # base test # close inside with block # exception inside with block # clear FD_CLOEXEC flag # timeout == 0: means that getblocking() must be False. # If timeout > 0, the socket will be in a "blocking" mode # from the standpoint of the Python API. For Python socket # object, "blocking" means that operations like 'sock.recv()' # will block. Internally, file descriptors for # "blocking" Python sockets *with timeouts* are in a # *non-blocking* mode, and 'sock.recv()' uses 'select()' # and handles EWOULDBLOCK/EAGAIN to enforce the timeout. # a lot of it seems silly and redundant, but I wanted to test that # changing back and forth worked ok # defaulttimeout # This must be classmethod and not staticmethod or multiprocessing # won't be able to bootstrap it. # Recreate socket from shared data # Send the message # Transfer the listening server socket to another process # and service it from there. 
# Create process: # Get the shared socket data # Pass the shared socket to the other process # The data that the server will send us # Connect # listen for the data # socket sharing is expected to work only for blocking socket # since the internal python timeout value isn't transferred. # If the user specified "0" for proto, then # internally windows will have picked the correct value. # Python introspection on the socket however will still return # 0. For the shared socket, the python value is recreated # from the actual value, so it may not compare correctly. # This combination is not supported Test the send() implementation of socket.sendfile(). # 10 MiB # Depending on the mixin class being run return either send() # or sendfile() method implementation. # regular file # non regular file # empty file # offset # count # count small # count + offset # non blocking sockets are not supposed to work # timeout (non-triggered) # timeout (triggered) # errors Test the sendfile() implementation of socket.sendfile(). # tests for AF_ALG # type / algorithm is not available # bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY, # at least on ppc64le architecture # Although it should work with 3.19 and newer the test blocks on # Ubuntu 15.10 with Kernel 4.2.0-19. # long message # see issue29324 # send assoc, plain and tag buffer in separate steps # now with msg # create anc data manually # decrypt and verify # see test_aes_cbc # deterministic random bit generator, prediction resistance, sha256 # bpo-35050, off-by-one error in length check # salg_type[14] # salg_name[64] # available since long time ago # available starting with Windows 10 1607 # available starting with Windows 10 1703 # available starting with Windows 10 1709 # These are slow when setitimer() is not available | 2.104001 | 2 |
boilr/data.py | addtt/boiler | 3 | 6624770 | <gh_stars>1-10
import argparse
from typing import Tuple
from torch.utils.data import Dataset, DataLoader
class BaseDatasetManager:
"""Wrapper for DataLoaders.
This is meant to be subclassed for specific experiments and datasets.
The method _make_datasets() must be implemented. A basic implementation of
the method _make_dataloaders() is provided in this class, but can be
overridden for custom behavior.
Args:
cfg (argparse.Namespace): Configuration
cuda (bool): Whether the device in use is cuda
"""
def __init__(self, cfg: argparse.Namespace, cuda: bool, **kwargs):
# Define training and test set
tr_set, ts_set = self._make_datasets(cfg, **kwargs)
# Dataloaders
self._train, self._test = self._make_dataloaders(
tr_set, ts_set, cfg, cuda, **kwargs)
self._data_shape = self._train.dataset[0][0].size()
self._img_size = self._data_shape[1:]
self._color_ch = self._data_shape[0]
@classmethod
def _make_datasets(cls, cfg: argparse.Namespace,
**kwargs) -> Tuple[Dataset, Dataset]:
"""Returns training and test sets as PyTorch Datasets.
Args:
cfg (argparse.Namespace): Configuration
"""
raise NotImplementedError
@classmethod
def _make_dataloaders(cls, train: Dataset, test: Dataset,
cfg: argparse.Namespace, cuda: bool,
**kwargs) -> Tuple[DataLoader, DataLoader]:
"""Returns training and test data loaders.
Default data loaders provided here. Override for custom data loaders.
Args:
train (Dataset): Training set
test (Dataset): Test set
cfg (argparse.Namespace): Configuration
cuda (bool): Whether the device in use is cuda
Returns:
(tuple): tuple containing:
- dl_train (DataLoader): training set data loader
- dl_test (DataLoader): test set data loader
"""
# Default arguments for dataloaders
nw = getattr(kwargs, 'num_workers', 0)
pm = getattr(kwargs, 'pin_memory', False)
dl_kwargs = {'num_workers': nw, 'pin_memory': pm} if cuda else {}
dl_train = DataLoader(train,
batch_size=cfg.batch_size,
shuffle=True,
drop_last=True,
**dl_kwargs)
dl_test = DataLoader(test,
batch_size=cfg.test_batch_size,
shuffle=False,
**dl_kwargs)
return dl_train, dl_test
@property
def train(self) -> DataLoader:
"""DataLoader for training set"""
return self._train
@property
def test(self) -> DataLoader:
"""DataLoader for test set"""
return self._test
@property
def data_shape(self) -> Tuple[int, int, int]:
"""Shape of each datapoint (image): (channels, height, width)."""
return self._data_shape
@property
def img_size(self) -> Tuple[int, int]:
"""Spatial shape of each datapoint (image): height, width."""
return self._img_size
@property
def color_ch(self) -> int:
"""Number of color channels of each datapoint (image)."""
return self._color_ch
| import argparse
from typing import Tuple
from torch.utils.data import Dataset, DataLoader
class BaseDatasetManager:
"""Wrapper for DataLoaders.
This is meant to be subclassed for specific experiments and datasets.
The method _make_datasets() must be implemented. A basic implementation of
the method _make_dataloaders() is provided in this class, but can be
overridden for custom behavior.
Args:
cfg (argparse.Namespace): Configuration
cuda (bool): Whether the device in use is cuda
"""
def __init__(self, cfg: argparse.Namespace, cuda: bool, **kwargs):
# Define training and test set
tr_set, ts_set = self._make_datasets(cfg, **kwargs)
# Dataloaders
self._train, self._test = self._make_dataloaders(
tr_set, ts_set, cfg, cuda, **kwargs)
self._data_shape = self._train.dataset[0][0].size()
self._img_size = self._data_shape[1:]
self._color_ch = self._data_shape[0]
@classmethod
def _make_datasets(cls, cfg: argparse.Namespace,
**kwargs) -> Tuple[Dataset, Dataset]:
"""Returns training and test sets as PyTorch Datasets.
Args:
cfg (argparse.Namespace): Configuration
"""
raise NotImplementedError
@classmethod
def _make_dataloaders(cls, train: Dataset, test: Dataset,
cfg: argparse.Namespace, cuda: bool,
**kwargs) -> Tuple[DataLoader, DataLoader]:
"""Returns training and test data loaders.
Default data loaders provided here. Override for custom data loaders.
Args:
train (Dataset): Training set
test (Dataset): Test set
cfg (argparse.Namespace): Configuration
cuda (bool): Whether the device in use is cuda
Returns:
(tuple): tuple containing:
- dl_train (DataLoader): training set data loader
- dl_test (DataLoader): test set data loader
"""
# Default arguments for dataloaders
nw = getattr(kwargs, 'num_workers', 0)
pm = getattr(kwargs, 'pin_memory', False)
dl_kwargs = {'num_workers': nw, 'pin_memory': pm} if cuda else {}
dl_train = DataLoader(train,
batch_size=cfg.batch_size,
shuffle=True,
drop_last=True,
**dl_kwargs)
dl_test = DataLoader(test,
batch_size=cfg.test_batch_size,
shuffle=False,
**dl_kwargs)
return dl_train, dl_test
@property
def train(self) -> DataLoader:
"""DataLoader for training set"""
return self._train
@property
def test(self) -> DataLoader:
"""DataLoader for test set"""
return self._test
@property
def data_shape(self) -> Tuple[int, int, int]:
"""Shape of each datapoint (image): (channels, height, width)."""
return self._data_shape
@property
def img_size(self) -> Tuple[int, int]:
"""Spatial shape of each datapoint (image): height, width."""
return self._img_size
@property
def color_ch(self) -> int:
"""Number of color channels of each datapoint (image)."""
return self._color_ch | en | 0.667339 | Wrapper for DataLoaders. This is meant to be subclassed for specific experiments and datasets. The method _make_datasets() must be implemented. A basic implementation of the method _make_dataloaders() is provided in this class, but can be overridden for custom behavior. Args: cfg (argparse.Namespace): Configuration cuda (bool): Whether the device in use is cuda # Define training and test set # Dataloaders Returns training and test sets as PyTorch Datasets. Args: cfg (argparse.Namespace): Configuration Returns training and test data loaders. Default data loaders provided here. Override for custom data loaders. Args: train (Dataset): Training set test (Dataset): Test set cfg (argparse.Namespace): Configuration cuda (bool): Whether the device in use is cuda Returns: (tuple): tuple containing: - dl_train (DataLoader): training set data loader - dl_test (DataLoader): test set data loader # Default arguments for dataloaders DataLoader for training set DataLoader for test set Shape of each datapoint (image): (channels, height, width). Spatial shape of each datapoint (image): height, width. Number of color channels of each datapoint (image). | 2.863831 | 3 |
experiments/simulations/two_dimensional_mle.py | giovp/spatial-alignment | 14 | 6624771 | <gh_stars>10-100
import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
sys.path.append("../..")
from models.gpsa_mle import WarpGPMLE
sys.path.append("../../data")
from simulated.generate_twod_data import generate_twod_data
from plotting.callbacks import callback_twod
from util import ConvergenceChecker
device = "cuda" if torch.cuda.is_available() else "cpu"
LATEX_FONTSIZE = 50
n_spatial_dims = 2
n_views = 2
# n_outputs = 10
N_EPOCHS = 3000
PRINT_EVERY = 25
N_LATENT_GPS = 1
def two_d_gpsa(n_outputs, n_epochs, warp_kernel_variance=0.1, plot_intermediate=True):
X, Y, n_samples_list, view_idx = generate_twod_data(
n_views,
n_outputs,
grid_size=15,
n_latent_gps=None,
kernel_lengthscale=10.0,
kernel_variance=warp_kernel_variance,
noise_variance=1e-4,
)
n_samples_per_view = X.shape[0] // n_views
## Fit GP on one view to get initial estimates of data kernel parameters
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
from sklearn.gaussian_process import GaussianProcessRegressor
kernel = RBF(length_scale=1.0) + WhiteKernel()
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X[view_idx[0]], Y[view_idx[0]])
data_lengthscales_est = gpr.kernel_.k1.theta[0]
x = torch.from_numpy(X).float().clone()
y = torch.from_numpy(Y).float().clone()
data_dict = {
"expression": {
"spatial_coords": x,
"outputs": y,
"n_samples_list": n_samples_list,
}
}
model = WarpGPMLE(
data_dict,
n_spatial_dims=n_spatial_dims,
n_latent_gps=None,
# n_latent_gps=None,
mean_function="identity_fixed",
fixed_warp_kernel_variances=np.ones(n_views) * 0.01,
fixed_warp_kernel_lengthscales=np.ones(n_views) * 10,
# fixed_data_kernel_lengthscales=np.exp(gpr.kernel_.k1.theta.astype(np.float32)),
# fixed_data_kernel_lengthscales=np.exp(data_lengthscales_est),
# mean_function="identity_initialized",
fixed_view_idx=0,
).to(device)
view_idx, Ns, _, _ = model.create_view_idx_dict(data_dict)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
def train(model, loss_fn, optimizer):
model.train()
# Forward pass
model.forward({"expression": x}, view_idx=view_idx, Ns=Ns)
# Compute loss
loss = loss_fn(
X_spatial={"expression": x}, view_idx=view_idx, data_dict=data_dict
)
# Compute gradients and take optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
# Set up figure.
fig = plt.figure(figsize=(14, 7), facecolor="white", constrained_layout=True)
data_expression_ax = fig.add_subplot(122, frameon=False)
latent_expression_ax = fig.add_subplot(121, frameon=False)
plt.show(block=False)
convergence_checker = ConvergenceChecker(span=100)
loss_trace = []
error_trace = []
for t in range(n_epochs):
loss = train(model, model.loss_fn, optimizer)
loss_trace.append(loss)
# print(model.G["expression"][-1])
# print(torch.exp(model.warp_kernel_variances))
if t >= convergence_checker.span - 1:
rel_change = convergence_checker.relative_change(loss_trace)
is_converged = convergence_checker.converged(loss_trace, tol=1e-4)
if is_converged:
convergence_counter += 1
if convergence_counter == 2:
print("CONVERGED")
break
else:
convergence_counter = 0
if plot_intermediate and t % PRINT_EVERY == 0:
print("Iter: {0:<10} LL {1:1.3e}".format(t, -loss))
model.forward({"expression": x}, view_idx=view_idx, Ns=Ns)
callback_twod(
model,
X,
Y,
data_expression_ax=data_expression_ax,
latent_expression_ax=latent_expression_ax,
X_aligned=model.G,
is_mle=True,
)
plt.draw()
plt.pause(1 / 60.0)
err = np.mean(
(
model.G["expression"]
.detach()
.numpy()
.squeeze()[:n_samples_per_view]
- model.G["expression"]
.detach()
.numpy()
.squeeze()[n_samples_per_view:]
)
** 2
)
print("Error: {}".format(err))
if t >= convergence_checker.span - 1:
print(rel_change)
# G_means, G_samples, F_latent_samples, F_samples = model.forward(
# {"expression": x}, view_idx=view_idx, Ns=Ns
# )
print("Done!")
plt.close()
return X, Y, model.G, model
if __name__ == "__main__":
n_outputs = 10
X, Y, G_means, model = two_d_gpsa(n_epochs=N_EPOCHS, n_outputs=n_outputs)
import matplotlib
font = {"size": LATEX_FONTSIZE}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
fig = plt.figure(figsize=(10, 10))
data_expression_ax = fig.add_subplot(211, frameon=False)
latent_expression_ax = fig.add_subplot(212, frameon=False)
callback_twod(
model,
X,
Y,
data_expression_ax=data_expression_ax,
latent_expression_ax=latent_expression_ax,
X_aligned=G_means,
)
plt.tight_layout()
plt.savefig("../../plots/two_d_simulation.png")
plt.show()
import ipdb
ipdb.set_trace()
| import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
sys.path.append("../..")
from models.gpsa_mle import WarpGPMLE
sys.path.append("../../data")
from simulated.generate_twod_data import generate_twod_data
from plotting.callbacks import callback_twod
from util import ConvergenceChecker
device = "cuda" if torch.cuda.is_available() else "cpu"
LATEX_FONTSIZE = 50
n_spatial_dims = 2
n_views = 2
# n_outputs = 10
N_EPOCHS = 3000
PRINT_EVERY = 25
N_LATENT_GPS = 1
def two_d_gpsa(n_outputs, n_epochs, warp_kernel_variance=0.1, plot_intermediate=True):
X, Y, n_samples_list, view_idx = generate_twod_data(
n_views,
n_outputs,
grid_size=15,
n_latent_gps=None,
kernel_lengthscale=10.0,
kernel_variance=warp_kernel_variance,
noise_variance=1e-4,
)
n_samples_per_view = X.shape[0] // n_views
## Fit GP on one view to get initial estimates of data kernel parameters
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
from sklearn.gaussian_process import GaussianProcessRegressor
kernel = RBF(length_scale=1.0) + WhiteKernel()
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X[view_idx[0]], Y[view_idx[0]])
data_lengthscales_est = gpr.kernel_.k1.theta[0]
x = torch.from_numpy(X).float().clone()
y = torch.from_numpy(Y).float().clone()
data_dict = {
"expression": {
"spatial_coords": x,
"outputs": y,
"n_samples_list": n_samples_list,
}
}
model = WarpGPMLE(
data_dict,
n_spatial_dims=n_spatial_dims,
n_latent_gps=None,
# n_latent_gps=None,
mean_function="identity_fixed",
fixed_warp_kernel_variances=np.ones(n_views) * 0.01,
fixed_warp_kernel_lengthscales=np.ones(n_views) * 10,
# fixed_data_kernel_lengthscales=np.exp(gpr.kernel_.k1.theta.astype(np.float32)),
# fixed_data_kernel_lengthscales=np.exp(data_lengthscales_est),
# mean_function="identity_initialized",
fixed_view_idx=0,
).to(device)
view_idx, Ns, _, _ = model.create_view_idx_dict(data_dict)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)
def train(model, loss_fn, optimizer):
model.train()
# Forward pass
model.forward({"expression": x}, view_idx=view_idx, Ns=Ns)
# Compute loss
loss = loss_fn(
X_spatial={"expression": x}, view_idx=view_idx, data_dict=data_dict
)
# Compute gradients and take optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item()
# Set up figure.
fig = plt.figure(figsize=(14, 7), facecolor="white", constrained_layout=True)
data_expression_ax = fig.add_subplot(122, frameon=False)
latent_expression_ax = fig.add_subplot(121, frameon=False)
plt.show(block=False)
convergence_checker = ConvergenceChecker(span=100)
loss_trace = []
error_trace = []
for t in range(n_epochs):
loss = train(model, model.loss_fn, optimizer)
loss_trace.append(loss)
# print(model.G["expression"][-1])
# print(torch.exp(model.warp_kernel_variances))
if t >= convergence_checker.span - 1:
rel_change = convergence_checker.relative_change(loss_trace)
is_converged = convergence_checker.converged(loss_trace, tol=1e-4)
if is_converged:
convergence_counter += 1
if convergence_counter == 2:
print("CONVERGED")
break
else:
convergence_counter = 0
if plot_intermediate and t % PRINT_EVERY == 0:
print("Iter: {0:<10} LL {1:1.3e}".format(t, -loss))
model.forward({"expression": x}, view_idx=view_idx, Ns=Ns)
callback_twod(
model,
X,
Y,
data_expression_ax=data_expression_ax,
latent_expression_ax=latent_expression_ax,
X_aligned=model.G,
is_mle=True,
)
plt.draw()
plt.pause(1 / 60.0)
err = np.mean(
(
model.G["expression"]
.detach()
.numpy()
.squeeze()[:n_samples_per_view]
- model.G["expression"]
.detach()
.numpy()
.squeeze()[n_samples_per_view:]
)
** 2
)
print("Error: {}".format(err))
if t >= convergence_checker.span - 1:
print(rel_change)
# G_means, G_samples, F_latent_samples, F_samples = model.forward(
# {"expression": x}, view_idx=view_idx, Ns=Ns
# )
print("Done!")
plt.close()
return X, Y, model.G, model
if __name__ == "__main__":
    # Fit the model on the simulated two-dimensional data.
    X, Y, G_means, model = two_d_gpsa(n_epochs=N_EPOCHS, n_outputs=10)

    # Configure LaTeX-rendered fonts before any figure is created.
    import matplotlib

    matplotlib.rcParams["text.usetex"] = True
    matplotlib.rc("font", **{"size": LATEX_FONTSIZE})

    # Stack the raw-data panel above the aligned/latent panel.
    figure = plt.figure(figsize=(10, 10))
    raw_ax = figure.add_subplot(211, frameon=False)
    aligned_ax = figure.add_subplot(212, frameon=False)
    callback_twod(
        model,
        X,
        Y,
        data_expression_ax=raw_ax,
        latent_expression_ax=aligned_ax,
        X_aligned=G_means,
    )
    plt.tight_layout()
    plt.savefig("../../plots/two_d_simulation.png")
    plt.show()

    # Drop into a debugger so the fitted objects can be inspected.
    import ipdb

    ipdb.set_trace()
src/sage/categories/filtered_modules.py | bopopescu/sage | 3 | 6624772 | r"""
Filtered Modules
A *filtered module* over a ring `R` with a totally ordered
indexing set `I` (typically `I = \NN`) is an `R`-module `M` equipped
with a family `(F_i)_{i \in I}` of `R`-submodules satisfying
`F_i \subseteq F_j` for all `i,j \in I` having `i \leq j`, and
`M = \bigcup_{i \in I} F_i`. This family is called a *filtration*
of the given module `M`.
.. TODO::
Implement a notion for decreasing filtrations: where `F_j \subseteq F_i`
when `i \leq j`.
.. TODO::
Implement filtrations for all concrete categories.
.. TODO::
Implement `\operatorname{gr}` as a functor.
"""
#*****************************************************************************
# Copyright (C) 2014 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_class_attribute
from sage.categories.category_types import Category_over_base_ring
from sage.categories.category_with_axiom import CategoryWithAxiom_over_base_ring
from sage.categories.covariant_functorial_construction import RegressiveCovariantConstructionCategory
class FilteredModulesCategory(RegressiveCovariantConstructionCategory, Category_over_base_ring):
def __init__(self, base_category):
"""
EXAMPLES::
sage: C = Algebras(QQ).Filtered()
sage: C
Category of filtered algebras over Rational Field
sage: C.base_category()
Category of algebras over Rational Field
sage: sorted(C.super_categories(), key=str)
[Category of algebras over Rational Field,
Category of filtered modules over Rational Field]
sage: AlgebrasWithBasis(QQ).Filtered().base_ring()
Rational Field
sage: HopfAlgebrasWithBasis(QQ).Filtered().base_ring()
Rational Field
"""
super(FilteredModulesCategory, self).__init__(base_category, base_category.base_ring())
_functor_category = "Filtered"
def _repr_object_names(self):
"""
EXAMPLES::
sage: AlgebrasWithBasis(QQ).Filtered() # indirect doctest
Category of filtered algebras with basis over Rational Field
"""
return "filtered {}".format(self.base_category()._repr_object_names())
class FilteredModules(FilteredModulesCategory):
r"""
The category of filtered modules over a given ring `R`.
A *filtered module* over a ring `R` with a totally ordered
indexing set `I` (typically `I = \NN`) is an `R`-module `M` equipped
with a family `(F_i)_{i \in I}` of `R`-submodules satisfying
`F_i \subseteq F_j` for all `i,j \in I` having `i \leq j`, and
`M = \bigcup_{i \in I} F_i`. This family is called a *filtration*
of the given module `M`.
EXAMPLES::
sage: Modules(ZZ).Filtered()
Category of filtered modules over Integer Ring
sage: Modules(ZZ).Filtered().super_categories()
[Category of modules over Integer Ring]
TESTS::
sage: TestSuite(Modules(ZZ).Filtered()).run()
REFERENCES:
- :wikipedia:`Filtration_(mathematics)`
"""
def extra_super_categories(self):
r"""
Add :class:`VectorSpaces` to the super categories of ``self`` if
the base ring is a field.
EXAMPLES::
sage: Modules(QQ).Filtered().extra_super_categories()
[Category of vector spaces over Rational Field]
sage: Modules(ZZ).Filtered().extra_super_categories()
[]
This makes sure that ``Modules(QQ).Filtered()`` returns an
instance of :class:`FilteredModules` and not a join category of
an instance of this class and of ``VectorSpaces(QQ)``::
sage: type(Modules(QQ).Filtered())
<class 'sage.categories.filtered_modules.FilteredModules_with_category'>
.. TODO::
Get rid of this workaround once there is a more systematic
approach for the alias ``Modules(QQ)`` -> ``VectorSpaces(QQ)``.
Probably the latter should be a category with axiom, and
covariant constructions should play well with axioms.
"""
from sage.categories.modules import Modules
from sage.categories.fields import Fields
base_ring = self.base_ring()
if base_ring in Fields:
return [Modules(base_ring)]
else:
return []
class SubcategoryMethods:
@cached_method
def Connected(self):
r"""
Return the full subcategory of the connected objects of ``self``.
A filtered `R`-module `M` with filtration
`(F_0, F_1, F_2, \ldots)` (indexed by `\NN`)
is said to be *connected* if `F_0` is isomorphic
to `R`.
EXAMPLES::
sage: Modules(ZZ).Filtered().Connected()
Category of filtered connected modules over Integer Ring
sage: Coalgebras(QQ).Filtered().Connected()
Join of Category of filtered connected modules over Rational Field
and Category of coalgebras over Rational Field
sage: AlgebrasWithBasis(QQ).Filtered().Connected()
Category of filtered connected algebras with basis over Rational Field
TESTS::
sage: TestSuite(Modules(ZZ).Filtered().Connected()).run()
sage: Coalgebras(QQ).Filtered().Connected.__module__
'sage.categories.filtered_modules'
"""
return self._with_axiom("Connected")
class Connected(CategoryWithAxiom_over_base_ring):
pass
| r"""
Filtered Modules
A *filtered module* over a ring `R` with a totally ordered
indexing set `I` (typically `I = \NN`) is an `R`-module `M` equipped
with a family `(F_i)_{i \in I}` of `R`-submodules satisfying
`F_i \subseteq F_j` for all `i,j \in I` having `i \leq j`, and
`M = \bigcup_{i \in I} F_i`. This family is called a *filtration*
of the given module `M`.
.. TODO::
Implement a notion for decreasing filtrations: where `F_j \subseteq F_i`
when `i \leq j`.
.. TODO::
Implement filtrations for all concrete categories.
.. TODO::
Implement `\operatorname{gr}` as a functor.
"""
#*****************************************************************************
# Copyright (C) 2014 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.cachefunc import cached_method
from sage.misc.lazy_attribute import lazy_class_attribute
from sage.categories.category_types import Category_over_base_ring
from sage.categories.category_with_axiom import CategoryWithAxiom_over_base_ring
from sage.categories.covariant_functorial_construction import RegressiveCovariantConstructionCategory
class FilteredModulesCategory(RegressiveCovariantConstructionCategory, Category_over_base_ring):
def __init__(self, base_category):
"""
EXAMPLES::
sage: C = Algebras(QQ).Filtered()
sage: C
Category of filtered algebras over Rational Field
sage: C.base_category()
Category of algebras over Rational Field
sage: sorted(C.super_categories(), key=str)
[Category of algebras over Rational Field,
Category of filtered modules over Rational Field]
sage: AlgebrasWithBasis(QQ).Filtered().base_ring()
Rational Field
sage: HopfAlgebrasWithBasis(QQ).Filtered().base_ring()
Rational Field
"""
super(FilteredModulesCategory, self).__init__(base_category, base_category.base_ring())
_functor_category = "Filtered"
def _repr_object_names(self):
"""
EXAMPLES::
sage: AlgebrasWithBasis(QQ).Filtered() # indirect doctest
Category of filtered algebras with basis over Rational Field
"""
return "filtered {}".format(self.base_category()._repr_object_names())
class FilteredModules(FilteredModulesCategory):
r"""
The category of filtered modules over a given ring `R`.
A *filtered module* over a ring `R` with a totally ordered
indexing set `I` (typically `I = \NN`) is an `R`-module `M` equipped
with a family `(F_i)_{i \in I}` of `R`-submodules satisfying
`F_i \subseteq F_j` for all `i,j \in I` having `i \leq j`, and
`M = \bigcup_{i \in I} F_i`. This family is called a *filtration*
of the given module `M`.
EXAMPLES::
sage: Modules(ZZ).Filtered()
Category of filtered modules over Integer Ring
sage: Modules(ZZ).Filtered().super_categories()
[Category of modules over Integer Ring]
TESTS::
sage: TestSuite(Modules(ZZ).Filtered()).run()
REFERENCES:
- :wikipedia:`Filtration_(mathematics)`
"""
def extra_super_categories(self):
r"""
Add :class:`VectorSpaces` to the super categories of ``self`` if
the base ring is a field.
EXAMPLES::
sage: Modules(QQ).Filtered().extra_super_categories()
[Category of vector spaces over Rational Field]
sage: Modules(ZZ).Filtered().extra_super_categories()
[]
This makes sure that ``Modules(QQ).Filtered()`` returns an
instance of :class:`FilteredModules` and not a join category of
an instance of this class and of ``VectorSpaces(QQ)``::
sage: type(Modules(QQ).Filtered())
<class 'sage.categories.filtered_modules.FilteredModules_with_category'>
.. TODO::
Get rid of this workaround once there is a more systematic
approach for the alias ``Modules(QQ)`` -> ``VectorSpaces(QQ)``.
Probably the latter should be a category with axiom, and
covariant constructions should play well with axioms.
"""
from sage.categories.modules import Modules
from sage.categories.fields import Fields
base_ring = self.base_ring()
if base_ring in Fields:
return [Modules(base_ring)]
else:
return []
class SubcategoryMethods:
@cached_method
def Connected(self):
r"""
Return the full subcategory of the connected objects of ``self``.
A filtered `R`-module `M` with filtration
`(F_0, F_1, F_2, \ldots)` (indexed by `\NN`)
is said to be *connected* if `F_0` is isomorphic
to `R`.
EXAMPLES::
sage: Modules(ZZ).Filtered().Connected()
Category of filtered connected modules over Integer Ring
sage: Coalgebras(QQ).Filtered().Connected()
Join of Category of filtered connected modules over Rational Field
and Category of coalgebras over Rational Field
sage: AlgebrasWithBasis(QQ).Filtered().Connected()
Category of filtered connected algebras with basis over Rational Field
TESTS::
sage: TestSuite(Modules(ZZ).Filtered().Connected()).run()
sage: Coalgebras(QQ).Filtered().Connected.__module__
'sage.categories.filtered_modules'
"""
return self._with_axiom("Connected")
class Connected(CategoryWithAxiom_over_base_ring):
pass
| en | 0.582589 | Filtered Modules A *filtered module* over a ring `R` with a totally ordered indexing set `I` (typically `I = \NN`) is an `R`-module `M` equipped with a family `(F_i)_{i \in I}` of `R`-submodules satisfying `F_i \subseteq F_j` for all `i,j \in I` having `i \leq j`, and `M = \bigcup_{i \in I} F_i`. This family is called a *filtration* of the given module `M`. .. TODO:: Implement a notion for decreasing filtrations: where `F_j \subseteq F_i` when `i \leq j`. .. TODO:: Implement filtrations for all concrete categories. .. TODO:: Implement `\operatorname{gr}` as a functor. #***************************************************************************** # Copyright (C) 2014 <NAME> <<EMAIL>> # # Distributed under the terms of the GNU General Public License (GPL) # http://www.gnu.org/licenses/ #****************************************************************************** EXAMPLES:: sage: C = Algebras(QQ).Filtered() sage: C Category of filtered algebras over Rational Field sage: C.base_category() Category of algebras over Rational Field sage: sorted(C.super_categories(), key=str) [Category of algebras over Rational Field, Category of filtered modules over Rational Field] sage: AlgebrasWithBasis(QQ).Filtered().base_ring() Rational Field sage: HopfAlgebrasWithBasis(QQ).Filtered().base_ring() Rational Field EXAMPLES:: sage: AlgebrasWithBasis(QQ).Filtered() # indirect doctest Category of filtered algebras with basis over Rational Field The category of filtered modules over a given ring `R`. A *filtered module* over a ring `R` with a totally ordered indexing set `I` (typically `I = \NN`) is an `R`-module `M` equipped with a family `(F_i)_{i \in I}` of `R`-submodules satisfying `F_i \subseteq F_j` for all `i,j \in I` having `i \leq j`, and `M = \bigcup_{i \in I} F_i`. This family is called a *filtration* of the given module `M`. 
EXAMPLES:: sage: Modules(ZZ).Filtered() Category of filtered modules over Integer Ring sage: Modules(ZZ).Filtered().super_categories() [Category of modules over Integer Ring] TESTS:: sage: TestSuite(Modules(ZZ).Filtered()).run() REFERENCES: - :wikipedia:`Filtration_(mathematics)` Add :class:`VectorSpaces` to the super categories of ``self`` if the base ring is a field. EXAMPLES:: sage: Modules(QQ).Filtered().extra_super_categories() [Category of vector spaces over Rational Field] sage: Modules(ZZ).Filtered().extra_super_categories() [] This makes sure that ``Modules(QQ).Filtered()`` returns an instance of :class:`FilteredModules` and not a join category of an instance of this class and of ``VectorSpaces(QQ)``:: sage: type(Modules(QQ).Filtered()) <class 'sage.categories.filtered_modules.FilteredModules_with_category'> .. TODO:: Get rid of this workaround once there is a more systematic approach for the alias ``Modules(QQ)`` -> ``VectorSpaces(QQ)``. Probably the latter should be a category with axiom, and covariant constructions should play well with axioms. Return the full subcategory of the connected objects of ``self``. A filtered `R`-module `M` with filtration `(F_0, F_1, F_2, \ldots)` (indexed by `\NN`) is said to be *connected* if `F_0` is isomorphic to `R`. EXAMPLES:: sage: Modules(ZZ).Filtered().Connected() Category of filtered connected modules over Integer Ring sage: Coalgebras(QQ).Filtered().Connected() Join of Category of filtered connected modules over Rational Field and Category of coalgebras over Rational Field sage: AlgebrasWithBasis(QQ).Filtered().Connected() Category of filtered connected algebras with basis over Rational Field TESTS:: sage: TestSuite(Modules(ZZ).Filtered().Connected()).run() sage: Coalgebras(QQ).Filtered().Connected.__module__ 'sage.categories.filtered_modules' | 1.353308 | 1 |
secret_santa/response/success_response.py | jacobboesch/secret_santa_program | 0 | 6624773 | from secret_santa.response import Response
class SuccessResponse(Response):
def __init__(self, message, status_code=200, success=True):
data = {"success": success, "message": message}
super().__init__(status_code, data)
| from secret_santa.response import Response
class SuccessResponse(Response):
def __init__(self, message, status_code=200, success=True):
data = {"success": success, "message": message}
super().__init__(status_code, data)
| none | 1 | 2.478913 | 2 | |
data_steward/cdr_cleaner/cleaning_rules/drop_multiple_measurements.py | dcampbell-vumc/curation | 0 | 6624774 | """
Background
It is possible for a participant to have multiple records of Physical Measurements. This typically occurs when earlier
entries are incorrect. Data quality would improve if these earlier entries were removed.
Scope: Develop a cleaning rule to remove all but the most recent of each Physical Measurement for all participants.
Relevant measurement_source_concept_ids are listed in query
"""
# Project imports
import constants.cdr_cleaner.clean_cdr as cdr_consts
INTERMEDIARY_TABLE = 'DC617_dropped_mult_measurements'
INVALID_MULT_MEASUREMENTS = """
CREATE OR REPLACE TABLE
`{project}.{sandbox_dataset}.{intermediary_table}` AS (
SELECT
*
FROM
(SELECT *, ROW_NUMBER() OVER(PARTITION BY person_id, measurement_source_concept_id ORDER BY
measurement_datetime DESC) AS row_num
FROM `{project}.{dataset}.measurement`
WHERE measurement_source_concept_id IN (903131,903119,903107,903124,903115,903126,903136,903118,903135,903132,
903110,903112,903117,903109,903127,1586218,903133,903111,903120,903113,
903129,903105,903125,903114,903134,903116,903106,903108,903123,903130,
903128,903122,903121)
ORDER BY person_id, measurement_source_concept_id, row_num)
--selection measurements to delete where row_num !=1
WHERE row_num != 1
)
"""
VALID_MEASUREMENTS = """
DELETE FROM
`{project}.{dataset}.measurement`
WHERE
(person_id, measurement_concept_id, measurement_id, measurement_date, measurement_datetime)
IN( SELECT
(person_id, measurement_concept_id, measurement_id, measurement_date, measurement_datetime)
FROM `{project}.{sandbox_dataset}.{intermediary_table}` )
"""
def get_drop_multiple_measurement_queries(project_id, dataset_id,
sandbox_dataset):
"""
runs the query which removes all multiple me
:param project_id: Name of the project
:param dataset_id: Name of the dataset where the queries should be run
:return:
"""
queries_list = []
invalid_measurements_query = dict()
invalid_measurements_query[
cdr_consts.QUERY] = INVALID_MULT_MEASUREMENTS.format(
dataset=dataset_id,
project=project_id,
sandbox_dataset=sandbox_dataset,
intermediary_table=INTERMEDIARY_TABLE)
queries_list.append(invalid_measurements_query)
valid_measurements_query = dict()
valid_measurements_query[cdr_consts.QUERY] = VALID_MEASUREMENTS.format(
dataset=dataset_id,
project=project_id,
sandbox_dataset=sandbox_dataset,
intermediary_table=INTERMEDIARY_TABLE)
queries_list.append(valid_measurements_query)
return queries_list
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
import sandbox
ARGS = parser.parse_args()
# Uncomment these lines if running locally
sandbox_dataset_id = sandbox.create_sandbox_dataset(
project_id=ARGS.project_id, dataset_id=ARGS.dataset_id)
clean_engine.add_console_logging(ARGS.console_log)
query_list = get_drop_multiple_measurement_queries(ARGS.project_id,
ARGS.dataset_id,
sandbox_dataset_id)
clean_engine.clean_dataset(ARGS.project_id, query_list)
| """
Background
It is possible for a participant to have multiple records of Physical Measurements. This typically occurs when earlier
entries are incorrect. Data quality would improve if these earlier entries were removed.
Scope: Develop a cleaning rule to remove all but the most recent of each Physical Measurement for all participants.
Relevant measurement_source_concept_ids are listed in query
"""
# Project imports
import constants.cdr_cleaner.clean_cdr as cdr_consts
INTERMEDIARY_TABLE = 'DC617_dropped_mult_measurements'
INVALID_MULT_MEASUREMENTS = """
CREATE OR REPLACE TABLE
`{project}.{sandbox_dataset}.{intermediary_table}` AS (
SELECT
*
FROM
(SELECT *, ROW_NUMBER() OVER(PARTITION BY person_id, measurement_source_concept_id ORDER BY
measurement_datetime DESC) AS row_num
FROM `{project}.{dataset}.measurement`
WHERE measurement_source_concept_id IN (903131,903119,903107,903124,903115,903126,903136,903118,903135,903132,
903110,903112,903117,903109,903127,1586218,903133,903111,903120,903113,
903129,903105,903125,903114,903134,903116,903106,903108,903123,903130,
903128,903122,903121)
ORDER BY person_id, measurement_source_concept_id, row_num)
--selection measurements to delete where row_num !=1
WHERE row_num != 1
)
"""
VALID_MEASUREMENTS = """
DELETE FROM
`{project}.{dataset}.measurement`
WHERE
(person_id, measurement_concept_id, measurement_id, measurement_date, measurement_datetime)
IN( SELECT
(person_id, measurement_concept_id, measurement_id, measurement_date, measurement_datetime)
FROM `{project}.{sandbox_dataset}.{intermediary_table}` )
"""
def get_drop_multiple_measurement_queries(project_id, dataset_id,
sandbox_dataset):
"""
runs the query which removes all multiple me
:param project_id: Name of the project
:param dataset_id: Name of the dataset where the queries should be run
:return:
"""
queries_list = []
invalid_measurements_query = dict()
invalid_measurements_query[
cdr_consts.QUERY] = INVALID_MULT_MEASUREMENTS.format(
dataset=dataset_id,
project=project_id,
sandbox_dataset=sandbox_dataset,
intermediary_table=INTERMEDIARY_TABLE)
queries_list.append(invalid_measurements_query)
valid_measurements_query = dict()
valid_measurements_query[cdr_consts.QUERY] = VALID_MEASUREMENTS.format(
dataset=dataset_id,
project=project_id,
sandbox_dataset=sandbox_dataset,
intermediary_table=INTERMEDIARY_TABLE)
queries_list.append(valid_measurements_query)
return queries_list
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
import sandbox
ARGS = parser.parse_args()
# Uncomment these lines if running locally
sandbox_dataset_id = sandbox.create_sandbox_dataset(
project_id=ARGS.project_id, dataset_id=ARGS.dataset_id)
clean_engine.add_console_logging(ARGS.console_log)
query_list = get_drop_multiple_measurement_queries(ARGS.project_id,
ARGS.dataset_id,
sandbox_dataset_id)
clean_engine.clean_dataset(ARGS.project_id, query_list)
| en | 0.691488 | Background It is possible for a participant to have multiple records of Physical Measurements. This typically occurs when earlier entries are incorrect. Data quality would improve if these earlier entries were removed. Scope: Develop a cleaning rule to remove all but the most recent of each Physical Measurement for all participants. Relevant measurement_source_concept_ids are listed in query # Project imports CREATE OR REPLACE TABLE `{project}.{sandbox_dataset}.{intermediary_table}` AS ( SELECT * FROM (SELECT *, ROW_NUMBER() OVER(PARTITION BY person_id, measurement_source_concept_id ORDER BY measurement_datetime DESC) AS row_num FROM `{project}.{dataset}.measurement` WHERE measurement_source_concept_id IN (903131,903119,903107,903124,903115,903126,903136,903118,903135,903132, 903110,903112,903117,903109,903127,1586218,903133,903111,903120,903113, 903129,903105,903125,903114,903134,903116,903106,903108,903123,903130, 903128,903122,903121) ORDER BY person_id, measurement_source_concept_id, row_num) --selection measurements to delete where row_num !=1 WHERE row_num != 1 ) DELETE FROM `{project}.{dataset}.measurement` WHERE (person_id, measurement_concept_id, measurement_id, measurement_date, measurement_datetime) IN( SELECT (person_id, measurement_concept_id, measurement_id, measurement_date, measurement_datetime) FROM `{project}.{sandbox_dataset}.{intermediary_table}` ) runs the query which removes all multiple me :param project_id: Name of the project :param dataset_id: Name of the dataset where the queries should be run :return: # Uncomment these lines if running locally | 1.997577 | 2 |
tap_list_providers/test/test_parse_example_data.py | danroberts728/hsvdotbeer | 18 | 6624775 | <gh_stars>10-100
"""Test the parsing of example data"""
from decimal import Decimal
from django.core.management import call_command
from django.test import TestCase
from beers.models import Beer, Manufacturer
from beers.test.factories import StyleFactory, StyleAlternateNameFactory
from venues.test.factories import VenueFactory
from venues.models import Venue, VenueAPIConfiguration
from taps.models import Tap
from tap_list_providers.example import ExampleTapListProvider
class CommandsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.venue = VenueFactory(tap_list_provider=ExampleTapListProvider.provider_name)
VenueAPIConfiguration.objects.create(
venue=cls.venue,
url="https://localhost:8000",
)
ipa = StyleFactory(name="American IPA")
StyleAlternateNameFactory(style=ipa, name="IPA - American")
stout = StyleFactory(name="Sweet Stout")
StyleAlternateNameFactory(style=stout, name="stout - milk")
def test_import_example_data(self):
"""Test parsing the JSON data"""
self.assertFalse(Tap.objects.exists())
self.assertEqual(Venue.objects.count(), 1)
self.assertFalse(Beer.objects.exists())
self.assertFalse(Manufacturer.objects.exists())
for dummy in range(2):
# running twice to make sure we're not double-creating
args = []
opts = {}
call_command("parseexampletaplist", *args, **opts)
self.assertEqual(Beer.objects.count(), 3)
self.assertEqual(Manufacturer.objects.count(), 3)
self.assertEqual(Tap.objects.count(), 3)
taps = (
Tap.objects.filter(
venue=self.venue,
tap_number__in=[1, 2],
)
.select_related(
"beer__style",
)
.order_by("tap_number")
)
tap = taps[0]
self.assertEqual(tap.beer.name, "Monkeynaut")
self.assertEqual(tap.beer.abv, Decimal("7.25"))
self.assertEqual(tap.beer.style.name, "American IPA")
tap = taps[1]
self.assertEqual(tap.beer.style.name, "Sweet Stout")
| """Test the parsing of example data"""
from decimal import Decimal
from django.core.management import call_command
from django.test import TestCase
from beers.models import Beer, Manufacturer
from beers.test.factories import StyleFactory, StyleAlternateNameFactory
from venues.test.factories import VenueFactory
from venues.models import Venue, VenueAPIConfiguration
from taps.models import Tap
from tap_list_providers.example import ExampleTapListProvider
class CommandsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.venue = VenueFactory(tap_list_provider=ExampleTapListProvider.provider_name)
VenueAPIConfiguration.objects.create(
venue=cls.venue,
url="https://localhost:8000",
)
ipa = StyleFactory(name="American IPA")
StyleAlternateNameFactory(style=ipa, name="IPA - American")
stout = StyleFactory(name="Sweet Stout")
StyleAlternateNameFactory(style=stout, name="stout - milk")
def test_import_example_data(self):
"""Test parsing the JSON data"""
self.assertFalse(Tap.objects.exists())
self.assertEqual(Venue.objects.count(), 1)
self.assertFalse(Beer.objects.exists())
self.assertFalse(Manufacturer.objects.exists())
for dummy in range(2):
# running twice to make sure we're not double-creating
args = []
opts = {}
call_command("parseexampletaplist", *args, **opts)
self.assertEqual(Beer.objects.count(), 3)
self.assertEqual(Manufacturer.objects.count(), 3)
self.assertEqual(Tap.objects.count(), 3)
taps = (
Tap.objects.filter(
venue=self.venue,
tap_number__in=[1, 2],
)
.select_related(
"beer__style",
)
.order_by("tap_number")
)
tap = taps[0]
self.assertEqual(tap.beer.name, "Monkeynaut")
self.assertEqual(tap.beer.abv, Decimal("7.25"))
self.assertEqual(tap.beer.style.name, "American IPA")
tap = taps[1]
self.assertEqual(tap.beer.style.name, "Sweet Stout") | en | 0.8083 | Test the parsing of example data Test parsing the JSON data # running twice to make sure we're not double-creating | 2.648229 | 3 |
superset/datasets/api.py | razzius/superset | 18,621 | 6624776 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from io import BytesIO
from typing import Any
from zipfile import is_zipfile, ZipFile
import yaml
from flask import g, request, Response, send_file
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import ngettext
from marshmallow import ValidationError
from superset import event_logger, is_feature_enabled
from superset.commands.importers.exceptions import NoValidFilesFoundError
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.connectors.sqla.models import SqlaTable
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.databases.filters import DatabaseFilter
from superset.datasets.commands.bulk_delete import BulkDeleteDatasetCommand
from superset.datasets.commands.create import CreateDatasetCommand
from superset.datasets.commands.delete import DeleteDatasetCommand
from superset.datasets.commands.exceptions import (
DatasetBulkDeleteFailedError,
DatasetCreateFailedError,
DatasetDeleteFailedError,
DatasetForbiddenError,
DatasetInvalidError,
DatasetNotFoundError,
DatasetRefreshFailedError,
DatasetUpdateFailedError,
)
from superset.datasets.commands.export import ExportDatasetsCommand
from superset.datasets.commands.importers.dispatcher import ImportDatasetsCommand
from superset.datasets.commands.refresh import RefreshDatasetCommand
from superset.datasets.commands.update import UpdateDatasetCommand
from superset.datasets.dao import DatasetDAO
from superset.datasets.filters import DatasetIsNullOrEmptyFilter
from superset.datasets.schemas import (
DatasetPostSchema,
DatasetPutSchema,
DatasetRelatedObjectsResponse,
get_delete_ids_schema,
get_export_ids_schema,
)
from superset.utils.core import parse_boolean_string
from superset.views.base import DatasourceFilter, generate_download_headers
from superset.views.base_api import (
BaseSupersetModelRestApi,
RelatedFieldFilter,
statsd_metrics,
)
from superset.views.filters import FilterRelatedOwners
logger = logging.getLogger(__name__)
class DatasetRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(SqlaTable)
base_filters = [["id", DatasourceFilter, lambda: []]]
resource_name = "dataset"
allow_browser_login = True
class_permission_name = "Dataset"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.EXPORT,
RouteMethod.IMPORT,
RouteMethod.RELATED,
RouteMethod.DISTINCT,
"bulk_delete",
"refresh",
"related_objects",
}
list_columns = [
"id",
"database.id",
"database.database_name",
"changed_by_name",
"changed_by_url",
"changed_by.first_name",
"changed_by.username",
"changed_on_utc",
"changed_on_delta_humanized",
"default_endpoint",
"description",
"datasource_type",
"explore_url",
"extra",
"kind",
"owners.id",
"owners.username",
"owners.first_name",
"owners.last_name",
"schema",
"sql",
"table_name",
]
list_select_columns = list_columns + ["changed_on", "changed_by_fk"]
order_columns = [
"table_name",
"schema",
"changed_by.first_name",
"changed_on_delta_humanized",
"database.database_name",
]
show_select_columns = [
"id",
"database.database_name",
"database.id",
"table_name",
"sql",
"filter_select_enabled",
"fetch_values_predicate",
"schema",
"description",
"main_dttm_col",
"offset",
"default_endpoint",
"cache_timeout",
"is_sqllab_view",
"template_params",
"owners.id",
"owners.username",
"owners.first_name",
"owners.last_name",
"columns.changed_on",
"columns.column_name",
"columns.created_on",
"columns.description",
"columns.expression",
"columns.filterable",
"columns.groupby",
"columns.id",
"columns.is_active",
"columns.extra",
"columns.is_dttm",
"columns.python_date_format",
"columns.type",
"columns.uuid",
"columns.verbose_name",
"metrics",
"datasource_type",
"url",
"extra",
]
show_columns = show_select_columns + ["columns.type_generic", "database.backend"]
add_model_schema = DatasetPostSchema()
edit_model_schema = DatasetPutSchema()
add_columns = ["database", "schema", "table_name", "owners"]
edit_columns = [
"table_name",
"sql",
"filter_select_enabled",
"fetch_values_predicate",
"schema",
"description",
"main_dttm_col",
"offset",
"default_endpoint",
"cache_timeout",
"is_sqllab_view",
"template_params",
"owners",
"columns",
"metrics",
"extra",
]
openapi_spec_tag = "Datasets"
related_field_filters = {
"owners": RelatedFieldFilter("first_name", FilterRelatedOwners),
"database": "database_name",
}
search_filters = {"sql": [DatasetIsNullOrEmptyFilter]}
filter_rel_fields = {"database": [["id", DatabaseFilter, lambda: []]]}
allowed_rel_fields = {"database", "owners"}
allowed_distinct_fields = {"schema"}
apispec_parameter_schemas = {
"get_export_ids_schema": get_export_ids_schema,
}
openapi_spec_component_schemas = (DatasetRelatedObjectsResponse,)
@expose("/", methods=["POST"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
    log_to_statsd=False,
)
def post(self) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    """Creates a new Dataset
    ---
    post:
      description: >-
        Create a new Dataset
      requestBody:
        description: Dataset schema
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/{{self.__class__.__name__}}.post'
      responses:
        201:
          description: Dataset added
          content:
            application/json:
              schema:
                type: object
                properties:
                  id:
                    type: number
                  result:
                    $ref: '#/components/schemas/{{self.__class__.__name__}}.post'
        400:
          $ref: '#/components/responses/400'
        401:
          $ref: '#/components/responses/401'
        422:
          $ref: '#/components/responses/422'
        500:
          $ref: '#/components/responses/500'
    """
    # Only JSON bodies are accepted.
    if not request.is_json:
        return self.response_400(message="Request is not JSON")
    try:
        item = self.add_model_schema.load(request.json)
    # This validates custom Schema with custom validations
    except ValidationError as error:
        return self.response_400(message=error.messages)
    try:
        new_model = CreateDatasetCommand(g.user, item).run()
        return self.response(201, id=new_model.id, result=item)
    except DatasetInvalidError as ex:
        # Domain validation failure -> 422 with the normalized message map.
        return self.response_422(message=ex.normalized_messages())
    except DatasetCreateFailedError as ex:
        logger.error(
            "Error creating model %s: %s",
            self.__class__.__name__,
            str(ex),
            exc_info=True,
        )
        return self.response_422(message=str(ex))
@expose("/<pk>", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
    log_to_statsd=False,
)
def put(self, pk: int) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    """Changes a Dataset
    ---
    put:
      description: >-
        Changes a Dataset
      parameters:
      - in: path
        schema:
          type: integer
        name: pk
      - in: query
        schema:
          type: boolean
        name: override_columns
      requestBody:
        description: Dataset schema
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/{{self.__class__.__name__}}.put'
      responses:
        200:
          description: Dataset changed
          content:
            application/json:
              schema:
                type: object
                properties:
                  id:
                    type: number
                  result:
                    $ref: '#/components/schemas/{{self.__class__.__name__}}.put'
        400:
          $ref: '#/components/responses/400'
        401:
          $ref: '#/components/responses/401'
        403:
          $ref: '#/components/responses/403'
        404:
          $ref: '#/components/responses/404'
        422:
          $ref: '#/components/responses/422'
        500:
          $ref: '#/components/responses/500'
    """
    # Optional query flag: replace the dataset's columns instead of merging.
    override_columns = (
        parse_boolean_string(request.args["override_columns"])
        if "override_columns" in request.args
        else False
    )
    if not request.is_json:
        return self.response_400(message="Request is not JSON")
    try:
        item = self.edit_model_schema.load(request.json)
    # This validates custom Schema with custom validations
    except ValidationError as error:
        return self.response_400(message=error.messages)
    try:
        changed_model = UpdateDatasetCommand(
            g.user, pk, item, override_columns
        ).run()
        if override_columns:
            # After overriding columns, re-sync them from the source table.
            RefreshDatasetCommand(g.user, pk).run()
        response = self.response(200, id=changed_model.id, result=item)
    except DatasetNotFoundError:
        response = self.response_404()
    except DatasetForbiddenError:
        response = self.response_403()
    except DatasetInvalidError as ex:
        response = self.response_422(message=ex.normalized_messages())
    except DatasetUpdateFailedError as ex:
        logger.error(
            "Error updating model %s: %s",
            self.__class__.__name__,
            str(ex),
            exc_info=True,
        )
        response = self.response_422(message=str(ex))
    return response
@expose("/<pk>", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.delete",
    log_to_statsd=False,
)
def delete(self, pk: int) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    """Deletes a Dataset
    ---
    delete:
      description: >-
        Deletes a Dataset
      parameters:
      - in: path
        schema:
          type: integer
        name: pk
      responses:
        200:
          description: Dataset delete
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
        401:
          $ref: '#/components/responses/401'
        403:
          $ref: '#/components/responses/403'
        404:
          $ref: '#/components/responses/404'
        422:
          $ref: '#/components/responses/422'
        500:
          $ref: '#/components/responses/500'
    """
    # Deletion (with ownership/permission checks) is delegated to the command.
    try:
        DeleteDatasetCommand(g.user, pk).run()
        return self.response(200, message="OK")
    except DatasetNotFoundError:
        return self.response_404()
    except DatasetForbiddenError:
        return self.response_403()
    except DatasetDeleteFailedError as ex:
        logger.error(
            "Error deleting model %s: %s",
            self.__class__.__name__,
            str(ex),
            exc_info=True,
        )
        return self.response_422(message=str(ex))
@expose("/export/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_export_ids_schema)
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.export",
    log_to_statsd=False,
)  # pylint: disable=too-many-locals
def export(self, **kwargs: Any) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    """Export datasets
    ---
    get:
      description: >-
        Exports multiple datasets and downloads them as YAML files
      parameters:
      - in: query
        name: q
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/get_export_ids_schema'
      responses:
        200:
          description: Dataset export
          content:
            text/plain:
              schema:
                type: string
        400:
          $ref: '#/components/responses/400'
        401:
          $ref: '#/components/responses/401'
        404:
          $ref: '#/components/responses/404'
        500:
          $ref: '#/components/responses/500'
    """
    # Dataset ids decoded from the rison `q` query parameter.
    requested_ids = kwargs["rison"]
    if is_feature_enabled("VERSIONED_EXPORT"):
        # New path: bundle per-dataset YAML files into an in-memory zip.
        token = request.args.get("token")
        timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
        root = f"dataset_export_{timestamp}"
        filename = f"{root}.zip"
        buf = BytesIO()
        with ZipFile(buf, "w") as bundle:
            try:
                for file_name, file_content in ExportDatasetsCommand(
                    requested_ids
                ).run():
                    with bundle.open(f"{root}/{file_name}", "w") as fp:
                        fp.write(file_content.encode())
            except DatasetNotFoundError:
                return self.response_404()
        buf.seek(0)
        response = send_file(
            buf,
            mimetype="application/zip",
            as_attachment=True,
            attachment_filename=filename,
        )
        if token:
            # Cookie lets the browser UI detect download completion.
            response.set_cookie(token, "done", max_age=600)
        return response
    # Legacy path: a single YAML document of all requested datasets.
    query = self.datamodel.session.query(SqlaTable).filter(
        SqlaTable.id.in_(requested_ids)
    )
    query = self._base_filters.apply_all(query)
    items = query.all()
    ids = [item.id for item in items]
    # 404 if any requested id was missing or filtered out by permissions.
    if len(ids) != len(requested_ids):
        return self.response_404()
    data = [t.export_to_dict() for t in items]
    return Response(
        yaml.safe_dump(data),
        headers=generate_download_headers("yaml"),
        mimetype="application/text",
    )
@expose("/<pk>/refresh", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".refresh",
    log_to_statsd=False,
)
def refresh(self, pk: int) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    # Fix: the 200 description said "Dataset delete" (copy-paste from delete).
    """Refresh a Dataset
    ---
    put:
      description: >-
        Refreshes and updates columns of a dataset
      parameters:
      - in: path
        schema:
          type: integer
        name: pk
      responses:
        200:
          description: Dataset refreshed
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
        401:
          $ref: '#/components/responses/401'
        403:
          $ref: '#/components/responses/403'
        404:
          $ref: '#/components/responses/404'
        422:
          $ref: '#/components/responses/422'
        500:
          $ref: '#/components/responses/500'
    """
    # Column re-sync is delegated to the command; errors map to HTTP codes.
    try:
        RefreshDatasetCommand(g.user, pk).run()
        return self.response(200, message="OK")
    except DatasetNotFoundError:
        return self.response_404()
    except DatasetForbiddenError:
        return self.response_403()
    except DatasetRefreshFailedError as ex:
        logger.error(
            "Error refreshing dataset %s: %s",
            self.__class__.__name__,
            str(ex),
            exc_info=True,
        )
        return self.response_422(message=str(ex))
@expose("/<pk>/related_objects", methods=["GET"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
    f".related_objects",
    log_to_statsd=False,
)
def related_objects(self, pk: int) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    # Fix: the responses mapping contained a duplicated "200:" key, which is
    # invalid YAML and broke the generated spec for this endpoint.
    """Get charts and dashboards count associated to a dataset
    ---
    get:
      description:
        Get charts and dashboards count associated to a dataset
      parameters:
      - in: path
        name: pk
        schema:
          type: integer
      responses:
        200:
          description: Query result
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/DatasetRelatedObjectsResponse"
        401:
          $ref: '#/components/responses/401'
        404:
          $ref: '#/components/responses/404'
        500:
          $ref: '#/components/responses/500'
    """
    dataset = DatasetDAO.find_by_id(pk)
    if not dataset:
        return self.response_404()
    data = DatasetDAO.get_related_objects(pk)
    # Project each chart / dashboard into a small JSON-friendly dict.
    charts = [
        {
            "id": chart.id,
            "slice_name": chart.slice_name,
            "viz_type": chart.viz_type,
        }
        for chart in data["charts"]
    ]
    dashboards = [
        {
            "id": dashboard.id,
            "json_metadata": dashboard.json_metadata,
            "slug": dashboard.slug,
            "title": dashboard.dashboard_title,
        }
        for dashboard in data["dashboards"]
    ]
    return self.response(
        200,
        charts={"count": len(charts), "result": charts},
        dashboards={"count": len(dashboards), "result": dashboards},
    )
@expose("/", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@rison(get_delete_ids_schema)
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.bulk_delete",
    log_to_statsd=False,
)
def bulk_delete(self, **kwargs: Any) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    """Delete bulk Datasets
    ---
    delete:
      description: >-
        Deletes multiple Datasets in a bulk operation.
      parameters:
      - in: query
        name: q
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/get_delete_ids_schema'
      responses:
        200:
          description: Dataset bulk delete
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
        400:
          $ref: '#/components/responses/400'
        401:
          $ref: '#/components/responses/401'
        403:
          $ref: '#/components/responses/403'
        404:
          $ref: '#/components/responses/404'
        422:
          $ref: '#/components/responses/422'
        500:
          $ref: '#/components/responses/500'
    """
    # Dataset ids decoded from the rison `q` query parameter.
    item_ids = kwargs["rison"]
    try:
        BulkDeleteDatasetCommand(g.user, item_ids).run()
        return self.response(
            200,
            message=ngettext(
                "Deleted %(num)d dataset",
                "Deleted %(num)d datasets",
                num=len(item_ids),
            ),
        )
    except DatasetNotFoundError:
        return self.response_404()
    except DatasetForbiddenError:
        return self.response_403()
    except DatasetBulkDeleteFailedError as ex:
        return self.response_422(message=str(ex))
@expose("/import/", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
    log_to_statsd=False,
)
def import_(self) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    """Import dataset(s) with associated databases
    ---
    post:
      requestBody:
        required: true
        content:
          multipart/form-data:
            schema:
              type: object
              properties:
                formData:
                  description: upload file (ZIP or YAML)
                  type: string
                  format: binary
                passwords:
                  description: JSON map of passwords for each file
                  type: string
                overwrite:
                  description: overwrite existing datasets?
                  type: boolean
      responses:
        200:
          description: Dataset import result
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
        400:
          $ref: '#/components/responses/400'
        401:
          $ref: '#/components/responses/401'
        422:
          $ref: '#/components/responses/422'
        500:
          $ref: '#/components/responses/500'
    """
    upload = request.files.get("formData")
    if not upload:
        return self.response_400()
    if is_zipfile(upload):
        # Zip bundle: read each contained file's text.
        with ZipFile(upload) as bundle:
            contents = get_contents_from_bundle(bundle)
    else:
        # Single YAML file; rewind because is_zipfile() consumed the stream.
        upload.seek(0)
        contents = {upload.filename: upload.read()}
    if not contents:
        raise NoValidFilesFoundError()
    # Optional JSON map of per-file passwords for encrypted DB credentials.
    passwords = (
        json.loads(request.form["passwords"])
        if "passwords" in request.form
        else None
    )
    overwrite = request.form.get("overwrite") == "true"
    command = ImportDatasetsCommand(
        contents, passwords=passwords, overwrite=overwrite
    )
    command.run()
    return self.response(200, message="OK")
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from io import BytesIO
from typing import Any
from zipfile import is_zipfile, ZipFile
import yaml
from flask import g, request, Response, send_file
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import ngettext
from marshmallow import ValidationError
from superset import event_logger, is_feature_enabled
from superset.commands.importers.exceptions import NoValidFilesFoundError
from superset.commands.importers.v1.utils import get_contents_from_bundle
from superset.connectors.sqla.models import SqlaTable
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.databases.filters import DatabaseFilter
from superset.datasets.commands.bulk_delete import BulkDeleteDatasetCommand
from superset.datasets.commands.create import CreateDatasetCommand
from superset.datasets.commands.delete import DeleteDatasetCommand
from superset.datasets.commands.exceptions import (
DatasetBulkDeleteFailedError,
DatasetCreateFailedError,
DatasetDeleteFailedError,
DatasetForbiddenError,
DatasetInvalidError,
DatasetNotFoundError,
DatasetRefreshFailedError,
DatasetUpdateFailedError,
)
from superset.datasets.commands.export import ExportDatasetsCommand
from superset.datasets.commands.importers.dispatcher import ImportDatasetsCommand
from superset.datasets.commands.refresh import RefreshDatasetCommand
from superset.datasets.commands.update import UpdateDatasetCommand
from superset.datasets.dao import DatasetDAO
from superset.datasets.filters import DatasetIsNullOrEmptyFilter
from superset.datasets.schemas import (
DatasetPostSchema,
DatasetPutSchema,
DatasetRelatedObjectsResponse,
get_delete_ids_schema,
get_export_ids_schema,
)
from superset.utils.core import parse_boolean_string
from superset.views.base import DatasourceFilter, generate_download_headers
from superset.views.base_api import (
BaseSupersetModelRestApi,
RelatedFieldFilter,
statsd_metrics,
)
from superset.views.filters import FilterRelatedOwners
logger = logging.getLogger(__name__)
class DatasetRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(SqlaTable)
base_filters = [["id", DatasourceFilter, lambda: []]]
resource_name = "dataset"
allow_browser_login = True
class_permission_name = "Dataset"
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | {
RouteMethod.EXPORT,
RouteMethod.IMPORT,
RouteMethod.RELATED,
RouteMethod.DISTINCT,
"bulk_delete",
"refresh",
"related_objects",
}
list_columns = [
"id",
"database.id",
"database.database_name",
"changed_by_name",
"changed_by_url",
"changed_by.first_name",
"changed_by.username",
"changed_on_utc",
"changed_on_delta_humanized",
"default_endpoint",
"description",
"datasource_type",
"explore_url",
"extra",
"kind",
"owners.id",
"owners.username",
"owners.first_name",
"owners.last_name",
"schema",
"sql",
"table_name",
]
list_select_columns = list_columns + ["changed_on", "changed_by_fk"]
order_columns = [
"table_name",
"schema",
"changed_by.first_name",
"changed_on_delta_humanized",
"database.database_name",
]
show_select_columns = [
"id",
"database.database_name",
"database.id",
"table_name",
"sql",
"filter_select_enabled",
"fetch_values_predicate",
"schema",
"description",
"main_dttm_col",
"offset",
"default_endpoint",
"cache_timeout",
"is_sqllab_view",
"template_params",
"owners.id",
"owners.username",
"owners.first_name",
"owners.last_name",
"columns.changed_on",
"columns.column_name",
"columns.created_on",
"columns.description",
"columns.expression",
"columns.filterable",
"columns.groupby",
"columns.id",
"columns.is_active",
"columns.extra",
"columns.is_dttm",
"columns.python_date_format",
"columns.type",
"columns.uuid",
"columns.verbose_name",
"metrics",
"datasource_type",
"url",
"extra",
]
show_columns = show_select_columns + ["columns.type_generic", "database.backend"]
add_model_schema = DatasetPostSchema()
edit_model_schema = DatasetPutSchema()
add_columns = ["database", "schema", "table_name", "owners"]
edit_columns = [
"table_name",
"sql",
"filter_select_enabled",
"fetch_values_predicate",
"schema",
"description",
"main_dttm_col",
"offset",
"default_endpoint",
"cache_timeout",
"is_sqllab_view",
"template_params",
"owners",
"columns",
"metrics",
"extra",
]
openapi_spec_tag = "Datasets"
related_field_filters = {
"owners": RelatedFieldFilter("first_name", FilterRelatedOwners),
"database": "database_name",
}
search_filters = {"sql": [DatasetIsNullOrEmptyFilter]}
filter_rel_fields = {"database": [["id", DatabaseFilter, lambda: []]]}
allowed_rel_fields = {"database", "owners"}
allowed_distinct_fields = {"schema"}
apispec_parameter_schemas = {
"get_export_ids_schema": get_export_ids_schema,
}
openapi_spec_component_schemas = (DatasetRelatedObjectsResponse,)
@expose("/", methods=["POST"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
log_to_statsd=False,
)
def post(self) -> Response:
"""Creates a new Dataset
---
post:
description: >-
Create a new Dataset
requestBody:
description: Dataset schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
responses:
201:
description: Dataset added
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.add_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
new_model = CreateDatasetCommand(g.user, item).run()
return self.response(201, id=new_model.id, result=item)
except DatasetInvalidError as ex:
return self.response_422(message=ex.normalized_messages())
except DatasetCreateFailedError as ex:
logger.error(
"Error creating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/<pk>", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
log_to_statsd=False,
)
def put(self, pk: int) -> Response:
"""Changes a Dataset
---
put:
description: >-
Changes a Dataset
parameters:
- in: path
schema:
type: integer
name: pk
- in: query
schema:
type: boolean
name: override_columns
requestBody:
description: Dataset schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
responses:
200:
description: Dataset changed
content:
application/json:
schema:
type: object
properties:
id:
type: number
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
override_columns = (
parse_boolean_string(request.args["override_columns"])
if "override_columns" in request.args
else False
)
if not request.is_json:
return self.response_400(message="Request is not JSON")
try:
item = self.edit_model_schema.load(request.json)
# This validates custom Schema with custom validations
except ValidationError as error:
return self.response_400(message=error.messages)
try:
changed_model = UpdateDatasetCommand(
g.user, pk, item, override_columns
).run()
if override_columns:
RefreshDatasetCommand(g.user, pk).run()
response = self.response(200, id=changed_model.id, result=item)
except DatasetNotFoundError:
response = self.response_404()
except DatasetForbiddenError:
response = self.response_403()
except DatasetInvalidError as ex:
response = self.response_422(message=ex.normalized_messages())
except DatasetUpdateFailedError as ex:
logger.error(
"Error updating model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
response = self.response_422(message=str(ex))
return response
@expose("/<pk>", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.delete",
log_to_statsd=False,
)
def delete(self, pk: int) -> Response:
"""Deletes a Dataset
---
delete:
description: >-
Deletes a Dataset
parameters:
- in: path
schema:
type: integer
name: pk
responses:
200:
description: Dataset delete
content:
application/json:
schema:
type: object
properties:
message:
type: string
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
DeleteDatasetCommand(g.user, pk).run()
return self.response(200, message="OK")
except DatasetNotFoundError:
return self.response_404()
except DatasetForbiddenError:
return self.response_403()
except DatasetDeleteFailedError as ex:
logger.error(
"Error deleting model %s: %s",
self.__class__.__name__,
str(ex),
exc_info=True,
)
return self.response_422(message=str(ex))
@expose("/export/", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_export_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.export",
log_to_statsd=False,
) # pylint: disable=too-many-locals
def export(self, **kwargs: Any) -> Response:
"""Export datasets
---
get:
description: >-
Exports multiple datasets and downloads them as YAML files
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_export_ids_schema'
responses:
200:
description: Dataset export
content:
text/plain:
schema:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
requested_ids = kwargs["rison"]
if is_feature_enabled("VERSIONED_EXPORT"):
token = request.args.get("token")
timestamp = datetime.now().strftime("%Y%m%dT%H%M%S")
root = f"dataset_export_{timestamp}"
filename = f"{root}.zip"
buf = BytesIO()
with ZipFile(buf, "w") as bundle:
try:
for file_name, file_content in ExportDatasetsCommand(
requested_ids
).run():
with bundle.open(f"{root}/{file_name}", "w") as fp:
fp.write(file_content.encode())
except DatasetNotFoundError:
return self.response_404()
buf.seek(0)
response = send_file(
buf,
mimetype="application/zip",
as_attachment=True,
attachment_filename=filename,
)
if token:
response.set_cookie(token, "done", max_age=600)
return response
query = self.datamodel.session.query(SqlaTable).filter(
SqlaTable.id.in_(requested_ids)
)
query = self._base_filters.apply_all(query)
items = query.all()
ids = [item.id for item in items]
if len(ids) != len(requested_ids):
return self.response_404()
data = [t.export_to_dict() for t in items]
return Response(
yaml.safe_dump(data),
headers=generate_download_headers("yaml"),
mimetype="application/text",
)
@expose("/<pk>/refresh", methods=["PUT"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}" f".refresh",
    log_to_statsd=False,
)
def refresh(self, pk: int) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    # Fix: the 200 description said "Dataset delete" (copy-paste from delete).
    """Refresh a Dataset
    ---
    put:
      description: >-
        Refreshes and updates columns of a dataset
      parameters:
      - in: path
        schema:
          type: integer
        name: pk
      responses:
        200:
          description: Dataset refreshed
          content:
            application/json:
              schema:
                type: object
                properties:
                  message:
                    type: string
        401:
          $ref: '#/components/responses/401'
        403:
          $ref: '#/components/responses/403'
        404:
          $ref: '#/components/responses/404'
        422:
          $ref: '#/components/responses/422'
        500:
          $ref: '#/components/responses/500'
    """
    # Column re-sync is delegated to the command; errors map to HTTP codes.
    try:
        RefreshDatasetCommand(g.user, pk).run()
        return self.response(200, message="OK")
    except DatasetNotFoundError:
        return self.response_404()
    except DatasetForbiddenError:
        return self.response_403()
    except DatasetRefreshFailedError as ex:
        logger.error(
            "Error refreshing dataset %s: %s",
            self.__class__.__name__,
            str(ex),
            exc_info=True,
        )
        return self.response_422(message=str(ex))
@expose("/<pk>/related_objects", methods=["GET"])
@protect()
@safe
@statsd_metrics
@event_logger.log_this_with_context(
    action=lambda self, *args, **kwargs: f"{self.__class__.__name__}"
    f".related_objects",
    log_to_statsd=False,
)
def related_objects(self, pk: int) -> Response:
    # NOTE: the docstring below is parsed at runtime to build the OpenAPI spec.
    # Fix: the responses mapping contained a duplicated "200:" key, which is
    # invalid YAML and broke the generated spec for this endpoint.
    """Get charts and dashboards count associated to a dataset
    ---
    get:
      description:
        Get charts and dashboards count associated to a dataset
      parameters:
      - in: path
        name: pk
        schema:
          type: integer
      responses:
        200:
          description: Query result
          content:
            application/json:
              schema:
                $ref: "#/components/schemas/DatasetRelatedObjectsResponse"
        401:
          $ref: '#/components/responses/401'
        404:
          $ref: '#/components/responses/404'
        500:
          $ref: '#/components/responses/500'
    """
    dataset = DatasetDAO.find_by_id(pk)
    if not dataset:
        return self.response_404()
    data = DatasetDAO.get_related_objects(pk)
    # Project each chart / dashboard into a small JSON-friendly dict.
    charts = [
        {
            "id": chart.id,
            "slice_name": chart.slice_name,
            "viz_type": chart.viz_type,
        }
        for chart in data["charts"]
    ]
    dashboards = [
        {
            "id": dashboard.id,
            "json_metadata": dashboard.json_metadata,
            "slug": dashboard.slug,
            "title": dashboard.dashboard_title,
        }
        for dashboard in data["dashboards"]
    ]
    return self.response(
        200,
        charts={"count": len(charts), "result": charts},
        dashboards={"count": len(dashboards), "result": dashboards},
    )
@expose("/", methods=["DELETE"])
@protect()
@safe
@statsd_metrics
@rison(get_delete_ids_schema)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.bulk_delete",
log_to_statsd=False,
)
def bulk_delete(self, **kwargs: Any) -> Response:
"""Delete bulk Datasets
---
delete:
description: >-
Deletes multiple Datasets in a bulk operation.
parameters:
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_delete_ids_schema'
responses:
200:
description: Dataset bulk delete
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
403:
$ref: '#/components/responses/403'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
item_ids = kwargs["rison"]
try:
BulkDeleteDatasetCommand(g.user, item_ids).run()
return self.response(
200,
message=ngettext(
"Deleted %(num)d dataset",
"Deleted %(num)d datasets",
num=len(item_ids),
),
)
except DatasetNotFoundError:
return self.response_404()
except DatasetForbiddenError:
return self.response_403()
except DatasetBulkDeleteFailedError as ex:
return self.response_422(message=str(ex))
@expose("/import/", methods=["POST"])
@protect()
@statsd_metrics
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.import_",
log_to_statsd=False,
)
def import_(self) -> Response:
"""Import dataset(s) with associated databases
---
post:
requestBody:
required: true
content:
multipart/form-data:
schema:
type: object
properties:
formData:
description: upload file (ZIP or YAML)
type: string
format: binary
passwords:
description: JSON map of passwords for each file
type: string
overwrite:
description: overwrite existing datasets?
type: boolean
responses:
200:
description: Dataset import result
content:
application/json:
schema:
type: object
properties:
message:
type: string
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
upload = request.files.get("formData")
if not upload:
return self.response_400()
if is_zipfile(upload):
with ZipFile(upload) as bundle:
contents = get_contents_from_bundle(bundle)
else:
upload.seek(0)
contents = {upload.filename: upload.read()}
if not contents:
raise NoValidFilesFoundError()
passwords = (
json.loads(request.form["passwords"])
if "passwords" in request.form
else None
)
overwrite = request.form.get("overwrite") == "true"
command = ImportDatasetsCommand(
contents, passwords=passwords, overwrite=overwrite
)
command.run()
return self.response(200, message="OK")
| en | 0.686348 | # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. Creates a new Dataset --- post: description: >- Create a new Dataset requestBody: description: Dataset schema required: true content: application/json: schema: $ref: '#/components/schemas/{{self.__class__.__name__}}.post' responses: 201: description: Dataset added content: application/json: schema: type: object properties: id: type: number result: $ref: '#/components/schemas/{{self.__class__.__name__}}.post' 400: $ref: '#/components/responses/400' 401: $ref: '#/components/responses/401' 422: $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' # This validates custom Schema with custom validations Changes a Dataset --- put: description: >- Changes a Dataset parameters: - in: path schema: type: integer name: pk - in: query schema: type: boolean name: override_columns requestBody: description: Dataset schema required: true content: application/json: schema: $ref: '#/components/schemas/{{self.__class__.__name__}}.put' responses: 200: description: Dataset changed content: application/json: schema: type: object properties: id: type: number result: $ref: '#/components/schemas/{{self.__class__.__name__}}.put' 400: $ref: '#/components/responses/400' 401: $ref: 
'#/components/responses/401' 403: $ref: '#/components/responses/403' 404: $ref: '#/components/responses/404' 422: $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' # This validates custom Schema with custom validations Deletes a Dataset --- delete: description: >- Deletes a Dataset parameters: - in: path schema: type: integer name: pk responses: 200: description: Dataset delete content: application/json: schema: type: object properties: message: type: string 401: $ref: '#/components/responses/401' 403: $ref: '#/components/responses/403' 404: $ref: '#/components/responses/404' 422: $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' # pylint: disable=too-many-locals Export datasets --- get: description: >- Exports multiple datasets and downloads them as YAML files parameters: - in: query name: q content: application/json: schema: $ref: '#/components/schemas/get_export_ids_schema' responses: 200: description: Dataset export content: text/plain: schema: type: string 400: $ref: '#/components/responses/400' 401: $ref: '#/components/responses/401' 404: $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500' Refresh a Dataset --- put: description: >- Refreshes and updates columns of a dataset parameters: - in: path schema: type: integer name: pk responses: 200: description: Dataset delete content: application/json: schema: type: object properties: message: type: string 401: $ref: '#/components/responses/401' 403: $ref: '#/components/responses/403' 404: $ref: '#/components/responses/404' 422: $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' Get charts and dashboards count associated to a dataset --- get: description: Get charts and dashboards count associated to a dataset parameters: - in: path name: pk schema: type: integer responses: 200: 200: description: Query result content: application/json: schema: $ref: "#/components/schemas/DatasetRelatedObjectsResponse" 401: $ref: 
'#/components/responses/401' 404: $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500' Delete bulk Datasets --- delete: description: >- Deletes multiple Datasets in a bulk operation. parameters: - in: query name: q content: application/json: schema: $ref: '#/components/schemas/get_delete_ids_schema' responses: 200: description: Dataset bulk delete content: application/json: schema: type: object properties: message: type: string 400: $ref: '#/components/responses/400' 401: $ref: '#/components/responses/401' 403: $ref: '#/components/responses/403' 404: $ref: '#/components/responses/404' 422: $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' Import dataset(s) with associated databases --- post: requestBody: required: true content: multipart/form-data: schema: type: object properties: formData: description: upload file (ZIP or YAML) type: string format: binary passwords: description: JSON map of passwords for each file type: string overwrite: description: overwrite existing datasets? type: boolean responses: 200: description: Dataset import result content: application/json: schema: type: object properties: message: type: string 400: $ref: '#/components/responses/400' 401: $ref: '#/components/responses/401' 422: $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' | 1.307307 | 1 |
vathos/utils/logger.py | satyajitghana/ProjektDepth | 2 | 6624777 | <filename>vathos/utils/logger.py
import logging
import sys
LOG_LEVEL = logging.INFO
def setup_logger(name):
    """Create (or fetch) a stdout stream logger in the ``vathos`` namespace.

    Args:
        name: suffix appended after ``vathos.`` to form the logger name.

    Returns:
        logging.Logger: the configured logger instance.
    """
    log = logging.getLogger(f'vathos.{name}')
    if log.hasHandlers():
        # Already configured by a previous call -- return it untouched.
        return log
    log.setLevel(LOG_LEVEL)
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        logging.Formatter(
            '[ %(asctime)s - %(name)s ] %(levelname)s: %(message)s'
        )
    )
    log.addHandler(handler)
    # Stop records from also propagating to the root logger (double output).
    log.propagate = False
    return log
| <filename>vathos/utils/logger.py
import logging
import sys
LOG_LEVEL = logging.INFO
def setup_logger(name):
    """Return a ``vathos.<name>`` logger that writes to stdout.

    Configuration (level, handler, format) is applied only on first use;
    repeat calls with the same name return the already-configured logger.
    """
    logger = logging.getLogger(f'vathos.{name}')
    if not logger.hasHandlers():
        logger.setLevel(LOG_LEVEL) # module-level default level
        # single shared format: [ time - logger name ] LEVEL: message
        logger_format = logging.Formatter(
            '[ %(asctime)s - %(name)s ] %(levelname)s: %(message)s'
        )
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setFormatter(logger_format)
        logger.addHandler(stream_handler)
        # do not bubble records up to the root logger (avoids double output)
        logger.propagate = False
    return logger # return the logger
| en | 0.50234 | # set the logging level # logging format # return the logger | 2.61767 | 3 |
napari/_qt/_tests/test_qt_viewer.py | kolibril13/napari | 0 | 6624778 | import gc
import os
import weakref
from dataclasses import dataclass
from typing import List
from unittest import mock
import numpy as np
import pytest
from qtpy.QtGui import QGuiApplication
from qtpy.QtWidgets import QMessageBox
from napari._tests.utils import (
add_layer_by_type,
check_viewer_functioning,
layer_test_data,
skip_local_popups,
)
from napari.settings import get_settings
from napari.utils.interactions import mouse_press_callbacks
from napari.utils.io import imread
from napari.utils.theme import available_themes
def test_qt_viewer(make_napari_viewer):
    """Test instantiating viewer."""
    viewer = make_napari_viewer()
    view = viewer.window.qt_viewer
    assert viewer.title == 'napari'
    assert view.viewer == viewer
    # a fresh viewer starts empty: no layers in the model or the list
    assert len(viewer.layers) == 0
    assert view.layers.model().rowCount() == 0
    # default dims are 2D with no extra dimension sliders shown
    assert viewer.dims.ndim == 2
    assert view.dims.nsliders == viewer.dims.ndim
    assert np.sum(view.dims._displayed_sliders) == 0
def test_qt_viewer_with_console(make_napari_viewer):
    """Test instantiating console from viewer."""
    viewer = make_napari_viewer()
    view = viewer.window.qt_viewer
    # Check console is created lazily on first access and docked
    assert view.console is not None
    assert view.dockConsole.widget() is view.console
def test_qt_viewer_toggle_console(make_napari_viewer):
    """Test toggling console visibility creates and docks the console."""
    viewer = make_napari_viewer()
    view = viewer.window.qt_viewer
    # Check console has been created when it is supposed to be shown
    view.toggle_console_visibility(None)
    assert view._console is not None
    assert view.dockConsole.widget() is view.console
@pytest.mark.parametrize('layer_class, data, ndim', layer_test_data)
def test_add_layer(make_napari_viewer, layer_class, data, ndim):
    """Smoke-test adding every layer type and check the viewer still works."""
    # clamp ndisplay to the supported 2/3 range for this data's ndim
    viewer = make_napari_viewer(ndisplay=int(np.clip(ndim, 2, 3)))
    view = viewer.window.qt_viewer
    add_layer_by_type(viewer, layer_class, data)
    check_viewer_functioning(viewer, view, data, ndim)
def test_new_labels(make_napari_viewer):
    """Test adding new labels layer."""
    # Add labels to empty viewer
    viewer = make_napari_viewer()
    view = viewer.window.qt_viewer
    viewer._new_labels()
    # a freshly created labels layer is all zeros (background)
    assert np.max(viewer.layers[0].data) == 0
    assert len(viewer.layers) == 1
    assert view.layers.model().rowCount() == len(viewer.layers)
    assert viewer.dims.ndim == 2
    assert view.dims.nsliders == viewer.dims.ndim
    assert np.sum(view.dims._displayed_sliders) == 0
    # Add labels with image already present
    viewer = make_napari_viewer()
    view = viewer.window.qt_viewer
    np.random.seed(0)
    data = np.random.random((10, 15))
    viewer.add_image(data)
    viewer._new_labels()
    assert np.max(viewer.layers[1].data) == 0
    assert len(viewer.layers) == 2
    assert view.layers.model().rowCount() == len(viewer.layers)
    assert viewer.dims.ndim == 2
    assert view.dims.nsliders == viewer.dims.ndim
    assert np.sum(view.dims._displayed_sliders) == 0
def test_new_points(make_napari_viewer):
    """Test adding new points layer."""
    # Case 1: points layer added to an empty viewer
    viewer = make_napari_viewer()
    qt_view = viewer.window.qt_viewer
    viewer.add_points()
    assert len(viewer.layers) == 1
    # a new points layer starts with no points
    assert len(viewer.layers[0].data) == 0
    assert qt_view.layers.model().rowCount() == len(viewer.layers)
    assert viewer.dims.ndim == 2
    assert qt_view.dims.nsliders == viewer.dims.ndim
    assert np.sum(qt_view.dims._displayed_sliders) == 0
    # Case 2: points layer added on top of an existing image
    viewer = make_napari_viewer()
    qt_view = viewer.window.qt_viewer
    np.random.seed(0)
    image = np.random.random((10, 15))
    viewer.add_image(image)
    viewer.add_points()
    assert len(viewer.layers) == 2
    assert len(viewer.layers[1].data) == 0
    assert qt_view.layers.model().rowCount() == len(viewer.layers)
    assert viewer.dims.ndim == 2
    assert qt_view.dims.nsliders == viewer.dims.ndim
    assert np.sum(qt_view.dims._displayed_sliders) == 0
def test_new_shapes_empty_viewer(make_napari_viewer):
    """Test adding new shapes layer."""
    # Add shapes to empty viewer
    viewer = make_napari_viewer()
    view = viewer.window.qt_viewer
    viewer.add_shapes()
    # a new shapes layer starts with no shapes
    assert len(viewer.layers[0].data) == 0
    assert len(viewer.layers) == 1
    assert view.layers.model().rowCount() == len(viewer.layers)
    assert viewer.dims.ndim == 2
    assert view.dims.nsliders == viewer.dims.ndim
    assert np.sum(view.dims._displayed_sliders) == 0
    # Add shapes with image already present
    viewer = make_napari_viewer()
    view = viewer.window.qt_viewer
    np.random.seed(0)
    data = np.random.random((10, 15))
    viewer.add_image(data)
    viewer.add_shapes()
    assert len(viewer.layers[1].data) == 0
    assert len(viewer.layers) == 2
    assert view.layers.model().rowCount() == len(viewer.layers)
    assert viewer.dims.ndim == 2
    assert view.dims.nsliders == viewer.dims.ndim
    assert np.sum(view.dims._displayed_sliders) == 0
def test_z_order_adding_removing_images(make_napari_viewer):
    """Test z order is correct after adding/ removing images.

    The vispy visual draw order must mirror the layer-list order at every
    step, including after layers are removed and re-added.
    """
    data = np.ones((10, 10))
    viewer = make_napari_viewer()
    vis = viewer.window.qt_viewer.layer_to_visual

    def _assert_order_matches_layers():
        # The visual z-order must be exactly 0..n-1 in layer-list order.
        order = [vis[x].order for x in viewer.layers]
        np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))

    viewer.add_image(data, colormap='red', name='red')
    viewer.add_image(data, colormap='green', name='green')
    viewer.add_image(data, colormap='blue', name='blue')
    _assert_order_matches_layers()
    # Remove and re-add image
    viewer.layers.remove('red')
    _assert_order_matches_layers()
    viewer.add_image(data, colormap='red', name='red')
    _assert_order_matches_layers()
    # Remove two other images
    viewer.layers.remove('green')
    viewer.layers.remove('blue')
    _assert_order_matches_layers()
    # Add two other layers back
    viewer.add_image(data, colormap='green', name='green')
    viewer.add_image(data, colormap='blue', name='blue')
    _assert_order_matches_layers()
def test_screenshot(make_napari_viewer):
    """Take a screenshot with one layer of each type present."""
    viewer = make_napari_viewer()
    np.random.seed(0)
    # Add image
    data = np.random.random((10, 15))
    viewer.add_image(data)
    # Add labels
    data = np.random.randint(20, size=(10, 15))
    viewer.add_labels(data)
    # Add points
    data = 20 * np.random.random((10, 2))
    viewer.add_points(data)
    # Add vectors
    data = 20 * np.random.random((10, 2, 2))
    viewer.add_vectors(data)
    # Add shapes
    data = 20 * np.random.random((10, 4, 2))
    viewer.add_shapes(data)
    # Take screenshot; expect an RGB(A) image array (H, W, channels)
    screenshot = viewer.window.qt_viewer.screenshot(flash=False)
    assert screenshot.ndim == 3
@pytest.mark.skip("new approach")
def test_screenshot_dialog(make_napari_viewer, tmpdir):
"""Test save screenshot functionality."""
viewer = make_napari_viewer()
np.random.seed(0)
# Add image
data = np.random.random((10, 15))
viewer.add_image(data)
# Add labels
data = np.random.randint(20, size=(10, 15))
viewer.add_labels(data)
# Add points
data = 20 * np.random.random((10, 2))
viewer.add_points(data)
# Add vectors
data = 20 * np.random.random((10, 2, 2))
viewer.add_vectors(data)
# Add shapes
data = 20 * np.random.random((10, 4, 2))
viewer.add_shapes(data)
# Save screenshot
input_filepath = os.path.join(tmpdir, 'test-save-screenshot')
mock_return = (input_filepath, '')
with mock.patch('napari._qt.qt_viewer.QFileDialog') as mocker, mock.patch(
'napari._qt.qt_viewer.QMessageBox'
) as mocker2:
mocker.getSaveFileName.return_value = mock_return
mocker2.warning.return_value = QMessageBox.Yes
viewer.window.qt_viewer._screenshot_dialog()
# Assert behaviour is correct
expected_filepath = input_filepath + '.png' # add default file extension
assert os.path.exists(expected_filepath)
output_data = imread(expected_filepath)
expected_data = viewer.window.qt_viewer.screenshot(flash=False)
assert np.allclose(output_data, expected_data)
@pytest.mark.parametrize(
    "dtype", ['int8', 'uint8', 'int16', 'uint16', 'float32']
)
def test_qt_viewer_data_integrity(make_napari_viewer, dtype):
    """Test that the viewer doesn't change the underlying array."""
    image = np.random.rand(10, 32, 32)
    # scale values into a range representable by the target dtype
    # (8-bit types get [0, 200); wider types get up to 2**14)
    image *= 200 if dtype.endswith('8') else 2 ** 14
    image = image.astype(dtype)
    imean = image.mean()
    viewer = make_napari_viewer()
    viewer.add_image(image.copy())
    datamean = viewer.layers[0].data.mean()
    assert datamean == imean
    # toggle dimensions to 3D rendering and recheck the data is untouched
    viewer.dims.ndisplay = 3
    datamean = viewer.layers[0].data.mean()
    assert datamean == imean
    # back to 2D
    viewer.dims.ndisplay = 2
    datamean = viewer.layers[0].data.mean()
    assert datamean == imean
def test_points_layer_display_correct_slice_on_scale(make_napari_viewer):
    """A scaled points layer must be sliced in world coordinates.

    The point sits at data coordinate 8.7 on axis 0; with the image scaled
    by 0.29 the viewer is positioned at world coordinate 30 * 0.29, so the
    point must appear in that slice.
    """
    viewer = make_napari_viewer()
    data = np.zeros((60, 60, 60))
    viewer.add_image(data, scale=[0.29, 0.26, 0.26])
    pts = viewer.add_points(name='test', size=1, ndim=3)
    pts.add((8.7, 0, 0))
    viewer.dims.set_point(0, 30 * 0.29)  # middle plane
    layer = viewer.layers[1]
    # only the slice indices matter here; the returned scale is unused
    indices, _ = layer._slice_data(layer._slice_indices)
    np.testing.assert_equal(indices, [0])
def test_qt_viewer_clipboard_with_flash(make_napari_viewer, qtbot):
    """Copy canvas and window screenshots to clipboard with the flash effect."""
    viewer = make_napari_viewer()
    # make sure clipboard is empty
    QGuiApplication.clipboard().clear()
    clipboard_image = QGuiApplication.clipboard().image()
    assert clipboard_image.isNull()
    # capture screenshot of the canvas only
    viewer.window.qt_viewer.clipboard(flash=True)
    clipboard_image = QGuiApplication.clipboard().image()
    assert not clipboard_image.isNull()
    # ensure the flash effect is applied
    assert viewer.window.qt_viewer._canvas_overlay.graphicsEffect() is not None
    assert hasattr(viewer.window.qt_viewer._canvas_overlay, "_flash_animation")
    qtbot.wait(500)  # wait for the animation to finish
    # the effect and its animation handle must be cleaned up afterwards
    assert viewer.window.qt_viewer._canvas_overlay.graphicsEffect() is None
    assert not hasattr(
        viewer.window.qt_viewer._canvas_overlay, "_flash_animation"
    )
    # clear clipboard and grab image from application view
    QGuiApplication.clipboard().clear()
    clipboard_image = QGuiApplication.clipboard().image()
    assert clipboard_image.isNull()
    # capture screenshot of the entire window
    viewer.window.clipboard(flash=True)
    clipboard_image = QGuiApplication.clipboard().image()
    assert not clipboard_image.isNull()
    # ensure the flash effect is applied
    assert viewer.window._qt_window.graphicsEffect() is not None
    assert hasattr(viewer.window._qt_window, "_flash_animation")
    qtbot.wait(500)  # wait for the animation to finish
    assert viewer.window._qt_window.graphicsEffect() is None
    assert not hasattr(viewer.window._qt_window, "_flash_animation")
def test_qt_viewer_clipboard_without_flash(make_napari_viewer):
    """Copy canvas and window screenshots to clipboard with flash disabled."""
    viewer = make_napari_viewer()
    # make sure clipboard is empty
    QGuiApplication.clipboard().clear()
    clipboard_image = QGuiApplication.clipboard().image()
    assert clipboard_image.isNull()
    # capture screenshot of the canvas only
    viewer.window.qt_viewer.clipboard(flash=False)
    clipboard_image = QGuiApplication.clipboard().image()
    assert not clipboard_image.isNull()
    # ensure the flash effect is not applied
    assert viewer.window.qt_viewer._canvas_overlay.graphicsEffect() is None
    assert not hasattr(
        viewer.window.qt_viewer._canvas_overlay, "_flash_animation"
    )
    # clear clipboard and grab image from application view
    QGuiApplication.clipboard().clear()
    clipboard_image = QGuiApplication.clipboard().image()
    assert clipboard_image.isNull()
    # capture screenshot of the entire window
    viewer.window.clipboard(flash=False)
    clipboard_image = QGuiApplication.clipboard().image()
    assert not clipboard_image.isNull()
    # ensure the flash effect is not applied
    assert viewer.window._qt_window.graphicsEffect() is None
    assert not hasattr(viewer.window._qt_window, "_flash_animation")
def test_active_keybindings(make_napari_viewer):
    """Keymap providers must track the active layer."""
    viewer = make_napari_viewer()
    view = viewer.window.qt_viewer
    # Check only keybinding is Viewer
    assert len(view._key_map_handler.keymap_providers) == 1
    assert view._key_map_handler.keymap_providers[0] == viewer
    # Add a layer and check its keybindings are active (provider 0)
    data = np.random.random((10, 15))
    layer_image = viewer.add_image(data)
    assert viewer.layers.selection.active == layer_image
    assert len(view._key_map_handler.keymap_providers) == 2
    assert view._key_map_handler.keymap_providers[0] == layer_image
    # Add a layer and check its keybindings become active
    layer_image_2 = viewer.add_image(data)
    assert viewer.layers.selection.active == layer_image_2
    assert len(view._key_map_handler.keymap_providers) == 2
    assert view._key_map_handler.keymap_providers[0] == layer_image_2
    # Change active layer and check its keybindings become active
    viewer.layers.selection.active = layer_image
    assert viewer.layers.selection.active == layer_image
    assert len(view._key_map_handler.keymap_providers) == 2
    assert view._key_map_handler.keymap_providers[0] == layer_image
@dataclass
class MouseEvent:
    """Minimal mock of a canvas mouse event; carries only the position."""

    # mock mouse event class
    pos: List[int]
def test_process_mouse_event(make_napari_viewer):
    """Test that the correct properties are added to the
    MouseEvent by _process_mouse_events.
    """
    # make a mock mouse event
    new_pos = [25, 25]
    mouse_event = MouseEvent(
        pos=new_pos,
    )
    data = np.zeros((5, 20, 20, 20), dtype=int)
    data[1, 0:10, 0:10, 0:10] = 1
    viewer = make_napari_viewer()
    view = viewer.window.qt_viewer
    # scale/translate exercise data-to-world transforms in the event
    labels = viewer.add_labels(data, scale=(1, 2, 1, 1), translate=(5, 5, 5))
    # assertions run inside the callback when the event is dispatched below
    @labels.mouse_drag_callbacks.append
    def on_click(layer, event):
        np.testing.assert_almost_equal(event.view_direction, [0, 1, 0, 0])
        np.testing.assert_array_equal(event.dims_displayed, [1, 2, 3])
        assert event.dims_point[0] == 0
        expected_position = view._map_canvas2world(new_pos)
        np.testing.assert_almost_equal(expected_position, list(event.position))
    viewer.dims.ndisplay = 3
    view._process_mouse_event(mouse_press_callbacks, mouse_event)
@skip_local_popups
def test_memory_leaking(qtbot, make_napari_viewer):
    """Removed layers must become garbage-collectable (no lingering refs)."""
    data = np.zeros((5, 20, 20, 20), dtype=int)
    data[1, 0:10, 0:10, 0:10] = 1
    viewer = make_napari_viewer()
    # hold only weak references so collection can be observed
    image = weakref.ref(viewer.add_image(data))
    labels = weakref.ref(viewer.add_labels(data))
    del viewer.layers[0]
    del viewer.layers[0]
    qtbot.wait(100)
    # collect twice -- presumably to break chained reference cycles; verify
    gc.collect()
    gc.collect()
    assert image() is None
    assert labels() is None
@skip_local_popups
def test_leaks_image(qtbot, make_napari_viewer):
    """A cleared image layer and its data must be garbage-collected."""
    viewer = make_napari_viewer(show=True)
    lr = weakref.ref(viewer.add_image(np.random.rand(10, 10)))
    dr = weakref.ref(lr().data)
    viewer.layers.clear()
    qtbot.wait(100)
    gc.collect()
    # second collect must find nothing left to free
    assert not gc.collect()
    assert not lr()
    assert not dr()
@skip_local_popups
def test_leaks_labels(qtbot, make_napari_viewer):
    """A cleared labels layer and its data must be garbage-collected."""
    viewer = make_napari_viewer(show=True)
    lr = weakref.ref(
        viewer.add_labels((np.random.rand(10, 10) * 10).astype(np.uint8))
    )
    dr = weakref.ref(lr().data)
    viewer.layers.clear()
    qtbot.wait(100)
    gc.collect()
    # second collect must find nothing left to free
    assert not gc.collect()
    assert not lr()
    assert not dr()
@pytest.mark.parametrize("theme", available_themes())
def test_canvas_color(make_napari_viewer, theme):
"""Test instantiating viewer with different themes.
See: https://github.com/napari/napari/issues/3278
"""
# This test is to make sure the application starts with
# with different themes
get_settings().appearance.theme = theme
viewer = make_napari_viewer()
assert viewer.theme == theme
def test_remove_points(make_napari_viewer):
    """Adding a points layer after deleting one must not raise."""
    viewer = make_napari_viewer()
    viewer.add_points([(1, 2), (2, 3)])
    del viewer.layers[0]
    viewer.add_points([(1, 2), (2, 3)])
def test_remove_image(make_napari_viewer):
    """Adding an image layer after deleting one must not raise."""
    viewer = make_napari_viewer()
    viewer.add_image(np.random.rand(10, 10))
    del viewer.layers[0]
    viewer.add_image(np.random.rand(10, 10))
def test_remove_labels(make_napari_viewer):
    """Adding a labels layer after deleting one must not raise."""
    viewer = make_napari_viewer()
    viewer.add_labels((np.random.rand(10, 10) * 10).astype(np.uint8))
    del viewer.layers[0]
    viewer.add_labels((np.random.rand(10, 10) * 10).astype(np.uint8))
| import gc
import os
import weakref
from dataclasses import dataclass
from typing import List
from unittest import mock
import numpy as np
import pytest
from qtpy.QtGui import QGuiApplication
from qtpy.QtWidgets import QMessageBox
from napari._tests.utils import (
add_layer_by_type,
check_viewer_functioning,
layer_test_data,
skip_local_popups,
)
from napari.settings import get_settings
from napari.utils.interactions import mouse_press_callbacks
from napari.utils.io import imread
from napari.utils.theme import available_themes
def test_qt_viewer(make_napari_viewer):
"""Test instantiating viewer."""
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
assert viewer.title == 'napari'
assert view.viewer == viewer
assert len(viewer.layers) == 0
assert view.layers.model().rowCount() == 0
assert viewer.dims.ndim == 2
assert view.dims.nsliders == viewer.dims.ndim
assert np.sum(view.dims._displayed_sliders) == 0
def test_qt_viewer_with_console(make_napari_viewer):
"""Test instantiating console from viewer."""
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
# Check console is created when requested
assert view.console is not None
assert view.dockConsole.widget() is view.console
def test_qt_viewer_toggle_console(make_napari_viewer):
"""Test instantiating console from viewer."""
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
# Check console has been created when it is supposed to be shown
view.toggle_console_visibility(None)
assert view._console is not None
assert view.dockConsole.widget() is view.console
@pytest.mark.parametrize('layer_class, data, ndim', layer_test_data)
def test_add_layer(make_napari_viewer, layer_class, data, ndim):
viewer = make_napari_viewer(ndisplay=int(np.clip(ndim, 2, 3)))
view = viewer.window.qt_viewer
add_layer_by_type(viewer, layer_class, data)
check_viewer_functioning(viewer, view, data, ndim)
def test_new_labels(make_napari_viewer):
"""Test adding new labels layer."""
# Add labels to empty viewer
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
viewer._new_labels()
assert np.max(viewer.layers[0].data) == 0
assert len(viewer.layers) == 1
assert view.layers.model().rowCount() == len(viewer.layers)
assert viewer.dims.ndim == 2
assert view.dims.nsliders == viewer.dims.ndim
assert np.sum(view.dims._displayed_sliders) == 0
# Add labels with image already present
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
np.random.seed(0)
data = np.random.random((10, 15))
viewer.add_image(data)
viewer._new_labels()
assert np.max(viewer.layers[1].data) == 0
assert len(viewer.layers) == 2
assert view.layers.model().rowCount() == len(viewer.layers)
assert viewer.dims.ndim == 2
assert view.dims.nsliders == viewer.dims.ndim
assert np.sum(view.dims._displayed_sliders) == 0
def test_new_points(make_napari_viewer):
"""Test adding new points layer."""
# Add labels to empty viewer
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
viewer.add_points()
assert len(viewer.layers[0].data) == 0
assert len(viewer.layers) == 1
assert view.layers.model().rowCount() == len(viewer.layers)
assert viewer.dims.ndim == 2
assert view.dims.nsliders == viewer.dims.ndim
assert np.sum(view.dims._displayed_sliders) == 0
# Add points with image already present
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
np.random.seed(0)
data = np.random.random((10, 15))
viewer.add_image(data)
viewer.add_points()
assert len(viewer.layers[1].data) == 0
assert len(viewer.layers) == 2
assert view.layers.model().rowCount() == len(viewer.layers)
assert viewer.dims.ndim == 2
assert view.dims.nsliders == viewer.dims.ndim
assert np.sum(view.dims._displayed_sliders) == 0
def test_new_shapes_empty_viewer(make_napari_viewer):
"""Test adding new shapes layer."""
# Add labels to empty viewer
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
viewer.add_shapes()
assert len(viewer.layers[0].data) == 0
assert len(viewer.layers) == 1
assert view.layers.model().rowCount() == len(viewer.layers)
assert viewer.dims.ndim == 2
assert view.dims.nsliders == viewer.dims.ndim
assert np.sum(view.dims._displayed_sliders) == 0
# Add points with image already present
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
np.random.seed(0)
data = np.random.random((10, 15))
viewer.add_image(data)
viewer.add_shapes()
assert len(viewer.layers[1].data) == 0
assert len(viewer.layers) == 2
assert view.layers.model().rowCount() == len(viewer.layers)
assert viewer.dims.ndim == 2
assert view.dims.nsliders == viewer.dims.ndim
assert np.sum(view.dims._displayed_sliders) == 0
def test_z_order_adding_removing_images(make_napari_viewer):
"""Test z order is correct after adding/ removing images."""
data = np.ones((10, 10))
viewer = make_napari_viewer()
vis = viewer.window.qt_viewer.layer_to_visual
viewer.add_image(data, colormap='red', name='red')
viewer.add_image(data, colormap='green', name='green')
viewer.add_image(data, colormap='blue', name='blue')
order = [vis[x].order for x in viewer.layers]
np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))
# Remove and re-add image
viewer.layers.remove('red')
order = [vis[x].order for x in viewer.layers]
np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))
viewer.add_image(data, colormap='red', name='red')
order = [vis[x].order for x in viewer.layers]
np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))
# Remove two other images
viewer.layers.remove('green')
viewer.layers.remove('blue')
order = [vis[x].order for x in viewer.layers]
np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))
# Add two other layers back
viewer.add_image(data, colormap='green', name='green')
viewer.add_image(data, colormap='blue', name='blue')
order = [vis[x].order for x in viewer.layers]
np.testing.assert_almost_equal(order, list(range(len(viewer.layers))))
def test_screenshot(make_napari_viewer):
"Test taking a screenshot"
viewer = make_napari_viewer()
np.random.seed(0)
# Add image
data = np.random.random((10, 15))
viewer.add_image(data)
# Add labels
data = np.random.randint(20, size=(10, 15))
viewer.add_labels(data)
# Add points
data = 20 * np.random.random((10, 2))
viewer.add_points(data)
# Add vectors
data = 20 * np.random.random((10, 2, 2))
viewer.add_vectors(data)
# Add shapes
data = 20 * np.random.random((10, 4, 2))
viewer.add_shapes(data)
# Take screenshot
screenshot = viewer.window.qt_viewer.screenshot(flash=False)
assert screenshot.ndim == 3
@pytest.mark.skip("new approach")
def test_screenshot_dialog(make_napari_viewer, tmpdir):
"""Test save screenshot functionality."""
viewer = make_napari_viewer()
np.random.seed(0)
# Add image
data = np.random.random((10, 15))
viewer.add_image(data)
# Add labels
data = np.random.randint(20, size=(10, 15))
viewer.add_labels(data)
# Add points
data = 20 * np.random.random((10, 2))
viewer.add_points(data)
# Add vectors
data = 20 * np.random.random((10, 2, 2))
viewer.add_vectors(data)
# Add shapes
data = 20 * np.random.random((10, 4, 2))
viewer.add_shapes(data)
# Save screenshot
input_filepath = os.path.join(tmpdir, 'test-save-screenshot')
mock_return = (input_filepath, '')
with mock.patch('napari._qt.qt_viewer.QFileDialog') as mocker, mock.patch(
'napari._qt.qt_viewer.QMessageBox'
) as mocker2:
mocker.getSaveFileName.return_value = mock_return
mocker2.warning.return_value = QMessageBox.Yes
viewer.window.qt_viewer._screenshot_dialog()
# Assert behaviour is correct
expected_filepath = input_filepath + '.png' # add default file extension
assert os.path.exists(expected_filepath)
output_data = imread(expected_filepath)
expected_data = viewer.window.qt_viewer.screenshot(flash=False)
assert np.allclose(output_data, expected_data)
@pytest.mark.parametrize(
"dtype", ['int8', 'uint8', 'int16', 'uint16', 'float32']
)
def test_qt_viewer_data_integrity(make_napari_viewer, dtype):
"""Test that the viewer doesn't change the underlying array."""
image = np.random.rand(10, 32, 32)
image *= 200 if dtype.endswith('8') else 2 ** 14
image = image.astype(dtype)
imean = image.mean()
viewer = make_napari_viewer()
viewer.add_image(image.copy())
datamean = viewer.layers[0].data.mean()
assert datamean == imean
# toggle dimensions
viewer.dims.ndisplay = 3
datamean = viewer.layers[0].data.mean()
assert datamean == imean
# back to 2D
viewer.dims.ndisplay = 2
datamean = viewer.layers[0].data.mean()
assert datamean == imean
def test_points_layer_display_correct_slice_on_scale(make_napari_viewer):
viewer = make_napari_viewer()
data = np.zeros((60, 60, 60))
viewer.add_image(data, scale=[0.29, 0.26, 0.26])
pts = viewer.add_points(name='test', size=1, ndim=3)
pts.add((8.7, 0, 0))
viewer.dims.set_point(0, 30 * 0.29) # middle plane
layer = viewer.layers[1]
indices, scale = layer._slice_data(layer._slice_indices)
np.testing.assert_equal(indices, [0])
def test_qt_viewer_clipboard_with_flash(make_napari_viewer, qtbot):
viewer = make_napari_viewer()
# make sure clipboard is empty
QGuiApplication.clipboard().clear()
clipboard_image = QGuiApplication.clipboard().image()
assert clipboard_image.isNull()
# capture screenshot
viewer.window.qt_viewer.clipboard(flash=True)
clipboard_image = QGuiApplication.clipboard().image()
assert not clipboard_image.isNull()
# ensure the flash effect is applied
assert viewer.window.qt_viewer._canvas_overlay.graphicsEffect() is not None
assert hasattr(viewer.window.qt_viewer._canvas_overlay, "_flash_animation")
qtbot.wait(500) # wait for the animation to finish
assert viewer.window.qt_viewer._canvas_overlay.graphicsEffect() is None
assert not hasattr(
viewer.window.qt_viewer._canvas_overlay, "_flash_animation"
)
# clear clipboard and grab image from application view
QGuiApplication.clipboard().clear()
clipboard_image = QGuiApplication.clipboard().image()
assert clipboard_image.isNull()
# capture screenshot of the entire window
viewer.window.clipboard(flash=True)
clipboard_image = QGuiApplication.clipboard().image()
assert not clipboard_image.isNull()
# ensure the flash effect is applied
assert viewer.window._qt_window.graphicsEffect() is not None
assert hasattr(viewer.window._qt_window, "_flash_animation")
qtbot.wait(500) # wait for the animation to finish
assert viewer.window._qt_window.graphicsEffect() is None
assert not hasattr(viewer.window._qt_window, "_flash_animation")
def test_qt_viewer_clipboard_without_flash(make_napari_viewer):
viewer = make_napari_viewer()
# make sure clipboard is empty
QGuiApplication.clipboard().clear()
clipboard_image = QGuiApplication.clipboard().image()
assert clipboard_image.isNull()
# capture screenshot
viewer.window.qt_viewer.clipboard(flash=False)
clipboard_image = QGuiApplication.clipboard().image()
assert not clipboard_image.isNull()
# ensure the flash effect is not applied
assert viewer.window.qt_viewer._canvas_overlay.graphicsEffect() is None
assert not hasattr(
viewer.window.qt_viewer._canvas_overlay, "_flash_animation"
)
# clear clipboard and grab image from application view
QGuiApplication.clipboard().clear()
clipboard_image = QGuiApplication.clipboard().image()
assert clipboard_image.isNull()
# capture screenshot of the entire window
viewer.window.clipboard(flash=False)
clipboard_image = QGuiApplication.clipboard().image()
assert not clipboard_image.isNull()
# ensure the flash effect is not applied
assert viewer.window._qt_window.graphicsEffect() is None
assert not hasattr(viewer.window._qt_window, "_flash_animation")
def test_active_keybindings(make_napari_viewer):
"""Test instantiating viewer."""
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
# Check only keybinding is Viewer
assert len(view._key_map_handler.keymap_providers) == 1
assert view._key_map_handler.keymap_providers[0] == viewer
# Add a layer and check it is keybindings are active
data = np.random.random((10, 15))
layer_image = viewer.add_image(data)
assert viewer.layers.selection.active == layer_image
assert len(view._key_map_handler.keymap_providers) == 2
assert view._key_map_handler.keymap_providers[0] == layer_image
# Add a layer and check it is keybindings become active
layer_image_2 = viewer.add_image(data)
assert viewer.layers.selection.active == layer_image_2
assert len(view._key_map_handler.keymap_providers) == 2
assert view._key_map_handler.keymap_providers[0] == layer_image_2
# Change active layer and check it is keybindings become active
viewer.layers.selection.active = layer_image
assert viewer.layers.selection.active == layer_image
assert len(view._key_map_handler.keymap_providers) == 2
assert view._key_map_handler.keymap_providers[0] == layer_image
@dataclass
class MouseEvent:
# mock mouse event class
pos: List[int]
def test_process_mouse_event(make_napari_viewer):
"""Test that the correct properties are added to the
MouseEvent by _process_mouse_events.
"""
# make a mock mouse event
new_pos = [25, 25]
mouse_event = MouseEvent(
pos=new_pos,
)
data = np.zeros((5, 20, 20, 20), dtype=int)
data[1, 0:10, 0:10, 0:10] = 1
viewer = make_napari_viewer()
view = viewer.window.qt_viewer
labels = viewer.add_labels(data, scale=(1, 2, 1, 1), translate=(5, 5, 5))
@labels.mouse_drag_callbacks.append
def on_click(layer, event):
np.testing.assert_almost_equal(event.view_direction, [0, 1, 0, 0])
np.testing.assert_array_equal(event.dims_displayed, [1, 2, 3])
assert event.dims_point[0] == 0
expected_position = view._map_canvas2world(new_pos)
np.testing.assert_almost_equal(expected_position, list(event.position))
viewer.dims.ndisplay = 3
view._process_mouse_event(mouse_press_callbacks, mouse_event)
@skip_local_popups
def test_memory_leaking(qtbot, make_napari_viewer):
data = np.zeros((5, 20, 20, 20), dtype=int)
data[1, 0:10, 0:10, 0:10] = 1
viewer = make_napari_viewer()
image = weakref.ref(viewer.add_image(data))
labels = weakref.ref(viewer.add_labels(data))
del viewer.layers[0]
del viewer.layers[0]
qtbot.wait(100)
gc.collect()
gc.collect()
assert image() is None
assert labels() is None
@skip_local_popups
def test_leaks_image(qtbot, make_napari_viewer):
viewer = make_napari_viewer(show=True)
lr = weakref.ref(viewer.add_image(np.random.rand(10, 10)))
dr = weakref.ref(lr().data)
viewer.layers.clear()
qtbot.wait(100)
gc.collect()
assert not gc.collect()
assert not lr()
assert not dr()
@skip_local_popups
def test_leaks_labels(qtbot, make_napari_viewer):
viewer = make_napari_viewer(show=True)
lr = weakref.ref(
viewer.add_labels((np.random.rand(10, 10) * 10).astype(np.uint8))
)
dr = weakref.ref(lr().data)
viewer.layers.clear()
qtbot.wait(100)
gc.collect()
assert not gc.collect()
assert not lr()
assert not dr()
@pytest.mark.parametrize("theme", available_themes())
def test_canvas_color(make_napari_viewer, theme):
"""Test instantiating viewer with different themes.
See: https://github.com/napari/napari/issues/3278
"""
# This test is to make sure the application starts with
# with different themes
get_settings().appearance.theme = theme
viewer = make_napari_viewer()
assert viewer.theme == theme
def test_remove_points(make_napari_viewer):
viewer = make_napari_viewer()
viewer.add_points([(1, 2), (2, 3)])
del viewer.layers[0]
viewer.add_points([(1, 2), (2, 3)])
def test_remove_image(make_napari_viewer):
viewer = make_napari_viewer()
viewer.add_image(np.random.rand(10, 10))
del viewer.layers[0]
viewer.add_image(np.random.rand(10, 10))
def test_remove_labels(make_napari_viewer):
    """Adding a labels layer after deleting one must not raise."""
    viewer = make_napari_viewer()
    make_labels = lambda: (np.random.rand(10, 10) * 10).astype(np.uint8)
    viewer.add_labels(make_labels())
    del viewer.layers[0]
    viewer.add_labels(make_labels())
| en | 0.884438 | Test instantiating viewer. Test instantiating console from viewer. # Check console is created when requested Test instantiating console from viewer. # Check console has been created when it is supposed to be shown Test adding new labels layer. # Add labels to empty viewer # Add labels with image already present Test adding new points layer. # Add labels to empty viewer # Add points with image already present Test adding new shapes layer. # Add labels to empty viewer # Add points with image already present Test z order is correct after adding/ removing images. # Remove and re-add image # Remove two other images # Add two other layers back # Add image # Add labels # Add points # Add vectors # Add shapes # Take screenshot Test save screenshot functionality. # Add image # Add labels # Add points # Add vectors # Add shapes # Save screenshot # Assert behaviour is correct # add default file extension Test that the viewer doesn't change the underlying array. # toggle dimensions # back to 2D # middle plane # make sure clipboard is empty # capture screenshot # ensure the flash effect is applied # wait for the animation to finish # clear clipboard and grab image from application view # capture screenshot of the entire window # ensure the flash effect is applied # wait for the animation to finish # make sure clipboard is empty # capture screenshot # ensure the flash effect is not applied # clear clipboard and grab image from application view # capture screenshot of the entire window # ensure the flash effect is not applied Test instantiating viewer. # Check only keybinding is Viewer # Add a layer and check it is keybindings are active # Add a layer and check it is keybindings become active # Change active layer and check it is keybindings become active # mock mouse event class Test that the correct properties are added to the MouseEvent by _process_mouse_events. # make a mock mouse event Test instantiating viewer with different themes. 
See: https://github.com/napari/napari/issues/3278 # This test is to make sure the application starts with # with different themes | 1.925135 | 2 |
mindsdb/api/mongo/server.py | mindsdb/main | 261 | 6624779 | import socketserver as SocketServer
import socket
import struct
import bson
from bson import codec_options
from collections import OrderedDict
from abc import abstractmethod
import mindsdb.api.mongo.functions as helpers
from mindsdb.api.mongo.classes import RespondersCollection, Session
from mindsdb.api.mongo.responders import responders
from mindsdb.api.mongo.utilities import log
from mindsdb.utilities.with_kwargs_wrapper import WithKWArgsWrapper
from mindsdb.interfaces.storage.db import session as db_session
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.model.model_interface import ModelInterface
from mindsdb.interfaces.database.integrations import IntegrationController
# MongoDB wire-protocol opcodes (message header field `opCode`).
OP_REPLY = 1
OP_UPDATE = 2001
OP_INSERT = 2002
OP_QUERY = 2004
OP_GET_MORE = 2005
OP_DELETE = 2006
OP_KILL_CURSORS = 2007
OP_MSG = 2013

# `struct` format strings for little-endian scalar reads used by unpack().
BYTE = '<b'
INT = '<i'
UINT = '<I'
LONG = '<q'
def unpack(format, buffer, start=0):
    """Read one scalar of ``format`` from ``buffer`` at offset ``start``.

    Returns ``(value, next_offset)`` so successive reads can be chained.
    """
    size = struct.calcsize(format)
    (value,) = struct.unpack_from(format, buffer, start)
    return value, start + size
def get_utf8_string(buffer, start=0):
    """Read a NUL-terminated UTF-8 string from ``buffer`` at ``start``.

    Returns ``(string, next_offset)`` where the offset points past the NUL.
    Raises ValueError if no terminator is found.
    """
    terminator = buffer.index(b"\x00", start)
    return buffer[start:terminator].decode('utf8'), terminator + 1
# Decode BSON into OrderedDict so the client's key order is preserved.
CODEC_OPTIONS = codec_options.CodecOptions(document_class=OrderedDict)
def decode_documents(buffer, start, content_size):
    """Decode all BSON documents packed into buffer[start:start+content_size].

    Returns ``(documents, next_offset)``.
    """
    end = start + content_size
    return bson.decode_all(buffer[start:end], CODEC_OPTIONS), end
class OperationResponder():
    """Base class for wire-protocol message handlers (one per opcode)."""

    def __init__(self, responders):
        # RespondersCollection used to match a decoded query to a responder.
        self.responders = responders

    @abstractmethod
    def handle(self, query_bytes):
        """Parse the message payload and return a response document (or None)."""
        pass

    @abstractmethod
    def to_bytes(self, response, request_id):
        """Serialize ``response`` into reply bytes for ``request_id``."""
        pass
# NOTE probably, it need only for mongo version < 3.6
class OpInsertResponder(OperationResponder):
    """Handles legacy OP_INSERT messages (mongo versions < 3.6)."""

    def handle(self, buffer, request_id, mindsdb_env, session):
        _flags, offset = unpack(UINT, buffer)
        _namespace, offset = get_utf8_string(buffer, offset)
        query = bson.decode_all(buffer[offset:], CODEC_OPTIONS)

        responder = self.responders.find_match(query)
        assert responder is not None, 'query cant be processed'

        return responder.handle(
            query, {'request_id': request_id}, mindsdb_env, session
        )

    def to_bytes(self, response, request_id):
        # OP_INSERT carries no reply on the wire.
        pass
# Bit positions of the OP_MSG flagBits field (MongoDB wire protocol).
OP_MSG_FLAGS = {
    'checksumPresent': 0,
    'moreToCome': 1,
    'exhaustAllowed': 16
}
# NOTE used in mongo version > 3.6
class OpMsgResponder(OperationResponder):
    """Handles OP_MSG messages (mongo versions >= 3.6)."""

    def handle(self, buffer, request_id, mindsdb_env, session):
        """Parse an OP_MSG body into a query dict and dispatch it.

        ``buffer`` is the message payload after the 16-byte header.
        Raises if trailing bytes do not match the checksum flag.
        """
        query = OrderedDict()
        flags, pos = unpack(UINT, buffer)
        checksum_present = bool(flags & (1 << OP_MSG_FLAGS['checksumPresent']))
        # When a CRC-32C checksum is present it occupies the last 4 bytes,
        # so the section area ends 4 bytes early.
        if checksum_present:
            msg_len = len(buffer) - 4
        else:
            msg_len = len(buffer)
        # sections: each starts with a 1-byte kind marker
        while pos < msg_len:
            kind, pos = unpack(BYTE, buffer, pos)
            if kind == 0:
                # body: a single BSON document; its own int32 length prefix
                # is read first (without advancing) to size the decode.
                section_size, _ = unpack(INT, buffer, pos)
                docs, pos = decode_documents(buffer, pos, section_size)
                query.update(docs[0])
            elif kind == 1:
                # Document sequence: int32 size, cstring identifier, then
                # zero or more BSON documents filling the remaining bytes.
                section_size, pos = unpack(INT, buffer, pos)
                seq_id, pos = get_utf8_string(buffer, pos)
                docs_len = section_size - struct.calcsize(INT) - len(seq_id) - 1
                docs, pos = decode_documents(buffer, pos, docs_len)
                query[seq_id] = docs
        remaining = len(buffer) - pos
        if checksum_present:
            if remaining != 4:
                raise Exception('should be checksum at the end of message')
            # TODO read and check checksum
        elif remaining != 0:
            raise Exception('is bytes left after msg parsing')
        log.debug(f'GET OpMSG={query}')
        responder = self.responders.find_match(query)
        assert responder is not None, 'query cant be processed'
        request_args = {
            'request_id': request_id,
            'database': query['$db']
        }
        documents = responder.handle(query, request_args, mindsdb_env, session)
        return documents

    def to_bytes(self, response, request_id):
        """Serialize ``response`` as an OP_MSG reply (kind-0 body section)."""
        flags = struct.pack("<I", 0)  # TODO
        payload_type = struct.pack("<b", 0)  # TODO
        payload_data = bson.BSON.encode(response)
        data = b''.join([flags, payload_type, payload_data])
        reply_id = 0  # TODO add seq here
        response_to = request_id
        # 16-byte standard header: messageLength, requestID, responseTo, opCode.
        header = struct.pack("<iiii", 16 + len(data), reply_id, response_to, OP_MSG)
        return header + data
# NOTE used in any mongo shell version
class OpQueryResponder(OperationResponder):
    """Handles legacy OP_QUERY messages (sent by every mongo shell version)."""

    def handle(self, buffer, request_id, mindsdb_env, session):
        """Parse an OP_QUERY payload and dispatch the query document."""
        # https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-query
        flags, pos = unpack(UINT, buffer)
        namespace, pos = get_utf8_string(buffer, pos)
        # A '<db>.$cmd' namespace marks a command rather than a find query.
        is_command = namespace.endswith('.$cmd')
        num_to_skip, pos = unpack(INT, buffer, pos)
        num_to_return, pos = unpack(INT, buffer, pos)
        docs = bson.decode_all(buffer[pos:], CODEC_OPTIONS)
        query = docs[0]  # docs = [query, returnFieldsSelector]
        log.debug(f'GET OpQuery={query}')
        responder = self.responders.find_match(query)
        assert responder is not None, 'query cant be processed'
        request_args = {
            'num_to_skip': num_to_skip,
            'num_to_return': num_to_return,
            'request_id': request_id,
            'is_command': is_command
        }
        documents = responder.handle(query, request_args, mindsdb_env, session)
        return documents

    def to_bytes(self, request, request_id):
        """Serialize the response document as an OP_REPLY message.

        NOTE(review): the parameter is named ``request`` but actually holds
        the response document (matches the abstract ``response`` slot);
        callers pass it positionally.
        """
        flags = struct.pack("<i", 0)  # TODO
        cursor_id = struct.pack("<q", 0)  # TODO
        starting_from = struct.pack("<i", 0)  # TODO
        number_returned = struct.pack("<i", len([request]))  # always 1 document
        reply_id = 123  # TODO
        response_to = request_id
        log.debug(f'RET docs={request}')
        data = b''.join([flags, cursor_id, starting_from, number_returned])
        data += b''.join([bson.BSON.encode(doc) for doc in [request]])
        # 16-byte standard header precedes the OP_REPLY payload.
        message = struct.pack("<i", 16 + len(data))
        message += struct.pack("<i", reply_id)
        message += struct.pack("<i", response_to)
        message += struct.pack("<i", OP_REPLY)
        return message + data
class MongoRequestHandler(SocketServer.BaseRequestHandler):
    """Per-connection handler: reads wire messages in a loop and replies."""

    _stopped = False

    def _init_ssl(self):
        """Wrap self.request in a server-side TLS socket with a fresh cert."""
        import ssl
        import tempfile
        import atexit
        import os
        from mindsdb.utilities.wizards import make_ssl_cert
        # Generate a temporary self-signed certificate, removed at exit.
        CERT_PATH = tempfile.mkstemp(prefix='mindsdb_cert_', text=True)[1]
        make_ssl_cert(CERT_PATH)
        atexit.register(lambda: os.remove(CERT_PATH))
        # NOTE(review): ssl.SSLContext() with no protocol argument is
        # deprecated in newer Python versions — confirm target version.
        ssl_context = ssl.SSLContext()
        ssl_context.load_cert_chain(CERT_PATH)
        ssl_socket = ssl_context.wrap_socket(
            self.request,
            server_side=True,
            do_handshake_on_connect=True
        )
        self.request = ssl_socket

    def handle(self):
        """Main receive loop: header -> payload -> dispatch -> reply."""
        log.debug('connect')
        log.debug(str(self.server.socket))
        self.session = Session(self.server.mindsdb_env)
        # Peek at the first byte without consuming it to detect TLS.
        first_byte = self.request.recv(1, socket.MSG_PEEK)
        if first_byte == b'\x16':
            # TLS 'client hello' starts from \x16
            self._init_ssl()
        while True:
            # Standard 16-byte header: length, requestID, responseTo, opcode.
            header = self._read_bytes(16)
            if header is False:
                # connection closed by client
                break
            length, pos = unpack(INT, header)
            request_id, pos = unpack(INT, header, pos)
            response_to, pos = unpack(INT, header, pos)
            opcode, pos = unpack(INT, header, pos)
            log.debug(f'GET length={length} id={request_id} opcode={opcode}')
            # `length` includes the header, so the payload is length - 16.
            msg_bytes = self._read_bytes(length - pos)
            answer = self.get_answer(request_id, opcode, msg_bytes)
            if answer is not None:
                self.request.send(answer)
        db_session.close()

    def get_answer(self, request_id, opcode, msg_bytes):
        """Dispatch a payload to the responder registered for ``opcode``.

        Returns reply bytes, or None when the opcode has no reply
        (e.g. OP_INSERT). Raises NotImplementedError for unknown opcodes.
        """
        if opcode not in self.server.operationsHandlersMap:
            raise NotImplementedError(f'Unknown opcode {opcode}')
        responder = self.server.operationsHandlersMap[opcode]
        assert responder is not None, 'error'
        response = responder.handle(msg_bytes, request_id, self.session.mindsdb_env, self.session)
        if response is None:
            return None
        return responder.to_bytes(response, request_id)

    def _read_bytes(self, length):
        """Read exactly ``length`` bytes, or return False if the peer closed."""
        buffer = b''
        while length:
            chunk = self.request.recv(length)
            if chunk == b'':
                log.debug('Connection closed')
                return False
            length -= len(chunk)
            buffer += chunk
        return buffer
class MongoServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """Threaded TCP server speaking (a subset of) the MongoDB wire protocol.

    One MongoRequestHandler is spawned per connection; all handlers share
    ``self.mindsdb_env`` and ``self.operationsHandlersMap``.
    """

    def __init__(self, config):
        # The 'api.mongodb' config section is mandatory.
        mongodb_config = config['api'].get('mongodb')
        assert mongodb_config is not None, 'is no mongodb config!'
        host = mongodb_config['host']
        port = mongodb_config['port']
        log.debug(f'start mongo server on {host}:{port}')
        super().__init__((host, int(port)), MongoRequestHandler)
        # Shared environment handed to every responder via the session.
        self.mindsdb_env = {
            'config': config,
            'origin_data_store': DataStore(),
            'origin_model_interface': ModelInterface(),
            'origin_datasource_controller': IntegrationController(),
        }
        # Wrapped variants always forward company_id=None to the origin objects.
        self.mindsdb_env['model_interface'] = WithKWArgsWrapper(
            self.mindsdb_env['origin_model_interface'],
            company_id=None
        )
        self.mindsdb_env['data_store'] = WithKWArgsWrapper(
            self.mindsdb_env['origin_data_store'],
            company_id=None
        )
        self.mindsdb_env['datasource_controller'] = WithKWArgsWrapper(
            self.mindsdb_env['origin_datasource_controller'],
            company_id=None
        )
        respondersCollection = RespondersCollection()
        opQueryResponder = OpQueryResponder(respondersCollection)
        opMsgResponder = OpMsgResponder(respondersCollection)
        opInsertResponder = OpInsertResponder(respondersCollection)
        # Wire-protocol opcode -> parser/serializer for that message type.
        self.operationsHandlersMap = {
            OP_QUERY: opQueryResponder,
            OP_MSG: opMsgResponder,
            OP_INSERT: opInsertResponder
        }
        # Canned {'ok': 1} replies for administrative commands that mongo
        # clients issue on connect; MindsDB has nothing real to do for them.
        respondersCollection.add(
            when={'drop': 'system.sessions'},
            result={'ok': 1}
        )
        respondersCollection.add(
            when={'update': 'system.version'},
            result={'ok': 1}
        )
        respondersCollection.add(
            when={'setFeatureCompatibilityVersion': helpers.is_true},
            result={'ok': 1}
        )
        # Example observed payload: OpMSG=OrderedDict([('features', 1), ...,
        # ('$db', 'admin')]) — sent by mongos/configsvr handshakes.
        respondersCollection.add(
            when={'features': helpers.is_true},
            result={'ok': 1}
        )
        # Example observed payload: OpMSG=OrderedDict([('serverStatus', 1),
        # ..., ('$db', 'admin')]).
        respondersCollection.add(
            when={'serverStatus': helpers.is_true},
            result={'ok': 1}
        )
        # The real command responders (ismaster, find, insert, ...) come from
        # mindsdb.api.mongo.responders.
        respondersCollection.responders += responders
def run_server(config):
    """Start the MongoDB-protocol API server and serve until interrupted."""
    # Allow quick restarts without waiting for the TIME_WAIT socket state.
    SocketServer.TCPServer.allow_reuse_address = True
    server = MongoServer(config)
    with server:
        server.serve_forever()
| import socketserver as SocketServer
import socket
import struct
import bson
from bson import codec_options
from collections import OrderedDict
from abc import abstractmethod
import mindsdb.api.mongo.functions as helpers
from mindsdb.api.mongo.classes import RespondersCollection, Session
from mindsdb.api.mongo.responders import responders
from mindsdb.api.mongo.utilities import log
from mindsdb.utilities.with_kwargs_wrapper import WithKWArgsWrapper
from mindsdb.interfaces.storage.db import session as db_session
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.model.model_interface import ModelInterface
from mindsdb.interfaces.database.integrations import IntegrationController
OP_REPLY = 1
OP_UPDATE = 2001
OP_INSERT = 2002
OP_QUERY = 2004
OP_GET_MORE = 2005
OP_DELETE = 2006
OP_KILL_CURSORS = 2007
OP_MSG = 2013
BYTE = '<b'
INT = '<i'
UINT = '<I'
LONG = '<q'
def unpack(format, buffer, start=0):
    """Read one scalar of ``format`` from ``buffer`` at offset ``start``.

    Returns ``(value, next_offset)`` so successive reads can be chained.
    """
    size = struct.calcsize(format)
    (value,) = struct.unpack_from(format, buffer, start)
    return value, start + size
def get_utf8_string(buffer, start=0):
    """Read a NUL-terminated UTF-8 string from ``buffer`` at ``start``.

    Returns ``(string, next_offset)`` where the offset points past the NUL.
    """
    terminator = buffer.index(b"\x00", start)
    return buffer[start:terminator].decode('utf8'), terminator + 1
CODEC_OPTIONS = codec_options.CodecOptions(document_class=OrderedDict)
def decode_documents(buffer, start, content_size):
docs = bson.decode_all(buffer[start:start + content_size], CODEC_OPTIONS)
return docs, start + content_size
class OperationResponder():
def __init__(self, responders):
self.responders = responders
@abstractmethod
def handle(self, query_bytes):
pass
@abstractmethod
def to_bytes(self, response, request_id):
pass
# NOTE probably, it need only for mongo version < 3.6
class OpInsertResponder(OperationResponder):
def handle(self, buffer, request_id, mindsdb_env, session):
flags, pos = unpack(UINT, buffer)
namespace, pos = get_utf8_string(buffer, pos)
query = bson.decode_all(buffer[pos:], CODEC_OPTIONS)
responder = self.responders.find_match(query)
assert responder is not None, 'query cant be processed'
request_args = {
'request_id': request_id
}
documents = responder.handle(query, request_args, mindsdb_env, session)
return documents
def to_bytes(self, response, request_id):
pass
OP_MSG_FLAGS = {
'checksumPresent': 0,
'moreToCome': 1,
'exhaustAllowed': 16
}
# NOTE used in mongo version > 3.6
class OpMsgResponder(OperationResponder):
def handle(self, buffer, request_id, mindsdb_env, session):
query = OrderedDict()
flags, pos = unpack(UINT, buffer)
checksum_present = bool(flags & (1 << OP_MSG_FLAGS['checksumPresent']))
if checksum_present:
msg_len = len(buffer) - 4
else:
msg_len = len(buffer)
# sections
while pos < msg_len:
kind, pos = unpack(BYTE, buffer, pos)
if kind == 0:
# body
section_size, _ = unpack(INT, buffer, pos)
docs, pos = decode_documents(buffer, pos, section_size)
query.update(docs[0])
elif kind == 1:
# Document
section_size, pos = unpack(INT, buffer, pos)
seq_id, pos = get_utf8_string(buffer, pos)
docs_len = section_size - struct.calcsize(INT) - len(seq_id) - 1
docs, pos = decode_documents(buffer, pos, docs_len)
query[seq_id] = docs
remaining = len(buffer) - pos
if checksum_present:
if remaining != 4:
raise Exception('should be checksum at the end of message')
# TODO read and check checksum
elif remaining != 0:
raise Exception('is bytes left after msg parsing')
log.debug(f'GET OpMSG={query}')
responder = self.responders.find_match(query)
assert responder is not None, 'query cant be processed'
request_args = {
'request_id': request_id,
'database': query['$db']
}
documents = responder.handle(query, request_args, mindsdb_env, session)
return documents
def to_bytes(self, response, request_id):
flags = struct.pack("<I", 0) # TODO
payload_type = struct.pack("<b", 0) # TODO
payload_data = bson.BSON.encode(response)
data = b''.join([flags, payload_type, payload_data])
reply_id = 0 # TODO add seq here
response_to = request_id
header = struct.pack("<iiii", 16 + len(data), reply_id, response_to, OP_MSG)
return header + data
# NOTE used in any mongo shell version
class OpQueryResponder(OperationResponder):
def handle(self, buffer, request_id, mindsdb_env, session):
# https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-query
flags, pos = unpack(UINT, buffer)
namespace, pos = get_utf8_string(buffer, pos)
is_command = namespace.endswith('.$cmd')
num_to_skip, pos = unpack(INT, buffer, pos)
num_to_return, pos = unpack(INT, buffer, pos)
docs = bson.decode_all(buffer[pos:], CODEC_OPTIONS)
query = docs[0] # docs = [query, returnFieldsSelector]
log.debug(f'GET OpQuery={query}')
responder = self.responders.find_match(query)
assert responder is not None, 'query cant be processed'
request_args = {
'num_to_skip': num_to_skip,
'num_to_return': num_to_return,
'request_id': request_id,
'is_command': is_command
}
documents = responder.handle(query, request_args, mindsdb_env, session)
return documents
def to_bytes(self, request, request_id):
flags = struct.pack("<i", 0) # TODO
cursor_id = struct.pack("<q", 0) # TODO
starting_from = struct.pack("<i", 0) # TODO
number_returned = struct.pack("<i", len([request]))
reply_id = 123 # TODO
response_to = request_id
log.debug(f'RET docs={request}')
data = b''.join([flags, cursor_id, starting_from, number_returned])
data += b''.join([bson.BSON.encode(doc) for doc in [request]])
message = struct.pack("<i", 16 + len(data))
message += struct.pack("<i", reply_id)
message += struct.pack("<i", response_to)
message += struct.pack("<i", OP_REPLY)
return message + data
class MongoRequestHandler(SocketServer.BaseRequestHandler):
_stopped = False
def _init_ssl(self):
import ssl
import tempfile
import atexit
import os
from mindsdb.utilities.wizards import make_ssl_cert
CERT_PATH = tempfile.mkstemp(prefix='mindsdb_cert_', text=True)[1]
make_ssl_cert(CERT_PATH)
atexit.register(lambda: os.remove(CERT_PATH))
ssl_context = ssl.SSLContext()
ssl_context.load_cert_chain(CERT_PATH)
ssl_socket = ssl_context.wrap_socket(
self.request,
server_side=True,
do_handshake_on_connect=True
)
self.request = ssl_socket
def handle(self):
log.debug('connect')
log.debug(str(self.server.socket))
self.session = Session(self.server.mindsdb_env)
first_byte = self.request.recv(1, socket.MSG_PEEK)
if first_byte == b'\x16':
# TLS 'client hello' starts from \x16
self._init_ssl()
while True:
header = self._read_bytes(16)
if header is False:
# connection closed by client
break
length, pos = unpack(INT, header)
request_id, pos = unpack(INT, header, pos)
response_to, pos = unpack(INT, header, pos)
opcode, pos = unpack(INT, header, pos)
log.debug(f'GET length={length} id={request_id} opcode={opcode}')
msg_bytes = self._read_bytes(length - pos)
answer = self.get_answer(request_id, opcode, msg_bytes)
if answer is not None:
self.request.send(answer)
db_session.close()
def get_answer(self, request_id, opcode, msg_bytes):
if opcode not in self.server.operationsHandlersMap:
raise NotImplementedError(f'Unknown opcode {opcode}')
responder = self.server.operationsHandlersMap[opcode]
assert responder is not None, 'error'
response = responder.handle(msg_bytes, request_id, self.session.mindsdb_env, self.session)
if response is None:
return None
return responder.to_bytes(response, request_id)
def _read_bytes(self, length):
buffer = b''
while length:
chunk = self.request.recv(length)
if chunk == b'':
log.debug('Connection closed')
return False
length -= len(chunk)
buffer += chunk
return buffer
class MongoServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
def __init__(self, config):
mongodb_config = config['api'].get('mongodb')
assert mongodb_config is not None, 'is no mongodb config!'
host = mongodb_config['host']
port = mongodb_config['port']
log.debug(f'start mongo server on {host}:{port}')
super().__init__((host, int(port)), MongoRequestHandler)
self.mindsdb_env = {
'config': config,
'origin_data_store': DataStore(),
'origin_model_interface': ModelInterface(),
'origin_datasource_controller': IntegrationController(),
}
self.mindsdb_env['model_interface'] = WithKWArgsWrapper(
self.mindsdb_env['origin_model_interface'],
company_id=None
)
self.mindsdb_env['data_store'] = WithKWArgsWrapper(
self.mindsdb_env['origin_data_store'],
company_id=None
)
self.mindsdb_env['datasource_controller'] = WithKWArgsWrapper(
self.mindsdb_env['origin_datasource_controller'],
company_id=None
)
respondersCollection = RespondersCollection()
opQueryResponder = OpQueryResponder(respondersCollection)
opMsgResponder = OpMsgResponder(respondersCollection)
opInsertResponder = OpInsertResponder(respondersCollection)
self.operationsHandlersMap = {
OP_QUERY: opQueryResponder,
OP_MSG: opMsgResponder,
OP_INSERT: opInsertResponder
}
respondersCollection.add(
when={'drop': 'system.sessions'},
result={'ok': 1}
)
respondersCollection.add(
when={'update': 'system.version'},
result={'ok': 1}
)
respondersCollection.add(
when={'setFeatureCompatibilityVersion': helpers.is_true},
result={'ok': 1}
)
# OpMSG=OrderedDict([('features', 1), ('$clusterTime', OrderedDict([('clusterTime', Timestamp(1599748325, 1)), ('signature', OrderedDict([('hash', b'\xb8\xc3\x03\x18\xca\xe6bh\xf0\xcb47,\x924\x8a >\xfc\x91'), ('keyId', 6870854312365391875)]))])), ('$configServerState', OrderedDict([('opTime', OrderedDict([('ts', Timestamp(1599748325, 1)), ('t', 1)]))])), ('$db', 'admin')])
respondersCollection.add(
when={'features': helpers.is_true},
result={'ok': 1}
)
# OpMSG=OrderedDict([('serverStatus', 1), ('$clusterTime', OrderedDict([('clusterTime', Timestamp(1599748366, 1)), ('signature', OrderedDict([('hash', b'\xa1E}\xbbIU\xc2D\x95++\x82\x88\xb5\x84\xf5\xda)+B'), ('keyId', 6870854312365391875)]))])), ('$configServerState', OrderedDict([('opTime', OrderedDict([('ts', Timestamp(1599748366, 1)), ('t', 1)]))])), ('$db', 'admin')])
respondersCollection.add(
when={'serverStatus': helpers.is_true},
result={'ok': 1}
)
# OpMSG=OrderedDict([('ismaster', 1), ('$db', 'admin'), ('$clusterTime', OrderedDict([('clusterTime', Timestamp(1599749031, 1)), ('signature', OrderedDict([('hash', b'6\x87\xd5Y\xa7\xc7\xcf$\xab\x1e\xa2{\xe5B\xe5\x99\xdbl\x8d\xf4'), ('keyId', 6870854312365391875)]))])), ('$client', OrderedDict([('application', OrderedDict([('name', 'MongoDB Shell')])), ('driver', OrderedDict([('name', 'MongoDB Internal Client'), ('version', '3.6.3')])), ('os', OrderedDict([('type', 'Linux'), ('name', 'Ubuntu'), ('architecture', 'x86_64'), ('version', '18.04')])), ('mongos', OrderedDict([('host', 'maxs-comp:27103'), ('client', '127.0.0.1:52148'), ('version', '3.6.3')]))])), ('$configServerState', OrderedDict([('opTime', OrderedDict([('ts', Timestamp(1599749031, 1)), ('t', 1)]))]))])
respondersCollection.responders += responders
def run_server(config):
    """Start the MongoDB-protocol API server and serve until interrupted."""
    # Allow quick restarts without waiting for the TIME_WAIT socket state.
    SocketServer.TCPServer.allow_reuse_address = True
    server = MongoServer(config)
    with server:
        server.serve_forever()
| en | 0.136612 | # NOTE probably, it need only for mongo version < 3.6 # NOTE used in mongo version > 3.6 # sections # body # Document # TODO read and check checksum # TODO # TODO # TODO add seq here # NOTE used in any mongo shell version # https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#wire-op-query # docs = [query, returnFieldsSelector] # TODO # TODO # TODO # TODO # TLS 'client hello' starts from \x16 # connection closed by client # OpMSG=OrderedDict([('features', 1), ('$clusterTime', OrderedDict([('clusterTime', Timestamp(1599748325, 1)), ('signature', OrderedDict([('hash', b'\xb8\xc3\x03\x18\xca\xe6bh\xf0\xcb47,\x924\x8a >\xfc\x91'), ('keyId', 6870854312365391875)]))])), ('$configServerState', OrderedDict([('opTime', OrderedDict([('ts', Timestamp(1599748325, 1)), ('t', 1)]))])), ('$db', 'admin')]) # OpMSG=OrderedDict([('serverStatus', 1), ('$clusterTime', OrderedDict([('clusterTime', Timestamp(1599748366, 1)), ('signature', OrderedDict([('hash', b'\xa1E}\xbbIU\xc2D\x95++\x82\x88\xb5\x84\xf5\xda)+B'), ('keyId', 6870854312365391875)]))])), ('$configServerState', OrderedDict([('opTime', OrderedDict([('ts', Timestamp(1599748366, 1)), ('t', 1)]))])), ('$db', 'admin')]) # OpMSG=OrderedDict([('ismaster', 1), ('$db', 'admin'), ('$clusterTime', OrderedDict([('clusterTime', Timestamp(1599749031, 1)), ('signature', OrderedDict([('hash', b'6\x87\xd5Y\xa7\xc7\xcf$\xab\x1e\xa2{\xe5B\xe5\x99\xdbl\x8d\xf4'), ('keyId', 6870854312365391875)]))])), ('$client', OrderedDict([('application', OrderedDict([('name', 'MongoDB Shell')])), ('driver', OrderedDict([('name', 'MongoDB Internal Client'), ('version', '3.6.3')])), ('os', OrderedDict([('type', 'Linux'), ('name', 'Ubuntu'), ('architecture', 'x86_64'), ('version', '18.04')])), ('mongos', OrderedDict([('host', 'maxs-comp:27103'), ('client', '127.0.0.1:52148'), ('version', '3.6.3')]))])), ('$configServerState', OrderedDict([('opTime', OrderedDict([('ts', Timestamp(1599749031, 1)), ('t', 1)]))]))]) | 1.970497 | 2 |
imfit_modules/Execution/execution_start.py | Akerdogmus/imfit | 0 | 6624780 | from execution_main import ExecutionModule
class StartExecution(ExecutionModule):
    """Launches an execution plan inside Docker containers (all methods are stubs)."""

    def startExecutionBtn(self):
        """Run the execution plan on the Docker containers."""
        #This button runs the execution plan on Dockers
        pass

    def createDocker(self):
        """Create the Docker containers used for execution."""
        #This function creates Dockers for execution
        pass

    def executeOS(self):
        """Execute the OS layer inside a Docker container."""
        #The function executes OS for Docker
        pass

    def executePython(self):
        """Execute Python inside a Docker container."""
        #The function executes Python for Docker
        pass

    def executeROS(self):
        """Execute ROS inside a Docker container."""
        #The function executes ROS for Docker
        pass

    def executeGazebo(self):
        """Execute Gazebo inside a Docker container."""
        #The function executes Gazebo for Docker
        pass
class TakeDatas(StartExecution):
    """Collects runtime data from the Docker containers for analysis and
    monitoring (all methods are stubs)."""
    #These functions take the datas from Dockers for analyzing and monitoring

    def takeDockerData(self):
        pass

    def takeOSData(self):
        # Bug fix: the original defined takeOSData twice; the second
        # (identical) definition silently shadowed this one and was removed.
        pass

    def takePythonData(self):
        pass

    def takeROSData(self):
        pass

    def takeGazeboData(self):
        pass
class LogFileData(StartExecution):
    """Creates and reads execution log files (stubs)."""
    #These functions create and take "Log Files"

    def createLogFiles(self):
        """Create the log files for an execution run."""
        pass

    def takeLogFileData(self):
        """Read data back from the log files."""
        pass
class RosbagFileData(StartExecution):
    """Creates and reads .rosbag recordings used for monitoring (stubs)."""
    #These functions create and take ".rosbag" for monitoring

    def createRosbagFile(self):
        """Record a .rosbag file during execution."""
        pass

    def takeRosbagFileData(self):
        """Read data back from the recorded .rosbag file."""
        pass
class TerminateDockers(TakeDatas):
    """Tears down the Docker containers after each execution run (stubs)."""

    def terminateDockers(self):
        """Terminate the Docker containers after every execution."""
        #This function terminates the dockers for every execute
        pass

    def checkTerminate(self):
        """Verify that container termination completed."""
        #This function checks the terminate situation
        pass
| from execution_main import ExecutionModule
class StartExecution(ExecutionModule):
def startExecutionBtn(self):
#This button runs the execution plan on Dockers
pass
def createDocker(self):
#This function creates Dockers for execution
pass
def executeOS(self):
#The function executes OS for Docker
pass
def executePython(self):
#The function executes Python for Docker
pass
def executeROS(self):
#The function executes ROS for Docker
pass
def executeGazebo(self):
#The function executes Gazebo for Docker
pass
class TakeDatas(StartExecution):
    """Collects runtime data from the Docker containers for analysis and
    monitoring (all methods are stubs)."""
    #These functions take the datas from Dockers for analyzing and monitoring

    def takeDockerData(self):
        pass

    def takeOSData(self):
        # Bug fix: the original defined takeOSData twice; the second
        # (identical) definition silently shadowed this one and was removed.
        pass

    def takePythonData(self):
        pass

    def takeROSData(self):
        pass

    def takeGazeboData(self):
        pass
class LogFileData(StartExecution):
#These functions create and take "Log Files"
def createLogFiles(self):
pass
def takeLogFileData(self):
pass
class RosbagFileData(StartExecution):
#These functions create and take ".rosbag" for monitoring
def createRosbagFile(self):
pass
def takeRosbagFileData(self):
pass
class TerminateDockers(TakeDatas):
def terminateDockers(self):
#This function terminates the dockers for every execute
pass
def checkTerminate(self):
#This function checks the terminate situation
pass
| en | 0.702534 | #This button runs the execution plan on Dockers #This function creates Dockers for execution #The function executes OS for Docker #The function executes Python for Docker #The function executes ROS for Docker #The function executes Gazebo for Docker #These functions take the datas from Dockers for analyzing and monitoring #These functions create and take "Log Files" #These functions create and take ".rosbag" for monitoring #This function terminates the dockers for every execute #This function checks the terminate situation | 2.56854 | 3 |
hello_googoo/googoo.py | Florents-Tselai/hellog-googoo | 0 | 6624781 | <filename>hello_googoo/googoo.py
#!/usr/bin/env python
# coding: utf-8
from argparse import ArgumentParser
from bs4 import BeautifulSoup
from selenium.webdriver import Chrome
from selenium.webdriver import ChromeOptions
def main():
    """Search Google for a query string and print result URLs to stdout.

    Fixes over the original:
      * the query is URL-encoded (spaces/special characters previously
        produced a malformed URL),
      * result divs without an anchor no longer raise AttributeError.
    """
    from urllib.parse import quote_plus

    arg_parser = ArgumentParser(description='Search Google & Print URLs in the Command Line')
    arg_parser.add_argument('-p', '--pages', default=1, type=int, help='Number of pages', required=False)
    arg_parser.add_argument('-q', '--query', nargs=1, help='Search query string', required=True)
    args = arg_parser.parse_args()

    chrome_options = ChromeOptions()
    chrome_options.headless = True
    chrome_options.add_argument("--disable-logging")

    with Chrome(options=chrome_options) as browser:
        browser.delete_all_cookies()
        # URL-encode the query so it survives the round trip intact.
        browser.get("https://google.com/?q=" + quote_plus(args.query[0]))
        browser.find_element_by_xpath('//*[@id="tsf"]/div[2]/div/div[3]/center/input[1]').click()
        soup = BeautifulSoup(browser.page_source, 'lxml')
        for div in soup.find_all('div', class_='g'):
            a = div.find('a', recursive=True)
            if a is None:
                # Some result containers carry no link; skip them.
                continue
            if not a.get('href', '').startswith('/search?'):
                print(a.get('href'))
if __name__ == '__main__':
main()
| <filename>hello_googoo/googoo.py
#!/usr/bin/env python
# coding: utf-8
from argparse import ArgumentParser
from bs4 import BeautifulSoup
from selenium.webdriver import Chrome
from selenium.webdriver import ChromeOptions
def main():
    """Search Google for a query string and print result URLs to stdout.

    Fixes over the original:
      * the query is URL-encoded (spaces/special characters previously
        produced a malformed URL),
      * result divs without an anchor no longer raise AttributeError.
    """
    from urllib.parse import quote_plus

    arg_parser = ArgumentParser(description='Search Google & Print URLs in the Command Line')
    arg_parser.add_argument('-p', '--pages', default=1, type=int, help='Number of pages', required=False)
    arg_parser.add_argument('-q', '--query', nargs=1, help='Search query string', required=True)
    args = arg_parser.parse_args()

    chrome_options = ChromeOptions()
    chrome_options.headless = True
    chrome_options.add_argument("--disable-logging")

    with Chrome(options=chrome_options) as browser:
        browser.delete_all_cookies()
        # URL-encode the query so it survives the round trip intact.
        browser.get("https://google.com/?q=" + quote_plus(args.query[0]))
        browser.find_element_by_xpath('//*[@id="tsf"]/div[2]/div/div[3]/center/input[1]').click()
        soup = BeautifulSoup(browser.page_source, 'lxml')
        for div in soup.find_all('div', class_='g'):
            a = div.find('a', recursive=True)
            if a is None:
                # Some result containers carry no link; skip them.
                continue
            if not a.get('href', '').startswith('/search?'):
                print(a.get('href'))
if __name__ == '__main__':
main()
| en | 0.325294 | #!/usr/bin/env python # coding: utf-8 | 2.871978 | 3 |
redis/kyasshu_redis/__init__.py | AlexFence/kyasshu | 0 | 6624782 | <reponame>AlexFence/kyasshu<filename>redis/kyasshu_redis/__init__.py
from .redis_cache import RedisCache
__all__ = ["RedisCache"]
| from .redis_cache import RedisCache
__all__ = ["RedisCache"] | none | 1 | 1.196676 | 1 | |
python/testData/resolve/OverloadsAndImplementationInImportedModule.py | jnthn/intellij-community | 2 | 6624783 | <filename>python/testData/resolve/OverloadsAndImplementationInImportedModule.py
from OverloadsAndImplementationInImportedModuleDep import foo
foo("abc")
<ref> | <filename>python/testData/resolve/OverloadsAndImplementationInImportedModule.py
from OverloadsAndImplementationInImportedModuleDep import foo
foo("abc")
<ref> | none | 1 | 1.314662 | 1 | |
Tracer/infer_RoadTracer_M_line_merge.py | astro-ck/Road-Extraction | 25 | 6624784 | <gh_stars>10-100
import sys
sys.path.append("./discoverlib")
import os
from discoverlib import geom, graph
from rtree import index
import math
def generate_search_box(bounds,padding):
if bounds.start.x + padding < bounds.end.x - padding:
min_x = bounds.start.x + padding
max_x = bounds.end.x - padding
elif bounds.start.x + padding > bounds.end.x - padding:
min_x = bounds.end.x - padding
max_x = bounds.start.x + padding
if bounds.start.y + padding < bounds.end.y - padding:
min_y = bounds.start.y + padding
max_y = bounds.end.y - padding
elif bounds.start.y + padding > bounds.end.y - padding:
min_y = bounds.end.y - padding
max_y = bounds.start.y + padding
return min_x,min_y,max_x,max_y
# --- Merge per-tile inferred road graphs into one final graph -----------------
# The biggest graph file is used as the base; edges from every other graph are
# copied in when they do not overlap the base, or when they cross it at a
# sufficiently different angle (i.e. they add a genuinely new road direction).
graph_dir="/out/graph_infer/c2/"
file_name = os.listdir(graph_dir)
# choose the largest file as the base graph
large_size = os.path.getsize(graph_dir + file_name[0])
large_id = 0
for i in range(1, len(file_name)):
    if os.path.getsize(graph_dir + file_name[i]) > large_size:
        large_size = os.path.getsize(graph_dir + file_name[i])
        large_id = i
print("the largest graph is {}".format(file_name[large_id]))
graph1 = graph.read_graph(graph_dir + file_name[large_id])
print("base on {}".format(file_name[large_id]))
# # choose the first one as base graph
# graph1 = graph.read_graph(graph_dir + file_name[0])
# print("base on {}".format(file_name[0]))
# Build an R-tree over the base graph's edge bounding boxes so overlap
# queries during merging are fast.
ind1 = index.Index()
id1 = 0
for edge1 in graph1.edges:
    bounds1 = edge1.bounds()
    # print(edge1.id)
    ind1.insert(edge1.id, (bounds1.start.x, bounds1.start.y, bounds1.end.x, bounds1.end.y))
    id1 += 1
print("total ids:{}".format(id1))
# counter for edges that could not be linked to the base graph (currently unused)
dislinked_edge = 0
for i in range(0, len(file_name)):
    if i != large_id:  # except for base graph
        # graphs to be merged
        print(file_name[i])
        print("id={}".format(id1))
        graph2 = graph.read_graph(graph_dir + file_name[i])
        for edge in graph2.edges:
            # presumably edges are stored in both directions and even ids are
            # one direction -- TODO confirm against discoverlib.graph
            if edge.id % 2 != 0:
                continue
            bounds = edge.bounds()
            padding = 0.01
            min_x, min_y, max_x, max_y=generate_search_box(bounds, padding)
            edge_ids = list(ind1.intersection((min_x, min_y, max_x, max_y), objects=True))
            # edge_ids=list(edge_ids)
            if len(edge_ids) < 1:
                # No base-graph edge overlaps this one: copy it into the base
                # graph and register its bounding box in the R-tree.
                # if dislinked_edge == 0:
                #     # link the disconnected area
                #     edges_id = list(ind1.index.intersection(
                #         (bounds.start.x - 40, bounds.start.y - 40, bounds.end.x + 40, bounds.end.y + 40)))
                #     if len(edges_id) > 0:
                #         graph1.add_vertex(edges_id[0].dst.point)
                #         pt_id = len(graph1.vertices)
                #         graph1.add_edge(graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
                #         dislinked_edge += 1
                # else:
                graph1.add_vertex(edge.src.point)
                graph1.add_vertex(edge.dst.point)
                pt_id = len(graph1.vertices)
                graph1.add_edge(graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
                new_edge = graph.Edge(len(graph1.edges), graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
                # print("lalala:{}".format(len(graph1.edges)))
                new_bounds = new_edge.bounds()
                ind1.insert(id1, (new_bounds.start.x, new_bounds.start.y, new_bounds.end.x, new_bounds.end.y))
                id1 += 1
            else:
                # min_x, min_y, max_x, max_y = generate_search_box(bounds, padding=0.001)
                # edge_ids = list(ind1.intersection((min_x, min_y, max_x, max_y), objects=True))
                if len(edge_ids) >= 1:
                    # print("edge bounds {} {} {} {}".format(bounds.start.x,bounds.start.y,bounds.end.x,bounds.end.y))
                    # for t_id in edge_ids:
                    # print("id {}, box {}".format(t_id.id,t_id.bbox))
                    # Compare orientations: add the edge only if it crosses the
                    # existing base edge at a clearly different angle.
                    target_edge_sin = (edge.src.point.y-edge.dst.point.y)/math.sqrt((edge.src.point.y-edge.dst.point.y)**2+(edge.src.point.x-edge.dst.point.x)**2)
                    target_edge_angle = math.asin(target_edge_sin)
                    # print("previous edge id is:{}".format(previous_edge_id))
                    for base_id in edge_ids:
                        base_edge = graph1.edges[base_id.id]
                        base_edge_sin = (base_edge.src.point.y-base_edge.dst.point.y)/math.sqrt((base_edge.src.point.y-base_edge.dst.point.y)**2+(base_edge.src.point.x-base_edge.dst.point.x)**2)
                        base_edge_angle = math.asin(base_edge_sin)
                        angle_diff = abs(base_edge_angle - target_edge_angle)
                        if angle_diff > 0.6 and angle_diff < 3.15 / 2 - 0.6:
                            # print("add edge with angle diff {}".format(angle_diff))
                            graph1.add_vertex(edge.src.point)
                            graph1.add_vertex(edge.dst.point)
                            pt_id = len(graph1.vertices)
                            graph1.add_edge(graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
                            new_edge = graph.Edge(len(graph1.edges), graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
                            # print("lalala:{}".format(len(graph1.edges)))
                            new_bounds = new_edge.bounds()
                            ind1.insert(id1, (new_bounds.start.x, new_bounds.start.y, new_bounds.end.x, new_bounds.end.y))
                            id1 += 1
                            break
print("vertices length={}".format(len(graph1.vertices)))
print("edge length={}".format(len(graph1.edges)))
# graph1.edges[1].src.point.x
graph1.save("/out/graph_infer/line_merge_final/c2.final.graph")
print("done")
| import sys
sys.path.append("./discoverlib")
import os
from discoverlib import geom, graph
from rtree import index
import math
def generate_search_box(bounds,padding):
if bounds.start.x + padding < bounds.end.x - padding:
min_x = bounds.start.x + padding
max_x = bounds.end.x - padding
elif bounds.start.x + padding > bounds.end.x - padding:
min_x = bounds.end.x - padding
max_x = bounds.start.x + padding
if bounds.start.y + padding < bounds.end.y - padding:
min_y = bounds.start.y + padding
max_y = bounds.end.y - padding
elif bounds.start.y + padding > bounds.end.y - padding:
min_y = bounds.end.y - padding
max_y = bounds.start.y + padding
return min_x,min_y,max_x,max_y
graph_dir="/out/graph_infer/c2/"
file_name = os.listdir(graph_dir)
# choose the largest as base graph
large_size = os.path.getsize(graph_dir + file_name[0])
large_id = 0
for i in range(1, len(file_name)):
if os.path.getsize(graph_dir + file_name[i]) > large_size:
large_size = os.path.getsize(graph_dir + file_name[i])
large_id = i
print("the largest graph is {}".format(file_name[large_id]))
graph1 = graph.read_graph(graph_dir + file_name[large_id])
print("base on {}".format(file_name[large_id]))
# # choose the first one as base graph
# graph1 = graph.read_graph(graph_dir + file_name[0])
# print("base on {}".format(file_name[0]))
ind1 = index.Index()
id1 = 0
for edge1 in graph1.edges:
bounds1 = edge1.bounds()
# print(edge1.id)
ind1.insert(edge1.id, (bounds1.start.x, bounds1.start.y, bounds1.end.x, bounds1.end.y))
id1 += 1
print("total ids:{}".format(id1))
# reccord th
dislinked_edge = 0
for i in range(0, len(file_name)):
if i != large_id: # except for base graph
# graphs to be merged
print(file_name[i])
print("id={}".format(id1))
graph2 = graph.read_graph(graph_dir + file_name[i])
for edge in graph2.edges:
if edge.id % 2 != 0:
continue
bounds = edge.bounds()
padding = 0.01
min_x, min_y, max_x, max_y=generate_search_box(bounds, padding)
edge_ids = list(ind1.intersection((min_x, min_y, max_x, max_y), objects=True))
# edge_ids=list(edge_ids)
if len(edge_ids) < 1:
# if dislinked_edge == 0:
# # link the disconnected area
# edges_id = list(ind1.index.intersection(
# (bounds.start.x - 40, bounds.start.y - 40, bounds.end.x + 40, bounds.end.y + 40)))
# if len(edges_id) > 0:
# graph1.add_vertex(edges_id[0].dst.point)
# pt_id = len(graph1.vertices)
# graph1.add_edge(graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
# dislinked_edge += 1
# else:
graph1.add_vertex(edge.src.point)
graph1.add_vertex(edge.dst.point)
pt_id = len(graph1.vertices)
graph1.add_edge(graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
new_edge = graph.Edge(len(graph1.edges), graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
# print("lalala:{}".format(len(graph1.edges)))
new_bounds = new_edge.bounds()
ind1.insert(id1, (new_bounds.start.x, new_bounds.start.y, new_bounds.end.x, new_bounds.end.y))
id1 += 1
else:
# min_x, min_y, max_x, max_y = generate_search_box(bounds, padding=0.001)
# edge_ids = list(ind1.intersection((min_x, min_y, max_x, max_y), objects=True))
if len(edge_ids) >= 1:
# print("edge bounds {} {} {} {}".format(bounds.start.x,bounds.start.y,bounds.end.x,bounds.end.y))
# for t_id in edge_ids:
# print("id {}, box {}".format(t_id.id,t_id.bbox))
target_edge_sin = (edge.src.point.y-edge.dst.point.y)/math.sqrt((edge.src.point.y-edge.dst.point.y)**2+(edge.src.point.x-edge.dst.point.x)**2)
target_edge_angle = math.asin(target_edge_sin)
# print("previous edge id is:{}".format(previous_edge_id))
for base_id in edge_ids:
base_edge = graph1.edges[base_id.id]
base_edge_sin = (base_edge.src.point.y-base_edge.dst.point.y)/math.sqrt((base_edge.src.point.y-base_edge.dst.point.y)**2+(base_edge.src.point.x-base_edge.dst.point.x)**2)
base_edge_angle = math.asin(base_edge_sin)
angle_diff = abs(base_edge_angle - target_edge_angle)
if angle_diff > 0.6 and angle_diff < 3.15 / 2 - 0.6:
# print("add edge with angle diff {}".format(angle_diff))
graph1.add_vertex(edge.src.point)
graph1.add_vertex(edge.dst.point)
pt_id = len(graph1.vertices)
graph1.add_edge(graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
new_edge = graph.Edge(len(graph1.edges), graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1])
# print("lalala:{}".format(len(graph1.edges)))
new_bounds = new_edge.bounds()
ind1.insert(id1, (new_bounds.start.x, new_bounds.start.y, new_bounds.end.x, new_bounds.end.y))
id1 += 1
break
print("vertices length={}".format(len(graph1.vertices)))
print("edge length={}".format(len(graph1.edges)))
# graph1.edges[1].src.point.x
graph1.save("/out/graph_infer/line_merge_final/c2.final.graph")
print("done") | en | 0.518413 | # choose the largest as base graph # # choose the first one as base graph # graph1 = graph.read_graph(graph_dir + file_name[0]) # print("base on {}".format(file_name[0])) # print(edge1.id) # reccord th # except for base graph # graphs to be merged # edge_ids=list(edge_ids) # if dislinked_edge == 0: # # link the disconnected area # edges_id = list(ind1.index.intersection( # (bounds.start.x - 40, bounds.start.y - 40, bounds.end.x + 40, bounds.end.y + 40))) # if len(edges_id) > 0: # graph1.add_vertex(edges_id[0].dst.point) # pt_id = len(graph1.vertices) # graph1.add_edge(graph1.vertices[pt_id - 2], graph1.vertices[pt_id - 1]) # dislinked_edge += 1 # else: # print("lalala:{}".format(len(graph1.edges))) # min_x, min_y, max_x, max_y = generate_search_box(bounds, padding=0.001) # edge_ids = list(ind1.intersection((min_x, min_y, max_x, max_y), objects=True)) # print("edge bounds {} {} {} {}".format(bounds.start.x,bounds.start.y,bounds.end.x,bounds.end.y)) # for t_id in edge_ids: # print("id {}, box {}".format(t_id.id,t_id.bbox)) # print("previous edge id is:{}".format(previous_edge_id)) # print("add edge with angle diff {}".format(angle_diff)) # print("lalala:{}".format(len(graph1.edges))) # graph1.edges[1].src.point.x | 2.772696 | 3 |
paper/causalimpact_xgboost.py | kjappelbaum/aeml | 0 | 6624785 | <reponame>kjappelbaum/aeml<gh_stars>0
from aeml.causalimpact.utils import get_timestep_tuples, get_causalimpact_splits
import pickle
from aeml.causalimpact.utils import _select_unrelated_x
from aeml.models.gbdt.gbmquantile import LightGBMQuantileRegressor
from aeml.models.gbdt.run import run_ci_model
from aeml.models.gbdt.settings import *
from darts.dataprocessing.transformers import Scaler
from darts import TimeSeries
import pandas as pd
from copy import deepcopy
import time
import numpy as np
import click
import math
settings = {
0: {0: ci_0_0, 1: ci_0_1},
1: {0: ci_1_0, 1: ci_1_1},
2: {0: ci_2_0, 1: ci_2_1},
3: {0: ci_3_0, 1: ci_3_1},
4: {0: ci_4_0, 1: ci_4_1},
5: {0: ci_5_0, 1: ci_5_1},
6: {0: ci_6_0, 1: ci_6_1}
}
TIMESTR = time.strftime("%Y%m%d-%H%M%S")
TARGETS_clean = ["2-Amino-2-methylpropanol C4H11NO", "Piperazine C4H10N2"]
MEAS_COLUMNS = [
"TI-19",
# "FI-16",
# "TI-33",
# "FI-2",
# "FI-151",
# "TI-8",
# "FI-241",
# "valve-position-12", # dry-bed
# "FI-38", # strippera
# "PI-28", # stripper
# "TI-28", # stripper
# "FI-20",
# "FI-30",
"TI-3",
"FI-19",
# "FI-211",
"FI-11",
# "TI-30",
# "PI-30",
"TI-1213",
# "TI-4",
# "FI-23",
# "FI-20",
# "FI-20/FI-23",
# "TI-22",
"delta_t",
"TI-35",
# "delta_t_2"
]
DF = pd.read_pickle("20210508_df_for_causalimpact.pkl")
with open("step_times.pkl", "rb") as handle:
times = pickle.load(handle)
step_changes = [
["TI-19"],
["FI-19"],
["TI-3"],
["FI-11"],
["FI-11", "FI-2"],
["TI-1213"],
["TI-1213", "TI-19"],
["capture rate"],
# # ["capture rate"],
# ["valve-position-12"],
]
to_exclude = {
0: ["TI-19"],
1: ["FI-19"],
2: ["TI-3"],
3: ["FI-11"],
4: ["FI-11"],
5: ["TI-1213", "TI-19"],
6: [],
}
def select_columns(day):
    """Return the measurement columns usable on *day*.

    Sensors that were deliberately step-changed on that day (listed in
    ``to_exclude``) are removed from ``MEAS_COLUMNS`` so they cannot leak the
    intervention into the model's covariates.
    """
    excluded = to_exclude[day]
    return [column for column in MEAS_COLUMNS if column not in excluded]
@click.command('cli')
@click.argument('day', type=click.INT)
@click.argument('target', type=click.INT)
def run_causalimpact_analysis(day, target):
    """Fit a quantile GBDT on data outside day *day*'s step experiment and
    forecast species *target* (index into ``TARGETS_clean``) across the
    perturbed window.

    The forecast plus the scaled before/during/after splits are pickled to
    ``<timestamp>-causalimpact_<day>_<target>``.
    """
    cols = select_columns(day)
    y = TimeSeries.from_dataframe(DF)[TARGETS_clean[target]]
    x = TimeSeries.from_dataframe(DF[cols])
    x_trains = []
    y_trains = []
    before, during, after, way_after = get_causalimpact_splits(
        x, y, day, times, DF
    )
    # We do multiseries training: fit on the data before the step change and
    # on the data well after it; the perturbed window itself is held out.
    x_trains.append(before[0])
    y_trains.append(before[1])
    x_trains.append(way_after[0])
    y_trains.append(way_after[1])
    xscaler = Scaler(name="x-scaler")
    yscaler = Scaler(name="y-scaler")
    # Fit the scalers on the longer of the two training series only, then
    # apply the same transform to the shorter one.
    longer = np.argmax([len(x_trains[0]), len(x_trains[1])])
    shorter = np.argmin([len(x_trains[0]), len(x_trains[1])])
    x_trains[longer] = xscaler.fit_transform(x_trains[longer])
    y_trains[longer] = yscaler.fit_transform(y_trains[longer])
    x_trains[shorter] = xscaler.transform(x_trains[shorter])
    y_trains[shorter] = yscaler.transform(y_trains[shorter])
    # Drop the shorter series entirely when it is too short to help training.
    if len(x_trains[shorter]) < 300:
        x_trains.pop(shorter)
        y_trains.pop(shorter)
    before = (
        xscaler.transform(before[0]),
        yscaler.transform(before[1]),
    )
    during = (
        xscaler.transform(during[0]),
        yscaler.transform(during[1]),
    )
    after = (xscaler.transform(after[0]), yscaler.transform(after[1]))
    before_x_df, before_y_df = (
        before[0].pd_dataframe(),
        before[1].pd_dataframe(),
    )
    during_x_df, during_y_df = (
        during[0].pd_dataframe(),
        during[1].pd_dataframe(),
    )
    after_x_df, after_y_df = (
        after[0].pd_dataframe(),
        after[1].pd_dataframe(),
    )
    # Stitch the covariates back into one continuous series spanning the
    # whole experiment day (needed as past_covariates for forecasting).
    day_x_df = pd.concat([before_x_df, during_x_df, after_x_df], axis=0)
    day_x_ts = TimeSeries.from_dataframe(day_x_df)
    day_y_df = pd.concat([before_y_df, during_y_df, after_y_df], axis=0)
    day_y_ts = TimeSeries.from_dataframe(day_y_df)
    steps = math.ceil(len(during[0])/2)# * 2
    model = run_ci_model(
        x_trains,
        y_trains,
        **settings[day][target],
        num_features=len(cols),
        quantiles=(0.05, 0.5, 0.95),
        output_chunk_length=steps
    )
    # Start forecasting a little before the step change and extend a little
    # past the end of the perturbed window.
    buffer = math.ceil(len(during[0])/3)
    b = before[1][:-buffer]
    predictions = model.forecast(
        n = len(during[0]) + 2* buffer,
        series = b,
        past_covariates = day_x_ts,
    )
    results = {
        'predictions': predictions,
        'x_all': day_x_ts,
        'before': before,
        'during': during,
        'after': after
    }
    with open(
        f"{TIMESTR}-causalimpact_{day}_{target}",
        "wb",
    ) as handle:
        pickle.dump(results, handle)
if __name__ == '__main__':
run_causalimpact_analysis() | from aeml.causalimpact.utils import get_timestep_tuples, get_causalimpact_splits
import pickle
from aeml.causalimpact.utils import _select_unrelated_x
from aeml.models.gbdt.gbmquantile import LightGBMQuantileRegressor
from aeml.models.gbdt.run import run_ci_model
from aeml.models.gbdt.settings import *
from darts.dataprocessing.transformers import Scaler
from darts import TimeSeries
import pandas as pd
from copy import deepcopy
import time
import numpy as np
import click
import math
settings = {
0: {0: ci_0_0, 1: ci_0_1},
1: {0: ci_1_0, 1: ci_1_1},
2: {0: ci_2_0, 1: ci_2_1},
3: {0: ci_3_0, 1: ci_3_1},
4: {0: ci_4_0, 1: ci_4_1},
5: {0: ci_5_0, 1: ci_5_1},
6: {0: ci_6_0, 1: ci_6_1}
}
TIMESTR = time.strftime("%Y%m%d-%H%M%S")
TARGETS_clean = ["2-Amino-2-methylpropanol C4H11NO", "Piperazine C4H10N2"]
MEAS_COLUMNS = [
"TI-19",
# "FI-16",
# "TI-33",
# "FI-2",
# "FI-151",
# "TI-8",
# "FI-241",
# "valve-position-12", # dry-bed
# "FI-38", # strippera
# "PI-28", # stripper
# "TI-28", # stripper
# "FI-20",
# "FI-30",
"TI-3",
"FI-19",
# "FI-211",
"FI-11",
# "TI-30",
# "PI-30",
"TI-1213",
# "TI-4",
# "FI-23",
# "FI-20",
# "FI-20/FI-23",
# "TI-22",
"delta_t",
"TI-35",
# "delta_t_2"
]
DF = pd.read_pickle("20210508_df_for_causalimpact.pkl")
with open("step_times.pkl", "rb") as handle:
times = pickle.load(handle)
step_changes = [
["TI-19"],
["FI-19"],
["TI-3"],
["FI-11"],
["FI-11", "FI-2"],
["TI-1213"],
["TI-1213", "TI-19"],
["capture rate"],
# # ["capture rate"],
# ["valve-position-12"],
]
to_exclude = {
0: ["TI-19"],
1: ["FI-19"],
2: ["TI-3"],
3: ["FI-11"],
4: ["FI-11"],
5: ["TI-1213", "TI-19"],
6: [],
}
def select_columns(day):
    """Return the measurement columns usable on *day*.

    Sensors that were deliberately step-changed on that day (listed in
    ``to_exclude``) are removed from ``MEAS_COLUMNS`` so they cannot leak the
    intervention into the model's covariates.
    """
    excluded = to_exclude[day]
    return [column for column in MEAS_COLUMNS if column not in excluded]
@click.command('cli')
@click.argument('day', type=click.INT)
@click.argument('target', type=click.INT)
def run_causalimpact_analysis(day, target):
cols = select_columns(day)
y = TimeSeries.from_dataframe(DF)[TARGETS_clean[target]]
x = TimeSeries.from_dataframe(DF[cols])
x_trains = []
y_trains = []
before, during, after, way_after = get_causalimpact_splits(
x, y, day, times, DF
)
# We do multiseries training
x_trains.append(before[0])
y_trains.append(before[1])
x_trains.append(way_after[0])
y_trains.append(way_after[1])
xscaler = Scaler(name="x-scaler")
yscaler = Scaler(name="y-scaler")
longer = np.argmax([len(x_trains[0]), len(x_trains[1])])
shorter = np.argmin([len(x_trains[0]), len(x_trains[1])])
x_trains[longer] = xscaler.fit_transform(x_trains[longer])
y_trains[longer] = yscaler.fit_transform(y_trains[longer])
x_trains[shorter] = xscaler.transform(x_trains[shorter])
y_trains[shorter] = yscaler.transform(y_trains[shorter])
if len(x_trains[shorter]) < 300:
x_trains.pop(shorter)
y_trains.pop(shorter)
before = (
xscaler.transform(before[0]),
yscaler.transform(before[1]),
)
during = (
xscaler.transform(during[0]),
yscaler.transform(during[1]),
)
after = (xscaler.transform(after[0]), yscaler.transform(after[1]))
before_x_df, before_y_df = (
before[0].pd_dataframe(),
before[1].pd_dataframe(),
)
during_x_df, during_y_df = (
during[0].pd_dataframe(),
during[1].pd_dataframe(),
)
after_x_df, after_y_df = (
after[0].pd_dataframe(),
after[1].pd_dataframe(),
)
day_x_df = pd.concat([before_x_df, during_x_df, after_x_df], axis=0)
day_x_ts = TimeSeries.from_dataframe(day_x_df)
day_y_df = pd.concat([before_y_df, during_y_df, after_y_df], axis=0)
day_y_ts = TimeSeries.from_dataframe(day_y_df)
steps = math.ceil(len(during[0])/2)# * 2
model = run_ci_model(
x_trains,
y_trains,
**settings[day][target],
num_features=len(cols),
quantiles=(0.05, 0.5, 0.95),
output_chunk_length=steps
)
buffer = math.ceil(len(during[0])/3)
b = before[1][:-buffer]
predictions = model.forecast(
n = len(during[0]) + 2* buffer,
series = b,
past_covariates = day_x_ts,
)
results = {
'predictions': predictions,
'x_all': day_x_ts,
'before': before,
'during': during,
'after': after
}
with open(
f"{TIMESTR}-causalimpact_{day}_{target}",
"wb",
) as handle:
pickle.dump(results, handle)
if __name__ == '__main__':
run_causalimpact_analysis() | en | 0.485653 | # "FI-16", # "TI-33", # "FI-2", # "FI-151", # "TI-8", # "FI-241", # "valve-position-12", # dry-bed # "FI-38", # strippera # "PI-28", # stripper # "TI-28", # stripper # "FI-20", # "FI-30", # "FI-211", # "TI-30", # "PI-30", # "TI-4", # "FI-23", # "FI-20", # "FI-20/FI-23", # "TI-22", # "delta_t_2" # # ["capture rate"], # ["valve-position-12"], # We do multiseries training # * 2 | 1.722378 | 2 |
tests/integration_tests/test_zn_nodes2.py | zincware/ZnTrack | 16 | 6624786 | import dataclasses
import os
import shutil
import subprocess
import pytest
import znjson
from zntrack import zn
from zntrack.core.base import Node
@pytest.fixture
def proj_path(tmp_path):
    """Create a fresh git + DVC repository in ``tmp_path`` and chdir into it.

    The test file itself is copied in so ZnTrack can locate the node source.
    """
    shutil.copy(__file__, tmp_path)
    os.chdir(tmp_path)
    subprocess.check_call(["git", "init"])
    subprocess.check_call(["dvc", "init"])
    return tmp_path
class NodeViaParams(Node):
    # Node used purely as a parameter container for other nodes; the zn.Hash
    # output lets it participate in the DVC graph without real outputs.
    _hash = zn.Hash()

    param1 = zn.params()
    param2 = zn.params()

    def run(self):
        pass


class ExampleNode(Node):
    # Consumes two NodeViaParams instances through zn.Nodes() slots and
    # concatenates one parameter from each.
    params1: NodeViaParams = zn.Nodes()
    params2: NodeViaParams = zn.Nodes()
    outs = zn.outs()

    def run(self):
        self.outs = self.params1.param1 + self.params2.param2
def test_ExampleNode(proj_path):
    """zn.Nodes() parameters survive a write_graph/run/load round trip."""
    ExampleNode(
        params1=NodeViaParams(param1="Hello", param2="World"),
        params2=NodeViaParams(param1="Lorem", param2="Ipsum"),
    ).write_graph(run=True)

    example_node = ExampleNode.load()

    assert example_node.params1.param1 == "Hello"
    assert example_node.params1.param2 == "World"
    assert example_node.params2.param1 == "Lorem"
    assert example_node.params2.param2 == "Ipsum"
    # run() concatenates params1.param1 with params2.param2
    assert example_node.outs == "HelloIpsum"


class SingleExampleNode(Node):
    # Node whose zn.Nodes() slot may be left empty (None).
    params1 = zn.Nodes()
    outs = zn.outs()

    def run(self):
        self.outs = "Lorem Ipsum"


def test_SingleExampleNode(proj_path):
    """A zn.Nodes() slot left unset does not break the graph."""
    SingleExampleNode().write_graph(run=True)

    assert SingleExampleNode.load().outs == "Lorem Ipsum"
class NodeNodeParams(Node):
    # Mixes a regular dependency (zn.deps) with a node-as-parameter (zn.Nodes).
    deps: NodeViaParams = zn.deps()
    node: NodeViaParams = zn.Nodes()
    _hash = zn.Hash()

    def run(self):
        pass


class ExampleNode2(Node):
    # Nests a zn.Nodes() node that itself contains zn.Nodes()/zn.deps().
    params1: NodeNodeParams = zn.Nodes()
    params2 = zn.Nodes()

    def run(self):
        pass


def test_depth_graph(proj_path):
    """Nested zn.Nodes() chains resolve names and values through two levels."""
    node_1 = NodeViaParams(param1="Lorem", param2="Ipsum", name="Node1")
    node_1.write_graph(run=True)  # defined as dependency, so it must run first.

    node_2 = NodeViaParams(param1="Lorem", param2="Ipsum")
    node_3 = NodeNodeParams(deps=node_1, node=node_2, name="Node3")
    node_4 = ExampleNode2(params1=node_3)
    node_4.write_graph(run=True)

    node_4 = ExampleNode2.load()

    assert node_4.params1.deps.param1 == "Lorem"
    assert node_4.params1.node.param2 == "Ipsum"
    # Auto-generated node names follow the "<parent>-<attribute>" scheme.
    assert node_4.params1.node_name == "ExampleNode2-params1"
    assert node_4.params1.deps.node_name == "Node1"
    assert node_4.params1.node.node_name == "ExampleNode2-params1-node"
class NodeWithOuts(Node):
    # Node passed via zn.Nodes() that also declares a real zn.outs() output.
    input = zn.params()
    factor = zn.params()
    output = zn.outs()
    _hash = zn.Hash()

    def run(self):
        self.output = self.input * self.factor


def test_NodeWithOuts(proj_path):
    """A node with outputs can still be attached through zn.Nodes()."""
    node_1 = SingleExampleNode(params1=NodeWithOuts(factor=2))
    node_1.write_graph(run=True)

    assert SingleExampleNode.load().params1.factor == 2
@dataclasses.dataclass
class Parameter:
    """Plain value holder serialized through a custom znjson converter below."""

    value: int = dataclasses.field(default=0)
class NodeWithParameter(Node):
    # Uses a custom dataclass instance as a default parameter value.
    parameter = zn.params(Parameter())
    _hash = zn.Hash()


class MoreNode(Node):
    # Wraps NodeWithParameter through zn.Nodes() to test nested serialization.
    node: NodeWithParameter = zn.Nodes()


class ParameterConverter(znjson.ConverterBase):
    """Teach znjson how to (de)serialize :class:`Parameter` instances."""

    level = 100                    # converter priority
    representation = "parameter"   # tag stored in the JSON payload
    instance = Parameter           # type this converter handles

    def _encode(self, obj: Parameter) -> dict:
        return dataclasses.asdict(obj)

    def _decode(self, value: dict) -> Parameter:
        return Parameter(**value)
def test_DataclassNode(proj_path):
    """Custom-converter parameters round-trip both directly and via zn.Nodes()."""
    znjson.register(ParameterConverter)

    node_w_params = NodeWithParameter(parameter=Parameter(value=42))
    node_w_params.write_graph()

    node = MoreNode(node=NodeWithParameter(parameter=Parameter(value=10)))
    node.write_graph()

    node_w_params = node_w_params.load()
    assert node_w_params.parameter.value == 42

    node = node.load()
    assert node.node.parameter.value == 10

    # Deregister so the converter does not leak into other tests.
    znjson.deregister(ParameterConverter)
| import dataclasses
import os
import shutil
import subprocess
import pytest
import znjson
from zntrack import zn
from zntrack.core.base import Node
@pytest.fixture
def proj_path(tmp_path):
shutil.copy(__file__, tmp_path)
os.chdir(tmp_path)
subprocess.check_call(["git", "init"])
subprocess.check_call(["dvc", "init"])
return tmp_path
class NodeViaParams(Node):
_hash = zn.Hash()
param1 = zn.params()
param2 = zn.params()
def run(self):
pass
class ExampleNode(Node):
params1: NodeViaParams = zn.Nodes()
params2: NodeViaParams = zn.Nodes()
outs = zn.outs()
def run(self):
self.outs = self.params1.param1 + self.params2.param2
def test_ExampleNode(proj_path):
ExampleNode(
params1=NodeViaParams(param1="Hello", param2="World"),
params2=NodeViaParams(param1="Lorem", param2="Ipsum"),
).write_graph(run=True)
example_node = ExampleNode.load()
assert example_node.params1.param1 == "Hello"
assert example_node.params1.param2 == "World"
assert example_node.params2.param1 == "Lorem"
assert example_node.params2.param2 == "Ipsum"
assert example_node.outs == "HelloIpsum"
class SingleExampleNode(Node):
params1 = zn.Nodes()
outs = zn.outs()
def run(self):
self.outs = "Lorem Ipsum"
def test_SingleExampleNode(proj_path):
SingleExampleNode().write_graph(run=True)
assert SingleExampleNode.load().outs == "Lorem Ipsum"
class NodeNodeParams(Node):
deps: NodeViaParams = zn.deps()
node: NodeViaParams = zn.Nodes()
_hash = zn.Hash()
def run(self):
pass
class ExampleNode2(Node):
params1: NodeNodeParams = zn.Nodes()
params2 = zn.Nodes()
def run(self):
pass
def test_depth_graph(proj_path):
node_1 = NodeViaParams(param1="Lorem", param2="Ipsum", name="Node1")
node_1.write_graph(run=True) # defined as dependency, so it must run first.
node_2 = NodeViaParams(param1="Lorem", param2="Ipsum")
node_3 = NodeNodeParams(deps=node_1, node=node_2, name="Node3")
node_4 = ExampleNode2(params1=node_3)
node_4.write_graph(run=True)
node_4 = ExampleNode2.load()
assert node_4.params1.deps.param1 == "Lorem"
assert node_4.params1.node.param2 == "Ipsum"
assert node_4.params1.node_name == "ExampleNode2-params1"
assert node_4.params1.deps.node_name == "Node1"
assert node_4.params1.node.node_name == "ExampleNode2-params1-node"
class NodeWithOuts(Node):
input = zn.params()
factor = zn.params()
output = zn.outs()
_hash = zn.Hash()
def run(self):
self.output = self.input * self.factor
def test_NodeWithOuts(proj_path):
node_1 = SingleExampleNode(params1=NodeWithOuts(factor=2))
node_1.write_graph(run=True)
assert SingleExampleNode.load().params1.factor == 2
@dataclasses.dataclass
class Parameter:
value: int = 0
class NodeWithParameter(Node):
parameter = zn.params(Parameter())
_hash = zn.Hash()
class MoreNode(Node):
node: NodeWithParameter = zn.Nodes()
class ParameterConverter(znjson.ConverterBase):
level = 100
representation = "parameter"
instance = Parameter
def _encode(self, obj: Parameter) -> dict:
return dataclasses.asdict(obj)
def _decode(self, value: dict) -> Parameter:
return Parameter(**value)
def test_DataclassNode(proj_path):
znjson.register(ParameterConverter)
node_w_params = NodeWithParameter(parameter=Parameter(value=42))
node_w_params.write_graph()
node = MoreNode(node=NodeWithParameter(parameter=Parameter(value=10)))
node.write_graph()
node_w_params = node_w_params.load()
assert node_w_params.parameter.value == 42
node = node.load()
assert node.node.parameter.value == 10
znjson.deregister(ParameterConverter)
| en | 0.96769 | # defined as dependency, so it must run first. | 2.20441 | 2 |
op_robot_tests/tests_files/brokers/openprocurement_client_helper.py | iovzt/robot_tests | 0 | 6624787 | from openprocurement_client.client import Client, EDRClient
from openprocurement_client.dasu_client import DasuClient
from openprocurement_client.document_service_client \
import DocumentServiceClient
from openprocurement_client.plan import PlansClient
from openprocurement_client.contract import ContractingClient
from openprocurement_client.exceptions import IdNotFound
from restkit.errors import RequestFailed, BadStatusLine, ResourceError
from retrying import retry
from time import sleep
import os
import urllib
def retry_if_request_failed(exception):
    """Retry predicate for ``retrying.retry``.

    Returns True when the exception carries a retryable HTTP status code
    (any 5xx, or 409/429/412), or when it is a low-level ``BadStatusLine``
    connection failure.

    The original compared ``None`` against integers when the exception had
    no ``status_code`` attribute; that only worked by accident under
    Python 2's arbitrary-ordering rules and raises ``TypeError`` on
    Python 3, so we check for ``None`` explicitly first.
    """
    status_code = getattr(exception, 'status_code', None)
    print(status_code)
    if status_code is not None and (
            500 <= status_code < 600 or status_code in (409, 429, 412)):
        return True
    return isinstance(exception, BadStatusLine)
class StableClient(Client):
    """CDB API client that transparently retries transient request failures."""

    @retry(stop_max_attempt_number=100, wait_random_min=500,
           wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(StableClient, self).request(*args, **kwargs)


class StableDsClient(DocumentServiceClient):
    """Document-service client with the same retry policy as StableClient."""

    @retry(stop_max_attempt_number=100, wait_random_min=500,
           wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(StableDsClient, self).request(*args, **kwargs)


def prepare_api_wrapper(key, resource, host_url, api_version, ds_client=None):
    """Build a retry-enabled CDB API client."""
    return StableClient(key, resource, host_url, api_version,
                        ds_client=ds_client)


def prepare_ds_api_wrapper(ds_host_url, auth_ds):
    """Build a retry-enabled document-service client."""
    return StableDsClient(ds_host_url, auth_ds)
class ContractingStableClient(ContractingClient):
    """Contracting API client with the shared retry policy."""

    @retry(stop_max_attempt_number=100, wait_random_min=500, wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(ContractingStableClient, self).request(*args, **kwargs)


def prepare_contract_api_wrapper(key, host_url, api_version, ds_client=None):
    """Build a retry-enabled contracting API client."""
    return ContractingStableClient(key, host_url, api_version, ds_client=ds_client)
class StableEDRClient(EDRClient):
    """EDR client that retries and additionally honours rate-limit headers."""

    @retry(stop_max_attempt_number=100, wait_random_min=500,
           wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        try:
            res = super(StableEDRClient, self).request(*args, **kwargs)
        except ResourceError as re:
            if re.status_int == 429:
                # Wait out the server-requested back-off before re-raising so
                # the @retry wrapper's next attempt is not rejected again.
                sleep(int(re.response.headers.get('Retry-After', '30')))
            raise re
        else:
            return res


def prepare_edr_wrapper(host_url, api_version, username, password):
    """Build a retry-enabled EDR client."""
    return StableEDRClient(host_url, api_version, username, password)
def get_complaint_internal_id(tender, complaintID):
    """Map a human-readable ``complaintID`` to the complaint's internal ``id``.

    Tender-level complaints are searched first, then the complaints attached
    to each award.

    :raises IdNotFound: if no complaint with that ``complaintID`` exists.
    """
    try:
        for complaint in tender.data.complaints:
            if complaint.complaintID == complaintID:
                return complaint.id
    except AttributeError:
        # Tender has no 'complaints' attribute - fall through to awards.
        pass
    try:
        for award in tender.data.awards:
            for complaint in award.complaints:
                if complaint.complaintID == complaintID:
                    return complaint.id
    except AttributeError:
        # No awards, or an award without complaints.
        pass
    raise IdNotFound
def get_document_by_id(data, doc_id):
    """Return the first document whose title contains ``doc_id``.

    The tender's own documents are searched first, then documents attached to
    complaints, contracts, awards (each award's documents before its
    complaints' documents), cancellations and bids - the same order as the
    original copy-pasted loops.

    :param data: tender data as a dict-like object
    :param doc_id: substring expected in the document title
    :raises Exception: if no matching document is found
    """
    def _first_match(container):
        # Scan one object's 'documents' list for a title containing doc_id.
        for document in container.get('documents', []):
            if doc_id in document.get('title', ''):
                return document
        return None

    # Flatten every document-bearing object into one ordered list.
    containers = [data]
    containers.extend(data.get('complaints', []))
    containers.extend(data.get('contracts', []))
    for award in data.get('awards', []):
        containers.append(award)
        containers.extend(award.get('complaints', []))
    containers.extend(data.get('cancellations', []))
    containers.extend(data.get('bids', []))

    for container in containers:
        document = _first_match(container)
        if document is not None:
            return document
    raise Exception('Document with id {} not found'.format(doc_id))
def get_tenders_by_funder_id(client,
                             funder_id=None,
                             descending=True,
                             tender_id_field='tenderID',
                             opt_fields=('funders',)):
    """Page through tenders until at least one with funders is found.

    Returns a mapping ``{tenderID: [funder identifier ids]}``.  When
    ``funder_id`` is given, only tenders funded by that identifier are kept.

    :raises IdNotFound: if no matching tender is found before the feed ends.
    """
    params = {'offset': '',
              'opt_fields': ','.join((tender_id_field,) + opt_fields),
              'descending': descending}
    tender_list = True
    client._update_params(params)
    tenders_with_funder = {}
    # Keep fetching pages until one contains a tender with funders, or the
    # feed is exhausted (get_tenders() returns an empty page).
    while tender_list and not tenders_with_funder:
        tender_list = client.get_tenders()
        for tender in tender_list:
            if 'funders' in tender:
                tenders_with_funder[tender[tender_id_field]] = [el['identifier']['id'] for el in tender['funders']]
    # In case we are looking for a specific funder
    if funder_id:
        tenders_with_funder = {k: v for k, v in tenders_with_funder.items() if funder_id in v}
    if not tenders_with_funder:
        raise IdNotFound
    else:
        return tenders_with_funder
def download_file_from_url(url, path_to_save_file):
    """Download ``url`` to ``path_to_save_file`` and return the file's base name.

    The local file and the HTTP response are closed even when the read or the
    write fails (the original leaked the open file on error and never closed
    the response object at all).
    """
    response = urllib.urlopen(url)
    try:
        with open(path_to_save_file, 'wb') as local_file:
            local_file.write(response.read())
    finally:
        response.close()
    return os.path.basename(path_to_save_file)
class StableClient_plan(PlansClient):
    """Plans API client with the shared retry policy."""

    @retry(stop_max_attempt_number=100, wait_random_min=500, wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(StableClient_plan, self).request(*args, **kwargs)


def prepare_plan_api_wrapper(key, host_url, api_version):
    """Build a retry-enabled plans API client."""
    return StableClient_plan(key, host_url, api_version)


class StableClient_dasu(DasuClient):
    """DASU (state audit) API client with the shared retry policy."""

    @retry(stop_max_attempt_number=100, wait_random_min=500,
           wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(StableClient_dasu, self).request(*args, **kwargs)
def prepare_dasu_api_wrapper(key, resource, host_url, api_version, ds_client=None):
    """Factory for a retry-enabled DASU client (``StableClient_dasu``)."""
    # NOTE(review): Python-2-only print statement that leaks the API key to
    # stdout -- should be removed.
    print key
    # NOTE(review): the text after ")" on the final line looks like two source
    # files fused together during extraction -- verify against the original.
    return StableClient_dasu(key, resource, host_url, api_version,
                             ds_client=ds_client) | from openprocurement_client.client import Client, EDRClient
from openprocurement_client.dasu_client import DasuClient
from openprocurement_client.document_service_client \
import DocumentServiceClient
from openprocurement_client.plan import PlansClient
from openprocurement_client.contract import ContractingClient
from openprocurement_client.exceptions import IdNotFound
from restkit.errors import RequestFailed, BadStatusLine, ResourceError
from retrying import retry
from time import sleep
import os
import urllib
def retry_if_request_failed(exception):
    """Retry predicate for the ``retrying.retry`` decorator.

    :param exception: the exception raised by the wrapped ``request`` call.
    :return: True when the failure looks transient and is worth retrying:
        any 5xx status, rate-limit / conflict statuses (409, 412, 429), or
        a ``BadStatusLine`` without a status code.
    """
    status_code = getattr(exception, 'status_code', None)
    if status_code is None:
        # No HTTP status (e.g. a dropped connection): only retry the
        # well-known BadStatusLine case.  Guarding None first also avoids a
        # TypeError from ordering comparisons under Python 3.
        return isinstance(exception, BadStatusLine)
    return 500 <= status_code < 600 or status_code in (409, 412, 429)
class StableClient(Client):
    """Tenders API ``Client`` whose ``request`` retries transient failures.

    Up to 100 attempts with a random 0.5-4 s wait between attempts;
    ``retry_if_request_failed`` decides what counts as transient.
    """
    @retry(stop_max_attempt_number=100, wait_random_min=500,
           wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(StableClient, self).request(*args, **kwargs)
class StableDsClient(DocumentServiceClient):
    """``DocumentServiceClient`` whose ``request`` retries transient failures.

    Up to 100 attempts with a random 0.5-4 s wait between attempts;
    ``retry_if_request_failed`` decides what counts as transient.
    """
    @retry(stop_max_attempt_number=100, wait_random_min=500,
           wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(StableDsClient, self).request(*args, **kwargs)
def prepare_api_wrapper(key, resource, host_url, api_version, ds_client=None):
    """Factory for a retry-enabled tenders API client (``StableClient``)."""
    return StableClient(key, resource, host_url, api_version,
                        ds_client=ds_client)
def prepare_ds_api_wrapper(ds_host_url, auth_ds):
    """Factory for a retry-enabled document service client (``StableDsClient``)."""
    return StableDsClient(ds_host_url, auth_ds)
class ContractingStableClient(ContractingClient):
    """``ContractingClient`` whose ``request`` retries transient failures.

    Up to 100 attempts with a random 0.5-4 s wait between attempts;
    ``retry_if_request_failed`` decides what counts as transient.
    """
    @retry(stop_max_attempt_number=100, wait_random_min=500, wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(ContractingStableClient, self).request(*args, **kwargs)
def prepare_contract_api_wrapper(key, host_url, api_version, ds_client=None):
    """Factory for a retry-enabled contracting client (``ContractingStableClient``)."""
    return ContractingStableClient(key, host_url, api_version, ds_client=ds_client)
class StableEDRClient(EDRClient):
    """``EDRClient`` with retries plus explicit 429 rate-limit handling.

    On HTTP 429 the request sleeps for the server-suggested ``Retry-After``
    interval (default 30 s) before the failure is re-raised, so the
    ``@retry`` decorator schedules the next attempt only after the
    rate-limit window has passed.
    """
    @retry(stop_max_attempt_number=100, wait_random_min=500,
           wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        try:
            res = super(StableEDRClient, self).request(*args, **kwargs)
        except ResourceError as re:
            if re.status_int == 429:
                # Honour the server's Retry-After header before re-raising.
                sleep(int(re.response.headers.get('Retry-After', '30')))
            # Always re-raise: the @retry decorator decides whether to retry.
            raise re
        else:
            return res
def prepare_edr_wrapper(host_url, api_version, username, password):
    """Factory for a retry-enabled EDR client (``StableEDRClient``)."""
    return StableEDRClient(host_url, api_version, username, password)
def get_complaint_internal_id(tender, complaintID):
    """Translate a human-readable ``complaintID`` into the internal ``id``.

    Tender-level complaints are searched first, then complaints attached to
    awards.  Each search block is best-effort: any ``AttributeError``
    (missing ``complaints``/``awards`` attributes, or a malformed entry hit
    mid-iteration) silently skips the rest of that block.

    :raises IdNotFound: when no complaint matches ``complaintID``.
    """
    try:
        for complaint in tender.data.complaints:
            if complaint.complaintID == complaintID:
                return complaint.id
    except AttributeError:
        pass
    try:
        for award in tender.data.awards:
            for complaint in award.complaints:
                if complaint.complaintID == complaintID:
                    return complaint.id
    except AttributeError:
        pass
    raise IdNotFound
def get_document_by_id(data, doc_id):
    """Return the first document whose title contains ``doc_id``.

    Documents are searched in a fixed order: top-level documents, then
    documents of complaints, contracts, awards (award documents first,
    then each award's complaint documents), cancellations and bids.

    :param data: tender data dict.
    :param doc_id: substring looked up in each document's ``title``.
    :raises Exception: when no document title contains ``doc_id``.
    """
    def _all_documents():
        # Yield candidates in the exact search order; the first match wins.
        for doc in data.get('documents', []):
            yield doc
        for container in ('complaints', 'contracts'):
            for item in data.get(container, []):
                for doc in item.get('documents', []):
                    yield doc
        for award in data.get('awards', []):
            for doc in award.get('documents', []):
                yield doc
            for complaint in award.get('complaints', []):
                for doc in complaint.get('documents', []):
                    yield doc
        for container in ('cancellations', 'bids'):
            for item in data.get(container, []):
                for doc in item.get('documents', []):
                    yield doc

    for document in _all_documents():
        if doc_id in document.get('title', ''):
            return document
    raise Exception('Document with id {} not found'.format(doc_id))
def get_tenders_by_funder_id(client,
                             funder_id=None,
                             descending=True,
                             tender_id_field='tenderID',
                             opt_fields=('funders',)):
    """Collect tenders that declare funders, keyed by ``tender_id_field``.

    Pages through ``client.get_tenders()`` until at least one matching
    tender is found or the feed is exhausted.

    :param client: tenders API client exposing ``_update_params`` and
        ``get_tenders``.
    :param funder_id: when given, collect only tenders funded by this
        funder identifier id.
    :param descending: listing direction, passed through to the request.
    :param tender_id_field: tender field used as the key of the result.
    :param opt_fields: extra fields requested alongside ``tender_id_field``.
    :return: dict mapping tender id -> list of funder identifier ids.
    :raises IdNotFound: when no matching tender is found.
    """
    params = {'offset': '',
              'opt_fields': ','.join((tender_id_field,) + opt_fields),
              'descending': descending}
    client._update_params(params)
    tenders_with_funder = {}
    tender_list = True
    while tender_list and not tenders_with_funder:
        tender_list = client.get_tenders()
        for tender in tender_list:
            if 'funders' not in tender:
                continue
            funder_ids = [el['identifier']['id'] for el in tender['funders']]
            # Filter while paging, so that pagination keeps going until the
            # requested funder is actually seen; filtering only after the
            # loop stops would miss a funder that appears on a later page
            # than the first funded tender.
            if funder_id is None or funder_id in funder_ids:
                tenders_with_funder[tender[tender_id_field]] = funder_ids
    if not tenders_with_funder:
        raise IdNotFound
    return tenders_with_funder
def download_file_from_url(url, path_to_save_file):
    """Download *url* into *path_to_save_file* and return its basename.

    The whole response body is read into memory, so this is only suitable
    for modestly-sized documents.
    """
    response = urllib.urlopen(url)
    try:
        # Context manager guarantees the output file is closed even if the
        # read or write fails (the original leaked both handles on error).
        with open(path_to_save_file, 'wb') as out_file:
            out_file.write(response.read())
    finally:
        response.close()
    return os.path.basename(path_to_save_file)
class StableClient_plan(PlansClient):
    """``PlansClient`` whose ``request`` retries transient failures.

    Up to 100 attempts with a random 0.5-4 s wait between attempts;
    ``retry_if_request_failed`` decides what counts as transient.
    """
    @retry(stop_max_attempt_number=100, wait_random_min=500, wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(StableClient_plan, self).request(*args, **kwargs)
def prepare_plan_api_wrapper(key, host_url, api_version):
    """Factory for a retry-enabled plans API client (``StableClient_plan``)."""
    return StableClient_plan(key, host_url, api_version)
class StableClient_dasu(DasuClient):
    """``DasuClient`` whose ``request`` retries transient failures.

    Up to 100 attempts with a random 0.5-4 s wait between attempts;
    ``retry_if_request_failed`` decides what counts as transient.
    """
    @retry(stop_max_attempt_number=100, wait_random_min=500,
           wait_random_max=4000, retry_on_exception=retry_if_request_failed)
    def request(self, *args, **kwargs):
        return super(StableClient_dasu, self).request(*args, **kwargs)
def prepare_dasu_api_wrapper(key, resource, host_url, api_version, ds_client=None):
    """Factory for a retry-enabled DASU client (``StableClient_dasu``)."""
    # Removed a stray Python-2-only debug statement (`print key`) that both
    # broke Python 3 compatibility and leaked the API key to stdout.
    return StableClient_dasu(key, resource, host_url, api_version,
                             ds_client=ds_client)
lib/galaxy/datatypes/interval.py | lesperry/Metagenomics | 0 | 6624788 | <filename>lib/galaxy/datatypes/interval.py
"""
Interval datatypes
"""
import logging
import math
import sys
import tempfile
from bx.intervals.io import GenomicIntervalReader, ParseError
from six.moves.urllib.parse import quote_plus
from galaxy import util
from galaxy.datatypes import metadata
from galaxy.datatypes.data import DatatypeValidation
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.sniff import (
build_sniff_from_prefix,
get_headers,
iter_headers
)
from galaxy.datatypes.tabular import Tabular
from galaxy.datatypes.util.gff_util import parse_gff3_attributes, parse_gff_attributes
from galaxy.util import compression_utils
from . import (
data,
dataproviders
)
log = logging.getLogger(__name__)
# Contains the meta columns and the words that map to it; list aliases on the
# right side of the : in decreasing order of priority
alias_spec = {
'chromCol' : ['chrom', 'CHROMOSOME', 'CHROM', 'Chromosome Name'],
'startCol' : ['start', 'START', 'chromStart', 'txStart', 'Start Position (bp)'],
'endCol' : ['end', 'END', 'STOP', 'chromEnd', 'txEnd', 'End Position (bp)'],
'strandCol' : ['strand', 'STRAND', 'Strand'],
'nameCol' : ['name', 'NAME', 'Name', 'name2', 'NAME2', 'Name2', 'Ensembl Gene ID', 'Ensembl Transcript ID', 'Ensembl Peptide ID']
}
# a little faster lookup
alias_helper = {}
for key, value in alias_spec.items():
for elem in value:
alias_helper[elem] = key
# Constants for configuring viewport generation: If a line is greater than
# VIEWPORT_MAX_READS_PER_LINE * VIEWPORT_READLINE_BUFFER_SIZE bytes in size,
# then we will not generate a viewport for that dataset
VIEWPORT_READLINE_BUFFER_SIZE = 1048576 # 1MB
VIEWPORT_MAX_READS_PER_LINE = 10
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Interval(Tabular):
"""Tab delimited data containing interval information"""
edam_data = "data_3002"
edam_format = "format_3475"
file_ext = "interval"
line_class = "region"
track_type = "FeatureTrack"
data_sources = {"data": "tabix", "index": "bigwig"}
"""Add metadata elements"""
MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
MetadataElement(name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0)
MetadataElement(name="nameCol", desc="Name/Identifier column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display apps"""
Tabular.__init__(self, **kwd)
self.add_display_app('ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links')
def init_meta(self, dataset, copy_from=None):
Tabular.init_meta(self, dataset, copy_from=copy_from)
def set_meta(self, dataset, overwrite=True, first_line_is_header=False, **kwd):
"""Tries to guess from the line the location number of the column for the chromosome, region start-end and strand"""
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=0)
if dataset.has_data():
empty_line_count = 0
num_check_lines = 100 # only check up to this many non empty lines
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh):
line = line.rstrip('\r\n')
if line:
if (first_line_is_header or line[0] == '#'):
self.init_meta(dataset)
line = line.strip('#')
elems = line.split('\t')
for meta_name, header_list in alias_spec.items():
for header_val in header_list:
if header_val in elems:
# found highest priority header to meta_name
setattr(dataset.metadata, meta_name, elems.index(header_val) + 1)
break # next meta_name
break # Our metadata is set, so break out of the outer loop
else:
# Header lines in Interval files are optional. For example, BED is Interval but has no header.
# We'll make a best guess at the location of the metadata columns.
elems = line.split('\t')
if len(elems) > 2:
if overwrite or not dataset.metadata.element_is_set('chromCol'):
dataset.metadata.chromCol = 1
try:
int(elems[1])
if overwrite or not dataset.metadata.element_is_set('startCol'):
dataset.metadata.startCol = 2
except Exception:
pass # Metadata default will be used
try:
int(elems[2])
if overwrite or not dataset.metadata.element_is_set('endCol'):
dataset.metadata.endCol = 3
except Exception:
pass # Metadata default will be used
# we no longer want to guess that this column is the 'name', name must now be set manually for interval files
# we will still guess at the strand, as we can make a more educated guess
# if len( elems ) > 3:
# try:
# int( elems[3] )
# except Exception:
# if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
# dataset.metadata.nameCol = 4
if len(elems) < 6 or elems[5] not in data.valid_strand:
if overwrite or not dataset.metadata.element_is_set('strandCol'):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set('strandCol'):
dataset.metadata.strandCol = 6
break
if (i - empty_line_count) > num_check_lines:
break # Our metadata is set or we examined 100 non-empty lines, so break out of the outer loop
else:
empty_line_count += 1
def displayable(self, dataset):
try:
return dataset.has_data() \
and dataset.state == dataset.states.OK \
and dataset.metadata.columns > 0 \
and dataset.metadata.data_lines != 0 \
and dataset.metadata.chromCol \
and dataset.metadata.startCol \
and dataset.metadata.endCol
except Exception:
return False
def get_estimated_display_viewport(self, dataset, chrom_col=None, start_col=None, end_col=None):
"""Return a chrom, start, stop tuple for viewing a file."""
viewport_feature_count = 100 # viewport should check at least 100 features; excludes comment lines
max_line_count = max(viewport_feature_count, 500) # maximum number of lines to check; includes comment lines
if not self.displayable(dataset):
return (None, None, None)
try:
# If column indexes were not passwed, determine from metadata
if chrom_col is None:
chrom_col = int(dataset.metadata.chromCol) - 1
if start_col is None:
start_col = int(dataset.metadata.startCol) - 1
if end_col is None:
end_col = int(dataset.metadata.endCol) - 1
# Scan lines of file to find a reasonable chromosome and range
chrom = None
start = sys.maxsize
end = 0
max_col = max(chrom_col, start_col, end_col)
with compression_utils.get_fileobj(dataset.file_name) as fh:
for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
# Skip comment lines
if not line.startswith('#'):
try:
fields = line.rstrip().split('\t')
if len(fields) > max_col:
if chrom is None or chrom == fields[chrom_col]:
start = min(start, int(fields[start_col]))
end = max(end, int(fields[end_col]))
# Set chrom last, in case start and end are not integers
chrom = fields[chrom_col]
viewport_feature_count -= 1
except Exception:
# Most likely a non-integer field has been encountered
# for start / stop. Just ignore and make sure we finish
# reading the line and decrementing the counters.
pass
# Make sure we are at the next new line
readline_count = VIEWPORT_MAX_READS_PER_LINE
while line.rstrip('\n\r') == line:
assert readline_count > 0, Exception('Viewport readline count exceeded for dataset %s.' % dataset.id)
line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
if not line:
break # EOF
readline_count -= 1
max_line_count -= 1
if not viewport_feature_count or not max_line_count:
# exceeded viewport or total line count to check
break
if chrom is not None:
return (chrom, str(start), str(end)) # Necessary to return strings?
except Exception:
# Unexpected error, possibly missing metadata
log.exception("Exception caught attempting to generate viewport for dataset '%d'", dataset.id)
return (None, None, None)
def as_ucsc_display_file(self, dataset, **kwd):
"""Returns file contents with only the bed data"""
with tempfile.NamedTemporaryFile(delete=False, mode='w') as fh:
c, s, e, t, n = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol or 0, dataset.metadata.nameCol or 0
c, s, e, t, n = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1, int(n) - 1
if t >= 0: # strand column (should) exists
for i, elems in enumerate(compression_utils.file_iter(dataset.file_name)):
strand = "+"
name = "region_%i" % i
if n >= 0 and n < len(elems):
name = elems[n]
if t < len(elems):
strand = elems[t]
tmp = [elems[c], elems[s], elems[e], name, '0', strand]
fh.write('%s\n' % '\t'.join(tmp))
elif n >= 0: # name column (should) exists
for i, elems in enumerate(compression_utils.file_iter(dataset.file_name)):
name = "region_%i" % i
if n >= 0 and n < len(elems):
name = elems[n]
tmp = [elems[c], elems[s], elems[e], name]
fh.write('%s\n' % '\t'.join(tmp))
else:
for elems in compression_utils.file_iter(dataset.file_name):
tmp = [elems[c], elems[s], elems[e]]
fh.write('%s\n' % '\t'.join(tmp))
return compression_utils.get_fileobj(fh.name, mode='rb')
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_parameter_alias={'chromCol': 'Chrom', 'startCol': 'Start', 'endCol': 'End', 'strandCol': 'Strand', 'nameCol': 'Name'})
def ucsc_links(self, dataset, type, app, base_url):
"""
Generate links to UCSC genome browser sites based on the dbkey
and content of dataset.
"""
# Filter UCSC sites to only those that are supported by this build and
# enabled.
valid_sites = [(name, url)
for name, url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey)
if name in app.datatypes_registry.get_display_sites('ucsc')]
if not valid_sites:
return []
# If there are any valid sites, we need to generate the estimated
# viewport
chrom, start, stop = self.get_estimated_display_viewport(dataset)
if chrom is None:
return []
# Accumulate links for valid sites
ret_val = []
for site_name, site_url in valid_sites:
internal_url = app.url_for(controller='dataset', dataset_id=dataset.id,
action='display_at', filename='ucsc_' + site_name)
display_url = quote_plus("%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" %
(base_url, app.url_for(controller='root'), dataset.id, type))
redirect_url = quote_plus("%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" %
(site_url, dataset.dbkey, chrom, start, stop))
link = '{}?redirect_url={}&display_url={}'.format(internal_url, redirect_url, display_url)
ret_val.append((site_name, link))
return ret_val
def validate(self, dataset, **kwd):
"""Validate an interval file using the bx GenomicIntervalReader"""
c, s, e, t = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol
c, s, e, t = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1
with compression_utils.get_fileobj(dataset.file_name, "r") as infile:
reader = GenomicIntervalReader(
infile,
chrom_col=c,
start_col=s,
end_col=e,
strand_col=t)
while True:
try:
next(reader)
except ParseError as e:
return DatatypeValidation.invalid(util.unicodify(e))
except StopIteration:
return DatatypeValidation.valid()
def repair_methods(self, dataset):
"""Return options for removing errors along with a description"""
return [("lines", "Remove erroneous lines")]
def sniff_prefix(self, file_prefix):
"""
Checks for 'intervalness'
This format is mostly used by galaxy itself. Valid interval files should include
a valid header comment, but this seems to be loosely regulated.
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test_space.txt' )
>>> Interval().sniff( fname )
False
>>> fname = get_test_fname( 'interval.interval' )
>>> Interval().sniff( fname )
True
"""
found_valid_lines = False
try:
headers = iter_headers(file_prefix, '\t', comment_designator='#')
# If we got here, we already know the file is_column_based and is not bed,
# so we'll just look for some valid data.
for hdr in headers:
if hdr:
if len(hdr) < 3:
return False
# Assume chrom start and end are in column positions 1 and 2
# respectively ( for 0 based columns )
int(hdr[1])
int(hdr[2])
found_valid_lines = True
except Exception:
return False
return found_valid_lines
def get_track_resolution(self, dataset, start, end):
return None
# ------------- Dataproviders
@dataproviders.decorators.dataprovider_factory('genomic-region',
dataproviders.dataset.GenomicRegionDataProvider.settings)
def genomic_region_dataprovider(self, dataset, **settings):
return dataproviders.dataset.GenomicRegionDataProvider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('genomic-region-dict',
dataproviders.dataset.GenomicRegionDataProvider.settings)
def genomic_region_dict_dataprovider(self, dataset, **settings):
settings['named_columns'] = True
return self.genomic_region_dataprovider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('interval',
dataproviders.dataset.IntervalDataProvider.settings)
def interval_dataprovider(self, dataset, **settings):
return dataproviders.dataset.IntervalDataProvider(dataset, **settings)
@dataproviders.decorators.dataprovider_factory('interval-dict',
dataproviders.dataset.IntervalDataProvider.settings)
def interval_dict_dataprovider(self, dataset, **settings):
settings['named_columns'] = True
return self.interval_dataprovider(dataset, **settings)
class BedGraph(Interval):
"""Tab delimited chrom/start/end/datavalue dataset"""
edam_format = "format_3583"
file_ext = "bedgraph"
track_type = "LineTrack"
data_sources = {"data": "bigwig", "index": "bigwig"}
def as_ucsc_display_file(self, dataset, **kwd):
"""
Returns file contents as is with no modifications.
TODO: this is a functional stub and will need to be enhanced moving forward to provide additional support for bedgraph.
"""
return open(dataset.file_name, 'rb')
def get_estimated_display_viewport(self, dataset, chrom_col=0, start_col=1, end_col=2):
"""
Set viewport based on dataset's first 100 lines.
"""
return Interval.get_estimated_display_viewport(self, dataset, chrom_col=chrom_col, start_col=start_col, end_col=end_col)
class Bed(Interval):
"""Tab delimited data in BED format"""
edam_format = "format_3003"
file_ext = "bed"
data_sources = {"data": "tabix", "index": "bigwig", "feature_search": "fli"}
track_type = Interval.track_type
column_names = ['Chrom', 'Start', 'End', 'Name', 'Score', 'Strand', 'ThickStart', 'ThickEnd', 'ItemRGB', 'BlockCount', 'BlockSizes', 'BlockStarts']
"""Add metadata elements"""
MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
MetadataElement(name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
MetadataElement(name="viz_filter_cols", desc="Score column for visualization", default=[4], param=metadata.ColumnParameter, optional=True, multiple=True)
# do we need to repeat these? they are the same as should be inherited from interval type
def set_meta(self, dataset, overwrite=True, **kwd):
"""Sets the metadata information for datasets previously determined to be in bed format."""
i = 0
if dataset.has_data():
for i, line in enumerate(open(dataset.file_name)):
line = line.rstrip('\r\n')
if line and not line.startswith('#'):
elems = line.split('\t')
if len(elems) > 2:
if len(elems) > 3:
if overwrite or not dataset.metadata.element_is_set('nameCol'):
dataset.metadata.nameCol = 4
if len(elems) < 6:
if overwrite or not dataset.metadata.element_is_set('strandCol'):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set('strandCol'):
dataset.metadata.strandCol = 6
break
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)
def as_ucsc_display_file(self, dataset, **kwd):
"""Returns file contents with only the bed data. If bed 6+, treat as interval."""
for line in open(dataset.file_name):
line = line.strip()
if line == "" or line.startswith("#"):
continue
fields = line.split('\t')
"""check to see if this file doesn't conform to strict genome browser accepted bed"""
try:
if len(fields) > 12:
return Interval.as_ucsc_display_file(self, dataset) # too many fields
if len(fields) > 6:
int(fields[6])
if len(fields) > 7:
int(fields[7])
if len(fields) > 8:
if int(fields[8]) != 0:
return Interval.as_ucsc_display_file(self, dataset)
if len(fields) > 9:
int(fields[9])
if len(fields) > 10:
fields2 = fields[10].rstrip(",").split(",") # remove trailing comma and split on comma
for field in fields2:
int(field)
if len(fields) > 11:
fields2 = fields[11].rstrip(",").split(",") # remove trailing comma and split on comma
for field in fields2:
int(field)
except Exception:
return Interval.as_ucsc_display_file(self, dataset)
# only check first line for proper form
break
try:
return open(dataset.file_name, 'rb')
except Exception:
return "This item contains no content"
def sniff_prefix(self, file_prefix):
"""
Checks for 'bedness'
BED lines have three required fields and nine additional optional fields.
The number of fields per line must be consistent throughout any single set of data in
an annotation track. The order of the optional fields is binding: lower-numbered
fields must always be populated if higher-numbered fields are used. The data type of
all 12 columns is:
1-str, 2-int, 3-int, 4-str, 5-int, 6-str, 7-int, 8-int, 9-int or list, 10-int, 11-list, 12-list
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format1
>>> from galaxy.datatypes.sniff import get_test_fname
>>> fname = get_test_fname( 'test_tab.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'interv1.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'complete.bed' )
>>> Bed().sniff( fname )
True
"""
if not get_headers(file_prefix, '\t', comment_designator='#', count=1):
return False
try:
headers = iter_headers(file_prefix, '\t', comment_designator='#')
for hdr in headers:
if hdr[0] == '':
continue
if len(hdr) < 3 or len(hdr) > 12:
return False
try:
int(hdr[1])
int(hdr[2])
except Exception:
return False
if len(hdr) > 4:
# hdr[3] is a string, 'name', which defines the name of the BED line - difficult to test for this.
# hdr[4] is an int, 'score', a score between 0 and 1000.
try:
if int(hdr[4]) < 0 or int(hdr[4]) > 1000:
return False
except Exception:
return False
if len(hdr) > 5:
# hdr[5] is strand
if hdr[5] not in data.valid_strand:
return False
if len(hdr) > 6:
# hdr[6] is thickStart, the starting position at which the feature is drawn thickly.
try:
int(hdr[6])
except Exception:
return False
if len(hdr) > 7:
# hdr[7] is thickEnd, the ending position at which the feature is drawn thickly
try:
int(hdr[7])
except Exception:
return False
if len(hdr) > 8:
# hdr[8] is itemRgb, an RGB value of the form R,G,B (e.g. 255,0,0). However, this could also be an int (e.g., 0)
try:
int(hdr[8])
except Exception:
try:
hdr[8].split(',')
except Exception:
return False
if len(hdr) > 9:
# hdr[9] is blockCount, the number of blocks (exons) in the BED line.
try:
block_count = int(hdr[9])
except Exception:
return False
if len(hdr) > 10:
# hdr[10] is blockSizes - A comma-separated list of the block sizes.
# Sometimes the blosck_sizes and block_starts lists end in extra commas
try:
block_sizes = hdr[10].rstrip(',').split(',')
except Exception:
return False
if len(hdr) > 11:
# hdr[11] is blockStarts - A comma-separated list of block starts.
try:
block_starts = hdr[11].rstrip(',').split(',')
except Exception:
return False
if len(block_sizes) != block_count or len(block_starts) != block_count:
return False
return True
except Exception:
return False
class ProBed(Bed):
"""Tab delimited data in proBED format - adaptation of BED for proteomics data."""
edam_format = "format_3827"
file_ext = "probed"
column_names = ['Chrom', 'Start', 'End', 'Name', 'Score', 'Strand', 'ThickStart', 'ThickEnd', 'ItemRGB', 'BlockCount', 'BlockSizes', 'BlockStarts', 'ProteinAccession', 'PeptideSequence', 'Uniqueness', 'GenomeReferenceVersion', 'PsmScore', 'Fdr', 'Modifications', 'Charge', 'ExpMassToCharge', 'CalcMassToCharge', 'PsmRank', 'DatasetID', 'Uri']
class BedStrict(Bed):
"""Tab delimited data in strict BED format - no non-standard columns allowed"""
edam_format = "format_3584"
file_ext = "bedstrict"
# no user change of datatype allowed
allow_datatype_change = False
# Read only metadata elements
MetadataElement(name="chromCol", default=1, desc="Chrom column", readonly=True, param=metadata.MetadataParameter)
MetadataElement(name="startCol", default=2, desc="Start column", readonly=True, param=metadata.MetadataParameter) # TODO: start and end should be able to be set to these or the proper thick[start/end]?
MetadataElement(name="endCol", default=3, desc="End column", readonly=True, param=metadata.MetadataParameter)
MetadataElement(name="strandCol", desc="Strand column (click box & select)", readonly=True, param=metadata.MetadataParameter, no_value=0, optional=True)
MetadataElement(name="nameCol", desc="Name/Identifier column (click box & select)", readonly=True, param=metadata.MetadataParameter, no_value=0, optional=True)
MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
def __init__(self, **kwd):
Tabular.__init__(self, **kwd)
self.clear_display_apps() # only new style display applications for this datatype
def set_meta(self, dataset, overwrite=True, **kwd):
Tabular.set_meta(self, dataset, overwrite=overwrite, **kwd) # need column count first
if dataset.metadata.columns >= 4:
dataset.metadata.nameCol = 4
if dataset.metadata.columns >= 6:
dataset.metadata.strandCol = 6
def sniff(self, filename):
return False # NOTE: This would require aggressively validating the entire file
class Bed6(BedStrict):
"""Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 6"""
edam_format = "format_3585"
file_ext = "bed6"
class Bed12(BedStrict):
"""Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 12"""
edam_format = "format_3586"
file_ext = "bed12"
class _RemoteCallMixin:
def _get_remote_call_url(self, redirect_url, site_name, dataset, type, app, base_url):
"""Retrieve the URL to call out to an external site and retrieve data.
This routes our external URL through a local galaxy instance which makes
the data available, followed by redirecting to the remote site with a
link back to the available information.
"""
internal_url = "%s" % app.url_for(controller='dataset', dataset_id=dataset.id, action='display_at', filename='{}_{}'.format(type, site_name))
base_url = app.config.get("display_at_callback", base_url)
display_url = quote_plus("%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" %
(base_url, app.url_for(controller='root'), dataset.id, type))
link = '{}?redirect_url={}&display_url={}'.format(internal_url, redirect_url, display_url)
return link
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Gff(Tabular, _RemoteCallMixin):
"""Tab delimited data in Gff format"""
edam_data = "data_1255"
edam_format = "format_2305"
file_ext = "gff"
valid_gff_frame = ['.', '0', '1', '2']
column_names = ['Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Group']
data_sources = {"data": "interval_index", "index": "bigwig", "feature_search": "fli"}
track_type = Interval.track_type
"""Add metadata elements"""
MetadataElement(name="columns", default=9, desc="Number of columns", readonly=True, visible=False)
MetadataElement(name="column_types", default=['str', 'str', 'str', 'int', 'int', 'int', 'str', 'str', 'str'],
param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False)
MetadataElement(name="attributes", default=0, desc="Number of attributes", readonly=True, visible=False, no_value=0)
MetadataElement(name="attribute_types", default={}, desc="Attribute types", param=metadata.DictParameter, readonly=True, visible=False, no_value=[])
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Tabular.__init__(self, **kwd)
self.add_display_app('ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links')
self.add_display_app('gbrowse', 'display in Gbrowse', 'as_gbrowse_display_file', 'gbrowse_links')
def set_attribute_metadata(self, dataset):
"""
Sets metadata elements for dataset's attributes.
"""
# Use first N lines to set metadata for dataset attributes. Attributes
# not found in the first N lines will not have metadata.
num_lines = 200
attribute_types = {}
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh):
if line and not line.startswith('#'):
elems = line.split('\t')
if len(elems) == 9:
try:
# Loop through attributes to set types.
for name, value in parse_gff_attributes(elems[8]).items():
# Default type is string.
value_type = "str"
try:
# Try int.
int(value)
value_type = "int"
except ValueError:
try:
# Try float.
float(value)
value_type = "float"
except ValueError:
pass
attribute_types[name] = value_type
except Exception:
pass
if i + 1 == num_lines:
break
# Set attribute metadata and then set additional metadata.
dataset.metadata.attribute_types = attribute_types
dataset.metadata.attributes = len(attribute_types)
def set_meta(self, dataset, overwrite=True, **kwd):
self.set_attribute_metadata(dataset)
i = 0
with compression_utils.get_fileobj(dataset.file_name) as in_fh:
for i, line in enumerate(in_fh):
line = line.rstrip('\r\n')
if line and not line.startswith('#'):
elems = line.split('\t')
if len(elems) == 9:
try:
int(elems[3])
int(elems[4])
break
except Exception:
pass
Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_names=self.column_names)
    def get_estimated_display_viewport(self, dataset):
        """
        Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
        formats. This function should correctly handle both...

        Returns (None, None, None) when no viewport can be determined.
        """
        viewport_feature_count = 100  # viewport should check at least 100 features; excludes comment lines
        max_line_count = max(viewport_feature_count, 500)  # maximum number of lines to check; includes comment lines
        if self.displayable(dataset):
            try:
                seqid = None
                start = sys.maxsize
                stop = 0
                with compression_utils.get_fileobj(dataset.file_name) as fh:
                    for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
                        try:
                            if line.startswith('##sequence-region'):  # ##sequence-region IV 6000000 6030000
                                elems = line.rstrip('\n\r').split()
                                if len(elems) > 3:
                                    # line looks like:
                                    # sequence-region ctg123 1 1497228
                                    seqid = elems[1]  # IV
                                    start = int(elems[2])  # 6000000
                                    stop = int(elems[3])  # 6030000
                                    break  # use location declared in file
                                elif len(elems) == 2 and elems[1].find('..') > 0:
                                    # line looks like this:
                                    # sequence-region X:120000..140000
                                    elems = elems[1].split(':')
                                    seqid = elems[0]
                                    start = int(elems[1].split('..')[0])
                                    stop = int(elems[1].split('..')[1])
                                    break  # use location declared in file
                                else:
                                    log.debug("line (%s) uses an unsupported ##sequence-region definition." % str(line))
                                    # break #no break, if bad definition, we try another line
                            elif line.startswith("browser position"):
                                # Allow UCSC style browser and track info in the GFF file
                                pos_info = line.split()[-1]
                                seqid, startend = pos_info.split(":")
                                start, stop = map(int, startend.split("-"))
                                break  # use location declared in file
                            elif not line.startswith(('#', 'track', 'browser')):
                                # Data line: widen the viewport to include it.
                                viewport_feature_count -= 1
                                elems = line.rstrip('\n\r').split('\t')
                                if len(elems) > 3:
                                    if not seqid:
                                        # We can only set the viewport for a single chromosome
                                        seqid = elems[0]
                                    if seqid == elems[0]:
                                        # Make sure we have not spanned chromosomes
                                        start = min(start, int(elems[3]))
                                        stop = max(stop, int(elems[4]))
                        except Exception:
                            # most likely start/stop is not an int or not enough fields
                            pass
                        # make sure we are at the next new line
                        readline_count = VIEWPORT_MAX_READS_PER_LINE
                        while line.rstrip('\n\r') == line:
                            assert readline_count > 0, Exception('Viewport readline count exceeded for dataset %s.' % dataset.id)
                            line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
                            if not line:
                                break  # EOF
                            readline_count -= 1
                        max_line_count -= 1
                        if not viewport_feature_count or not max_line_count:
                            # exceeded viewport or total line count to check
                            break
                if seqid is not None:
                    return (seqid, str(start), str(stop))  # Necessary to return strings?
            except Exception:
                log.exception('Unexpected error')
        return (None, None, None)  # could not determine viewport
def ucsc_links(self, dataset, type, app, base_url):
ret_val = []
seqid, start, stop = self.get_estimated_display_viewport(dataset)
if seqid is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites('ucsc'):
redirect_url = quote_plus(
"%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" %
(site_url, dataset.dbkey, seqid, start, stop))
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
def gbrowse_links(self, dataset, type, app, base_url):
ret_val = []
seqid, start, stop = self.get_estimated_display_viewport(dataset)
if seqid is not None:
for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('gbrowse', dataset.dbkey):
if site_name in app.datatypes_registry.get_display_sites('gbrowse'):
if seqid.startswith('chr') and len(seqid) > 3:
seqid = seqid[3:]
redirect_url = quote_plus("{}/?q={}:{}..{}&eurl=%s".format(site_url, seqid, start, stop))
link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
ret_val.append((site_name, link))
return ret_val
    def sniff_prefix(self, file_prefix):
        """
        Determines whether the file is in gff format

        GFF lines have nine required fields that must be tab-separated.
        For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format3

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname('gff.gff3')
        >>> Gff().sniff( fname )
        False
        >>> fname = get_test_fname('test.gff')
        >>> Gff().sniff( fname )
        True
        """
        # Need at least two tab-separated lines to call it GFF at all.
        if len(get_headers(file_prefix, '\t', count=2)) < 2:
            return False
        try:
            headers = iter_headers(file_prefix, '\t')
            for hdr in headers:
                # A '##gff-version' pragma naming a version other than 2 rules
                # this format out.
                if hdr and hdr[0].startswith('##gff-version') and hdr[0].find('2') < 0:
                    return False
                # First non-comment line decides: validate all nine columns.
                if hdr and hdr[0] and not hdr[0].startswith('#'):
                    if len(hdr) != 9:
                        return False
                    try:
                        int(hdr[3])
                        int(hdr[4])
                    except Exception:
                        return False
                    if hdr[5] != '.':
                        try:
                            float(hdr[5])
                        except Exception:
                            return False
                    if hdr[6] not in data.valid_strand:
                        return False
                    if hdr[7] not in self.valid_gff_frame:
                        return False
                    return True
        except Exception:
            return False
    # ------------- Dataproviders
    # redefine bc super is Tabular
    @dataproviders.decorators.dataprovider_factory('genomic-region',
                                                   dataproviders.dataset.GenomicRegionDataProvider.settings)
    def genomic_region_dataprovider(self, dataset, **settings):
        """Provide genomic regions using GFF columns 0 (seqid), 3 (start) and 4 (end)."""
        return dataproviders.dataset.GenomicRegionDataProvider(dataset, 0, 3, 4, **settings)
    @dataproviders.decorators.dataprovider_factory('genomic-region-dict',
                                                   dataproviders.dataset.GenomicRegionDataProvider.settings)
    def genomic_region_dict_dataprovider(self, dataset, **settings):
        """Same as genomic_region_dataprovider but yields dicts keyed by column name."""
        settings['named_columns'] = True
        return self.genomic_region_dataprovider(dataset, **settings)
    @dataproviders.decorators.dataprovider_factory('interval',
                                                   dataproviders.dataset.IntervalDataProvider.settings)
    def interval_dataprovider(self, dataset, **settings):
        """Provide intervals using GFF columns 0, 3, 4, 6 (strand) and 2 (type/name)."""
        return dataproviders.dataset.IntervalDataProvider(dataset, 0, 3, 4, 6, 2, **settings)
    @dataproviders.decorators.dataprovider_factory('interval-dict',
                                                   dataproviders.dataset.IntervalDataProvider.settings)
    def interval_dict_dataprovider(self, dataset, **settings):
        """Same as interval_dataprovider but yields dicts keyed by column name."""
        settings['named_columns'] = True
        return self.interval_dataprovider(dataset, **settings)
class Gff3(Gff):
    """Tab delimited data in Gff3 format"""
    edam_format = "format_1975"
    file_ext = "gff3"
    valid_gff3_strand = ['+', '-', '.', '?']
    valid_gff3_phase = Gff.valid_gff_frame
    column_names = ['Seqid', 'Source', 'Type', 'Start', 'End', 'Score', 'Strand', 'Phase', 'Attributes']
    track_type = Interval.track_type

    """Add metadata elements"""
    MetadataElement(name="column_types", default=['str', 'str', 'str', 'int', 'int', 'float', 'str', 'int', 'list'],
                    param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False)

    def __init__(self, **kwd):
        """Initialize datatype, by adding GBrowse display app"""
        Gff.__init__(self, **kwd)

    def set_meta(self, dataset, overwrite=True, **kwd):
        """Set attribute and column metadata, skipping lines before the first valid data row."""
        self.set_attribute_metadata(dataset)
        i = 0
        with compression_utils.get_fileobj(dataset.file_name) as in_fh:
            for i, line in enumerate(in_fh):
                line = line.rstrip('\r\n')
                if line and not line.startswith('#'):
                    elems = line.split('\t')
                    # GFF3 allows '.' for an undefined start/end.  Track such
                    # coordinates as None so the start < end sanity check below
                    # is skipped for them; previously an undefined coordinate
                    # left start/end unbound (NameError on the first data line)
                    # or compared a stale value from an earlier line.
                    start = None
                    end = None
                    valid_start = False
                    valid_end = False
                    if len(elems) == 9:
                        try:
                            start = int(elems[3])
                            valid_start = True
                        except Exception:
                            if elems[3] == '.':
                                valid_start = True
                        try:
                            end = int(elems[4])
                            valid_end = True
                        except Exception:
                            if elems[4] == '.':
                                valid_end = True
                        strand = elems[6]
                        phase = elems[7]
                        if (valid_start and valid_end and
                                (start is None or end is None or start < end) and
                                strand in self.valid_gff3_strand and phase in self.valid_gff3_phase):
                            break
        Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)

    def sniff_prefix(self, file_prefix):
        """
        Determines whether the file is in GFF version 3 format

        GFF 3 format:

        1) adds a mechanism for representing more than one level
           of hierarchical grouping of features and subfeatures.
        2) separates the ideas of group membership and feature name/id
        3) constrains the feature type field to be taken from a controlled
           vocabulary.
        4) allows a single feature, such as an exon, to belong to more than
           one group at a time.
        5) provides an explicit convention for pairwise alignments
        6) provides an explicit convention for features that occupy disjunct regions

        The format consists of 9 columns, separated by tabs (NOT spaces).
        Undefined fields are replaced with the "." character, as described in the original GFF spec.
        For complete details see http://song.sourceforge.net/gff3.shtml

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( 'test.gff' )
        >>> Gff3().sniff( fname )
        False
        >>> fname = get_test_fname( 'test.gtf' )
        >>> Gff3().sniff( fname )
        False
        >>> fname = get_test_fname('gff.gff3')
        >>> Gff3().sniff( fname )
        True
        """
        if len(get_headers(file_prefix, '\t', count=2)) < 2:
            return False
        try:
            headers = iter_headers(file_prefix, '\t')
            for hdr in headers:
                # An explicit version pragma is decisive either way.
                if hdr and hdr[0].startswith('##gff-version') and hdr[0].find('3') >= 0:
                    return True
                elif hdr and hdr[0].startswith('##gff-version') and hdr[0].find('3') < 0:
                    return False
                # Header comments may have been stripped, so inspect the data
                if hdr and hdr[0] and not hdr[0].startswith('#'):
                    if len(hdr) != 9:
                        return False
                    try:
                        int(hdr[3])
                    except Exception:
                        if hdr[3] != '.':
                            return False
                    try:
                        int(hdr[4])
                    except Exception:
                        if hdr[4] != '.':
                            return False
                    if hdr[5] != '.':
                        try:
                            float(hdr[5])
                        except Exception:
                            return False
                    if hdr[6] not in self.valid_gff3_strand:
                        return False
                    if hdr[7] not in self.valid_gff3_phase:
                        return False
                    # Attribute column must be parseable as GFF3 attributes.
                    parse_gff3_attributes(hdr[8])
                    return True
        except Exception:
            return False
class Gtf(Gff):
    """Tab delimited data in Gtf format"""
    edam_format = "format_2306"
    file_ext = "gtf"
    column_names = ['Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Attributes']
    track_type = Interval.track_type

    """Add metadata elements"""
    MetadataElement(name="columns", default=9, desc="Number of columns", readonly=True, visible=False)
    MetadataElement(name="column_types", default=['str', 'str', 'str', 'int', 'int', 'float', 'str', 'int', 'list'],
                    param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False)

    def sniff_prefix(self, file_prefix):
        """
        Determines whether the file is in gtf format

        GTF lines have nine required fields that must be tab-separated. The first eight GTF fields are the same as GFF.
        The group field has been expanded into a list of attributes. Each attribute consists of a type/value pair.
        Attributes must end in a semi-colon, and be separated from any following attribute by exactly one space.
        The attribute list must begin with the two mandatory attributes:

            gene_id value - A globally unique identifier for the genomic source of the sequence.
            transcript_id value - A globally unique identifier for the predicted transcript.

        For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format4

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( '1.bed' )
        >>> Gtf().sniff( fname )
        False
        >>> fname = get_test_fname( 'test.gff' )
        >>> Gtf().sniff( fname )
        False
        >>> fname = get_test_fname( 'test.gtf' )
        >>> Gtf().sniff( fname )
        True
        """
        if len(get_headers(file_prefix, '\t', count=2)) < 2:
            return False
        try:
            headers = iter_headers(file_prefix, '\t')
            for hdr in headers:
                # GTF is GFF2-based: any version pragma not naming 2 rules it out.
                if hdr and hdr[0].startswith('##gff-version') and hdr[0].find('2') < 0:
                    return False
                # First non-comment line decides: validate fields + mandatory attributes.
                if hdr and hdr[0] and not hdr[0].startswith('#'):
                    if len(hdr) != 9:
                        return False
                    try:
                        int(hdr[3])
                        int(hdr[4])
                    except Exception:
                        return False
                    if hdr[5] != '.':
                        try:
                            float(hdr[5])
                        except Exception:
                            return False
                    if hdr[6] not in data.valid_strand:
                        return False
                    if hdr[7] not in self.valid_gff_frame:
                        return False
                    # Check attributes for gene_id, transcript_id
                    attributes = parse_gff_attributes(hdr[8])
                    if len(attributes) >= 2:
                        if 'gene_id' not in attributes:
                            return False
                        if 'transcript_id' not in attributes:
                            return False
                    else:
                        return False
                    return True
        except Exception:
            return False
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Wiggle(Tabular, _RemoteCallMixin):
    """Tab delimited data in wiggle format"""
    edam_format = "format_3005"
    file_ext = "wig"
    track_type = "LineTrack"
    data_sources = {"data": "bigwig", "index": "bigwig"}

    MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)

    def __init__(self, **kwd):
        """Initialize the datatype and register the UCSC and GBrowse display apps."""
        Tabular.__init__(self, **kwd)
        self.add_display_app('ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links')
        self.add_display_app('gbrowse', 'display in Gbrowse', 'as_gbrowse_display_file', 'gbrowse_links')

    def get_estimated_display_viewport(self, dataset):
        """Return a chrom, start, stop tuple for viewing a file."""
        viewport_feature_count = 100  # viewport should check at least 100 features; excludes comment lines
        max_line_count = max(viewport_feature_count, 500)  # maximum number of lines to check; includes comment lines
        if self.displayable(dataset):
            try:
                chrom = None
                start = sys.maxsize
                end = 0
                span = 1
                step = None
                with open(dataset.file_name) as fh:
                    for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
                        try:
                            if line.startswith("browser"):
                                chr_info = line.rstrip('\n\r').split()[-1]
                                chrom, coords = chr_info.split(":")
                                start, end = map(int, coords.split("-"))
                                break  # use the browser line
                            # variableStep chrom=chr20
                            if line and (line.lower().startswith("variablestep") or line.lower().startswith("fixedstep")):
                                if chrom is not None:
                                    break  # different chrom or different section of the chrom
                                chrom = line.rstrip('\n\r').split("chrom=")[1].split()[0]
                                if 'span=' in line:
                                    span = int(line.rstrip('\n\r').split("span=")[1].split()[0])
                                if 'step=' in line:
                                    step = int(line.rstrip('\n\r').split("step=")[1].split()[0])
                                    start = int(line.rstrip('\n\r').split("start=")[1].split()[0])
                            else:
                                # Data line: variableStep is "pos value", fixedStep is "value".
                                fields = line.rstrip('\n\r').split()
                                if fields:
                                    if step is not None:
                                        if not end:
                                            end = start + span
                                        else:
                                            end += step
                                    else:
                                        start = min(int(fields[0]), start)
                                        end = max(end, int(fields[0]) + span)
                                    viewport_feature_count -= 1
                        except Exception:
                            pass
                        # make sure we are at the next new line
                        readline_count = VIEWPORT_MAX_READS_PER_LINE
                        while line.rstrip('\n\r') == line:
                            assert readline_count > 0, Exception('Viewport readline count exceeded for dataset %s.' % dataset.id)
                            line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
                            if not line:
                                break  # EOF
                            readline_count -= 1
                        max_line_count -= 1
                        if not viewport_feature_count or not max_line_count:
                            # exceeded viewport or total line count to check
                            break
                if chrom is not None:
                    return (chrom, str(start), str(end))  # Necessary to return strings?
            except Exception:
                log.exception('Unexpected error')
        return (None, None, None)  # could not determine viewport

    def gbrowse_links(self, dataset, type, app, base_url):
        """Build (site_name, link) pairs for displaying this dataset in GBrowse."""
        ret_val = []
        chrom, start, stop = self.get_estimated_display_viewport(dataset)
        if chrom is not None:
            for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('gbrowse', dataset.dbkey):
                if site_name in app.datatypes_registry.get_display_sites('gbrowse'):
                    # GBrowse expects sequence ids without a UCSC-style 'chr' prefix.
                    if chrom.startswith('chr') and len(chrom) > 3:
                        chrom = chrom[3:]
                    redirect_url = quote_plus("{}/?q={}:{}..{}&eurl=%s".format(site_url, chrom, start, stop))
                    link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
                    ret_val.append((site_name, link))
        return ret_val

    def ucsc_links(self, dataset, type, app, base_url):
        """Build (site_name, link) pairs for displaying this dataset at UCSC."""
        ret_val = []
        chrom, start, stop = self.get_estimated_display_viewport(dataset)
        if chrom is not None:
            for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey):
                if site_name in app.datatypes_registry.get_display_sites('ucsc'):
                    redirect_url = quote_plus("{}db={}&position={}:{}-{}&hgt.customText=%s".format(site_url, dataset.dbkey, chrom, start, stop))
                    link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
                    ret_val.append((site_name, link))
        return ret_val

    def display_peek(self, dataset):
        """Returns formated html of peek"""
        return self.make_html_table(dataset, skipchars=['track', '#'])

    def set_meta(self, dataset, overwrite=True, **kwd):
        """Set column metadata, skipping track/declaration lines before the first data value."""
        max_data_lines = None
        i = 0
        # Use a context manager so the file handle is closed deterministically
        # (it was previously left to the garbage collector).
        with open(dataset.file_name) as in_fh:
            for i, line in enumerate(in_fh):
                line = line.rstrip('\r\n')
                if line and not line.startswith('#'):
                    elems = line.split('\t')
                    try:
                        # variableStep format is nucleotide position\tvalue\n,
                        # fixedStep is value\n
                        # "Wiggle track data values can be integer or real, positive or negative values"
                        float(elems[0])
                        break
                    except Exception:
                        # We are either in the track definition line or in a declaration line
                        pass
        if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
            # we'll arbitrarily only use the first 100 data lines in this wig file to calculate tabular attributes (column types)
            # this should be sufficient, except when we have mixed wig track types (bed, variable, fixed),
            # but those cases are not a single table that would have consistant column definitions
            # optional metadata values set in Tabular class will be 'None'
            max_data_lines = 100
        Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i, max_data_lines=max_data_lines)

    def sniff_prefix(self, file_prefix):
        """
        Determines wether the file is in wiggle format

        The .wig format is line-oriented. Wiggle data is preceeded by a track definition line,
        which adds a number of options for controlling the default display of this track.
        Following the track definition line is the track data, which can be entered in several
        different formats.

        The track definition line begins with the word 'track' followed by the track type.
        The track type with version is REQUIRED, and it currently must be wiggle_0. For example,
        track type=wiggle_0...

        For complete details see http://genome.ucsc.edu/goldenPath/help/wiggle.html

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( 'interv1.bed' )
        >>> Wiggle().sniff( fname )
        False
        >>> fname = get_test_fname( 'wiggle.wig' )
        >>> Wiggle().sniff( fname )
        True
        """
        try:
            headers = iter_headers(file_prefix, None)
            for hdr in headers:
                if len(hdr) > 1 and hdr[0] == 'track' and hdr[1].startswith('type=wiggle'):
                    return True
            return False
        except Exception:
            return False

    def get_track_resolution(self, dataset, start, end):
        """Return a display resolution aiming for ~1000 plotted points across [start, end)."""
        # Renamed from 'range', which shadowed the builtin of the same name.
        region_size = end - start
        # Determine appropriate resolution to plot ~1000 points
        resolution = math.ceil(10 ** math.ceil(math.log10(region_size / 1000)))
        # Restrict to valid range
        resolution = min(resolution, 100000)
        resolution = max(resolution, 1)
        return resolution

    # ------------- Dataproviders
    @dataproviders.decorators.dataprovider_factory('wiggle', dataproviders.dataset.WiggleDataProvider.settings)
    def wiggle_dataprovider(self, dataset, **settings):
        """Provide wiggle records from the raw dataset."""
        dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
        return dataproviders.dataset.WiggleDataProvider(dataset_source, **settings)

    @dataproviders.decorators.dataprovider_factory('wiggle-dict', dataproviders.dataset.WiggleDataProvider.settings)
    def wiggle_dict_dataprovider(self, dataset, **settings):
        """Same as wiggle_dataprovider but yields dicts keyed by column name."""
        dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
        settings['named_columns'] = True
        return dataproviders.dataset.WiggleDataProvider(dataset_source, **settings)
@build_sniff_from_prefix
class CustomTrack(Tabular):
    """UCSC CustomTrack"""
    edam_format = "format_3588"
    file_ext = "customtrack"

    def __init__(self, **kwd):
        """Initialize interval datatype, by adding UCSC display app"""
        Tabular.__init__(self, **kwd)
        self.add_display_app('ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links')

    def set_meta(self, dataset, overwrite=True, **kwd):
        # The first line is the 'track ...' definition, so always skip one line.
        Tabular.set_meta(self, dataset, overwrite=overwrite, skip=1)

    def display_peek(self, dataset):
        """Returns formated html of peek"""
        return self.make_html_table(dataset, skipchars=['track', '#'])

    def get_estimated_display_viewport(self, dataset, chrom_col=None, start_col=None, end_col=None):
        """Return a chrom, start, stop tuple for viewing a file."""
        # FIXME: only BED and WIG custom tracks are currently supported
        # As per previously existing behavior, viewport will only be over the first intervals
        max_line_count = 100  # maximum number of lines to check; includes comment lines
        variable_step_wig = False
        chrom = None
        span = 1
        if self.displayable(dataset):
            try:
                with open(dataset.file_name) as fh:
                    for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
                        if not line.startswith('#'):
                            try:
                                if variable_step_wig:
                                    # Previous line was a variableStep header: expect "pos value".
                                    fields = line.rstrip().split()
                                    if len(fields) == 2:
                                        start = int(fields[0])
                                        return (chrom, str(start), str(start + span))
                                elif line and (line.lower().startswith("variablestep") or line.lower().startswith("fixedstep")):
                                    chrom = line.rstrip('\n\r').split("chrom=")[1].split()[0]
                                    if 'span=' in line:
                                        span = int(line.rstrip('\n\r').split("span=")[1].split()[0])
                                    if 'start=' in line:
                                        start = int(line.rstrip('\n\r').split("start=")[1].split()[0])
                                        return (chrom, str(start), str(start + span))
                                    else:
                                        variable_step_wig = True
                                else:
                                    # Assume BED-like data: chrom, start, end in the first columns.
                                    fields = line.rstrip().split('\t')
                                    if len(fields) >= 3:
                                        chrom = fields[0]
                                        start = int(fields[1])
                                        end = int(fields[2])
                                        return (chrom, str(start), str(end))
                            except Exception:
                                # most likely a non-integer field has been encountered for start / stop
                                continue
                        # make sure we are at the next new line
                        readline_count = VIEWPORT_MAX_READS_PER_LINE
                        while line.rstrip('\n\r') == line:
                            assert readline_count > 0, Exception('Viewport readline count exceeded for dataset %s.' % dataset.id)
                            line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
                            if not line:
                                break  # EOF
                            readline_count -= 1
                        max_line_count -= 1
                        if not max_line_count:
                            # exceeded viewport or total line count to check
                            break
            except Exception:
                log.exception('Unexpected error')
        return (None, None, None)  # could not determine viewport

    def ucsc_links(self, dataset, type, app, base_url):
        """Build (site_name, link) pairs for displaying the custom track at configured UCSC sites."""
        ret_val = []
        chrom, start, stop = self.get_estimated_display_viewport(dataset)
        if chrom is not None:
            for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey):
                if site_name in app.datatypes_registry.get_display_sites('ucsc'):
                    internal_url = "%s" % app.url_for(controller='dataset', dataset_id=dataset.id, action='display_at', filename='ucsc_' + site_name)
                    display_url = quote_plus("%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" % (base_url, app.url_for(controller='root'), dataset.id, type))
                    redirect_url = quote_plus("{}db={}&position={}:{}-{}&hgt.customText=%s".format(site_url, dataset.dbkey, chrom, start, stop))
                    link = '{}?redirect_url={}&display_url={}'.format(internal_url, redirect_url, display_url)
                    ret_val.append((site_name, link))
        return ret_val

    def sniff_prefix(self, file_prefix):
        """
        Determines whether the file is in customtrack format.

        CustomTrack files are built within Galaxy and are basically bed or interval files with the first line looking
        something like this.

        track name="User Track" description="User Supplied Track (from Galaxy)" color=0,0,0 visibility=1

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( 'complete.bed' )
        >>> CustomTrack().sniff( fname )
        False
        >>> fname = get_test_fname( 'ucsc.customtrack' )
        >>> CustomTrack().sniff( fname )
        True
        """
        headers = iter_headers(file_prefix, None)
        found_at_least_one_track = False
        first_line = True
        for hdr in headers:
            if first_line:
                # The first line must be a 'track ...' definition carrying both
                # a color and a visibility option.
                first_line = False
                try:
                    if hdr[0].startswith('track'):
                        color_found = False
                        visibility_found = False
                        for elem in hdr[1:]:
                            if elem.startswith('color'):
                                color_found = True
                            if elem.startswith('visibility'):
                                visibility_found = True
                            if color_found and visibility_found:
                                break
                        if not color_found or not visibility_found:
                            return False
                    else:
                        return False
                except Exception:
                    return False
            else:
                # Subsequent lines must look like interval data (>= 3 columns
                # with integer start/end).
                try:
                    if hdr[0] and not hdr[0].startswith('#'):
                        if len(hdr) < 3:
                            return False
                        try:
                            int(hdr[1])
                            int(hdr[2])
                        except Exception:
                            return False
                        found_at_least_one_track = True
                except Exception:
                    return False
        return found_at_least_one_track
class ENCODEPeak(Interval):
    '''
    Human ENCODE peak format. There are both broad and narrow peak formats.
    Formats are very similar; narrow peak has an additional column, though.

    Broad peak ( http://genome.ucsc.edu/FAQ/FAQformat#format13 ):
    This format is used to provide called regions of signal enrichment based
    on pooled, normalized (interpreted) data. It is a BED 6+3 format.

    Narrow peak http://genome.ucsc.edu/FAQ/FAQformat#format12 and :
    This format is used to provide called peaks of signal enrichment based on
    pooled, normalized (interpreted) data. It is a BED6+4 format.
    '''
    edam_format = "format_3612"
    file_ext = "encodepeak"
    column_names = ['Chrom', 'Start', 'End', 'Name', 'Score', 'Strand', 'SignalValue', 'pValue', 'qValue', 'Peak']
    data_sources = {"data": "tabix", "index": "bigwig"}

    """Add metadata elements"""
    MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
    MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
    MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
    MetadataElement(name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0)
    MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)

    def sniff(self, filename):
        # ENCODE peak data is indistinguishable from other BED-like interval
        # formats, so auto-detection is intentionally disabled.
        return False
class ChromatinInteractions(Interval):
    '''
    Chromatin interactions obtained from 3C/5C/Hi-C experiments.
    '''
    file_ext = "chrint"
    track_type = "DiagonalHeatmapTrack"
    data_sources = {"data": "tabix", "index": "bigwig"}
    column_names = ['Chrom1', 'Start1', 'End1', 'Chrom2', 'Start2', 'End2', 'Value']

    """Add metadata elements"""
    MetadataElement(name="chrom1Col", default=1, desc="Chrom1 column", param=metadata.ColumnParameter)
    MetadataElement(name="start1Col", default=2, desc="Start1 column", param=metadata.ColumnParameter)
    MetadataElement(name="end1Col", default=3, desc="End1 column", param=metadata.ColumnParameter)
    MetadataElement(name="chrom2Col", default=4, desc="Chrom2 column", param=metadata.ColumnParameter)
    MetadataElement(name="start2Col", default=5, desc="Start2 column", param=metadata.ColumnParameter)
    MetadataElement(name="end2Col", default=6, desc="End2 column", param=metadata.ColumnParameter)
    MetadataElement(name="valueCol", default=7, desc="Value column", param=metadata.ColumnParameter)
    MetadataElement(name="columns", default=7, desc="Number of columns", readonly=True, visible=False)

    def sniff(self, filename):
        # No reliable signature distinguishes this layout from generic tabular
        # data, so auto-detection is intentionally disabled.
        return False
@build_sniff_from_prefix
class ScIdx(Tabular):
    """
    ScIdx files are 1-based and consist of strand-specific coordinate counts.
    They always have 5 columns, and the first row is the column labels:
    'chrom', 'index', 'forward', 'reverse', 'value'.
    Each line following the first consists of data:
    chromosome name (type str), peak index (type int), Forward strand peak
    count (type int), Reverse strand peak count (type int) and value (type int).
    The value of the 5th 'value' column is the sum of the forward and reverse
    peak count values.
    """
    file_ext = "scidx"

    MetadataElement(name="columns", default=0, desc="Number of columns", readonly=True, visible=False)
    MetadataElement(name="column_types", default=[], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False, no_value=[])

    def __init__(self, **kwd):
        """
        Initialize scidx datatype.
        """
        Tabular.__init__(self, **kwd)
        # Don't set column names since the first
        # line of the dataset displays them.
        self.column_names = ['chrom', 'index', 'forward', 'reverse', 'value']

    def sniff_prefix(self, file_prefix):
        """
        Checks for 'scidx-ness.'

        Validates the leading comment line and up to 100 data lines; returns
        True only when every inspected data line has 5 columns with integer
        index/forward/reverse/value and value == forward + reverse.
        """
        count = 0
        fh = file_prefix.string_io()
        while True:
            line = fh.readline()
            if not line:
                # EOF
                if count > 1:
                    # The second line is always the labels:
                    # chrom index forward reverse value
                    # We need at least the column labels and a data line.
                    return True
                return False
            line = line.strip()
            # The first line is always a comment like this:
            # 2015-11-23 20:18:56.51;input.bam;READ1
            if count == 0:
                if line.startswith('#'):
                    count += 1
                    continue
                else:
                    return False
            # Skip first line.
            # NOTE(review): this skips the label row (count == 1) without
            # validating its contents.
            if count > 1:
                items = line.split('\t')
                if len(items) != 5:
                    return False
                index = items[1]
                if not index.isdigit():
                    return False
                forward = items[2]
                if not forward.isdigit():
                    return False
                reverse = items[3]
                if not reverse.isdigit():
                    return False
                value = items[4]
                if not value.isdigit():
                    return False
                # The 'value' column must equal forward + reverse.
                if int(forward) + int(reverse) != int(value):
                    return False
            # 100 consecutive valid-looking lines is considered enough evidence.
            if count == 100:
                return True
            count += 1
        # NOTE(review): the while-True loop above always exits via return, so
        # the two statements below are unreachable dead code.
        if count < 100 and count > 0:
            return True
        return False
if __name__ == '__main__':
    # Run the doctests embedded in the sniffer docstrings above.
    import doctest
    doctest.testmod(sys.modules[__name__])
# Source file: lib/galaxy/datatypes/interval.py
"""
Interval datatypes
"""
import logging
import math
import sys
import tempfile
from bx.intervals.io import GenomicIntervalReader, ParseError
from six.moves.urllib.parse import quote_plus
from galaxy import util
from galaxy.datatypes import metadata
from galaxy.datatypes.data import DatatypeValidation
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.sniff import (
build_sniff_from_prefix,
get_headers,
iter_headers
)
from galaxy.datatypes.tabular import Tabular
from galaxy.datatypes.util.gff_util import parse_gff3_attributes, parse_gff_attributes
from galaxy.util import compression_utils
from . import (
data,
dataproviders
)
log = logging.getLogger(__name__)

# Contains the meta columns and the words that map to it; list aliases on the
# right side of the : in decreasing order of priority
alias_spec = {
    'chromCol' : ['chrom', 'CHROMOSOME', 'CHROM', 'Chromosome Name'],
    'startCol' : ['start', 'START', 'chromStart', 'txStart', 'Start Position (bp)'],
    'endCol' : ['end', 'END', 'STOP', 'chromEnd', 'txEnd', 'End Position (bp)'],
    'strandCol' : ['strand', 'STRAND', 'Strand'],
    'nameCol' : ['name', 'NAME', 'Name', 'name2', 'NAME2', 'Name2', 'Ensembl Gene ID', 'Ensembl Transcript ID', 'Ensembl Peptide ID']
}

# a little faster lookup
# Inverted index built from alias_spec: header word -> metadata column name.
alias_helper = {}
for key, value in alias_spec.items():
    for elem in value:
        alias_helper[elem] = key

# Constants for configuring viewport generation: If a line is greater than
# VIEWPORT_MAX_READS_PER_LINE * VIEWPORT_READLINE_BUFFER_SIZE bytes in size,
# then we will not generate a viewport for that dataset
VIEWPORT_READLINE_BUFFER_SIZE = 1048576  # 1MB
VIEWPORT_MAX_READS_PER_LINE = 10
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Interval(Tabular):
    """Tab delimited data containing interval information"""
    edam_data = "data_3002"
    edam_format = "format_3475"
    file_ext = "interval"
    line_class = "region"
    track_type = "FeatureTrack"
    data_sources = {"data": "tabix", "index": "bigwig"}

    """Add metadata elements"""
    MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
    MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
    MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
    MetadataElement(name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0)
    MetadataElement(name="nameCol", desc="Name/Identifier column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0)
    MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)

    def __init__(self, **kwd):
        """Initialize interval datatype, by adding UCSC display apps"""
        Tabular.__init__(self, **kwd)
        self.add_display_app('ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links')

    def init_meta(self, dataset, copy_from=None):
        # Delegate straight to Tabular; Interval adds no extra initialization.
        Tabular.init_meta(self, dataset, copy_from=copy_from)

    def set_meta(self, dataset, overwrite=True, first_line_is_header=False, **kwd):
        """Tries to guess from the line the location number of the column for the chromosome, region start-end and strand"""
        Tabular.set_meta(self, dataset, overwrite=overwrite, skip=0)
        if dataset.has_data():
            empty_line_count = 0
            num_check_lines = 100  # only check up to this many non empty lines
            with compression_utils.get_fileobj(dataset.file_name) as in_fh:
                for i, line in enumerate(in_fh):
                    line = line.rstrip('\r\n')
                    if line:
                        if (first_line_is_header or line[0] == '#'):
                            # Header line: map recognized header names (alias_spec) to
                            # the 1-based column metadata values.
                            self.init_meta(dataset)
                            line = line.strip('#')
                            elems = line.split('\t')
                            for meta_name, header_list in alias_spec.items():
                                for header_val in header_list:
                                    if header_val in elems:
                                        # found highest priority header to meta_name
                                        setattr(dataset.metadata, meta_name, elems.index(header_val) + 1)
                                        break  # next meta_name
                            break  # Our metadata is set, so break out of the outer loop
                        else:
                            # Header lines in Interval files are optional. For example, BED is Interval but has no header.
                            # We'll make a best guess at the location of the metadata columns.
                            elems = line.split('\t')
                            if len(elems) > 2:
                                if overwrite or not dataset.metadata.element_is_set('chromCol'):
                                    dataset.metadata.chromCol = 1
                                try:
                                    # Column 2 is taken as the start only if it parses as an int.
                                    int(elems[1])
                                    if overwrite or not dataset.metadata.element_is_set('startCol'):
                                        dataset.metadata.startCol = 2
                                except Exception:
                                    pass  # Metadata default will be used
                                try:
                                    # Column 3 is taken as the end only if it parses as an int.
                                    int(elems[2])
                                    if overwrite or not dataset.metadata.element_is_set('endCol'):
                                        dataset.metadata.endCol = 3
                                except Exception:
                                    pass  # Metadata default will be used
                                # we no longer want to guess that this column is the 'name', name must now be set manually for interval files
                                # we will still guess at the strand, as we can make a more educated guess
                                # if len( elems ) > 3:
                                #     try:
                                #         int( elems[3] )
                                #     except Exception:
                                #         if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
                                #             dataset.metadata.nameCol = 4
                                if len(elems) < 6 or elems[5] not in data.valid_strand:
                                    if overwrite or not dataset.metadata.element_is_set('strandCol'):
                                        dataset.metadata.strandCol = 0
                                else:
                                    if overwrite or not dataset.metadata.element_is_set('strandCol'):
                                        dataset.metadata.strandCol = 6
                                break
                        if (i - empty_line_count) > num_check_lines:
                            break  # Our metadata is set or we examined 100 non-empty lines, so break out of the outer loop
                    else:
                        empty_line_count += 1

    def displayable(self, dataset):
        # A dataset can be displayed only if it is in the OK state, has data lines,
        # and the chrom/start/end column metadata have all been resolved.
        try:
            return dataset.has_data() \
                and dataset.state == dataset.states.OK \
                and dataset.metadata.columns > 0 \
                and dataset.metadata.data_lines != 0 \
                and dataset.metadata.chromCol \
                and dataset.metadata.startCol \
                and dataset.metadata.endCol
        except Exception:
            return False

    def get_estimated_display_viewport(self, dataset, chrom_col=None, start_col=None, end_col=None):
        """Return a chrom, start, stop tuple for viewing a file."""
        viewport_feature_count = 100  # viewport should check at least 100 features; excludes comment lines
        max_line_count = max(viewport_feature_count, 500)  # maximum number of lines to check; includes comment lines
        if not self.displayable(dataset):
            return (None, None, None)
        try:
            # If column indexes were not passed, determine from metadata
            # (metadata columns are 1-based; local indexes are 0-based).
            if chrom_col is None:
                chrom_col = int(dataset.metadata.chromCol) - 1
            if start_col is None:
                start_col = int(dataset.metadata.startCol) - 1
            if end_col is None:
                end_col = int(dataset.metadata.endCol) - 1
            # Scan lines of file to find a reasonable chromosome and range
            chrom = None
            start = sys.maxsize
            end = 0
            max_col = max(chrom_col, start_col, end_col)
            with compression_utils.get_fileobj(dataset.file_name) as fh:
                for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
                    # Skip comment lines
                    if not line.startswith('#'):
                        try:
                            fields = line.rstrip().split('\t')
                            if len(fields) > max_col:
                                # Only grow the viewport while we stay on the first
                                # chromosome encountered.
                                if chrom is None or chrom == fields[chrom_col]:
                                    start = min(start, int(fields[start_col]))
                                    end = max(end, int(fields[end_col]))
                                    # Set chrom last, in case start and end are not integers
                                    chrom = fields[chrom_col]
                                viewport_feature_count -= 1
                        except Exception:
                            # Most likely a non-integer field has been encountered
                            # for start / stop. Just ignore and make sure we finish
                            # reading the line and decrementing the counters.
                            pass
                    # Make sure we are at the next new line
                    # (iter_start_of_line yields at most BUFFER_SIZE bytes; a longer
                    # line must be drained, but only up to VIEWPORT_MAX_READS_PER_LINE reads).
                    readline_count = VIEWPORT_MAX_READS_PER_LINE
                    while line.rstrip('\n\r') == line:
                        assert readline_count > 0, Exception('Viewport readline count exceeded for dataset %s.' % dataset.id)
                        line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
                        if not line:
                            break  # EOF
                        readline_count -= 1
                    max_line_count -= 1
                    if not viewport_feature_count or not max_line_count:
                        # exceeded viewport or total line count to check
                        break
            if chrom is not None:
                return (chrom, str(start), str(end))  # Necessary to return strings?
        except Exception:
            # Unexpected error, possibly missing metadata
            log.exception("Exception caught attempting to generate viewport for dataset '%d'", dataset.id)
        return (None, None, None)

    def as_ucsc_display_file(self, dataset, **kwd):
        """Returns file contents with only the bed data"""
        with tempfile.NamedTemporaryFile(delete=False, mode='w') as fh:
            # Metadata columns are 1-based; 0 (no_value) maps to -1 after the
            # decrement, which flags strand/name as absent below.
            c, s, e, t, n = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol or 0, dataset.metadata.nameCol or 0
            c, s, e, t, n = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1, int(n) - 1
            if t >= 0:  # strand column (should) exists
                for i, elems in enumerate(compression_utils.file_iter(dataset.file_name)):
                    strand = "+"
                    name = "region_%i" % i
                    if n >= 0 and n < len(elems):
                        name = elems[n]
                    if t < len(elems):
                        strand = elems[t]
                    tmp = [elems[c], elems[s], elems[e], name, '0', strand]
                    fh.write('%s\n' % '\t'.join(tmp))
            elif n >= 0:  # name column (should) exists
                for i, elems in enumerate(compression_utils.file_iter(dataset.file_name)):
                    name = "region_%i" % i
                    if n >= 0 and n < len(elems):
                        name = elems[n]
                    tmp = [elems[c], elems[s], elems[e], name]
                    fh.write('%s\n' % '\t'.join(tmp))
            else:
                # Minimal BED3: chrom, start, end only.
                for elems in compression_utils.file_iter(dataset.file_name):
                    tmp = [elems[c], elems[s], elems[e]]
                    fh.write('%s\n' % '\t'.join(tmp))
        return compression_utils.get_fileobj(fh.name, mode='rb')

    def display_peek(self, dataset):
        """Returns formated html of peek"""
        return self.make_html_table(dataset, column_parameter_alias={'chromCol': 'Chrom', 'startCol': 'Start', 'endCol': 'End', 'strandCol': 'Strand', 'nameCol': 'Name'})

    def ucsc_links(self, dataset, type, app, base_url):
        """
        Generate links to UCSC genome browser sites based on the dbkey
        and content of dataset.
        """
        # Filter UCSC sites to only those that are supported by this build and
        # enabled.
        valid_sites = [(name, url)
                       for name, url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey)
                       if name in app.datatypes_registry.get_display_sites('ucsc')]
        if not valid_sites:
            return []
        # If there are any valid sites, we need to generate the estimated
        # viewport
        chrom, start, stop = self.get_estimated_display_viewport(dataset)
        if chrom is None:
            return []
        # Accumulate links for valid sites
        ret_val = []
        for site_name, site_url in valid_sites:
            internal_url = app.url_for(controller='dataset', dataset_id=dataset.id,
                                       action='display_at', filename='ucsc_' + site_name)
            display_url = quote_plus("%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" %
                                     (base_url, app.url_for(controller='root'), dataset.id, type))
            redirect_url = quote_plus("%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" %
                                      (site_url, dataset.dbkey, chrom, start, stop))
            link = '{}?redirect_url={}&display_url={}'.format(internal_url, redirect_url, display_url)
            ret_val.append((site_name, link))
        return ret_val

    def validate(self, dataset, **kwd):
        """Validate an interval file using the bx GenomicIntervalReader"""
        # NOTE(review): when strandCol is unset (0), t becomes -1 here and is
        # passed to GenomicIntervalReader as strand_col — presumably handled by
        # bx-python; confirm against its API.
        c, s, e, t = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol
        c, s, e, t = int(c) - 1, int(s) - 1, int(e) - 1, int(t) - 1
        with compression_utils.get_fileobj(dataset.file_name, "r") as infile:
            reader = GenomicIntervalReader(
                infile,
                chrom_col=c,
                start_col=s,
                end_col=e,
                strand_col=t)
            # Consume the whole file; the first parse error invalidates the dataset,
            # exhausting the reader validates it.
            while True:
                try:
                    next(reader)
                except ParseError as e:
                    return DatatypeValidation.invalid(util.unicodify(e))
                except StopIteration:
                    return DatatypeValidation.valid()

    def repair_methods(self, dataset):
        """Return options for removing errors along with a description"""
        return [("lines", "Remove erroneous lines")]

    def sniff_prefix(self, file_prefix):
        """
        Checks for 'intervalness'

        This format is mostly used by galaxy itself. Valid interval files should include
        a valid header comment, but this seems to be loosely regulated.

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( 'test_space.txt' )
        >>> Interval().sniff( fname )
        False
        >>> fname = get_test_fname( 'interval.interval' )
        >>> Interval().sniff( fname )
        True
        """
        found_valid_lines = False
        try:
            headers = iter_headers(file_prefix, '\t', comment_designator='#')
            # If we got here, we already know the file is_column_based and is not bed,
            # so we'll just look for some valid data.
            for hdr in headers:
                if hdr:
                    if len(hdr) < 3:
                        return False
                    # Assume chrom start and end are in column positions 1 and 2
                    # respectively ( for 0 based columns )
                    int(hdr[1])
                    int(hdr[2])
                    found_valid_lines = True
        except Exception:
            return False
        return found_valid_lines

    def get_track_resolution(self, dataset, start, end):
        # Interval tracks have no resolution concept; subclasses may override.
        return None

    # ------------- Dataproviders
    @dataproviders.decorators.dataprovider_factory('genomic-region',
                                                   dataproviders.dataset.GenomicRegionDataProvider.settings)
    def genomic_region_dataprovider(self, dataset, **settings):
        return dataproviders.dataset.GenomicRegionDataProvider(dataset, **settings)

    @dataproviders.decorators.dataprovider_factory('genomic-region-dict',
                                                   dataproviders.dataset.GenomicRegionDataProvider.settings)
    def genomic_region_dict_dataprovider(self, dataset, **settings):
        settings['named_columns'] = True
        return self.genomic_region_dataprovider(dataset, **settings)

    @dataproviders.decorators.dataprovider_factory('interval',
                                                   dataproviders.dataset.IntervalDataProvider.settings)
    def interval_dataprovider(self, dataset, **settings):
        return dataproviders.dataset.IntervalDataProvider(dataset, **settings)

    @dataproviders.decorators.dataprovider_factory('interval-dict',
                                                   dataproviders.dataset.IntervalDataProvider.settings)
    def interval_dict_dataprovider(self, dataset, **settings):
        settings['named_columns'] = True
        return self.interval_dataprovider(dataset, **settings)
class BedGraph(Interval):
    """Tab delimited chrom/start/end/datavalue dataset"""
    edam_format = "format_3583"
    file_ext = "bedgraph"
    track_type = "LineTrack"
    data_sources = {"data": "bigwig", "index": "bigwig"}

    def as_ucsc_display_file(self, dataset, **kwd):
        """
        Returns file contents as is with no modifications.

        TODO: this is a functional stub and will need to be enhanced moving forward to provide additional support for bedgraph.
        """
        handle = open(dataset.file_name, 'rb')
        return handle

    def get_estimated_display_viewport(self, dataset, chrom_col=0, start_col=1, end_col=2):
        """
        Set viewport based on dataset's first 100 lines.
        """
        return super().get_estimated_display_viewport(dataset, chrom_col=chrom_col, start_col=start_col, end_col=end_col)
class Bed(Interval):
    """Tab delimited data in BED format"""
    edam_format = "format_3003"
    file_ext = "bed"
    data_sources = {"data": "tabix", "index": "bigwig", "feature_search": "fli"}
    track_type = Interval.track_type
    column_names = ['Chrom', 'Start', 'End', 'Name', 'Score', 'Strand', 'ThickStart', 'ThickEnd', 'ItemRGB', 'BlockCount', 'BlockSizes', 'BlockStarts']

    """Add metadata elements"""
    MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
    MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
    MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
    MetadataElement(name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0)
    MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)
    MetadataElement(name="viz_filter_cols", desc="Score column for visualization", default=[4], param=metadata.ColumnParameter, optional=True, multiple=True)
    # do we need to repeat these? they are the same as should be inherited from interval type

    def set_meta(self, dataset, overwrite=True, **kwd):
        """Sets the metadata information for datasets previously determined to be in bed format."""
        i = 0
        if dataset.has_data():
            # Inspect only the first non-empty, non-comment line: BED columns are
            # fixed, so one data line determines name/strand column positions.
            for i, line in enumerate(open(dataset.file_name)):
                line = line.rstrip('\r\n')
                if line and not line.startswith('#'):
                    elems = line.split('\t')
                    if len(elems) > 2:
                        if len(elems) > 3:
                            if overwrite or not dataset.metadata.element_is_set('nameCol'):
                                dataset.metadata.nameCol = 4
                        if len(elems) < 6:
                            if overwrite or not dataset.metadata.element_is_set('strandCol'):
                                dataset.metadata.strandCol = 0
                        else:
                            if overwrite or not dataset.metadata.element_is_set('strandCol'):
                                dataset.metadata.strandCol = 6
                    break
            # i is the number of header/comment lines to skip before tabular data.
            Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)

    def as_ucsc_display_file(self, dataset, **kwd):
        """Returns file contents with only the bed data. If bed 6+, treat as interval."""
        # Validate only the first data line; if any optional column violates the
        # strict UCSC BED column types, fall back to Interval's BED conversion.
        for line in open(dataset.file_name):
            line = line.strip()
            if line == "" or line.startswith("#"):
                continue
            fields = line.split('\t')
            """check to see if this file doesn't conform to strict genome browser accepted bed"""
            try:
                if len(fields) > 12:
                    return Interval.as_ucsc_display_file(self, dataset)  # too many fields
                if len(fields) > 6:
                    int(fields[6])
                    if len(fields) > 7:
                        int(fields[7])
                        if len(fields) > 8:
                            if int(fields[8]) != 0:
                                return Interval.as_ucsc_display_file(self, dataset)
                            if len(fields) > 9:
                                int(fields[9])
                                if len(fields) > 10:
                                    fields2 = fields[10].rstrip(",").split(",")  # remove trailing comma and split on comma
                                    for field in fields2:
                                        int(field)
                                    if len(fields) > 11:
                                        fields2 = fields[11].rstrip(",").split(",")  # remove trailing comma and split on comma
                                        for field in fields2:
                                            int(field)
            except Exception:
                return Interval.as_ucsc_display_file(self, dataset)
            # only check first line for proper form
            break
        try:
            return open(dataset.file_name, 'rb')
        except Exception:
            return "This item contains no content"

    def sniff_prefix(self, file_prefix):
        """
        Checks for 'bedness'

        BED lines have three required fields and nine additional optional fields.
        The number of fields per line must be consistent throughout any single set of data in
        an annotation track. The order of the optional fields is binding: lower-numbered
        fields must always be populated if higher-numbered fields are used. The data type of
        all 12 columns is:
        1-str, 2-int, 3-int, 4-str, 5-int, 6-str, 7-int, 8-int, 9-int or list, 10-int, 11-list, 12-list

        For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format1

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( 'test_tab.bed' )
        >>> Bed().sniff( fname )
        True
        >>> fname = get_test_fname( 'interv1.bed' )
        >>> Bed().sniff( fname )
        True
        >>> fname = get_test_fname( 'complete.bed' )
        >>> Bed().sniff( fname )
        True
        """
        if not get_headers(file_prefix, '\t', comment_designator='#', count=1):
            return False
        try:
            headers = iter_headers(file_prefix, '\t', comment_designator='#')
            for hdr in headers:
                if hdr[0] == '':
                    continue
                if len(hdr) < 3 or len(hdr) > 12:
                    return False
                try:
                    # Required fields: chromStart and chromEnd must be integers.
                    int(hdr[1])
                    int(hdr[2])
                except Exception:
                    return False
                if len(hdr) > 4:
                    # hdr[3] is a string, 'name', which defines the name of the BED line - difficult to test for this.
                    # hdr[4] is an int, 'score', a score between 0 and 1000.
                    try:
                        if int(hdr[4]) < 0 or int(hdr[4]) > 1000:
                            return False
                    except Exception:
                        return False
                if len(hdr) > 5:
                    # hdr[5] is strand
                    if hdr[5] not in data.valid_strand:
                        return False
                if len(hdr) > 6:
                    # hdr[6] is thickStart, the starting position at which the feature is drawn thickly.
                    try:
                        int(hdr[6])
                    except Exception:
                        return False
                if len(hdr) > 7:
                    # hdr[7] is thickEnd, the ending position at which the feature is drawn thickly
                    try:
                        int(hdr[7])
                    except Exception:
                        return False
                if len(hdr) > 8:
                    # hdr[8] is itemRgb, an RGB value of the form R,G,B (e.g. 255,0,0). However, this could also be an int (e.g., 0)
                    try:
                        int(hdr[8])
                    except Exception:
                        try:
                            hdr[8].split(',')
                        except Exception:
                            return False
                if len(hdr) > 9:
                    # hdr[9] is blockCount, the number of blocks (exons) in the BED line.
                    try:
                        block_count = int(hdr[9])
                    except Exception:
                        return False
                if len(hdr) > 10:
                    # hdr[10] is blockSizes - A comma-separated list of the block sizes.
                    # Sometimes the blosck_sizes and block_starts lists end in extra commas
                    try:
                        block_sizes = hdr[10].rstrip(',').split(',')
                    except Exception:
                        return False
                if len(hdr) > 11:
                    # hdr[11] is blockStarts - A comma-separated list of block starts.
                    try:
                        block_starts = hdr[11].rstrip(',').split(',')
                    except Exception:
                        return False
                    # Block lists must agree with the declared block count.
                    if len(block_sizes) != block_count or len(block_starts) != block_count:
                        return False
            return True
        except Exception:
            return False
class ProBed(Bed):
    """Tab delimited data in proBED format - adaptation of BED for proteomics data."""
    edam_format = "format_3827"
    file_ext = "probed"
    # BED12 columns plus the proBED-specific proteomics columns (accession,
    # peptide, PSM scores, mass-to-charge values, etc.).
    column_names = ['Chrom', 'Start', 'End', 'Name', 'Score', 'Strand', 'ThickStart', 'ThickEnd', 'ItemRGB', 'BlockCount', 'BlockSizes', 'BlockStarts', 'ProteinAccession', 'PeptideSequence', 'Uniqueness', 'GenomeReferenceVersion', 'PsmScore', 'Fdr', 'Modifications', 'Charge', 'ExpMassToCharge', 'CalcMassToCharge', 'PsmRank', 'DatasetID', 'Uri']
class BedStrict(Bed):
    """Tab delimited data in strict BED format - no non-standard columns allowed"""
    edam_format = "format_3584"
    file_ext = "bedstrict"

    # no user change of datatype allowed
    allow_datatype_change = False

    # Read only metadata elements
    MetadataElement(name="chromCol", default=1, desc="Chrom column", readonly=True, param=metadata.MetadataParameter)
    MetadataElement(name="startCol", default=2, desc="Start column", readonly=True, param=metadata.MetadataParameter)  # TODO: start and end should be able to be set to these or the proper thick[start/end]?
    MetadataElement(name="endCol", default=3, desc="End column", readonly=True, param=metadata.MetadataParameter)
    MetadataElement(name="strandCol", desc="Strand column (click box & select)", readonly=True, param=metadata.MetadataParameter, no_value=0, optional=True)
    MetadataElement(name="nameCol", desc="Name/Identifier column (click box & select)", readonly=True, param=metadata.MetadataParameter, no_value=0, optional=True)
    MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)

    def __init__(self, **kwd):
        # Intentionally calls Tabular.__init__ (not Bed/Interval) so the legacy
        # UCSC display app is never registered for this datatype.
        Tabular.__init__(self, **kwd)
        self.clear_display_apps()  # only new style display applications for this datatype

    def set_meta(self, dataset, overwrite=True, **kwd):
        Tabular.set_meta(self, dataset, overwrite=overwrite, **kwd)  # need column count first
        # In strict BED the column positions are fixed, so name/strand columns
        # follow directly from the column count.
        if dataset.metadata.columns >= 4:
            dataset.metadata.nameCol = 4
        if dataset.metadata.columns >= 6:
            dataset.metadata.strandCol = 6

    def sniff(self, filename):
        return False  # NOTE: This would require aggressively validating the entire file
class Bed6(BedStrict):
    """Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 6"""
    edam_format = "format_3585"
    file_ext = "bed6"
class Bed12(BedStrict):
    """Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 12"""
    edam_format = "format_3586"
    file_ext = "bed12"
class _RemoteCallMixin:
    """Builds callback URLs for legacy external display sites (UCSC, GBrowse)."""

    def _get_remote_call_url(self, redirect_url, site_name, dataset, type, app, base_url):
        """Retrieve the URL to call out to an external site and retrieve data.

        This routes our external URL through a local galaxy instance which makes
        the data available, followed by redirecting to the remote site with a
        link back to the available information.
        """
        # Local Galaxy endpoint that serves the dataset to the remote site.
        filename = '{}_{}'.format(type, site_name)
        internal_url = "%s" % app.url_for(controller='dataset', dataset_id=dataset.id, action='display_at', filename=filename)
        # An admin-configured callback host may override the request's base URL.
        callback_base = app.config.get("display_at_callback", base_url)
        root_url = app.url_for(controller='root')
        display_url = quote_plus("%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" %
                                 (callback_base, root_url, dataset.id, type))
        return '{}?redirect_url={}&display_url={}'.format(internal_url, redirect_url, display_url)
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Gff(Tabular, _RemoteCallMixin):
    """Tab delimited data in Gff format"""
    edam_data = "data_1255"
    edam_format = "format_2305"
    file_ext = "gff"
    valid_gff_frame = ['.', '0', '1', '2']
    column_names = ['Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Group']
    data_sources = {"data": "interval_index", "index": "bigwig", "feature_search": "fli"}
    track_type = Interval.track_type

    """Add metadata elements"""
    MetadataElement(name="columns", default=9, desc="Number of columns", readonly=True, visible=False)
    MetadataElement(name="column_types", default=['str', 'str', 'str', 'int', 'int', 'int', 'str', 'str', 'str'],
                    param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False)
    MetadataElement(name="attributes", default=0, desc="Number of attributes", readonly=True, visible=False, no_value=0)
    MetadataElement(name="attribute_types", default={}, desc="Attribute types", param=metadata.DictParameter, readonly=True, visible=False, no_value=[])

    def __init__(self, **kwd):
        """Initialize datatype, by adding GBrowse display app"""
        Tabular.__init__(self, **kwd)
        self.add_display_app('ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links')
        self.add_display_app('gbrowse', 'display in Gbrowse', 'as_gbrowse_display_file', 'gbrowse_links')

    def set_attribute_metadata(self, dataset):
        """
        Sets metadata elements for dataset's attributes.
        """
        # Use first N lines to set metadata for dataset attributes. Attributes
        # not found in the first N lines will not have metadata.
        num_lines = 200
        attribute_types = {}
        with compression_utils.get_fileobj(dataset.file_name) as in_fh:
            for i, line in enumerate(in_fh):
                if line and not line.startswith('#'):
                    elems = line.split('\t')
                    if len(elems) == 9:
                        try:
                            # Loop through attributes to set types.
                            for name, value in parse_gff_attributes(elems[8]).items():
                                # Default type is string.
                                value_type = "str"
                                try:
                                    # Try int.
                                    int(value)
                                    value_type = "int"
                                except ValueError:
                                    try:
                                        # Try float.
                                        float(value)
                                        value_type = "float"
                                    except ValueError:
                                        pass
                                attribute_types[name] = value_type
                        except Exception:
                            pass
                if i + 1 == num_lines:
                    break
        # Set attribute metadata and then set additional metadata.
        dataset.metadata.attribute_types = attribute_types
        dataset.metadata.attributes = len(attribute_types)

    def set_meta(self, dataset, overwrite=True, **kwd):
        self.set_attribute_metadata(dataset)
        # Count leading lines (comments / malformed rows) until the first valid
        # 9-column data line with integer start/end; skip them for Tabular metadata.
        i = 0
        with compression_utils.get_fileobj(dataset.file_name) as in_fh:
            for i, line in enumerate(in_fh):
                line = line.rstrip('\r\n')
                if line and not line.startswith('#'):
                    elems = line.split('\t')
                    if len(elems) == 9:
                        try:
                            int(elems[3])
                            int(elems[4])
                            break
                        except Exception:
                            pass
        Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)

    def display_peek(self, dataset):
        """Returns formated html of peek"""
        return self.make_html_table(dataset, column_names=self.column_names)

    def get_estimated_display_viewport(self, dataset):
        """
        Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
        formats. This function should correctly handle both...
        """
        viewport_feature_count = 100  # viewport should check at least 100 features; excludes comment lines
        max_line_count = max(viewport_feature_count, 500)  # maximum number of lines to check; includes comment lines
        if self.displayable(dataset):
            try:
                seqid = None
                start = sys.maxsize
                stop = 0
                with compression_utils.get_fileobj(dataset.file_name) as fh:
                    for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
                        try:
                            if line.startswith('##sequence-region'):  # ##sequence-region IV 6000000 6030000
                                elems = line.rstrip('\n\r').split()
                                if len(elems) > 3:
                                    # line looks like:
                                    # sequence-region ctg123 1 1497228
                                    seqid = elems[1]  # IV
                                    start = int(elems[2])  # 6000000
                                    stop = int(elems[3])  # 6030000
                                    break  # use location declared in file
                                elif len(elems) == 2 and elems[1].find('..') > 0:
                                    # line looks like this:
                                    # sequence-region X:120000..140000
                                    elems = elems[1].split(':')
                                    seqid = elems[0]
                                    start = int(elems[1].split('..')[0])
                                    stop = int(elems[1].split('..')[1])
                                    break  # use location declared in file
                                else:
                                    log.debug("line (%s) uses an unsupported ##sequence-region definition." % str(line))
                                    # break #no break, if bad definition, we try another line
                            elif line.startswith("browser position"):
                                # Allow UCSC style browser and track info in the GFF file
                                pos_info = line.split()[-1]
                                seqid, startend = pos_info.split(":")
                                start, stop = map(int, startend.split("-"))
                                break  # use location declared in file
                            elif not line.startswith(('#', 'track', 'browser')):
                                # Feature line: grow the viewport from column 4/5.
                                viewport_feature_count -= 1
                                elems = line.rstrip('\n\r').split('\t')
                                if len(elems) > 3:
                                    if not seqid:
                                        # We can only set the viewport for a single chromosome
                                        seqid = elems[0]
                                    if seqid == elems[0]:
                                        # Make sure we have not spanned chromosomes
                                        start = min(start, int(elems[3]))
                                        stop = max(stop, int(elems[4]))
                        except Exception:
                            # most likely start/stop is not an int or not enough fields
                            pass
                        # make sure we are at the next new line
                        readline_count = VIEWPORT_MAX_READS_PER_LINE
                        while line.rstrip('\n\r') == line:
                            assert readline_count > 0, Exception('Viewport readline count exceeded for dataset %s.' % dataset.id)
                            line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
                            if not line:
                                break  # EOF
                            readline_count -= 1
                        max_line_count -= 1
                        if not viewport_feature_count or not max_line_count:
                            # exceeded viewport or total line count to check
                            break
                if seqid is not None:
                    return (seqid, str(start), str(stop))  # Necessary to return strings?
            except Exception:
                log.exception('Unexpected error')
        return (None, None, None)  # could not determine viewport

    def ucsc_links(self, dataset, type, app, base_url):
        # One link per enabled UCSC site that supports this dataset's build.
        ret_val = []
        seqid, start, stop = self.get_estimated_display_viewport(dataset)
        if seqid is not None:
            for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey):
                if site_name in app.datatypes_registry.get_display_sites('ucsc'):
                    redirect_url = quote_plus(
                        "%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" %
                        (site_url, dataset.dbkey, seqid, start, stop))
                    link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
                    ret_val.append((site_name, link))
        return ret_val

    def gbrowse_links(self, dataset, type, app, base_url):
        # One link per enabled GBrowse site that supports this dataset's build.
        ret_val = []
        seqid, start, stop = self.get_estimated_display_viewport(dataset)
        if seqid is not None:
            for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('gbrowse', dataset.dbkey):
                if site_name in app.datatypes_registry.get_display_sites('gbrowse'):
                    if seqid.startswith('chr') and len(seqid) > 3:
                        # GBrowse uses sequence names without the UCSC 'chr' prefix.
                        seqid = seqid[3:]
                    redirect_url = quote_plus("{}/?q={}:{}..{}&eurl=%s".format(site_url, seqid, start, stop))
                    link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
                    ret_val.append((site_name, link))
        return ret_val

    def sniff_prefix(self, file_prefix):
        """
        Determines whether the file is in gff format

        GFF lines have nine required fields that must be tab-separated.

        For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format3

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname('gff.gff3')
        >>> Gff().sniff( fname )
        False
        >>> fname = get_test_fname('test.gff')
        >>> Gff().sniff( fname )
        True
        """
        if len(get_headers(file_prefix, '\t', count=2)) < 2:
            return False
        try:
            headers = iter_headers(file_prefix, '\t')
            for hdr in headers:
                # A '##gff-version' pragma not mentioning version 2 rules out GFF2.
                if hdr and hdr[0].startswith('##gff-version') and hdr[0].find('2') < 0:
                    return False
                if hdr and hdr[0] and not hdr[0].startswith('#'):
                    if len(hdr) != 9:
                        return False
                    try:
                        int(hdr[3])
                        int(hdr[4])
                    except Exception:
                        return False
                    if hdr[5] != '.':
                        try:
                            float(hdr[5])
                        except Exception:
                            return False
                    if hdr[6] not in data.valid_strand:
                        return False
                    if hdr[7] not in self.valid_gff_frame:
                        return False
                    return True
        except Exception:
            return False

    # ------------- Dataproviders
    # redefine bc super is Tabular
    @dataproviders.decorators.dataprovider_factory('genomic-region',
                                                   dataproviders.dataset.GenomicRegionDataProvider.settings)
    def genomic_region_dataprovider(self, dataset, **settings):
        return dataproviders.dataset.GenomicRegionDataProvider(dataset, 0, 3, 4, **settings)

    @dataproviders.decorators.dataprovider_factory('genomic-region-dict',
                                                   dataproviders.dataset.GenomicRegionDataProvider.settings)
    def genomic_region_dict_dataprovider(self, dataset, **settings):
        settings['named_columns'] = True
        return self.genomic_region_dataprovider(dataset, **settings)

    @dataproviders.decorators.dataprovider_factory('interval',
                                                   dataproviders.dataset.IntervalDataProvider.settings)
    def interval_dataprovider(self, dataset, **settings):
        return dataproviders.dataset.IntervalDataProvider(dataset, 0, 3, 4, 6, 2, **settings)

    @dataproviders.decorators.dataprovider_factory('interval-dict',
                                                   dataproviders.dataset.IntervalDataProvider.settings)
    def interval_dict_dataprovider(self, dataset, **settings):
        settings['named_columns'] = True
        return self.interval_dataprovider(dataset, **settings)
class Gff3(Gff):
    """Tab delimited data in Gff3 format"""
    edam_format = "format_1975"
    file_ext = "gff3"
    valid_gff3_strand = ['+', '-', '.', '?']
    valid_gff3_phase = Gff.valid_gff_frame
    column_names = ['Seqid', 'Source', 'Type', 'Start', 'End', 'Score', 'Strand', 'Phase', 'Attributes']
    track_type = Interval.track_type

    # Add metadata elements
    MetadataElement(name="column_types", default=['str', 'str', 'str', 'int', 'int', 'float', 'str', 'int', 'list'],
                    param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False)

    def __init__(self, **kwd):
        """Initialize datatype, by adding GBrowse display app"""
        Gff.__init__(self, **kwd)

    def set_meta(self, dataset, overwrite=True, **kwd):
        """Set attribute metadata, then skip leading lines until the first valid GFF3 record.

        A line counts as valid when it has 9 columns, start/end are ints (or the
        '.' placeholder), start < end where both are known, and strand/phase are
        legal GFF3 values.
        """
        self.set_attribute_metadata(dataset)
        i = 0
        with compression_utils.get_fileobj(dataset.file_name) as in_fh:
            for i, line in enumerate(in_fh):
                line = line.rstrip('\r\n')
                if line and not line.startswith('#'):
                    elems = line.split('\t')
                    # Reset per line: previously `start`/`end` could be left unbound
                    # (NameError) or stale from an earlier line when the column held
                    # the '.' placeholder.
                    start = None
                    end = None
                    valid_start = False
                    valid_end = False
                    if len(elems) == 9:
                        try:
                            start = int(elems[3])
                            valid_start = True
                        except Exception:
                            if elems[3] == '.':
                                valid_start = True
                        try:
                            end = int(elems[4])
                            valid_end = True
                        except Exception:
                            if elems[4] == '.':
                                valid_end = True
                        strand = elems[6]
                        phase = elems[7]
                        # Only enforce start < end when both are concrete integers;
                        # '.' (undefined) columns are accepted as-is.
                        ordered = start is None or end is None or start < end
                        if valid_start and valid_end and ordered and strand in self.valid_gff3_strand and phase in self.valid_gff3_phase:
                            break
        Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i)

    def sniff_prefix(self, file_prefix):
        """
        Determines whether the file is in GFF version 3 format

        GFF 3 format:

        1) adds a mechanism for representing more than one level
           of hierarchical grouping of features and subfeatures.
        2) separates the ideas of group membership and feature name/id
        3) constrains the feature type field to be taken from a controlled
           vocabulary.
        4) allows a single feature, such as an exon, to belong to more than
           one group at a time.
        5) provides an explicit convention for pairwise alignments
        6) provides an explicit convention for features that occupy disjunct regions

        The format consists of 9 columns, separated by tabs (NOT spaces).

        Undefined fields are replaced with the "." character, as described in the original GFF spec.

        For complete details see http://song.sourceforge.net/gff3.shtml

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( 'test.gff' )
        >>> Gff3().sniff( fname )
        False
        >>> fname = get_test_fname( 'test.gtf' )
        >>> Gff3().sniff( fname )
        False
        >>> fname = get_test_fname('gff.gff3')
        >>> Gff3().sniff( fname )
        True
        """
        if len(get_headers(file_prefix, '\t', count=2)) < 2:
            return False
        try:
            headers = iter_headers(file_prefix, '\t')
            for hdr in headers:
                # An explicit version pragma decides immediately.
                if hdr and hdr[0].startswith('##gff-version') and hdr[0].find('3') >= 0:
                    return True
                elif hdr and hdr[0].startswith('##gff-version') and hdr[0].find('3') < 0:
                    return False
                # Header comments may have been stripped, so inspect the data
                if hdr and hdr[0] and not hdr[0].startswith('#'):
                    if len(hdr) != 9:
                        return False
                    try:
                        int(hdr[3])
                    except Exception:
                        if hdr[3] != '.':
                            return False
                    try:
                        int(hdr[4])
                    except Exception:
                        if hdr[4] != '.':
                            return False
                    if hdr[5] != '.':
                        try:
                            float(hdr[5])
                        except Exception:
                            return False
                    if hdr[6] not in self.valid_gff3_strand:
                        return False
                    if hdr[7] not in self.valid_gff3_phase:
                        return False
                    # Column 9 must parse as GFF3 attributes.
                    parse_gff3_attributes(hdr[8])
                    return True
        except Exception:
            return False
class Gtf(Gff):
    """Tab delimited data in Gtf format"""
    edam_format = "format_2306"
    file_ext = "gtf"
    column_names = ['Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Attributes']
    track_type = Interval.track_type

    """Add metadata elements"""
    MetadataElement(name="columns", default=9, desc="Number of columns", readonly=True, visible=False)
    MetadataElement(name="column_types", default=['str', 'str', 'str', 'int', 'int', 'float', 'str', 'int', 'list'],
                    param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False)

    def sniff_prefix(self, file_prefix):
        """
        Determines whether the file is in gtf format

        GTF lines have nine required fields that must be tab-separated. The first eight GTF fields are the same as GFF.
        The group field has been expanded into a list of attributes. Each attribute consists of a type/value pair.
        Attributes must end in a semi-colon, and be separated from any following attribute by exactly one space.
        The attribute list must begin with the two mandatory attributes:

            gene_id value - A globally unique identifier for the genomic source of the sequence.
            transcript_id value - A globally unique identifier for the predicted transcript.

        For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format4

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( '1.bed' )
        >>> Gtf().sniff( fname )
        False
        >>> fname = get_test_fname( 'test.gff' )
        >>> Gtf().sniff( fname )
        False
        >>> fname = get_test_fname( 'test.gtf' )
        >>> Gtf().sniff( fname )
        True
        """
        # Require at least two tab-separated lines before inspecting further.
        if len(get_headers(file_prefix, '\t', count=2)) < 2:
            return False
        try:
            for fields in iter_headers(file_prefix, '\t'):
                # A version pragma that is not in the GFF2 family rules out GTF.
                if fields and fields[0].startswith('##gff-version') and fields[0].find('2') < 0:
                    return False
                if fields and fields[0] and not fields[0].startswith('#'):
                    if len(fields) != 9:
                        return False
                    # Start and end columns must both be integers.
                    try:
                        int(fields[3])
                        int(fields[4])
                    except Exception:
                        return False
                    # Score is either '.' or parseable as a float.
                    if fields[5] != '.':
                        try:
                            float(fields[5])
                        except Exception:
                            return False
                    if fields[6] not in data.valid_strand:
                        return False
                    if fields[7] not in self.valid_gff_frame:
                        return False
                    # Check attributes for gene_id, transcript_id
                    attributes = parse_gff_attributes(fields[8])
                    if len(attributes) < 2 or 'gene_id' not in attributes or 'transcript_id' not in attributes:
                        return False
                    return True
        except Exception:
            return False
@dataproviders.decorators.has_dataproviders
@build_sniff_from_prefix
class Wiggle(Tabular, _RemoteCallMixin):
    """Tab delimited data in wiggle format"""
    edam_format = "format_3005"
    file_ext = "wig"
    track_type = "LineTrack"
    data_sources = {"data": "bigwig", "index": "bigwig"}

    MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)

    def __init__(self, **kwd):
        """Initialize the wiggle datatype and register its display applications."""
        Tabular.__init__(self, **kwd)
        self.add_display_app('ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links')
        self.add_display_app('gbrowse', 'display in Gbrowse', 'as_gbrowse_display_file', 'gbrowse_links')

    def get_estimated_display_viewport(self, dataset):
        """
        Return a (chrom, start, stop) tuple for viewing a file, or
        (None, None, None) when no viewport could be determined.
        """
        viewport_feature_count = 100  # viewport should check at least 100 features; excludes comment lines
        max_line_count = max(viewport_feature_count, 500)  # maximum number of lines to check; includes comment lines
        if self.displayable(dataset):
            try:
                chrom = None
                start = sys.maxsize
                end = 0
                span = 1
                step = None
                with open(dataset.file_name) as fh:
                    for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
                        try:
                            if line.startswith("browser"):
                                chr_info = line.rstrip('\n\r').split()[-1]
                                chrom, coords = chr_info.split(":")
                                start, end = map(int, coords.split("-"))
                                break  # use the browser line
                            # variableStep chrom=chr20
                            if line and (line.lower().startswith("variablestep") or line.lower().startswith("fixedstep")):
                                if chrom is not None:
                                    break  # different chrom or different section of the chrom
                                chrom = line.rstrip('\n\r').split("chrom=")[1].split()[0]
                                if 'span=' in line:
                                    span = int(line.rstrip('\n\r').split("span=")[1].split()[0])
                                if 'step=' in line:
                                    step = int(line.rstrip('\n\r').split("step=")[1].split()[0])
                                    start = int(line.rstrip('\n\r').split("start=")[1].split()[0])
                            else:
                                fields = line.rstrip('\n\r').split()
                                if fields:
                                    if step is not None:
                                        # fixedStep section: positions advance implicitly.
                                        if not end:
                                            end = start + span
                                        else:
                                            end += step
                                    else:
                                        # variableStep section: first field is the position.
                                        start = min(int(fields[0]), start)
                                        end = max(end, int(fields[0]) + span)
                                    viewport_feature_count -= 1
                        except Exception:
                            # Skip unparseable lines and keep scanning.
                            pass
                        # make sure we are at the next new line
                        readline_count = VIEWPORT_MAX_READS_PER_LINE
                        while line.rstrip('\n\r') == line:
                            assert readline_count > 0, Exception('Viewport readline count exceeded for dataset %s.' % dataset.id)
                            line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
                            if not line:
                                break  # EOF
                            readline_count -= 1
                        max_line_count -= 1
                        if not viewport_feature_count or not max_line_count:
                            # exceeded viewport or total line count to check
                            break
                if chrom is not None:
                    return (chrom, str(start), str(end))  # Necessary to return strings?
            except Exception:
                log.exception('Unexpected error')
        return (None, None, None)  # could not determine viewport

    def gbrowse_links(self, dataset, type, app, base_url):
        """Build (site_name, link) pairs for displaying this dataset at GBrowse sites."""
        ret_val = []
        chrom, start, stop = self.get_estimated_display_viewport(dataset)
        if chrom is not None:
            for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('gbrowse', dataset.dbkey):
                if site_name in app.datatypes_registry.get_display_sites('gbrowse'):
                    if chrom.startswith('chr') and len(chrom) > 3:
                        # GBrowse uses chromosome names without the 'chr' prefix.
                        chrom = chrom[3:]
                    redirect_url = quote_plus("{}/?q={}:{}..{}&eurl=%s".format(site_url, chrom, start, stop))
                    link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
                    ret_val.append((site_name, link))
        return ret_val

    def ucsc_links(self, dataset, type, app, base_url):
        """Build (site_name, link) pairs for displaying this dataset at UCSC sites."""
        ret_val = []
        chrom, start, stop = self.get_estimated_display_viewport(dataset)
        if chrom is not None:
            for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey):
                if site_name in app.datatypes_registry.get_display_sites('ucsc'):
                    redirect_url = quote_plus("{}db={}&position={}:{}-{}&hgt.customText=%s".format(site_url, dataset.dbkey, chrom, start, stop))
                    link = self._get_remote_call_url(redirect_url, site_name, dataset, type, app, base_url)
                    ret_val.append((site_name, link))
        return ret_val

    def display_peek(self, dataset):
        """Returns formatted html of peek"""
        return self.make_html_table(dataset, skipchars=['track', '#'])

    def set_meta(self, dataset, overwrite=True, **kwd):
        """Skip leading track/declaration lines, then delegate column metadata to Tabular."""
        max_data_lines = None
        i = 0
        for i, line in enumerate(open(dataset.file_name)):
            line = line.rstrip('\r\n')
            if line and not line.startswith('#'):
                elems = line.split('\t')
                try:
                    # variableStep format is nucleotide position\tvalue\n,
                    # fixedStep is value\n
                    # "Wiggle track data values can be integer or real, positive or negative values"
                    float(elems[0])
                    break
                except Exception:
                    # We are either in the track definition line or in a declaration line
                    pass
        if self.max_optional_metadata_filesize >= 0 and dataset.get_size() > self.max_optional_metadata_filesize:
            # we'll arbitrarily only use the first 100 data lines in this wig file to calculate tabular attributes (column types)
            # this should be sufficient, except when we have mixed wig track types (bed, variable, fixed),
            # but those cases are not a single table that would have consistant column definitions
            # optional metadata values set in Tabular class will be 'None'
            max_data_lines = 100
        Tabular.set_meta(self, dataset, overwrite=overwrite, skip=i, max_data_lines=max_data_lines)

    def sniff_prefix(self, file_prefix):
        """
        Determines whether the file is in wiggle format

        The .wig format is line-oriented. Wiggle data is preceded by a track definition line,
        which adds a number of options for controlling the default display of this track.
        Following the track definition line is the track data, which can be entered in several
        different formats.

        The track definition line begins with the word 'track' followed by the track type.
        The track type with version is REQUIRED, and it currently must be wiggle_0. For example,
        track type=wiggle_0...

        For complete details see http://genome.ucsc.edu/goldenPath/help/wiggle.html

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( 'interv1.bed' )
        >>> Wiggle().sniff( fname )
        False
        >>> fname = get_test_fname( 'wiggle.wig' )
        >>> Wiggle().sniff( fname )
        True
        """
        try:
            headers = iter_headers(file_prefix, None)
            for hdr in headers:
                # Only the mandatory 'track type=wiggle_0' definition line is checked.
                if len(hdr) > 1 and hdr[0] == 'track' and hdr[1].startswith('type=wiggle'):
                    return True
            return False
        except Exception:
            return False

    def get_track_resolution(self, dataset, start, end):
        """Return a resolution (clamped to 1..100000) that plots ~1000 points across [start, end)."""
        # NOTE: renamed from 'range' to avoid shadowing the builtin.
        region_size = end - start
        # Determine appropriate resolution to plot ~1000 points
        resolution = math.ceil(10 ** math.ceil(math.log10(region_size / 1000)))
        # Restrict to valid range
        resolution = min(resolution, 100000)
        resolution = max(resolution, 1)
        return resolution

    # ------------- Dataproviders
    @dataproviders.decorators.dataprovider_factory('wiggle', dataproviders.dataset.WiggleDataProvider.settings)
    def wiggle_dataprovider(self, dataset, **settings):
        dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
        return dataproviders.dataset.WiggleDataProvider(dataset_source, **settings)

    @dataproviders.decorators.dataprovider_factory('wiggle-dict', dataproviders.dataset.WiggleDataProvider.settings)
    def wiggle_dict_dataprovider(self, dataset, **settings):
        dataset_source = dataproviders.dataset.DatasetDataProvider(dataset)
        # Emit rows keyed by column name rather than positional tuples.
        settings['named_columns'] = True
        return dataproviders.dataset.WiggleDataProvider(dataset_source, **settings)
@build_sniff_from_prefix
class CustomTrack(Tabular):
    """UCSC CustomTrack"""
    edam_format = "format_3588"
    file_ext = "customtrack"

    def __init__(self, **kwd):
        """Initialize interval datatype, by adding UCSC display app"""
        Tabular.__init__(self, **kwd)
        self.add_display_app('ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links')

    def set_meta(self, dataset, overwrite=True, **kwd):
        """Set column metadata, skipping the leading 'track ...' definition line."""
        Tabular.set_meta(self, dataset, overwrite=overwrite, skip=1)

    def display_peek(self, dataset):
        """Returns formatted html of peek"""
        return self.make_html_table(dataset, skipchars=['track', '#'])

    def get_estimated_display_viewport(self, dataset, chrom_col=None, start_col=None, end_col=None):
        """
        Return a chrom, start, stop tuple for viewing a file, or
        (None, None, None) when none could be determined.

        NOTE(review): the chrom_col/start_col/end_col parameters are accepted
        for interface compatibility but are not used by this implementation.
        """
        # FIXME: only BED and WIG custom tracks are currently supported
        # As per previously existing behavior, viewport will only be over the first intervals
        max_line_count = 100  # maximum number of lines to check; includes comment lines
        variable_step_wig = False
        chrom = None
        span = 1
        if self.displayable(dataset):
            try:
                with open(dataset.file_name) as fh:
                    for line in util.iter_start_of_line(fh, VIEWPORT_READLINE_BUFFER_SIZE):
                        if not line.startswith('#'):
                            try:
                                if variable_step_wig:
                                    # Previous line was a variableStep declaration without an
                                    # explicit start; take the position from the first data line.
                                    fields = line.rstrip().split()
                                    if len(fields) == 2:
                                        start = int(fields[0])
                                        return (chrom, str(start), str(start + span))
                                elif line and (line.lower().startswith("variablestep") or line.lower().startswith("fixedstep")):
                                    # WIG declaration line: pull chrom, and span/start if present.
                                    chrom = line.rstrip('\n\r').split("chrom=")[1].split()[0]
                                    if 'span=' in line:
                                        span = int(line.rstrip('\n\r').split("span=")[1].split()[0])
                                    if 'start=' in line:
                                        start = int(line.rstrip('\n\r').split("start=")[1].split()[0])
                                        return (chrom, str(start), str(start + span))
                                    else:
                                        variable_step_wig = True
                                else:
                                    # Assume BED-style data: chrom, start, end in columns 0-2.
                                    fields = line.rstrip().split('\t')
                                    if len(fields) >= 3:
                                        chrom = fields[0]
                                        start = int(fields[1])
                                        end = int(fields[2])
                                        return (chrom, str(start), str(end))
                            except Exception:
                                # most likely a non-integer field has been encountered for start / stop
                                continue
                        # make sure we are at the next new line
                        readline_count = VIEWPORT_MAX_READS_PER_LINE
                        while line.rstrip('\n\r') == line:
                            assert readline_count > 0, Exception('Viewport readline count exceeded for dataset %s.' % dataset.id)
                            line = fh.readline(VIEWPORT_READLINE_BUFFER_SIZE)
                            if not line:
                                break  # EOF
                            readline_count -= 1
                        max_line_count -= 1
                        if not max_line_count:
                            # exceeded viewport or total line count to check
                            break
            except Exception:
                log.exception('Unexpected error')
        return (None, None, None)  # could not determine viewport

    def ucsc_links(self, dataset, type, app, base_url):
        """Generate (site_name, link) pairs for displaying this dataset at UCSC sites."""
        ret_val = []
        chrom, start, stop = self.get_estimated_display_viewport(dataset)
        if chrom is not None:
            for site_name, site_url in app.datatypes_registry.get_legacy_sites_by_build('ucsc', dataset.dbkey):
                if site_name in app.datatypes_registry.get_display_sites('ucsc'):
                    internal_url = "%s" % app.url_for(controller='dataset', dataset_id=dataset.id, action='display_at', filename='ucsc_' + site_name)
                    display_url = quote_plus("%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" % (base_url, app.url_for(controller='root'), dataset.id, type))
                    redirect_url = quote_plus("{}db={}&position={}:{}-{}&hgt.customText=%s".format(site_url, dataset.dbkey, chrom, start, stop))
                    link = '{}?redirect_url={}&display_url={}'.format(internal_url, redirect_url, display_url)
                    ret_val.append((site_name, link))
        return ret_val

    def sniff_prefix(self, file_prefix):
        """
        Determines whether the file is in customtrack format.

        CustomTrack files are built within Galaxy and are basically bed or interval files with the first line looking
        something like this.

        track name="User Track" description="User Supplied Track (from Galaxy)" color=0,0,0 visibility=1

        >>> from galaxy.datatypes.sniff import get_test_fname
        >>> fname = get_test_fname( 'complete.bed' )
        >>> CustomTrack().sniff( fname )
        False
        >>> fname = get_test_fname( 'ucsc.customtrack' )
        >>> CustomTrack().sniff( fname )
        True
        """
        headers = iter_headers(file_prefix, None)
        found_at_least_one_track = False
        first_line = True
        for hdr in headers:
            if first_line:
                first_line = False
                try:
                    # The first line must be a track definition that carries both
                    # 'color' and 'visibility' attributes.
                    if hdr[0].startswith('track'):
                        color_found = False
                        visibility_found = False
                        for elem in hdr[1:]:
                            if elem.startswith('color'):
                                color_found = True
                            if elem.startswith('visibility'):
                                visibility_found = True
                            if color_found and visibility_found:
                                break
                        if not color_found or not visibility_found:
                            return False
                    else:
                        return False
                except Exception:
                    return False
            else:
                try:
                    # Subsequent non-comment lines need integer start/end in columns 2-3.
                    if hdr[0] and not hdr[0].startswith('#'):
                        if len(hdr) < 3:
                            return False
                        try:
                            int(hdr[1])
                            int(hdr[2])
                        except Exception:
                            return False
                        found_at_least_one_track = True
                except Exception:
                    return False
        return found_at_least_one_track
class ENCODEPeak(Interval):
    '''
    Human ENCODE peak format. There are both broad and narrow peak formats.
    Formats are very similar; narrow peak has an additional column, though.

    Broad peak ( http://genome.ucsc.edu/FAQ/FAQformat#format13 ):
    This format is used to provide called regions of signal enrichment based
    on pooled, normalized (interpreted) data. It is a BED 6+3 format.

    Narrow peak http://genome.ucsc.edu/FAQ/FAQformat#format12 and :
    This format is used to provide called peaks of signal enrichment based on
    pooled, normalized (interpreted) data. It is a BED6+4 format.
    '''
    edam_format = "format_3612"
    file_ext = "encodepeak"
    column_names = ['Chrom', 'Start', 'End', 'Name', 'Score', 'Strand', 'SignalValue', 'pValue', 'qValue', 'Peak']
    data_sources = {"data": "tabix", "index": "bigwig"}

    """Add metadata elements"""
    # Standard interval coordinate columns (1-based column indices).
    MetadataElement(name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter)
    MetadataElement(name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter)
    MetadataElement(name="endCol", default=3, desc="End column", param=metadata.ColumnParameter)
    MetadataElement(name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0)
    MetadataElement(name="columns", default=3, desc="Number of columns", readonly=True, visible=False)

    def sniff(self, filename):
        # No reliable sniff is implemented; this datatype must be assigned explicitly.
        return False
class ChromatinInteractions(Interval):
    '''
    Chromatin interactions obtained from 3C/5C/Hi-C experiments.
    '''
    file_ext = "chrint"
    track_type = "DiagonalHeatmapTrack"
    data_sources = {"data": "tabix", "index": "bigwig"}
    column_names = ['Chrom1', 'Start1', 'End1', 'Chrom2', 'Start2', 'End2', 'Value']

    """Add metadata elements"""
    # Two anchor regions per interaction (columns 1-3 and 4-6, 1-based),
    # plus an interaction value column.
    MetadataElement(name="chrom1Col", default=1, desc="Chrom1 column", param=metadata.ColumnParameter)
    MetadataElement(name="start1Col", default=2, desc="Start1 column", param=metadata.ColumnParameter)
    MetadataElement(name="end1Col", default=3, desc="End1 column", param=metadata.ColumnParameter)
    MetadataElement(name="chrom2Col", default=4, desc="Chrom2 column", param=metadata.ColumnParameter)
    MetadataElement(name="start2Col", default=5, desc="Start2 column", param=metadata.ColumnParameter)
    MetadataElement(name="end2Col", default=6, desc="End2 column", param=metadata.ColumnParameter)
    MetadataElement(name="valueCol", default=7, desc="Value column", param=metadata.ColumnParameter)
    MetadataElement(name="columns", default=7, desc="Number of columns", readonly=True, visible=False)

    def sniff(self, filename):
        # No reliable sniff is implemented; this datatype must be assigned explicitly.
        return False
@build_sniff_from_prefix
class ScIdx(Tabular):
    """
    ScIdx files are 1-based and consist of strand-specific coordinate counts.
    They always have 5 columns, and the first row is the column labels:
    'chrom', 'index', 'forward', 'reverse', 'value'.
    Each line following the first consists of data:
    chromosome name (type str), peak index (type int), Forward strand peak
    count (type int), Reverse strand peak count (type int) and value (type int).
    The value of the 5th 'value' column is the sum of the forward and reverse
    peak count values.
    """
    file_ext = "scidx"

    MetadataElement(name="columns", default=0, desc="Number of columns", readonly=True, visible=False)
    MetadataElement(name="column_types", default=[], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False, no_value=[])

    def __init__(self, **kwd):
        """
        Initialize scidx datatype.
        """
        Tabular.__init__(self, **kwd)
        # Don't set column names since the first
        # line of the dataset displays them.
        self.column_names = ['chrom', 'index', 'forward', 'reverse', 'value']

    def sniff_prefix(self, file_prefix):
        """
        Checks for 'scidx-ness.'

        A valid prefix starts with a comment line (e.g.
        "# 2015-11-23 20:18:56.51;input.bam;READ1"), followed by the column
        label row and at least one 5-column data line whose 'value' column
        equals 'forward' + 'reverse'.  At most the first 100 lines are
        examined.  (The unreachable trailing fall-through of the previous
        implementation has been removed; the loop below is the sole exit.)
        """
        count = 0
        fh = file_prefix.string_io()
        while True:
            line = fh.readline()
            if not line:
                # EOF: valid only if we saw the comment line, the label row
                # and at least one data line.
                return count > 1
            line = line.strip()
            if count == 0:
                # The first line must be a comment.
                if not line.startswith('#'):
                    return False
                count += 1
                continue
            # count == 1 is the label row ('chrom index forward reverse value');
            # it is skipped, and every later line is validated as data.
            if count > 1:
                items = line.split('\t')
                if len(items) != 5:
                    return False
                index, forward, reverse, value = items[1:5]
                # The positional and count columns must be non-negative integers.
                if not (index.isdigit() and forward.isdigit() and reverse.isdigit() and value.isdigit()):
                    return False
                # 'value' is defined as the sum of the two strand counts.
                if int(forward) + int(reverse) != int(value):
                    return False
            if count == 100:
                # Enough consistent lines; accept without reading further.
                return True
            count += 1
if __name__ == '__main__':
    # Run the doctest examples embedded in this module's docstrings.
    # testmod() defaults to the __main__ module, which is exactly what this
    # guard guarantees we are running as.
    import doctest
    doctest.testmod()
| en | 0.770108 | Interval datatypes # Contains the meta columns and the words that map to it; list aliases on the # right side of the : in decreasing order of priority # a little faster lookup # Constants for configuring viewport generation: If a line is greater than # VIEWPORT_MAX_READS_PER_LINE * VIEWPORT_READLINE_BUFFER_SIZE bytes in size, # then we will not generate a viewport for that dataset # 1MB Tab delimited data containing interval information Add metadata elements Initialize interval datatype, by adding UCSC display apps Tries to guess from the line the location number of the column for the chromosome, region start-end and strand # only check up to this many non empty lines # found highest priority header to meta_name # next meta_name # Our metadata is set, so break out of the outer loop # Header lines in Interval files are optional. For example, BED is Interval but has no header. # We'll make a best guess at the location of the metadata columns. # Metadata default will be used # Metadata default will be used # we no longer want to guess that this column is the 'name', name must now be set manually for interval files # we will still guess at the strand, as we can make a more educated guess # if len( elems ) > 3: # try: # int( elems[3] ) # except Exception: # if overwrite or not dataset.metadata.element_is_set( 'nameCol' ): # dataset.metadata.nameCol = 4 # Our metadata is set or we examined 100 non-empty lines, so break out of the outer loop Return a chrom, start, stop tuple for viewing a file. # viewport should check at least 100 features; excludes comment lines # maximum number of lines to check; includes comment lines # If column indexes were not passwed, determine from metadata # Scan lines of file to find a reasonable chromosome and range # Skip comment lines # Set chrom last, in case start and end are not integers # Most likely a non-integer field has been encountered # for start / stop. 
Just ignore and make sure we finish # reading the line and decrementing the counters. # Make sure we are at the next new line # EOF # exceeded viewport or total line count to check # Necessary to return strings? # Unexpected error, possibly missing metadata Returns file contents with only the bed data # strand column (should) exists # name column (should) exists Returns formated html of peek Generate links to UCSC genome browser sites based on the dbkey and content of dataset. # Filter UCSC sites to only those that are supported by this build and # enabled. # If there are any valid sites, we need to generate the estimated # viewport # Accumulate links for valid sites Validate an interval file using the bx GenomicIntervalReader Return options for removing errors along with a description Checks for 'intervalness' This format is mostly used by galaxy itself. Valid interval files should include a valid header comment, but this seems to be loosely regulated. >>> from galaxy.datatypes.sniff import get_test_fname >>> fname = get_test_fname( 'test_space.txt' ) >>> Interval().sniff( fname ) False >>> fname = get_test_fname( 'interval.interval' ) >>> Interval().sniff( fname ) True # If we got here, we already know the file is_column_based and is not bed, # so we'll just look for some valid data. # Assume chrom start and end are in column positions 1 and 2 # respectively ( for 0 based columns ) # ------------- Dataproviders Tab delimited chrom/start/end/datavalue dataset Returns file contents as is with no modifications. TODO: this is a functional stub and will need to be enhanced moving forward to provide additional support for bedgraph. Set viewport based on dataset's first 100 lines. Tab delimited data in BED format Add metadata elements # do we need to repeat these? they are the same as should be inherited from interval type Sets the metadata information for datasets previously determined to be in bed format. Returns file contents with only the bed data. 
If bed 6+, treat as interval. check to see if this file doesn't conform to strict genome browser accepted bed # too many fields # remove trailing comma and split on comma # remove trailing comma and split on comma # only check first line for proper form Checks for 'bedness' BED lines have three required fields and nine additional optional fields. The number of fields per line must be consistent throughout any single set of data in an annotation track. The order of the optional fields is binding: lower-numbered fields must always be populated if higher-numbered fields are used. The data type of all 12 columns is: 1-str, 2-int, 3-int, 4-str, 5-int, 6-str, 7-int, 8-int, 9-int or list, 10-int, 11-list, 12-list For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format1 >>> from galaxy.datatypes.sniff import get_test_fname >>> fname = get_test_fname( 'test_tab.bed' ) >>> Bed().sniff( fname ) True >>> fname = get_test_fname( 'interv1.bed' ) >>> Bed().sniff( fname ) True >>> fname = get_test_fname( 'complete.bed' ) >>> Bed().sniff( fname ) True # hdr[3] is a string, 'name', which defines the name of the BED line - difficult to test for this. # hdr[4] is an int, 'score', a score between 0 and 1000. # hdr[5] is strand # hdr[6] is thickStart, the starting position at which the feature is drawn thickly. # hdr[7] is thickEnd, the ending position at which the feature is drawn thickly # hdr[8] is itemRgb, an RGB value of the form R,G,B (e.g. 255,0,0). However, this could also be an int (e.g., 0) # hdr[9] is blockCount, the number of blocks (exons) in the BED line. # hdr[10] is blockSizes - A comma-separated list of the block sizes. # Sometimes the blosck_sizes and block_starts lists end in extra commas # hdr[11] is blockStarts - A comma-separated list of block starts. Tab delimited data in proBED format - adaptation of BED for proteomics data. 
Tab delimited data in strict BED format - no non-standard columns allowed # no user change of datatype allowed # Read only metadata elements # TODO: start and end should be able to be set to these or the proper thick[start/end]? # only new style display applications for this datatype # need column count first # NOTE: This would require aggressively validating the entire file Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 6 Tab delimited data in strict BED format - no non-standard columns allowed; column count forced to 12 Retrieve the URL to call out to an external site and retrieve data. This routes our external URL through a local galaxy instance which makes the data available, followed by redirecting to the remote site with a link back to the available information. Tab delimited data in Gff format Add metadata elements Initialize datatype, by adding GBrowse display app Sets metadata elements for dataset's attributes. # Use first N lines to set metadata for dataset attributes. Attributes # not found in the first N lines will not have metadata. # Loop through attributes to set types. # Default type is string. # Try int. # Try float. # Set attribute metadata and then set additional metadata. Returns formated html of peek Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3 formats. This function should correctly handle both... # viewport should check at least 100 features; excludes comment lines # maximum number of lines to check; includes comment lines #sequence-region'): # ##sequence-region IV 6000000 6030000 # line looks like: # sequence-region ctg123 1 1497228 # IV # 6000000 # 6030000 # use location declared in file # line looks like this: # sequence-region X:120000..140000 # use location declared in file ##sequence-region definition." 
% str(line)) # break #no break, if bad definition, we try another line # Allow UCSC style browser and track info in the GFF file # use location declared in file # We can only set the viewport for a single chromosome # Make sure we have not spanned chromosomes # most likely start/stop is not an int or not enough fields # make sure we are at the next new line # EOF # exceeded viewport or total line count to check # Necessary to return strings? # could not determine viewport Determines whether the file is in gff format GFF lines have nine required fields that must be tab-separated. For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format3 >>> from galaxy.datatypes.sniff import get_test_fname >>> fname = get_test_fname('gff.gff3') >>> Gff().sniff( fname ) False >>> fname = get_test_fname('test.gff') >>> Gff().sniff( fname ) True #gff-version') and hdr[0].find('2') < 0: # ------------- Dataproviders # redefine bc super is Tabular Tab delimited data in Gff3 format Add metadata elements Initialize datatype, by adding GBrowse display app Determines whether the file is in GFF version 3 format GFF 3 format: 1) adds a mechanism for representing more than one level of hierarchical grouping of features and subfeatures. 2) separates the ideas of group membership and feature name/id 3) constrains the feature type field to be taken from a controlled vocabulary. 4) allows a single feature, such as an exon, to belong to more than one group at a time. 5) provides an explicit convention for pairwise alignments 6) provides an explicit convention for features that occupy disjunct regions The format consists of 9 columns, separated by tabs (NOT spaces). Undefined fields are replaced with the "." character, as described in the original GFF spec. 
For complete details see http://song.sourceforge.net/gff3.shtml >>> from galaxy.datatypes.sniff import get_test_fname >>> fname = get_test_fname( 'test.gff' ) >>> Gff3().sniff( fname ) False >>> fname = get_test_fname( 'test.gtf' ) >>> Gff3().sniff( fname ) False >>> fname = get_test_fname('gff.gff3') >>> Gff3().sniff( fname ) True #gff-version') and hdr[0].find('3') >= 0: #gff-version') and hdr[0].find('3') < 0: # Header comments may have been stripped, so inspect the data Tab delimited data in Gtf format Add metadata elements Determines whether the file is in gtf format GTF lines have nine required fields that must be tab-separated. The first eight GTF fields are the same as GFF. The group field has been expanded into a list of attributes. Each attribute consists of a type/value pair. Attributes must end in a semi-colon, and be separated from any following attribute by exactly one space. The attribute list must begin with the two mandatory attributes: gene_id value - A globally unique identifier for the genomic source of the sequence. transcript_id value - A globally unique identifier for the predicted transcript. For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format4 >>> from galaxy.datatypes.sniff import get_test_fname >>> fname = get_test_fname( '1.bed' ) >>> Gtf().sniff( fname ) False >>> fname = get_test_fname( 'test.gff' ) >>> Gtf().sniff( fname ) False >>> fname = get_test_fname( 'test.gtf' ) >>> Gtf().sniff( fname ) True #gff-version') and hdr[0].find('2') < 0: # Check attributes for gene_id, transcript_id Tab delimited data in wiggle format Return a chrom, start, stop tuple for viewing a file. 
# viewport should check at least 100 features; excludes comment lines # maximum number of lines to check; includes comment lines # use the browser line # variableStep chrom=chr20 # different chrom or different section of the chrom # make sure we are at the next new line # EOF # exceeded viewport or total line count to check # Necessary to return strings? # could not determine viewport Returns formated html of peek # variableStep format is nucleotide position\tvalue\n, # fixedStep is value\n # "Wiggle track data values can be integer or real, positive or negative values" # We are either in the track definition line or in a declaration line # we'll arbitrarily only use the first 100 data lines in this wig file to calculate tabular attributes (column types) # this should be sufficient, except when we have mixed wig track types (bed, variable, fixed), # but those cases are not a single table that would have consistant column definitions # optional metadata values set in Tabular class will be 'None' Determines wether the file is in wiggle format The .wig format is line-oriented. Wiggle data is preceeded by a track definition line, which adds a number of options for controlling the default display of this track. Following the track definition line is the track data, which can be entered in several different formats. The track definition line begins with the word 'track' followed by the track type. The track type with version is REQUIRED, and it currently must be wiggle_0. For example, track type=wiggle_0... 
For complete details see http://genome.ucsc.edu/goldenPath/help/wiggle.html >>> from galaxy.datatypes.sniff import get_test_fname >>> fname = get_test_fname( 'interv1.bed' ) >>> Wiggle().sniff( fname ) False >>> fname = get_test_fname( 'wiggle.wig' ) >>> Wiggle().sniff( fname ) True # Determine appropriate resolution to plot ~1000 points # Restrict to valid range # ------------- Dataproviders UCSC CustomTrack Initialize interval datatype, by adding UCSC display app Returns formated html of peek Return a chrom, start, stop tuple for viewing a file. # FIXME: only BED and WIG custom tracks are currently supported # As per previously existing behavior, viewport will only be over the first intervals # maximum number of lines to check; includes comment lines # most likely a non-integer field has been encountered for start / stop # make sure we are at the next new line # EOF # exceeded viewport or total line count to check # could not determine viewport Determines whether the file is in customtrack format. CustomTrack files are built within Galaxy and are basically bed or interval files with the first line looking something like this. track name="User Track" description="User Supplied Track (from Galaxy)" color=0,0,0 visibility=1 >>> from galaxy.datatypes.sniff import get_test_fname >>> fname = get_test_fname( 'complete.bed' ) >>> CustomTrack().sniff( fname ) False >>> fname = get_test_fname( 'ucsc.customtrack' ) >>> CustomTrack().sniff( fname ) True Human ENCODE peak format. There are both broad and narrow peak formats. Formats are very similar; narrow peak has an additional column, though. Broad peak ( http://genome.ucsc.edu/FAQ/FAQformat#format13 ): This format is used to provide called regions of signal enrichment based on pooled, normalized (interpreted) data. It is a BED 6+3 format. Narrow peak http://genome.ucsc.edu/FAQ/FAQformat#format12 and : This format is used to provide called peaks of signal enrichment based on pooled, normalized (interpreted) data. 
It is a BED6+4 format. Add metadata elements Chromatin interactions obtained from 3C/5C/Hi-C experiments. Add metadata elements ScIdx files are 1-based and consist of strand-specific coordinate counts. They always have 5 columns, and the first row is the column labels: 'chrom', 'index', 'forward', 'reverse', 'value'. Each line following the first consists of data: chromosome name (type str), peak index (type int), Forward strand peak count (type int), Reverse strand peak count (type int) and value (type int). The value of the 5th 'value' column is the sum of the forward and reverse peak count values. Initialize scidx datatype. # Don't set column names since the first # line of the dataset displays them. Checks for 'scidx-ness.' # EOF # The second line is always the labels: # chrom index forward reverse value # We need at least the column labels and a data line. # The first line is always a comment like this: # 2015-11-23 20:18:56.51;input.bam;READ1 # Skip first line. | 2.361309 | 2 |
setup.py | kuhy/pyAPDUFuzzer | 0 | 6624789 | <reponame>kuhy/pyAPDUFuzzer<filename>setup.py
#!/usr/bin/env python
# Packaging configuration for the apdu-fuzzer project.
import io

from setuptools import setup
from setuptools import find_packages

version = '0.0.3'

# Core runtime dependencies.
install_requires = [
    'six',
    'llsmartcard-ph4',
    'psutil',
    'pyhashxx',
]

# Optional dependencies for AFL-based fuzzing ("pip install apdu-fuzzer[afl]").
afl_extras = [
    'python-afl-ph4',
]

# Optional dependencies for development ("pip install apdu-fuzzer[dev]").
dev_extras = [
    'pep8',
    'tox',
    'pypandoc',
    'jupyter',
]

# Prefer a reST long description converted from README.md via pypandoc;
# fall back to the raw Markdown when pypandoc (or pandoc itself) is missing
# or the conversion fails.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
    long_description = long_description.replace("\r", '')
except Exception:  # (IOError, ImportError):
    # 'io' is already imported at module level, so no re-import is needed here.
    with io.open('README.md', encoding='utf-8') as f:
        long_description = f.read()

setup(
    name='apdu-fuzzer',
    version=version,
    description='APDU fuzzer',
    long_description=long_description,
    url='https://github.com/petrs/APDUFuzzer',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Security',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    extras_require={
        'dev': dev_extras,
        'afl': afl_extras,
    },
    entry_points={
        'console_scripts': [
            'apdu-fuzz = apdu_fuzzer.main:main',
            'apdu-afl-fuzz = apdu_fuzzer.main_afl:main',
        ],
    }
)
| #!/usr/bin/env python
import io
from setuptools import setup
from setuptools import find_packages
version = '0.0.3'
install_requires = [
'six',
'llsmartcard-ph4',
'psutil',
'pyhashxx',
]
afl_extras = [
'python-afl-ph4',
]
dev_extras = [
'pep8',
'tox',
'pypandoc',
'jupyter',
]
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
long_description = long_description.replace("\r", '')
except Exception: # (IOError, ImportError):
import io
with io.open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name='apdu-fuzzer',
version=version,
description='APDU fuzzer',
long_description=long_description,
url='https://github.com/petrs/APDUFuzzer',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Security',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'dev': dev_extras,
'afl': afl_extras,
},
entry_points={
'console_scripts': [
'apdu-fuzz = apdu_fuzzer.main:main',
'apdu-afl-fuzz = apdu_fuzzer.main_afl:main',
],
}
) | en | 0.207962 | #!/usr/bin/env python # (IOError, ImportError): | 1.606765 | 2 |
LineSync/filters.py | l-mda/AsyncLine | 2 | 6624790 | import re
from .lib.Gen.f_LineService import Operation
class Filter:
    """Base class for all message filters.

    A concrete filter is a callable taking one message and returning a
    truthy/falsy result.  Filters compose via operators: ``~f`` negates,
    ``f & g`` requires both, ``f | g`` requires either.
    """

    def __call__(self, message):
        # Concrete subclasses must implement the predicate.
        raise NotImplementedError

    def __invert__(self):
        # ~f -> matches exactly when f does not.
        return InvertFilter(self)

    def __and__(self, other):
        # f & g -> matches when both filters match.
        return AndFilter(self, other)

    def __or__(self, other):
        # f | g -> matches when at least one filter matches.
        return OrFilter(self, other)
class InvertFilter(Filter):
    """Logical NOT of another filter (built by ``~filter``)."""

    def __init__(self, base):
        # The filter whose verdict is inverted.
        self.base = base

    def __call__(self, message):
        return not self.base(message)
class AndFilter(Filter):
    """Logical AND of two filters (built by ``filter_a & filter_b``)."""

    def __init__(self, base, other):
        self.base = base    # left-hand operand
        self.other = other  # right-hand operand

    def __call__(self, message):
        # Short-circuits like ``and``: ``other`` is skipped when ``base`` rejects.
        return self.base(message) and self.other(message)
class OrFilter(Filter):
    """Logical OR of two filters (built by ``filter_a | filter_b``)."""

    def __init__(self, base, other):
        self.base = base    # left-hand operand
        self.other = other  # right-hand operand

    def __call__(self, message):
        # Short-circuits like ``or``: ``other`` is skipped when ``base`` matches.
        return self.base(message) or self.other(message)
def create(name: str, func: callable, **kwargs) -> 'Filter':
    """Build a one-off :class:`Filter` subclass and return an *instance* of it.

    The original annotation said ``-> type``, but the trailing ``()`` makes
    this return an instance, which is what the ``Filters`` attributes store.

    Args:
        name: class name for the generated filter type (useful in reprs).
        func: predicate bound as ``__call__``; invoked as ``func(self, message)``.
        **kwargs: extra attributes stored on the generated class.  Applied
            after ``__call__``, so a ``__call__`` key here deliberately
            overrides *func* (update order preserved from the original).
    """
    attrs = {"__call__": func}
    attrs.update(kwargs)
    return type(name, (Filter,), attrs)()
class Filters:
    """Namespace of ready-made message filters plus factories for custom ones.

    Every attribute is an instance of a dynamically generated :class:`Filter`
    subclass (built by :func:`create`), so they compose with ``~``, ``&`` and
    ``|``.  In the lambdas below, ``_`` is the filter instance itself (the
    bound ``self``) and ``m`` is the incoming message.
    """

    # Re-export the factory so callers can write Filters.create(...).
    create = create

    # --- content-type filters ------------------------------------------------
    # The integers compared against m.contentType are the platform's message
    # content-type codes, as used throughout this module.
    text = create("Text", lambda _, m: bool(m.contentType == 0 and m.text is not None))
    """Filter text messages."""
    image = create("Image", lambda _, m: bool(m.contentType == 1))
    """Filter message that contain object Image"""
    video = create("Video", lambda _, m: bool(m.contentType == 2))
    """Filter message that contain object Video"""
    audio = create("Audio", lambda _, m: bool(m.contentType == 3))
    """Filter message that contain object Audio"""
    html = create("Html", lambda _, m: bool(m.contentType == 4))
    """Filter message that contain object HTML"""
    pdf = create("Pdf", lambda _, m: bool(m.contentType == 5))
    """Filter message that contain object PDF"""
    call = create("Call", lambda _, m: bool(m.contentType == 6))
    """Filter calling from other"""
    sticker = create("Sticker", lambda _, m: bool(m.contentType == 7))
    """Filter message that contain object Sticker"""
    gift = create("Gift", lambda _, m: bool(m.contentType == 9))
    """Filter message that contain object Gift"""
    link = create("Link", lambda _, m: bool(m.contentType == 12))
    """Filter message that contain object Link"""
    contact = create("Contact", lambda _, m: bool(m.contentType == 13))
    """Filter message that contain object Contact"""
    files = create("Files", lambda _, m: bool(m.contentType == 14))
    """Filter message that contain object Files"""
    location = create("Location", lambda _, m: bool(m.contentType == 15))
    """Filter message that contain object Location"""
    post = create("Post", lambda _, m: bool(m.contentType == 16))
    """Filter message that contain object Post"""
    rich = create("Rich", lambda _, m: bool(m.contentType == 17))
    """Filter message that contain object Message Rich"""
    event = create("Event", lambda _, m: bool(m.contentType == 18))
    """Filter message that contain object Event"""
    music = create("Music", lambda _, m: bool(m.contentType == 19))
    """Filter message that contain object Music"""

    # --- metadata-based filters ----------------------------------------------
    mention = create("Mention", lambda _, m: bool('MENTION' in m.contentMetadata.keys()))
    """Filter message that contain object Mention"""
    reply = create("Reply", lambda _, m: bool("reply" in m.contentMetadata.values() or "SRC_SVC_CODE" in m.contentMetadata.keys()))
    """Filter message that are Reply"""
    # TODO: Forward only works for the text type; no reliable metadata clue was
    # found to catch forwarded media (image, video, audio).
    forward = create("Forward", lambda _, m: bool("forward" in m.contentMetadata.values()))
    """Filter message that are Forwarded"""

    # --- destination-type (grouping) filters ---------------------------------
    group = create("Group", lambda _, m: bool(m.toType == 2))
    private = create("Private", lambda _, m: bool(m.toType == 0))
    both = create("Both", lambda _, m: bool(m.toType in [0, 2, 1]))

    # --- event filters -------------------------------------------------------
    flex = create("Flex", lambda _, m: bool(m.contentType == 22 and "FLEX_JSON" in m.contentMetadata.keys()))
    # NOTE(review): ``Filters.html`` is a filter *instance* and is always
    # truthy here, so the effective test is only the metadata lookup; the
    # lookup raises KeyError when "HTML_CONTENT" is absent — verify intent.
    image_carousel = create("ImageCarousel", lambda _, m: bool(Filters.html and m.contentMetadata["HTML_CONTENT"] != None))

    @staticmethod
    def command(commands: str or list,
                prefix: str or list = "/",
                separator: str = " ",
                case_sensitive: bool = True):
        """Filter commands, i.e.: text messages starting with "/" or any other custom prefix.

        Args:
            commands (``str`` | ``list``):
                The command or list of commands as string the filter should look for.
                Examples: "start", ["start", "help", "settings"]. When a message text containing
                a command arrives, the command itself and its arguments will be stored in the *command*
                field of the :class:`Message <akad.ttypes.Message>`.
            prefix (``str`` | ``list``, *optional*):
                A prefix or a list of prefixes as string the filter should look for.
                Defaults to "/" (slash). Examples: ".", "!", ["/", "!", "."].
                Can be None or "" (empty string) to allow commands with no prefix at all.
                Note: a multi-character prefix *string* is split into single
                characters by ``set()`` below — pass a list for multi-char prefixes.
            separator (``str``, *optional*):
                The command arguments separator. Defaults to " " (white space).
                Examples: /start first second, /start-first-second, /start.first.second.
            case_sensitive (``bool``, *optional*):
                Pass True if you want your command(s) to be case sensitive. Defaults to True.
                Examples: when True, command="Start" would trigger /Start but not /start.
        """
        def f(_, m):
            # Attributes stashed on the generated filter instance:
            #   _.p = prefixes, _.s = separator, _.c = known commands, _.cs = case flag.
            m.command = []
            if m.text:
                for i in _.p:
                    if m.text.startswith(i):
                        t = m.text.split(_.s)
                        # Strip the prefix from the first token; the rest are arguments.
                        c, a = t[0][len(i):], t[1:]
                        c = c if _.cs else c.lower()
                        # Unknown command -> None so the filter rejects below.
                        m.command = ([c] + a) if c in _.c else None
            return bool(m.command)

        return create(
            "Command",
            f,
            # Normalise the command(s) into a set, lower-cased when insensitive.
            c={commands if case_sensitive
               else commands.lower()}
              if not isinstance(commands, list)
              else {c if case_sensitive
                    else c.lower()
                    for c in commands},
            p=set(prefix) if prefix else {""},
            s=separator,
            cs=case_sensitive
        )

    @staticmethod
    def regex(pattern, flags: int = 0):
        """Filter messages that match a given RegEx pattern.

        Args:
            pattern (``str``):
                The RegEx pattern as string, it will be applied to the text of a message. When a pattern matches,
                all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`
                are stored in the message's *matches* field.
            flags (``int``, *optional*):
                RegEx flags.
        """
        def f(_, m):
            # Collect every match object; ``or ""`` guards against m.text is None.
            m.matches = [i for i in _.p.finditer(m.text or "")]
            return bool(m.matches)

        return create("Regex", f, p=re.compile(pattern, flags))

    class user(Filter, set):
        """Filter messages coming from one or more users.

        You can use `set bound methods <https://docs.python.org/3/library/stdtypes.html#set>`_ to manipulate the
        users container.

        Args:
            users (``str`` | ``list``):
                Pass one or more user mid to filter users.
                Defaults to None (no users).
        """

        def __init__(self, users: int or str or list = None):
            # Normalise to a list first, so the branch below always takes
            # the isinstance(users, list) arm.
            users = [] if users is None else users if isinstance(users, list) else [users]
            # NOTE(review): because of the normalisation above the else-branch
            # is unreachable (and its trailing ``user`` references the class
            # name, not the argument) — candidate for cleanup.
            super().__init__(
                {"me" if i in ["me", "self"] else i.lower() if isinstance(i, str) else i for i in users}
                if isinstance(users, list) else
                {"me" if users in ["me", "self"] else users.lower() if isinstance(users, str) else user}
            )

        def __call__(self, message):
            # Match when the sender is listed, or when "me"/"self" was given
            # (which matches any sender as long as from_ is truthy).
            return bool(
                message.from_
                and (message.from_ in self
                     or ("me" in self)
                     )
            )

    class chat(Filter, set):
        """Filter messages coming from one or more chats.

        You can use `set bound methods <https://docs.python.org/3/library/stdtypes.html#set>`_ to manipulate the
        chats container.

        Args:
            chats (``str`` | ``list``):
                Pass one or more chat mid to filter chats.
                Defaults to None (no chats).
        """

        def __init__(self, chats: int or str or list = None):
            # Normalise to a list first, mirroring Filters.user above.
            chats = [] if chats is None else chats if isinstance(chats, list) else [chats]
            # NOTE(review): the else-branch is unreachable after the
            # normalisation, and its inner ``isinstance(chats, list)`` test
            # looks inverted (presumably meant ``str``) — candidate for cleanup.
            super().__init__(
                {i.lower() if isinstance(i, str) else i for i in chats}
                if isinstance(chats, list) else
                {chats.lower() if isinstance(chats, list) else chats}
            )

        def __call__(self, message):
            # Only group messages (toType == 2) whose destination is listed.
            return bool(
                message.toType == 2
                and (message.to in self)
            )
) | import re
from .lib.Gen.f_LineService import Operation
class Filter:
def __call__(self, message):
raise NotImplementedError
def __invert__(self):
return InvertFilter(self)
def __and__(self, other):
return AndFilter(self, other)
def __or__(self, other):
return OrFilter(self, other)
class InvertFilter(Filter):
def __init__(self, base):
self.base = base
def __call__(self, message):
return not self.base(message)
class AndFilter(Filter):
def __init__(self, base, other):
self.base = base
self.other = other
def __call__(self, message):
return self.base(message) and self.other(message)
class OrFilter(Filter):
def __init__(self, base, other):
self.base = base
self.other = other
def __call__(self, message):
return self.base(message) or self.other(message)
def create(name: str, func: callable, **kwargs) -> type:
d = {"__call__": func}
d.update(kwargs)
return type(name, (Filter,), d)()
class Filters:
create = create
#Messages
text = create("Text", lambda _,m: bool(m.contentType == 0 and m.text is not None))
"""Filter text messages."""
image = create("Image", lambda _,m: bool(m.contentType == 1))
"""Filter message that contain object Image"""
video = create("Video", lambda _,m: bool(m.contentType == 2))
"""Filter message that contain object Video"""
audio = create("Audio", lambda _,m: bool(m.contentType == 3))
"""Filter message that contain object Audio"""
html = create("Html", lambda _,m: bool(m.contentType == 4))
"""Filter message that contain object HTML"""
pdf = create("Pdf", lambda _,m: bool(m.contentType == 5))
"""Filter message that contain object PDF"""
call = create("Call", lambda _,m: bool(m.contentType == 6))
"""Filter calling from other"""
sticker = create("Sticker", lambda _,m: bool(m.contentType == 7))
"""Filter message that contain object Sticker"""
gift = create("Gift", lambda _,m: bool(m.contentType == 9))
"""Filter message that contain object Gift"""
link = create("Link", lambda _,m: bool(m.contentType == 12))
"""Filter message that contain object Link"""
contact = create("Contact", lambda _,m: bool(m.contentType == 13))
"""Filter message that contain object Contact"""
files = create("Files", lambda _,m: bool(m.contentType == 14))
"""Filter message that contain object Files"""
location = create("Location", lambda _,m: bool(m.contentType == 15))
"""Filter message that contain object Location"""
post = create("Post", lambda _,m: bool(m.contentType == 16))
"""Filter message that contain object Post"""
rich = create("Rich", lambda _,m: bool(m.contentType == 17))
"""Filter message that contain object Message Rich"""
event = create("Event", lambda _,m: bool(m.contentType == 18))
"""Filter message that contain object Event"""
music = create("Music", lambda _,m: bool(m.contentType == 19))
"""Filter message that contain object Music"""
mention = create("Mention", lambda _,m: bool('MENTION' in m.contentMetadata.keys()))
"""Filter message that contain object Mention"""
reply = create("Reply", lambda _,m: bool("reply" in m.contentMetadata.values() or "SRC_SVC_CODE" in m.contentMetadata.keys()))
"""Filter message that are Reply"""
#TODO: Forward only worked for text type, i cant found clue to catch forward media as image,video,audio
forward = create("Forward", lambda _,m: bool("forward" in m.contentMetadata.values()))
"""Filter message that are Forwarded"""
#Grouping
group = create("Group", lambda _,m: bool(m.toType == 2))
private = create("Private", lambda _,m: bool(m.toType == 0))
both = create("Both", lambda _,m: bool(m.toType in [0, 2, 1]))
#Event
flex = create("Flex", lambda _,m: bool(m.contentType == 22 and "FLEX_JSON" in m.contentMetadata.keys()))
image_carousel = create("ImageCarousel", lambda _,m: bool(Filters.html and m.contentMetadata["HTML_CONTENT"] != None))
@staticmethod
def command(commands: str or list,
prefix: str or list = "/",
separator: str = " ",
case_sensitive: bool = True):
"""Filter commands, i.e.: text messages starting with "/" or any other custom prefix.
Args:
command (``str`` | ``list``):
The command or list of commands as string the filter should look for.
Examples: "start", ["start", "help", "settings"]. When a message text containing
a command arrives, the command itself and its arguments will be stored in the *command*
field of the :class:`Message <akad.ttypes.Message>`.
prefix (``str`` | ``list``, *optional*):
A prefix or a list of prefixes as string the filter should look for.
Defaults to "/" (slash). Examples: ".", "!", ["/", "!", "."].
Can be None or "" (empty string) to allow commands with no prefix at all.
separator (``str``, *optional*):
The command arguments separator. Defaults to " " (white space).
Examples: /start first second, /start-first-second, /start.first.second.
case_sensitive (``bool``, *optional*):
Pass True if you want your command(s) to be case sensitive. Defaults to False.
Examples: when True, command="Start" would trigger /Start but not /start.
"""
def f(_, m):
m.command = []
if m.text:
for i in _.p:
if m.text.startswith(i):
t = m.text.split(_.s)
c, a = t[0][len(i):], t[1:]
c = c if _.cs else c.lower()
m.command = ([c] + a) if c in _.c else None
return bool(m.command)
return create(
"Command",
f,
c = {commands if case_sensitive
else commands.lower()}
if not isinstance(commands, list)
else {c if case_sensitive
else c.lower()
for c in commands},
p=set(prefix) if prefix else {""},
s=separator,
cs=case_sensitive
)
@staticmethod
def regex(pattern, flags: int = 0):
"""Filter messages that match a given RegEx pattern.
Args:
pattern (``str``):
The RegEx pattern as string, it will be applied to the text of a message. When a pattern matches,
all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`
flags (``int``, *optional*):
RegEx flags.
"""
def f(_, m):
m.matches = [i for i in _.p.finditer(m.text or "")]
return bool(m.matches)
return create("Regex", f, p=re.compile(pattern, flags))
class user(Filter, set):
"""Filter messages coming from one or more users.
You can use `set bound methods <https://docs.python.org/3/library/stdtypes.html#set>`_ to manipulate the
users container.
Args:
users (``str`` | ``list``):
Pass one or more user mid to filter users.
Defaults to None (no users).
"""
def __init__(self, users: int or str or list = None):
users = [] if users is None else users if isinstance(users, list) else [users]
super().__init__(
{"me" if i in ["me", "self"] else i.lower() if isinstance(i, str) else i for i in users}
if isinstance(users, list) else
{"me" if users in ["me", "self"] else users.lower() if isinstance(users, str) else user}
)
def __call__(self, message):
return bool(
message.from_
and (message.from_ in self
or ("me" in self)
)
)
class chat(Filter, set):
"""Filter messages coming from one or more chats.
You can use `set bound methods <https://docs.python.org/3/library/stdtypes.html#set>`_ to manipulate the
chats container.
Args:
chats (``str`` | ``list``):
Pass one or more chat mid to filter chats.
Defaults to None (no chats).
"""
def __init__(self, chats: int or str or list = None):
chats = [] if chats is None else chats if isinstance(chats, list) else [chats]
super().__init__(
{i.lower() if isinstance(i, str) else i for i in chats}
if isinstance(chats, list) else
{chats.lower() if isinstance(chats, list) else chats}
)
def __call__(self, message):
return bool(
message.toType == 2
and (message.to in self)
) | en | 0.596521 | #Messages Filter text messages. Filter message that contain object Image Filter message that contain object Video Filter message that contain object Audio Filter message that contain object HTML Filter message that contain object PDF Filter calling from other Filter message that contain object Sticker Filter message that contain object Gift Filter message that contain object Link Filter message that contain object Contact Filter message that contain object Files Filter message that contain object Location Filter message that contain object Post Filter message that contain object Message Rich Filter message that contain object Event Filter message that contain object Music Filter message that contain object Mention Filter message that are Reply #TODO: Forward only worked for text type, i cant found clue to catch forward media as image,video,audio Filter message that are Forwarded #Grouping #Event Filter commands, i.e.: text messages starting with "/" or any other custom prefix. Args: command (``str`` | ``list``): The command or list of commands as string the filter should look for. Examples: "start", ["start", "help", "settings"]. When a message text containing a command arrives, the command itself and its arguments will be stored in the *command* field of the :class:`Message <akad.ttypes.Message>`. prefix (``str`` | ``list``, *optional*): A prefix or a list of prefixes as string the filter should look for. Defaults to "/" (slash). Examples: ".", "!", ["/", "!", "."]. Can be None or "" (empty string) to allow commands with no prefix at all. separator (``str``, *optional*): The command arguments separator. Defaults to " " (white space). Examples: /start first second, /start-first-second, /start.first.second. case_sensitive (``bool``, *optional*): Pass True if you want your command(s) to be case sensitive. Defaults to False. Examples: when True, command="Start" would trigger /Start but not /start. Filter messages that match a given RegEx pattern. 
Args: pattern (``str``): The RegEx pattern as string, it will be applied to the text of a message. When a pattern matches, all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>` flags (``int``, *optional*): RegEx flags. Filter messages coming from one or more users. You can use `set bound methods <https://docs.python.org/3/library/stdtypes.html#set>`_ to manipulate the users container. Args: users (``str`` | ``list``): Pass one or more user mid to filter users. Defaults to None (no users). Filter messages coming from one or more chats. You can use `set bound methods <https://docs.python.org/3/library/stdtypes.html#set>`_ to manipulate the chats container. Args: chats (``str`` | ``list``): Pass one or more chat mid to filter chats. Defaults to None (no chats). | 2.612748 | 3 |
base.py | fujita/bgperf | 12 | 6624791 | # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from settings import dckr
import io
import os
from itertools import chain
from threading import Thread
import netaddr
import sys
import time
import datetime
from jinja2 import Environment, FileSystemLoader, PackageLoader, StrictUndefined, make_logging_undefined
def flatten(iterables):
    """Lazily flatten one level of nesting: yield each item of each iterable.

    Replaces the previous ``flatten = lambda l: ...`` assignment (PEP 8 E731)
    with an equivalent named function; callers are unaffected.
    """
    return chain.from_iterable(iterables)
def get_ctn_names():
    """Return the names of every Docker container (running or stopped).

    Docker reports names with a leading '/', which is stripped here.
    """
    raw_names = flatten(entry['Names'] for entry in dckr.containers(all=True))
    return [n[1:] if n[0] == '/' else n for n in raw_names]
def ctn_exists(name):
    """Return True when a Docker container called *name* exists, in any state."""
    existing = get_ctn_names()
    return name in existing
def img_exists(name):
    """Return True if a local Docker image whose first repo tag matches *name* exists.

    As before, only the first ``RepoTags`` entry of each image is considered
    and the tag part after ':' is ignored.  Fix: images whose ``RepoTags`` is
    an *empty list* (untagged/dangling images on some Docker versions) are now
    skipped instead of raising IndexError; ``None`` was already skipped.
    """
    for img in dckr.images():
        tags = img['RepoTags']
        if not tags:  # None or [] -> untagged image, cannot match
            continue
        if tags[0].split(':')[0] == name:
            return True
    return False
def rm_line():
    """Erase the previously printed terminal line using ANSI escape codes.

    Sequence: cursor up, erase line, cursor left, cursor up again — followed
    by the newline that ``print`` appends.
    """
    cursor_up = '\x1b[1A'
    erase_line = '\x1b[2K'
    cursor_left = '\x1b[1D'
    print(cursor_up + erase_line + cursor_left + cursor_up)
class Container(object):
    """Wrapper around one Docker container taking part in a bgperf scenario.

    Responsibilities: building the image from ``cls.dockerfile``, creating and
    starting the container on a Docker network, executing commands inside it,
    and publishing CPU/memory and BGP-neighbor statistics to a queue.
    ``host_dir`` on the host is bind-mounted at ``guest_dir`` in the container.
    """

    def __init__(self, name, image, host_dir, guest_dir, conf):
        self.name = name              # container name
        self.image = image            # Docker image tag to run
        self.host_dir = host_dir      # host side of the shared volume
        self.guest_dir = guest_dir    # mount point inside the container
        self.conf = conf              # per-container scenario configuration
        self.config_name = None
        self.stop_monitoring = False  # cooperative stop flag for monitor threads
        self.command = None           # optional container command
        self.environment = None       # optional container environment
        self.volumes = [self.guest_dir]
        # World-writable so processes inside the container can write back
        # through the bind mount regardless of uid mapping.
        if not os.path.exists(host_dir):
            os.makedirs(host_dir)
            os.chmod(host_dir, 0o777)

    @classmethod
    def build_image(cls, force, tag, nocache=False):
        """Build the class's Docker image from ``cls.dockerfile``.

        Builds only when *force* is True or no image tagged *tag* exists.
        Host proxy settings are injected as ENV lines right after the FROM
        statement so they apply to every subsequent build step.
        """
        def insert_after_from(dockerfile, line):
            # Insert *line* just after the (last) FROM statement of the Dockerfile.
            lines = dockerfile.split('\n')
            i = -1
            for idx, l in enumerate(lines):
                elems = [e.strip() for e in l.split()]
                if len(elems) > 0 and elems[0] == 'FROM':
                    i = idx
            if i < 0:
                raise Exception('no FROM statement')
            lines.insert(i+1, line)
            return '\n'.join(lines)

        for env in ['http_proxy', 'https_proxy']:
            if env in os.environ:
                cls.dockerfile = insert_after_from(cls.dockerfile, 'ENV {0} {1}'.format(env, os.environ[env]))

        # The Dockerfile is passed in-memory; no build context directory is used.
        f = io.BytesIO(cls.dockerfile.encode('utf-8'))
        if force or not img_exists(tag):
            print('build {0}...'.format(tag))
            for line in dckr.build(fileobj=f, rm=True, tag=tag, decode=True, nocache=nocache):
                if 'stream' in line:
                    print(line['stream'].strip())

    def get_ipv4_addresses(self):
        """Return the container's IPv4 address(es) from its configuration.

        Subclasses with multiple addresses (e.g. testers) override this.
        """
        if 'local-address' in self.conf:
            local_addr = self.conf['local-address']
            return [local_addr]
        raise NotImplementedError()

    def get_host_config(self):
        """Build the Docker host config: bind mount, privileged, bridge network."""
        host_config = dckr.create_host_config(
            binds=['{0}:{1}'.format(os.path.abspath(self.host_dir), self.guest_dir)],
            privileged=True,
            network_mode='bridge',
            cap_add=['NET_ADMIN']  # needed for `ip addr add` inside the container
        )
        return host_config

    def run(self, dckr_net_name='', rm=True):
        """Create and start the container, attach it to *dckr_net_name*.

        Validates that the configured IPv4 addresses fall inside the network's
        subnets, connects the first address via Docker, and adds any further
        addresses manually with `ip addr add` inside the container.
        Returns the created-container dict, or None when the network is missing.
        """
        if rm and ctn_exists(self.name):
            print('remove container:', self.name)
            dckr.remove_container(self.name, force=True)

        host_config = self.get_host_config()
        ctn = dckr.create_container(image=self.image, command=self.command, environment=self.environment,
                                    detach=True, name=self.name,
                                    stdin_open=True, volumes=self.volumes, host_config=host_config)
        self.ctn_id = ctn['Id']

        ipv4_addresses = self.get_ipv4_addresses()

        net_id = None
        # dckr.networks(names=...) can return partial matches, hence the re-check.
        for network in dckr.networks(names=[dckr_net_name]):
            if network['Name'] != dckr_net_name:
                continue
            net_id = network['Id']

            if not 'IPAM' in network:
                print(('can\'t verify if container\'s IP addresses '
                       'are valid for Docker network {}: missing IPAM'.format(dckr_net_name)))
                break
            ipam = network['IPAM']
            if not 'Config' in ipam:
                print(('can\'t verify if container\'s IP addresses '
                       'are valid for Docker network {}: missing IPAM.Config'.format(dckr_net_name)))
                break

            ip_ok = False
            network_subnets = [item['Subnet'] for item in ipam['Config'] if 'Subnet' in item]
            for ip in ipv4_addresses:
                for subnet in network_subnets:
                    ip_ok = netaddr.IPAddress(ip) in netaddr.IPNetwork(subnet)
                if not ip_ok:
                    print(('the container\'s IP address {} is not valid for Docker network {} '
                           'since it\'s not part of any of its subnets ({})'.format(
                               ip, dckr_net_name, ', '.join(network_subnets))))
                    print(('Please consider removing the Docket network {net} '
                           'to allow bgperf to create it again using the '
                           'expected subnet:\n'
                           ' docker network rm {net}'.format(net=dckr_net_name)))
                    # Invalid addressing is fatal for the whole benchmark run.
                    sys.exit(1)
            # Target network found and validated; stop scanning.
            break

        if net_id is None:
            print('Docker network "{}" not found!'.format(dckr_net_name))
            return

        # Docker attaches only the first address; the rest are added manually below.
        dckr.connect_container_to_network(self.ctn_id, net_id, ipv4_address=ipv4_addresses[0])
        dckr.start(container=self.name)

        if len(ipv4_addresses) > 1:
            # Find the interface/prefix Docker used for the first IP address,
            # then add the remaining addresses to the same interface.
            dev = None
            pxlen = None
            res = self.local('ip addr').decode("utf-8")
            for line in res.split('\n'):
                if ipv4_addresses[0] in line:
                    dev = line.split(' ')[-1].strip()
                    pxlen = line.split('/')[1].split(' ')[0].strip()
            if not dev:
                # Fallback when parsing failed; /8 is a guess — TODO confirm.
                dev = "eth0"
                pxlen = 8
            for ip in ipv4_addresses[1:]:
                self.local(f'ip addr add {ip}/{pxlen} dev {dev}')

        return ctn

    def stats(self, queue):
        """Start a daemon thread streaming CPU/memory stats into *queue*.

        CPU percentage is derived from the deltas between consecutive Docker
        stat samples, scaled by the number of CPUs (same formula `docker stats`
        uses).  The thread exits when ``self.stop_monitoring`` becomes True.
        """
        def stats():  # inner worker intentionally shadows the method name
            if self.stop_monitoring:
                return
            for stat in dckr.stats(self.ctn_id, decode=True):
                if self.stop_monitoring:
                    return
                cpu_percentage = 0.0
                prev_cpu = stat['precpu_stats']['cpu_usage']['total_usage']
                # The very first sample has no previous system counter.
                if 'system_cpu_usage' in stat['precpu_stats']:
                    prev_system = stat['precpu_stats']['system_cpu_usage']
                else:
                    prev_system = 0
                cpu = stat['cpu_stats']['cpu_usage']['total_usage']
                system = stat['cpu_stats']['system_cpu_usage'] if 'system_cpu_usage' in stat['cpu_stats'] else 0
                if not 'percpu_usage' in stat['cpu_stats']['cpu_usage']:
                    continue
                cpu_num = len(stat['cpu_stats']['cpu_usage']['percpu_usage'])
                cpu_delta = float(cpu) - float(prev_cpu)
                system_delta = float(system) - float(prev_system)
                if system_delta > 0.0 and cpu_delta > 0.0:
                    cpu_percentage = (cpu_delta / system_delta) * float(cpu_num) * 100.0
                mem_usage = stat['memory_stats'].get('usage', 0)
                queue.put({'who': self.name, 'cpu': cpu_percentage, 'mem': mem_usage, 'time': datetime.datetime.now()})

        t = Thread(target=stats)
        t.daemon = True
        t.start()

    def neighbor_stats(self, queue):
        """Start a daemon thread publishing BGP-neighbor progress once per second.

        Each tick puts two records into *queue*: which neighbors reached their
        configured check-points and which finished receiving all routes.
        Stops when ``self.stop_monitoring`` becomes True.
        """
        def stats():
            while True:
                if self.stop_monitoring:
                    return
                neighbors_received_full, neighbors_checked = self.get_neighbor_received_routes()
                queue.put({'who': self.name, 'neighbors_checked': neighbors_checked})
                queue.put({'who': self.name, 'neighbors_received_full': neighbors_received_full})
                time.sleep(1)

        t = Thread(target=stats)
        t.daemon = True
        t.start()

    def local(self, cmd, stream=False, detach=False, stderr=False):
        """Execute *cmd* inside the container (docker exec) and return its output."""
        i = dckr.exec_create(container=self.name, cmd=cmd, stderr=stderr)
        return dckr.exec_start(i['Id'], stream=stream, detach=detach)

    def get_startup_cmd(self):
        """Return the shell script content that launches the daemon; subclass hook."""
        raise NotImplementedError()

    def get_version_cmd(self):
        """Return the command printing the daemon's version; subclass hook."""
        raise NotImplementedError()

    def exec_version_cmd(self):
        """Run the version command inside the container and return its text output."""
        version = self.get_version_cmd()
        i = dckr.exec_create(container=self.name, cmd=version, stderr=False)
        return dckr.exec_start(i['Id'], stream=False, detach=False).decode('utf-8')

    def exec_startup_cmd(self, stream=False, detach=False):
        """Write the startup script into the shared volume and execute it.

        Returns None when the subclass provides no startup content, otherwise
        the result of :meth:`local` (a stream when *stream* is True).
        """
        startup_content = self.get_startup_cmd()

        if not startup_content:
            return

        filename = '{0}/start.sh'.format(self.host_dir)
        with open(filename, 'w') as f:
            f.write(startup_content)
        os.chmod(filename, 0o777)
        # The script is visible inside the container through the bind mount.
        return self.local('{0}/start.sh'.format(self.guest_dir),
                          detach=detach,
                          stream=stream)

    def get_test_counts(self):
        """Return the configured per-neighbor route counts each tester must send.

        Returns:
            tuple: ``(tester_count, neighbors_checked)`` where *tester_count*
            maps neighbor id -> expected 'check-points' count and
            *neighbors_checked* maps neighbor id -> False (initial state).
        """
        tester_count = {}
        neighbors_checked = {}
        for tester in self.scenario_global_conf['testers']:
            for n in tester['neighbors'].keys():
                tester_count[n] = tester['neighbors'][n]['check-points']
                neighbors_checked[n] = False
        return tester_count, neighbors_checked

    def get_neighbor_received_routes(self):
        """Compare live neighbor state against the configured check-points.

        Relies on ``self.get_neighbors_state()`` being provided by the target
        subclass — TODO confirm; it is not defined in this base class.
        Returns ``(neighbors_received_full, neighbors_checked)``: per-neighbor
        booleans for 'received everything' and 'accepted everything'.
        """
        ## if we call this before the daemon starts we will not get output
        tester_count, neighbors_checked = self.get_test_counts()
        neighbors_received_full = neighbors_checked.copy()
        neighbors_received, neighbors_accepted = self.get_neighbors_state()
        for n in neighbors_accepted.keys():
            # this will include the monitor, we don't want to check that
            if n in tester_count and neighbors_accepted[n] >= tester_count[n]:
                neighbors_checked[n] = True
        for n in neighbors_received.keys():
            # this will include the monitor, we don't want to check that
            # (some subclasses report a plain True instead of a count)
            if (n in tester_count and neighbors_received[n] >= tester_count[n]) or neighbors_received[n] == True:
                neighbors_received_full[n] = True
        return neighbors_received_full, neighbors_checked
class Target(Container):
    """A routing daemon under test.

    Subclasses supply ``CONFIG_FILE_NAME`` and :meth:`write_config`; this base
    class handles using a pre-existing config, starting the container and
    rendering Jinja2 configuration templates.
    """

    # Name of the daemon's configuration file inside the shared volume;
    # must be set by subclasses.
    CONFIG_FILE_NAME = None

    def write_config(self):
        """Generate the daemon configuration; subclasses must implement this."""
        raise NotImplementedError()

    def use_existing_config(self):
        """Install a user-supplied configuration file, if one is configured.

        Copies ``conf['config_path']`` into the shared volume under
        ``CONFIG_FILE_NAME``.  Returns True when a config was installed,
        False when the scenario defines no ``config_path``.
        """
        if 'config_path' not in self.conf:
            return False
        destination = '{0}/{1}'.format(self.host_dir, self.CONFIG_FILE_NAME)
        with open(destination, 'w') as dst, open(self.conf['config_path'], 'r') as src:
            dst.write(src.read())
        return True

    def run(self, scenario_global_conf, dckr_net_name=''):
        """Prepare the configuration, start the container and launch the daemon."""
        self.scenario_global_conf = scenario_global_conf
        # The config file must exist in the shared volume before startup.
        if not self.use_existing_config():
            self.write_config()
        ctn = super(Target, self).run(dckr_net_name)
        self.exec_startup_cmd(detach=True)
        return ctn

    def get_template(self, data, template_file="junos.j2",):
        """Render the Jinja2 template *template_file* from ./nos_templates with *data*."""
        loader = FileSystemLoader(searchpath="./nos_templates")
        environment = Environment(loader=loader)
        return environment.get_template(template_file).render(data=data)
class Tester(Container):
    """A traffic-generating peer (e.g. ExaBGP) feeding routes to the target.

    Subclasses supply ``CONTAINER_NAME_PREFIX``, ``GUEST_DIR`` and
    :meth:`configure_neighbors` — TODO confirm; none are defined here.
    """

    # Prefix prepended to the scenario name to form the container name;
    # set by subclasses.
    CONTAINER_NAME_PREFIX = None

    def __init__(self, name, host_dir, conf, image):
        Container.__init__(self, self.CONTAINER_NAME_PREFIX + name, image, host_dir, self.GUEST_DIR, conf)

    def get_ipv4_addresses(self):
        """Return the local address of every configured neighbor session."""
        res = []
        peers = list(self.conf.get('neighbors', {}).values())
        for p in peers:
            res.append(p['local-address'])
        return res

    def configure_neighbors(self, target_conf):
        """Write the tester's per-neighbor configuration; subclass hook."""
        raise NotImplementedError()

    def run(self, target_conf, dckr_net_name):
        """Start the tester container and configure its neighbor sessions."""
        self.ctn = super(Tester, self).run(dckr_net_name)
        self.configure_neighbors(target_conf)

    def launch(self):
        """Run the startup script and show boot progress parsed from its output.

        Counts distinct PIDs seen in the pipe-separated ExaBGP log lines to
        report how many neighbor processes have come up; always returns None.
        """
        output = self.exec_startup_cmd(stream=True, detach=False)
        cnt = 0
        prev_pid = 0
        for lines in output:  # This is the ExaBGP output
            lines = lines.decode("utf-8").strip().split('\n')
            for line in lines:
                fields = line.split('|')
                if len(fields) > 2:
                    # Get PID from ExaBGP output
                    try:
                        # ExaBGP Version >= 4
                        # e.g. 00:00:00 | 111 | control | command/comment
                        pid = int(fields[1])
                    except ValueError:
                        # ExaBGP Version = 3
                        # e.g. 00:00:00 | INFO | 111 | control | command
                        pid = int(fields[2])
                    # A new PID means one more neighbor process has started.
                    if pid != prev_pid:
                        prev_pid = pid
                        cnt += 1
                        if cnt > 1:
                            rm_line()  # overwrite the previous progress line
                        print('tester booting.. ({0}/{1})'.format(cnt, len(list(self.conf.get('neighbors', {}).values()))))
                else:
                    # Not a pipe-formatted log line; echo it verbatim.
                    print(lines)
        return None
def find_errors():
    """Number of errors observed during the run; this generic stub reports none."""
    return 0
def find_timeouts():
    """Number of timeouts observed during the run; this generic stub reports none."""
    return 0
| # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from settings import dckr
import io
import os
from itertools import chain
from threading import Thread
import netaddr
import sys
import time
import datetime
from jinja2 import Environment, FileSystemLoader, PackageLoader, StrictUndefined, make_logging_undefined
flatten = lambda l: chain.from_iterable(l)
def get_ctn_names():
names = list(flatten(n['Names'] for n in dckr.containers(all=True)))
return [n[1:] if n[0] == '/' else n for n in names]
def ctn_exists(name):
return name in get_ctn_names()
def img_exists(name):
return name in [ctn['RepoTags'][0].split(':')[0] for ctn in dckr.images() if ctn['RepoTags'] != None]
def rm_line():
print('\x1b[1A\x1b[2K\x1b[1D\x1b[1A')
class Container(object):
def __init__(self, name, image, host_dir, guest_dir, conf):
self.name = name
self.image = image
self.host_dir = host_dir
self.guest_dir = guest_dir
self.conf = conf
self.config_name = None
self.stop_monitoring = False
self.command = None
self.environment = None
self.volumes = [self.guest_dir]
if not os.path.exists(host_dir):
os.makedirs(host_dir)
os.chmod(host_dir, 0o777)
@classmethod
def build_image(cls, force, tag, nocache=False):
def insert_after_from(dockerfile, line):
lines = dockerfile.split('\n')
i = -1
for idx, l in enumerate(lines):
elems = [e.strip() for e in l.split()]
if len(elems) > 0 and elems[0] == 'FROM':
i = idx
if i < 0:
raise Exception('no FROM statement')
lines.insert(i+1, line)
return '\n'.join(lines)
for env in ['http_proxy', 'https_proxy']:
if env in os.environ:
cls.dockerfile = insert_after_from(cls.dockerfile, 'ENV {0} {1}'.format(env, os.environ[env]))
f = io.BytesIO(cls.dockerfile.encode('utf-8'))
if force or not img_exists(tag):
print('build {0}...'.format(tag))
for line in dckr.build(fileobj=f, rm=True, tag=tag, decode=True, nocache=nocache):
if 'stream' in line:
print(line['stream'].strip())
def get_ipv4_addresses(self):
if 'local-address' in self.conf:
local_addr = self.conf['local-address']
return [local_addr]
raise NotImplementedError()
def get_host_config(self):
host_config = dckr.create_host_config(
binds=['{0}:{1}'.format(os.path.abspath(self.host_dir), self.guest_dir)],
privileged=True,
network_mode='bridge',
cap_add=['NET_ADMIN']
)
return host_config
def run(self, dckr_net_name='', rm=True):
if rm and ctn_exists(self.name):
print('remove container:', self.name)
dckr.remove_container(self.name, force=True)
host_config = self.get_host_config()
ctn = dckr.create_container(image=self.image, command=self.command, environment=self.environment,
detach=True, name=self.name,
stdin_open=True, volumes=self.volumes, host_config=host_config)
self.ctn_id = ctn['Id']
ipv4_addresses = self.get_ipv4_addresses()
net_id = None
for network in dckr.networks(names=[dckr_net_name]):
if network['Name'] != dckr_net_name:
continue
net_id = network['Id']
if not 'IPAM' in network:
print(('can\'t verify if container\'s IP addresses '
'are valid for Docker network {}: missing IPAM'.format(dckr_net_name)))
break
ipam = network['IPAM']
if not 'Config' in ipam:
print(('can\'t verify if container\'s IP addresses '
'are valid for Docker network {}: missing IPAM.Config'.format(dckr_net_name)))
break
ip_ok = False
network_subnets = [item['Subnet'] for item in ipam['Config'] if 'Subnet' in item]
for ip in ipv4_addresses:
for subnet in network_subnets:
ip_ok = netaddr.IPAddress(ip) in netaddr.IPNetwork(subnet)
if not ip_ok:
print(('the container\'s IP address {} is not valid for Docker network {} '
'since it\'s not part of any of its subnets ({})'.format(
ip, dckr_net_name, ', '.join(network_subnets))))
print(('Please consider removing the Docket network {net} '
'to allow bgperf to create it again using the '
'expected subnet:\n'
' docker network rm {net}'.format(net=dckr_net_name)))
sys.exit(1)
break
if net_id is None:
print('Docker network "{}" not found!'.format(dckr_net_name))
return
dckr.connect_container_to_network(self.ctn_id, net_id, ipv4_address=ipv4_addresses[0])
dckr.start(container=self.name)
if len(ipv4_addresses) > 1:
# get the interface used by the first IP address already added by Docker
dev = None
pxlen = None
res = self.local('ip addr').decode("utf-8")
for line in res.split('\n'):
if ipv4_addresses[0] in line:
dev = line.split(' ')[-1].strip()
pxlen = line.split('/')[1].split(' ')[0].strip()
if not dev:
dev = "eth0"
pxlen = 8
for ip in ipv4_addresses[1:]:
self.local(f'ip addr add {ip}/{pxlen} dev {dev}')
return ctn
def stats(self, queue):
def stats():
if self.stop_monitoring:
return
for stat in dckr.stats(self.ctn_id, decode=True):
if self.stop_monitoring:
return
cpu_percentage = 0.0
prev_cpu = stat['precpu_stats']['cpu_usage']['total_usage']
if 'system_cpu_usage' in stat['precpu_stats']:
prev_system = stat['precpu_stats']['system_cpu_usage']
else:
prev_system = 0
cpu = stat['cpu_stats']['cpu_usage']['total_usage']
system = stat['cpu_stats']['system_cpu_usage'] if 'system_cpu_usage' in stat['cpu_stats'] else 0
if not 'percpu_usage' in stat['cpu_stats']['cpu_usage']:
continue
cpu_num = len(stat['cpu_stats']['cpu_usage']['percpu_usage'])
cpu_delta = float(cpu) - float(prev_cpu)
system_delta = float(system) - float(prev_system)
if system_delta > 0.0 and cpu_delta > 0.0:
cpu_percentage = (cpu_delta / system_delta) * float(cpu_num) * 100.0
mem_usage = stat['memory_stats'].get('usage', 0)
queue.put({'who': self.name, 'cpu': cpu_percentage, 'mem': mem_usage, 'time': datetime.datetime.now()})
t = Thread(target=stats)
t.daemon = True
t.start()
def neighbor_stats(self, queue):
def stats():
while True:
if self.stop_monitoring:
return
neighbors_received_full, neighbors_checked = self.get_neighbor_received_routes()
queue.put({'who': self.name, 'neighbors_checked': neighbors_checked})
queue.put({'who': self.name, 'neighbors_received_full': neighbors_received_full})
time.sleep(1)
t = Thread(target=stats)
t.daemon = True
t.start()
def local(self, cmd, stream=False, detach=False, stderr=False):
i = dckr.exec_create(container=self.name, cmd=cmd, stderr=stderr)
return dckr.exec_start(i['Id'], stream=stream, detach=detach)
def get_startup_cmd(self):
raise NotImplementedError()
def get_version_cmd(self):
raise NotImplementedError()
def exec_version_cmd(self):
version = self.get_version_cmd()
i = dckr.exec_create(container=self.name, cmd=version, stderr=False)
return dckr.exec_start(i['Id'], stream=False, detach=False).decode('utf-8')
def exec_startup_cmd(self, stream=False, detach=False):
startup_content = self.get_startup_cmd()
if not startup_content:
return
filename = '{0}/start.sh'.format(self.host_dir)
with open(filename, 'w') as f:
f.write(startup_content)
os.chmod(filename, 0o777)
return self.local('{0}/start.sh'.format(self.guest_dir),
detach=detach,
stream=stream)
def get_test_counts(self):
'''gets the configured counts that each tester is supposed to send'''
tester_count = {}
neighbors_checked = {}
for tester in self.scenario_global_conf['testers']:
for n in tester['neighbors'].keys():
tester_count[n] = tester['neighbors'][n]['check-points']
neighbors_checked[n] = False
return tester_count, neighbors_checked
def get_neighbor_received_routes(self):
## if we ccall this before the daemon starts we will not get output
tester_count, neighbors_checked = self.get_test_counts()
neighbors_received_full = neighbors_checked.copy()
neighbors_received, neighbors_accepted = self.get_neighbors_state()
for n in neighbors_accepted.keys():
#this will include the monitor, we don't want to check that
if n in tester_count and neighbors_accepted[n] >= tester_count[n]:
neighbors_checked[n] = True
for n in neighbors_received.keys():
#this will include the monitor, we don't want to check that
if (n in tester_count and neighbors_received[n] >= tester_count[n]) or neighbors_received[n] == True:
neighbors_received_full[n] = True
return neighbors_received_full, neighbors_checked
class Target(Container):
CONFIG_FILE_NAME = None
def write_config(self):
raise NotImplementedError()
def use_existing_config(self):
if 'config_path' in self.conf:
with open('{0}/{1}'.format(self.host_dir, self.CONFIG_FILE_NAME), 'w') as f:
with open(self.conf['config_path'], 'r') as orig:
f.write(orig.read())
return True
return False
def run(self, scenario_global_conf, dckr_net_name=''):
self.scenario_global_conf = scenario_global_conf
# create config before container is created
if not self.use_existing_config():
self.write_config()
ctn = super(Target, self).run(dckr_net_name)
self.exec_startup_cmd(detach=True)
return ctn
def get_template(self, data, template_file="junos.j2",):
env = Environment(loader=FileSystemLoader(searchpath="./nos_templates"))
template = env.get_template(template_file)
output = template.render(data=data)
return output
class Tester(Container):
CONTAINER_NAME_PREFIX = None
def __init__(self, name, host_dir, conf, image):
Container.__init__(self, self.CONTAINER_NAME_PREFIX + name, image, host_dir, self.GUEST_DIR, conf)
def get_ipv4_addresses(self):
res = []
peers = list(self.conf.get('neighbors', {}).values())
for p in peers:
res.append(p['local-address'])
return res
def configure_neighbors(self, target_conf):
raise NotImplementedError()
def run(self, target_conf, dckr_net_name):
self.ctn = super(Tester, self).run(dckr_net_name)
self.configure_neighbors(target_conf)
def launch(self):
output = self.exec_startup_cmd(stream=True, detach=False)
cnt = 0
prev_pid = 0
for lines in output: # This is the ExaBGP output
lines = lines.decode("utf-8").strip().split('\n')
for line in lines:
fields = line.split('|')
if len(fields) >2:
# Get PID from ExaBGP output
try:
# ExaBGP Version >= 4
# e.g. 00:00:00 | 111 | control | command/comment
pid = int(fields[1])
except ValueError:
# ExaBGP Version = 3
# e.g. 00:00:00 | INFO | 111 | control | command
pid = int(fields[2])
if pid != prev_pid:
prev_pid = pid
cnt += 1
if cnt > 1:
rm_line()
print('tester booting.. ({0}/{1})'.format(cnt, len(list(self.conf.get('neighbors', {}).values()))))
else:
print(lines)
return None
def find_errors():
return 0
def find_timeouts():
return 0
| en | 0.891995 | # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # get the interface used by the first IP address already added by Docker gets the configured counts that each tester is supposed to send ## if we ccall this before the daemon starts we will not get output #this will include the monitor, we don't want to check that #this will include the monitor, we don't want to check that # create config before container is created # This is the ExaBGP output # Get PID from ExaBGP output # ExaBGP Version >= 4 # e.g. 00:00:00 | 111 | control | command/comment # ExaBGP Version = 3 # e.g. 00:00:00 | INFO | 111 | control | command | 2.026186 | 2 |
lcfeats/ioutils.py | rbiswas4/LightCurveFeatures | 0 | 6624792 | <reponame>rbiswas4/LightCurveFeatures
"""
"""
from __future__ import division, absolute_import, print_function
__all__ = ['get_rootfile', 'get_photTable', 'process_fname']
from sndata import SNANASims
import glob
import os
import pandas as pd
def process_fname(fname, location, filter_query="SNID%50==0", outDir='./',
write=False, format='hdf', complevel=9):
"""
fname : string, mandatory
the name of a head file.
location : string, mandatory
directory in which the files are
filter_query: string, defaults to "SNID%50 == 0"
query to be run selecting the objects from both the head and phot file.
outDir : string, defaults to './'
directory where stuff will be written if write is True
write : Bool, defaults to False
format : string, defaults to hdf
{'hdf'|'csv'}
complevel : int, defaults to 9
Actually not used, but enforced
"""
# print(rootfile)
# return
rootfile = get_rootfile(fname)
snsims = SNANASims.fromSNANAfileroot(rootfile, coerce_inds2int=True, location=location, gzipped=True)
# If the SNANA Sims represented is None, we are done
if snsims is None:
return None, None
ht = snsims.headData.reset_index().query('SNID%50==0')
if len(ht) == 0:
return None, None
phottable = get_photTable(snsims, filter_query=filter_query)
dirname = os.path.split(location)[-1] + '_compressed'
# Use the one below instead of the one above, not tested
# dirname = os.path.basename(location) + '_compressed'
dirloc = os.path.join(outDir, dirname)
if not os.path.exists(dirloc):
os.mkdir(dirloc)
if write:
key = rootfile.split('-')[-1]
if format == 'hdf':
phottable.to_hdf(os.path.join(dirloc, 'summary' + '_phot.hdf'), key=key, complib='zlib', complevel=9)
ht.to_hdf(os.path.join(dirloc, 'summary' + '_head.hdf'), key=key,complib='zlib', complevel=9)
elif format == 'csv':
phottable.to_csv(os.path.join(dirloc, rootfile + '_phot.csv.gz'), compression='gzip')
ht.to_csv(os.path.join(dirloc, rootfile + '_head.csv.gz'), compression='gzip')
else:
raise NotImlementedError('formmat not implemented')
return ht, phottable
def get_rootfile(headname):
return os.path.split(headname)[1].split('_HEAD')[0]
def get_photTable(snsims, filter_query='SNID%50==0'):
"""
Instance of `SNANASims`
"""
if snsims is None:
return None
snids = snsims.headData.reset_index().query(filter_query).SNID
lcs = []
for snid in snids:
lcdata = snsims.get_SNANA_photometry(snid, keepSnid=True).lightCurve
if len(lcdata) > 9:
lcs.append(lcdata)
# Need something if snid is None:
if len(lcs) == 0:
return lcs
df = pd.concat(lcs)
df['SNR'] = df.flux / df.fluxerr
return df
| """
"""
from __future__ import division, absolute_import, print_function
__all__ = ['get_rootfile', 'get_photTable', 'process_fname']
from sndata import SNANASims
import glob
import os
import pandas as pd
def process_fname(fname, location, filter_query="SNID%50==0", outDir='./',
write=False, format='hdf', complevel=9):
"""
fname : string, mandatory
the name of a head file.
location : string, mandatory
directory in which the files are
filter_query: string, defaults to "SNID%50 == 0"
query to be run selecting the objects from both the head and phot file.
outDir : string, defaults to './'
directory where stuff will be written if write is True
write : Bool, defaults to False
format : string, defaults to hdf
{'hdf'|'csv'}
complevel : int, defaults to 9
Actually not used, but enforced
"""
# print(rootfile)
# return
rootfile = get_rootfile(fname)
snsims = SNANASims.fromSNANAfileroot(rootfile, coerce_inds2int=True, location=location, gzipped=True)
# If the SNANA Sims represented is None, we are done
if snsims is None:
return None, None
ht = snsims.headData.reset_index().query('SNID%50==0')
if len(ht) == 0:
return None, None
phottable = get_photTable(snsims, filter_query=filter_query)
dirname = os.path.split(location)[-1] + '_compressed'
# Use the one below instead of the one above, not tested
# dirname = os.path.basename(location) + '_compressed'
dirloc = os.path.join(outDir, dirname)
if not os.path.exists(dirloc):
os.mkdir(dirloc)
if write:
key = rootfile.split('-')[-1]
if format == 'hdf':
phottable.to_hdf(os.path.join(dirloc, 'summary' + '_phot.hdf'), key=key, complib='zlib', complevel=9)
ht.to_hdf(os.path.join(dirloc, 'summary' + '_head.hdf'), key=key,complib='zlib', complevel=9)
elif format == 'csv':
phottable.to_csv(os.path.join(dirloc, rootfile + '_phot.csv.gz'), compression='gzip')
ht.to_csv(os.path.join(dirloc, rootfile + '_head.csv.gz'), compression='gzip')
else:
raise NotImlementedError('formmat not implemented')
return ht, phottable
def get_rootfile(headname):
return os.path.split(headname)[1].split('_HEAD')[0]
def get_photTable(snsims, filter_query='SNID%50==0'):
"""
Instance of `SNANASims`
"""
if snsims is None:
return None
snids = snsims.headData.reset_index().query(filter_query).SNID
lcs = []
for snid in snids:
lcdata = snsims.get_SNANA_photometry(snid, keepSnid=True).lightCurve
if len(lcdata) > 9:
lcs.append(lcdata)
# Need something if snid is None:
if len(lcs) == 0:
return lcs
df = pd.concat(lcs)
df['SNR'] = df.flux / df.fluxerr
return df | en | 0.709229 | fname : string, mandatory the name of a head file. location : string, mandatory directory in which the files are filter_query: string, defaults to "SNID%50 == 0" query to be run selecting the objects from both the head and phot file. outDir : string, defaults to './' directory where stuff will be written if write is True write : Bool, defaults to False format : string, defaults to hdf {'hdf'|'csv'} complevel : int, defaults to 9 Actually not used, but enforced # print(rootfile) # return # If the SNANA Sims represented is None, we are done # Use the one below instead of the one above, not tested # dirname = os.path.basename(location) + '_compressed' Instance of `SNANASims` # Need something if snid is None: | 2.372275 | 2 |
plugins/advanced_regex/icon_advanced_regex/actions/data_extraction/schema.py | lukaszlaszuk/insightconnect-plugins | 46 | 6624793 | <reponame>lukaszlaszuk/insightconnect-plugins
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Extract data via regex from a string"
class Input:
ASCII = "ascii"
DOTALL = "dotall"
IGNORECASE = "ignorecase"
IN_REGEX = "in_regex"
IN_STRING = "in_string"
MULTILINE = "multiline"
class Output:
MATCHES = "matches"
class DataExtractionInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"ascii": {
"type": "boolean",
"title": "ASCII",
"description": "Make \\\\w \\\\W \\\\b \\\\B follow ASCII rules",
"default": false,
"order": 6
},
"dotall": {
"type": "boolean",
"title": "Dot All",
"description": "Make . match newline",
"default": false,
"order": 5
},
"ignorecase": {
"type": "boolean",
"title": "Ignore Case",
"description": "Make regex non-case sensitive",
"default": false,
"order": 3
},
"in_regex": {
"type": "string",
"title": "Regex",
"description": "Regex to use for data extraction",
"order": 2
},
"in_string": {
"type": "string",
"title": "Input String",
"description": "Input string",
"order": 1
},
"multiline": {
"type": "boolean",
"title": "Multiline",
"description": "Make begin/end consider each line",
"default": false,
"order": 4
}
},
"required": [
"in_regex",
"in_string"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class DataExtractionOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"matches": {
"type": "array",
"title": "Matches",
"description": "An array of string arrays matching the output of Python re.findall()",
"items": {
"type": "array",
"items": {
"type": "string"
}
},
"order": 1
}
},
"required": [
"matches"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Extract data via regex from a string"
class Input:
ASCII = "ascii"
DOTALL = "dotall"
IGNORECASE = "ignorecase"
IN_REGEX = "in_regex"
IN_STRING = "in_string"
MULTILINE = "multiline"
class Output:
MATCHES = "matches"
class DataExtractionInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"ascii": {
"type": "boolean",
"title": "ASCII",
"description": "Make \\\\w \\\\W \\\\b \\\\B follow ASCII rules",
"default": false,
"order": 6
},
"dotall": {
"type": "boolean",
"title": "Dot All",
"description": "Make . match newline",
"default": false,
"order": 5
},
"ignorecase": {
"type": "boolean",
"title": "Ignore Case",
"description": "Make regex non-case sensitive",
"default": false,
"order": 3
},
"in_regex": {
"type": "string",
"title": "Regex",
"description": "Regex to use for data extraction",
"order": 2
},
"in_string": {
"type": "string",
"title": "Input String",
"description": "Input string",
"order": 1
},
"multiline": {
"type": "boolean",
"title": "Multiline",
"description": "Make begin/end consider each line",
"default": false,
"order": 4
}
},
"required": [
"in_regex",
"in_string"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class DataExtractionOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"matches": {
"type": "array",
"title": "Matches",
"description": "An array of string arrays matching the output of Python re.findall()",
"items": {
"type": "array",
"items": {
"type": "string"
}
},
"order": 1
}
},
"required": [
"matches"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema) | en | 0.211776 | # GENERATED BY KOMAND SDK - DO NOT EDIT { "type": "object", "title": "Variables", "properties": { "ascii": { "type": "boolean", "title": "ASCII", "description": "Make \\\\w \\\\W \\\\b \\\\B follow ASCII rules", "default": false, "order": 6 }, "dotall": { "type": "boolean", "title": "Dot All", "description": "Make . match newline", "default": false, "order": 5 }, "ignorecase": { "type": "boolean", "title": "Ignore Case", "description": "Make regex non-case sensitive", "default": false, "order": 3 }, "in_regex": { "type": "string", "title": "Regex", "description": "Regex to use for data extraction", "order": 2 }, "in_string": { "type": "string", "title": "Input String", "description": "Input string", "order": 1 }, "multiline": { "type": "boolean", "title": "Multiline", "description": "Make begin/end consider each line", "default": false, "order": 4 } }, "required": [ "in_regex", "in_string" ] } { "type": "object", "title": "Variables", "properties": { "matches": { "type": "array", "title": "Matches", "description": "An array of string arrays matching the output of Python re.findall()", "items": { "type": "array", "items": { "type": "string" } }, "order": 1 } }, "required": [ "matches" ] } | 2.555781 | 3 |
componentstore/cli.py | ludeeus/custom-installer | 14 | 6624794 | <reponame>ludeeus/custom-installer
"""Enable CLI."""
import click
@click.command()
@click.option('--port', '-P', default=9999, help='port number.')
@click.option('--redishost', '-RH', default=None, help='Redis host.')
@click.option('--redisport', '-RP', default=None, help='Redis port.')
@click.option('--nocache', is_flag=True, help='Redis port.')
@click.option('--username', '-U', default='pi', help='Username.')
@click.option('--password', '-P', default='<PASSWORD>', help='Password.')
@click.option('--no_auth', is_flag=True, help='Disable auth.')
@click.option('--ha_path', default='/config', help='Full path to HA config dir.')
def cli(port, redishost, redisport, nocache, username, password, no_auth,
ha_path):
"""CLI for this package."""
from componentstore.server import run_server
run_server(ha_path, username, password, no_auth, port, redishost,
redisport, nocache)
cli() # pylint: disable=E1120
| """Enable CLI."""
import click
@click.command()
@click.option('--port', '-P', default=9999, help='port number.')
@click.option('--redishost', '-RH', default=None, help='Redis host.')
@click.option('--redisport', '-RP', default=None, help='Redis port.')
@click.option('--nocache', is_flag=True, help='Redis port.')
@click.option('--username', '-U', default='pi', help='Username.')
@click.option('--password', '-P', default='<PASSWORD>', help='Password.')
@click.option('--no_auth', is_flag=True, help='Disable auth.')
@click.option('--ha_path', default='/config', help='Full path to HA config dir.')
def cli(port, redishost, redisport, nocache, username, password, no_auth,
ha_path):
"""CLI for this package."""
from componentstore.server import run_server
run_server(ha_path, username, password, no_auth, port, redishost,
redisport, nocache)
cli() # pylint: disable=E1120 | en | 0.592526 | Enable CLI. CLI for this package. # pylint: disable=E1120 | 2.06354 | 2 |
PDFWaterMark.py | santhipriya13/PracticeCode | 0 | 6624795 | <filename>PDFWaterMark.py
import PyPDF2
template=PyPDF2.PdfFileReader(open("super.pdf", 'rb'))
template.numPages | <filename>PDFWaterMark.py
import PyPDF2
template=PyPDF2.PdfFileReader(open("super.pdf", 'rb'))
template.numPages | none | 1 | 2.276587 | 2 | |
model.py | bicsi/StarGAN | 0 | 6624796 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torchvision import transforms
class ResidualBlock(nn.Module):
"""Residual Block."""
def __init__(self, dim_in, dim_out):
super(ResidualBlock, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True),
nn.ReLU(inplace=True),
nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True))
def forward(self, x):
return x + self.main(x)
class Generator(nn.Module):
"""Generator. Encoder-Decoder Architecture."""
def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, image_size=128):
super(Generator, self).__init__()
def conv2d_output_size(image_size, kernel, stride, pad):
return ((image_size + 2 * pad - kernel)//stride) + 1
def conv2dtranspose_output_size(image_size, kernel, stride, pad):
return (image_size - 1) * stride - 2 * pad + kernel
layers = []
layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.InstanceNorm2d(conv_dim, affine=True))
layers.append(nn.ReLU(inplace=True))
image_size = conv2d_output_size(image_size=image_size, kernel=7, stride=1, pad=3)
# Down-Sampling
curr_dim = conv_dim
for i in range(2):
layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim * 2
image_size = conv2d_output_size(image_size=image_size, kernel=4, stride=2, pad=1)
# Bottleneck
for i in range(repeat_num):
layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
image_size = conv2d_output_size(image_size=image_size, kernel=3, stride=1, pad=1)
# Up-Sampling
for i in range(2):
image_size = conv2dtranspose_output_size(image_size=image_size, kernel=4, stride=2, pad=1)
layers.append(nn.Upsample(size=image_size, mode='bilinear'))
layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim // 2
layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Tanh())
self.main = nn.Sequential(*layers)
def forward(self, x, c):
# replicate spatially and concatenate domain information
c = c.unsqueeze(2).unsqueeze(3)
c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
x = torch.cat([x, c], dim=1)
return self.main(x)
class Discriminator(nn.Module):
"""Discriminator. PatchGAN."""
def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
super(Discriminator, self).__init__()
layers = []
layers.append(nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1))
layers.append(nn.LeakyReLU(0.01, inplace=True))
curr_dim = conv_dim
for i in range(1, repeat_num):
layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1))
layers.append(nn.LeakyReLU(0.01, inplace=True))
curr_dim = curr_dim * 2
k_size = int(image_size / np.power(2, repeat_num))
self.main = nn.Sequential(*layers)
self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=k_size, bias=False)
def forward(self, x):
h = self.main(x)
out_real = self.conv1(h)
out_aux = self.conv2(h)
return out_real.squeeze(), out_aux.squeeze()
| import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torchvision import transforms
class ResidualBlock(nn.Module):
"""Residual Block."""
def __init__(self, dim_in, dim_out):
super(ResidualBlock, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True),
nn.ReLU(inplace=True),
nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True))
def forward(self, x):
return x + self.main(x)
class Generator(nn.Module):
"""Generator. Encoder-Decoder Architecture."""
def __init__(self, conv_dim=64, c_dim=5, repeat_num=6, image_size=128):
super(Generator, self).__init__()
def conv2d_output_size(image_size, kernel, stride, pad):
return ((image_size + 2 * pad - kernel)//stride) + 1
def conv2dtranspose_output_size(image_size, kernel, stride, pad):
return (image_size - 1) * stride - 2 * pad + kernel
layers = []
layers.append(nn.Conv2d(3+c_dim, conv_dim, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.InstanceNorm2d(conv_dim, affine=True))
layers.append(nn.ReLU(inplace=True))
image_size = conv2d_output_size(image_size=image_size, kernel=7, stride=1, pad=3)
# Down-Sampling
curr_dim = conv_dim
for i in range(2):
layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim*2, affine=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim * 2
image_size = conv2d_output_size(image_size=image_size, kernel=4, stride=2, pad=1)
# Bottleneck
for i in range(repeat_num):
layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
image_size = conv2d_output_size(image_size=image_size, kernel=3, stride=1, pad=1)
# Up-Sampling
for i in range(2):
image_size = conv2dtranspose_output_size(image_size=image_size, kernel=4, stride=2, pad=1)
layers.append(nn.Upsample(size=image_size, mode='bilinear'))
layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim // 2
layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Tanh())
self.main = nn.Sequential(*layers)
def forward(self, x, c):
# replicate spatially and concatenate domain information
c = c.unsqueeze(2).unsqueeze(3)
c = c.expand(c.size(0), c.size(1), x.size(2), x.size(3))
x = torch.cat([x, c], dim=1)
return self.main(x)
class Discriminator(nn.Module):
"""Discriminator. PatchGAN."""
def __init__(self, image_size=128, conv_dim=64, c_dim=5, repeat_num=6):
super(Discriminator, self).__init__()
layers = []
layers.append(nn.Conv2d(3, conv_dim, kernel_size=4, stride=2, padding=1))
layers.append(nn.LeakyReLU(0.01, inplace=True))
curr_dim = conv_dim
for i in range(1, repeat_num):
layers.append(nn.Conv2d(curr_dim, curr_dim*2, kernel_size=4, stride=2, padding=1))
layers.append(nn.LeakyReLU(0.01, inplace=True))
curr_dim = curr_dim * 2
k_size = int(image_size / np.power(2, repeat_num))
self.main = nn.Sequential(*layers)
self.conv1 = nn.Conv2d(curr_dim, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.conv2 = nn.Conv2d(curr_dim, c_dim, kernel_size=k_size, bias=False)
def forward(self, x):
h = self.main(x)
out_real = self.conv1(h)
out_aux = self.conv2(h)
return out_real.squeeze(), out_aux.squeeze()
| en | 0.613431 | Residual Block. Generator. Encoder-Decoder Architecture. # Down-Sampling # Bottleneck # Up-Sampling # replicate spatially and concatenate domain information Discriminator. PatchGAN. | 2.93772 | 3 |
docs/examples_src/raw_query_usage/extract_from_raw.py | sanders41/odmantic | 0 | 6624797 | <gh_stars>0
from bson import ObjectId
from odmantic import Field, Model
class User(Model):
name: str = Field(key_name="username")
document = {"username": "John", "_id": ObjectId("5f8352a87a733b8b18b0cb27")}
user = User.parse_doc(document)
print(user)
#> id=ObjectId('5f8352a87a733b8b18b0cb27') name='John'
| from bson import ObjectId
from odmantic import Field, Model
class User(Model):
name: str = Field(key_name="username")
document = {"username": "John", "_id": ObjectId("5f8352a87a733b8b18b0cb27")}
user = User.parse_doc(document)
print(user)
#> id=ObjectId('5f8352a87a733b8b18b0cb27') name='John' | en | 0.187066 | #> id=ObjectId('5f8352a87a733b8b18b0cb27') name='John' | 2.698868 | 3 |
pyss/mpi/util/separator.py | ibara1454/pyss | 0 | 6624798 | <reponame>ibara1454/pyss
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABCMeta
class Separator(metaclass=ABCMeta):
def __call__(a, comm):
"""
Parameters
----------
a : (N, M) array_like
comm : MPI communicator
Returns
-------
ws : ndarray
"""
pass
class HSeparator(Separator):
    """A horizontal separator."""

    def __call__(self, a, comm):
        # Fixed: added the missing ``self`` parameter so instances are
        # callable as ``sep(a, comm)``.  Not implemented yet.
        pass
class IdentitySeparator(Separator):
    """Separator that returns its input unchanged."""

    def __call__(self, a, comm):
        # Fixed: added the missing ``self``.  The original signature
        # ``__call__(a, comm)`` returned the *instance* when called with
        # one argument and raised TypeError for the documented
        # two-argument call.
        return a
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABCMeta
class Separator(metaclass=ABCMeta):
    """Abstract interface of a separator applied to a distributed matrix."""

    def __call__(self, a, comm):
        """Separate the matrix ``a`` over the communicator ``comm``.

        Parameters
        ----------
        a : (N, M) array_like
            Matrix to separate.
        comm : MPI communicator
            Communicator the separation is performed on.

        Returns
        -------
        ws : ndarray
        """
        # Fixed: the original signature was ``__call__(a, comm)`` (no
        # ``self``), so instances could not be invoked as ``sep(a, comm)``.
        # NOTE(review): consider marking this @abstractmethod; left
        # concrete to keep the base class instantiable as before.
        pass
class HSeparator(Separator):
    """A horizontal separator."""

    def __call__(self, a, comm):
        # Fixed: added the missing ``self`` parameter so instances are
        # callable as ``sep(a, comm)``.  Not implemented yet.
        pass
class IdentitySeparator(Separator):
    """Separator that returns its input unchanged."""

    def __call__(self, a, comm):
        # Fixed: added the missing ``self``.  The original signature
        # ``__call__(a, comm)`` returned the *instance* when called with
        # one argument and raised TypeError for the documented
        # two-argument call.
        return a
celescope/celescope.py | frostmoure98/CeleScope | 0 | 6624799 | <reponame>frostmoure98/CeleScope
#!/bin/env python
# coding=utf8
import argparse
from celescope.__init__ import __VERSION__, ASSAY_DICT
def main():
    """Command-line entry point for CeleScope.

    Builds an ``argparse`` command tree with one sub-command per assay
    (``rna``, ``rna_virus``, ...) and, under each assay, one sub-command
    per pipeline step plus an optional ``run`` command that exposes the
    options of the listed steps and executes the whole pipeline.  The
    chosen step function is attached via ``set_defaults(func=...)`` and
    invoked with the parsed arguments.

    Step modules are imported lazily inside this function, as in the
    original implementation, so ``celescope -v`` stays cheap.  The dead
    triple-quoted ``mut``/``hla run`` sections of the original were
    removed (they were string literals, evaluated and discarded).
    """
    parser = argparse.ArgumentParser(description='CeleScope')
    parser.add_argument(
        '-v',
        '--version',
        action='version',
        version=__VERSION__)
    subparsers = parser.add_subparsers()

    # --- steps shared by several assays --------------------------------
    from celescope.tools.sample_info import sample_info, get_opts_sample
    from celescope.tools.barcode import barcode, get_opts_barcode
    from celescope.tools.cutadapt import cutadapt, get_opts_cutadapt
    from celescope.tools.STAR import STAR, get_opts_STAR
    from celescope.tools.featureCounts import featureCounts, get_opts_featureCounts
    from celescope.tools.count import count, get_opts_count
    from celescope.tools.analysis import analysis, get_opts_analysis

    # --- assay-specific steps and pipeline runners ----------------------
    from celescope.rna.run import run as rna_run
    from celescope.rna_virus.STAR_virus import STAR_virus, get_opts_STAR_virus
    from celescope.rna_virus.count_virus import count_virus, get_opts_count_virus
    from celescope.rna_virus.analysis_rna_virus import (
        analysis_rna_virus, get_opts_analysis_rna_virus)
    from celescope.rna_virus.run import run as rna_virus_run
    from celescope.capture_virus.count_capture_virus import (
        count_capture_virus, get_opts_count_capture_virus)
    from celescope.capture_virus.analysis_capture_virus import (
        analysis_capture_virus, get_opts_analysis_capture_virus)
    from celescope.capture_virus.run import run as capture_virus_run
    from celescope.fusion.STAR_fusion import STAR_fusion, get_opts_STAR_fusion
    from celescope.fusion.count_fusion import count_fusion, get_opts_count_fusion
    from celescope.fusion.run import run as fusion_run
    from celescope.smk.mapping_smk import mapping_smk, get_opts_mapping_smk
    from celescope.smk.count_smk import count_smk, get_opts_count_smk
    from celescope.smk.analysis_smk import analysis_smk, get_opts_analysis_smk
    from celescope.smk.run import run as smk_run
    from celescope.vdj.mapping_vdj import mapping_vdj, get_opts_mapping_vdj
    from celescope.vdj.count_vdj import count_vdj, get_opts_count_vdj
    from celescope.vdj.run import run as vdj_run
    from celescope.hla.mapping_hla import mapping_hla, get_opts_mapping_hla
    from celescope.capture_rna.count_capture_rna import (
        count_capture_rna, get_opts_count_capture_rna)
    from celescope.snp.snpCalling import snpCalling, get_opts_snpCalling

    def _register(assay, steps, run_func=None, run_opts=None):
        """Register one assay sub-command.

        ``steps`` is a list of ``(name, func, get_opts)`` triples; each
        becomes a sub-command of the assay.  When ``run_func`` is given,
        a ``run`` sub-command is added that collects the options produced
        by the ``get_opts`` callables in ``run_opts`` (as optional
        arguments) and dispatches to ``run_func``.
        """
        text = ASSAY_DICT[assay]
        assay_parser = subparsers.add_parser(assay, help=text, description=text)
        assay_sub = assay_parser.add_subparsers()
        for step_name, step_func, get_opts in steps:
            step_parser = assay_sub.add_parser(step_name)
            get_opts(step_parser, True)
            step_parser.set_defaults(func=step_func)
        if run_func is not None:
            # 'resolve' lets steps that share option names coexist.
            run_parser = assay_sub.add_parser(
                'run', help='run all steps', conflict_handler='resolve')
            for get_opts in run_opts:
                get_opts(run_parser, False)
            run_parser.set_defaults(func=run_func)

    # Steps common to every assay.
    common = [
        ('sample', sample_info, get_opts_sample),
        ('barcode', barcode, get_opts_barcode),
        ('cutadapt', cutadapt, get_opts_cutadapt),
    ]
    common_opts = [get_opts_sample, get_opts_barcode, get_opts_cutadapt]

    _register(
        'rna',
        common + [
            ('STAR', STAR, get_opts_STAR),
            ('featureCounts', featureCounts, get_opts_featureCounts),
            ('count', count, get_opts_count),
            ('analysis', analysis, get_opts_analysis),
        ],
        run_func=rna_run,
        run_opts=common_opts + [
            get_opts_STAR, get_opts_featureCounts, get_opts_count,
            get_opts_analysis,
        ])

    _register(
        'rna_virus',
        common + [
            ('STAR', STAR, get_opts_STAR),
            ('STAR_virus', STAR_virus, get_opts_STAR_virus),
            ('featureCounts', featureCounts, get_opts_featureCounts),
            ('count', count, get_opts_count),
            ('count_virus', count_virus, get_opts_count_virus),
            ('analysis_rna_virus', analysis_rna_virus,
             get_opts_analysis_rna_virus),
        ],
        run_func=rna_virus_run,
        # NOTE: as in the original, 'run' exposes the count_virus options
        # but not the plain count options.
        run_opts=common_opts + [
            get_opts_STAR, get_opts_STAR_virus, get_opts_featureCounts,
            get_opts_count_virus, get_opts_analysis_rna_virus,
        ])

    _register(
        'capture_virus',
        common + [
            ('STAR_virus', STAR_virus, get_opts_STAR_virus),
            ('count_capture_virus', count_capture_virus,
             get_opts_count_capture_virus),
            ('analysis_capture_virus', analysis_capture_virus,
             get_opts_analysis_capture_virus),
        ],
        run_func=capture_virus_run,
        # NOTE: as in the original, 'run' does not expose the analysis
        # options.
        run_opts=common_opts + [
            get_opts_STAR_virus, get_opts_count_capture_virus,
        ])

    _register(
        'fusion',
        common + [
            ('STAR_fusion', STAR_fusion, get_opts_STAR_fusion),
            ('count_fusion', count_fusion, get_opts_count_fusion),
        ],
        run_func=fusion_run,
        run_opts=common_opts + [get_opts_STAR_fusion, get_opts_count_fusion])

    _register(
        'smk',
        common + [
            ('mapping_smk', mapping_smk, get_opts_mapping_smk),
            ('count_smk', count_smk, get_opts_count_smk),
            ('analysis_smk', analysis_smk, get_opts_analysis_smk),
        ],
        run_func=smk_run,
        run_opts=common_opts + [
            get_opts_mapping_smk, get_opts_count_smk, get_opts_analysis_smk,
        ])

    _register(
        'vdj',
        common + [
            ('mapping_vdj', mapping_vdj, get_opts_mapping_vdj),
            ('count_vdj', count_vdj, get_opts_count_vdj),
        ],
        run_func=vdj_run,
        run_opts=common_opts + [get_opts_mapping_vdj, get_opts_count_vdj])

    # hla, capture_rna and snp have no end-to-end 'run' command yet.
    _register(
        'hla',
        common + [('mapping_hla', mapping_hla, get_opts_mapping_hla)])

    _register(
        'capture_rna',
        common + [
            ('STAR', STAR, get_opts_STAR),
            ('featureCounts', featureCounts, get_opts_featureCounts),
            ('count_capture_rna', count_capture_rna,
             get_opts_count_capture_rna),
            ('analysis', analysis, get_opts_analysis),
        ])

    _register(
        'snp',
        common + [
            ('STAR', STAR, get_opts_STAR),
            ('featureCounts', featureCounts, get_opts_featureCounts),
            ('snpCalling', snpCalling, get_opts_snpCalling),
        ])

    args = parser.parse_args()
    if not hasattr(args, 'func'):
        # No sub-command given: show usage instead of crashing with
        # AttributeError on ``args.func``.
        parser.print_help()
        raise SystemExit(1)
    args.func(args)
if __name__ == '__main__':
main()
| #!/bin/env python
# coding=utf8
import argparse
from celescope.__init__ import __VERSION__, ASSAY_DICT
def main():
parser = argparse.ArgumentParser(description='CeleScope')
parser.add_argument(
'-v',
'--version',
action='version',
version=__VERSION__)
subparsers = parser.add_subparsers()
# rna
assay = 'rna'
text = ASSAY_DICT[assay]
subparsers_rna = subparsers.add_parser(assay, help=text, description=text)
subparsers_rna_sub = subparsers_rna.add_subparsers()
from celescope.tools.sample_info import sample_info, get_opts_sample
parser_sample = subparsers_rna_sub.add_parser('sample')
get_opts_sample(parser_sample, True)
parser_sample.set_defaults(func=sample_info)
from celescope.tools.barcode import barcode, get_opts_barcode
parser_barcode = subparsers_rna_sub.add_parser('barcode')
get_opts_barcode(parser_barcode, True)
parser_barcode.set_defaults(func=barcode)
from celescope.tools.cutadapt import cutadapt, get_opts_cutadapt
parser_cutadapt = subparsers_rna_sub.add_parser('cutadapt')
get_opts_cutadapt(parser_cutadapt, True)
parser_cutadapt.set_defaults(func=cutadapt)
from celescope.tools.STAR import STAR, get_opts_STAR
parser_STAR = subparsers_rna_sub.add_parser('STAR')
get_opts_STAR(parser_STAR, True)
parser_STAR.set_defaults(func=STAR)
from celescope.tools.featureCounts import featureCounts, get_opts_featureCounts
parser_featureCounts = subparsers_rna_sub.add_parser('featureCounts')
get_opts_featureCounts(parser_featureCounts, True)
parser_featureCounts.set_defaults(func=featureCounts)
from celescope.tools.count import count, get_opts_count
parser_count = subparsers_rna_sub.add_parser('count')
get_opts_count(parser_count, True)
parser_count.set_defaults(func=count)
from celescope.tools.analysis import analysis, get_opts_analysis
parser_analysis = subparsers_rna_sub.add_parser('analysis')
get_opts_analysis(parser_analysis, True)
parser_analysis.set_defaults(func=analysis)
from celescope.rna.run import run
parser_run = subparsers_rna_sub.add_parser(
'run', help='run all steps', conflict_handler='resolve')
get_opts_sample(parser_run, False)
get_opts_barcode(parser_run, False)
get_opts_cutadapt(parser_run, False)
get_opts_STAR(parser_run, False)
get_opts_featureCounts(parser_run, False)
get_opts_count(parser_run, False)
get_opts_analysis(parser_run, False)
parser_run.set_defaults(func=run)
# rna_virus
assay = 'rna_virus'
text = ASSAY_DICT[assay]
subparsers_rna_virus = subparsers.add_parser(
assay, help=text, description=text)
subparsers_rna_virus_sub = subparsers_rna_virus.add_subparsers()
parser_sample = subparsers_rna_virus_sub.add_parser(
'sample', description='sample infomation')
get_opts_sample(parser_sample, True)
parser_sample.set_defaults(func=sample_info)
from celescope.tools.barcode import barcode, get_opts_barcode
parser_barcode = subparsers_rna_virus_sub.add_parser('barcode')
get_opts_barcode(parser_barcode, True)
parser_barcode.set_defaults(func=barcode)
from celescope.tools.cutadapt import cutadapt, get_opts_cutadapt
parser_cutadapt = subparsers_rna_virus_sub.add_parser('cutadapt')
get_opts_cutadapt(parser_cutadapt, True)
parser_cutadapt.set_defaults(func=cutadapt)
from celescope.tools.STAR import STAR, get_opts_STAR
parser_STAR = subparsers_rna_virus_sub.add_parser('STAR')
get_opts_STAR(parser_STAR, True)
parser_STAR.set_defaults(func=STAR)
from celescope.rna_virus.STAR_virus import STAR_virus, get_opts_STAR_virus
parser_STAR_virus = subparsers_rna_virus_sub.add_parser('STAR_virus')
get_opts_STAR_virus(parser_STAR_virus, True)
parser_STAR_virus.set_defaults(func=STAR_virus)
from celescope.tools.featureCounts import featureCounts, get_opts_featureCounts
parser_featureCounts = subparsers_rna_virus_sub.add_parser('featureCounts')
get_opts_featureCounts(parser_featureCounts, True)
parser_featureCounts.set_defaults(func=featureCounts)
from celescope.tools.count import count, get_opts_count
parser_count = subparsers_rna_virus_sub.add_parser('count')
get_opts_count(parser_count, True)
parser_count.set_defaults(func=count)
from celescope.rna_virus.count_virus import count_virus, get_opts_count_virus
parser_count_virus = subparsers_rna_virus_sub.add_parser('count_virus')
get_opts_count_virus(parser_count_virus, True)
parser_count_virus.set_defaults(func=count_virus)
from celescope.rna_virus.analysis_rna_virus import analysis_rna_virus, get_opts_analysis_rna_virus
parser_analysis_rna_virus = subparsers_rna_virus_sub.add_parser(
'analysis_rna_virus')
get_opts_analysis_rna_virus(parser_analysis_rna_virus, True)
parser_analysis_rna_virus.set_defaults(func=analysis_rna_virus)
from celescope.rna_virus.run import run
parser_run = subparsers_rna_virus_sub.add_parser(
'run', help='run all steps', conflict_handler='resolve')
get_opts_sample(parser_run, False)
get_opts_barcode(parser_run, False)
get_opts_cutadapt(parser_run, False)
get_opts_STAR(parser_run, False)
get_opts_STAR_virus(parser_run, False)
get_opts_featureCounts(parser_run, False)
get_opts_count_virus(parser_run, False)
get_opts_analysis_rna_virus(parser_run, False)
parser_run.set_defaults(func=run)
# capture_virus
assay = 'capture_virus'
text = ASSAY_DICT[assay]
subparsers_capture_virus = subparsers.add_parser(
assay, help=text, description=text)
subparsers_capture_virus_sub = subparsers_capture_virus.add_subparsers()
parser_sample = subparsers_capture_virus_sub.add_parser('sample')
get_opts_sample(parser_sample, True)
parser_sample.set_defaults(func=sample_info)
parser_barcode = subparsers_capture_virus_sub.add_parser('barcode')
get_opts_barcode(parser_barcode, True)
parser_barcode.set_defaults(func=barcode)
parser_cutadapt = subparsers_capture_virus_sub.add_parser('cutadapt')
get_opts_cutadapt(parser_cutadapt, True)
parser_cutadapt.set_defaults(func=cutadapt)
parser_STAR_virus = subparsers_capture_virus_sub.add_parser('STAR_virus')
get_opts_STAR_virus(parser_STAR_virus, True)
parser_STAR_virus.set_defaults(func=STAR_virus)
from celescope.capture_virus.count_capture_virus import count_capture_virus, get_opts_count_capture_virus
parser_count_capture_virus = subparsers_capture_virus_sub.add_parser(
'count_capture_virus')
get_opts_count_capture_virus(parser_count_capture_virus, True)
parser_count_capture_virus.set_defaults(func=count_capture_virus)
from celescope.capture_virus.analysis_capture_virus import analysis_capture_virus, get_opts_analysis_capture_virus
parser_analysis_capture_virus = subparsers_capture_virus_sub.add_parser(
'analysis_capture_virus')
get_opts_analysis_capture_virus(parser_analysis_capture_virus, True)
parser_analysis_capture_virus.set_defaults(func=analysis_capture_virus)
from celescope.capture_virus.run import run
parser_run = subparsers_capture_virus_sub.add_parser(
'run', help='run all steps', conflict_handler='resolve')
get_opts_sample(parser_run, False)
get_opts_barcode(parser_run, False)
get_opts_cutadapt(parser_run, False)
get_opts_STAR_virus(parser_run, False)
get_opts_count_capture_virus(parser_run, False)
parser_run.set_defaults(func=run)
# fusion
assay = 'fusion'
text = ASSAY_DICT[assay]
subparsers_fusion = subparsers.add_parser(
assay, help=text, description=text)
subparsers_fusion_sub = subparsers_fusion.add_subparsers()
parser_sample = subparsers_fusion_sub.add_parser('sample')
get_opts_sample(parser_sample, True)
parser_sample.set_defaults(func=sample_info)
parser_barcode = subparsers_fusion_sub.add_parser('barcode')
get_opts_barcode(parser_barcode, True)
parser_barcode.set_defaults(func=barcode)
parser_cutadapt = subparsers_fusion_sub.add_parser('cutadapt')
get_opts_cutadapt(parser_cutadapt, True)
parser_cutadapt.set_defaults(func=cutadapt)
from celescope.fusion.STAR_fusion import STAR_fusion, get_opts_STAR_fusion
parser_STAR_fusion = subparsers_fusion_sub.add_parser('STAR_fusion')
get_opts_STAR_fusion(parser_STAR_fusion, True)
parser_STAR_fusion.set_defaults(func=STAR_fusion)
from celescope.fusion.count_fusion import count_fusion, get_opts_count_fusion
parser_count_fusion = subparsers_fusion_sub.add_parser('count_fusion')
get_opts_count_fusion(parser_count_fusion, True)
parser_count_fusion.set_defaults(func=count_fusion)
from celescope.fusion.run import run
parser_run = subparsers_fusion_sub.add_parser(
'run', help='run all steps', conflict_handler='resolve')
get_opts_sample(parser_run, False)
get_opts_barcode(parser_run, False)
get_opts_cutadapt(parser_run, False)
get_opts_STAR_fusion(parser_run, False)
get_opts_count_fusion(parser_run, False)
parser_run.set_defaults(func=run)
# smk
assay = 'smk'
text = ASSAY_DICT[assay]
subparsers_assay = subparsers.add_parser(
assay, help=text, description=text)
subparsers_assay_sub = subparsers_assay.add_subparsers()
parser_tmp = subparsers_assay_sub.add_parser('sample')
get_opts_sample(parser_tmp, True)
parser_tmp.set_defaults(func=sample_info)
parser_tmp = subparsers_assay_sub.add_parser('barcode')
get_opts_barcode(parser_tmp, True)
parser_tmp.set_defaults(func=barcode)
parser_tmp = subparsers_assay_sub.add_parser('cutadapt')
get_opts_cutadapt(parser_tmp, True)
parser_tmp.set_defaults(func=cutadapt)
from celescope.smk.mapping_smk import mapping_smk, get_opts_mapping_smk
parser_tmp = subparsers_assay_sub.add_parser('mapping_smk')
get_opts_mapping_smk(parser_tmp, True)
parser_tmp.set_defaults(func=mapping_smk)
from celescope.smk.count_smk import count_smk, get_opts_count_smk
parser_tmp = subparsers_assay_sub.add_parser('count_smk')
get_opts_count_smk(parser_tmp, True)
parser_tmp.set_defaults(func=count_smk)
from celescope.smk.analysis_smk import analysis_smk, get_opts_analysis_smk
parser_tmp = subparsers_assay_sub.add_parser('analysis_smk')
get_opts_analysis_smk(parser_tmp, True)
parser_tmp.set_defaults(func=analysis_smk)
from celescope.smk.run import run
parser_tmp = subparsers_assay_sub.add_parser(
'run', help='run all steps', conflict_handler='resolve')
get_opts_sample(parser_tmp, False)
get_opts_barcode(parser_tmp, False)
get_opts_cutadapt(parser_tmp, False)
get_opts_mapping_smk(parser_tmp, False)
get_opts_count_smk(parser_tmp, False)
get_opts_analysis_smk(parser_tmp, False)
parser_tmp.set_defaults(func=run)
# vdj
assay = 'vdj'
text = ASSAY_DICT[assay]
subparsers_assay = subparsers.add_parser(
assay, help=text, description=text)
subparsers_assay_sub = subparsers_assay.add_subparsers()
parser_tmp = subparsers_assay_sub.add_parser('sample')
get_opts_sample(parser_tmp, True)
parser_tmp.set_defaults(func=sample_info)
parser_tmp = subparsers_assay_sub.add_parser('barcode')
get_opts_barcode(parser_tmp, True)
parser_tmp.set_defaults(func=barcode)
parser_tmp = subparsers_assay_sub.add_parser('cutadapt')
get_opts_cutadapt(parser_tmp, True)
parser_tmp.set_defaults(func=cutadapt)
from celescope.vdj.mapping_vdj import mapping_vdj, get_opts_mapping_vdj
parser_tmp = subparsers_assay_sub.add_parser('mapping_vdj')
get_opts_mapping_vdj(parser_tmp, True)
parser_tmp.set_defaults(func=mapping_vdj)
from celescope.vdj.count_vdj import count_vdj, get_opts_count_vdj
parser_tmp = subparsers_assay_sub.add_parser('count_vdj')
get_opts_count_vdj(parser_tmp, True)
parser_tmp.set_defaults(func=count_vdj)
from celescope.vdj.run import run
parser_tmp = subparsers_assay_sub.add_parser(
'run', help='run all steps', conflict_handler='resolve')
get_opts_sample(parser_tmp, False)
get_opts_barcode(parser_tmp, False)
get_opts_cutadapt(parser_tmp, False)
get_opts_mapping_vdj(parser_tmp, False)
get_opts_count_vdj(parser_tmp, False)
parser_tmp.set_defaults(func=run)
'''
# mut
assay = 'mut'
text = ASSAY_DICT[assay]
subparsers_mut = subparsers.add_parser(
assay, help=text, description=text)
subparsers_mut_sub = subparsers_mut.add_subparsers()
parser_sample = subparsers_mut_sub.add_parser('sample')
get_opts_sample(parser_sample, True)
parser_sample.set_defaults(func=sample_info)
parser_barcode = subparsers_mut_sub.add_parser('barcode')
get_opts_barcode(parser_barcode, True)
parser_barcode.set_defaults(func=barcode)
parser_cutadapt = subparsers_mut_sub.add_parser('cutadapt')
get_opts_cutadapt(parser_cutadapt, True)
parser_cutadapt.set_defaults(func=cutadapt)
from celescope.mut.mapping_mut import mapping_mut, get_opts_mapping_mut
parser_mapping_mut = subparsers_mut_sub.add_parser('mapping_mut')
get_opts_mapping_mut(parser_mapping_mut, True)
parser_mapping_mut.set_defaults(func=mapping_mut)
from celescope.mut.count_mut import count_mut, get_opts_count_mut
parser_count_mut = subparsers_mut_sub.add_parser('count_mut')
get_opts_count_mut(parser_count_mut, True)
parser_count_mut.set_defaults(func=count_mut)
from celescope.fusion.run import run
parser_run = subparsers_fusion_sub.add_parser(
'run', help='run all steps', conflict_handler='resolve')
get_opts_sample(parser_run, False)
get_opts_barcode(parser_run, False)
get_opts_cutadapt(parser_run, False)
get_opts_STAR_fusion(parser_run, False)
get_opts_count_fusion(parser_run, False)
parser_run.set_defaults(func=run)
'''
# hla
assay = 'hla'
text = ASSAY_DICT[assay]
subparsers_hla = subparsers.add_parser(
assay, help=text, description=text)
subparsers_hla_sub = subparsers_hla.add_subparsers()
parser_sample = subparsers_hla_sub.add_parser('sample')
get_opts_sample(parser_sample, True)
parser_sample.set_defaults(func=sample_info)
parser_barcode = subparsers_hla_sub.add_parser('barcode')
get_opts_barcode(parser_barcode, True)
parser_barcode.set_defaults(func=barcode)
parser_cutadapt = subparsers_hla_sub.add_parser('cutadapt')
get_opts_cutadapt(parser_cutadapt, True)
parser_cutadapt.set_defaults(func=cutadapt)
from celescope.hla.mapping_hla import mapping_hla, get_opts_mapping_hla
parser_mapping_hla = subparsers_hla_sub.add_parser('mapping_hla')
get_opts_mapping_hla(parser_mapping_hla, True)
parser_mapping_hla.set_defaults(func=mapping_hla)
'''
from celescope.hla.count_hla import count_hla, get_opts_count_hla
parser_count_hla = subparsers_hla_sub.add_parser('count_hla')
get_opts_count_hla(parser_count_hla, True)
parser_count_hla.set_defaults(func=count_hla)
from celescope.hla.run import run
parser_run = subparsers_fusion_sub.add_parser(
'run', help='run all steps', conflict_handler='resolve')
get_opts_sample(parser_run, False)
get_opts_barcode(parser_run, False)
get_opts_cutadapt(parser_run, False)
get_opts_mapping_hla(parser_run, False)
get_opts_count_hla(parser_run, False)
parser_run.set_defaults(func=run)
'''
# capture_rna
assay = 'capture_rna'
text = ASSAY_DICT[assay]
subparsers_capture_rna = subparsers.add_parser(assay, help=text, description=text)
subparsers_capture_rna_sub = subparsers_capture_rna.add_subparsers()
from celescope.tools.sample_info import sample_info, get_opts_sample
parser_sample = subparsers_capture_rna_sub.add_parser('sample')
get_opts_sample(parser_sample, True)
parser_sample.set_defaults(func=sample_info)
from celescope.tools.barcode import barcode, get_opts_barcode
parser_barcode = subparsers_capture_rna_sub.add_parser('barcode')
get_opts_barcode(parser_barcode, True)
parser_barcode.set_defaults(func=barcode)
from celescope.tools.cutadapt import cutadapt, get_opts_cutadapt
parser_cutadapt = subparsers_capture_rna_sub.add_parser('cutadapt')
get_opts_cutadapt(parser_cutadapt, True)
parser_cutadapt.set_defaults(func=cutadapt)
from celescope.tools.STAR import STAR, get_opts_STAR
parser_STAR = subparsers_capture_rna_sub.add_parser('STAR')
get_opts_STAR(parser_STAR, True)
parser_STAR.set_defaults(func=STAR)
from celescope.tools.featureCounts import featureCounts, get_opts_featureCounts
parser_featureCounts = subparsers_capture_rna_sub.add_parser('featureCounts')
get_opts_featureCounts(parser_featureCounts, True)
parser_featureCounts.set_defaults(func=featureCounts)
from celescope.capture_rna.count_capture_rna import count_capture_rna, get_opts_count_capture_rna
parser_count_capture_rna = subparsers_capture_rna_sub.add_parser('count_capture_rna')
get_opts_count_capture_rna(parser_count_capture_rna, True)
parser_count_capture_rna.set_defaults(func=count_capture_rna)
from celescope.tools.analysis import analysis, get_opts_analysis
parser_analysis = subparsers_capture_rna_sub.add_parser('analysis')
get_opts_analysis(parser_analysis, True)
parser_analysis.set_defaults(func=analysis)
# snp
assay = 'snp'
text = ASSAY_DICT[assay]
subparsers_snp = subparsers.add_parser(assay, help=text, description=text)
subparsers_snp = subparsers_snp.add_subparsers()
from celescope.tools.sample_info import sample_info, get_opts_sample
parser_sample = subparsers_snp.add_parser('sample')
get_opts_sample(parser_sample, True)
parser_sample.set_defaults(func=sample_info)
from celescope.tools.barcode import barcode, get_opts_barcode
parser_barcode = subparsers_snp.add_parser('barcode')
get_opts_barcode(parser_barcode, True)
parser_barcode.set_defaults(func=barcode)
from celescope.tools.cutadapt import cutadapt, get_opts_cutadapt
parser_cutadapt = subparsers_snp.add_parser('cutadapt')
get_opts_cutadapt(parser_cutadapt, True)
parser_cutadapt.set_defaults(func=cutadapt)
from celescope.tools.STAR import STAR, get_opts_STAR
parser_STAR = subparsers_snp.add_parser('STAR')
get_opts_STAR(parser_STAR, True)
parser_STAR.set_defaults(func=STAR)
from celescope.tools.featureCounts import featureCounts, get_opts_featureCounts
parser_featureCounts = subparsers_snp.add_parser('featureCounts')
get_opts_featureCounts(parser_featureCounts, True)
parser_featureCounts.set_defaults(func=featureCounts)
from celescope.snp.snpCalling import snpCalling, get_opts_snpCalling
parser_snpCalling = subparsers_snp.add_parser('snpCalling')
get_opts_snpCalling(parser_snpCalling, True)
parser_snpCalling.set_defaults(func=snpCalling)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main() | en | 0.129809 | #!/bin/env python # coding=utf8 # rna # rna_virus # capture_virus # fusion # smk # vdj # mut assay = 'mut' text = ASSAY_DICT[assay] subparsers_mut = subparsers.add_parser( assay, help=text, description=text) subparsers_mut_sub = subparsers_mut.add_subparsers() parser_sample = subparsers_mut_sub.add_parser('sample') get_opts_sample(parser_sample, True) parser_sample.set_defaults(func=sample_info) parser_barcode = subparsers_mut_sub.add_parser('barcode') get_opts_barcode(parser_barcode, True) parser_barcode.set_defaults(func=barcode) parser_cutadapt = subparsers_mut_sub.add_parser('cutadapt') get_opts_cutadapt(parser_cutadapt, True) parser_cutadapt.set_defaults(func=cutadapt) from celescope.mut.mapping_mut import mapping_mut, get_opts_mapping_mut parser_mapping_mut = subparsers_mut_sub.add_parser('mapping_mut') get_opts_mapping_mut(parser_mapping_mut, True) parser_mapping_mut.set_defaults(func=mapping_mut) from celescope.mut.count_mut import count_mut, get_opts_count_mut parser_count_mut = subparsers_mut_sub.add_parser('count_mut') get_opts_count_mut(parser_count_mut, True) parser_count_mut.set_defaults(func=count_mut) from celescope.fusion.run import run parser_run = subparsers_fusion_sub.add_parser( 'run', help='run all steps', conflict_handler='resolve') get_opts_sample(parser_run, False) get_opts_barcode(parser_run, False) get_opts_cutadapt(parser_run, False) get_opts_STAR_fusion(parser_run, False) get_opts_count_fusion(parser_run, False) parser_run.set_defaults(func=run) # hla from celescope.hla.count_hla import count_hla, get_opts_count_hla parser_count_hla = subparsers_hla_sub.add_parser('count_hla') get_opts_count_hla(parser_count_hla, True) parser_count_hla.set_defaults(func=count_hla) from celescope.hla.run import run parser_run = subparsers_fusion_sub.add_parser( 'run', help='run all steps', conflict_handler='resolve') get_opts_sample(parser_run, False) get_opts_barcode(parser_run, False) get_opts_cutadapt(parser_run, False) 
get_opts_mapping_hla(parser_run, False) get_opts_count_hla(parser_run, False) parser_run.set_defaults(func=run) # capture_rna # snp | 2.311516 | 2 |
examples/tof-viewer/external/newton_host_driver/src/host_api/examples/python/tests/test_grouped.py | rick-yhchen1013/aditof-sdk-rework | 5 | 6624800 | <reponame>rick-yhchen1013/aditof-sdk-rework<filename>examples/tof-viewer/external/newton_host_driver/src/host_api/examples/python/tests/test_grouped.py
#!/usr/bin/env python
""" Script to test grouped command
Usage:
test_grouped.py [--count=<word_count>]
Options:
--help Shows this help message.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from docopt import docopt
import sys
import io
import os
import time
import struct
import subprocess
import ctypes
from collections import OrderedDict
import threading
from newton_control_main import newton as newton
if __name__ == "__main__":
args = docopt(__doc__, version='0.1')
newtonTarget = os.environ["NEWTON_TARGET"]
if args['--count']:
count = int( args['--count'] )
else:
count = 0
cmd_file = os.path.expanduser( "~/host_api/dataFiles/grouped.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
rc = newton.adi_newton_config( 0 )
if rc != 0:
print( "ERROR: newton.adi_newton_config return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Generating file ~/host_api/dataFiles/grouped.txt with count = " + str( count ) )
cmd = "~/host_api/examples/python/generateBootImage.py grouped " + cmd_file + " --seed=1 --count=" + str( count )
rc = os.system( cmd )
if rc != 0:
print( "ERROR: Error generateBootImage.py return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Loading command file \"" + cmd_file + "\"" )
rc = newton.adi_load_command_file( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_load_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"" )
if newtonTarget == "FPGA":
rc = newton.adi_verify_command_file( cmd_file_bytes )
else:
rc = newton.adi_verify_command_file_hsp( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_verify_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
else:
print( "INFO: test PASSED!" )
| #!/usr/bin/env python
""" Script to test grouped command
Usage:
test_grouped.py [--count=<word_count>]
Options:
--help Shows this help message.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from docopt import docopt
import sys
import io
import os
import time
import struct
import subprocess
import ctypes
from collections import OrderedDict
import threading
from newton_control_main import newton as newton
if __name__ == "__main__":
args = docopt(__doc__, version='0.1')
newtonTarget = os.environ["NEWTON_TARGET"]
if args['--count']:
count = int( args['--count'] )
else:
count = 0
cmd_file = os.path.expanduser( "~/host_api/dataFiles/grouped.txt" )
cmd_file_bytes = cmd_file.encode(encoding='utf-8')
rc = newton.adi_newton_config( 0 )
if rc != 0:
print( "ERROR: newton.adi_newton_config return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Generating file ~/host_api/dataFiles/grouped.txt with count = " + str( count ) )
cmd = "~/host_api/examples/python/generateBootImage.py grouped " + cmd_file + " --seed=1 --count=" + str( count )
rc = os.system( cmd )
if rc != 0:
print( "ERROR: Error generateBootImage.py return an error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Loading command file \"" + cmd_file + "\"" )
rc = newton.adi_load_command_file( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_load_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
print( "INFO: Verifying RAMs loaded by command file \"" + cmd_file + "\"" )
if newtonTarget == "FPGA":
rc = newton.adi_verify_command_file( cmd_file_bytes )
else:
rc = newton.adi_verify_command_file_hsp( cmd_file_bytes )
if rc != 0:
print( "ERROR: Error newton.adi_verify_command_file returned error (" + str( rc ) + ")." )
sys.exit( rc )
else:
print( "INFO: test PASSED!" ) | en | 0.312504 | #!/usr/bin/env python Script to test grouped command Usage: test_grouped.py [--count=<word_count>] Options: --help Shows this help message. | 2.568339 | 3 |
handwriting_gen/tests/test_distributions.py | sinbycos/handwriting-generation | 0 | 6624801 | import numpy as np
import tensorflow as tf
from handwriting_gen.distributions import bivariate_normal_likelihood
def test_bivariate_normal_likelihood():
from scipy.stats import multivariate_normal
mu1, mu2 = -0.5, 0.22
sigma1, sigma2 = 0.3, 0.9
rho = -0.15
x1, x2 = -1.0, 2.3
cov_off_diag = rho * sigma1 * sigma2
p = multivariate_normal(
[mu1, mu2], [[sigma1**2, cov_off_diag], [cov_off_diag, sigma2**2]]
).pdf([x1, x2])
sess = tf.Session()
assert np.allclose(p, sess.run(
bivariate_normal_likelihood(x1, x2, mu1, mu2, sigma1, sigma2, rho)))
| import numpy as np
import tensorflow as tf
from handwriting_gen.distributions import bivariate_normal_likelihood
def test_bivariate_normal_likelihood():
from scipy.stats import multivariate_normal
mu1, mu2 = -0.5, 0.22
sigma1, sigma2 = 0.3, 0.9
rho = -0.15
x1, x2 = -1.0, 2.3
cov_off_diag = rho * sigma1 * sigma2
p = multivariate_normal(
[mu1, mu2], [[sigma1**2, cov_off_diag], [cov_off_diag, sigma2**2]]
).pdf([x1, x2])
sess = tf.Session()
assert np.allclose(p, sess.run(
bivariate_normal_likelihood(x1, x2, mu1, mu2, sigma1, sigma2, rho)))
| none | 1 | 2.570834 | 3 | |
denonavr/decorators.py | elad-bar/denonavr | 0 | 6624802 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module implements the REST API to Denon AVR receivers.
:copyright: (c) 2021 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
import asyncio
import inspect
import logging
import time
import xml.etree.ElementTree as ET
from functools import wraps
from typing import Callable
import httpx
from defusedxml import DefusedXmlException
from defusedxml.ElementTree import ParseError
from .exceptions import (
AvrRequestError,
AvrForbiddenError,
AvrTimoutError,
AvrInvalidResponseError)
_LOGGER = logging.getLogger(__name__)
def async_handle_receiver_exceptions(func: Callable) -> Callable:
"""
Handle exceptions raised when calling an Denon AVR endpoint asynchronously.
The decorated function must either have a string variable as second
argument or as "request" keyword argument.
"""
@wraps(func)
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except httpx.HTTPStatusError as err:
_LOGGER.debug(
"HTTP status error on request %s", err.request, exc_info=True)
# Separate handling of 403 errors
if err.response.status_code == 403:
raise AvrForbiddenError(
"HTTPStatusError: {}".format(err), err.request) from err
raise AvrRequestError(
"HTTPStatusError: {}".format(err), err.request) from err
except httpx.TimeoutException as err:
_LOGGER.debug(
"HTTP timeout exception on request %s", err.request,
exc_info=True)
raise AvrTimoutError(
"TimeoutException: {}".format(err), err.request) from err
except (ET.ParseError, DefusedXmlException, ParseError) as err:
_LOGGER.debug(
"Defusedxml parse error on request %s", (args, kwargs),
exc_info=True)
raise AvrInvalidResponseError(
"XMLParseError: {}".format(err), (args, kwargs)) from err
return wrapper
def set_cache_id(func: Callable) -> Callable:
"""
Decorate a function to add cache_id keyword argument if it is not present.
The function must be called with a fix cache_id keyword argument to be able
to get cached data. This prevents accidential caching of a function result.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if kwargs.get("cache_id") is None:
kwargs["cache_id"] = time.time()
return func(*args, **kwargs)
return wrapper
def run_async_synchronously(async_func: Callable) -> Callable:
"""
Decorate to run the configured asynchronous function synchronously instead.
If available the corresponding function with async_ prefix is called in an
own event loop. This is not efficient but it ensures backwards
compatibility of this library.
"""
def decorator(func: Callable):
# Check if function is a coroutine
if not inspect.iscoroutinefunction(async_func):
raise AttributeError(
"Function {} is not a coroutine function".format(async_func))
# Check if the signature of both functions is equal
if inspect.signature(func) != inspect.signature(async_func):
raise AttributeError(
"Functions {} and {} have different signatures".format(
func, async_func))
@wraps(func)
def wrapper(*args, **kwargs):
# Run async function in own event loop
loop = asyncio.new_event_loop()
try:
return loop.run_until_complete(async_func(*args, **kwargs))
finally:
loop.close()
return wrapper
return decorator
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module implements the REST API to Denon AVR receivers.
:copyright: (c) 2021 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
import asyncio
import inspect
import logging
import time
import xml.etree.ElementTree as ET
from functools import wraps
from typing import Callable
import httpx
from defusedxml import DefusedXmlException
from defusedxml.ElementTree import ParseError
from .exceptions import (
AvrRequestError,
AvrForbiddenError,
AvrTimoutError,
AvrInvalidResponseError)
_LOGGER = logging.getLogger(__name__)
def async_handle_receiver_exceptions(func: Callable) -> Callable:
"""
Handle exceptions raised when calling an Denon AVR endpoint asynchronously.
The decorated function must either have a string variable as second
argument or as "request" keyword argument.
"""
@wraps(func)
async def wrapper(*args, **kwargs):
try:
return await func(*args, **kwargs)
except httpx.HTTPStatusError as err:
_LOGGER.debug(
"HTTP status error on request %s", err.request, exc_info=True)
# Separate handling of 403 errors
if err.response.status_code == 403:
raise AvrForbiddenError(
"HTTPStatusError: {}".format(err), err.request) from err
raise AvrRequestError(
"HTTPStatusError: {}".format(err), err.request) from err
except httpx.TimeoutException as err:
_LOGGER.debug(
"HTTP timeout exception on request %s", err.request,
exc_info=True)
raise AvrTimoutError(
"TimeoutException: {}".format(err), err.request) from err
except (ET.ParseError, DefusedXmlException, ParseError) as err:
_LOGGER.debug(
"Defusedxml parse error on request %s", (args, kwargs),
exc_info=True)
raise AvrInvalidResponseError(
"XMLParseError: {}".format(err), (args, kwargs)) from err
return wrapper
def set_cache_id(func: Callable) -> Callable:
"""
Decorate a function to add cache_id keyword argument if it is not present.
The function must be called with a fix cache_id keyword argument to be able
to get cached data. This prevents accidential caching of a function result.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if kwargs.get("cache_id") is None:
kwargs["cache_id"] = time.time()
return func(*args, **kwargs)
return wrapper
def run_async_synchronously(async_func: Callable) -> Callable:
"""
Decorate to run the configured asynchronous function synchronously instead.
If available the corresponding function with async_ prefix is called in an
own event loop. This is not efficient but it ensures backwards
compatibility of this library.
"""
def decorator(func: Callable):
# Check if function is a coroutine
if not inspect.iscoroutinefunction(async_func):
raise AttributeError(
"Function {} is not a coroutine function".format(async_func))
# Check if the signature of both functions is equal
if inspect.signature(func) != inspect.signature(async_func):
raise AttributeError(
"Functions {} and {} have different signatures".format(
func, async_func))
@wraps(func)
def wrapper(*args, **kwargs):
# Run async function in own event loop
loop = asyncio.new_event_loop()
try:
return loop.run_until_complete(async_func(*args, **kwargs))
finally:
loop.close()
return wrapper
return decorator
| en | 0.770866 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- This module implements the REST API to Denon AVR receivers. :copyright: (c) 2021 by <NAME>. :license: MIT, see LICENSE for more details. Handle exceptions raised when calling an Denon AVR endpoint asynchronously. The decorated function must either have a string variable as second argument or as "request" keyword argument. # Separate handling of 403 errors Decorate a function to add cache_id keyword argument if it is not present. The function must be called with a fix cache_id keyword argument to be able to get cached data. This prevents accidential caching of a function result. Decorate to run the configured asynchronous function synchronously instead. If available the corresponding function with async_ prefix is called in an own event loop. This is not efficient but it ensures backwards compatibility of this library. # Check if function is a coroutine # Check if the signature of both functions is equal # Run async function in own event loop | 2.16932 | 2 |
server/src/repository/image/i_image_repository.py | konrad2508/picgal | 4 | 6624803 | <gh_stars>1-10
from repository.image.i_image_database_repository import IImageDatabaseRepository
from repository.image.i_virtual_tag_database_repository import IVirtualTagDatabaseRepository
class IImageRepository(IImageDatabaseRepository, IVirtualTagDatabaseRepository): ...
| from repository.image.i_image_database_repository import IImageDatabaseRepository
from repository.image.i_virtual_tag_database_repository import IVirtualTagDatabaseRepository
class IImageRepository(IImageDatabaseRepository, IVirtualTagDatabaseRepository): ... | none | 1 | 1.229701 | 1 | |
netbox/utilities/utils.py | cocoon/netbox | 0 | 6624804 | <filename>netbox/utilities/utils.py
import six
def csv_format(data):
"""
Encapsulate any data which contains a comma within double quotes.
"""
csv = []
for value in data:
# Represent None or False with empty string
if value in [None, False]:
csv.append(u'')
continue
# Force conversion to string first so we can check for any commas
if not isinstance(value, six.string_types):
value = u'{}'.format(value)
# Double-quote the value if it contains a comma
if u',' in value:
csv.append(u'"{}"'.format(value))
else:
csv.append(u'{}'.format(value))
return u','.join(csv)
| <filename>netbox/utilities/utils.py
import six
def csv_format(data):
"""
Encapsulate any data which contains a comma within double quotes.
"""
csv = []
for value in data:
# Represent None or False with empty string
if value in [None, False]:
csv.append(u'')
continue
# Force conversion to string first so we can check for any commas
if not isinstance(value, six.string_types):
value = u'{}'.format(value)
# Double-quote the value if it contains a comma
if u',' in value:
csv.append(u'"{}"'.format(value))
else:
csv.append(u'{}'.format(value))
return u','.join(csv)
| en | 0.762368 | Encapsulate any data which contains a comma within double quotes. # Represent None or False with empty string # Force conversion to string first so we can check for any commas # Double-quote the value if it contains a comma | 3.790975 | 4 |
Honey-badger-labs/orders.py | amanovishnu/Job-Assignments | 0 | 6624805 | <filename>Honey-badger-labs/orders.py
def orders(): #function to calculate the received orders on website.
print("\n")
print("Total number of orders: ",end=" ")
print(len(new_list)) | <filename>Honey-badger-labs/orders.py
def orders(): #function to calculate the received orders on website.
print("\n")
print("Total number of orders: ",end=" ")
print(len(new_list)) | en | 0.974054 | #function to calculate the received orders on website. | 2.388094 | 2 |
test/counts_table/test_basic_xr.py | iosonofabio/singlet | 11 | 6624806 | <reponame>iosonofabio/singlet<gh_stars>10-100
#!/usr/bin/env python
# vim: fdm=indent
'''
author: <NAME>
date: 15/08/17
content: Test CountsTableSparse class.
'''
import numpy as np
import pytest
@pytest.fixture(scope="module")
def ct():
print('Instantiating CountsTableXR')
from singlet.counts_table import CountsTableXR
ctable = CountsTableXR.from_tablename('example_table_tsv')
print('Done!')
return ctable
def test_str(ct):
assert(str(ct)[:42] == "<singlet.CountsTableXR 'example_table_tsv'")
def test_repr(ct):
assert(ct.__repr__()[:42] == "<singlet.CountsTableXR 'example_table_tsv'")
def test_log(ct):
ctlog = ct.log(base=10)
ctunlog = ctlog.unlog(base=10)
def test_eq(ct):
assert(ct.__eq__(ct))
def test_abs(ct):
assert(ct.__abs__() == ct)
def test_add(ct):
assert((ct.__add__(ct)._data == 2 * ct._data).all())
def test_and(ct):
assert(((ct == 3).__and__((ct == 3)))._data.data.sum() == 1278)
def test_copy(ct):
assert(ct.__copy__() == ct)
def test_deepcopy(ct):
assert(ct.__deepcopy__() == ct)
def test_ge(ct):
assert(ct.__ge__(ct))
def test_gt(ct):
assert(ct.__gt__(ct)._data.data.sum() == 0)
def test_le(ct):
assert(ct.__le__(ct))
def test_lt(ct):
assert(ct.__lt__(ct)._data.data.sum() == 0)
def test_mod(ct):
assert(ct.__mod__(1) == ct)
def test_mul(ct):
assert(ct.__mul__(1) == ct)
def test_neg(ct):
ct2 = ct.__copy__()
ct2._data = -ct2._data
assert(ct.__neg__() == ct2)
def test_or(ct):
assert(((ct == 3).__or__((ct == 3)))._data.data.sum() == 1278)
def test_xor(ct):
assert(((ct == 3).__xor__((ct == 3)))._data.data.sum() == 0)
def test_all(ct):
assert(bool(ct.all().data) is False)
def test_any(ct):
assert(bool(ct.any().data) is True)
def test_getitem(ct):
assert(ct[0, 0]._data.data == 188.0)
def test_delitem(ct):
ct2 = ct.__copy__()
del ct2['gene name']
assert(list(ct2.coords.keys()) == ['sample name'])
def test_radd(ct):
assert((ct.__radd__(ct)._data == 2 * ct._data).all())
def test_rand(ct):
assert(((ct == 3).__rand__((ct == 3)))._data.data.sum() == 1278)
def test_rmod(ct):
assert(ct.__rmod__(1) == ct)
def test_rmul(ct):
assert(ct.__rmul__(1) == ct)
def test_ror(ct):
assert(((ct == 3).__ror__((ct == 3)))._data.data.sum() == 1278)
def test_rtruediv(ct):
assert(ct.__rtruediv__(1) == ct)
def test_rfloordiv(ct):
assert(ct.__rfloordiv__(1) == ct)
def test_rxor(ct):
assert(((ct == 3).__rxor__((ct == 3)))._data.data.sum() == 0)
def test_rsub(ct):
assert((ct.__rsub__(ct)._data == 0).all())
def test_sub(ct):
assert((ct.__sub__(ct)._data == 0).all())
def test_truediv(ct):
assert(ct.__truediv__(1) == ct)
def test_floordi(ct):
assert(ct.__floordiv__(1) == ct)
def test_iadd(ct):
ct2 = ct.__deepcopy__()
ct2 += 1
assert(ct2.sum() == 60721 * 4 + ct.sum())
def test_isub(ct):
ct2 = ct.__deepcopy__()
ct2 -= 1
assert(ct2.sum() == -60721 * 4 + ct.sum())
def test_imul(ct):
ct2 = ct.__deepcopy__()
ct2 *= 2
assert(ct2.sum() == 2 * ct.sum())
def test_itruediv(ct):
ct2 = ct.__deepcopy__()
ct2 /= 2
assert(ct2.sum() == 0.5 * ct.sum())
def test_ifloordiv(ct):
ct2 = ct.__deepcopy__()
ct2 //= 2
assert(ct2.sum() == 5534256)
def test_imod(ct):
ct2 = ct.__deepcopy__()
ct2 %= 2
assert(ct2.sum() == 17767.0)
def test_dims(ct):
assert(ct.dims == ('gene name', 'sample name'))
def test_dot(ct):
assert(np.isclose(float(ct.dot(ct)), 1.08029e+13))
def test_dropna(ct):
assert(ct.dropna(dim='gene name') == ct)
def test_equals(ct):
assert(ct.equals(ct))
def test_fillna(ct):
assert(ct.fillna(0) == ct)
def test_get_axis_num(ct):
assert(ct.get_axis_num('gene name') == 0)
def test_identical(ct):
assert(ct.identical(ct))
def test_max(ct):
assert(ct.max() == 1976680.0)
def test_min(ct):
assert(ct.min() == 0)
def test_mean(ct):
assert(np.isclose(ct.mean(), 45.644336))
def test_var(ct):
assert(np.isclose(ct.var(), 44475514.709833))
def test_median(ct):
assert(ct.median() == 0)
def test_shape(ct):
assert(ct.shape == (60721, 4))
# FIXME: how does this work?
#def test_swap_dims(ct):
# assert(ct.swap_dims(
# {'gene name': 'sample name',
# 'sample name': 'gene name'}).shape == (4, 60721))
def test_T(ct):
ct = ct.__copy__()
assert(ct.T.shape == (4, 60721))
def test_transpose(ct):
assert(ct.transpose('sample name', 'gene name').shape == (4, 60721))
def test_values(ct):
assert(ct.values.shape == (60721, 4))
# FIXME
#def test_getspikeins(ct):
# print('Get spikeins')
# assert(ct.get_spikeins().index[0] == 'ERCC-00002')
# print('Done!')
#
#
#def test_getother(ct):
# print('Get spikeins')
# assert(ct.get_other_features().index[0] == 'NIST_ConsensusVector')
# print('Done!')
| #!/usr/bin/env python
# vim: fdm=indent
'''
author: <NAME>
date: 15/08/17
content: Test CountsTableSparse class.
'''
import numpy as np
import pytest
@pytest.fixture(scope="module")
def ct():
print('Instantiating CountsTableXR')
from singlet.counts_table import CountsTableXR
ctable = CountsTableXR.from_tablename('example_table_tsv')
print('Done!')
return ctable
def test_str(ct):
assert(str(ct)[:42] == "<singlet.CountsTableXR 'example_table_tsv'")
def test_repr(ct):
assert(ct.__repr__()[:42] == "<singlet.CountsTableXR 'example_table_tsv'")
def test_log(ct):
ctlog = ct.log(base=10)
ctunlog = ctlog.unlog(base=10)
def test_eq(ct):
assert(ct.__eq__(ct))
def test_abs(ct):
assert(ct.__abs__() == ct)
def test_add(ct):
assert((ct.__add__(ct)._data == 2 * ct._data).all())
def test_and(ct):
assert(((ct == 3).__and__((ct == 3)))._data.data.sum() == 1278)
def test_copy(ct):
assert(ct.__copy__() == ct)
def test_deepcopy(ct):
assert(ct.__deepcopy__() == ct)
def test_ge(ct):
assert(ct.__ge__(ct))
def test_gt(ct):
assert(ct.__gt__(ct)._data.data.sum() == 0)
def test_le(ct):
assert(ct.__le__(ct))
def test_lt(ct):
assert(ct.__lt__(ct)._data.data.sum() == 0)
def test_mod(ct):
assert(ct.__mod__(1) == ct)
def test_mul(ct):
assert(ct.__mul__(1) == ct)
def test_neg(ct):
ct2 = ct.__copy__()
ct2._data = -ct2._data
assert(ct.__neg__() == ct2)
def test_or(ct):
assert(((ct == 3).__or__((ct == 3)))._data.data.sum() == 1278)
def test_xor(ct):
assert(((ct == 3).__xor__((ct == 3)))._data.data.sum() == 0)
def test_all(ct):
assert(bool(ct.all().data) is False)
def test_any(ct):
assert(bool(ct.any().data) is True)
def test_getitem(ct):
assert(ct[0, 0]._data.data == 188.0)
def test_delitem(ct):
ct2 = ct.__copy__()
del ct2['gene name']
assert(list(ct2.coords.keys()) == ['sample name'])
def test_radd(ct):
assert((ct.__radd__(ct)._data == 2 * ct._data).all())
def test_rand(ct):
assert(((ct == 3).__rand__((ct == 3)))._data.data.sum() == 1278)
def test_rmod(ct):
assert(ct.__rmod__(1) == ct)
def test_rmul(ct):
assert(ct.__rmul__(1) == ct)
def test_ror(ct):
assert(((ct == 3).__ror__((ct == 3)))._data.data.sum() == 1278)
def test_rtruediv(ct):
assert(ct.__rtruediv__(1) == ct)
def test_rfloordiv(ct):
assert(ct.__rfloordiv__(1) == ct)
def test_rxor(ct):
assert(((ct == 3).__rxor__((ct == 3)))._data.data.sum() == 0)
def test_rsub(ct):
assert((ct.__rsub__(ct)._data == 0).all())
def test_sub(ct):
assert((ct.__sub__(ct)._data == 0).all())
def test_truediv(ct):
assert(ct.__truediv__(1) == ct)
def test_floordi(ct):
assert(ct.__floordiv__(1) == ct)
def test_iadd(ct):
ct2 = ct.__deepcopy__()
ct2 += 1
assert(ct2.sum() == 60721 * 4 + ct.sum())
def test_isub(ct):
ct2 = ct.__deepcopy__()
ct2 -= 1
assert(ct2.sum() == -60721 * 4 + ct.sum())
def test_imul(ct):
ct2 = ct.__deepcopy__()
ct2 *= 2
assert(ct2.sum() == 2 * ct.sum())
def test_itruediv(ct):
ct2 = ct.__deepcopy__()
ct2 /= 2
assert(ct2.sum() == 0.5 * ct.sum())
def test_ifloordiv(ct):
ct2 = ct.__deepcopy__()
ct2 //= 2
assert(ct2.sum() == 5534256)
def test_imod(ct):
ct2 = ct.__deepcopy__()
ct2 %= 2
assert(ct2.sum() == 17767.0)
def test_dims(ct):
assert(ct.dims == ('gene name', 'sample name'))
def test_dot(ct):
assert(np.isclose(float(ct.dot(ct)), 1.08029e+13))
def test_dropna(ct):
assert(ct.dropna(dim='gene name') == ct)
def test_equals(ct):
assert(ct.equals(ct))
def test_fillna(ct):
assert(ct.fillna(0) == ct)
def test_get_axis_num(ct):
assert(ct.get_axis_num('gene name') == 0)
def test_identical(ct):
assert(ct.identical(ct))
def test_max(ct):
assert(ct.max() == 1976680.0)
def test_min(ct):
assert(ct.min() == 0)
def test_mean(ct):
assert(np.isclose(ct.mean(), 45.644336))
def test_var(ct):
assert(np.isclose(ct.var(), 44475514.709833))
def test_median(ct):
assert(ct.median() == 0)
def test_shape(ct):
assert(ct.shape == (60721, 4))
# FIXME: how does this work?
#def test_swap_dims(ct):
# assert(ct.swap_dims(
# {'gene name': 'sample name',
# 'sample name': 'gene name'}).shape == (4, 60721))
def test_T(ct):
ct = ct.__copy__()
assert(ct.T.shape == (4, 60721))
def test_transpose(ct):
assert(ct.transpose('sample name', 'gene name').shape == (4, 60721))
def test_values(ct):
assert(ct.values.shape == (60721, 4))
# FIXME
#def test_getspikeins(ct):
# print('Get spikeins')
# assert(ct.get_spikeins().index[0] == 'ERCC-00002')
# print('Done!')
#
#
#def test_getother(ct):
# print('Get spikeins')
# assert(ct.get_other_features().index[0] == 'NIST_ConsensusVector')
# print('Done!') | en | 0.372055 | #!/usr/bin/env python # vim: fdm=indent author: <NAME> date: 15/08/17 content: Test CountsTableSparse class. # FIXME: how does this work? #def test_swap_dims(ct): # assert(ct.swap_dims( # {'gene name': 'sample name', # 'sample name': 'gene name'}).shape == (4, 60721)) # FIXME #def test_getspikeins(ct): # print('Get spikeins') # assert(ct.get_spikeins().index[0] == 'ERCC-00002') # print('Done!') # # #def test_getother(ct): # print('Get spikeins') # assert(ct.get_other_features().index[0] == 'NIST_ConsensusVector') # print('Done!') | 2.439337 | 2 |
Configuration/Geometry/python/GeometryExtended2023D36Reco_cff.py | bisnupriyasahu/cmssw | 1 | 6624807 | import FWCore.ParameterSet.Config as cms
# This config was generated automatically using generate2023Geometry.py
# If you notice a mistake, please update the generating script, not just this config
from Configuration.Geometry.GeometryExtended2023D36_cff import *
# tracker
from Geometry.CommonDetUnit.globalTrackingGeometry_cfi import *
from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *
from Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *
from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *
from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *
trackerGeometry.applyAlignment = cms.bool(False)
# calo
from Geometry.CaloEventSetup.HGCalV6Topology_cfi import *
from Geometry.HGCalGeometry.HGCalV6GeometryESProducer_cfi import *
from Geometry.CaloEventSetup.CaloTopology_cfi import *
from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import *
CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",
SelectedCalos = cms.vstring("HCAL" ,
"ZDC" ,
"EcalBarrel" ,
"TOWER" ,
"HGCalEESensitive" ,
"HGCalHESiliconSensitive"
)
)
from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import *
from Geometry.HcalEventSetup.HcalGeometry_cfi import *
from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import *
from Geometry.HcalEventSetup.CaloTowerTopology_cfi import *
from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *
from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *
from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *
from Geometry.EcalMapping.EcalMapping_cfi import *
from Geometry.EcalMapping.EcalMappingRecord_cfi import *
# muon
from Geometry.MuonNumbering.muonNumberingInitialization_cfi import *
from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import *
from Geometry.GEMGeometryBuilder.gemGeometry_cfi import *
from Geometry.GEMGeometryBuilder.me0Geometry_cfi import *
from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import *
from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import *
# forward
from Geometry.ForwardGeometry.ForwardGeometry_cfi import *
| import FWCore.ParameterSet.Config as cms
# This config was generated automatically using generate2023Geometry.py
# If you notice a mistake, please update the generating script, not just this config
from Configuration.Geometry.GeometryExtended2023D36_cff import *
# tracker
from Geometry.CommonDetUnit.globalTrackingGeometry_cfi import *
from RecoTracker.GeometryESProducer.TrackerRecoGeometryESProducer_cfi import *
from Geometry.TrackerGeometryBuilder.trackerParameters_cfi import *
from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *
from Geometry.TrackerGeometryBuilder.idealForDigiTrackerGeometry_cff import *
trackerGeometry.applyAlignment = cms.bool(False)
# calo
from Geometry.CaloEventSetup.HGCalV6Topology_cfi import *
from Geometry.HGCalGeometry.HGCalV6GeometryESProducer_cfi import *
from Geometry.CaloEventSetup.CaloTopology_cfi import *
from Geometry.CaloEventSetup.CaloGeometryBuilder_cfi import *
CaloGeometryBuilder = cms.ESProducer("CaloGeometryBuilder",
SelectedCalos = cms.vstring("HCAL" ,
"ZDC" ,
"EcalBarrel" ,
"TOWER" ,
"HGCalEESensitive" ,
"HGCalHESiliconSensitive"
)
)
from Geometry.EcalAlgo.EcalBarrelGeometry_cfi import *
from Geometry.HcalEventSetup.HcalGeometry_cfi import *
from Geometry.HcalEventSetup.CaloTowerGeometry_cfi import *
from Geometry.HcalEventSetup.CaloTowerTopology_cfi import *
from Geometry.HcalCommonData.hcalDDDRecConstants_cfi import *
from Geometry.HcalEventSetup.hcalTopologyIdeal_cfi import *
from Geometry.CaloEventSetup.EcalTrigTowerConstituents_cfi import *
from Geometry.EcalMapping.EcalMapping_cfi import *
from Geometry.EcalMapping.EcalMappingRecord_cfi import *
# muon
from Geometry.MuonNumbering.muonNumberingInitialization_cfi import *
from RecoMuon.DetLayers.muonDetLayerGeometry_cfi import *
from Geometry.GEMGeometryBuilder.gemGeometry_cfi import *
from Geometry.GEMGeometryBuilder.me0Geometry_cfi import *
from Geometry.CSCGeometryBuilder.idealForDigiCscGeometry_cff import *
from Geometry.DTGeometryBuilder.idealForDigiDtGeometry_cff import *
# forward
from Geometry.ForwardGeometry.ForwardGeometry_cfi import *
| en | 0.631989 | # This config was generated automatically using generate2023Geometry.py # If you notice a mistake, please update the generating script, not just this config # tracker # calo # muon # forward | 1.062324 | 1 |
homeassistant/components/repetier/sensor.py | andersop91/core | 22,481 | 6624808 | """Support for monitoring Repetier Server Sensors."""
from datetime import datetime
import logging
import time
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import REPETIER_API, SENSOR_TYPES, UPDATE_SIGNAL, RepetierSensorEntityDescription
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Repetier Server sensors."""
if discovery_info is None:
return
sensor_map = {
"bed_temperature": RepetierTempSensor,
"extruder_temperature": RepetierTempSensor,
"chamber_temperature": RepetierTempSensor,
"current_state": RepetierSensor,
"current_job": RepetierJobSensor,
"job_end": RepetierJobEndSensor,
"job_start": RepetierJobStartSensor,
}
entities = []
for info in discovery_info:
printer_name = info["printer_name"]
api = hass.data[REPETIER_API][printer_name]
printer_id = info["printer_id"]
sensor_type = info["sensor_type"]
temp_id = info["temp_id"]
description = SENSOR_TYPES[sensor_type]
name = f"{info['name']}{description.name or ''}"
if temp_id is not None:
_LOGGER.debug("%s Temp_id: %s", sensor_type, temp_id)
name = f"{name}{temp_id}"
sensor_class = sensor_map[sensor_type]
entity = sensor_class(api, temp_id, name, printer_id, description)
entities.append(entity)
add_entities(entities, True)
class RepetierSensor(SensorEntity):
"""Class to create and populate a Repetier Sensor."""
entity_description: RepetierSensorEntityDescription
_attr_should_poll = False
def __init__(
self,
api,
temp_id,
name,
printer_id,
description: RepetierSensorEntityDescription,
):
"""Init new sensor."""
self.entity_description = description
self._api = api
self._attributes: dict = {}
self._temp_id = temp_id
self._printer_id = printer_id
self._state = None
self._attr_name = name
self._attr_available = False
@property
def extra_state_attributes(self):
"""Return sensor attributes."""
return self._attributes
@property
def native_value(self):
"""Return sensor state."""
return self._state
@callback
def update_callback(self):
"""Get new data and update state."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Connect update callbacks."""
self.async_on_remove(
async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self.update_callback)
)
def _get_data(self):
"""Return new data from the api cache."""
sensor_type = self.entity_description.key
data = self._api.get_data(self._printer_id, sensor_type, self._temp_id)
if data is None:
_LOGGER.debug("Data not found for %s and %s", sensor_type, self._temp_id)
self._attr_available = False
return None
self._attr_available = True
return data
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
state = data.pop("state")
_LOGGER.debug("Printer %s State %s", self.name, state)
self._attributes.update(data)
self._state = state
class RepetierTempSensor(RepetierSensor):
"""Represent a Repetier temp sensor."""
@property
def native_value(self):
"""Return sensor state."""
if self._state is None:
return None
return round(self._state, 2)
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
state = data.pop("state")
temp_set = data["temp_set"]
_LOGGER.debug("Printer %s Setpoint: %s, Temp: %s", self.name, temp_set, state)
self._attributes.update(data)
self._state = state
class RepetierJobSensor(RepetierSensor):
"""Represent a Repetier job sensor."""
@property
def native_value(self):
"""Return sensor state."""
if self._state is None:
return None
return round(self._state, 2)
class RepetierJobEndSensor(RepetierSensor):
"""Class to create and populate a Repetier Job End timestamp Sensor."""
_attr_device_class = SensorDeviceClass.TIMESTAMP
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
job_name = data["job_name"]
start = data["start"]
print_time = data["print_time"]
from_start = data["from_start"]
time_end = start + round(print_time, 0)
self._state = datetime.utcfromtimestamp(time_end)
remaining = print_time - from_start
remaining_secs = int(round(remaining, 0))
_LOGGER.debug(
"Job %s remaining %s",
job_name,
time.strftime("%H:%M:%S", time.gmtime(remaining_secs)),
)
class RepetierJobStartSensor(RepetierSensor):
"""Class to create and populate a Repetier Job Start timestamp Sensor."""
_attr_device_class = SensorDeviceClass.TIMESTAMP
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
job_name = data["job_name"]
start = data["start"]
from_start = data["from_start"]
self._state = datetime.utcfromtimestamp(start)
elapsed_secs = int(round(from_start, 0))
_LOGGER.debug(
"Job %s elapsed %s",
job_name,
time.strftime("%H:%M:%S", time.gmtime(elapsed_secs)),
)
| """Support for monitoring Repetier Server Sensors."""
from datetime import datetime
import logging
import time
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import REPETIER_API, SENSOR_TYPES, UPDATE_SIGNAL, RepetierSensorEntityDescription
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the available Repetier Server sensors."""
if discovery_info is None:
return
sensor_map = {
"bed_temperature": RepetierTempSensor,
"extruder_temperature": RepetierTempSensor,
"chamber_temperature": RepetierTempSensor,
"current_state": RepetierSensor,
"current_job": RepetierJobSensor,
"job_end": RepetierJobEndSensor,
"job_start": RepetierJobStartSensor,
}
entities = []
for info in discovery_info:
printer_name = info["printer_name"]
api = hass.data[REPETIER_API][printer_name]
printer_id = info["printer_id"]
sensor_type = info["sensor_type"]
temp_id = info["temp_id"]
description = SENSOR_TYPES[sensor_type]
name = f"{info['name']}{description.name or ''}"
if temp_id is not None:
_LOGGER.debug("%s Temp_id: %s", sensor_type, temp_id)
name = f"{name}{temp_id}"
sensor_class = sensor_map[sensor_type]
entity = sensor_class(api, temp_id, name, printer_id, description)
entities.append(entity)
add_entities(entities, True)
class RepetierSensor(SensorEntity):
"""Class to create and populate a Repetier Sensor."""
entity_description: RepetierSensorEntityDescription
_attr_should_poll = False
def __init__(
self,
api,
temp_id,
name,
printer_id,
description: RepetierSensorEntityDescription,
):
"""Init new sensor."""
self.entity_description = description
self._api = api
self._attributes: dict = {}
self._temp_id = temp_id
self._printer_id = printer_id
self._state = None
self._attr_name = name
self._attr_available = False
@property
def extra_state_attributes(self):
"""Return sensor attributes."""
return self._attributes
@property
def native_value(self):
"""Return sensor state."""
return self._state
@callback
def update_callback(self):
"""Get new data and update state."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Connect update callbacks."""
self.async_on_remove(
async_dispatcher_connect(self.hass, UPDATE_SIGNAL, self.update_callback)
)
def _get_data(self):
"""Return new data from the api cache."""
sensor_type = self.entity_description.key
data = self._api.get_data(self._printer_id, sensor_type, self._temp_id)
if data is None:
_LOGGER.debug("Data not found for %s and %s", sensor_type, self._temp_id)
self._attr_available = False
return None
self._attr_available = True
return data
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
state = data.pop("state")
_LOGGER.debug("Printer %s State %s", self.name, state)
self._attributes.update(data)
self._state = state
class RepetierTempSensor(RepetierSensor):
"""Represent a Repetier temp sensor."""
@property
def native_value(self):
"""Return sensor state."""
if self._state is None:
return None
return round(self._state, 2)
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
state = data.pop("state")
temp_set = data["temp_set"]
_LOGGER.debug("Printer %s Setpoint: %s, Temp: %s", self.name, temp_set, state)
self._attributes.update(data)
self._state = state
class RepetierJobSensor(RepetierSensor):
"""Represent a Repetier job sensor."""
@property
def native_value(self):
"""Return sensor state."""
if self._state is None:
return None
return round(self._state, 2)
class RepetierJobEndSensor(RepetierSensor):
"""Class to create and populate a Repetier Job End timestamp Sensor."""
_attr_device_class = SensorDeviceClass.TIMESTAMP
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
job_name = data["job_name"]
start = data["start"]
print_time = data["print_time"]
from_start = data["from_start"]
time_end = start + round(print_time, 0)
self._state = datetime.utcfromtimestamp(time_end)
remaining = print_time - from_start
remaining_secs = int(round(remaining, 0))
_LOGGER.debug(
"Job %s remaining %s",
job_name,
time.strftime("%H:%M:%S", time.gmtime(remaining_secs)),
)
class RepetierJobStartSensor(RepetierSensor):
"""Class to create and populate a Repetier Job Start timestamp Sensor."""
_attr_device_class = SensorDeviceClass.TIMESTAMP
def update(self):
"""Update the sensor."""
if (data := self._get_data()) is None:
return
job_name = data["job_name"]
start = data["start"]
from_start = data["from_start"]
self._state = datetime.utcfromtimestamp(start)
elapsed_secs = int(round(from_start, 0))
_LOGGER.debug(
"Job %s elapsed %s",
job_name,
time.strftime("%H:%M:%S", time.gmtime(elapsed_secs)),
)
| en | 0.659083 | Support for monitoring Repetier Server Sensors. Set up the available Repetier Server sensors. Class to create and populate a Repetier Sensor. Init new sensor. Return sensor attributes. Return sensor state. Get new data and update state. Connect update callbacks. Return new data from the api cache. Update the sensor. Represent a Repetier temp sensor. Return sensor state. Update the sensor. Represent a Repetier job sensor. Return sensor state. Class to create and populate a Repetier Job End timestamp Sensor. Update the sensor. Class to create and populate a Repetier Job Start timestamp Sensor. Update the sensor. | 2.345178 | 2 |
plants/models.py | kermox/Plantation | 1 | 6624809 | from django.conf import settings
from django.db import models
class UserMixin(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.PROTECT,
null=False,
blank=False,
verbose_name='User',
help_text='',
)
class Meta:
abstract = True
class NameDescriptionMixin(models.Model):
name = models.CharField(
max_length=50,
null=False,
blank=False,
verbose_name="Name",
help_text='',
)
description = models.CharField(
max_length=150,
blank=True,
verbose_name="Description",
help_text='',
default='',
)
class Meta:
abstract = True
class ImageMixin(models.Model):
image_url = models.URLField(
blank=True,
default='',
verbose_name="Image URL",
help_text='',
)
class Meta:
abstract = True
class Category(UserMixin, NameDescriptionMixin, ImageMixin, models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.name
class Plant(UserMixin, NameDescriptionMixin, models.Model):
category = models.ForeignKey(
Category,
on_delete=models.PROTECT,
default=''
)
watering_interval = models.PositiveIntegerField(
null=False,
blank=False,
verbose_name='Watering interval',
help_text="In seconds",
)
fertilizing_interval = models.PositiveIntegerField(
null=False,
blank=False,
verbose_name='Fertilising interval',
help_text="In seconds",
)
EXPOSURE_CHOICES = [
('dark', 'Dark'),
('shade', 'Shade'),
('partsun', 'Part Sun'),
('fullsun', 'Full Sun'),
]
required_exposure = models.CharField(
max_length=10, choices=EXPOSURE_CHOICES,
null=False, blank=False,
verbose_name='Amount of sun',
help_text='Amount of sun'
)
HUMIDITY_CHOICES = [
('low', 'Low'),
('medium', 'Medium'),
('high', 'High'),
]
required_humidity = models.CharField(
max_length=10, choices=HUMIDITY_CHOICES,
null=False, blank=False,
verbose_name='Humidity',
help_text=''
)
TEMPERATURE_CHOICES = [
('cold', 'Cold'),
('medium', 'Medium'),
('warm', 'Warm'),
]
required_temperature = models.CharField(
max_length=10, choices=TEMPERATURE_CHOICES,
null=False, blank=False,
verbose_name='Temperature',
help_text=''
)
blooming = models.BooleanField(
default=False,
null=False,
blank=True,
help_text=''
)
DIFFICULTY_CHOICES = [
(1, 'Low'),
(2, 'Medium-Low'),
(3, 'Medium'),
(4, 'Medium-Hight'),
(5, 'Hight'),
]
difficulty = models.PositiveIntegerField(
choices=DIFFICULTY_CHOICES,
null=False,
blank=False,
default=1,
verbose_name='Cultivation difficulty level',
help_text='',
)
class Room(UserMixin, NameDescriptionMixin, models.Model):
EXPOSURE_CHOICES = Plant.EXPOSURE_CHOICES
exposure = models.CharField(
max_length=10, choices=EXPOSURE_CHOICES,
null=False, blank=False,
verbose_name='Amount of sun in the room',
help_text=''
)
HUMIDITY_CHOICES = Plant.HUMIDITY_CHOICES
humidity = models.CharField(
max_length=10, choices=HUMIDITY_CHOICES,
null=False, blank=False,
verbose_name='Humidity in the room',
help_text=''
)
TEMPERATURE_CHOICES = Plant.TEMPERATURE_CHOICES
temperature = models.CharField(
max_length=10, choices=TEMPERATURE_CHOICES,
null=False, blank=False,
verbose_name='Temperature in the room',
help_text=''
)
drafty = models.BooleanField(
default=False,
null=False,
blank=False,
verbose_name='Drafty?',
help_text=''
)
class UserPlant(UserMixin, NameDescriptionMixin, ImageMixin, models.Model):
room = models.ForeignKey(
Room,
on_delete=models.PROTECT,
verbose_name='Room'
)
plant = models.ForeignKey(
Plant,
on_delete=models.PROTECT,
verbose_name='Type of plant',
)
last_watered = models.DateTimeField(
null=True,
blank=True,
verbose_name='Timestamp of last watering',
)
last_fertilized = models.DateTimeField(
null=True,
blank=True,
verbose_name='Timestamp of last fertilizing',
)
| from django.conf import settings
from django.db import models
class UserMixin(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.PROTECT,
null=False,
blank=False,
verbose_name='User',
help_text='',
)
class Meta:
abstract = True
class NameDescriptionMixin(models.Model):
name = models.CharField(
max_length=50,
null=False,
blank=False,
verbose_name="Name",
help_text='',
)
description = models.CharField(
max_length=150,
blank=True,
verbose_name="Description",
help_text='',
default='',
)
class Meta:
abstract = True
class ImageMixin(models.Model):
image_url = models.URLField(
blank=True,
default='',
verbose_name="Image URL",
help_text='',
)
class Meta:
abstract = True
class Category(UserMixin, NameDescriptionMixin, ImageMixin, models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.name
class Plant(UserMixin, NameDescriptionMixin, models.Model):
category = models.ForeignKey(
Category,
on_delete=models.PROTECT,
default=''
)
watering_interval = models.PositiveIntegerField(
null=False,
blank=False,
verbose_name='Watering interval',
help_text="In seconds",
)
fertilizing_interval = models.PositiveIntegerField(
null=False,
blank=False,
verbose_name='Fertilising interval',
help_text="In seconds",
)
EXPOSURE_CHOICES = [
('dark', 'Dark'),
('shade', 'Shade'),
('partsun', 'Part Sun'),
('fullsun', 'Full Sun'),
]
required_exposure = models.CharField(
max_length=10, choices=EXPOSURE_CHOICES,
null=False, blank=False,
verbose_name='Amount of sun',
help_text='Amount of sun'
)
HUMIDITY_CHOICES = [
('low', 'Low'),
('medium', 'Medium'),
('high', 'High'),
]
required_humidity = models.CharField(
max_length=10, choices=HUMIDITY_CHOICES,
null=False, blank=False,
verbose_name='Humidity',
help_text=''
)
TEMPERATURE_CHOICES = [
('cold', 'Cold'),
('medium', 'Medium'),
('warm', 'Warm'),
]
required_temperature = models.CharField(
max_length=10, choices=TEMPERATURE_CHOICES,
null=False, blank=False,
verbose_name='Temperature',
help_text=''
)
blooming = models.BooleanField(
default=False,
null=False,
blank=True,
help_text=''
)
DIFFICULTY_CHOICES = [
(1, 'Low'),
(2, 'Medium-Low'),
(3, 'Medium'),
(4, 'Medium-Hight'),
(5, 'Hight'),
]
difficulty = models.PositiveIntegerField(
choices=DIFFICULTY_CHOICES,
null=False,
blank=False,
default=1,
verbose_name='Cultivation difficulty level',
help_text='',
)
class Room(UserMixin, NameDescriptionMixin, models.Model):
EXPOSURE_CHOICES = Plant.EXPOSURE_CHOICES
exposure = models.CharField(
max_length=10, choices=EXPOSURE_CHOICES,
null=False, blank=False,
verbose_name='Amount of sun in the room',
help_text=''
)
HUMIDITY_CHOICES = Plant.HUMIDITY_CHOICES
humidity = models.CharField(
max_length=10, choices=HUMIDITY_CHOICES,
null=False, blank=False,
verbose_name='Humidity in the room',
help_text=''
)
TEMPERATURE_CHOICES = Plant.TEMPERATURE_CHOICES
temperature = models.CharField(
max_length=10, choices=TEMPERATURE_CHOICES,
null=False, blank=False,
verbose_name='Temperature in the room',
help_text=''
)
drafty = models.BooleanField(
default=False,
null=False,
blank=False,
verbose_name='Drafty?',
help_text=''
)
class UserPlant(UserMixin, NameDescriptionMixin, ImageMixin, models.Model):
room = models.ForeignKey(
Room,
on_delete=models.PROTECT,
verbose_name='Room'
)
plant = models.ForeignKey(
Plant,
on_delete=models.PROTECT,
verbose_name='Type of plant',
)
last_watered = models.DateTimeField(
null=True,
blank=True,
verbose_name='Timestamp of last watering',
)
last_fertilized = models.DateTimeField(
null=True,
blank=True,
verbose_name='Timestamp of last fertilizing',
)
| none | 1 | 2.10789 | 2 | |
RunProject.py | BlindBMan/Facial-Recognition | 0 | 6624810 | <filename>RunProject.py
from Parameters import *
from FacialDetector import *
import pdb
from Visualize import *
params: Parameters = Parameters()
params.dim_window = 36 # exemplele pozitive (fete de oameni cropate) au 36x36 pixeli
params.dim_hog_cell = 6 # dimensiunea celulei
params.overlap = 0.3
params.number_positive_examples = 6713 # numarul exemplelor pozitive
params.number_negative_examples = 10000 # numarul exemplelor negative
params.threshold = 0 # toate ferestrele cu scorul > threshold si maxime locale devin detectii
params.has_annotations = True
params.scaling_ratio = 0.9
params.use_hard_mining = False # (optional)antrenare cu exemple puternic negative
params.use_flip_images = False # adauga imaginile cu fete oglindite
facial_detector: FacialDetector = FacialDetector(params)
# Pasul 1. Incarcam exemplele pozitive (cropate) si exemple negative generate exemple pozitive
# verificam daca ii avem deja salvati
positive_features_path = os.path.join(params.dir_save_files, 'descriptoriExemplePozitive_' + str(params.dim_hog_cell) + '_' +
str(params.number_positive_examples) + '.npy')
if os.path.exists(positive_features_path):
positive_features = np.load(positive_features_path)
print('Am incarcat descriptorii pentru exemplele pozitive')
else:
print('Construim descriptorii pentru exemplele pozitive:')
positive_features = facial_detector.get_positive_descriptors()
np.save(positive_features_path, positive_features)
print('Am salvat descriptorii pentru exemplele pozitive in fisierul %s' % positive_features_path)
# exemple negative
negative_features_path = os.path.join(params.dir_save_files, 'descriptoriExempleNegative_' + str(params.dim_hog_cell) + '_' +
str(params.number_negative_examples) + '.npy')
if os.path.exists(negative_features_path):
negative_features = np.load(negative_features_path)
print('Am incarcat descriptorii pentru exemplele negative')
else:
print('Construim descriptorii pentru exemplele negative:')
negative_features = facial_detector.get_negative_descriptors()
np.save(negative_features_path, negative_features)
print('Am salvat descriptorii pentru exemplele negative in fisierul %s' % negative_features_path)
# Pasul 2. Invatam clasificatorul liniar
training_examples = np.concatenate((np.squeeze(positive_features), np.squeeze(negative_features)), axis=0)
train_labels = np.concatenate((np.ones(params.number_positive_examples), np.zeros(negative_features.shape[0])))
facial_detector.train_classifier(training_examples, train_labels)
# Pasul 3. (optional) Antrenare cu exemple puternic negative (detectii cu scor >0 din cele 274 de imagini negative)
# Daca implementati acest pas ar trebui sa modificati functia FacialDetector.run()
# astfel incat sa va returneze descriptorii detectiilor cu scor > 0 din cele 274 imagini negative
# completati codul in continuare
# TODO: (optional) completeaza codul in continuare
# Pasul 4. Ruleaza detectorul facial pe imaginile de test.
detections, scores, file_names = facial_detector.run()
# Pasul 5. Evalueaza si vizualizeaza detectiile
# Pentru imagini pentru care exista adnotari (cele din setul de date CMU+MIT) folositi functia show_detection_with_ground_truth
# pentru imagini fara adnotari (cele realizate la curs si laborator) folositi functia show_detection_without_ground_truth
if params.has_annotations:
facial_detector.eval_detections(detections, scores, file_names)
show_detections_with_ground_truth(detections, scores, file_names, params)
else:
show_detections_without_ground_truth(detections, scores, file_names, params) | <filename>RunProject.py
from Parameters import *
from FacialDetector import *
import pdb
from Visualize import *
params: Parameters = Parameters()
params.dim_window = 36 # exemplele pozitive (fete de oameni cropate) au 36x36 pixeli
params.dim_hog_cell = 6 # dimensiunea celulei
params.overlap = 0.3
params.number_positive_examples = 6713 # numarul exemplelor pozitive
params.number_negative_examples = 10000 # numarul exemplelor negative
params.threshold = 0 # toate ferestrele cu scorul > threshold si maxime locale devin detectii
params.has_annotations = True
params.scaling_ratio = 0.9
params.use_hard_mining = False # (optional)antrenare cu exemple puternic negative
params.use_flip_images = False # adauga imaginile cu fete oglindite
facial_detector: FacialDetector = FacialDetector(params)
# Pasul 1. Incarcam exemplele pozitive (cropate) si exemple negative generate exemple pozitive
# verificam daca ii avem deja salvati
positive_features_path = os.path.join(params.dir_save_files, 'descriptoriExemplePozitive_' + str(params.dim_hog_cell) + '_' +
str(params.number_positive_examples) + '.npy')
if os.path.exists(positive_features_path):
positive_features = np.load(positive_features_path)
print('Am incarcat descriptorii pentru exemplele pozitive')
else:
print('Construim descriptorii pentru exemplele pozitive:')
positive_features = facial_detector.get_positive_descriptors()
np.save(positive_features_path, positive_features)
print('Am salvat descriptorii pentru exemplele pozitive in fisierul %s' % positive_features_path)
# exemple negative
negative_features_path = os.path.join(params.dir_save_files, 'descriptoriExempleNegative_' + str(params.dim_hog_cell) + '_' +
str(params.number_negative_examples) + '.npy')
if os.path.exists(negative_features_path):
negative_features = np.load(negative_features_path)
print('Am incarcat descriptorii pentru exemplele negative')
else:
print('Construim descriptorii pentru exemplele negative:')
negative_features = facial_detector.get_negative_descriptors()
np.save(negative_features_path, negative_features)
print('Am salvat descriptorii pentru exemplele negative in fisierul %s' % negative_features_path)
# Pasul 2. Invatam clasificatorul liniar
training_examples = np.concatenate((np.squeeze(positive_features), np.squeeze(negative_features)), axis=0)
train_labels = np.concatenate((np.ones(params.number_positive_examples), np.zeros(negative_features.shape[0])))
facial_detector.train_classifier(training_examples, train_labels)
# Pasul 3. (optional) Antrenare cu exemple puternic negative (detectii cu scor >0 din cele 274 de imagini negative)
# Daca implementati acest pas ar trebui sa modificati functia FacialDetector.run()
# astfel incat sa va returneze descriptorii detectiilor cu scor > 0 din cele 274 imagini negative
# completati codul in continuare
# TODO: (optional) completeaza codul in continuare
# Pasul 4. Ruleaza detectorul facial pe imaginile de test.
detections, scores, file_names = facial_detector.run()
# Pasul 5. Evalueaza si vizualizeaza detectiile
# Pentru imagini pentru care exista adnotari (cele din setul de date CMU+MIT) folositi functia show_detection_with_ground_truth
# pentru imagini fara adnotari (cele realizate la curs si laborator) folositi functia show_detection_without_ground_truth
if params.has_annotations:
facial_detector.eval_detections(detections, scores, file_names)
show_detections_with_ground_truth(detections, scores, file_names, params)
else:
show_detections_without_ground_truth(detections, scores, file_names, params) | ro | 0.492228 | # exemplele pozitive (fete de oameni cropate) au 36x36 pixeli # dimensiunea celulei # numarul exemplelor pozitive # numarul exemplelor negative # toate ferestrele cu scorul > threshold si maxime locale devin detectii # (optional)antrenare cu exemple puternic negative # adauga imaginile cu fete oglindite # Pasul 1. Incarcam exemplele pozitive (cropate) si exemple negative generate exemple pozitive # verificam daca ii avem deja salvati # exemple negative # Pasul 2. Invatam clasificatorul liniar # Pasul 3. (optional) Antrenare cu exemple puternic negative (detectii cu scor >0 din cele 274 de imagini negative) # Daca implementati acest pas ar trebui sa modificati functia FacialDetector.run() # astfel incat sa va returneze descriptorii detectiilor cu scor > 0 din cele 274 imagini negative # completati codul in continuare # TODO: (optional) completeaza codul in continuare # Pasul 4. Ruleaza detectorul facial pe imaginile de test. # Pasul 5. Evalueaza si vizualizeaza detectiile # Pentru imagini pentru care exista adnotari (cele din setul de date CMU+MIT) folositi functia show_detection_with_ground_truth # pentru imagini fara adnotari (cele realizate la curs si laborator) folositi functia show_detection_without_ground_truth | 2.466221 | 2 |
ind_1.py | GrishakV/Lab9 | 0 | 6624811 | <reponame>GrishakV/Lab9
#!/usr/bin/env python3
# -*- config: utf-8 -*-
# 2. Написать программу, которая считывает текст из файла и выводит на экран только
# предложения, содержащие введенное с клавиатуры слово.
if __name__ == '__main__':
a = input("Введите слово для поиска")
with open('ind1.txt', 'r') as f:
text = f.read()
text = text.replace("!", ".")
text = text.replace("?", ".")
while ".." in text:
text.replace("..", ".")
sentences = text.split(".")
for sentence in sentences:
if a in sentence:
with open('text.txt', 'r')as s:
f_text = s.read()
if sentence in f_text:
print(f'{sentence}{f_text[f_text.rfind(sentence) + len(sentence)]}')
| #!/usr/bin/env python3
# -*- config: utf-8 -*-
# 2. Написать программу, которая считывает текст из файла и выводит на экран только
# предложения, содержащие введенное с клавиатуры слово.
if __name__ == '__main__':
a = input("Введите слово для поиска")
with open('ind1.txt', 'r') as f:
text = f.read()
text = text.replace("!", ".")
text = text.replace("?", ".")
while ".." in text:
text.replace("..", ".")
sentences = text.split(".")
for sentence in sentences:
if a in sentence:
with open('text.txt', 'r')as s:
f_text = s.read()
if sentence in f_text:
print(f'{sentence}{f_text[f_text.rfind(sentence) + len(sentence)]}') | ru | 0.98809 | #!/usr/bin/env python3 # -*- config: utf-8 -*- # 2. Написать программу, которая считывает текст из файла и выводит на экран только # предложения, содержащие введенное с клавиатуры слово. | 3.849034 | 4 |
app/content/content.py | Mandhiraj/codingblind | 0 | 6624812 | <gh_stars>0
from lesson1 import lesson1
from lesson2 import lesson2
from lesson3 import lesson3
from lesson4 import lesson4
lessons = [lesson1, lesson2, lesson3, lesson4] | from lesson1 import lesson1
from lesson2 import lesson2
from lesson3 import lesson3
from lesson4 import lesson4
lessons = [lesson1, lesson2, lesson3, lesson4] | none | 1 | 1.943702 | 2 | |
ITP2/ITP2_7_C.py | yu8ikmnbgt6y/MyAOJ | 1 | 6624813 | <reponame>yu8ikmnbgt6y/MyAOJ
import sys
import io
import time
import pprint
input_txt = """
9
0 1
0 2
0 3
2 2
1 1
1 2
1 3
0 4
3 2 4
"""
sys.stdin = io.StringIO(input_txt);input()
#sys.stdin = open('ITP2_7_B_in10.test')
start = time.time()
# copy the below part and paste to the submission form.
# ---------function------------
import sys
import bisect
# An ordered "set" emulated with a sorted list `arr`: bisect gives
# O(log n) position lookups; insert/delete are O(n) from list shifting.
nq = int(input())  # number of queries
lines = sys.stdin.readlines()  # read all remaining query lines in bulk
ans = [None] * nq  # output slot per query; None means "print nothing"
arr = []  # sorted, duplicate-free elements
for i in range(nq):
    q, *arg = lines[i].split()
    x = int(arg[0])
    # Leftmost index where x is (or would be) stored.
    idx = bisect.bisect_left(arr,x)
    if q == '0': # insert x
        # Insert only if absent, then report the resulting size.
        if idx == len(arr) or arr[idx] != x:
            arr.insert(idx, x)
        ans[i] = str(len(arr))
    elif q == '1': # find x
        ans[i] = '1' if idx < len(arr) and arr[idx] == x else '0'
    elif q == '2': # delete x
        if idx < len(arr) and arr[idx] == x:
            del arr[idx]
    else: # dump L R
        # All elements in the inclusive range [x, arg[1]], one per line.
        r_idx = bisect.bisect_right(arr, int(arg[1]))
        ans[i] = '\n'.join(map(str, arr[idx:r_idx]))
    #print(q, *arg, '\t', arr)
[print(x) for x in ans if x]
# -----------------------------
print("elapsed:", time.time() - start)
sys.stdin = sys.__stdin__ | import sys
import io
import time
import pprint
input_txt = """
9
0 1
0 2
0 3
2 2
1 1
1 2
1 3
0 4
3 2 4
"""
sys.stdin = io.StringIO(input_txt);input()
#sys.stdin = open('ITP2_7_B_in10.test')
start = time.time()
# copy the below part and paste to the submission form.
# ---------function------------
import sys
import bisect
nq = int(input())
lines = sys.stdin.readlines()
ans = [None] * nq
arr = []
for i in range(nq):
q, *arg = lines[i].split()
x = int(arg[0])
idx = bisect.bisect_left(arr,x)
if q == '0': # insert x
if idx == len(arr) or arr[idx] != x:
arr.insert(idx, x)
ans[i] = str(len(arr))
elif q == '1': # find x
ans[i] = '1' if idx < len(arr) and arr[idx] == x else '0'
elif q == '2': # delete x
if idx < len(arr) and arr[idx] == x:
del arr[idx]
else: # dump L R
r_idx = bisect.bisect_right(arr, int(arg[1]))
ans[i] = '\n'.join(map(str, arr[idx:r_idx]))
#print(q, *arg, '\t', arr)
[print(x) for x in ans if x]
# -----------------------------
print("elapsed:", time.time() - start)
sys.stdin = sys.__stdin__ | en | 0.469508 | 9
0 1
0 2
0 3
2 2
1 1
1 2
1 3
0 4
3 2 4 #sys.stdin = open('ITP2_7_B_in10.test') # copy the below part and paste to the submission form. # ---------function------------ # insert x # find x # delete x # dump L R #print(q, *arg, '\t', arr) # ----------------------------- | 2.757406 | 3 |
examples/pylab_examples/custom_cmap.py | takluyver/matplotlib | 3 | 6624814 | <reponame>takluyver/matplotlib
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
"""
Example: suppose you want red to increase from 0 to 1 over the bottom
half, green to do the same over the middle half, and blue over the top
half. Then you would use:
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0))}
If, as in this example, there are no discontinuities in the r, g, and b
components, then it is quite simple: the second and third element of
each tuple, above, is the same--call it "y". The first element ("x")
defines interpolation intervals over the full range of 0 to 1, and it
must span that whole range. In other words, the values of x divide the
0-to-1 range into a set of segments, and y gives the end-point color
values for each segment.
Now consider the green. cdict['green'] is saying that for
0 <= x <= 0.25, y is zero; no green.
0.25 < x <= 0.75, y varies linearly from 0 to 1.
x > 0.75, y remains at 1, full green.
If there are discontinuities, then it is a little more complicated.
Label the 3 elements in each row in the cdict entry for a given color as
(x, y0, y1). Then for values of x between x[i] and x[i+1] the color
value is interpolated between y1[i] and y0[i+1].
Going back to the cookbook example, look at cdict['red']; because y0 !=
y1, it is saying that for x from 0 to 0.5, red increases from 0 to 1,
but then it jumps down, so that for x from 0.5 to 1, red increases from
0.7 to 1. Green ramps from 0 to 1 as x goes from 0 to 0.5, then jumps
back to 0, and ramps back to 1 as x goes from 0.5 to 1.
row i: x y0 y1
/
/
row i+1: x y0 y1
Above is an attempt to show that for x in the range x[i] to x[i+1], the
interpolation is between y1[i] and y0[i+1]. So, y0[0] and y1[-1] are
never used.
"""
cdict1 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
cdict2 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 1.0),
(1.0, 0.1, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.1),
(0.5, 1.0, 0.0),
(1.0, 0.0, 0.0))
}
cdict3 = {'red': ((0.0, 0.0, 0.0),
(0.25,0.0, 0.0),
(0.5, 0.8, 1.0),
(0.75,1.0, 1.0),
(1.0, 0.4, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25,0.0, 0.0),
(0.5, 0.9, 0.9),
(0.75,0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.4),
(0.25,1.0, 1.0),
(0.5, 1.0, 0.8),
(0.75,0.0, 0.0),
(1.0, 0.0, 0.0))
}
# Now we will use this example to illustrate 3 ways of
# handling custom colormaps.
# First, the most direct and explicit:
blue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)
# Second, create the map explicitly and register it.
# Like the first method, this method works with any kind
# of Colormap, not just
# a LinearSegmentedColormap:
blue_red2 = LinearSegmentedColormap('BlueRed2', cdict2)
plt.register_cmap(cmap=blue_red2)
# Third, for LinearSegmentedColormap only,
# leave everything to register_cmap:
plt.register_cmap(name='BlueRed3', data=cdict3) # optional lut kwarg
x = np.arange(0, np.pi, 0.1)
y = np.arange(0, 2*np.pi, 0.1)
X, Y = np.meshgrid(x,y)
Z = np.cos(X) * np.sin(Y)
plt.figure(figsize=(10,4))
plt.subplots_adjust(wspace=0.3)
plt.subplot(1,3,1)
plt.imshow(Z, interpolation='nearest', cmap=blue_red1)
plt.colorbar()
plt.subplot(1,3,2)
cmap = plt.get_cmap('BlueRed2')
plt.imshow(Z, interpolation='nearest', cmap=cmap)
plt.colorbar()
# Now we will set the third cmap as the default. One would
# not normally do this in the middle of a script like this;
# it is done here just to illustrate the method.
plt.rcParams['image.cmap'] = 'BlueRed3'
# Also see below for an alternative, particularly for
# interactive use.
plt.subplot(1,3,3)
plt.imshow(Z, interpolation='nearest')
plt.colorbar()
# Or as yet another variation, we could replace the rcParams
# specification *before* the imshow with the following *after*
# imshow:
#
# plt.set_cmap('BlueRed3')
#
# This sets the new default *and* sets the colormap of the last
# image-like item plotted via pyplot, if any.
plt.suptitle('Custom Blue-Red colormaps')
plt.show()
| #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
"""
Example: suppose you want red to increase from 0 to 1 over the bottom
half, green to do the same over the middle half, and blue over the top
half. Then you would use:
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0))}
If, as in this example, there are no discontinuities in the r, g, and b
components, then it is quite simple: the second and third element of
each tuple, above, is the same--call it "y". The first element ("x")
defines interpolation intervals over the full range of 0 to 1, and it
must span that whole range. In other words, the values of x divide the
0-to-1 range into a set of segments, and y gives the end-point color
values for each segment.
Now consider the green. cdict['green'] is saying that for
0 <= x <= 0.25, y is zero; no green.
0.25 < x <= 0.75, y varies linearly from 0 to 1.
x > 0.75, y remains at 1, full green.
If there are discontinuities, then it is a little more complicated.
Label the 3 elements in each row in the cdict entry for a given color as
(x, y0, y1). Then for values of x between x[i] and x[i+1] the color
value is interpolated between y1[i] and y0[i+1].
Going back to the cookbook example, look at cdict['red']; because y0 !=
y1, it is saying that for x from 0 to 0.5, red increases from 0 to 1,
but then it jumps down, so that for x from 0.5 to 1, red increases from
0.7 to 1. Green ramps from 0 to 1 as x goes from 0 to 0.5, then jumps
back to 0, and ramps back to 1 as x goes from 0.5 to 1.
row i: x y0 y1
/
/
row i+1: x y0 y1
Above is an attempt to show that for x in the range x[i] to x[i+1], the
interpolation is between y1[i] and y0[i+1]. So, y0[0] and y1[-1] are
never used.
"""
cdict1 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
cdict2 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 1.0),
(1.0, 0.1, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.1),
(0.5, 1.0, 0.0),
(1.0, 0.0, 0.0))
}
cdict3 = {'red': ((0.0, 0.0, 0.0),
(0.25,0.0, 0.0),
(0.5, 0.8, 1.0),
(0.75,1.0, 1.0),
(1.0, 0.4, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25,0.0, 0.0),
(0.5, 0.9, 0.9),
(0.75,0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.4),
(0.25,1.0, 1.0),
(0.5, 1.0, 0.8),
(0.75,0.0, 0.0),
(1.0, 0.0, 0.0))
}
# Now we will use this example to illustrate 3 ways of
# handling custom colormaps.
# First, the most direct and explicit:
blue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)
# Second, create the map explicitly and register it.
# Like the first method, this method works with any kind
# of Colormap, not just
# a LinearSegmentedColormap:
blue_red2 = LinearSegmentedColormap('BlueRed2', cdict2)
plt.register_cmap(cmap=blue_red2)
# Third, for LinearSegmentedColormap only,
# leave everything to register_cmap:
plt.register_cmap(name='BlueRed3', data=cdict3) # optional lut kwarg
x = np.arange(0, np.pi, 0.1)
y = np.arange(0, 2*np.pi, 0.1)
X, Y = np.meshgrid(x,y)
Z = np.cos(X) * np.sin(Y)
plt.figure(figsize=(10,4))
plt.subplots_adjust(wspace=0.3)
plt.subplot(1,3,1)
plt.imshow(Z, interpolation='nearest', cmap=blue_red1)
plt.colorbar()
plt.subplot(1,3,2)
cmap = plt.get_cmap('BlueRed2')
plt.imshow(Z, interpolation='nearest', cmap=cmap)
plt.colorbar()
# Now we will set the third cmap as the default. One would
# not normally do this in the middle of a script like this;
# it is done here just to illustrate the method.
plt.rcParams['image.cmap'] = 'BlueRed3'
# Also see below for an alternative, particularly for
# interactive use.
plt.subplot(1,3,3)
plt.imshow(Z, interpolation='nearest')
plt.colorbar()
# Or as yet another variation, we could replace the rcParams
# specification *before* the imshow with the following *after*
# imshow:
#
# plt.set_cmap('BlueRed3')
#
# This sets the new default *and* sets the colormap of the last
# image-like item plotted via pyplot, if any.
plt.suptitle('Custom Blue-Red colormaps')
plt.show() | en | 0.868562 | #!/usr/bin/env python Example: suppose you want red to increase from 0 to 1 over the bottom half, green to do the same over the middle half, and blue over the top half. Then you would use: cdict = {'red': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.0), (0.25, 0.0, 0.0), (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)), 'blue': ((0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0))} If, as in this example, there are no discontinuities in the r, g, and b components, then it is quite simple: the second and third element of each tuple, above, is the same--call it "y". The first element ("x") defines interpolation intervals over the full range of 0 to 1, and it must span that whole range. In other words, the values of x divide the 0-to-1 range into a set of segments, and y gives the end-point color values for each segment. Now consider the green. cdict['green'] is saying that for 0 <= x <= 0.25, y is zero; no green. 0.25 < x <= 0.75, y varies linearly from 0 to 1. x > 0.75, y remains at 1, full green. If there are discontinuities, then it is a little more complicated. Label the 3 elements in each row in the cdict entry for a given color as (x, y0, y1). Then for values of x between x[i] and x[i+1] the color value is interpolated between y1[i] and y0[i+1]. Going back to the cookbook example, look at cdict['red']; because y0 != y1, it is saying that for x from 0 to 0.5, red increases from 0 to 1, but then it jumps down, so that for x from 0.5 to 1, red increases from 0.7 to 1. Green ramps from 0 to 1 as x goes from 0 to 0.5, then jumps back to 0, and ramps back to 1 as x goes from 0.5 to 1. row i: x y0 y1 / / row i+1: x y0 y1 Above is an attempt to show that for x in the range x[i] to x[i+1], the interpolation is between y1[i] and y0[i+1]. So, y0[0] and y1[-1] are never used. # Now we will use this example to illustrate 3 ways of # handling custom colormaps. 
# First, the most direct and explicit: # Second, create the map explicitly and register it. # Like the first method, this method works with any kind # of Colormap, not just # a LinearSegmentedColormap: # Third, for LinearSegmentedColormap only, # leave everything to register_cmap: # optional lut kwarg # Now we will set the third cmap as the default. One would # not normally do this in the middle of a script like this; # it is done here just to illustrate the method. # Also see below for an alternative, particularly for # interactive use. # Or as yet another variation, we could replace the rcParams # specification *before* the imshow with the following *after* # imshow: # # plt.set_cmap('BlueRed3') # # This sets the new default *and* sets the colormap of the last # image-like item plotted via pyplot, if any. | 3.929317 | 4 |
aerforge/prefabs/gun.py | Aermoss/AerForge | 2 | 6624815 | <gh_stars>1-10
from aerforge import *
from aerforge.math import *
import math
import time
class Gun:
def __init__(self, window, x = 0, y = 0, bullet_speed = 50, shoot_cooldown = 0.1, magazine_size = 20, selected = True, automatic = False, reload_time = 2.4):
self.window = window
self.x = x
self.y = y
self.bullet_speed = bullet_speed
self.shoot_cooldown = shoot_cooldown
self.magazine_size = magazine_size
self.reload_time = reload_time
self.selected = selected
self.automatic = automatic
self.bullet_count = self.magazine_size
self.reload_start_time = time.time()
self.last_shoot = time.time()
self.all_bullets = []
self.state = True
self.reloading = False
self.shooting = False
def reload(self):
if not self.reloading:
self.reloading = True
self.reload_start_time = time.time()
def shoot(self):
self.shooting = True
mouse = self.window.input.mouse_pos()
distance_x = mouse.x - self.x
distance_y = mouse.y - self.y
angle = math.atan2(distance_y, distance_x)
speed_x = self.bullet_speed * math.cos(angle)
speed_y = self.bullet_speed * math.sin(angle)
self.all_bullets.append([self.x, self.y, speed_x, speed_y])
def get_input(self):
if self.window.input.key_pressed(self.window.keys["R"]):
if self.bullet_count != self.magazine_size:
self.reload()
if self.window.input.mouse_pressed(self.window.buttons["LEFT"]):
if self.state:
if not self.reloading:
if self.bullet_count > 0:
self.shoot()
self.state = False
self.last_shoot = time.time()
self.bullet_count = self.bullet_count - 1
else:
if not self.automatic:
if self.last_shoot + self.shoot_cooldown < time.time():
self.state = True
    def update(self):
        """Advance the gun state one frame: clamp ammo, process input,
        finish pending reloads, and integrate bullet positions.
        """
        # Keep the loaded count within the magazine capacity.
        if self.bullet_count > self.magazine_size:
            self.bullet_count = self.magazine_size
        # `shooting` is a one-frame pulse raised by shoot(); clear it here.
        if self.shooting:
            self.shooting = False
        if self.selected:
            # Auto-reload when the magazine runs dry.
            if self.bullet_count <= 0:
                self.reload()
            # Automatic weapons re-arm once the cooldown has elapsed.
            if self.automatic:
                if self.last_shoot + self.shoot_cooldown < time.time():
                    self.state = True
            self.get_input()
            # Complete a pending reload after `reload_time` seconds.
            if self.reloading:
                if self.reload_start_time + self.reload_time < time.time():
                    self.reloading = False
                    self.bullet_count = self.magazine_size
        else:
            # Deselecting the gun cancels any reload in progress.
            self.reloading = False
        # Move every bullet by its per-frame velocity: x += vx, y += vy.
        for item in self.all_bullets:
            item[0] = item[0] + item[2]
            item[1] = item[1] + item[3]
def draw(self):
for pos_x, pos_y, speed_x, speed_y in self.all_bullets:
if pos_x > self.window.width or pos_x < 0 or pos_y > self.window.height or pos_y < 0:
self.all_bullets.pop(self.all_bullets.index([pos_x, pos_y, speed_x, speed_y]))
else:
self.window.draw(shape = shape.Rect, width = 10, height = 10, x = int(pos_x), y = int(pos_y))
def is_reloading(self):
if self.reloading:
return True
return False
def is_shooting(self):
if self.shooting:
return True
return False
def hit(self, entity, destroy_bullet = False):
for pos_x, pos_y, speed_x, speed_y in self.all_bullets:
if entity.hit(Vec2(pos_x, pos_y)):
if destroy_bullet:
self.all_bullets.pop(self.all_bullets.index([pos_x, pos_y, speed_x, speed_y]))
return True
return False
def center(self):
self.x = self.window.width / 2
self.y = self.window.height / 2
def center_x(self):
self.x = self.window.width / 2
def center_y(self):
self.y = self.window.height / 2
def set_magazine_size(self, magazine_size):
self.magazine_size = magazine_size
if self.bullet_count > self.magazine_size:
self.bullet_count = self.magazine_size
def get_magazine_size(self):
return self.magazine_size
def set_bullet_count(self, count):
self.bullet_count = count
if self.bullet_count > self.magazine_size:
self.bullet_count = self.magazine_size
def get_bullet_count(self):
return self.bullet_count | from aerforge import *
from aerforge.math import *
import math
import time
class Gun:
def __init__(self, window, x = 0, y = 0, bullet_speed = 50, shoot_cooldown = 0.1, magazine_size = 20, selected = True, automatic = False, reload_time = 2.4):
self.window = window
self.x = x
self.y = y
self.bullet_speed = bullet_speed
self.shoot_cooldown = shoot_cooldown
self.magazine_size = magazine_size
self.reload_time = reload_time
self.selected = selected
self.automatic = automatic
self.bullet_count = self.magazine_size
self.reload_start_time = time.time()
self.last_shoot = time.time()
self.all_bullets = []
self.state = True
self.reloading = False
self.shooting = False
def reload(self):
if not self.reloading:
self.reloading = True
self.reload_start_time = time.time()
def shoot(self):
self.shooting = True
mouse = self.window.input.mouse_pos()
distance_x = mouse.x - self.x
distance_y = mouse.y - self.y
angle = math.atan2(distance_y, distance_x)
speed_x = self.bullet_speed * math.cos(angle)
speed_y = self.bullet_speed * math.sin(angle)
self.all_bullets.append([self.x, self.y, speed_x, speed_y])
def get_input(self):
if self.window.input.key_pressed(self.window.keys["R"]):
if self.bullet_count != self.magazine_size:
self.reload()
if self.window.input.mouse_pressed(self.window.buttons["LEFT"]):
if self.state:
if not self.reloading:
if self.bullet_count > 0:
self.shoot()
self.state = False
self.last_shoot = time.time()
self.bullet_count = self.bullet_count - 1
else:
if not self.automatic:
if self.last_shoot + self.shoot_cooldown < time.time():
self.state = True
def update(self):
if self.bullet_count > self.magazine_size:
self.bullet_count = self.magazine_size
if self.shooting:
self.shooting = False
if self.selected:
if self.bullet_count <= 0:
self.reload()
if self.automatic:
if self.last_shoot + self.shoot_cooldown < time.time():
self.state = True
self.get_input()
if self.reloading:
if self.reload_start_time + self.reload_time < time.time():
self.reloading = False
self.bullet_count = self.magazine_size
else:
self.reloading = False
for item in self.all_bullets:
item[0] = item[0] + item[2]
item[1] = item[1] + item[3]
def draw(self):
for pos_x, pos_y, speed_x, speed_y in self.all_bullets:
if pos_x > self.window.width or pos_x < 0 or pos_y > self.window.height or pos_y < 0:
self.all_bullets.pop(self.all_bullets.index([pos_x, pos_y, speed_x, speed_y]))
else:
self.window.draw(shape = shape.Rect, width = 10, height = 10, x = int(pos_x), y = int(pos_y))
def is_reloading(self):
if self.reloading:
return True
return False
def is_shooting(self):
if self.shooting:
return True
return False
def hit(self, entity, destroy_bullet = False):
for pos_x, pos_y, speed_x, speed_y in self.all_bullets:
if entity.hit(Vec2(pos_x, pos_y)):
if destroy_bullet:
self.all_bullets.pop(self.all_bullets.index([pos_x, pos_y, speed_x, speed_y]))
return True
return False
def center(self):
self.x = self.window.width / 2
self.y = self.window.height / 2
def center_x(self):
self.x = self.window.width / 2
def center_y(self):
self.y = self.window.height / 2
def set_magazine_size(self, magazine_size):
self.magazine_size = magazine_size
if self.bullet_count > self.magazine_size:
self.bullet_count = self.magazine_size
def get_magazine_size(self):
return self.magazine_size
def set_bullet_count(self, count):
self.bullet_count = count
if self.bullet_count > self.magazine_size:
self.bullet_count = self.magazine_size
def get_bullet_count(self):
return self.bullet_count | none | 1 | 2.822924 | 3 | |
spyder/plugins/onlinehelp/tests/test_pydocgui.py | mwtoews/spyder | 1 | 6624816 | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for pydocgui.py
"""
# Standard library imports
import os
from unittest.mock import MagicMock
# Test library imports
import numpy as np
from numpy.lib import NumpyVersion
import pytest
from flaky import flaky
# Local imports
from spyder.plugins.onlinehelp.widgets import PydocBrowser
@pytest.fixture
def pydocbrowser(qtbot):
    """Set up pydocbrowser.

    Builds a PydocBrowser around a mocked plugin (only CONF_SECTION is
    read from it), shows the widget, and blocks until the initial page
    has finished loading (up to 20 s).
    """
    plugin_mock = MagicMock()
    plugin_mock.CONF_SECTION = 'onlinehelp'
    widget = PydocBrowser(parent=None, plugin=plugin_mock, name='pydoc')
    widget._setup()
    widget.setup()
    widget.resize(640, 480)
    widget.show()
    # Wait for the first page load triggered by initialize().
    with qtbot.waitSignal(widget.sig_load_finished, timeout=20000):
        widget.initialize()
    qtbot.addWidget(widget)
    return widget
@flaky(max_runs=5)
@pytest.mark.parametrize(
    "lib",
    [('str', 'class str', [0, 1]), ('numpy.testing', 'numpy.testing', [5, 10])]
)
@pytest.mark.skipif(
    (not os.name == 'nt' or
     NumpyVersion(np.__version__) < NumpyVersion('1.21.0')),
    reason="Fails on Mac and older versions of Numpy"
)
def test_get_pydoc(pydocbrowser, qtbot, lib):
    """
    Go to the documentation by url.

    Each `lib` case is (name to look up, text expected on the page,
    [lo, hi) bounds for how many times that text should occur).

    Regression test for spyder-ide/spyder#10740
    """
    browser = pydocbrowser
    element, doc, matches = lib
    webview = browser.webview
    # Resolve the pydoc URL for the requested name and load it.
    element_url = browser.text_to_url(element)
    with qtbot.waitSignal(webview.loadFinished):
        browser.set_url(element_url)
    # The page is correct once `doc` appears an expected number of times.
    expected_range = list(range(matches[0], matches[1]))
    qtbot.waitUntil(lambda: webview.get_number_matches(doc) in expected_range)
if __name__ == "__main__":
pytest.main()
| # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
#
"""
Tests for pydocgui.py
"""
# Standard library imports
import os
from unittest.mock import MagicMock
# Test library imports
import numpy as np
from numpy.lib import NumpyVersion
import pytest
from flaky import flaky
# Local imports
from spyder.plugins.onlinehelp.widgets import PydocBrowser
@pytest.fixture
def pydocbrowser(qtbot):
"""Set up pydocbrowser."""
plugin_mock = MagicMock()
plugin_mock.CONF_SECTION = 'onlinehelp'
widget = PydocBrowser(parent=None, plugin=plugin_mock, name='pydoc')
widget._setup()
widget.setup()
widget.resize(640, 480)
widget.show()
with qtbot.waitSignal(widget.sig_load_finished, timeout=20000):
widget.initialize()
qtbot.addWidget(widget)
return widget
@flaky(max_runs=5)
@pytest.mark.parametrize(
"lib",
[('str', 'class str', [0, 1]), ('numpy.testing', 'numpy.testing', [5, 10])]
)
@pytest.mark.skipif(
(not os.name == 'nt' or
NumpyVersion(np.__version__) < NumpyVersion('1.21.0')),
reason="Fails on Mac and older versions of Numpy"
)
def test_get_pydoc(pydocbrowser, qtbot, lib):
"""
Go to the documentation by url.
Regression test for spyder-ide/spyder#10740
"""
browser = pydocbrowser
element, doc, matches = lib
webview = browser.webview
element_url = browser.text_to_url(element)
with qtbot.waitSignal(webview.loadFinished):
browser.set_url(element_url)
expected_range = list(range(matches[0], matches[1]))
qtbot.waitUntil(lambda: webview.get_number_matches(doc) in expected_range)
if __name__ == "__main__":
pytest.main()
| en | 0.687047 | # -*- coding: utf-8 -*- # # Copyright © Spyder Project Contributors # Licensed under the terms of the MIT License # Tests for pydocgui.py # Standard library imports # Test library imports # Local imports Set up pydocbrowser. Go to the documentation by url. Regression test for spyder-ide/spyder#10740 | 2.16751 | 2 |
reinforcement_learning/rl_deepracer_robomaker_coach_gazebo/src/markov/boto/sqs/constants.py | jerrypeng7773/amazon-sagemaker-examples | 2,610 | 6624817 | <gh_stars>1000+
"""This module houses the constants for the sqs client"""
from enum import Enum
class StatusIndicator(Enum):
    """Enum of integer return-status codes used for error accumulation."""
    SUCCESS = 0       # operation completed successfully
    CLIENT_ERROR = 1  # failure attributed to the client side
    SYSTEM_ERROR = 2  # failure attributed to the system side
| """This module houses the constants for the sqs client"""
from enum import Enum
class StatusIndicator(Enum):
"""Enum containing the integers signifing the
the return status code for error accumulation.
"""
SUCCESS = 0
CLIENT_ERROR = 1
SYSTEM_ERROR = 2 | en | 0.620965 | This module houses the constants for the sqs client Enum containing the integers signifing the the return status code for error accumulation. | 2.740455 | 3 |
src/TF/_04Transformation_Broadcaster.py | PiyushMahamuni/roslearning | 1 | 6624818 | <filename>src/TF/_04Transformation_Broadcaster.py
#!/usr/bin/env python
import rospy
import tf
# CONSTANTS
NODE_NAME = "frame_a_frame_b_broadcaster"
PARENT_FRAME = "frame_a"
CHILD_FRAME = "frame_b"
transform_broadcaster = None
# SETTING UP THE NODE
def setup():
    """Initialise the ROS node and create the module-level broadcaster.

    Must run before the publish loop in main() uses
    `transform_broadcaster`.
    """
    rospy.init_node(NODE_NAME)
    # Share the broadcaster through the module-level global declared above.
    global transform_broadcaster
    transform_broadcaster = tf.TransformBroadcaster()
def main():
    """Broadcast a fixed frame_a -> frame_b transform at 2 Hz until shutdown.

    The rotation (roll=0.2, pitch=0.3, yaw=0.1 rad) and translation
    (1, 2, 3) are constants, so the published transform is static.
    """
    setup()
    loop_rate = rospy.Rate(2)  # 2 Hz publish rate
    while not rospy.is_shutdown():
        # Build the rotation quaternion from fixed Euler angles (radians).
        rotation_quaternion = tf.transformations.quaternion_from_euler(
            0.2, 0.3, 0.1)
        # Fixed translation of the child frame relative to the parent.
        translation_vector = (1.0, 2.0, 3.0)
        # Every transform must carry a timestamp; use the moment just
        # before broadcasting.
        ctime = rospy.Time.now()
        transform_broadcaster.sendTransform(
            translation_vector, rotation_quaternion, ctime, CHILD_FRAME, PARENT_FRAME)
        # NOTE: the last two arguments are frame_id strings, in the order
        # child frame then parent frame (per the tf broadcaster API).
        loop_rate.sleep()
if __name__ == "__main__":
main()
# now after running this script, open a new terminal and run following command
# rosrun tf2_tools echo.py frame_a frame_b
# rosrun tf2_tools view_frames.py
# evince frames.py | <filename>src/TF/_04Transformation_Broadcaster.py
#!/usr/bin/env python
import rospy
import tf
# CONSTANTS
NODE_NAME = "frame_a_frame_b_broadcaster"
PARENT_FRAME = "frame_a"
CHILD_FRAME = "frame_b"
transform_broadcaster = None
# SETTING UP THE NODE
def setup():
rospy.init_node(NODE_NAME)
global transform_broadcaster
transform_broadcaster = tf.TransformBroadcaster()
def main():
setup()
loop_rate = rospy.Rate(2)
while not rospy.is_shutdown():
# create a quaternion
rotation_quaternion = tf.transformations.quaternion_from_euler(
0.2, 0.3, 0.1)
# create translation vector
translation_vector = (1.0, 2.0, 3.0)
# current time
ctime = rospy.Time.now()
# each transformation needs to be stamped with appropriate time, here the time
# just before broadcasting it is used.
transform_broadcaster.sendTransform(
translation_vector, rotation_quaternion, ctime, CHILD_FRAME, PARENT_FRAME)
# the last two arguments are strings - frame_id(s) of parent and then child frame
loop_rate.sleep()
if __name__ == "__main__":
main()
# now after running this script, open a new terminal and run following command
# rosrun tf2_tools echo.py frame_a frame_b
# rosrun tf2_tools view_frames.py
# evince frames.py | en | 0.686803 | #!/usr/bin/env python # CONSTANTS # SETTING UP THE NODE # create a quaternion # create translation vector # current time # each transformation needs to be stamped with appropriate time, here the time # just before broadcasting it is used. # the last two arguments are strings - frame_id(s) of parent and then child frame # now after running this script, open a new terminal and run following command # rosrun tf2_tools echo.py frame_a frame_b # rosrun tf2_tools view_frames.py # evince frames.py | 2.475362 | 2 |
app/tests/studies_tests/factories.py | njmhendrix/grand-challenge.org | 0 | 6624819 | import datetime
import factory.fuzzy
import pytz
from grandchallenge.studies.models import Study
from tests.patients_tests.factories import PatientFactory
class StudyFactory(factory.DjangoModelFactory):
    """Factory producing `Study` model instances for tests."""
    class Meta:
        model = Study
    # Unique, human-readable name per generated instance.
    name = factory.Sequence(lambda n: f"Study {n}")
    # Each study gets its own freshly created patient.
    patient = factory.SubFactory(PatientFactory)
    # Random timezone-aware datetime no earlier than 1950-01-01 UTC
    # (upper bound is FuzzyDateTime's default — presumably "now"; verify).
    datetime = factory.fuzzy.FuzzyDateTime(
        datetime.datetime(1950, 1, 1, 0, 0, 0, 0, pytz.UTC)
    )
| import datetime
import factory.fuzzy
import pytz
from grandchallenge.studies.models import Study
from tests.patients_tests.factories import PatientFactory
class StudyFactory(factory.DjangoModelFactory):
class Meta:
model = Study
name = factory.Sequence(lambda n: f"Study {n}")
patient = factory.SubFactory(PatientFactory)
datetime = factory.fuzzy.FuzzyDateTime(
datetime.datetime(1950, 1, 1, 0, 0, 0, 0, pytz.UTC)
)
| none | 1 | 2.076725 | 2 | |
openerp/report/render/render.py | ntiufalara/openerp7 | 3 | 6624820 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Why doing some multi-thread instead of using OSE capabilities ?
# For progress bar.
#
# Add a transparant multi-thread layer to all report rendering layers
#
# TODO: method to stock on the disk
class render(object):
    """A report job that can be rendered and polled for completion.

    @param bin_datas a dictionary of name:<binary content> of images etc.
    @param path where binary files (e.g. report images) can be found;
           either a single path string or a list of path strings. An
           absolute string path is opened directly, otherwise it is
           resolved via tools.file_open(), which also looks inside zip
           addons.

    Subclasses implement the actual rendering by overriding __init__ and
    _render; the remaining methods form the polling interface and must
    not be redefined.
    """
    def __init__(self, bin_datas=None, path='.'):
        self.done = False
        # Fresh dict per instance — never share a mutable default.
        self.bin_datas = {} if bin_datas is None else bin_datas
        self.path = path

    def _render(self):
        """Hook for subclasses: produce and return the rendered output."""
        return None

    def render(self):
        """Run the job synchronously; flags completion and returns True."""
        self.done = False
        result = self._render()
        self._result = result
        self.done = True
        return True

    def is_done(self):
        """Whether render() has finished (used for progress polling)."""
        return self.done

    def get(self):
        """The rendered result, or None while rendering is unfinished."""
        return self._result if self.is_done() else None
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Why doing some multi-thread instead of using OSE capabilities ?
# For progress bar.
#
# Add a transparant multi-thread layer to all report rendering layers
#
# TODO: method to stock on the disk
class render(object):
    """ Represents a report job being rendered.

    @param bin_datas a dictionary of name:<binary content> of images etc.
    @param path where binary files for the report can be discovered: either
        a single path string (relative or absolute) or a list of such
        strings.  An absolute string path is opened as such; anything else
        goes through tools.file_open(), which also considers zip addons.

    Reporting classes must subclass this class and redefine the __init__ and
    _render methods (not the other methods).
    """
    def __init__(self, bin_datas=None, path='.'):
        # A fresh dict per instance when no binary data is supplied, so
        # instances never share a mutable default.
        self.bin_datas = {} if bin_datas is None else bin_datas
        self.path = path
        self.done = False
    def _render(self):
        """Hook for subclasses; the base implementation renders nothing."""
        return None
    def render(self):
        """Execute _render(), remember its result and flag completion."""
        self.done = False
        self._result = self._render()
        self.done = True
        return True
    def is_done(self):
        """True once render() has finished."""
        return self.done
    def get(self):
        """The rendered result once done, else None."""
        return self._result if self.is_done() else None
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| en | 0.724452 | # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## # Why doing some multi-thread instead of using OSE capabilities ? # For progress bar. # # Add a transparant multi-thread layer to all report rendering layers # # TODO: method to stock on the disk Represents a report job being rendered. @param bin_datas a dictionary of name:<binary content> of images etc. @param path the path in which binary files can be discovered, useful for components (images) of the report. It can be: - a string, relative or absolute path to images - a list, containing strings of paths. If a string is absolute path, it will be opened as such, else it will be passed to tools.file_open() which also considers zip addons. Reporting classes must subclass this class and redefine the __init__ and _render methods (not the other methods). # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | 1.342458 | 1 |
Alfred.alfredpreferences/workflows/user.workflow.73C16A76-E7DC-4A21-AE3B-32FD533AB1FF/src/args.py | Puritanic/Dotfiles | 34 | 6624821 | <gh_stars>10-100
#!/usr/bin/python
# encoding: utf-8
# Sub-command keywords accepted by this Alfred workflow's argument parser
# (presumably start/stop/break of a timer -- TODO confirm against caller).
START_ARG = 'start'
STOP_ARG = 'stop'
BREAK_ARG = 'break'
| #!/usr/bin/python
# encoding: utf-8
START_ARG = 'start'
STOP_ARG = 'stop'
BREAK_ARG = 'break' | en | 0.45155 | #!/usr/bin/python # encoding: utf-8 | 1.313423 | 1 |
test.py | DLwbm123/LBAM_inpainting | 7 | 6624822 | import os
# LBAM inpainting inference: read a damaged image and its mask, blank out
# the damaged region, run the pretrained generator and hand the result on.
import math
import argparse
import torch
import torch.backends.cudnn as cudnn
from PIL import Image
from torchvision.utils import save_image
from torchvision import datasets
from models.LBAMModel import LBAMModel
# NOTE(review): duplicate import -- PIL.Image was already imported above.
from PIL import Image
from torchvision.transforms import Compose, ToTensor, Resize, ToPILImage
from data.basicFunction import CheckImageFile
# Command-line interface: damaged image, mask, output stem, weights file.
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default='', help='input damaged image')
parser.add_argument('--mask', type=str, default='', help='input mask')
parser.add_argument('--output', type=str, default='output', help='output file name')
parser.add_argument('--pretrained', type=str, default='', help='load pretrained model')
# NOTE(review): --loadSize is parsed but never used below.
parser.add_argument('--loadSize', type=int, default=350,
                    help='image loading size')
parser.add_argument('--cropSize', type=int, default=256,
                    help='image training size')
args = parser.parse_args()
# Both image and mask are resized to cropSize and converted to tensors.
ImageTransform = Compose([
    Resize(size=args.cropSize, interpolation=Image.NEAREST),
    ToTensor(),
])
MaskTransform = Compose([
    Resize(size=args.cropSize, interpolation=Image.NEAREST),
    ToTensor(),
])
if not CheckImageFile(args.input):
    print('Input file is not image file!')
elif not CheckImageFile(args.mask):
    print('Input mask is not image file!')
elif args.pretrained == '':
    print('Provide pretrained model!')
else:
    image = ImageTransform(Image.open(args.input).convert('RGB'))
    mask = MaskTransform(Image.open(args.mask).convert('RGB'))
    # Invert the mask; given the multiplication below, nonzero then appears
    # to mean "known pixel" -- TODO confirm the mask file convention.
    mask = 1 - mask
    sizes = image.size()
    # Zero out the masked (hole) regions of the input image.
    image = image * mask
    # Generator input: damaged RGB plus one mask channel, batched (1,4,H,W).
    inputImage = torch.cat((image, mask[0].view(1, sizes[1], sizes[2])), 0)
    inputImage = inputImage.view(1, 4, sizes[1], sizes[2])
    mask = mask.view(1, sizes[0], sizes[1], sizes[2])
    # LBAM generator: 4 input channels, 3 output channels; inference only,
    # so gradients are switched off for every parameter.
    netG = LBAMModel(4, 3)
    netG.load_state_dict(torch.load(args.pretrained))
    for param in netG.parameters():
        param.requires_grad = False
    netG.eval()
    if torch.cuda.is_available():
        netG = netG.cuda()
        inputImage = inputImage.cuda()
        mask = mask.cuda()
    output = netG(inputImage, mask)
save_image(output, args.output + '.png') | import os
# LBAM inpainting inference script: masks a damaged image, runs the
# pretrained generator and produces the inpainted output tensor.
import math
import argparse
import torch
import torch.backends.cudnn as cudnn
from PIL import Image
from torchvision.utils import save_image
from torchvision import datasets
from models.LBAMModel import LBAMModel
# NOTE(review): duplicate import of PIL.Image.
from PIL import Image
from torchvision.transforms import Compose, ToTensor, Resize, ToPILImage
from data.basicFunction import CheckImageFile
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default='', help='input damaged image')
parser.add_argument('--mask', type=str, default='', help='input mask')
parser.add_argument('--output', type=str, default='output', help='output file name')
parser.add_argument('--pretrained', type=str, default='', help='load pretrained model')
# NOTE(review): --loadSize is parsed but never used.
parser.add_argument('--loadSize', type=int, default=350,
                    help='image loading size')
parser.add_argument('--cropSize', type=int, default=256,
                    help='image training size')
args = parser.parse_args()
ImageTransform = Compose([
    Resize(size=args.cropSize, interpolation=Image.NEAREST),
    ToTensor(),
])
MaskTransform = Compose([
    Resize(size=args.cropSize, interpolation=Image.NEAREST),
    ToTensor(),
])
if not CheckImageFile(args.input):
    print('Input file is not image file!')
elif not CheckImageFile(args.mask):
    print('Input mask is not image file!')
elif args.pretrained == '':
    print('Provide pretrained model!')
else:
    image = ImageTransform(Image.open(args.input).convert('RGB'))
    mask = MaskTransform(Image.open(args.mask).convert('RGB'))
    # Invert mask, zero the holes, then feed image+mask (4 channels) to the
    # generator as a single-item batch.
    mask = 1 - mask
    sizes = image.size()
    image = image * mask
    inputImage = torch.cat((image, mask[0].view(1, sizes[1], sizes[2])), 0)
    inputImage = inputImage.view(1, 4, sizes[1], sizes[2])
    mask = mask.view(1, sizes[0], sizes[1], sizes[2])
    netG = LBAMModel(4, 3)
    netG.load_state_dict(torch.load(args.pretrained))
    # Inference only: freeze all parameters and switch to eval mode.
    for param in netG.parameters():
        param.requires_grad = False
    netG.eval()
    if torch.cuda.is_available():
        netG = netG.cuda()
        inputImage = inputImage.cuda()
        mask = mask.cuda()
    output = netG(inputImage, mask)
save_image(output, args.output + '.png') | none | 1 | 2.391628 | 2 | |
Machine Learning/Sklearn Implementations/Dimensionality Reduction/Principal Component Analysis.py | adityamanglik/Algorithm-Implementations | 0 | 6624823 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 18:15:29 2020

@author: admangli

PCA (2 principal components) on the Wine dataset, followed by a
random-forest classifier and decision-region plots for the training
and test sets.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
dataset = pd.read_csv('Wine.csv')
X = dataset.iloc[:, :-1].values  # feature columns
y = dataset.iloc[:, -1].values   # class label (last column)
#%%
# Split the dataset first, so no test information reaches preprocessing.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, stratify = y)
# Scale dataset.
# Bug fix: the scaler was previously fit on the full dataset *before* the
# split, leaking test-set statistics into training.  Fit on the training
# fold only, then reuse those statistics for the test fold.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#%%
# Applying dimensionality reduction
from sklearn.decomposition import PCA
# First set n_components to 'mle', check explained_variance vector to
# determine how many attributes are actually contributing to results
pca = PCA(n_components = 2, svd_solver = 'auto')
# After determining the n_components, set it to the value and apply fit_transform
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
# The values in this vector denote the percent of variance explained by the feature
explained_variance = pca.explained_variance_ratio_
#%%
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 500, criterion = 'entropy')
classifier.fit(X_train_pca, y_train)
y_pred = classifier.predict(X_test_pca)
#%%
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
#%% Training plot
from matplotlib.colors import ListedColormap
# Bug fix: scatter the PCA-transformed coordinates the classifier was
# trained on.  The original plotted the first two *raw* features over
# decision regions computed in PCA space, so points and regions disagreed.
X_set, y_set = X_train_pca, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = .01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = .01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green', 'cyan')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('darkred', 'darkgreen', 'blue'))(i), label = j)
plt.title('Random Forest (Training set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
#%% Test plot
from matplotlib.colors import ListedColormap
X_set, y_set = X_test_pca, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = .01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = .01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green', 'cyan')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('darkred', 'darkgreen', 'blue'))(i), label = j)
# Bug fix: this plot shows a random forest in PCA space, not an SVM on
# Age/Estimated Salary -- the title, labels and colour counts (three
# colours like the training plot) now match the data actually plotted.
plt.title('Random Forest (Test set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show() | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 18:15:29 2020
@author: admangli

PCA (2 components) on the Wine dataset, then a random-forest classifier
with decision-region plots for the training and test splits.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
dataset = pd.read_csv('Wine.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
#%%
# Scale dataset
# NOTE(review): the scaler is fit on the full dataset *before* the split,
# leaking test statistics into training; fit on X_train only and transform
# X_test separately to avoid the leak.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X = sc.fit_transform(X)
# Split the dataset
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, stratify = y)
#%%
# Applying dimensionality reduction
from sklearn.decomposition import PCA
# First set n_components to 'mle', check explained_variance vector to
# determine how many attributes are actually contributing to results
pca = PCA(n_components = 2, svd_solver = 'auto')
# After determining the n_components, set it to the value and apply fit_transform
X_train_pca = pca.fit_transform(X_train)
X_test_pca = pca.transform(X_test)
# The values in this vector denote the percent of variance explained by the feature
explained_variance = pca.explained_variance_ratio_
#%%
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 500, criterion = 'entropy')
classifier.fit(X_train_pca, y_train)
y_pred = classifier.predict(X_test_pca)
#%%
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
#%% Training plot
from matplotlib.colors import ListedColormap
# NOTE(review): the classifier was trained on X_train_pca (2 components)
# but the scatter below uses the first two columns of the raw X_train;
# points and decision regions therefore live in different spaces.
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = .01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = .01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green', 'cyan')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('darkred', 'darkgreen', 'blue'))(i), label = j)
plt.title('Training set')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
#%% Test plot
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = .01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = .01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('darkred', 'darkgreen'))(i), label = j)
# NOTE(review): title/labels look copied from an SVM Age/Salary tutorial;
# this model is a random forest on PC1/PC2.  The colormaps here also carry
# only two colours although the training plot (same classes) uses three.
plt.title('SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show() | en | 0.724295 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Fri Apr 10 18:15:29 2020 @author: admangli #%% # Scale dataset # Split the dataset #%% # Applying dimensionality reduction # First set n_components to 'mle', check explained_variance vector to # determine how many attributes are actually contributing to results # After determining the n_components, set it to the value and apply fit_transform # The values in this vector denote the percent of variance explained by the feature #%% #%% #%% Training plot #%% Test plot | 2.958164 | 3 |
BackgroundSubtraction/test_files/exponentialFilter.py | JanuszJSzturo/ImageAnalysis2020 | 0 | 6624824 | import numpy as np
import cv2
# Background subtraction via a running (exponential) average: the first
# init_frames frames build the background model; afterwards each frame is
# compared against the model and the model adapts with learning rate alpha.
cap = cv2.VideoCapture('project.avi')
kernel = np.ones((2,2),np.uint8)  # structuring element for opening (noise removal)
init_frames = 20
alpha = 0.05 #learning rate [0,1]
background = np.zeros(shape=(240,320))
for i in range(init_frames):
    ret, frame = cap.read()
    if not ret:
        # Bug fix: video shorter than init_frames -- average what we got
        # instead of passing frame=None to cvtColor.
        init_frames = max(i, 1)
        break
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    background = background + frame
background = background/init_frames
background = background.astype(np.uint8)
while(1):
    ret, frame = cap.read()
    if not ret:
        # Bug fix: end of stream -- the original crashed in cvtColor here.
        break
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Bug fix: np.abs(frame - background) on uint8 wraps modulo 256 before
    # abs runs (e.g. 10 - 200 wraps to 66, not |−190| = 190); cv2.absdiff
    # computes the saturated absolute difference correctly.
    mask_frame = cv2.absdiff(frame, background)
    ret, thresh1 = cv2.threshold(mask_frame, 100, 255, cv2.THRESH_BINARY)
    thresh1 = cv2.morphologyEx(thresh1, cv2.MORPH_OPEN, kernel)
    # Exponential update of the background model with learning rate alpha.
    background = ((1-alpha)*background+alpha*frame).astype(np.uint8)
    cv2.imshow('frame', thresh1)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits
        break
cap.release()
cv2.destroyAllWindows() | import numpy as np
import cv2
# Running-average (exponential) background subtraction on a video file.
cap = cv2.VideoCapture('project.avi')
kernel = np.ones((2,2),np.uint8)  # structuring element for the opening step
init_frames = 20
alpha = 0.05 #learning rate [0,1]
background = np.zeros(shape=(240,320))
# Average the first init_frames frames as the initial background model.
# NOTE(review): the ret flag from cap.read() is never checked; a short or
# missing video makes cvtColor fail on frame=None.
for i in range(init_frames):
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    background = background + frame
background = background/init_frames
background = background.astype(np.uint8)
while(1):
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # NOTE(review): uint8 subtraction wraps modulo 256 before np.abs runs;
    # cv2.absdiff(frame, background) would give the intended difference.
    mask_frame = np.abs(frame - background)
    ret, thresh1 = cv2.threshold(mask_frame, 100, 255, cv2.THRESH_BINARY)
    thresh1 = cv2.morphologyEx(thresh1, cv2.MORPH_OPEN, kernel)
    # Exponential update of the background with learning rate alpha.
    background = ((1-alpha)*background+alpha*frame).astype(np.uint8)
    cv2.imshow('frame', thresh1)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # ESC quits
        break
cap.release()
cv2.destroyAllWindows() | en | 0.666348 | #learning rate [0,1] | 2.544658 | 3 |
tudo/ex082.py | Ramon-Erik/Exercicios-Python | 1 | 6624825 | lista1, par, imp = [], [], []
# Read integers until the user answers 'N', splitting them into even (par)
# and odd (imp) lists, then print a summary.  lista1/par/imp are the lists
# created on the line above.
while True:
    print('='*46)
    lista1.append(int(input('Digite um valor: ')))
    keep = str(input('Deseja continuar? [S/N] '))
    # The value just read (last element) decides which bucket it joins.
    if lista1[-1] % 2 == 0:
        par.append(lista1[-1])
    else:
        imp.append(lista1[-1])
    # Bug fix: the original test `keep.upper() in 'N'` was also True for an
    # empty answer ('' is a substring of every string), so just pressing
    # Enter silently ended the loop.  Require an explicit 'N'/'n'.
    if keep.strip().upper() == 'N':
        break
print('='*46)
print('Sua lista:',lista1)
if len(par) > 0:
    print('Os números par: ', par)
else: print('Você não digitou nenhum número par.')
if len(imp) > 0:
    print('Os números imp: ',imp)
else: print('Você não digitou nenhum número ímpar.')
print('='*46)
| lista1, par, imp = [], [], []
temp = 0  # index of the most recently appended value in lista1
while True:
    print('='*46)
    lista1.append(int(input('Digite um valor: ')))
    keep = str(input('Deseja continuar? [S/N] '))
    # Classify the value just read as even (par) or odd (imp).
    if lista1[temp] % 2 == 0:
        par.append(lista1[temp])
    else:
        imp.append(lista1[temp])
    # NOTE(review): '' (just pressing Enter) is a substring of 'N', so an
    # empty answer also ends the loop; an explicit == 'N' would be safer.
    if keep.upper() in 'N':
        break
    temp += 1
print('='*46)
print('Sua lista:',lista1)
if len(par) > 0:
    print('Os números par: ', par)
else: print('Você não digitou nenhum número par.')
if len(imp) > 0:
    print('Os números imp: ',imp)
else: print('Você não digitou nenhum número ímpar.')
print('='*46)
| none | 1 | 3.637619 | 4 | |
node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/web.py | Kanishkparganiha/Portfolio-website-using-NextJs-and-Vercel | 0 | 6624826 | # -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Lexers for web-related languages and markup.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
include, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Other, Punctuation, Literal
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
html_doctype_matches, unirange
from pygments.lexers.agile import RubyLexer
from pygments.lexers.compiled import ScalaLexer
__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer',
'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ScssLexer',
'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer',
'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer',
'DtdLexer', 'DartLexer', 'LassoLexer']
class JavascriptLexer(RegexLexer):
    """
    For JavaScript source code.
    """
    name = 'JavaScript'
    aliases = ['js', 'javascript']
    filenames = ['*.js', ]
    mimetypes = ['application/javascript', 'application/x-javascript',
                 'text/x-javascript', 'text/javascript', ]
    # DOTALL lets '.' cross newlines, so multi-line comments match.
    flags = re.DOTALL
    tokens = {
        # Comments and whitespace, shared (via include) by other states.
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered wherever a following '/' must begin a regex literal
        # (after operators, punctuation, keywords) rather than division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            # A '/' that failed to parse as a regex: recover in 'badregex'.
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop')
        ],
        # Skip to end of line after an unparsable regex-like '/'.
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            # Words reserved for future use in ECMAScript.
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class JsonLexer(RegexLexer):
    """
    For JSON data structures.

    *New in Pygments 1.5.*
    """
    name = 'JSON'
    aliases = ['json']
    filenames = ['*.json']
    mimetypes = [ 'application/json', ]
    # integer part of a number
    int_part = r'-?(0|[1-9]\d*)'
    # fractional part of a number
    frac_part = r'\.\d+'
    # exponential part of a number
    exp_part = r'[eE](\+|-)?\d+'
    flags = re.DOTALL
    tokens = {
        'whitespace': [
            (r'\s+', Text),
        ],
        # represents a simple terminal value: constant, number or string
        'simplevalue': [
            (r'(true|false|null)\b', Keyword.Constant),
            # Float: the int/frac/exp fragments above, combined via %-format.
            (('%(int_part)s(%(frac_part)s%(exp_part)s|'
              '%(exp_part)s|%(frac_part)s)') % vars(),
             Number.Float),
            (int_part, Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
        ],
        # the right hand side of an object entry, after the attribute name
        'objectattribute': [
            include('value'),
            (r':', Punctuation),
            # comma terminates the attribute but expects more
            (r',', Punctuation, '#pop'),
            # a closing bracket terminates the entire object, so pop twice
            (r'}', Punctuation, ('#pop', '#pop')),
        ],
        # a json object - { attr, attr, ... }
        'objectvalue': [
            include('whitespace'),
            (r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
            (r'}', Punctuation, '#pop'),
        ],
        # a json array - [ value, value, ... ]
        'arrayvalue': [
            include('whitespace'),
            include('value'),
            (r',', Punctuation),
            (r']', Punctuation, '#pop'),
        ],
        # a json value - either a simple value or a complex value (object or array)
        'value': [
            include('whitespace'),
            include('simplevalue'),
            (r'{', Punctuation, 'objectvalue'),
            (r'\[', Punctuation, 'arrayvalue'),
        ],
        # the root of a json document should be a value
        'root': [
            include('value'),
        ],
    }
JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5
class ActionScriptLexer(RegexLexer):
    """
    For ActionScript source code.

    *New in Pygments 0.9.*
    """
    name = 'ActionScript'
    aliases = ['as', 'actionscript']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
                 'text/actionscript3']
    flags = re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
            (r'[~\^\*!%&<>\|+=:;,/?\\-]+', Operator),
            (r'[{}\[\]();.]+', Punctuation),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|var|with|new|typeof|arguments|instanceof|this|'
             r'switch)\b', Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
             Keyword.Constant),
            # Flash/ActionScript API class names.
            # NOTE(review): several entries look misspelled as shipped
            # ('ConvultionFilter', 'DisplacmentMapFilter', 'URLVariabeles')
            # and one alternation is missing a '|' separator after
            # 'IDynamicPropertyOutput'; kept byte-identical here.
            (r'(Accessibility|AccessibilityProperties|ActionScriptVersion|'
             r'ActivityEvent|AntiAliasType|ApplicationDomain|AsBroadcaster|Array|'
             r'AsyncErrorEvent|AVM1Movie|BevelFilter|Bitmap|BitmapData|'
             r'BitmapDataChannel|BitmapFilter|BitmapFilterQuality|BitmapFilterType|'
             r'BlendMode|BlurFilter|Boolean|ByteArray|Camera|Capabilities|CapsStyle|'
             r'Class|Color|ColorMatrixFilter|ColorTransform|ContextMenu|'
             r'ContextMenuBuiltInItems|ContextMenuEvent|ContextMenuItem|'
             r'ConvultionFilter|CSMSettings|DataEvent|Date|DefinitionError|'
             r'DeleteObjectSample|Dictionary|DisplacmentMapFilter|DisplayObject|'
             r'DisplacmentMapFilterMode|DisplayObjectContainer|DropShadowFilter|'
             r'Endian|EOFError|Error|ErrorEvent|EvalError|Event|EventDispatcher|'
             r'EventPhase|ExternalInterface|FileFilter|FileReference|'
             r'FileReferenceList|FocusDirection|FocusEvent|Font|FontStyle|FontType|'
             r'FrameLabel|FullScreenEvent|Function|GlowFilter|GradientBevelFilter|'
             r'GradientGlowFilter|GradientType|Graphics|GridFitType|HTTPStatusEvent|'
             r'IBitmapDrawable|ID3Info|IDataInput|IDataOutput|IDynamicPropertyOutput'
             r'IDynamicPropertyWriter|IEventDispatcher|IExternalizable|'
             r'IllegalOperationError|IME|IMEConversionMode|IMEEvent|int|'
             r'InteractiveObject|InterpolationMethod|InvalidSWFError|InvokeEvent|'
             r'IOError|IOErrorEvent|JointStyle|Key|Keyboard|KeyboardEvent|KeyLocation|'
             r'LineScaleMode|Loader|LoaderContext|LoaderInfo|LoadVars|LocalConnection|'
             r'Locale|Math|Matrix|MemoryError|Microphone|MorphShape|Mouse|MouseEvent|'
             r'MovieClip|MovieClipLoader|Namespace|NetConnection|NetStatusEvent|'
             r'NetStream|NewObjectSample|Number|Object|ObjectEncoding|PixelSnapping|'
             r'Point|PrintJob|PrintJobOptions|PrintJobOrientation|ProgressEvent|Proxy|'
             r'QName|RangeError|Rectangle|ReferenceError|RegExp|Responder|Sample|Scene|'
             r'ScriptTimeoutError|Security|SecurityDomain|SecurityError|'
             r'SecurityErrorEvent|SecurityPanel|Selection|Shape|SharedObject|'
             r'SharedObjectFlushStatus|SimpleButton|Socket|Sound|SoundChannel|'
             r'SoundLoaderContext|SoundMixer|SoundTransform|SpreadMethod|Sprite|'
             r'StackFrame|StackOverflowError|Stage|StageAlign|StageDisplayState|'
             r'StageQuality|StageScaleMode|StaticText|StatusEvent|String|StyleSheet|'
             r'SWFVersion|SyncEvent|SyntaxError|System|TextColorType|TextField|'
             r'TextFieldAutoSize|TextFieldType|TextFormat|TextFormatAlign|'
             r'TextLineMetrics|TextRenderer|TextSnapshot|Timer|TimerEvent|Transform|'
             r'TypeError|uint|URIError|URLLoader|URLLoaderDataFormat|URLRequest|'
             r'URLRequestHeader|URLRequestMethod|URLStream|URLVariabeles|VerifyError|'
             r'Video|XML|XMLDocument|XMLList|XMLNode|XMLNodeType|XMLSocket|XMLUI)\b',
             Name.Builtin),
            # Global ActionScript functions.
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b',Name.Function),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class ActionScript3Lexer(RegexLexer):
    """
    For ActionScript 3 source code.

    *New in Pygments 0.11.*
    """
    name = 'ActionScript 3'
    aliases = ['as3', 'actionscript3']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript', 'text/x-actionscript',
                 'text/actionscript']
    # Reusable regex fragments for identifiers and (possibly generic) types.
    identifier = r'[$a-zA-Z_][a-zA-Z0-9_]*'
    typeidentifier = identifier + '(?:\.<\w+>)?'
    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            (r'\s+', Text),
            # 'function name(' -> highlight name, then lex the parameters.
            (r'(function\s+)(' + identifier + r')(\s*)(\()',
             bygroups(Keyword.Declaration, Name.Function, Text, Operator),
             'funcparams'),
            # 'var/const name : Type' declarations.
            (r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r')',
             bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
                      Keyword.Type)),
            (r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
             bygroups(Keyword, Text, Name.Namespace, Text)),
            (r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
             bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
            (r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
             r'switch|import|include|as|is)\b',
             Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (identifier, Name),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[~\^\*!%&<>\|+=:;,/?\\{}\[\]().-]+', Operator),
        ],
        # Parameter list of a function, entered from 'root'.
        'funcparams': [
            (r'\s+', Text),
            (r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r'|\*)(\s*)',
             bygroups(Text, Punctuation, Name, Text, Operator, Text,
                      Keyword.Type, Text), 'defval'),
            (r'\)', Operator, 'type')
        ],
        # Optional return-type annotation after the parameter list.
        'type': [
            (r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
             bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
            (r'\s*', Text, '#pop:2')
        ],
        # Optional default value for a parameter.
        'defval': [
            (r'(=)(\s*)([^(),]+)(\s*)(,?)',
             bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
            (r',?', Operator, '#pop')
        ]
    }
    def analyse_text(text):
        """Guess whether *text* is AS3: typed declarations ('name : Type')."""
        if re.match(r'\w+\s*:\s*\w', text):
            return 0.3
        return 0
class CssLexer(RegexLexer):
    """
    For CSS (Cascading Style Sheets).
    """
    name = 'CSS'
    aliases = ['css']
    filenames = ['*.css']
    mimetypes = ['text/css']
    tokens = {
        'root': [
            include('basics'),
        ],
        # Selector-level constructs: pseudo-classes, classes, ids, at-rules.
        'basics': [
            (r'\s+', Text),
            (r'/\*(?:.|\n)*?\*/', Comment),
            (r'{', Punctuation, 'content'),
            (r'\:[a-zA-Z0-9_-]+', Name.Decorator),
            (r'\.[a-zA-Z0-9_-]+', Name.Class),
            (r'\#[a-zA-Z0-9_-]+', Name.Function),
            (r'@[a-zA-Z0-9_-]+', Keyword, 'atrule'),
            (r'[a-zA-Z0-9_-]+', Name.Tag),
            (r'[~\^\*!%&\[\]\(\)<>\|+=@:;,./?-]', Operator),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single)
        ],
        # After '@rule': either a block ('atcontent') or a ';'-terminated rule.
        'atrule': [
            (r'{', Punctuation, 'atcontent'),
            (r';', Punctuation, '#pop'),
            include('basics'),
        ],
        'atcontent': [
            include('basics'),
            # Closing brace ends both 'atcontent' and the 'atrule' state.
            (r'}', Punctuation, '#pop:2'),
        ],
        # Inside a '{ ... }' declaration block.
        'content': [
            (r'\s+', Text),
            (r'}', Punctuation, '#pop'),
            (r'url\(.*?\)', String.Other),
            (r'^@.*?$', Comment.Preproc),
            # One big alternation of CSS property names followed by common
            # value keywords (ordered longest-first where prefixes overlap).
            (r'(azimuth|background-attachment|background-color|'
             r'background-image|background-position|background-repeat|'
             r'background|border-bottom-color|border-bottom-style|'
             r'border-bottom-width|border-left-color|border-left-style|'
             r'border-left-width|border-right|border-right-color|'
             r'border-right-style|border-right-width|border-top-color|'
             r'border-top-style|border-top-width|border-bottom|'
             r'border-collapse|border-left|border-width|border-color|'
             r'border-spacing|border-style|border-top|border|caption-side|'
             r'clear|clip|color|content|counter-increment|counter-reset|'
             r'cue-after|cue-before|cue|cursor|direction|display|'
             r'elevation|empty-cells|float|font-family|font-size|'
             r'font-size-adjust|font-stretch|font-style|font-variant|'
             r'font-weight|font|height|letter-spacing|line-height|'
             r'list-style-type|list-style-image|list-style-position|'
             r'list-style|margin-bottom|margin-left|margin-right|'
             r'margin-top|margin|marker-offset|marks|max-height|max-width|'
             r'min-height|min-width|opacity|orphans|outline|outline-color|'
             r'outline-style|outline-width|overflow(?:-x|-y)?|padding-bottom|'
             r'padding-left|padding-right|padding-top|padding|page|'
             r'page-break-after|page-break-before|page-break-inside|'
             r'pause-after|pause-before|pause|pitch|pitch-range|'
             r'play-during|position|quotes|richness|right|size|'
             r'speak-header|speak-numeral|speak-punctuation|speak|'
             r'speech-rate|stress|table-layout|text-align|text-decoration|'
             r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
             r'vertical-align|visibility|voice-family|volume|white-space|'
             r'widows|width|word-spacing|z-index|bottom|left|'
             r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
             r'behind|below|bidi-override|blink|block|bold|bolder|both|'
             r'capitalize|center-left|center-right|center|circle|'
             r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
             r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
             r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
             r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
             r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
             r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
             r'inherit|inline-table|inline|inset|inside|invert|italic|'
             r'justify|katakana-iroha|katakana|landscape|larger|large|'
             r'left-side|leftwards|level|lighter|line-through|list-item|'
             r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
             r'lower|low|medium|message-box|middle|mix|monospace|'
             r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
             r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
             r'open-quote|outset|outside|overline|pointer|portrait|px|'
             r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
             r'rightwards|s-resize|sans-serif|scroll|se-resize|'
             r'semi-condensed|semi-expanded|separate|serif|show|silent|'
             r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
             r'spell-out|square|static|status-bar|super|sw-resize|'
             r'table-caption|table-cell|table-column|table-column-group|'
             r'table-footer-group|table-header-group|table-row|'
             r'table-row-group|text|text-bottom|text-top|thick|thin|'
             r'transparent|ultra-condensed|ultra-expanded|underline|'
             r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
             r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
             r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Keyword),
            # Named CSS colours.
            (r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
             r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
             r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
             r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
             r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
             r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
             r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
             r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
             r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
             r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
             r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
             r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
             r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
             r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
             r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
             r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
             r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
             r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
             r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
             r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
             r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
             r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
             r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
             r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
             r'blueviolet|peachpuff)\b', Name.Builtin),
            (r'\!important', Comment.Preproc),
            (r'/\*(?:.|\n)*?\*/', Comment),
            # Hex colours, dimensioned numbers, bare integers.
            (r'\#[a-zA-Z0-9]{1,6}', Number),
            (r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|\%|pt|pc|in|mm|cm|ex|s)\b', Number),
            (r'-?[0-9]+', Number),
            (r'[~\^\*!%&<>\|+=@:,./?-]+', Operator),
            (r'[\[\]();]+', Punctuation),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name)
        ]
    }
class ObjectiveJLexer(RegexLexer):
    """
    For Objective-J source code with preprocessor directives.

    *New in Pygments 1.3.*
    """

    name = 'Objective-J'
    aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
    filenames = ['*.j']
    mimetypes = ['text/x-objective-j']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'

    flags = re.DOTALL | re.MULTILINE

    # Each state maps to an ordered list of (regex, token[, new-state]) rules;
    # `include` splices another state's rules in place, and `using(this)`
    # re-lexes the matched group with this same lexer (optionally starting
    # in a different state).
    tokens = {
        'root': [
            include('whitespace'),

            # function definition: "+/- (signature) {" at line start; the
            # signature group is re-lexed in the 'function_signature' state
            (r'^(' + _ws + r'[\+-]' + _ws + r')([\(a-zA-Z_].*?[^\(])(' + _ws + '{)',
             bygroups(using(this), using(this, state='function_signature'),
                      using(this))),

            # class definition
            (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
             'classname'),
            (r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
             'forward_classname'),
            (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),

            include('statements'),
            ('[{\(\)}]', Punctuation),
            (';', Punctuation),
        ],
        'whitespace': [
            # @import / #include-style directives with "..." or <...> targets
            (r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),

            # '#if 0' blocks get their own skip-everything state
            (r'#if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),

            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'<!--', Comment),
        ],
        'slashstartsregex': [
            # entered after tokens that may legally be followed by a regex
            # literal; pops back if what follows is not a regex
            include('whitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop'),
        ],
        'badregex': [
            (r'\n', Text, '#pop'),
        ],
        'statements': [
            (r'(L|@)?"', String, 'string'),
            (r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            # numeric literals: exponent floats, plain floats, hex, octal, int
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),

            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),

            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),

            (r'(for|in|while|do|break|return|continue|switch|case|default|if|'
             r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),

            (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),

            (r'(@selector|@private|@protected|@public|@encode|'
             r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
             r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),

            (r'(int|long|float|short|double|char|unsigned|signed|void|'
             r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
             Keyword.Type),

            (r'(self|super)\b', Name.Builtin),

            (r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
             r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
             r'SQRT2)\b', Keyword.Constant),

            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),

            # identifier followed (after optional ws/comments) by '(' is
            # treated as a function name
            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r')(?=\()',
             bygroups(Name.Function, using(this))),

            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'classname' : [
            # interface definition that inherits
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r':' + _ws +
             r')([a-zA-Z_][a-zA-Z0-9_]*)?',
             bygroups(Name.Class, using(this), Name.Class), '#pop'),
            # interface definition for a category
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r'\()([a-zA-Z_][a-zA-Z0-9_]*)(\))',
             bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
            # simple interface / implementation
            (r'([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop'),
        ],
        'forward_classname' : [
            # comma-separated list: '#push' keeps consuming names
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
             bygroups(Name.Class, Text), '#push'),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
             bygroups(Name.Class, Text), '#pop'),
        ],
        'function_signature': [
            include('whitespace'),

            # start of a selector w/ parameters
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), 'function_parameters'),

            # no-param function
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), "#pop"),

            # no return type given, start of a selector w/ parameters
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             bygroups (Name.Function), 'function_parameters'),

            # no return type given, no-param function
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(Name.Function), "#pop"),

            ('', Text, '#pop'),
        ],
        'function_parameters': [
            include('whitespace'),

            # parameters
            (r'(\(' + _ws + ')'                 # open paren
             r'([^\)]+)'                        # type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # param name
             bygroups(using(this), Keyword.Type, using(this), Text)),

            # one piece of a selector name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             Name.Function),

            # smallest possible selector piece
            (r'(:)', Name.Function),

            # var args
            (r'(,' + _ws + r'\.\.\.)', using(this)),

            # param name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)', Text),
        ],
        'expression' : [
            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(\()', bygroups(Name.Function,
                                                        Punctuation)),
            (r'(\))', Punctuation, "#pop"),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),  # line continuation
            (r'\\', String),  # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),  # continuation keeps macro open
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            # nested #if/#endif tracked with #push/#pop; body is Comment
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    def analyse_text(text):
        # '@import <...>' / '@import "..."' is the tell-tale directive used
        # by lexer guessing; True means maximum confidence
        if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
            # special directive found in most Objective-J files
            return True
        return False
class HtmlLexer(RegexLexer):
    """
    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
    by the appropriate lexer.
    """

    name = 'HTML'
    aliases = ['html']
    filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
    mimetypes = ['text/html', 'application/xhtml+xml']
    flags = re.IGNORECASE | re.DOTALL

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),  # processing instructions
            ('<![^>]*>', Comment.Preproc),    # doctype and similar
            # <script>/<style> get dedicated content states so the nested
            # language can be delegated to the JS / CSS lexers below
            (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
            (r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
            (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
            (r'[a-zA-Z0-9_:-]+', Name.Attribute),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'script-content': [
            (r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
            # everything up to the closing tag is re-lexed as JavaScript
            (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
        ],
        'style-content': [
            (r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
            # everything up to the closing tag is re-lexed as CSS
            (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # moderate confidence only: doctype-based HTML detection should let
        # more specific template lexers outbid this one
        if html_doctype_matches(text):
            return 0.5
class PhpLexer(RegexLexer):
    """
    For `PHP <http://www.php.net/>`_ source code.
    For PHP embedded in HTML, use the `HtmlPhpLexer`.

    Additional options accepted:

    `startinline`
        If given and ``True`` the lexer starts highlighting with
        php code (i.e.: no starting ``<?php`` required).  The default
        is ``False``.
    `funcnamehighlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabledmodules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted
        except the special ``'unknown'`` module that includes functions
        that are known to php but are undocumented.

        To get a list of allowed modules have a look into the
        `_phpbuiltins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._phpbuiltins import MODULES
            >>> MODULES.keys()
            ['PHP Options/Info', 'Zip', 'dba', ...]

        In fact the names of those modules match the module names from
        the php documentation.
    """

    name = 'PHP'
    aliases = ['php', 'php3', 'php4', 'php5']
    filenames = ['*.php', '*.php[345]']
    mimetypes = ['text/x-php']
    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE

    tokens = {
        # outside of '<?php ... ?>' everything passes through as Other so a
        # delegating lexer (e.g. HtmlPhpLexer) can highlight the host markup
        'root': [
            (r'<\?(php)?', Comment.Preproc, 'php'),
            (r'[^<]+', Other),
            (r'<', Other)
        ],
        'php': [
            (r'\?>', Comment.Preproc, '#pop'),
            # heredoc/nowdoc: <<<LABEL ... LABEL (\1 = optional quote,
            # \2 = label, matched again at the end)
            (r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String),
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (r'//.*?\n', Comment.Single),
            # put the empty comment here, it is otherwise seen as
            # the start of a docstring
            (r'/\*\*/', Comment.Multiline),
            (r'/\*\*.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Attribute)),
            (r'[~!%^&*+=|:.<>/?@-]+', Operator),
            (r'[\[\]{}();,]+', Punctuation),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            # anonymous function: 'function' immediately followed by '('
            (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
            (r'(function)(\s+)(&?)(\s*)',
             bygroups(Keyword, Text, Operator, Text), 'functionname'),
            (r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Constant)),
            (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
             r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
             r'FALSE|print|for|require|continue|foreach|require_once|'
             r'declare|return|default|static|do|switch|die|stdClass|'
             r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
             r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
             r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
             r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
             r'implements|public|private|protected|abstract|clone|try|'
             r'catch|throw|this|use|namespace|trait)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
            (r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
            # bare identifiers become Name.Other, which
            # get_tokens_unprocessed() may upgrade to Name.Builtin
            (r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other),
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0[0-7]+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
            (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
            (r'"', String.Double, 'string'),
        ],
        'classname': [
            (r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'functionname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        # double-quoted strings, with $var / {$expr} / ${name} interpolation
        'string': [
            (r'"', String.Double, '#pop'),
            (r'[^{$"\\]+', String.Double),
            (r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape),
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
             String.Interpol),
            (r'(\{\$\{)(.*?)(\}\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\{)(\$.*?)(\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\$\{)(\S+)(\})',
             bygroups(String.Interpol, Name.Variable, String.Interpol)),
            (r'[${\\]+', String.Double)
        ],
    }

    def __init__(self, **options):
        # Read the lexer options documented in the class docstring and
        # precompute the set of builtin function names to highlight.
        self.funcnamehighlighting = get_bool_opt(
            options, 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(
            options, 'disabledmodules', ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)

        # private option argument for the lexer itself
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')

        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._phpbuiltins import MODULES
            for key, value in MODULES.iteritems():
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # honour 'startinline' by seeding the state stack with 'php'
        stack = ['root']
        if self.startinline:
            stack.append('php')
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
            # upgrade bare identifiers that are known PHP builtins
            if token is Name.Other:
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value

    def analyse_text(text):
        rv = 0.0
        # '<?' not followed by 'xml' suggests a PHP open tag
        if re.search(r'<\?(?!xml)', text):
            rv += 0.3
        if '?>' in text:
            rv += 0.1
        return rv
class DtdLexer(RegexLexer):
    """
    A lexer for DTDs (Document Type Definitions).

    *New in Pygments 1.5.*
    """

    flags = re.MULTILINE | re.DOTALL

    name = 'DTD'
    aliases = ['dtd']
    filenames = ['*.dtd']
    mimetypes = ['application/xml-dtd']

    tokens = {
        'root': [
            include('common'),

            # each declaration kind gets its own state for its body
            (r'(<!ELEMENT)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'element'),
            (r'(<!ATTLIST)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'attlist'),
            (r'(<!ENTITY)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Entity), 'entity'),
            (r'(<!NOTATION)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'notation'),
            (r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
             bygroups(Keyword, Name.Entity, Text, Keyword)),

            (r'(<!DOCTYPE)(\s+)([^>\s]+)',
             bygroups(Keyword, Text, Name.Tag)),
            (r'PUBLIC|SYSTEM', Keyword.Constant),
            (r'[\[\]>]', Keyword),
        ],

        # rules shared by all declaration-body states
        'common': [
            (r'\s+', Text),
            (r'(%|&)[^;]*;', Name.Entity),
            ('<!--', Comment, 'comment'),
            (r'[(|)*,?+]', Operator),
            (r'"[^"]*"', String.Double),
            (r'\'[^\']*\'', String.Single),
        ],

        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],

        'element': [
            include('common'),
            (r'EMPTY|ANY|#PCDATA', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Tag),
            (r'>', Keyword, '#pop'),
        ],

        'attlist': [
            include('common'),
            (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', Keyword.Constant),
            (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
            (r'xml:space|xml:lang', Keyword.Reserved),
            (r'[^>\s\|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],

        'entity': [
            include('common'),
            (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Entity),
            (r'>', Keyword, '#pop'),
        ],

        'notation': [
            include('common'),
            (r'SYSTEM|PUBLIC', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],
    }

    def analyse_text(text):
        # DTD declarations in a document that does not look like XML itself
        # strongly suggest a stand-alone DTD file
        if not looks_like_xml(text) and \
            ('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
            return 0.8
class XmlLexer(RegexLexer):
    """
    Generic lexer for XML (eXtensible Markup Language).
    """

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    name = 'XML'
    aliases = ['xml']
    filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl']
    mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
                 'application/rss+xml', 'application/atom+xml']

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),  # processing instructions
            ('<![^>]*>', Comment.Preproc),    # doctype and similar
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # moderate confidence so that XML dialects (XSLT, MXML, ...) with
        # their own analyse_text can take precedence
        if looks_like_xml(text):
            return 0.5
class XsltLexer(XmlLexer):
    """
    A lexer for XSLT.

    *New in Pygments 0.10.*
    """

    name = 'XSLT'
    aliases = ['xslt']
    filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
    mimetypes = ['application/xsl+xml', 'application/xslt+xml']

    # xsl: element names that are promoted from Name.Tag to Keyword
    EXTRA_KEYWORDS = set([
        'apply-imports', 'apply-templates', 'attribute',
        'attribute-set', 'call-template', 'choose', 'comment',
        'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
        'for-each', 'if', 'import', 'include', 'key', 'message',
        'namespace-alias', 'number', 'otherwise', 'output', 'param',
        'preserve-space', 'processing-instruction', 'sort',
        'strip-space', 'stylesheet', 'template', 'text', 'transform',
        'value-of', 'variable', 'when', 'with-param'
    ])

    def get_tokens_unprocessed(self, text):
        """Post-process the XML token stream, re-tagging known xsl: tags."""
        for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
            if token is Name.Tag:
                match = re.match('</?xsl:([^>]*)/?>?', value)
                if match and match.group(1) in self.EXTRA_KEYWORDS:
                    yield index, Keyword, value
                    continue
            yield index, token, value

    def analyse_text(text):
        # an '<xsl' tag inside something XML-shaped is a strong signal
        if '<xsl' in text and looks_like_xml(text):
            return 0.8
class MxmlLexer(RegexLexer):
    """
    For MXML markup.
    Nested AS3 in <script> tags is highlighted by the appropriate lexer.

    *New in Pygments 1.1.*
    """
    flags = re.MULTILINE | re.DOTALL
    name = 'MXML'
    aliases = ['mxml']
    filenames = ['*.mxml']
    # BUG FIX: this attribute was previously misspelled 'mimetimes', so the
    # Lexer base class (which reads 'mimetypes') saw no MIME types at all
    # and the lexer could never be selected by MIME type.
    mimetypes = ['text/xml', 'application/xml']

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            # CDATA sections hold ActionScript 3 and are delegated to its lexer
            (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
             bygroups(String, using(ActionScript3Lexer), String)),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),  # processing instructions
            ('<![^>]*>', Comment.Preproc),    # doctype and similar
            (r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
class HaxeLexer(RegexLexer):
    """
    For haXe source code (http://haxe.org/).

    *New in Pygments 1.3.*
    """

    name = 'haXe'
    aliases = ['hx', 'haXe']
    filenames = ['*.hx']
    mimetypes = ['text/haxe']

    # regex building blocks reused throughout the rules below
    ident = r'(?:[a-zA-Z_][a-zA-Z0-9_]*)'
    typeid = r'(?:(?:[a-z0-9_\.])*[A-Z_][A-Za-z0-9_]*)'
    key_prop = r'(?:default|null|never)'
    key_decl_mod = r'(?:public|private|override|static|inline|extern|dynamic)'

    flags = re.DOTALL | re.MULTILINE

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            (key_decl_mod, Keyword.Declaration),
            include('enumdef'),
            include('typedef'),
            include('classdef'),
            include('imports'),
        ],

        # General constructs
        'comments': [
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'#[^\n]*', Comment.Preproc),
        ],
        'whitespace': [
            include('comments'),
            (r'\s+', Text),
        ],
        'codekeywords': [
            (r'\b(if|else|while|do|for|in|break|continue|'
             r'return|switch|case|try|catch|throw|null|trace|'
             r'new|this|super|untyped|cast|callback|here)\b',
             Keyword.Reserved),
        ],
        'literals': [
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r'~/([^\n])*?/[gisx]*', String.Regex),
            (r'\b(true|false|null)\b', Keyword.Constant),
        ],
        # function/method bodies; '{'/'}' push/pop for nested blocks
        'codeblock': [
            include('whitespace'),
            include('new'),
            include('case'),
            include('anonfundef'),
            include('literals'),
            include('vardef'),
            include('codekeywords'),
            (r'[();,\[\]]', Punctuation),
            (r'(?:=|\+=|-=|\*=|/=|%=|&=|\|=|\^=|<<=|>>=|>>>=|\|\||&&|'
             r'\.\.\.|==|!=|>|<|>=|<=|\||&|\^|<<|>>>|>>|\+|\-|\*|/|%|'
             r'!|\+\+|\-\-|~|\.|\?|\:)',
             Operator),
            (ident, Name),

            (r'}', Punctuation,'#pop'),
            (r'{', Punctuation,'#push'),
        ],

        # Instance/Block level constructs
        'propertydef': [
            # property access pair, e.g. (default, null)
            (r'(\()(' + key_prop + ')(,)(' + key_prop + ')(\))',
             bygroups(Punctuation, Keyword.Reserved, Punctuation,
                      Keyword.Reserved, Punctuation)),
        ],
        'new': [
            (r'\bnew\b', Keyword, 'typedecl'),
        ],
        'case': [
            # enum constructor pattern in a switch case
            (r'\b(case)(\s+)(' + ident + ')(\s*)(\()',
             bygroups(Keyword.Reserved, Text, Name, Text, Punctuation),
             'funargdecl'),
        ],
        'vardef': [
            (r'\b(var)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Variable), 'vardecl'),
        ],
        'vardecl': [
            include('whitespace'),
            include('typelabel'),
            (r'=', Operator,'#pop'),
            (r';', Punctuation,'#pop'),
        ],
        'instancevardef': [
            (key_decl_mod,Keyword.Declaration),
            (r'\b(var)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Variable.Instance),
             'instancevardecl'),
        ],
        'instancevardecl': [
            include('vardecl'),
            include('propertydef'),
        ],

        'anonfundef': [
            (r'\bfunction\b', Keyword.Declaration, 'fundecl'),
        ],
        'instancefundef': [
            (key_decl_mod, Keyword.Declaration),
            (r'\b(function)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Function), 'fundecl'),
        ],
        'fundecl': [
            include('whitespace'),
            include('typelabel'),
            include('generictypedecl'),
            (r'\(',Punctuation,'funargdecl'),
            (r'(?=[a-zA-Z0-9_])',Text,'#pop'),
            (r'{',Punctuation,('#pop','codeblock')),
            (r';',Punctuation,'#pop'),
        ],
        'funargdecl': [
            include('whitespace'),
            (ident, Name.Variable),
            include('typelabel'),
            include('literals'),
            (r'=', Operator),
            (r',', Punctuation),
            (r'\?', Punctuation),
            (r'\)', Punctuation, '#pop'),
        ],

        # ':' introduces a type annotation
        'typelabel': [
            (r':', Punctuation, 'type'),
        ],
        'typedecl': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, 'generictypedecl'),
            (r'(?=[{}()=,a-z])', Text,'#pop'),
        ],
        'type': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, 'generictypedecl'),
            (r'->', Keyword.Type),
            (r'(?=[{}(),;=])', Text, '#pop'),
        ],
        'generictypedecl': [
            # '<'/'>' push/pop to track nested type parameters
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r',', Punctuation),
        ],

        # Top level constructs
        'imports': [
            (r'(package|import|using)(\s+)([^;]+)(;)',
             bygroups(Keyword.Namespace, Text, Name.Namespace,Punctuation)),
        ],
        'typedef': [
            (r'typedef', Keyword.Declaration, ('typedefprebody', 'typedecl')),
        ],
        'typedefprebody': [
            include('whitespace'),
            (r'(=)(\s*)({)', bygroups(Punctuation, Text, Punctuation),
             ('#pop', 'typedefbody')),
        ],
        'enumdef': [
            (r'enum', Keyword.Declaration, ('enumdefprebody', 'typedecl')),
        ],
        'enumdefprebody': [
            include('whitespace'),
            (r'{', Punctuation, ('#pop','enumdefbody')),
        ],
        'classdef': [
            (r'class', Keyword.Declaration, ('classdefprebody', 'typedecl')),
        ],
        'classdefprebody': [
            include('whitespace'),
            (r'(extends|implements)', Keyword.Declaration,'typedecl'),
            (r'{', Punctuation, ('#pop', 'classdefbody')),
        ],
        'interfacedef': [
            (r'interface', Keyword.Declaration,
             ('interfacedefprebody', 'typedecl')),
        ],
        'interfacedefprebody': [
            include('whitespace'),
            (r'(extends)', Keyword.Declaration, 'typedecl'),
            (r'{', Punctuation, ('#pop', 'classdefbody')),
        ],

        'typedefbody': [
            include('whitespace'),
            include('instancevardef'),
            include('instancefundef'),
            (r'>', Punctuation, 'typedecl'),
            (r',', Punctuation),
            (r'}', Punctuation, '#pop'),
        ],
        'enumdefbody': [
            include('whitespace'),
            (ident, Name.Variable.Instance),
            (r'\(', Punctuation, 'funargdecl'),
            (r';', Punctuation),
            (r'}', Punctuation, '#pop'),
        ],
        'classdefbody': [
            include('whitespace'),
            include('instancevardef'),
            include('instancefundef'),
            (r'}', Punctuation, '#pop'),
            include('codeblock'),
        ],
    }

    def analyse_text(text):
        # a leading 'name : Type'-shaped construct is weak evidence of haXe
        if re.match(r'\w+\s*:\s*\w', text): return 0.3
def _indentation(lexer, match, ctx):
    """Callback for indentation-sensitive lexers (Haml/Sass).

    Emits the matched leading whitespace as ``Text``, records it on the
    lexer context, and chooses the next state: if an indentation-delimited
    block is pending (``ctx.block_state``, armed by ``_starts_block``) and
    this line is indented strictly deeper than the block opener, re-enter
    that block state; otherwise clear the block bookkeeping and enter the
    regular 'content' state.
    """
    ws = match.group(0)
    yield match.start(), Text, ws
    ctx.last_indentation = ws
    ctx.pos = match.end()

    pending = getattr(ctx, 'block_state', None)
    deeper = bool(pending) \
        and ws.startswith(ctx.block_indentation) \
        and ws != ctx.block_indentation
    if deeper:
        ctx.stack.append(pending)
    else:
        ctx.block_state = None
        ctx.block_indentation = None
        ctx.stack.append('content')
def _starts_block(token, state):
    """Return a lexer callback that opens an indentation-delimited block.

    The callback emits *token* for the matched text and arms *state* as the
    pending block state; it also records the indentation of the opening line
    (as previously stored by ``_indentation``) so that following lines that
    are indented deeper can be routed into *state*.
    """
    def callback(lexer, match, ctx):
        yield match.start(), token, match.group(0)
        # '' if no indentation has been recorded yet on this context
        ctx.block_indentation = getattr(ctx, 'last_indentation', '')
        ctx.block_state = state
        ctx.pos = match.end()
    return callback
class HamlLexer(ExtendedRegexLexer):
    """
    For Haml markup.

    *New in Pygments 1.3.*
    """

    name = 'Haml'
    aliases = ['haml', 'HAML']
    filenames = ['*.haml']
    mimetypes = ['text/x-haml']

    flags = re.IGNORECASE
    # Haml can include " |\n" anywhere,
    # which is ignored and used to wrap long lines.
    # To accomodate this, use this custom faux dot instead.
    _dot = r'(?: \|\n(?=.* \|)|.)'

    # In certain places, a comma at the end of the line
    # allows line wrapping as well.
    _comma_dot = r'(?:,\s*\n|' + _dot + ')'

    # Indentation-driven: 'root' dispatches through the _indentation
    # callback, which selects 'content' or a pending block state armed
    # by _starts_block.
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],

        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            # =/~ lines are re-lexed as Ruby
            (r'([&!]?[=~])(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             'root'),
            (r'', Text, 'plain'),
        ],

        'content': [
            include('css'),
            (r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            # '/', '-#' and ':' open indentation-delimited blocks whose
            # deeper-indented lines continue in the named block state
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'haml-comment-block'), '#pop'),
            (r'(-)(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],

        'tag': [
            include('css'),
            # {...} and [...] attribute hashes are Ruby
            (r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
            (r'\[' + _dot + '*?\]', using(RubyLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            # Ruby interpolation: #{...}
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'haml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
# Token states shared between SassLexer and ScssLexer; each lexer copies
# these into its own ``tokens`` table and appends its dialect-specific
# line-ending rules.
common_sass_tokens = {
    'value': [
        (r'[ \t]+', Text),
        (r'[!$][\w-]+', Name.Variable),
        (r'url\(', String.Other, 'string-url'),
        (r'[a-z_-][\w-]*(?=\()', Name.Function),
        # CSS property names and keyword values
        (r'(azimuth|background-attachment|background-color|'
         r'background-image|background-position|background-repeat|'
         r'background|border-bottom-color|border-bottom-style|'
         r'border-bottom-width|border-left-color|border-left-style|'
         r'border-left-width|border-right|border-right-color|'
         r'border-right-style|border-right-width|border-top-color|'
         r'border-top-style|border-top-width|border-bottom|'
         r'border-collapse|border-left|border-width|border-color|'
         r'border-spacing|border-style|border-top|border|caption-side|'
         r'clear|clip|color|content|counter-increment|counter-reset|'
         r'cue-after|cue-before|cue|cursor|direction|display|'
         r'elevation|empty-cells|float|font-family|font-size|'
         r'font-size-adjust|font-stretch|font-style|font-variant|'
         r'font-weight|font|height|letter-spacing|line-height|'
         r'list-style-type|list-style-image|list-style-position|'
         r'list-style|margin-bottom|margin-left|margin-right|'
         r'margin-top|margin|marker-offset|marks|max-height|max-width|'
         r'min-height|min-width|opacity|orphans|outline|outline-color|'
         r'outline-style|outline-width|overflow|padding-bottom|'
         r'padding-left|padding-right|padding-top|padding|page|'
         r'page-break-after|page-break-before|page-break-inside|'
         r'pause-after|pause-before|pause|pitch|pitch-range|'
         r'play-during|position|quotes|richness|right|size|'
         r'speak-header|speak-numeral|speak-punctuation|speak|'
         r'speech-rate|stress|table-layout|text-align|text-decoration|'
         r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
         r'vertical-align|visibility|voice-family|volume|white-space|'
         r'widows|width|word-spacing|z-index|bottom|left|'
         r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
         r'behind|below|bidi-override|blink|block|bold|bolder|both|'
         r'capitalize|center-left|center-right|center|circle|'
         r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
         r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
         r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
         r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
         r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
         r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
         r'inherit|inline-table|inline|inset|inside|invert|italic|'
         r'justify|katakana-iroha|katakana|landscape|larger|large|'
         r'left-side|leftwards|level|lighter|line-through|list-item|'
         r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
         r'lower|low|medium|message-box|middle|mix|monospace|'
         r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
         r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
         r'open-quote|outset|outside|overline|pointer|portrait|px|'
         r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
         r'rightwards|s-resize|sans-serif|scroll|se-resize|'
         r'semi-condensed|semi-expanded|separate|serif|show|silent|'
         r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
         r'spell-out|square|static|status-bar|super|sw-resize|'
         r'table-caption|table-cell|table-column|table-column-group|'
         r'table-footer-group|table-header-group|table-row|'
         r'table-row-group|text|text-bottom|text-top|thick|thin|'
         r'transparent|ultra-condensed|ultra-expanded|underline|'
         r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
         r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
         r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant),
        # extended (X11-style) color names
        (r'(indigo|gold|firebrick|indianred|darkolivegreen|'
         r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
         r'mediumslateblue|springgreen|crimson|lightsalmon|brown|'
         r'turquoise|olivedrab|cyan|skyblue|darkturquoise|'
         r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|'
         r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
         r'violet|orchid|ghostwhite|honeydew|cornflowerblue|'
         r'darkblue|darkkhaki|mediumpurple|cornsilk|bisque|slategray|'
         r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
         r'gainsboro|mediumturquoise|floralwhite|coral|lightgrey|'
         r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
         r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
         r'lightcoral|orangered|navajowhite|palegreen|burlywood|'
         r'seashell|mediumspringgreen|papayawhip|blanchedalmond|'
         r'peru|aquamarine|darkslategray|ivory|dodgerblue|'
         r'lemonchiffon|chocolate|orange|forestgreen|slateblue|'
         r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
         r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
         r'plum|darkgoldenrod|sandybrown|magenta|tan|'
         r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
         r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
         r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
         r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
         r'lightyellow|lavenderblush|linen|mediumaquamarine|'
         r'blueviolet|peachpuff)\b', Name.Entity),
        # the 16 basic CSS color keywords
        (r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|'
         r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin),
        (r'\!(important|default)', Name.Exception),
        (r'(true|false)', Name.Pseudo),
        (r'(and|or|not)', Operator.Word),
        (r'/\*', Comment.Multiline, 'inline-comment'),
        (r'//[^\n]*', Comment.Single),
        (r'\#[a-z0-9]{1,6}', Number.Hex),
        # numbers with an optional unit suffix
        (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
        (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
        (r'#{', String.Interpol, 'interpolation'),
        (r'[~\^\*!&%<>\|+=@:,./?-]+', Operator),
        (r'[\[\]()]+', Punctuation),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
        (r'[a-z_-][\w-]*', Name),
    ],

    # #{...} interpolation may contain any value expression
    'interpolation': [
        (r'\}', String.Interpol, '#pop'),
        include('value'),
    ],

    'selector': [
        (r'[ \t]+', Text),
        (r'\:', Name.Decorator, 'pseudo-class'),
        (r'\.', Name.Class, 'class'),
        (r'\#', Name.Namespace, 'id'),
        (r'[a-zA-Z0-9_-]+', Name.Tag),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'&', Keyword),  # parent-selector reference
        (r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
    ],

    'string-double': [
        (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'"', String.Double, '#pop'),
    ],

    'string-single': [
        (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
        (r'#\{', String.Interpol, 'interpolation'),
        (r"'", String.Double, '#pop'),
    ],

    'string-url': [
        (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'\)', String.Other, '#pop'),
    ],

    'pseudo-class': [
        (r'[\w-]+', Name.Decorator),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],

    'class': [
        (r'[\w-]+', Name.Class),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],

    'id': [
        (r'[\w-]+', Name.Namespace),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],

    # body of an @for directive
    'for': [
        (r'(from|to|through)', Operator.Word),
        include('value'),
    ],
}
class SassLexer(ExtendedRegexLexer):
    """
    For Sass stylesheets (the indentation-based syntax).
    *New in Pygments 1.3.*
    """
    name = 'Sass'
    aliases = ['sass', 'SASS']
    filenames = ['*.sass']
    mimetypes = ['text/x-sass']
    flags = re.IGNORECASE
    tokens = {
        # 'root' only consumes blank lines and leading whitespace; the
        # _indentation callback then routes the rest of the line.
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],
        # One logical Sass line: comment, @-directive, mixin def/include,
        # variable assignment, attribute, or (fallback) a selector.
        'content': [
            (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
             'root'),
            (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
             'root'),
            (r'@import', Keyword, 'import'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
            (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            (r'@[a-z0-9_-]+', Keyword, 'selector'),
            # '=' / '+' are the old-style mixin definition / inclusion markers.
            (r'=[\w-]+', Name.Function, 'value'),
            (r'\+[\w-]+', Name.Decorator, 'value'),
            # Variable assignment: "$var: ..." or "!var = ..." (optionally "||=").
            (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
             bygroups(Name.Variable, Operator), 'value'),
            (r':', Name.Attribute, 'old-style-attr'),
            # Zero-width lookahead: line contains "name: value" / "name= value".
            (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
            (r'', Text, 'selector'),
        ],
        'single-comment': [
            (r'.+', Comment.Single),
            (r'\n', Text, 'root'),
        ],
        'multi-comment': [
            (r'.+', Comment.Multiline),
            (r'\n', Text, 'root'),
        ],
        'import': [
            (r'[ \t]+', Text),
            (r'\S+', String),
            (r'\n', Text, 'root'),
        ],
        'old-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*=', Operator, 'value'),
            (r'', Text, 'value'),
        ],
        'new-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*[=:]', Operator, 'value'),
        ],
        'inline-comment': [
            (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }
    # Pull in the states shared with SCSS; copy.copy so the append()s below
    # patch this lexer's lists without mutating common_sass_tokens itself.
    for group, common in common_sass_tokens.iteritems():
        tokens[group] = copy.copy(common)
    # In the indentation-based syntax a newline ends a value or selector.
    tokens['value'].append((r'\n', Text, 'root'))
    tokens['selector'].append((r'\n', Text, 'root'))
class ScssLexer(RegexLexer):
    """
    For SCSS stylesheets (the brace-delimited Sass syntax).
    """
    name = 'SCSS'
    aliases = ['scss']
    filenames = ['*.scss']
    mimetypes = ['text/x-scss']
    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@import', Keyword, 'value'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
            (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            (r'@[a-z0-9_-]+', Keyword, 'selector'),
            # Variable assignment: "$var: ...".
            (r'(\$[\w-]\w*)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
            # Zero-width lookaheads: decide attribute vs. selector without
            # consuming input.
            (r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
            (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
            (r'', Text, 'selector'),
        ],
        'attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*:', Operator, 'value'),
        ],
        'inline-comment': [
            (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }
    # Pull in the states shared with Sass; copy.copy so the extend()s below
    # patch this lexer's lists without mutating common_sass_tokens itself.
    for group, common in common_sass_tokens.iteritems():
        tokens[group] = copy.copy(common)
    # In the braced syntax ';' / '{' / '}' (not newline) end values/selectors.
    tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
    tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
class CoffeeScriptLexer(RegexLexer):
    """
    For `CoffeeScript`_ source code.
    .. _CoffeeScript: http://coffeescript.org
    *New in Pygments 1.3.*
    """
    name = 'CoffeeScript'
    aliases = ['coffee-script', 'coffeescript']
    filenames = ['*.coffee']
    mimetypes = ['text/coffeescript']
    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            # ### ... ### is a block comment; a lone # starts a line comment
            # (the lookahead keeps it from eating a block-comment opener).
            (r'###[^#].*?###', Comment.Multiline),
            (r'#(?!##[^#]).*?\n', Comment.Single),
        ],
        # Inside a /// ... /// heregex; closed by /// plus optional flags.
        'multilineregex': [
            (r'[^/#]+', String.Regex),
            (r'///([gim]+\b|\B)', String.Regex, '#pop'),
            (r'#{', String.Interpol, 'interpoling_string'),
            (r'[/#]', String.Regex),
        ],
        # Entered after tokens that may be followed by a regex literal, so
        # '/' is read as a regex rather than division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'///', String.Regex, ('#pop', 'multilineregex')),
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'', Text, '#pop'),
        ],
        'root': [
            # this next expr leads to infinite loops root -> slashstartsregex
            #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
             r'=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            # Arrow function headers: optional "(args)" then -> or =>.
            (r'(?:\([^()]+\))?\s*[=-]>', Name.Function),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            # Negative lookbehind keeps property accesses like a.for intact.
            (r'(?<![\.\$])(for|own|in|of|while|until|'
             r'loop|break|return|continue|'
             r'switch|when|then|if|unless|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
             r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
            (r'(?<![\.\$])(true|false|yes|no|on|off|null|'
             r'NaN|Infinity|undefined)\b',
             Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
             Name.Builtin),
            # Assignment targets ("name =" / "name:"); '@' marks instance vars.
            (r'[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable,
             'slashstartsregex'),
            (r'@[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable.Instance,
             'slashstartsregex'),
            (r'@', Name.Other, 'slashstartsregex'),
            (r'@?[$a-zA-Z_][a-zA-Z0-9_\$]*', Name.Other, 'slashstartsregex'),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all coffee script strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        'interpoling_string' : [
            (r'}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
            (r'#{', String.Interpol, "interpoling_string"),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String), # single-quoted strings don't need " escapes
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String), # no need to escape quotes in triple-strings
            (r'#{', String.Interpol, "interpoling_string"),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
class LiveScriptLexer(RegexLexer):
    """
    For `LiveScript`_ source code.
    .. _LiveScript: http://gkz.github.com/LiveScript/
    New in Pygments 1.6.
    """
    name = 'LiveScript'
    aliases = ['live-script', 'livescript']
    filenames = ['*.ls']
    mimetypes = ['text/livescript']
    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'#.*?\n', Comment.Single),
        ],
        # Inside a // ... // multi-line regex; closed by // plus optional flags.
        'multilineregex': [
            include('commentsandwhitespace'),
            (r'//([gim]+\b|\B)', String.Regex, '#pop'),
            (r'/', String.Regex),
            (r'[^/#]+', String.Regex)
        ],
        # Entered after tokens that may be followed by a regex literal, so
        # '/' is read as a regex rather than division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'//', String.Regex, ('#pop', 'multilineregex')),
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'', Text, '#pop'),
        ],
        'root': [
            # this next expr leads to infinite loops root -> slashstartsregex
            #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # Function arrows: "(args) ->" / "~>" and backcalls "<-" / "<~".
            (r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
             r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
            (r'\+\+|&&|(?<![\.\$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
             r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
             r'[+*`%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            # Negative lookbehind keeps property accesses like a.for intact.
            (r'(?<![\.\$])(for|own|in|of|while|until|loop|break|'
             r'return|continue|switch|when|then|if|unless|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
             r'extends|this|class|by|const|var|to|til)\b', Keyword,
             'slashstartsregex'),
            (r'(?<![\.\$])(true|false|yes|no|on|off|'
             r'null|NaN|Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
             Name.Builtin),
            # Assignment targets ("name =" / "name:"); '@' marks instance vars.
            (r'[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable,
             'slashstartsregex'),
            (r'@[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable.Instance,
             'slashstartsregex'),
            (r'@', Name.Other, 'slashstartsregex'),
            (r'@?[$a-zA-Z_][a-zA-Z0-9_\-]*', Name.Other, 'slashstartsregex'),
            # Numbers may carry a radix prefix (16~ff) and a unit-style suffix.
            (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
            (r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
            # Backslash word-strings and <[ word lists ]>.
            (r'\\[\w$-]+', String),
            (r'<\[.*\]>', String),
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all coffee script strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        'interpoling_string' : [
            (r'}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String), # double-quoted strings don't need ' escapes
            (r'#{', String.Interpol, "interpoling_string"),
            (r'#', String),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String), # single-quoted strings don't need " escapes
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String), # no need to escape quotes in triple-strings
            (r'#{', String.Interpol, "interpoling_string"),
            (r'#', String),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
class DuelLexer(RegexLexer):
    """
    Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.
    See http://duelengine.org/.
    See http://jsonml.org/jbst/.
    *New in Pygments 1.4.*
    """
    name = 'Duel'
    aliases = ['duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST']
    filenames = ['*.duel','*.jbst']
    mimetypes = ['text/x-duel','text/x-jbst']
    flags = re.DOTALL
    tokens = {
        'root': [
            # <% ... %> code blocks are delegated to the JavaScript lexer.
            (r'(<%[@=#!:]?)(.*?)(%>)',
             bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
            # <%$ name : value %> resource expressions.
            (r'(<%\$)(.*?)(:)(.*?)(%>)',
             bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
            # <%-- ... --%> server-side comments.
            (r'(<%--)(.*?)(--%>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)',
             bygroups(using(HtmlLexer),
                      using(JavascriptLexer), using(HtmlLexer))),
            # Everything else up to the next '<' is plain HTML.
            (r'(.+?)(?=<)', using(HtmlLexer)),
            (r'.+', using(HtmlLexer)),
        ],
    }
class ScamlLexer(ExtendedRegexLexer):
    """
    For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
    *New in Pygments 1.4.*
    """
    name = 'Scaml'
    aliases = ['scaml', 'SCAML']
    filenames = ['*.scaml']
    mimetypes = ['text/x-scaml']
    flags = re.IGNORECASE
    # Scaml does not yet support the " |\n" notation to
    # wrap long lines. Once it does, use the custom faux
    # dot instead.
    # _dot = r'(?: \|\n(?=.* \|)|.)'
    _dot = r'.'
    tokens = {
        # 'root' only consumes blank lines and leading whitespace; the
        # _indentation callback then routes the rest of the line.
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],
        # Implicit-div shortcuts: ".class" and "#id" open a tag.
        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],
        # "=", "~", "&=", "!=" etc. evaluate Scala code; otherwise plain text.
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             'root'),
            (r'', Text, 'plain'),
        ],
        # One logical Scaml line: tag, doctype, comment, Scala code, filter.
        'content': [
            include('css'),
            (r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                 'scaml-comment-block'), '#pop'),
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],
        # After a tag name: {...}/[...] Scala hashes, (…) HTML attributes,
        # self-closing '/', and whitespace-control markers.
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],
        # Plain text; #{...} interpolation is handed to the Scala lexer.
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],
        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],
        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class JadeLexer(ExtendedRegexLexer):
    """
    For Jade markup.
    Jade is a variant of Scaml, see:
    http://scalate.fusesource.org/documentation/scaml-reference.html
    *New in Pygments 1.4.*
    """
    name = 'Jade'
    aliases = ['jade', 'JADE']
    filenames = ['*.jade']
    mimetypes = ['text/x-jade']
    flags = re.IGNORECASE
    # See ScamlLexer: stand-in for a faux dot once line wrapping is supported.
    _dot = r'.'
    tokens = {
        # 'root' only consumes blank lines and leading whitespace; the
        # _indentation callback then routes the rest of the line.
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],
        # Implicit-div shortcuts: ".class" and "#id" open a tag.
        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],
        # "=", "~", "&=", "!=" etc. evaluate Scala code; otherwise plain text.
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)), 'root'),
            (r'', Text, 'plain'),
        ],
        # One logical Jade line; unlike Scaml, tags need no '%' prefix and
        # '|' forces a text line.
        'content': [
            include('css'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                 'scaml-comment-block'), '#pop'),
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            (r'[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'\|', Text, 'eval-or-plain'),
        ],
        # After a tag name: {...}/[...] Scala hashes, (…) HTML attributes,
        # self-closing '/', and whitespace-control markers.
        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],
        # Plain text; #{...} interpolation is handed to the Scala lexer.
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],
        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],
        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],
        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],
        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class XQueryLexer(ExtendedRegexLexer):
    """
    An XQuery lexer, parsing a stream and outputting the tokens needed to
    highlight xquery code.
    *New in Pygments 1.4.*
    """
    name = 'XQuery'
    aliases = ['xquery', 'xqy']
    filenames = ['*.xqy', '*.xquery']
    mimetypes = ['text/xquery', 'application/xquery']
    # Auxiliary state stack used by the callbacks below to remember which
    # state to return to after tags/kindtests/etc.
    # NOTE(review): this is a class-level mutable list, so it is shared
    # between lexer instances and across calls - confirm this is intended.
    xquery_parse_state = []
    # Regex fragments for XQuery names, built up per the XML/XQuery EBNF.
    # FIX UNICODE LATER
    #ncnamestartchar = (
    #    ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
    #    ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
    #    ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
    #    ur"[\u10000-\uEFFFF]"
    #)
    ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
    # FIX UNICODE LATER
    #ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
    #                                ur"[\u203F-\u2040]")
    ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
    ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
    # PI targets exclude names starting with [Xx][Mm][Ll].
    pitarget_namestartchar = r"(?:[A-KN-WY-Z]|_|:|[a-kn-wy-z])"
    pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
    pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
    prefixedname = "%s:%s" % (ncname, ncname)
    unprefixedname = ncname
    qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
    # Entity/character references and string literals ("" / '' self-escape).
    entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
    charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
    stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
    stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
    # FIX UNICODE LATER
    #elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
    #                      ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
    #quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
    #                       ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
    #aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
    #                       ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
    aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_`\|~]'
    # CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
    # aposattrcontentchar
    #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
    flags = re.DOTALL | re.MULTILINE | re.UNICODE
    def punctuation_root_callback(lexer, match, ctx):
        # Emit group 1 as Punctuation and force the lexer back to 'root'.
        yield match.start(), Punctuation, match.group(1)
        # transition to root always - don't pop off stack
        ctx.stack = ['root']
        ctx.pos = match.end()
    def operator_root_callback(lexer, match, ctx):
        # Emit group 1 as Operator and force the lexer back to 'root'.
        yield match.start(), Operator, match.group(1)
        # transition to root always - don't pop off stack
        ctx.stack = ['root']
        ctx.pos = match.end()
    def popstate_tag_callback(lexer, match, ctx):
        # End of a tag: resume whatever state was saved in xquery_parse_state.
        yield match.start(), Name.Tag, match.group(1)
        ctx.stack.append(lexer.xquery_parse_state.pop())
        ctx.pos = match.end()
    def popstate_xmlcomment_callback(lexer, match, ctx):
        # End of an XML comment: resume the saved state.
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append(lexer.xquery_parse_state.pop())
        ctx.pos = match.end()
    def popstate_kindtest_callback(lexer, match, ctx):
        # Closing ')' of a kindtest; group 2 is an optional occurrence
        # indicator ([?*+]) that is only consumed when the saved state
        # expects one, otherwise the position is reset to end of group 1.
        yield match.start(), Punctuation, match.group(1)
        next_state = lexer.xquery_parse_state.pop()
        if next_state == 'occurrenceindicator':
            if re.match("[?*+]+", match.group(2)):
                yield match.start(), Punctuation, match.group(2)
                ctx.stack.append('operator')
                ctx.pos = match.end()
            else:
                ctx.stack.append('operator')
                ctx.pos = match.end(1)
        else:
            ctx.stack.append(next_state)
            ctx.pos = match.end(1)
    def popstate_callback(lexer, match, ctx):
        yield match.start(), Punctuation, match.group(1)
        # if we have run out of our state stack, pop whatever is on the pygments
        # state stack
        if len(lexer.xquery_parse_state) == 0:
            ctx.stack.pop()
        elif len(ctx.stack) > 1:
            ctx.stack.append(lexer.xquery_parse_state.pop())
        else:
            # i don't know if i'll need this, but in case, default back to root
            ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_element_content_starttag_callback(lexer, match, ctx):
        # '<' inside element content: save 'element_content' to return to,
        # then lex the start tag.
        yield match.start(), Name.Tag, match.group(1)
        lexer.xquery_parse_state.append('element_content')
        ctx.stack.append('start_tag')
        ctx.pos = match.end()
    def pushstate_cdata_section_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        # NOTE(review): this appends the *bound method* ctx.state.pop
        # (uncalled) onto xquery_parse_state, which the popstate callbacks
        # later push as a state name - looks suspicious; confirm against
        # pygments' LexerContext API.
        lexer.xquery_parse_state.append(ctx.state.pop)
        ctx.pos = match.end()
    def pushstate_starttag_callback(lexer, match, ctx):
        yield match.start(), Name.Tag, match.group(1)
        # NOTE(review): same uncalled ctx.state.pop as in
        # pushstate_cdata_section_callback above - confirm.
        lexer.xquery_parse_state.append(ctx.state.pop)
        ctx.stack.append('start_tag')
        ctx.pos = match.end()
    def pushstate_operator_order_callback(lexer, match, ctx):
        # "order ... {": lex the body in 'root', return to 'operator'.
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_root_validate(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_root_validate_withmode(lexer, match, ctx):
        # "validate lax|strict": group 3 is the mode keyword.
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Keyword, match.group(3)
        ctx.stack = ['root']
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    # The next six callbacks enter a PI / CDATA / XML-comment state and
    # record ('operator' or 'element_content') as the state to resume.
    def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('processing_instruction')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('processing_instruction')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()
    def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()
    def pushstate_operator_cdata_section_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('cdata_section')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('xml_comment')
        lexer.xquery_parse_state.append('element_content')
        ctx.pos = match.end()
    def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
        yield match.start(), String.Doc, match.group(1)
        ctx.stack.append('xml_comment')
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
    # The kindtest callbacks emit "keyword whitespace '('" (groups 1-3),
    # enter the kindtest state, and record which state to resume afterwards.
    def pushstate_kindtest_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('kindtest')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()
    def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('kindtestforpi')
        ctx.pos = match.end()
    def pushstate_operator_kindtest_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()
    def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
        yield match.start(), Name.Tag, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('occurrenceindicator')
        ctx.stack.append('kindtest')
        ctx.pos = match.end()
    def pushstate_operator_starttag_callback(lexer, match, ctx):
        yield match.start(), Name.Tag, match.group(1)
        lexer.xquery_parse_state.append('operator')
        ctx.stack.append('start_tag')
        ctx.pos = match.end()
    def pushstate_operator_root_callback(lexer, match, ctx):
        # '{': lex the enclosed expression in 'root', resume in 'operator'.
        yield match.start(), Punctuation, match.group(1)
        lexer.xquery_parse_state.append('operator')
        ctx.stack = ['root']#.append('root')
        ctx.pos = match.end()
    def pushstate_operator_root_construct_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.stack = ['root']
        ctx.pos = match.end()
    def pushstate_root_callback(lexer, match, ctx):
        # '{': save the current state so the matching '}' can restore it,
        # then lex the enclosed expression in 'root'.
        yield match.start(), Punctuation, match.group(1)
        cur_state = ctx.stack.pop()
        lexer.xquery_parse_state.append(cur_state)
        ctx.stack = ['root']#.append('root')
        ctx.pos = match.end()
    def pushstate_operator_attribute_callback(lexer, match, ctx):
        # Pushes 'operator' directly on the pygments stack (not on
        # xquery_parse_state, unlike its siblings).
        yield match.start(), Name.Attribute, match.group(1)
        ctx.stack.append('operator')
        ctx.pos = match.end()
    def pushstate_operator_callback(lexer, match, ctx):
        yield match.start(), Keyword, match.group(1)
        yield match.start(), Text, match.group(2)
        yield match.start(), Punctuation, match.group(3)
        lexer.xquery_parse_state.append('operator')
        ctx.pos = match.end()
tokens = {
'comment': [
# xquery comments
(r'(:\))', Comment, '#pop'),
(r'(\(:)', Comment, '#push'),
(r'[^:)]', Comment),
(r'([^:)]|:|\))', Comment),
],
'whitespace': [
(r'\s+', Text),
],
'operator': [
include('whitespace'),
(r'(\})', popstate_callback),
(r'\(:', Comment, 'comment'),
(r'(\{)', pushstate_root_callback),
(r'then|else|external|at|div|except', Keyword, 'root'),
(r'order by', Keyword, 'root'),
(r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
(r'and|or', Operator.Word, 'root'),
(r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
Operator.Word, 'root'),
(r'return|satisfies|to|union|where|preserve\s+strip',
Keyword, 'root'),
(r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\||:=|=)',
operator_root_callback),
(r'(::|;|\[|//|/|,)',
punctuation_root_callback),
(r'(castable|cast)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(instance)(\s+)(of)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(treat)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(case|as)\b', Keyword, 'itemtype'),
(r'(\))(\s*)(as)',
bygroups(Punctuation, Text, Keyword), 'itemtype'),
(r'\$', Name.Variable, 'varname'),
(r'(for|let)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
#(r'\)|\?|\]', Punctuation, '#push'),
(r'\)|\?|\]', Punctuation),
(r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
(r'ascending|descending|default', Keyword, '#push'),
(r'external', Keyword),
(r'collation', Keyword, 'uritooperator'),
# finally catch all string literals and stay in operator state
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'(catch)(\s*)', bygroups(Keyword, Text), 'root'),
],
'uritooperator': [
(stringdouble, String.Double, '#pop'),
(stringsingle, String.Single, '#pop'),
],
'namespacedecl': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)),
(r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r',', Punctuation),
(r'=', Operator),
(r';', Punctuation, 'root'),
(ncname, Name.Namespace),
],
'namespacekeyword': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double, 'namespacedecl'),
(stringsingle, String.Single, 'namespacedecl'),
(r'inherit|no-inherit', Keyword, 'root'),
(r'namespace', Keyword, 'namespacedecl'),
(r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
(r'preserve|no-preserve', Keyword),
(r',', Punctuation),
],
'varname': [
(r'\(:', Comment, 'comment'),
(qname, Name.Variable, 'operator'),
],
'singletype': [
(r'\(:', Comment, 'comment'),
(ncname + r'(:\*)', Name.Variable, 'operator'),
(qname, Name.Variable, 'operator'),
],
'itemtype': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\$', Punctuation, 'varname'),
(r'(void)(\s*)(\()(\s*)(\))',
bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
(r'(element|attribute|schema-element|schema-attribute|comment|text|'
r'node|binary|document-node|empty-sequence)(\s*)(\()',
pushstate_occurrenceindicator_kindtest_callback),
# Marklogic specific type?
(r'(processing-instruction)(\s*)(\()',
bygroups(Keyword, Text, Punctuation),
('occurrenceindicator', 'kindtestforpi')),
(r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
bygroups(Keyword, Text, Punctuation, Text, Punctuation),
'occurrenceindicator'),
(r'\(\#', Punctuation, 'pragma'),
(r';', Punctuation, '#pop'),
(r'then|else', Keyword, '#pop'),
(r'(at)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'namespacedecl'),
(r'(at)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'namespacedecl'),
(r'except|intersect|in|is|return|satisfies|to|union|where',
Keyword, 'root'),
(r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
(r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|', Operator, 'root'),
(r'external|at', Keyword, 'root'),
(r'(stable)(\s+)(order)(\s+)(by)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
(r'(castable|cast)(\s+)(as)',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
(r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
(r'case|as', Keyword, 'itemtype'),
(r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(ncname + r':\*', Keyword.Type, 'operator'),
(qname, Keyword.Type, 'occurrenceindicator'),
],
'kindtest': [
(r'\(:', Comment, 'comment'),
(r'{', Punctuation, 'root'),
(r'(\))([*+?]?)', popstate_kindtest_callback),
(r'\*', Name, 'closekindtest'),
(qname, Name, 'closekindtest'),
(r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
],
'kindtestforpi': [
(r'\(:', Comment, 'comment'),
(r'\)', Punctuation, '#pop'),
(ncname, Name.Variable),
(stringdouble, String.Double),
(stringsingle, String.Single),
],
'closekindtest': [
(r'\(:', Comment, 'comment'),
(r'(\))', popstate_callback),
(r',', Punctuation),
(r'(\{)', pushstate_operator_root_callback),
(r'\?', Punctuation),
],
'xml_comment': [
(r'(-->)', popstate_xmlcomment_callback),
(r'[^-]{1,2}', Literal),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'processing_instruction': [
(r'\s+', Text, 'processing_instruction_content'),
(r'\?>', String.Doc, '#pop'),
(pitarget, Name),
],
'processing_instruction_content': [
(r'\?>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'cdata_section': [
(r']]>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'start_tag': [
include('whitespace'),
(r'(/>)', popstate_tag_callback),
(r'>', Name.Tag, 'element_content'),
(r'"', Punctuation, 'quot_attribute_content'),
(r"'", Punctuation, 'apos_attribute_content'),
(r'=', Operator),
(qname, Name.Tag),
],
'quot_attribute_content': [
(r'"', Punctuation, 'start_tag'),
(r'(\{)', pushstate_root_callback),
(r'""', Name.Attribute),
(quotattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'apos_attribute_content': [
(r"'", Punctuation, 'start_tag'),
(r'\{', Punctuation, 'root'),
(r"''", Name.Attribute),
(aposattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'element_content': [
(r'</', Name.Tag, 'end_tag'),
(r'(\{)', pushstate_root_callback),
(r'(<!--)', pushstate_element_content_xmlcomment_callback),
(r'(<\?)', pushstate_element_content_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
(r'(<)', pushstate_element_content_starttag_callback),
(elementcontentchar, Literal),
(entityref, Literal),
(charref, Literal),
(r'\{\{|\}\}', Literal),
],
'end_tag': [
include('whitespace'),
(r'(>)', popstate_tag_callback),
(qname, Name.Tag),
],
'xmlspace_decl': [
(r'\(:', Comment, 'comment'),
(r'preserve|strip', Keyword, '#pop'),
],
'declareordering': [
(r'\(:', Comment, 'comment'),
include('whitespace'),
(r'ordered|unordered', Keyword, '#pop'),
],
'xqueryversion': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'encoding', Keyword),
(r';', Punctuation, '#pop'),
],
'pragma': [
(qname, Name.Variable, 'pragmacontents'),
],
'pragmacontents': [
(r'#\)', Punctuation, 'operator'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
(r'(\s+)', Text),
],
'occurrenceindicator': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\*|\?|\+', Operator, 'operator'),
(r':=', Operator, 'root'),
(r'', Text, 'operator'),
],
'option': [
include('whitespace'),
(qname, Name.Variable, '#pop'),
],
'qname_braren': [
include('whitespace'),
(r'(\{)', pushstate_operator_root_callback),
(r'(\()', Punctuation, 'root'),
],
'element_qname': [
(qname, Name.Variable, 'root'),
],
'attribute_qname': [
(qname, Name.Variable, 'root'),
],
'root': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
# handle operator state
# order on numbers matters - handle most complex first
(r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+)[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+|\d+\.\d*)', Number, 'operator'),
(r'(\d+)', Number.Integer, 'operator'),
(r'(\.\.|\.|\))', Punctuation, 'operator'),
(r'(declare)(\s+)(construction)',
bygroups(Keyword, Text, Keyword), 'operator'),
(r'(declare)(\s+)(default)(\s+)(order)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'),
(ncname + ':\*', Name, 'operator'),
('\*:'+ncname, Name.Tag, 'operator'),
('\*', Name.Tag, 'operator'),
(stringdouble, String.Double, 'operator'),
(stringsingle, String.Single, 'operator'),
(r'(\})', popstate_callback),
#NAMESPACE DECL
(r'(declare)(\s+)(default)(\s+)(collation)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(module|declare)(\s+)(namespace)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
(r'(declare)(\s+)(base-uri)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
#NAMESPACE KEYWORD
(r'(declare)(\s+)(default)(\s+)(element|function)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'),
(r'(import)(\s+)(schema|module)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'),
(r'(declare)(\s+)(copy-namespaces)',
bygroups(Keyword, Text, Keyword), 'namespacekeyword'),
#VARNAMEs
(r'(for|let|some|every)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
(r'\$', Name.Variable, 'varname'),
(r'(declare)(\s+)(variable)(\s+)(\$)',
bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
#ITEMTYPE
(r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(r'(element|attribute|schema-element|schema-attribute|comment|'
r'text|node|document-node|empty-sequence)(\s+)(\()',
pushstate_operator_kindtest_callback),
(r'(processing-instruction)(\s+)(\()',
pushstate_operator_kindtestforpi_callback),
(r'(<!--)', pushstate_operator_xmlcomment_callback),
(r'(<\?)', pushstate_operator_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
# (r'</', Name.Tag, 'end_tag'),
(r'(<)', pushstate_operator_starttag_callback),
(r'(declare)(\s+)(boundary-space)',
bygroups(Keyword, Text, Keyword), 'xmlspace_decl'),
(r'(validate)(\s+)(lax|strict)',
pushstate_operator_root_validate_withmode),
(r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
(r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'(element|attribute)(\s*)(\{)',
pushstate_operator_root_construct_callback),
(r'(document|text|processing-instruction|comment)(\s*)(\{)',
pushstate_operator_root_construct_callback),
#ATTRIBUTE
(r'(attribute)(\s+)(?=' + qname + r')',
bygroups(Keyword, Text), 'attribute_qname'),
#ELEMENT
(r'(element)(\s+)(?=' +qname+ r')',
bygroups(Keyword, Text), 'element_qname'),
#PROCESSING_INSTRUCTION
(r'(processing-instruction)(\s+)(' + ncname + r')(\s*)(\{)',
bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
'operator'),
(r'(declare|define)(\s+)(function)',
bygroups(Keyword, Text, Keyword)),
(r'(\{)', pushstate_operator_root_callback),
(r'(unordered|ordered)(\s*)(\{)',
pushstate_operator_order_callback),
(r'(declare)(\s+)(ordering)',
bygroups(Keyword, Text, Keyword), 'declareordering'),
(r'(xquery)(\s+)(version)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'),
(r'(\(#)', Punctuation, 'pragma'),
# sometimes return can occur in root state
(r'return', Keyword),
(r'(declare)(\s+)(option)', bygroups(Keyword, Text, Keyword),
'option'),
#URI LITERALS - single and double quoted
(r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
(r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
(r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
bygroups(Keyword, Punctuation)),
(r'(descendant|following-sibling|following|parent|preceding-sibling'
r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'then|else', Keyword),
# ML specific
(r'(try)(\s*)', bygroups(Keyword, Text), 'root'),
(r'(catch)(\s*)(\()(\$)',
bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),
(r'(@'+qname+')', Name.Attribute),
(r'(@'+ncname+')', Name.Attribute),
(r'@\*:'+ncname, Name.Attribute),
(r'(@)', Name.Attribute),
(r'//|/|\+|-|;|,|\(|\)', Punctuation),
# STANDALONE QNAMES
(qname + r'(?=\s*{)', Name.Tag, 'qname_braren'),
(qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
(qname, Name.Tag, 'operator'),
]
}
class DartLexer(RegexLexer):
    """
    For `Dart <http://dartlang.org/>`_ source code.
    *New in Pygments 1.5.*
    """
    name = 'Dart'
    aliases = ['dart']
    filenames = ['*.dart']
    mimetypes = ['text/x-dart']
    # DOTALL lets /* ... */ span lines; MULTILINE anchors $ for the shebang rule.
    flags = re.MULTILINE | re.DOTALL
    tokens = {
        'root': [
            # shebang line (e.g. #!/usr/bin/dart)
            (r'#!(.*?)$', Comment.Preproc),
            # old-style directives: #import / #library / #source
            (r'(#)(import|library|source)', bygroups(Text, Keyword)),
            (r'[^\S\n]+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            # 'class'/'interface' hands off to the 'class' state to grab the name
            (r'(class|interface)(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'(assert|break|case|catch|continue|default|do|else|finally|for|'
             r'if|in|is|new|return|super|switch|this|throw|try|while)\b',
             Keyword),
            (r'(abstract|const|extends|factory|final|get|implements|'
             r'native|operator|set|static|typedef|var)\b', Keyword.Declaration),
            (r'(bool|double|Dynamic|int|num|Object|String|void)', Keyword.Type),
            (r'(false|null|true)', Keyword.Constant),
            (r'@"(\\\\|\\"|[^"])*"', String.Double), # raw string
            (r"@'(\\\\|\\'|[^'])*'", String.Single), # raw string
            # non-raw strings get dedicated states to handle $-interpolation
            (r'"', String.Double, 'string_double'),
            (r"'", String.Single, 'string_single'),
            (r'[a-zA-Z_$][a-zA-Z0-9_]*:', Name.Label),
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[(){}\[\],.;]', Punctuation),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # DIGIT+ (‘.’ DIGIT*)? EXPONENT?
            (r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
            (r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT?
            (r'\n', Text)
            # pseudo-keyword negate intentionally left out
        ],
        # consumes exactly the class/interface name, then returns
        'class': [
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        # inside "...": plain text, $name and ${expr} interpolation
        'string_double': [
            (r'"', String.Double, '#pop'),
            (r'[^"$]+', String.Double),
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
            (r'(\$\{)(.*?)(\})',
             bygroups(String.Interpol, using(this), String.Interpol)),
            (r'\$+', String.Double)
        ],
        # inside '...': same handling as 'string_double'
        'string_single': [
            (r"'", String.Single, '#pop'),
            (r"[^'$]+", String.Single),
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
            (r'(\$\{)(.*?)(\})',
             bygroups(String.Interpol, using(this), String.Interpol)),
            (r'\$+', String.Single)
        ]
    }
class LassoLexer(RegexLexer):
    """
    For `Lasso <http://www.lassosoft.com/>`_ source code, covering both
    Lasso 9 syntax and LassoScript for Lasso 8.6 and earlier. For Lasso
    embedded in HTML, use the `LassoHtmlLexer`.
    Additional options accepted:
    `builtinshighlighting`
        If given and ``True``, highlight builtin tags, types, traits, and
        methods (default: ``True``).
    `requiredelimiters`
        If given and ``True``, only highlight code between delimiters as Lasso
        (default: ``False``).
    *New in Pygments 1.6.*
    """
    name = 'Lasso'
    aliases = ['lasso', 'lassoscript']
    filenames = ['*.lasso', '*.lasso[89]']
    alias_filenames = ['*.incl', '*.inc', '*.las']
    mimetypes = ['text/x-lasso']
    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
    tokens = {
        # 'root' decides whether we are in raw output or delimited Lasso code
        'root': [
            (r'^#!.+lasso9\b', Comment.Preproc, 'lasso'),
            (r'\s+', Other),
            (r'\[noprocess\]', Comment.Preproc, ('delimiters', 'noprocess')),
            (r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')),
            (r'<\?(LassoScript|lasso|=)', Comment.Preproc,
                ('delimiters', 'anglebrackets')),
            (r'<', Other, 'delimiters'),
            include('lasso'),
        ],
        # everything outside [...] / <?lasso ... ?> is passed through as Other
        'delimiters': [
            (r'\[noprocess\]', Comment.Preproc, 'noprocess'),
            (r'\[', Comment.Preproc, 'squarebrackets'),
            (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
            (r'<', Other),
            (r'[^[<]+', Other),
        ],
        # literal passthrough until [/noprocess]
        'noprocess': [
            (r'\[/noprocess\]', Comment.Preproc, '#pop'),
            (r'\[', Other),
            (r'[^[]', Other),
        ],
        'squarebrackets': [
            (r'\]', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        'anglebrackets': [
            (r'\?>', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        # the actual Lasso language rules
        'lasso': [
            # whitespace/comments
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*\*!.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            # names
            (r'\$[a-z_][\w\.]*', Name.Variable),
            (r'(#[a-z_][\w\.]*|#\d+)', Name.Variable.Instance),
            (r"\.'[a-z_][\w\.]*'", Name.Variable.Class),
            (r"(self)(->)('[a-z_][\w\.]*')",
                bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)),
            (r'(self|void)\b', Name.Builtin.Pseudo),
            (r'-[a-z_][\w\.]*', Name.Attribute),
            (r'(::)([a-z_][\w\.]*)', bygroups(Punctuation, Name.Label)),
            (r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
             r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
             r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
             r'Error_InvalidDatabase|Error_InvalidPassword|'
             r'Error_InvalidUsername|Error_ModuleNotFound|'
             r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
             r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
             r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
             r'Error_UpdateError)\b', Name.Exception),
            # definitions
            (r'(parent)(\s+)([a-z_][\w\.]*)',
                bygroups(Keyword.Declaration, Text, Name.Class)),
            (r'(define)(\s+)([a-z_][\w\.]*)(\s*)(=>)(\s*)(type|trait|thread)',
                bygroups(Keyword.Declaration, Text, Name.Class, Text, Operator,
                         Text, Keyword)),
            (r'(define)(\s+)([a-z_][\w\.]*)(->)([a-z_][\w\.]*=?)',
                bygroups(Keyword.Declaration, Text, Name.Class, Operator,
                         Name.Function)),
            (r'(define)(\s+)([a-z_][\w\.]*=?)',
                bygroups(Keyword.Declaration, Text, Name.Function)),
            (r'(public|protected|private)(\s+)([a-z_][\w\.]*)(\s*)(=>)',
                bygroups(Keyword, Text, Name.Function, Text, Operator)),
            (r'(public|protected|private|provide)(\s+)([a-z_][\w\.]*=?)(\s*)(\()',
                bygroups(Keyword, Text, Name.Function, Text, Punctuation)),
            # keywords
            (r'\.\.\.', Keyword.Pseudo),
            (r'(true|false|null|[+\-]?infinity|\+?NaN)\b', Keyword.Constant),
            (r'(local|var|variable|global|data)\b', Keyword.Declaration),
            (r'(array|date|decimal|duration|integer|map|pair|string|tag|'
             r'xml)\b', Keyword.Type),
            (r'(/?)(Cache|Database_Names|Database_SchemaNames|'
             r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
             r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
             r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
             r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|'
             r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|'
             r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|'
             r'Namespace_Using|NoProcess|Output_None|Portal|Private|Protect|'
             r'Records|Referer|Referrer|Repeating|ResultSet|Rows|Search_Args|'
             r'Search_Arguments|Select|Sort_Args|Sort_Arguments|Thread_Atomic|'
             r'Value_List|While|Abort|Case|Else|If_Empty|If_False|If_Null|'
             r'If_True|Loop_Abort|Loop_Continue|Loop_Count|Params|Params_Up|'
             r'Return|Return_Value|Run_Children|SOAP_DefineTag|'
             r'SOAP_LastRequest|SOAP_LastResponse|Tag_Name)\b',
                bygroups(Punctuation, Keyword)),
            (r'(and|ascending|average|by|case|define|descending|do|else|'
             r'equals|frozen|group|import|in|inherited|into|join|let|match|'
             r'max|min|not|on|or|order|params|parent|private|protected|'
             r'provide|public|require|return|select|skip|sum|take|thread|to|'
             r'trait|type|where|with)\b', Keyword),
            # literals
            (r'([+\-]?\d*\.\d+(e[+\-]?\d+)?)', Number.Float),
            (r'0x[\da-f]+', Number.Hex),
            (r'[+\-]?\d+', Number.Integer),
            (r"'", String.Single, 'singlestring'),
            (r'"', String.Double, 'doublestring'),
            (r'`[^`]*`', String.Backtick),
            # other
            (r'(=)(bw|ew|cn|lte?|gte?|n?eq|ft|n?rx)\b',
                bygroups(Operator, Operator.Word)),
            (r'([=\+\-\*/%<>&|!\?\.\\]+|:=)', Operator),
            (r'[{}():;,@^]', Punctuation),
            # bare words are emitted as Name.Other; get_tokens_unprocessed
            # below upgrades them to Name.Builtin when they are known builtins
            (r'(/?)([\w\.]+)', bygroups(Punctuation, Name.Other)),
        ],
        'singlestring': [
            (r"'", String.Single, '#pop'),
            (r"[^'\\]+", String.Single),
            include('escape'),
            (r"\\+", String.Single),
        ],
        'doublestring': [
            (r'"', String.Double, '#pop'),
            (r'[^"\\]+', String.Double),
            include('escape'),
            (r'\\+', String.Double),
        ],
        'escape': [
            (r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|'
             r'[abefnrtv\"\'\?\\]|$)', String.Escape),
        ],
    }
    def __init__(self, **options):
        # Read the two lexer options documented in the class docstring.
        self.builtinshighlighting = get_bool_opt(
            options, 'builtinshighlighting', True)
        self.requiredelimiters = get_bool_opt(
            options, 'requiredelimiters', False)
        # Flat set of all builtin names, used for the Name.Other upgrade below.
        self._builtins = set()
        if self.builtinshighlighting:
            from pygments.lexers._lassobuiltins import BUILTINS
            for key, value in BUILTINS.iteritems():
                self._builtins.update(value)
        RegexLexer.__init__(self, **options)
    def get_tokens_unprocessed(self, text):
        """Post-process the regex stream, promoting known builtins."""
        stack = ['root']
        # With requiredelimiters, start outside Lasso code entirely.
        if self.requiredelimiters:
            stack.append('delimiters')
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Other:
                # case-insensitive builtin lookup (flags include IGNORECASE)
                if value.lower() in self._builtins:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value
    def analyse_text(text):
        # Heuristic score for lexer guessing; capped contributions per clue.
        rv = 0.0
        if 'bin/lasso9' in text:
            rv += 0.8
        if re.search(r'<\?(=|lasso)', text, re.I):
            rv += 0.4
        if re.search(r'local\(', text, re.I):
            rv += 0.4
        if re.search(r'(\[\n|\?>)', text):
            rv += 0.4
        return rv
# -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Lexers for web-related languages and markup.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
include, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Other, Punctuation, Literal
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
html_doctype_matches, unirange
from pygments.lexers.agile import RubyLexer
from pygments.lexers.compiled import ScalaLexer
__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer',
'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ScssLexer',
'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer',
'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer',
'DtdLexer', 'DartLexer', 'LassoLexer']
class JavascriptLexer(RegexLexer):
    """
    For JavaScript source code.
    """
    name = 'JavaScript'
    aliases = ['js', 'javascript']
    filenames = ['*.js', ]
    mimetypes = ['application/javascript', 'application/x-javascript',
                 'text/x-javascript', 'text/javascript', ]
    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # Entered after tokens that may legally be followed by a regex
        # literal, so `/` is tried as a regex before falling back to division.
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            # a lone '/' that didn't parse as a regex: treat rest of line as bad
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop')
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # operators (and punctuation below) may precede a regex literal
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            # future reserved words (mostly inherited from Java)
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class JsonLexer(RegexLexer):
    """
    For JSON data structures.
    *New in Pygments 1.5.*
    """
    name = 'JSON'
    aliases = ['json']
    filenames = ['*.json']
    mimetypes = [ 'application/json', ]
    # integer part of a number
    int_part = r'-?(0|[1-9]\d*)'
    # fractional part of a number
    frac_part = r'\.\d+'
    # exponential part of a number
    exp_part = r'[eE](\+|-)?\d+'
    flags = re.DOTALL
    tokens = {
        'whitespace': [
            (r'\s+', Text),
        ],
        # represents a simple terminal value
        'simplevalue': [
            (r'(true|false|null)\b', Keyword.Constant),
            # float: int part plus at least one of fraction/exponent
            (('%(int_part)s(%(frac_part)s%(exp_part)s|'
              '%(exp_part)s|%(frac_part)s)') % vars(),
             Number.Float),
            (int_part, Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
        ],
        # the right hand side of an object, after the attribute name
        'objectattribute': [
            include('value'),
            (r':', Punctuation),
            # comma terminates the attribute but expects more
            (r',', Punctuation, '#pop'),
            # a closing bracket terminates the entire object, so pop twice
            (r'}', Punctuation, ('#pop', '#pop')),
        ],
        # a json object - { attr, attr, ... }
        'objectvalue': [
            include('whitespace'),
            (r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
            (r'}', Punctuation, '#pop'),
        ],
        # a json array - [ value, value, ... ]
        'arrayvalue': [
            include('whitespace'),
            include('value'),
            (r',', Punctuation),
            (r']', Punctuation, '#pop'),
        ],
        # a json value - either a simple value or a complex value (object or array)
        'value': [
            include('whitespace'),
            include('simplevalue'),
            (r'{', Punctuation, 'objectvalue'),
            (r'\[', Punctuation, 'arrayvalue'),
        ],
        # the root of a json document should be a value
        'root': [
            include('value'),
        ],
    }
JSONLexer = JsonLexer # deprecated alias, kept for backwards compatibility with Pygments 1.5
class ActionScriptLexer(RegexLexer):
    """
    For ActionScript source code.
    *New in Pygments 0.9.*
    """
    name = 'ActionScript'
    aliases = ['as', 'actionscript']
    filenames = ['*.as']
    # Fixed: these were the ActionScript *3* mimetypes, swapped with
    # ActionScript3Lexer's. This lexer handles AS 1/2, so it registers the
    # plain actionscript mimetypes.
    mimetypes = ['application/x-actionscript', 'text/x-actionscript',
                 'text/actionscript']
    flags = re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
            (r'[~\^\*!%&<>\|+=:;,/?\\-]+', Operator),
            (r'[{}\[\]();.]+', Punctuation),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|var|with|new|typeof|arguments|instanceof|this|'
             r'switch)\b', Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
             Keyword.Constant),
            # Flash player / core library class names
            (r'(Accessibility|AccessibilityProperties|ActionScriptVersion|'
             r'ActivityEvent|AntiAliasType|ApplicationDomain|AsBroadcaster|Array|'
             r'AsyncErrorEvent|AVM1Movie|BevelFilter|Bitmap|BitmapData|'
             r'BitmapDataChannel|BitmapFilter|BitmapFilterQuality|BitmapFilterType|'
             r'BlendMode|BlurFilter|Boolean|ByteArray|Camera|Capabilities|CapsStyle|'
             r'Class|Color|ColorMatrixFilter|ColorTransform|ContextMenu|'
             r'ContextMenuBuiltInItems|ContextMenuEvent|ContextMenuItem|'
             r'ConvultionFilter|CSMSettings|DataEvent|Date|DefinitionError|'
             r'DeleteObjectSample|Dictionary|DisplacmentMapFilter|DisplayObject|'
             r'DisplacmentMapFilterMode|DisplayObjectContainer|DropShadowFilter|'
             r'Endian|EOFError|Error|ErrorEvent|EvalError|Event|EventDispatcher|'
             r'EventPhase|ExternalInterface|FileFilter|FileReference|'
             r'FileReferenceList|FocusDirection|FocusEvent|Font|FontStyle|FontType|'
             r'FrameLabel|FullScreenEvent|Function|GlowFilter|GradientBevelFilter|'
             r'GradientGlowFilter|GradientType|Graphics|GridFitType|HTTPStatusEvent|'
             r'IBitmapDrawable|ID3Info|IDataInput|IDataOutput|IDynamicPropertyOutput'
             r'IDynamicPropertyWriter|IEventDispatcher|IExternalizable|'
             r'IllegalOperationError|IME|IMEConversionMode|IMEEvent|int|'
             r'InteractiveObject|InterpolationMethod|InvalidSWFError|InvokeEvent|'
             r'IOError|IOErrorEvent|JointStyle|Key|Keyboard|KeyboardEvent|KeyLocation|'
             r'LineScaleMode|Loader|LoaderContext|LoaderInfo|LoadVars|LocalConnection|'
             r'Locale|Math|Matrix|MemoryError|Microphone|MorphShape|Mouse|MouseEvent|'
             r'MovieClip|MovieClipLoader|Namespace|NetConnection|NetStatusEvent|'
             r'NetStream|NewObjectSample|Number|Object|ObjectEncoding|PixelSnapping|'
             r'Point|PrintJob|PrintJobOptions|PrintJobOrientation|ProgressEvent|Proxy|'
             r'QName|RangeError|Rectangle|ReferenceError|RegExp|Responder|Sample|Scene|'
             r'ScriptTimeoutError|Security|SecurityDomain|SecurityError|'
             r'SecurityErrorEvent|SecurityPanel|Selection|Shape|SharedObject|'
             r'SharedObjectFlushStatus|SimpleButton|Socket|Sound|SoundChannel|'
             r'SoundLoaderContext|SoundMixer|SoundTransform|SpreadMethod|Sprite|'
             r'StackFrame|StackOverflowError|Stage|StageAlign|StageDisplayState|'
             r'StageQuality|StageScaleMode|StaticText|StatusEvent|String|StyleSheet|'
             r'SWFVersion|SyncEvent|SyntaxError|System|TextColorType|TextField|'
             r'TextFieldAutoSize|TextFieldType|TextFormat|TextFormatAlign|'
             r'TextLineMetrics|TextRenderer|TextSnapshot|Timer|TimerEvent|Transform|'
             r'TypeError|uint|URIError|URLLoader|URLLoaderDataFormat|URLRequest|'
             r'URLRequestHeader|URLRequestMethod|URLStream|URLVariabeles|VerifyError|'
             r'Video|XML|XMLDocument|XMLList|XMLNode|XMLNodeType|XMLSocket|XMLUI)\b',
             Name.Builtin),
            # global functions
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class ActionScript3Lexer(RegexLexer):
    """
    For ActionScript 3 source code.
    *New in Pygments 0.11.*
    """
    name = 'ActionScript 3'
    aliases = ['as3', 'actionscript3']
    filenames = ['*.as']
    # Fixed: these were the plain ActionScript mimetypes, swapped with
    # ActionScriptLexer's. This lexer handles AS3, so it registers the
    # actionscript3 mimetypes.
    mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
                 'text/actionscript3']
    # identifier, optionally followed by a Vector-style type parameter
    identifier = r'[$a-zA-Z_][a-zA-Z0-9_]*'
    typeidentifier = identifier + '(?:\.<\w+>)?'
    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            (r'\s+', Text),
            # function definitions jump to 'funcparams' to parse the signature
            (r'(function\s+)(' + identifier + r')(\s*)(\()',
             bygroups(Keyword.Declaration, Name.Function, Text, Operator),
             'funcparams'),
            (r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r')',
             bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
                      Keyword.Type)),
            (r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
             bygroups(Keyword, Text, Name.Namespace, Text)),
            (r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
             bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
            (r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
             r'switch|import|include|as|is)\b',
             Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (identifier, Name),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[~\^\*!%&<>\|+=:;,/?\\{}\[\]().-]+', Operator),
        ],
        # one parameter at a time: name, type annotation, optional default
        'funcparams': [
            (r'\s+', Text),
            (r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
             typeidentifier + r'|\*)(\s*)',
             bygroups(Text, Punctuation, Name, Text, Operator, Text,
                      Keyword.Type, Text), 'defval'),
            (r'\)', Operator, 'type')
        ],
        # optional return-type annotation after the closing paren
        'type': [
            (r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
             bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
            (r'\s*', Text, '#pop:2')
        ],
        # optional default value after a parameter's type
        'defval': [
            (r'(=)(\s*)([^(),]+)(\s*)(,?)',
             bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
            (r',?', Operator, '#pop')
        ]
    }
    def analyse_text(text):
        # typed declarations ("name : Type") hint weakly at AS3
        if re.match(r'\w+\s*:\s*\w', text):
            return 0.3
        return 0
class CssLexer(RegexLexer):
    """
    For CSS (Cascading Style Sheets).
    """
    name = 'CSS'
    aliases = ['css']
    filenames = ['*.css']
    mimetypes = ['text/css']
    tokens = {
        'root': [
            include('basics'),
        ],
        # selector-level constructs, shared by root and @-rule bodies
        'basics': [
            (r'\s+', Text),
            (r'/\*(?:.|\n)*?\*/', Comment),
            # '{' opens a declaration block
            (r'{', Punctuation, 'content'),
            (r'\:[a-zA-Z0-9_-]+', Name.Decorator),
            (r'\.[a-zA-Z0-9_-]+', Name.Class),
            (r'\#[a-zA-Z0-9_-]+', Name.Function),
            (r'@[a-zA-Z0-9_-]+', Keyword, 'atrule'),
            (r'[a-zA-Z0-9_-]+', Name.Tag),
            (r'[~\^\*!%&\[\]\(\)<>\|+=@:;,./?-]', Operator),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single)
        ],
        # after an @rule keyword: either a block or a ';'-terminated statement
        'atrule': [
            (r'{', Punctuation, 'atcontent'),
            (r';', Punctuation, '#pop'),
            include('basics'),
        ],
        'atcontent': [
            include('basics'),
            # pop both 'atcontent' and the 'atrule' state
            (r'}', Punctuation, '#pop:2'),
        ],
        # inside a declaration block: property names/values until '}'
        'content': [
            (r'\s+', Text),
            (r'}', Punctuation, '#pop'),
            (r'url\(.*?\)', String.Other),
            (r'^@.*?$', Comment.Preproc),
            # known CSS 2.1 property names and value keywords
            (r'(azimuth|background-attachment|background-color|'
             r'background-image|background-position|background-repeat|'
             r'background|border-bottom-color|border-bottom-style|'
             r'border-bottom-width|border-left-color|border-left-style|'
             r'border-left-width|border-right|border-right-color|'
             r'border-right-style|border-right-width|border-top-color|'
             r'border-top-style|border-top-width|border-bottom|'
             r'border-collapse|border-left|border-width|border-color|'
             r'border-spacing|border-style|border-top|border|caption-side|'
             r'clear|clip|color|content|counter-increment|counter-reset|'
             r'cue-after|cue-before|cue|cursor|direction|display|'
             r'elevation|empty-cells|float|font-family|font-size|'
             r'font-size-adjust|font-stretch|font-style|font-variant|'
             r'font-weight|font|height|letter-spacing|line-height|'
             r'list-style-type|list-style-image|list-style-position|'
             r'list-style|margin-bottom|margin-left|margin-right|'
             r'margin-top|margin|marker-offset|marks|max-height|max-width|'
             r'min-height|min-width|opacity|orphans|outline|outline-color|'
             r'outline-style|outline-width|overflow(?:-x|-y)?|padding-bottom|'
             r'padding-left|padding-right|padding-top|padding|page|'
             r'page-break-after|page-break-before|page-break-inside|'
             r'pause-after|pause-before|pause|pitch|pitch-range|'
             r'play-during|position|quotes|richness|right|size|'
             r'speak-header|speak-numeral|speak-punctuation|speak|'
             r'speech-rate|stress|table-layout|text-align|text-decoration|'
             r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
             r'vertical-align|visibility|voice-family|volume|white-space|'
             r'widows|width|word-spacing|z-index|bottom|left|'
             r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
             r'behind|below|bidi-override|blink|block|bold|bolder|both|'
             r'capitalize|center-left|center-right|center|circle|'
             r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
             r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
             r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
             r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
             r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
             r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
             r'inherit|inline-table|inline|inset|inside|invert|italic|'
             r'justify|katakana-iroha|katakana|landscape|larger|large|'
             r'left-side|leftwards|level|lighter|line-through|list-item|'
             r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
             r'lower|low|medium|message-box|middle|mix|monospace|'
             r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
             r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
             r'open-quote|outset|outside|overline|pointer|portrait|px|'
             r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
             r'rightwards|s-resize|sans-serif|scroll|se-resize|'
             r'semi-condensed|semi-expanded|separate|serif|show|silent|'
             r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
             r'spell-out|square|static|status-bar|super|sw-resize|'
             r'table-caption|table-cell|table-column|table-column-group|'
             r'table-footer-group|table-header-group|table-row|'
             r'table-row-group|text|text-bottom|text-top|thick|thin|'
             r'transparent|ultra-condensed|ultra-expanded|underline|'
             r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
             r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
             r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Keyword),
            # named CSS colors
            (r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
             r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
             r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
             r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
             r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
             r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
             r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
             r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
             r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
             r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
             r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
             r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
             r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
             r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
             r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
             r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
             r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
             r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
             r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
             r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
             r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
             r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
             r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
             r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
             r'blueviolet|peachpuff)\b', Name.Builtin),
            (r'\!important', Comment.Preproc),
            (r'/\*(?:.|\n)*?\*/', Comment),
            # hex color literal
            (r'\#[a-zA-Z0-9]{1,6}', Number),
            # dimension (number + unit)
            (r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|\%|pt|pc|in|mm|cm|ex|s)\b', Number),
            (r'-?[0-9]+', Number),
            (r'[~\^\*!%&<>\|+=@:,./?-]+', Operator),
            (r'[\[\]();]+', Punctuation),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name)
        ]
    }
class ObjectiveJLexer(RegexLexer):
    """
    For Objective-J source code with preprocessor directives.

    *New in Pygments 1.3.*
    """

    name = 'Objective-J'
    aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
    filenames = ['*.j']
    mimetypes = ['text/x-objective-j']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'

    flags = re.DOTALL | re.MULTILINE

    tokens = {
        'root': [
            include('whitespace'),

            # function definition: '+'/'-' selector prefix up to the opening '{'
            (r'^(' + _ws + r'[\+-]' + _ws + r')([\(a-zA-Z_].*?[^\(])(' + _ws + '{)',
             bygroups(using(this), using(this, state='function_signature'),
                      using(this))),

            # class definition
            (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
             'classname'),
            (r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
             'forward_classname'),
            (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),

            include('statements'),
            ('[{\(\)}]', Punctuation),
            (';', Punctuation),
        ],
        'whitespace': [
            # @import with a quoted or angle-bracketed file name
            (r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),
            # C-preprocessor-style #include / #import
            (r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),

            (r'#if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),

            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),  # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'<!--', Comment),
        ],
        'slashstartsregex': [
            # entered after tokens where a following '/' starts a regex literal
            include('whitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop'),
        ],
        'badregex': [
            (r'\n', Text, '#pop'),
        ],
        'statements': [
            (r'(L|@)?"', String, 'string'),
            (r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            # numeric literals: float (exponent / suffix), hex, octal, decimal
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),

            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),

            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),

            (r'(for|in|while|do|break|return|continue|switch|case|default|if|'
             r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),

            (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),

            (r'(@selector|@private|@protected|@public|@encode|'
             r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
             r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),

            (r'(int|long|float|short|double|char|unsigned|signed|void|'
             r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
             Keyword.Type),

            (r'(self|super)\b', Name.Builtin),

            (r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
             r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
             r'SQRT2)\b', Keyword.Constant),

            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),

            # function call: identifier followed (after optional ws) by '('
            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r')(?=\()',
             bygroups(Name.Function, using(this))),

            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'classname' : [
            # interface definition that inherits
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r':' + _ws +
             r')([a-zA-Z_][a-zA-Z0-9_]*)?',
             bygroups(Name.Class, using(this), Name.Class), '#pop'),
            # interface definition for a category
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r'\()([a-zA-Z_][a-zA-Z0-9_]*)(\))',
             bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
            # simple interface / implementation
            (r'([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop'),
        ],
        'forward_classname' : [
            # comma-separated list of forward-declared class names
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
             bygroups(Name.Class, Text), '#push'),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
             bygroups(Name.Class, Text), '#pop'),
        ],
        'function_signature': [
            include('whitespace'),

            # start of a selector w/ parameters
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), 'function_parameters'),

            # no-param function
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), "#pop"),

            # no return type given, start of a selector w/ parameters
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             bygroups (Name.Function), 'function_parameters'),

            # no return type given, no-param function
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(Name.Function), "#pop"),

            ('', Text, '#pop'),
        ],
        'function_parameters': [
            include('whitespace'),

            # parameters
            (r'(\(' + _ws + ')'                 # open paren
             r'([^\)]+)'                        # type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # param name
             bygroups(using(this), Keyword.Type, using(this), Text)),

            # one piece of a selector name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)',  # function name
             Name.Function),

            # smallest possible selector piece
            (r'(:)', Name.Function),

            # var args
            (r'(,' + _ws + r'\.\.\.)', using(this)),

            # param name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)', Text),
        ],
        'expression' : [
            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(\()', bygroups(Name.Function,
                                                        Punctuation)),
            (r'(\))', Punctuation, "#pop"),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\\n', String),       # line continuation
            (r'\\', String),         # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),  # escaped newline keeps the macro open
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'if0': [
            # handle nested #if ... #endif inside the disabled region
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    def analyse_text(text):
        # '@import' at the start of a line strongly indicates Objective-J
        if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
            # special directive found in most Objective-J files
            return True
        return False
class HtmlLexer(RegexLexer):
    """
    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
    by the appropriate lexer.
    """

    name = 'HTML'
    aliases = ['html']
    filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
    mimetypes = ['text/html', 'application/xhtml+xml']

    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),  # processing instruction
            ('<![^>]*>', Comment.Preproc),    # doctype and other <!...> declarations
            # <script>/<style>: lex the attributes first ('tag'), then the
            # element content with the nested lexer
            (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
            (r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
            (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
            (r'[a-zA-Z0-9_:-]+', Name.Attribute),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'script-content': [
            (r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
            (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
        ],
        'style-content': [
            (r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
            (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # a matching doctype gives a moderate-confidence guess
        if html_doctype_matches(text):
            return 0.5
class PhpLexer(RegexLexer):
    """
    For `PHP <http://www.php.net/>`_ source code.
    For PHP embedded in HTML, use the `HtmlPhpLexer`.

    Additional options accepted:

    `startinline`
        If given and ``True`` the lexer starts highlighting with
        php code (i.e.: no starting ``<?php`` required).  The default
        is ``False``.
    `funcnamehighlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabledmodules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted
        except the special ``'unknown'`` module that includes functions
        that are known to php but are undocumented.

        To get a list of allowed modules have a look into the
        `_phpbuiltins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._phpbuiltins import MODULES
            >>> MODULES.keys()
            ['PHP Options/Info', 'Zip', 'dba', ...]

        In fact the names of those modules match the module names from
        the php documentation.
    """

    name = 'PHP'
    aliases = ['php', 'php3', 'php4', 'php5']
    filenames = ['*.php', '*.php[345]']
    mimetypes = ['text/x-php']

    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            # everything outside '<?php ... ?>' is passed through as Other
            (r'<\?(php)?', Comment.Preproc, 'php'),
            (r'[^<]+', Other),
            (r'<', Other)
        ],
        'php': [
            (r'\?>', Comment.Preproc, '#pop'),
            # heredoc / nowdoc strings
            (r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String),
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (r'//.*?\n', Comment.Single),
            # put the empty comment here, it is otherwise seen as
            # the start of a docstring
            (r'/\*\*/', Comment.Multiline),
            (r'/\*\*.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Attribute)),
            (r'[~!%^&*+=|:.<>/?@-]+', Operator),
            (r'[\[\]{}();,]+', Punctuation),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            # anonymous function: 'function' immediately followed by '('
            (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
            (r'(function)(\s+)(&?)(\s*)',
             bygroups(Keyword, Text, Operator, Text), 'functionname'),
            (r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Constant)),
            (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
             r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
             r'FALSE|print|for|require|continue|foreach|require_once|'
             r'declare|return|default|static|do|switch|die|stdClass|'
             r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
             r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
             r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
             r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
             r'implements|public|private|protected|abstract|clone|try|'
             r'catch|throw|this|use|namespace|trait)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
            (r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
            # bare identifiers; re-tagged to Name.Builtin later if they
            # match a known builtin function (see get_tokens_unprocessed)
            (r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other),
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+', Number.Float),
            (r'0[0-7]+', Number.Oct),
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'\d+', Number.Integer),
            (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
            (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
            (r'"', String.Double, 'string'),
        ],
        'classname': [
            (r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'functionname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        'string': [
            # double-quoted strings support variable interpolation
            (r'"', String.Double, '#pop'),
            (r'[^{$"\\]+', String.Double),
            (r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape),
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
             String.Interpol),
            (r'(\{\$\{)(.*?)(\}\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\{)(\$.*?)(\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\$\{)(\S+)(\})',
             bygroups(String.Interpol, Name.Variable, String.Interpol)),
            (r'[${\\]+', String.Double)
        ],
    }

    def __init__(self, **options):
        # consume lexer-specific options before handing the rest to RegexLexer
        self.funcnamehighlighting = get_bool_opt(
            options, 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(
            options, 'disabledmodules', ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)

        # private option argument for the lexer itself
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')

        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._phpbuiltins import MODULES
            for key, value in MODULES.iteritems():  # Python 2 dict iteration
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        stack = ['root']
        if self.startinline:
            stack.append('php')
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Other:
                # re-tag identifiers that name known builtin functions
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value

    def analyse_text(text):
        rv = 0.0
        if re.search(r'<\?(?!xml)', text):
            rv += 0.3
        if '?>' in text:
            rv += 0.1
        return rv
class DtdLexer(RegexLexer):
    """
    A lexer for DTDs (Document Type Definitions).

    *New in Pygments 1.5.*
    """

    flags = re.MULTILINE | re.DOTALL

    name = 'DTD'
    aliases = ['dtd']
    filenames = ['*.dtd']
    mimetypes = ['application/xml-dtd']

    tokens = {
        'root': [
            include('common'),

            # each declaration keyword pushes a dedicated state for its body
            (r'(<!ELEMENT)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'element'),
            (r'(<!ATTLIST)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'attlist'),
            (r'(<!ENTITY)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Entity), 'entity'),
            (r'(<!NOTATION)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'notation'),
            (r'(<!\[)([^\[\s]+)(\s*)(\[)',  # conditional sections
             bygroups(Keyword, Name.Entity, Text, Keyword)),

            (r'(<!DOCTYPE)(\s+)([^>\s]+)',
             bygroups(Keyword, Text, Name.Tag)),
            (r'PUBLIC|SYSTEM', Keyword.Constant),
            (r'[\[\]>]', Keyword),
        ],

        'common': [
            # whitespace, entity/parameter references, comments, literals
            (r'\s+', Text),
            (r'(%|&)[^;]*;', Name.Entity),
            ('<!--', Comment, 'comment'),
            (r'[(|)*,?+]', Operator),
            (r'"[^"]*"', String.Double),
            (r'\'[^\']*\'', String.Single),
        ],

        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],

        'element': [
            include('common'),
            (r'EMPTY|ANY|#PCDATA', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Tag),
            (r'>', Keyword, '#pop'),
        ],

        'attlist': [
            include('common'),
            (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION', Keyword.Constant),
            (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
            (r'xml:space|xml:lang', Keyword.Reserved),
            (r'[^>\s\|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],

        'entity': [
            include('common'),
            (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Entity),
            (r'>', Keyword, '#pop'),
        ],

        'notation': [
            include('common'),
            (r'SYSTEM|PUBLIC', Keyword.Constant),
            (r'[^>\s\|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],
    }

    def analyse_text(text):
        # DTD declarations in something that is not a full XML document
        if not looks_like_xml(text) and \
           ('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
            return 0.8
class XmlLexer(RegexLexer):
    """
    Generic lexer for XML (eXtensible Markup Language).
    """

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    name = 'XML'
    aliases = ['xml']
    filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl']
    mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
                 'application/rss+xml', 'application/atom+xml']

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),  # processing instruction
            ('<![^>]*>', Comment.Preproc),    # doctype and other declarations
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # rely on the shared doctype/XML-declaration heuristic
        if looks_like_xml(text):
            return 0.5
class XsltLexer(XmlLexer):
    '''
    A lexer for XSLT.

    *New in Pygments 0.10.*
    '''

    name = 'XSLT'
    aliases = ['xslt']
    filenames = ['*.xsl', '*.xslt', '*.xpl']  # xpl is XProc
    mimetypes = ['application/xsl+xml', 'application/xslt+xml']

    # xsl: element names that should be highlighted as keywords
    EXTRA_KEYWORDS = set([
        'apply-imports', 'apply-templates', 'attribute',
        'attribute-set', 'call-template', 'choose', 'comment',
        'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
        'for-each', 'if', 'import', 'include', 'key', 'message',
        'namespace-alias', 'number', 'otherwise', 'output', 'param',
        'preserve-space', 'processing-instruction', 'sort',
        'strip-space', 'stylesheet', 'template', 'text', 'transform',
        'value-of', 'variable', 'when', 'with-param'
    ])

    def get_tokens_unprocessed(self, text):
        # post-process the XML token stream: promote known xsl: tags to Keyword
        for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
            m = re.match('</?xsl:([^>]*)/?>?', value)

            if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
                yield index, Keyword, value
            else:
                yield index, token, value

    def analyse_text(text):
        if looks_like_xml(text) and '<xsl' in text:
            return 0.8
class MxmlLexer(RegexLexer):
    """
    For MXML markup.
    Nested AS3 in <script> tags is highlighted by the appropriate lexer.

    *New in Pygments 1.1.*
    """
    flags = re.MULTILINE | re.DOTALL
    name = 'MXML'
    aliases = ['mxml']
    filenames = ['*.mxml']
    # Fixed: this attribute was misspelled 'mimetimes', so the lexer
    # machinery (which looks up 'mimetypes') never registered these
    # MIME types for the MXML lexer.
    mimetypes = ['text/xml', 'application/xml']

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            # CDATA sections hold ActionScript 3 code; delegate to its lexer
            (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
             bygroups(String, using(ActionScript3Lexer), String)),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
class HaxeLexer(RegexLexer):
    """
    For haXe source code (http://haxe.org/).

    *New in Pygments 1.3.*
    """

    name = 'haXe'
    aliases = ['hx', 'haXe']
    filenames = ['*.hx']
    mimetypes = ['text/haxe']

    # building-block regexes reused throughout the token table
    ident = r'(?:[a-zA-Z_][a-zA-Z0-9_]*)'
    typeid = r'(?:(?:[a-z0-9_\.])*[A-Z_][A-Za-z0-9_]*)'
    key_prop = r'(?:default|null|never)'
    key_decl_mod = r'(?:public|private|override|static|inline|extern|dynamic)'

    flags = re.DOTALL | re.MULTILINE

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            (key_decl_mod, Keyword.Declaration),
            include('enumdef'),
            include('typedef'),
            include('classdef'),
            include('imports'),
        ],

        # General constructs
        'comments': [
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'#[^\n]*', Comment.Preproc),
        ],
        'whitespace': [
            include('comments'),
            (r'\s+', Text),
        ],
        'codekeywords': [
            (r'\b(if|else|while|do|for|in|break|continue|'
             r'return|switch|case|try|catch|throw|null|trace|'
             r'new|this|super|untyped|cast|callback|here)\b',
             Keyword.Reserved),
        ],
        'literals': [
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r'~/([^\n])*?/[gisx]*', String.Regex),
            (r'\b(true|false|null)\b', Keyword.Constant),
        ],
        'codeblock': [
            include('whitespace'),
            include('new'),
            include('case'),
            include('anonfundef'),
            include('literals'),
            include('vardef'),
            include('codekeywords'),
            (r'[();,\[\]]', Punctuation),
            (r'(?:=|\+=|-=|\*=|/=|%=|&=|\|=|\^=|<<=|>>=|>>>=|\|\||&&|'
             r'\.\.\.|==|!=|>|<|>=|<=|\||&|\^|<<|>>>|>>|\+|\-|\*|/|%|'
             r'!|\+\+|\-\-|~|\.|\?|\:)',
             Operator),
            (ident, Name),

            (r'}', Punctuation,'#pop'),
            (r'{', Punctuation,'#push'),
        ],

        # Instance/Block level constructs
        'propertydef': [
            (r'(\()(' + key_prop + ')(,)(' + key_prop + ')(\))',
             bygroups(Punctuation, Keyword.Reserved, Punctuation,
                      Keyword.Reserved, Punctuation)),
        ],
        'new': [
            (r'\bnew\b', Keyword, 'typedecl'),
        ],
        'case': [
            (r'\b(case)(\s+)(' + ident + ')(\s*)(\()',
             bygroups(Keyword.Reserved, Text, Name, Text, Punctuation),
             'funargdecl'),
        ],
        'vardef': [
            (r'\b(var)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Variable), 'vardecl'),
        ],
        'vardecl': [
            include('whitespace'),
            include('typelabel'),
            (r'=', Operator,'#pop'),
            (r';', Punctuation,'#pop'),
        ],
        'instancevardef': [
            (key_decl_mod,Keyword.Declaration),
            (r'\b(var)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Variable.Instance),
             'instancevardecl'),
        ],
        'instancevardecl': [
            include('vardecl'),
            include('propertydef'),
        ],

        'anonfundef': [
            (r'\bfunction\b', Keyword.Declaration, 'fundecl'),
        ],
        'instancefundef': [
            (key_decl_mod, Keyword.Declaration),
            (r'\b(function)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Function), 'fundecl'),
        ],
        'fundecl': [
            include('whitespace'),
            include('typelabel'),
            include('generictypedecl'),
            (r'\(',Punctuation,'funargdecl'),
            (r'(?=[a-zA-Z0-9_])',Text,'#pop'),
            (r'{',Punctuation,('#pop','codeblock')),
            (r';',Punctuation,'#pop'),
        ],
        'funargdecl': [
            include('whitespace'),
            (ident, Name.Variable),
            include('typelabel'),
            include('literals'),
            (r'=', Operator),
            (r',', Punctuation),
            (r'\?', Punctuation),
            (r'\)', Punctuation, '#pop'),
        ],

        'typelabel': [
            (r':', Punctuation, 'type'),
        ],
        'typedecl': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, 'generictypedecl'),
            (r'(?=[{}()=,a-z])', Text,'#pop'),
        ],
        'type': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, 'generictypedecl'),
            (r'->', Keyword.Type),
            (r'(?=[{}(),;=])', Text, '#pop'),
        ],
        'generictypedecl': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r',', Punctuation),
        ],

        # Top level constructs
        'imports': [
            (r'(package|import|using)(\s+)([^;]+)(;)',
             bygroups(Keyword.Namespace, Text, Name.Namespace,Punctuation)),
        ],
        'typedef': [
            (r'typedef', Keyword.Declaration, ('typedefprebody', 'typedecl')),
        ],
        'typedefprebody': [
            include('whitespace'),
            (r'(=)(\s*)({)', bygroups(Punctuation, Text, Punctuation),
             ('#pop', 'typedefbody')),
        ],
        'enumdef': [
            (r'enum', Keyword.Declaration, ('enumdefprebody', 'typedecl')),
        ],
        'enumdefprebody': [
            include('whitespace'),
            (r'{', Punctuation, ('#pop','enumdefbody')),
        ],
        'classdef': [
            (r'class', Keyword.Declaration, ('classdefprebody', 'typedecl')),
        ],
        'classdefprebody': [
            include('whitespace'),
            (r'(extends|implements)', Keyword.Declaration,'typedecl'),
            (r'{', Punctuation, ('#pop', 'classdefbody')),
        ],
        'interfacedef': [
            (r'interface', Keyword.Declaration,
             ('interfacedefprebody', 'typedecl')),
        ],
        'interfacedefprebody': [
            include('whitespace'),
            (r'(extends)', Keyword.Declaration, 'typedecl'),
            (r'{', Punctuation, ('#pop', 'classdefbody')),
        ],

        'typedefbody': [
            include('whitespace'),
            include('instancevardef'),
            include('instancefundef'),
            (r'>', Punctuation, 'typedecl'),
            (r',', Punctuation),
            (r'}', Punctuation, '#pop'),
        ],
        'enumdefbody': [
            include('whitespace'),
            (ident, Name.Variable.Instance),
            (r'\(', Punctuation, 'funargdecl'),
            (r';', Punctuation),
            (r'}', Punctuation, '#pop'),
        ],
        'classdefbody': [
            include('whitespace'),
            include('instancevardef'),
            include('instancefundef'),
            (r'}', Punctuation, '#pop'),
            include('codeblock'),
        ],
    }

    def analyse_text(text):
        # a 'name : Type' pattern is a weak hint for haXe
        if re.match(r'\w+\s*:\s*\w', text): return 0.3
def _indentation(lexer, match, ctx):
    # Emit the matched leading whitespace and decide whether the line
    # belongs to an open indentation-delimited block or to 'content'.
    ws = match.group(0)
    yield match.start(), Text, ws
    ctx.last_indentation = ws
    ctx.pos = match.end()

    pending = getattr(ctx, 'block_state', None)
    if pending and ws.startswith(ctx.block_indentation) \
            and ws != ctx.block_indentation:
        # still inside the block: keep lexing with the block's state
        ctx.stack.append(pending)
    else:
        # block (if any) is over; reset and continue with regular content
        ctx.block_state = None
        ctx.block_indentation = None
        ctx.stack.append('content')
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
class HamlLexer(ExtendedRegexLexer):
    """
    For Haml markup.

    *New in Pygments 1.3.*
    """

    name = 'Haml'
    aliases = ['haml', 'HAML']
    filenames = ['*.haml']
    mimetypes = ['text/x-haml']

    flags = re.IGNORECASE
    # Haml can include " |\n" anywhere,
    # which is ignored and used to wrap long lines.
    # To accomodate this, use this custom faux dot instead.
    _dot = r'(?: \|\n(?=.* \|)|.)'

    # In certain places, a comma at the end of the line
    # allows line wrapping as well.
    _comma_dot = r'(?:,\s*\n|' + _dot + ')'
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            # hand every line's indentation to the _indentation callback,
            # which dispatches to 'content' or an open filter/comment block
            (r'[ \t]*', _indentation),
        ],

        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],

        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            # '=' / '~' lines contain Ruby code
            (r'([&!]?[=~])(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             'root'),
            (r'', Text, 'plain'),
        ],

        'content': [
            include('css'),
            (r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            # '/' starts an HTML comment block, '-#' a Haml comment block
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'haml-comment-block'), '#pop'),
            (r'(-)(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             '#pop'),
            # ':' starts a filter block (e.g. :javascript)
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],

        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
            (r'\[' + _dot + '*?\]', using(RubyLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            # Ruby string interpolation #{...}
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'haml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
# Token states shared by the Sass and SCSS lexers; each lexer copies these
# into its own ``tokens`` dict and appends its newline handling.
common_sass_tokens = {
    'value': [
        (r'[ \t]+', Text),
        (r'[!$][\w-]+', Name.Variable),
        (r'url\(', String.Other, 'string-url'),
        (r'[a-z_-][\w-]*(?=\()', Name.Function),
        # CSS property names and keyword values
        (r'(azimuth|background-attachment|background-color|'
         r'background-image|background-position|background-repeat|'
         r'background|border-bottom-color|border-bottom-style|'
         r'border-bottom-width|border-left-color|border-left-style|'
         r'border-left-width|border-right|border-right-color|'
         r'border-right-style|border-right-width|border-top-color|'
         r'border-top-style|border-top-width|border-bottom|'
         r'border-collapse|border-left|border-width|border-color|'
         r'border-spacing|border-style|border-top|border|caption-side|'
         r'clear|clip|color|content|counter-increment|counter-reset|'
         r'cue-after|cue-before|cue|cursor|direction|display|'
         r'elevation|empty-cells|float|font-family|font-size|'
         r'font-size-adjust|font-stretch|font-style|font-variant|'
         r'font-weight|font|height|letter-spacing|line-height|'
         r'list-style-type|list-style-image|list-style-position|'
         r'list-style|margin-bottom|margin-left|margin-right|'
         r'margin-top|margin|marker-offset|marks|max-height|max-width|'
         r'min-height|min-width|opacity|orphans|outline|outline-color|'
         r'outline-style|outline-width|overflow|padding-bottom|'
         r'padding-left|padding-right|padding-top|padding|page|'
         r'page-break-after|page-break-before|page-break-inside|'
         r'pause-after|pause-before|pause|pitch|pitch-range|'
         r'play-during|position|quotes|richness|right|size|'
         r'speak-header|speak-numeral|speak-punctuation|speak|'
         r'speech-rate|stress|table-layout|text-align|text-decoration|'
         r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
         r'vertical-align|visibility|voice-family|volume|white-space|'
         r'widows|width|word-spacing|z-index|bottom|left|'
         r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
         r'behind|below|bidi-override|blink|block|bold|bolder|both|'
         r'capitalize|center-left|center-right|center|circle|'
         r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
         r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
         r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
         r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
         r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
         r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
         r'inherit|inline-table|inline|inset|inside|invert|italic|'
         r'justify|katakana-iroha|katakana|landscape|larger|large|'
         r'left-side|leftwards|level|lighter|line-through|list-item|'
         r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
         r'lower|low|medium|message-box|middle|mix|monospace|'
         r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
         r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
         r'open-quote|outset|outside|overline|pointer|portrait|px|'
         r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
         r'rightwards|s-resize|sans-serif|scroll|se-resize|'
         r'semi-condensed|semi-expanded|separate|serif|show|silent|'
         r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
         r'spell-out|square|static|status-bar|super|sw-resize|'
         r'table-caption|table-cell|table-column|table-column-group|'
         r'table-footer-group|table-header-group|table-row|'
         r'table-row-group|text|text-bottom|text-top|thick|thin|'
         r'transparent|ultra-condensed|ultra-expanded|underline|'
         r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
         r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
         r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant),
        # extended (non-HTML4) color keywords
        (r'(indigo|gold|firebrick|indianred|darkolivegreen|'
         r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
         r'mediumslateblue|springgreen|crimson|lightsalmon|brown|'
         r'turquoise|olivedrab|cyan|skyblue|darkturquoise|'
         r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|'
         r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
         r'violet|orchid|ghostwhite|honeydew|cornflowerblue|'
         r'darkblue|darkkhaki|mediumpurple|cornsilk|bisque|slategray|'
         r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
         r'gainsboro|mediumturquoise|floralwhite|coral|lightgrey|'
         r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
         r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
         r'lightcoral|orangered|navajowhite|palegreen|burlywood|'
         r'seashell|mediumspringgreen|papayawhip|blanchedalmond|'
         r'peru|aquamarine|darkslategray|ivory|dodgerblue|'
         r'lemonchiffon|chocolate|orange|forestgreen|slateblue|'
         r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
         r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
         r'plum|darkgoldenrod|sandybrown|magenta|tan|'
         r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
         r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
         r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
         r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
         r'lightyellow|lavenderblush|linen|mediumaquamarine|'
         r'blueviolet|peachpuff)\b', Name.Entity),
        # the 16 basic HTML4 color keywords
        (r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|'
         r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin),
        (r'\!(important|default)', Name.Exception),
        (r'(true|false)', Name.Pseudo),
        (r'(and|or|not)', Operator.Word),
        (r'/\*', Comment.Multiline, 'inline-comment'),
        (r'//[^\n]*', Comment.Single),
        (r'\#[a-z0-9]{1,6}', Number.Hex),
        # numbers with an optional unit (%, px, em, ...)
        (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
        (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
        (r'#{', String.Interpol, 'interpolation'),
        (r'[~\^\*!&%<>\|+=@:,./?-]+', Operator),
        (r'[\[\]()]+', Punctuation),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
        (r'[a-z_-][\w-]*', Name),
    ],

    'interpolation': [
        (r'\}', String.Interpol, '#pop'),
        include('value'),
    ],

    'selector': [
        (r'[ \t]+', Text),
        (r'\:', Name.Decorator, 'pseudo-class'),
        (r'\.', Name.Class, 'class'),
        (r'\#', Name.Namespace, 'id'),
        (r'[a-zA-Z0-9_-]+', Name.Tag),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'&', Keyword),
        (r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator),
        (r'"', String.Double, 'string-double'),
        (r"'", String.Single, 'string-single'),
    ],

    'string-double': [
        (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'"', String.Double, '#pop'),
    ],

    'string-single': [
        (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
        (r'#\{', String.Interpol, 'interpolation'),
        (r"'", String.Double, '#pop'),
    ],

    'string-url': [
        (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'\)', String.Other, '#pop'),
    ],

    'pseudo-class': [
        (r'[\w-]+', Name.Decorator),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],

    'class': [
        (r'[\w-]+', Name.Class),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],

    'id': [
        (r'[\w-]+', Name.Namespace),
        (r'#\{', String.Interpol, 'interpolation'),
        (r'', Text, '#pop'),
    ],

    'for': [
        (r'(from|to|through)', Operator.Word),
        include('value'),
    ],
}
class SassLexer(ExtendedRegexLexer):
    """
    For Sass stylesheets (the indentation-based ``*.sass`` syntax).

    Uses ``ExtendedRegexLexer`` because Sass structure is driven by leading
    whitespace: ``root`` only consumes the indentation and defers to the
    ``_indentation`` callback to pick the state for the rest of the line.

    *New in Pygments 1.3.*
    """

    name = 'Sass'
    aliases = ['sass', 'SASS']
    filenames = ['*.sass']
    mimetypes = ['text/x-sass']

    flags = re.IGNORECASE
    tokens = {
        # Dispatch on indentation only; _indentation chooses the next state.
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        # One logical Sass line: comments, @-directives, mixin defs/calls,
        # variable assignments, attributes; falls through to 'selector'.
        'content': [
            (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
             'root'),
            (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
             'root'),
            (r'@import', Keyword, 'import'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
            (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            (r'@[a-z0-9_-]+', Keyword, 'selector'),
            # '=name' defines a mixin, '+name' includes one (old Sass syntax).
            (r'=[\w-]+', Name.Function, 'value'),
            (r'\+[\w-]+', Name.Decorator, 'value'),
            # '!var' (old) or '$var' (new) variable assignment.
            (r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
             bygroups(Name.Variable, Operator), 'value'),
            (r':', Name.Attribute, 'old-style-attr'),
            (r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
            (r'', Text, 'selector'),
        ],

        'single-comment': [
            (r'.+', Comment.Single),
            (r'\n', Text, 'root'),
        ],

        'multi-comment': [
            (r'.+', Comment.Multiline),
            (r'\n', Text, 'root'),
        ],

        'import': [
            (r'[ \t]+', Text),
            (r'\S+', String),
            (r'\n', Text, 'root'),
        ],

        # ':attr value' form.
        'old-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*=', Operator, 'value'),
            (r'', Text, 'value'),
        ],

        # 'attr: value' form.
        'new-style-attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*[=:]', Operator, 'value'),
        ],

        'inline-comment': [
            (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }
    # Pull in the token groups shared with SCSS.  copy.copy() gives this
    # class its own list objects, so the append()s below do not mutate the
    # shared common_sass_tokens entries (ScssLexer extends them differently).
    for group, common in common_sass_tokens.iteritems():
        tokens[group] = copy.copy(common)
    tokens['value'].append((r'\n', Text, 'root'))
    tokens['selector'].append((r'\n', Text, 'root'))
class ScssLexer(RegexLexer):
    """
    For SCSS stylesheets (the brace-delimited ``*.scss`` syntax).

    Unlike :class:`SassLexer`, SCSS is not indentation-sensitive, so a plain
    ``RegexLexer`` suffices; blocks are delimited by ``{`` / ``}``.
    """

    name = 'SCSS'
    aliases = ['scss']
    filenames = ['*.scss']
    mimetypes = ['text/x-scss']

    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@import', Keyword, 'value'),
            (r'@for', Keyword, 'for'),
            (r'@(debug|warn|if|while)', Keyword, 'value'),
            (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
            (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
            (r'@extend', Keyword, 'selector'),
            (r'@[a-z0-9_-]+', Keyword, 'selector'),
            # '$var: value' variable assignment.
            (r'(\$[\w-]\w*)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
            # Lookaheads decide attribute vs. selector without consuming input.
            (r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
            (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
            (r'', Text, 'selector'),
        ],

        'attr': [
            (r'[^\s:="\[]+', Name.Attribute),
            (r'#{', String.Interpol, 'interpolation'),
            (r'[ \t]*:', Operator, 'value'),
        ],

        'inline-comment': [
            (r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
            (r'#\{', String.Interpol, 'interpolation'),
            (r"\*/", Comment, '#pop'),
        ],
    }
    # Pull in the token groups shared with Sass; copy.copy() keeps the
    # extend()s below from mutating the shared common_sass_tokens lists.
    for group, common in common_sass_tokens.iteritems():
        tokens[group] = copy.copy(common)
    tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
    tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
class CoffeeScriptLexer(RegexLexer):
    """
    For `CoffeeScript`_ source code.

    .. _CoffeeScript: http://coffeescript.org

    *New in Pygments 1.3.*
    """

    name = 'CoffeeScript'
    aliases = ['coffee-script', 'coffeescript']
    filenames = ['*.coffee']
    mimetypes = ['text/coffeescript']

    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            # '###...###' block comments; the [^#] keeps '####' from matching.
            (r'###[^#].*?###', Comment.Multiline),
            (r'#(?!##[^#]).*?\n', Comment.Single),
        ],
        # Inside a '///.../' heregex; '#{...}' interpolation is allowed here.
        'multilineregex': [
            (r'[^/#]+', String.Regex),
            (r'///([gim]+\b|\B)', String.Regex, '#pop'),
            (r'#{', String.Interpol, 'interpoling_string'),
            (r'[/#]', String.Regex),
        ],
        # Entered after tokens that may be followed by a regex literal, to
        # disambiguate '/' (division) from '/.../' (regex).
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'///', String.Regex, ('#pop', 'multilineregex')),
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'', Text, '#pop'),
        ],
        'root': [
            # this next expr leads to infinite loops root -> slashstartsregex
            #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
             r'=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            # '(args) ->' / '(args) =>' function literals.
            (r'(?:\([^()]+\))?\s*[=-]>', Name.Function),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(?<![\.\$])(for|own|in|of|while|until|'
             r'loop|break|return|continue|'
             r'switch|when|then|if|unless|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
             r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
            (r'(?<![\.\$])(true|false|yes|no|on|off|null|'
             r'NaN|Infinity|undefined)\b',
             Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
             Name.Builtin),
            # 'name =' / 'name:' assignments; '@name' is an instance variable.
            (r'[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable,
             'slashstartsregex'),
            (r'@[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable.Instance,
             'slashstartsregex'),
            (r'@', Name.Other, 'slashstartsregex'),
            (r'@?[$a-zA-Z_][a-zA-Z0-9_\$]*', Name.Other, 'slashstartsregex'),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all coffee script strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        'interpoling_string' : [
            (r'}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String),  # double-quoted strings don't need ' escapes
            (r'#{', String.Interpol, "interpoling_string"),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String),  # single-quoted strings don't need " escapes
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String),  # no need to escape quotes in triple-strings
            (r'#{', String.Interpol, "interpoling_string"),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String),  # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
class LiveScriptLexer(RegexLexer):
    """
    For `LiveScript`_ source code.

    LiveScript is a CoffeeScript descendant; the state machine mirrors
    :class:`CoffeeScriptLexer` with LiveScript-specific operators, keywords
    and literals added.

    .. _LiveScript: http://gkz.github.com/LiveScript/

    New in Pygments 1.6.
    """

    name = 'LiveScript'
    aliases = ['live-script', 'livescript']
    filenames = ['*.ls']
    mimetypes = ['text/livescript']

    flags = re.DOTALL
    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'#.*?\n', Comment.Single),
        ],
        # Inside a '//...//' multi-line regex literal.
        'multilineregex': [
            include('commentsandwhitespace'),
            (r'//([gim]+\b|\B)', String.Regex, '#pop'),
            (r'/', String.Regex),
            (r'[^/#]+', String.Regex)
        ],
        # Entered after tokens that may be followed by a regex literal, to
        # disambiguate '/' (division) from '/.../' (regex).
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'//', String.Regex, ('#pop', 'multilineregex')),
            (r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'', Text, '#pop'),
        ],
        'root': [
            # this next expr leads to infinite loops root -> slashstartsregex
            #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            # '->', '~>', '-->', '~~>' and the backcall '<-' / '<~' forms.
            (r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
             r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
            (r'\+\+|&&|(?<![\.\$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
             r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
             r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
             r'[+*`%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(?<![\.\$])(for|own|in|of|while|until|loop|break|'
             r'return|continue|switch|when|then|if|unless|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
             r'extends|this|class|by|const|var|to|til)\b', Keyword,
             'slashstartsregex'),
            (r'(?<![\.\$])(true|false|yes|no|on|off|'
             r'null|NaN|Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
             Name.Builtin),
            # 'name =' / 'name:' assignments; '@name' is an instance variable.
            (r'[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable,
             'slashstartsregex'),
            (r'@[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable.Instance,
             'slashstartsregex'),
            (r'@', Name.Other, 'slashstartsregex'),
            (r'@?[$a-zA-Z_][a-zA-Z0-9_\-]*', Name.Other, 'slashstartsregex'),
            # Numbers may carry a radix ('16~ff') or a unit suffix ('10px').
            (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
            (r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
            ('"""', String, 'tdqs'),
            ("'''", String, 'tsqs'),
            ('"', String, 'dqs'),
            ("'", String, 'sqs'),
            # '\word' word literal and '<[ word list ]>'.
            (r'\\[\w$-]+', String),
            (r'<\[.*\]>', String),
        ],
        'strings': [
            (r'[^#\\\'"]+', String),
            # note that all LiveScript strings are multi-line.
            # hashmarks, quotes and backslashes must be parsed one at a time
        ],
        'interpoling_string' : [
            (r'}', String.Interpol, "#pop"),
            include('root')
        ],
        'dqs': [
            (r'"', String, '#pop'),
            (r'\\.|\'', String),  # double-quoted strings don't need ' escapes
            (r'#{', String.Interpol, "interpoling_string"),
            (r'#', String),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            (r'#|\\.|"', String),  # single-quoted strings don't need " escapes
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            (r'\\.|\'|"', String),  # no need to escape quotes in triple-strings
            (r'#{', String.Interpol, "interpoling_string"),
            (r'#', String),
            include('strings'),
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            (r'#|\\.|\'|"', String),  # no need to escape quotes in triple-strings
            include('strings')
        ],
    }
class DuelLexer(RegexLexer):
    """
    Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.

    Delegates to :class:`HtmlLexer` for plain markup and to
    :class:`JavascriptLexer` for the ``<% ... %>`` code regions.

    See http://duelengine.org/.
    See http://jsonml.org/jbst/.

    *New in Pygments 1.4.*
    """

    name = 'Duel'
    aliases = ['duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST']
    filenames = ['*.duel','*.jbst']
    mimetypes = ['text/x-duel','text/x-jbst']

    flags = re.DOTALL
    tokens = {
        'root': [
            # '<% js %>' and the '@=#!:'-flavoured code/expression blocks.
            (r'(<%[@=#!:]?)(.*?)(%>)',
             bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
            # '<%$ name : value %>' declaration blocks.
            (r'(<%\$)(.*?)(:)(.*?)(%>)',
             bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
            # '<%-- comment --%>' server-side comments.
            (r'(<%--)(.*?)(--%>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)',
             bygroups(using(HtmlLexer),
                      using(JavascriptLexer), using(HtmlLexer))),
            # Everything else is HTML, lexed up to the next '<'.
            (r'(.+?)(?=<)', using(HtmlLexer)),
            (r'.+', using(HtmlLexer)),
        ],
    }
class ScamlLexer(ExtendedRegexLexer):
    """
    For `Scaml markup <http://scalate.fusesource.org/>`_.  Scaml is Haml
    for Scala; embedded code snippets are delegated to :class:`ScalaLexer`.

    Uses ``ExtendedRegexLexer`` because the markup is indentation-driven:
    ``root`` consumes only the leading whitespace and lets the
    ``_indentation`` callback choose the state for the rest of the line.

    *New in Pygments 1.4.*
    """

    name = 'Scaml'
    aliases = ['scaml', 'SCAML']
    filenames = ['*.scaml']
    mimetypes = ['text/x-scaml']

    flags = re.IGNORECASE
    # Scaml does not yet support the " |\n" notation to
    # wrap long lines.  Once it does, use the custom faux
    # dot instead.
    # _dot = r'(?: \|\n(?=.* \|)|.)'
    _dot = r'.'

    tokens = {
        # Dispatch on indentation only; _indentation chooses the next state.
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        # '.class' and '#id' shortcuts that open a tag.
        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],

        # '=' / '~' (optionally '&'/'!'-prefixed) run Scala; otherwise plain text.
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             'root'),
            (r'', Text, 'plain'),
        ],

        'content': [
            include('css'),
            (r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            # '/[cond]' conditional and '/' plain HTML comments.
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            # '-@ import ...' attribute/import line, '- code' Scala line.
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],

        'tag': [
            include('css'),
            # '{...}' and '[...]' attribute hashes contain Scala code.
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        # Literal text; '#{...}' interpolates Scala.
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class JadeLexer(ExtendedRegexLexer):
    """
    For Jade markup.

    Jade is a variant of Scaml, see:
    http://scalate.fusesource.org/documentation/scaml-reference.html

    The state machine mirrors :class:`ScamlLexer`; the main difference is in
    ``content``: tags are bare words (no ``%`` prefix) and ``|`` introduces a
    plain-text line.  Embedded code is delegated to :class:`ScalaLexer`.

    *New in Pygments 1.4.*
    """

    name = 'Jade'
    aliases = ['jade', 'JADE']
    filenames = ['*.jade']
    mimetypes = ['text/x-jade']

    flags = re.IGNORECASE
    _dot = r'.'

    tokens = {
        # Dispatch on indentation only; _indentation chooses the next state.
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        # '.class' and '#id' shortcuts that open a tag.
        'css': [
            (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
            (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
        ],

        # '=' / '~' (optionally '&'/'!'-prefixed) run Scala; otherwise plain text.
        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)), 'root'),
            (r'', Text, 'plain'),
        ],

        'content': [
            include('css'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            # '/[cond]' conditional and '/' plain HTML comments.
            (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            # '-@ import ...' attribute/import line, '- code' Scala line.
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            # Jade tags are bare words; '|' starts a plain-text line.
            (r'[a-z0-9_:-]+', Name.Tag, 'tag'),
            (r'\|', Text, 'eval-or-plain'),
        ],

        'tag': [
            include('css'),
            # '{...}' and '[...]' attribute hashes contain Scala code.
            (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + '*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        # Literal text; '#{...}' interpolates Scala.
        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[a-z0-9_:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'[a-z0-9_]+', Name.Variable, '#pop'),
            (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
            (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
            (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + '*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
class XQueryLexer(ExtendedRegexLexer):
"""
An XQuery lexer, parsing a stream and outputting the tokens needed to
highlight xquery code.
*New in Pygments 1.4.*
"""
name = 'XQuery'
aliases = ['xquery', 'xqy']
filenames = ['*.xqy', '*.xquery']
mimetypes = ['text/xquery', 'application/xquery']
xquery_parse_state = []
# FIX UNICODE LATER
#ncnamestartchar = (
# ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
# ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
# ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
# ur"[\u10000-\uEFFFF]"
#)
ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
# FIX UNICODE LATER
#ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
# ur"[\u203F-\u2040]")
ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
pitarget_namestartchar = r"(?:[A-KN-WY-Z]|_|:|[a-kn-wy-z])"
pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
prefixedname = "%s:%s" % (ncname, ncname)
unprefixedname = ncname
qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
# FIX UNICODE LATER
#elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
#quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
# ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
#aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_`\|~]'
# CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
# aposattrcontentchar
#x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
flags = re.DOTALL | re.MULTILINE | re.UNICODE
def punctuation_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def operator_root_callback(lexer, match, ctx):
yield match.start(), Operator, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def popstate_tag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
next_state = lexer.xquery_parse_state.pop()
if next_state == 'occurrenceindicator':
if re.match("[?*+]+", match.group(2)):
yield match.start(), Punctuation, match.group(2)
ctx.stack.append('operator')
ctx.pos = match.end()
else:
ctx.stack.append('operator')
ctx.pos = match.end(1)
else:
ctx.stack.append(next_state)
ctx.pos = match.end(1)
def popstate_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# if we have run out of our state stack, pop whatever is on the pygments
# state stack
if len(lexer.xquery_parse_state) == 0:
ctx.stack.pop()
elif len(ctx.stack) > 1:
ctx.stack.append(lexer.xquery_parse_state.pop())
else:
# i don't know if i'll need this, but in case, default back to root
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_element_content_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('element_content')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.pos = match.end()
def pushstate_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_order_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate_withmode(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Keyword, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('kindtest')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtestforpi')
ctx.pos = match.end()
def pushstate_operator_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('occurrenceindicator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']#.append('root')
ctx.pos = match.end()
def pushstate_operator_root_construct_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
cur_state = ctx.stack.pop()
lexer.xquery_parse_state.append(cur_state)
ctx.stack = ['root']#.append('root')
ctx.pos = match.end()
def pushstate_operator_attribute_callback(lexer, match, ctx):
yield match.start(), Name.Attribute, match.group(1)
ctx.stack.append('operator')
ctx.pos = match.end()
def pushstate_operator_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
tokens = {
'comment': [
# xquery comments
(r'(:\))', Comment, '#pop'),
(r'(\(:)', Comment, '#push'),
(r'[^:)]', Comment),
(r'([^:)]|:|\))', Comment),
],
'whitespace': [
(r'\s+', Text),
],
'operator': [
include('whitespace'),
(r'(\})', popstate_callback),
(r'\(:', Comment, 'comment'),
(r'(\{)', pushstate_root_callback),
(r'then|else|external|at|div|except', Keyword, 'root'),
(r'order by', Keyword, 'root'),
(r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
(r'and|or', Operator.Word, 'root'),
(r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
Operator.Word, 'root'),
(r'return|satisfies|to|union|where|preserve\s+strip',
Keyword, 'root'),
(r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\||:=|=)',
operator_root_callback),
(r'(::|;|\[|//|/|,)',
punctuation_root_callback),
(r'(castable|cast)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(instance)(\s+)(of)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(treat)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(case|as)\b', Keyword, 'itemtype'),
(r'(\))(\s*)(as)',
bygroups(Punctuation, Text, Keyword), 'itemtype'),
(r'\$', Name.Variable, 'varname'),
(r'(for|let)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
#(r'\)|\?|\]', Punctuation, '#push'),
(r'\)|\?|\]', Punctuation),
(r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
(r'ascending|descending|default', Keyword, '#push'),
(r'external', Keyword),
(r'collation', Keyword, 'uritooperator'),
# finally catch all string literals and stay in operator state
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'(catch)(\s*)', bygroups(Keyword, Text), 'root'),
],
'uritooperator': [
(stringdouble, String.Double, '#pop'),
(stringsingle, String.Single, '#pop'),
],
'namespacedecl': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)),
(r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r',', Punctuation),
(r'=', Operator),
(r';', Punctuation, 'root'),
(ncname, Name.Namespace),
],
'namespacekeyword': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double, 'namespacedecl'),
(stringsingle, String.Single, 'namespacedecl'),
(r'inherit|no-inherit', Keyword, 'root'),
(r'namespace', Keyword, 'namespacedecl'),
(r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
(r'preserve|no-preserve', Keyword),
(r',', Punctuation),
],
'varname': [
(r'\(:', Comment, 'comment'),
(qname, Name.Variable, 'operator'),
],
'singletype': [
(r'\(:', Comment, 'comment'),
(ncname + r'(:\*)', Name.Variable, 'operator'),
(qname, Name.Variable, 'operator'),
],
'itemtype': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\$', Punctuation, 'varname'),
(r'(void)(\s*)(\()(\s*)(\))',
bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
(r'(element|attribute|schema-element|schema-attribute|comment|text|'
r'node|binary|document-node|empty-sequence)(\s*)(\()',
pushstate_occurrenceindicator_kindtest_callback),
# Marklogic specific type?
(r'(processing-instruction)(\s*)(\()',
bygroups(Keyword, Text, Punctuation),
('occurrenceindicator', 'kindtestforpi')),
(r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
bygroups(Keyword, Text, Punctuation, Text, Punctuation),
'occurrenceindicator'),
(r'\(\#', Punctuation, 'pragma'),
(r';', Punctuation, '#pop'),
(r'then|else', Keyword, '#pop'),
(r'(at)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'namespacedecl'),
(r'(at)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'namespacedecl'),
(r'except|intersect|in|is|return|satisfies|to|union|where',
Keyword, 'root'),
(r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
(r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|', Operator, 'root'),
(r'external|at', Keyword, 'root'),
(r'(stable)(\s+)(order)(\s+)(by)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
(r'(castable|cast)(\s+)(as)',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
(r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
(r'case|as', Keyword, 'itemtype'),
(r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(ncname + r':\*', Keyword.Type, 'operator'),
(qname, Keyword.Type, 'occurrenceindicator'),
],
'kindtest': [
(r'\(:', Comment, 'comment'),
(r'{', Punctuation, 'root'),
(r'(\))([*+?]?)', popstate_kindtest_callback),
(r'\*', Name, 'closekindtest'),
(qname, Name, 'closekindtest'),
(r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
],
'kindtestforpi': [
(r'\(:', Comment, 'comment'),
(r'\)', Punctuation, '#pop'),
(ncname, Name.Variable),
(stringdouble, String.Double),
(stringsingle, String.Single),
],
'closekindtest': [
(r'\(:', Comment, 'comment'),
(r'(\))', popstate_callback),
(r',', Punctuation),
(r'(\{)', pushstate_operator_root_callback),
(r'\?', Punctuation),
],
'xml_comment': [
(r'(-->)', popstate_xmlcomment_callback),
(r'[^-]{1,2}', Literal),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'processing_instruction': [
(r'\s+', Text, 'processing_instruction_content'),
(r'\?>', String.Doc, '#pop'),
(pitarget, Name),
],
'processing_instruction_content': [
(r'\?>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'cdata_section': [
(r']]>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'start_tag': [
include('whitespace'),
(r'(/>)', popstate_tag_callback),
(r'>', Name.Tag, 'element_content'),
(r'"', Punctuation, 'quot_attribute_content'),
(r"'", Punctuation, 'apos_attribute_content'),
(r'=', Operator),
(qname, Name.Tag),
],
'quot_attribute_content': [
(r'"', Punctuation, 'start_tag'),
(r'(\{)', pushstate_root_callback),
(r'""', Name.Attribute),
(quotattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'apos_attribute_content': [
(r"'", Punctuation, 'start_tag'),
(r'\{', Punctuation, 'root'),
(r"''", Name.Attribute),
(aposattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'element_content': [
(r'</', Name.Tag, 'end_tag'),
(r'(\{)', pushstate_root_callback),
(r'(<!--)', pushstate_element_content_xmlcomment_callback),
(r'(<\?)', pushstate_element_content_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
(r'(<)', pushstate_element_content_starttag_callback),
(elementcontentchar, Literal),
(entityref, Literal),
(charref, Literal),
(r'\{\{|\}\}', Literal),
],
'end_tag': [
include('whitespace'),
(r'(>)', popstate_tag_callback),
(qname, Name.Tag),
],
'xmlspace_decl': [
(r'\(:', Comment, 'comment'),
(r'preserve|strip', Keyword, '#pop'),
],
'declareordering': [
(r'\(:', Comment, 'comment'),
include('whitespace'),
(r'ordered|unordered', Keyword, '#pop'),
],
'xqueryversion': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'encoding', Keyword),
(r';', Punctuation, '#pop'),
],
'pragma': [
(qname, Name.Variable, 'pragmacontents'),
],
'pragmacontents': [
(r'#\)', Punctuation, 'operator'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
(r'(\s+)', Text),
],
'occurrenceindicator': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\*|\?|\+', Operator, 'operator'),
(r':=', Operator, 'root'),
(r'', Text, 'operator'),
],
'option': [
include('whitespace'),
(qname, Name.Variable, '#pop'),
],
'qname_braren': [
include('whitespace'),
(r'(\{)', pushstate_operator_root_callback),
(r'(\()', Punctuation, 'root'),
],
'element_qname': [
(qname, Name.Variable, 'root'),
],
'attribute_qname': [
(qname, Name.Variable, 'root'),
],
'root': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
# handle operator state
# order on numbers matters - handle most complex first
(r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+)[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+|\d+\.\d*)', Number, 'operator'),
(r'(\d+)', Number.Integer, 'operator'),
(r'(\.\.|\.|\))', Punctuation, 'operator'),
(r'(declare)(\s+)(construction)',
bygroups(Keyword, Text, Keyword), 'operator'),
(r'(declare)(\s+)(default)(\s+)(order)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'),
(ncname + ':\*', Name, 'operator'),
('\*:'+ncname, Name.Tag, 'operator'),
('\*', Name.Tag, 'operator'),
(stringdouble, String.Double, 'operator'),
(stringsingle, String.Single, 'operator'),
(r'(\})', popstate_callback),
#NAMESPACE DECL
(r'(declare)(\s+)(default)(\s+)(collation)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(module|declare)(\s+)(namespace)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
(r'(declare)(\s+)(base-uri)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
#NAMESPACE KEYWORD
(r'(declare)(\s+)(default)(\s+)(element|function)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'),
(r'(import)(\s+)(schema|module)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'),
(r'(declare)(\s+)(copy-namespaces)',
bygroups(Keyword, Text, Keyword), 'namespacekeyword'),
#VARNAMEs
(r'(for|let|some|every)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
(r'\$', Name.Variable, 'varname'),
(r'(declare)(\s+)(variable)(\s+)(\$)',
bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
#ITEMTYPE
(r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(r'(element|attribute|schema-element|schema-attribute|comment|'
r'text|node|document-node|empty-sequence)(\s+)(\()',
pushstate_operator_kindtest_callback),
(r'(processing-instruction)(\s+)(\()',
pushstate_operator_kindtestforpi_callback),
(r'(<!--)', pushstate_operator_xmlcomment_callback),
(r'(<\?)', pushstate_operator_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
# (r'</', Name.Tag, 'end_tag'),
(r'(<)', pushstate_operator_starttag_callback),
(r'(declare)(\s+)(boundary-space)',
bygroups(Keyword, Text, Keyword), 'xmlspace_decl'),
(r'(validate)(\s+)(lax|strict)',
pushstate_operator_root_validate_withmode),
(r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
(r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'(element|attribute)(\s*)(\{)',
pushstate_operator_root_construct_callback),
(r'(document|text|processing-instruction|comment)(\s*)(\{)',
pushstate_operator_root_construct_callback),
#ATTRIBUTE
(r'(attribute)(\s+)(?=' + qname + r')',
bygroups(Keyword, Text), 'attribute_qname'),
#ELEMENT
(r'(element)(\s+)(?=' +qname+ r')',
bygroups(Keyword, Text), 'element_qname'),
#PROCESSING_INSTRUCTION
(r'(processing-instruction)(\s+)(' + ncname + r')(\s*)(\{)',
bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
'operator'),
(r'(declare|define)(\s+)(function)',
bygroups(Keyword, Text, Keyword)),
(r'(\{)', pushstate_operator_root_callback),
(r'(unordered|ordered)(\s*)(\{)',
pushstate_operator_order_callback),
(r'(declare)(\s+)(ordering)',
bygroups(Keyword, Text, Keyword), 'declareordering'),
(r'(xquery)(\s+)(version)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'),
(r'(\(#)', Punctuation, 'pragma'),
# sometimes return can occur in root state
(r'return', Keyword),
(r'(declare)(\s+)(option)', bygroups(Keyword, Text, Keyword),
'option'),
#URI LITERALS - single and double quoted
(r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
(r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
(r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
bygroups(Keyword, Punctuation)),
(r'(descendant|following-sibling|following|parent|preceding-sibling'
r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'then|else', Keyword),
# ML specific
(r'(try)(\s*)', bygroups(Keyword, Text), 'root'),
(r'(catch)(\s*)(\()(\$)',
bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),
(r'(@'+qname+')', Name.Attribute),
(r'(@'+ncname+')', Name.Attribute),
(r'@\*:'+ncname, Name.Attribute),
(r'(@)', Name.Attribute),
(r'//|/|\+|-|;|,|\(|\)', Punctuation),
# STANDALONE QNAMES
(qname + r'(?=\s*{)', Name.Tag, 'qname_braren'),
(qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
(qname, Name.Tag, 'operator'),
]
}
class DartLexer(RegexLexer):
    """
    For `Dart <http://dartlang.org/>`_ source code.

    *New in Pygments 1.5.*
    """

    name = 'Dart'
    aliases = ['dart']
    filenames = ['*.dart']
    mimetypes = ['text/x-dart']

    # DOTALL lets /* ... */ span lines; MULTILINE anchors the shebang rule
    # below to the start of each line.
    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # shebang line (scripted Dart)
            (r'#!(.*?)$', Comment.Preproc),
            # old-style directives: #import, #library, #source
            (r'(#)(import|library|source)', bygroups(Text, Keyword)),
            (r'[^\S\n]+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            # class/interface header; the name itself is consumed in 'class'
            (r'(class|interface)(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'(assert|break|case|catch|continue|default|do|else|finally|for|'
             r'if|in|is|new|return|super|switch|this|throw|try|while)\b',
             Keyword),
            (r'(abstract|const|extends|factory|final|get|implements|'
             r'native|operator|set|static|typedef|var)\b', Keyword.Declaration),
            (r'(bool|double|Dynamic|int|num|Object|String|void)', Keyword.Type),
            (r'(false|null|true)', Keyword.Constant),
            (r'@"(\\\\|\\"|[^"])*"', String.Double), # raw string
            (r"@'(\\\\|\\'|[^'])*'", String.Single), # raw string
            # ordinary strings are handled in their own states so that
            # $interpolation inside them can be highlighted
            (r'"', String.Double, 'string_double'),
            (r"'", String.Single, 'string_single'),
            # identifier followed by ':' is a label
            (r'[a-zA-Z_$][a-zA-Z0-9_]*:', Name.Label),
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[(){}\[\],.;]', Punctuation),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            # DIGIT+ (‘.’ DIGIT*)? EXPONENT?
            (r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
            (r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT?
            (r'\n', Text)
            # pseudo-keyword negate intentionally left out
        ],
        'class': [
            # the class/interface name; return to 'root' afterwards
            (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'string_double': [
            (r'"', String.Double, '#pop'),
            (r'[^"$]+', String.Double),
            # $identifier interpolation
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
            # ${expression} interpolation, lexed recursively with this lexer
            (r'(\$\{)(.*?)(\})',
             bygroups(String.Interpol, using(this), String.Interpol)),
            # stray dollar signs that did not start an interpolation
            (r'\$+', String.Double)
        ],
        'string_single': [
            (r"'", String.Single, '#pop'),
            (r"[^'$]+", String.Single),
            # $identifier interpolation
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
            # ${expression} interpolation, lexed recursively with this lexer
            (r'(\$\{)(.*?)(\})',
             bygroups(String.Interpol, using(this), String.Interpol)),
            # stray dollar signs that did not start an interpolation
            (r'\$+', String.Single)
        ]
    }
class LassoLexer(RegexLexer):
    """
    For `Lasso <http://www.lassosoft.com/>`_ source code, covering both
    Lasso 9 syntax and LassoScript for Lasso 8.6 and earlier. For Lasso
    embedded in HTML, use the `LassoHtmlLexer`.

    Additional options accepted:

    `builtinshighlighting`
        If given and ``True``, highlight builtin tags, types, traits, and
        methods (default: ``True``).
    `requiredelimiters`
        If given and ``True``, only highlight code between delimiters as Lasso
        (default: ``False``).

    *New in Pygments 1.6.*
    """

    name = 'Lasso'
    aliases = ['lasso', 'lassoscript']
    filenames = ['*.lasso', '*.lasso[89]']
    alias_filenames = ['*.incl', '*.inc', '*.las']
    mimetypes = ['text/x-lasso']
    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE

    tokens = {
        # Outside any delimiter: only a lasso9 shebang switches the whole
        # file to Lasso; otherwise code must appear inside [ ... ] or
        # <?lasso ... ?> delimiters.
        'root': [
            (r'^#!.+lasso9\b', Comment.Preproc, 'lasso'),
            (r'\s+', Other),
            (r'\[noprocess\]', Comment.Preproc, ('delimiters', 'noprocess')),
            (r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')),
            (r'<\?(LassoScript|lasso|=)', Comment.Preproc,
                ('delimiters', 'anglebrackets')),
            (r'<', Other, 'delimiters'),
            include('lasso'),
        ],
        # Between delimited regions everything is passed through as Other.
        'delimiters': [
            (r'\[noprocess\]', Comment.Preproc, 'noprocess'),
            (r'\[', Comment.Preproc, 'squarebrackets'),
            (r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
            (r'<', Other),
            (r'[^[<]+', Other),
        ],
        # [noprocess] ... [/noprocess] blocks are emitted verbatim.
        'noprocess': [
            (r'\[/noprocess\]', Comment.Preproc, '#pop'),
            (r'\[', Other),
            (r'[^[]', Other),
        ],
        'squarebrackets': [
            (r'\]', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        'anglebrackets': [
            (r'\?>', Comment.Preproc, '#pop'),
            include('lasso'),
        ],
        # The actual Lasso language rules, shared by all delimiter states.
        'lasso': [
            # whitespace/comments
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*\*!.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            # names
            (r'\$[a-z_][\w\.]*', Name.Variable),
            (r'(#[a-z_][\w\.]*|#\d+)', Name.Variable.Instance),
            (r"\.'[a-z_][\w\.]*'", Name.Variable.Class),
            (r"(self)(->)('[a-z_][\w\.]*')",
                bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)),
            (r'(self|void)\b', Name.Builtin.Pseudo),
            (r'-[a-z_][\w\.]*', Name.Attribute),
            (r'(::)([a-z_][\w\.]*)', bygroups(Punctuation, Name.Label)),
            (r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
             r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
             r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
             r'Error_InvalidDatabase|Error_InvalidPassword|'
             r'Error_InvalidUsername|Error_ModuleNotFound|'
             r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
             r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
             r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
             r'Error_UpdateError)\b', Name.Exception),
            # definitions
            (r'(parent)(\s+)([a-z_][\w\.]*)',
                bygroups(Keyword.Declaration, Text, Name.Class)),
            (r'(define)(\s+)([a-z_][\w\.]*)(\s*)(=>)(\s*)(type|trait|thread)',
                bygroups(Keyword.Declaration, Text, Name.Class, Text, Operator,
                         Text, Keyword)),
            (r'(define)(\s+)([a-z_][\w\.]*)(->)([a-z_][\w\.]*=?)',
                bygroups(Keyword.Declaration, Text, Name.Class, Operator,
                         Name.Function)),
            (r'(define)(\s+)([a-z_][\w\.]*=?)',
                bygroups(Keyword.Declaration, Text, Name.Function)),
            (r'(public|protected|private)(\s+)([a-z_][\w\.]*)(\s*)(=>)',
                bygroups(Keyword, Text, Name.Function, Text, Operator)),
            (r'(public|protected|private|provide)(\s+)([a-z_][\w\.]*=?)(\s*)(\()',
                bygroups(Keyword, Text, Name.Function, Text, Punctuation)),
            # keywords
            (r'\.\.\.', Keyword.Pseudo),
            (r'(true|false|null|[+\-]?infinity|\+?NaN)\b', Keyword.Constant),
            (r'(local|var|variable|global|data)\b', Keyword.Declaration),
            (r'(array|date|decimal|duration|integer|map|pair|string|tag|'
             r'xml)\b', Keyword.Type),
            # Lasso 8 tags, optionally closing (leading '/')
            (r'(/?)(Cache|Database_Names|Database_SchemaNames|'
             r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
             r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
             r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
             r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|'
             r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|'
             r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|'
             r'Namespace_Using|NoProcess|Output_None|Portal|Private|Protect|'
             r'Records|Referer|Referrer|Repeating|ResultSet|Rows|Search_Args|'
             r'Search_Arguments|Select|Sort_Args|Sort_Arguments|Thread_Atomic|'
             r'Value_List|While|Abort|Case|Else|If_Empty|If_False|If_Null|'
             r'If_True|Loop_Abort|Loop_Continue|Loop_Count|Params|Params_Up|'
             r'Return|Return_Value|Run_Children|SOAP_DefineTag|'
             r'SOAP_LastRequest|SOAP_LastResponse|Tag_Name)\b',
                bygroups(Punctuation, Keyword)),
            (r'(and|ascending|average|by|case|define|descending|do|else|'
             r'equals|frozen|group|import|in|inherited|into|join|let|match|'
             r'max|min|not|on|or|order|params|parent|private|protected|'
             r'provide|public|require|return|select|skip|sum|take|thread|to|'
             r'trait|type|where|with)\b', Keyword),
            # literals
            (r'([+\-]?\d*\.\d+(e[+\-]?\d+)?)', Number.Float),
            (r'0x[\da-f]+', Number.Hex),
            (r'[+\-]?\d+', Number.Integer),
            (r"'", String.Single, 'singlestring'),
            (r'"', String.Double, 'doublestring'),
            (r'`[^`]*`', String.Backtick),
            # other
            (r'(=)(bw|ew|cn|lte?|gte?|n?eq|ft|n?rx)\b',
                bygroups(Operator, Operator.Word)),
            (r'([=\+\-\*/%<>&|!\?\.\\]+|:=)', Operator),
            (r'[{}():;,@^]', Punctuation),
            # any remaining word is a candidate builtin; promoted to
            # Name.Builtin in get_tokens_unprocessed() below
            (r'(/?)([\w\.]+)', bygroups(Punctuation, Name.Other)),
        ],
        'singlestring': [
            (r"'", String.Single, '#pop'),
            (r"[^'\\]+", String.Single),
            include('escape'),
            (r"\\+", String.Single),
        ],
        'doublestring': [
            (r'"', String.Double, '#pop'),
            (r'[^"\\]+', String.Double),
            include('escape'),
            (r'\\+', String.Double),
        ],
        'escape': [
            (r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|'
             r'[abefnrtv\"\'\?\\]|$)', String.Escape),
        ],
    }

    def __init__(self, **options):
        """Read lexer options and preload the builtin-name set if enabled."""
        self.builtinshighlighting = get_bool_opt(
            options, 'builtinshighlighting', True)
        self.requiredelimiters = get_bool_opt(
            options, 'requiredelimiters', False)

        self._builtins = set()
        if self.builtinshighlighting:
            from pygments.lexers._lassobuiltins import BUILTINS
            # items() is equivalent to the Python-2-only iteritems() here
            # and keeps this code working on Python 3 as well.
            for key, value in BUILTINS.items():
                self._builtins.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process the token stream, upgrading known builtin names.

        Starts in the 'delimiters' state when `requiredelimiters` is set,
        and rewrites Name.Other tokens to Name.Builtin when the (lowercased)
        value is a known Lasso builtin.
        """
        stack = ['root']
        if self.requiredelimiters:
            stack.append('delimiters')
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Other:
                if value.lower() in self._builtins:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value

    def analyse_text(text):
        # Heuristic score in [0, 1]-ish range used by guess_lexer():
        # shebang is the strongest signal, then Lasso-specific delimiters
        # and constructs.
        rv = 0.0
        if 'bin/lasso9' in text:
            rv += 0.8
        if re.search(r'<\?(=|lasso)', text, re.I):
            rv += 0.4
        if re.search(r'local\(', text, re.I):
            rv += 0.4
        if re.search(r'(\[\n|\?>)', text):
            rv += 0.4
        return rv
| en | 0.324131 | # -*- coding: utf-8 -*- pygments.lexers.web ~~~~~~~~~~~~~~~~~~~ Lexers for web-related languages and markup. :copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. For JavaScript source code. For JSON data structures. *New in Pygments 1.5.* # integer part of a number # fractional part of a number # exponential part of a number # represents a simple terminal value # the right hand side of an object, after the attribute name # comma terminates the attribute but expects more # a closing bracket terminates the entire object, so pop twice # a json object - { attr, attr, ... } # json array - [ value, value, ... } # a json value - either a simple value or a complex value (object or array) # the root of a json document whould be a value # for backwards compatibility with Pygments 1.5 For ActionScript source code. *New in Pygments 0.9.* For ActionScript 3 source code. *New in Pygments 0.11.* For CSS (Cascading Style Sheets). #[a-zA-Z0-9_-]+', Name.Function), #[a-zA-Z0-9]{1,6}', Number), For Objective-J source code with preprocessor directives. 
*New in Pygments 1.3.* #: optional Comment or Whitespace # function definition # class definition #(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")', #(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)', # line continuation # interface definition that inherits # interface definition for a category # simple interface / implementation # start of a selector w/ parameters # open paren # return type # close paren # function name # no-param function # open paren # return type # close paren # function name # no return type given, start of a selector w/ parameters # function name # no return type given, no-param function # function name # parameters # open paren # type # close paren # param name # one piece of a selector name # function name # smallest possible selector piece # var args # param name # all other characters # line continuation # stray backslash #if.*?(?<!\\)\n', Comment.Preproc, '#push'), #endif.*?(?<!\\)\n', Comment.Preproc, '#pop'), # special directive found in most Objective-J files For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted by the appropriate lexer. For `PHP <http://www.php.net/>`_ source code. For PHP embedded in HTML, use the `HtmlPhpLexer`. Additional options accepted: `startinline` If given and ``True`` the lexer starts highlighting with php code (i.e.: no starting ``<?php`` required). The default is ``False``. `funcnamehighlighting` If given and ``True``, highlight builtin function names (default: ``True``). `disabledmodules` If given, must be a list of module names whose function names should not be highlighted. By default all modules are highlighted except the special ``'unknown'`` module that includes functions that are known to php but are undocumented. To get a list of allowed modules have a look into the `_phpbuiltins` module: .. sourcecode:: pycon >>> from pygments.lexers._phpbuiltins import MODULES >>> MODULES.keys() ['PHP Options/Info', 'Zip', 'dba', ...] 
In fact the names of those modules match the module names from the php documentation. # put the empty comment here, it is otherwise seen as # the start of a docstring # private option argument for the lexer itself # collect activated functions in a set A lexer for DTDs (Document Type Definitions). *New in Pygments 1.5.* # conditional sections #PCDATA', Keyword.Constant), #IMPLIED|#FIXED', Keyword.Constant), Generic lexer for XML (eXtensible Markup Language). A lexer for XSLT. *New in Pygments 0.10.* # xpl is XProc For MXML markup. Nested AS3 in <script> tags is highlighted by the appropriate lexer. *New in Pygments 1.1.* For haXe source code (http://haxe.org/). *New in Pygments 1.3.* # General constructs # Instance/Block level constructs # Top level constructs For Haml markup. *New in Pygments 1.3.* # Haml can include " |\n" anywhere, # which is ignored and used to wrap long lines. # To accomodate this, use this custom faux dot instead. # In certain places, a comma at the end of the line # allows line wrapping as well. #[a-z0-9_:-]+', Name.Function, 'tag'), #' + _dot + r'*\n', _starts_block(Comment.Preproc, #\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), #\{)(' + _dot + '*?)(\})', #\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), #\{)(' + _dot + '*?)(\})', #[a-z0-9]{1,6}', Number.Hex), #', Name.Namespace, 'id'), #(?=[^\n{])|[^\n"#])+', String.Double), #(?=[^\n{])|[^\n'#])+", String.Double), #|#(?=[^\n{])|[^\n#)])+', String.Other), For Sass stylesheets. *New in Pygments 1.3.* #|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline), For SCSS stylesheets. #|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline), For `CoffeeScript`_ source code. .. 
_CoffeeScript: http://coffeescript.org *New in Pygments 1.3.* ##[^#].*?###', Comment.Multiline), ##[^#]).*?\n', Comment.Single), #]+', String.Regex), #]', String.Regex), # this next expr leads to infinite loops root -> slashstartsregex #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), ', String, 'tdqs'), ("'''", String, 'tsqs'), ('"', String, 'dqs'), ("'", String, 'sqs'), ], 'strings': [ (r'[^#\\\'"]+', String), # note that all coffee script strings are multi-line. # hashmarks, quotes and backslashes must be parsed one at a time ], 'interpoling_string' : [ (r'}', String.Interpol, "#pop"), include('root') ], 'dqs': [ (r'"', String, '#pop'), (r'\\.|\'', String), # double-quoted string don't need ' escapes (r'#{', String.Interpol, "interpoling_string"), include('strings') ], 'sqs': [ (r"'", String, '#pop'), (r'#|\\.|"', String), # single quoted strings don't need " escapses include('strings') ], 'tdqs': [ (r' # no need to escape quotes in triple-string ", String, '#pop'), (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings include('strings') ], } class LiveScriptLexer(RegexLexer): """ For `LiveScript`_ source code. .. _LiveScript: http://gkz.github.com/LiveScript/ New in Pygments 1.6. """ name = 'LiveScript' aliases = ['live-script', 'livescript'] filenames = ['*.ls'] mimetypes = ['text/livescript'] flags = re.DOTALL tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'/\*.*?\*/', Comment.Multiline), (r'#.*?\n', Comment.Single), ], 'multilineregex': [ include('commentsandwhitespace'), (r'//([gim]+\b|\B)', String.Regex, '#pop'), (r'/', String.Regex), (r'[^/#]+', String.Regex) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'//', String.Regex, ('#pop', 'multilineregex')), (r'/(?! 
)(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'([gim]+\b|\B)', String.Regex, '#pop'), (r'', Text, '#pop'), ], 'root': [ # this next expr leads to infinite loops root -> slashstartsregex #(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|' r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function), (r'\+\+|&&|(?<![\.\$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|' r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|' r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|' r'[+*`%&\|\^/])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(?<![\.\$])(for|own|in|of|while|until|loop|break|' r'return|continue|switch|when|then|if|unless|else|' r'throw|try|catch|finally|new|delete|typeof|instanceof|super|' r'extends|this|class|by|const|var|to|til)\b', Keyword, 'slashstartsregex'), (r'(?<![\.\$])(true|false|yes|no|on|off|' r'null|NaN|Infinity|undefined|void)\b', Keyword.Constant), (r'(Array|Boolean|Date|Error|Function|Math|netscape|' r'Number|Object|Packages|RegExp|String|sun|decodeURI|' r'decodeURIComponent|encodeURI|encodeURIComponent|' r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b', Name.Builtin), (r'[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable, 'slashstartsregex'), (r'@[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable.Instance, 'slashstartsregex'), (r'@', Name.Other, 'slashstartsregex'), (r'@?[$a-zA-Z_][a-zA-Z0-9_\-]*', Name.Other, 'slashstartsregex'), (r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float), (r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer), ('"""', String, 'tdqs'), (" #\\\'"]+', String), # note that all coffee script strings are multi-line. 
# hashmarks, quotes and backslashes must be parsed one at a time # double-quoted string don't need ' escapes # single quoted strings don't need " escapses ', String, '#pop'), (r'\\.|\'|"', String), # no need to escape quotes in triple-string (r'#{', String.Interpol, "interpoling_string"), (r'#', String), include('strings'), ], 'tsqs': [ (r"'''", String, '#pop'), (r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings include('strings') ], } class DuelLexer(RegexLexer): name = 'Duel' aliases = ['duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST'] filenames = ['*.duel','*.jbst'] mimetypes = ['text/x-duel','text/x-jbst'] flags = re.DOTALL tokens = { 'root': [ (r'(<%[@=#!:]?)(.*?)(%>)', bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)), (r'(<%\$)(.*?)(:)(.*?)(%>)', bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)), (r'(<%--)(.*?)(--%>)', bygroups(Name.Tag, Comment.Multiline, Name.Tag)), (r'(<script.*?>)(.*?)(</script>)', bygroups(using(HtmlLexer), using(JavascriptLexer), using(HtmlLexer))), (r'(.+?)(?=<)', using(HtmlLexer)), (r'.+', using(HtmlLexer)), ], } class ScamlLexer(ExtendedRegexLexer): name = 'Scaml' aliases = ['scaml', 'SCAML'] filenames = ['*.scaml'] mimetypes = ['text/x-scaml'] flags = re.IGNORECASE # Scaml does not yet support the " |\n" notation to # wrap long lines. Once it does, use the custom faux # dot instead. # _dot = r'(?: \|\n(?=.* \|)|.)' _dot = r'.' tokens = { 'root': [ (r'[ \t]*\n', Text), (r'[ \t]*', _indentation), ], 'css': [ (r'\.[a-z0-9_:-]+', Name.Class, 'tag'), (r'\#[a-z0-9_:-]+', Name.Function, 'tag'), ], 'eval-or-plain': [ (r'[&!]?==', Punctuation, 'plain'), (r'([&!]?[=~])(' + _dot + r'*\n)', bygroups(Punctuation, using(ScalaLexer)), 'root'), (r'', Text, 'plain'), ], 'content': [ include('css'), (r'%[a-z0-9_:-]+', Name.Tag, 'tag'), (r'!!!' 
+ _dot + r'*\n', Name.Namespace, '#pop'), (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)', bygroups(Comment, Comment.Special, Comment), '#pop'), (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), '#pop'), (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, 'scaml-comment-block'), '#pop'), (r'(-@\s*)(import)?(' + _dot + r'*\n)', bygroups(Punctuation, Keyword, using(ScalaLexer)), '#pop'), (r'(-)(' + _dot + r'*\n)', bygroups(Punctuation, using(ScalaLexer)), '#pop'), (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), '#pop'), include('eval-or-plain'), ], 'tag': [ include('css'), (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)), (r'\[' + _dot + '*?\]', using(ScalaLexer)), (r'\(', Text, 'html-attributes'), (r'/[ \t]*\n', Punctuation, '#pop:2'), (r'[<>]{1,2}(?=[ \t=])', Punctuation), include('eval-or-plain'), ], 'plain': [ (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), (r'(#\{)(' + _dot + '*?)(\})', bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), (r'\n', Text, 'root'), ], 'html-attributes': [ (r'\s+', Text), (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), (r'[a-z0-9_:-]+', Name.Attribute), (r'\)', Text, '#pop'), ], 'html-attribute-value': [ (r'[ \t]+', Text), (r'[a-z0-9_]+', Name.Variable, '#pop'), (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'), (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'), (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), ], 'html-comment-block': [ (_dot + '+', Comment), (r'\n', Text, 'root'), ], 'scaml-comment-block': [ (_dot + '+', Comment.Preproc), (r'\n', Text, 'root'), ], 'filter-block': [ (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), (r'(#\{)(' + _dot + '*?)(\})', bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), (r'\n', Text, 'root'), ], } class JadeLexer(ExtendedRegexLexer): name = 'Jade' aliases = ['jade', 'JADE'] filenames = ['*.jade'] mimetypes = ['text/x-jade'] flags = re.IGNORECASE _dot = r'.' 
tokens = { 'root': [ (r'[ \t]*\n', Text), (r'[ \t]*', _indentation), ], 'css': [ (r'\.[a-z0-9_:-]+', Name.Class, 'tag'), (r'\#[a-z0-9_:-]+', Name.Function, 'tag'), ], 'eval-or-plain': [ (r'[&!]?==', Punctuation, 'plain'), (r'([&!]?[=~])(' + _dot + r'*\n)', bygroups(Punctuation, using(ScalaLexer)), 'root'), (r'', Text, 'plain'), ], 'content': [ include('css'), (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'), (r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)', bygroups(Comment, Comment.Special, Comment), '#pop'), (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'), '#pop'), (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc, 'scaml-comment-block'), '#pop'), (r'(-@\s*)(import)?(' + _dot + r'*\n)', bygroups(Punctuation, Keyword, using(ScalaLexer)), '#pop'), (r'(-)(' + _dot + r'*\n)', bygroups(Punctuation, using(ScalaLexer)), '#pop'), (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'), '#pop'), (r'[a-z0-9_:-]+', Name.Tag, 'tag'), (r'\|', Text, 'eval-or-plain'), ], 'tag': [ include('css'), (r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)), (r'\[' + _dot + '*?\]', using(ScalaLexer)), (r'\(', Text, 'html-attributes'), (r'/[ \t]*\n', Punctuation, '#pop:2'), (r'[<>]{1,2}(?=[ \t=])', Punctuation), include('eval-or-plain'), ], 'plain': [ (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text), (r'(#\{)(' + _dot + '*?)(\})', bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), (r'\n', Text, 'root'), ], 'html-attributes': [ (r'\s+', Text), (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'), (r'[a-z0-9_:-]+', Name.Attribute), (r'\)', Text, '#pop'), ], 'html-attribute-value': [ (r'[ \t]+', Text), (r'[a-z0-9_]+', Name.Variable, '#pop'), (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'), (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'), (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'), (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'), ], 'html-comment-block': [ (_dot + '+', Comment), (r'\n', Text, 'root'), ], 'scaml-comment-block': [ (_dot + '+', 
Comment.Preproc), (r'\n', Text, 'root'), ], 'filter-block': [ (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator), (r'(#\{)(' + _dot + '*?)(\})', bygroups(String.Interpol, using(ScalaLexer), String.Interpol)), (r'\n', Text, 'root'), ], } class XQueryLexer(ExtendedRegexLexer): name = 'XQuery' aliases = ['xquery', 'xqy'] filenames = ['*.xqy', '*.xquery'] mimetypes = ['text/xquery', 'application/xquery'] xquery_parse_state = [] # FIX UNICODE LATER #ncnamestartchar = ( # ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|" # ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|" # ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|" # ur"[\u10000-\uEFFFF]" #) ncnamestartchar = r"(?:[A-Z]|_|[a-z])" # FIX UNICODE LATER #ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|" # ur"[\u203F-\u2040]") ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])" ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar) pitarget_namestartchar = r"(?:[A-KN-WY-Z]|_|:|[a-kn-wy-z])" pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])" pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar) prefixedname = "%s:%s" % (ncname, ncname) unprefixedname = ncname qname = "(?:%s|%s)" % (prefixedname, unprefixedname) entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)' charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)' stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")' stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')" # FIX UNICODE LATER #elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|' # ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]') elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]' #quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|' # ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]') quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]' #aposattrcontentchar = 
(ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|' # ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]') aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_`\|~]' # CHAR elements - fix the above elementcontentchar, quotattrcontentchar, # aposattrcontentchar #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] flags = re.DOTALL | re.MULTILINE | re.UNICODE def punctuation_root_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) # transition to root always - don't pop off stack ctx.stack = ['root'] ctx.pos = match.end() def operator_root_callback(lexer, match, ctx): yield match.start(), Operator, match.group(1) # transition to root always - don't pop off stack ctx.stack = ['root'] ctx.pos = match.end() def popstate_tag_callback(lexer, match, ctx): yield match.start(), Name.Tag, match.group(1) ctx.stack.append(lexer.xquery_parse_state.pop()) ctx.pos = match.end() def popstate_xmlcomment_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append(lexer.xquery_parse_state.pop()) ctx.pos = match.end() def popstate_kindtest_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) next_state = lexer.xquery_parse_state.pop() if next_state == 'occurrenceindicator': if re.match("[?*+]+", match.group(2)): yield match.start(), Punctuation, match.group(2) ctx.stack.append('operator') ctx.pos = match.end() else: ctx.stack.append('operator') ctx.pos = match.end(1) else: ctx.stack.append(next_state) ctx.pos = match.end(1) def popstate_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) # if we have run out of our state stack, pop whatever is on the pygments # state stack if len(lexer.xquery_parse_state) == 0: ctx.stack.pop() elif len(ctx.stack) > 1: ctx.stack.append(lexer.xquery_parse_state.pop()) else: # i don't know if i'll need this, but in case, default back to root ctx.stack = ['root'] ctx.pos = match.end() def 
pushstate_element_content_starttag_callback(lexer, match, ctx): yield match.start(), Name.Tag, match.group(1) lexer.xquery_parse_state.append('element_content') ctx.stack.append('start_tag') ctx.pos = match.end() def pushstate_cdata_section_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('cdata_section') lexer.xquery_parse_state.append(ctx.state.pop) ctx.pos = match.end() def pushstate_starttag_callback(lexer, match, ctx): yield match.start(), Name.Tag, match.group(1) lexer.xquery_parse_state.append(ctx.state.pop) ctx.stack.append('start_tag') ctx.pos = match.end() def pushstate_operator_order_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Text, match.group(2) yield match.start(), Punctuation, match.group(3) ctx.stack = ['root'] lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_operator_root_validate(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Text, match.group(2) yield match.start(), Punctuation, match.group(3) ctx.stack = ['root'] lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_operator_root_validate_withmode(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Text, match.group(2) yield match.start(), Keyword, match.group(3) ctx.stack = ['root'] lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_operator_processing_instruction_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('processing_instruction') lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_element_content_processing_instruction_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('processing_instruction') lexer.xquery_parse_state.append('element_content') ctx.pos = match.end() def 
pushstate_element_content_cdata_section_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('cdata_section') lexer.xquery_parse_state.append('element_content') ctx.pos = match.end() def pushstate_operator_cdata_section_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('cdata_section') lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_element_content_xmlcomment_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('xml_comment') lexer.xquery_parse_state.append('element_content') ctx.pos = match.end() def pushstate_operator_xmlcomment_callback(lexer, match, ctx): yield match.start(), String.Doc, match.group(1) ctx.stack.append('xml_comment') lexer.xquery_parse_state.append('operator') ctx.pos = match.end() def pushstate_kindtest_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Text, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('kindtest') ctx.stack.append('kindtest') ctx.pos = match.end() def pushstate_operator_kindtestforpi_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Text, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('operator') ctx.stack.append('kindtestforpi') ctx.pos = match.end() def pushstate_operator_kindtest_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Text, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('operator') ctx.stack.append('kindtest') ctx.pos = match.end() def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx): yield match.start(), Name.Tag, match.group(1) yield match.start(), Text, match.group(2) yield match.start(), Punctuation, match.group(3) 
lexer.xquery_parse_state.append('occurrenceindicator') ctx.stack.append('kindtest') ctx.pos = match.end() def pushstate_operator_starttag_callback(lexer, match, ctx): yield match.start(), Name.Tag, match.group(1) lexer.xquery_parse_state.append('operator') ctx.stack.append('start_tag') ctx.pos = match.end() def pushstate_operator_root_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) lexer.xquery_parse_state.append('operator') ctx.stack = ['root']#.append('root') ctx.pos = match.end() def pushstate_operator_root_construct_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Text, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('operator') ctx.stack = ['root'] ctx.pos = match.end() def pushstate_root_callback(lexer, match, ctx): yield match.start(), Punctuation, match.group(1) cur_state = ctx.stack.pop() lexer.xquery_parse_state.append(cur_state) ctx.stack = ['root']#.append('root') ctx.pos = match.end() def pushstate_operator_attribute_callback(lexer, match, ctx): yield match.start(), Name.Attribute, match.group(1) ctx.stack.append('operator') ctx.pos = match.end() def pushstate_operator_callback(lexer, match, ctx): yield match.start(), Keyword, match.group(1) yield match.start(), Text, match.group(2) yield match.start(), Punctuation, match.group(3) lexer.xquery_parse_state.append('operator') ctx.pos = match.end() tokens = { 'comment': [ # xquery comments (r'(:\))', Comment, '#pop'), (r'(\(:)', Comment, '#push'), (r'[^:)]', Comment), (r'([^:)]|:|\))', Comment), ], 'whitespace': [ (r'\s+', Text), ], 'operator': [ include('whitespace'), (r'(\})', popstate_callback), (r'\(:', Comment, 'comment'), (r'(\{)', pushstate_root_callback), (r'then|else|external|at|div|except', Keyword, 'root'), (r'order by', Keyword, 'root'), (r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'), (r'and|or', Operator.Word, 'root'), 
(r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)', Operator.Word, 'root'), (r'return|satisfies|to|union|where|preserve\s+strip', Keyword, 'root'), (r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\||:=|=)', operator_root_callback), (r'(::|;|\[|//|/|,)', punctuation_root_callback), (r'(castable|cast)(\s+)(as)\b', bygroups(Keyword, Text, Keyword), 'singletype'), (r'(instance)(\s+)(of)\b', bygroups(Keyword, Text, Keyword), 'itemtype'), (r'(treat)(\s+)(as)\b', bygroups(Keyword, Text, Keyword), 'itemtype'), (r'(case|as)\b', Keyword, 'itemtype'), (r'(\))(\s*)(as)', bygroups(Punctuation, Text, Keyword), 'itemtype'), (r'\$', Name.Variable, 'varname'), (r'(for|let)(\s+)(\$)', bygroups(Keyword, Text, Name.Variable), 'varname'), #(r'\)|\?|\]', Punctuation, '#push'), (r'\)|\?|\]', Punctuation), (r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)), (r'ascending|descending|default', Keyword, '#push'), (r'external', Keyword), (r'collation', Keyword, 'uritooperator'), # finally catch all string literals and stay in operator state (stringdouble, String.Double), (stringsingle, String.Single), (r'(catch)(\s*)', bygroups(Keyword, Text), 'root'), ], 'uritooperator': [ (stringdouble, String.Double, '#pop'), (stringsingle, String.Single, '#pop'), ], 'namespacedecl': [ include('whitespace'), (r'\(:', Comment, 'comment'), (r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)), (r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)), (stringdouble, String.Double), (stringsingle, String.Single), (r',', Punctuation), (r'=', Operator), (r';', Punctuation, 'root'), (ncname, Name.Namespace), ], 'namespacekeyword': [ include('whitespace'), (r'\(:', Comment, 'comment'), (stringdouble, String.Double, 'namespacedecl'), (stringsingle, String.Single, 'namespacedecl'), (r'inherit|no-inherit', Keyword, 'root'), (r'namespace', Keyword, 'namespacedecl'), (r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)), (r'preserve|no-preserve', Keyword), (r',', Punctuation), ], 
'varname': [ (r'\(:', Comment, 'comment'), (qname, Name.Variable, 'operator'), ], 'singletype': [ (r'\(:', Comment, 'comment'), (ncname + r'(:\*)', Name.Variable, 'operator'), (qname, Name.Variable, 'operator'), ], 'itemtype': [ include('whitespace'), (r'\(:', Comment, 'comment'), (r'\$', Punctuation, 'varname'), (r'(void)(\s*)(\()(\s*)(\))', bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'), (r'(element|attribute|schema-element|schema-attribute|comment|text|' r'node|binary|document-node|empty-sequence)(\s*)(\()', pushstate_occurrenceindicator_kindtest_callback), # Marklogic specific type? (r'(processing-instruction)(\s*)(\()', bygroups(Keyword, Text, Punctuation), ('occurrenceindicator', 'kindtestforpi')), (r'(item)(\s*)(\()(\s*)(\))(?=[*+?])', bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'occurrenceindicator'), (r'\(\#', Punctuation, 'pragma'), (r';', Punctuation, '#pop'), (r'then|else', Keyword, '#pop'), (r'(at)(\s+)(' + stringdouble + ')', bygroups(Keyword, Text, String.Double), 'namespacedecl'), (r'(at)(\s+)(' + stringsingle + ')', bygroups(Keyword, Text, String.Single), 'namespacedecl'), (r'except|intersect|in|is|return|satisfies|to|union|where', Keyword, 'root'), (r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'), (r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|', Operator, 'root'), (r'external|at', Keyword, 'root'), (r'(stable)(\s+)(order)(\s+)(by)', bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'), (r'(castable|cast)(\s+)(as)', bygroups(Keyword, Text, Keyword), 'singletype'), (r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)), (r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)), (r'case|as', Keyword, 'itemtype'), (r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'), (ncname + r':\*', Keyword.Type, 'operator'), (qname, Keyword.Type, 'occurrenceindicator'), ], 'kindtest': [ (r'\(:', Comment, 'comment'), (r'{', Punctuation, 'root'), (r'(\))([*+?]?)', popstate_kindtest_callback), (r'\*', 
Name, 'closekindtest'), (qname, Name, 'closekindtest'), (r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback), ], 'kindtestforpi': [ (r'\(:', Comment, 'comment'), (r'\)', Punctuation, '#pop'), (ncname, Name.Variable), (stringdouble, String.Double), (stringsingle, String.Single), ], 'closekindtest': [ (r'\(:', Comment, 'comment'), (r'(\))', popstate_callback), (r',', Punctuation), (r'(\{)', pushstate_operator_root_callback), (r'\?', Punctuation), ], 'xml_comment': [ (r'(-->)', popstate_xmlcomment_callback), (r'[^-]{1,2}', Literal), (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + unirange(0x10000, 0x10ffff), Literal), ], 'processing_instruction': [ (r'\s+', Text, 'processing_instruction_content'), (r'\?>', String.Doc, '#pop'), (pitarget, Name), ], 'processing_instruction_content': [ (r'\?>', String.Doc, '#pop'), (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + unirange(0x10000, 0x10ffff), Literal), ], 'cdata_section': [ (r']]>', String.Doc, '#pop'), (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + unirange(0x10000, 0x10ffff), Literal), ], 'start_tag': [ include('whitespace'), (r'(/>)', popstate_tag_callback), (r'>', Name.Tag, 'element_content'), (r'"', Punctuation, 'quot_attribute_content'), (r"'", Punctuation, 'apos_attribute_content'), (r'=', Operator), (qname, Name.Tag), ], 'quot_attribute_content': [ (r'"', Punctuation, 'start_tag'), (r'(\{)', pushstate_root_callback), (r'""', Name.Attribute), (quotattrcontentchar, Name.Attribute), (entityref, Name.Attribute), (charref, Name.Attribute), (r'\{\{|\}\}', Name.Attribute), ], 'apos_attribute_content': [ (r"'", Punctuation, 'start_tag'), (r'\{', Punctuation, 'root'), (r"''", Name.Attribute), (aposattrcontentchar, Name.Attribute), (entityref, Name.Attribute), (charref, Name.Attribute), (r'\{\{|\}\}', Name.Attribute), ], 'element_content': [ (r'</', Name.Tag, 'end_tag'), (r'(\{)', pushstate_root_callback), (r'(<!--)', pushstate_element_content_xmlcomment_callback), (r'(<\?)', 
pushstate_element_content_processing_instruction_callback), (r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback), (r'(<)', pushstate_element_content_starttag_callback), (elementcontentchar, Literal), (entityref, Literal), (charref, Literal), (r'\{\{|\}\}', Literal), ], 'end_tag': [ include('whitespace'), (r'(>)', popstate_tag_callback), (qname, Name.Tag), ], 'xmlspace_decl': [ (r'\(:', Comment, 'comment'), (r'preserve|strip', Keyword, '#pop'), ], 'declareordering': [ (r'\(:', Comment, 'comment'), include('whitespace'), (r'ordered|unordered', Keyword, '#pop'), ], 'xqueryversion': [ include('whitespace'), (r'\(:', Comment, 'comment'), (stringdouble, String.Double), (stringsingle, String.Single), (r'encoding', Keyword), (r';', Punctuation, '#pop'), ], 'pragma': [ (qname, Name.Variable, 'pragmacontents'), ], 'pragmacontents': [ (r'#\)', Punctuation, 'operator'), (ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' + unirange(0x10000, 0x10ffff), Literal), (r'(\s+)', Text), ], 'occurrenceindicator': [ include('whitespace'), (r'\(:', Comment, 'comment'), (r'\*|\?|\+', Operator, 'operator'), (r':=', Operator, 'root'), (r'', Text, 'operator'), ], 'option': [ include('whitespace'), (qname, Name.Variable, '#pop'), ], 'qname_braren': [ include('whitespace'), (r'(\{)', pushstate_operator_root_callback), (r'(\()', Punctuation, 'root'), ], 'element_qname': [ (qname, Name.Variable, 'root'), ], 'attribute_qname': [ (qname, Name.Variable, 'root'), ], 'root': [ include('whitespace'), (r'\(:', Comment, 'comment'), # handle operator state # order on numbers matters - handle most complex first (r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Double, 'operator'), (r'(\.\d+)[eE][\+\-]?\d+', Number.Double, 'operator'), (r'(\.\d+|\d+\.\d*)', Number, 'operator'), (r'(\d+)', Number.Integer, 'operator'), (r'(\.\.|\.|\))', Punctuation, 'operator'), (r'(declare)(\s+)(construction)', bygroups(Keyword, Text, Keyword), 'operator'), (r'(declare)(\s+)(default)(\s+)(order)', bygroups(Keyword, Text, 
Keyword, Text, Keyword), 'operator'), (ncname + ':\*', Name, 'operator'), ('\*:'+ncname, Name.Tag, 'operator'), ('\*', Name.Tag, 'operator'), (stringdouble, String.Double, 'operator'), (stringsingle, String.Single, 'operator'), (r'(\})', popstate_callback), #NAMESPACE DECL (r'(declare)(\s+)(default)(\s+)(collation)', bygroups(Keyword, Text, Keyword, Text, Keyword)), (r'(module|declare)(\s+)(namespace)', bygroups(Keyword, Text, Keyword), 'namespacedecl'), (r'(declare)(\s+)(base-uri)', bygroups(Keyword, Text, Keyword), 'namespacedecl'), #NAMESPACE KEYWORD (r'(declare)(\s+)(default)(\s+)(element|function)', bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'), (r'(import)(\s+)(schema|module)', bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'), (r'(declare)(\s+)(copy-namespaces)', bygroups(Keyword, Text, Keyword), 'namespacekeyword'), #VARNAMEs (r'(for|let|some|every)(\s+)(\$)', bygroups(Keyword, Text, Name.Variable), 'varname'), (r'\$', Name.Variable, 'varname'), (r'(declare)(\s+)(variable)(\s+)(\$)', bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'), #ITEMTYPE (r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'), (r'(element|attribute|schema-element|schema-attribute|comment|' r'text|node|document-node|empty-sequence)(\s+)(\()', pushstate_operator_kindtest_callback), (r'(processing-instruction)(\s+)(\()', pushstate_operator_kindtestforpi_callback), (r'(<!--)', pushstate_operator_xmlcomment_callback), (r'(<\?)', pushstate_operator_processing_instruction_callback), (r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback), # (r'</', Name.Tag, 'end_tag'), (r'(<)', pushstate_operator_starttag_callback), (r'(declare)(\s+)(boundary-space)', bygroups(Keyword, Text, Keyword), 'xmlspace_decl'), (r'(validate)(\s+)(lax|strict)', pushstate_operator_root_validate_withmode), (r'(validate)(\s*)(\{)', pushstate_operator_root_validate), (r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)), 
(r'(element|attribute)(\s*)(\{)', pushstate_operator_root_construct_callback), (r'(document|text|processing-instruction|comment)(\s*)(\{)', pushstate_operator_root_construct_callback), #ATTRIBUTE (r'(attribute)(\s+)(?=' + qname + r')', bygroups(Keyword, Text), 'attribute_qname'), #ELEMENT (r'(element)(\s+)(?=' +qname+ r')', bygroups(Keyword, Text), 'element_qname'), #PROCESSING_INSTRUCTION (r'(processing-instruction)(\s+)(' + ncname + r')(\s*)(\{)', bygroups(Keyword, Text, Name.Variable, Text, Punctuation), 'operator'), (r'(declare|define)(\s+)(function)', bygroups(Keyword, Text, Keyword)), (r'(\{)', pushstate_operator_root_callback), (r'(unordered|ordered)(\s*)(\{)', pushstate_operator_order_callback), (r'(declare)(\s+)(ordering)', bygroups(Keyword, Text, Keyword), 'declareordering'), (r'(xquery)(\s+)(version)', bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'), (r'(\(#)', Punctuation, 'pragma'), # sometimes return can occur in root state (r'return', Keyword), (r'(declare)(\s+)(option)', bygroups(Keyword, Text, Keyword), 'option'), #URI LITERALS - single and double quoted (r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'), (r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'), (r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)', bygroups(Keyword, Punctuation)), (r'(descendant|following-sibling|following|parent|preceding-sibling' r'|preceding|self)(::)', bygroups(Keyword, Punctuation)), (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)), (r'then|else', Keyword), # ML specific (r'(try)(\s*)', bygroups(Keyword, Text), 'root'), (r'(catch)(\s*)(\()(\$)', bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'), (r'(@'+qname+')', Name.Attribute), (r'(@'+ncname+')', Name.Attribute), (r'@\*:'+ncname, Name.Attribute), (r'(@)', Name.Attribute), (r'//|/|\+|-|;|,|\(|\)', Punctuation), # STANDALONE QNAMES (qname + r'(?=\s*{)', Name.Tag, 'qname_braren'), (qname + r'(?=\s*\([^:])', Name.Function, 
'qname_braren'), (qname, Name.Tag, 'operator'), ] } class DartLexer(RegexLexer): name = 'Dart' aliases = ['dart'] filenames = ['*.dart'] mimetypes = ['text/x-dart'] flags = re.MULTILINE | re.DOTALL tokens = { 'root': [ (r'#!(.*?)$', Comment.Preproc), (r'(#)(import|library|source)', bygroups(Text, Keyword)), (r'[^\S\n]+', Text), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline), (r'(class|interface)(\s+)', bygroups(Keyword.Declaration, Text), 'class'), (r'(assert|break|case|catch|continue|default|do|else|finally|for|' r'if|in|is|new|return|super|switch|this|throw|try|while)\b', Keyword), (r'(abstract|const|extends|factory|final|get|implements|' r'native|operator|set|static|typedef|var)\b', Keyword.Declaration), (r'(bool|double|Dynamic|int|num|Object|String|void)', Keyword.Type), (r'(false|null|true)', Keyword.Constant), (r'@"(\\\\|\\"|[^"])*"', String.Double), # raw string (r"@'(\\\\|\\'|[^'])*'", String.Single), # raw string (r'"', String.Double, 'string_double'), (r"'", String.Single, 'string_single'), (r'[a-zA-Z_$][a-zA-Z0-9_]*:', Name.Label), (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name), (r'[~!%^&*+=|?:<>/-]', Operator), (r'[(){}\[\],.;]', Punctuation), (r'0[xX][0-9a-fA-F]+', Number.Hex), # DIGIT+ (‘.’ DIGIT*)? EXPONENT? (r'\d+(\.\d*)?([eE][+-]?\d+)?', Number), (r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT? 
(r'\n', Text) # pseudo-keyword negate intentionally left out ], 'class': [ (r'[a-zA-Z_$][a-zA-Z0-9_]*', Name.Class, '#pop') ], 'string_double': [ (r'"', String.Double, '#pop'), (r'[^"$]+', String.Double), (r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)), (r'(\$\{)(.*?)(\})', bygroups(String.Interpol, using(this), String.Interpol)), (r'\$+', String.Double) ], 'string_single': [ (r"'", String.Single, '#pop'), (r"[^'$]+", String.Single), (r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)), (r'(\$\{)(.*?)(\})', bygroups(String.Interpol, using(this), String.Interpol)), (r'\$+', String.Single) ] } class LassoLexer(RegexLexer): #!.+lasso9\b', Comment.Preproc, 'lasso'), # whitespace/comments # names #[a-z_][\w\.]*|#\d+)', Name.Variable.Instance), # definitions # keywords # literals # other | 1.374229 | 1 |
iometrics/example.py | sayadi/iometrics | 0 | 6624827 | <filename>iometrics/example.py
#!/usr/bin/env python3
"""
## Show example usage and example output.
```py
from iometrics.example import usage
usage()
```
Example output
```markdown
| Network (MBytes/s) | Disk Util | Disk MBytes | Disk I/O |
| Received | Sent | % | MB/s Read | MB/s Written | I/O Read | I/O Write |
| val | avg | val | avg | val | avg | val | avg | val | avg | val | avg | val | avg |
| ------:| ------:| -----:| -----:| ---:| ---:| ------:| ------:| -----:| -----:| ------:| ------:| ---:| ---:|
| 4.6 | 3.5 | 0.1 | 0.1 | 49 | 2 | 52.8 | 1.1 | 0.0 | 0.9 | 211 | 4 | 5 | 18 |
| 4.1 | 3.5 | 0.1 | 0.1 | 61 | 3 | 60.4 | 2.4 | 40.3 | 1.7 | 255 | 10 | 149 | 21 |
```
:copyright: (c) 2021 by <NAME>.
:license: Apache 2.0, see LICENSE for more details.
"""
import time
from iometrics import DiskMetrics
from iometrics import NetworkMetrics
# Markdown table header re-printed periodically by usage() so the column
# labels stay visible while new sample rows scroll by; the leading newline
# separates it from any previous output.
DUAL_METRICS_HEADER = """
| Network (MBytes/s) | Disk Util | Disk MBytes | Disk I/O |
| Received | Sent | % | MB/s Read | MB/s Written | I/O Read | I/O Write |
| val | avg | val | avg | val | avg | val | avg | val | avg | val | avg | val | avg |
| ------:| ------:| -----:| -----:| ---:| ---:| ------:| ------:| -----:| -----:| ------:| ------:| -----:| -----:|"""
def usage(iterations: int = 10000) -> str:
    """Print a live network/disk metrics table, sampling once per second.

    The Markdown header is re-printed every 15 rows so column labels stay
    visible while the output scrolls.

    Args:
        iterations: number of one-second samples to take.

    Returns:
        The last formatted table row, or an empty string when ``iterations``
        is not positive.  (Previously a non-positive value skipped the loop
        and ``return row`` raised ``UnboundLocalError``.)
    """
    net = NetworkMetrics()
    disk = DiskMetrics()
    row = ""  # defined up front so the function is safe when the loop never runs
    for i in range(iterations):
        time.sleep(1)  # one sample per second
        net.update_stats()
        disk.update_stats()
        if i % 15 == 0:
            print(DUAL_METRICS_HEADER)
        row = (
            f"| {net.mb_recv_ps.val:6.1f} | {net.mb_recv_ps.avg:6.1f} "
            f"| {net.mb_sent_ps.val:5.1f} | {net.mb_sent_ps.avg:5.1f} "
            f"| {int(disk.io_util.val):3d} | {int(disk.io_util.avg):3d} "
            f"| {disk.mb_read.val:6.1f} | {disk.mb_read.avg:6.1f} "
            f"| {disk.mb_writ.val:5.1f} | {disk.mb_writ.avg:5.1f} "
            f"| {int(disk.io_read.val):6d} | {int(disk.io_read.avg):6d} "
            f"| {int(disk.io_writ.val):5d} | {int(disk.io_writ.avg):5d} "
            f"|"
        )
        print(row)
    return row
# `__all__` is left here for documentation purposes and as a
# reference to which interfaces are meant to be imported.
__all__ = [
"usage",
]
| <filename>iometrics/example.py
#!/usr/bin/env python3
"""
## Show example usage and example output.
```py
from iometrics.example import usage
usage()
```
Example output
```markdown
| Network (MBytes/s) | Disk Util | Disk MBytes | Disk I/O |
| Received | Sent | % | MB/s Read | MB/s Written | I/O Read | I/O Write |
| val | avg | val | avg | val | avg | val | avg | val | avg | val | avg | val | avg |
| ------:| ------:| -----:| -----:| ---:| ---:| ------:| ------:| -----:| -----:| ------:| ------:| ---:| ---:|
| 4.6 | 3.5 | 0.1 | 0.1 | 49 | 2 | 52.8 | 1.1 | 0.0 | 0.9 | 211 | 4 | 5 | 18 |
| 4.1 | 3.5 | 0.1 | 0.1 | 61 | 3 | 60.4 | 2.4 | 40.3 | 1.7 | 255 | 10 | 149 | 21 |
```
:copyright: (c) 2021 by <NAME>.
:license: Apache 2.0, see LICENSE for more details.
"""
import time
from iometrics import DiskMetrics
from iometrics import NetworkMetrics
# Markdown table header re-printed periodically by usage() so the column
# labels stay visible while new sample rows scroll by; the leading newline
# separates it from any previous output.
DUAL_METRICS_HEADER = """
| Network (MBytes/s) | Disk Util | Disk MBytes | Disk I/O |
| Received | Sent | % | MB/s Read | MB/s Written | I/O Read | I/O Write |
| val | avg | val | avg | val | avg | val | avg | val | avg | val | avg | val | avg |
| ------:| ------:| -----:| -----:| ---:| ---:| ------:| ------:| -----:| -----:| ------:| ------:| -----:| -----:|"""
def usage(iterations: int = 10000) -> str:
    """Print a live network/disk metrics table, sampling once per second.

    The Markdown header is re-printed every 15 rows so column labels stay
    visible while the output scrolls.

    Args:
        iterations: number of one-second samples to take.

    Returns:
        The last formatted table row, or an empty string when ``iterations``
        is not positive.  (Previously a non-positive value skipped the loop
        and ``return row`` raised ``UnboundLocalError``.)
    """
    net = NetworkMetrics()
    disk = DiskMetrics()
    row = ""  # defined up front so the function is safe when the loop never runs
    for i in range(iterations):
        time.sleep(1)  # one sample per second
        net.update_stats()
        disk.update_stats()
        if i % 15 == 0:
            print(DUAL_METRICS_HEADER)
        row = (
            f"| {net.mb_recv_ps.val:6.1f} | {net.mb_recv_ps.avg:6.1f} "
            f"| {net.mb_sent_ps.val:5.1f} | {net.mb_sent_ps.avg:5.1f} "
            f"| {int(disk.io_util.val):3d} | {int(disk.io_util.avg):3d} "
            f"| {disk.mb_read.val:6.1f} | {disk.mb_read.avg:6.1f} "
            f"| {disk.mb_writ.val:5.1f} | {disk.mb_writ.avg:5.1f} "
            f"| {int(disk.io_read.val):6d} | {int(disk.io_read.avg):6d} "
            f"| {int(disk.io_writ.val):5d} | {int(disk.io_writ.avg):5d} "
            f"|"
        )
        print(row)
    return row
# `__all__` is left here for documentation purposes and as a
# reference to which interfaces are meant to be imported.
__all__ = [
"usage",
]
| en | 0.521031 | #!/usr/bin/env python3 ## Show example usage and example output. ```py from iometrics.example import usage usage() ``` Example output ```markdown | Network (MBytes/s) | Disk Util | Disk MBytes | Disk I/O | | Received | Sent | % | MB/s Read | MB/s Written | I/O Read | I/O Write | | val | avg | val | avg | val | avg | val | avg | val | avg | val | avg | val | avg | | ------:| ------:| -----:| -----:| ---:| ---:| ------:| ------:| -----:| -----:| ------:| ------:| ---:| ---:| | 4.6 | 3.5 | 0.1 | 0.1 | 49 | 2 | 52.8 | 1.1 | 0.0 | 0.9 | 211 | 4 | 5 | 18 | | 4.1 | 3.5 | 0.1 | 0.1 | 61 | 3 | 60.4 | 2.4 | 40.3 | 1.7 | 255 | 10 | 149 | 21 | ``` :copyright: (c) 2021 by <NAME>. :license: Apache 2.0, see LICENSE for more details. | Network (MBytes/s) | Disk Util | Disk MBytes | Disk I/O | | Received | Sent | % | MB/s Read | MB/s Written | I/O Read | I/O Write | | val | avg | val | avg | val | avg | val | avg | val | avg | val | avg | val | avg | | ------:| ------:| -----:| -----:| ---:| ---:| ------:| ------:| -----:| -----:| ------:| ------:| -----:| -----:| Compute a live metric report of network and disk statistics. # `__all__` is left here for documentation purposes and as a # reference to which interfaces are meant to be imported. | 2.144371 | 2 |
controllers/__init__.py | rfukui/orign | 0 | 6624828 | <reponame>rfukui/orign
from .insurance_sugestion import insurance_sugestion
| from .insurance_sugestion import insurance_sugestion | none | 1 | 1.076089 | 1 | |
doc/source/cookbook/amrkdtree_downsampling.py | cevans216/yt | 0 | 6624829 | <gh_stars>0
# Using AMRKDTree Homogenized Volumes to examine large datasets
# at lower resolution.

# In this example we will show how to use the AMRKDTree to take a simulation
# with 8 levels of refinement and only use levels 0-3 to render the dataset.

# Currently this cookbook is flawed in that the data that is covered by the
# higher resolution data gets masked during the rendering.  This should be
# fixed by changing either the data source or the code in
# yt/utilities/amr_kdtree.py where data is being masked for the partitioned
# grid.  Right now the quick fix is to create a data_collection, but this
# will only work for patch based simulations that have ds.index.grids.

# We begin by loading up yt, and importing the AMRKDTree
import numpy as np

import yt
from yt.utilities.amr_kdtree.api import AMRKDTree

# Load up a dataset and define the kdtree
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
im, sc = yt.volume_render(ds, "density", fname="v0.png")

sc.camera.set_width(ds.arr(100, "kpc"))
render_source = sc.get_source()
kd = render_source.volume

# Print out specifics of KD Tree
print("Total volume of all bricks = %i" % kd.count_volume())
print("Total number of cells = %i" % kd.count_cells())

# Build a second tree restricted to refinement levels 0-3 only.
new_source = ds.all_data()
new_source.max_level = 3
kd_low_res = AMRKDTree(ds, data_source=new_source)
print(kd_low_res.count_volume())
print(kd_low_res.count_cells())

# Now we pass this in as the volume to our camera, and render the snapshot
# again.
render_source.set_volume(kd_low_res)
render_source.set_field("density")
sc.save("v1.png", sigma_clip=6.0)

# This operation was substantially faster.  Now lets modify the low resolution
# rendering until we find something we like.

tf = render_source.transfer_function
tf.clear()
tf.add_layers(
    4,
    0.01,
    col_bounds=[-27.5, -25.5],
    alpha=np.ones(4, dtype="float64"),
    colormap="RdBu_r",
)
sc.save("v2.png", sigma_clip=6.0)

# This looks better.  Now let's try turning on opacity.

tf.grey_opacity = True
sc.save("v3.png", sigma_clip=6.0)

# That seemed to pick out some interesting structures.  Now let's bump up the
# opacity.

tf.clear()
tf.add_layers(
    4,
    0.01,
    col_bounds=[-27.5, -25.5],
    alpha=10.0 * np.ones(4, dtype="float64"),
    colormap="RdBu_r",
)
# NOTE(review): the original script issued this exact add_layers() call a
# second time (old-style formatting leftover), stacking the same four layers
# twice before saving v4.png; the duplicate call has been removed.
sc.save("v4.png", sigma_clip=6.0)

# This looks pretty good, now lets go back to the full resolution AMRKDTree

render_source.set_volume(kd)
sc.save("v5.png", sigma_clip=6.0)

# This looks great!
# Using AMRKDTree Homogenized Volumes to examine large datasets
# at lower resolution.

# In this example we will show how to use the AMRKDTree to take a simulation
# with 8 levels of refinement and only use levels 0-3 to render the dataset.

# Currently this cookbook is flawed in that the data that is covered by the
# higher resolution data gets masked during the rendering.  This should be
# fixed by changing either the data source or the code in
# yt/utilities/amr_kdtree.py where data is being masked for the partitioned
# grid.  Right now the quick fix is to create a data_collection, but this
# will only work for patch based simulations that have ds.index.grids.

# We begin by loading up yt, and importing the AMRKDTree
import numpy as np

import yt
from yt.utilities.amr_kdtree.api import AMRKDTree

# Load up a dataset and define the kdtree
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
im, sc = yt.volume_render(ds, "density", fname="v0.png")

sc.camera.set_width(ds.arr(100, "kpc"))
render_source = sc.get_source()
kd = render_source.volume

# Print out specifics of KD Tree
print("Total volume of all bricks = %i" % kd.count_volume())
print("Total number of cells = %i" % kd.count_cells())

# Build a second tree restricted to refinement levels 0-3 only.
new_source = ds.all_data()
new_source.max_level = 3
kd_low_res = AMRKDTree(ds, data_source=new_source)
print(kd_low_res.count_volume())
print(kd_low_res.count_cells())

# Now we pass this in as the volume to our camera, and render the snapshot
# again.
render_source.set_volume(kd_low_res)
render_source.set_field("density")
sc.save("v1.png", sigma_clip=6.0)

# This operation was substantially faster.  Now lets modify the low resolution
# rendering until we find something we like.

tf = render_source.transfer_function
tf.clear()
tf.add_layers(
    4,
    0.01,
    col_bounds=[-27.5, -25.5],
    alpha=np.ones(4, dtype="float64"),
    colormap="RdBu_r",
)
sc.save("v2.png", sigma_clip=6.0)

# This looks better.  Now let's try turning on opacity.

tf.grey_opacity = True
sc.save("v3.png", sigma_clip=6.0)

# That seemed to pick out some interesting structures.  Now let's bump up the
# opacity.

tf.clear()
tf.add_layers(
    4,
    0.01,
    col_bounds=[-27.5, -25.5],
    alpha=10.0 * np.ones(4, dtype="float64"),
    colormap="RdBu_r",
)
# NOTE(review): the original script issued this exact add_layers() call a
# second time (old-style formatting leftover), stacking the same four layers
# twice before saving v4.png; the duplicate call has been removed.
sc.save("v4.png", sigma_clip=6.0)

# This looks pretty good, now lets go back to the full resolution AMRKDTree

render_source.set_volume(kd)
sc.save("v5.png", sigma_clip=6.0)
# This looks great! | en | 0.923787 | # Using AMRKDTree Homogenized Volumes to examine large datasets # at lower resolution. # In this example we will show how to use the AMRKDTree to take a simulation # with 8 levels of refinement and only use levels 0-3 to render the dataset. # Currently this cookbook is flawed in that the data that is covered by the # higher resolution data gets masked during the rendering. This should be # fixed by changing either the data source or the code in # yt/utilities/amr_kdtree.py where data is being masked for the partitioned # grid. Right now the quick fix is to create a data_collection, but this # will only work for patch based simulations that have ds.index.grids. # We begin by loading up yt, and importing the AMRKDTree # Load up a dataset and define the kdtree # Print out specifics of KD Tree # Now we pass this in as the volume to our camera, and render the snapshot # again. # This operation was substantially faster. Now lets modify the low resolution # rendering until we find something we like. # This looks better. Now let's try turning on opacity. # ## That seemed to pick out som interesting structures. Now let's bump up the ## opacity. # # ## This looks pretty good, now lets go back to the full resolution AMRKDTree # # This looks great! | 2.231601 | 2 |
test/functional/api/cas/cli.py | mraveendrababu/open-cas-linux | 0 | 6624830 | <filename>test/functional/api/cas/cli.py
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import logging
LOGGER = logging.getLogger(__name__)
# Executable names prepended to every command string built below; they are
# resolved through PATH when the command is eventually run.
casadm_bin = "casadm"
# NOTE(review): casctl is not referenced anywhere in this chunk — presumably
# consumed by service-control helpers elsewhere in the file; confirm.
casctl = "casctl"
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False):
    """Build the casadm command line that adds a core device to a cache.

    :param cache_id: id of the cache instance the core is attached to
    :param core_dev: path to the core block device
    :param core_id: optional explicit id for the new core
    :param shortcut: use the short (one-letter) option names
    :return: full casadm invocation as a single string
    """
    if shortcut:
        command = f" -A -i {cache_id} -d {core_dev}"
    else:
        command = f" --add-core --cache-id {cache_id} --core-device {core_dev}"
    if core_id is not None:
        id_flag = " -j " if shortcut else " --core-id "
        command += id_flag + core_id
    return casadm_bin + command
def remove_core_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False):
    """Build the casadm command line that removes a core from a cache.

    :param cache_id: id of the cache instance
    :param core_id: id of the core to remove
    :param force: append the force switch
    :param shortcut: use the short (one-letter) option names
    :return: full casadm invocation as a single string
    """
    if shortcut:
        command = f" -R -i {cache_id} -j {core_id}"
    else:
        command = f" --remove-core --cache-id {cache_id} --core-id {core_id}"
    if force:
        command += " -f" if shortcut else " --force"
    return casadm_bin + command
def remove_detached_cmd(core_device: str, shortcut: bool = False):
    """Build the casadm command line that removes a detached core device.

    :param core_device: path to the detached core block device
    :param shortcut: use the short (one-letter) option names
    :return: full casadm invocation as a single string
    """
    device_flag = " -d " if shortcut else " --device "
    return casadm_bin + " --remove-detached" + device_flag + core_device
def help_cmd(shortcut: bool = False):
    """Build the casadm command line that prints usage information.

    :param shortcut: use the short (one-letter) option name
    :return: full casadm invocation as a single string
    """
    if shortcut:
        return casadm_bin + " -H"
    return casadm_bin + " --help"
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False):
    """Build the casadm command line that resets statistics counters.

    :param cache_id: id of the cache instance
    :param core_id: optional core id to narrow the reset to one core
    :param shortcut: use the short (one-letter) option names
    :return: full casadm invocation as a single string
    """
    parts = [" -Z -i " if shortcut else " --reset-counters --cache-id ", cache_id]
    if core_id is not None:
        parts.append(" -j " if shortcut else " --core-id ")
        parts.append(core_id)
    return casadm_bin + "".join(parts)
def flush_cache_cmd(cache_id: str, shortcut: bool = False):
    """Build the casadm command line that flushes dirty data from a cache.

    :param cache_id: id of the cache instance to flush
    :param shortcut: use the short (one-letter) option names
    :return: full casadm invocation as a single string
    """
    flag = " -F -i " if shortcut else " --flush-cache --cache-id "
    return casadm_bin + flag + cache_id
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False):
    """Build the casadm command line that flushes dirty data of a single core.

    :param cache_id: id of the cache instance
    :param core_id: id of the core to flush
    :param shortcut: use the short (one-letter) option names
    :return: full casadm invocation as a single string
    """
    if shortcut:
        command = f" -E -i {cache_id} -j {core_id}"
    else:
        command = f" --flush-core --cache-id {cache_id} --core-id {core_id}"
    return casadm_bin + command
def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = None,
              cache_id: str = None, force: bool = False,
              load: bool = False, shortcut: bool = False):
    """Build the casadm invocation starting (or loading) a cache on `cache_dev`.

    Optional knobs are appended only when given, in the same order casadm's
    help lists them: mode, line size, id, force, load.
    """
    parts = [" -S" if shortcut else " --start-cache"]
    parts.append((" -d " if shortcut else " --cache-device ") + cache_dev)
    if cache_mode is not None:
        parts.append((" -c " if shortcut else " --cache-mode ") + cache_mode)
    if cache_line_size is not None:
        parts.append((" -x " if shortcut else " --cache-line-size ") + cache_line_size)
    if cache_id is not None:
        parts.append((" -i " if shortcut else " --cache-id ") + cache_id)
    if force:
        parts.append(" -f" if shortcut else " --force")
    if load:
        parts.append(" -l" if shortcut else " --load")
    return casadm_bin + "".join(parts)
def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False,
                         io_class_id: str = None, filter: str = None,
                         output_format: str = None, shortcut: bool = False):
    """Build the casadm statistics invocation for cache `cache_id`.

    `io_class_id` is only valid together with `per_io_class=True`; passing it
    alone raises. NOTE(review): `filter` shadows the builtin of the same
    name — kept for interface compatibility with existing callers.
    """
    command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id
    if core_id is not None:
        command += (" -j " if shortcut else " --core-id ") + core_id
    if per_io_class:
        command += " -d" if shortcut else " --io-class-id"
        if io_class_id is not None:
            command += " " + io_class_id
    elif io_class_id is not None:
        # An io class id is meaningless without the per-io-class flag.
        raise Exception("Per io class flag not set but ID given.")
    if filter is not None:
        command += (" -f " if shortcut else " --filter ") + filter
    if output_format is not None:
        command += (" -o " if shortcut else " --output-format ") + output_format
    return casadm_bin + command
def format_cmd(cache_dev: str, force: bool = False, shortcut: bool = False):
    """Build the casadm invocation formatting NVMe device `cache_dev`."""
    flag = " -N -F -d " if shortcut else " --nvme --format --device "
    parts = [casadm_bin, flag, cache_dev]
    if force:
        parts.append(" -f" if shortcut else " --force")
    return "".join(parts)
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False):
    """Build the casadm invocation stopping cache `cache_id`.

    `no_data_flush` skips flushing dirty data on stop.

    Fix: the shortcut flag was ``" -T "`` with a trailing space, producing a
    double space (``casadm -T  -i 1``) — inconsistent with every other
    command builder in this module.
    """
    command = " -T" if shortcut else " --stop-cache"
    command += (" -i " if shortcut else " --cache-id ") + cache_id
    if no_data_flush:
        command += " --no-data-flush"
    return casadm_bin + command
def list_cmd(output_format: str = None, shortcut: bool = False):
    """Build the casadm invocation listing running caches.

    Only the two supported output formats ("table", "csv") are forwarded;
    any other value is silently ignored, matching the original behaviour.
    """
    args = " -L" if shortcut else " --list-caches"
    if output_format in ("table", "csv"):
        args += (" -o " if shortcut else " --output-format ") + output_format
    return casadm_bin + args
def load_cmd(cache_dev: str, shortcut: bool = False):
    """Build the casadm invocation loading an existing cache from `cache_dev`.

    Thin wrapper around `start_cmd` with ``load=True``.
    """
    return start_cmd(cache_dev, load=True, shortcut=shortcut)
def version_cmd(output_format: str = None, shortcut: bool = False):
    """Build the casadm invocation printing version information.

    As with `list_cmd`, only "table" and "csv" output formats are forwarded.
    """
    args = " -V" if shortcut else " --version"
    if output_format in ("table", "csv"):
        args += (" -o " if shortcut else " --output-format ") + output_format
    return casadm_bin + args
def set_cache_mode_cmd(cache_mode: str, cache_id: str,
                       flush_cache: str = None, shortcut: bool = False):
    """Build the casadm invocation switching cache `cache_id` to `cache_mode`.

    `flush_cache` (a yes/no string) is forwarded only when truthy.
    """
    if shortcut:
        cmd = " -Q -c {} -i {}".format(cache_mode, cache_id)
    else:
        cmd = " --set-cache-mode --cache-mode {} --cache-id {}".format(cache_mode, cache_id)
    if flush_cache:
        cmd += (" -f " if shortcut else " --flush-cache ") + flush_cache
    return casadm_bin + cmd
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False):
    """Build the casadm invocation loading an IO class configuration file."""
    if shortcut:
        args = " -C -C -i {} -f {}".format(cache_id, file)
    else:
        args = " --io-class --load-config --cache-id {} --file {}".format(cache_id, file)
    return casadm_bin + args
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False):
    """Build the casadm invocation listing the IO classes of cache `cache_id`."""
    if shortcut:
        args = " -C -L -i {} -o {}".format(cache_id, output_format)
    else:
        args = " --io-class --list --cache-id {} --output-format {}".format(
            cache_id, output_format)
    return casadm_bin + args
def _get_param_cmd(namespace: str, cache_id: str, output_format: str = None,
additional_params: str = None, shortcut: bool = False):
command = f" -G -n {namespace} -i {cache_id}" if shortcut else\
f" --get-param --name {namespace} --cache-id {cache_id}"
if additional_params is not None:
command += additional_params
if output_format is not None:
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def get_param_cutoff_cmd(cache_id: str, core_id: str,
                         output_format: str = None, shortcut: bool = False):
    """Build the command reading the sequential cutoff parameters of one core."""
    add_param = (" -j " if shortcut else " --core-id ") + core_id
    return _get_param_cmd(namespace="seq-cutoff", cache_id=cache_id, output_format=output_format,
                          additional_params=add_param, shortcut=shortcut)
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
    """Build the command reading the current cleaning policy of a cache."""
    return _get_param_cmd(namespace="cleaning", cache_id=cache_id,
                          output_format=output_format, shortcut=shortcut)
def get_param_cleaning_alru_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
    """Build the command reading the ALRU cleaning-policy parameters of a cache."""
    return _get_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
                          output_format=output_format, shortcut=shortcut)
def get_param_cleaning_acp_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
    """Build the command reading the ACP cleaning-policy parameters of a cache."""
    return _get_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
                          output_format=output_format, shortcut=shortcut)
def _set_param_cmd(namespace: str, cache_id: str, additional_params: str = None,
shortcut: bool = False):
command = f" -X -n {namespace} -i {cache_id}" if shortcut else\
f" --set-param --name {namespace} --cache-id {cache_id}"
command += additional_params
return casadm_bin + command
def set_param_cutoff_cmd(cache_id: str, core_id: str = None, threshold: str = None,
                         policy: str = None, shortcut: bool = False):
    """Build the command configuring the sequential cutoff parameters.

    Each optional knob (core, threshold, policy) is appended only when given.
    """
    add_params = ""
    if core_id is not None:
        add_params += (" -j " if shortcut else " --core-id ") + core_id
    if threshold is not None:
        add_params += (" -t " if shortcut else " --threshold ") + threshold
    if policy is not None:
        add_params += (" -p " if shortcut else " --policy ") + policy
    return _set_param_cmd(namespace="seq-cutoff", cache_id=cache_id,
                          additional_params=add_params, shortcut=shortcut)
def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False):
    """Build the command selecting the cleaning policy of cache `cache_id`."""
    add_params = (" -p " if shortcut else " --policy ") + policy
    return _set_param_cmd(namespace="cleaning", cache_id=cache_id,
                          additional_params=add_params, shortcut=shortcut)
def set_param_cleaning_alru_cmd(cache_id: str, wake_up: str, staleness_time: str,
                                flush_max_buffers: str, activity_threshold: str,
                                shortcut: bool = False):
    """Build the command tuning the ALRU cleaning-policy parameters.

    NOTE(review): the parameters have no defaults yet are None-checked below,
    so callers must pass None explicitly to skip one — consider adding
    ``= None`` defaults (kept as-is to avoid an interface change here).
    """
    add_param = ""
    if wake_up is not None:
        add_param += (" -w " if shortcut else " --wake-up ") + wake_up
    if staleness_time is not None:
        add_param += (" -s " if shortcut else " --staleness-time ") + staleness_time
    if flush_max_buffers is not None:
        add_param += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
    if activity_threshold is not None:
        add_param += (" -t " if shortcut else " --activity-threshold ") + activity_threshold
    return _set_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
                          additional_params=add_param, shortcut=shortcut)
def set_param_cleaning_acp_cmd(cache_id: str, wake_up: str = None,
                               flush_max_buffers: str = None, shortcut: bool = False):
    """Build the command tuning the ACP cleaning-policy parameters."""
    add_param = ""
    if wake_up is not None:
        add_param += (" -w " if shortcut else " --wake-up ") + wake_up
    if flush_max_buffers is not None:
        add_param += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
    return _set_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
                          additional_params=add_param, shortcut=shortcut)
def ctl_help(shortcut: bool = False):
    """Build the casctl help invocation (``-h`` or ``--help``).

    Fix: the original ``casctl + " --help" if shortcut else " -h"`` parsed as
    ``(casctl + " --help") if shortcut else (" -h")`` due to conditional
    expression precedence, so the default call returned a bare ``" -h"``
    without the binary name, and the short/long flags were swapped.
    """
    return casctl + (" -h" if shortcut else " --help")
def ctl_start():
    """Build the ``casctl start`` invocation."""
    return "{} start".format(casctl)
def ctl_stop(flush: bool = False):
    """Build the ``casctl stop`` invocation, optionally flushing dirty data."""
    suffix = " --flush" if flush else ""
    return casctl + " stop" + suffix
def ctl_init(force: bool = False):
    """Build the ``casctl init`` invocation, optionally forced."""
    return casctl + " init" + (" --force" if force else "")
| <filename>test/functional/api/cas/cli.py
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import logging
LOGGER = logging.getLogger(__name__)
casadm_bin = "casadm"
casctl = "casctl"
def add_core_cmd(cache_id: str, core_dev: str, core_id: str = None, shortcut: bool = False):
command = f" -A -i {cache_id} -d {core_dev}" if shortcut \
else f" --add-core --cache-id {cache_id} --core-device {core_dev}"
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command
def remove_core_cmd(cache_id: str, core_id: str, force: bool = False, shortcut: bool = False):
command = f" -R -i {cache_id} -j {core_id}" if shortcut \
else f" --remove-core --cache-id {cache_id} --core-id {core_id}"
if force:
command += " -f" if shortcut else " --force"
return casadm_bin + command
def remove_detached_cmd(core_device: str, shortcut: bool = False):
command = " --remove-detached" + (" -d " if shortcut else " --device ") + core_device
return casadm_bin + command
def help_cmd(shortcut: bool = False):
return casadm_bin + (" -H" if shortcut else " --help")
def reset_counters_cmd(cache_id: str, core_id: str = None, shortcut: bool = False):
command = (" -Z -i " if shortcut else " --reset-counters --cache-id ") + cache_id
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
return casadm_bin + command
def flush_cache_cmd(cache_id: str, shortcut: bool = False):
command = (" -F -i " if shortcut else " --flush-cache --cache-id ") + cache_id
return casadm_bin + command
def flush_core_cmd(cache_id: str, core_id: str, shortcut: bool = False):
command = (f" -E -i {cache_id} -j {core_id}" if shortcut
else f" --flush-core --cache-id {cache_id} --core-id {core_id}")
return casadm_bin + command
def start_cmd(cache_dev: str, cache_mode: str = None, cache_line_size: str = None,
cache_id: str = None, force: bool = False,
load: bool = False, shortcut: bool = False):
command = " -S" if shortcut else " --start-cache"
command += (" -d " if shortcut else " --cache-device ") + cache_dev
if cache_mode is not None:
command += (" -c " if shortcut else " --cache-mode ") + cache_mode
if cache_line_size is not None:
command += (" -x " if shortcut else " --cache-line-size ") + cache_line_size
if cache_id is not None:
command += (" -i " if shortcut else " --cache-id ") + cache_id
if force:
command += " -f" if shortcut else " --force"
if load:
command += " -l" if shortcut else " --load"
return casadm_bin + command
def print_statistics_cmd(cache_id: str, core_id: str = None, per_io_class: bool = False,
io_class_id: str = None, filter: str = None,
output_format: str = None, shortcut: bool = False):
command = (" -P -i " if shortcut else " --stats --cache-id ") + cache_id
if core_id is not None:
command += (" -j " if shortcut else " --core-id ") + core_id
if per_io_class:
command += " -d" if shortcut else " --io-class-id"
if io_class_id is not None:
command += " " + io_class_id
elif io_class_id is not None:
raise Exception("Per io class flag not set but ID given.")
if filter is not None:
command += (" -f " if shortcut else " --filter ") + filter
if output_format is not None:
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def format_cmd(cache_dev: str, force: bool = False, shortcut: bool = False):
command = (" -N -F -d " if shortcut else " --nvme --format --device ") + cache_dev
if force:
command += " -f" if shortcut else " --force"
return casadm_bin + command
def stop_cmd(cache_id: str, no_data_flush: bool = False, shortcut: bool = False):
    """Build the casadm invocation stopping cache `cache_id`.

    `no_data_flush` skips flushing dirty data on stop.

    Fix: the shortcut flag was ``" -T "`` with a trailing space, producing a
    double space (``casadm -T  -i 1``) — inconsistent with every other
    command builder in this module.
    """
    command = " -T" if shortcut else " --stop-cache"
    command += (" -i " if shortcut else " --cache-id ") + cache_id
    if no_data_flush:
        command += " --no-data-flush"
    return casadm_bin + command
def list_cmd(output_format: str = None, shortcut: bool = False):
command = " -L" if shortcut else " --list-caches"
if output_format == "table" or output_format == "csv":
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def load_cmd(cache_dev: str, shortcut: bool = False):
return start_cmd(cache_dev, load=True, shortcut=shortcut)
def version_cmd(output_format: str = None, shortcut: bool = False):
command = " -V" if shortcut else " --version"
if output_format == "table" or output_format == "csv":
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def set_cache_mode_cmd(cache_mode: str, cache_id: str,
flush_cache: str = None, shortcut: bool = False):
command = f" -Q -c {cache_mode} -i {cache_id}" if shortcut else \
f" --set-cache-mode --cache-mode {cache_mode} --cache-id {cache_id}"
if flush_cache:
command += (" -f " if shortcut else " --flush-cache ") + flush_cache
return casadm_bin + command
def load_io_classes_cmd(cache_id: str, file: str, shortcut: bool = False):
command = f" -C -C -i {cache_id} -f {file}" if shortcut else \
f" --io-class --load-config --cache-id {cache_id} --file {file}"
return casadm_bin + command
def list_io_classes_cmd(cache_id: str, output_format: str, shortcut: bool = False):
command = f" -C -L -i {cache_id} -o {output_format}" if shortcut else \
f" --io-class --list --cache-id {cache_id} --output-format {output_format}"
return casadm_bin + command
def _get_param_cmd(namespace: str, cache_id: str, output_format: str = None,
additional_params: str = None, shortcut: bool = False):
command = f" -G -n {namespace} -i {cache_id}" if shortcut else\
f" --get-param --name {namespace} --cache-id {cache_id}"
if additional_params is not None:
command += additional_params
if output_format is not None:
command += (" -o " if shortcut else " --output-format ") + output_format
return casadm_bin + command
def get_param_cutoff_cmd(cache_id: str, core_id: str,
output_format: str = None, shortcut: bool = False):
add_param = (" -j " if shortcut else " --core-id ") + core_id
return _get_param_cmd(namespace="seq-cutoff", cache_id=cache_id, output_format=output_format,
additional_params=add_param, shortcut=shortcut)
def get_param_cleaning_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
return _get_param_cmd(namespace="cleaning", cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
def get_param_cleaning_alru_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
return _get_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
def get_param_cleaning_acp_cmd(cache_id: str, output_format: str = None, shortcut: bool = False):
return _get_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
output_format=output_format, shortcut=shortcut)
def _set_param_cmd(namespace: str, cache_id: str, additional_params: str = None,
shortcut: bool = False):
command = f" -X -n {namespace} -i {cache_id}" if shortcut else\
f" --set-param --name {namespace} --cache-id {cache_id}"
command += additional_params
return casadm_bin + command
def set_param_cutoff_cmd(cache_id: str, core_id: str = None, threshold: str = None,
policy: str = None, shortcut: bool = False):
add_params = ""
if core_id is not None:
add_params += (" -j " if shortcut else " --core-id ") + core_id
if threshold is not None:
add_params += (" -t " if shortcut else " --threshold ") + threshold
if policy is not None:
add_params += (" -p " if shortcut else " --policy ") + policy
return _set_param_cmd(namespace="seq-cutoff", cache_id=cache_id,
additional_params=add_params, shortcut=shortcut)
def set_param_cleaning_cmd(cache_id: str, policy: str, shortcut: bool = False):
add_params = (" -p " if shortcut else " --policy ") + policy
return _set_param_cmd(namespace="cleaning", cache_id=cache_id,
additional_params=add_params, shortcut=shortcut)
def set_param_cleaning_alru_cmd(cache_id: str, wake_up: str, staleness_time: str,
flush_max_buffers: str, activity_threshold: str,
shortcut: bool = False):
add_param = ""
if wake_up is not None:
add_param += (" -w " if shortcut else " --wake-up ") + wake_up
if staleness_time is not None:
add_param += (" -s " if shortcut else " --staleness-time ") + staleness_time
if flush_max_buffers is not None:
add_param += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
if activity_threshold is not None:
add_param += (" -t " if shortcut else " --activity-threshold ") + activity_threshold
return _set_param_cmd(namespace="cleaning-alru", cache_id=cache_id,
additional_params=add_param, shortcut=shortcut)
def set_param_cleaning_acp_cmd(cache_id: str, wake_up: str = None,
flush_max_buffers: str = None, shortcut: bool = False):
add_param = ""
if wake_up is not None:
add_param += (" -w " if shortcut else " --wake-up ") + wake_up
if flush_max_buffers is not None:
add_param += (" -b " if shortcut else " --flush-max-buffers ") + flush_max_buffers
return _set_param_cmd(namespace="cleaning-acp", cache_id=cache_id,
additional_params=add_param, shortcut=shortcut)
def ctl_help(shortcut: bool = False):
    """Build the casctl help invocation (``-h`` or ``--help``).

    Fix: the original ``casctl + " --help" if shortcut else " -h"`` parsed as
    ``(casctl + " --help") if shortcut else (" -h")`` due to conditional
    expression precedence, so the default call returned a bare ``" -h"``
    without the binary name, and the short/long flags were swapped.
    """
    return casctl + (" -h" if shortcut else " --help")
def ctl_start():
return casctl + " start"
def ctl_stop(flush: bool = False):
command = casctl + " stop"
if flush:
command += " --flush"
return command
def ctl_init(force: bool = False):
command = casctl + " init"
if force:
command += " --force"
return command
| en | 0.304614 | # # Copyright(c) 2019 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause-Clear # | 1.928998 | 2 |
src/olympia/stats/management/commands/update_counts_from_file.py | Osmose/olympia | 0 | 6624831 | <reponame>Osmose/olympia
import codecs
import json
import re
from datetime import datetime, timedelta
from os import path, unlink
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon
from olympia.stats.models import update_inc, UpdateCount
from . import get_date_from_file, save_stats_to_file
log = olympia.core.logger.getLogger('adi.updatecountsfromfile')
# Validate a locale: must be like 'fr', 'en-us', 'zap-MX-diiste', ...
LOCALE_REGEX = re.compile(r"""^[a-z]{2,3} # General: fr, en, dsb,...
(-[A-Z]{2,3})? # Region: -US, -GB, ...
(-[a-z]{2,12})?$ # Locality: -valencia, -diiste
""", re.VERBOSE)
VALID_STATUSES = ["userDisabled,incompatible", "userEnabled", "Unknown",
"userDisabled", "userEnabled,incompatible"]
UPDATE_COUNT_TRIGGER = "userEnabled"
VALID_APP_GUIDS = amo.APP_GUIDS.keys()
APPVERSION_REGEX = re.compile(
r"""^[0-9]{1,3} # Major version: 2, 35
\.[0-9]{1,3}([ab][0-9])? # Minor version + alpha or beta: .0a1, .0b2
(\.[0-9]{1,3})?$ # Patch version: .1, .23
""", re.VERBOSE)
class Command(BaseCommand):
"""Process hive results stored in different files and store them in the db.
Usage:
./manage.py update_counts_from_file <folder> --date=YYYY-MM-DD
If no date is specified, the default is the day before.
If not folder is specified, the default is `hive_results/<YYYY-MM-DD>/`.
This folder will be located in `<settings.NETAPP_STORAGE>/tmp`.
Five files are processed:
- update_counts_by_version.hive
- update_counts_by_status.hive
- update_counts_by_app.hive
- update_counts_by_os.hive
- update_counts_by_locale.hive
Each file has the following cols:
- date
- addon guid
- data: the data grouped on (eg version, status...).
- count
- update type
For the "app" file, the "data" col is in fact two cols: the application
guid and the application version.
"""
help = __doc__
def add_arguments(self, parser):
"""Handle command arguments."""
parser.add_argument('folder_name', default='hive_results', nargs='?')
parser.add_argument(
'--date', action='store', type=str,
dest='date', help='Date in the YYYY-MM-DD format.')
parser.add_argument(
'--separator', action='store', type=str, default='\t',
dest='separator', help='Field separator in file.')
def handle(self, *args, **options):
start = datetime.now() # Measure the time it takes to run the script.
day = options['date']
if not day:
day = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
folder = options['folder_name']
folder = path.join(settings.TMP_PATH, folder, day)
sep = options['separator']
groups = ('version', 'status', 'app', 'os', 'locale')
group_filepaths = []
# Make sure we're not trying to update with mismatched data.
for group in groups:
filepath = path.join(folder, 'update_counts_by_%s.hive' % group)
if get_date_from_file(filepath, sep) != day:
raise CommandError('%s file contains data for another day' %
filepath)
group_filepaths.append((group, filepath))
# First, make sure we don't have any existing counts for the same day,
# or it would just increment again the same data.
UpdateCount.objects.filter(date=day).delete()
# Memoize the addons and the UpdateCounts.
update_counts = {}
# Perf: preload all the addons once and for all.
# This builds a dict where each key (the addon guid we get from the
# hive query) has the addon_id as value.
guids_to_addon = (dict(Addon.objects.public()
.exclude(guid__isnull=True)
.exclude(type=amo.ADDON_PERSONA)
.values_list('guid', 'id')))
index = -1
for group, filepath in group_filepaths:
with codecs.open(filepath, encoding='utf8') as results_file:
for line in results_file:
index += 1
if index and (index % 1000000) == 0:
log.info('Processed %s lines' % index)
splitted = line[:-1].split(sep)
if ((group == 'app' and len(splitted) != 6) or
(group != 'app' and len(splitted) != 5)):
log.debug('Badly formatted row: %s' % line)
continue
if group == 'app':
day, addon_guid, app_id, app_ver, count, \
update_type = splitted
else:
day, addon_guid, data, count, update_type = splitted
addon_guid = addon_guid.strip()
if update_type:
update_type.strip()
# Old versions of Firefox don't provide the update type.
# All the following are "empty-like" values.
if update_type in ['0', 'NULL', 'None', '', '\N',
'%UPDATE_TYPE%']:
update_type = None
try:
count = int(count)
if update_type:
update_type = int(update_type)
except ValueError: # Badly formatted? Drop.
continue
# The following is magic that I don't understand. I've just
# been told that this is the way we can make sure a request
# is valid:
# > the lower bits for updateType (eg 112) should add to
# > 16, if not, ignore the request.
# > udpateType & 31 == 16 == valid request.
if update_type and update_type & 31 != 16:
log.debug("Update type doesn't add to 16: %s" %
update_type)
continue
# Does this addon exist?
if addon_guid and addon_guid in guids_to_addon:
addon_id = guids_to_addon[addon_guid]
else:
log.debug(u"Addon {guid} doesn't exist."
.format(guid=addon_guid.strip()))
continue
# Memoize the UpdateCount.
if addon_guid in update_counts:
uc = update_counts[addon_guid]
else:
uc = UpdateCount(date=day, addon_id=addon_id, count=0)
update_counts[addon_guid] = uc
# We can now fill the UpdateCount object.
if group == 'version':
self.update_version(uc, data, count)
elif group == 'status':
self.update_status(uc, data, count)
if data == UPDATE_COUNT_TRIGGER:
# Use this count to compute the global number
# of daily users for this addon.
uc.count += count
elif group == 'app':
self.update_app(uc, app_id, app_ver, count)
elif group == 'os':
self.update_os(uc, data, count)
elif group == 'locale':
self.update_locale(uc, data, count)
# Make sure the locales and versions fields aren't too big to fit in
# the database. Those two fields are the only ones that are not fully
# validated, so we could end up with just anything in there (spam,
# buffer overflow attempts and the like).
# We don't care that they will increase the numbers, but we do not want
# those to break the process because of a "Data too long for column
# 'version'" error.
# The database field (TEXT), can hold up to 2^16 = 64k characters.
# If the field is longer than that, we we drop the least used items
# (with the lower count) until the field fits.
for addon_guid, update_count in update_counts.iteritems():
self.trim_field(update_count.locales)
self.trim_field(update_count.versions)
# Create in bulk: this is much faster.
UpdateCount.objects.bulk_create(update_counts.values(), 100)
for udate_count in update_counts.values():
save_stats_to_file(update_count)
log.info('Processed a total of %s lines' % (index + 1))
log.debug('Total processing time: %s' % (datetime.now() - start))
# Clean up files.
for _, filepath in group_filepaths:
log.debug('Deleting {path}'.format(path=filepath))
unlink(filepath)
def update_version(self, update_count, version, count):
"""Update the versions on the update_count with the given version."""
version = version[:32] # Limit the version to a (random) length.
update_count.versions = update_inc(update_count.versions, version,
count)
def update_status(self, update_count, status, count):
"""Update the statuses on the update_count with the given status."""
# Only update if the given status is valid.
if status in VALID_STATUSES:
update_count.statuses = update_inc(update_count.statuses, status,
count)
def update_app(self, update_count, app_id, app_ver, count):
"""Update the applications on the update_count with the given data."""
# Only update if app_id is a valid application guid, and if app_ver
# "could be" a valid version.
if (app_id not in VALID_APP_GUIDS or
not re.match(APPVERSION_REGEX, app_ver)):
return
# Applications is a dict of dicts, eg:
# {"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}":
# {"10.0": 2, "21.0": 1, ....},
# "some other application guid": ...
# }
if update_count.applications is None:
update_count.applications = {}
app = update_count.applications.get(app_id, {})
# Now overwrite this application's dict with
# incremented counts for its versions.
update_count.applications.update(
{app_id: update_inc(app, app_ver, count)})
def update_os(self, update_count, os, count):
"""Update the OSes on the update_count with the given OS."""
if os.lower() in amo.PLATFORM_DICT:
update_count.oses = update_inc(update_count.oses, os, count)
def update_locale(self, update_count, locale, count):
"""Update the locales on the update_count with the given locale."""
locale = locale.replace('_', '-')
# Only update if the locale "could be" valid. We can't simply restrict
# on locales that AMO know, because Firefox has many more, and custom
# packaged versions could have even more. Thus, we only restrict on the
# allowed characters, some kind of format, and the total length, and
# hope to not miss out on too many locales.
if re.match(LOCALE_REGEX, locale):
update_count.locales = update_inc(update_count.locales, locale,
count)
def trim_field(self, field):
"""Trim (in-place) the dict provided, keeping the most used items.
The "locales" and "versions" fields are dicts which have the locale
or version as the key, and the count as the value.
"""
def fits(field):
"""Does the json version of the field fits in the db TEXT field?"""
return len(json.dumps(field)) < (2 ** 16) # Max len of TEXT field.
if fits(field):
return
# Order by count (desc), for a dict like {'<locale>': <count>}.
values = list(reversed(sorted(field.items(), key=lambda v: v[1])))
while not fits(field):
key, count = values.pop() # Remove the least used (the last).
del field[key] # Remove this entry from the dict.
| import codecs
import json
import re
from datetime import datetime, timedelta
from os import path, unlink
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon
from olympia.stats.models import update_inc, UpdateCount
from . import get_date_from_file, save_stats_to_file
log = olympia.core.logger.getLogger('adi.updatecountsfromfile')
# Validate a locale: must be like 'fr', 'en-us', 'zap-MX-diiste', ...
LOCALE_REGEX = re.compile(r"""^[a-z]{2,3} # General: fr, en, dsb,...
(-[A-Z]{2,3})? # Region: -US, -GB, ...
(-[a-z]{2,12})?$ # Locality: -valencia, -diiste
""", re.VERBOSE)
VALID_STATUSES = ["userDisabled,incompatible", "userEnabled", "Unknown",
"userDisabled", "userEnabled,incompatible"]
UPDATE_COUNT_TRIGGER = "userEnabled"
VALID_APP_GUIDS = amo.APP_GUIDS.keys()
APPVERSION_REGEX = re.compile(
r"""^[0-9]{1,3} # Major version: 2, 35
\.[0-9]{1,3}([ab][0-9])? # Minor version + alpha or beta: .0a1, .0b2
(\.[0-9]{1,3})?$ # Patch version: .1, .23
""", re.VERBOSE)
class Command(BaseCommand):
"""Process hive results stored in different files and store them in the db.
Usage:
./manage.py update_counts_from_file <folder> --date=YYYY-MM-DD
If no date is specified, the default is the day before.
If not folder is specified, the default is `hive_results/<YYYY-MM-DD>/`.
This folder will be located in `<settings.NETAPP_STORAGE>/tmp`.
Five files are processed:
- update_counts_by_version.hive
- update_counts_by_status.hive
- update_counts_by_app.hive
- update_counts_by_os.hive
- update_counts_by_locale.hive
Each file has the following cols:
- date
- addon guid
- data: the data grouped on (eg version, status...).
- count
- update type
For the "app" file, the "data" col is in fact two cols: the application
guid and the application version.
"""
help = __doc__
def add_arguments(self, parser):
"""Handle command arguments."""
parser.add_argument('folder_name', default='hive_results', nargs='?')
parser.add_argument(
'--date', action='store', type=str,
dest='date', help='Date in the YYYY-MM-DD format.')
parser.add_argument(
'--separator', action='store', type=str, default='\t',
dest='separator', help='Field separator in file.')
def handle(self, *args, **options):
start = datetime.now() # Measure the time it takes to run the script.
day = options['date']
if not day:
day = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
folder = options['folder_name']
folder = path.join(settings.TMP_PATH, folder, day)
sep = options['separator']
groups = ('version', 'status', 'app', 'os', 'locale')
group_filepaths = []
# Make sure we're not trying to update with mismatched data.
for group in groups:
filepath = path.join(folder, 'update_counts_by_%s.hive' % group)
if get_date_from_file(filepath, sep) != day:
raise CommandError('%s file contains data for another day' %
filepath)
group_filepaths.append((group, filepath))
# First, make sure we don't have any existing counts for the same day,
# or it would just increment again the same data.
UpdateCount.objects.filter(date=day).delete()
# Memoize the addons and the UpdateCounts.
update_counts = {}
# Perf: preload all the addons once and for all.
# This builds a dict where each key (the addon guid we get from the
# hive query) has the addon_id as value.
guids_to_addon = (dict(Addon.objects.public()
.exclude(guid__isnull=True)
.exclude(type=amo.ADDON_PERSONA)
.values_list('guid', 'id')))
index = -1
for group, filepath in group_filepaths:
with codecs.open(filepath, encoding='utf8') as results_file:
for line in results_file:
index += 1
if index and (index % 1000000) == 0:
log.info('Processed %s lines' % index)
splitted = line[:-1].split(sep)
if ((group == 'app' and len(splitted) != 6) or
(group != 'app' and len(splitted) != 5)):
log.debug('Badly formatted row: %s' % line)
continue
if group == 'app':
day, addon_guid, app_id, app_ver, count, \
update_type = splitted
else:
day, addon_guid, data, count, update_type = splitted
addon_guid = addon_guid.strip()
if update_type:
update_type.strip()
# Old versions of Firefox don't provide the update type.
# All the following are "empty-like" values.
if update_type in ['0', 'NULL', 'None', '', '\N',
'%UPDATE_TYPE%']:
update_type = None
try:
count = int(count)
if update_type:
update_type = int(update_type)
except ValueError: # Badly formatted? Drop.
continue
# The following is magic that I don't understand. I've just
# been told that this is the way we can make sure a request
# is valid:
# > the lower bits for updateType (eg 112) should add to
# > 16, if not, ignore the request.
# > udpateType & 31 == 16 == valid request.
if update_type and update_type & 31 != 16:
log.debug("Update type doesn't add to 16: %s" %
update_type)
continue
# Does this addon exist?
if addon_guid and addon_guid in guids_to_addon:
addon_id = guids_to_addon[addon_guid]
else:
log.debug(u"Addon {guid} doesn't exist."
.format(guid=addon_guid.strip()))
continue
# Memoize the UpdateCount.
if addon_guid in update_counts:
uc = update_counts[addon_guid]
else:
uc = UpdateCount(date=day, addon_id=addon_id, count=0)
update_counts[addon_guid] = uc
# We can now fill the UpdateCount object.
if group == 'version':
self.update_version(uc, data, count)
elif group == 'status':
self.update_status(uc, data, count)
if data == UPDATE_COUNT_TRIGGER:
# Use this count to compute the global number
# of daily users for this addon.
uc.count += count
elif group == 'app':
self.update_app(uc, app_id, app_ver, count)
elif group == 'os':
self.update_os(uc, data, count)
elif group == 'locale':
self.update_locale(uc, data, count)
# Make sure the locales and versions fields aren't too big to fit in
# the database. Those two fields are the only ones that are not fully
# validated, so we could end up with just anything in there (spam,
# buffer overflow attempts and the like).
# We don't care that they will increase the numbers, but we do not want
# those to break the process because of a "Data too long for column
# 'version'" error.
# The database field (TEXT), can hold up to 2^16 = 64k characters.
# If the field is longer than that, we we drop the least used items
# (with the lower count) until the field fits.
for addon_guid, update_count in update_counts.iteritems():
self.trim_field(update_count.locales)
self.trim_field(update_count.versions)
# Create in bulk: this is much faster.
UpdateCount.objects.bulk_create(update_counts.values(), 100)
for udate_count in update_counts.values():
save_stats_to_file(update_count)
log.info('Processed a total of %s lines' % (index + 1))
log.debug('Total processing time: %s' % (datetime.now() - start))
# Clean up files.
for _, filepath in group_filepaths:
log.debug('Deleting {path}'.format(path=filepath))
unlink(filepath)
def update_version(self, update_count, version, count):
    """Increment the per-version tally on *update_count*."""
    # Cap the raw version string so arbitrary input can't bloat the field.
    trimmed = version[:32]
    update_count.versions = update_inc(update_count.versions, trimmed, count)
def update_status(self, update_count, status, count):
    """Increment the per-status tally on *update_count*.

    Statuses outside VALID_STATUSES are silently ignored.
    """
    if status not in VALID_STATUSES:
        return
    update_count.statuses = update_inc(update_count.statuses, status, count)
def update_app(self, update_count, app_id, app_ver, count):
    """Increment the per-application/per-version tally on *update_count*.

    The data is ignored unless *app_id* is a known application guid and
    *app_ver* at least looks like a version number.
    """
    if app_id not in VALID_APP_GUIDS:
        return
    if not re.match(APPVERSION_REGEX, app_ver):
        return
    # ``applications`` maps an application guid to a {version: count}
    # dict, eg:
    # {"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": {"10.0": 2, "21.0": 1},
    #  "some other application guid": ...}
    if update_count.applications is None:
        update_count.applications = {}
    versions = update_count.applications.get(app_id, {})
    # Store back this application's dict with the incremented version count.
    update_count.applications[app_id] = update_inc(versions, app_ver, count)
def update_os(self, update_count, os, count):
    """Increment the per-OS tally on *update_count* for known platforms."""
    # NOTE: the ``os`` parameter shadows the stdlib module name; kept for
    # caller (keyword-argument) compatibility.
    if os.lower() not in amo.PLATFORM_DICT:
        return
    update_count.oses = update_inc(update_count.oses, os, count)
def update_locale(self, update_count, locale, count):
    """Increment the per-locale tally on *update_count*.

    Locales are not checked against a fixed list (Firefox and custom
    builds ship many more than AMO knows about); we only require the
    value to look like a locale code, per LOCALE_REGEX.
    """
    normalized = locale.replace('_', '-')
    if not re.match(LOCALE_REGEX, normalized):
        return
    update_count.locales = update_inc(update_count.locales, normalized, count)
def trim_field(self, field):
    """Trim (in-place) the dict provided, keeping the most used items.

    The "locales" and "versions" fields are dicts which have the locale
    or version as the key, and the count as the value.
    """
    max_len = 2 ** 16  # Capacity of the DB TEXT column, in characters.

    def fits(d):
        """Does the json version of the dict fit in the db TEXT field?"""
        return len(json.dumps(d)) < max_len

    if fits(field):
        return
    # Rank the entries most-used first (ties keep the order produced by
    # an ascending stable sort followed by a reverse, as before), then
    # pop least-used entries off the end until the serialized form fits.
    ranked = sorted(field.items(), key=lambda item: item[1])
    ranked.reverse()
    while not fits(field):
        key, _count = ranked.pop()  # The least-used entry sits at the end.
        del field[key]
# > udpateType & 31 == 16 == valid request. # Does this addon exist? # Memoize the UpdateCount. # We can now fill the UpdateCount object. # Use this count to compute the global number # of daily users for this addon. # Make sure the locales and versions fields aren't too big to fit in # the database. Those two fields are the only ones that are not fully # validated, so we could end up with just anything in there (spam, # buffer overflow attempts and the like). # We don't care that they will increase the numbers, but we do not want # those to break the process because of a "Data too long for column # 'version'" error. # The database field (TEXT), can hold up to 2^16 = 64k characters. # If the field is longer than that, we we drop the least used items # (with the lower count) until the field fits. # Create in bulk: this is much faster. # Clean up files. Update the versions on the update_count with the given version. # Limit the version to a (random) length. Update the statuses on the update_count with the given status. # Only update if the given status is valid. Update the applications on the update_count with the given data. # Only update if app_id is a valid application guid, and if app_ver # "could be" a valid version. # Applications is a dict of dicts, eg: # {"{ec8030f7-c20a-464f-9b0e-13a3a9e97384}": # {"10.0": 2, "21.0": 1, ....}, # "some other application guid": ... # } # Now overwrite this application's dict with # incremented counts for its versions. Update the OSes on the update_count with the given OS. Update the locales on the update_count with the given locale. # Only update if the locale "could be" valid. We can't simply restrict # on locales that AMO know, because Firefox has many more, and custom # packaged versions could have even more. Thus, we only restrict on the # allowed characters, some kind of format, and the total length, and # hope to not miss out on too many locales. Trim (in-place) the dict provided, keeping the most used items. 
The "locales" and "versions" fields are dicts which have the locale or version as the key, and the count as the value. Does the json version of the field fits in the db TEXT field? # Max len of TEXT field. # Order by count (desc), for a dict like {'<locale>': <count>}. # Remove the least used (the last). # Remove this entry from the dict. | 1.834346 | 2 |
tests/functional/gtcs/test_sql_join_03.py | reevespaul/firebird-qa | 0 | 6624832 | #coding:utf-8
#
# id: functional.gtcs.sql_join_03
# title: GTCS/tests/C_SQL_JOIN_3. Ability to run query: ( A LEFT JOIN B ) INER JOIN C, plus ORDER BY with fields not from SELECT list.
# decription:
# Original test see in:
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/C_SQL_JOIN_3.script
# Original backup file that is used for this test see in:
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/test-files/atlas.gbk
# Checked on 4.0.0.1896; 3.0.6.33288; 2.5.9.27149
#
# tracker_id:
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = [('=', ''), ('[ \t]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(from_backup='gtcs_atlas.fbk', init=init_script_1)
test_script_1 = """
select 'DSQL-test' as msg, b.team_name, c.city, s.state_name
from (
cities c left join states s on s.state = c.state
)
inner join baseball_teams b on b.city = c.city
--order by b.team_name, c.city, s.state_name;
order by b.home_stadium, c.population, s.capital;
set term ^;
execute block returns(
msg varchar(10)
,team_name type of column baseball_teams.team_name
,city type of column cities.city
,state_name type of column states.state_name
) as
declare c cursor for (
select 'PSQL-test' as msg, b.team_name, c.city, s.state_name
from (
cities c left join states s on s.state = c.state
)
inner join baseball_teams b on b.city = c.city
--order by b.team_name, c.city, s.state_name
order by b.home_stadium, c.population, s.capital
);
begin
open c;
while (1=1) do
begin
fetch c into msg, team_name, city, state_name;
if (row_count = 0) then
leave;
suspend;
end
close c;
end
^
set term ;^
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
MSG TEAM_NAME CITY STATE_NAME
DSQL-test Astros Houston Texas
DSQL-test Braves Atlanta Georgia
DSQL-test Cardinals St. Louis Missouri
DSQL-test Giants San Francisco California
DSQL-test Indians Cleveland Ohio
DSQL-test White Sox Chicago Illinois
DSQL-test Dodgers Los Angeles California
DSQL-test Red Sox Boston Massachusetts
DSQL-test Mariners Seattle Washington
DSQL-test Brewers Milwaukee Wisconsin
DSQL-test Royals Kansas City Missouri
DSQL-test Padres San Diego California
DSQL-test Mets New York New York
DSQL-test Pirates Pittsburgh Pennsylvania
DSQL-test Tigers Detroit Michigan
DSQL-test Phillies Philadelphia Pennsylvania
DSQL-test Cubs Chicago Illinois
DSQL-test Yankees New York New York
MSG TEAM_NAME CITY STATE_NAME
PSQL-test Astros Houston Texas
PSQL-test Braves Atlanta Georgia
PSQL-test Cardinals St. Louis Missouri
PSQL-test Giants San Francisco California
PSQL-test Indians Cleveland Ohio
PSQL-test White Sox Chicago Illinois
PSQL-test Dodgers Los Angeles California
PSQL-test Red Sox Boston Massachusetts
PSQL-test Mariners Seattle Washington
PSQL-test Brewers Milwaukee Wisconsin
PSQL-test Royals Kansas City Missouri
PSQL-test Padres San Diego California
PSQL-test Mets New York New York
PSQL-test Pirates Pittsburgh Pennsylvania
PSQL-test Tigers Detroit Michigan
PSQL-test Phillies Philadelphia Pennsylvania
PSQL-test Cubs Chicago Illinois
PSQL-test Yankees New York New York
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
    """Run the DSQL/PSQL join script and compare its normalized stdout
    against the expected ordered result set (expected_stdout_1)."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
| #coding:utf-8
#
# id: functional.gtcs.sql_join_03
# title: GTCS/tests/C_SQL_JOIN_3. Ability to run query: ( A LEFT JOIN B ) INER JOIN C, plus ORDER BY with fields not from SELECT list.
# decription:
# Original test see in:
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/C_SQL_JOIN_3.script
# Original backup file that is used for this test see in:
# https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/test-files/atlas.gbk
# Checked on 4.0.0.1896; 3.0.6.33288; 2.5.9.27149
#
# tracker_id:
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = [('=', ''), ('[ \t]+', ' ')]
init_script_1 = """"""
db_1 = db_factory(from_backup='gtcs_atlas.fbk', init=init_script_1)
test_script_1 = """
select 'DSQL-test' as msg, b.team_name, c.city, s.state_name
from (
cities c left join states s on s.state = c.state
)
inner join baseball_teams b on b.city = c.city
--order by b.team_name, c.city, s.state_name;
order by b.home_stadium, c.population, s.capital;
set term ^;
execute block returns(
msg varchar(10)
,team_name type of column baseball_teams.team_name
,city type of column cities.city
,state_name type of column states.state_name
) as
declare c cursor for (
select 'PSQL-test' as msg, b.team_name, c.city, s.state_name
from (
cities c left join states s on s.state = c.state
)
inner join baseball_teams b on b.city = c.city
--order by b.team_name, c.city, s.state_name
order by b.home_stadium, c.population, s.capital
);
begin
open c;
while (1=1) do
begin
fetch c into msg, team_name, city, state_name;
if (row_count = 0) then
leave;
suspend;
end
close c;
end
^
set term ;^
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
MSG TEAM_NAME CITY STATE_NAME
DSQL-test Astros Houston Texas
DSQL-test Braves Atlanta Georgia
DSQL-test Cardinals St. Louis Missouri
DSQL-test Giants San Francisco California
DSQL-test Indians Cleveland Ohio
DSQL-test White Sox Chicago Illinois
DSQL-test Dodgers Los Angeles California
DSQL-test Red Sox Boston Massachusetts
DSQL-test Mariners Seattle Washington
DSQL-test Brewers Milwaukee Wisconsin
DSQL-test Royals Kansas City Missouri
DSQL-test Padres San Diego California
DSQL-test Mets New York New York
DSQL-test Pirates Pittsburgh Pennsylvania
DSQL-test Tigers Detroit Michigan
DSQL-test Phillies Philadelphia Pennsylvania
DSQL-test Cubs Chicago Illinois
DSQL-test Yankees New York New York
MSG TEAM_NAME CITY STATE_NAME
PSQL-test Astros Houston Texas
PSQL-test Braves Atlanta Georgia
PSQL-test Cardinals St. Louis Missouri
PSQL-test Giants San Francisco California
PSQL-test Indians Cleveland Ohio
PSQL-test White Sox Chicago Illinois
PSQL-test Dodgers Los Angeles California
PSQL-test Red Sox Boston Massachusetts
PSQL-test Mariners Seattle Washington
PSQL-test Brewers Milwaukee Wisconsin
PSQL-test Royals Kansas City Missouri
PSQL-test Padres San Diego California
PSQL-test Mets New York New York
PSQL-test Pirates Pittsburgh Pennsylvania
PSQL-test Tigers Detroit Michigan
PSQL-test Phillies Philadelphia Pennsylvania
PSQL-test Cubs Chicago Illinois
PSQL-test Yankees New York New York
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
| en | 0.630471 | #coding:utf-8 # # id: functional.gtcs.sql_join_03 # title: GTCS/tests/C_SQL_JOIN_3. Ability to run query: ( A LEFT JOIN B ) INER JOIN C, plus ORDER BY with fields not from SELECT list. # decription: # Original test see in: # https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/tests/C_SQL_JOIN_3.script # Original backup file that is used for this test see in: # https://github.com/FirebirdSQL/fbtcs/blob/master/GTCS/test-files/atlas.gbk # Checked on 4.0.0.1896; 3.0.6.33288; 2.5.9.27149 # # tracker_id: # min_versions: ['2.5.0'] # versions: 2.5 # qmid: None # version: 2.5 # resources: None select 'DSQL-test' as msg, b.team_name, c.city, s.state_name from ( cities c left join states s on s.state = c.state ) inner join baseball_teams b on b.city = c.city --order by b.team_name, c.city, s.state_name; order by b.home_stadium, c.population, s.capital; set term ^; execute block returns( msg varchar(10) ,team_name type of column baseball_teams.team_name ,city type of column cities.city ,state_name type of column states.state_name ) as declare c cursor for ( select 'PSQL-test' as msg, b.team_name, c.city, s.state_name from ( cities c left join states s on s.state = c.state ) inner join baseball_teams b on b.city = c.city --order by b.team_name, c.city, s.state_name order by b.home_stadium, c.population, s.capital ); begin open c; while (1=1) do begin fetch c into msg, team_name, city, state_name; if (row_count = 0) then leave; suspend; end close c; end ^ set term ;^ MSG TEAM_NAME CITY STATE_NAME DSQL-test Astros Houston Texas DSQL-test Braves Atlanta Georgia DSQL-test Cardinals St. 
Louis Missouri DSQL-test Giants San Francisco California DSQL-test Indians Cleveland Ohio DSQL-test White Sox Chicago Illinois DSQL-test Dodgers Los Angeles California DSQL-test Red Sox Boston Massachusetts DSQL-test Mariners Seattle Washington DSQL-test Brewers Milwaukee Wisconsin DSQL-test Royals Kansas City Missouri DSQL-test Padres San Diego California DSQL-test Mets New York New York DSQL-test Pirates Pittsburgh Pennsylvania DSQL-test Tigers Detroit Michigan DSQL-test Phillies Philadelphia Pennsylvania DSQL-test Cubs Chicago Illinois DSQL-test Yankees New York New York MSG TEAM_NAME CITY STATE_NAME PSQL-test Astros Houston Texas PSQL-test Braves Atlanta Georgia PSQL-test Cardinals St. Louis Missouri PSQL-test Giants San Francisco California PSQL-test Indians Cleveland Ohio PSQL-test White Sox Chicago Illinois PSQL-test Dodgers Los Angeles California PSQL-test Red Sox Boston Massachusetts PSQL-test Mariners Seattle Washington PSQL-test Brewers Milwaukee Wisconsin PSQL-test Royals Kansas City Missouri PSQL-test Padres San Diego California PSQL-test Mets New York New York PSQL-test Pirates Pittsburgh Pennsylvania PSQL-test Tigers Detroit Michigan PSQL-test Phillies Philadelphia Pennsylvania PSQL-test Cubs Chicago Illinois PSQL-test Yankees New York New York | 1.905919 | 2 |
unk_replacer/bin/sort_vocab.py | hitochan777/unk-replacer | 3 | 6624833 | import json
import argparse
from collections import Counter
from operator import itemgetter
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main(args=None):
    """Sort a two-sided JSON vocabulary by token frequency in a corpus.

    Reads ``[source_vocab, target_vocab]`` from the JSON vocab file,
    counts how often each in-vocabulary token occurs in the source and
    target training files, and writes ``[sorted_source_vocab,
    sorted_target_vocab]`` (most frequent first) to the output path.

    Every vocabulary entry is expected to occur at least once in the
    corresponding training file; this is enforced with assertions.

    :param args: optional argv list (for testing); defaults to sys.argv[1:].
    """
    parser = argparse.ArgumentParser(description='Get sorted vocab')
    parser.add_argument("vocab", type=str, help="JSON format vocab file")
    parser.add_argument("source", type=str, help="Source side training file")
    parser.add_argument("target", type=str, help="Target side training file")
    parser.add_argument("output", type=str, help="Path to output")
    options = parser.parse_args(args)
    logger = logging.getLogger(__name__)
    with open(options.vocab) as vocab_stream, open(options.source) as source_lines, open(options.target) as target_lines, open(options.output, "w") as output:
        logger.info("Loading vocab...")
        vocab = json.load(vocab_stream)
        src_voc = set(vocab[0])
        tgt_voc = set(vocab[1])
        logger.info("Counting source side tokens ...")
        src_voc_counter = Counter(token for line in source_lines
                                  for token in line.strip().split(" ")
                                  if token in src_voc)
        logger.info("Counting target side tokens ...")
        tgt_voc_counter = Counter(token for line in target_lines
                                  for token in line.strip().split(" ")
                                  if token in tgt_voc)
        logger.info("Getting the sorted vocab...")
        # Use the already-imported itemgetter instead of a lambda to pull
        # the token out of each (token, count) pair.
        sorted_src_voc = list(map(itemgetter(0), src_voc_counter.most_common()))
        sorted_tgt_voc = list(map(itemgetter(0), tgt_voc_counter.most_common()))
        # Every vocab entry must have been observed at least once,
        # otherwise tokens would be silently dropped from the output.
        assert len(sorted_src_voc) == len(src_voc), (len(sorted_src_voc), len(src_voc))
        assert len(sorted_tgt_voc) == len(tgt_voc), (len(sorted_tgt_voc), len(tgt_voc))
        assert set(sorted_src_voc) == src_voc
        assert set(sorted_tgt_voc) == tgt_voc
        logger.info("Writing sorted vocab...")
        json.dump([sorted_src_voc, sorted_tgt_voc], output)
if __name__ == "__main__":
main()
| import json
import argparse
from collections import Counter
from operator import itemgetter
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main(args=None):
parser = argparse.ArgumentParser(description='Get sorted vocab')
parser.add_argument("vocab", type=str, help="JSON format vocab file")
parser.add_argument("source", type=str, help="Source side training file")
parser.add_argument("target", type=str, help="Target side training file")
parser.add_argument("output", type=str, help="Path to output")
options = parser.parse_args(args)
with open(options.vocab) as vocab_stream, open(options.source) as source_lines, open(options.target) as target_lines, open(options.output, "w") as output:
logger.info("Loading vocab...")
vocab = json.load(vocab_stream)
src_voc = set(vocab[0])
tgt_voc = set(vocab[1])
logger.info("Counting source side tokens ...")
src_voc_counter = Counter(token for line in source_lines for token in line.strip().split(" ") if token in src_voc)
logger.info("Counting target side tokens ...")
tgt_voc_counter = Counter(token for line in target_lines for token in line.strip().split(" ") if token in tgt_voc)
logger.info("Getting the sorted vocab...")
sorted_src_voc = list(map(lambda x: x[0], src_voc_counter.most_common()))
sorted_tgt_voc = list(map(lambda x: x[0], tgt_voc_counter.most_common()))
assert len(sorted_src_voc) == len(src_voc), (len(sorted_src_voc), len(src_voc))
assert len(sorted_tgt_voc) == len(tgt_voc), (len(sorted_tgt_voc), len(tgt_voc))
assert set(sorted_src_voc) == src_voc
assert set(sorted_tgt_voc) == tgt_voc
logger.info("Writing sorted vocab...")
json.dump([sorted_src_voc, sorted_tgt_voc], output)
if __name__ == "__main__":
main()
| none | 1 | 2.949509 | 3 | |
toolchain/riscv/MSYS/python/Tools/scripts/fixdiv.py | zhiqiang-hu/bl_iot_sdk | 207 | 6624834 | <filename>toolchain/riscv/MSYS/python/Tools/scripts/fixdiv.py
#! /usr/bin/env python3
"""fixdiv - tool to fix division operators.
To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'.
This runs the script `yourscript.py' while writing warning messages
about all uses of the classic division operator to the file
`warnings'. The warnings look like this:
<file>:<line>: DeprecationWarning: classic <type> division
The warnings are written to stderr, so you must use `2>' for the I/O
redirect. I know of no way to redirect stderr on Windows in a DOS
box, so you will have to modify the script to set sys.stderr to some
kind of log file if you want to do this on Windows.
The warnings are not limited to the script; modules imported by the
script may also trigger warnings. In fact a useful technique is to
write a test script specifically intended to exercise all code in a
particular module or set of modules.
Then run `python fixdiv.py warnings'. This first reads the warnings,
looking for classic division warnings, and sorts them by file name and
line number. Then, for each file that received at least one warning,
it parses the file and tries to match the warnings up to the division
operators found in the source code. If it is successful, it writes
its findings to stdout, preceded by a line of dashes and a line of the
form:
Index: <file>
If the only findings found are suggestions to change a / operator into
a // operator, the output is acceptable input for the Unix 'patch'
program.
Here are the possible messages on stdout (N stands for a line number):
- A plain-diff-style change ('NcN', a line marked by '<', a line
containing '---', and a line marked by '>'):
A / operator was found that should be changed to //. This is the
recommendation when only int and/or long arguments were seen.
- 'True division / operator at line N' and a line marked by '=':
A / operator was found that can remain unchanged. This is the
recommendation when only float and/or complex arguments were seen.
- 'Ambiguous / operator (..., ...) at line N', line marked by '?':
A / operator was found for which int or long as well as float or
complex arguments were seen. This is highly unlikely; if it occurs,
you may have to restructure the code to keep the classic semantics,
or maybe you don't care about the classic semantics.
- 'No conclusive evidence on line N', line marked by '*':
A / operator was found for which no warnings were seen. This could
be code that was never executed, or code that was only executed
with user-defined objects as arguments. You will have to
investigate further. Note that // can be overloaded separately from
/, using __floordiv__. True division can also be separately
overloaded, using __truediv__. Classic division should be the same
as either of those. (XXX should I add a warning for division on
user-defined objects, to disambiguate this case from code that was
never executed?)
- 'Phantom ... warnings for line N', line marked by '*':
A warning was seen for a line not containing a / operator. The most
likely cause is a warning about code executed by 'exec' or eval()
(see note below), or an indirect invocation of the / operator, for
example via the div() function in the operator module. It could
also be caused by a change to the file between the time the test
script was run to collect warnings and the time fixdiv was run.
- 'More than one / operator in line N'; or
'More than one / operator per statement in lines N-N':
The scanner found more than one / operator on a single line, or in a
statement split across multiple lines. Because the warnings
framework doesn't (and can't) show the offset within the line, and
the code generator doesn't always give the correct line number for
operations in a multi-line statement, we can't be sure whether all
operators in the statement were executed. To be on the safe side,
by default a warning is issued about this case. In practice, these
cases are usually safe, and the -m option suppresses these warning.
- 'Can't find the / operator in line N', line marked by '*':
This really shouldn't happen. It means that the tokenize module
reported a '/' operator but the line it returns didn't contain a '/'
character at the indicated position.
- 'Bad warning for line N: XYZ', line marked by '*':
This really shouldn't happen. It means that a 'classic XYZ
division' warning was read with XYZ being something other than
'int', 'long', 'float', or 'complex'.
Notes:
- The augmented assignment operator /= is handled the same way as the
/ operator.
- This tool never looks at the // operator; no warnings are ever
generated for use of this operator.
- This tool never looks at the / operator when a future division
statement is in effect; no warnings are generated in this case, and
because the tool only looks at files for which at least one classic
division warning was seen, it will never look at files containing a
future division statement.
- Warnings may be issued for code not read from a file, but executed
using the exec() or eval() functions. These may have
<string> in the filename position, in which case the fixdiv script
will attempt and fail to open a file named '<string>' and issue a
warning about this failure; or these may be reported as 'Phantom'
warnings (see above). You're on your own to deal with these. You
could make all recommended changes and add a future division
statement to all affected files, and then re-run the test script; it
should not issue any warnings. If there are any, and you have a
hard time tracking down where they are generated, you can use the
-Werror option to force an error instead of a first warning,
generating a traceback.
- The tool should be run from the same directory as that from which
the original script was run, otherwise it won't be able to open
files given by relative pathnames.
"""
import sys
import getopt
import re
import tokenize
multi_ok = 0
def main():
    """Drive the fixdiv tool.

    Parses command-line options, reads the warnings file, and runs
    process() on every file that produced at least one classic-division
    warning.  Returns a shell-style exit status (falsy for success).
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hm")
    except getopt.error as msg:
        usage(msg)
        return 2
    for o, a in opts:
        if o == "-h":
            print(__doc__)
            return
        if o == "-m":
            # -m suppresses warnings about statements containing more
            # than one / operator (see the module docstring).
            global multi_ok
            multi_ok = 1
    if not args:
        usage("at least one file argument is required")
        return 2
    if args[1:]:
        # Bug fix: write() takes a single string; the original passed
        # sys.argv[0] as a second argument, raising TypeError.
        sys.stderr.write("%s: extra file arguments ignored\n" % sys.argv[0])
    warnings = readwarnings(args[0])
    if warnings is None:
        return 1
    files = sorted(warnings)
    if not files:
        print("No classic division warnings read from", args[0])
        return
    exit = None
    for filename in files:
        x = process(filename, warnings[filename])
        exit = exit or x
    return exit
def usage(msg):
    """Write an error message plus usage/help pointers to stderr."""
    prog = sys.argv[0]
    sys.stderr.write("%s: %s\n" % (prog, msg))
    sys.stderr.write("Usage: %s [-m] warnings\n" % prog)
    sys.stderr.write("Try `%s -h' for more information.\n" % prog)
PATTERN = (r"^(.+?):(\d+): DeprecationWarning: "
           r"classic (int|long|float|complex) division$")

def readwarnings(warningsfile):
    """Read classic-division warnings from *warningsfile*.

    Returns a dict mapping each source filename to a list of
    (lineno, what) pairs, where what is 'int', 'long', 'float' or
    'complex'.  Returns None if the warnings file can't be opened.
    """
    prog = re.compile(PATTERN)
    try:
        f = open(warningsfile)
    except IOError as msg:
        sys.stderr.write("can't open: %s\n" % msg)
        return
    warnings = {}
    with f:
        for line in f:
            m = prog.match(line)
            if m is None:
                # Lines mentioning "division" that don't match the
                # expected pattern may still be relevant -- flag them.
                if line.find("division") >= 0:
                    sys.stderr.write("Warning: ignored input " + line)
                continue
            filename, lineno, what = m.groups()
            warnings.setdefault(filename, []).append(
                (int(lineno), sys.intern(what)))
    return warnings
def process(filename, list):
    """Match classic-division warnings for *filename* against its source.

    *list* is a list of (lineno, what) pairs, where what is 'int',
    'long', 'float' or 'complex'.  The file is tokenized one logical
    statement at a time; the / and /= operators found in each statement
    are compared with the warnings falling in its line range, and a
    diff-style recommendation is printed.  Returns 1 if the file could
    not be opened, else None.
    """
    print("-"*70)
    assert list # if this fails, readwarnings() is broken
    try:
        fp = open(filename)
    except IOError as msg:
        sys.stderr.write("can't open: %s\n" % msg)
        return 1
    print("Index:", filename)
    f = FileContext(fp)
    list.sort()
    index = 0 # list[:index] has been processed, list[index:] is still to do
    g = tokenize.generate_tokens(f.readline)
    while 1:
        # One iteration per logical statement (up to a NEWLINE token).
        startlineno, endlineno, slashes = lineinfo = scanline(g)
        if startlineno is None:
            break
        assert startlineno <= endlineno is not None
        # Warnings for lines before this statement match no statement at
        # all -- report them as phantoms.
        orphans = []
        while index < len(list) and list[index][0] < startlineno:
            orphans.append(list[index])
            index += 1
        if orphans:
            reportphantomwarnings(orphans, f)
        # Collect the warnings that fall inside this statement's range.
        warnings = []
        while index < len(list) and list[index][0] <= endlineno:
            warnings.append(list[index])
            index += 1
        if not slashes and not warnings:
            pass
        elif slashes and not warnings:
            report(slashes, "No conclusive evidence")
        elif warnings and not slashes:
            reportphantomwarnings(warnings, f)
        else:
            if len(slashes) > 1:
                if not multi_ok:
                    # Several / operators in one statement: we can't tell
                    # which one(s) triggered the warnings (the -m option
                    # suppresses this notice).
                    rows = []
                    lastrow = None
                    for (row, col), line in slashes:
                        if row == lastrow:
                            continue
                        rows.append(row)
                        lastrow = row
                    assert rows
                    if len(rows) == 1:
                        print("*** More than one / operator in line", rows[0])
                    else:
                        print("*** More than one / operator per statement", end=' ')
                        print("in lines %d-%d" % (rows[0], rows[-1]))
            # Partition the warnings by operand category.
            intlong = []
            floatcomplex = []
            bad = []
            for lineno, what in warnings:
                if what in ("int", "long"):
                    intlong.append(what)
                elif what in ("float", "complex"):
                    floatcomplex.append(what)
                else:
                    bad.append(what)
            lastrow = None
            for (row, col), line in slashes:
                if row == lastrow:
                    continue
                lastrow = row
                line = chop(line)
                if line[col:col+1] != "/":
                    print("*** Can't find the / operator in line %d:" % row)
                    print("*", line)
                    continue
                if bad:
                    print("*** Bad warning for line %d:" % row, bad)
                    print("*", line)
                elif intlong and not floatcomplex:
                    # Only int/long arguments seen: recommend switching
                    # to // via a plain-diff-style change.
                    print("%dc%d" % (row, row))
                    print("<", line)
                    print("---")
                    print(">", line[:col] + "/" + line[col:])
                elif floatcomplex and not intlong:
                    # Only float/complex seen: / can stay (true division).
                    print("True division / operator at line %d:" % row)
                    print("=", line)
                elif intlong and floatcomplex:
                    print("*** Ambiguous / operator (%s, %s) at line %d:" % (
                        "|".join(intlong), "|".join(floatcomplex), row))
                    print("?", line)
    fp.close()
def reportphantomwarnings(warnings, f):
    """Report warnings that matched no / operator, grouped per line.

    Consecutive warnings for the same line are merged into one
    "Phantom int/long warnings for line N" style message, as described
    in the module docstring.
    """
    blocks = []
    lastrow = None
    lastblock = None
    for row, what in warnings:
        if row != lastrow:
            lastblock = [row]
            blocks.append(lastblock)
            # Bug fix: lastrow was never updated, so warnings for the
            # same line were never actually grouped together.
            lastrow = row
        lastblock.append(what)
    for block in blocks:
        row = block[0]
        whats = "/".join(block[1:])
        print("*** Phantom %s warnings for line %d:" % (whats, row))
        f.report(row, mark="*")
def report(slashes, message):
    """Print *message* once for each distinct row appearing in *slashes*."""
    seen_row = None
    for (row, col), line in slashes:
        if row == seen_row:
            continue
        print("*** %s on line %d:" % (message, row))
        print("*", chop(line))
        seen_row = row
class FileContext:
    """A 1-based, line-oriented view of a file with bounded lookahead.

    Lines already consumed by readline() are kept in ``buffer`` and up
    to ``window`` upcoming lines are kept in ``lookahead``, so
    __getitem__ can serve any line number currently in that range.
    """

    def __init__(self, fp, window=5, lineno=1):
        self.fp = fp
        # Bug fix: the constructor previously ignored its window/lineno
        # arguments and hard-coded 5 and 1.
        self.window = window
        self.lineno = lineno
        self.eoflookahead = 0
        self.lookahead = []
        self.buffer = []

    def fill(self):
        """Top up the lookahead list to ``window`` lines, or until EOF."""
        while len(self.lookahead) < self.window and not self.eoflookahead:
            line = self.fp.readline()
            if not line:
                self.eoflookahead = 1
                break
            self.lookahead.append(line)

    def readline(self):
        """Return the next line (advancing lineno), or "" at EOF."""
        self.fill()
        if not self.lookahead:
            return ""
        line = self.lookahead.pop(0)
        self.buffer.append(line)
        self.lineno += 1
        return line

    def __getitem__(self, index):
        """Return line *index*; raise KeyError if outside the window."""
        self.fill()
        bufstart = self.lineno - len(self.buffer)
        lookend = self.lineno + len(self.lookahead)
        if bufstart <= index < self.lineno:
            return self.buffer[index - bufstart]
        if self.lineno <= index < lookend:
            return self.lookahead[index - self.lineno]
        raise KeyError

    def report(self, first, last=None, mark="*"):
        """Print lines first..last (inclusive), each prefixed with *mark*."""
        if last is None:
            last = first
        for i in range(first, last + 1):
            try:
                # Bug fix: the loop previously looked up self[first] on
                # every iteration, printing the first line repeatedly.
                line = self[i]
            except KeyError:
                line = "<missing line>"
            print(mark, chop(line))
def scanline(g):
    """Consume one logical line of tokens from the generator *g*.

    Returns (startlineno, endlineno, slashes) where slashes is a list
    of ((row, col), line) pairs for every / or /= operator seen.  Both
    line numbers are None if the token stream is already exhausted.
    """
    slashes = []
    first = None
    last = None
    for tok_type, tok_string, start, end, line in g:
        last = end[0]
        if first is None:
            first = last
        if tok_string in ("/", "/="):
            slashes.append((start, line))
        if tok_type == tokenize.NEWLINE:
            break
    return first, last, slashes
def chop(line):
    """Return *line* without its trailing newline, if any."""
    return line[:-1] if line.endswith("\n") else line
# Script entry point: exit status propagated from main().
if __name__ == "__main__":
    sys.exit(main())
| <filename>toolchain/riscv/MSYS/python/Tools/scripts/fixdiv.py
#! /usr/bin/env python3
"""fixdiv - tool to fix division operators.
To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'.
This runs the script `yourscript.py' while writing warning messages
about all uses of the classic division operator to the file
`warnings'. The warnings look like this:
<file>:<line>: DeprecationWarning: classic <type> division
The warnings are written to stderr, so you must use `2>' for the I/O
redirect. I know of no way to redirect stderr on Windows in a DOS
box, so you will have to modify the script to set sys.stderr to some
kind of log file if you want to do this on Windows.
The warnings are not limited to the script; modules imported by the
script may also trigger warnings. In fact a useful technique is to
write a test script specifically intended to exercise all code in a
particular module or set of modules.
Then run `python fixdiv.py warnings'. This first reads the warnings,
looking for classic division warnings, and sorts them by file name and
line number. Then, for each file that received at least one warning,
it parses the file and tries to match the warnings up to the division
operators found in the source code. If it is successful, it writes
its findings to stdout, preceded by a line of dashes and a line of the
form:
Index: <file>
If the only findings found are suggestions to change a / operator into
a // operator, the output is acceptable input for the Unix 'patch'
program.
Here are the possible messages on stdout (N stands for a line number):
- A plain-diff-style change ('NcN', a line marked by '<', a line
containing '---', and a line marked by '>'):
A / operator was found that should be changed to //. This is the
recommendation when only int and/or long arguments were seen.
- 'True division / operator at line N' and a line marked by '=':
A / operator was found that can remain unchanged. This is the
recommendation when only float and/or complex arguments were seen.
- 'Ambiguous / operator (..., ...) at line N', line marked by '?':
A / operator was found for which int or long as well as float or
complex arguments were seen. This is highly unlikely; if it occurs,
you may have to restructure the code to keep the classic semantics,
or maybe you don't care about the classic semantics.
- 'No conclusive evidence on line N', line marked by '*':
A / operator was found for which no warnings were seen. This could
be code that was never executed, or code that was only executed
with user-defined objects as arguments. You will have to
investigate further. Note that // can be overloaded separately from
/, using __floordiv__. True division can also be separately
overloaded, using __truediv__. Classic division should be the same
as either of those. (XXX should I add a warning for division on
user-defined objects, to disambiguate this case from code that was
never executed?)
- 'Phantom ... warnings for line N', line marked by '*':
A warning was seen for a line not containing a / operator. The most
likely cause is a warning about code executed by 'exec' or eval()
(see note below), or an indirect invocation of the / operator, for
example via the div() function in the operator module. It could
also be caused by a change to the file between the time the test
script was run to collect warnings and the time fixdiv was run.
- 'More than one / operator in line N'; or
'More than one / operator per statement in lines N-N':
The scanner found more than one / operator on a single line, or in a
statement split across multiple lines. Because the warnings
framework doesn't (and can't) show the offset within the line, and
the code generator doesn't always give the correct line number for
operations in a multi-line statement, we can't be sure whether all
operators in the statement were executed. To be on the safe side,
by default a warning is issued about this case. In practice, these
cases are usually safe, and the -m option suppresses these warning.
- 'Can't find the / operator in line N', line marked by '*':
This really shouldn't happen. It means that the tokenize module
reported a '/' operator but the line it returns didn't contain a '/'
character at the indicated position.
- 'Bad warning for line N: XYZ', line marked by '*':
This really shouldn't happen. It means that a 'classic XYZ
division' warning was read with XYZ being something other than
'int', 'long', 'float', or 'complex'.
Notes:
- The augmented assignment operator /= is handled the same way as the
/ operator.
- This tool never looks at the // operator; no warnings are ever
generated for use of this operator.
- This tool never looks at the / operator when a future division
statement is in effect; no warnings are generated in this case, and
because the tool only looks at files for which at least one classic
division warning was seen, it will never look at files containing a
future division statement.
- Warnings may be issued for code not read from a file, but executed
using the exec() or eval() functions. These may have
<string> in the filename position, in which case the fixdiv script
will attempt and fail to open a file named '<string>' and issue a
warning about this failure; or these may be reported as 'Phantom'
warnings (see above). You're on your own to deal with these. You
could make all recommended changes and add a future division
statement to all affected files, and then re-run the test script; it
should not issue any warnings. If there are any, and you have a
hard time tracking down where they are generated, you can use the
-Werror option to force an error instead of a first warning,
generating a traceback.
- The tool should be run from the same directory as that from which
the original script was run, otherwise it won't be able to open
files given by relative pathnames.
"""
import sys
import getopt
import re
import tokenize
multi_ok = 0  # set by the -m option: suppress "more than one / operator" warnings
def main():
    """Command-line entry point.

    Parses options, reads the warnings file named on the command line,
    and processes every source file that produced at least one classic
    division warning.  Returns a shell-style exit status (None on full
    success, non-zero on errors).
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hm")
    except getopt.error as msg:
        usage(msg)
        return 2
    for o, _ in opts:
        if o == "-h":
            print(__doc__)
            return
        if o == "-m":
            global multi_ok
            multi_ok = 1
    if not args:
        usage("at least one file argument is required")
        return 2
    if args[1:]:
        # Bug fix: file.write() takes a single string, not printf-style
        # varargs -- the message must be %-formatted before the call.
        sys.stderr.write("%s: extra file arguments ignored\n" % sys.argv[0])
    warnings = readwarnings(args[0])
    if warnings is None:
        return 1
    files = sorted(warnings)
    if not files:
        print("No classic division warnings read from", args[0])
        return
    status = None  # renamed from `exit`, which shadowed the builtin
    for filename in files:
        x = process(filename, warnings[filename])
        status = status or x
    return status
def usage(msg):
    """Print *msg* plus a short usage synopsis to standard error."""
    prog = sys.argv[0]
    for text in (
        "%s: %s\n" % (prog, msg),
        "Usage: %s [-m] warnings\n" % prog,
        "Try `%s -h' for more information.\n" % prog,
    ):
        sys.stderr.write(text)
# One warning line as emitted by -Qwarnall: "<file>:<line>: ... classic <type> division"
PATTERN = (r"^(.+?):(\d+): DeprecationWarning: "
           r"classic (int|long|float|complex) division$")
def readwarnings(warningsfile):
    """Parse *warningsfile* into ``{filename: [(lineno, what), ...]}``.

    Only classic-division DeprecationWarnings matching PATTERN are
    collected; other lines mentioning "division" are echoed to stderr
    and skipped.  Returns None when the file cannot be opened.
    """
    prog = re.compile(PATTERN)
    try:
        f = open(warningsfile)
    except IOError as msg:
        sys.stderr.write("can't open: %s\n" % msg)
        return
    warnings = {}
    # `with` guarantees the file is closed even if a parse step raises
    # (the original used a manual readline loop plus f.close()).
    with f:
        for line in f:
            m = prog.match(line)
            if not m:
                if line.find("division") >= 0:
                    sys.stderr.write("Warning: ignored input " + line)
                continue
            filename, lineno, what = m.groups()
            # setdefault replaces the original's local named `list`,
            # which shadowed the builtin.
            warnings.setdefault(filename, []).append(
                (int(lineno), sys.intern(what)))
    return warnings
def process(filename, list):
    """Match the warnings for one file against the '/' operators in it.

    *list* is the ``[(lineno, what), ...]`` list built by readwarnings().
    Prints a report for each statement (plain-diff style where a '/'
    should become '//').  Returns 1 when the file could not be opened,
    otherwise None.
    """
    print("-"*70)
    assert list  # if this fails, readwarnings() is broken
    try:
        fp = open(filename)
    except IOError as msg:
        sys.stderr.write("can't open: %s\n" % msg)
        return 1
    print("Index:", filename)
    f = FileContext(fp)
    list.sort()
    index = 0  # list[:index] has been processed, list[index:] is still to do
    g = tokenize.generate_tokens(f.readline)
    while 1:
        # One iteration per logical source line / statement.  (The
        # original also bound an unused `lineinfo` alias here.)
        startlineno, endlineno, slashes = scanline(g)
        if startlineno is None:
            break
        # Chained comparison: startlineno <= endlineno AND endlineno is
        # not None.
        assert startlineno <= endlineno is not None
        # Warnings strictly before this statement match no operator.
        orphans = []
        while index < len(list) and list[index][0] < startlineno:
            orphans.append(list[index])
            index += 1
        if orphans:
            reportphantomwarnings(orphans, f)
        warnings = []
        while index < len(list) and list[index][0] <= endlineno:
            warnings.append(list[index])
            index += 1
        if not slashes and not warnings:
            pass
        elif slashes and not warnings:
            report(slashes, "No conclusive evidence")
        elif warnings and not slashes:
            reportphantomwarnings(warnings, f)
        else:
            if len(slashes) > 1:
                if not multi_ok:
                    # Collect the distinct rows covered by this statement.
                    rows = []
                    lastrow = None
                    for (row, col), line in slashes:
                        if row == lastrow:
                            continue
                        rows.append(row)
                        lastrow = row
                    assert rows
                    if len(rows) == 1:
                        print("*** More than one / operator in line", rows[0])
                    else:
                        print("*** More than one / operator per statement", end=' ')
                        print("in lines %d-%d" % (rows[0], rows[-1]))
            # Partition the warnings by operand category.
            intlong = []
            floatcomplex = []
            bad = []
            for lineno, what in warnings:
                if what in ("int", "long"):
                    intlong.append(what)
                elif what in ("float", "complex"):
                    floatcomplex.append(what)
                else:
                    bad.append(what)
            lastrow = None
            for (row, col), line in slashes:
                if row == lastrow:
                    continue
                lastrow = row
                line = chop(line)
                if line[col:col+1] != "/":
                    print("*** Can't find the / operator in line %d:" % row)
                    print("*", line)
                    continue
                if bad:
                    print("*** Bad warning for line %d:" % row, bad)
                    print("*", line)
                elif intlong and not floatcomplex:
                    # Only int/long seen: recommend / -> // as a plain diff.
                    print("%dc%d" % (row, row))
                    print("<", line)
                    print("---")
                    print(">", line[:col] + "/" + line[col:])
                elif floatcomplex and not intlong:
                    print("True division / operator at line %d:" % row)
                    print("=", line)
                elif intlong and floatcomplex:
                    print("*** Ambiguous / operator (%s, %s) at line %d:" % (
                        "|".join(intlong), "|".join(floatcomplex), row))
                    print("?", line)
    fp.close()
def reportphantomwarnings(warnings, f):
    """Report warnings whose line contains no '/' operator.

    Warnings are grouped by row so several warnings for the same line
    produce a single "Phantom ..." message; the offending source line is
    shown through f.report().
    """
    blocks = []
    lastrow = None
    lastblock = None
    for row, what in warnings:
        if row != lastrow:
            lastblock = [row]
            blocks.append(lastblock)
            # Bug fix: remember the row we just opened a block for;
            # without this, every warning started a new one-entry block
            # and same-row warnings were never grouped.
            lastrow = row
        lastblock.append(what)
    for block in blocks:
        row = block[0]
        whats = "/".join(block[1:])
        print("*** Phantom %s warnings for line %d:" % (whats, row))
        f.report(row, mark="*")
def report(slashes, message):
    """Print *message* once per distinct source row in *slashes*.

    Consecutive entries for the same row are collapsed; each reported
    row shows its (chopped) source line marked with '*'.
    """
    previous_row = None
    for (row, _col), line in slashes:
        if row == previous_row:
            continue
        previous_row = row
        print("*** %s on line %d:" % (message, row))
        print("*", chop(line))
class FileContext:
    """Sliding window over a text file.

    Lines are consumed through readline() (suitable as a tokenize input
    callback) while already-read lines are kept in *buffer* and up to
    *window* lines of lookahead are cached, so report() can display
    source lines by absolute line number.
    """
    def __init__(self, fp, window=5, lineno=1):
        self.fp = fp
        # Bug fix: honor the constructor arguments instead of the
        # hard-coded literals 5 and 1 (defaults are unchanged, so
        # existing callers behave identically).
        self.window = window
        self.lineno = lineno
        self.eoflookahead = 0
        self.lookahead = []
        self.buffer = []
    def fill(self):
        """Top up the lookahead cache to *window* lines (or until EOF)."""
        while len(self.lookahead) < self.window and not self.eoflookahead:
            line = self.fp.readline()
            if not line:
                self.eoflookahead = 1
                break
            self.lookahead.append(line)
    def readline(self):
        """Return the next line, moving it into the history buffer."""
        self.fill()
        if not self.lookahead:
            return ""
        line = self.lookahead.pop(0)
        self.buffer.append(line)
        self.lineno += 1
        return line
    def __getitem__(self, index):
        """Return the line with absolute number *index*, if still cached."""
        self.fill()
        bufstart = self.lineno - len(self.buffer)
        lookend = self.lineno + len(self.lookahead)
        if bufstart <= index < self.lineno:
            return self.buffer[index - bufstart]
        if self.lineno <= index < lookend:
            return self.lookahead[index - self.lineno]
        raise KeyError
    def report(self, first, last=None, mark="*"):
        """Print lines *first*..*last*, each prefixed with *mark*."""
        if last is None:
            last = first
        for i in range(first, last+1):
            try:
                # Bug fix: look up line *i*, not *first*, so multi-line
                # reports show each line instead of repeating the first.
                line = self[i]
            except KeyError:
                line = "<missing line>"
            print(mark, chop(line))
def scanline(g):
    """Consume tokens for one logical source line from tokenizer *g*.

    Returns (startlineno, endlineno, slashes) where *slashes* collects a
    ((row, col), line) pair for every '/' or '/=' token seen.  Stops at
    the first NEWLINE token; returns (None, None, []) at end of input.
    """
    slashes = []
    first_row = None
    last_row = None
    for tok_type, tok_str, tok_start, tok_end, source_line in g:
        last_row = tok_end[0]
        if first_row is None:
            first_row = last_row
        if tok_str in ("/", "/="):
            slashes.append((tok_start, source_line))
        if tok_type == tokenize.NEWLINE:
            break
    return first_row, last_row, slashes
def chop(line):
    """Return *line* without its trailing newline, if any."""
    return line[:-1] if line.endswith("\n") else line
# Script entry point: exit status propagated from main().
if __name__ == "__main__":
    sys.exit(main())
| en | 0.94327 | #! /usr/bin/env python3 fixdiv - tool to fix division operators.
To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'.
This runs the script `yourscript.py' while writing warning messages
about all uses of the classic division operator to the file
`warnings'. The warnings look like this:
<file>:<line>: DeprecationWarning: classic <type> division
The warnings are written to stderr, so you must use `2>' for the I/O
redirect. I know of no way to redirect stderr on Windows in a DOS
box, so you will have to modify the script to set sys.stderr to some
kind of log file if you want to do this on Windows.
The warnings are not limited to the script; modules imported by the
script may also trigger warnings. In fact a useful technique is to
write a test script specifically intended to exercise all code in a
particular module or set of modules.
Then run `python fixdiv.py warnings'. This first reads the warnings,
looking for classic division warnings, and sorts them by file name and
line number. Then, for each file that received at least one warning,
it parses the file and tries to match the warnings up to the division
operators found in the source code. If it is successful, it writes
its findings to stdout, preceded by a line of dashes and a line of the
form:
Index: <file>
If the only findings found are suggestions to change a / operator into
a // operator, the output is acceptable input for the Unix 'patch'
program.
Here are the possible messages on stdout (N stands for a line number):
- A plain-diff-style change ('NcN', a line marked by '<', a line
containing '---', and a line marked by '>'):
A / operator was found that should be changed to //. This is the
recommendation when only int and/or long arguments were seen.
- 'True division / operator at line N' and a line marked by '=':
A / operator was found that can remain unchanged. This is the
recommendation when only float and/or complex arguments were seen.
- 'Ambiguous / operator (..., ...) at line N', line marked by '?':
A / operator was found for which int or long as well as float or
complex arguments were seen. This is highly unlikely; if it occurs,
you may have to restructure the code to keep the classic semantics,
or maybe you don't care about the classic semantics.
- 'No conclusive evidence on line N', line marked by '*':
A / operator was found for which no warnings were seen. This could
be code that was never executed, or code that was only executed
with user-defined objects as arguments. You will have to
investigate further. Note that // can be overloaded separately from
/, using __floordiv__. True division can also be separately
overloaded, using __truediv__. Classic division should be the same
as either of those. (XXX should I add a warning for division on
user-defined objects, to disambiguate this case from code that was
never executed?)
- 'Phantom ... warnings for line N', line marked by '*':
A warning was seen for a line not containing a / operator. The most
likely cause is a warning about code executed by 'exec' or eval()
(see note below), or an indirect invocation of the / operator, for
example via the div() function in the operator module. It could
also be caused by a change to the file between the time the test
script was run to collect warnings and the time fixdiv was run.
- 'More than one / operator in line N'; or
'More than one / operator per statement in lines N-N':
The scanner found more than one / operator on a single line, or in a
statement split across multiple lines. Because the warnings
framework doesn't (and can't) show the offset within the line, and
the code generator doesn't always give the correct line number for
operations in a multi-line statement, we can't be sure whether all
operators in the statement were executed. To be on the safe side,
by default a warning is issued about this case. In practice, these
cases are usually safe, and the -m option suppresses these warning.
- 'Can't find the / operator in line N', line marked by '*':
This really shouldn't happen. It means that the tokenize module
reported a '/' operator but the line it returns didn't contain a '/'
character at the indicated position.
- 'Bad warning for line N: XYZ', line marked by '*':
This really shouldn't happen. It means that a 'classic XYZ
division' warning was read with XYZ being something other than
'int', 'long', 'float', or 'complex'.
Notes:
- The augmented assignment operator /= is handled the same way as the
/ operator.
- This tool never looks at the // operator; no warnings are ever
generated for use of this operator.
- This tool never looks at the / operator when a future division
statement is in effect; no warnings are generated in this case, and
because the tool only looks at files for which at least one classic
division warning was seen, it will never look at files containing a
future division statement.
- Warnings may be issued for code not read from a file, but executed
using the exec() or eval() functions. These may have
<string> in the filename position, in which case the fixdiv script
will attempt and fail to open a file named '<string>' and issue a
warning about this failure; or these may be reported as 'Phantom'
warnings (see above). You're on your own to deal with these. You
could make all recommended changes and add a future division
statement to all affected files, and then re-run the test script; it
should not issue any warnings. If there are any, and you have a
hard time tracking down where they are generated, you can use the
-Werror option to force an error instead of a first warning,
generating a traceback.
- The tool should be run from the same directory as that from which
the original script was run, otherwise it won't be able to open
files given by relative pathnames. # if this fails, readwarnings() is broken # list[:index] has been processed, list[index:] is still to do | 2.863456 | 3 |
piny/validators.py | jochenparm/piny | 0 | 6624835 | from abc import ABC, abstractmethod
from typing import Any, Dict, List, Union
from .errors import ValidationError
LoadedData = Union[Dict[str, Any], List[Any]]
class Validator(ABC):
    """
    Abstract base class for optional schema validators.

    Use only to derive new child classes; implement all abstract methods.
    Concrete subclasses adapt a third-party validation library to the
    common ``load`` interface.
    """
    def __init__(self, schema: Any, **params):
        # schema: library-specific schema class/object used for validation
        # params: extra keyword arguments forwarded to the schema when used
        self.schema = schema
        self.schema_params = params
    @abstractmethod
    def load(self, data: LoadedData, **params):
        """
        Load data, return validated data or raise an error.
        """
        pass  # pragma: no cover
class PydanticValidator(Validator):
    """
    Validator adapter for the Pydantic library.
    """
    def load(self, data: LoadedData, **params):
        """Instantiate the schema from data merged with params; return its dict."""
        merged = {**data, **params}
        try:
            return self.schema(**merged).dict()
        except Exception as e:
            # Any library failure is normalized to piny's ValidationError.
            raise ValidationError(origin=e, reason=str(e))
class MarshmallowValidator(Validator):
    """
    Validator class for Marshmallow library
    """
    def load(self, data: LoadedData, **params):
        # NOTE(review): `.load(...).data` is the Marshmallow 2.x API; in
        # Marshmallow 3.x load() returns the data directly, so this would
        # surface as a ValidationError (AttributeError wrapped below) --
        # confirm the pinned marshmallow version.
        try:
            return self.schema(**self.schema_params).load(data, **params).data
        except Exception as e:
            raise ValidationError(origin=e, reason=str(e))
class TrafaretValidator(Validator):
    """
    Validator adapter for the Trafaret library.
    """
    def load(self, data: LoadedData, **params):
        """Run the trafaret check; wrap any failure in ValidationError."""
        try:
            checked = self.schema.check(data)
        except Exception as e:
            raise ValidationError(origin=e, reason=str(e))
        else:
            return checked
| from abc import ABC, abstractmethod
from typing import Any, Dict, List, Union
from .errors import ValidationError
LoadedData = Union[Dict[str, Any], List[Any]]
class Validator(ABC):
    """
    Abstract base class for optional schema validators.

    Use only to derive new child classes; implement all abstract methods.
    Concrete subclasses adapt a third-party validation library to the
    common ``load`` interface.
    """
    def __init__(self, schema: Any, **params):
        # schema: library-specific schema class/object used for validation
        # params: extra keyword arguments forwarded to the schema when used
        self.schema = schema
        self.schema_params = params
    @abstractmethod
    def load(self, data: LoadedData, **params):
        """
        Load data, return validated data or raise an error.
        """
        pass  # pragma: no cover
class PydanticValidator(Validator):
    """
    Validator adapter for the Pydantic library.
    """
    def load(self, data: LoadedData, **params):
        """Instantiate the schema from data merged with params; return its dict."""
        merged = {**data, **params}
        try:
            return self.schema(**merged).dict()
        except Exception as e:
            # Any library failure is normalized to piny's ValidationError.
            raise ValidationError(origin=e, reason=str(e))
class MarshmallowValidator(Validator):
    """
    Validator class for Marshmallow library
    """
    def load(self, data: LoadedData, **params):
        # NOTE(review): `.load(...).data` is the Marshmallow 2.x API; in
        # Marshmallow 3.x load() returns the data directly, so this would
        # surface as a ValidationError (AttributeError wrapped below) --
        # confirm the pinned marshmallow version.
        try:
            return self.schema(**self.schema_params).load(data, **params).data
        except Exception as e:
            raise ValidationError(origin=e, reason=str(e))
class TrafaretValidator(Validator):
    """
    Validator adapter for the Trafaret library.
    """
    def load(self, data: LoadedData, **params):
        """Run the trafaret check; wrap any failure in ValidationError."""
        try:
            checked = self.schema.check(data)
        except Exception as e:
            raise ValidationError(origin=e, reason=str(e))
        else:
            return checked
| en | 0.500269 | Abstract base class for optional validator classes Use only to derive new child classes, implement all abstract methods Load data, return validated data or raise en error # pragma: no cover Validator class for Pydantic library Validator class for Marshmallow library Validator class for Trafaret library | 3.355861 | 3 |
ci_output_parser/log_file_parsers/firebase_log_file_parser.py | lzbrooks/ci_output_parser | 0 | 6624836 | <filename>ci_output_parser/log_file_parsers/firebase_log_file_parser.py<gh_stars>0
import re
from ci_output_parser.log_file_parsers.regex_log_file_parser import RegexLogFileParser
class FirebaseLogFileParser(RegexLogFileParser):
    """Base parser for Firebase CI logs; emits newline-terminated lint lines."""
    def __init__(self, log_file_path=None, parser_name="Firebase Lint", output_file_name=None,
                 file_parser_parameters=None, log_line_parser_parameters=None):
        super().__init__(
            log_file_path=log_file_path,
            parser_name=parser_name,
            output_file_name=output_file_name,
            file_parser_parameters=file_parser_parameters,
            log_line_parser_parameters=log_line_parser_parameters,
        )
    def format_lint_errors(self):
        """Append each lint error to formatted_lines with a trailing newline."""
        if not self.lint_errors:
            return
        print("Formatting %s log lint lines" % len(self.lint_errors))
        self.formatted_lines.extend(line + "\n" for line in self.lint_errors)
class FirebaseBucketLogFileParser(FirebaseLogFileParser):
    """Extracts the gs:// results-bucket path from a Firebase run log."""
    def __init__(self, log_file_path=None, parser_name="Firebase Bucket Path"):
        # Capture any Google Cloud Storage URL appearing on a log line.
        log_line_parser_parameters = dict(start_regex=None,
                                          stop_regex=None,
                                          parse_regex=r'gs:\/\/.*')
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         log_line_parser_parameters=log_line_parser_parameters)
    def clean_lint_error_lines(self):
        # Only keep bucket paths when the run actually failed.
        if any("Failed" in line for line in self.log_lines):
            # NOTE(review): [33:-3] strips a fixed-width log prefix and a
            # short trailing decoration -- this assumes a stable log line
            # format; confirm against a real Firebase log.
            self.lint_errors = [x[33:-3] for x in self.lint_errors]
        else:
            self.lint_errors = []
class FirebaseDeviceLogFileParser(FirebaseLogFileParser):
    """Collects the device result folders for failed Firebase test runs."""
    def __init__(self, log_file_path=None, parser_name="Firebase Device Folders"):
        # Only lines containing "Failed" are captured by the file parser.
        file_parser_parameters = dict(start_regex=None,
                                      stop_regex=None,
                                      parse_regex=r'Failed')
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         file_parser_parameters=file_parser_parameters)
    def clean_lint_error_lines(self):
        device_folders = []
        for error_line in self.lint_errors:
            # Columns are whitespace-separated.  NOTE(review): index 3 is
            # assumed to be the device/result-folder column of Firebase's
            # results table -- confirm against real output.
            error_split_list = re.split(r' +', error_line)
            device_folders.append(error_split_list[3])
        self.lint_errors = device_folders
class FirebaseTestLogFileParser(RegexLogFileParser):
    """Extracts JUnit failure reports for one Firebase test device."""
    def __init__(self, log_file_path=None, device_name="Device"):
        parser_name = "Firebase " + device_name + " Tests"
        # Capture everything between the JUnit failure summary header and
        # the instrumentation terminator line.
        file_parser_parameters = dict(start_regex=r'^There were|There was',
                                      stop_regex=r'^INSTRUMENTATION_CODE',
                                      parse_regex=r'.*')
        # A captured section counts as a lint hit if it mentions "failure".
        log_line_parser_parameters = dict(start_regex=None,
                                          stop_regex=None,
                                          parse_regex=r'failure')
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         file_parser_parameters=file_parser_parameters,
                         log_line_parser_parameters=log_line_parser_parameters)
    def clean_lint_error_lines(self):
        # Keep the captured lines but drop stack-trace frames from the
        # test frameworks themselves, leaving only app-relevant frames.
        source_error_lines = []
        if self.lint_lines:
            for error_line in self.log_lines:
                if error_line.startswith(" at ") and (
                        error_line.startswith(" at org.junit.runner") or
                        error_line.startswith(" at org.junit.internal") or
                        error_line.startswith(" at androidx.test") or
                        error_line.startswith(" at android.app.Instrumentation") or
                        error_line.startswith(" at org.mockito.internal")):
                    continue
                else:
                    source_error_lines.append(error_line)
        self.lint_errors = source_error_lines
class FirebaseTestCrashLogFileParser(RegexLogFileParser):
    """Detects a 'Process crashed' result for one Firebase test device."""
    def __init__(self, log_file_path=None, device_name="Device"):
        parser_name = "Firebase " + device_name + " Tests"
        # Read the whole log up to the instrumentation terminator line.
        file_parser_parameters = dict(
            start_regex=None,
            stop_regex=r'^INSTRUMENTATION_CODE',
            parse_regex=r'.*',
        )
        # A crash is signalled by the instrumentation short message.
        log_line_parser_parameters = dict(
            start_regex=None,
            stop_regex=None,
            parse_regex=r'shortMsg=Process crashed',
        )
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         file_parser_parameters=file_parser_parameters,
                         log_line_parser_parameters=log_line_parser_parameters)
    def clean_lint_error_lines(self):
        """Keep the last nine log lines for context when a crash was found."""
        self.lint_errors = self.log_lines[-9:] if self.lint_lines else []
class FirebaseTestTimeoutLogFileParser(RegexLogFileParser):
    """Flags a device run that never reached INSTRUMENTATION_CODE (a timeout)."""
    def __init__(self, log_file_path=None, device_name="Device"):
        parser_name = "Firebase " + device_name + " Tests"
        # The marker below is printed when instrumentation finishes; its
        # absence (while log output exists) is treated as a timeout.
        log_line_parser_parameters = dict(start_regex=None,
                                          stop_regex=None,
                                          parse_regex=r'INSTRUMENTATION_CODE:')
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         log_line_parser_parameters=log_line_parser_parameters)
    def clean_lint_error_lines(self):
        timeout_lines = []
        if (not self.lint_lines) and self.log_lines:
            timeout_lines.append("Test Timed Out:\n")
            timeout_lines.append("\n")
            # Include the tail of the log for context.
            timeout_lines = timeout_lines + self.log_lines[-8:]
            self.lint_errors = timeout_lines
        else:
            self.lint_errors = []
| <filename>ci_output_parser/log_file_parsers/firebase_log_file_parser.py<gh_stars>0
import re
from ci_output_parser.log_file_parsers.regex_log_file_parser import RegexLogFileParser
class FirebaseLogFileParser(RegexLogFileParser):
    """Base parser for Firebase CI logs; emits newline-terminated lint lines."""
    def __init__(self, log_file_path=None, parser_name="Firebase Lint", output_file_name=None,
                 file_parser_parameters=None, log_line_parser_parameters=None):
        super().__init__(
            log_file_path=log_file_path,
            parser_name=parser_name,
            output_file_name=output_file_name,
            file_parser_parameters=file_parser_parameters,
            log_line_parser_parameters=log_line_parser_parameters,
        )
    def format_lint_errors(self):
        """Append each lint error to formatted_lines with a trailing newline."""
        if not self.lint_errors:
            return
        print("Formatting %s log lint lines" % len(self.lint_errors))
        self.formatted_lines.extend(line + "\n" for line in self.lint_errors)
class FirebaseBucketLogFileParser(FirebaseLogFileParser):
    """Extracts the gs:// results-bucket path from a Firebase run log."""
    def __init__(self, log_file_path=None, parser_name="Firebase Bucket Path"):
        # Capture any Google Cloud Storage URL appearing on a log line.
        log_line_parser_parameters = dict(start_regex=None,
                                          stop_regex=None,
                                          parse_regex=r'gs:\/\/.*')
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         log_line_parser_parameters=log_line_parser_parameters)
    def clean_lint_error_lines(self):
        # Only keep bucket paths when the run actually failed.
        if any("Failed" in line for line in self.log_lines):
            # NOTE(review): [33:-3] strips a fixed-width log prefix and a
            # short trailing decoration -- this assumes a stable log line
            # format; confirm against a real Firebase log.
            self.lint_errors = [x[33:-3] for x in self.lint_errors]
        else:
            self.lint_errors = []
class FirebaseDeviceLogFileParser(FirebaseLogFileParser):
    """Collects the device result folders for failed Firebase test runs."""
    def __init__(self, log_file_path=None, parser_name="Firebase Device Folders"):
        # Only lines containing "Failed" are captured by the file parser.
        file_parser_parameters = dict(start_regex=None,
                                      stop_regex=None,
                                      parse_regex=r'Failed')
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         file_parser_parameters=file_parser_parameters)
    def clean_lint_error_lines(self):
        device_folders = []
        for error_line in self.lint_errors:
            # Columns are whitespace-separated.  NOTE(review): index 3 is
            # assumed to be the device/result-folder column of Firebase's
            # results table -- confirm against real output.
            error_split_list = re.split(r' +', error_line)
            device_folders.append(error_split_list[3])
        self.lint_errors = device_folders
class FirebaseTestLogFileParser(RegexLogFileParser):
    """Extracts JUnit failure reports for one Firebase test device."""
    def __init__(self, log_file_path=None, device_name="Device"):
        parser_name = "Firebase " + device_name + " Tests"
        # Capture everything between the JUnit failure summary header and
        # the instrumentation terminator line.
        file_parser_parameters = dict(start_regex=r'^There were|There was',
                                      stop_regex=r'^INSTRUMENTATION_CODE',
                                      parse_regex=r'.*')
        # A captured section counts as a lint hit if it mentions "failure".
        log_line_parser_parameters = dict(start_regex=None,
                                          stop_regex=None,
                                          parse_regex=r'failure')
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         file_parser_parameters=file_parser_parameters,
                         log_line_parser_parameters=log_line_parser_parameters)
    def clean_lint_error_lines(self):
        # Keep the captured lines but drop stack-trace frames from the
        # test frameworks themselves, leaving only app-relevant frames.
        source_error_lines = []
        if self.lint_lines:
            for error_line in self.log_lines:
                if error_line.startswith(" at ") and (
                        error_line.startswith(" at org.junit.runner") or
                        error_line.startswith(" at org.junit.internal") or
                        error_line.startswith(" at androidx.test") or
                        error_line.startswith(" at android.app.Instrumentation") or
                        error_line.startswith(" at org.mockito.internal")):
                    continue
                else:
                    source_error_lines.append(error_line)
        self.lint_errors = source_error_lines
class FirebaseTestCrashLogFileParser(RegexLogFileParser):
    """Detects a 'Process crashed' result for one Firebase test device."""
    def __init__(self, log_file_path=None, device_name="Device"):
        parser_name = "Firebase " + device_name + " Tests"
        # Read the whole log up to the instrumentation terminator line.
        file_parser_parameters = dict(
            start_regex=None,
            stop_regex=r'^INSTRUMENTATION_CODE',
            parse_regex=r'.*',
        )
        # A crash is signalled by the instrumentation short message.
        log_line_parser_parameters = dict(
            start_regex=None,
            stop_regex=None,
            parse_regex=r'shortMsg=Process crashed',
        )
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         file_parser_parameters=file_parser_parameters,
                         log_line_parser_parameters=log_line_parser_parameters)
    def clean_lint_error_lines(self):
        """Keep the last nine log lines for context when a crash was found."""
        self.lint_errors = self.log_lines[-9:] if self.lint_lines else []
class FirebaseTestTimeoutLogFileParser(RegexLogFileParser):
    """Flags a device run that never reached INSTRUMENTATION_CODE (a timeout)."""
    def __init__(self, log_file_path=None, device_name="Device"):
        parser_name = "Firebase " + device_name + " Tests"
        # The marker below is printed when instrumentation finishes; its
        # absence (while log output exists) is treated as a timeout.
        log_line_parser_parameters = dict(start_regex=None,
                                          stop_regex=None,
                                          parse_regex=r'INSTRUMENTATION_CODE:')
        super().__init__(log_file_path=log_file_path, parser_name=parser_name,
                         log_line_parser_parameters=log_line_parser_parameters)
    def clean_lint_error_lines(self):
        timeout_lines = []
        if (not self.lint_lines) and self.log_lines:
            timeout_lines.append("Test Timed Out:\n")
            timeout_lines.append("\n")
            # Include the tail of the log for context.
            timeout_lines = timeout_lines + self.log_lines[-8:]
            self.lint_errors = timeout_lines
        else:
            self.lint_errors = []
| none | 1 | 2.651474 | 3 | |
configs/selfsup/_base_/schedules/adamw_coslr-300e_in1k.py | mitming/mmselfsup | 355 | 6624837 | <gh_stars>100-1000
# optimizer: AdamW for a 300-epoch ImageNet-1k self-supervised schedule
optimizer = dict(type='AdamW', lr=6e-4, weight_decay=0.1)
optimizer_config = dict()  # grad_clip, coalesce, bucket_size_mb
# learning policy: cosine annealing to 0 after a linear warmup
lr_config = dict(
    policy='CosineAnnealing',
    by_epoch=False,  # anneal per iteration rather than per epoch
    min_lr=0.,
    warmup='linear',
    warmup_iters=40,  # with warmup_by_epoch=True this counts epochs
    warmup_ratio=1e-4,  # cannot be 0
    warmup_by_epoch=True)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=300)
| # optimizer
optimizer = dict(type='AdamW', lr=6e-4, weight_decay=0.1)
optimizer_config = dict() # grad_clip, coalesce, bucket_size_mb
# learning policy
lr_config = dict(
policy='CosineAnnealing',
by_epoch=False,
min_lr=0.,
warmup='linear',
warmup_iters=40,
warmup_ratio=1e-4, # cannot be 0
warmup_by_epoch=True)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=300) | en | 0.683427 | # optimizer # grad_clip, coalesce, bucket_size_mb # learning policy # cannot be 0 # runtime settings | 1.730932 | 2 |
textx/cli/version.py | hlouzada/textX | 346 | 6624838 | try:
import click
except ImportError:
raise Exception('textX must be installed with CLI dependencies to use '
'textx command.\npip install textX[cli]')
def version(textx):
    """Attach the ``version`` sub-command to the given click group."""
    @textx.command()
    def version():
        """
        Print version info.
        """
        # Import here so the CLI can be registered without importing the
        # whole textx package up front.
        import textx
        message = 'textX {}'.format(textx.__version__)
        click.echo(message)
| try:
import click
except ImportError:
raise Exception('textX must be installed with CLI dependencies to use '
'textx command.\npip install textX[cli]')
def version(textx):
    """Attach the ``version`` sub-command to the given click group."""
    @textx.command()
    def version():
        """
        Print version info.
        """
        # Import here so the CLI can be registered without importing the
        # whole textx package up front.
        import textx
        message = 'textX {}'.format(textx.__version__)
        click.echo(message)
| en | 0.426635 | Print version info. | 2.385669 | 2 |
ticdat/testing/run_tests_for_many.py | adampkehoe/ticdat | 15 | 6624839 | <reponame>adampkehoe/ticdat<gh_stars>10-100
# Useful helper testing script: enables and runs many ticdat test suites
# in a single process.
import ticdat.testing.ticdattestutils
run_suite = ticdat.testing.ticdattestutils._runSuite
from ticdat.testing.testcsv import TestCsv
from ticdat.testing.testxls import TestXls
from ticdat.testing.testpandas import TestPandas
from ticdat.testing.testutils import TestUtils
from ticdat.testing.testjson import TestJson
from ticdat.testing.testpandat_io import TestIO
import ticdat.testing.testpandat_utils
TestPandatUtils = ticdat.testing.testpandat_utils.TestUtils
from ticdat.testing.testsql import TestSql
from ticdat.testing.test_pgtd import TestPostres
from ticdat.testing.testmodel import TestModel
# NOTE(review): TestSql and TestPandatUtils appear twice in this list, so
# those suites run twice -- confirm whether that is intentional.
the_classes = [TestSql, TestPandatUtils, TestIO, TestJson, TestUtils, TestSql, TestPandatUtils, TestCsv,
               TestPandas, TestModel, TestXls, TestPostres,]
for c in the_classes:
    print(f"\n--------{c}")
    # Each suite exposes at most one "can*" attribute gating whether it
    # may run; flip it on before running the suite.
    can_attr = [x for x in dir(c) if x.startswith("can")]
    assert len(can_attr) == 1 or c in [TestUtils, TestModel]
    if can_attr:
        setattr(c, can_attr[0], True)
    run_suite(c)
| #useful helper testing script
import ticdat.testing.ticdattestutils
run_suite = ticdat.testing.ticdattestutils._runSuite
from ticdat.testing.testcsv import TestCsv
from ticdat.testing.testxls import TestXls
from ticdat.testing.testpandas import TestPandas
from ticdat.testing.testutils import TestUtils
from ticdat.testing.testjson import TestJson
from ticdat.testing.testpandat_io import TestIO
import ticdat.testing.testpandat_utils
TestPandatUtils = ticdat.testing.testpandat_utils.TestUtils
from ticdat.testing.testsql import TestSql
from ticdat.testing.test_pgtd import TestPostres
from ticdat.testing.testmodel import TestModel
the_classes = [TestSql, TestPandatUtils, TestIO, TestJson, TestUtils, TestSql, TestPandatUtils, TestCsv,
TestPandas, TestModel, TestXls, TestPostres,]
for c in the_classes:
print(f"\n--------{c}")
can_attr = [x for x in dir(c) if x.startswith("can")]
assert len(can_attr) == 1 or c in [TestUtils, TestModel]
if can_attr:
setattr(c, can_attr[0], True)
run_suite(c) | en | 0.369754 | #useful helper testing script | 2.133072 | 2 |
vision/cloud-client/product_search/product_search_test.py | spitfire55/python-docs-samples | 1 | 6624840 | <gh_stars>1-10
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from product_search import get_similar_products_file, get_similar_products_uri
PROJECT_ID = os.getenv('GCLOUD_PROJECT')
LOCATION = 'us-west1'
PRODUCT_SET_ID = 'indexed_product_set_id_for_testing'
PRODUCT_CATEGORY = 'apparel'
PRODUCT_ID_1 = 'indexed_product_id_for_testing_1'
PRODUCT_ID_2 = 'indexed_product_id_for_testing_2'
FILE_PATH_1 = 'resources/shoes_1.jpg'
IMAGE_URI_1 = 'gs://python-docs-samples-tests/product_search/shoes_1.jpg'
FILTER = 'style=womens'
def test_get_similar_products_file(capsys):
get_similar_products_file(
PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, FILE_PATH_1,
'')
out, _ = capsys.readouterr()
assert PRODUCT_ID_1 in out
assert PRODUCT_ID_2 in out
def test_get_similar_products_uri(capsys):
get_similar_products_uri(
PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, IMAGE_URI_1,
'')
out, _ = capsys.readouterr()
assert PRODUCT_ID_1 in out
assert PRODUCT_ID_2 in out
def test_get_similar_products_file_with_filter(capsys):
get_similar_products_file(
PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, FILE_PATH_1,
FILTER)
out, _ = capsys.readouterr()
assert PRODUCT_ID_1 in out
assert PRODUCT_ID_2 not in out
def test_get_similar_products_uri_with_filter(capsys):
get_similar_products_uri(
PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, IMAGE_URI_1,
FILTER)
out, _ = capsys.readouterr()
assert PRODUCT_ID_1 in out
assert PRODUCT_ID_2 not in out
| # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from product_search import get_similar_products_file, get_similar_products_uri
PROJECT_ID = os.getenv('GCLOUD_PROJECT')
LOCATION = 'us-west1'
PRODUCT_SET_ID = 'indexed_product_set_id_for_testing'
PRODUCT_CATEGORY = 'apparel'
PRODUCT_ID_1 = 'indexed_product_id_for_testing_1'
PRODUCT_ID_2 = 'indexed_product_id_for_testing_2'
FILE_PATH_1 = 'resources/shoes_1.jpg'
IMAGE_URI_1 = 'gs://python-docs-samples-tests/product_search/shoes_1.jpg'
FILTER = 'style=womens'
def test_get_similar_products_file(capsys):
get_similar_products_file(
PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, FILE_PATH_1,
'')
out, _ = capsys.readouterr()
assert PRODUCT_ID_1 in out
assert PRODUCT_ID_2 in out
def test_get_similar_products_uri(capsys):
get_similar_products_uri(
PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, IMAGE_URI_1,
'')
out, _ = capsys.readouterr()
assert PRODUCT_ID_1 in out
assert PRODUCT_ID_2 in out
def test_get_similar_products_file_with_filter(capsys):
get_similar_products_file(
PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, FILE_PATH_1,
FILTER)
out, _ = capsys.readouterr()
assert PRODUCT_ID_1 in out
assert PRODUCT_ID_2 not in out
def test_get_similar_products_uri_with_filter(capsys):
get_similar_products_uri(
PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, IMAGE_URI_1,
FILTER)
out, _ = capsys.readouterr()
assert PRODUCT_ID_1 in out
assert PRODUCT_ID_2 not in out | en | 0.848975 | # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.188151 | 2 |
OpenGLCffi/GL/EXT/ARB/color_buffer_float.py | cydenix/OpenGLCffi | 0 | 6624841 | <reponame>cydenix/OpenGLCffi
from OpenGLCffi.GL import params
@params(api='gl', prms=['target', 'clamp'])
def glClampColorARB(target, clamp):
pass
| from OpenGLCffi.GL import params
@params(api='gl', prms=['target', 'clamp'])
def glClampColorARB(target, clamp):
pass | none | 1 | 1.705262 | 2 | |
pypostalwin/src/pypostalwin.py | selva221724/pypostalwin | 9 | 6624842 | <filename>pypostalwin/src/pypostalwin.py
from subprocess import Popen, PIPE
import subprocess
def stringToJSON(string):
if not string in ['{}']:
string = string.replace('{ ', '')
string = string.replace('}', '')
string = string.replace('"', '')
string = string.split(", ")
stringList = [i.split(': ') for i in string]
outDictList = []
for i in stringList:
outDictList.append({i[0]: i[1].rstrip().lstrip()})
return outDictList
else:
return {}
def outputStripper(result):
result = result.split('Result:\n\n')[1].split('\n\n> ')[0].replace('\n', '')
result = stringToJSON(result)
return result
def removeSpeacialChars(address):
b = {'≈': '', '≠': '', '>': '', '<': '', '+': '', '≥': '', '≤': '', '±': '', '*': '', '÷': '', '√': '',
'°': '', '⊥': '', '~': '', 'Δ': '', 'π': '', '≡': '', '≜': '', '∝': '', '∞': '', '≪': '', '≫': '',
'⌈': '', '⌉': '', '⌋': '', '⌊': '', '∑': '', '∏': '', 'γ': '', 'φ': '', '⊃': '', '⋂': '', '⋃': '',
'μ': '', 'σ': '', 'ρ': '', 'λ': '', 'χ': '', '⊄': '', '⊆': '', '⊂': '', '⊇': '', '⊅': '', '⊖': '',
'∈': '', '∉': '', '⊕': '', '⇒': '', '⇔': '', '↔': '', '∀': '', '∃': '', '∄': '', '∴': '', '∵': '',
'ε': '', '∫': '', '∮': '', '∯': '', '∰': '', 'δ': '', 'ψ': '', 'Θ': '', 'θ': '', 'α': '', 'β': '',
'ζ': '', 'η': '', 'ι': '', 'κ': '', 'ξ': '', 'τ': '', 'ω': '', '∇': ''}
for x, y in b.items():
address = address.replace(x, y)
return address
class AddressParser:
def __init__(self):
self.exePath = r"C:\Workbench\libpostal\src\address_parser.exe"
self.process = Popen(self.exePath, shell=False, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
pass
def runParser(self, address):
address = removeSpeacialChars(address)
address = address + '\n'
self.process.stdin.write(address)
self.process.stdin.flush()
result = ''
for line in self.process.stdout:
if line == '}\n':
result += line
break
result += line
return outputStripper(result)
def expandTheAddress(self,address):
address = removeSpeacialChars(address)
out = subprocess.Popen(['C:\Workbench\libpostal\src\libpostal.exe',
address, '--json'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
return eval(stdout.decode("utf-8"))['expansions']
def terminateParser(self):
self.process.stdin.close()
self.process.terminate()
self.process.wait()
| <filename>pypostalwin/src/pypostalwin.py
from subprocess import Popen, PIPE
import subprocess
def stringToJSON(string):
if not string in ['{}']:
string = string.replace('{ ', '')
string = string.replace('}', '')
string = string.replace('"', '')
string = string.split(", ")
stringList = [i.split(': ') for i in string]
outDictList = []
for i in stringList:
outDictList.append({i[0]: i[1].rstrip().lstrip()})
return outDictList
else:
return {}
def outputStripper(result):
result = result.split('Result:\n\n')[1].split('\n\n> ')[0].replace('\n', '')
result = stringToJSON(result)
return result
def removeSpeacialChars(address):
b = {'≈': '', '≠': '', '>': '', '<': '', '+': '', '≥': '', '≤': '', '±': '', '*': '', '÷': '', '√': '',
'°': '', '⊥': '', '~': '', 'Δ': '', 'π': '', '≡': '', '≜': '', '∝': '', '∞': '', '≪': '', '≫': '',
'⌈': '', '⌉': '', '⌋': '', '⌊': '', '∑': '', '∏': '', 'γ': '', 'φ': '', '⊃': '', '⋂': '', '⋃': '',
'μ': '', 'σ': '', 'ρ': '', 'λ': '', 'χ': '', '⊄': '', '⊆': '', '⊂': '', '⊇': '', '⊅': '', '⊖': '',
'∈': '', '∉': '', '⊕': '', '⇒': '', '⇔': '', '↔': '', '∀': '', '∃': '', '∄': '', '∴': '', '∵': '',
'ε': '', '∫': '', '∮': '', '∯': '', '∰': '', 'δ': '', 'ψ': '', 'Θ': '', 'θ': '', 'α': '', 'β': '',
'ζ': '', 'η': '', 'ι': '', 'κ': '', 'ξ': '', 'τ': '', 'ω': '', '∇': ''}
for x, y in b.items():
address = address.replace(x, y)
return address
class AddressParser:
def __init__(self):
self.exePath = r"C:\Workbench\libpostal\src\address_parser.exe"
self.process = Popen(self.exePath, shell=False, universal_newlines=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
pass
def runParser(self, address):
address = removeSpeacialChars(address)
address = address + '\n'
self.process.stdin.write(address)
self.process.stdin.flush()
result = ''
for line in self.process.stdout:
if line == '}\n':
result += line
break
result += line
return outputStripper(result)
def expandTheAddress(self,address):
address = removeSpeacialChars(address)
out = subprocess.Popen(['C:\Workbench\libpostal\src\libpostal.exe',
address, '--json'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
return eval(stdout.decode("utf-8"))['expansions']
def terminateParser(self):
self.process.stdin.close()
self.process.terminate()
self.process.wait()
| none | 1 | 2.855714 | 3 | |
templates/controller-template.py | yangdanny97/p4-stacklang | 0 | 6624843 | #!/usr/bin/env python2
import argparse
import grpc
import os
import sys
from time import sleep
from headers import *
from stitch import *
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'utils/'))
import run_exercise
import p4runtime_lib.bmv2
from p4runtime_lib.switch import ShutdownAllSwitchConnections
import p4runtime_lib.helper
switches = {}
p4info_helper = None
def addForwardingRule(switch, dst_ip_addr, dst_port):
# Helper function to install forwarding rules
table_entry = p4info_helper.buildTableEntry(
table_name="MyIngress.ipv4_lpm",
match_fields={
"hdr.ipv4.dstAddr": (dst_ip_addr, 32)
},
action_name="MyIngress.ipv4_forward",
action_params={
"port": dst_port,
})
bmv2_switch = switches[switch]
bmv2_switch.WriteTableEntry(table_entry)
print "Installed rule on %s to forward to %s via port %d" % (switch, dst_ip_addr, dst_port)
def addSwIDRule(switch, swid):
# Helper function to install forwarding rules
table_entry = p4info_helper.buildTableEntry(
table_name="MyIngress.switch_id",
match_fields={
"hdr.ipv4.protocol": 0x8F
},
action_name="MyIngress.set_switch_id",
action_params={
"switch_id": swid,
})
bmv2_switch = switches[switch]
bmv2_switch.WriteTableEntry(table_entry)
print "Installed switch_id rule on %s" % switch
def main(p4info_file_path, bmv2_file_path, topo_file_path):
# Instantiate a P4Runtime helper from the p4info file
global p4info_helper
p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
try:
# Establish a P4 Runtime connection to each switch
for switch in << switches >>:
switch_id = int(switch[1:])
bmv2_switch = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name=switch,
address="127.0.0.1:%d" % (50050 + switch_id),
device_id=(switch_id - 1),
proto_dump_file="logs/%s-p4runtime-requests.txt" % switch)
bmv2_switch.MasterArbitrationUpdate()
print "Established as controller for %s" % bmv2_switch.name
bmv2_switch.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,
bmv2_json_file_path=bmv2_file_path)
print "Installed P4 Program using SetForwardingPipelineConfig on %s" % bmv2_switch.name
switches[switch] = bmv2_switch
addSwIDRule(switch, switch_id)
<< forwarding_rules >>
except KeyboardInterrupt:
print " Shutting down."
except grpc.RpcError as e:
print "gRPC Error:", e.details(),
status_code = e.code()
print "(%s)" % status_code.name,
traceback = sys.exc_info()[2]
print "[%s:%d]" % (traceback.tb_frame.f_code.co_filename, traceback.tb_lineno)
ShutdownAllSwitchConnections()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='P4Runtime Controller')
parser.add_argument('--p4info', help='p4info proto in text format from p4c',
type=str, action="store", required=False,
default='./build/switch.p4.p4info.txt')
parser.add_argument('--bmv2-json', help='BMv2 JSON file from p4c',
type=str, action="store", required=False,
default='./build/switch.json')
parser.add_argument('--topo', help='Topology file',
type=str, action="store", required=False,
default='topology.json')
args = parser.parse_args()
if not os.path.exists(args.p4info):
parser.print_help()
print "\np4info file not found: %s\nHave you run 'make'?" % args.p4info
parser.exit(1)
if not os.path.exists(args.bmv2_json):
parser.print_help()
print "\nBMv2 JSON file not found: %s\nHave you run 'make'?" % args.bmv2_json
parser.exit(1)
if not os.path.exists(args.topo):
parser.print_help()
print "\nTopology file not found: %s" % args.topo
parser.exit(1)
main(args.p4info, args.bmv2_json, args.topo)
| #!/usr/bin/env python2
import argparse
import grpc
import os
import sys
from time import sleep
from headers import *
from stitch import *
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'utils/'))
import run_exercise
import p4runtime_lib.bmv2
from p4runtime_lib.switch import ShutdownAllSwitchConnections
import p4runtime_lib.helper
switches = {}
p4info_helper = None
def addForwardingRule(switch, dst_ip_addr, dst_port):
# Helper function to install forwarding rules
table_entry = p4info_helper.buildTableEntry(
table_name="MyIngress.ipv4_lpm",
match_fields={
"hdr.ipv4.dstAddr": (dst_ip_addr, 32)
},
action_name="MyIngress.ipv4_forward",
action_params={
"port": dst_port,
})
bmv2_switch = switches[switch]
bmv2_switch.WriteTableEntry(table_entry)
print "Installed rule on %s to forward to %s via port %d" % (switch, dst_ip_addr, dst_port)
def addSwIDRule(switch, swid):
# Helper function to install forwarding rules
table_entry = p4info_helper.buildTableEntry(
table_name="MyIngress.switch_id",
match_fields={
"hdr.ipv4.protocol": 0x8F
},
action_name="MyIngress.set_switch_id",
action_params={
"switch_id": swid,
})
bmv2_switch = switches[switch]
bmv2_switch.WriteTableEntry(table_entry)
print "Installed switch_id rule on %s" % switch
def main(p4info_file_path, bmv2_file_path, topo_file_path):
# Instantiate a P4Runtime helper from the p4info file
global p4info_helper
p4info_helper = p4runtime_lib.helper.P4InfoHelper(p4info_file_path)
try:
# Establish a P4 Runtime connection to each switch
for switch in << switches >>:
switch_id = int(switch[1:])
bmv2_switch = p4runtime_lib.bmv2.Bmv2SwitchConnection(
name=switch,
address="127.0.0.1:%d" % (50050 + switch_id),
device_id=(switch_id - 1),
proto_dump_file="logs/%s-p4runtime-requests.txt" % switch)
bmv2_switch.MasterArbitrationUpdate()
print "Established as controller for %s" % bmv2_switch.name
bmv2_switch.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,
bmv2_json_file_path=bmv2_file_path)
print "Installed P4 Program using SetForwardingPipelineConfig on %s" % bmv2_switch.name
switches[switch] = bmv2_switch
addSwIDRule(switch, switch_id)
<< forwarding_rules >>
except KeyboardInterrupt:
print " Shutting down."
except grpc.RpcError as e:
print "gRPC Error:", e.details(),
status_code = e.code()
print "(%s)" % status_code.name,
traceback = sys.exc_info()[2]
print "[%s:%d]" % (traceback.tb_frame.f_code.co_filename, traceback.tb_lineno)
ShutdownAllSwitchConnections()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='P4Runtime Controller')
parser.add_argument('--p4info', help='p4info proto in text format from p4c',
type=str, action="store", required=False,
default='./build/switch.p4.p4info.txt')
parser.add_argument('--bmv2-json', help='BMv2 JSON file from p4c',
type=str, action="store", required=False,
default='./build/switch.json')
parser.add_argument('--topo', help='Topology file',
type=str, action="store", required=False,
default='topology.json')
args = parser.parse_args()
if not os.path.exists(args.p4info):
parser.print_help()
print "\np4info file not found: %s\nHave you run 'make'?" % args.p4info
parser.exit(1)
if not os.path.exists(args.bmv2_json):
parser.print_help()
print "\nBMv2 JSON file not found: %s\nHave you run 'make'?" % args.bmv2_json
parser.exit(1)
if not os.path.exists(args.topo):
parser.print_help()
print "\nTopology file not found: %s" % args.topo
parser.exit(1)
main(args.p4info, args.bmv2_json, args.topo)
| en | 0.680937 | #!/usr/bin/env python2 # Helper function to install forwarding rules # Helper function to install forwarding rules # Instantiate a P4Runtime helper from the p4info file # Establish a P4 Runtime connection to each switch | 2.153847 | 2 |
places/migrations/0004_auto_20200710_2304.py | huangsam/chowist | 11 | 6624844 | # Generated by Django 3.0.8 on 2020-07-10 23:04
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("places", "0003_auto_20200708_0719"),
]
operations = [
migrations.AlterUniqueTogether(
name="rating", unique_together={("place", "author")},
),
]
| # Generated by Django 3.0.8 on 2020-07-10 23:04
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("places", "0003_auto_20200708_0719"),
]
operations = [
migrations.AlterUniqueTogether(
name="rating", unique_together={("place", "author")},
),
]
| en | 0.810378 | # Generated by Django 3.0.8 on 2020-07-10 23:04 | 1.597487 | 2 |
xml2txt.py | kly1997/head_shoulder-detection-by-yolov3 | 27 | 6624845 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author : kly time:2019/4/15
# coding=utf-8
import os
import sys
import xml.etree.ElementTree as ET
import glob
def xml_to_txt(indir):
os.chdir(indir)
annotations = os.listdir('.')
annotations = glob.glob(str(annotations) + '*.xml')
for i, file in enumerate(annotations):
with open('F:\DEEPL\yolo\Test//result\groundtruthbox1.txt', "a") as f_w:
f_w.write(file.split('.')[0] + '.jpg' + '\n')
# actual parsing
in_file = open(file)
tree = ET.parse(in_file)
root = tree.getroot()
print(file)
i=0
for object in root.iter('object'):
i += 1
with open('F:\DEEPL\yolo\Test//result\groundtruthbox1.txt', "a") as f_w:
f_w.write(str(i) + '\n')
for obj in root.iter('object'):
current = list()
name = obj.find('name').text
xmlbox = obj.find('bndbox')
xn = xmlbox.find('xmin').text
xx = xmlbox.find('xmax').text
yn = xmlbox.find('ymin').text
yx = xmlbox.find('ymax').text
# print xn
width = int(xx) - int(xn)
print(width)
height = int(yx) - int(yn)
print(height)
with open('F:\DEEPL\yolo\Test//result\groundtruthbox1.txt', "a") as f_w:
f_w.write(xn + ' ' + yn + ' ' + str(width) + ' ' + str(height) + ' '+'1')
f_w.write('\n')
indir = 'F:\DEEPL\yolo\Test//annotations' # xml目录
xml_to_txt(indir) | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author : kly time:2019/4/15
# coding=utf-8
import os
import sys
import xml.etree.ElementTree as ET
import glob
def xml_to_txt(indir):
os.chdir(indir)
annotations = os.listdir('.')
annotations = glob.glob(str(annotations) + '*.xml')
for i, file in enumerate(annotations):
with open('F:\DEEPL\yolo\Test//result\groundtruthbox1.txt', "a") as f_w:
f_w.write(file.split('.')[0] + '.jpg' + '\n')
# actual parsing
in_file = open(file)
tree = ET.parse(in_file)
root = tree.getroot()
print(file)
i=0
for object in root.iter('object'):
i += 1
with open('F:\DEEPL\yolo\Test//result\groundtruthbox1.txt', "a") as f_w:
f_w.write(str(i) + '\n')
for obj in root.iter('object'):
current = list()
name = obj.find('name').text
xmlbox = obj.find('bndbox')
xn = xmlbox.find('xmin').text
xx = xmlbox.find('xmax').text
yn = xmlbox.find('ymin').text
yx = xmlbox.find('ymax').text
# print xn
width = int(xx) - int(xn)
print(width)
height = int(yx) - int(yn)
print(height)
with open('F:\DEEPL\yolo\Test//result\groundtruthbox1.txt', "a") as f_w:
f_w.write(xn + ' ' + yn + ' ' + str(width) + ' ' + str(height) + ' '+'1')
f_w.write('\n')
indir = 'F:\DEEPL\yolo\Test//annotations' # xml目录
xml_to_txt(indir) | en | 0.322645 | #!/usr/bin/env python # -*- coding:utf-8 -*- # author : kly time:2019/4/15 # coding=utf-8 # actual parsing # print xn # xml目录 | 3.097667 | 3 |
Task2B.py | HSCam/Flood-Warning-Sytem | 0 | 6624846 | <reponame>HSCam/Flood-Warning-Sytem
from distutils.command.build import build
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_level_over_threshold
def run():
"""Task 2B: Assessing flood risk by relative water level"""
stations = build_station_list()
update_water_levels(stations)
stations_of_interest = stations_level_over_threshold(stations, 0.8)
for station in stations_of_interest:
print(station)
if __name__ == '__main__':
print("***Task 2B: CUED Part 1A Flood Warning System***")
run() | from distutils.command.build import build
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_level_over_threshold
def run():
"""Task 2B: Assessing flood risk by relative water level"""
stations = build_station_list()
update_water_levels(stations)
stations_of_interest = stations_level_over_threshold(stations, 0.8)
for station in stations_of_interest:
print(station)
if __name__ == '__main__':
print("***Task 2B: CUED Part 1A Flood Warning System***")
run() | en | 0.881539 | Task 2B: Assessing flood risk by relative water level | 2.511211 | 3 |
requests_cache/backends/gridfs.py | rrosajp/requests-cache | 2 | 6624847 | from gridfs import GridFS
from pymongo import MongoClient
from . import get_valid_kwargs
from .base import BaseCache, BaseStorage
from .mongo import MongoDict
class GridFSCache(BaseCache):
"""GridFS cache backend.
Use this backend to store documents greater than 16MB.
Example:
>>> requests_cache.install_cache(backend='gridfs')
>>>
>>> # Or, to provide custom connection settings:
>>> from pymongo import MongoClient
>>> requests_cache.install_cache(backend='gridfs', connection=MongoClient('alternate-host'))
Args:
db_name: Database name
connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
"""
def __init__(self, db_name: str, **kwargs):
super().__init__(**kwargs)
self.responses = GridFSPickleDict(db_name, **kwargs)
self.redirects = MongoDict(
db_name, collection_name='redirects', connection=self.responses.connection, **kwargs
)
class GridFSPickleDict(BaseStorage):
"""A dictionary-like interface for a GridFS database
Args:
db_name: Database name
collection_name: Ignored; GridFS internally uses collections 'fs.files' and 'fs.chunks'
connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
"""
def __init__(self, db_name, collection_name=None, connection=None, **kwargs):
super().__init__(**kwargs)
connection_kwargs = get_valid_kwargs(MongoClient, kwargs)
self.connection = connection or MongoClient(**connection_kwargs)
self.db = self.connection[db_name]
self.fs = GridFS(self.db)
def __getitem__(self, key):
result = self.fs.find_one({'_id': key})
if result is None:
raise KeyError
return self.serializer.loads(result.read())
def __setitem__(self, key, item):
try:
self.__delitem__(key)
except KeyError:
pass
value = self.serializer.dumps(item)
encoding = None if isinstance(value, bytes) else 'utf-8'
self.fs.put(value, encoding=encoding, **{'_id': key})
def __delitem__(self, key):
res = self.fs.find_one({'_id': key})
if res is None:
raise KeyError
self.fs.delete(res._id)
def __len__(self):
return self.db['fs.files'].estimated_document_count()
def __iter__(self):
for d in self.fs.find():
yield d._id
def clear(self):
self.db['fs.files'].drop()
self.db['fs.chunks'].drop()
| from gridfs import GridFS
from pymongo import MongoClient
from . import get_valid_kwargs
from .base import BaseCache, BaseStorage
from .mongo import MongoDict
class GridFSCache(BaseCache):
"""GridFS cache backend.
Use this backend to store documents greater than 16MB.
Example:
>>> requests_cache.install_cache(backend='gridfs')
>>>
>>> # Or, to provide custom connection settings:
>>> from pymongo import MongoClient
>>> requests_cache.install_cache(backend='gridfs', connection=MongoClient('alternate-host'))
Args:
db_name: Database name
connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
"""
def __init__(self, db_name: str, **kwargs):
super().__init__(**kwargs)
self.responses = GridFSPickleDict(db_name, **kwargs)
self.redirects = MongoDict(
db_name, collection_name='redirects', connection=self.responses.connection, **kwargs
)
class GridFSPickleDict(BaseStorage):
"""A dictionary-like interface for a GridFS database
Args:
db_name: Database name
collection_name: Ignored; GridFS internally uses collections 'fs.files' and 'fs.chunks'
connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one
kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient`
"""
def __init__(self, db_name, collection_name=None, connection=None, **kwargs):
super().__init__(**kwargs)
connection_kwargs = get_valid_kwargs(MongoClient, kwargs)
self.connection = connection or MongoClient(**connection_kwargs)
self.db = self.connection[db_name]
self.fs = GridFS(self.db)
def __getitem__(self, key):
result = self.fs.find_one({'_id': key})
if result is None:
raise KeyError
return self.serializer.loads(result.read())
def __setitem__(self, key, item):
try:
self.__delitem__(key)
except KeyError:
pass
value = self.serializer.dumps(item)
encoding = None if isinstance(value, bytes) else 'utf-8'
self.fs.put(value, encoding=encoding, **{'_id': key})
def __delitem__(self, key):
res = self.fs.find_one({'_id': key})
if res is None:
raise KeyError
self.fs.delete(res._id)
def __len__(self):
return self.db['fs.files'].estimated_document_count()
def __iter__(self):
for d in self.fs.find():
yield d._id
def clear(self):
self.db['fs.files'].drop()
self.db['fs.chunks'].drop()
| en | 0.53584 | GridFS cache backend. Use this backend to store documents greater than 16MB. Example: >>> requests_cache.install_cache(backend='gridfs') >>> >>> # Or, to provide custom connection settings: >>> from pymongo import MongoClient >>> requests_cache.install_cache(backend='gridfs', connection=MongoClient('alternate-host')) Args: db_name: Database name connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient` A dictionary-like interface for a GridFS database Args: db_name: Database name collection_name: Ignored; GridFS internally uses collections 'fs.files' and 'fs.chunks' connection: :py:class:`pymongo.MongoClient` object to reuse instead of creating a new one kwargs: Additional keyword arguments for :py:class:`pymongo.MongoClient` | 2.784301 | 3 |
PythonAPI/examples/Tests/countgpu.py | Ayaanfaraz/carla | 0 | 6624848 | <gh_stars>0
# #from keras import backend as K
# #K.tensorflow_backend._get_available_gpus()
# from torchvision import datasets, transforms, models
# import torch
# from torch import nn
# from torch import optim
# import torch.nn.functional as F
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = models.resnet50(pretrained=True)
# print(model)
# print("\n\n\n")
# # Freeze parameters so we don't backprop through them
# for param in model.parameters():
# param.requires_grad = False
# model.fc = nn.Sequential(nn.Linear(2048, 512),
# nn.ReLU(),
# nn.Dropout(0.2),
# nn.Linear(512, 3),
# nn.LogSoftmax(dim=1))
# criterion = nn.NLLLoss()
# optimizer = optim.Adam(model.fc.parameters(), lr=0.003)
# model.to(device)
# print(model)
# import tensorflow as tf
# print(tf.keras.Input(shape=(240,360,3)))
# import torch
# device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
# print(device)
# print(torch.cuda.get_device_name(0))
import gym
from gym import envs
all_envs = envs.registry.all()
env_ids = [env_spec.id for env_spec in all_envs]
print(sorted(env_ids))
env = gym.make('Breakout-v0', render_mode='human')
done = False
env.reset()
print("Obs :", env.observation_space)
print("ASpace: ", env.action_space)
while not done:
state, reward, done, _ = env.step(env.action_space.sample())
# print("State shape ", state.shape)
# print("reward, ", reward)
env.render()
env.close()
| # #from keras import backend as K
# #K.tensorflow_backend._get_available_gpus()
# from torchvision import datasets, transforms, models
# import torch
# from torch import nn
# from torch import optim
# import torch.nn.functional as F
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = models.resnet50(pretrained=True)
# print(model)
# print("\n\n\n")
# # Freeze parameters so we don't backprop through them
# for param in model.parameters():
# param.requires_grad = False
# model.fc = nn.Sequential(nn.Linear(2048, 512),
# nn.ReLU(),
# nn.Dropout(0.2),
# nn.Linear(512, 3),
# nn.LogSoftmax(dim=1))
# criterion = nn.NLLLoss()
# optimizer = optim.Adam(model.fc.parameters(), lr=0.003)
# model.to(device)
# print(model)
# import tensorflow as tf
# print(tf.keras.Input(shape=(240,360,3)))
# import torch
# device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
# print(device)
# print(torch.cuda.get_device_name(0))
import gym
from gym import envs
all_envs = envs.registry.all()
env_ids = [env_spec.id for env_spec in all_envs]
print(sorted(env_ids))
env = gym.make('Breakout-v0', render_mode='human')
done = False
env.reset()
print("Obs :", env.observation_space)
print("ASpace: ", env.action_space)
while not done:
state, reward, done, _ = env.step(env.action_space.sample())
# print("State shape ", state.shape)
# print("reward, ", reward)
env.render()
env.close() | en | 0.496978 | # #from keras import backend as K # #K.tensorflow_backend._get_available_gpus() # from torchvision import datasets, transforms, models # import torch # from torch import nn # from torch import optim # import torch.nn.functional as F # device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # model = models.resnet50(pretrained=True) # print(model) # print("\n\n\n") # # Freeze parameters so we don't backprop through them # for param in model.parameters(): # param.requires_grad = False # model.fc = nn.Sequential(nn.Linear(2048, 512), # nn.ReLU(), # nn.Dropout(0.2), # nn.Linear(512, 3), # nn.LogSoftmax(dim=1)) # criterion = nn.NLLLoss() # optimizer = optim.Adam(model.fc.parameters(), lr=0.003) # model.to(device) # print(model) # import tensorflow as tf # print(tf.keras.Input(shape=(240,360,3))) # import torch # device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu") # print(device) # print(torch.cuda.get_device_name(0)) # print("State shape ", state.shape) # print("reward, ", reward) | 2.296238 | 2 |
ArduCAM_Mini_OV2640_Capture_MQTT/mqtt_client_v2.py | ivanshilo/arduino | 0 | 6624849 | import paho.mqtt.publish as publish
import paho.mqtt.subscribe as subscribe
import paho.mqtt.client as mqtt
import time
import base64
# --- picture-reassembly state (used by record_image_from_camera) -----------
meta = True         # True -> the next camPIC packet is a metadata marker ("S"/"E")
file_close = False  # True -> close the picture file after the next data packet
filename = ""       # name of the picture file currently being written

# Topics on the local broker: the camera publishes picture packets on
# camPIC and log lines on camLOG; snapshot commands are sent on snapPIC.
local_feed_name = 'camPIC'
local_feed_name_command = 'snapPIC'
local_feed_name_log = 'camLOG'
local_mqtt_server_name = 'asus-lap'

# Adafruit IO account and feed names (credentials intentionally blank here;
# fill in before running).
username = ''
password_remote_mqtt = ''
remote_mqtt_server_name = 'io.adafruit.com'
remote_feed_name = username + '/feeds/camPIC'
remote_feed_name_command = username + '/feeds/snapPIC'
remote_feed_name_log = username + '/feeds/camLOG'
def on_connect_local(client, userdata, flags, rc):
    """Paho callback: the local broker acknowledged our connection (CONNACK)."""
    print("Connected with result code " + str(rc))
    # Subscribing from on_connect (rather than once at startup) means the
    # subscriptions are re-established automatically after every reconnect.
    for topic in (local_feed_name, local_feed_name_log):
        client.subscribe(topic)
def on_message_local(client, userdata, msg):
    """Paho callback: dispatch a message arriving from the local broker."""
    print(msg.topic + " " + str(msg.payload))
    # Guard clause: nothing to do for empty payloads.
    if not msg.payload:
        print("no data in payload, exit...")
        return
    topic = msg.topic
    if topic == local_feed_name:
        # Picture packet from the camera - feed the reassembly state machine.
        record_image_from_camera(msg)
    elif topic == local_feed_name_log:
        # Mirror camera log lines up to the remote (Adafruit IO) log feed.
        client_remote.publish(remote_feed_name_log, msg.payload)
def record_image_from_camera(msg):
    """Reassemble a JPEG streamed over MQTT in alternating packets.

    The camera alternates metadata packets ("S" = a new picture starts,
    "E" = the next data packet is the last one) with raw data packets.
    State lives in module globals because paho-mqtt invokes this callback
    without a user context object.  When the last chunk is written, the file
    is closed and forwarded to the remote feed.
    """
    global meta
    global file_close
    global f
    global filename
    # we expect a metadata packet now
    if meta:
        # Compare a one-byte slice so the check works for both str payloads
        # (Python 2) and bytes payloads (Python 3, where payload[0] is an
        # int and would never compare equal to "S").
        marker = msg.payload[:1]
        if marker in (b"S", "S"):
            # Start of a new picture: create a fresh timestamped file
            # (f == 0 is the "no file open" sentinel set at module level).
            if not f:
                timestr = time.strftime("%Y%m%d-%H%M%S")
                filename = 'picture_' + timestr + '.jpg'
                f = open(filename, 'wb')
                print("new file " + filename + " created")
                file_close = False
            print("S")
            meta = False
            return
        # Next data packet is the last one - be ready to close the file.
        if marker in (b"E", "E"):
            print("E")
            meta = False
            file_close = True
            return
    # Raw data packet: append it to the open file.
    if meta is not True:
        print("data")
        f.write(msg.payload)
        meta = True
        if file_close:
            f.close()
            f = 0
            print("file closed")
            publish_to_remote_mqtt(client_remote, filename, remote_feed_name)
def on_connect_remote(client, userdata, flags, rc):
    """Paho callback: the remote broker (Adafruit IO) acknowledged our connection."""
    print("Connected with result code "+str(rc))
    # Re-subscribe on every (re)connect so the command feed keeps working
    # after connection drops.  QoS 1 requests at-least-once delivery.
    # (Subscribing to remote_feed_name as well would only echo our own
    # uploads back - useful for debugging only.)
    client.subscribe(remote_feed_name_command, qos=1)
def on_message_remote(client, userdata, msg):
    """Paho callback: relay snapshot commands from Adafruit IO to the local broker."""
    details = ("Received message '" + str(msg.payload) + "' on topic '"
               + msg.topic + "' with QoS " + str(msg.qos))
    print(details)
    # Only the command feed is forwarded; everything else is just logged.
    if msg.topic == remote_feed_name_command:
        client_local.publish(local_feed_name_command, msg.payload)
def publish_to_remote_mqtt(client, filename, feedname):
    """Base64-encode an image file and publish it to the given remote feed.

    Adafruit IO feeds carry text, so the JPEG bytes are base64-encoded
    before publishing.
    """
    print("publishing to " + remote_mqtt_server_name + " file " + filename)
    # Open in binary mode (required for b64encode on Python 3, harmless on
    # Python 2) and let the context manager close the handle - the original
    # code leaked the file descriptor.
    with open(filename, 'rb') as image:
        encoded = base64.b64encode(image.read())
    client.publish(feedname, encoded)
# --- client wiring ----------------------------------------------------------
# Remote client: talks to Adafruit IO (authenticated); receives snapshot
# commands and publishes pictures/log lines.
client_remote = mqtt.Client()
client_remote.on_connect = on_connect_remote
client_remote.on_message = on_message_remote
client_remote.username_pw_set(username, password_remote_mqtt)
client_remote.connect(remote_mqtt_server_name, 1883, 60)
# Local client: talks to the broker the camera publishes to (no auth).
client_local = mqtt.Client()
client_local.on_connect = on_connect_local
client_local.on_message = on_message_local
client_local.connect(local_mqtt_server_name, 1883, 60)
# Sentinel meaning "no picture file currently open"
# (see record_image_from_camera, which rebinds this global).
f = 0
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
# loop_start() runs the remote client's network loop in a background thread
# and returns immediately...
client_remote.loop_start()
# ...while loop_forever() blocks here servicing the local client.
client_local.loop_forever()
| import paho.mqtt.publish as publish
import paho.mqtt.subscribe as subscribe
import paho.mqtt.client as mqtt
import time
import base64
# --- picture-reassembly state (used by record_image_from_camera) -----------
meta = True         # True -> the next camPIC packet is a metadata marker ("S"/"E")
file_close = False  # True -> close the picture file after the next data packet
filename = ""       # name of the picture file currently being written

# Topics on the local broker: the camera publishes picture packets on
# camPIC and log lines on camLOG; snapshot commands are sent on snapPIC.
local_feed_name = 'camPIC'
local_feed_name_command = 'snapPIC'
local_feed_name_log = 'camLOG'
local_mqtt_server_name = 'asus-lap'

# Adafruit IO account and feed names (credentials intentionally blank here;
# fill in before running).
username = ''
password_remote_mqtt = ''
remote_mqtt_server_name = 'io.adafruit.com'
remote_feed_name = username + '/feeds/camPIC'
remote_feed_name_command = username + '/feeds/snapPIC'
remote_feed_name_log = username + '/feeds/camLOG'
def on_connect_local(client, userdata, flags, rc):
    """Paho callback: the local broker acknowledged our connection (CONNACK)."""
    print("Connected with result code " + str(rc))
    # Subscribing from on_connect (rather than once at startup) means the
    # subscriptions are re-established automatically after every reconnect.
    for topic in (local_feed_name, local_feed_name_log):
        client.subscribe(topic)
def on_message_local(client, userdata, msg):
    """Paho callback: dispatch a message arriving from the local broker."""
    print(msg.topic + " " + str(msg.payload))
    # Guard clause: nothing to do for empty payloads.
    if not msg.payload:
        print("no data in payload, exit...")
        return
    topic = msg.topic
    if topic == local_feed_name:
        # Picture packet from the camera - feed the reassembly state machine.
        record_image_from_camera(msg)
    elif topic == local_feed_name_log:
        # Mirror camera log lines up to the remote (Adafruit IO) log feed.
        client_remote.publish(remote_feed_name_log, msg.payload)
def record_image_from_camera(msg):
    """Reassemble a JPEG streamed over MQTT in alternating packets.

    The camera alternates metadata packets ("S" = a new picture starts,
    "E" = the next data packet is the last one) with raw data packets.
    State lives in module globals because paho-mqtt invokes this callback
    without a user context object.  When the last chunk is written, the file
    is closed and forwarded to the remote feed.
    """
    global meta
    global file_close
    global f
    global filename
    # we expect a metadata packet now
    if meta:
        # Compare a one-byte slice so the check works for both str payloads
        # (Python 2) and bytes payloads (Python 3, where payload[0] is an
        # int and would never compare equal to "S").
        marker = msg.payload[:1]
        if marker in (b"S", "S"):
            # Start of a new picture: create a fresh timestamped file
            # (f == 0 is the "no file open" sentinel set at module level).
            if not f:
                timestr = time.strftime("%Y%m%d-%H%M%S")
                filename = 'picture_' + timestr + '.jpg'
                f = open(filename, 'wb')
                print("new file " + filename + " created")
                file_close = False
            print("S")
            meta = False
            return
        # Next data packet is the last one - be ready to close the file.
        if marker in (b"E", "E"):
            print("E")
            meta = False
            file_close = True
            return
    # Raw data packet: append it to the open file.
    if meta is not True:
        print("data")
        f.write(msg.payload)
        meta = True
        if file_close:
            f.close()
            f = 0
            print("file closed")
            publish_to_remote_mqtt(client_remote, filename, remote_feed_name)
def on_connect_remote(client, userdata, flags, rc):
    """Paho callback: the remote broker (Adafruit IO) acknowledged our connection."""
    print("Connected with result code "+str(rc))
    # Re-subscribe on every (re)connect so the command feed keeps working
    # after connection drops.  QoS 1 requests at-least-once delivery.
    # (Subscribing to remote_feed_name as well would only echo our own
    # uploads back - useful for debugging only.)
    client.subscribe(remote_feed_name_command, qos=1)
def on_message_remote(client, userdata, msg):
    """Paho callback: relay snapshot commands from Adafruit IO to the local broker."""
    details = ("Received message '" + str(msg.payload) + "' on topic '"
               + msg.topic + "' with QoS " + str(msg.qos))
    print(details)
    # Only the command feed is forwarded; everything else is just logged.
    if msg.topic == remote_feed_name_command:
        client_local.publish(local_feed_name_command, msg.payload)
def publish_to_remote_mqtt(client, filename, feedname):
    """Base64-encode an image file and publish it to the given remote feed.

    Adafruit IO feeds carry text, so the JPEG bytes are base64-encoded
    before publishing.
    """
    print("publishing to " + remote_mqtt_server_name + " file " + filename)
    # Open in binary mode (required for b64encode on Python 3, harmless on
    # Python 2) and let the context manager close the handle - the original
    # code leaked the file descriptor.
    with open(filename, 'rb') as image:
        encoded = base64.b64encode(image.read())
    client.publish(feedname, encoded)
# --- client wiring ----------------------------------------------------------
# Remote client: talks to Adafruit IO (authenticated); receives snapshot
# commands and publishes pictures/log lines.
client_remote = mqtt.Client()
client_remote.on_connect = on_connect_remote
client_remote.on_message = on_message_remote
client_remote.username_pw_set(username, password_remote_mqtt)
client_remote.connect(remote_mqtt_server_name, 1883, 60)
# Local client: talks to the broker the camera publishes to (no auth).
client_local = mqtt.Client()
client_local.on_connect = on_connect_local
client_local.on_message = on_message_local
client_local.connect(local_mqtt_server_name, 1883, 60)
# Sentinel meaning "no picture file currently open"
# (see record_image_from_camera, which rebinds this global).
f = 0
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
# loop_start() runs the remote client's network loop in a background thread
# and returns immediately...
client_remote.loop_start()
# ...while loop_forever() blocks here servicing the local client.
client_local.loop_forever()
| en | 0.887282 | # The callback for when the client receives a CONNACK response from the server. # Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed. # The callback for when a PUBLISH message is received from the server. # we expect for metadata to be sent now # we have to create a new file # next data packet is the last, be ready to close the file # real data # The callback for when the client receives a CONNACK response from the server. # Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed. # DEBUG only - to show what has been published by script #client.subscribe(remote_feed_name) # The callback for when a PUBLISH message is received from the server. # publish data to remote mqtt server from file #for DEBUG #print encoded #publish to feed in internet # Blocking call that processes network traffic, dispatches callbacks and # handles reconnecting. # Other loop*() functions are available that give a threaded interface and a # manual interface. #loop_start() starts processing loop inbackground and returns to current thread # this loop blocks program from exiting | 2.739947 | 3 |
bot/exts/info/pep.py | laundmo/bot | 2 | 6624850 | <reponame>laundmo/bot
import logging
from datetime import datetime, timedelta
from email.parser import HeaderParser
from io import StringIO
from typing import Dict, Optional, Tuple
from discord import Colour, Embed
from discord.ext.commands import Cog, Context, command
from bot.bot import Bot
from bot.constants import Keys
from bot.utils.cache import AsyncCache
log = logging.getLogger(__name__)

ICON_URL = "https://www.python.org/static/opengraph-icon-200x200.png"  # python.org logo, used as embed thumbnail
BASE_PEP_URL = "http://www.python.org/dev/peps/pep-"  # human-readable PEP page prefix (zero-padded number is appended)
PEPS_LISTING_API_URL = "https://api.github.com/repos/python/peps/contents?ref=master"  # GitHub contents API for PEP source files

# Caches get_pep_embed results so each PEP is fetched from GitHub at most once.
pep_cache = AsyncCache()

# Send a token with GitHub API requests when one is configured
# (presumably to raise the API rate limit - anonymous requests are limited).
GITHUB_API_HEADERS = {}
if Keys.github:
    GITHUB_API_HEADERS["Authorization"] = f"token {Keys.github}"
class PythonEnhancementProposals(Cog):
    """Cog for displaying information about PEPs."""

    def __init__(self, bot: Bot):
        self.bot = bot
        # Maps PEP number -> raw download URL of its source file on GitHub.
        self.peps: Dict[int, str] = {}
        # To avoid situations where we don't have last datetime, set this to now.
        self.last_refreshed_peps: datetime = datetime.now()
        # Populate self.peps in a background task; __init__ cannot be async.
        self.bot.loop.create_task(self.refresh_peps_urls())

    async def refresh_peps_urls(self) -> None:
        """Fetch the PEP file listing from GitHub and rebuild the number -> URL map."""
        # Wait until HTTP client is available
        await self.bot.wait_until_ready()
        log.trace("Started refreshing PEP URLs.")
        self.last_refreshed_peps = datetime.now()
        async with self.bot.http_session.get(
            PEPS_LISTING_API_URL,
            headers=GITHUB_API_HEADERS
        ) as resp:
            if resp.status != 200:
                log.warning(f"Fetching PEP URLs from GitHub API failed with code {resp.status}")
                return
            listing = await resp.json()
        log.trace("Got PEP URLs listing from GitHub API")
        # PEP sources are files named pep-NNNN.rst or pep-NNNN.txt; extract
        # the number and remember where the raw file can be downloaded.
        for file in listing:
            name = file["name"]
            if name.startswith("pep-") and name.endswith((".rst", ".txt")):
                pep_number = name.replace("pep-", "").split(".")[0]
                self.peps[int(pep_number)] = file["download_url"]
        log.info("Successfully refreshed PEP URLs listing.")

    @staticmethod
    def get_pep_zero_embed() -> Embed:
        """Get information embed about PEP 0."""
        # PEP 0 is generated (not stored as a .rst/.txt source file), so its
        # embed is hard-coded rather than fetched.
        pep_embed = Embed(
            title="**PEP 0 - Index of Python Enhancement Proposals (PEPs)**",
            url="https://www.python.org/dev/peps/"
        )
        pep_embed.set_thumbnail(url=ICON_URL)
        pep_embed.add_field(name="Status", value="Active")
        pep_embed.add_field(name="Created", value="13-Jul-2000")
        pep_embed.add_field(name="Type", value="Informational")
        return pep_embed

    async def validate_pep_number(self, pep_nr: int) -> Optional[Embed]:
        """Return an error embed if `pep_nr` is not a known PEP, otherwise None."""
        # An unknown number may just mean the listing is stale: refresh it,
        # but at most once per 30 minutes and only for plausible numbers
        # (fewer than 5 digits) to avoid needless GitHub API calls.
        if (
            pep_nr not in self.peps
            and (self.last_refreshed_peps + timedelta(minutes=30)) <= datetime.now()
            and len(str(pep_nr)) < 5
        ):
            await self.refresh_peps_urls()
        if pep_nr not in self.peps:
            log.trace(f"PEP {pep_nr} was not found")
            return Embed(
                title="PEP not found",
                description=f"PEP {pep_nr} does not exist.",
                colour=Colour.red()
            )
        return None

    def generate_pep_embed(self, pep_header: Dict, pep_nr: int) -> Embed:
        """Generate PEP embed based on PEP headers data."""
        # Assemble the embed
        pep_embed = Embed(
            title=f"**PEP {pep_nr} - {pep_header['Title']}**",
            description=f"[Link]({BASE_PEP_URL}{pep_nr:04})",
        )
        pep_embed.set_thumbnail(url=ICON_URL)
        # Add the interesting information
        fields_to_check = ("Status", "Python-Version", "Created", "Type")
        for field in fields_to_check:
            # Check for a PEP metadata field that is present but has an empty value
            # embed field values can't contain an empty string
            if pep_header.get(field, ""):
                pep_embed.add_field(name=field, value=pep_header[field])
        return pep_embed

    @pep_cache(arg_offset=1)
    async def get_pep_embed(self, pep_nr: int) -> Tuple[Embed, bool]:
        """Fetch, build and return the embed for a PEP; the bool reports whether fetching succeeded."""
        response = await self.bot.http_session.get(self.peps[pep_nr])
        if response.status == 200:
            log.trace(f"PEP {pep_nr} found")
            pep_content = await response.text()
            # PEP sources begin with email-style headers; parse them the same
            # way the official index generator does.
            # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179
            pep_header = HeaderParser().parse(StringIO(pep_content))
            return self.generate_pep_embed(pep_header, pep_nr), True
        else:
            log.trace(
                f"The user requested PEP {pep_nr}, but the response had an unexpected status code: {response.status}."
            )
            return Embed(
                title="Unexpected error",
                description="Unexpected HTTP error during PEP search. Please let us know.",
                colour=Colour.red()
            ), False

    @command(name='pep', aliases=('get_pep', 'p'))
    async def pep_command(self, ctx: Context, pep_number: int) -> None:
        """Fetches information about a PEP and sends it to the channel."""
        # Trigger typing in chat to show users that bot is responding
        await ctx.trigger_typing()
        # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.
        if pep_number == 0:
            pep_embed = self.get_pep_zero_embed()
            success = True
        else:
            success = False
            # validate_pep_number returns an error embed for unknown PEPs;
            # only fetch the real embed when validation produced nothing.
            if not (pep_embed := await self.validate_pep_number(pep_number)):
                pep_embed, success = await self.get_pep_embed(pep_number)
        await ctx.send(embed=pep_embed)
        if success:
            log.trace(f"PEP {pep_number} getting and sending finished successfully. Increasing stat.")
            self.bot.stats.incr(f"pep_fetches.{pep_number}")
        else:
            log.trace(f"Getting PEP {pep_number} failed. Error embed sent.")
def setup(bot: Bot) -> None:
    """Load the PEP cog."""
    cog = PythonEnhancementProposals(bot)
    bot.add_cog(cog)
| import logging
from datetime import datetime, timedelta
from email.parser import HeaderParser
from io import StringIO
from typing import Dict, Optional, Tuple
from discord import Colour, Embed
from discord.ext.commands import Cog, Context, command
from bot.bot import Bot
from bot.constants import Keys
from bot.utils.cache import AsyncCache
log = logging.getLogger(__name__)

ICON_URL = "https://www.python.org/static/opengraph-icon-200x200.png"  # python.org logo, used as embed thumbnail
BASE_PEP_URL = "http://www.python.org/dev/peps/pep-"  # human-readable PEP page prefix (zero-padded number is appended)
PEPS_LISTING_API_URL = "https://api.github.com/repos/python/peps/contents?ref=master"  # GitHub contents API for PEP source files

# Caches get_pep_embed results so each PEP is fetched from GitHub at most once.
pep_cache = AsyncCache()

# Send a token with GitHub API requests when one is configured
# (presumably to raise the API rate limit - anonymous requests are limited).
GITHUB_API_HEADERS = {}
if Keys.github:
    GITHUB_API_HEADERS["Authorization"] = f"token {Keys.github}"
class PythonEnhancementProposals(Cog):
    """Cog for displaying information about PEPs."""

    def __init__(self, bot: Bot):
        self.bot = bot
        # Maps PEP number -> raw download URL of its source file on GitHub.
        self.peps: Dict[int, str] = {}
        # To avoid situations where we don't have last datetime, set this to now.
        self.last_refreshed_peps: datetime = datetime.now()
        # Populate self.peps in a background task; __init__ cannot be async.
        self.bot.loop.create_task(self.refresh_peps_urls())

    async def refresh_peps_urls(self) -> None:
        """Fetch the PEP file listing from GitHub and rebuild the number -> URL map."""
        # Wait until HTTP client is available
        await self.bot.wait_until_ready()
        log.trace("Started refreshing PEP URLs.")
        self.last_refreshed_peps = datetime.now()
        async with self.bot.http_session.get(
            PEPS_LISTING_API_URL,
            headers=GITHUB_API_HEADERS
        ) as resp:
            if resp.status != 200:
                log.warning(f"Fetching PEP URLs from GitHub API failed with code {resp.status}")
                return
            listing = await resp.json()
        log.trace("Got PEP URLs listing from GitHub API")
        # PEP sources are files named pep-NNNN.rst or pep-NNNN.txt; extract
        # the number and remember where the raw file can be downloaded.
        for file in listing:
            name = file["name"]
            if name.startswith("pep-") and name.endswith((".rst", ".txt")):
                pep_number = name.replace("pep-", "").split(".")[0]
                self.peps[int(pep_number)] = file["download_url"]
        log.info("Successfully refreshed PEP URLs listing.")

    @staticmethod
    def get_pep_zero_embed() -> Embed:
        """Get information embed about PEP 0."""
        # PEP 0 is generated (not stored as a .rst/.txt source file), so its
        # embed is hard-coded rather than fetched.
        pep_embed = Embed(
            title="**PEP 0 - Index of Python Enhancement Proposals (PEPs)**",
            url="https://www.python.org/dev/peps/"
        )
        pep_embed.set_thumbnail(url=ICON_URL)
        pep_embed.add_field(name="Status", value="Active")
        pep_embed.add_field(name="Created", value="13-Jul-2000")
        pep_embed.add_field(name="Type", value="Informational")
        return pep_embed

    async def validate_pep_number(self, pep_nr: int) -> Optional[Embed]:
        """Return an error embed if `pep_nr` is not a known PEP, otherwise None."""
        # An unknown number may just mean the listing is stale: refresh it,
        # but at most once per 30 minutes and only for plausible numbers
        # (fewer than 5 digits) to avoid needless GitHub API calls.
        if (
            pep_nr not in self.peps
            and (self.last_refreshed_peps + timedelta(minutes=30)) <= datetime.now()
            and len(str(pep_nr)) < 5
        ):
            await self.refresh_peps_urls()
        if pep_nr not in self.peps:
            log.trace(f"PEP {pep_nr} was not found")
            return Embed(
                title="PEP not found",
                description=f"PEP {pep_nr} does not exist.",
                colour=Colour.red()
            )
        return None

    def generate_pep_embed(self, pep_header: Dict, pep_nr: int) -> Embed:
        """Generate PEP embed based on PEP headers data."""
        # Assemble the embed
        pep_embed = Embed(
            title=f"**PEP {pep_nr} - {pep_header['Title']}**",
            description=f"[Link]({BASE_PEP_URL}{pep_nr:04})",
        )
        pep_embed.set_thumbnail(url=ICON_URL)
        # Add the interesting information
        fields_to_check = ("Status", "Python-Version", "Created", "Type")
        for field in fields_to_check:
            # Check for a PEP metadata field that is present but has an empty value
            # embed field values can't contain an empty string
            if pep_header.get(field, ""):
                pep_embed.add_field(name=field, value=pep_header[field])
        return pep_embed

    @pep_cache(arg_offset=1)
    async def get_pep_embed(self, pep_nr: int) -> Tuple[Embed, bool]:
        """Fetch, build and return the embed for a PEP; the bool reports whether fetching succeeded."""
        response = await self.bot.http_session.get(self.peps[pep_nr])
        if response.status == 200:
            log.trace(f"PEP {pep_nr} found")
            pep_content = await response.text()
            # PEP sources begin with email-style headers; parse them the same
            # way the official index generator does.
            # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179
            pep_header = HeaderParser().parse(StringIO(pep_content))
            return self.generate_pep_embed(pep_header, pep_nr), True
        else:
            log.trace(
                f"The user requested PEP {pep_nr}, but the response had an unexpected status code: {response.status}."
            )
            return Embed(
                title="Unexpected error",
                description="Unexpected HTTP error during PEP search. Please let us know.",
                colour=Colour.red()
            ), False

    @command(name='pep', aliases=('get_pep', 'p'))
    async def pep_command(self, ctx: Context, pep_number: int) -> None:
        """Fetches information about a PEP and sends it to the channel."""
        # Trigger typing in chat to show users that bot is responding
        await ctx.trigger_typing()
        # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.
        if pep_number == 0:
            pep_embed = self.get_pep_zero_embed()
            success = True
        else:
            success = False
            # validate_pep_number returns an error embed for unknown PEPs;
            # only fetch the real embed when validation produced nothing.
            if not (pep_embed := await self.validate_pep_number(pep_number)):
                pep_embed, success = await self.get_pep_embed(pep_number)
        await ctx.send(embed=pep_embed)
        if success:
            log.trace(f"PEP {pep_number} getting and sending finished successfully. Increasing stat.")
            self.bot.stats.incr(f"pep_fetches.{pep_number}")
        else:
            log.trace(f"Getting PEP {pep_number} failed. Error embed sent.")
def setup(bot: Bot) -> None:
    """Load the PEP cog."""
    cog = PythonEnhancementProposals(bot)
    bot.add_cog(cog)