Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 10 to 805k |
| ext | string | 6 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 176 |
| max_stars_repo_name | string | length 7 to 114 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 176 |
| max_issues_repo_name | string | length 7 to 114 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 48.5k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 176 |
| max_forks_repo_name | string | length 7 to 114 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 10 to 805k |
| avg_line_length | float64 | 5.53 to 11k |
| max_line_length | int64 | 10 to 129k |
| alphanum_fraction | float64 | 0.13 to 0.93 |
| content_no_comment | string | length 0 to 449k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
**Record 1: `eventmanager/contacts/migrations/0001_initial.py`**

- hexsha: `f71525eb6d02c2219a98e137bacd9ff85536c953` · size: 838 bytes · ext: `py` · lang: Python
- repo (identical in the stars/issues/forks variants): `jasham/event2Backend` @ `54e9945676458231cacb6fb8ad62a757a9547b63`, licenses: `["MIT"]`
- max_stars_count: null · max_issues_count: null · max_forks_count: 1 (forks event window: 2020-04-27T06:45:18.000Z to 2020-04-27T06:45:18.000Z)
- content:

```python
# Generated by Django 2.2.5 on 2020-03-08 07:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('leads', '__first__'),
]
operations = [
migrations.CreateModel(
name='Contacts',
fields=[
('paid', models.CharField(choices=[('Yes', 'Yes'), ('No', 'No')], default='No', max_length=50)),
('amount', models.IntegerField()),
('unique_number', models.IntegerField(primary_key=True, serialize=False)),
('is_valid', models.BooleanField(default=True)),
('leads_data', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leads.Leads')),
],
),
    ]
```
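
For reference, a model class that would produce this initial migration (a sketch reconstructed from the `operations` above, not code from the repo; the import path for the referenced `Leads` model is an assumption):

```python
# Hypothetical models.py corresponding to 0001_initial.py above.
from django.db import models

from leads.models import Leads  # assumed location of the referenced "leads.Leads" model


class Contacts(models.Model):
    PAID_CHOICES = [("Yes", "Yes"), ("No", "No")]

    paid = models.CharField(choices=PAID_CHOICES, default="No", max_length=50)
    amount = models.IntegerField()
    unique_number = models.IntegerField(primary_key=True)  # makemigrations adds serialize=False
    is_valid = models.BooleanField(default=True)
    leads_data = models.ForeignKey(Leads, on_delete=models.CASCADE)
```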
- avg_line_length: 31.037037 · max_line_length: 114 · alphanum_fraction: 0.559666
- content_no_comment:

```python
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('leads', '__first__'),
]
operations = [
migrations.CreateModel(
name='Contacts',
fields=[
('paid', models.CharField(choices=[('Yes', 'Yes'), ('No', 'No')], default='No', max_length=50)),
('amount', models.IntegerField()),
('unique_number', models.IntegerField(primary_key=True, serialize=False)),
('is_valid', models.BooleanField(default=True)),
('leads_data', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='leads.Leads')),
],
),
]
```

- is_comment_constant_removed: true · is_sharp_comment_removed: true
**Record 2: `topaz/utils/regexp.py`**

- hexsha: `f71526b15e9dfaa84b99bbeb8e8827713bb52184` · size: 38,532 bytes · ext: `py` · lang: Python
- repo (identical in the stars/issues/forks variants): `mswart/topaz` @ `4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae`, licenses: `["BSD-3-Clause"]`
- max_stars_count, max_issues_count, max_forks_count: all null (no event datetimes)
- content:

```python
import sys
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rsre.rsre_core import (
OPCODE_LITERAL, OPCODE_LITERAL_IGNORE, OPCODE_SUCCESS, OPCODE_ASSERT,
OPCODE_MARK, OPCODE_REPEAT, OPCODE_ANY, OPCODE_ANY_ALL, OPCODE_MAX_UNTIL,
OPCODE_MIN_UNTIL, OPCODE_GROUPREF, OPCODE_AT, OPCODE_BRANCH, OPCODE_RANGE,
OPCODE_JUMP, OPCODE_ASSERT_NOT, OPCODE_CATEGORY, OPCODE_FAILURE, OPCODE_IN,
OPCODE_NEGATE
)
from rpython.rlib.rsre.rsre_char import MAXREPEAT as MAX_REPEAT
IGNORE_CASE = 1 << 0
EXTENDED = 1 << 1
DOT_ALL = 1 << 2
ONCE = 1 << 3
FIXED_ENCODING = 1 << 4
NO_ENCODING = 1 << 5
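# Ruby-style inline option letters mapped onto the flag bits above. Note that
# Ruby's "m" (multiline) corresponds to DOT_ALL here, and "e"/"s"/"u" all pin
# the pattern to a fixed encoding while "n" selects no encoding.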
OPTIONS_MAP = {
"i": IGNORE_CASE,
"x": EXTENDED,
"m": DOT_ALL,
"o": ONCE,
"u": FIXED_ENCODING,
"n": NO_ENCODING,
"e": FIXED_ENCODING,
"s": FIXED_ENCODING,
}
FLAGS_MAP = [
("m", DOT_ALL),
("i", IGNORE_CASE),
("x", EXTENDED),
]
SPECIAL_CHARS = "()|?*+{^$.[\\#"
CHARACTER_ESCAPES = {
"a": "\a",
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
"v": "\v",
}
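# Zero-width position codes (AT_*) and character-category codes (CATEGORY_*),
# mirroring the enumerations the rsre engine expects as opcode arguments.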
AT_BEGINNING = 0
AT_BEGINNING_LINE = 1
AT_BEGINNING_STRING = 2
AT_BOUNDARY = 3
AT_NON_BOUNDARY = 4
AT_END = 5
AT_END_LINE = 6
AT_END_STRING = 7
AT_LOC_BOUNDARY = 8
AT_LOC_NON_BOUNDARY = 9
AT_UNI_BOUNDARY = 10
AT_UNI_NON_BOUNDARY = 11
CATEGORY_DIGIT = 0
CATEGORY_NOT_DIGIT = 1
CATEGORY_SPACE = 2
CATEGORY_NOT_SPACE = 3
CATEGORY_WORD = 4
CATEGORY_NOT_WORD = 5
CATEGORY_LINEBREAK = 6
CATEGORY_NOT_LINEBREAK = 7
CATEGORY_LOC_WORD = 8
CATEGORY_LOC_NOT_WORD = 9
CATEGORY_UNI_DIGIT = 10
CATEGORY_UNI_NOT_DIGIT = 11
CATEGORY_UNI_SPACE = 12
CATEGORY_UNI_NOT_SPACE = 13
CATEGORY_UNI_WORD = 14
CATEGORY_UNI_NOT_WORD = 15
CATEGORY_UNI_LINEBREAK = 16
CATEGORY_UNI_NOT_LINEBREAK = 17
class UnscopedFlagSet(Exception):
def __init__(self, global_flags):
Exception.__init__(self)
self.global_flags = global_flags
class RegexpError(Exception):
pass
class ParseError(Exception):
pass
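# Cursor over the pattern string. When ignore_space is set (extended mode),
# at_end/get/match transparently skip whitespace and "#"-to-end-of-line
# comments before looking at the next significant character.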
class Source(object):
def __init__(self, s):
self.pos = 0
self.s = s
self.ignore_space = False
def at_end(self):
s = self.s
pos = self.pos
if self.ignore_space:
while True:
if pos >= len(s):
break
elif s[pos].isspace():
pos += 1
elif s[pos] == "#":
pos = s.find("\n", pos)
if pos < 0:
pos = len(s)
else:
break
return pos >= len(s)
def get(self):
s = self.s
pos = self.pos
if self.ignore_space:
while True:
if pos >= len(s):
return ""
elif s[pos].isspace():
pos += 1
elif s[pos] == "#":
pos = s.find("\n", pos)
if pos < 0:
pos = len(s)
else:
break
try:
ch = s[pos]
self.pos = pos + 1
return ch
except IndexError:
self.pos = pos
return ""
except ValueError:
self.pos = len(s)
return ""
def match(self, substr):
s = self.s
pos = self.pos
if self.ignore_space:
for c in substr:
while True:
if pos >= len(s):
return False
elif s[pos].isspace():
pos += 1
elif s[pos] == "#":
pos = s.find("\n", pos)
if pos < 0:
pos = len(s)
else:
break
if s[pos] != c:
return False
pos += 1
self.pos = pos
return True
else:
if pos + len(substr) <= len(s):
matches = True
for i in xrange(len(substr)):
if s[pos + i] != substr[i]:
matches = False
else:
matches = False
if not matches:
return False
self.pos = pos + len(substr)
return True
def expect(self, substr):
if not self.match(substr):
raise RegexpError("Missing %s" % substr)
class Info(object):
OPEN = 0
CLOSED = 1
def __init__(self, flags):
self.flags = flags
self.group_count = 0
self.used_groups = {}
self.group_state = {}
self.group_index = {}
self.group_name = {}
self.named_lists_used = {}
self.defined_groups = {}
self.group_offsets = []
def new_group(self, name=None):
if name in self.group_index:
group = self.group_index[name]
else:
while True:
self.group_count += 1
if name is None or self.group_count not in self.group_name:
break
group = self.group_count
if name is not None:
self.group_index[name] = group
self.group_name[group] = name
self.used_groups[group] = None
self.group_state[group] = self.OPEN
return group
def close_group(self, group, hidden=False):
last_group_offset = self.group_offsets[-1] if self.group_offsets else 0
if hidden:
last_group_offset += 1
self.group_offsets.append(last_group_offset)
self.group_state[group] = self.CLOSED
def normalize_group(self, name):
if name.isdigit():
return int(name)
else:
return self.group_index[name]
def is_open_group(self, name):
group = self.normalize_group(name)
return group in self.group_state and self.group_state[group] == self.OPEN
BaseSorter = make_timsort_class()
class BranchSorter(BaseSorter):
def __init__(self, items, order):
BaseSorter.__init__(self, items)
self.order = order
def lt(self, a, b):
return self.order[a[0]] < self.order[b[0]]
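# Flat opcode buffer; tell() and patch() support back-patching the skip
# offsets carried by branch, repeat, and assertion opcodes.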
class CompilerContext(object):
def __init__(self):
self.data = []
def emit(self, opcode):
self.data.append(opcode)
def tell(self):
return len(self.data)
def patch(self, pos, value):
self.data[pos] = value
def build(self):
return self.data[:]
class Counts(object):
def __init__(self, min_count, max_count=MAX_REPEAT, limited_quantifier=False):
self.min_count = min_count
self.max_count = max_count
self.limited_quantifier = limited_quantifier
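# Base class for parsed regexp AST nodes. with_flags() returns self when
# nothing changes and otherwise rebuilds the node via the subclass's rebuild().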
class RegexpBase(object):
_attrs_ = ["positive", "case_insensitive", "zerowidth"]
def __init__(self, positive=True, case_insensitive=False, zerowidth=False):
self.positive = positive
self.case_insensitive = case_insensitive
self.zerowidth = zerowidth
@specialize.argtype(1, 2, 3)
def with_flags(self, positive=None, case_insensitive=None, zerowidth=None):
positive = positive if positive is not None else self.positive
case_insensitive = case_insensitive if case_insensitive is not None else self.case_insensitive
zerowidth = zerowidth if zerowidth is not None else self.zerowidth
if (positive == self.positive and
case_insensitive == self.case_insensitive and
zerowidth == self.zerowidth):
return self
return self.rebuild(positive, case_insensitive, zerowidth)
class Character(RegexpBase):
def __init__(self, value, case_insensitive=False, positive=True, zerowidth=False):
RegexpBase.__init__(self, case_insensitive=case_insensitive, positive=positive, zerowidth=zerowidth)
self.value = value
def rebuild(self, positive, case_insensitive, zerowidth):
return Character(self.value, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
def getwidth(self):
return 1, 1
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def can_be_affix(self):
return True
def is_empty(self):
return False
def compile(self, ctx):
ctx.emit(OPCODE_LITERAL_IGNORE if self.case_insensitive else OPCODE_LITERAL)
ctx.emit(self.value)
class Any(RegexpBase):
def is_empty(self):
return False
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def compile(self, ctx):
ctx.emit(OPCODE_ANY)
class AnyAll(RegexpBase):
def is_empty(self):
return False
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def compile(self, ctx):
ctx.emit(OPCODE_ANY_ALL)
class ZeroWidthBase(RegexpBase):
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
class AtPosition(ZeroWidthBase):
def __init__(self, code):
ZeroWidthBase.__init__(self)
self.code = code
def can_be_affix(self):
return True
def compile(self, ctx):
ctx.emit(OPCODE_AT)
ctx.emit(self.code)
class Property(RegexpBase):
def __init__(self, value, positive=True, case_insensitive=False, zerowidth=False):
RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
self.value = value
def rebuild(self, positive, case_insensitive, zerowidth):
return Property(self.value, positive, case_insensitive, zerowidth)
def getwidth(self):
return 1, 1
def is_empty(self):
return False
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def can_be_affix(self):
return True
def compile(self, ctx):
ctx.emit(OPCODE_CATEGORY)
ctx.emit(self.value)
class Range(RegexpBase):
def __init__(self, lower, upper, positive=True, case_insensitive=False, zerowidth=False):
RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
self.lower = lower
self.upper = upper
def rebuild(self, positive, case_insensitive, zerowidth):
return Range(self.lower, self.upper, positive, case_insensitive, zerowidth)
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def can_be_affix(self):
return True
def compile(self, ctx):
if not self.positive:
ctx.emit(OPCODE_NEGATE)
ctx.emit(OPCODE_RANGE)
ctx.emit(self.lower)
ctx.emit(self.upper)
class Sequence(RegexpBase):
def __init__(self, items):
RegexpBase.__init__(self)
self.items = items
def is_empty(self):
for item in self.items:
if not item.is_empty():
return False
return True
def fix_groups(self):
for item in self.items:
item.fix_groups()
def optimize(self, info, in_set=False):
items = []
for item in self.items:
item = item.optimize(info)
if isinstance(item, Sequence):
items.extend(item.items)
else:
items.append(item)
return make_sequence(items)
def compile(self, ctx):
for item in self.items:
item.compile(ctx)
class Branch(RegexpBase):
def __init__(self, branches):
RegexpBase.__init__(self)
self.branches = branches
def fix_groups(self):
for b in self.branches:
b.fix_groups()
def is_empty(self):
for b in self.branches:
if not b.is_empty():
return False
return True
def _flatten_branches(self, info, branches):
new_branches = []
for b in branches:
b = b.optimize(info)
if isinstance(b, Branch):
new_branches.extend(b.branches)
else:
new_branches.append(b)
return new_branches
def _split_common_prefix(self, info, branches):
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
max_count = sys.maxint
for a in alternatives:
max_count = min(max_count, len(a))
prefix = alternatives[0]
pos = 0
end_pos = max_count
while (pos < end_pos and prefix[pos].can_be_affix() and
[None for a in alternatives if a[pos] == prefix[pos]]):
pos += 1
if pos == 0:
return [], branches
new_branches = []
for a in alternatives:
new_branches.append(make_sequence(a[pos:]))
return prefix[:pos], new_branches
def _split_common_suffix(self, info, branches):
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
max_count = sys.maxint
for a in alternatives:
max_count = min(max_count, len(a))
suffix = alternatives[0]
pos = -1
end_pos = -1 - max_count
while (pos > end_pos and suffix[pos].can_be_affix() and
[None for a in alternatives if a[pos] == suffix[pos]]):
pos -= 1
count = -1 - pos
if count == 0:
return [], branches
new_branches = []
for a in alternatives:
end = len(a) - count
assert end >= 0
new_branches.append(make_sequence(a[:end]))
start = len(suffix) - count
assert start >= 0
return suffix[start:], new_branches
def _is_simple_character(self, c):
return isinstance(c, Character) and c.positive and not c.case_insensitive
def _flush_char_prefix(self, info, prefixed, order, new_branches):
if not prefixed:
return
items = prefixed.items()
sorter = BranchSorter(items, order)
sorter.sort()
for value, branches in items:
if len(branches) == 1:
new_branches.append(make_sequence(branches[0]))
else:
subbranches = []
optional = False
for b in branches:
if len(b) > 1:
subbranches.append(make_sequence(b[1:]))
elif not optional:
subbranches.append(Sequence([]))
optional = True
sequence = Sequence([Character(value), Branch(subbranches)])
new_branches.append(sequence.optimize(info))
prefixed.clear()
order.clear()
def _merge_common_prefixes(self, info, branches):
prefixed = {}
order = {}
new_branches = []
for b in branches:
if self._is_simple_character(b):
assert isinstance(b, Character)
prefixed.setdefault(b.value, []).append([b])
order.setdefault(b.value, len(order))
elif isinstance(b, Sequence) and b.items and self._is_simple_character(b.items[0]):
item = b.items[0]
assert isinstance(item, Character)
prefixed.setdefault(item.value, []).append(b.items)
order.setdefault(item.value, len(order))
else:
self._flush_char_prefix(info, prefixed, order, new_branches)
new_branches.append(b)
self._flush_char_prefix(info, prefixed, order, new_branches)
return new_branches
def _flush_set_members(self, info, items, case_insensitive, new_branches):
if not items:
return
if len(items) == 1:
[item] = items.keys()
else:
item = SetUnion(info, items.keys()).optimize(info)
new_branches.append(item.with_flags(case_insensitive=case_insensitive))
items.clear()
def _reduce_to_set(self, info, branches):
new_branches = []
items = {}
case_insensitive = False
for b in branches:
if isinstance(b, Character) or isinstance(b, Property) or isinstance(b, SetBase):
if b.case_insensitive != case_insensitive:
self._flush_set_members(info, items, case_insensitive, new_branches)
case_insensitive = b.case_insensitive
items[b.with_flags(case_insensitive=False)] = False
else:
self._flush_set_members(info, items, case_insensitive, new_branches)
new_branches.append(b)
self._flush_set_members(info, items, case_insensitive, new_branches)
return new_branches
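    # Optimization pipeline: flatten nested branches, factor out common
    # prefixes/suffixes, merge branches sharing a leading character, and
    # collapse character/property branches into a single set.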
def optimize(self, info, in_set=False):
branches = self._flatten_branches(info, self.branches)
prefix, branches = self._split_common_prefix(info, branches)
suffix, branches = self._split_common_suffix(info, branches)
branches = self._merge_common_prefixes(info, branches)
branches = self._reduce_to_set(info, branches)
if len(branches) > 1:
sequence = prefix + [Branch(branches)] + suffix
else:
sequence = prefix + branches + suffix
return make_sequence(sequence)
def compile(self, ctx):
ctx.emit(OPCODE_BRANCH)
tail = []
for b in self.branches:
pos = ctx.tell()
ctx.emit(0)
b.compile(ctx)
ctx.emit(OPCODE_JUMP)
tail.append(ctx.tell())
ctx.emit(0)
ctx.patch(pos, ctx.tell() - pos)
ctx.emit(0)
for t in tail:
ctx.patch(t, ctx.tell() - t)
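# Repeat bytecode layout: REPEAT, skip, min, max, <body>, then MAX_UNTIL
# (greedy) or MIN_UNTIL (lazy) chosen via the subclass's UNTIL_OPCODE.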
class BaseRepeat(RegexpBase):
def __init__(self, subpattern, min_count, max_count):
RegexpBase.__init__(self)
self.subpattern = subpattern
self.min_count = min_count
self.max_count = max_count
def fix_groups(self):
self.subpattern.fix_groups()
def is_empty(self):
return self.subpattern.is_empty()
def compile(self, ctx):
ctx.emit(OPCODE_REPEAT)
pos = ctx.tell()
ctx.emit(0)
ctx.emit(self.min_count)
ctx.emit(self.max_count)
self.subpattern.compile(ctx)
ctx.patch(pos, ctx.tell() - pos)
ctx.emit(self.UNTIL_OPCODE)
class GreedyRepeat(BaseRepeat):
UNTIL_OPCODE = OPCODE_MAX_UNTIL
def can_be_affix(self):
return True
def optimize(self, info, in_set=False):
subpattern = self.subpattern.optimize(info)
return GreedyRepeat(subpattern, self.min_count, self.max_count)
class LazyRepeat(BaseRepeat):
UNTIL_OPCODE = OPCODE_MIN_UNTIL
def optimize(self, info, in_set=False):
subpattern = self.subpattern.optimize(info)
return LazyRepeat(subpattern, self.min_count, self.max_count)
class LookAround(RegexpBase):
def __init__(self, subpattern, behind, positive):
RegexpBase.__init__(self, positive=positive)
self.subpattern = subpattern
self.behind = behind
def fix_groups(self):
self.subpattern.fix_groups()
def can_be_affix(self):
return self.subpattern.can_be_affix()
def optimize(self, info, in_set=False):
return LookAround(self.subpattern.optimize(info), self.behind, self.positive)
def compile(self, ctx):
ctx.emit(OPCODE_ASSERT if self.positive else OPCODE_ASSERT_NOT)
pos = ctx.tell()
ctx.emit(0)
if self.behind:
lo, hi = self.subpattern.getwidth()
if lo != hi:
raise RegexpError("look-behind requires fixed-width pattern")
ctx.emit(lo)
else:
ctx.emit(0)
self.subpattern.compile(ctx)
ctx.emit(OPCODE_SUCCESS)
ctx.patch(pos, ctx.tell() - pos)
class Group(RegexpBase):
def __init__(self, info, group, subpattern):
RegexpBase.__init__(self)
self.info = info
self.group = group
self.subpattern = subpattern
def fix_groups(self):
self.info.defined_groups[self.group] = self
self.subpattern.fix_groups()
def can_be_affix(self):
return False
def optimize(self, info, in_set=False):
return Group(self.info, self.group, self.subpattern.optimize(info))
def is_empty(self):
return False
def compile(self, ctx):
ctx.emit(OPCODE_MARK)
ctx.emit((self.group - 1) * 2)
self.subpattern.compile(ctx)
ctx.emit(OPCODE_MARK)
ctx.emit((self.group - 1) * 2 + 1)
class RefGroup(RegexpBase):
def __init__(self, info, group, case_insensitive=False):
RegexpBase.__init__(self, case_insensitive=case_insensitive)
self.info = info
self.group = group
def fix_groups(self):
if not 1 <= self.group <= self.info.group_count:
raise RegexpError("unknown group")
def optimize(self, info, in_set=False):
return self
def compile(self, ctx):
assert not self.case_insensitive
ctx.emit(OPCODE_GROUPREF)
ctx.emit(self.group - 1)
class SetBase(RegexpBase):
def __init__(self, info, items, positive=True, case_insensitive=False, zerowidth=False):
RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
self.info = info
self.items = items
def is_empty(self):
return False
def can_be_affix(self):
return True
def fix_groups(self):
pass
class SetUnion(SetBase):
def optimize(self, info, in_set=False):
items = []
for item in self.items:
item = item.optimize(info, in_set=True)
if isinstance(item, SetUnion) and item.positive:
items.extend(item.items)
else:
items.append(item)
if len(items) == 1 and not isinstance(items[0], Range):
return items[0].with_flags(
positive=items[0].positive == self.positive,
case_insensitive=self.case_insensitive,
zerowidth=self.zerowidth
).optimize(info, in_set=in_set)
return SetUnion(self.info, items, positive=self.positive, case_insensitive=self.case_insensitive, zerowidth=self.zerowidth)
def rebuild(self, positive, case_insensitive, zerowidth):
return SetUnion(self.info, self.items, positive, case_insensitive, zerowidth).optimize(self.info)
def compile(self, ctx):
ctx.emit(OPCODE_IN)
pos = ctx.tell()
ctx.emit(0)
if not self.positive:
ctx.emit(OPCODE_NEGATE)
for item in self.items:
item.compile(ctx)
ctx.emit(OPCODE_FAILURE)
ctx.patch(pos, ctx.tell() - pos)
class SetIntersection(SetBase):
def rebuild(self, positive, case_insensitive, zerowidth):
return SetIntersection(self.info, self.items, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
def optimize(self, info, in_set=False):
items = []
for item in self.items:
item = item.optimize(info, in_set=True)
if isinstance(item, SetIntersection) and item.positive:
items.extend(item.items)
else:
items.append(item)
if len(items) == 1:
return items[0].with_flags(
case_insensitive=self.case_insensitive,
zerowidth=self.zerowidth,
).optimize(info, in_set)
return SetIntersection(info, items)
def compile(self, ctx):
Sequence([
LookAround(item, behind=False, positive=True)
for item in self.items[:-1]
] + [self.items[-1]]).compile(ctx)
POSITION_ESCAPES = {
"A": AtPosition(AT_BEGINNING_STRING),
"z": AtPosition(AT_END_STRING),
"b": AtPosition(AT_BOUNDARY),
"B": AtPosition(AT_NON_BOUNDARY),
}
CHARSET_ESCAPES = {
"d": Property(CATEGORY_DIGIT),
"w": Property(CATEGORY_WORD),
}
PROPERTIES = {
"digit": CATEGORY_DIGIT,
"alnum": CATEGORY_WORD,
}
def make_character(info, value, in_set=False):
if in_set:
return Character(value)
return Character(value, case_insensitive=info.flags & IGNORE_CASE)
def make_sequence(items):
if len(items) == 1:
return items[0]
return Sequence(items)
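# Atomic grouping is emulated with a hidden group: a positive lookahead
# captures the subpattern, then a backreference immediately consumes the
# captured text, so the engine cannot backtrack into it.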
def make_atomic(info, subpattern):
group = info.new_group()
info.close_group(group, hidden=True)
return Sequence([
LookAround(Group(info, group, subpattern), behind=False, positive=True),
RefGroup(info, group),
])
def make_ref_group(info, name):
return RefGroup(info, name, case_insensitive=info.flags & IGNORE_CASE)
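# Recursive-descent parser entry point: a pattern is a set of "|"-separated
# sequences; each branch is parsed against a copy of the groups seen before
# the alternation so group usage stays consistent across branches.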
def _parse_pattern(source, info):
previous_groups = info.used_groups.copy()
branches = [_parse_sequence(source, info)]
all_groups = info.used_groups
while source.match("|"):
info.used_groups = previous_groups.copy()
branches.append(_parse_sequence(source, info))
all_groups.update(info.used_groups)
info.used_groups = all_groups
if len(branches) == 1:
return branches[0]
return Branch(branches)
def _parse_sequence(source, info):
sequence = []
item = _parse_item(source, info)
while item:
sequence.append(item)
item = _parse_item(source, info)
return make_sequence(sequence)
def _parse_item(source, info):
element = _parse_element(source, info)
counts = _parse_quantifier(source, info)
if counts is not None:
min_count, max_count = counts.min_count, counts.max_count
if element.is_empty() or min_count == max_count == 1:
return element
if source.match("?"):
return LazyRepeat(element, min_count, max_count)
elif source.match("+"):
if counts.limited_quantifier:
return GreedyRepeat(GreedyRepeat(element, min_count, max_count), 1, MAX_REPEAT)
else:
return make_atomic(info, GreedyRepeat(element, min_count, max_count))
else:
return GreedyRepeat(element, min_count, max_count)
return element
def _parse_element(source, info):
here = source.pos
ch = source.get()
if ch in SPECIAL_CHARS:
if ch in ")|":
source.pos = here
return None
elif ch == "\\":
return _parse_escape(source, info, in_set=False)
elif ch == "(":
element = _parse_paren(source, info)
if element is not None:
return element
elif ch == ".":
if info.flags & DOT_ALL:
return AnyAll()
else:
return Any()
elif ch == "[":
return _parse_set(source, info)
elif ch == "^":
return AtPosition(AT_BEGINNING_STRING)
elif ch == "$":
return AtPosition(AT_END_STRING)
elif ch == "{":
here2 = source.pos
counts = _parse_quantifier(source, info)
if counts is not None:
raise RegexpError("nothing to repeat")
source.pos = here2
return make_character(info, ord(ch[0]))
elif ch in "?*+":
raise RegexpError("nothing to repeat")
else:
return make_character(info, ord(ch[0]))
else:
return make_character(info, ord(ch[0]))
def _parse_quantifier(source, info):
while True:
here = source.pos
if source.match("?"):
return Counts(0, 1)
elif source.match("*"):
return Counts(0)
elif source.match("+"):
return Counts(1)
elif source.match("{"):
try:
return _parse_limited_quantifier(source)
except ParseError:
pass
elif source.match("(?#"):
_parse_comment(source)
continue
break
source.pos = here
return None
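# Dispatch on what follows "(": "?" introduces named groups, lookarounds,
# comments, atomic groups, non-capturing groups, or inline flags; anything
# else opens a plain capturing group.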
def _parse_paren(source, info):
if source.match("?"):
if source.match("<"):
if source.match("="):
return _parse_lookaround(source, info, behind=True, positive=True)
elif source.match("!"):
return _parse_lookaround(source, info, behind=True, positive=False)
name = _parse_name(source)
group = info.new_group(name)
source.expect(">")
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
info.close_group(group)
return Group(info, group, subpattern)
elif source.match("="):
return _parse_lookaround(source, info, behind=False, positive=True)
elif source.match("!"):
return _parse_lookaround(source, info, behind=False, positive=False)
elif source.match("#"):
_parse_comment(source)
return
elif source.match(">"):
return _parse_atomic(source, info)
elif source.match(":"):
subpattern = _parse_pattern(source, info)
source.expect(")")
return subpattern
elif source.match("-") or source.match("m") or source.match("i") or source.match("x"):
# TODO: parse plain here flags = _parse_plain_flags(source)
subpattern = _parse_pattern(source, info)
source.expect(")")
return subpattern
else:
raise RegexpError("undefined group option")
group = info.new_group()
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
info.close_group(group)
return Group(info, group, subpattern)
def _parse_atomic(source, info):
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
return make_atomic(info, subpattern)
def _parse_set(source, info):
saved_ignore = source.ignore_space
source.ignore_space = False
negate = source.match("^")
try:
item = _parse_set_intersect(source, info)
source.expect("]")
finally:
source.ignore_space = saved_ignore
if negate:
item = item.with_flags(positive=not item.positive)
return item.with_flags(case_insensitive=info.flags & IGNORE_CASE)
def _parse_set_intersect(source, info):
items = [_parse_set_implicit_union(source, info)]
while source.match("&&"):
items.append(_parse_set_implicit_union(source, info))
if len(items) == 1:
return items[0]
return SetIntersection(info, items)
def _parse_set_implicit_union(source, info):
items = [_parse_set_member(source, info)]
while True:
here = source.pos
if source.match("]") or source.match("&&"):
source.pos = here
break
items.append(_parse_set_member(source, info))
if len(items) == 1 and not isinstance(items[0], Range):
return items[0]
return SetUnion(info, items)
def _parse_set_member(source, info):
start = _parse_set_item(source, info)
if (not isinstance(start, Character) or not start.positive or
not source.match("-")):
return start
here = source.pos
if source.match("]"):
source.pos = here
return SetUnion(info, [start, Character(ord("-"))])
end = _parse_set_item(source, info)
if not isinstance(end, Character) or not end.positive:
return SetUnion(info, [start, Character(ord("-")), end])
if start.value > end.value:
raise RegexpError("bad character range")
if start.value == end.value:
return start
return Range(start.value, end.value)
def _parse_set_item(source, info):
if source.match("\\"):
return _parse_escape(source, info, in_set=True)
here = source.pos
if source.match("[:"):
try:
return _parse_posix_class(source, info)
except ParseError:
source.pos = here
if source.match("["):
negate = source.match("^")
item = _parse_set_intersect(source, info)
source.expect("]")
if negate:
item = item.with_flags(positive=not item.positive)
return item
ch = source.get()
if not ch:
raise RegexpError("bad set")
return Character(ord(ch[0]))
def _parse_escape(source, info, in_set):
saved_ignore = source.ignore_space
source.ignore_space = False
ch = source.get()
source.ignore_space = saved_ignore
if not ch:
raise RegexpError("bad escape")
if ch == "g" and not in_set:
here = source.pos
try:
return _parse_group_ref(source, info)
except RegexpError:
source.pos = here
return make_character(info, ord(ch[0]), in_set)
elif ch == "G" and not in_set:
return AtPosition(AT_BEGINNING)
elif ch in "pP":
return _parse_property(source, info, ch == "p", in_set)
elif ch.isalpha():
if not in_set:
if ch in POSITION_ESCAPES:
return POSITION_ESCAPES[ch]
if ch in CHARSET_ESCAPES:
return CHARSET_ESCAPES[ch]
elif ch in CHARACTER_ESCAPES:
return Character(ord(CHARACTER_ESCAPES[ch]))
return make_character(info, ord(ch[0]), in_set)
elif ch.isdigit():
return _parse_numeric_escape(source, info, ch, in_set)
else:
return make_character(info, ord(ch[0]), in_set)
def _parse_lookaround(source, info, behind, positive):
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
return LookAround(subpattern, behind=behind, positive=positive)
def _parse_limited_quantifier(source):
min_count = _parse_count(source)
ch = source.get()
if ch == ",":
max_count = _parse_count(source)
if not source.match("}"):
raise ParseError
min_count = int(min_count) if min_count else 0
max_count = int(max_count) if max_count else MAX_REPEAT
if min_count > max_count:
raise RegexpError("min repeat gereater than max repeat")
if max_count > MAX_REPEAT:
raise RegexpError("repeat count too big")
return Counts(min_count, max_count, limited_quantifier=True)
if ch != "}":
raise ParseError
if not min_count:
raise ParseError
min_count = int(min_count)
if min_count > MAX_REPEAT:
raise RegexpError("repeat count too big")
return Counts(min_count, min_count, limited_quantifier=True)
def _parse_count(source):
b = StringBuilder(2)
while True:
here = source.pos
ch = source.get()
if ch.isdigit():
b.append(ch)
else:
source.pos = here
break
return b.build()
def _parse_comment(source):
while True:
ch = source.get()
if ch == ")":
break
elif not ch:
break
def _parse_name(source):
b = StringBuilder(5)
while True:
here = source.pos
ch = source.get()
if ch in ")>":
source.pos = here
break
elif not ch:
break
else:
b.append(ch)
return b.build()
def _parse_plain_flags(source):
b = StringBuilder(4)
while True:
ch = source.get()
if ch == ":":
break
else:
b.append(ch)
return b.build()
def _parse_group_ref(source, info):
source.expect("<")
name = _parse_name(source)
source.expect(">")
if info.is_open_group(name):
raise RegexpError("can't refer to an open group")
return make_ref_group(info, info.normalize_group(name))
def _parse_property(source, info, positive, in_set):
here = source.pos
if source.match("{"):
negate = source.match("^")
prop_name, name = _parse_property_name(source)
if source.match("}"):
if name in PROPERTIES:
return Property(PROPERTIES[name], positive != negate)
source.pos = here
return make_character(info, ord("p" if positive else "P"), in_set)
def _parse_property_name(source):
b = StringBuilder(5)
while True:
here = source.pos
ch = source.get()
if ch.isalnum():
b.append(ch)
else:
source.pos = here
break
name = b.build()
return name, name
def _parse_numeric_escape(source, info, ch, in_set):
raise NotImplementedError("_parse_numeric_escape")
def _parse_posix_class(source, info):
negate = source.match("^")
prop_name, name = _parse_property_name(source)
if not source.match(":]"):
raise ParseError
    return Property(PROPERTIES[name], positive=not negate)  # "^" negates the class
def _compile_no_cache(pattern, flags):
source = Source(pattern)
if flags & EXTENDED:
source.ignore_space = True
info = Info(flags)
parsed = _parse_pattern(source, info)
if not source.at_end():
raise RegexpError("trailing characters in pattern")
parsed.fix_groups()
parsed = parsed.optimize(info)
ctx = CompilerContext()
parsed.compile(ctx)
ctx.emit(OPCODE_SUCCESS)
code = ctx.build()
index_group = {}
for n, v in info.group_index.iteritems():
index_group[v] = n
return code, info.flags, info.group_count, info.group_index, index_group, info.group_offsets
def compile(cache, pattern, flags=0):
if not cache.contains(pattern, flags):
cache.set(pattern, flags, _compile_no_cache(pattern, flags))
    return cache.get(pattern, flags)
```
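
The `compile` entry point at the bottom expects a cache object exposing `contains`, `set`, and `get` keyed on `(pattern, flags)`. A minimal dict-backed stand-in (a sketch for experimentation; topaz's actual cache lives elsewhere in the interpreter):

```python
# Hypothetical cache satisfying the interface used by compile() above.
class RegexpCache(object):
    def __init__(self):
        self._entries = {}

    def contains(self, pattern, flags):
        return (pattern, flags) in self._entries

    def set(self, pattern, flags, compiled):
        self._entries[(pattern, flags)] = compiled

    def get(self, pattern, flags):
        return self._entries[(pattern, flags)]


# code, flags, group_count, group_index, index_group, group_offsets = \
#     compile(RegexpCache(), "ab|ac")
```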
- avg_line_length: 29.190909 · max_line_length: 131 · alphanum_fraction: 0.595116
- content_no_comment:

```python
import sys
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib.objectmodel import specialize
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rsre.rsre_core import (
OPCODE_LITERAL, OPCODE_LITERAL_IGNORE, OPCODE_SUCCESS, OPCODE_ASSERT,
OPCODE_MARK, OPCODE_REPEAT, OPCODE_ANY, OPCODE_ANY_ALL, OPCODE_MAX_UNTIL,
OPCODE_MIN_UNTIL, OPCODE_GROUPREF, OPCODE_AT, OPCODE_BRANCH, OPCODE_RANGE,
OPCODE_JUMP, OPCODE_ASSERT_NOT, OPCODE_CATEGORY, OPCODE_FAILURE, OPCODE_IN,
OPCODE_NEGATE
)
from rpython.rlib.rsre.rsre_char import MAXREPEAT as MAX_REPEAT
IGNORE_CASE = 1 << 0
EXTENDED = 1 << 1
DOT_ALL = 1 << 2
ONCE = 1 << 3
FIXED_ENCODING = 1 << 4
NO_ENCODING = 1 << 5
OPTIONS_MAP = {
"i": IGNORE_CASE,
"x": EXTENDED,
"m": DOT_ALL,
"o": ONCE,
"u": FIXED_ENCODING,
"n": NO_ENCODING,
"e": FIXED_ENCODING,
"s": FIXED_ENCODING,
}
FLAGS_MAP = [
("m", DOT_ALL),
("i", IGNORE_CASE),
("x", EXTENDED),
]
SPECIAL_CHARS = "()|?*+{^$.[\\#"
CHARACTER_ESCAPES = {
"a": "\a",
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
"v": "\v",
}
AT_BEGINNING = 0
AT_BEGINNING_LINE = 1
AT_BEGINNING_STRING = 2
AT_BOUNDARY = 3
AT_NON_BOUNDARY = 4
AT_END = 5
AT_END_LINE = 6
AT_END_STRING = 7
AT_LOC_BOUNDARY = 8
AT_LOC_NON_BOUNDARY = 9
AT_UNI_BOUNDARY = 10
AT_UNI_NON_BOUNDARY = 11
CATEGORY_DIGIT = 0
CATEGORY_NOT_DIGIT = 1
CATEGORY_SPACE = 2
CATEGORY_NOT_SPACE = 3
CATEGORY_WORD = 4
CATEGORY_NOT_WORD = 5
CATEGORY_LINEBREAK = 6
CATEGORY_NOT_LINEBREAK = 7
CATEGORY_LOC_WORD = 8
CATEGORY_LOC_NOT_WORD = 9
CATEGORY_UNI_DIGIT = 10
CATEGORY_UNI_NOT_DIGIT = 11
CATEGORY_UNI_SPACE = 12
CATEGORY_UNI_NOT_SPACE = 13
CATEGORY_UNI_WORD = 14
CATEGORY_UNI_NOT_WORD = 15
CATEGORY_UNI_LINEBREAK = 16
CATEGORY_UNI_NOT_LINEBREAK = 17
class UnscopedFlagSet(Exception):
def __init__(self, global_flags):
Exception.__init__(self)
self.global_flags = global_flags
class RegexpError(Exception):
pass
class ParseError(Exception):
pass
class Source(object):
def __init__(self, s):
self.pos = 0
self.s = s
self.ignore_space = False
def at_end(self):
s = self.s
pos = self.pos
if self.ignore_space:
while True:
if pos >= len(s):
break
elif s[pos].isspace():
pos += 1
elif s[pos] == "#":
pos = s.find("\n", pos)
if pos < 0:
pos = len(s)
else:
break
return pos >= len(s)
def get(self):
s = self.s
pos = self.pos
if self.ignore_space:
while True:
if pos >= len(s):
return ""
elif s[pos].isspace():
pos += 1
elif s[pos] == "#":
pos = s.find("\n", pos)
if pos < 0:
pos = len(s)
else:
break
try:
ch = s[pos]
self.pos = pos + 1
return ch
except IndexError:
self.pos = pos
return ""
except ValueError:
self.pos = len(s)
return ""
def match(self, substr):
s = self.s
pos = self.pos
if self.ignore_space:
for c in substr:
while True:
if pos >= len(s):
return False
elif s[pos].isspace():
pos += 1
elif s[pos] == "#":
pos = s.find("\n", pos)
if pos < 0:
pos = len(s)
else:
break
if s[pos] != c:
return False
pos += 1
self.pos = pos
return True
else:
if pos + len(substr) <= len(s):
matches = True
for i in xrange(len(substr)):
if s[pos + i] != substr[i]:
matches = False
else:
matches = False
if not matches:
return False
self.pos = pos + len(substr)
return True
def expect(self, substr):
if not self.match(substr):
raise RegexpError("Missing %s" % substr)
class Info(object):
OPEN = 0
CLOSED = 1
def __init__(self, flags):
self.flags = flags
self.group_count = 0
self.used_groups = {}
self.group_state = {}
self.group_index = {}
self.group_name = {}
self.named_lists_used = {}
self.defined_groups = {}
self.group_offsets = []
def new_group(self, name=None):
if name in self.group_index:
group = self.group_index[name]
else:
while True:
self.group_count += 1
if name is None or self.group_count not in self.group_name:
break
group = self.group_count
if name is not None:
self.group_index[name] = group
self.group_name[group] = name
self.used_groups[group] = None
self.group_state[group] = self.OPEN
return group
def close_group(self, group, hidden=False):
last_group_offset = self.group_offsets[-1] if self.group_offsets else 0
if hidden:
last_group_offset += 1
self.group_offsets.append(last_group_offset)
self.group_state[group] = self.CLOSED
def normalize_group(self, name):
if name.isdigit():
return int(name)
else:
return self.group_index[name]
def is_open_group(self, name):
group = self.normalize_group(name)
return group in self.group_state and self.group_state[group] == self.OPEN
BaseSorter = make_timsort_class()
class BranchSorter(BaseSorter):
def __init__(self, items, order):
BaseSorter.__init__(self, items)
self.order = order
def lt(self, a, b):
return self.order[a[0]] < self.order[b[0]]
class CompilerContext(object):
def __init__(self):
self.data = []
def emit(self, opcode):
self.data.append(opcode)
def tell(self):
return len(self.data)
def patch(self, pos, value):
self.data[pos] = value
def build(self):
return self.data[:]
class Counts(object):
def __init__(self, min_count, max_count=MAX_REPEAT, limited_quantifier=False):
self.min_count = min_count
self.max_count = max_count
self.limited_quantifier = limited_quantifier
class RegexpBase(object):
_attrs_ = ["positive", "case_insensitive", "zerowidth"]
def __init__(self, positive=True, case_insensitive=False, zerowidth=False):
self.positive = positive
self.case_insensitive = case_insensitive
self.zerowidth = zerowidth
@specialize.argtype(1, 2, 3)
def with_flags(self, positive=None, case_insensitive=None, zerowidth=None):
positive = positive if positive is not None else self.positive
case_insensitive = case_insensitive if case_insensitive is not None else self.case_insensitive
zerowidth = zerowidth if zerowidth is not None else self.zerowidth
if (positive == self.positive and
case_insensitive == self.case_insensitive and
zerowidth == self.zerowidth):
return self
return self.rebuild(positive, case_insensitive, zerowidth)
class Character(RegexpBase):
def __init__(self, value, case_insensitive=False, positive=True, zerowidth=False):
RegexpBase.__init__(self, case_insensitive=case_insensitive, positive=positive, zerowidth=zerowidth)
self.value = value
def rebuild(self, positive, case_insensitive, zerowidth):
return Character(self.value, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
def getwidth(self):
return 1, 1
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def can_be_affix(self):
return True
def is_empty(self):
return False
def compile(self, ctx):
ctx.emit(OPCODE_LITERAL_IGNORE if self.case_insensitive else OPCODE_LITERAL)
ctx.emit(self.value)
class Any(RegexpBase):
def is_empty(self):
return False
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def compile(self, ctx):
ctx.emit(OPCODE_ANY)
class AnyAll(RegexpBase):
def is_empty(self):
return False
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def compile(self, ctx):
ctx.emit(OPCODE_ANY_ALL)
class ZeroWidthBase(RegexpBase):
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
class AtPosition(ZeroWidthBase):
def __init__(self, code):
ZeroWidthBase.__init__(self)
self.code = code
def can_be_affix(self):
return True
def compile(self, ctx):
ctx.emit(OPCODE_AT)
ctx.emit(self.code)
class Property(RegexpBase):
def __init__(self, value, positive=True, case_insensitive=False, zerowidth=False):
RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
self.value = value
def rebuild(self, positive, case_insensitive, zerowidth):
return Property(self.value, positive, case_insensitive, zerowidth)
def getwidth(self):
return 1, 1
def is_empty(self):
return False
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def can_be_affix(self):
return True
def compile(self, ctx):
ctx.emit(OPCODE_CATEGORY)
ctx.emit(self.value)
class Range(RegexpBase):
def __init__(self, lower, upper, positive=True, case_insensitive=False, zerowidth=False):
RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
self.lower = lower
self.upper = upper
def rebuild(self, positive, case_insensitive, zerowidth):
return Range(self.lower, self.upper, positive, case_insensitive, zerowidth)
def fix_groups(self):
pass
def optimize(self, info, in_set=False):
return self
def can_be_affix(self):
return True
def compile(self, ctx):
if not self.positive:
ctx.emit(OPCODE_NEGATE)
ctx.emit(OPCODE_RANGE)
ctx.emit(self.lower)
ctx.emit(self.upper)
class Sequence(RegexpBase):
def __init__(self, items):
RegexpBase.__init__(self)
self.items = items
def is_empty(self):
for item in self.items:
if not item.is_empty():
return False
return True
def fix_groups(self):
for item in self.items:
item.fix_groups()
def optimize(self, info, in_set=False):
items = []
for item in self.items:
item = item.optimize(info)
if isinstance(item, Sequence):
items.extend(item.items)
else:
items.append(item)
return make_sequence(items)
def compile(self, ctx):
for item in self.items:
item.compile(ctx)
class Branch(RegexpBase):
def __init__(self, branches):
RegexpBase.__init__(self)
self.branches = branches
def fix_groups(self):
for b in self.branches:
b.fix_groups()
def is_empty(self):
for b in self.branches:
if not b.is_empty():
return False
return True
def _flatten_branches(self, info, branches):
new_branches = []
for b in branches:
b = b.optimize(info)
if isinstance(b, Branch):
new_branches.extend(b.branches)
else:
new_branches.append(b)
return new_branches
def _split_common_prefix(self, info, branches):
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
max_count = sys.maxint
for a in alternatives:
max_count = min(max_count, len(a))
prefix = alternatives[0]
pos = 0
end_pos = max_count
while (pos < end_pos and prefix[pos].can_be_affix() and
[None for a in alternatives if a[pos] == prefix[pos]]):
pos += 1
if pos == 0:
return [], branches
new_branches = []
for a in alternatives:
new_branches.append(make_sequence(a[pos:]))
return prefix[:pos], new_branches
def _split_common_suffix(self, info, branches):
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
max_count = sys.maxint
for a in alternatives:
max_count = min(max_count, len(a))
suffix = alternatives[0]
pos = -1
end_pos = -1 - max_count
while (pos > end_pos and suffix[pos].can_be_affix() and
[None for a in alternatives if a[pos] == suffix[pos]]):
pos -= 1
count = -1 - pos
if count == 0:
return [], branches
new_branches = []
for a in alternatives:
end = len(a) - count
assert end >= 0
new_branches.append(make_sequence(a[:end]))
start = len(suffix) - count
assert start >= 0
return suffix[start:], new_branches
def _is_simple_character(self, c):
return isinstance(c, Character) and c.positive and not c.case_insensitive
def _flush_char_prefix(self, info, prefixed, order, new_branches):
if not prefixed:
return
items = prefixed.items()
sorter = BranchSorter(items, order)
sorter.sort()
for value, branches in items:
if len(branches) == 1:
new_branches.append(make_sequence(branches[0]))
else:
subbranches = []
optional = False
for b in branches:
if len(b) > 1:
subbranches.append(make_sequence(b[1:]))
elif not optional:
subbranches.append(Sequence([]))
optional = True
sequence = Sequence([Character(value), Branch(subbranches)])
new_branches.append(sequence.optimize(info))
prefixed.clear()
order.clear()
def _merge_common_prefixes(self, info, branches):
prefixed = {}
order = {}
new_branches = []
for b in branches:
if self._is_simple_character(b):
assert isinstance(b, Character)
prefixed.setdefault(b.value, []).append([b])
order.setdefault(b.value, len(order))
elif isinstance(b, Sequence) and b.items and self._is_simple_character(b.items[0]):
item = b.items[0]
assert isinstance(item, Character)
prefixed.setdefault(item.value, []).append(b.items)
order.setdefault(item.value, len(order))
else:
self._flush_char_prefix(info, prefixed, order, new_branches)
new_branches.append(b)
self._flush_char_prefix(info, prefixed, order, new_branches)
return new_branches
def _flush_set_members(self, info, items, case_insensitive, new_branches):
if not items:
return
if len(items) == 1:
[item] = items.keys()
else:
item = SetUnion(info, items.keys()).optimize(info)
new_branches.append(item.with_flags(case_insensitive=case_insensitive))
items.clear()
def _reduce_to_set(self, info, branches):
new_branches = []
items = {}
case_insensitive = False
for b in branches:
if isinstance(b, Character) or isinstance(b, Property) or isinstance(b, SetBase):
if b.case_insensitive != case_insensitive:
self._flush_set_members(info, items, case_insensitive, new_branches)
case_insensitive = b.case_insensitive
items[b.with_flags(case_insensitive=False)] = False
else:
self._flush_set_members(info, items, case_insensitive, new_branches)
new_branches.append(b)
self._flush_set_members(info, items, case_insensitive, new_branches)
return new_branches
def optimize(self, info, in_set=False):
branches = self._flatten_branches(info, self.branches)
prefix, branches = self._split_common_prefix(info, branches)
suffix, branches = self._split_common_suffix(info, branches)
branches = self._merge_common_prefixes(info, branches)
branches = self._reduce_to_set(info, branches)
if len(branches) > 1:
sequence = prefix + [Branch(branches)] + suffix
else:
sequence = prefix + branches + suffix
return make_sequence(sequence)
def compile(self, ctx):
ctx.emit(OPCODE_BRANCH)
tail = []
for b in self.branches:
pos = ctx.tell()
ctx.emit(0)
b.compile(ctx)
ctx.emit(OPCODE_JUMP)
tail.append(ctx.tell())
ctx.emit(0)
ctx.patch(pos, ctx.tell() - pos)
ctx.emit(0)
for t in tail:
ctx.patch(t, ctx.tell() - t)
class BaseRepeat(RegexpBase):
def __init__(self, subpattern, min_count, max_count):
RegexpBase.__init__(self)
self.subpattern = subpattern
self.min_count = min_count
self.max_count = max_count
def fix_groups(self):
self.subpattern.fix_groups()
def is_empty(self):
return self.subpattern.is_empty()
def compile(self, ctx):
ctx.emit(OPCODE_REPEAT)
pos = ctx.tell()
ctx.emit(0)
ctx.emit(self.min_count)
ctx.emit(self.max_count)
self.subpattern.compile(ctx)
ctx.patch(pos, ctx.tell() - pos)
ctx.emit(self.UNTIL_OPCODE)
class GreedyRepeat(BaseRepeat):
UNTIL_OPCODE = OPCODE_MAX_UNTIL
def can_be_affix(self):
return True
def optimize(self, info, in_set=False):
subpattern = self.subpattern.optimize(info)
return GreedyRepeat(subpattern, self.min_count, self.max_count)
class LazyRepeat(BaseRepeat):
UNTIL_OPCODE = OPCODE_MIN_UNTIL
def optimize(self, info, in_set=False):
subpattern = self.subpattern.optimize(info)
return LazyRepeat(subpattern, self.min_count, self.max_count)
class LookAround(RegexpBase):
def __init__(self, subpattern, behind, positive):
RegexpBase.__init__(self, positive=positive)
self.subpattern = subpattern
self.behind = behind
def fix_groups(self):
self.subpattern.fix_groups()
def can_be_affix(self):
return self.subpattern.can_be_affix()
def optimize(self, info, in_set=False):
return LookAround(self.subpattern.optimize(info), self.behind, self.positive)
def compile(self, ctx):
ctx.emit(OPCODE_ASSERT if self.positive else OPCODE_ASSERT_NOT)
pos = ctx.tell()
ctx.emit(0)
if self.behind:
lo, hi = self.subpattern.getwidth()
if lo != hi:
raise RegexpError("look-behind requires fixed-width pattern")
ctx.emit(lo)
else:
ctx.emit(0)
self.subpattern.compile(ctx)
ctx.emit(OPCODE_SUCCESS)
ctx.patch(pos, ctx.tell() - pos)
class Group(RegexpBase):
def __init__(self, info, group, subpattern):
RegexpBase.__init__(self)
self.info = info
self.group = group
self.subpattern = subpattern
def fix_groups(self):
self.info.defined_groups[self.group] = self
self.subpattern.fix_groups()
def can_be_affix(self):
return False
def optimize(self, info, in_set=False):
return Group(self.info, self.group, self.subpattern.optimize(info))
def is_empty(self):
return False
def compile(self, ctx):
ctx.emit(OPCODE_MARK)
ctx.emit((self.group - 1) * 2)
self.subpattern.compile(ctx)
ctx.emit(OPCODE_MARK)
ctx.emit((self.group - 1) * 2 + 1)
class RefGroup(RegexpBase):
def __init__(self, info, group, case_insensitive=False):
RegexpBase.__init__(self, case_insensitive=case_insensitive)
self.info = info
self.group = group
def fix_groups(self):
if not 1 <= self.group <= self.info.group_count:
raise RegexpError("unknown group")
def optimize(self, info, in_set=False):
return self
def compile(self, ctx):
assert not self.case_insensitive
ctx.emit(OPCODE_GROUPREF)
ctx.emit(self.group - 1)
class SetBase(RegexpBase):
def __init__(self, info, items, positive=True, case_insensitive=False, zerowidth=False):
RegexpBase.__init__(self, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
self.info = info
self.items = items
def is_empty(self):
return False
def can_be_affix(self):
return True
def fix_groups(self):
pass
class SetUnion(SetBase):
def optimize(self, info, in_set=False):
items = []
for item in self.items:
item = item.optimize(info, in_set=True)
if isinstance(item, SetUnion) and item.positive:
items.extend(item.items)
else:
items.append(item)
if len(items) == 1 and not isinstance(items[0], Range):
return items[0].with_flags(
positive=items[0].positive == self.positive,
case_insensitive=self.case_insensitive,
zerowidth=self.zerowidth
).optimize(info, in_set=in_set)
return SetUnion(self.info, items, positive=self.positive, case_insensitive=self.case_insensitive, zerowidth=self.zerowidth)
def rebuild(self, positive, case_insensitive, zerowidth):
return SetUnion(self.info, self.items, positive, case_insensitive, zerowidth).optimize(self.info)
def compile(self, ctx):
ctx.emit(OPCODE_IN)
pos = ctx.tell()
ctx.emit(0)
if not self.positive:
ctx.emit(OPCODE_NEGATE)
for item in self.items:
item.compile(ctx)
ctx.emit(OPCODE_FAILURE)
ctx.patch(pos, ctx.tell() - pos)
class SetIntersection(SetBase):
def rebuild(self, positive, case_insensitive, zerowidth):
return SetIntersection(self.info, self.items, positive=positive, case_insensitive=case_insensitive, zerowidth=zerowidth)
def optimize(self, info, in_set=False):
items = []
for item in self.items:
item = item.optimize(info, in_set=True)
if isinstance(item, SetIntersection) and item.positive:
items.extend(item.items)
else:
items.append(item)
if len(items) == 1:
return items[0].with_flags(
case_insensitive=self.case_insensitive,
zerowidth=self.zerowidth,
).optimize(info, in_set)
return SetIntersection(info, items)
def compile(self, ctx):
Sequence([
LookAround(item, behind=False, positive=True)
for item in self.items[:-1]
] + [self.items[-1]]).compile(ctx)
POSITION_ESCAPES = {
"A": AtPosition(AT_BEGINNING_STRING),
"z": AtPosition(AT_END_STRING),
"b": AtPosition(AT_BOUNDARY),
"B": AtPosition(AT_NON_BOUNDARY),
}
CHARSET_ESCAPES = {
"d": Property(CATEGORY_DIGIT),
"w": Property(CATEGORY_WORD),
}
PROPERTIES = {
"digit": CATEGORY_DIGIT,
"alnum": CATEGORY_WORD,
}
def make_character(info, value, in_set=False):
if in_set:
return Character(value)
return Character(value, case_insensitive=info.flags & IGNORE_CASE)
def make_sequence(items):
if len(items) == 1:
return items[0]
return Sequence(items)
def make_atomic(info, subpattern):
group = info.new_group()
info.close_group(group, hidden=True)
return Sequence([
LookAround(Group(info, group, subpattern), behind=False, positive=True),
RefGroup(info, group),
])
def make_ref_group(info, name):
return RefGroup(info, name, case_insensitive=info.flags & IGNORE_CASE)
def _parse_pattern(source, info):
previous_groups = info.used_groups.copy()
branches = [_parse_sequence(source, info)]
all_groups = info.used_groups
while source.match("|"):
info.used_groups = previous_groups.copy()
branches.append(_parse_sequence(source, info))
all_groups.update(info.used_groups)
info.used_groups = all_groups
if len(branches) == 1:
return branches[0]
return Branch(branches)
def _parse_sequence(source, info):
sequence = []
item = _parse_item(source, info)
while item:
sequence.append(item)
item = _parse_item(source, info)
return make_sequence(sequence)
def _parse_item(source, info):
element = _parse_element(source, info)
counts = _parse_quantifier(source, info)
if counts is not None:
min_count, max_count = counts.min_count, counts.max_count
if element.is_empty() or min_count == max_count == 1:
return element
if source.match("?"):
return LazyRepeat(element, min_count, max_count)
elif source.match("+"):
if counts.limited_quantifier:
return GreedyRepeat(GreedyRepeat(element, min_count, max_count), 1, MAX_REPEAT)
else:
return make_atomic(info, GreedyRepeat(element, min_count, max_count))
else:
return GreedyRepeat(element, min_count, max_count)
return element
def _parse_element(source, info):
here = source.pos
ch = source.get()
if ch in SPECIAL_CHARS:
if ch in ")|":
source.pos = here
return None
elif ch == "\\":
return _parse_escape(source, info, in_set=False)
elif ch == "(":
element = _parse_paren(source, info)
if element is not None:
return element
elif ch == ".":
if info.flags & DOT_ALL:
return AnyAll()
else:
return Any()
elif ch == "[":
return _parse_set(source, info)
elif ch == "^":
return AtPosition(AT_BEGINNING_STRING)
elif ch == "$":
return AtPosition(AT_END_STRING)
elif ch == "{":
here2 = source.pos
counts = _parse_quantifier(source, info)
if counts is not None:
raise RegexpError("nothing to repeat")
source.pos = here2
return make_character(info, ord(ch[0]))
elif ch in "?*+":
raise RegexpError("nothing to repeat")
else:
return make_character(info, ord(ch[0]))
else:
return make_character(info, ord(ch[0]))
def _parse_quantifier(source, info):
while True:
here = source.pos
if source.match("?"):
return Counts(0, 1)
elif source.match("*"):
return Counts(0)
elif source.match("+"):
return Counts(1)
elif source.match("{"):
try:
return _parse_limited_quantifier(source)
except ParseError:
pass
elif source.match("(?#"):
_parse_comment(source)
continue
break
source.pos = here
return None
def _parse_paren(source, info):
if source.match("?"):
if source.match("<"):
if source.match("="):
return _parse_lookaround(source, info, behind=True, positive=True)
elif source.match("!"):
return _parse_lookaround(source, info, behind=True, positive=False)
name = _parse_name(source)
group = info.new_group(name)
source.expect(">")
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
info.close_group(group)
return Group(info, group, subpattern)
elif source.match("="):
return _parse_lookaround(source, info, behind=False, positive=True)
elif source.match("!"):
return _parse_lookaround(source, info, behind=False, positive=False)
elif source.match("#"):
_parse_comment(source)
return
elif source.match(">"):
return _parse_atomic(source, info)
elif source.match(":"):
subpattern = _parse_pattern(source, info)
source.expect(")")
return subpattern
elif source.match("-") or source.match("m") or source.match("i") or source.match("x"):
subpattern = _parse_pattern(source, info)
source.expect(")")
return subpattern
else:
raise RegexpError("undefined group option")
group = info.new_group()
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
info.close_group(group)
return Group(info, group, subpattern)
def _parse_atomic(source, info):
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
return make_atomic(info, subpattern)
def _parse_set(source, info):
saved_ignore = source.ignore_space
source.ignore_space = False
negate = source.match("^")
try:
item = _parse_set_intersect(source, info)
source.expect("]")
finally:
source.ignore_space = saved_ignore
if negate:
item = item.with_flags(positive=not item.positive)
return item.with_flags(case_insensitive=info.flags & IGNORE_CASE)
def _parse_set_intersect(source, info):
items = [_parse_set_implicit_union(source, info)]
while source.match("&&"):
items.append(_parse_set_implicit_union(source, info))
if len(items) == 1:
return items[0]
return SetIntersection(info, items)
def _parse_set_implicit_union(source, info):
items = [_parse_set_member(source, info)]
while True:
here = source.pos
if source.match("]") or source.match("&&"):
source.pos = here
break
items.append(_parse_set_member(source, info))
if len(items) == 1 and not isinstance(items[0], Range):
return items[0]
return SetUnion(info, items)
def _parse_set_member(source, info):
start = _parse_set_item(source, info)
if (not isinstance(start, Character) or not start.positive or
not source.match("-")):
return start
here = source.pos
if source.match("]"):
source.pos = here
return SetUnion(info, [start, Character(ord("-"))])
end = _parse_set_item(source, info)
if not isinstance(end, Character) or not end.positive:
return SetUnion(info, [start, Character(ord("-")), end])
if start.value > end.value:
raise RegexpError("bad character range")
if start.value == end.value:
return start
return Range(start.value, end.value)
def _parse_set_item(source, info):
if source.match("\\"):
return _parse_escape(source, info, in_set=True)
here = source.pos
if source.match("[:"):
try:
return _parse_posix_class(source, info)
except ParseError:
source.pos = here
if source.match("["):
negate = source.match("^")
item = _parse_set_intersect(source, info)
source.expect("]")
if negate:
item = item.with_flags(positive=not item.positive)
return item
ch = source.get()
if not ch:
raise RegexpError("bad set")
return Character(ord(ch[0]))
def _parse_escape(source, info, in_set):
saved_ignore = source.ignore_space
source.ignore_space = False
ch = source.get()
source.ignore_space = saved_ignore
if not ch:
raise RegexpError("bad escape")
if ch == "g" and not in_set:
here = source.pos
try:
return _parse_group_ref(source, info)
except RegexpError:
source.pos = here
return make_character(info, ord(ch[0]), in_set)
elif ch == "G" and not in_set:
return AtPosition(AT_BEGINNING)
elif ch in "pP":
return _parse_property(source, info, ch == "p", in_set)
elif ch.isalpha():
if not in_set:
if ch in POSITION_ESCAPES:
return POSITION_ESCAPES[ch]
if ch in CHARSET_ESCAPES:
return CHARSET_ESCAPES[ch]
elif ch in CHARACTER_ESCAPES:
return Character(ord(CHARACTER_ESCAPES[ch]))
return make_character(info, ord(ch[0]), in_set)
elif ch.isdigit():
return _parse_numeric_escape(source, info, ch, in_set)
else:
return make_character(info, ord(ch[0]), in_set)
def _parse_lookaround(source, info, behind, positive):
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(")")
return LookAround(subpattern, behind=behind, positive=positive)
def _parse_limited_quantifier(source):
min_count = _parse_count(source)
ch = source.get()
if ch == ",":
max_count = _parse_count(source)
if not source.match("}"):
raise ParseError
min_count = int(min_count) if min_count else 0
max_count = int(max_count) if max_count else MAX_REPEAT
if min_count > max_count:
raise RegexpError("min repeat gereater than max repeat")
if max_count > MAX_REPEAT:
raise RegexpError("repeat count too big")
return Counts(min_count, max_count, limited_quantifier=True)
if ch != "}":
raise ParseError
if not min_count:
raise ParseError
min_count = int(min_count)
if min_count > MAX_REPEAT:
raise RegexpError("repeat count too big")
return Counts(min_count, min_count, limited_quantifier=True)
def _parse_count(source):
b = StringBuilder(2)
while True:
here = source.pos
ch = source.get()
if ch.isdigit():
b.append(ch)
else:
source.pos = here
break
return b.build()
def _parse_comment(source):
while True:
ch = source.get()
if ch == ")":
break
elif not ch:
break
def _parse_name(source):
b = StringBuilder(5)
while True:
here = source.pos
ch = source.get()
if ch in ")>":
source.pos = here
break
elif not ch:
break
else:
b.append(ch)
return b.build()
def _parse_plain_flags(source):
b = StringBuilder(4)
while True:
ch = source.get()
if ch == ":":
break
else:
b.append(ch)
return b.build()
def _parse_group_ref(source, info):
source.expect("<")
name = _parse_name(source)
source.expect(">")
if info.is_open_group(name):
raise RegexpError("can't refer to an open group")
return make_ref_group(info, info.normalize_group(name))
def _parse_property(source, info, positive, in_set):
here = source.pos
if source.match("{"):
negate = source.match("^")
prop_name, name = _parse_property_name(source)
if source.match("}"):
if name in PROPERTIES:
return Property(PROPERTIES[name], positive != negate)
source.pos = here
return make_character(info, ord("p" if positive else "P"), in_set)
def _parse_property_name(source):
b = StringBuilder(5)
while True:
here = source.pos
ch = source.get()
if ch.isalnum():
b.append(ch)
else:
source.pos = here
break
name = b.build()
return name, name
def _parse_numeric_escape(source, info, ch, in_set):
raise NotImplementedError("_parse_numeric_escape")
def _parse_posix_class(source, info):
negate = source.match("^")
prop_name, name = _parse_property_name(source)
if not source.match(":]"):
raise ParseError
    # the second argument is `positive`, as in _parse_property above, so a
    # leading "^" must invert it
    return Property(PROPERTIES[name], not negate)
def _compile_no_cache(pattern, flags):
source = Source(pattern)
if flags & EXTENDED:
source.ignore_space = True
info = Info(flags)
parsed = _parse_pattern(source, info)
if not source.at_end():
raise RegexpError("trailing characters in pattern")
parsed.fix_groups()
parsed = parsed.optimize(info)
ctx = CompilerContext()
parsed.compile(ctx)
ctx.emit(OPCODE_SUCCESS)
code = ctx.build()
index_group = {}
for n, v in info.group_index.iteritems():
index_group[v] = n
return code, info.flags, info.group_count, info.group_index, index_group, info.group_offsets
def compile(cache, pattern, flags=0):
if not cache.contains(pattern, flags):
cache.set(pattern, flags, _compile_no_cache(pattern, flags))
return cache.get(pattern, flags)
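# --- usage sketch (illustrative; not part of the original module) ---
# `compile` memoizes compiled patterns on the (pattern, flags) pair, so any
# cache object exposing contains/set/get with that key shape works.  A
# hypothetical dict-backed stand-in:
#
#     class DictCache(object):
#         def __init__(self):
#             self._store = {}
#         def contains(self, pattern, flags):
#             return (pattern, flags) in self._store
#         def set(self, pattern, flags, value):
#             self._store[(pattern, flags)] = value
#         def get(self, pattern, flags):
#             return self._store[(pattern, flags)]
#
#     code, flags, group_count, group_index, index_group, group_offsets = \
#         compile(DictCache(), "a+(b|c)")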
| true
| true
|
f71526c8cd11306936f07cf05d779b8768da4f48
| 2,661
|
py
|
Python
|
search_wizard/__init__.py
|
saptarshibasu15/search-wizard
|
bd2e84f1f5dbc9196b09ba62930970e364413ed7
|
[
"MIT"
] | 2
|
2020-11-26T14:43:45.000Z
|
2021-02-15T07:34:45.000Z
|
search_wizard/__init__.py
|
saptarshibasu15/search_wizard
|
bd2e84f1f5dbc9196b09ba62930970e364413ed7
|
[
"MIT"
] | null | null | null |
search_wizard/__init__.py
|
saptarshibasu15/search_wizard
|
bd2e84f1f5dbc9196b09ba62930970e364413ed7
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
import pandas as pd
class SearchWizard:
    def __init__(self, query: str = None):
        # per-instance state: class-level mutable defaults would be shared
        # across all instances and accumulate between calls
        self.config = {
            "base": "https://www.google.com/search?q=",
            "query": query,
            "format": "json"
        }
        self.search_results = []
def get_config(self) -> dict:
return self.config
    def get_results(self, query: str = None, flag: str = None) -> list:
        if query is not None:
            self.config["query"] = query
        if self.config["query"] is not None:
            # reset so repeated calls do not accumulate duplicate results
            self.search_results = []
            r = requests.get(self.config["base"] + self.config["query"])
htmlContent = r.content
soup = BeautifulSoup(htmlContent, "html.parser")
titles = soup.find_all("h3", class_="zBAuLc")
descriptions = soup.find_all('div', class_="BNeawe s3v9rd AP7Wnd")
urls = soup.find_all("div", class_="kCrYT")
for title, description, url in zip(titles, descriptions, urls):
description = description.get_text().replace(u"\xa0", "")
                try:
                    url = str(url.find("a")["href"])
                except (TypeError, KeyError):
                    # no <a> tag found, or the tag lacks an href attribute
                    url = "NaN"
self.search_results.append(
{
"title": title.get_text(),
"description": description if description.find("...") == -1 else description[:description.find("...")+3],
"url": url[7:url.find("&sa")]
}
)
            if flag == "head":
                return self.search_results[:3]
            elif flag == "tail":
                return self.search_results[len(self.search_results) - 3:]
            else:
                # flag=None or any unrecognized value returns everything
                return self.search_results
else:
raise Exception(
"QueryNotFound: Try mentioning the search query before using.\nHint: SearchWizard(query) or SearchWizard().get_results(query)")
def prettify(self, flag=None):
if not self.config["query"] == None:
if not flag == None:
if flag == "head":
print(pd.DataFrame(self.get_results(flag="head")))
elif flag == "tail":
print(pd.DataFrame(self.get_results(flag="tail")))
else:
print(pd.DataFrame(self.get_results()))
else:
raise Exception(
"QueryNotFound: Try mentioning the search query before using.\nHint: SearchWizard(query) or SearchWizard().get_results(query)")
| 34.115385
| 143
| 0.516723
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
class SearchWizard:
    def __init__(self, query: str = None):
        self.config = {
            "base": "https://www.google.com/search?q=",
            "query": query,
            "format": "json"
        }
        self.search_results = []
def get_config(self) -> dict:
return self.config
    def get_results(self, query: str = None, flag: str = None) -> list:
        if query is not None:
            self.config["query"] = query
        if self.config["query"] is not None:
            self.search_results = []
            r = requests.get(self.config["base"] + self.config["query"])
htmlContent = r.content
soup = BeautifulSoup(htmlContent, "html.parser")
titles = soup.find_all("h3", class_="zBAuLc")
descriptions = soup.find_all('div', class_="BNeawe s3v9rd AP7Wnd")
urls = soup.find_all("div", class_="kCrYT")
for title, description, url in zip(titles, descriptions, urls):
description = description.get_text().replace(u"\xa0", "")
                try:
                    url = str(url.find("a")["href"])
                except (TypeError, KeyError):
                    url = "NaN"
self.search_results.append(
{
"title": title.get_text(),
"description": description if description.find("...") == -1 else description[:description.find("...")+3],
"url": url[7:url.find("&sa")]
}
)
            if flag == "head":
                return self.search_results[:3]
            elif flag == "tail":
                return self.search_results[len(self.search_results) - 3:]
            else:
                return self.search_results
else:
raise Exception(
"QueryNotFound: Try mentioning the search query before using.\nHint: SearchWizard(query) or SearchWizard().get_results(query)")
def prettify(self, flag=None):
if not self.config["query"] == None:
if not flag == None:
if flag == "head":
print(pd.DataFrame(self.get_results(flag="head")))
elif flag == "tail":
print(pd.DataFrame(self.get_results(flag="tail")))
else:
print(pd.DataFrame(self.get_results()))
else:
raise Exception(
"QueryNotFound: Try mentioning the search query before using.\nHint: SearchWizard(query) or SearchWizard().get_results(query)")
| true
| true
|
f71526d120aeaf83394faef2c80ab3eeb85fdce7
| 1,413
|
py
|
Python
|
setup.py
|
pmrowla/gumiyabot
|
5dd446342f129c8e8ddc4de044a7072a58ec7851
|
[
"MIT"
] | 4
|
2019-03-19T00:25:44.000Z
|
2021-10-16T03:45:22.000Z
|
setup.py
|
pmrowla/gumiyabot
|
5dd446342f129c8e8ddc4de044a7072a58ec7851
|
[
"MIT"
] | 5
|
2017-08-25T15:08:39.000Z
|
2021-06-11T09:15:00.000Z
|
setup.py
|
pmrowla/gumiyabot
|
5dd446342f129c8e8ddc4de044a7072a58ec7851
|
[
"MIT"
] | 2
|
2017-10-04T19:30:08.000Z
|
2021-12-01T13:39:27.000Z
|
from setuptools import find_packages, setup
from codecs import open
from os import path
version = '0.1.5'
install_requires = ['aiohttp', 'irc3', 'osuapi']
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gumiyabot',
version=version,
description='Standalone Twitch + Bancho IRC bot for handling osu! beatmap requests',
long_description=long_description,
url='https://github.com/pmrowla/gumiyabot',
author='Peter Rowlands',
author_email='peter@pmrowla.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Framework :: AsyncIO',
'Intended Audience :: Developers',
'Topic :: Games/Entertainment',
'Topic :: Communications :: Chat :: Internet Relay Chat',
],
keywords='osu twitch gumiya',
packages=find_packages(exclude=['docs', 'tests']),
include_package_data=True,
entry_points={
'console_scripts': ['gumiyabot = gumiyabot.__main__:main'],
},
install_requires=install_requires,
)
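# --- usage sketch (illustrative; not part of this file) ---
# From a source checkout, build and install, then run the console script
# declared in entry_points above:
#     pip install .
#     gumiyabot   # CLI arguments are defined in gumiyabot.__main__:main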
| 30.717391
| 88
| 0.644728
|
from setuptools import find_packages, setup
from codecs import open
from os import path
version = '0.1.5'
install_requires = ['aiohttp', 'irc3', 'osuapi']
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='gumiyabot',
version=version,
description='Standalone Twitch + Bancho IRC bot for handling osu! beatmap requests',
long_description=long_description,
url='https://github.com/pmrowla/gumiyabot',
author='Peter Rowlands',
author_email='peter@pmrowla.com',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Framework :: AsyncIO',
'Intended Audience :: Developers',
'Topic :: Games/Entertainment',
'Topic :: Communications :: Chat :: Internet Relay Chat',
],
keywords='osu twitch gumiya',
packages=find_packages(exclude=['docs', 'tests']),
include_package_data=True,
entry_points={
'console_scripts': ['gumiyabot = gumiyabot.__main__:main'],
},
install_requires=install_requires,
)
| true
| true
|
f715280a5f0f47e678d78c95f09a2e73a4da4522
| 5,503
|
py
|
Python
|
src/sentry/web/frontend/accept_organization_invite.py
|
sigismund/sentry
|
421a512cd3b4a4c9ed660af536dc5bc4c12a287c
|
[
"BSD-3-Clause"
] | 1
|
2019-05-28T06:18:03.000Z
|
2019-05-28T06:18:03.000Z
|
src/sentry/web/frontend/accept_organization_invite.py
|
sigismund/sentry
|
421a512cd3b4a4c9ed660af536dc5bc4c12a287c
|
[
"BSD-3-Clause"
] | 6
|
2018-10-19T10:04:23.000Z
|
2019-12-09T20:29:12.000Z
|
src/sentry/web/frontend/accept_organization_invite.py
|
sigismund/sentry
|
421a512cd3b4a4c9ed660af536dc5bc4c12a287c
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.crypto import constant_time_compare
from django.utils.translation import ugettext_lazy as _
from sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember, Project
from sentry.signals import member_joined
from sentry.utils import auth
from sentry.web.frontend.base import BaseView
ERR_INVITE_INVALID = _('The invite link you followed is not valid.')
PENDING_INVITE = 'pending-invite'
MAX_AGE = 60 * 60 * 24 * 7 # 7 days
class AcceptInviteForm(forms.Form):
pass
class AcceptOrganizationInviteView(BaseView):
auth_required = False
def get_form(self, request):
if request.method == 'POST':
return AcceptInviteForm(request.POST)
return AcceptInviteForm()
def handle(self, request, member_id, token):
assert request.method in ('POST', 'GET')
try:
om = OrganizationMember.objects.select_related('organization').get(pk=member_id)
except OrganizationMember.DoesNotExist:
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
if not om.is_pending:
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
if not constant_time_compare(om.token or om.legacy_token, token):
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
organization = om.organization
qs = Project.objects.filter(
organization=organization,
)
project_list = list(qs[:25])
project_count = qs.count()
org_requires_2fa = organization.flags.require_2fa.is_set
user_has_2fa = Authenticator.objects.user_has_2fa(request.user.id)
needs_2fa = org_requires_2fa and not user_has_2fa
context = {
'org_name': organization.name,
'project_list': project_list,
'project_count': project_count,
'needs_authentication': not request.user.is_authenticated(),
'needs_2fa': needs_2fa,
'logout_url': '{}?next={}'.format(
reverse('sentry-logout'),
request.path,
),
'login_url': '{}?next={}'.format(
reverse('sentry-login'),
request.path,
),
'register_url': '{}?next={}'.format(
reverse('sentry-register'),
request.path,
),
}
if not request.user.is_authenticated():
# Show login or register form
auth.initiate_login(request, next_url=request.get_full_path())
request.session['can_register'] = True
request.session['invite_email'] = om.email
return self.respond('sentry/accept-organization-invite.html', context)
if needs_2fa:
# redirect to setup 2fa
response = self.respond('sentry/accept-organization-invite.html', context)
response.set_cookie(PENDING_INVITE, request.path, max_age=MAX_AGE)
return response
        # if they're already a member of the organization, it's likely they're
        # using a shared account and are either previewing this invite or
        # incorrectly expecting it to create a new account for them
context['existing_member'] = OrganizationMember.objects.filter(
user=request.user.id,
organization=om.organization_id,
).exists()
form = self.get_form(request)
if form.is_valid():
if OrganizationMember.objects.filter(
organization=organization, user=request.user
).exists():
messages.add_message(
request, messages.SUCCESS,
_('You are already a member of the %r organization.') %
(organization.name.encode('utf-8'), )
)
om.delete()
else:
om.user = request.user
om.email = None
om.save()
self.create_audit_entry(
request,
organization=organization,
target_object=om.id,
target_user=request.user,
event=AuditLogEntryEvent.MEMBER_ACCEPT,
data=om.get_audit_log_data(),
)
messages.add_message(
request, messages.SUCCESS,
_('You have been added to the %r organization.') %
(organization.name.encode('utf-8'), )
)
member_joined.send(member=om, sender=self)
request.session.pop('can_register', None)
response = self.redirect(reverse('sentry-organization-home', args=[organization.slug]))
if PENDING_INVITE in request.COOKIES:
response.delete_cookie(PENDING_INVITE)
return response
context['form'] = form
return self.respond('sentry/accept-organization-invite.html', context)
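# --- wiring sketch (illustrative; the real route lives in sentry's urlconf
# and may differ) ---
#     url(r'^accept/(?P<member_id>\d+)/(?P<token>\w+)/$',
#         AcceptOrganizationInviteView.as_view(),
#         name='sentry-accept-invite')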
| 34.39375
| 99
| 0.58459
|
from __future__ import absolute_import
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.crypto import constant_time_compare
from django.utils.translation import ugettext_lazy as _
from sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember, Project
from sentry.signals import member_joined
from sentry.utils import auth
from sentry.web.frontend.base import BaseView
ERR_INVITE_INVALID = _('The invite link you followed is not valid.')
PENDING_INVITE = 'pending-invite'
MAX_AGE = 60 * 60 * 24 * 7
class AcceptInviteForm(forms.Form):
pass
class AcceptOrganizationInviteView(BaseView):
auth_required = False
def get_form(self, request):
if request.method == 'POST':
return AcceptInviteForm(request.POST)
return AcceptInviteForm()
def handle(self, request, member_id, token):
assert request.method in ('POST', 'GET')
try:
om = OrganizationMember.objects.select_related('organization').get(pk=member_id)
except OrganizationMember.DoesNotExist:
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
if not om.is_pending:
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
if not constant_time_compare(om.token or om.legacy_token, token):
messages.add_message(
request,
messages.ERROR,
ERR_INVITE_INVALID,
)
return self.redirect(reverse('sentry'))
organization = om.organization
qs = Project.objects.filter(
organization=organization,
)
project_list = list(qs[:25])
project_count = qs.count()
org_requires_2fa = organization.flags.require_2fa.is_set
user_has_2fa = Authenticator.objects.user_has_2fa(request.user.id)
needs_2fa = org_requires_2fa and not user_has_2fa
context = {
'org_name': organization.name,
'project_list': project_list,
'project_count': project_count,
'needs_authentication': not request.user.is_authenticated(),
'needs_2fa': needs_2fa,
'logout_url': '{}?next={}'.format(
reverse('sentry-logout'),
request.path,
),
'login_url': '{}?next={}'.format(
reverse('sentry-login'),
request.path,
),
'register_url': '{}?next={}'.format(
reverse('sentry-register'),
request.path,
),
}
if not request.user.is_authenticated():
auth.initiate_login(request, next_url=request.get_full_path())
request.session['can_register'] = True
request.session['invite_email'] = om.email
return self.respond('sentry/accept-organization-invite.html', context)
if needs_2fa:
response = self.respond('sentry/accept-organization-invite.html', context)
response.set_cookie(PENDING_INVITE, request.path, max_age=MAX_AGE)
return response
context['existing_member'] = OrganizationMember.objects.filter(
user=request.user.id,
organization=om.organization_id,
).exists()
form = self.get_form(request)
if form.is_valid():
if OrganizationMember.objects.filter(
organization=organization, user=request.user
).exists():
messages.add_message(
request, messages.SUCCESS,
_('You are already a member of the %r organization.') %
(organization.name.encode('utf-8'), )
)
om.delete()
else:
om.user = request.user
om.email = None
om.save()
self.create_audit_entry(
request,
organization=organization,
target_object=om.id,
target_user=request.user,
event=AuditLogEntryEvent.MEMBER_ACCEPT,
data=om.get_audit_log_data(),
)
messages.add_message(
request, messages.SUCCESS,
_('You have been added to the %r organization.') %
(organization.name.encode('utf-8'), )
)
member_joined.send(member=om, sender=self)
request.session.pop('can_register', None)
response = self.redirect(reverse('sentry-organization-home', args=[organization.slug]))
if PENDING_INVITE in request.COOKIES:
response.delete_cookie(PENDING_INVITE)
return response
context['form'] = form
return self.respond('sentry/accept-organization-invite.html', context)
| true
| true
|
f7152815b2e45bf057d62fc81d08199232df205f
| 591
|
py
|
Python
|
wildlifecompliance/migrations/0539_auto_20210317_1151.py
|
Djandwich/wildlifecompliance
|
ca296798526a56ce67ffc2f7e8ebdbae95077e6d
|
[
"Apache-2.0"
] | null | null | null |
wildlifecompliance/migrations/0539_auto_20210317_1151.py
|
Djandwich/wildlifecompliance
|
ca296798526a56ce67ffc2f7e8ebdbae95077e6d
|
[
"Apache-2.0"
] | 3
|
2020-03-12T00:45:31.000Z
|
2022-03-02T10:37:23.000Z
|
wildlifecompliance/migrations/0539_auto_20210317_1151.py
|
Djandwich/wildlifecompliance
|
ca296798526a56ce67ffc2f7e8ebdbae95077e6d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-03-17 03:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0538_auto_20210305_1140'),
]
operations = [
migrations.AlterField(
model_name='classification',
name='name',
field=models.CharField(choices=[('complaint', 'Complaint'), ('enquiry', 'Enquiry'), ('incident', 'Incident')], default='complaint', max_length=30, unique=True),
),
]
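# --- usage note (illustrative) ---
# Apply with `python manage.py migrate wildlifecompliance 0539`; Django
# resolves the dependency on 0538_auto_20210305_1140 automatically.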
| 28.142857
| 172
| 0.637902
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0538_auto_20210305_1140'),
]
operations = [
migrations.AlterField(
model_name='classification',
name='name',
field=models.CharField(choices=[('complaint', 'Complaint'), ('enquiry', 'Enquiry'), ('incident', 'Incident')], default='complaint', max_length=30, unique=True),
),
]
| true
| true
|
f71528a738fc1065a84bb466471ea884e98dc377
| 3,768
|
py
|
Python
|
src/decode/decoder.py
|
Masao-Someki/CycleVAE_VC
|
be4a27637a3f8b6272d96105f9b3c9327f6c16f7
|
[
"MIT"
] | 3
|
2020-06-03T08:29:49.000Z
|
2022-03-23T02:29:01.000Z
|
src/decode/decoder.py
|
Masao-Someki/CycleVAE_VC
|
be4a27637a3f8b6272d96105f9b3c9327f6c16f7
|
[
"MIT"
] | 1
|
2020-06-07T23:06:10.000Z
|
2020-06-07T23:06:10.000Z
|
src/decode/decoder.py
|
Masao-Someki/CycleVAE_VC
|
be4a27637a3f8b6272d96105f9b3c9327f6c16f7
|
[
"MIT"
] | 1
|
2020-06-03T09:41:42.000Z
|
2020-06-03T09:41:42.000Z
|
# Copyright 2020 Masao Someki
# MIT License (https://opensource.org/licenses/MIT)
import os
import glob
import h5py
import logging
import librosa
import numpy as np
from scipy.io import wavfile
from speech import Synthesizer
IRLEN = 1024
INTERVALS = 10
SEED = 1
LP_CUTOFF = 20
class Decoder(object):
def __init__(self, args, scaler, logger=None):
# directory to save wav files
self.save_dir = args.exp_dir
self.fs = args.fs
self.shiftms = args.shiftms
self.fftl = args.fftl
# mcep_alpha
if args.fs == 16000:
self.mcep_alpha = 0.41
elif args.fs == 22050:
self.mcep_alpha = 0.455
elif args.fs == 24000:
self.mcep_alpha = 0.466
elif args.fs == 44100:
self.mcep_alpha = 0.544
elif args.fs == 48000:
self.mcep_alpha = 0.554
        else:
            raise ValueError(
                'sampling rate should be one of '
                '16000, 22050, 24000, 44100, 48000')
# scaler
self.scaler = scaler
# synthesizer
self.synthesizer = Synthesizer(fs=args.fs, fftl=args.fftl, shiftms=args.shiftms)
# logger
if logger is not None:
self.logger = logger
else:
self.logger = logging.getLogger(__name__)
def _inverse_transform(self, key, x):
m = self.scaler[key].mean_
s = self.scaler[key].scale_
return x * s + m
def decode(self, inputs, output, iter_count, i):
# directory
wav_dir = os.path.join(self.save_dir, str(iter_count))
if not os.path.exists(wav_dir):
os.mkdir(wav_dir)
# process over all data
for b in range(len(output['reconst_half'][0])):
# flen
flen = inputs['flen'][b]
# mcep
mcep = inputs['mcep'][b][:flen].cpu().detach().numpy()
mcep = self._inverse_transform('mcep', mcep).astype(np.float64)
# process src-src wav
cvmcep = output['reconst_half'][0][b][:flen].cpu().detach().numpy()
cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)
# codeap
codeap = inputs['codeap'][b][:flen].cpu().detach().numpy().astype(np.float64)
codeap = self._inverse_transform('codeap', codeap)
# synthesize
wav = self.synthesizer.synthesis(
inputs['f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64),
cvmcep,
codeap,
alpha=self.mcep_alpha,
rmcep=mcep
)
wav = np.clip(wav, -32768, 32767)
wav_file = os.path.join(
wav_dir,
'%s_%s_%d.wav' % (inputs['src'][b], inputs['src'][b], i)
)
wavfile.write(wav_file, self.fs, wav.astype(np.int16))
# process src-trg wav
cvmcep = output['trg_reconst'][b][:flen].cpu().detach().numpy()
cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)
# convert f0
cvf0 = inputs['cv_f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64)
# synthesize
wav = self.synthesizer.synthesis(
cvf0,
cvmcep,
codeap,
alpha=self.mcep_alpha,
rmcep=mcep
)
wav = np.clip(wav, -32768, 32767)
wav_file = os.path.join(
wav_dir,
'%s_%s_%d.wav' % (inputs['src'][b], inputs['trg'][b], i)
)
wavfile.write(wav_file, self.fs, wav.astype(np.int16))
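# --- usage sketch (illustrative; not part of the original module) ---
# `args` only needs the attributes read in __init__, so a bare namespace is
# enough; `scaler` is assumed to map 'mcep'/'codeap' to fitted sklearn
# StandardScaler objects, and `inputs`/`output` come from the model batch:
#
#     from argparse import Namespace
#     args = Namespace(exp_dir="exp", fs=24000, shiftms=5.0, fftl=1024)
#     decoder = Decoder(args, scaler)
#     decoder.decode(inputs, output, iter_count=0, i=0)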
| 31.4
| 97
| 0.52362
|
import os
import glob
import h5py
import logging
import librosa
import numpy as np
from scipy.io import wavfile
from speech import Synthesizer
IRLEN = 1024
INTERVALS = 10
SEED = 1
LP_CUTOFF = 20
class Decoder(object):
def __init__(self, args, scaler, logger=None):
self.save_dir = args.exp_dir
self.fs = args.fs
self.shiftms = args.shiftms
self.fftl = args.fftl
if args.fs == 16000:
self.mcep_alpha = 0.41
elif args.fs == 22050:
self.mcep_alpha = 0.455
elif args.fs == 24000:
self.mcep_alpha = 0.466
elif args.fs == 44100:
self.mcep_alpha = 0.544
elif args.fs == 48000:
self.mcep_alpha = 0.554
        else:
            raise ValueError(
                'sampling rate should be one of '
                '16000, 22050, 24000, 44100, 48000')
self.scaler = scaler
self.synthesizer = Synthesizer(fs=args.fs, fftl=args.fftl, shiftms=args.shiftms)
if logger is not None:
self.logger = logger
else:
self.logger = logging.getLogger(__name__)
def _inverse_transform(self, key, x):
m = self.scaler[key].mean_
s = self.scaler[key].scale_
return x * s + m
def decode(self, inputs, output, iter_count, i):
wav_dir = os.path.join(self.save_dir, str(iter_count))
if not os.path.exists(wav_dir):
os.mkdir(wav_dir)
for b in range(len(output['reconst_half'][0])):
flen = inputs['flen'][b]
mcep = inputs['mcep'][b][:flen].cpu().detach().numpy()
mcep = self._inverse_transform('mcep', mcep).astype(np.float64)
cvmcep = output['reconst_half'][0][b][:flen].cpu().detach().numpy()
cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)
codeap = inputs['codeap'][b][:flen].cpu().detach().numpy().astype(np.float64)
codeap = self._inverse_transform('codeap', codeap)
wav = self.synthesizer.synthesis(
inputs['f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64),
cvmcep,
codeap,
alpha=self.mcep_alpha,
rmcep=mcep
)
wav = np.clip(wav, -32768, 32767)
wav_file = os.path.join(
wav_dir,
'%s_%s_%d.wav' % (inputs['src'][b], inputs['src'][b], i)
)
wavfile.write(wav_file, self.fs, wav.astype(np.int16))
cvmcep = output['trg_reconst'][b][:flen].cpu().detach().numpy()
cvmcep = self._inverse_transform('mcep', cvmcep).astype(np.float64)
cvf0 = inputs['cv_f0'][b][:flen].squeeze(1).cpu().detach().numpy().astype(np.float64)
wav = self.synthesizer.synthesis(
cvf0,
cvmcep,
codeap,
alpha=self.mcep_alpha,
rmcep=mcep
)
wav = np.clip(wav, -32768, 32767)
wav_file = os.path.join(
wav_dir,
'%s_%s_%d.wav' % (inputs['src'][b], inputs['trg'][b], i)
)
wavfile.write(wav_file, self.fs, wav.astype(np.int16))
| true
| true
|
f7152949331934bec0c7d5505f3422644b6d6f4e
| 114,228
|
gyp
|
Python
|
grpc.gyp
|
stungkit/grpc
|
063c36cb46733c13d2ce8116b6af482c9bd832d6
|
[
"Apache-2.0"
] | null | null | null |
grpc.gyp
|
stungkit/grpc
|
063c36cb46733c13d2ce8116b6af482c9bd832d6
|
[
"Apache-2.0"
] | null | null | null |
grpc.gyp
|
stungkit/grpc
|
063c36cb46733c13d2ce8116b6af482c9bd832d6
|
[
"Apache-2.0"
] | null | null | null |
# GRPC GYP build file
# This file has been automatically generated from a template file.
# Please look at the templates directory instead.
# This file can be regenerated from the template by running
# tools/buildgen/generate_projects.sh
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
# The openssl and zlib dependencies must be passed in as variables
# defined in an included gypi file, usually common.gypi.
'openssl_gyp_target%': 'Please Define openssl_gyp_target variable',
'zlib_gyp_target%': 'Please Define zlib_gyp_target variable',
'grpc_gcov%': 'false',
'grpc_alpine%': 'false',
},
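  # A hypothetical common.gypi supplying the required variables (the target
  # paths below are illustrative, not part of this build):
  #   {
  #     'variables': {
  #       'openssl_gyp_target': 'third_party/openssl/openssl.gyp:openssl',
  #       'zlib_gyp_target': 'third_party/zlib/zlib.gyp:zlib',
  #     },
  #   }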
'target_defaults': {
'configurations': {
'Debug': {
'cflags': [
'-O0',
],
'defines': [
'_DEBUG',
'DEBUG',
],
},
'Release': {
'cflags': [
'-O2',
'-Wframe-larger-than=16384',
],
'defines': [
'NDEBUG',
],
},
},
'cflags': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
],
'ldflags': [
'-g',
],
'cflags_c': [
'-Werror',
'-std=c99',
],
'cflags_cc': [
'-Werror',
'-std=c++11',
],
'include_dirs': [
'.',
'../..',
'include',
],
'defines': [
'GRPC_ARES=0',
],
'dependencies': [
'<(openssl_gyp_target)',
'<(zlib_gyp_target)',
],
'conditions': [
['grpc_gcov=="true"', {
'cflags': [
'-O0',
'-fprofile-arcs',
'-ftest-coverage',
'-Wno-return-type',
],
'defines': [
'_DEBUG',
'DEBUG',
'GPR_GCOV',
],
'ldflags': [
'-fprofile-arcs',
'-ftest-coverage',
'-rdynamic',
'-lstdc++',
],
}],
['grpc_alpine=="true"', {
'defines': [
'GPR_MUSL_LIBC_COMPAT'
]
}],
['OS == "win"', {
'defines': [
'_WIN32_WINNT=0x0600',
'WIN32_LEAN_AND_MEAN',
'_HAS_EXCEPTIONS=0',
'UNICODE',
'_UNICODE',
'NOMINMAX',
],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
}
},
"libraries": [
"ws2_32"
]
}],
['OS == "mac"', {
'xcode_settings': {
'OTHER_CFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations',
],
},
}]
]
},
'targets': [
{
'target_name': 'address_sorting',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
],
},
{
'target_name': 'end2end_tests',
'type': 'static_library',
'dependencies': [
'grpc_test_util',
],
'sources': [
'src/core/lib/security/authorization/grpc_authorization_policy_provider.cc',
'src/core/lib/security/authorization/rbac_translator.cc',
'test/core/compression/args_utils.cc',
'test/core/end2end/cq_verifier.cc',
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/core/end2end/end2end_test_utils.cc',
'test/core/end2end/end2end_tests.cc',
'test/core/end2end/fixtures/http_proxy_fixture.cc',
'test/core/end2end/fixtures/local_util.cc',
'test/core/end2end/fixtures/proxy.cc',
'test/core/end2end/tests/authority_not_supported.cc',
'test/core/end2end/tests/bad_hostname.cc',
'test/core/end2end/tests/bad_ping.cc',
'test/core/end2end/tests/binary_metadata.cc',
'test/core/end2end/tests/call_creds.cc',
'test/core/end2end/tests/call_host_override.cc',
'test/core/end2end/tests/cancel_after_accept.cc',
'test/core/end2end/tests/cancel_after_client_done.cc',
'test/core/end2end/tests/cancel_after_invoke.cc',
'test/core/end2end/tests/cancel_after_round_trip.cc',
'test/core/end2end/tests/cancel_before_invoke.cc',
'test/core/end2end/tests/cancel_in_a_vacuum.cc',
'test/core/end2end/tests/cancel_with_status.cc',
'test/core/end2end/tests/channelz.cc',
'test/core/end2end/tests/client_streaming.cc',
'test/core/end2end/tests/compressed_payload.cc',
'test/core/end2end/tests/connectivity.cc',
'test/core/end2end/tests/default_host.cc',
'test/core/end2end/tests/disappearing_server.cc',
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_init_fails.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/filtered_metadata.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/grpc_authz.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
'test/core/end2end/tests/invoke_large_request.cc',
'test/core/end2end/tests/keepalive_timeout.cc',
'test/core/end2end/tests/large_metadata.cc',
'test/core/end2end/tests/max_concurrent_streams.cc',
'test/core/end2end/tests/max_connection_age.cc',
'test/core/end2end/tests/max_connection_idle.cc',
'test/core/end2end/tests/max_message_length.cc',
'test/core/end2end/tests/negative_deadline.cc',
'test/core/end2end/tests/no_error_on_hotpath.cc',
'test/core/end2end/tests/no_logging.cc',
'test/core/end2end/tests/no_op.cc',
'test/core/end2end/tests/payload.cc',
'test/core/end2end/tests/ping.cc',
'test/core/end2end/tests/ping_pong_streaming.cc',
'test/core/end2end/tests/proxy_auth.cc',
'test/core/end2end/tests/registered_call.cc',
'test/core/end2end/tests/request_with_flags.cc',
'test/core/end2end/tests/request_with_payload.cc',
'test/core/end2end/tests/resource_quota_server.cc',
'test/core/end2end/tests/retry.cc',
'test/core/end2end/tests/retry_cancel_after_first_attempt_starts.cc',
'test/core/end2end/tests/retry_cancel_during_delay.cc',
'test/core/end2end/tests/retry_cancel_with_multiple_send_batches.cc',
'test/core/end2end/tests/retry_cancellation.cc',
'test/core/end2end/tests/retry_disabled.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_delay.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc',
'test/core/end2end/tests/retry_lb_drop.cc',
'test/core/end2end/tests/retry_lb_fail.cc',
'test/core/end2end/tests/retry_non_retriable_status.cc',
'test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc',
'test/core/end2end/tests/retry_per_attempt_recv_timeout.cc',
'test/core/end2end/tests/retry_per_attempt_recv_timeout_on_last_attempt.cc',
'test/core/end2end/tests/retry_recv_initial_metadata.cc',
'test/core/end2end/tests/retry_recv_message.cc',
'test/core/end2end/tests/retry_recv_message_replay.cc',
'test/core/end2end/tests/retry_recv_trailing_metadata_error.cc',
'test/core/end2end/tests/retry_send_initial_metadata_refs.cc',
'test/core/end2end/tests/retry_send_op_fails.cc',
'test/core/end2end/tests/retry_send_recv_batch.cc',
'test/core/end2end/tests/retry_server_pushback_delay.cc',
'test/core/end2end/tests/retry_server_pushback_disabled.cc',
'test/core/end2end/tests/retry_streaming.cc',
'test/core/end2end/tests/retry_streaming_after_commit.cc',
'test/core/end2end/tests/retry_streaming_succeeds_before_replay_finished.cc',
'test/core/end2end/tests/retry_throttled.cc',
'test/core/end2end/tests/retry_too_many_attempts.cc',
'test/core/end2end/tests/retry_transparent_goaway.cc',
'test/core/end2end/tests/retry_transparent_max_concurrent_streams.cc',
'test/core/end2end/tests/retry_transparent_not_sent_on_wire.cc',
'test/core/end2end/tests/retry_unref_before_finish.cc',
'test/core/end2end/tests/retry_unref_before_recv.cc',
'test/core/end2end/tests/server_finishes_request.cc',
'test/core/end2end/tests/server_streaming.cc',
'test/core/end2end/tests/shutdown_finishes_calls.cc',
'test/core/end2end/tests/shutdown_finishes_tags.cc',
'test/core/end2end/tests/simple_delayed_request.cc',
'test/core/end2end/tests/simple_metadata.cc',
'test/core/end2end/tests/simple_request.cc',
'test/core/end2end/tests/streaming_error_response.cc',
'test/core/end2end/tests/trailing_metadata.cc',
'test/core/end2end/tests/write_buffering.cc',
'test/core/end2end/tests/write_buffering_at_end.cc',
'test/core/util/test_lb_policies.cc',
],
},
{
'target_name': 'gpr',
'type': 'static_library',
'dependencies': [
'absl/base:base',
'absl/base:core_headers',
'absl/memory:memory',
'absl/random:random',
'absl/status:status',
'absl/strings:cord',
'absl/strings:str_format',
'absl/strings:strings',
'absl/synchronization:synchronization',
'absl/time:time',
'absl/types:optional',
'upb',
],
'sources': [
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/examine_stack.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/stat_posix.cc',
'src/core/lib/gprpp/stat_windows.cc',
'src/core/lib/gprpp/status_helper.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/gprpp/time_util.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
],
},
{
'target_name': 'grpc',
'type': 'static_library',
'dependencies': [
'absl/container:flat_hash_map',
'absl/container:inlined_vector',
'absl/functional:bind_front',
'absl/hash:hash',
'absl/meta:type_traits',
'absl/status:statusor',
'absl/types:span',
'absl/types:variant',
'absl/utility:utility',
'gpr',
'address_sorting',
],
'sources': [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/channel_idle/channel_idle_filter.cc',
'src/core/ext/filters/channel_idle/idle_filter_state.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc',
'src/core/ext/filters/client_channel/resolver/polling_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_channel/subchannel_stream_client.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/rbac/rbac_filter.cc',
'src/core/ext/filters/rbac/rbac_service_config_parser.cc',
'src/core/ext/filters/server_config_selector/server_config_selector.cc',
'src/core/ext/filters/server_config_selector/server_config_selector_filter.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/admin/v3/certs.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/memory.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/mutex_stats.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/server_info.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/tap.upb.c',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c',
'src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c',
'src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.c',
'src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c',
'src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http_status.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/range.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/ratelimit_unit.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/token_bucket.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/api/httpbody.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls_config.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/security.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/annotations/status.upb.c',
'src/core/ext/upb-generated/udpa/annotations/versioning.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/security.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/sensitive.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/status.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.c',
'src/core/ext/upb-generated/xds/core/v3/authority.upb.c',
'src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c',
'src/core/ext/upb-generated/xds/core/v3/context_params.upb.c',
'src/core/ext/upb-generated/xds/core/v3/extension.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/certs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/clusters.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/init_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/listeners.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/memory.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/metrics.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/mutex_stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/server_info.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/tap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/common/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_method_list.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/metrics_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/datadog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/dynamic_ot.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/lightstep.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/opencensus.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/skywalking.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/trace.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/xray.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/zipkin.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/cookie.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/http_inputs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/hash_policy.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http_status.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_unit.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/token_bucket.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/checked.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/syntax.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/http.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/httpbody.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c',
'src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c',
'src/core/ext/upbdefs-generated/opencensus/proto/trace/v1/trace_config.upbdefs.c',
'src/core/ext/upbdefs-generated/src/proto/grpc/lookup/v1/rls_config.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/validate/validate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/security.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/status.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/v3/typed_struct.upbdefs.c',
'src/core/ext/xds/certificate_provider_registry.cc',
'src/core/ext/xds/certificate_provider_store.cc',
'src/core/ext/xds/file_watcher_certificate_provider_factory.cc',
'src/core/ext/xds/xds_api.cc',
'src/core/ext/xds/xds_bootstrap.cc',
'src/core/ext/xds/xds_certificate_provider.cc',
'src/core/ext/xds/xds_channel_stack_modifier.cc',
'src/core/ext/xds/xds_client.cc',
'src/core/ext/xds/xds_client_stats.cc',
'src/core/ext/xds/xds_cluster.cc',
'src/core/ext/xds/xds_cluster_specifier_plugin.cc',
'src/core/ext/xds/xds_common_types.cc',
'src/core/ext/xds/xds_endpoint.cc',
'src/core/ext/xds/xds_http_fault_filter.cc',
'src/core/ext/xds/xds_http_filters.cc',
'src/core/ext/xds/xds_http_rbac_filter.cc',
'src/core/ext/xds/xds_listener.cc',
'src/core/ext/xds/xds_resource_type.cc',
'src/core/ext/xds/xds_route_config.cc',
'src/core/ext/xds/xds_routing.cc',
'src/core/ext/xds/xds_server_config_fetcher.cc',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_stack_builder_impl.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/promise_based_filter.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/channel_args_endpoint_config.cc',
'src/core/lib/event_engine/default_event_engine_factory.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/memory_allocator.cc',
'src/core/lib/event_engine/resolved_address.cc',
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gprpp/time.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/sockaddr_utils_posix.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
'src/core/lib/resolver/server_address.cc',
'src/core/lib/resource_quota/api.cc',
'src/core/lib/resource_quota/arena.cc',
'src/core/lib/resource_quota/memory_quota.cc',
'src/core/lib/resource_quota/resource_quota.cc',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/grpc_authorization_engine.cc',
'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
'src/core/lib/security/authorization/matchers.cc',
'src/core/lib/security/authorization/rbac_policy.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/call_creds_util.cc',
'src/core/lib/security/credentials/channel_creds_registry_init.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/external/aws_external_account_credentials.cc',
'src/core/lib/security/credentials/external/aws_request_signer.cc',
'src/core/lib/security/credentials/external/external_account_credentials.cc',
'src/core/lib/security/credentials/external/file_external_account_credentials.cc',
'src/core/lib/security/credentials/external/url_external_account_credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/credentials/xds/xds_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config_impl.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_buffer_api.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',
'src/core/lib/transport/handshaker_registry.cc',
'src/core/lib/transport/http_connect_handshaker.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/tcp_connect_handshaker.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/plugin_registry/grpc_plugin_registry_extra.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/key_logging/ssl_key_logging.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
],
},
{
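      # Core-test helper library; links the full (secure) 'grpc' core.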
'target_name': 'grpc_test_util',
'type': 'static_library',
'dependencies': [
'absl/debugging:failure_signal_handler',
'absl/debugging:stacktrace',
'absl/debugging:symbolize',
'grpc',
],
'sources': [
'test/core/event_engine/test_init.cc',
'test/core/util/build.cc',
'test/core/util/cmdline.cc',
'test/core/util/fuzzer_util.cc',
'test/core/util/grpc_profiler.cc',
'test/core/util/histogram.cc',
'test/core/util/mock_endpoint.cc',
'test/core/util/parse_hexstring.cc',
'test/core/util/passthru_endpoint.cc',
'test/core/util/port.cc',
'test/core/util/port_isolated_runtime_environment.cc',
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/test_tcp_server.cc',
'test/core/util/tls_utils.cc',
'test/core/util/tracer_util.cc',
],
},
{
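      # The same core-test helpers built against 'grpc_unsecure'
      # (note: no TLS helpers such as test/core/util/tls_utils.cc here).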
'target_name': 'grpc_test_util_unsecure',
'type': 'static_library',
'dependencies': [
'absl/debugging:failure_signal_handler',
'absl/debugging:stacktrace',
'absl/debugging:symbolize',
'grpc_unsecure',
],
'sources': [
'test/core/event_engine/test_init.cc',
'test/core/util/build.cc',
'test/core/util/cmdline.cc',
'test/core/util/fuzzer_util.cc',
'test/core/util/grpc_profiler.cc',
'test/core/util/histogram.cc',
'test/core/util/mock_endpoint.cc',
'test/core/util/parse_hexstring.cc',
'test/core/util/passthru_endpoint.cc',
'test/core/util/port.cc',
'test/core/util/port_isolated_runtime_environment.cc',
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/test_tcp_server.cc',
'test/core/util/tracer_util.cc',
],
},
{
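      # gRPC core built without SSL/TLS support; only the fake/local/insecure
      # security paths remain in its source list.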
'target_name': 'grpc_unsecure',
'type': 'static_library',
'dependencies': [
'absl/container:flat_hash_map',
'absl/container:inlined_vector',
'absl/functional:bind_front',
'absl/hash:hash',
'absl/meta:type_traits',
'absl/status:statusor',
'absl/types:span',
'absl/types:variant',
'absl/utility:utility',
'gpr',
'address_sorting',
],
'sources': [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/channel_idle/channel_idle_filter.cc',
'src/core/ext/filters/channel_idle/idle_filter_state.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/polling_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_channel/subchannel_stream_client.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_stack_builder_impl.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/promise_based_filter.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/channel_args_endpoint_config.cc',
'src/core/lib/event_engine/default_event_engine_factory.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/memory_allocator.cc',
'src/core/lib/event_engine/resolved_address.cc',
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gprpp/time.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/sockaddr_utils_posix.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
'src/core/lib/resolver/server_address.cc',
'src/core/lib/resource_quota/api.cc',
'src/core/lib/resource_quota/arena.cc',
'src/core/lib/resource_quota/memory_quota.cc',
'src/core/lib/resource_quota/resource_quota.cc',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/call_creds_util.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config_impl.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_buffer_api.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',
'src/core/lib/transport/handshaker_registry.cc',
'src/core/lib/transport/http_connect_handshaker.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/tcp_connect_handshaker.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/plugin_registry/grpc_plugin_registry_noextra.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
],
},
{
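      # Shared setup for the C++ microbenchmarks: test protos plus
      # test/cpp/microbenchmarks/helpers.cc, built on the unsecure stacks.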
'target_name': 'benchmark_helpers',
'type': 'static_library',
'dependencies': [
'benchmark',
'grpc++_unsecure',
'grpc_test_util_unsecure',
'grpc++_test_config',
],
'sources': [
'src/proto/grpc/testing/echo.proto',
'src/proto/grpc/testing/echo_messages.proto',
'src/proto/grpc/testing/simple_messages.proto',
'src/proto/grpc/testing/xds/v3/orca_load_report.proto',
'test/cpp/microbenchmarks/helpers.cc',
],
},
{
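      # C++ API layer over the core 'grpc' target; also carries the binder
      # transport sources.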
'target_name': 'grpc++',
'type': 'static_library',
'dependencies': [
'grpc',
],
'sources': [
'src/core/ext/transport/binder/client/binder_connector.cc',
'src/core/ext/transport/binder/client/channel_create.cc',
'src/core/ext/transport/binder/client/channel_create_impl.cc',
'src/core/ext/transport/binder/client/connection_id_generator.cc',
'src/core/ext/transport/binder/client/endpoint_binder_pool.cc',
'src/core/ext/transport/binder/client/jni_utils.cc',
'src/core/ext/transport/binder/client/security_policy_setting.cc',
'src/core/ext/transport/binder/security_policy/binder_security_policy.cc',
'src/core/ext/transport/binder/server/binder_server.cc',
'src/core/ext/transport/binder/server/binder_server_credentials.cc',
'src/core/ext/transport/binder/transport/binder_transport.cc',
'src/core/ext/transport/binder/utils/ndk_binder.cc',
'src/core/ext/transport/binder/utils/transport_stream_receiver_impl.cc',
'src/core/ext/transport/binder/wire_format/binder_android.cc',
'src/core/ext/transport/binder/wire_format/binder_constants.cc',
'src/core/ext/transport/binder/wire_format/transaction.cc',
'src/core/ext/transport/binder/wire_format/wire_reader_impl.cc',
'src/core/ext/transport/binder/wire_format/wire_writer.cc',
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/client/secure_credentials.cc',
'src/cpp/client/xds_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/auth_property_iterator.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/secure_auth_context.cc',
'src/cpp/common/secure_channel_arguments.cc',
'src/cpp/common/secure_create_auth_context.cc',
'src/cpp/common/tls_certificate_provider.cc',
'src/cpp/common/tls_certificate_verifier.cc',
'src/cpp/common/tls_credentials_options.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/secure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/server/xds_server_credentials.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
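      # C++ ALTS utilities (auth-context helpers).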
'target_name': 'grpc++_alts',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/common/alts_context.cc',
'src/cpp/common/alts_util.cc',
],
},
{
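      # Helpers for attaching and extracting rich error details on grpc::Status.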
'target_name': 'grpc++_error_details',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/util/error_details.cc',
],
},
{
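      # Server reflection: reflection.proto service implementation and its
      # server builder plugin.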
'target_name': 'grpc++_reflection',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/reflection/v1alpha/reflection.proto',
'src/cpp/ext/proto_server_reflection.cc',
'src/cpp/ext/proto_server_reflection_plugin.cc',
],
},
{
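      # Test-only peer class exposing grpc::Channel internals.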
'target_name': 'grpc++_test',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/client/channel_test_peer.cc',
],
},
{
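      # Test command-line configuration, built on absl flags parsing.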
'target_name': 'grpc++_test_config',
'type': 'static_library',
'dependencies': [
'absl/flags:parse',
'gpr',
],
'sources': [
'test/cpp/util/test_config_cc.cc',
],
},
{
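      # C++ test helpers: canned end2end certificates plus channel,
      # credentials, and string utilities.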
'target_name': 'grpc++_test_util',
'type': 'static_library',
'dependencies': [
'absl/flags:flag',
'grpc++',
'grpc_test_util',
],
'sources': [
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/cpp/util/byte_buffer_proto_helper.cc',
'test/cpp/util/create_test_channel.cc',
'test/cpp/util/string_ref_helper.cc',
'test/cpp/util/subprocess.cc',
'test/cpp/util/test_credentials_provider.cc',
],
},
{
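      # C++ API layer over 'grpc_unsecure'; the secure credentials/server
      # sources are omitted.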
'target_name': 'grpc++_unsecure',
'type': 'static_library',
'dependencies': [
'grpc_unsecure',
],
'sources': [
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/insecure_create_auth_context.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
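      # protoc code generators for the gRPC plugins
      # (C++, C#, Node, Objective-C, PHP, Python, Ruby).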
'target_name': 'grpc_plugin_support',
'type': 'static_library',
'dependencies': [
],
'sources': [
'src/compiler/cpp_generator.cc',
'src/compiler/csharp_generator.cc',
'src/compiler/node_generator.cc',
'src/compiler/objective_c_generator.cc',
'src/compiler/php_generator.cc',
'src/compiler/python_generator.cc',
'src/compiler/ruby_generator.cc',
],
},
{
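      # channelz introspection service (channelz.proto) and its server plugin.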
'target_name': 'grpcpp_channelz',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/channelz/channelz.proto',
'src/cpp/server/channelz/channelz_service.cc',
'src/cpp/server/channelz/channelz_service_plugin.cc',
],
},
{
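      # Vendored BoringSSL (third_party/boringssl-with-bazel) crypto and SSL sources.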
'target_name': 'boringssl',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/name_print.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/encrypted_client_hello.cc',
'third_party/boringssl-with-bazel/src/ssl/extensions.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
],
},
{
'target_name': 'boringssl_test_util',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/src/crypto/test/file_test.cc',
'third_party/boringssl-with-bazel/src/crypto/test/malloc.cc',
'third_party/boringssl-with-bazel/src/crypto/test/test_util.cc',
'third_party/boringssl-with-bazel/src/crypto/test/wycheproof_util.cc',
],
},
{
'target_name': 'benchmark',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/benchmark/src/benchmark.cc',
'third_party/benchmark/src/benchmark_api_internal.cc',
'third_party/benchmark/src/benchmark_main.cc',
'third_party/benchmark/src/benchmark_name.cc',
'third_party/benchmark/src/benchmark_register.cc',
'third_party/benchmark/src/benchmark_runner.cc',
'third_party/benchmark/src/colorprint.cc',
'third_party/benchmark/src/commandlineflags.cc',
'third_party/benchmark/src/complexity.cc',
'third_party/benchmark/src/console_reporter.cc',
'third_party/benchmark/src/counter.cc',
'third_party/benchmark/src/csv_reporter.cc',
'third_party/benchmark/src/json_reporter.cc',
'third_party/benchmark/src/perf_counters.cc',
'third_party/benchmark/src/reporter.cc',
'third_party/benchmark/src/sleep.cc',
'third_party/benchmark/src/statistics.cc',
'third_party/benchmark/src/string_util.cc',
'third_party/benchmark/src/sysinfo.cc',
'third_party/benchmark/src/timers.cc',
],
},
{
'target_name': 're2',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/re2/re2/bitstate.cc',
'third_party/re2/re2/compile.cc',
'third_party/re2/re2/dfa.cc',
'third_party/re2/re2/filtered_re2.cc',
'third_party/re2/re2/mimics_pcre.cc',
'third_party/re2/re2/nfa.cc',
'third_party/re2/re2/onepass.cc',
'third_party/re2/re2/parse.cc',
'third_party/re2/re2/perl_groups.cc',
'third_party/re2/re2/prefilter.cc',
'third_party/re2/re2/prefilter_tree.cc',
'third_party/re2/re2/prog.cc',
'third_party/re2/re2/re2.cc',
'third_party/re2/re2/regexp.cc',
'third_party/re2/re2/set.cc',
'third_party/re2/re2/simplify.cc',
'third_party/re2/re2/stringpiece.cc',
'third_party/re2/re2/tostring.cc',
'third_party/re2/re2/unicode_casefold.cc',
'third_party/re2/re2/unicode_groups.cc',
'third_party/re2/util/pcre.cc',
'third_party/re2/util/rune.cc',
'third_party/re2/util/strutil.cc',
],
},
{
'target_name': 'upb',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/upb/third_party/utf8_range/naive.c',
'third_party/upb/third_party/utf8_range/range2-neon.c',
'third_party/upb/third_party/utf8_range/range2-sse.c',
'third_party/upb/upb/decode_fast.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/json_encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/reflection.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/text_encode.c',
'third_party/upb/upb/upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
],
},
{
'target_name': 'z',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
],
},
]
}
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/credentials/xds/xds_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config_impl.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_buffer_api.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',
'src/core/lib/transport/handshaker_registry.cc',
'src/core/lib/transport/http_connect_handshaker.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/tcp_connect_handshaker.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/plugin_registry/grpc_plugin_registry_extra.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/key_logging/ssl_key_logging.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
],
},
{
'target_name': 'grpc_test_util',
'type': 'static_library',
'dependencies': [
'absl/debugging:failure_signal_handler',
'absl/debugging:stacktrace',
'absl/debugging:symbolize',
'grpc',
],
'sources': [
'test/core/event_engine/test_init.cc',
'test/core/util/build.cc',
'test/core/util/cmdline.cc',
'test/core/util/fuzzer_util.cc',
'test/core/util/grpc_profiler.cc',
'test/core/util/histogram.cc',
'test/core/util/mock_endpoint.cc',
'test/core/util/parse_hexstring.cc',
'test/core/util/passthru_endpoint.cc',
'test/core/util/port.cc',
'test/core/util/port_isolated_runtime_environment.cc',
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/test_tcp_server.cc',
'test/core/util/tls_utils.cc',
'test/core/util/tracer_util.cc',
],
},
{
'target_name': 'grpc_test_util_unsecure',
'type': 'static_library',
'dependencies': [
'absl/debugging:failure_signal_handler',
'absl/debugging:stacktrace',
'absl/debugging:symbolize',
'grpc_unsecure',
],
'sources': [
'test/core/event_engine/test_init.cc',
'test/core/util/build.cc',
'test/core/util/cmdline.cc',
'test/core/util/fuzzer_util.cc',
'test/core/util/grpc_profiler.cc',
'test/core/util/histogram.cc',
'test/core/util/mock_endpoint.cc',
'test/core/util/parse_hexstring.cc',
'test/core/util/passthru_endpoint.cc',
'test/core/util/port.cc',
'test/core/util/port_isolated_runtime_environment.cc',
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/test_tcp_server.cc',
'test/core/util/tracer_util.cc',
],
},
{
'target_name': 'grpc_unsecure',
'type': 'static_library',
'dependencies': [
'absl/container:flat_hash_map',
'absl/container:inlined_vector',
'absl/functional:bind_front',
'absl/hash:hash',
'absl/meta:type_traits',
'absl/status:statusor',
'absl/types:span',
'absl/types:variant',
'absl/utility:utility',
'gpr',
'address_sorting',
],
'sources': [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/channel_idle/channel_idle_filter.cc',
'src/core/ext/filters/channel_idle/idle_filter_state.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/polling_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_channel/subchannel_stream_client.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_stack_builder_impl.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/promise_based_filter.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/channel_args_endpoint_config.cc',
'src/core/lib/event_engine/default_event_engine_factory.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/memory_allocator.cc',
'src/core/lib/event_engine/resolved_address.cc',
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gprpp/time.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/sockaddr_utils_posix.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
'src/core/lib/resolver/server_address.cc',
'src/core/lib/resource_quota/api.cc',
'src/core/lib/resource_quota/arena.cc',
'src/core/lib/resource_quota/memory_quota.cc',
'src/core/lib/resource_quota/resource_quota.cc',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/call_creds_util.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config_impl.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_buffer_api.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',
'src/core/lib/transport/handshaker_registry.cc',
'src/core/lib/transport/http_connect_handshaker.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/tcp_connect_handshaker.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/plugin_registry/grpc_plugin_registry_noextra.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
],
},
{
'target_name': 'benchmark_helpers',
'type': 'static_library',
'dependencies': [
'benchmark',
'grpc++_unsecure',
'grpc_test_util_unsecure',
'grpc++_test_config',
],
'sources': [
'src/proto/grpc/testing/echo.proto',
'src/proto/grpc/testing/echo_messages.proto',
'src/proto/grpc/testing/simple_messages.proto',
'src/proto/grpc/testing/xds/v3/orca_load_report.proto',
'test/cpp/microbenchmarks/helpers.cc',
],
},
{
'target_name': 'grpc++',
'type': 'static_library',
'dependencies': [
'grpc',
],
'sources': [
'src/core/ext/transport/binder/client/binder_connector.cc',
'src/core/ext/transport/binder/client/channel_create.cc',
'src/core/ext/transport/binder/client/channel_create_impl.cc',
'src/core/ext/transport/binder/client/connection_id_generator.cc',
'src/core/ext/transport/binder/client/endpoint_binder_pool.cc',
'src/core/ext/transport/binder/client/jni_utils.cc',
'src/core/ext/transport/binder/client/security_policy_setting.cc',
'src/core/ext/transport/binder/security_policy/binder_security_policy.cc',
'src/core/ext/transport/binder/server/binder_server.cc',
'src/core/ext/transport/binder/server/binder_server_credentials.cc',
'src/core/ext/transport/binder/transport/binder_transport.cc',
'src/core/ext/transport/binder/utils/ndk_binder.cc',
'src/core/ext/transport/binder/utils/transport_stream_receiver_impl.cc',
'src/core/ext/transport/binder/wire_format/binder_android.cc',
'src/core/ext/transport/binder/wire_format/binder_constants.cc',
'src/core/ext/transport/binder/wire_format/transaction.cc',
'src/core/ext/transport/binder/wire_format/wire_reader_impl.cc',
'src/core/ext/transport/binder/wire_format/wire_writer.cc',
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/client/secure_credentials.cc',
'src/cpp/client/xds_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/auth_property_iterator.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/secure_auth_context.cc',
'src/cpp/common/secure_channel_arguments.cc',
'src/cpp/common/secure_create_auth_context.cc',
'src/cpp/common/tls_certificate_provider.cc',
'src/cpp/common/tls_certificate_verifier.cc',
'src/cpp/common/tls_credentials_options.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/secure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/server/xds_server_credentials.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
'target_name': 'grpc++_alts',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/common/alts_context.cc',
'src/cpp/common/alts_util.cc',
],
},
{
'target_name': 'grpc++_error_details',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/util/error_details.cc',
],
},
{
'target_name': 'grpc++_reflection',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/reflection/v1alpha/reflection.proto',
'src/cpp/ext/proto_server_reflection.cc',
'src/cpp/ext/proto_server_reflection_plugin.cc',
],
},
{
'target_name': 'grpc++_test',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/client/channel_test_peer.cc',
],
},
{
'target_name': 'grpc++_test_config',
'type': 'static_library',
'dependencies': [
'absl/flags:parse',
'gpr',
],
'sources': [
'test/cpp/util/test_config_cc.cc',
],
},
{
'target_name': 'grpc++_test_util',
'type': 'static_library',
'dependencies': [
'absl/flags:flag',
'grpc++',
'grpc_test_util',
],
'sources': [
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/cpp/util/byte_buffer_proto_helper.cc',
'test/cpp/util/create_test_channel.cc',
'test/cpp/util/string_ref_helper.cc',
'test/cpp/util/subprocess.cc',
'test/cpp/util/test_credentials_provider.cc',
],
},
{
'target_name': 'grpc++_unsecure',
'type': 'static_library',
'dependencies': [
'grpc_unsecure',
],
'sources': [
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/insecure_create_auth_context.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
'target_name': 'grpc_plugin_support',
'type': 'static_library',
'dependencies': [
],
'sources': [
'src/compiler/cpp_generator.cc',
'src/compiler/csharp_generator.cc',
'src/compiler/node_generator.cc',
'src/compiler/objective_c_generator.cc',
'src/compiler/php_generator.cc',
'src/compiler/python_generator.cc',
'src/compiler/ruby_generator.cc',
],
},
{
'target_name': 'grpcpp_channelz',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/channelz/channelz.proto',
'src/cpp/server/channelz/channelz_service.cc',
'src/cpp/server/channelz/channelz_service_plugin.cc',
],
},
{
'target_name': 'boringssl',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/name_print.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/encrypted_client_hello.cc',
'third_party/boringssl-with-bazel/src/ssl/extensions.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
],
},
{
'target_name': 'boringssl_test_util',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/src/crypto/test/file_test.cc',
'third_party/boringssl-with-bazel/src/crypto/test/malloc.cc',
'third_party/boringssl-with-bazel/src/crypto/test/test_util.cc',
'third_party/boringssl-with-bazel/src/crypto/test/wycheproof_util.cc',
],
},
{
'target_name': 'benchmark',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/benchmark/src/benchmark.cc',
'third_party/benchmark/src/benchmark_api_internal.cc',
'third_party/benchmark/src/benchmark_main.cc',
'third_party/benchmark/src/benchmark_name.cc',
'third_party/benchmark/src/benchmark_register.cc',
'third_party/benchmark/src/benchmark_runner.cc',
'third_party/benchmark/src/colorprint.cc',
'third_party/benchmark/src/commandlineflags.cc',
'third_party/benchmark/src/complexity.cc',
'third_party/benchmark/src/console_reporter.cc',
'third_party/benchmark/src/counter.cc',
'third_party/benchmark/src/csv_reporter.cc',
'third_party/benchmark/src/json_reporter.cc',
'third_party/benchmark/src/perf_counters.cc',
'third_party/benchmark/src/reporter.cc',
'third_party/benchmark/src/sleep.cc',
'third_party/benchmark/src/statistics.cc',
'third_party/benchmark/src/string_util.cc',
'third_party/benchmark/src/sysinfo.cc',
'third_party/benchmark/src/timers.cc',
],
},
{
'target_name': 're2',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/re2/re2/bitstate.cc',
'third_party/re2/re2/compile.cc',
'third_party/re2/re2/dfa.cc',
'third_party/re2/re2/filtered_re2.cc',
'third_party/re2/re2/mimics_pcre.cc',
'third_party/re2/re2/nfa.cc',
'third_party/re2/re2/onepass.cc',
'third_party/re2/re2/parse.cc',
'third_party/re2/re2/perl_groups.cc',
'third_party/re2/re2/prefilter.cc',
'third_party/re2/re2/prefilter_tree.cc',
'third_party/re2/re2/prog.cc',
'third_party/re2/re2/re2.cc',
'third_party/re2/re2/regexp.cc',
'third_party/re2/re2/set.cc',
'third_party/re2/re2/simplify.cc',
'third_party/re2/re2/stringpiece.cc',
'third_party/re2/re2/tostring.cc',
'third_party/re2/re2/unicode_casefold.cc',
'third_party/re2/re2/unicode_groups.cc',
'third_party/re2/util/pcre.cc',
'third_party/re2/util/rune.cc',
'third_party/re2/util/strutil.cc',
],
},
{
'target_name': 'upb',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/upb/third_party/utf8_range/naive.c',
'third_party/upb/third_party/utf8_range/range2-neon.c',
'third_party/upb/third_party/utf8_range/range2-sse.c',
'third_party/upb/upb/decode_fast.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/json_encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/reflection.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/text_encode.c',
'third_party/upb/upb/upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
],
},
{
'target_name': 'z',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
],
},
]
}
| true
| true
|
f715296fb8250e6a9fc65fab4030ce645556e39d
| 30,603
|
py
|
Python
|
tests/httpwrappers/tests.py
|
ioinfinity/django
|
b6a0ab523751c13ae3eaec102de70f58f73a0d94
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/httpwrappers/tests.py
|
ioinfinity/django
|
b6a0ab523751c13ae3eaec102de70f58f73a0d94
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2020-07-02T21:10:44.000Z
|
2020-07-02T21:11:21.000Z
|
tests/httpwrappers/tests.py
|
ioinfinity/django
|
b6a0ab523751c13ae3eaec102de70f58f73a0d94
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2020-08-11T18:46:32.000Z
|
2020-08-11T18:46:32.000Z
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals

import copy
import json
import os
import pickle
import unittest
import uuid

from django.core.exceptions import SuspiciousOperation
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import request_finished
from django.db import close_old_connections
from django.http import (
BadHeaderError, HttpResponse, HttpResponseNotAllowed,
HttpResponseNotModified, HttpResponsePermanentRedirect,
HttpResponseRedirect, JsonResponse, QueryDict, SimpleCookie,
StreamingHttpResponse, parse_cookie,
)
from django.test import SimpleTestCase
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_str
from django.utils.functional import lazystr


class QueryDictTests(unittest.TestCase):
def test_create_with_no_args(self):
self.assertEqual(QueryDict(), QueryDict(str('')))

    def test_missing_key(self):
q = QueryDict()
with self.assertRaises(KeyError):
q.__getitem__('foo')
def test_immutability(self):
q = QueryDict()
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
def test_immutable_get_with_default(self):
q = QueryDict()
self.assertEqual(q.get('foo', 'default'), 'default')
def test_immutable_basic_operations(self):
q = QueryDict()
self.assertEqual(q.getlist('foo'), [])
if six.PY2:
self.assertIs(q.has_key('foo'), False)
self.assertNotIn('foo', q)
self.assertEqual(list(six.iteritems(q)), [])
self.assertEqual(list(six.iterlists(q)), [])
self.assertEqual(list(six.iterkeys(q)), [])
self.assertEqual(list(six.itervalues(q)), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), '')
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict(str('foo=bar'))
self.assertEqual(q['foo'], 'bar')
with self.assertRaises(KeyError):
q.__getitem__('bar')
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
self.assertEqual(q.get('foo', 'default'), 'bar')
self.assertEqual(q.get('bar', 'default'), 'default')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.getlist('bar'), [])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
if six.PY2:
self.assertTrue(q.has_key('foo'))
self.assertIn('foo', q)
if six.PY2:
self.assertFalse(q.has_key('bar'))
self.assertNotIn('bar', q)
self.assertEqual(list(six.iteritems(q)), [('foo', 'bar')])
self.assertEqual(list(six.iterlists(q)), [('foo', ['bar'])])
self.assertEqual(list(six.iterkeys(q)), ['foo'])
self.assertEqual(list(six.itervalues(q)), ['bar'])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault('foo', 'bar')
self.assertEqual(q.urlencode(), 'foo=bar')
def test_urlencode(self):
q = QueryDict(mutable=True)
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
q = QueryDict(mutable=True)
q['next'] = '/t\xebst&key/'
self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict().copy()
with self.assertRaises(KeyError):
q.__getitem__("foo")
q['name'] = 'john'
self.assertEqual(q['name'], 'john')
def test_mutable_delete(self):
q = QueryDict(mutable=True)
q['name'] = 'john'
del q['name']
self.assertNotIn('name', q)
def test_basic_mutable_operations(self):
q = QueryDict(mutable=True)
q['name'] = 'john'
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.get('name', 'default'), 'john')
self.assertEqual(q.getlist('name'), ['john'])
self.assertEqual(q.getlist('foo'), [])
q.setlist('foo', ['bar', 'baz'])
self.assertEqual(q.get('foo', 'default'), 'baz')
self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
q.appendlist('foo', 'another')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
self.assertEqual(q['foo'], 'another')
if six.PY2:
self.assertTrue(q.has_key('foo'))
self.assertIn('foo', q)
self.assertListEqual(sorted(six.iteritems(q)),
[('foo', 'another'), ('name', 'john')])
self.assertListEqual(sorted(six.iterlists(q)),
[('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
self.assertListEqual(sorted(six.iterkeys(q)),
['foo', 'name'])
self.assertListEqual(sorted(six.itervalues(q)),
['another', 'john'])
q.update({'foo': 'hello'})
self.assertEqual(q['foo'], 'hello')
self.assertEqual(q.get('foo', 'not available'), 'hello')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo', 'not there'), 'not there')
self.assertEqual(q.get('foo', 'not there'), 'not there')
self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
self.assertEqual(q['foo'], 'bar')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
"""Test QueryDict with two key/value pairs with same keys."""
q = QueryDict(str('vote=yes&vote=no'))
self.assertEqual(q['vote'], 'no')
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
self.assertEqual(q.get('vote', 'default'), 'no')
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.getlist('vote'), ['yes', 'no'])
self.assertEqual(q.getlist('foo'), [])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar', 'baz'])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar', 'baz'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
if six.PY2:
self.assertIs(q.has_key('vote'), True)
self.assertIn('vote', q)
if six.PY2:
self.assertIs(q.has_key('foo'), False)
self.assertNotIn('foo', q)
self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
self.assertEqual(list(six.iterkeys(q)), ['vote'])
self.assertEqual(list(six.itervalues(q)), ['no'])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault('foo', 'bar')
with self.assertRaises(AttributeError):
q.__delitem__('vote')
if six.PY2:
def test_invalid_input_encoding(self):
"""
QueryDicts must be able to handle invalid input encoding (in this
case, bad UTF-8 encoding), falling back to ISO-8859-1 decoding.
This test doesn't apply under Python 3 because the URL is a string
and not a bytestring.
"""
q = QueryDict(str(b'foo=bar&foo=\xff'))
self.assertEqual(q['foo'], '\xff')
self.assertEqual(q.getlist('foo'), ['bar', '\xff'])
def test_pickle(self):
q = QueryDict()
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict(str('a=b&c=d'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict(str('a=b&c=d&a=1'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
def test_update_from_querydict(self):
"""Regression test for #8278: QueryDict.update(QueryDict)"""
x = QueryDict(str("a=1&a=2"), mutable=True)
y = QueryDict(str("a=3&a=4"))
x.update(y)
self.assertEqual(x.getlist('a'), ['1', '2', '3', '4'])
def test_non_default_encoding(self):
"""#13572 - QueryDict with a non-default encoding"""
q = QueryDict(str('cur=%A4'), encoding='iso-8859-15')
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
q = q.copy()
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
self.assertEqual(copy.copy(q).encoding, 'iso-8859-15')
self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15')
class HttpResponseTests(unittest.TestCase):
def test_headers_type(self):
r = HttpResponse()
# The following tests explicitly test types in addition to values
# because in Python 2 u'foo' == b'foo'.
# ASCII unicode or bytes values are converted to native strings.
r['key'] = 'test'
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
r['key'] = 'test'.encode('ascii')
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
self.assertIn(b'test', r.serialize_headers())
# Latin-1 unicode or bytes values are also converted to native strings.
r['key'] = 'café'
self.assertEqual(r['key'], force_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
r['key'] = 'café'.encode('latin-1')
self.assertEqual(r['key'], force_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
self.assertIn('café'.encode('latin-1'), r.serialize_headers())
# Other unicode values are MIME-encoded (there's no way to pass them as bytes).
r['key'] = '†'
self.assertEqual(r['key'], str('=?utf-8?b?4oCg?='))
self.assertIsInstance(r['key'], str)
self.assertIn(b'=?utf-8?b?4oCg?=', r.serialize_headers())
# The response also converts unicode or bytes keys to strings, but requires
# them to contain ASCII
r = HttpResponse()
del r['Content-Type']
r['foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
del r['Content-Type']
r[b'foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
with self.assertRaises(UnicodeError):
r.__setitem__('føø', 'bar')
with self.assertRaises(UnicodeError):
r.__setitem__('føø'.encode('utf-8'), 'bar')
def test_long_line(self):
# Bug #20889: long lines trigger newlines to be added to headers
# (which is not allowed due to bug #10188)
h = HttpResponse()
f = 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88'.encode('latin-1')
f = f.decode('utf-8')
h['Content-Disposition'] = 'attachment; filename="%s"' % f
        # This one triggers http://bugs.python.org/issue20747, that is,
        # Python will itself insert a newline in the header
h['Content-Disposition'] = 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"'
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
with self.assertRaises(BadHeaderError):
r.__setitem__('test\rstr', 'test')
with self.assertRaises(BadHeaderError):
r.__setitem__('test\nstr', 'test')
def test_dict_behavior(self):
"""
Test for bug #14020: Make HttpResponse.get work like dict.get
"""
r = HttpResponse()
self.assertIsNone(r.get('test'))
def test_non_string_content(self):
# Bug 16494: HttpResponse should behave consistently with non-strings
r = HttpResponse(12345)
self.assertEqual(r.content, b'12345')
# test content via property
r = HttpResponse()
r.content = 12345
self.assertEqual(r.content, b'12345')
def test_iter_content(self):
r = HttpResponse(['abc', 'def', 'ghi'])
self.assertEqual(r.content, b'abcdefghi')
# test iter content via property
r = HttpResponse()
r.content = ['idan', 'alex', 'jacob']
self.assertEqual(r.content, b'idanalexjacob')
r = HttpResponse()
r.content = [1, 2, 3]
self.assertEqual(r.content, b'123')
# test odd inputs
r = HttpResponse()
r.content = ['1', '2', 3, '\u079e']
# '\xde\x9e' == unichr(1950).encode('utf-8')
self.assertEqual(r.content, b'123\xde\x9e')
# .content can safely be accessed multiple times.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, r.content)
self.assertEqual(r.content, b'helloworld')
# __iter__ can safely be called multiple times (#20187).
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(b''.join(r), b'helloworld')
# Accessing .content still works.
self.assertEqual(r.content, b'helloworld')
# Accessing .content also works if the response was iterated first.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(r.content, b'helloworld')
# Additional content can be written to the response.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, b'helloworld')
r.write('!')
self.assertEqual(r.content, b'helloworld!')
def test_iterator_isnt_rewound(self):
# Regression test for #13222
r = HttpResponse('abc')
i = iter(r)
self.assertEqual(list(i), [b'abc'])
self.assertEqual(list(i), [])
def test_lazy_content(self):
r = HttpResponse(lazystr('helloworld'))
self.assertEqual(r.content, b'helloworld')
def test_file_interface(self):
r = HttpResponse()
r.write(b"hello")
self.assertEqual(r.tell(), 5)
r.write("привет")
self.assertEqual(r.tell(), 17)
r = HttpResponse(['abc'])
r.write('def')
self.assertEqual(r.tell(), 6)
self.assertEqual(r.content, b'abcdef')
# with Content-Encoding header
r = HttpResponse()
r['Content-Encoding'] = 'winning'
r.write(b'abc')
r.write(b'def')
self.assertEqual(r.content, b'abcdef')
def test_stream_interface(self):
r = HttpResponse('asdf')
self.assertEqual(r.getvalue(), b'asdf')
r = HttpResponse()
self.assertIs(r.writable(), True)
r.writelines(['foo\n', 'bar\n', 'baz\n'])
self.assertEqual(r.content, b'foo\nbar\nbaz\n')
def test_unsafe_redirect(self):
bad_urls = [
'data:text/html,<script>window.alert("xss")</script>',
'mailto:test@example.com',
'file:///etc/passwd',
]
for url in bad_urls:
with self.assertRaises(SuspiciousOperation):
HttpResponseRedirect(url)
with self.assertRaises(SuspiciousOperation):
HttpResponsePermanentRedirect(url)
class HttpResponseSubclassesTests(SimpleTestCase):
def test_redirect(self):
response = HttpResponseRedirect('/redirected/')
self.assertEqual(response.status_code, 302)
# Test that standard HttpResponse init args can be used
response = HttpResponseRedirect(
'/redirected/',
content='The resource has temporarily moved',
content_type='text/html',
)
self.assertContains(response, 'The resource has temporarily moved', status_code=302)
# Test that url attribute is right
self.assertEqual(response.url, response['Location'])
def test_redirect_lazy(self):
"""Make sure HttpResponseRedirect works with lazy strings."""
r = HttpResponseRedirect(lazystr('/redirected/'))
self.assertEqual(r.url, '/redirected/')
def test_redirect_repr(self):
response = HttpResponseRedirect('/redirected/')
expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="/redirected/">'
self.assertEqual(repr(response), expected)
def test_not_modified(self):
response = HttpResponseNotModified()
self.assertEqual(response.status_code, 304)
# 304 responses should not have content/content-type
with self.assertRaises(AttributeError):
response.content = "Hello dear"
self.assertNotIn('content-type', response)
def test_not_allowed(self):
response = HttpResponseNotAllowed(['GET'])
self.assertEqual(response.status_code, 405)
# Test that standard HttpResponse init args can be used
response = HttpResponseNotAllowed(['GET'], content='Only the GET method is allowed', content_type='text/html')
self.assertContains(response, 'Only the GET method is allowed', status_code=405)
def test_not_allowed_repr(self):
response = HttpResponseNotAllowed(['GET', 'OPTIONS'], content_type='text/plain')
expected = '<HttpResponseNotAllowed [GET, OPTIONS] status_code=405, "text/plain">'
self.assertEqual(repr(response), expected)
class JsonResponseTests(SimpleTestCase):
def test_json_response_non_ascii(self):
data = {'key': 'łóżko'}
response = JsonResponse(data)
self.assertEqual(json.loads(response.content.decode()), data)
def test_json_response_raises_type_error_with_default_setting(self):
with self.assertRaisesMessage(
TypeError,
'In order to allow non-dict objects to be serialized set the '
'safe parameter to False'
):
JsonResponse([1, 2, 3])
def test_json_response_text(self):
response = JsonResponse('foobar', safe=False)
self.assertEqual(json.loads(response.content.decode()), 'foobar')
def test_json_response_list(self):
response = JsonResponse(['foo', 'bar'], safe=False)
self.assertEqual(json.loads(response.content.decode()), ['foo', 'bar'])
def test_json_response_uuid(self):
u = uuid.uuid4()
response = JsonResponse(u, safe=False)
self.assertEqual(json.loads(response.content.decode()), str(u))
def test_json_response_custom_encoder(self):
class CustomDjangoJSONEncoder(DjangoJSONEncoder):
def encode(self, o):
return json.dumps({'foo': 'bar'})
response = JsonResponse({}, encoder=CustomDjangoJSONEncoder)
self.assertEqual(json.loads(response.content.decode()), {'foo': 'bar'})
def test_json_response_passing_arguments_to_json_dumps(self):
response = JsonResponse({'foo': 'bar'}, json_dumps_params={'indent': 2})
self.assertEqual(response.content.decode(), '{\n "foo": "bar"\n}')
class StreamingHttpResponseTests(SimpleTestCase):
def test_streaming_response(self):
r = StreamingHttpResponse(iter(['hello', 'world']))
# iterating over the response itself yields bytestring chunks.
chunks = list(r)
self.assertEqual(chunks, [b'hello', b'world'])
for chunk in chunks:
self.assertIsInstance(chunk, six.binary_type)
# and the response can only be iterated once.
self.assertEqual(list(r), [])
# even when a sequence that can be iterated many times, like a list,
# is given as content.
r = StreamingHttpResponse(['abc', 'def'])
self.assertEqual(list(r), [b'abc', b'def'])
self.assertEqual(list(r), [])
# iterating over Unicode strings still yields bytestring chunks.
r.streaming_content = iter(['hello', 'café'])
chunks = list(r)
# '\xc3\xa9' == unichr(233).encode('utf-8')
self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9'])
for chunk in chunks:
self.assertIsInstance(chunk, six.binary_type)
# streaming responses don't have a `content` attribute.
self.assertFalse(hasattr(r, 'content'))
# and you can't accidentally assign to a `content` attribute.
with self.assertRaises(AttributeError):
r.content = 'xyz'
# but they do have a `streaming_content` attribute.
self.assertTrue(hasattr(r, 'streaming_content'))
# that exists so we can check if a response is streaming, and wrap or
# replace the content iterator.
r.streaming_content = iter(['abc', 'def'])
r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
self.assertEqual(list(r), [b'ABC', b'DEF'])
# coercing a streaming response to bytes doesn't return a complete HTTP
        # message like a regular response does. It only gives us the headers.
r = StreamingHttpResponse(iter(['hello', 'world']))
self.assertEqual(
six.binary_type(r), b'Content-Type: text/html; charset=utf-8')
# and this won't consume its content.
self.assertEqual(list(r), [b'hello', b'world'])
# additional content cannot be written to the response.
r = StreamingHttpResponse(iter(['hello', 'world']))
with self.assertRaises(Exception):
r.write('!')
# and we can't tell the current position.
with self.assertRaises(Exception):
r.tell()
r = StreamingHttpResponse(iter(['hello', 'world']))
self.assertEqual(r.getvalue(), b'helloworld')
class FileCloseTests(SimpleTestCase):
def setUp(self):
# Disable the request_finished signal during this test
# to avoid interfering with the database connection.
request_finished.disconnect(close_old_connections)
def tearDown(self):
request_finished.connect(close_old_connections)
def test_response(self):
filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
# file isn't closed until we close the response.
file1 = open(filename)
r = HttpResponse(file1)
self.assertTrue(file1.closed)
r.close()
        # when multiple files are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = HttpResponse(file1)
r.content = file2
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
def test_streaming_response(self):
filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
# file isn't closed until we close the response.
file1 = open(filename)
r = StreamingHttpResponse(file1)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
        # when multiple files are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = StreamingHttpResponse(file1)
r.streaming_content = file2
self.assertFalse(file1.closed)
self.assertFalse(file2.closed)
r.close()
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
class CookieTests(unittest.TestCase):
def test_encode(self):
"""
Test that we don't output tricky characters in encoded value
"""
c = SimpleCookie()
c['test'] = "An,awkward;value"
self.assertNotIn(";", c.output().rstrip(';')) # IE compat
self.assertNotIn(",", c.output().rstrip(';')) # Safari compat
def test_decode(self):
"""
Test that we can still preserve semi-colons and commas
"""
c = SimpleCookie()
c['test'] = "An,awkward;value"
c2 = SimpleCookie()
c2.load(c.output()[12:])
self.assertEqual(c['test'].value, c2['test'].value)
c3 = parse_cookie(c.output()[12:])
self.assertEqual(c['test'].value, c3['test'])
def test_decode_2(self):
"""
Test that we haven't broken normal encoding
"""
c = SimpleCookie()
c['test'] = b"\xf0"
c2 = SimpleCookie()
c2.load(c.output()[12:])
self.assertEqual(c['test'].value, c2['test'].value)
c3 = parse_cookie(c.output()[12:])
self.assertEqual(c['test'].value, c3['test'])
def test_nonstandard_keys(self):
"""
Test that a single non-standard cookie name doesn't affect all cookies. Ticket #13007.
"""
self.assertIn('good_cookie', parse_cookie('good_cookie=yes;bad:cookie=yes').keys())
def test_repeated_nonstandard_keys(self):
"""
Test that a repeated non-standard name doesn't affect all cookies. Ticket #15852
"""
self.assertIn('good_cookie', parse_cookie('a:=b; a:=c; good_cookie=yes').keys())
def test_python_cookies(self):
"""
Test cases copied from Python's Lib/test/test_http_cookies.py
"""
self.assertEqual(parse_cookie('chips=ahoy; vienna=finger'), {'chips': 'ahoy', 'vienna': 'finger'})
# Here parse_cookie() differs from Python's cookie parsing in that it
# treats all semicolons as delimiters, even within quotes.
self.assertEqual(
parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'),
{'keebler': '"E=mc2', 'L': '\\"Loves\\"', 'fudge': '\\012', '': '"'}
)
# Illegal cookies that have an '=' char in an unquoted value.
self.assertEqual(parse_cookie('keebler=E=mc2'), {'keebler': 'E=mc2'})
# Cookies with ':' character in their name.
self.assertEqual(parse_cookie('key:term=value:term'), {'key:term': 'value:term'})
# Cookies with '[' and ']'.
self.assertEqual(parse_cookie('a=b; c=[; d=r; f=h'), {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'})
def test_cookie_edgecases(self):
# Cookies that RFC6265 allows.
self.assertEqual(parse_cookie('a=b; Domain=example.com'), {'a': 'b', 'Domain': 'example.com'})
# parse_cookie() has historically kept only the last cookie with the
# same name.
self.assertEqual(parse_cookie('a=b; h=i; a=c'), {'a': 'c', 'h': 'i'})
def test_invalid_cookies(self):
"""
Cookie strings that go against RFC6265 but browsers will send if set
via document.cookie.
"""
# Chunks without an equals sign appear as unnamed values per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
self.assertIn('django_language', parse_cookie('abc=def; unnamed; django_language=en').keys())
        # Even a double quote may be an unnamed value.
self.assertEqual(parse_cookie('a=b; "; c=d'), {'a': 'b', '': '"', 'c': 'd'})
# Spaces in names and values, and an equals sign in values.
self.assertEqual(parse_cookie('a b c=d e = f; gh=i'), {'a b c': 'd e = f', 'gh': 'i'})
# More characters the spec forbids.
self.assertEqual(parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'), {'a b,c<>@:/[]?{}': 'd " =e,f g'})
# Unicode characters. The spec only allows ASCII.
self.assertEqual(parse_cookie('saint=André Bessette'), {'saint': force_str('André Bessette')})
# Browsers don't send extra whitespace or semicolons in Cookie headers,
# but parse_cookie() should parse whitespace the same way
# document.cookie parses whitespace.
self.assertEqual(parse_cookie(' = b ; ; = ; c = ; '), {'': 'b', 'c': ''})
def test_httponly_after_load(self):
"""
Test that we can use httponly attribute on cookies that we load
"""
c = SimpleCookie()
c.load("name=val")
c['name']['httponly'] = True
self.assertTrue(c['name']['httponly'])
def test_load_dict(self):
c = SimpleCookie()
c.load({'name': 'val'})
self.assertEqual(c['name'].value, 'val')
@unittest.skipUnless(six.PY2, "PY3 throws an exception on invalid cookie keys.")
def test_bad_cookie(self):
"""
Regression test for #18403
"""
r = HttpResponse()
r.set_cookie("a:.b/", 1)
self.assertEqual(len(r.cookies.bad_cookies), 1)
def test_pickle(self):
rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1'
expected_output = 'Set-Cookie: %s' % rawdata
C = SimpleCookie()
C.load(rawdata)
self.assertEqual(C.output(), expected_output)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
C1 = pickle.loads(pickle.dumps(C, protocol=proto))
self.assertEqual(C1.output(), expected_output)
| 38.935115
| 118
| 0.60151
|
from __future__ import unicode_literals
import copy
import json
import os
import pickle
import unittest
import uuid
from django.core.exceptions import SuspiciousOperation
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import request_finished
from django.db import close_old_connections
from django.http import (
BadHeaderError, HttpResponse, HttpResponseNotAllowed,
HttpResponseNotModified, HttpResponsePermanentRedirect,
HttpResponseRedirect, JsonResponse, QueryDict, SimpleCookie,
StreamingHttpResponse, parse_cookie,
)
from django.test import SimpleTestCase
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import force_str
from django.utils.functional import lazystr
class QueryDictTests(unittest.TestCase):
def test_create_with_no_args(self):
self.assertEqual(QueryDict(), QueryDict(str('')))
def test_missing_key(self):
q = QueryDict()
with self.assertRaises(KeyError):
q.__getitem__('foo')
def test_immutability(self):
q = QueryDict()
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
def test_immutable_get_with_default(self):
q = QueryDict()
self.assertEqual(q.get('foo', 'default'), 'default')
def test_immutable_basic_operations(self):
q = QueryDict()
self.assertEqual(q.getlist('foo'), [])
if six.PY2:
self.assertIs(q.has_key('foo'), False)
self.assertNotIn('foo', q)
self.assertEqual(list(six.iteritems(q)), [])
self.assertEqual(list(six.iterlists(q)), [])
self.assertEqual(list(six.iterkeys(q)), [])
self.assertEqual(list(six.itervalues(q)), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), '')
def test_single_key_value(self):
q = QueryDict(str('foo=bar'))
self.assertEqual(q['foo'], 'bar')
with self.assertRaises(KeyError):
q.__getitem__('bar')
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
self.assertEqual(q.get('foo', 'default'), 'bar')
self.assertEqual(q.get('bar', 'default'), 'default')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertEqual(q.getlist('bar'), [])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
if six.PY2:
self.assertTrue(q.has_key('foo'))
self.assertIn('foo', q)
if six.PY2:
self.assertFalse(q.has_key('bar'))
self.assertNotIn('bar', q)
self.assertEqual(list(six.iteritems(q)), [('foo', 'bar')])
self.assertEqual(list(six.iterlists(q)), [('foo', ['bar'])])
self.assertEqual(list(six.iterkeys(q)), ['foo'])
self.assertEqual(list(six.itervalues(q)), ['bar'])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault('foo', 'bar')
self.assertEqual(q.urlencode(), 'foo=bar')
def test_urlencode(self):
q = QueryDict(mutable=True)
q['next'] = '/a&b/'
self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
q = QueryDict(mutable=True)
q['next'] = '/t\xebst&key/'
self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
def test_mutable_copy(self):
q = QueryDict().copy()
with self.assertRaises(KeyError):
q.__getitem__("foo")
q['name'] = 'john'
self.assertEqual(q['name'], 'john')
def test_mutable_delete(self):
q = QueryDict(mutable=True)
q['name'] = 'john'
del q['name']
self.assertNotIn('name', q)
def test_basic_mutable_operations(self):
q = QueryDict(mutable=True)
q['name'] = 'john'
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.get('name', 'default'), 'john')
self.assertEqual(q.getlist('name'), ['john'])
self.assertEqual(q.getlist('foo'), [])
q.setlist('foo', ['bar', 'baz'])
self.assertEqual(q.get('foo', 'default'), 'baz')
self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
q.appendlist('foo', 'another')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
self.assertEqual(q['foo'], 'another')
if six.PY2:
self.assertTrue(q.has_key('foo'))
self.assertIn('foo', q)
self.assertListEqual(sorted(six.iteritems(q)),
[('foo', 'another'), ('name', 'john')])
self.assertListEqual(sorted(six.iterlists(q)),
[('foo', ['bar', 'baz', 'another']), ('name', ['john'])])
self.assertListEqual(sorted(six.iterkeys(q)),
['foo', 'name'])
self.assertListEqual(sorted(six.itervalues(q)),
['another', 'john'])
q.update({'foo': 'hello'})
self.assertEqual(q['foo'], 'hello')
self.assertEqual(q.get('foo', 'not available'), 'hello')
self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo'), ['bar', 'baz', 'another', 'hello'])
self.assertEqual(q.pop('foo', 'not there'), 'not there')
self.assertEqual(q.get('foo', 'not there'), 'not there')
self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
self.assertEqual(q['foo'], 'bar')
self.assertEqual(q.getlist('foo'), ['bar'])
self.assertIn(q.urlencode(), ['foo=bar&name=john', 'name=john&foo=bar'])
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
q = QueryDict(str('vote=yes&vote=no'))
self.assertEqual(q['vote'], 'no')
with self.assertRaises(AttributeError):
q.__setitem__('something', 'bar')
self.assertEqual(q.get('vote', 'default'), 'no')
self.assertEqual(q.get('foo', 'default'), 'default')
self.assertEqual(q.getlist('vote'), ['yes', 'no'])
self.assertEqual(q.getlist('foo'), [])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar', 'baz'])
with self.assertRaises(AttributeError):
q.setlist('foo', ['bar', 'baz'])
with self.assertRaises(AttributeError):
q.appendlist('foo', ['bar'])
if six.PY2:
self.assertIs(q.has_key('vote'), True)
self.assertIn('vote', q)
if six.PY2:
self.assertIs(q.has_key('foo'), False)
self.assertNotIn('foo', q)
self.assertEqual(list(six.iteritems(q)), [('vote', 'no')])
self.assertEqual(list(six.iterlists(q)), [('vote', ['yes', 'no'])])
self.assertEqual(list(six.iterkeys(q)), ['vote'])
self.assertEqual(list(six.itervalues(q)), ['no'])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({'foo': 'bar'})
with self.assertRaises(AttributeError):
q.pop('foo')
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault('foo', 'bar')
with self.assertRaises(AttributeError):
q.__delitem__('vote')
if six.PY2:
def test_invalid_input_encoding(self):
q = QueryDict(str(b'foo=bar&foo=\xff'))
self.assertEqual(q['foo'], '\xff')
self.assertEqual(q.getlist('foo'), ['bar', '\xff'])
def test_pickle(self):
q = QueryDict()
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict(str('a=b&c=d'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict(str('a=b&c=d&a=1'))
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
def test_update_from_querydict(self):
x = QueryDict(str("a=1&a=2"), mutable=True)
y = QueryDict(str("a=3&a=4"))
x.update(y)
self.assertEqual(x.getlist('a'), ['1', '2', '3', '4'])
def test_non_default_encoding(self):
q = QueryDict(str('cur=%A4'), encoding='iso-8859-15')
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
q = q.copy()
self.assertEqual(q.encoding, 'iso-8859-15')
self.assertEqual(list(six.iteritems(q)), [('cur', '€')])
self.assertEqual(q.urlencode(), 'cur=%A4')
self.assertEqual(copy.copy(q).encoding, 'iso-8859-15')
self.assertEqual(copy.deepcopy(q).encoding, 'iso-8859-15')
class HttpResponseTests(unittest.TestCase):
def test_headers_type(self):
r = HttpResponse()
r['key'] = 'test'
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
r['key'] = 'test'.encode('ascii')
self.assertEqual(r['key'], str('test'))
self.assertIsInstance(r['key'], str)
self.assertIn(b'test', r.serialize_headers())
r['key'] = 'café'
self.assertEqual(r['key'], force_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
r['key'] = 'café'.encode('latin-1')
self.assertEqual(r['key'], force_str('café', 'latin-1'))
self.assertIsInstance(r['key'], str)
self.assertIn('café'.encode('latin-1'), r.serialize_headers())
r['key'] = '†'
self.assertEqual(r['key'], str('=?utf-8?b?4oCg?='))
self.assertIsInstance(r['key'], str)
self.assertIn(b'=?utf-8?b?4oCg?=', r.serialize_headers())
# The response also converts unicode or bytes keys to strings, but requires
# them to contain ASCII
r = HttpResponse()
del r['Content-Type']
r['foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
del r['Content-Type']
r[b'foo'] = 'bar'
l = list(r.items())
self.assertEqual(len(l), 1)
self.assertEqual(l[0], ('foo', 'bar'))
self.assertIsInstance(l[0][0], str)
r = HttpResponse()
with self.assertRaises(UnicodeError):
r.__setitem__('føø', 'bar')
with self.assertRaises(UnicodeError):
r.__setitem__('føø'.encode('utf-8'), 'bar')
def test_long_line(self):
# Bug #20889: long lines trigger newlines to be added to headers
# (which is not allowed due to bug #10188)
h = HttpResponse()
f = 'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88'.encode('latin-1')
f = f.decode('utf-8')
h['Content-Disposition'] = 'attachment; filename="%s"' % f
        # This one triggers http://bugs.python.org/issue20747, that is,
        # Python will itself insert a newline in the header
h['Content-Disposition'] = 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"'
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
with self.assertRaises(BadHeaderError):
r.__setitem__('test\rstr', 'test')
with self.assertRaises(BadHeaderError):
r.__setitem__('test\nstr', 'test')
def test_dict_behavior(self):
r = HttpResponse()
self.assertIsNone(r.get('test'))
def test_non_string_content(self):
# Bug 16494: HttpResponse should behave consistently with non-strings
r = HttpResponse(12345)
self.assertEqual(r.content, b'12345')
# test content via property
r = HttpResponse()
r.content = 12345
self.assertEqual(r.content, b'12345')
def test_iter_content(self):
r = HttpResponse(['abc', 'def', 'ghi'])
self.assertEqual(r.content, b'abcdefghi')
# test iter content via property
r = HttpResponse()
r.content = ['idan', 'alex', 'jacob']
self.assertEqual(r.content, b'idanalexjacob')
r = HttpResponse()
r.content = [1, 2, 3]
self.assertEqual(r.content, b'123')
# test odd inputs
r = HttpResponse()
r.content = ['1', '2', 3, '\u079e']
# '\xde\x9e' == unichr(1950).encode('utf-8')
self.assertEqual(r.content, b'123\xde\x9e')
# .content can safely be accessed multiple times.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, r.content)
self.assertEqual(r.content, b'helloworld')
# __iter__ can safely be called multiple times (#20187).
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(b''.join(r), b'helloworld')
# Accessing .content still works.
self.assertEqual(r.content, b'helloworld')
# Accessing .content also works if the response was iterated first.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(b''.join(r), b'helloworld')
self.assertEqual(r.content, b'helloworld')
# Additional content can be written to the response.
r = HttpResponse(iter(['hello', 'world']))
self.assertEqual(r.content, b'helloworld')
r.write('!')
self.assertEqual(r.content, b'helloworld!')
def test_iterator_isnt_rewound(self):
# Regression test for #13222
r = HttpResponse('abc')
i = iter(r)
self.assertEqual(list(i), [b'abc'])
self.assertEqual(list(i), [])
def test_lazy_content(self):
r = HttpResponse(lazystr('helloworld'))
self.assertEqual(r.content, b'helloworld')
def test_file_interface(self):
r = HttpResponse()
r.write(b"hello")
self.assertEqual(r.tell(), 5)
r.write("привет")
self.assertEqual(r.tell(), 17)
r = HttpResponse(['abc'])
r.write('def')
self.assertEqual(r.tell(), 6)
self.assertEqual(r.content, b'abcdef')
# with Content-Encoding header
r = HttpResponse()
r['Content-Encoding'] = 'winning'
r.write(b'abc')
r.write(b'def')
self.assertEqual(r.content, b'abcdef')
def test_stream_interface(self):
r = HttpResponse('asdf')
self.assertEqual(r.getvalue(), b'asdf')
r = HttpResponse()
self.assertIs(r.writable(), True)
r.writelines(['foo\n', 'bar\n', 'baz\n'])
self.assertEqual(r.content, b'foo\nbar\nbaz\n')
def test_unsafe_redirect(self):
bad_urls = [
'data:text/html,<script>window.alert("xss")</script>',
'mailto:test@example.com',
'file:///etc/passwd',
]
for url in bad_urls:
with self.assertRaises(SuspiciousOperation):
HttpResponseRedirect(url)
with self.assertRaises(SuspiciousOperation):
HttpResponsePermanentRedirect(url)
class HttpResponseSubclassesTests(SimpleTestCase):
def test_redirect(self):
response = HttpResponseRedirect('/redirected/')
self.assertEqual(response.status_code, 302)
# Test that standard HttpResponse init args can be used
response = HttpResponseRedirect(
'/redirected/',
content='The resource has temporarily moved',
content_type='text/html',
)
self.assertContains(response, 'The resource has temporarily moved', status_code=302)
# Test that url attribute is right
self.assertEqual(response.url, response['Location'])
def test_redirect_lazy(self):
r = HttpResponseRedirect(lazystr('/redirected/'))
self.assertEqual(r.url, '/redirected/')
def test_redirect_repr(self):
response = HttpResponseRedirect('/redirected/')
expected = '<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="/redirected/">'
self.assertEqual(repr(response), expected)
def test_not_modified(self):
response = HttpResponseNotModified()
self.assertEqual(response.status_code, 304)
# 304 responses should not have content/content-type
with self.assertRaises(AttributeError):
response.content = "Hello dear"
self.assertNotIn('content-type', response)
def test_not_allowed(self):
response = HttpResponseNotAllowed(['GET'])
self.assertEqual(response.status_code, 405)
# Test that standard HttpResponse init args can be used
response = HttpResponseNotAllowed(['GET'], content='Only the GET method is allowed', content_type='text/html')
self.assertContains(response, 'Only the GET method is allowed', status_code=405)
def test_not_allowed_repr(self):
response = HttpResponseNotAllowed(['GET', 'OPTIONS'], content_type='text/plain')
expected = '<HttpResponseNotAllowed [GET, OPTIONS] status_code=405, "text/plain">'
self.assertEqual(repr(response), expected)
class JsonResponseTests(SimpleTestCase):
def test_json_response_non_ascii(self):
data = {'key': 'łóżko'}
response = JsonResponse(data)
self.assertEqual(json.loads(response.content.decode()), data)
def test_json_response_raises_type_error_with_default_setting(self):
with self.assertRaisesMessage(
TypeError,
'In order to allow non-dict objects to be serialized set the '
'safe parameter to False'
):
JsonResponse([1, 2, 3])
def test_json_response_text(self):
response = JsonResponse('foobar', safe=False)
self.assertEqual(json.loads(response.content.decode()), 'foobar')
def test_json_response_list(self):
response = JsonResponse(['foo', 'bar'], safe=False)
self.assertEqual(json.loads(response.content.decode()), ['foo', 'bar'])
def test_json_response_uuid(self):
u = uuid.uuid4()
response = JsonResponse(u, safe=False)
self.assertEqual(json.loads(response.content.decode()), str(u))
def test_json_response_custom_encoder(self):
class CustomDjangoJSONEncoder(DjangoJSONEncoder):
def encode(self, o):
return json.dumps({'foo': 'bar'})
response = JsonResponse({}, encoder=CustomDjangoJSONEncoder)
self.assertEqual(json.loads(response.content.decode()), {'foo': 'bar'})
def test_json_response_passing_arguments_to_json_dumps(self):
response = JsonResponse({'foo': 'bar'}, json_dumps_params={'indent': 2})
self.assertEqual(response.content.decode(), '{\n "foo": "bar"\n}')
class StreamingHttpResponseTests(SimpleTestCase):
def test_streaming_response(self):
r = StreamingHttpResponse(iter(['hello', 'world']))
# iterating over the response itself yields bytestring chunks.
chunks = list(r)
self.assertEqual(chunks, [b'hello', b'world'])
for chunk in chunks:
self.assertIsInstance(chunk, six.binary_type)
# and the response can only be iterated once.
self.assertEqual(list(r), [])
# even when a sequence that can be iterated many times, like a list,
# is given as content.
r = StreamingHttpResponse(['abc', 'def'])
self.assertEqual(list(r), [b'abc', b'def'])
self.assertEqual(list(r), [])
# iterating over Unicode strings still yields bytestring chunks.
r.streaming_content = iter(['hello', 'café'])
chunks = list(r)
# '\xc3\xa9' == unichr(233).encode('utf-8')
self.assertEqual(chunks, [b'hello', b'caf\xc3\xa9'])
for chunk in chunks:
self.assertIsInstance(chunk, six.binary_type)
# streaming responses don't have a `content` attribute.
self.assertFalse(hasattr(r, 'content'))
with self.assertRaises(AttributeError):
r.content = 'xyz'
# but they do have a `streaming_content` attribute.
self.assertTrue(hasattr(r, 'streaming_content'))
# that exists so we can check if a response is streaming, and wrap or
# replace the content iterator.
r.streaming_content = iter(['abc', 'def'])
r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
self.assertEqual(list(r), [b'ABC', b'DEF'])
# coercing a streaming response to bytes doesn't return a complete HTTP
r = StreamingHttpResponse(iter(['hello', 'world']))
self.assertEqual(
six.binary_type(r), b'Content-Type: text/html; charset=utf-8')
self.assertEqual(list(r), [b'hello', b'world'])
# additional content cannot be written to the response.
r = StreamingHttpResponse(iter(['hello', 'world']))
with self.assertRaises(Exception):
r.write('!')
# and we can't tell the current position.
with self.assertRaises(Exception):
r.tell()
r = StreamingHttpResponse(iter(['hello', 'world']))
self.assertEqual(r.getvalue(), b'helloworld')
class FileCloseTests(SimpleTestCase):
def setUp(self):
request_finished.disconnect(close_old_connections)
def tearDown(self):
request_finished.connect(close_old_connections)
def test_response(self):
filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
file1 = open(filename)
r = HttpResponse(file1)
self.assertTrue(file1.closed)
r.close()
        # when multiple files are assigned as content, make sure they are all
# closed with the response.
file1 = open(filename)
file2 = open(filename)
r = HttpResponse(file1)
r.content = file2
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
def test_streaming_response(self):
filename = os.path.join(os.path.dirname(upath(__file__)), 'abc.txt')
# file isn't closed until we close the response.
file1 = open(filename)
r = StreamingHttpResponse(file1)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
file1 = open(filename)
file2 = open(filename)
r = StreamingHttpResponse(file1)
r.streaming_content = file2
self.assertFalse(file1.closed)
self.assertFalse(file2.closed)
r.close()
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
class CookieTests(unittest.TestCase):
def test_encode(self):
c = SimpleCookie()
c['test'] = "An,awkward;value"
self.assertNotIn(";", c.output().rstrip(';'))
self.assertNotIn(",", c.output().rstrip(';'))
def test_decode(self):
c = SimpleCookie()
c['test'] = "An,awkward;value"
c2 = SimpleCookie()
c2.load(c.output()[12:])
self.assertEqual(c['test'].value, c2['test'].value)
c3 = parse_cookie(c.output()[12:])
self.assertEqual(c['test'].value, c3['test'])
def test_decode_2(self):
c = SimpleCookie()
c['test'] = b"\xf0"
c2 = SimpleCookie()
c2.load(c.output()[12:])
self.assertEqual(c['test'].value, c2['test'].value)
c3 = parse_cookie(c.output()[12:])
self.assertEqual(c['test'].value, c3['test'])
def test_nonstandard_keys(self):
self.assertIn('good_cookie', parse_cookie('good_cookie=yes;bad:cookie=yes').keys())
def test_repeated_nonstandard_keys(self):
self.assertIn('good_cookie', parse_cookie('a:=b; a:=c; good_cookie=yes').keys())
def test_python_cookies(self):
self.assertEqual(parse_cookie('chips=ahoy; vienna=finger'), {'chips': 'ahoy', 'vienna': 'finger'})
# treats all semicolons as delimiters, even within quotes.
self.assertEqual(
parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'),
{'keebler': '"E=mc2', 'L': '\\"Loves\\"', 'fudge': '\\012', '': '"'}
)
# Illegal cookies that have an '=' char in an unquoted value.
self.assertEqual(parse_cookie('keebler=E=mc2'), {'keebler': 'E=mc2'})
# Cookies with ':' character in their name.
self.assertEqual(parse_cookie('key:term=value:term'), {'key:term': 'value:term'})
# Cookies with '[' and ']'.
self.assertEqual(parse_cookie('a=b; c=[; d=r; f=h'), {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'})
def test_cookie_edgecases(self):
# Cookies that RFC6265 allows.
self.assertEqual(parse_cookie('a=b; Domain=example.com'), {'a': 'b', 'Domain': 'example.com'})
# parse_cookie() has historically kept only the last cookie with the
# same name.
self.assertEqual(parse_cookie('a=b; h=i; a=c'), {'a': 'c', 'h': 'i'})
def test_invalid_cookies(self):
# Chunks without an equals sign appear as unnamed values per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
self.assertIn('django_language', parse_cookie('abc=def; unnamed; django_language=en').keys())
        # Even a double quote may be an unnamed value.
self.assertEqual(parse_cookie('a=b; "; c=d'), {'a': 'b', '': '"', 'c': 'd'})
# Spaces in names and values, and an equals sign in values.
self.assertEqual(parse_cookie('a b c=d e = f; gh=i'), {'a b c': 'd e = f', 'gh': 'i'})
# More characters the spec forbids.
self.assertEqual(parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'), {'a b,c<>@:/[]?{}': 'd " =e,f g'})
# Unicode characters. The spec only allows ASCII.
self.assertEqual(parse_cookie('saint=André Bessette'), {'saint': force_str('André Bessette')})
# Browsers don't send extra whitespace or semicolons in Cookie headers,
self.assertEqual(parse_cookie(' = b ; ; = ; c = ; '), {'': 'b', 'c': ''})
def test_httponly_after_load(self):
c = SimpleCookie()
c.load("name=val")
c['name']['httponly'] = True
self.assertTrue(c['name']['httponly'])
def test_load_dict(self):
c = SimpleCookie()
c.load({'name': 'val'})
self.assertEqual(c['name'].value, 'val')
@unittest.skipUnless(six.PY2, "PY3 throws an exception on invalid cookie keys.")
def test_bad_cookie(self):
r = HttpResponse()
r.set_cookie("a:.b/", 1)
self.assertEqual(len(r.cookies.bad_cookies), 1)
def test_pickle(self):
rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1'
expected_output = 'Set-Cookie: %s' % rawdata
C = SimpleCookie()
C.load(rawdata)
self.assertEqual(C.output(), expected_output)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
C1 = pickle.loads(pickle.dumps(C, protocol=proto))
self.assertEqual(C1.output(), expected_output)
| true
| true
|
f7152c01a06b1a8eea4ae4c08a05e1af35676efc
| 82
|
py
|
Python
|
pyrobolearn/states/generators/__init__.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 2
|
2021-01-21T21:08:30.000Z
|
2022-03-29T16:45:49.000Z
|
pyrobolearn/states/generators/__init__.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | null | null | null |
pyrobolearn/states/generators/__init__.py
|
Pandinosaurus/pyrobolearn
|
9cd7c060723fda7d2779fa255ac998c2c82b8436
|
[
"Apache-2.0"
] | 1
|
2020-09-29T21:25:39.000Z
|
2020-09-29T21:25:39.000Z
|
# -*- coding: utf-8 -*-
# import state generators
from .state_generator import *
| 16.4
| 30
| 0.682927
|
from .state_generator import *
| true
| true
|
f7152cd6c81c021fabfecc053762e195b6af37eb
| 493
|
py
|
Python
|
setup.py
|
knorth55/chainer-dense-fusion
|
8ff53173d7071fc2cfcd05b1e0b2c544aeed090b
|
[
"MIT"
] | 22
|
2019-01-31T23:50:30.000Z
|
2021-09-13T09:41:00.000Z
|
setup.py
|
knorth55/chainer-dense-fusion
|
8ff53173d7071fc2cfcd05b1e0b2c544aeed090b
|
[
"MIT"
] | 4
|
2019-07-31T14:40:06.000Z
|
2022-03-16T13:32:45.000Z
|
setup.py
|
knorth55/chainer-dense-fusion
|
8ff53173d7071fc2cfcd05b1e0b2c544aeed090b
|
[
"MIT"
] | 3
|
2019-08-30T09:18:45.000Z
|
2020-03-03T16:07:51.000Z
|
from setuptools import find_packages
from setuptools import setup
version = '0.0.0'
setup(
name='chainer_dense_fusion',
version=version,
packages=find_packages(),
install_requires=open('requirements.txt').readlines(),
description='',
long_description=open('README.md').read(),
author='Shingo Kitagawa',
author_email='shingogo.5511@gmail.com',
url='https://github.com/knorth55/chainer-dense-fusion',
license='MIT',
keywords='machine-learning',
)
| 23.47619
| 59
| 0.703854
|
from setuptools import find_packages
from setuptools import setup
version = '0.0.0'
setup(
name='chainer_dense_fusion',
version=version,
packages=find_packages(),
install_requires=open('requirements.txt').readlines(),
description='',
long_description=open('README.md').read(),
author='Shingo Kitagawa',
author_email='shingogo.5511@gmail.com',
url='https://github.com/knorth55/chainer-dense-fusion',
license='MIT',
keywords='machine-learning',
)
| true
| true
|
f7152da2720cfd4a357ce9c5b71bde73ceb5bb7b
| 2,199
|
py
|
Python
|
year_2018/day_09.py
|
gchazot/aoc
|
1926114b1060a927be3f87732ba0a399afd98ae4
|
[
"MIT"
] | 1
|
2020-04-12T16:14:29.000Z
|
2020-04-12T16:14:29.000Z
|
year_2018/day_09.py
|
gchazot/aoc
|
1926114b1060a927be3f87732ba0a399afd98ae4
|
[
"MIT"
] | null | null | null |
year_2018/day_09.py
|
gchazot/aoc
|
1926114b1060a927be3f87732ba0a399afd98ae4
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import unittest
class TestMarbleGame(unittest.TestCase):
def test_starts_empty(self):
game = MarbleGame(0, 0)
self.assertListEqual([], game.scores)
self.assertListEqual([0], game._circle)
def test_play_examples(self):
def high_score(players, last_marble):
game = MarbleGame(players, last_marble)
return game.play()
self.assertEqual(32, high_score(9, 25))
self.assertEqual(8317, high_score(10, 1618))
self.assertEqual(146373, high_score(13, 7999))
self.assertEqual(2764, high_score(17, 1104))
self.assertEqual(54718, high_score(21, 6111))
self.assertEqual(37305, high_score(30, 5807))
def test_play_mine(self):
self.assertEqual(410375, MarbleGame(439, 71307).play())
@unittest.skip("Too slow, > 2h")
def test_play_huge_mine(self):
self.assertEqual(3314195047, MarbleGame(439, 71307 * 100).play())
class MarbleGame:
def __init__(self, num_players, last_marble):
self.scores = [0 for _ in range(num_players)]
self.last_marble = last_marble
self._circle = [0]
self._next_marble = 1
self._current_index = 0
def play(self):
while self._next_marble <= self.last_marble:
self.play_round()
return max(self.scores)
def play_round(self):
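        # Placement rules: a normal marble goes two positions clockwise of
        # the current marble; every multiple of 23 is instead kept by the
        # current player, who also scores the marble seven positions
        # counter-clockwise, removing it from the circle.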
if self._next_marble % 23 != 0:
next_index = (self._current_index + 2) % len(self._circle)
self._circle.insert(next_index, self._next_marble)
else:
num_players = len(self.scores)
current_player_index = self._next_marble % num_players
self.scores[current_player_index] += self._next_marble
next_index = (self._current_index - 7) % len(self._circle)
self.scores[current_player_index] += self._circle.pop(next_index)
self._current_index = next_index
self._next_marble += 1
def print(self):
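        # Debug helper: dump the per-player scores, the next marble,
        # the current index, and the whole circle.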
print(" ".join(map(str, self.scores)), " ",
self._next_marble, "-",
self._current_index, "-",
" ".join(map(str, self._circle)))
print()
| 34.359375
| 77
| 0.626648
|
from __future__ import print_function
import unittest
class TestMarbleGame(unittest.TestCase):
def test_starts_empty(self):
game = MarbleGame(0, 0)
self.assertListEqual([], game.scores)
self.assertListEqual([0], game._circle)
def test_play_examples(self):
def high_score(players, last_marble):
game = MarbleGame(players, last_marble)
return game.play()
self.assertEqual(32, high_score(9, 25))
self.assertEqual(8317, high_score(10, 1618))
self.assertEqual(146373, high_score(13, 7999))
self.assertEqual(2764, high_score(17, 1104))
self.assertEqual(54718, high_score(21, 6111))
self.assertEqual(37305, high_score(30, 5807))
def test_play_mine(self):
self.assertEqual(410375, MarbleGame(439, 71307).play())
@unittest.skip("Too slow, > 2h")
def test_play_huge_mine(self):
self.assertEqual(3314195047, MarbleGame(439, 71307 * 100).play())
class MarbleGame:
def __init__(self, num_players, last_marble):
self.scores = [0 for _ in range(num_players)]
self.last_marble = last_marble
self._circle = [0]
self._next_marble = 1
self._current_index = 0
def play(self):
while self._next_marble <= self.last_marble:
self.play_round()
return max(self.scores)
def play_round(self):
if self._next_marble % 23 != 0:
next_index = (self._current_index + 2) % len(self._circle)
self._circle.insert(next_index, self._next_marble)
else:
num_players = len(self.scores)
current_player_index = self._next_marble % num_players
self.scores[current_player_index] += self._next_marble
next_index = (self._current_index - 7) % len(self._circle)
self.scores[current_player_index] += self._circle.pop(next_index)
self._current_index = next_index
self._next_marble += 1
def print(self):
print(" ".join(map(str, self.scores)), " ",
self._next_marble, "-",
self._current_index, "-",
" ".join(map(str, self._circle)))
print()
| true
| true
|
f7152e2178499fecdb9dce8c3f5d9bfbf5c3dfd6
| 297
|
py
|
Python
|
tests/test_modes.py
|
s-s-boika/obdlib
|
5b0b35276575a522d20858b6993a9bebf0acc968
|
[
"MIT"
] | 9
|
2015-07-14T07:15:58.000Z
|
2021-06-03T01:42:19.000Z
|
tests/test_modes.py
|
s-s-boika/obdlib
|
5b0b35276575a522d20858b6993a9bebf0acc968
|
[
"MIT"
] | null | null | null |
tests/test_modes.py
|
s-s-boika/obdlib
|
5b0b35276575a522d20858b6993a9bebf0acc968
|
[
"MIT"
] | 4
|
2015-07-15T09:05:46.000Z
|
2022-02-06T04:28:53.000Z
|
import unittest
import obdlib.obd.modes as modes
class TestModes(unittest.TestCase):
def test_init(self):
m = modes.Modes(1)
self.assertIsInstance(m.modes, dict)
suite = unittest.TestLoader().loadTestsFromTestCase(TestModes)
unittest.TextTestRunner(verbosity=2).run(suite)
| 22.846154
| 62
| 0.747475
|
import unittest
import obdlib.obd.modes as modes
class TestModes(unittest.TestCase):
def test_init(self):
m = modes.Modes(1)
self.assertIsInstance(m.modes, dict)
suite = unittest.TestLoader().loadTestsFromTestCase(TestModes)
unittest.TextTestRunner(verbosity=2).run(suite)
| true
| true
|
f7152e96fcfdafb7945bd56df781ae2d29581903
| 4,319
|
py
|
Python
|
gbrarscrapy.py
|
wothard/scrapyfloat
|
ff0c6698a4732015358ed5e9a07e24dd212eaf7f
|
[
"MIT"
] | null | null | null |
gbrarscrapy.py
|
wothard/scrapyfloat
|
ff0c6698a4732015358ed5e9a07e24dd212eaf7f
|
[
"MIT"
] | null | null | null |
gbrarscrapy.py
|
wothard/scrapyfloat
|
ff0c6698a4732015358ed5e9a07e24dd212eaf7f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
from lxml import html
import requests
import os
import random
import time
from fake_agent import fakeagent
class Gbrarscrapy(object):
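    # Scrapes one RARBG listing page through rotating proxies and
    # user agents, keeping only releases rated 5.0 or higher, and
    # appends the results to data/dianying.txt.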
def __init__(self, url_li, proxy_single):
self.title_xpa = '//a[@onmouseover]/text()'
self.score_list_xpa = '//span[@style="color:DarkSlateGray"]/text()'
self.id_xpa = '//a[contains(@href,"/torrent/")]/@href'
self.ch_xpa = '//tr[@class="lista2"][{}]/td[2]/span/text()'
# self.date_list_xpa = '//td[contains(@align,"center")
# and contains(@width,"150px")]/text()'
self.seli_xpa = '//td[@align="center" and @width="50px"]/font/text()'
        self.tor_dict = dict()  # title -> [seed count, download URL, score]
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=' +
'0.9,image/webp,image/apng,*/*;q=0.8',
'Cookies': 'skt=v97mrzygux; gaDts48g=q8h5pp9t; skt=v97mrzygux; gaDts48g=q8h5pp9t; expla=1; tcc; aby=2; ppu_main_9ef78edf998c4df1e1636c9a474d9f47=1; ppu_sub_9ef78edf998c4df1e1636c9a474d9f47=1; ppu_delay_9ef78edf998c4df1e1636c9a474d9f47=1'
}
self.url = url_li
self.pro = proxy_single
self.user_agent = fakeagent.load_ua()
def run(self):
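        # Retry until the page scrapes cleanly: each attempt picks a fresh
        # random User-Agent and HTTP proxy, parses the listing, stores every
        # sufficiently rated torrent, then breaks out of the loop.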
while 1:
try:
temp_agent = random.choice(self.user_agent)
agent = temp_agent.split("\n")[0]
self.headers["User-Agent"] = agent
pro = {"http": "http://" + random.choice(self.pro)}
s = requests.get(self.url, headers=self.headers,
proxies=pro, timeout=10)
response = html.fromstring(s.text)
print(s.text)
title_l = response.xpath(self.title_xpa) # title
id = (response.xpath(self.id_xpa)) # id
seed = response.xpath(self.seli_xpa) # seed
torrent_f = self.torent_front(id)
for i in range(25):
                    # build the complete torrent download URL
address = torrent_f[i] + title_l[i] + "-[rarbg.to].torrent"
check_sc = response.xpath(self.ch_xpa.format(i + 1))
                    # extract the movie title from the release name
title = title_l[i].split(".1080p.")[0]
                    # record the score; default to 0 when no rating is present
                    if not check_sc or ('/' not in check_sc[0]):
                        score = 0
                    else:
                        score = float((check_sc[0].split(" ")[-1]).split('/')[0])
if score >= 5:
self.torrent_dict(title_l[i], seed[i],
title, address, score)
time.sleep(2)
print(len(self.tor_dict), self.tor_dict)
print(self.url)
self.torrent_save()
print("保存成功一页")
break
except Exception as e:
print("REason: ", e)
print(self.url)
self.error_save_page(self.url)
def torent_front(self, id):
        torrent_f = []  # URL prefixes
for i in range(len(id) - 8):
te = id[i + 8].split("torrent/")[-1]
if "comment" not in te:
temp = "https://rarbgprx.org/download.php?id={}&f=".format(te)
torrent_f.append(temp)
return torrent_f
def torrent_dict(self, title_l, seed, title, address, score):
        # check for duplicates
        if title in self.tor_dict:
            # keep the entry with the better seed health (only scores >= 5.0 reach here)
            if int(seed) > int(self.tor_dict[title][0]):
                self.tor_dict[title] = [str(seed), address, str(score)]
        else:
            self.tor_dict[title] = [str(seed), address, str(score)]
def torrent_save(self):
with open(os.getcwd()+'/data/dianying.txt', 'a') as f:
for (i, j) in self.tor_dict.items():
f.write(i)
f.write(", ")
for k in j:
f.write(k)
f.write(", ")
f.write("\n")
def error_save_page(self, url):
with open(os.getcwd()+'/data/error_page_1.txt', 'a') as f:
f.write(url)
f.write("\n")
| 41.133333
| 249
| 0.508219
|
from lxml import html
import requests
import os
import random
import time
from fake_agent import fakeagent
class Gbrarscrapy(object):
def __init__(self, url_li, proxy_single):
self.title_xpa = '//a[@onmouseover]/text()'
self.score_list_xpa = '//span[@style="color:DarkSlateGray"]/text()'
self.id_xpa = '//a[contains(@href,"/torrent/")]/@href'
self.ch_xpa = '//tr[@class="lista2"][{}]/td[2]/span/text()'
self.seli_xpa = '//td[@align="center" and @width="50px"]/font/text()'
self.tor_dict = dict()
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=' +
'0.9,image/webp,image/apng,*/*;q=0.8',
'Cookies': 'skt=v97mrzygux; gaDts48g=q8h5pp9t; skt=v97mrzygux; gaDts48g=q8h5pp9t; expla=1; tcc; aby=2; ppu_main_9ef78edf998c4df1e1636c9a474d9f47=1; ppu_sub_9ef78edf998c4df1e1636c9a474d9f47=1; ppu_delay_9ef78edf998c4df1e1636c9a474d9f47=1'
}
self.url = url_li
self.pro = proxy_single
self.user_agent = fakeagent.load_ua()
def run(self):
while 1:
try:
temp_agent = random.choice(self.user_agent)
agent = temp_agent.split("\n")[0]
self.headers["User-Agent"] = agent
pro = {"http": "http://" + random.choice(self.pro)}
s = requests.get(self.url, headers=self.headers,
proxies=pro, timeout=10)
response = html.fromstring(s.text)
print(s.text)
title_l = response.xpath(self.title_xpa)
id = (response.xpath(self.id_xpa))
seed = response.xpath(self.seli_xpa)
torrent_f = self.torent_front(id)
for i in range(25):
address = torrent_f[i] + title_l[i] + "-[rarbg.to].torrent"
check_sc = response.xpath(self.ch_xpa.format(i + 1))
title = title_l[i].split(".1080p.")[0]
                    if not check_sc or ('/' not in check_sc[0]):
                        score = 0
                    else:
                        score = float((check_sc[0].split(" ")[-1]).split('/')[0])
if score >= 5:
self.torrent_dict(title_l[i], seed[i],
title, address, score)
time.sleep(2)
print(len(self.tor_dict), self.tor_dict)
print(self.url)
self.torrent_save()
print("保存成功一页")
break
except Exception as e:
print("REason: ", e)
print(self.url)
self.error_save_page(self.url)
def torent_front(self, id):
torrent_f = []
for i in range(len(id) - 8):
te = id[i + 8].split("torrent/")[-1]
if "comment" not in te:
temp = "https://rarbgprx.org/download.php?id={}&f=".format(te)
torrent_f.append(temp)
return torrent_f
def torrent_dict(self, title_l, seed, title, address, score):
        if title in self.tor_dict:
            if int(seed) > int(self.tor_dict[title][0]):
                self.tor_dict[title] = [str(seed), address, str(score)]
        else:
            self.tor_dict[title] = [str(seed), address, str(score)]
def torrent_save(self):
with open(os.getcwd()+'/data/dianying.txt', 'a') as f:
for (i, j) in self.tor_dict.items():
f.write(i)
f.write(", ")
for k in j:
f.write(k)
f.write(", ")
f.write("\n")
def error_save_page(self, url):
with open(os.getcwd()+'/data/error_page_1.txt', 'a') as f:
f.write(url)
f.write("\n")
| true
| true
|
f7152ea08588300b8fbe747412eb41de76a983a8
| 456
|
py
|
Python
|
experiments/3_parallel_training.py
|
dddaga/word-tree
|
ed6c59c16feee04d5c6003b3f5f4df68e6808e04
|
[
"MIT"
] | null | null | null |
experiments/3_parallel_training.py
|
dddaga/word-tree
|
ed6c59c16feee04d5c6003b3f5f4df68e6808e04
|
[
"MIT"
] | null | null | null |
experiments/3_parallel_training.py
|
dddaga/word-tree
|
ed6c59c16feee04d5c6003b3f5f4df68e6808e04
|
[
"MIT"
] | 1
|
2020-12-02T09:07:06.000Z
|
2020-12-02T09:07:06.000Z
|
import pymongo
EXPERIMENT_NAME = 'EXP_3'
CORPUS_PATH = 'data/pride_and_prejudice_cleaned.txt'
TRAINING_WINDOW = 3
CONTEXT_DIMENSION = 64
CONTEXT_DECAY = 0.5
CONTRASTIVE_WEIGHT = 0.001
LEARNING_RATE = 1
DROPOUT = 0.1
myclient = pymongo.MongoClient('mongodb://localhost:27017')
mydb = myclient["mydatabase"]
collection = mydb.parallel_trainging
'''
Experiment details:
Parallel training
3 parallel instances
'''
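# A minimal sketch of recording a run in the collection above (hypothetical
# document fields; the actual training loop lives elsewhere in the project):
#
#     collection.insert_one({'experiment': EXPERIMENT_NAME,
#                            'training_window': TRAINING_WINDOW,
#                            'context_dimension': CONTEXT_DIMENSION})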
| 14.709677
| 61
| 0.710526
|
import pymongo
EXPERIMENT_NAME = 'EXP_3'
CORPUS_PATH = 'data/pride_and_prejudice_cleaned.txt'
TRAINING_WINDOW = 3
CONTEXT_DIMENSION = 64
CONTEXT_DECAY = 0.5
CONTRASTIVE_WEIGHT = 0.001
LEARNING_RATE = 1
DROPOUT = 0.1
myclient = pymongo.MongoClient('mongodb://localhost:27017')
mydb = myclient["mydatabase"]
collection = mydb.parallel_trainging
| true
| true
|
f7152f1421c03597f5ecc2d6a08acdad363400e1
| 3,893
|
py
|
Python
|
deprecated/Imputation/GRUI/Run_GAN_imputed.py
|
srinivasans/DeepSepsis
|
8647a2ec93ad5a937638acfc279a756bbfa04f7f
|
[
"Apache-2.0"
] | 2
|
2019-04-22T07:41:23.000Z
|
2019-04-23T02:45:06.000Z
|
deprecated/Imputation/GRUI/Run_GAN_imputed.py
|
srinivasans/DeepSepsis
|
8647a2ec93ad5a937638acfc279a756bbfa04f7f
|
[
"Apache-2.0"
] | null | null | null |
deprecated/Imputation/GRUI/Run_GAN_imputed.py
|
srinivasans/DeepSepsis
|
8647a2ec93ad5a937638acfc279a756bbfa04f7f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 10:47:41 2018
@author: yonghong
"""
from __future__ import print_function
import sys
sys.path.append("..")
import argparse
import os
import tensorflow as tf
from Physionet2019ImputedSepsisData import readImputed
import gru_delta_forGAN
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('--gpus', type=str, default = None)
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--run-type', type=str, default='test')
parser.add_argument('--data-path', type=str, default="../Gan_Imputation/imputation_train_results/WGAN_no_mask/")
    # full path to the imputed training dataset, e.g. Gan_Imputation/imputation_train_results/WGAN_no_mask/30_8_128_64_0.001_400_True_True_True_0.15_0.5
parser.add_argument('--model-path', type=str, default=None)
parser.add_argument('--result-path', type=str, default=None)
parser.add_argument('--lr', type=float, default=0.01)
#parser.add_argument('--epoch', type=int, default=20)
parser.add_argument('--n-inputs', type=int, default=41)
parser.add_argument('--n-hidden-units', type=int, default=64)
parser.add_argument('--n-classes', type=int, default=2)
parser.add_argument('--checkpoint-dir', type=str, default='checkpoint_physionet_imputed',
help='Directory name to save the checkpoints')
parser.add_argument('--log-dir', type=str, default='logs_physionet_imputed',
help='Directory name to save training logs')
parser.add_argument('--isNormal',type=int,default=1)
parser.add_argument('--isSlicing',type=int,default=1)
    # 0 means False, 1 means True
parser.add_argument('--isBatch-normal',type=int,default=1)
args = parser.parse_args()
if args.isBatch_normal==0:
args.isBatch_normal=False
if args.isBatch_normal==1:
args.isBatch_normal=True
if args.isNormal==0:
args.isNormal=False
if args.isNormal==1:
args.isNormal=True
if args.isSlicing==0:
args.isSlicing=False
if args.isSlicing==1:
args.isSlicing=True
checkdir=args.checkpoint_dir
logdir=args.log_dir
base=args.data_path
data_paths=["30_8_128_64_0.001_400_True_True_False_0.15_0.5"]
max_auc = 0.0
for d in data_paths:
args.data_path=os.path.join(base,d)
path_splits=args.data_path.split("/")
if len(path_splits[-1])==0:
datasetName=path_splits[-2]
else:
datasetName=path_splits[-1]
args.checkpoint_dir=checkdir+"/"+datasetName
args.log_dir=logdir+"/"+datasetName
dt_train=readImputed.ReadImputedPhysionetData(args.data_path)
dt_train.load()
dt_test=readImputed.ReadImputedPhysionetData(args.data_path.replace("imputation_train_results","imputation_test_results"))
dt_test.load()
lrs=[0.004,0.003,0.005,0.006,0.007,0.008,0.009,0.01,0.012,0.015]
#lrs = [0.0075,0.0085]
for lr in lrs:
args.lr=lr
epoch=30
args.epoch=epoch
print("epoch: %2d"%(epoch))
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
model = gru_delta_forGAN.grui(sess,
args=args,
dataset=dt_train,
test_set = dt_test
)
# build graph
model.build()
auc = model.train()
if auc > max_auc:
max_auc = auc
print("")
print("max auc is: " + str(max_auc))
    with open("max_auc", "w") as f2:
        f2.write(str(max_auc))
| 36.046296
| 130
| 0.625995
|
from __future__ import print_function
import sys
sys.path.append("..")
import argparse
import os
import tensorflow as tf
from Physionet2019ImputedSepsisData import readImputed
import gru_delta_forGAN
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('--gpus', type=str, default = None)
parser.add_argument('--batch-size', type=int, default=128)
parser.add_argument('--run-type', type=str, default='test')
parser.add_argument('--data-path', type=str, default="../Gan_Imputation/imputation_train_results/WGAN_no_mask/")
parser.add_argument('--model-path', type=str, default=None)
parser.add_argument('--result-path', type=str, default=None)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--n-inputs', type=int, default=41)
parser.add_argument('--n-hidden-units', type=int, default=64)
parser.add_argument('--n-classes', type=int, default=2)
parser.add_argument('--checkpoint-dir', type=str, default='checkpoint_physionet_imputed',
help='Directory name to save the checkpoints')
parser.add_argument('--log-dir', type=str, default='logs_physionet_imputed',
help='Directory name to save training logs')
parser.add_argument('--isNormal',type=int,default=1)
parser.add_argument('--isSlicing',type=int,default=1)
parser.add_argument('--isBatch-normal',type=int,default=1)
args = parser.parse_args()
if args.isBatch_normal==0:
args.isBatch_normal=False
if args.isBatch_normal==1:
args.isBatch_normal=True
if args.isNormal==0:
args.isNormal=False
if args.isNormal==1:
args.isNormal=True
if args.isSlicing==0:
args.isSlicing=False
if args.isSlicing==1:
args.isSlicing=True
checkdir=args.checkpoint_dir
logdir=args.log_dir
base=args.data_path
data_paths=["30_8_128_64_0.001_400_True_True_False_0.15_0.5"]
max_auc = 0.0
for d in data_paths:
args.data_path=os.path.join(base,d)
path_splits=args.data_path.split("/")
if len(path_splits[-1])==0:
datasetName=path_splits[-2]
else:
datasetName=path_splits[-1]
args.checkpoint_dir=checkdir+"/"+datasetName
args.log_dir=logdir+"/"+datasetName
dt_train=readImputed.ReadImputedPhysionetData(args.data_path)
dt_train.load()
dt_test=readImputed.ReadImputedPhysionetData(args.data_path.replace("imputation_train_results","imputation_test_results"))
dt_test.load()
lrs=[0.004,0.003,0.005,0.006,0.007,0.008,0.009,0.01,0.012,0.015]
for lr in lrs:
args.lr=lr
epoch=30
args.epoch=epoch
print("epoch: %2d"%(epoch))
tf.reset_default_graph()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
model = gru_delta_forGAN.grui(sess,
args=args,
dataset=dt_train,
test_set = dt_test
)
model.build()
auc = model.train()
if auc > max_auc:
max_auc = auc
print("")
print("max auc is: " + str(max_auc))
    with open("max_auc", "w") as f2:
        f2.write(str(max_auc))
| true
| true
|
f7152f1b586fbf4b47b2c1084a5b2a3f185a8418
| 340
|
py
|
Python
|
apps/teams/adminx.py
|
slyslyme/CTF_AWD_Platform
|
6e9eec0a23a316aaf1927d4ec5be923ac26ff21e
|
[
"MIT"
] | 85
|
2019-04-21T01:38:18.000Z
|
2022-03-22T08:06:21.000Z
|
apps/teams/adminx.py
|
xuchaoa/CTF_AWD_Platform
|
b2201f18677939442002e16e64280acd44f72bfe
|
[
"MIT"
] | 12
|
2019-05-10T14:09:12.000Z
|
2022-03-11T23:45:35.000Z
|
apps/teams/adminx.py
|
slyslyme/CTF_AWD_Platform
|
6e9eec0a23a316aaf1927d4ec5be923ac26ff21e
|
[
"MIT"
] | 21
|
2019-04-14T16:12:15.000Z
|
2022-03-22T08:06:22.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Archerx
# @time: 2019/4/16 11:35 AM
from .models import TeamProfile
import xadmin
class TeamDisplay(object):
    list_display = ('id','team_name','team_captain','team_member1','team_member2','team_member3','competition','team_token')
xadmin.site.register(TeamProfile, TeamDisplay)
| 28.333333
| 124
| 0.729412
|
from .models import TeamProfile
import xadmin
class TeamDisplay(object):
    list_display = ('id','team_name','team_captain','team_member1','team_member2','team_member3','competition','team_token')
xadmin.site.register(TeamProfile, TeamDisplay)
| true
| true
|
f7152fae1381c42726a0ec3c4057ea6d2f710ce3
| 3,769
|
py
|
Python
|
kmip/core/messages/payloads/create.py
|
vbnmmnbv/PyKMIP
|
4617ae528006178c466fe3945a477f568b596940
|
[
"Apache-2.0"
] | 12
|
2016-09-14T21:59:10.000Z
|
2020-03-11T07:37:25.000Z
|
kmip/core/messages/payloads/create.py
|
vbnmmnbv/PyKMIP
|
4617ae528006178c466fe3945a477f568b596940
|
[
"Apache-2.0"
] | 1
|
2021-06-25T15:43:48.000Z
|
2021-06-25T15:43:48.000Z
|
kmip/core/messages/payloads/create.py
|
vbnmmnbv/PyKMIP
|
4617ae528006178c466fe3945a477f568b596940
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from kmip.core import attributes
from kmip.core import enums
from kmip.core.enums import Tags
from kmip.core.objects import TemplateAttribute
from kmip.core.primitives import Struct
from kmip.core.utils import BytearrayStream
class CreateRequestPayload(Struct):
def __init__(self,
object_type=None,
template_attribute=None):
super(CreateRequestPayload, self).__init__(
tag=enums.Tags.REQUEST_PAYLOAD)
self.object_type = object_type
self.template_attribute = template_attribute
self.validate()
def read(self, istream):
super(CreateRequestPayload, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.object_type = attributes.ObjectType()
self.template_attribute = TemplateAttribute()
self.object_type.read(tstream)
self.template_attribute.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
# Write the object type and template attribute of the request payload
self.object_type.write(tstream)
self.template_attribute.write(tstream)
# Write the length and value of the request payload
self.length = tstream.length()
super(CreateRequestPayload, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
class CreateResponsePayload(Struct):
def __init__(self,
object_type=None,
unique_identifier=None,
template_attribute=None):
super(CreateResponsePayload, self).__init__(
tag=enums.Tags.RESPONSE_PAYLOAD)
self.object_type = object_type
self.unique_identifier = unique_identifier
self.template_attribute = template_attribute
self.validate()
def read(self, istream):
super(CreateResponsePayload, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.object_type = attributes.ObjectType()
self.unique_identifier = attributes.UniqueIdentifier()
self.object_type.read(tstream)
self.unique_identifier.read(tstream)
if self.is_tag_next(Tags.TEMPLATE_ATTRIBUTE, tstream):
self.template_attribute = TemplateAttribute()
self.template_attribute.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
        # Write the contents of the response payload
self.object_type.write(tstream)
self.unique_identifier.write(tstream)
if self.template_attribute is not None:
self.template_attribute.write(tstream)
        # Write the length and value of the response payload
self.length = tstream.length()
super(CreateResponsePayload, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
# TODO (peter-hamilton) Finish implementation.
pass
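# A minimal round-trip sketch (the enum value below is an assumption; both
# payloads serialize to and parse from a BytearrayStream):
#
#     payload = CreateRequestPayload(
#         object_type=attributes.ObjectType(enums.ObjectType.SYMMETRIC_KEY),
#         template_attribute=TemplateAttribute())
#     stream = BytearrayStream()
#     payload.write(stream)
#     parsed = CreateRequestPayload()
#     parsed.read(stream)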
| 32.491379
| 77
| 0.689573
|
from kmip.core import attributes
from kmip.core import enums
from kmip.core.enums import Tags
from kmip.core.objects import TemplateAttribute
from kmip.core.primitives import Struct
from kmip.core.utils import BytearrayStream
class CreateRequestPayload(Struct):
def __init__(self,
object_type=None,
template_attribute=None):
super(CreateRequestPayload, self).__init__(
tag=enums.Tags.REQUEST_PAYLOAD)
self.object_type = object_type
self.template_attribute = template_attribute
self.validate()
def read(self, istream):
super(CreateRequestPayload, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.object_type = attributes.ObjectType()
self.template_attribute = TemplateAttribute()
self.object_type.read(tstream)
self.template_attribute.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.object_type.write(tstream)
self.template_attribute.write(tstream)
self.length = tstream.length()
super(CreateRequestPayload, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
pass
class CreateResponsePayload(Struct):
def __init__(self,
object_type=None,
unique_identifier=None,
template_attribute=None):
super(CreateResponsePayload, self).__init__(
tag=enums.Tags.RESPONSE_PAYLOAD)
self.object_type = object_type
self.unique_identifier = unique_identifier
self.template_attribute = template_attribute
self.validate()
def read(self, istream):
super(CreateResponsePayload, self).read(istream)
tstream = BytearrayStream(istream.read(self.length))
self.object_type = attributes.ObjectType()
self.unique_identifier = attributes.UniqueIdentifier()
self.object_type.read(tstream)
self.unique_identifier.read(tstream)
if self.is_tag_next(Tags.TEMPLATE_ATTRIBUTE, tstream):
self.template_attribute = TemplateAttribute()
self.template_attribute.read(tstream)
self.is_oversized(tstream)
self.validate()
def write(self, ostream):
tstream = BytearrayStream()
self.object_type.write(tstream)
self.unique_identifier.write(tstream)
if self.template_attribute is not None:
self.template_attribute.write(tstream)
self.length = tstream.length()
super(CreateResponsePayload, self).write(ostream)
ostream.write(tstream.buffer)
def validate(self):
pass
| true
| true
|
f7152fd92189d41c2d60b2ab2fa4b993a10814c8
| 9,994
|
py
|
Python
|
torchtext/experimental/datasets/language_modeling.py
|
NicolasHug/text
|
651c1f70ee6e75705aa1c5e4d4cf86ff69b6cbed
|
[
"BSD-3-Clause"
] | null | null | null |
torchtext/experimental/datasets/language_modeling.py
|
NicolasHug/text
|
651c1f70ee6e75705aa1c5e4d4cf86ff69b6cbed
|
[
"BSD-3-Clause"
] | null | null | null |
torchtext/experimental/datasets/language_modeling.py
|
NicolasHug/text
|
651c1f70ee6e75705aa1c5e4d4cf86ff69b6cbed
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
import logging
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.experimental.datasets.raw import language_modeling as raw
from torchtext.experimental.datasets.raw.common import check_default_set
from torchtext.experimental.datasets.raw.common import wrap_datasets
logger_ = logging.getLogger(__name__)
def build_vocab(data, transforms):
def apply_transforms(data):
for line in data:
tokens = transforms(line)
yield tokens
return build_vocab_from_iterator(apply_transforms(data), len(data))
class LanguageModelingDataset(torch.utils.data.Dataset):
"""Defines a dataset for language modeling.
Currently, we only support the following datasets:
- WikiText2
- WikiText103
- PennTreebank
- WMTNewsCrawl
"""
def __init__(self, data, vocab, transform):
"""Initiate language modeling dataset.
Args:
data: a tensor of tokens. tokens are ids after
numericalizing the string tokens.
                torch.tensor([token_id_1, token_id_2, token_id_3, token_id_1]).long()
vocab: Vocabulary object used for dataset.
transform: Text string transform.
"""
super(LanguageModelingDataset, self).__init__()
self.vocab = vocab
self.transform = transform
self.data = data
def __getitem__(self, i):
return self.data[i]
def __len__(self):
return len(self.data)
def __iter__(self):
for x in self.data:
yield x
def get_vocab(self):
return self.vocab
def _setup_datasets(dataset_name, tokenizer, root, vocab, split_, year, language):
if tokenizer is None:
tokenizer = get_tokenizer('basic_english')
split = check_default_set(split_, ('train', 'test', 'valid'), dataset_name)
if vocab is None:
if 'train' not in split:
raise TypeError("Must pass a vocab if train is not selected.")
if dataset_name == 'WMTNewsCrawl':
raw_train, = raw.DATASETS[dataset_name](root=root, split=('train',), year=year, language=language)
else:
raw_train, = raw.DATASETS[dataset_name](root=root, split=('train',))
logger_.info('Building Vocab based on train data')
vocab = build_vocab(raw_train, tokenizer)
logger_.info('Vocab has %d entries', len(vocab))
def text_transform(line):
return torch.tensor([vocab[token] for token in tokenizer(line)], dtype=torch.long)
if dataset_name == 'WMTNewsCrawl':
raw_datasets = raw.DATASETS[dataset_name](root=root, split=split, year=year, language=language)
else:
raw_datasets = raw.DATASETS[dataset_name](root=root, split=split)
raw_data = {name: list(map(text_transform, raw_dataset)) for name, raw_dataset in zip(split, raw_datasets)}
logger_.info('Building datasets for {}'.format(split))
return wrap_datasets(tuple(LanguageModelingDataset(raw_data[item], vocab, text_transform)
for item in split), split_)
def WikiText2(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
""" Defines WikiText2 datasets.
Create language modeling dataset: WikiText2
Separately returns the train/test/valid set
Args:
tokenizer: the tokenizer used to preprocess raw text data.
The default one is basic_english tokenizer in fastText. spacy tokenizer
is supported as well (see example below). A custom tokenizer is callable
function with input of a string and output of a token list.
root: Directory where the datasets are saved. Default: ".data"
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
        split: a string or tuple for the returned datasets. Default: ('train', 'valid', 'test')
            By default, all three datasets (train, test, valid) are generated. Users
could also choose any one or two of them, for example ('train', 'test') or
just a string 'train'. If 'train' is not in the tuple or string, a vocab
object should be provided which will be used to process valid and/or test
data.
Examples:
>>> from torchtext.experimental.datasets import WikiText2
>>> from torchtext.data.utils import get_tokenizer
>>> tokenizer = get_tokenizer("spacy")
>>> train_dataset, valid_dataset, test_dataset = WikiText2(tokenizer=tokenizer)
>>> vocab = train_dataset.get_vocab()
>>> valid_dataset, = WikiText2(tokenizer=tokenizer, vocab=vocab,
split='valid')
"""
return _setup_datasets("WikiText2", tokenizer, root, vocab, split, None, None)
def WikiText103(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
""" Defines WikiText103 datasets.
Create language modeling dataset: WikiText103
Separately returns the train/test/valid set
Args:
tokenizer: the tokenizer used to preprocess raw text data.
The default one is basic_english tokenizer in fastText. spacy tokenizer
is supported as well (see example below). A custom tokenizer is callable
function with input of a string and output of a token list.
root: Directory where the datasets are saved. Default: ".data"
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
split: a string or tuple for the returned datasets. Default: ('train', 'valid', 'test')
            By default, all three datasets (train, test, valid) are generated. Users
could also choose any one or two of them, for example ('train', 'test') or
just a string 'train'. If 'train' is not in the tuple or string, a vocab
object should be provided which will be used to process valid and/or test
data.
Examples:
>>> from torchtext.experimental.datasets import WikiText103
>>> from torchtext.data.utils import get_tokenizer
>>> tokenizer = get_tokenizer("spacy")
>>> train_dataset, valid_dataset, test_dataset = WikiText103(tokenizer=tokenizer)
>>> vocab = train_dataset.get_vocab()
>>> valid_dataset, = WikiText103(tokenizer=tokenizer, vocab=vocab,
split='valid')
"""
return _setup_datasets("WikiText103", tokenizer, root, vocab, split, None, None)
def PennTreebank(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
""" Defines PennTreebank datasets.
Create language modeling dataset: PennTreebank
Separately returns the train/test/valid set
Args:
tokenizer: the tokenizer used to preprocess raw text data.
The default one is basic_english tokenizer in fastText. spacy tokenizer
is supported as well (see example below). A custom tokenizer is callable
function with input of a string and output of a token list.
root: Directory where the datasets are saved. Default: ".data"
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
split: a string or tuple for the returned datasets. Default: ('train', 'valid', 'test')
            By default, all three datasets (train, test, valid) are generated. Users
could also choose any one or two of them, for example ('train', 'test') or
just a string 'train'. If 'train' is not in the tuple or string, a vocab
object should be provided which will be used to process valid and/or test
data.
Examples:
>>> from torchtext.experimental.datasets import PennTreebank
>>> from torchtext.data.utils import get_tokenizer
>>> tokenizer = get_tokenizer("spacy")
>>> train_dataset, valid_dataset, test_dataset = PennTreebank(tokenizer=tokenizer)
>>> vocab = train_dataset.get_vocab()
>>> valid_dataset, = PennTreebank(tokenizer=tokenizer, vocab=vocab,
split='valid')
"""
return _setup_datasets("PennTreebank", tokenizer, root, vocab, split, None, None)
def WMTNewsCrawl(tokenizer=None, root='.data', vocab=None, split=('train'), year=2010, language='en'):
""" Defines WMTNewsCrawl datasets.
Create language modeling dataset: WMTNewsCrawl
returns the train set
Args:
tokenizer: the tokenizer used to preprocess raw text data.
The default one is basic_english tokenizer in fastText. spacy tokenizer
is supported as well (see example below). A custom tokenizer is callable
function with input of a string and output of a token list.
root: Directory where the datasets are saved. Default: ".data"
vocab: Vocabulary used for dataset. If None, it will generate a new
vocabulary based on the train data set.
split: a string or tuple for the returned datasets
(Default: ('train',))
year: the year of the dataset (Default: 2010)
language: the language of the dataset (Default: 'en')
Examples:
>>> from torchtext.experimental.datasets import WMTNewsCrawl
>>> from torchtext.data.utils import get_tokenizer
>>> tokenizer = get_tokenizer("spacy")
>>> train_dataset, = WMTNewsCrawl(tokenizer=tokenizer, split='train')
Note: WMTNewsCrawl provides datasets based on the year and language instead of train/valid/test.
"""
return _setup_datasets("WMTNewsCrawl", tokenizer, root, vocab, split, year, language)
DATASETS = {
'WikiText2': WikiText2,
'WikiText103': WikiText103,
'PennTreebank': PennTreebank,
'WMTNewsCrawl': WMTNewsCrawl
}
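# A minimal usage sketch (data is downloaded under ".data" by default; the
# default basic_english tokenizer is used when none is given):
#
#     train, valid, test = DATASETS['WikiText2']()
#     vocab = train.get_vocab()
#     valid_only, = DATASETS['WikiText2'](vocab=vocab, split='valid')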
| 42.892704
| 111
| 0.662097
|
import torch
import logging
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.experimental.datasets.raw import language_modeling as raw
from torchtext.experimental.datasets.raw.common import check_default_set
from torchtext.experimental.datasets.raw.common import wrap_datasets
logger_ = logging.getLogger(__name__)
def build_vocab(data, transforms):
def apply_transforms(data):
for line in data:
tokens = transforms(line)
yield tokens
return build_vocab_from_iterator(apply_transforms(data), len(data))
class LanguageModelingDataset(torch.utils.data.Dataset):
def __init__(self, data, vocab, transform):
super(LanguageModelingDataset, self).__init__()
self.vocab = vocab
self.transform = transform
self.data = data
def __getitem__(self, i):
return self.data[i]
def __len__(self):
return len(self.data)
def __iter__(self):
for x in self.data:
yield x
def get_vocab(self):
return self.vocab
def _setup_datasets(dataset_name, tokenizer, root, vocab, split_, year, language):
if tokenizer is None:
tokenizer = get_tokenizer('basic_english')
split = check_default_set(split_, ('train', 'test', 'valid'), dataset_name)
if vocab is None:
if 'train' not in split:
raise TypeError("Must pass a vocab if train is not selected.")
if dataset_name == 'WMTNewsCrawl':
raw_train, = raw.DATASETS[dataset_name](root=root, split=('train',), year=year, language=language)
else:
raw_train, = raw.DATASETS[dataset_name](root=root, split=('train',))
logger_.info('Building Vocab based on train data')
vocab = build_vocab(raw_train, tokenizer)
logger_.info('Vocab has %d entries', len(vocab))
def text_transform(line):
return torch.tensor([vocab[token] for token in tokenizer(line)], dtype=torch.long)
if dataset_name == 'WMTNewsCrawl':
raw_datasets = raw.DATASETS[dataset_name](root=root, split=split, year=year, language=language)
else:
raw_datasets = raw.DATASETS[dataset_name](root=root, split=split)
raw_data = {name: list(map(text_transform, raw_dataset)) for name, raw_dataset in zip(split, raw_datasets)}
logger_.info('Building datasets for {}'.format(split))
return wrap_datasets(tuple(LanguageModelingDataset(raw_data[item], vocab, text_transform)
for item in split), split_)
def WikiText2(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
return _setup_datasets("WikiText2", tokenizer, root, vocab, split, None, None)
def WikiText103(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
return _setup_datasets("WikiText103", tokenizer, root, vocab, split, None, None)
def PennTreebank(tokenizer=None, root='.data', vocab=None, split=('train', 'valid', 'test')):
return _setup_datasets("PennTreebank", tokenizer, root, vocab, split, None, None)
def WMTNewsCrawl(tokenizer=None, root='.data', vocab=None, split=('train'), year=2010, language='en'):
return _setup_datasets("WMTNewsCrawl", tokenizer, root, vocab, split, year, language)
DATASETS = {
'WikiText2': WikiText2,
'WikiText103': WikiText103,
'PennTreebank': PennTreebank,
'WMTNewsCrawl': WMTNewsCrawl
}
| true
| true
|
f715303f7cff1a03797169fbe6f8d2773e09ef68
| 925
|
py
|
Python
|
challenge/urls.py
|
superdev0505/mtp-web
|
8288765a89daaa7b02dfd7e78cc51c4f12d7fcce
|
[
"MIT"
] | null | null | null |
challenge/urls.py
|
superdev0505/mtp-web
|
8288765a89daaa7b02dfd7e78cc51c4f12d7fcce
|
[
"MIT"
] | null | null | null |
challenge/urls.py
|
superdev0505/mtp-web
|
8288765a89daaa7b02dfd7e78cc51c4f12d7fcce
|
[
"MIT"
] | null | null | null |
from django.urls import path, re_path
from . import views
urlpatterns = [
path('', views.index, name='challenge.index'),
path('create', views.challenge_create, name='challenge.challenge_create'),
path('<str:unique_id>/edit/', views.challenge_edit, name='challenge.my_challenge_edit'),
re_path(r'^my-list/(?P<page>\d*)?$', views.my_challenge_list, name='challenge.my_challenge_list'),
re_path(r'^list/(?P<page>\d*)?$', views.challenge_list, name='challenge.challenge_list'),
path('<str:unique_id>/delete/', views.my_challenge_delete, name='challenge.my_challenge_delete'),
path('<str:unique_id>/', views.challenge_detail, name='challenge.challenge_detail'),
path('<str:unique_id>/leaderboard/', views.challenge_leaderboard, name='challenge.challenge_leaderboard'),
path('ajax/get_challenge_detail/<str:unique_id>/', views.ajax_challenge_detail, name='challenge.ajax_challenge_detail'),
]
| 57.8125
| 124
| 0.740541
|
from django.urls import path, re_path
from . import views
urlpatterns = [
path('', views.index, name='challenge.index'),
path('create', views.challenge_create, name='challenge.challenge_create'),
path('<str:unique_id>/edit/', views.challenge_edit, name='challenge.my_challenge_edit'),
re_path(r'^my-list/(?P<page>\d*)?$', views.my_challenge_list, name='challenge.my_challenge_list'),
re_path(r'^list/(?P<page>\d*)?$', views.challenge_list, name='challenge.challenge_list'),
path('<str:unique_id>/delete/', views.my_challenge_delete, name='challenge.my_challenge_delete'),
path('<str:unique_id>/', views.challenge_detail, name='challenge.challenge_detail'),
path('<str:unique_id>/leaderboard/', views.challenge_leaderboard, name='challenge.challenge_leaderboard'),
path('ajax/get_challenge_detail/<str:unique_id>/', views.ajax_challenge_detail, name='challenge.ajax_challenge_detail'),
]
| true
| true
|
f7153249e54fec334ca1d518b4485c45f6ac4c7a
| 693
|
py
|
Python
|
osiris/vault/__init__.py
|
skadyan/aws-glue-python-kickstart
|
5e3228a0793188d248f801a2b5a522210048ccde
|
[
"Apache-2.0"
] | 4
|
2020-04-23T18:43:27.000Z
|
2022-02-22T03:57:06.000Z
|
osiris/vault/__init__.py
|
skadyan/aws-glue-python-kickstart
|
5e3228a0793188d248f801a2b5a522210048ccde
|
[
"Apache-2.0"
] | 1
|
2021-06-02T00:47:12.000Z
|
2021-06-02T00:47:12.000Z
|
osiris/vault/__init__.py
|
skadyan/aws-glue-python-kickstart
|
5e3228a0793188d248f801a2b5a522210048ccde
|
[
"Apache-2.0"
] | null | null | null |
import abc
from abc import abstractmethod
from typing import Union
from osiris.base.generalutils import instantiate
class SecretVault(abc.ABC):
@abstractmethod
def get_secret(self, key: str, attr: str = None, **kwargs) -> Union[dict, str]:
pass
class NoopSecretVault(SecretVault):
def get_secret(self, key: str, attr: str = None, **kwargs) -> Union[dict, str]:
return None
def new_secret_vault(env) -> SecretVault:
instance = None
if env.flag("sys.vault.enabled"):
impl = env.get_property("sys.vault.impl")
impl_kwargs = env.get_section("sys.vault.impl_kwargs")
instance = instantiate(impl, impl_kwargs)
return instance
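# A minimal sketch of a concrete vault (hypothetical class; in practice the
# implementation named by the "sys.vault.impl" property is instantiated):
#
#     class DictSecretVault(SecretVault):
#         def __init__(self, secrets=None):
#             self.secrets = secrets or {}
#
#         def get_secret(self, key, attr=None, **kwargs):
#             value = self.secrets.get(key)
#             if attr and isinstance(value, dict):
#                 return value.get(attr)
#             return value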
| 24.75
| 83
| 0.688312
|
import abc
from abc import abstractmethod
from typing import Union
from osiris.base.generalutils import instantiate
class SecretVault(abc.ABC):
@abstractmethod
def get_secret(self, key: str, attr: str = None, **kwargs) -> Union[dict, str]:
pass
class NoopSecretVault(SecretVault):
def get_secret(self, key: str, attr: str = None, **kwargs) -> Union[dict, str]:
return None
def new_secret_vault(env) -> SecretVault:
instance = None
if env.flag("sys.vault.enabled"):
impl = env.get_property("sys.vault.impl")
impl_kwargs = env.get_section("sys.vault.impl_kwargs")
instance = instantiate(impl, impl_kwargs)
return instance
| true
| true
|
f71533a0ade4d2a2240d14b32b74a3bbac06db98
| 7,949
|
py
|
Python
|
i2plib/tunnel.py
|
undecidedzogvisrainbowvitalispotent-360/i2plib
|
6edf51cd5d21cc745aa7e23cb98c582144884fa8
|
[
"MIT"
] | 25
|
2018-09-05T16:44:05.000Z
|
2022-02-16T18:32:32.000Z
|
i2plib/tunnel.py
|
undecidedzogvisvitalispotent8stars360/i2plib
|
6edf51cd5d21cc745aa7e23cb98c582144884fa8
|
[
"MIT"
] | 2
|
2018-10-24T19:57:16.000Z
|
2019-01-26T14:30:40.000Z
|
i2plib/tunnel.py
|
undecidedzogvisvitalispotent8stars360/i2plib
|
6edf51cd5d21cc745aa7e23cb98c582144884fa8
|
[
"MIT"
] | 5
|
2018-10-24T18:01:46.000Z
|
2020-12-15T18:16:14.000Z
|
import logging
import asyncio
import argparse
import i2plib.sam
import i2plib.aiosam
import i2plib.utils
from i2plib.log import logger
BUFFER_SIZE = 65536
async def proxy_data(reader, writer):
"""Proxy data from reader to writer"""
try:
while True:
data = await reader.read(BUFFER_SIZE)
if not data:
break
writer.write(data)
except Exception as e:
logger.debug('proxy_data_task exception {}'.format(e))
finally:
try:
writer.close()
except RuntimeError:
pass
logger.debug('close connection')
class I2PTunnel(object):
"""Base I2P Tunnel object, not to be used directly
:param local_address: A local address to use for a tunnel.
E.g. ("127.0.0.1", 6668)
:param destination: (optional) Destination to use for this tunnel. Can be
a base64 encoded string, :class:`i2plib.Destination`
instance or None. A new destination is created when it
is None.
:param session_name: (optional) Session nick name. A new session nickname is
generated if not specified.
:param options: (optional) A dict object with i2cp options
:param loop: (optional) Event loop instance
:param sam_address: (optional) SAM API address
"""
def __init__(self, local_address, destination=None, session_name=None,
options={}, loop=None, sam_address=i2plib.sam.DEFAULT_ADDRESS):
self.local_address = local_address
self.destination = destination
self.session_name = session_name or i2plib.utils.generate_session_id()
self.options = options
self.loop = loop
self.sam_address = sam_address
async def _pre_run(self):
if not self.destination:
self.destination = await i2plib.new_destination(
sam_address=self.sam_address, loop=self.loop)
_, self.session_writer = await i2plib.aiosam.create_session(
self.session_name, style=self.style, options=self.options,
sam_address=self.sam_address,
loop=self.loop, destination=self.destination)
def stop(self):
"""Stop the tunnel"""
self.session_writer.close()
class ClientTunnel(I2PTunnel):
"""Client tunnel, a subclass of i2plib.tunnel.I2PTunnel
If you run a client tunnel with a local address ("127.0.0.1", 6668) and
a remote destination "irc.echelon.i2p", all connections to 127.0.0.1:6668
will be proxied to irc.echelon.i2p.
:param remote_destination: Remote I2P destination, can be either .i2p
domain, .b32.i2p address, base64 destination or
:class:`i2plib.Destination` instance
"""
def __init__(self, remote_destination, *args, **kwargs):
super().__init__(*args, **kwargs)
self.style = "STREAM"
self.remote_destination = remote_destination
async def run(self):
"""A coroutine used to run the tunnel"""
await self._pre_run()
async def handle_client(client_reader, client_writer):
"""Handle local client connection"""
remote_reader, remote_writer = await i2plib.aiosam.stream_connect(
self.session_name, self.remote_destination,
sam_address=self.sam_address, loop=self.loop)
asyncio.ensure_future(proxy_data(remote_reader, client_writer),
loop=self.loop)
asyncio.ensure_future(proxy_data(client_reader, remote_writer),
loop=self.loop)
self.server = await asyncio.start_server(handle_client, *self.local_address,
loop=self.loop)
def stop(self):
super().stop()
self.server.close()
class ServerTunnel(I2PTunnel):
"""Server tunnel, a subclass of i2plib.tunnel.I2PTunnel
If you want to expose a local service 127.0.0.1:80 to the I2P network, run
a server tunnel with a local address ("127.0.0.1", 80). If you don't
provide a private key or a session name, it will use a TRANSIENT
destination.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.style = "STREAM"
async def run(self):
"""A coroutine used to run the tunnel"""
await self._pre_run()
async def handle_client(incoming, client_reader, client_writer):
# data and dest may come in one chunk
dest, data = incoming.split(b"\n", 1)
remote_destination = i2plib.sam.Destination(dest.decode())
logger.debug("{} client connected: {}.b32.i2p".format(
self.session_name, remote_destination.base32))
try:
remote_reader, remote_writer = await asyncio.wait_for(
asyncio.open_connection(
host=self.local_address[0],
port=self.local_address[1], loop=self.loop),
timeout=5, loop=self.loop)
if data: remote_writer.write(data)
asyncio.ensure_future(proxy_data(remote_reader, client_writer),
loop=self.loop)
asyncio.ensure_future(proxy_data(client_reader, remote_writer),
loop=self.loop)
except ConnectionRefusedError:
client_writer.close()
async def server_loop():
try:
while True:
client_reader, client_writer = await i2plib.aiosam.stream_accept(
self.session_name, sam_address=self.sam_address,
loop=self.loop)
incoming = await client_reader.read(BUFFER_SIZE)
asyncio.ensure_future(handle_client(
incoming, client_reader, client_writer), loop=self.loop)
except asyncio.CancelledError:
pass
self.server_loop = asyncio.ensure_future(server_loop(), loop=self.loop)
def stop(self):
super().stop()
self.server_loop.cancel()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('type', metavar="TYPE", choices=('server', 'client'),
help="Tunnel type (server or client)")
parser.add_argument('address', metavar="ADDRESS",
help="Local address (e.g. 127.0.0.1:8000)")
parser.add_argument('--debug', '-d', action='store_true',
help='Debugging')
parser.add_argument('--key', '-k', default='', metavar='PRIVATE_KEY',
help='Path to private key file')
parser.add_argument('--destination', '-D', default='',
metavar='DESTINATION', help='Remote destination')
args = parser.parse_args()
SAM_ADDRESS = i2plib.utils.get_sam_address()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
loop = asyncio.get_event_loop()
loop.set_debug(args.debug)
if args.key:
destination = i2plib.sam.Destination(path=args.key, has_private_key=True)
else:
destination = None
local_address = i2plib.utils.address_from_string(args.address)
if args.type == "client":
tunnel = ClientTunnel(args.destination, local_address, loop=loop,
destination=destination, sam_address=SAM_ADDRESS)
elif args.type == "server":
tunnel = ServerTunnel(local_address, loop=loop, destination=destination,
sam_address=SAM_ADDRESS)
asyncio.ensure_future(tunnel.run(), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
tunnel.stop()
finally:
loop.stop()
loop.close()
| 38.965686
| 85
| 0.603472
|
import logging
import asyncio
import argparse
import i2plib.sam
import i2plib.aiosam
import i2plib.utils
from i2plib.log import logger
BUFFER_SIZE = 65536
async def proxy_data(reader, writer):
try:
while True:
data = await reader.read(BUFFER_SIZE)
if not data:
break
writer.write(data)
except Exception as e:
logger.debug('proxy_data_task exception {}'.format(e))
finally:
try:
writer.close()
except RuntimeError:
pass
logger.debug('close connection')
class I2PTunnel(object):
def __init__(self, local_address, destination=None, session_name=None,
options={}, loop=None, sam_address=i2plib.sam.DEFAULT_ADDRESS):
self.local_address = local_address
self.destination = destination
self.session_name = session_name or i2plib.utils.generate_session_id()
self.options = options
self.loop = loop
self.sam_address = sam_address
async def _pre_run(self):
if not self.destination:
self.destination = await i2plib.new_destination(
sam_address=self.sam_address, loop=self.loop)
_, self.session_writer = await i2plib.aiosam.create_session(
self.session_name, style=self.style, options=self.options,
sam_address=self.sam_address,
loop=self.loop, destination=self.destination)
def stop(self):
self.session_writer.close()
class ClientTunnel(I2PTunnel):
def __init__(self, remote_destination, *args, **kwargs):
super().__init__(*args, **kwargs)
self.style = "STREAM"
self.remote_destination = remote_destination
async def run(self):
await self._pre_run()
async def handle_client(client_reader, client_writer):
remote_reader, remote_writer = await i2plib.aiosam.stream_connect(
self.session_name, self.remote_destination,
sam_address=self.sam_address, loop=self.loop)
asyncio.ensure_future(proxy_data(remote_reader, client_writer),
loop=self.loop)
asyncio.ensure_future(proxy_data(client_reader, remote_writer),
loop=self.loop)
self.server = await asyncio.start_server(handle_client, *self.local_address,
loop=self.loop)
def stop(self):
super().stop()
self.server.close()
class ServerTunnel(I2PTunnel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.style = "STREAM"
async def run(self):
await self._pre_run()
async def handle_client(incoming, client_reader, client_writer):
dest, data = incoming.split(b"\n", 1)
remote_destination = i2plib.sam.Destination(dest.decode())
logger.debug("{} client connected: {}.b32.i2p".format(
self.session_name, remote_destination.base32))
try:
remote_reader, remote_writer = await asyncio.wait_for(
asyncio.open_connection(
host=self.local_address[0],
port=self.local_address[1], loop=self.loop),
timeout=5, loop=self.loop)
if data: remote_writer.write(data)
asyncio.ensure_future(proxy_data(remote_reader, client_writer),
loop=self.loop)
asyncio.ensure_future(proxy_data(client_reader, remote_writer),
loop=self.loop)
except ConnectionRefusedError:
client_writer.close()
async def server_loop():
try:
while True:
client_reader, client_writer = await i2plib.aiosam.stream_accept(
self.session_name, sam_address=self.sam_address,
loop=self.loop)
incoming = await client_reader.read(BUFFER_SIZE)
asyncio.ensure_future(handle_client(
incoming, client_reader, client_writer), loop=self.loop)
except asyncio.CancelledError:
pass
self.server_loop = asyncio.ensure_future(server_loop(), loop=self.loop)
def stop(self):
super().stop()
self.server_loop.cancel()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('type', metavar="TYPE", choices=('server', 'client'),
help="Tunnel type (server or client)")
parser.add_argument('address', metavar="ADDRESS",
help="Local address (e.g. 127.0.0.1:8000)")
parser.add_argument('--debug', '-d', action='store_true',
help='Debugging')
parser.add_argument('--key', '-k', default='', metavar='PRIVATE_KEY',
help='Path to private key file')
parser.add_argument('--destination', '-D', default='',
metavar='DESTINATION', help='Remote destination')
args = parser.parse_args()
SAM_ADDRESS = i2plib.utils.get_sam_address()
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
loop = asyncio.get_event_loop()
loop.set_debug(args.debug)
if args.key:
destination = i2plib.sam.Destination(path=args.key, has_private_key=True)
else:
destination = None
local_address = i2plib.utils.address_from_string(args.address)
if args.type == "client":
tunnel = ClientTunnel(args.destination, local_address, loop=loop,
destination=destination, sam_address=SAM_ADDRESS)
elif args.type == "server":
tunnel = ServerTunnel(local_address, loop=loop, destination=destination,
sam_address=SAM_ADDRESS)
asyncio.ensure_future(tunnel.run(), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
tunnel.stop()
finally:
loop.stop()
loop.close()
| true
| true
|
f71535405665888b171719de9948f63f35341da0
| 948
|
py
|
Python
|
dune/gdt/test/linearelliptic/mpi_linearelliptic__block_swipdg_discretization.py
|
TiKeil/dune-gdt
|
25c8b987cc07a4b8b966c1a07ea21b78dba7852f
|
[
"BSD-2-Clause"
] | null | null | null |
dune/gdt/test/linearelliptic/mpi_linearelliptic__block_swipdg_discretization.py
|
TiKeil/dune-gdt
|
25c8b987cc07a4b8b966c1a07ea21b78dba7852f
|
[
"BSD-2-Clause"
] | null | null | null |
dune/gdt/test/linearelliptic/mpi_linearelliptic__block_swipdg_discretization.py
|
TiKeil/dune-gdt
|
25c8b987cc07a4b8b966c1a07ea21b78dba7852f
|
[
"BSD-2-Clause"
] | null | null | null |
# ~~~
# This file is part of the dune-gdt project:
# https://github.com/dune-community/dune-gdt
# Copyright 2010-2018 dune-gdt developers and contributors. All rights reserved.
# License: Dual licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# or GPL-2.0+ (http://opensource.org/licenses/gpl-license)
# with "runtime exception" (http://www.dune-project.org/license.html)
# Authors:
# Felix Schindler (2017 - 2018)
# Rene Milk (2017 - 2018)
#
# ~~~
import itertools
from dune.xt.codegen import typeid_to_typedef_name, la_backends
grids = ['Yasp2Grid']
casenames = ['ESV2007DdSubdomainsTestCase',]
testcases = ['Dune::GDT::LinearElliptic::{}<{}>'.format(c, g) for c, g in itertools.product(casenames, grids)]
permutations = itertools.product(testcases, ('gdt',), ('istl_sparse', ))
permutations = [(t, s, l, typeid_to_typedef_name('{}_{}_{}'.format(t, s, l))) for t, s, l in permutations]
| 39.5
| 110
| 0.696203
|
import itertools
from dune.xt.codegen import typeid_to_typedef_name, la_backends
grids = ['Yasp2Grid']
casenames = ['ESV2007DdSubdomainsTestCase',]
testcases = ['Dune::GDT::LinearElliptic::{}<{}>'.format(c, g) for c, g in itertools.product(casenames, grids)]
permutations = itertools.product(testcases, ('gdt',), ('istl_sparse', ))
permutations = [(t, s, l, typeid_to_typedef_name('{}_{}_{}'.format(t, s, l))) for t, s, l in permutations]
| true
| true
|
f715368c12fac7bcd0f0179357f6b421ee70790a
| 795
|
py
|
Python
|
config_music.py
|
vincenzodentamaro/transformer-xl
|
61b76d783be49e409863667bba8576826bbf54df
|
[
"MIT"
] | 16
|
2020-09-30T02:31:53.000Z
|
2022-03-09T10:27:25.000Z
|
config_music.py
|
vincenzodentamaro/transformer-xl
|
61b76d783be49e409863667bba8576826bbf54df
|
[
"MIT"
] | 4
|
2020-11-09T03:58:04.000Z
|
2021-09-21T09:00:22.000Z
|
config_music.py
|
vincenzodentamaro/transformer-xl
|
61b76d783be49e409863667bba8576826bbf54df
|
[
"MIT"
] | 5
|
2020-09-30T02:31:56.000Z
|
2021-10-06T15:50:18.000Z
|
import joblib
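# MIDI timing defaults: 500000 microseconds per quarter note (120 BPM),
# 480 ticks per quarter note (PPQ), 4/4 meter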
tempo = 500000
ppq = 480
numerator = 4
denominator = 4
clocks_per_click = 24
notated_32nd_notes_per_beat = 8
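# MIDI control changes to keep: CC 64 (sustain pedal) and CC 67 (soft pedal)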
cc_kept = [64, 67]
cc_threshold = 64
cc_lower = 0
cc_upper = 127
vel_value = 64
n_notes = 128
n_cc = 2 * len(cc_kept)
n_sounds = 2 * n_notes + n_cc + 1
n_deltas = 66 + 1
pad_idx = 0
n_jobs = joblib.cpu_count()
d_sound = 384
d_delta = 256
d_combined = d_sound + d_delta
n_heads_sound = 6
n_heads_delta = 4
n_heads_combined = n_heads_sound + n_heads_delta
n_layers_sound = 3
n_layers_delta = 3
n_layers_combined = 6
seq_len = 256
mem_len = 384
batch_size = 8
dropout_rate = 0.1
n_epochs = 200
max_segs_per_batch = 20
lr = 0.00002
use_attn_reg = True
dataset_url = 'https://storage.googleapis.com/magentadata/datasets/maestro/v2.0.0/maestro-v2.0.0-midi.zip'
| 14.722222
| 106
| 0.735849
|
import joblib
tempo = 500000
ppq = 480
numerator = 4
denominator = 4
clocks_per_click = 24
notated_32nd_notes_per_beat = 8
cc_kept = [64, 67]
cc_threshold = 64
cc_lower = 0
cc_upper = 127
vel_value = 64
n_notes = 128
n_cc = 2 * len(cc_kept)
n_sounds = 2 * n_notes + n_cc + 1
n_deltas = 66 + 1
pad_idx = 0
n_jobs = joblib.cpu_count()
d_sound = 384
d_delta = 256
d_combined = d_sound + d_delta
n_heads_sound = 6
n_heads_delta = 4
n_heads_combined = n_heads_sound + n_heads_delta
n_layers_sound = 3
n_layers_delta = 3
n_layers_combined = 6
seq_len = 256
mem_len = 384
batch_size = 8
dropout_rate = 0.1
n_epochs = 200
max_segs_per_batch = 20
lr = 0.00002
use_attn_reg = True
dataset_url = 'https://storage.googleapis.com/magentadata/datasets/maestro/v2.0.0/maestro-v2.0.0-midi.zip'
| true
| true
|
f715368cd5d00722102b52900d74a1a59b5b3689
| 1,003
|
py
|
Python
|
loaf/projects/admin.py
|
Charles4th/Loaf
|
1a42fd7c1dc74a90231acfee0d65e235eb586ea3
|
[
"MIT"
] | 1
|
2018-12-24T03:30:08.000Z
|
2018-12-24T03:30:08.000Z
|
loaf/projects/admin.py
|
Charles4th/Loaf
|
1a42fd7c1dc74a90231acfee0d65e235eb586ea3
|
[
"MIT"
] | 2
|
2020-06-05T18:34:54.000Z
|
2022-02-10T11:23:33.000Z
|
loaf/projects/admin.py
|
Charles4th/Loaf
|
1a42fd7c1dc74a90231acfee0d65e235eb586ea3
|
[
"MIT"
] | 1
|
2018-08-07T08:49:28.000Z
|
2018-08-07T08:49:28.000Z
|
from django.contrib import admin
from . import models
# Register your models here.
@admin.register(models.Project)
class ProjectAdmin(admin.ModelAdmin):
list_display_links = (
'title',
)
search_fields = (
'title',
)
list_filter = (
'title',
'creator',
)
list_display = (
'file',
'title',
'creator',
'created_at',
'updated_at',
)
@admin.register(models.Like)
class LikeAdmin(admin.ModelAdmin):
list_display = (
'creator',
'project',
'created_at',
'updated_at',
)
@admin.register(models.Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = (
'message',
'creator',
'project',
'created_at',
'updated_at',
)
@admin.register(models.Join)
class JoinAdmin(admin.ModelAdmin):
list_display = (
'joiner',
'project',
'created_at',
'updated_at',
)
| 17
| 37
| 0.540379
|
from django.contrib import admin
from . import models
@admin.register(models.Project)
class ProjectAdmin(admin.ModelAdmin):
list_display_links = (
'title',
)
search_fields = (
'title',
)
list_filter = (
'title',
'creator',
)
list_display = (
'file',
'title',
'creator',
'created_at',
'updated_at',
)
@admin.register(models.Like)
class LikeAdmin(admin.ModelAdmin):
list_display = (
'creator',
'project',
'created_at',
'updated_at',
)
@admin.register(models.Comment)
class CommentAdmin(admin.ModelAdmin):
list_display = (
'message',
'creator',
'project',
'created_at',
'updated_at',
)
@admin.register(models.Join)
class JoinAdmin(admin.ModelAdmin):
list_display = (
'joiner',
'project',
'created_at',
'updated_at',
)
| true
| true
|
f71537787e6f655fdc91b195e6460f7fc600f783
| 3,800
|
py
|
Python
|
src/solution/sdc_workspace/catkin_ws/src/sdc_package/scripts/mission_planner.py
|
coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros
|
f6d3e162888bd79d59b771c82ff028df0f70ae11
|
[
"MIT"
] | 8
|
2019-06-04T16:21:07.000Z
|
2021-09-05T07:24:20.000Z
|
src/solution/sdc_workspace/catkin_ws/src/sdc_package/scripts/mission_planner.py
|
coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros
|
f6d3e162888bd79d59b771c82ff028df0f70ae11
|
[
"MIT"
] | null | null | null |
src/solution/sdc_workspace/catkin_ws/src/sdc_package/scripts/mission_planner.py
|
coherentsolutionsinc/issoft-insights-2019-sdc-carla-ros
|
f6d3e162888bd79d59b771c82ff028df0f70ae11
|
[
"MIT"
] | 1
|
2019-06-21T14:37:18.000Z
|
2019-06-21T14:37:18.000Z
|
#!/usr/bin/env python
import os
import csv
import rospy
# TODO: 1. Import waypoint messages
from sdc_package.msg import BaseWaypoint, Path
class MissionPlanner(object):
def __init__(self):
self.start_time = None
# TODO: 2. Init mission planner node
rospy.init_node('mission_planner')
#-------------------------------------------------------------------------------------------------------
self.wait_master_initialization()
# TODO: 3. Create publisher to publish mission path to /planner/mission_waypoints topic
self.waypoints_publisher = rospy.Publisher('/planner/mission_waypoints',
Path, queue_size=1, latch=True)
#-------------------------------------------------------------------------------------------------------
# TODO: 4. Get waypoints file path from parameters
waypoints_file_path = rospy.get_param('~waypoints_path')
#-------------------------------------------------------------------------------------------------------
        # TODO: 5. Load waypoints from file using the "load_waypoints" method
waypoints = self.load_waypoints(waypoints_file_path)
#-------------------------------------------------------------------------------------------------------
# TODO: 6. Publish waypoints to created publisher
self.publish_waypoints(waypoints)
#-------------------------------------------------------------------------------------------------------
rospy.loginfo('Waypoints published')
# TODO: 7. Run Empty ROS loop to keep node online using rospy.spin()
rospy.spin()
#-------------------------------------------------------------------------------------------------------
    # wait until the ROS master node is initialized
def wait_master_initialization(self):
while not self.start_time and not rospy.is_shutdown():
self.start_time = rospy.Time.now().to_nsec()
if not rospy.is_shutdown():
rospy.loginfo('Mission Planner: ROS master initialized.')
def load_waypoints(self, path):
waypoints = []
# check if file path is valid
if os.path.isfile(path):
waypointsFile = open(path, 'r')
# read csv file
with waypointsFile:
reader = csv.reader(waypointsFile)
for row in reader:
# row[0] to access first csv element
# row[1] to access second csv element
# TODO: 8. Create new BaseWaypoint object and add to waypoints array
waypoint = BaseWaypoint()
waypoint.x = float(row[0])
waypoint.y = -float(row[1])
waypoints.append(waypoint)
#-------------------------------------------------------------------------------------------------------
rospy.loginfo('Waypoints Loaded: found %d waypoints', len(waypoints))
else:
rospy.logerr('%s is not a file', path)
return waypoints
def publish_waypoints(self, waypoints):
# TODO: 9. Crete new Path message with waypoints provided and publish to /planner/mission_waypoints topic
path = Path()
path.waypoints = waypoints
#-------------------------------------------------------------------------------------------------------
self.waypoints_publisher.publish(path)
if __name__ == '__main__':
try:
MissionPlanner()
except rospy.ROSInterruptException:
rospy.logerr('Could not start Mission Planner node.')
pass
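
A minimal sketch of the waypoints CSV format consumed by load_waypoints above (illustrative coordinates; note that the y value is negated on load):

import csv
import io

sample_csv = io.StringIO('10.0,5.0\n12.5,6.0\n')
for row in csv.reader(sample_csv):
    x, y = float(row[0]), -float(row[1])   # same parsing as load_waypoints
    print(x, y)                            # (10.0, -5.0), then (12.5, -6.0)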
| 40.425532
| 124
| 0.448947
|
import os
import csv
import rospy
from sdc_package.msg import BaseWaypoint, Path
class MissionPlanner(object):
def __init__(self):
self.start_time = None
rospy.init_node('mission_planner')
self.wait_master_initialization()
self.waypoints_publisher = rospy.Publisher('/planner/mission_waypoints',
Path, queue_size=1, latch=True)
waypoints_file_path = rospy.get_param('~waypoints_path')
waypoints = self.load_waypoints(waypoints_file_path)
self.publish_waypoints(waypoints)
rospy.loginfo('Waypoints published')
rospy.spin()
def wait_master_initialization(self):
while not self.start_time and not rospy.is_shutdown():
self.start_time = rospy.Time.now().to_nsec()
if not rospy.is_shutdown():
rospy.loginfo('Mission Planner: ROS master initialized.')
def load_waypoints(self, path):
waypoints = []
if os.path.isfile(path):
waypointsFile = open(path, 'r')
with waypointsFile:
reader = csv.reader(waypointsFile)
for row in reader:
waypoint = BaseWaypoint()
waypoint.x = float(row[0])
waypoint.y = -float(row[1])
waypoints.append(waypoint)
rospy.loginfo('Waypoints Loaded: found %d waypoints', len(waypoints))
else:
rospy.logerr('%s is not a file', path)
return waypoints
def publish_waypoints(self, waypoints):
path = Path()
path.waypoints = waypoints
self.waypoints_publisher.publish(path)
if __name__ == '__main__':
try:
MissionPlanner()
except rospy.ROSInterruptException:
rospy.logerr('Could not start Mission Planner node.')
pass
| true
| true
|
f71537954c3bd01d3b1211f2a051aa20670e6f9c
| 3,870
|
py
|
Python
|
run_local_mertric.py
|
middleprince/fashionAi
|
c512936b4983c2fb093008f06e04753180af0a90
|
[
"Apache-2.0"
] | 316
|
2018-06-01T16:21:21.000Z
|
2022-03-22T03:25:20.000Z
|
run_local_mertric.py
|
middleprince/fashionAi
|
c512936b4983c2fb093008f06e04753180af0a90
|
[
"Apache-2.0"
] | 8
|
2018-06-02T07:07:49.000Z
|
2019-07-11T06:55:43.000Z
|
run_local_mertric.py
|
middleprince/fashionAi
|
c512936b4983c2fb093008f06e04753180af0a90
|
[
"Apache-2.0"
] | 91
|
2018-06-01T17:12:21.000Z
|
2022-03-19T06:54:34.000Z
|
import os
import sys
import time
import numpy as np
import pandas as pd
import argparse
import math
import config as cfg
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
    description='The Normalized Error Metric Calculation For FashionAI Keypoint Detection Script.')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--prediction', default='',
                    help='The path of the file containing the prediction of keypoints.')
parser.add_argument('--cat', type=lambda s: s.lower() in ['True', 'true', 't', 'yes', '1'], help="whether to print the Normalized Error for each category")
parser.add_argument('--gt', default='./stage1_testb_gt.csv',
                    help='The path of the file containing the ground truth of keypoints.')
args = parser.parse_args()
def run():
if args.prediction.strip() == '' or args.gt.strip() == '':
parser.error('Must specify the file path of the prediction and ground truth.')
pred_df = pd.read_csv(args.prediction, encoding='utf-8')
gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')
num_v = 0.
sum_dist = 0.
for index, row in pred_df.iterrows():
gt = gt_df.loc[row['image_id']]
img_cat = gt['image_category']
gt_points = {}
pred_points = {}
for kp in cfg.all_keys:
pred_kp = row[kp].strip().split('_')
gt_kp = gt[kp].strip().split('_')
pred_points[kp] = [int(_) for _ in pred_kp]
gt_points[kp] = [int(_) for _ in gt_kp]
lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]
norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)
for kp in cfg.all_keys:
if gt_points[kp][-1] == -1 or norm_value < 1e-3:
continue
num_v += 1.
dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)
sum_dist += dist/norm_value
sum_dist = sum_dist/num_v
print(sum_dist)
def run_by_cat():
if args.prediction.strip() == '' or args.gt.strip() == '':
parser.error('Must specify the file path of the prediction and ground truth.')
pred_df = pd.read_csv(args.prediction, encoding='utf-8')
gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')
for cat_ in cfg.CATEGORIES:
num_v = 0.
sum_dist = 0.
for index, row in pred_df.iterrows():
gt = gt_df.loc[row['image_id']]
img_cat = gt['image_category']
if cat_ not in img_cat:
continue
gt_points = {}
pred_points = {}
for kp in cfg.all_keys:
pred_kp = row[kp].strip().split('_')
gt_kp = gt[kp].strip().split('_')
pred_points[kp] = [int(_) for _ in pred_kp]
gt_points[kp] = [int(_) for _ in gt_kp]
lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]
norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)
for kp in cfg.all_keys:
if gt_points[kp][-1] == -1 or norm_value < 1e-3:
continue
num_v += 1.
dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)
sum_dist += dist/norm_value
sum_dist = sum_dist/num_v
print('{}:'.format(cat_), sum_dist)
if __name__ == '__main__':
if not args.cat:
run()
else:
run_by_cat()
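
A worked example of the normalized-error computation above, with made-up coordinates (the two normalization points are, e.g., the armpit keypoints for a garment):

import math

lnorm, rnorm = [100, 100], [200, 100]   # hypothetical normalization points
norm_value = math.pow((lnorm[0] - rnorm[0]) ** 2 + (lnorm[1] - rnorm[1]) ** 2, 0.5)   # 100.0
pred, gt = [110, 100], [100, 100]       # one predicted / ground-truth keypoint
dist = math.pow((pred[0] - gt[0]) ** 2 + (pred[1] - gt[1]) ** 2, 0.5)                 # 10.0
print(dist / norm_value)                # 0.1, i.e. 10% of the normalization distance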
| 35.833333
| 148
| 0.582946
|
import os
import sys
import time
import numpy as np
import pandas as pd
import argparse
import math
import config as cfg
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
    description='The Normalized Error Metric Calculation For FashionAI Keypoint Detection Script.')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--prediction', default='',
                    help='The path of the file containing the prediction of keypoints.')
parser.add_argument('--cat', type=lambda s: s.lower() in ['True', 'true', 't', 'yes', '1'], help="whether to print the Normalized Error for each category")
parser.add_argument('--gt', default='./stage1_testb_gt.csv',
                    help='The path of the file containing the ground truth of keypoints.')
args = parser.parse_args()
def run():
if args.prediction.strip() == '' or args.gt.strip() == '':
parser.error('Must specify the file path of the prediction and ground truth.')
pred_df = pd.read_csv(args.prediction, encoding='utf-8')
gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')
num_v = 0.
sum_dist = 0.
for index, row in pred_df.iterrows():
gt = gt_df.loc[row['image_id']]
img_cat = gt['image_category']
gt_points = {}
pred_points = {}
for kp in cfg.all_keys:
pred_kp = row[kp].strip().split('_')
gt_kp = gt[kp].strip().split('_')
pred_points[kp] = [int(_) for _ in pred_kp]
gt_points[kp] = [int(_) for _ in gt_kp]
lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]
norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)
for kp in cfg.all_keys:
if gt_points[kp][-1] == -1 or norm_value < 1e-3:
continue
num_v += 1.
dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)
sum_dist += dist/norm_value
sum_dist = sum_dist/num_v
print(sum_dist)
def run_by_cat():
if args.prediction.strip() == '' or args.gt.strip() == '':
parser.error('Must specify the file path of the prediction and ground truth.')
pred_df = pd.read_csv(args.prediction, encoding='utf-8')
gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')
for cat_ in cfg.CATEGORIES:
num_v = 0.
sum_dist = 0.
for index, row in pred_df.iterrows():
gt = gt_df.loc[row['image_id']]
img_cat = gt['image_category']
if cat_ not in img_cat:
continue
gt_points = {}
pred_points = {}
for kp in cfg.all_keys:
pred_kp = row[kp].strip().split('_')
gt_kp = gt[kp].strip().split('_')
pred_points[kp] = [int(_) for _ in pred_kp]
gt_points[kp] = [int(_) for _ in gt_kp]
lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]
norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)
for kp in cfg.all_keys:
if gt_points[kp][-1] == -1 or norm_value < 1e-3:
continue
num_v += 1.
dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)
sum_dist += dist/norm_value
sum_dist = sum_dist/num_v
print('{}:'.format(cat_), sum_dist)
if __name__ == '__main__':
if not args.cat:
run()
else:
run_by_cat()
| true
| true
|
f71538dd1163b1cc57d1780f5f59d458c6767583
| 5,306
|
py
|
Python
|
examples/plot_SimpleLineTest.py
|
aasensio/Lightweaver
|
9a261e72235f05df548148da140012f40dbd1e4b
|
[
"MIT"
] | 13
|
2020-01-13T14:01:23.000Z
|
2022-03-11T08:36:45.000Z
|
examples/plot_SimpleLineTest.py
|
aasensio/Lightweaver
|
9a261e72235f05df548148da140012f40dbd1e4b
|
[
"MIT"
] | 30
|
2020-01-17T13:00:37.000Z
|
2022-03-07T12:08:37.000Z
|
examples/plot_SimpleLineTest.py
|
aasensio/Lightweaver
|
9a261e72235f05df548148da140012f40dbd1e4b
|
[
"MIT"
] | 4
|
2021-07-07T11:21:07.000Z
|
2021-11-23T06:52:02.000Z
|
"""
===============================================================
Computing a simple NLTE 8542 line profile in a FAL C atmosphere
===============================================================
"""
#%%
# First, we import everything we need. Lightweaver is typically imported as
# `lw`, but things like the library of model atoms and Fal atmospheres need to
# be imported separately.
from lightweaver.fal import Falc82
from lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, Si_atom, Al_atom, \
CaII_atom, Fe_atom, He_9_atom, He_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
import matplotlib.pyplot as plt
import time
import numpy as np
#%%
# Now, we define the functions that will be used in our spectral synthesis.
# First `synth_8542`, which synthesises and returns the line for a given
# atmosphere.
def synth_8542(atmos, conserve, useNe, wave):
'''
    Synthesise a spectral line for a given atmosphere under different
    conditions.
Parameters
----------
atmos : lw.Atmosphere
The atmospheric model in which to synthesise the line.
conserve : bool
        Whether to start from the LTE electron density and conserve charge, or
        simply use the electron density present in the atmospheric model.
useNe : bool
Whether to use the electron density present in the model as the
starting solution, or compute the LTE electron density.
wave : np.ndarray
Array of wavelengths over which to resynthesise the final line
profile for muz=1.
Returns
-------
ctx : lw.Context
The Context object that was used to compute the equilibrium
populations.
Iwave : np.ndarray
The intensity at muz=1 for each wavelength in `wave`.
'''
# Configure the atmospheric angular quadrature
atmos.quadrature(5)
# Configure the set of atomic models to use.
aSet = lw.RadiativeSet([H_6_atom(), C_atom(), O_atom(), Si_atom(),
Al_atom(), CaII_atom(), Fe_atom(), He_9_atom(),
MgII_atom(), N_atom(), Na_atom(), S_atom()
])
# Set H and Ca to "active" i.e. NLTE, everything else participates as an
# LTE background.
aSet.set_active('H', 'Ca')
# Compute the necessary wavelength dependent information (SpectrumConfiguration).
spect = aSet.compute_wavelength_grid()
# Either compute the equilibrium populations at the fixed electron density
# provided in the model, or iterate an LTE electron density and compute the
# corresponding equilibrium populations (SpeciesStateTable).
if useNe:
eqPops = aSet.compute_eq_pops(atmos)
else:
eqPops = aSet.iterate_lte_ne_eq_pops(atmos)
# Configure the Context which holds the state of the simulation for the
# backend, and provides the python interface to the backend.
# Feel free to increase Nthreads to increase the number of threads the
# program will use.
ctx = lw.Context(atmos, spect, eqPops, conserveCharge=conserve, Nthreads=1)
start = time.time()
# Iterate the Context to convergence
iterate_ctx(ctx)
end = time.time()
print('%.2f s' % (end - start))
# Update the background populations based on the converged solution and
# compute the final intensity for mu=1 on the provided wavelength grid.
eqPops.update_lte_atoms_Hmin_pops(atmos)
Iwave = ctx.compute_rays(wave, [atmos.muz[-1]], stokes=False)
return ctx, Iwave
def iterate_ctx(ctx, Nscatter=3, NmaxIter=500):
'''
Iterate a Context to convergence.
'''
for i in range(NmaxIter):
# Compute the formal solution
dJ = ctx.formal_sol_gamma_matrices()
# Just update J for Nscatter iterations
if i < Nscatter:
continue
# Update the active populations under statistical equilibrium,
# conserving charge if this option was set on the Context.
delta = ctx.stat_equil()
# If we are converged in both relative change of J and populations,
# then print a message and return
# N.B. as this is just a simple case, there is no checking for failure
        # to converge within the NmaxIter. This could be achieved simply with an
# else block after this for.
if dJ < 3e-3 and delta < 1e-3:
print('%d iterations' % i)
print('-'*80)
return
#%%
# The wavelength grid to output the final synthesised line on.
wave = np.linspace(853.9444, 854.9444, 1001)
#%%
# Load an lw.Atmosphere object containing the FAL C atmosphere with 82 points
# in depth, before synthesising the Ca II 8542 \AA line profile using:
#
# - The given electron density.
# - The electron density charge conserved from a starting LTE solution.
# - The LTE electron density.
#
# These results are then plotted.
atmosRef = Falc82()
ctxRef, IwaveRef = synth_8542(atmosRef, conserve=False, useNe=True, wave=wave)
atmosCons = Falc82()
ctxCons, IwaveCons = synth_8542(atmosCons, conserve=True, useNe=False, wave=wave)
atmosLte = Falc82()
ctx, IwaveLte = synth_8542(atmosLte, conserve=False, useNe=False, wave=wave)
plt.plot(wave, IwaveRef, label='Reference FAL')
plt.plot(wave, IwaveCons, label='Reference Cons')
plt.plot(wave, IwaveLte, label='Reference LTE n_e')
plt.show()
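
The convergence pattern in iterate_ctx generalizes beyond Lightweaver; below is a standalone sketch with dummy update functions standing in for the context methods (an illustration, not part of the Lightweaver API):

def converge(update_J, update_pops, Nscatter=3, NmaxIter=500):
    for i in range(NmaxIter):
        dJ = update_J()          # stands in for formal_sol_gamma_matrices
        if i < Nscatter:
            continue             # let J settle before touching populations
        delta = update_pops()    # stands in for stat_equil
        if dJ < 3e-3 and delta < 1e-3:
            return i             # converged on both criteria
    raise RuntimeError('did not converge within NmaxIter')

def make_decaying(start, rate):
    # Dummy residual that shrinks geometrically on every call.
    state = {'v': start}
    def step():
        state['v'] *= rate
        return state['v']
    return step

print(converge(make_decaying(1.0, 0.5), make_decaying(1.0, 0.5)))   # 12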
| 38.729927
| 85
| 0.671692
|
from lightweaver.fal import Falc82
from lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, Si_atom, Al_atom, \
CaII_atom, Fe_atom, He_9_atom, He_atom, MgII_atom, N_atom, Na_atom, S_atom
import lightweaver as lw
import matplotlib.pyplot as plt
import time
import numpy as np
def synth_8542(atmos, conserve, useNe, wave):
atmos.quadrature(5)
aSet = lw.RadiativeSet([H_6_atom(), C_atom(), O_atom(), Si_atom(),
Al_atom(), CaII_atom(), Fe_atom(), He_9_atom(),
MgII_atom(), N_atom(), Na_atom(), S_atom()
])
aSet.set_active('H', 'Ca')
spect = aSet.compute_wavelength_grid()
if useNe:
eqPops = aSet.compute_eq_pops(atmos)
else:
eqPops = aSet.iterate_lte_ne_eq_pops(atmos)
ctx = lw.Context(atmos, spect, eqPops, conserveCharge=conserve, Nthreads=1)
start = time.time()
iterate_ctx(ctx)
end = time.time()
print('%.2f s' % (end - start))
eqPops.update_lte_atoms_Hmin_pops(atmos)
Iwave = ctx.compute_rays(wave, [atmos.muz[-1]], stokes=False)
return ctx, Iwave
def iterate_ctx(ctx, Nscatter=3, NmaxIter=500):
for i in range(NmaxIter):
dJ = ctx.formal_sol_gamma_matrices()
if i < Nscatter:
continue
delta = ctx.stat_equil()
if dJ < 3e-3 and delta < 1e-3:
print('%d iterations' % i)
print('-'*80)
return
wave = np.linspace(853.9444, 854.9444, 1001)
atmosRef = Falc82()
ctxRef, IwaveRef = synth_8542(atmosRef, conserve=False, useNe=True, wave=wave)
atmosCons = Falc82()
ctxCons, IwaveCons = synth_8542(atmosCons, conserve=True, useNe=False, wave=wave)
atmosLte = Falc82()
ctx, IwaveLte = synth_8542(atmosLte, conserve=False, useNe=False, wave=wave)
plt.plot(wave, IwaveRef, label='Reference FAL')
plt.plot(wave, IwaveCons, label='Reference Cons')
plt.plot(wave, IwaveLte, label='Reference LTE n_e')
plt.show()
| true
| true
|
f7153948cabbb10fc8cd4bd9ce5fe812b7a32534
| 1,921
|
py
|
Python
|
rme/datasets/mnist.py
|
satishjasthi/convnet-study
|
ccd20c90e449fc8db694abf706db178e9413e57b
|
[
"MIT"
] | 40
|
2016-09-17T00:57:42.000Z
|
2021-09-25T05:24:27.000Z
|
rme/datasets/mnist.py
|
satishjasthi/convnet-study
|
ccd20c90e449fc8db694abf706db178e9413e57b
|
[
"MIT"
] | 1
|
2017-09-08T08:29:31.000Z
|
2017-09-13T23:21:09.000Z
|
rme/datasets/mnist.py
|
satishjasthi/convnet-study
|
ccd20c90e449fc8db694abf706db178e9413e57b
|
[
"MIT"
] | 22
|
2016-11-06T03:57:22.000Z
|
2021-09-25T05:24:32.000Z
|
from __future__ import absolute_import
import os
import numpy as np
import gzip
import struct
from .preprocessing import one_hotify
def load(data_dir, valid_ratio=0.0, one_hot=True, shuffle=False, dtype='float32'):
train_set, valid_set, test_set = {}, {}, {}
# Get data from binary files
for img_set, file_name in zip((train_set, test_set), ('train', 't10k')):
# Load images
img_path = os.path.join(data_dir, file_name + '-images-idx3-ubyte.gz')
with gzip.open(img_path, 'rb') as f:
magic_num, num_imgs, num_rows, num_cols = struct.unpack('>iiii',
f.read(16))
shape = (num_imgs, num_rows, num_cols, 1)
            img_set['data'] = np.frombuffer(f.read(),
dtype='uint8').astype(dtype).reshape(shape)
# Load labels
label_path = os.path.join(data_dir, file_name + '-labels-idx1-ubyte.gz')
with gzip.open(label_path, 'rb') as f:
magic_num, num_labels = struct.unpack('>ii', f.read(8))
            img_set['labels'] = np.frombuffer(f.read(),
dtype='uint8').astype('int')
if one_hot:
img_set['labels'] = one_hotify(img_set['labels'])
N = train_set['data'].shape[0]
if shuffle:
# Shuffle and separate between training and validation set
new_order = np.random.permutation(np.arange(N))
train_set['data'] = train_set['data'][new_order]
train_set['labels'] = train_set['labels'][new_order]
# Get the number of samples on the training set
M = int((1 - valid_ratio)*N)
# Separate validation set
valid_set['data'] = train_set['data'][M:]
valid_set['labels'] = train_set['labels'][M:]
train_set['data'] = train_set['data'][:M]
train_set['labels'] = train_set['labels'][:M]
return train_set, valid_set, test_set
def preprocess(dataset):
mean = 33.3
std = 78.6
dataset -= mean
dataset /= std
return dataset
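
A self-contained sketch of the IDX layout parsed by load above: pack a tiny fake image file in memory, then read it back with the same big-endian '>iiii' header format (2051 is the standard MNIST image-file magic number):

import gzip
import io
import struct
import numpy as np

num_imgs, num_rows, num_cols = 2, 3, 3
payload = struct.pack('>iiii', 2051, num_imgs, num_rows, num_cols)
payload += bytes(range(num_imgs * num_rows * num_cols))   # fake pixel data

buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as f:
    f.write(payload)
buf.seek(0)

with gzip.GzipFile(fileobj=buf, mode='rb') as f:
    magic, n, rows, cols = struct.unpack('>iiii', f.read(16))
    data = np.frombuffer(f.read(), dtype='uint8').reshape(n, rows, cols, 1)
print(magic, data.shape)   # 2051 (2, 3, 3, 1)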
| 33.12069
| 82
| 0.630401
|
from __future__ import absolute_import
import os
import numpy as np
import gzip
import struct
from .preprocessing import one_hotify
def load(data_dir, valid_ratio=0.0, one_hot=True, shuffle=False, dtype='float32'):
train_set, valid_set, test_set = {}, {}, {}
for img_set, file_name in zip((train_set, test_set), ('train', 't10k')):
img_path = os.path.join(data_dir, file_name + '-images-idx3-ubyte.gz')
with gzip.open(img_path, 'rb') as f:
magic_num, num_imgs, num_rows, num_cols = struct.unpack('>iiii',
f.read(16))
shape = (num_imgs, num_rows, num_cols, 1)
            img_set['data'] = np.frombuffer(f.read(),
dtype='uint8').astype(dtype).reshape(shape)
label_path = os.path.join(data_dir, file_name + '-labels-idx1-ubyte.gz')
with gzip.open(label_path, 'rb') as f:
magic_num, num_labels = struct.unpack('>ii', f.read(8))
            img_set['labels'] = np.frombuffer(f.read(),
dtype='uint8').astype('int')
if one_hot:
img_set['labels'] = one_hotify(img_set['labels'])
N = train_set['data'].shape[0]
if shuffle:
new_order = np.random.permutation(np.arange(N))
train_set['data'] = train_set['data'][new_order]
train_set['labels'] = train_set['labels'][new_order]
M = int((1 - valid_ratio)*N)
valid_set['data'] = train_set['data'][M:]
valid_set['labels'] = train_set['labels'][M:]
train_set['data'] = train_set['data'][:M]
train_set['labels'] = train_set['labels'][:M]
return train_set, valid_set, test_set
def preprocess(dataset):
mean = 33.3
std = 78.6
dataset -= mean
dataset /= std
return dataset
| true
| true
|
f7153a02e898f5f116d487d957f85db359c928ad
| 5,631
|
py
|
Python
|
run_preprocessing_oggm.py
|
Wang518hongyu/PyGEM
|
1c9fa133133b3d463b1383d4792c535fa61c5b8d
|
[
"MIT"
] | null | null | null |
run_preprocessing_oggm.py
|
Wang518hongyu/PyGEM
|
1c9fa133133b3d463b1383d4792c535fa61c5b8d
|
[
"MIT"
] | null | null | null |
run_preprocessing_oggm.py
|
Wang518hongyu/PyGEM
|
1c9fa133133b3d463b1383d4792c535fa61c5b8d
|
[
"MIT"
] | null | null | null |
""" PRE-PROCESSING FOR MODEL RUNS USING OGGM """
# Built-in libraries
import argparse
import collections
import inspect
import multiprocessing
import os
import time
# External libraries
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
# Local libraries
import class_climate
#import class_mbdata
import pygem.pygem_input as pygem_prms
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_modelsetup as modelsetup
import spc_split_glaciers as split_glaciers
from oggm import cfg
from oggm import graphics
from oggm import tasks, utils, workflow
from oggm.core import climate
from oggm.core.flowline import FluxBasedModel
from oggm.shop import rgitopo
from pygem.massbalance import PyGEMMassBalance
from pygem.glacierdynamics import MassRedistributionCurveModel
from pygem.oggm_compat import single_flowline_glacier_directory
from pygem.shop import calving, debris, mbdata, icethickness
#%%
# ===== OGGM CONFIG FILE =====
# Initialize OGGM and set up the default run parameters
cfg.initialize(logging_level='WORKFLOW')
cfg.PARAMS['use_multiprocessing'] = False
#cfg.PARAMS['mp_processes'] = 1
cfg.PARAMS['border'] = 10
# Download verification is usually recommended; it can be quite slow because
# of the huge files, but it is left enabled here.
cfg.PARAMS['dl_verify'] = True
cfg.PARAMS['use_multiple_flowlines'] = False
# temporary directory for testing (deleted on computer restart)
#cfg.PATHS['working_dir'] = utils.get_temp_dir('PyGEM_ex')
cfg.PATHS['working_dir'] = pygem_prms.oggm_gdir_fp
# ===== LOAD GLACIERS =====
if pygem_prms.glac_no is not None:
glac_no = pygem_prms.glac_no
else:
main_glac_rgi_all = modelsetup.selectglaciersrgitable(
rgi_regionsO1=pygem_prms.rgi_regionsO1, rgi_regionsO2=pygem_prms.rgi_regionsO2,
rgi_glac_number=pygem_prms.rgi_glac_number)
glac_no = list(main_glac_rgi_all['rgino_str'].values)
rgi_ids = ['RGI60-' + x.split('.')[0].zfill(2) + '.' + x.split('.')[1] for x in glac_no]
#%% ===== SELECT BEST DEM =====
# Get the pre-processed topography data
# - creates directories from scratch
gdirs = rgitopo.init_glacier_directories_from_rgitopo(rgi_ids)
# ===== FLOWLINES (w debris) =====
# - checks if directories are created (only use if you're on an already prepared directory)
#gdirs = workflow.init_glacier_directories(rgi_ids)
print('\nTO-DO LIST:')
print(' - reinstall from git\n\n')
# Compute all the stuff
list_tasks = [
# Tasks for OGGM
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.catchment_area,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
# tasks.compute_downstream_line, # check??
# tasks.compute_downstream_bedshape,
# OGGM needs this to advance the glacier - it will be the exact same simply with additional bins below
# - init_present_time_glacier does this!
# # New workflow following Huss and Farinotti (2012) - squeezed flowline
    # # - squeezed flowline averages slope of all branches over a bin
# # - OGGM does it based on the main flowline where most of the mass is; also we have more control with frontal ablation width
# Debris tasks
debris.debris_to_gdir,
debris.debris_binned,
# Consensus ice thickness
icethickness.consensus_gridded,
icethickness.consensus_binned,
# Mass balance data
mbdata.mb_df_to_gdir,
]
for task in list_tasks:
workflow.execute_entity_task(task, gdirs)
## ===== Mass balance data =====
##mbdata.mb_bins_to_reg_glacierwide(mb_binned_fp=pygem_prms.mb_binned_fp, O1Regions=['01'])
##workflow.execute_entity_task(mbdata.mb_bins_to_glacierwide, gdirs)
#workflow.execute_entity_task(mbdata.mb_df_to_gdir, gdirs)
# ===== CALVING CALIBRATION =====
# Individual glaciers
#for gdir in gdirs:
# if gdir.is_tidewater:
# calving.calibrate_calving_k_single_wconsensus(gdir)
## Perform inversion based on PyGEM MB
### Add thickness, width_m, and dx_meter to inversion flowlines so they are compatible with PyGEM's
### mass balance model (necessary because OGGM's inversion flowlines use pixel distances; however,
### this will likely be rectified in the future)
#fls_inv = gdirs[0].read_pickle('inversion_flowlines')
#%%
# ----- Alternative to use squeezed flowlines from Huss and Farinotti (2012) -----
#tasks.simple_glacier_masks, # much more robust mask than the one used for flowlines
#tasks.elevation_band_flowline, # same as Huss and Farinotti; produces the binned elevation (30m), length, and width
#tasks.fixed_dx_elevation_band_flowline, # converts the binned elevation, length, width to the fixed dx grid in OGGM
# # output is the same flowline object
# ----- Alternative way of running tasks -----
#for rgi_id in rgi_ids:
# gdirs = rgitopo.init_glacier_directories_from_rgitopo([rgi_id])
# gdir = gdirs[0]
# tasks.glacier_masks(gdir)
# tasks.compute_centerlines(gdir)
# tasks.glacier_masks(gdir)
# tasks.compute_centerlines(gdir)
# tasks.initialize_flowlines(gdir)
# tasks.compute_downstream_line(gdir)
# tasks.catchment_area(gdir)
# tasks.catchment_width_geom(gdir)
# tasks.catchment_width_correction(gdir)
# # Debris tasks
# debris.debris_to_gdir(gdir)
# debris.debris_binned(gdir)
# # Consensus ice thickness
# icethickness.consensus_gridded(gdir)
# icethickness.consensus_binned(gdir)
# # Tidewater
# if gdir.is_tidewater:
# calving.calibrate_calving_k_single_wconsensus(gdir)
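
A worked example of the glac_no to RGI-id conversion used above (made-up glacier numbers):

glac_no = ['1.00570', '15.03733']
rgi_ids = ['RGI60-' + x.split('.')[0].zfill(2) + '.' + x.split('.')[1] for x in glac_no]
print(rgi_ids)   # ['RGI60-01.00570', 'RGI60-15.03733']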
| 35.866242
| 129
| 0.74818
|
import argparse
import collections
import inspect
import multiprocessing
import os
import time
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import class_climate
import pygem.pygem_input as pygem_prms
import pygemfxns_gcmbiasadj as gcmbiasadj
import pygemfxns_modelsetup as modelsetup
import spc_split_glaciers as split_glaciers
from oggm import cfg
from oggm import graphics
from oggm import tasks, utils, workflow
from oggm.core import climate
from oggm.core.flowline import FluxBasedModel
from oggm.shop import rgitopo
from pygem.massbalance import PyGEMMassBalance
from pygem.glacierdynamics import MassRedistributionCurveModel
from pygem.oggm_compat import single_flowline_glacier_directory
from pygem.shop import calving, debris, mbdata, icethickness
cfg.initialize(logging_level='WORKFLOW')
cfg.PARAMS['use_multiprocessing'] = False
cfg.PARAMS['border'] = 10
cfg.PARAMS['dl_verify'] = True
cfg.PARAMS['use_multiple_flowlines'] = False
cfg.PATHS['working_dir'] = pygem_prms.oggm_gdir_fp
if pygem_prms.glac_no is not None:
glac_no = pygem_prms.glac_no
else:
main_glac_rgi_all = modelsetup.selectglaciersrgitable(
rgi_regionsO1=pygem_prms.rgi_regionsO1, rgi_regionsO2=pygem_prms.rgi_regionsO2,
rgi_glac_number=pygem_prms.rgi_glac_number)
glac_no = list(main_glac_rgi_all['rgino_str'].values)
rgi_ids = ['RGI60-' + x.split('.')[0].zfill(2) + '.' + x.split('.')[1] for x in glac_no]
gdirs = rgitopo.init_glacier_directories_from_rgitopo(rgi_ids)
#gdirs = workflow.init_glacier_directories(rgi_ids)
print('\nTO-DO LIST:')
print(' - reinstall from git\n\n')
# Compute all the stuff
list_tasks = [
# Tasks for OGGM
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.catchment_area,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
# tasks.compute_downstream_line, # check??
# tasks.compute_downstream_bedshape,
# OGGM needs this to advance the glacier - it will be the exact same simply with additional bins below
# - init_present_time_glacier does this!
# # New workflow following Huss and Farinotti (2012) - squeezed flowline
    # # - squeezed flowline averages slope of all branches over a bin
# # - OGGM does it based on the main flowline where most of the mass is; also we have more control with frontal ablation width
# Debris tasks
debris.debris_to_gdir,
debris.debris_binned,
# Consensus ice thickness
icethickness.consensus_gridded,
icethickness.consensus_binned,
# Mass balance data
mbdata.mb_df_to_gdir,
]
for task in list_tasks:
workflow.execute_entity_task(task, gdirs)
## ===== Mass balance data =====
##mbdata.mb_bins_to_reg_glacierwide(mb_binned_fp=pygem_prms.mb_binned_fp, O1Regions=['01'])
##workflow.execute_entity_task(mbdata.mb_bins_to_glacierwide, gdirs)
#workflow.execute_entity_task(mbdata.mb_df_to_gdir, gdirs)
# ===== CALVING CALIBRATION =====
# Individual glaciers
#for gdir in gdirs:
# if gdir.is_tidewater:
# calving.calibrate_calving_k_single_wconsensus(gdir)
## Perform inversion based on PyGEM MB
### Add thickness, width_m, and dx_meter to inversion flowlines so they are compatible with PyGEM's
### mass balance model (necessary because OGGM's inversion flowlines use pixel distances; however,
### this will likely be rectified in the future)
#fls_inv = gdirs[0].read_pickle('inversion_flowlines')
# ----- Alternative to use squeezed flowlines from Huss and Farinotti (2012) -----
#tasks.simple_glacier_masks, # much more robust mask than the one used for flowlines
#tasks.elevation_band_flowline, # same as Huss and Farinotti; produces the binned elevation (30m), length, and width
#tasks.fixed_dx_elevation_band_flowline, # converts the binned elevation, length, width to the fixed dx grid in OGGM
# # output is the same flowline object
# ----- Alternative way of running tasks -----
#for rgi_id in rgi_ids:
# gdirs = rgitopo.init_glacier_directories_from_rgitopo([rgi_id])
# gdir = gdirs[0]
# tasks.glacier_masks(gdir)
# tasks.compute_centerlines(gdir)
# tasks.glacier_masks(gdir)
# tasks.compute_centerlines(gdir)
# tasks.initialize_flowlines(gdir)
# tasks.compute_downstream_line(gdir)
# tasks.catchment_area(gdir)
# tasks.catchment_width_geom(gdir)
# tasks.catchment_width_correction(gdir)
# # Debris tasks
# debris.debris_to_gdir(gdir)
# debris.debris_binned(gdir)
# # Consensus ice thickness
# icethickness.consensus_gridded(gdir)
# icethickness.consensus_binned(gdir)
# # Tidewater
# if gdir.is_tidewater:
# calving.calibrate_calving_k_single_wconsensus(gdir)
| true
| true
|
f7153a70ee09cafbc4a4a4209f921a512961caf3
| 308
|
py
|
Python
|
aispace/datasets/tokenizer/__init__.py
|
SmileGoat/AiSpace
|
35fc120667e4263c99b300815e0bf018f5064a40
|
[
"Apache-2.0"
] | 32
|
2020-01-16T07:59:03.000Z
|
2022-03-31T09:24:00.000Z
|
aispace/datasets/tokenizer/__init__.py
|
SmileGoat/AiSpace
|
35fc120667e4263c99b300815e0bf018f5064a40
|
[
"Apache-2.0"
] | 9
|
2020-06-05T03:27:06.000Z
|
2022-03-12T01:00:17.000Z
|
aispace/datasets/tokenizer/__init__.py
|
SmileGoat/AiSpace
|
35fc120667e4263c99b300815e0bf018f5064a40
|
[
"Apache-2.0"
] | 3
|
2020-06-09T02:22:50.000Z
|
2021-07-19T06:07:07.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2019-11-10 16:50
# @Author : yingyuankai
# @Email : yingyuankai@aliyun.com
# @File : __init__.py
from .bert_tokenizer import BertTokenizer
from .tokenizer_base import BaseTokenizer
from .xlnet_tokenizer import XlnetTokenizer
from .gpt_tokenizer import CPMTokenizer
| 30.8
| 43
| 0.746753
|
from .bert_tokenizer import BertTokenizer
from .tokenizer_base import BaseTokenizer
from .xlnet_tokenizer import XlnetTokenizer
from .gpt_tokenizer import CPMTokenizer
| true
| true
|
f7153af43ab719b288088a86b292514bb5b4ec0a
| 2,233
|
py
|
Python
|
gcalcli/authorization.py
|
kdrabek/gcalcli
|
c05d84ea14a0e85f3689efc6ddd258de33c76e95
|
[
"MIT"
] | null | null | null |
gcalcli/authorization.py
|
kdrabek/gcalcli
|
c05d84ea14a0e85f3689efc6ddd258de33c76e95
|
[
"MIT"
] | null | null | null |
gcalcli/authorization.py
|
kdrabek/gcalcli
|
c05d84ea14a0e85f3689efc6ddd258de33c76e95
|
[
"MIT"
] | null | null | null |
import json
from pathlib import Path
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import Flow
# set of permissions for a particular API
SCOPES = 'https://www.googleapis.com/auth/calendar'
CONFIG_PATH = Path.home() / '.gcalcli'
CREDENTIALS_PATH = CONFIG_PATH / 'credentials.json'
TOKEN_PATH = CONFIG_PATH / 'token.json'
def open_file(path, formatter=None):
with open(path, 'r') as f:
if formatter:
return formatter(f.read())
return f.read()
def save_file(path, content):
with open(path, 'w') as f:
return f.write(content)
def create_credentials(token, flow, scopes=SCOPES):
return Credentials(
token['access_token'],
refresh_token=token['refresh_token'],
token_uri=flow.client_config['token_uri'],
client_id=flow.client_config['client_id'],
client_secret=flow.client_config['client_secret'],
scopes=scopes
)
def setup_authentication():
Path.mkdir(CONFIG_PATH, exist_ok=True)
print('Please go to Google API console,')
print('then generate & download credentials .json file')
creds = input("Paste contents of the file here: ")
save_file(CREDENTIALS_PATH, creds)
flow = Flow.from_client_secrets_file(
CREDENTIALS_PATH, SCOPES, redirect_uri='urn:ietf:wg:oauth:2.0:oob'
)
auth_url, _ = flow.authorization_url()
print('Please go to this URL: {}'.format(auth_url))
code = input('Enter the authorization code: ')
token = flow.fetch_token(code=code)
save_file(TOKEN_PATH, json.dumps(token))
return create_credentials(token, flow)
def is_authentication_setup():
Path.mkdir(CONFIG_PATH, exist_ok=True)
try:
token = open_file(CREDENTIALS_PATH, json.loads)
credentials = open_file(TOKEN_PATH)
except Exception as e:
print(e)
return False
return token is not None and credentials is not None
def load_credentials():
Path.mkdir(CONFIG_PATH, exist_ok=True)
flow = Flow.from_client_secrets_file(
CREDENTIALS_PATH, SCOPES,
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
token = open_file(TOKEN_PATH, formatter=json.loads)
return create_credentials(token, flow)
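
A minimal usage sketch of the helpers above; the googleapiclient call shows how the returned credentials would typically be consumed (an assumption about the downstream consumer, not part of this module):

from googleapiclient.discovery import build   # assumed downstream dependency

if is_authentication_setup():
    creds = load_credentials()
else:
    creds = setup_authentication()
service = build('calendar', 'v3', credentials=creds)   # hypothetical consumer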
| 27.9125
| 74
| 0.695477
|
import json
from pathlib import Path
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import Flow
SCOPES = 'https://www.googleapis.com/auth/calendar'
CONFIG_PATH = Path.home() / '.gcalcli'
CREDENTIALS_PATH = CONFIG_PATH / 'credentials.json'
TOKEN_PATH = CONFIG_PATH / 'token.json'
def open_file(path, formatter=None):
with open(path, 'r') as f:
if formatter:
return formatter(f.read())
return f.read()
def save_file(path, content):
with open(path, 'w') as f:
return f.write(content)
def create_credentials(token, flow, scopes=SCOPES):
return Credentials(
token['access_token'],
refresh_token=token['refresh_token'],
token_uri=flow.client_config['token_uri'],
client_id=flow.client_config['client_id'],
client_secret=flow.client_config['client_secret'],
scopes=scopes
)
def setup_authentication():
Path.mkdir(CONFIG_PATH, exist_ok=True)
print('Please go to Google API console,')
print('then generate & download credentials .json file')
creds = input("Paste contents of the file here: ")
save_file(CREDENTIALS_PATH, creds)
flow = Flow.from_client_secrets_file(
CREDENTIALS_PATH, SCOPES, redirect_uri='urn:ietf:wg:oauth:2.0:oob'
)
auth_url, _ = flow.authorization_url()
print('Please go to this URL: {}'.format(auth_url))
code = input('Enter the authorization code: ')
token = flow.fetch_token(code=code)
save_file(TOKEN_PATH, json.dumps(token))
return create_credentials(token, flow)
def is_authentication_setup():
Path.mkdir(CONFIG_PATH, exist_ok=True)
try:
token = open_file(CREDENTIALS_PATH, json.loads)
credentials = open_file(TOKEN_PATH)
except Exception as e:
print(e)
return False
return token is not None and credentials is not None
def load_credentials():
Path.mkdir(CONFIG_PATH, exist_ok=True)
flow = Flow.from_client_secrets_file(
CREDENTIALS_PATH, SCOPES,
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
token = open_file(TOKEN_PATH, formatter=json.loads)
return create_credentials(token, flow)
| true
| true
|
f7153b1e77bb06edb0103c75b470f2e4165017f6
| 4,895
|
py
|
Python
|
snpdb/views/views_autocomplete.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 5
|
2021-01-14T03:34:42.000Z
|
2022-03-07T15:34:18.000Z
|
snpdb/views/views_autocomplete.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | 551
|
2020-10-19T00:02:38.000Z
|
2022-03-30T02:18:22.000Z
|
snpdb/views/views_autocomplete.py
|
SACGF/variantgrid
|
515195e2f03a0da3a3e5f2919d8e0431babfd9c9
|
[
"RSA-MD"
] | null | null | null |
from abc import ABC
from django.contrib.auth.models import User
from django.db.models.functions import Length
from django.db.models.query_utils import Q
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.views.decorators.vary import vary_on_cookie
from library.constants import MINUTE_SECS
from library.django_utils.autocomplete_utils import AutocompleteView
from snpdb.models import VCF, Sample, Cohort, CustomColumnsCollection, CustomColumn, Tag, Trio, \
Lab, GenomicIntervalsCollection, GenomeBuild, ImportStatus, Project
class GenomeBuildAutocompleteView(AutocompleteView, ABC):
def filter_to_genome_build(self, qs, path_to_genome_build):
genome_build_id = self.forwarded.get('genome_build_id')
if genome_build_id:
genome_build = GenomeBuild.objects.get(pk=genome_build_id)
qs = qs.filter(**{path_to_genome_build: genome_build})
return qs
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class UserAutocompleteView(AutocompleteView):
fields = ['last_name', 'first_name', 'username']
def get_user_queryset(self, user):
return User.objects.all()
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class UsernameAutocompleteView(AutocompleteView):
"""
    Needed to keep this separate from UserAutocompleteView for the sake of sort order
"""
fields = ['username', 'first_name', 'last_name']
def get_user_queryset(self, user):
return User.objects.all()
def get_result_label(self, obj):
return obj.username
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class LabAutocompleteView(AutocompleteView):
fields = ['organization__name', 'name']
def get_user_queryset(self, user):
return Lab.objects.filter(organization__active=True)
def get_result_label(self, obj):
return f'{obj.organization.name} - {obj.name}'
@method_decorator([cache_page(MINUTE_SECS)], name='get')  # Doesn't need to vary_on_cookie as there are no permissions on Project
class ProjectAutocompleteView(AutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
return Project.objects.all()
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class VCFAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
qs = VCF.filter_for_user(user, True).filter(import_status=ImportStatus.SUCCESS)
return self.filter_to_genome_build(qs, "genome_build")
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class SampleAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
sample_qs = Sample.filter_for_user(user, True).filter(import_status=ImportStatus.SUCCESS)
return self.filter_to_genome_build(sample_qs, "vcf__genome_build")
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class CohortAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
vcf_success_if_exists = Q(vcf__isnull=True) | Q(vcf__import_status=ImportStatus.SUCCESS)
qs = Cohort.filter_for_user(user, success_status_only=True).filter(vcf_success_if_exists)
return self.filter_to_genome_build(qs, "genome_build")
class CustomColumnAutocompleteView(AutocompleteView):
fields = ['column__grid_column_name']
def get_user_queryset(self, user):
# Called different things in Analysis/UserSettings
columns = self.forwarded.get('columns') or self.forwarded.get('custom_columns_collection')
if columns:
custom_columns_collections_qs = CustomColumnsCollection.filter_for_user(user).filter(pk=columns)
else:
custom_columns_collections_qs = CustomColumnsCollection.objects.none()
return CustomColumn.objects.filter(custom_columns_collection__in=custom_columns_collections_qs)
class GenomicIntervalsCollectionAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
qs = GenomicIntervalsCollection.filter_for_user(user).filter(import_status=ImportStatus.SUCCESS)
return self.filter_to_genome_build(qs, "genome_build")
@method_decorator(cache_page(5), name='dispatch')
class TagAutocompleteView(AutocompleteView):
fields = ['id']
def get_user_queryset(self, user):
return Tag.objects.all().order_by(Length("id").asc())
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class TrioAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
qs = Trio.filter_for_user(user, success_status_only=True)
return self.filter_to_genome_build(qs, "cohort__genome_build")
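
The method_decorator(cache_page(...)) pattern used throughout applies a function decorator to a method of a class-based view; a minimal standalone sketch (the PingView is hypothetical, not part of this module):

from django.http import JsonResponse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.cache import cache_page

@method_decorator(cache_page(60), name='dispatch')   # cache responses for 60 s
class PingView(View):
    def get(self, request):
        return JsonResponse({'pong': True})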
| 37.083333
| 116
| 0.756486
|
from abc import ABC
from django.contrib.auth.models import User
from django.db.models.functions import Length
from django.db.models.query_utils import Q
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.views.decorators.vary import vary_on_cookie
from library.constants import MINUTE_SECS
from library.django_utils.autocomplete_utils import AutocompleteView
from snpdb.models import VCF, Sample, Cohort, CustomColumnsCollection, CustomColumn, Tag, Trio, \
Lab, GenomicIntervalsCollection, GenomeBuild, ImportStatus, Project
class GenomeBuildAutocompleteView(AutocompleteView, ABC):
def filter_to_genome_build(self, qs, path_to_genome_build):
genome_build_id = self.forwarded.get('genome_build_id')
if genome_build_id:
genome_build = GenomeBuild.objects.get(pk=genome_build_id)
qs = qs.filter(**{path_to_genome_build: genome_build})
return qs
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class UserAutocompleteView(AutocompleteView):
fields = ['last_name', 'first_name', 'username']
def get_user_queryset(self, user):
return User.objects.all()
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class UsernameAutocompleteView(AutocompleteView):
fields = ['username', 'first_name', 'last_name']
def get_user_queryset(self, user):
return User.objects.all()
def get_result_label(self, obj):
return obj.username
@method_decorator(cache_page(MINUTE_SECS), name='dispatch')
class LabAutocompleteView(AutocompleteView):
fields = ['organization__name', 'name']
def get_user_queryset(self, user):
return Lab.objects.filter(organization__active=True)
def get_result_label(self, obj):
return f'{obj.organization.name} - {obj.name}'
@method_decorator([cache_page(MINUTE_SECS)], name='get')
class ProjectAutocompleteView(AutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
return Project.objects.all()
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class VCFAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
qs = VCF.filter_for_user(user, True).filter(import_status=ImportStatus.SUCCESS)
return self.filter_to_genome_build(qs, "genome_build")
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class SampleAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
sample_qs = Sample.filter_for_user(user, True).filter(import_status=ImportStatus.SUCCESS)
return self.filter_to_genome_build(sample_qs, "vcf__genome_build")
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class CohortAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
vcf_success_if_exists = Q(vcf__isnull=True) | Q(vcf__import_status=ImportStatus.SUCCESS)
qs = Cohort.filter_for_user(user, success_status_only=True).filter(vcf_success_if_exists)
return self.filter_to_genome_build(qs, "genome_build")
class CustomColumnAutocompleteView(AutocompleteView):
fields = ['column__grid_column_name']
def get_user_queryset(self, user):
# Called different things in Analysis/UserSettings
columns = self.forwarded.get('columns') or self.forwarded.get('custom_columns_collection')
if columns:
custom_columns_collections_qs = CustomColumnsCollection.filter_for_user(user).filter(pk=columns)
else:
custom_columns_collections_qs = CustomColumnsCollection.objects.none()
return CustomColumn.objects.filter(custom_columns_collection__in=custom_columns_collections_qs)
class GenomicIntervalsCollectionAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
qs = GenomicIntervalsCollection.filter_for_user(user).filter(import_status=ImportStatus.SUCCESS)
return self.filter_to_genome_build(qs, "genome_build")
@method_decorator(cache_page(5), name='dispatch')
class TagAutocompleteView(AutocompleteView):
fields = ['id']
def get_user_queryset(self, user):
return Tag.objects.all().order_by(Length("id").asc())
@method_decorator([cache_page(MINUTE_SECS), vary_on_cookie], name='dispatch')
class TrioAutocompleteView(GenomeBuildAutocompleteView):
fields = ['name']
def get_user_queryset(self, user):
qs = Trio.filter_for_user(user, success_status_only=True)
return self.filter_to_genome_build(qs, "cohort__genome_build")
| true
| true
|
f7153bf91286bae42e9a55fce4714d6889e21164
| 233
|
py
|
Python
|
conexao.py
|
gabrielmonzato20/ProjetoCp
|
a0d6a3204487d653669284f651c911c09386d626
|
[
"Apache-2.0"
] | null | null | null |
conexao.py
|
gabrielmonzato20/ProjetoCp
|
a0d6a3204487d653669284f651c911c09386d626
|
[
"Apache-2.0"
] | null | null | null |
conexao.py
|
gabrielmonzato20/ProjetoCp
|
a0d6a3204487d653669284f651c911c09386d626
|
[
"Apache-2.0"
] | 1
|
2018-09-19T12:28:08.000Z
|
2018-09-19T12:28:08.000Z
|
def mensagem():
    print('Creating in python')
def tabuada():
    n = int(input('Enter a number to see its multiplication table: '))
for x in range (1,11):
print('{} X {:2} = {:2}'.format(n, x, n*x))
mensagem()
tabuada()
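
A quick check of the '{:2}' width specifier used above (illustrative only):

print('{} X {:2} = {:2}'.format(3, 9, 27))   # '3 X  9 = 27' (fields padded to width 2)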
| 21.181818
| 65
| 0.562232
|
def mensagem():
    print('Creating in python')
def tabuada():
    n = int(input('Enter a number to see its multiplication table: '))
for x in range (1,11):
print('{} X {:2} = {:2}'.format(n, x, n*x))
mensagem()
tabuada()
| true
| true
|
f7153c21736cca53d92914b2228e578ffc94a1f1
| 13,114
|
py
|
Python
|
projects/g3h1-cp-fml-interpreter/src/lexer/dfa.py
|
keybrl/xdu-coursework
|
9d0e905bef28c18d87d3b97643de0d32f9f08ee0
|
[
"MIT"
] | null | null | null |
projects/g3h1-cp-fml-interpreter/src/lexer/dfa.py
|
keybrl/xdu-coursework
|
9d0e905bef28c18d87d3b97643de0d32f9f08ee0
|
[
"MIT"
] | null | null | null |
projects/g3h1-cp-fml-interpreter/src/lexer/dfa.py
|
keybrl/xdu-coursework
|
9d0e905bef28c18d87d3b97643de0d32f9f08ee0
|
[
"MIT"
] | null | null | null |
from enum import unique, Enum
class DFA:
def __init__(self, source_data):
if type(source_data) != dict:
            raise TypeError('Argument 1 expected type {arg_type_expect}, but got type {arg_type}'.format(
arg_type_expect='dict', arg_type=str(type(source_data))
))
if type(source_data.get('type')) != Token:
            raise TypeError('The "type" field of argument 1 expected type {arg_type_expect}, but got type {arg_type}'.format(
arg_type_expect='Token', arg_type=str(type(source_data.get('type')))
))
self.token_type = source_data.get('type')
if type(source_data.get('as_set')) != set:
            raise TypeError('The "as_set" field of argument 1 expected type {arg_type_expect}, but got type {arg_type}'.format(
arg_type_expect='set', arg_type=str(type(source_data.get('as_set')))
))
self.as_set = source_data.get('as_set')
if type(source_data.get('stm')) != dict:
            raise TypeError('The "stm" field of argument 1 expected type {arg_type_expect}, but got type {arg_type}'.format(
arg_type_expect='dict', arg_type=str(type(source_data.get('stm')))
))
self.stm = source_data.get('stm')
self.state = 0
    # Clear the state (return to the initial state)
def clear(self):
self.state = 0
    # State transition function
    # Returns bool: True if the transition succeeds, otherwise False
def move(self, ch):
        # Transition on an explicitly listed character
if self.stm.get(ch) is not None:
if self.stm.get(ch)[self.state] is not None:
self.state = self.stm.get(ch)[self.state]
else:
return False
        # Transition on a special character set
elif self.stm.get(SpecificCharSet.BLANK) is not None \
and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.BLANK):
if self.stm.get(SpecificCharSet.BLANK)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.BLANK)[self.state]
else:
return False
elif self.stm.get(SpecificCharSet.NONZERO_DIGIT) is not None \
and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.NONZERO_DIGIT):
if self.stm.get(SpecificCharSet.NONZERO_DIGIT)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.NONZERO_DIGIT)[self.state]
else:
return False
elif self.stm.get(SpecificCharSet.DIGIT) is not None \
and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.DIGIT):
if self.stm.get(SpecificCharSet.DIGIT)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.DIGIT)[self.state]
else:
return False
        # Wildcard transition (any character)
elif self.stm.get(SpecificCharSet.ANY) is not None:
if self.stm.get(SpecificCharSet.ANY)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.ANY)[self.state]
else:
return False
        # Character is not in any accepted set
else:
return False
return True
    # Check whether the DFA is in an accepting state
def is_access(self):
return self.state in self.as_set
@unique
class Token(Enum):
    # Reserved words
ORIGIN = 1
SCALE = 2
ROT = 3
IS = 4
TO = 5
STEP = 6
DRAW = 7
FOR = 8
FROM = 9
COLOR = 10
BACKGROUND = 11
    # Delimiters
    SEMICOLON = 21  # semicolon
    L_BRACKET = 22  # left bracket
    R_BRACKET = 23  # right bracket
    COMMA = 24  # comma
    # Operators
    PLUS = 35  # plus sign
    MINUS = 36  # minus sign
    MUL = 37  # multiplication sign
    DIV = 38  # division sign
    POWER = 39  # power sign
    # Others
    FUNC = 51  # function
    NUM = 52  # numeric literal
    CONST_ID = 53  # constant
    T = 54  # parameter
    COMMENT = 61  # comment
    NON_TOKEN = 62  # empty token (end of source program)
    ERR_TOKEN = 63  # error token
class SpecificCharSet(object):
NONZERO_DIGIT = 'NONZERO_DIGIT'
DIGIT = 'DIGIT'
BLANK = 'BLANK'
ANY = 'ANY'
CHARSET_MAP = {
'NONZERO_DIGIT': {'1', '2', '3', '4', '5', '6', '7', '8', '9'},
'DIGIT': {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'},
'BLANK': {'\n', ' '}
}
# DFAs for recognizing each kind of token
# type is the type of token the DFA recognizes
# as_set is the DFA's accept state set
# stm is the DFA's state transition matrix; state 0 is the start state
DFA_DATA = (
    # Reserved words
{
'type': Token.ORIGIN,
'as_set': {7, },
'stm': {
'o': (1, None, None, None, None, None, None, None),
'r': (None, 2, None, None, None, None, None, None),
'i': (None, None, 3, None, 5, None, None, None),
'g': (None, None, None, 4, None, None, None, None),
'n': (None, None, None, None, None, 6, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, 7, None)
}
}, {
'type': Token.SCALE,
'as_set': {6, },
'stm': {
's': (1, None, None, None, None, None, None),
'c': (None, 2, None, None, None, None, None),
'a': (None, None, 3, None, None, None, None),
'l': (None, None, None, 4, None, None, None),
'e': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.ROT,
'as_set': {4, },
'stm': {
'r': (1, None, None, None, None),
'o': (None, 2, None, None, None),
't': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.IS,
'as_set': {3, },
'stm': {
'i': (1, None, None, None),
's': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.TO,
'as_set': {3, },
'stm': {
't': (1, None, None, None),
'o': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.STEP,
'as_set': {5, },
'stm': {
's': (1, None, None, None, None, None),
't': (None, 2, None, None, None, None),
'e': (None, None, 3, None, None, None),
'p': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.DRAW,
'as_set': {5, },
'stm': {
'd': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'a': (None, None, 3, None, None, None),
'w': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.FOR,
'as_set': {4, },
'stm': {
'f': (1, None, None, None, None),
'o': (None, 2, None, None, None),
'r': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.FROM,
'as_set': {5, },
'stm': {
'f': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'o': (None, None, 3, None, None, None),
'm': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None)
}
}, {
'type': Token.COLOR,
'as_set': {6, },
'stm': {
'c': (1, None, None, None, None, None, None),
'o': (None, 2, None, 4, None, None, None),
'l': (None, None, 3, None, None, None, None),
'r': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.BACKGROUND,
'as_set': {11, },
'stm': {
'b': (1, None, None, None, None, None, None, None, None, None, None, None),
'a': (None, 2, None, None, None, None, None, None, None, None, None, None),
'c': (None, None, 3, None, None, None, None, None, None, None, None, None),
'k': (None, None, None, 4, None, None, None, None, None, None, None, None),
'g': (None, None, None, None, 5, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 6, None, None, None, None, None, None),
'o': (None, None, None, None, None, None, 7, None, None, None, None, None),
'u': (None, None, None, None, None, None, None, 8, None, None, None, None),
'n': (None, None, None, None, None, None, None, None, 9, None, None, None),
'd': (None, None, None, None, None, None, None, None, None, 10, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, None, None, None, None, 11, None)
}
},
    # Delimiters
{
'type': Token.SEMICOLON,
'as_set': {1, },
'stm': {
';': (1, None)
}
}, {
'type': Token.L_BRACKET,
'as_set': {1, },
'stm': {
'(': (1, None)
}
}, {
'type': Token.R_BRACKET,
'as_set': {1, },
'stm': {
')': (1, None)
}
}, {
'type': Token.COMMA,
'as_set': {1, },
'stm': {
',': (1, None)
}
},
    # Operators
{
'type': Token.PLUS,
'as_set': {1, },
'stm': {
'+': (1, None)
}
}, {
'type': Token.MINUS,
'as_set': {1, },
'stm': {
'-': (1, None)
}
}, {
'type': Token.MUL,
'as_set': {1, },
'stm': {
'*': (1, None)
}
}, {
'type': Token.DIV,
'as_set': {1, },
'stm': {
'/': (1, None)
}
}, {
'type': Token.POWER,
'as_set': {1, },
'stm': {
'^': (1, None)
}
},
    # Others
{
'type': Token.FUNC,
'as_set': {10, },
'stm': {
'a': (None, 6, None, None, None, None, None, None, None, None, None),
'c': (3, None, None, None, None, None, None, None, None, None, None),
'e': (4, None, None, None, None, None, None, None, None, None, None),
'i': (None, None, 6, None, None, None, None, None, None, None, None),
'l': (6, None, None, None, None, None, None, None, None, None, None),
'n': (None, None, None, None, None, None, 10, None, None, None, None),
'o': (None, None, None, 8, None, None, None, None, None, None, None),
'p': (None, None, None, None, None, None, None, None, None, 10, None),
'q': (None, None, 5, None, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 7, None, None, None, None, None),
's': (2, None, None, None, None, None, None, None, 10, None, None),
't': (1, None, None, None, None, None, None, 10, None, None, None),
'x': (None, None, None, None, 9, None, None, None, None, None, None)
}
}, {
'type': Token.NUM,
'as_set': {2, 3, 4},
'stm': {
SpecificCharSet.NONZERO_DIGIT: (3, 4, None, 3, 4),
'0': (2, 4, None, 3, 4),
'.': (1, None, 4, 4, None)
}
}, {
'type': Token.CONST_ID,
'as_set': {2, },
'stm': {
'e': (2, None, None),
'p': (1, None, None),
'i': (None, 2, None),
}
}, {
'type': Token.T,
'as_set': {1, },
'stm': {
't': (1, None)
}
}, {
'type': Token.COMMENT,
'as_set': {3, },
'stm': {
SpecificCharSet.ANY: (None, None, 2, None),
'/': (1, 2, 2, None),
'\n': (None, None, 3, None),
}
}, {
'type': Token.ERR_TOKEN,
'as_set': {0, 1},
'stm': {
SpecificCharSet.ANY: (1, 1),
SpecificCharSet.BLANK: (None, None)
}
}
)
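
A minimal usage sketch of the DFA class, driving one of the machines defined in DFA_DATA over the input 'rot ' (assumes the DFA, Token, and SpecificCharSet definitions above are in scope):

rot_dfa = DFA({
    'type': Token.ROT,
    'as_set': {4, },
    'stm': {
        'r': (1, None, None, None, None),
        'o': (None, 2, None, None, None),
        't': (None, None, 3, None, None),
        SpecificCharSet.BLANK: (None, None, None, 4, None)
    }
})
for ch in 'rot ':
    assert rot_dfa.move(ch)    # each character has a valid transition
assert rot_dfa.is_access()     # 'rot' followed by a blank is accepted
rot_dfa.clear()                # reset to the start state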
| 34.970667
| 108
| 0.416959
|
from enum import unique, Enum
class DFA:
def __init__(self, source_data):
if type(source_data) != dict:
            raise TypeError('Argument 1 expected type {arg_type_expect}, but got type {arg_type}'.format(
arg_type_expect='dict', arg_type=str(type(source_data))
))
if type(source_data.get('type')) != Token:
            raise TypeError('The "type" field of argument 1 expected type {arg_type_expect}, but got type {arg_type}'.format(
arg_type_expect='Token', arg_type=str(type(source_data.get('type')))
))
self.token_type = source_data.get('type')
if type(source_data.get('as_set')) != set:
            raise TypeError('The "as_set" field of argument 1 expected type {arg_type_expect}, but got type {arg_type}'.format(
arg_type_expect='set', arg_type=str(type(source_data.get('as_set')))
))
self.as_set = source_data.get('as_set')
if type(source_data.get('stm')) != dict:
            raise TypeError('The "stm" field of argument 1 expected type {arg_type_expect}, but got type {arg_type}'.format(
arg_type_expect='dict', arg_type=str(type(source_data.get('stm')))
))
self.stm = source_data.get('stm')
self.state = 0
def clear(self):
self.state = 0
def move(self, ch):
if self.stm.get(ch) is not None:
if self.stm.get(ch)[self.state] is not None:
self.state = self.stm.get(ch)[self.state]
else:
return False
elif self.stm.get(SpecificCharSet.BLANK) is not None \
and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.BLANK):
if self.stm.get(SpecificCharSet.BLANK)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.BLANK)[self.state]
else:
return False
elif self.stm.get(SpecificCharSet.NONZERO_DIGIT) is not None \
and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.NONZERO_DIGIT):
if self.stm.get(SpecificCharSet.NONZERO_DIGIT)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.NONZERO_DIGIT)[self.state]
else:
return False
elif self.stm.get(SpecificCharSet.DIGIT) is not None \
and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.DIGIT):
if self.stm.get(SpecificCharSet.DIGIT)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.DIGIT)[self.state]
else:
return False
elif self.stm.get(SpecificCharSet.ANY) is not None:
if self.stm.get(SpecificCharSet.ANY)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.ANY)[self.state]
else:
return False
else:
return False
return True
def is_access(self):
return self.state in self.as_set
@unique
class Token(Enum):
ORIGIN = 1
SCALE = 2
ROT = 3
IS = 4
TO = 5
STEP = 6
DRAW = 7
FOR = 8
FROM = 9
COLOR = 10
BACKGROUND = 11
SEMICOLON = 21
L_BRACKET = 22
R_BRACKET = 23
COMMA = 24
PLUS = 35
MINUS = 36
MUL = 37
DIV = 38
POWER = 39
FUNC = 51
NUM = 52
CONST_ID = 53
T = 54
COMMENT = 61
NON_TOKEN = 62
ERR_TOKEN = 63
class SpecificCharSet(object):
NONZERO_DIGIT = 'NONZERO_DIGIT'
DIGIT = 'DIGIT'
BLANK = 'BLANK'
ANY = 'ANY'
CHARSET_MAP = {
'NONZERO_DIGIT': {'1', '2', '3', '4', '5', '6', '7', '8', '9'},
'DIGIT': {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'},
'BLANK': {'\n', ' '}
}
DFA_DATA = (
{
'type': Token.ORIGIN,
'as_set': {7, },
'stm': {
'o': (1, None, None, None, None, None, None, None),
'r': (None, 2, None, None, None, None, None, None),
'i': (None, None, 3, None, 5, None, None, None),
'g': (None, None, None, 4, None, None, None, None),
'n': (None, None, None, None, None, 6, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, 7, None)
}
}, {
'type': Token.SCALE,
'as_set': {6, },
'stm': {
's': (1, None, None, None, None, None, None),
'c': (None, 2, None, None, None, None, None),
'a': (None, None, 3, None, None, None, None),
'l': (None, None, None, 4, None, None, None),
'e': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.ROT,
'as_set': {4, },
'stm': {
'r': (1, None, None, None, None),
'o': (None, 2, None, None, None),
't': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.IS,
'as_set': {3, },
'stm': {
'i': (1, None, None, None),
's': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.TO,
'as_set': {3, },
'stm': {
't': (1, None, None, None),
'o': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.STEP,
'as_set': {5, },
'stm': {
's': (1, None, None, None, None, None),
't': (None, 2, None, None, None, None),
'e': (None, None, 3, None, None, None),
'p': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.DRAW,
'as_set': {5, },
'stm': {
'd': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'a': (None, None, 3, None, None, None),
'w': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.FOR,
'as_set': {4, },
'stm': {
'f': (1, None, None, None, None),
'o': (None, 2, None, None, None),
'r': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.FROM,
'as_set': {5, },
'stm': {
'f': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'o': (None, None, 3, None, None, None),
'm': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None)
}
}, {
'type': Token.COLOR,
'as_set': {6, },
'stm': {
'c': (1, None, None, None, None, None, None),
'o': (None, 2, None, 4, None, None, None),
'l': (None, None, 3, None, None, None, None),
'r': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.BACKGROUND,
'as_set': {11, },
'stm': {
'b': (1, None, None, None, None, None, None, None, None, None, None, None),
'a': (None, 2, None, None, None, None, None, None, None, None, None, None),
'c': (None, None, 3, None, None, None, None, None, None, None, None, None),
'k': (None, None, None, 4, None, None, None, None, None, None, None, None),
'g': (None, None, None, None, 5, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 6, None, None, None, None, None, None),
'o': (None, None, None, None, None, None, 7, None, None, None, None, None),
'u': (None, None, None, None, None, None, None, 8, None, None, None, None),
'n': (None, None, None, None, None, None, None, None, 9, None, None, None),
'd': (None, None, None, None, None, None, None, None, None, 10, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, None, None, None, None, 11, None)
}
},
{
'type': Token.SEMICOLON,
'as_set': {1, },
'stm': {
';': (1, None)
}
}, {
'type': Token.L_BRACKET,
'as_set': {1, },
'stm': {
'(': (1, None)
}
}, {
'type': Token.R_BRACKET,
'as_set': {1, },
'stm': {
')': (1, None)
}
}, {
'type': Token.COMMA,
'as_set': {1, },
'stm': {
',': (1, None)
}
},
{
'type': Token.PLUS,
'as_set': {1, },
'stm': {
'+': (1, None)
}
}, {
'type': Token.MINUS,
'as_set': {1, },
'stm': {
'-': (1, None)
}
}, {
'type': Token.MUL,
'as_set': {1, },
'stm': {
'*': (1, None)
}
}, {
'type': Token.DIV,
'as_set': {1, },
'stm': {
'/': (1, None)
}
}, {
'type': Token.POWER,
'as_set': {1, },
'stm': {
'^': (1, None)
}
},
{
'type': Token.FUNC,
'as_set': {10, },
'stm': {
'a': (None, 6, None, None, None, None, None, None, None, None, None),
'c': (3, None, None, None, None, None, None, None, None, None, None),
'e': (4, None, None, None, None, None, None, None, None, None, None),
'i': (None, None, 6, None, None, None, None, None, None, None, None),
'l': (6, None, None, None, None, None, None, None, None, None, None),
'n': (None, None, None, None, None, None, 10, None, None, None, None),
'o': (None, None, None, 8, None, None, None, None, None, None, None),
'p': (None, None, None, None, None, None, None, None, None, 10, None),
'q': (None, None, 5, None, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 7, None, None, None, None, None),
's': (2, None, None, None, None, None, None, None, 10, None, None),
't': (1, None, None, None, None, None, None, 10, None, None, None),
'x': (None, None, None, None, 9, None, None, None, None, None, None)
}
}, {
'type': Token.NUM,
'as_set': {2, 3, 4},
'stm': {
SpecificCharSet.NONZERO_DIGIT: (3, 4, None, 3, 4),
'0': (2, 4, None, 3, 4),
'.': (1, None, 4, 4, None)
}
}, {
'type': Token.CONST_ID,
'as_set': {2, },
'stm': {
'e': (2, None, None),
'p': (1, None, None),
'i': (None, 2, None),
}
}, {
'type': Token.T,
'as_set': {1, },
'stm': {
't': (1, None)
}
}, {
'type': Token.COMMENT,
'as_set': {3, },
'stm': {
SpecificCharSet.ANY: (None, None, 2, None),
'/': (1, 2, 2, None),
'\n': (None, None, 3, None),
}
}, {
'type': Token.ERR_TOKEN,
'as_set': {0, 1},
'stm': {
SpecificCharSet.ANY: (1, 1),
SpecificCharSet.BLANK: (None, None)
}
}
)
| true
| true
|
f7153c4f5db58c6522a6d97004d7dcdde2bcc24c
| 262
|
py
|
Python
|
src/core/migrations/0050_merge_20190212_0720.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
src/core/migrations/0050_merge_20190212_0720.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
src/core/migrations/0050_merge_20190212_0720.py
|
metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3
|
c754d3b1b401906a21640b8eacb6b724a448b31c
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.2 on 2019-02-12 07:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0049_merge_20190212_0544'),
('core', '0049_article_head'),
]
operations = [
]
| 17.466667
| 47
| 0.633588
|
| true
| true
|
f7153d3e5f303fac4afc1dc66b303035bd382d50
| 969
|
py
|
Python
|
doc/api/epydoc/build.py
|
swamper123/pymodbus
|
7dfac6f19c60d3aa50a168ff82db88204dfb3a30
|
[
"BSD-3-Clause"
] | null | null | null |
doc/api/epydoc/build.py
|
swamper123/pymodbus
|
7dfac6f19c60d3aa50a168ff82db88204dfb3a30
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T12:01:38.000Z
|
2022-03-21T02:39:59.000Z
|
doc/api/epydoc/build.py
|
swamper123/pymodbus
|
7dfac6f19c60d3aa50a168ff82db88204dfb3a30
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
'''
Epydoc API Runner
------------------
Using pkg_resources, we attempt to see if epydoc is installed,
if so, we use its cli program to compile the documents
'''
try:
import sys, os, shutil
import pkg_resources
pkg_resources.require("epydoc")
from epydoc.cli import cli
sys.argv = '''epydoc.py pymodbus
--html --simple-term --quiet
--include-log
--graph=all
--docformat=plaintext
--debug
--exclude=._
--exclude=tests
--output=html/
'''.split()
#bugs in trunk for --docformat=restructuredtext
if not os.path.exists("./html"):
os.mkdir("./html")
print( "Building Epydoc API Documentation")
cli()
if os.path.exists('../../../build'):
shutil.move("html", "../../../build/epydoc")
except Exception as ex:
    import traceback, sys
    traceback.print_exc(file=sys.stdout)
    print("Epydoc not available...not building")
| 24.846154
| 62
| 0.603715
|
| true
| true
|
f7153e68784e7eb11f5fe8da4e684247486b27ce
| 58,002
|
py
|
Python
|
Source/ThirdParty/gyp/pylib/gyp/generator/msvs.py
|
VincentWei/mdolphin-core
|
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
|
[
"Apache-2.0"
] | 6
|
2017-05-31T01:46:45.000Z
|
2018-06-12T10:53:30.000Z
|
WebKit/Source/ThirdParty/gyp/pylib/gyp/generator/msvs.py
|
JavaScriptTesting/LJS
|
9818dbdb421036569fff93124ac2385d45d01c3a
|
[
"Apache-2.0"
] | null | null | null |
WebKit/Source/ThirdParty/gyp/pylib/gyp/generator/msvs.py
|
JavaScriptTesting/LJS
|
9818dbdb421036569fff93124ac2385d45d01c3a
|
[
"Apache-2.0"
] | 2
|
2017-07-17T06:02:42.000Z
|
2018-09-19T10:08:38.000Z
|
#!/usr/bin/python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ntpath
import posixpath
import os
import re
import subprocess
import sys
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
import gyp.MSVSSettings as MSVSSettings
import gyp.common
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)/obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)/lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
      username_re = re.compile(r'^User name\s+(\S+)', re.MULTILINE)
      username_match = username_re.search(config)
      if username_match:
        username = username_match.group(1)
      domain_re = re.compile(r'^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
""" Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
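# Example (illustrative, not from the original source): os.path.normpath
# would collapse 'obj/$(Platform)/../foo.cc' to 'obj/foo.cc', silently
# dropping the variable, so _NormalizedSource keeps the original string
# whenever normalization changes the number of '$' characters.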
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if len(path) > 0 and path[-1] == '\\':
path = path[:-1]
return path
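# Example (illustrative, assumed inputs): with fixpath_prefix unset,
# _FixPath('a/b/../c.cc') on Windows yields 'a\c.cc' -- slashes are
# flipped to backslashes and the result is normalized.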
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = dict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
# Add a folder for excluded files.
if excluded_result:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
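# Example (illustrative): a configuration named 'Debug_Win32' whose data
# carries msvs_configuration_platform 'Win32' maps to 'Debug|Win32'; a
# plain 'Debug' with no platform set also maps to 'Debug|Win32' via the
# default platform.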
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd):
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['"%s"' % i for i in direct_cmd]
direct_cmd = [i.replace('"', '\\"') for i in direct_cmd]
#direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = (
'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
'set CYGWIN=nontsec&& ')
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += (
'bash -c "%(cmd)s"')
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Fix the paths
# If the argument starts with a slash, it's probably a command line switch
arguments = [i.startswith('/') and i or _FixPath(i) for i in cmd[1:]]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return ' '.join(command + arguments)
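# Illustrative sketch of the non-cygwin branch (assumed inputs, with
# fixpath_prefix unset): the rule action ['cat', 'a/b.txt'] with quote_cmd
# set becomes 'type "a\b.txt"' -- 'cat' is mapped to the Windows 'type'
# and the path slashes are fixed.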
def _BuildCommandLineForRule(spec, rule, has_input_path):
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = [_FixPath(i) for i in inputs]
outputs = [_FixPath(i) for i in outputs]
tool = MSVSProject.Tool(
'VCCustomBuildTool', {
'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for input in actions_dict:
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions_dict[input]:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
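# Example (illustrative): expanding '$(InputName).h' against the input
# file 'idl/foo.idl' yields 'foo.h', while '$(InputPath)' expands to
# 'idl/foo.idl' itself.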
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question
    trigger_file: the file which triggered this rule
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = [_FixPath(i) for i in rule.get('inputs', [])]
raw_outputs = [_FixPath(i) for i in rule.get('outputs', [])]
inputs = set()
outputs = set()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename))
rules_file.Create(spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = [_FixPath(i) for i in r.get('inputs', [])]
outputs = [_FixPath(i) for i in r.get('outputs', [])]
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.Write()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
    options: global generator options
    actions_to_add: deferred list of actions to add in
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = set()
all_outputs = set()
all_output_dirs = set()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(set(inputs))
all_outputs.update(set(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
file.write('\tmkdir -p %s\n' % od)
file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
file.write('\t%s\n\n' % cmd)
# Close up the file.
file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True)
# Insert makefile as 0'th input, so it gets the action attached there,
  # as this is easier to understand in the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=[_FixPath(i) for i in all_inputs],
outputs=[_FixPath(i) for i in all_outputs],
description='Running %s' % cmd,
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this."""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument, so that the Win32
CommandLineToArgv function will turn the escaped result back into the
original string. See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this."""
def replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
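# Example (illustrative): _EscapeCommandLineArgumentForMSVS('say "hi"')
# returns '"say \"hi\""' -- each embedded quote gains an odd number of
# preceding backslashes and the whole argument is wrapped in plain quotes.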
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention."""
def replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
list = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(list), 2):
list[i] = delimiters_replacer_regex.sub(replace, list[i])
# Concatenate back into a single string
s = '"'.join(list)
if len(list) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
    # the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
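# Example (illustrative): _EscapeVCProjCommandLineArgListItem('-DA=1,2')
# returns '-DA=1","2' -- the comma is wrapped in VCProj-style quotes so
# the list parser treats it literally rather than as a separator.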
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
return s
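# Illustrative walk-through (assumed input): for a define like
# NAME="%PATH%" the chain above first doubles the percent signs, then
# backslash-escapes the embedded quotes and wraps the argument for
# CommandLineToArgv, and finally quotes any , or ; so the VCProj
# string-list format keeps the define in one piece.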
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
def _AdjustSourcesForRules(rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
# Done if not processing outputs as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = set([_FixPath(i) for i in inputs])
outputs = set([_FixPath(i) for i in outputs])
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = set([_FixPath(s) for s in actions_to_add.keys()])
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project
Arguments:
proj_path: Path of the vcproj file to generate.
spec: The target dictionary containing the properties of the target.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) == None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GenerateProject(project, options, version):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return
_GenerateMSVSProject(project, options, version)
def _GenerateMSVSProject(project, options, version):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
"""
spec = project.spec
vcproj_dir = os.path.dirname(project.path)
if vcproj_dir and not os.path.exists(vcproj_dir):
os.makedirs(vcproj_dir)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version=version)
p.Create(spec['target_name'], guid=project.guid, platforms=platforms)
# Get directory project file is in.
gyp_dir = os.path.split(project.path)[0]
gyp_file = posixpath.split(project.build_file)[1]
gyp_path = _NormalizedSource(gyp_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, gyp_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
sources, excluded_sources = _PrepareListOfSources(project, spec,
relative_path_of_gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, gyp_dir, options, spec,
sources, excluded_sources,
actions_to_add)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources))
# Add in files.
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompileHeaderStubs(p, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't excluded sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.Write()
def _GetUniquePlatforms(spec):
"""Return the list of unique platforms for this spec, e.g ['win32', ...]
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
# Gather list of unique platforms.
platforms = set()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version=version)
user_file.Create(spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project. It's a number defined
by Microsoft. May raise an exception.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
'dummy_executable': '1', # .exe
}[spec['type']]
except KeyError, e:
if spec.get('type'):
raise Exception('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise Exception('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Many settings in a vcproj file are specific to a configuration. This
  function builds the main part of the vcproj file that's configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
    config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(config, spec)
out_file, vc_tool = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)\\$(ProjectName)\\vc80.pdb')
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = [_FixPath(i) for i in include_dirs]
resource_include_dirs = [_FixPath(i) for i in resource_include_dirs]
return include_dirs, resource_include_dirs
def _GetLibraries(config, spec):
"""Returns the list of libraries for this configuration.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
spec: The target dictionary containing the properties of the target.
Returns:
    The list of libraries.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
  return [re.sub(r'^(\-l)', '', lib) for lib in libraries]
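# Example (illustrative): libraries ['-lwinmm.lib', 'user32.lib'] become
# ['winmm.lib', 'user32.lib'] -- only a leading '-l' is stripped.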
def _GetOutputFilePathAndTool(spec):
"""Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A pair of (file path, name of the tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', '$(OutDir)\\', '.exe'),
'shared_library': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'loadable_module': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'static_library': ('VCLibrarianTool', '$(OutDir)\\lib\\', '.lib'),
'dummy_executable': ('VCLinkerTool', '$(IntDir)\\', '.junk'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, out_dir, suffix = output_file_props
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionnary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
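# Example (illustrative): defines ['NDEBUG', ['VERSION', 3]] flatten to
# ['NDEBUG', 'VERSION=3'] -- list-valued entries become NAME=value pairs.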
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ""
if spec['type'] in ['shared_library', 'loadable_module']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError('Multiple module definition files in one target, '
'target %s lists multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
""" Convert the content of the tools array to a form expected by
VisualStudio.
Arguments:
    tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = [_FixPath(i) for i in vsprops_dirs]
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
if not prepared_attrs.has_key('OutputDirectory'):
prepared_attrs['OutputDirectory'] = '$(SolutionDir)$(ConfigurationName)'
if not prepared_attrs.has_key('IntermediateDirectory'):
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources = [_NormalizedSource(s) for s in sources_array]
sources_set.update(set(sources))
def _PrepareListOfSources(project, spec, relative_path_of_gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
project: the MSVSProject object.
spec: The target dictionary containing the properties of the target.
relative_path_of_gyp_file: The relative path of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources)
"""
sources = set()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = set()
# Add in the gyp file.
sources.add(relative_path_of_gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a.get('inputs', [])
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = set(inputs)
sources.update(inputs)
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The path to the gyp file being processed.
sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(set(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = [_FixPath(i) for i in sources]
# Convert to proper windows form.
excluded_sources = [_FixPath(i) for i in excluded_sources]
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded)
# Add in dummy file for type none.
if spec['type'] == 'dummy_executable':
# Pull in a dummy main so it can link successfully.
dummy_relpath = gyp.common.RelativePath(
options.depth + '\\tools\\gyp\\gyp_dummy.c', gyp_dir)
sources.append(dummy_relpath)
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for config_name, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = set()
for config_name, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompileHeaderStubs(p, spec):
# Handle pre-compiled headers source stubs specially.
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
# UsePrecompiledHeader=1 for if using precompiled headers.
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
for a in actions:
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False)
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.Write()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
if path == '':
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
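# Example (illustrative): starting from root = {}, _GetPathDict(root, 'a/b')
# returns the innermost dict and leaves root as {'a': {'b': {}}}, creating
# each folder level on demand.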
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
  # Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node.keys():
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.split(build_file)[0], proj_filename)
fixpath_prefix = None
if options.generator_output:
projectDirPath = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fixpath_prefix = gyp.common.RelativePath(projectDirPath,
os.path.dirname(proj_path))
return proj_path, fixpath_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
version: the MSVSVersion object.
Returns:
    A dictionary of created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
_FixPath(proj_path),
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
projects[qualified_target] = obj
# Set all the dependencies
for project in projects.values():
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = \
MSVSVersion.SelectVisualStudioVersion(generator_flags.get('msvs_version',
'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
generator_flags = params.get('generator_flags', {})
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
_GenerateProject(project, options, msvs_version)
fixpath_prefix = None
for build_file in data.keys():
# Validate build_file extension
if build_file[-4:] != '.gyp':
continue
sln_path = build_file[:-4] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
| 37.614786
| 137
| 0.685063
|
import ntpath
import posixpath
import os
import re
import subprocess
import sys
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSVersion as MSVSVersion
import gyp.MSVSSettings as MSVSSettings
import gyp.common
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)/obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)/lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
]
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
""" Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if len(path) > 0 and path[-1] == '\\':
path = path[:-1]
return path
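# Illustrative example (not part of the original file), assuming the module
# global fixpath_prefix is None:
#   _FixPath('a/b/c.cc')        ->  a\b\c.cc
#   _FixPath('$(OutDir)/lib/')  ->  $(OutDir)\lib
# (variables are preserved and any trailing backslash is dropped).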
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
  if not prefix: prefix = []
  if excluded is None: excluded = set()
result = []
excluded_result = []
folders = dict()
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
else:
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
if excluded_result:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
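# Illustrative example (not part of the original file):
#   tools = {}
#   _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', ['FOO=1'])
#   _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', ['BAR'])
# leaves tools == {'VCCLCompilerTool':
#                  {'PreprocessorDefinitions': ['FOO=1', 'BAR']}};
# appending to an existing non-list setting raises TypeError instead.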
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name)-1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
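# Illustrative example (not part of the original file):
#   _ConfigFullName('Debug_Win32', {})                                 -> 'Debug|Win32'
#   _ConfigFullName('Release', {'msvs_configuration_platform': 'x64'}) -> 'Release|x64'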
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd):
if cygwin_shell:
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['"%s"' % i for i in direct_cmd]
direct_cmd = [i.replace('"', '\\"') for i in direct_cmd]
direct_cmd = ' '.join(direct_cmd)
cmd = (
'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
'set CYGWIN=nontsec&& ')
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += (
'bash -c "%(cmd)s"')
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return cmd
else:
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
arguments = [i.startswith('/') and i or _FixPath(i) for i in cmd[1:]]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return ' '.join(command + arguments)
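# Illustrative example (not part of the original file), non-cygwin branch and
# fixpath_prefix unset:
#   _BuildCommandLineForRuleRaw(spec, ['cat', 'a/b.txt'], False, False, True)
# returns the literal command:  type "a\b.txt"
# ('cat' is mapped to the Windows 'type' builtin and the path is fixed up).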
def _BuildCommandLineForRule(spec, rule, has_input_path):
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
chosen_input = inputs[0]
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = [_FixPath(i) for i in inputs]
outputs = [_FixPath(i) for i in outputs]
tool = MSVSProject.Tool(
'VCCustomBuildTool', {
'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for input in actions_dict:
inputs = set()
outputs = set()
descriptions = []
commands = []
for action in actions_dict[input]:
inputs.update(set(action['inputs']))
outputs.update(set(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
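# Illustrative example (not part of the original file), with
# input_file = 'a/b/thing.idl':
#   '$(InputName).h'    ->  'thing.h'
#   '$(InputFileName)'  ->  'thing.idl'
#   'gen/$(InputPath)'  ->  'gen/a/b/thing.idl'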
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
rule_ext = rule['extension']
return [s for s in sources if s.endswith('.' + rule_ext)]
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question
    trigger_file: the source file that triggered this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = [_FixPath(i) for i in rule.get('inputs', [])]
raw_outputs = [_FixPath(i) for i in rule.get('outputs', [])]
inputs = set()
outputs = set()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename))
rules_file.Create(spec['target_name'])
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = [_FixPath(i) for i in r.get('inputs', [])]
outputs = [_FixPath(i) for i in r.get('outputs', [])]
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
rules_file.Write()
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
all_inputs = set()
all_outputs = set()
all_output_dirs = set()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(set(inputs))
all_outputs.update(set(outputs))
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
file.write('\tmkdir -p %s\n' % od)
file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
file.write('\t%s\n\n' % cmd)
# Close up the file.
file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True)
  # Insert makefile as 0'th input, so it gets the action attached there.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=[_FixPath(i) for i in all_inputs],
outputs=[_FixPath(i) for i in all_outputs],
description='Running %s' % cmd,
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this."""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument, so that the Win32
CommandLineToArgv function will turn the escaped result back into the
original string. See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this."""
def replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
s = quote_replacer_regex.sub(replace, s)
s = '"' + s + '"'
return s
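# Illustrative example (not part of the original file):
#   _EscapeCommandLineArgumentForMSVS('say "hi"')
# yields the literal characters  "say \"hi\""  -- each embedded quote gains a
# preceding backslash and the whole argument is wrapped in quotes, so
# CommandLineToArgv recovers the original string.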
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention."""
def replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
list = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(list), 2):
list[i] = delimiters_replacer_regex.sub(replace, list[i])
# Concatenate back into a single string
s = '"'.join(list)
if len(list) % 2 == 0:
    # String ends while still quoted according to VCProj's convention. This
    # is likely to be misinterpreted by MSVS; let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
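# Illustrative example (not part of the original file):
#   _EscapeVCProjCommandLineArgListItem('a,b')  ->  'a","b'
# The comma is wrapped in VCProj-style quotes so the list parser keeps 'a,b'
# as one item instead of splitting it at the delimiter.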
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources)
def _AdjustSourcesForRules(rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
    # Only process rules whose outputs should be treated as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = set([_FixPath(i) for i in inputs])
outputs = set([_FixPath(i) for i in outputs])
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = set([_FixPath(s) for s in actions_to_add.keys()])
return [s for s in excluded_sources if s not in must_keep]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project
Arguments:
proj_path: Path of the vcproj file to generate.
spec: The target dictionary containing the properties of the target.
"""
default_config = _GetDefaultConfiguration(spec)
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) == None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GenerateProject(project, options, version):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
"""
default_config = _GetDefaultConfiguration(project.spec)
if default_config.get('msvs_existing_vcproj'):
return
_GenerateMSVSProject(project, options, version)
def _GenerateMSVSProject(project, options, version):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
"""
spec = project.spec
vcproj_dir = os.path.dirname(project.path)
if vcproj_dir and not os.path.exists(vcproj_dir):
os.makedirs(vcproj_dir)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version=version)
p.Create(spec['target_name'], guid=project.guid, platforms=platforms)
gyp_dir = os.path.split(project.path)[0]
gyp_file = posixpath.split(project.build_file)[1]
gyp_path = _NormalizedSource(gyp_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, gyp_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
sources, excluded_sources = _PrepareListOfSources(project, spec,
relative_path_of_gyp_file)
actions_to_add = {}
_GenerateRulesForMSVS(p, gyp_dir, options, spec,
sources, excluded_sources,
actions_to_add)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources))
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompileHeaderStubs(p, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
p.Write()
def _GetUniquePlatforms(spec):
"""Return the list of unique platforms for this spec, e.g ['win32', ...]
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
platforms = set()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version=version)
user_file.Create(spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project. It's a number defined
by Microsoft. May raise an exception.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
'dummy_executable': '1', # .exe
}[spec['type']]
except KeyError, e:
if spec.get('type'):
raise Exception('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise Exception('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Many settings in a vcproj file are specific to a configuration. This
function the main part of the vcproj file that's configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionnary that defines the special processing to be done
for this configuration.
"""
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(config, spec)
out_file, vc_tool = _GetOutputFilePathAndTool(spec)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
tools = dict()
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)\\$(ProjectName)\\vc80.pdb')
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = [_FixPath(i) for i in include_dirs]
resource_include_dirs = [_FixPath(i) for i in resource_include_dirs]
return include_dirs, resource_include_dirs
def _GetLibraries(config, spec):
"""Returns the list of libraries for this configuration.
Arguments:
    config: The dictionary that defines the special processing to be done
for this configuration.
spec: The target dictionary containing the properties of the target.
Returns:
    The list of libraries.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
return [re.sub('^(\-l)', '', lib) for lib in libraries]
def _GetOutputFilePathAndTool(spec):
"""Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A pair of (file path, name of the tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', '$(OutDir)\\', '.exe'),
'shared_library': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'loadable_module': ('VCLinkerTool', '$(OutDir)\\', '.dll'),
'static_library': ('VCLibrarianTool', '$(OutDir)\\lib\\', '.lib'),
'dummy_executable': ('VCLinkerTool', '$(IntDir)\\', '.junk'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, out_dir, suffix = output_file_props
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionnary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ""
if spec['type'] in ['shared_library', 'loadable_module']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError('Multiple module definition files in one target, '
'target %s lists multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
""" Convert the content of the tools array to a form expected by
VisualStudio.
Arguments:
    tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = [_FixPath(i) for i in vsprops_dirs]
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
if not prepared_attrs.has_key('OutputDirectory'):
prepared_attrs['OutputDirectory'] = '$(SolutionDir)$(ConfigurationName)'
if not prepared_attrs.has_key('IntermediateDirectory'):
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources = [_NormalizedSource(s) for s in sources_array]
sources_set.update(set(sources))
def _PrepareListOfSources(project, spec, relative_path_of_gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
project: the MSVSProject object.
spec: The target dictionary containing the properties of the target.
relative_path_of_gyp_file: The relative path of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources)
"""
sources = set()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = set()
# Add in the gyp file.
sources.add(relative_path_of_gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a.get('inputs', [])
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = set(inputs)
sources.update(inputs)
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
    gyp_dir: The directory in which the project/gyp file resides.
    sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(set(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = [_FixPath(i) for i in sources]
# Convert to proper windows form.
excluded_sources = [_FixPath(i) for i in excluded_sources]
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded)
# Add in dummy file for type none.
if spec['type'] == 'dummy_executable':
# Pull in a dummy main so it can link successfully.
dummy_relpath = gyp.common.RelativePath(
options.depth + '\\tools\\gyp\\gyp_dummy.c', gyp_dir)
sources.append(dummy_relpath)
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
  # If any non-native rules use 'idl' as an extension, exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for config_name, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
tool_files = set()
for config_name, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompileHeaderStubs(p, spec):
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
actions = spec.get('actions', [])
for a in actions:
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False)
inputs = a.get('inputs') or [relative_path_of_gyp_file]
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.Write()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
if path == '':
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node.keys():
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
root = _CollapseSingles('', root)
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.split(build_file)[0], proj_filename)
fixpath_prefix = None
if options.generator_output:
projectDirPath = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fixpath_prefix = gyp.common.RelativePath(projectDirPath,
os.path.dirname(proj_path))
return proj_path, fixpath_prefix
def _GetPlatformOverridesOfProject(spec):
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
version: the MSVSVersion object.
Returns:
    A dictionary of created projects, keyed by target.
"""
global fixpath_prefix
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
obj = MSVSNew.MSVSProject(
_FixPath(proj_path),
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
projects[qualified_target] = obj
for project in projects.values():
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
msvs_version = \
MSVSVersion.SelectVisualStudioVersion(generator_flags.get('msvs_version',
'auto'))
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
generator_flags = params.get('generator_flags', {})
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
_GenerateProject(project, options, msvs_version)
fixpath_prefix = None
for build_file in data.keys():
# Validate build_file extension
if build_file[-4:] != '.gyp':
continue
sln_path = build_file[:-4] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
| false
| true
|
f7153eb3b8319ab7b8fbdefb6c2a256421b0226c
| 256
|
py
|
Python
|
submissions/joi2012yo/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 1
|
2021-05-10T01:16:28.000Z
|
2021-05-10T01:16:28.000Z
|
submissions/joi2012yo/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 3
|
2021-05-11T06:14:15.000Z
|
2021-06-19T08:18:36.000Z
|
submissions/joi2012yo/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | null | null | null |
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
p = min([int(readline()) for _ in range(3)])
g = min([int(readline()) for _ in range(2)])
print(p + g - 50)
| 25.6
| 44
| 0.699219
|
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
p = min([int(readline()) for _ in range(3)])
g = min([int(readline()) for _ in range(2)])
print(p + g - 50)
| true
| true
|
f7153eeb0752afecb51dc681dd7cab991cb43202
| 1,722
|
py
|
Python
|
Model.py
|
Giorgiobientinesi/Workshop2
|
f454499d4befdb705b4672be25d8698ef2b37116
|
[
"MIT"
] | null | null | null |
Model.py
|
Giorgiobientinesi/Workshop2
|
f454499d4befdb705b4672be25d8698ef2b37116
|
[
"MIT"
] | null | null | null |
Model.py
|
Giorgiobientinesi/Workshop2
|
f454499d4befdb705b4672be25d8698ef2b37116
|
[
"MIT"
] | null | null | null |
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
df = pd.read_csv("Airbnb-cleaned.csv")
df.columns
del df["Unnamed: 0"]
df1 = df[['neighbourhood', 'property_type', 'room_type']]
# IMPORT ENCODER
from sklearn.preprocessing import OneHotEncoder
# FIT ENCODER ON THE ORIGINAL DATASET TO MAKE IT REMEMBER CATEGORIES
enc = OneHotEncoder(sparse=False)
enc.fit(df1)
df["neighbourhood"].unique()
df[['Bijlmer-Oost', 'Noord-Oost', 'Noord-West', 'Oud-Noord',
'IJburg - Zeeburgereiland', 'Centrum-West',
'Oostelijk Havengebied - Indische Buurt', 'Centrum-Oost',
'Oud-Oost', 'Watergraafsmeer', 'Gaasperdam - Driemond',
'Westerpark', 'Bijlmer-Centrum', 'De Pijp - Rivierenbuurt', 'Zuid',
'Buitenveldert - Zuidas', 'De Baarsjes - Oud-West',
'Bos en Lommer', 'Geuzenveld - Slotermeer', 'Slotervaart',
'Osdorp', 'De Aker - Nieuw Sloten',
'Apartment', 'Bed & Breakfast', 'House',
'Entire home/apt', 'Private room', 'Shared room']] = enc.transform(
df1[["neighbourhood", "property_type", "room_type"]])
df = df.drop(["neighbourhood", "property_type", "room_type"], axis =1)
df["Distance_from_center(m)"] = df["Distance_from_center(m)"]/1000
y = df['price']
data = df.drop(['price'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=7)
model = RandomForestRegressor()
model.fit(X_train,y_train)
pred = model.predict(X_test)
mean_absolute_error(y_test, pred)
from joblib import dump, load
dump(model, 'Airbnb.joblib')
| 31.309091
| 91
| 0.702091
|
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
df = pd.read_csv("Airbnb-cleaned.csv")
df.columns
del df["Unnamed: 0"]
df1 = df[['neighbourhood', 'property_type', 'room_type']]
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(sparse=False)
enc.fit(df1)
df["neighbourhood"].unique()
df[['Bijlmer-Oost', 'Noord-Oost', 'Noord-West', 'Oud-Noord',
'IJburg - Zeeburgereiland', 'Centrum-West',
'Oostelijk Havengebied - Indische Buurt', 'Centrum-Oost',
'Oud-Oost', 'Watergraafsmeer', 'Gaasperdam - Driemond',
'Westerpark', 'Bijlmer-Centrum', 'De Pijp - Rivierenbuurt', 'Zuid',
'Buitenveldert - Zuidas', 'De Baarsjes - Oud-West',
'Bos en Lommer', 'Geuzenveld - Slotermeer', 'Slotervaart',
'Osdorp', 'De Aker - Nieuw Sloten',
'Apartment', 'Bed & Breakfast', 'House',
'Entire home/apt', 'Private room', 'Shared room']] = enc.transform(
df1[["neighbourhood", "property_type", "room_type"]])
df = df.drop(["neighbourhood", "property_type", "room_type"], axis =1)
df["Distance_from_center(m)"] = df["Distance_from_center(m)"]/1000
y = df['price']
data = df.drop(['price'], axis=1)
X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=0.2, random_state=7)
model = RandomForestRegressor()
model.fit(X_train,y_train)
pred = model.predict(X_test)
mean_absolute_error(y_test, pred)
from joblib import dump, load
dump(model, 'Airbnb.joblib')
| true
| true
|
f7153f357c7a65ac30f9f3d65e5017cda2f97c38
| 1,185
|
py
|
Python
|
scrython/rulings/arena.py
|
vtbassmatt/Scrython
|
49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976
|
[
"MIT"
] | null | null | null |
scrython/rulings/arena.py
|
vtbassmatt/Scrython
|
49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976
|
[
"MIT"
] | null | null | null |
scrython/rulings/arena.py
|
vtbassmatt/Scrython
|
49fd9bd112e0f552a4310ac81fdb3f2b9e2a3976
|
[
"MIT"
] | null | null | null |
from .rulings_object import RulingsObject
class Arena(RulingsObject):
"""
cards/arena/:id/rulings
Gets the ruling of a card by the Arena Id.
Args:
id (string): The arena id of the card you want rulings for.
format (string, optional): Returns data in the specified method. Defaults to JSON.
face (string, optional):
If you're using the `image` format, this will specify if you want the front or back face.
version (string, optional):
If you're using the `image` format, this will specify if you want the small, normal, large, etc version of the image.
pretty (string, optional):
Returns a prettier version of the json object. Note that this may break functionality with Scrython.
Returns:
N/A
Raises:
N/A
Examples:
>>> rule = scrython.rulings.Arena(id="66975")
>>> rule.data_length()
"""
def __init__(self, **kwargs):
if kwargs.get('id') is None:
raise TypeError('No id provided to search by')
self.url = 'cards/arena/{}/rulings?'.format(str(kwargs.get('id')))
super(Arena, self).__init__(self.url)
| 33.857143
| 129
| 0.62616
|
from .rulings_object import RulingsObject
class Arena(RulingsObject):
def __init__(self, **kwargs):
if kwargs.get('id') is None:
raise TypeError('No id provided to search by')
self.url = 'cards/arena/{}/rulings?'.format(str(kwargs.get('id')))
super(Arena, self).__init__(self.url)
| true
| true
|
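A short usage sketch mirroring the docstring example above; `data_length()` is assumed to be an accessor inherited from RulingsObject, and newer scrython releases may require running inside an asyncio event loop.
import scrython

rule = scrython.rulings.Arena(id="66975")  # Arena id from the docstring example
print(rule.data_length())                  # number of rulings returned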
f715404495e00bc2fc41e3195c1aac56c832e314
| 2,319
|
py
|
Python
|
survos2/improc/regions/ccl.py
|
DiamondLightSource/SuRVoS2
|
42bacfb6a5cc267f38ca1337e51a443eae1a9d2b
|
[
"MIT"
] | 4
|
2017-10-10T14:47:16.000Z
|
2022-01-14T05:57:50.000Z
|
survos2/improc/regions/ccl.py
|
DiamondLightSource/SuRVoS2
|
42bacfb6a5cc267f38ca1337e51a443eae1a9d2b
|
[
"MIT"
] | 1
|
2022-01-11T21:11:12.000Z
|
2022-01-12T08:22:34.000Z
|
survos2/improc/regions/ccl.py
|
DiamondLightSource/SuRVoS2
|
42bacfb6a5cc267f38ca1337e51a443eae1a9d2b
|
[
"MIT"
] | 2
|
2018-03-06T06:31:29.000Z
|
2019-03-04T03:33:18.000Z
|
import logging
import os.path as op
import numpy as np
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
from pycuda.compiler import SourceModule
from ..improc_types import int3
from ..utils import gpuregion, cpuregion
from ..cuda import asgpuarray, grid_kernel_config
from ._ccl import _remap, _relabel2d, _relabel3d, _merge_small3d
__dirname__ = op.dirname(__file__)
@gpuregion
def ccl3d(labels, remap=True):
assert labels.ndim == 3
assert labels.dtype == np.uint32
with open(op.join(__dirname__, "kernels", "ccl3d.cu"), "r") as f:
_mod_conv = SourceModule(f.read())
gpu_ccl_local = _mod_conv.get_function("uf_local")
gpu_ccl_global = _mod_conv.get_function("uf_global")
gpu_ccl_final = _mod_conv.get_function("uf_final")
labels_gpu = asgpuarray(labels, dtype=np.uint32)
result_gpu = gpuarray.zeros_like(labels_gpu)
shape = np.asarray(tuple(labels.shape[::-1]), dtype=int3)
block, grid = grid_kernel_config(gpu_ccl_local, labels.shape)
shared = int(np.prod(block) * 8)
gpu_ccl_local(labels_gpu, result_gpu, shape, block=block, grid=grid, shared=shared)
gpu_ccl_global(labels_gpu, result_gpu, shape, block=block, grid=grid)
gpu_ccl_final(result_gpu, shape, block=block, grid=grid)
if remap:
return remap_labels(result_gpu.get())
return result_gpu
def remap_labels(labels):
assert labels.dtype == np.uint32
new_labels = _remap(labels.ravel())
new_labels.shape = labels.shape
return new_labels
def relabel(labels):
assert labels.dtype == np.uint32
if labels.ndim == 2:
new_labels = _relabel2d(labels.ravel(), labels.shape[1])
elif labels.ndim == 3:
new_labels = _relabel3d(labels.ravel(), labels.shape[1], labels.shape[2])
else:
raise ValueError(
"Input array has to be 2 or 3 dimensional: {}".format(labels.ndim)
)
new_labels.shape = labels.shape
return new_labels
@cpuregion
def merge_small(data, labels, min_size=1, **kwargs):
if data.ndim != labels.ndim + 1:
data = data[..., None]
assert data.ndim == labels.ndim + 1
return _merge_small3d(data, labels, labels.max() + 1, min_size)
| 29.35443
| 88
| 0.675722
|
import logging
import os.path as op
import numpy as np
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
from pycuda.compiler import SourceModule
from ..improc_types import int3
from ..utils import gpuregion, cpuregion
from ..cuda import asgpuarray, grid_kernel_config
from ._ccl import _remap, _relabel2d, _relabel3d, _merge_small3d
__dirname__ = op.dirname(__file__)
@gpuregion
def ccl3d(labels, remap=True):
assert labels.ndim == 3
assert labels.dtype == np.uint32
with open(op.join(__dirname__, "kernels", "ccl3d.cu"), "r") as f:
_mod_conv = SourceModule(f.read())
gpu_ccl_local = _mod_conv.get_function("uf_local")
gpu_ccl_global = _mod_conv.get_function("uf_global")
gpu_ccl_final = _mod_conv.get_function("uf_final")
labels_gpu = asgpuarray(labels, dtype=np.uint32)
result_gpu = gpuarray.zeros_like(labels_gpu)
shape = np.asarray(tuple(labels.shape[::-1]), dtype=int3)
block, grid = grid_kernel_config(gpu_ccl_local, labels.shape)
shared = int(np.prod(block) * 8)
gpu_ccl_local(labels_gpu, result_gpu, shape, block=block, grid=grid, shared=shared)
gpu_ccl_global(labels_gpu, result_gpu, shape, block=block, grid=grid)
gpu_ccl_final(result_gpu, shape, block=block, grid=grid)
if remap:
return remap_labels(result_gpu.get())
return result_gpu
def remap_labels(labels):
assert labels.dtype == np.uint32
new_labels = _remap(labels.ravel())
new_labels.shape = labels.shape
return new_labels
def relabel(labels):
assert labels.dtype == np.uint32
if labels.ndim == 2:
new_labels = _relabel2d(labels.ravel(), labels.shape[1])
elif labels.ndim == 3:
new_labels = _relabel3d(labels.ravel(), labels.shape[1], labels.shape[2])
else:
raise ValueError(
"Input array has to be 2 or 3 dimensional: {}".format(labels.ndim)
)
new_labels.shape = labels.shape
return new_labels
@cpuregion
def merge_small(data, labels, min_size=1, **kwargs):
if data.ndim != labels.ndim + 1:
data = data[..., None]
assert data.ndim == labels.ndim + 1
return _merge_small3d(data, labels, labels.max() + 1, min_size)
| true
| true
|
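A minimal driver sketch for the functions above, assuming a CUDA-capable device, pycuda, and the compiled _ccl extension are available, and that the gpuregion wrapper hands back a NumPy-compatible labelling; the toy volume is illustrative only.
import numpy as np

# Toy 3-D volume with two labelled slabs; uint32 is required by the asserts above.
labels = np.zeros((8, 8, 8), dtype=np.uint32)
labels[:4] = 1
labels[4:] = 2

components = ccl3d(labels)                          # connected components, remapped ids
components = relabel(components.astype(np.uint32))  # compact consecutive labels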
f71540c4bd66f93fc57f13dd1acee11e0731db26
| 1,753
|
py
|
Python
|
fyle/platform/platform.py
|
fylein/fyle-platform-sdk-py
|
dcf0f1de25e95e41ec213dc97c09196203090d01
|
[
"MIT"
] | 1
|
2022-03-08T09:43:30.000Z
|
2022-03-08T09:43:30.000Z
|
fyle/platform/platform.py
|
fylein/fyle-platform-sdk-py
|
dcf0f1de25e95e41ec213dc97c09196203090d01
|
[
"MIT"
] | 2
|
2021-11-22T09:12:12.000Z
|
2022-03-17T10:13:40.000Z
|
fyle/platform/platform.py
|
fylein/fyle-platform-sdk-py
|
dcf0f1de25e95e41ec213dc97c09196203090d01
|
[
"MIT"
] | null | null | null |
"""
Fyle Platform SDK Class
"""
from .apis import v1beta
from .globals.config import config
from .internals.auth import Auth
class Platform(Auth):
"""The main class which creates a connection with
Fyle APIs using OAuth2 authentication (refresh token grant type).
    Parameters:
        server_url (str): Base URL of the Fyle Platform API server.
        token_url (str): OAuth2 token endpoint URL.
        client_id (str): Client ID for Fyle API.
        client_secret (str): Client secret for Fyle API.
        refresh_token (str): Refresh token for Fyle API.
"""
def __init__(self, server_url, token_url, client_id, client_secret, refresh_token):
super().__init__()
# store the credentials
self.__server_url = server_url
self.__token_url = token_url
self.__client_id = client_id
self.__client_secret = client_secret
self.__refresh_token = refresh_token
self.v1beta = v1beta
# get the access token
self.set_server_url()
self.set_token_url()
self.set_client_id()
self.set_client_secret()
self.set_refresh_token()
self.update_access_token()
def set_server_url(self):
"""Set the Server URL in all API objects."""
config.set('FYLE', 'SERVER_URL', self.__server_url)
def set_token_url(self):
"""Set the Token URL in all API objects."""
config.set('FYLE', 'TOKEN_URL', self.__token_url)
def set_client_id(self):
"""Set the Client ID."""
config.set('AUTH', 'CLIENT_ID', self.__client_id)
def set_client_secret(self):
"""Set the Client Secret."""
config.set('AUTH', 'CLIENT_SECRET', self.__client_secret)
def set_refresh_token(self):
"""Set the Refresh token."""
config.set('AUTH', 'REFRESH_TOKEN', self.__refresh_token)
| 27.390625
| 87
| 0.64575
|
from .apis import v1beta
from .globals.config import config
from .internals.auth import Auth
class Platform(Auth):
def __init__(self, server_url, token_url, client_id, client_secret, refresh_token):
super().__init__()
self.__server_url = server_url
self.__token_url = token_url
self.__client_id = client_id
self.__client_secret = client_secret
self.__refresh_token = refresh_token
self.v1beta = v1beta
self.set_server_url()
self.set_token_url()
self.set_client_id()
self.set_client_secret()
self.set_refresh_token()
self.update_access_token()
def set_server_url(self):
config.set('FYLE', 'SERVER_URL', self.__server_url)
def set_token_url(self):
config.set('FYLE', 'TOKEN_URL', self.__token_url)
def set_client_id(self):
config.set('AUTH', 'CLIENT_ID', self.__client_id)
def set_client_secret(self):
config.set('AUTH', 'CLIENT_SECRET', self.__client_secret)
def set_refresh_token(self):
config.set('AUTH', 'REFRESH_TOKEN', self.__refresh_token)
| true
| true
|
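A hedged construction sketch for the class above; the URLs and credential strings are placeholders, not values taken from the SDK.
connection = Platform(
    server_url='https://platform.fyle.tech',             # placeholder server URL
    token_url='https://accounts.fyle.tech/oauth/token',  # placeholder token URL
    client_id='<client_id>',
    client_secret='<client_secret>',
    refresh_token='<refresh_token>',
)
api = connection.v1beta  # API namespace exposed by the connection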
f7154188c1409f7ad80c6acf7c69384da06e644f
| 4,384
|
py
|
Python
|
jobChomper/graph.py
|
bhautikj/jobChomper
|
09b50b3e14ab580a93376e4882214c18a8da34d5
|
[
"MIT"
] | 1
|
2018-03-16T03:16:49.000Z
|
2018-03-16T03:16:49.000Z
|
jobChomper/graph.py
|
bhautikj/jobChomper
|
09b50b3e14ab580a93376e4882214c18a8da34d5
|
[
"MIT"
] | null | null | null |
jobChomper/graph.py
|
bhautikj/jobChomper
|
09b50b3e14ab580a93376e4882214c18a8da34d5
|
[
"MIT"
] | null | null | null |
##
## Weirdo Tree Graph that powers jobChomper
## --
##
## Assertions:
## * DAG is made up of named edges
## * Each edge is a triple (A, B, NEEDSPREVIOUSTOPASS)
## A, B are the named nodes
## B will execute after A has evaluated
## NEEDSPREVIOUSTOPASS is True or False; if it is True then A _must_ evaluate as True for B to run
## * There's a special node called STARTNODE from where execution starts
## * Comment lines in graph file start with #
## * Elements in graph lines separated by ',' - for example:
## A, B, True
##
import jobChomper.node
import logging
STARTNODENAME = "STARTNODE"
RUNONLYONPASS = "onlyOnPass"
RUNONFAIL = "onFail"
def findCycle(graph):
todo = set(graph.keys())
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for node in graph[top]:
if node in stack:
return stack[stack.index(node):]
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
return None
class Graph(object):
""" Graph Object """
def __init__(self):
self.init = True
self.edges = set()
self.runDict = {}
self.nodeSet = set()
def buildRunDict(self):
self.runDict = {}
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
self.nodeSet.add(nodeA)
self.nodeSet.add(nodeB)
priorSuccess = edge[2]
if nodeA not in self.runDict.keys():
self.runDict[nodeA] = {}
self.runDict[nodeA][RUNONLYONPASS]=[]
self.runDict[nodeA][RUNONFAIL]=[]
if priorSuccess == True:
self.runDict[nodeA][RUNONLYONPASS].append(nodeB)
else:
self.runDict[nodeA][RUNONFAIL].append(nodeB)
for node in self.nodeSet:
if node not in self.runDict.keys():
self.runDict[node]={}
self.runDict[node][RUNONLYONPASS]=[]
self.runDict[node][RUNONFAIL]=[]
def findCycles(self):
connectivity = {}
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
if nodeA not in connectivity.keys():
connectivity[nodeA] = []
connectivity[nodeA].append(nodeB)
return findCycle(connectivity)
def checkEdgeNodesValid(self):
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
if nodeA == STARTNODENAME:
continue
if not jobChomper.node.nodeExists(nodeA):
raise ValueError("[Graph] no such node as: " + nodeA)
if not jobChomper.node.nodeExists(nodeB):
raise ValueError("[Graph] no such node as: " + nodeB)
def loadGraphFromFile(self, filename):
foundStart = False
with open(filename) as graphBody:
data = graphBody.read()
for line in data.split('\n'):
line = line.strip()
# Empty line
if line == '':
continue
# Comment line
if line[0] == '#':
continue
spl = line.split(',')
# Not a triple
if len(spl) != 3:
logging.error("Problem parsing: " + filename + " file has invalid triple: " + line)
raise ValueError("[Graph] Problem parsing: " + filename + " file has invalid triple: " + line)
nodeA = spl[0].strip()
nodeB = spl[1].strip()
prevEval = False
if spl[2].lower().strip() == 'true':
prevEval = True
if nodeA == STARTNODENAME:
if foundStart == True:
logging.error("Problem parsing: " + filename + " start node defined again: " + line)
raise ValueError("[Graph] Problem parsing: " + filename + " start node defined again: " + line)
else:
foundStart = True
triple = (nodeA, nodeB, prevEval)
self.edges.add(triple)
if foundStart == False:
      logging.error("Problem parsing: " + filename + " could not find " + STARTNODENAME)
      raise ValueError("[Graph] Problem parsing: " + filename + " could not find " + STARTNODENAME)
self.buildRunDict()
cycles = self.findCycles()
if cycles != None:
logging.error("Problem parsing: " + filename + " cycle detected:" + str(cycles))
raise ValueError("[Graph] Problem parsing: " + filename + " cycle detected:" + str(cycles))
self.checkEdgeNodesValid()
| 28.842105
| 107
| 0.580748
|
import jobChomper.node
import logging
STARTNODENAME = "STARTNODE"
RUNONLYONPASS = "onlyOnPass"
RUNONFAIL = "onFail"
def findCycle(graph):
  todo = set(graph.keys())
  while todo:
    node = todo.pop()
    stack = [node]
    while stack:
      top = stack[-1]
for node in graph[top]:
if node in stack:
return stack[stack.index(node):]
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
return None
class Graph(object):
def __init__(self):
self.init = True
self.edges = set()
self.runDict = {}
self.nodeSet = set()
def buildRunDict(self):
self.runDict = {}
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
self.nodeSet.add(nodeA)
self.nodeSet.add(nodeB)
priorSuccess = edge[2]
if nodeA not in self.runDict.keys():
self.runDict[nodeA] = {}
self.runDict[nodeA][RUNONLYONPASS]=[]
self.runDict[nodeA][RUNONFAIL]=[]
if priorSuccess == True:
self.runDict[nodeA][RUNONLYONPASS].append(nodeB)
else:
self.runDict[nodeA][RUNONFAIL].append(nodeB)
for node in self.nodeSet:
if node not in self.runDict.keys():
self.runDict[node]={}
self.runDict[node][RUNONLYONPASS]=[]
self.runDict[node][RUNONFAIL]=[]
def findCycles(self):
connectivity = {}
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
if nodeA not in connectivity.keys():
connectivity[nodeA] = []
connectivity[nodeA].append(nodeB)
return findCycle(connectivity)
def checkEdgeNodesValid(self):
for edge in self.edges:
nodeA = edge[0]
nodeB = edge[1]
if nodeA == STARTNODENAME:
continue
if not jobChomper.node.nodeExists(nodeA):
raise ValueError("[Graph] no such node as: " + nodeA)
if not jobChomper.node.nodeExists(nodeB):
raise ValueError("[Graph] no such node as: " + nodeB)
def loadGraphFromFile(self, filename):
foundStart = False
with open(filename) as graphBody:
data = graphBody.read()
for line in data.split('\n'):
line = line.strip()
# Empty line
if line == '':
continue
# Comment line
      if line[0] == '#':
continue
spl = line.split(',')
# Not a triple
if len(spl) != 3:
logging.error("Problem parsing: " + filename + " file has invalid triple: " + line)
raise ValueError("[Graph] Problem parsing: " + filename + " file has invalid triple: " + line)
nodeA = spl[0].strip()
nodeB = spl[1].strip()
prevEval = False
if spl[2].lower().strip() == 'true':
prevEval = True
if nodeA == STARTNODENAME:
if foundStart == True:
logging.error("Problem parsing: " + filename + " start node defined again: " + line)
raise ValueError("[Graph] Problem parsing: " + filename + " start node defined again: " + line)
else:
foundStart = True
triple = (nodeA, nodeB, prevEval)
self.edges.add(triple)
if foundStart == False:
      logging.error("Problem parsing: " + filename + " could not find " + STARTNODENAME)
      raise ValueError("[Graph] Problem parsing: " + filename + " could not find " + STARTNODENAME)
self.buildRunDict()
cycles = self.findCycles()
if cycles != None:
logging.error("Problem parsing: " + filename + " cycle detected:" + str(cycles))
raise ValueError("[Graph] Problem parsing: " + filename + " cycle detected:" + str(cycles))
self.checkEdgeNodesValid()
| true
| true
|
f715418cf642bd95568448dcaef9e2cf8c16dcc4
| 1,446
|
py
|
Python
|
tests/00_unit/test_base.py
|
wolcomm/eos-prefix-list-agent
|
a1ec37494048f0f0524ca5ff985838d844c84e4e
|
[
"MIT"
] | 8
|
2019-06-02T23:47:38.000Z
|
2021-08-24T07:30:08.000Z
|
tests/00_unit/test_base.py
|
wolcomm/eos-prefix-list-agent
|
a1ec37494048f0f0524ca5ff985838d844c84e4e
|
[
"MIT"
] | 39
|
2019-04-09T06:21:56.000Z
|
2022-01-29T10:00:37.000Z
|
tests/00_unit/test_base.py
|
wolcomm/eos-prefix-list-agent
|
a1ec37494048f0f0524ca5ff985838d844c84e4e
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the MIT License
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for prefix_list_agent.agent module."""
from __future__ import print_function
import pytest
from prefix_list_agent.base import PrefixListBase
class TestPrefixListAgent(object):
"""Test cases for PrefixListBase object."""
def test_init(self, sdk, mocker):
"""Test case for PrefixListAgent initialisation."""
mocker.patch("eossdk.Tracer", autospec=True)
base = PrefixListBase()
assert isinstance(base, PrefixListBase)
@pytest.mark.parametrize("level", ("emerg", "alert", "crit", "err",
"warning", "notice", "info", "debug"))
def test_tracing(self, mocker, level):
"""Test calls to tracer."""
mocker.patch("eossdk.Tracer", autospec=True)
base = PrefixListBase()
method = getattr(base, level)
method("message")
assert base.tracer.trace.call_count == 1
| 37.076923
| 79
| 0.69018
|
from __future__ import print_function
import pytest
from prefix_list_agent.base import PrefixListBase
class TestPrefixListAgent(object):
def test_init(self, sdk, mocker):
mocker.patch("eossdk.Tracer", autospec=True)
base = PrefixListBase()
assert isinstance(base, PrefixListBase)
@pytest.mark.parametrize("level", ("emerg", "alert", "crit", "err",
"warning", "notice", "info", "debug"))
def test_tracing(self, mocker, level):
mocker.patch("eossdk.Tracer", autospec=True)
base = PrefixListBase()
method = getattr(base, level)
method("message")
assert base.tracer.trace.call_count == 1
| true
| true
|
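The tests above rely on an `sdk` fixture that is not defined in this file; a minimal conftest sketch, assuming eossdk is importable (or stubbed) in the test environment, might look like:
# conftest.py -- hypothetical fixture backing the tests above.
import pytest

@pytest.fixture
def sdk(mocker):
    # Stub the EOS SDK entry point so PrefixListBase can be built off-switch.
    return mocker.patch("eossdk.Sdk", autospec=True)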
f71541e82fcb35f2b0c32b4abe7a90cb5afb6096
| 4,982
|
py
|
Python
|
homeassistant/components/august/gateway.py
|
bg1000/core
|
4ee4d674d8931927eae5222e3bf8dd6e26f3c6e5
|
[
"Apache-2.0"
] | 1
|
2021-03-20T12:25:26.000Z
|
2021-03-20T12:25:26.000Z
|
homeassistant/components/august/gateway.py
|
bg1000/core
|
4ee4d674d8931927eae5222e3bf8dd6e26f3c6e5
|
[
"Apache-2.0"
] | 51
|
2020-08-03T07:30:44.000Z
|
2022-03-22T06:02:42.000Z
|
homeassistant/components/august/gateway.py
|
bg1000/core
|
4ee4d674d8931927eae5222e3bf8dd6e26f3c6e5
|
[
"Apache-2.0"
] | null | null | null |
"""Handle August connection setup and authentication."""
import asyncio
import logging
import os
from aiohttp import ClientError, ClientResponseError
from august.api_async import ApiAsync
from august.authenticator_async import AuthenticationState, AuthenticatorAsync
from homeassistant.const import (
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers import aiohttp_client
from .const import (
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DEFAULT_AUGUST_CONFIG_FILE,
DEFAULT_TIMEOUT,
VERIFICATION_CODE_KEY,
)
from .exceptions import CannotConnect, InvalidAuth, RequireValidation
_LOGGER = logging.getLogger(__name__)
class AugustGateway:
"""Handle the connection to August."""
def __init__(self, hass):
"""Init the connection."""
self._aiohttp_session = aiohttp_client.async_get_clientsession(hass)
self._token_refresh_lock = asyncio.Lock()
self._access_token_cache_file = None
self._hass = hass
self._config = None
self.api = None
self.authenticator = None
self.authentication = None
@property
def access_token(self):
"""Access token for the api."""
return self.authentication.access_token
def config_entry(self):
"""Config entry."""
return {
CONF_LOGIN_METHOD: self._config[CONF_LOGIN_METHOD],
CONF_USERNAME: self._config[CONF_USERNAME],
CONF_INSTALL_ID: self._config.get(CONF_INSTALL_ID),
CONF_ACCESS_TOKEN_CACHE_FILE: self._access_token_cache_file,
}
async def async_setup(self, conf):
"""Create the api and authenticator objects."""
if conf.get(VERIFICATION_CODE_KEY):
return
self._access_token_cache_file = conf.get(
CONF_ACCESS_TOKEN_CACHE_FILE,
f".{conf[CONF_USERNAME]}{DEFAULT_AUGUST_CONFIG_FILE}",
)
self._config = conf
self.api = ApiAsync(
self._aiohttp_session,
timeout=self._config.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
)
self.authenticator = AuthenticatorAsync(
self.api,
self._config[CONF_LOGIN_METHOD],
self._config[CONF_USERNAME],
self._config.get(CONF_PASSWORD, ""),
install_id=self._config.get(CONF_INSTALL_ID),
access_token_cache_file=self._hass.config.path(
self._access_token_cache_file
),
)
await self.authenticator.async_setup_authentication()
async def async_authenticate(self):
"""Authenticate with the details provided to setup."""
self.authentication = None
try:
self.authentication = await self.authenticator.async_authenticate()
if self.authentication.state == AuthenticationState.AUTHENTICATED:
# Call the locks api to verify we are actually
# authenticated because we can be authenticated
                # but have no access
await self.api.async_get_operable_locks(self.access_token)
except ClientResponseError as ex:
if ex.status == HTTP_UNAUTHORIZED:
raise InvalidAuth from ex
raise CannotConnect from ex
except ClientError as ex:
_LOGGER.error("Unable to connect to August service: %s", str(ex))
raise CannotConnect from ex
if self.authentication.state == AuthenticationState.BAD_PASSWORD:
raise InvalidAuth
if self.authentication.state == AuthenticationState.REQUIRES_VALIDATION:
raise RequireValidation
if self.authentication.state != AuthenticationState.AUTHENTICATED:
_LOGGER.error("Unknown authentication state: %s", self.authentication.state)
raise InvalidAuth
return self.authentication
async def async_reset_authentication(self):
"""Remove the cache file."""
await self._hass.async_add_executor_job(self._reset_authentication)
def _reset_authentication(self):
"""Remove the cache file."""
if os.path.exists(self._access_token_cache_file):
os.unlink(self._access_token_cache_file)
async def async_refresh_access_token_if_needed(self):
"""Refresh the august access token if needed."""
if not self.authenticator.should_refresh():
return
async with self._token_refresh_lock:
refreshed_authentication = (
await self.authenticator.async_refresh_access_token(force=False)
)
_LOGGER.info(
"Refreshed august access token. The old token expired at %s, and the new token expires at %s",
self.authentication.access_token_expires,
refreshed_authentication.access_token_expires,
)
self.authentication = refreshed_authentication
| 34.839161
| 110
| 0.66399
|
import asyncio
import logging
import os
from aiohttp import ClientError, ClientResponseError
from august.api_async import ApiAsync
from august.authenticator_async import AuthenticationState, AuthenticatorAsync
from homeassistant.const import (
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers import aiohttp_client
from .const import (
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DEFAULT_AUGUST_CONFIG_FILE,
DEFAULT_TIMEOUT,
VERIFICATION_CODE_KEY,
)
from .exceptions import CannotConnect, InvalidAuth, RequireValidation
_LOGGER = logging.getLogger(__name__)
class AugustGateway:
def __init__(self, hass):
self._aiohttp_session = aiohttp_client.async_get_clientsession(hass)
self._token_refresh_lock = asyncio.Lock()
self._access_token_cache_file = None
self._hass = hass
self._config = None
self.api = None
self.authenticator = None
self.authentication = None
@property
def access_token(self):
return self.authentication.access_token
def config_entry(self):
return {
CONF_LOGIN_METHOD: self._config[CONF_LOGIN_METHOD],
CONF_USERNAME: self._config[CONF_USERNAME],
CONF_INSTALL_ID: self._config.get(CONF_INSTALL_ID),
CONF_ACCESS_TOKEN_CACHE_FILE: self._access_token_cache_file,
}
async def async_setup(self, conf):
if conf.get(VERIFICATION_CODE_KEY):
return
self._access_token_cache_file = conf.get(
CONF_ACCESS_TOKEN_CACHE_FILE,
f".{conf[CONF_USERNAME]}{DEFAULT_AUGUST_CONFIG_FILE}",
)
self._config = conf
self.api = ApiAsync(
self._aiohttp_session,
timeout=self._config.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
)
self.authenticator = AuthenticatorAsync(
self.api,
self._config[CONF_LOGIN_METHOD],
self._config[CONF_USERNAME],
self._config.get(CONF_PASSWORD, ""),
install_id=self._config.get(CONF_INSTALL_ID),
access_token_cache_file=self._hass.config.path(
self._access_token_cache_file
),
)
await self.authenticator.async_setup_authentication()
async def async_authenticate(self):
self.authentication = None
try:
self.authentication = await self.authenticator.async_authenticate()
if self.authentication.state == AuthenticationState.AUTHENTICATED:
await self.api.async_get_operable_locks(self.access_token)
except ClientResponseError as ex:
if ex.status == HTTP_UNAUTHORIZED:
raise InvalidAuth from ex
raise CannotConnect from ex
except ClientError as ex:
_LOGGER.error("Unable to connect to August service: %s", str(ex))
raise CannotConnect from ex
if self.authentication.state == AuthenticationState.BAD_PASSWORD:
raise InvalidAuth
if self.authentication.state == AuthenticationState.REQUIRES_VALIDATION:
raise RequireValidation
if self.authentication.state != AuthenticationState.AUTHENTICATED:
_LOGGER.error("Unknown authentication state: %s", self.authentication.state)
raise InvalidAuth
return self.authentication
async def async_reset_authentication(self):
await self._hass.async_add_executor_job(self._reset_authentication)
def _reset_authentication(self):
if os.path.exists(self._access_token_cache_file):
os.unlink(self._access_token_cache_file)
async def async_refresh_access_token_if_needed(self):
if not self.authenticator.should_refresh():
return
async with self._token_refresh_lock:
refreshed_authentication = (
await self.authenticator.async_refresh_access_token(force=False)
)
_LOGGER.info(
"Refreshed august access token. The old token expired at %s, and the new token expires at %s",
self.authentication.access_token_expires,
refreshed_authentication.access_token_expires,
)
self.authentication = refreshed_authentication
| true
| true
|
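A hedged sketch of driving the gateway above from integration setup code; `hass` and the keys inside `conf` are assumed to come from the config entry, and the wrapper function name is illustrative.
async def async_connect(hass, conf):
    gateway = AugustGateway(hass)
    await gateway.async_setup(conf)        # builds the api and authenticator
    try:
        await gateway.async_authenticate()
    except RequireValidation:
        # The caller must collect a verification code and retry setup.
        raise
    return gateway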
f715431153040fc9d72aca7e7a4ab69f64467305
| 8,101
|
py
|
Python
|
mv_gaussian/low_dim_w_summary_stats/run_script_snpla.py
|
SamuelWiqvist/snpla
|
9d586c5d09de3eecd2536485af6fc28a915443e4
|
[
"MIT"
] | 2
|
2021-02-17T14:13:54.000Z
|
2021-06-01T08:29:35.000Z
|
mv_gaussian/low_dim_w_summary_stats/run_script_snpla.py
|
SamuelWiqvist/snpla
|
9d586c5d09de3eecd2536485af6fc28a915443e4
|
[
"MIT"
] | null | null | null |
mv_gaussian/low_dim_w_summary_stats/run_script_snpla.py
|
SamuelWiqvist/snpla
|
9d586c5d09de3eecd2536485af6fc28a915443e4
|
[
"MIT"
] | null | null | null |
# Imports
import sys
import torch
import os
import time
import numpy as np
from torch.distributions.multivariate_normal import MultivariateNormal
# Initial set up
lunarc = int(sys.argv[1])
dim = int(sys.argv[2])
seed = int(sys.argv[3])
seed_data = int(sys.argv[4])
hp_tuning = int(sys.argv[5]) # if hp_tuning = 0, no hyper-param tuning, else hp_tuning for that sample of the hp
lambda_val = float(sys.argv[6])  # if lambda_val = 0, keep the default prior-decay rate, else override it with this value
print("Input args:")
print("Dim: " + str(dim))
print("seed: " + str(seed))
print("seed_data: " + str(seed_data))
id_job = str(dim) + '_' + str(seed) + '_' + str(seed_data)
if hp_tuning > 0:
id_job = id_job + "_" + str(hp_tuning)
if lambda_val > 0:
id_job = id_job + "_" + str(lambda_val)
# Set wd
print(os.getcwd())
# set the wd to the base folder for the project
if lunarc == 1:
os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev')
else:
os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev')
sys.path.append('./')
print(os.getcwd())
# Load all utility functions for all methods
import mv_gaussian.low_dim_w_summary_stats.functions as func
import algorithms.snpla as snpla
# Set model and generate data
x_o, conj_model, analytical_posterior = func.set_up_model(seed)
# set up posterior network
flow_lik, flow_post = func.set_up_networks()
## Generate test data
N_prior_pred_test = 1000
x_test, theta_test = func.run_model_sim(N_prior_pred_test, seed + 2, conj_model, analytical_posterior,
conj_model.model.covariance_matrix, dim, True)
# Generate test data for obs data set
print(conj_model.model_sim(theta_test).shape)
N_test_obs_data = 1000
x_test_obs_data = torch.zeros(N_test_obs_data, 5)
theta_test_obs_data = torch.zeros(N_test_obs_data, dim)
for i in range(N_test_obs_data):
x_test_obs_data[i, :] = func.calc_summary_stats(x_o)
theta_test_obs_data[i, :] = conj_model.model.loc
# Set up networks for the likelihood model
# Base dist for posterior model
flow_lik, flow_post = func.set_up_networks()
hyper_params = [0.001, 0.002, 0.95, 0.7] # lr_like, lr_post, gamma_post, gamma
if lambda_val > 0:
hyper_params[-1] = lambda_val
if hp_tuning >= 2:
hyper_params = func.sample_hp("snpla", hp_tuning)
optimizer_lik = torch.optim.Adam(flow_lik.parameters(), lr=hyper_params[0])
optimizer_post = torch.optim.Adam(flow_post.parameters(), lr=hyper_params[1])
decay_rate_post = hyper_params[2]  # learning-rate decay (gamma) for the posterior optimizer
nbr_rounds = 10
prob_prior_decay_rate = hyper_params[3]
prob_prior = snpla.calc_prob_prior(nbr_rounds, prob_prior_decay_rate)
print(prob_prior)
#nbr_lik = [2000, 2000, 2000, 2000]
#nbr_epochs_lik = [25, 25, 25, 25]
#batch_size = 50
#batch_size_post = 50
#nbr_post = [10000, 10000, 10000, 10000]
#nbr_epochs_post = [25, 25, 25, 25]
nbr_lik = [2500 for _ in range(nbr_rounds)] # [1000, 1000, 1000, 1000, 1000] # , 2000, 2000]
nbr_epochs_lik = [75 for _ in range(nbr_rounds)] # [100, 100, 100, 100, 100]
batch_size = 50
batch_size_post = 1000
nbr_post = [10000 for _ in range(nbr_rounds)] # [10000, 10000, 10000, 10000, 10000] # , 10000, 10000]
nbr_epochs_post = [75 for _ in range(nbr_rounds)] # [50, 50, 50, 50, 50, 50]
x_o_batch_post = torch.zeros(batch_size_post, 5)
for i in range(batch_size_post):
x_o_batch_post[i, :] = func.calc_summary_stats(x_o)
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
start = time.time()
# TODO check prior and simulator
models_lik, models_post = snpla.inference_snpla(flow_lik,
flow_post,
conj_model.prior,
conj_model.model_sim,
optimizer_lik,
optimizer_post,
decay_rate_post,
func.calc_summary_stats(x_o),
x_o_batch_post,
dim,
prob_prior,
nbr_lik,
nbr_epochs_lik,
nbr_post,
nbr_epochs_post,
batch_size,
batch_size_post)
end = time.time()
run_time = end - start
print("")
print("Runtime:" + str(round(run_time, 2)))
kl_divs_trained = []
start = time.time()
torch.manual_seed(seed)
for i in range(nbr_rounds):
print(i)
posterior_sample = models_post[i].sample(1000, context=func.calc_summary_stats(x_o))
posterior_sample = posterior_sample.reshape((1000, 2))
kl_divs_trained.append(conj_model.kl_div(analytical_posterior, posterior_sample))
if hp_tuning == 0 and lambda_val > 0:
np.savetxt('mv_gaussian/low_dim_w_summary_stats/lambda_val/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
elif hp_tuning == 0:
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
else:
np.savetxt('mv_gaussian/low_dim_w_summary_stats/hp_tuning/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
end = time.time()
run_time_inference = (end - start) / nbr_rounds
if hp_tuning == 0 and lambda_val > 0:
with open('mv_gaussian/low_dim_w_summary_stats/lambda_val/snpla_' + id_job + '.txt', 'w') as f:
for h in hyper_params:
f.write('%.6f\n' % h)
for p in prob_prior:
f.write('%.6f\n' % p)
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(nbr_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
elif hp_tuning == 0:
with open('mv_gaussian/low_dim_w_summary_stats/results/snpla_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(nbr_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
else:
with open('mv_gaussian/low_dim_w_summary_stats/hp_tuning/snpla_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % hp_tuning)
for h in hyper_params:
f.write('%.6f\n' % h)
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(nbr_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
if hp_tuning == 0:
# Inference
# Sample data from post pred
N_post_pred_test = 1000
x_post_pred, theta_post_pred = func.run_model_sim(N_post_pred_test, seed + 3, conj_model, analytical_posterior,
conj_model.model.covariance_matrix, dim, False)
torch.manual_seed(seed)
x_prior = flow_lik.sample(1, context=theta_test)
x_theta_true = flow_lik.sample(1, context=theta_test_obs_data)
x_post = flow_lik.sample(1, context=theta_post_pred)
x_prior = x_prior.reshape(x_test.shape)
x_theta_true = x_theta_true.reshape(x_test_obs_data.shape)
x_post = x_post.reshape(x_post_pred.shape)
# Write results
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_snpla_' + id_job +
'.csv', x_theta_true.detach().numpy(), delimiter=",")
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_prior_snpla_' + id_job + '.csv',
x_prior.detach().numpy(), delimiter=",")
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_post_snpla_' + id_job + '.csv',
x_post.detach().numpy(), delimiter=",")
| 33.754167
| 125
| 0.627453
|
import sys
import torch
import os
import time
import numpy as np
from torch.distributions.multivariate_normal import MultivariateNormal
lunarc = int(sys.argv[1])
dim = int(sys.argv[2])
seed = int(sys.argv[3])
seed_data = int(sys.argv[4])
hp_tuning = int(sys.argv[5])
lambda_val = float(sys.argv[6])
print("Input args:")
print("Dim: " + str(dim))
print("seed: " + str(seed))
print("seed_data: " + str(seed_data))
id_job = str(dim) + '_' + str(seed) + '_' + str(seed_data)
if hp_tuning > 0:
id_job = id_job + "_" + str(hp_tuning)
if lambda_val > 0:
id_job = id_job + "_" + str(lambda_val)
print(os.getcwd())
if lunarc == 1:
os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev')
else:
os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev')
sys.path.append('./')
print(os.getcwd())
import mv_gaussian.low_dim_w_summary_stats.functions as func
import algorithms.snpla as snpla
x_o, conj_model, analytical_posterior = func.set_up_model(seed)
flow_lik, flow_post = func.set_up_networks()
N_prior_pred_test = 1000
x_test, theta_test = func.run_model_sim(N_prior_pred_test, seed + 2, conj_model, analytical_posterior,
conj_model.model.covariance_matrix, dim, True)
print(conj_model.model_sim(theta_test).shape)
N_test_obs_data = 1000
x_test_obs_data = torch.zeros(N_test_obs_data, 5)
theta_test_obs_data = torch.zeros(N_test_obs_data, dim)
for i in range(N_test_obs_data):
x_test_obs_data[i, :] = func.calc_summary_stats(x_o)
theta_test_obs_data[i, :] = conj_model.model.loc
flow_lik, flow_post = func.set_up_networks()
hyper_params = [0.001, 0.002, 0.95, 0.7]
if lambda_val > 0:
hyper_params[-1] = lambda_val
if hp_tuning >= 2:
hyper_params = func.sample_hp("snpla", hp_tuning)
optimizer_lik = torch.optim.Adam(flow_lik.parameters(), lr=hyper_params[0])
optimizer_post = torch.optim.Adam(flow_post.parameters(), lr=hyper_params[1])
decay_rate_post = hyper_params[2]
nbr_rounds = 10
prob_prior_decay_rate = hyper_params[3]
prob_prior = snpla.calc_prob_prior(nbr_rounds, prob_prior_decay_rate)
print(prob_prior)
#nbr_lik = [2000, 2000, 2000, 2000]
#nbr_epochs_lik = [25, 25, 25, 25]
#batch_size = 50
#batch_size_post = 50
#nbr_post = [10000, 10000, 10000, 10000]
#nbr_epochs_post = [25, 25, 25, 25]
nbr_lik = [2500 for _ in range(nbr_rounds)] # [1000, 1000, 1000, 1000, 1000] # , 2000, 2000]
nbr_epochs_lik = [75 for _ in range(nbr_rounds)] # [100, 100, 100, 100, 100]
batch_size = 50
batch_size_post = 1000
nbr_post = [10000 for _ in range(nbr_rounds)] # [10000, 10000, 10000, 10000, 10000] # , 10000, 10000]
nbr_epochs_post = [75 for _ in range(nbr_rounds)] # [50, 50, 50, 50, 50, 50]
x_o_batch_post = torch.zeros(batch_size_post, 5)
for i in range(batch_size_post):
x_o_batch_post[i, :] = func.calc_summary_stats(x_o)
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
start = time.time()
# TODO check prior and simulator
models_lik, models_post = snpla.inference_snpla(flow_lik,
flow_post,
conj_model.prior,
conj_model.model_sim,
optimizer_lik,
optimizer_post,
decay_rate_post,
func.calc_summary_stats(x_o),
x_o_batch_post,
dim,
prob_prior,
nbr_lik,
nbr_epochs_lik,
nbr_post,
nbr_epochs_post,
batch_size,
batch_size_post)
end = time.time()
run_time = end - start
print("")
print("Runtime:" + str(round(run_time, 2)))
kl_divs_trained = []
start = time.time()
torch.manual_seed(seed)
for i in range(nbr_rounds):
print(i)
posterior_sample = models_post[i].sample(1000, context=func.calc_summary_stats(x_o))
posterior_sample = posterior_sample.reshape((1000, 2))
kl_divs_trained.append(conj_model.kl_div(analytical_posterior, posterior_sample))
if hp_tuning == 0 and lambda_val > 0:
np.savetxt('mv_gaussian/low_dim_w_summary_stats/lambda_val/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
elif hp_tuning == 0:
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
else:
np.savetxt('mv_gaussian/low_dim_w_summary_stats/hp_tuning/post_samples_snpla_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
end = time.time()
run_time_inference = (end - start) / nbr_rounds
if hp_tuning == 0 and lambda_val > 0:
with open('mv_gaussian/low_dim_w_summary_stats/lambda_val/snpla_' + id_job + '.txt', 'w') as f:
for h in hyper_params:
f.write('%.6f\n' % h)
for p in prob_prior:
f.write('%.6f\n' % p)
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(nbr_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
elif hp_tuning == 0:
with open('mv_gaussian/low_dim_w_summary_stats/results/snpla_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(nbr_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
else:
with open('mv_gaussian/low_dim_w_summary_stats/hp_tuning/snpla_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % hp_tuning)
for h in hyper_params:
f.write('%.6f\n' % h)
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(nbr_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
if hp_tuning == 0:
# Inference
# Sample data from post pred
N_post_pred_test = 1000
x_post_pred, theta_post_pred = func.run_model_sim(N_post_pred_test, seed + 3, conj_model, analytical_posterior,
conj_model.model.covariance_matrix, dim, False)
torch.manual_seed(seed)
x_prior = flow_lik.sample(1, context=theta_test)
x_theta_true = flow_lik.sample(1, context=theta_test_obs_data)
x_post = flow_lik.sample(1, context=theta_post_pred)
x_prior = x_prior.reshape(x_test.shape)
x_theta_true = x_theta_true.reshape(x_test_obs_data.shape)
x_post = x_post.reshape(x_post_pred.shape)
# Write results
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_snpla_' + id_job +
'.csv', x_theta_true.detach().numpy(), delimiter=",")
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_prior_snpla_' + id_job + '.csv',
x_prior.detach().numpy(), delimiter=",")
np.savetxt('mv_gaussian/low_dim_w_summary_stats/data/data_recon_post_snpla_' + id_job + '.csv',
x_post.detach().numpy(), delimiter=",")
| true
| true
|
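A hypothetical local invocation of the script above, spelling out the positional sys.argv mapping parsed at the top of the file; the concrete argument values are illustrative only.
import subprocess

subprocess.run([
    "python", "mv_gaussian/low_dim_w_summary_stats/run_script_snpla.py",
    "0",    # lunarc: 0 = local working dir, 1 = cluster working dir
    "2",    # dim
    "10",   # seed
    "11",   # seed_data
    "0",    # hp_tuning: 0 disables hyper-parameter sampling
    "0",    # lambda_val: 0 keeps the default prior-decay rate
], check=True)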
f715435ca91863f52909480c8b9b5ef1a9fa028f
| 3,048
|
py
|
Python
|
alipay/aop/api/domain/AlipayUserCertDocIDCard.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayUserCertDocIDCard.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/domain/AlipayUserCertDocIDCard.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserCertDocIDCard(object):
def __init__(self):
self._encoded_img_emblem = None
self._encoded_img_identity = None
self._expire_date = None
self._name = None
self._number = None
@property
def encoded_img_emblem(self):
return self._encoded_img_emblem
@encoded_img_emblem.setter
def encoded_img_emblem(self, value):
self._encoded_img_emblem = value
@property
def encoded_img_identity(self):
return self._encoded_img_identity
@encoded_img_identity.setter
def encoded_img_identity(self, value):
self._encoded_img_identity = value
@property
def expire_date(self):
return self._expire_date
@expire_date.setter
def expire_date(self, value):
self._expire_date = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def number(self):
return self._number
@number.setter
def number(self, value):
self._number = value
def to_alipay_dict(self):
params = dict()
if self.encoded_img_emblem:
if hasattr(self.encoded_img_emblem, 'to_alipay_dict'):
params['encoded_img_emblem'] = self.encoded_img_emblem.to_alipay_dict()
else:
params['encoded_img_emblem'] = self.encoded_img_emblem
if self.encoded_img_identity:
if hasattr(self.encoded_img_identity, 'to_alipay_dict'):
params['encoded_img_identity'] = self.encoded_img_identity.to_alipay_dict()
else:
params['encoded_img_identity'] = self.encoded_img_identity
if self.expire_date:
if hasattr(self.expire_date, 'to_alipay_dict'):
params['expire_date'] = self.expire_date.to_alipay_dict()
else:
params['expire_date'] = self.expire_date
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.number:
if hasattr(self.number, 'to_alipay_dict'):
params['number'] = self.number.to_alipay_dict()
else:
params['number'] = self.number
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserCertDocIDCard()
if 'encoded_img_emblem' in d:
o.encoded_img_emblem = d['encoded_img_emblem']
if 'encoded_img_identity' in d:
o.encoded_img_identity = d['encoded_img_identity']
if 'expire_date' in d:
o.expire_date = d['expire_date']
if 'name' in d:
o.name = d['name']
if 'number' in d:
o.number = d['number']
return o
| 30.178218
| 91
| 0.607612
|
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserCertDocIDCard(object):
def __init__(self):
self._encoded_img_emblem = None
self._encoded_img_identity = None
self._expire_date = None
self._name = None
self._number = None
@property
def encoded_img_emblem(self):
return self._encoded_img_emblem
@encoded_img_emblem.setter
def encoded_img_emblem(self, value):
self._encoded_img_emblem = value
@property
def encoded_img_identity(self):
return self._encoded_img_identity
@encoded_img_identity.setter
def encoded_img_identity(self, value):
self._encoded_img_identity = value
@property
def expire_date(self):
return self._expire_date
@expire_date.setter
def expire_date(self, value):
self._expire_date = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def number(self):
return self._number
@number.setter
def number(self, value):
self._number = value
def to_alipay_dict(self):
params = dict()
if self.encoded_img_emblem:
if hasattr(self.encoded_img_emblem, 'to_alipay_dict'):
params['encoded_img_emblem'] = self.encoded_img_emblem.to_alipay_dict()
else:
params['encoded_img_emblem'] = self.encoded_img_emblem
if self.encoded_img_identity:
if hasattr(self.encoded_img_identity, 'to_alipay_dict'):
params['encoded_img_identity'] = self.encoded_img_identity.to_alipay_dict()
else:
params['encoded_img_identity'] = self.encoded_img_identity
if self.expire_date:
if hasattr(self.expire_date, 'to_alipay_dict'):
params['expire_date'] = self.expire_date.to_alipay_dict()
else:
params['expire_date'] = self.expire_date
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.number:
if hasattr(self.number, 'to_alipay_dict'):
params['number'] = self.number.to_alipay_dict()
else:
params['number'] = self.number
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserCertDocIDCard()
if 'encoded_img_emblem' in d:
o.encoded_img_emblem = d['encoded_img_emblem']
if 'encoded_img_identity' in d:
o.encoded_img_identity = d['encoded_img_identity']
if 'expire_date' in d:
o.expire_date = d['expire_date']
if 'name' in d:
o.name = d['name']
if 'number' in d:
o.number = d['number']
return o
| true
| true
|
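A round-trip sketch for the model above, showing that to_alipay_dict only emits the fields that were set; the field values are illustrative, not real data.
card = AlipayUserCertDocIDCard.from_alipay_dict({
    'name': 'Zhang San',                 # illustrative values only
    'number': '110101199001011234',
    'expire_date': '2030-01-01',
})
assert card.to_alipay_dict() == {
    'name': 'Zhang San',
    'number': '110101199001011234',
    'expire_date': '2030-01-01',
}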
f71543f1283b285219a186f659bb0b4f1109d5be
| 387
|
py
|
Python
|
boost/inception/migrations/0006_auto_20190723_1445.py
|
igorMIA/bus_com_parser
|
07de65f3106c302d96b5fd9dad89562de44ec63f
|
[
"MIT"
] | null | null | null |
boost/inception/migrations/0006_auto_20190723_1445.py
|
igorMIA/bus_com_parser
|
07de65f3106c302d96b5fd9dad89562de44ec63f
|
[
"MIT"
] | 12
|
2020-02-12T01:09:12.000Z
|
2022-03-11T23:54:05.000Z
|
boost/inception/migrations/0006_auto_20190723_1445.py
|
igorMIA/bus_com_parser
|
07de65f3106c302d96b5fd9dad89562de44ec63f
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.3 on 2019-07-23 14:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inception', '0005_auto_20190723_0810'),
]
operations = [
migrations.AlterField(
model_name='busstation',
name='cost',
field=models.FloatField(null=True),
),
]
| 20.368421
| 49
| 0.599483
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inception', '0005_auto_20190723_0810'),
]
operations = [
migrations.AlterField(
model_name='busstation',
name='cost',
field=models.FloatField(null=True),
),
]
| true
| true
|
f715452e7dcf6d35a6ee975f8b79b14024d1e44c
| 1,053
|
py
|
Python
|
ProjectEulerPython/problems/problem_039.py
|
geo-desic/project-euler
|
8065ee082a6948447ef961c9aa960c90a815a3ab
|
[
"MIT"
] | null | null | null |
ProjectEulerPython/problems/problem_039.py
|
geo-desic/project-euler
|
8065ee082a6948447ef961c9aa960c90a815a3ab
|
[
"MIT"
] | null | null | null |
ProjectEulerPython/problems/problem_039.py
|
geo-desic/project-euler
|
8065ee082a6948447ef961c9aa960c90a815a3ab
|
[
"MIT"
] | null | null | null |
from problems.problem import Problem
def generate_pythagorean_triples(ub: int) -> list:
# https://en.wikipedia.org/wiki/Pythagorean_triple
result = []
for a in range(1, ub):
aa = a * a
b = a + 1
c = b + 1
while c <= ub:
cc = aa + b * b
while c * c < cc:
c += 1
if c * c == cc and c <= ub:
result.append([a + b + c, a, b, c])
b += 1
return result
class Problem039(Problem):
def calculate_answer(self) -> int:
# p = perimeter
# a < b < c = hypotenuse
answer = 0
max_perimeter = 1000
solution_counts = [0 for i in range(max_perimeter + 1)]
triangles = generate_pythagorean_triples(max_perimeter // 2 + 1)
max_solutions = 0
for triangle in triangles:
p = triangle[0]
if p <= max_perimeter:
solution_counts[p] += 1
solutions = solution_counts[p]
                if solutions > max_solutions:
max_solutions = solutions
answer = p
self.print_detail(f"p = {answer}; solutions = {solutions}")
return answer
| 27
| 69
| 0.576448
|
from problems.problem import Problem
def generate_pythagorean_triples(ub: int) -> list:
result = []
for a in range(1, ub):
aa = a * a
b = a + 1
c = b + 1
while c <= ub:
cc = aa + b * b
while c * c < cc:
c += 1
if c * c == cc and c <= ub:
result.append([a + b + c, a, b, c])
b += 1
return result
class Problem039(Problem):
def calculate_answer(self) -> int:
answer = 0
max_perimeter = 1000
solution_counts = [0 for i in range(max_perimeter + 1)]
triangles = generate_pythagorean_triples(max_perimeter // 2 + 1)
max_solutions = 0
for triangle in triangles:
p = triangle[0]
if p <= max_perimeter:
solution_counts[p] += 1
solutions = solution_counts[p]
                if solutions > max_solutions:
max_solutions = solutions
answer = p
self.print_detail(f"p = {answer}; solutions = {solutions}")
return answer
| true
| true
|
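A quick sanity check of the generator above against a known case: perimeter 120 admits exactly three right triangles, (20, 48, 52), (24, 45, 51) and (30, 40, 50).
triples = generate_pythagorean_triples(1000 // 2 + 1)
p120 = sorted(t[1:] for t in triples if t[0] == 120)
assert p120 == [[20, 48, 52], [24, 45, 51], [30, 40, 50]]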
f71545a38ac9fb407c64619685ddb0408292df6d
| 10,791
|
py
|
Python
|
maskgen/batch/validate_from_s3.py
|
spongezhang/maskgen
|
7284e300d1cb326a5349879de0bace9cfa8788a8
|
[
"BSD-3-Clause"
] | null | null | null |
maskgen/batch/validate_from_s3.py
|
spongezhang/maskgen
|
7284e300d1cb326a5349879de0bace9cfa8788a8
|
[
"BSD-3-Clause"
] | null | null | null |
maskgen/batch/validate_from_s3.py
|
spongezhang/maskgen
|
7284e300d1cb326a5349879de0bace9cfa8788a8
|
[
"BSD-3-Clause"
] | null | null | null |
import argparse
import os
import maskgen.scenario_model
from maskgen.tool_set import *
from maskgen import video_tools
import tempfile
from maskgen.scenario_model import ImageProjectModel
from maskgen.image_graph import extract_archive
from maskgen.graph_rules import processProjectProperties
from maskgen.batch import BatchProcessor, pick_projects
import hashlib
import shutil
import sys
import csv
import time
from functools import partial
from maskgen import plugins
def reproduceMask(scModel):
"""
Rebuild all edge masks
:param scModel: scenario model
:return:
"""
for edge in scModel.getGraph().get_edges():
scModel.select(edge)
scModel.reproduceMask()
print 'Updated masks in project: ' + str(scModel.getName())
def select_region(imfile, prev):
im = openImage(imfile)
if im.mode == 'RGBA' or im.mode == 'LA':
return imfile
else:
if not os.path.exists(prev):
pos = prev.rfind('.')
mod_filename = prev[0:pos] + prev[pos:].lower()
if os.path.exists(mod_filename):
prev = mod_filename
prevIm = Image.open(prev)
if im.mode == 'L' and set(im.getdata()).issubset({0, 1, 255}) and not isRGBA(prevIm):
rgba = prevIm.convert('RGBA')
bw = im.point(lambda x: 1 if x > 0 else 0, 'F')
rgbaarr = np.asarray(rgba)
bwa = np.asarray(bw)
            # channels sit on the last axis of a PIL-derived array
            prod = np.multiply(bwa, rgbaarr[:, :, 3])
            newIm = np.dstack([rgbaarr[:, :, 0], rgbaarr[:, :, 1], rgbaarr[:, :, 2], prod]).astype(np.uint8)
newImPIL = Image.fromarray(newIm, 'RGBA')
newImPIL.save(imfile)
return imfile
return imfile
def isRGBA(im):
return im.mode == 'RGBA'
mod_functions=globals()
def getFunction(name, function_mappings={}):
if name is None:
return None
import importlib
if name in function_mappings:
return function_mappings[name]
elif name in mod_functions:
function_mappings[name] = mod_functions[name]
return function_mappings[name]
else:
mod_name, func_name = name.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
function_mappings[name] = func
return func
except Exception as e:
logging.getLogger('maskgen').error('Unable to load rule {}: {}'.format(name,str(e)))
raise e
def update_rotation(scModel):
"""
Add rotation parameter to OutputPNG and OutputTIFF operations
:param scModel: Opened project model
:param project: Project JSON file
:return: None. Updates JSON.
"""
rotateOps = ['OutputPng', 'OutputTif']
projectDir = scModel.getGraph().dir
for edge in scModel.getGraph().get_edges():
currentLink = scModel.getGraph().get_edge(edge[0], edge[1])
if currentLink['op'] in rotateOps:
if 'arguments' not in currentLink:
currentLink['arguments'] = {}
if 'Image Rotated' in currentLink['arguments']:
continue
change = edge['shape change'] if 'shape change' in edge else None
if change and change != '(0,0)':
currentLink['arguments']['Image Rotated'] = 'yes'
elif change and change == '(0,0)':
currentLink['arguments']['Image Rotated'] = 'no'
else:
startFile = scModel.getGraph().get_node(edge[0])['file']
endFile = scModel.getGraph().get_node(edge[1])['file']
im1 = Image.open(os.path.join(projectDir, startFile))
im2 = Image.open(os.path.join(projectDir, endFile))
if im1.size != im2.size:
currentLink['arguments']['Image Rotated'] = 'yes'
else:
currentLink['arguments']['Image Rotated'] = 'no'
def validate_by(scModel, person):
scModel.setProjectData('validation', 'yes')
scModel.setProjectData('validatedby', person)
scModel.setProjectData('validationdate', time.strftime("%m/%d/%Y"))
scModel.save()
def isSuccessor(scModel, successors, node, ops):
"""
:param scModel:
:return:
@type successors: list of str
@type scModel: ImageProjectModel
"""
for successor in successors:
edge = scModel.getGraph().get_edge(node,successor)
if edge['op'] not in ops:
return False
return True
def missingVideo(scModel):
    """
    :param scModel:
    :return:
    @type scModel: ImageProjectModel
    """
    import copy
for edge in scModel.getGraph().get_edges():
currentLink = scModel.getGraph().get_edge(edge[0], edge[1])
successors = scModel.getGraph().successors(edge[1])
predecessors = scModel.getGraph().predecessors(edge[1])
if currentLink['op'] == 'AddAudioSample':
sourceim, source = scModel.getGraph().get_image(edge[0])
im, dest = scModel.getGraph().get_image(edge[1])
sourcemetadata = video_tools.getMeta(source,show_streams=True)[0]
destmetadata = video_tools.getMeta(dest,show_streams=True)[0]
if len(sourcemetadata) > 0:
sourcevidcount = len([idx for idx, val in enumerate(sourcemetadata) if val['codec_type'] != 'audio'])
if len(destmetadata) > 0:
                destvidcount = len([idx for idx, val in enumerate(destmetadata) if val['codec_type'] != 'audio'])
if sourcevidcount != destvidcount:
if not isSuccessor(scModel, successors, edge[1], ['AntiForensicCopyExif', 'OutputMP4', 'Donor']):
raise ValueError('Cannot correct AddAudioSample for edge {} to {} due to successor node'.format(
edge[0], edge[1]
))
predecessors = [pred for pred in predecessors if pred != edge[0]]
if len(predecessors) == 0:
donor = scModel.getBaseNode(edge[1])
else:
donor = predecessors[0]
args= dict() if 'arguments' not in currentLink else copy.copy(currentLink['arguments'])
args['donor'] = donor
plugins.callPlugin('OverwriteAudioStream',sourceim,source,dest,donor=donor)
def recompressAsVideo(scModel):
"""
:param scModel:
:return:
@type scModel: maskgen.scenario_model.ImageProjectModel
"""
for edge in scModel.getGraph().get_edges():
currentLink = scModel.getGraph().get_edge(edge[0], edge[1])
successors = scModel.getGraph().successors(edge[1])
predecessors = scModel.getGraph().predecessors(edge[1])
# should we consider video nodes just to be sure?
#finalNode = scModel.getGraph().get_node(edge[1])
if currentLink['op'] == 'AntiForensicCopyExif' and \
len(successors) == 0 and \
currentLink['softwareName'].lower() == 'ffmpeg':
predecessors = [pred for pred in predecessors if pred != edge[0]]
if len (predecessors) == 0:
donor = scModel.getBaseNode(edge[1])
else:
donor = predecessors[0]
scModel.selectImage(edge[1])
scModel.remove()
scModel.selectImage(edge[0])
scModel.imageFromPlugin('CompressAsVideo',donor=donor)
def perform_update(project,args, functions, tempdir):
scModel = maskgen.scenario_model.ImageProjectModel(project)
print 'User: ' + scModel.getGraph().getDataItem('username')
validator = scModel.getProjectData('validatedby')
if not args.validate:
if validator is not None:
setPwdX(CustomPwdX(validator))
else:
setPwdX(CustomPwdX(scModel.getGraph().getDataItem('username')))
for function in functions:
function(scModel)
if args.validate:
scModel.set_validation_properties('yes', get_username(), 'QA redone via Batch Updater')
scModel.save()
if args.updategraph:
if os.path.exists(os.path.join(scModel.get_dir(),'_overview_.png')):
return
error_list = scModel.exporttos3(args.uploadfolder, tempdir)
if len(error_list) > 0:
for err in error_list:
print err
raise ValueError('Export Failed')
return scModel.validate()
def fetchfromS3(dir, location, file):
import boto3
BUCKET = location.split('/')[0].strip()
DIR = location[location.find('/') + 1:].strip() +'/'
s3 = boto3.resource('s3')
my_bucket = s3.Bucket(BUCKET)
my_bucket.download_file(DIR + file, os.path.join(dir, file))
def processProject(args, functions, file_to_process):
"""
:param args:
:param functions:
:param file_to_process:
:return:
@type file_to_process : str
"""
if not file_to_process.endswith('tgz') and os.path.exists(os.path.join(args.tempfolder,file_to_process)):
dir = os.path.join(args.tempfolder,file_to_process)
fetch = False
else:
dir = tempfile.mkdtemp(dir=args.tempfolder) if args.tempfolder else tempfile.mkdtemp()
fetch = True
try:
if fetch:
fetchfromS3(dir, args.downloadfolder,file_to_process)
extract_archive(os.path.join(dir, file_to_process), dir)
for project in pick_projects(dir):
perform_update(project, args,functions, dir)
finally:
if fetch:
shutil.rmtree(dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', required=True, help='File of projects')
parser.add_argument('-df', '--downloadfolder', required=True, help='Download folder')
parser.add_argument('-ug', '--updategraph', required=False, help='Upload Graph',action='store_true')
parser.add_argument('-uf', '--uploadfolder', required=True, help='Upload folder')
parser.add_argument('-v', '--validate', required=False, help='QA',action='store_true')
parser.add_argument('-tf', '--tempfolder', required=False, help='Temp Holder')
parser.add_argument('-e', '--functions', required=False, help='List of function')
parser.add_argument('-cf', '--completefile', required=True, help='Projects to Completed')
args = parser.parse_args()
functions_map = {}
functions = []
if args.functions is not None:
functions = [getFunction(name, function_mappings=functions_map) for name in args.functions.split(',')]
with open(args.file, 'r') as input_file:
files_to_process = input_file.readlines()
files_to_process = [x.strip() for x in files_to_process]
processor = BatchProcessor(args.completefile,files_to_process)
func = partial(processProject,args,functions)
processor.process(func)
if __name__ == '__main__':
main()
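# Illustrative invocation (not from the original source); the script name,
# bucket paths, and file names below are invented for the example:
#
#   python update_projects.py -f projects.txt -df my-bucket/downloads \
#       -uf my-bucket/uploads -cf completed.txt -e recompressAsVideo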
| 39.097826
| 126
| 0.624317
|
import argparse
import os
import maskgen.scenario_model
from maskgen.tool_set import *
from maskgen import video_tools
import tempfile
from maskgen.scenario_model import ImageProjectModel
from maskgen.image_graph import extract_archive
from maskgen.graph_rules import processProjectProperties
from maskgen.batch import BatchProcessor, pick_projects
import hashlib
import shutil
import sys
import csv
import time
from functools import partial
from maskgen import plugins
def reproduceMask(scModel):
"""
Rebuild all edge masks
:param scModel: scenario model
:return:
"""
for edge in scModel.getGraph().get_edges():
scModel.select(edge)
scModel.reproduceMask()
print 'Updated masks in project: ' + str(scModel.getName())
def select_region(imfile, prev):
im = openImage(imfile)
if im.mode == 'RGBA' or im.mode == 'LA':
return imfile
else:
if not os.path.exists(prev):
pos = prev.rfind('.')
mod_filename = prev[0:pos] + prev[pos:].lower()
if os.path.exists(mod_filename):
prev = mod_filename
prevIm = Image.open(prev)
if im.mode == 'L' and set(im.getdata()).issubset({0, 1, 255}) and not isRGBA(prevIm):
rgba = prevIm.convert('RGBA')
bw = im.point(lambda x: 1 if x > 0 else 0, 'F')
rgbaarr = np.asarray(rgba)
bwa = np.asarray(bw)
# RGBA arrays from PIL are (height, width, channel); apply the binary mask to the alpha channel.
prod = np.multiply(bwa, rgbaarr[:, :, 3])
newIm = np.dstack([rgbaarr[:, :, 0], rgbaarr[:, :, 1], rgbaarr[:, :, 2], prod]).astype('uint8')
newImPIL = Image.fromarray(newIm, 'RGBA')
newImPIL.save(imfile)
return imfile
return imfile
def isRGBA(im):
return im.mode == 'RGBA'
mod_functions=globals()
def getFunction(name, function_mappings={}):
if name is None:
return None
import importlib
if name in function_mappings:
return function_mappings[name]
elif name in mod_functions:
function_mappings[name] = mod_functions[name]
return function_mappings[name]
else:
mod_name, func_name = name.rsplit('.', 1)
try:
mod = importlib.import_module(mod_name)
func = getattr(mod, func_name)
function_mappings[name] = func
return func
except Exception as e:
logging.getLogger('maskgen').error('Unable to load rule {}: {}'.format(name,str(e)))
raise e
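# Illustrative aside (not part of the original module): getFunction first
# checks this module's globals, then falls back to a dotted import path.
# 'reproduceMask' is defined above; 'os.path.join' is only a stdlib example.
#
#   getFunction('reproduceMask')   # resolved from mod_functions
#   getFunction('os.path.join')    # resolved via the importlib fallback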
def update_rotation(scModel):
"""
Add rotation parameter to OutputPNG and OutputTIFF operations
:param scModel: Opened project model
:param project: Project JSON file
:return: None. Updates JSON.
"""
rotateOps = ['OutputPng', 'OutputTif']
projectDir = scModel.getGraph().dir
for edge in scModel.getGraph().get_edges():
currentLink = scModel.getGraph().get_edge(edge[0], edge[1])
if currentLink['op'] in rotateOps:
if 'arguments' not in currentLink:
currentLink['arguments'] = {}
if 'Image Rotated' in currentLink['arguments']:
continue
change = currentLink['shape change'] if 'shape change' in currentLink else None
if change and change != '(0,0)':
currentLink['arguments']['Image Rotated'] = 'yes'
elif change and change == '(0,0)':
currentLink['arguments']['Image Rotated'] = 'no'
else:
startFile = scModel.getGraph().get_node(edge[0])['file']
endFile = scModel.getGraph().get_node(edge[1])['file']
im1 = Image.open(os.path.join(projectDir, startFile))
im2 = Image.open(os.path.join(projectDir, endFile))
if im1.size != im2.size:
currentLink['arguments']['Image Rotated'] = 'yes'
else:
currentLink['arguments']['Image Rotated'] = 'no'
def validate_by(scModel, person):
scModel.setProjectData('validation', 'yes')
scModel.setProjectData('validatedby', person)
scModel.setProjectData('validationdate', time.strftime("%m/%d/%Y"))
scModel.save()
def isSuccessor(scModel, successors, node, ops):
"""
:param scModel:
:return:
@type successors: list of str
@type scModel: ImageProjectModel
"""
for successor in successors:
edge = scModel.getGraph().get_edge(node,successor)
if edge['op'] not in ops:
return False
return True
def missingVideo(scModel):
import copy
"""
:param scModel:
:return:
@type scModel: ImageProjectModel
"""
for edge in scModel.getGraph().get_edges():
currentLink = scModel.getGraph().get_edge(edge[0], edge[1])
successors = scModel.getGraph().successors(edge[1])
predecessors = scModel.getGraph().predecessors(edge[1])
if currentLink['op'] == 'AddAudioSample':
sourceim, source = scModel.getGraph().get_image(edge[0])
im, dest = scModel.getGraph().get_image(edge[1])
sourcemetadata = video_tools.getMeta(source,show_streams=True)[0]
destmetadata = video_tools.getMeta(dest,show_streams=True)[0]
if len(sourcemetadata) > 0:
sourcevidcount = len([idx for idx, val in enumerate(sourcemetadata) if val['codec_type'] != 'audio'])
if len(destmetadata) > 0:
destvidcount = len([idx for idx, val in enumerate(destmetadata) if val['codec_type'] != 'audio'])
if sourcevidcount != destvidcount:
if not isSuccessor(scModel, successors, edge[1], ['AntiForensicCopyExif', 'OutputMP4', 'Donor']):
raise ValueError('Cannot correct AddAudioSample for edge {} to {} due to successor node'.format(
edge[0], edge[1]
))
predecessors = [pred for pred in predecessors if pred != edge[0]]
if len(predecessors) == 0:
donor = scModel.getBaseNode(edge[1])
else:
donor = predecessors[0]
args = dict() if 'arguments' not in currentLink else copy.copy(currentLink['arguments'])
args['donor'] = donor
plugins.callPlugin('OverwriteAudioStream',sourceim,source,dest,donor=donor)
def recompressAsVideo(scModel):
"""
:param scModel:
:return:
@type scModel: maskgen.scenario_model.ImageProjectModel
"""
for edge in scModel.getGraph().get_edges():
currentLink = scModel.getGraph().get_edge(edge[0], edge[1])
successors = scModel.getGraph().successors(edge[1])
predecessors = scModel.getGraph().predecessors(edge[1])
if currentLink['op'] == 'AntiForensicCopyExif' and \
len(successors) == 0 and \
currentLink['softwareName'].lower() == 'ffmpeg':
predecessors = [pred for pred in predecessors if pred != edge[0]]
if len(predecessors) == 0:
donor = scModel.getBaseNode(edge[1])
else:
donor = predecessors[0]
scModel.selectImage(edge[1])
scModel.remove()
scModel.selectImage(edge[0])
scModel.imageFromPlugin('CompressAsVideo',donor=donor)
def perform_update(project,args, functions, tempdir):
scModel = maskgen.scenario_model.ImageProjectModel(project)
print 'User: ' + scModel.getGraph().getDataItem('username')
validator = scModel.getProjectData('validatedby')
if not args.validate:
if validator is not None:
setPwdX(CustomPwdX(validator))
else:
setPwdX(CustomPwdX(scModel.getGraph().getDataItem('username')))
for function in functions:
function(scModel)
if args.validate:
scModel.set_validation_properties('yes', get_username(), 'QA redone via Batch Updater')
scModel.save()
if args.updategraph:
if os.path.exists(os.path.join(scModel.get_dir(),'_overview_.png')):
return
error_list = scModel.exporttos3(args.uploadfolder, tempdir)
if len(error_list) > 0:
for err in error_list:
print err
raise ValueError('Export Failed')
return scModel.validate()
def fetchfromS3(dir, location, file):
import boto3
BUCKET = location.split('/')[0].strip()
DIR = location[location.find('/') + 1:].strip() +'/'
s3 = boto3.resource('s3')
my_bucket = s3.Bucket(BUCKET)
my_bucket.download_file(DIR + file, os.path.join(dir, file))
def processProject(args, functions, file_to_process):
"""
:param args:
:param functions:
:param file_to_process:
:return:
@type file_to_process : str
"""
if not file_to_process.endswith('tgz') and os.path.exists(os.path.join(args.tempfolder,file_to_process)):
dir = os.path.join(args.tempfolder,file_to_process)
fetch = False
else:
dir = tempfile.mkdtemp(dir=args.tempfolder) if args.tempfolder else tempfile.mkdtemp()
fetch = True
try:
if fetch:
fetchfromS3(dir, args.downloadfolder,file_to_process)
extract_archive(os.path.join(dir, file_to_process), dir)
for project in pick_projects(dir):
perform_update(project, args,functions, dir)
finally:
if fetch:
shutil.rmtree(dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', required=True, help='File of projects')
parser.add_argument('-df', '--downloadfolder', required=True, help='Download folder')
parser.add_argument('-ug', '--updategraph', required=False, help='Upload Graph',action='store_true')
parser.add_argument('-uf', '--uploadfolder', required=True, help='Upload folder')
parser.add_argument('-v', '--validate', required=False, help='QA',action='store_true')
parser.add_argument('-tf', '--tempfolder', required=False, help='Temp Holder')
parser.add_argument('-e', '--functions', required=False, help='List of function')
parser.add_argument('-cf', '--completefile', required=True, help='Projects to Completed')
args = parser.parse_args()
functions_map = {}
functions = []
if args.functions is not None:
functions = [getFunction(name, function_mappings=functions_map) for name in args.functions.split(',')]
with open(args.file, 'r') as input_file:
files_to_process = input_file.readlines()
files_to_process = [x.strip() for x in files_to_process]
processor = BatchProcessor(args.completefile,files_to_process)
func = partial(processProject,args,functions)
processor.process(func)
if __name__ == '__main__':
main()
| false
| true
|
f7154628c62314b11aa471a6abfc977dd15376a2
| 1,130
|
py
|
Python
|
scripts/install_nightly.py
|
xylar/cdat
|
8a5080cb18febfde365efc96147e25f51494a2bf
|
[
"BSD-3-Clause"
] | 62
|
2018-03-30T15:46:56.000Z
|
2021-12-08T23:30:24.000Z
|
scripts/install_nightly.py
|
xylar/cdat
|
8a5080cb18febfde365efc96147e25f51494a2bf
|
[
"BSD-3-Clause"
] | 114
|
2018-03-21T01:12:43.000Z
|
2021-07-05T12:29:54.000Z
|
scripts/install_nightly.py
|
CDAT/uvcdat
|
5133560c0c049b5c93ee321ba0af494253b44f91
|
[
"BSD-3-Clause"
] | 14
|
2018-06-06T02:42:47.000Z
|
2021-11-26T03:27:00.000Z
|
import sys
import os
import argparse
this_dir = os.path.abspath(os.path.dirname(__file__))
modules_dir = os.path.join(this_dir, '..', 'modules')
sys.path.append(modules_dir)
from Const import *
from Util import *
from CondaUtils import *
from CDATSetupUtils import *
valid_py_vers = PYTHON_VERSIONS
parser = argparse.ArgumentParser(description="install nightly",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-w", "--workdir",
help="working directory -- miniconda will be installed here")
parser.add_argument("-p", "--py_ver", choices=valid_py_vers,
help="python version, 'py2.7' or 'py3.6' or 'py3.7'")
args = parser.parse_args()
workdir = args.workdir
py_ver = args.py_ver
status, conda_path = install_miniconda(workdir, py_ver)
if status != SUCCESS:
sys.exit(FAILURE)
print("xxx conda_path: {p}".format(p=conda_path))
status, env_name = install_nightly(workdir, conda_path, 'nightly', py_ver)
if status != SUCCESS:
sys.exit(FAILURE)
status = conda_list(conda_path, env_name)
sys.exit(status)
| 23.061224
| 88
| 0.7
|
import sys
import os
import argparse
this_dir = os.path.abspath(os.path.dirname(__file__))
modules_dir = os.path.join(this_dir, '..', 'modules')
sys.path.append(modules_dir)
from Const import *
from Util import *
from CondaUtils import *
from CDATSetupUtils import *
valid_py_vers = PYTHON_VERSIONS
parser = argparse.ArgumentParser(description="install nightly",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-w", "--workdir",
help="working directory -- miniconda will be installed here")
parser.add_argument("-p", "--py_ver", choices=valid_py_vers,
help="python version, 'py2.7' or 'py3.6' or 'py3.7'")
args = parser.parse_args()
workdir = args.workdir
py_ver = args.py_ver
status, conda_path = install_miniconda(workdir, py_ver)
if status != SUCCESS:
sys.exit(FAILURE)
print("xxx conda_path: {p}".format(p=conda_path))
status, env_name = install_nightly(workdir, conda_path, 'nightly', py_ver)
if status != SUCCESS:
sys.exit(FAILURE)
status = conda_list(conda_path, env_name)
sys.exit(status)
| true
| true
|
f715466767fcc3856809b3d312d0e597406be25b
| 1,717
|
py
|
Python
|
model/summaries.py
|
victor-estrade/SystGradDescent
|
822e7094290301ec47a99433381a8d6406798aff
|
[
"MIT"
] | 2
|
2019-03-20T09:05:02.000Z
|
2019-03-20T15:23:44.000Z
|
model/summaries.py
|
victor-estrade/SystGradDescent
|
822e7094290301ec47a99433381a8d6406798aff
|
[
"MIT"
] | null | null | null |
model/summaries.py
|
victor-estrade/SystGradDescent
|
822e7094290301ec47a99433381a8d6406798aff
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
DEFAULT_N_BINS = 10
def compute_summaries(clf, X, W, n_bins=DEFAULT_N_BINS):
proba = clf.predict_proba(X)
count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=n_bins)
return count
class ClassifierSummaryComputer():
def __init__(self, clf, n_bins=DEFAULT_N_BINS):
self.clf = clf
self.n_bins = n_bins
def __call__(self, X, W):
proba = self.clf.predict_proba(X)
count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=self.n_bins)
return count
class HistogramSummaryComputer():
def __init__(self, n_bins=DEFAULT_N_BINS):
self.n_bins = n_bins
def fit(self, X):
self.edges_list = []
for i in range(X.shape[1]):
x = X[:, i]
maximum = np.max(x)
minimum = np.min(x)
diff = maximum - minimum
maximum = maximum + diff / self.n_bins # be a bit more inclusive
minimum = minimum - diff / self.n_bins # be a bit more inclusive
count, bin_edges = np.histogram(x, range=(minimum, maximum), bins=self.n_bins)
self.edges_list.append(bin_edges)
return self
def predict(self, X, W):
counts = []
for i, bin_edges in enumerate(self.edges_list):
x = X[:, i]
count, _ = np.histogram(x, bins=bin_edges, weights=W)
counts.extend(count)
return counts
def __call__(self, X, W):
counts = self.predict(X, W)
return np.array(counts)
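# Illustrative usage sketch (not part of the original module); the data and
# the stub classifier below are invented for demonstration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 3))
    W = np.ones(100)

    class StubClassifier(object):
        def predict_proba(self, X):
            p = 1.0 / (1.0 + np.exp(-X[:, 0]))  # toy sigmoid score in (0, 1)
            return np.stack([1.0 - p, p], axis=1)

    print(compute_summaries(StubClassifier(), X, W, n_bins=5))  # 5 weighted bin counts
    print(HistogramSummaryComputer(n_bins=4).fit(X)(X, W))      # 12 counts (4 bins x 3 features)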
| 30.122807
| 90
| 0.610367
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
DEFAULT_N_BINS = 10
def compute_summaries(clf, X, W, n_bins=DEFAULT_N_BINS):
proba = clf.predict_proba(X)
count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=n_bins)
return count
class ClassifierSummaryComputer():
def __init__(self, clf, n_bins=DEFAULT_N_BINS):
self.clf = clf
self.n_bins = n_bins
def __call__(self, X, W):
proba = self.clf.predict_proba(X)
count, _ = np.histogram(proba[:, 1], range=(0., 1.), weights=W, bins=self.n_bins)
return count
class HistogramSummaryComputer():
def __init__(self, n_bins=DEFAULT_N_BINS):
self.n_bins = n_bins
def fit(self, X):
self.edges_list = []
for i in range(X.shape[1]):
x = X[:, i]
maximum = np.max(x)
minimum = np.min(x)
diff = maximum - minimum
maximum = maximum + diff / self.n_bins
minimum = minimum - diff / self.n_bins
count, bin_edges = np.histogram(x, range=(minimum, maximum), bins=self.n_bins)
self.edges_list.append(bin_edges)
return self
def predict(self, X, W):
counts = []
for i, bin_edges in enumerate(self.edges_list):
x = X[:, i]
count, _ = np.histogram(x, bins=bin_edges, weights=W)
counts.extend(count)
return counts
def __call__(self, X, W):
counts = self.predict(X, W)
return np.array(counts)
| true
| true
|
f71546b9ddaa47e3907f339c6e8a2f21aac12fe0
| 1,566
|
py
|
Python
|
14_Modulos_e_pacotes/ex110/moeda.py
|
TheCarvalho/Curso-Em-Video-Python
|
8bd5128023ddf8b0f59eab46463c95e47569da73
|
[
"Unlicense"
] | null | null | null |
14_Modulos_e_pacotes/ex110/moeda.py
|
TheCarvalho/Curso-Em-Video-Python
|
8bd5128023ddf8b0f59eab46463c95e47569da73
|
[
"Unlicense"
] | null | null | null |
14_Modulos_e_pacotes/ex110/moeda.py
|
TheCarvalho/Curso-Em-Video-Python
|
8bd5128023ddf8b0f59eab46463c95e47569da73
|
[
"Unlicense"
] | null | null | null |
def metade(valor=0, formato=False):
res = valor/2
return res if formato is False else moeda(res)
def dobro(valor=0, formato=False):
res = valor*2
return res if formato is False else moeda(res)
def aumentar(valor=0, porcentagem=0, formato=False):
res = valor+(valor * porcentagem/100)
return res if formato is False else moeda(res)
def diminuir(valor=0, porcentagem=0, formato=False):
res = valor-(valor * porcentagem/100)
return res if not formato else moeda(res)
#! remember: "if formato:" => formato is True ||| "if not formato:" => formato is False
# moeda is the second parameter here because the value is the first argument passed in
def moeda(valor=0, moeda='R$'):  # the currency symbol can be changed just by passing another one in the call
return f'{moeda}{valor:.2f}'.replace('.', ',')
def resumo(p=0, por1=10, por2=5):
print('-'*40)
# .rjust() => right | .ljust() => left | .center() => centered
print('RESUMO DO VALOR'.center(40))
print('-'*40)
print(f'Preço Analisado:\t{moeda(p)}')
print(f'Dobro do preço:\t\t{dobro(p,True)}')
print(f'Metade do preço:\t{metade(p,True)}')
print(f'{por1}% de aumento:\t\t{aumentar(p,por1,True)}')
print(f'{por2}% de Redução:\t\t{diminuir(p,por2,True)}')
print('-'*40)
'''
print(f'\nA metade de {moeda.moeda(p, "US$")} é {moeda.metade(p, True )}')
print(f'O dobro de {moeda.moeda(p)} é {moeda.dobro(p, True)}')
print(f'Aumentando 10% temos {moeda.aumentar(p, 10, True)}')
print(f'Reduzindo 13% temos {moeda.diminuir(p, 13, True)}')
'''
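# Illustrative usage (not part of the original module); 1234.5 and 100 are
# arbitrary example prices.
if __name__ == '__main__':
    print(moeda(1234.5))            # R$1234,50
    print(aumentar(100, 10, True))  # R$110,00
    resumo(100)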
| 31.32
| 101
| 0.650064
|
def metade(valor=0, formato=False):
res = valor/2
return res if formato is False else moeda(res)
def dobro(valor=0, formato=False):
res = valor*2
return res if formato is False else moeda(res)
def aumentar(valor=0, porcentagem=0, formato=False):
res = valor+(valor * porcentagem/100)
return res if formato is False else moeda(res)
def diminuir(valor=0, porcentagem=0, formato=False):
res = valor-(valor * porcentagem/100)
return res if not formato else moeda(res)
def moeda(valor=0, moeda='R$'):
return f'{moeda}{valor:.2f}'.replace('.', ',')
def resumo(p=0, por1=10, por2=5):
print('-'*40)
print('RESUMO DO VALOR'.center(40))
print('-'*40)
print(f'Preço Analisado:\t{moeda(p)}')
print(f'Dobro do preço:\t\t{dobro(p,True)}')
print(f'Metade do preço:\t{metade(p,True)}')
print(f'{por1}% de aumento:\t\t{aumentar(p,por1,True)}')
print(f'{por2}% de Redução:\t\t{diminuir(p,por2,True)}')
print('-'*40)
| true
| true
|
f71548f4a246b57a8868bfef6d1910128b7621d9
| 4,245
|
py
|
Python
|
tools/ci_build/op_registration_validator.py
|
mszhanyi/onnxruntime
|
6f85d3e5c81c919022ac4a77e5a051da8518b15d
|
[
"MIT"
] | 669
|
2018-12-03T22:00:31.000Z
|
2019-05-06T19:42:49.000Z
|
tools/ci_build/op_registration_validator.py
|
mszhanyi/onnxruntime
|
6f85d3e5c81c919022ac4a77e5a051da8518b15d
|
[
"MIT"
] | 440
|
2018-12-03T21:09:56.000Z
|
2019-05-06T20:47:23.000Z
|
tools/ci_build/op_registration_validator.py
|
mszhanyi/onnxruntime
|
6f85d3e5c81c919022ac4a77e5a051da8518b15d
|
[
"MIT"
] | 140
|
2018-12-03T21:15:28.000Z
|
2019-05-06T18:02:36.000Z
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Validate ORT kernel registrations.
"""
import argparse
import os
import sys
import typing
import op_registration_utils
from logger import get_logger
log = get_logger("op_registration_validator")
# deprecated ops where the last registration should have an end version.
# value for each entry is the opset when it was deprecated. end version of last registration should equal value - 1.
deprecated_ops = {
"kOnnxDomain:Scatter": 11,
"kOnnxDomain:Upsample": 10,
# MeanVarianceNormalization and ThresholdedRelu were in contrib ops and incorrectly registered using
# kOnnxDomain. They became official ONNX operators later and are registered there now. That leaves
# entries in the contrib ops registrations with end versions for when the contrib op was 'deprecated'
# and became an official op.
"kOnnxDomain:MeanVarianceNormalization": 9,
"kOnnxDomain:ThresholdedRelu": 10,
}
class RegistrationValidator(op_registration_utils.RegistrationProcessor):
def __init__(self):
self.last_op_registrations = {}
self.failed = False
def process_registration(
self,
lines: typing.List[str],
domain: str,
operator: str,
start_version: int,
end_version: typing.Optional[int] = None,
type: typing.Optional[str] = None,
):
key = domain + ":" + operator
prev_start, prev_end = self.last_op_registrations[key] if key in self.last_op_registrations else (None, None)
if prev_start:
# a typed registration where the to/from matches for each entry so nothing to update
if prev_start == start_version and prev_end == end_version:
return
# previous registration was unversioned but should have been if we are seeing another registration
if not prev_end:
log.error(
"Invalid registration for {}. Registration for opset {} has no end version but was "
"superceeded by version {}.".format(key, prev_start, start_version)
)
self.failed = True
return
# previous registration end opset is not adjacent to the start of the next registration
if prev_end != start_version - 1:
log.error(
"Invalid registration for {}. Registration for opset {} should have end version of {}".format(
key, prev_start, start_version - 1
)
)
self.failed = True
return
self.last_op_registrations[key] = (start_version, end_version)
def ok(self):
return not self.failed
def validate_last_registrations(self):
# make sure we have an unversioned last entry for each operator unless it's deprecated
for entry in self.last_op_registrations.items():
key, value = entry
opset_from, opset_to = value
deprecated = key in deprecated_ops and opset_to == deprecated_ops[key] - 1
if opset_to and not deprecated:
log.error("Missing unversioned registration for {}".format(key))
self.failed = True
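# Illustrative aside (not in the original script): a made-up registration
# history showing the adjacency rule enforced above. Domain and op names
# are invented.
#
#   v = RegistrationValidator()
#   v.process_registration([], 'kOnnxDomain', 'Foo', 1, 9)
#   v.process_registration([], 'kOnnxDomain', 'Foo', 10)  # ok: end 9 is adjacent to start 10
#   v.process_registration([], 'kOnnxDomain', 'Bar', 1, 5)
#   v.process_registration([], 'kOnnxDomain', 'Bar', 8)   # logged: gap between end 5 and start 8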
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Script to validate operator kernel registrations.")
parser.add_argument(
"--ort_root",
type=str,
help="Path to ONNXRuntime repository root. " "Inferred from the location of this script if not provided.",
)
args = parser.parse_args()
ort_root = os.path.abspath(args.ort_root) if args.ort_root else ""
include_cuda = True # validate CPU and CUDA EP registrations
registration_files = op_registration_utils.get_kernel_registration_files(ort_root, include_cuda)
for file in registration_files:
log.info("Processing {}".format(file))
processor = RegistrationValidator()
op_registration_utils.process_kernel_registration_file(file, processor)
processor.validate_last_registrations()
if not processor.ok():
sys.exit(-1)
| 36.594828
| 117
| 0.659128
|
import argparse
import os
import sys
import typing
import op_registration_utils
from logger import get_logger
log = get_logger("op_registration_validator")
deprecated_ops = {
"kOnnxDomain:Scatter": 11,
"kOnnxDomain:Upsample": 10,
"kOnnxDomain:MeanVarianceNormalization": 9,
"kOnnxDomain:ThresholdedRelu": 10,
}
class RegistrationValidator(op_registration_utils.RegistrationProcessor):
def __init__(self):
self.last_op_registrations = {}
self.failed = False
def process_registration(
self,
lines: typing.List[str],
domain: str,
operator: str,
start_version: int,
end_version: typing.Optional[int] = None,
type: typing.Optional[str] = None,
):
key = domain + ":" + operator
prev_start, prev_end = self.last_op_registrations[key] if key in self.last_op_registrations else (None, None)
if prev_start:
if prev_start == start_version and prev_end == end_version:
return
if not prev_end:
log.error(
"Invalid registration for {}. Registration for opset {} has no end version but was "
"superceeded by version {}.".format(key, prev_start, start_version)
)
self.failed = True
return
if prev_end != start_version - 1:
log.error(
"Invalid registration for {}. Registration for opset {} should have end version of {}".format(
key, prev_start, start_version - 1
)
)
self.failed = True
return
self.last_op_registrations[key] = (start_version, end_version)
def ok(self):
return not self.failed
def validate_last_registrations(self):
for entry in self.last_op_registrations.items():
key, value = entry
opset_from, opset_to = value
deprecated = key in deprecated_ops and opset_to == deprecated_ops[key] - 1
if opset_to and not deprecated:
log.error("Missing unversioned registration for {}".format(key))
self.failed = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Script to validate operator kernel registrations.")
parser.add_argument(
"--ort_root",
type=str,
help="Path to ONNXRuntime repository root. " "Inferred from the location of this script if not provided.",
)
args = parser.parse_args()
ort_root = os.path.abspath(args.ort_root) if args.ort_root else ""
include_cuda = True # validate CPU and CUDA EP registrations
registration_files = op_registration_utils.get_kernel_registration_files(ort_root, include_cuda)
for file in registration_files:
log.info("Processing {}".format(file))
processor = RegistrationValidator()
op_registration_utils.process_kernel_registration_file(file, processor)
processor.validate_last_registrations()
if not processor.ok():
sys.exit(-1)
| true
| true
|
f7154930de7d5ffc6f439d636c13d83d490d8d16
| 1,853
|
py
|
Python
|
src/users/models/microsoftgraphcalendar_permission.py
|
peombwa/Sample-Graph-Python-Client
|
3396f531fbe6bb40a740767c4e31aee95a3b932e
|
[
"MIT"
] | null | null | null |
src/users/models/microsoftgraphcalendar_permission.py
|
peombwa/Sample-Graph-Python-Client
|
3396f531fbe6bb40a740767c4e31aee95a3b932e
|
[
"MIT"
] | null | null | null |
src/users/models/microsoftgraphcalendar_permission.py
|
peombwa/Sample-Graph-Python-Client
|
3396f531fbe6bb40a740767c4e31aee95a3b932e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftgraphcalendarPermission(Model):
"""MicrosoftgraphcalendarPermission.
:param id:
:type id: str
:param email_address:
:type email_address: ~users.models.MicrosoftgraphemailAddress
:param is_removable:
:type is_removable: bool
:param is_inside_organization:
:type is_inside_organization: bool
:param role: Possible values include: 'none', 'freeBusyRead',
'limitedRead', 'read', 'write', 'delegateWithoutPrivateEventAccess',
'delegateWithPrivateEventAccess', 'custom'
:type role: str or ~users.models.enum
:param allowed_roles:
:type allowed_roles: list[str]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'email_address': {'key': 'emailAddress', 'type': 'MicrosoftgraphemailAddress'},
'is_removable': {'key': 'isRemovable', 'type': 'bool'},
'is_inside_organization': {'key': 'isInsideOrganization', 'type': 'bool'},
'role': {'key': 'role', 'type': 'str'},
'allowed_roles': {'key': 'allowedRoles', 'type': '[str]'},
}
def __init__(self, id=None, email_address=None, is_removable=None, is_inside_organization=None, role=None, allowed_roles=None):
super(MicrosoftgraphcalendarPermission, self).__init__()
self.id = id
self.email_address = email_address
self.is_removable = is_removable
self.is_inside_organization = is_inside_organization
self.role = role
self.allowed_roles = allowed_roles
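# Illustrative aside (not part of the generated file): msrest Model instances
# serialize through _attribute_map, so snake_case attributes come back out
# with their Graph JSON keys. Values below are invented, and Model.serialize()
# is assumed from msrest.serialization.
#
#   perm = MicrosoftgraphcalendarPermission(id='abc', role='read',
#                                           allowed_roles=['read', 'write'])
#   perm.serialize()  # -> {'id': 'abc', 'role': 'read', 'allowedRoles': ['read', 'write']}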
| 39.425532
| 131
| 0.622774
|
from msrest.serialization import Model
class MicrosoftgraphcalendarPermission(Model):
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'email_address': {'key': 'emailAddress', 'type': 'MicrosoftgraphemailAddress'},
'is_removable': {'key': 'isRemovable', 'type': 'bool'},
'is_inside_organization': {'key': 'isInsideOrganization', 'type': 'bool'},
'role': {'key': 'role', 'type': 'str'},
'allowed_roles': {'key': 'allowedRoles', 'type': '[str]'},
}
def __init__(self, id=None, email_address=None, is_removable=None, is_inside_organization=None, role=None, allowed_roles=None):
super(MicrosoftgraphcalendarPermission, self).__init__()
self.id = id
self.email_address = email_address
self.is_removable = is_removable
self.is_inside_organization = is_inside_organization
self.role = role
self.allowed_roles = allowed_roles
| true
| true
|
f7154c78addf3f568945d3b47a10b93101d1a781
| 4,877
|
py
|
Python
|
xadmin/plugins/passwords.py
|
jneight/django-xadmin
|
d5fca423e3ae10e3ca086e5ae9ea7068872f0a29
|
[
"BSD-3-Clause"
] | null | null | null |
xadmin/plugins/passwords.py
|
jneight/django-xadmin
|
d5fca423e3ae10e3ca086e5ae9ea7068872f0a29
|
[
"BSD-3-Clause"
] | null | null | null |
xadmin/plugins/passwords.py
|
jneight/django-xadmin
|
d5fca423e3ae10e3ca086e5ae9ea7068872f0a29
|
[
"BSD-3-Clause"
] | null | null | null |
# coding=utf-8
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.views import password_reset_confirm
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.views.base import BaseAdminPlugin, BaseAdminView, csrf_protect_m
from xadmin.views.website import LoginView
class ResetPasswordSendView(BaseAdminView):
need_site_permission = False
password_reset_form = PasswordResetForm
password_reset_template = 'xadmin/auth/password_reset/form.html'
password_reset_done_template = 'xadmin/auth/password_reset/done.html'
password_reset_token_generator = default_token_generator
password_reset_from_email = None
password_reset_email_template = 'xadmin/auth/password_reset/email.html'
password_reset_subject_template = None
def get(self, request, *args, **kwargs):
context = super(ResetPasswordSendView, self).get_context()
context['form'] = kwargs.get('form', self.password_reset_form())
return TemplateResponse(request, self.password_reset_template, context,
current_app=self.admin_site.name)
@csrf_protect_m
def post(self, request, *args, **kwargs):
form = self.password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': self.password_reset_token_generator,
'email_template_name': self.password_reset_email_template,
'request': request,
'domain_override': request.get_host()
}
if self.password_reset_from_email:
opts['from_email'] = self.password_reset_from_email
if self.password_reset_subject_template:
opts['subject_template_name'] = self.password_reset_subject_template
form.save(**opts)
context = super(ResetPasswordSendView, self).get_context()
return TemplateResponse(request, self.password_reset_done_template, context,
current_app=self.admin_site.name)
else:
return self.get(request, form=form)
site.register_view(r'^xadmin/password_reset/$', ResetPasswordSendView, name='xadmin_password_reset')
class ResetLinkPlugin(BaseAdminPlugin):
def block_form_bottom(self, context, nodes):
reset_link = self.get_admin_url('xadmin_password_reset')
return '<div class="text-info" style="margin-top:15px;"><a href="%s"><i class="icon-question-sign"></i> %s</a></div>' % (reset_link, _('Forgotten your password or username?'))
site.register_plugin(ResetLinkPlugin, LoginView)
class ResetPasswordComfirmView(BaseAdminView):
need_site_permission = False
password_reset_set_form = SetPasswordForm
password_reset_confirm_template = 'xadmin/auth/password_reset/confirm.html'
password_reset_token_generator = default_token_generator
def do_view(self, request, uidb36, token, *args, **kwargs):
context = super(ResetPasswordComfirmView, self).get_context()
return password_reset_confirm(request, uidb36, token,
template_name=self.password_reset_confirm_template,
token_generator=self.password_reset_token_generator,
set_password_form=self.password_reset_set_form,
post_reset_redirect=self.get_admin_url('xadmin_password_reset_complete'),
current_app=self.admin_site.name, extra_context=context)
def get(self, request, uidb36, token, *args, **kwargs):
return self.do_view(request, uidb36, token)
def post(self, request, uidb36, token, *args, **kwargs):
return self.do_view(request, uidb36, token)
def get_media(self):
return super(ResetPasswordComfirmView, self).get_media() + \
self.vendor('xadmin.page.form.js', 'xadmin.form.css')
site.register_view(r'^xadmin/password_reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
ResetPasswordComfirmView, name='xadmin_password_reset_confirm')
class ResetPasswordCompleteView(BaseAdminView):
need_site_permission = False
password_reset_complete_template = 'xadmin/auth/password_reset/complete.html'
def get(self, request, *args, **kwargs):
context = super(ResetPasswordCompleteView, self).get_context()
context['login_url'] = self.get_admin_url('index')
return TemplateResponse(request, self.password_reset_complete_template, context,
current_app=self.admin_site.name)
site.register_view(r'^xadmin/password_reset/complete/$', ResetPasswordCompleteView, name='xadmin_password_reset_complete')
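# Illustrative aside (not part of the original plugin): the confirm-view URL
# regex above matches paths such as
#   xadmin/password_reset/3f-abc123-def456ghij/
# where '3f' is the base36-encoded user id and 'abc123-def456ghij' is the
# two-part reset token (values invented).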
| 42.408696
| 183
| 0.710478
|
from django.contrib.auth.forms import PasswordResetForm, SetPasswordForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth.views import password_reset_confirm
from django.template.response import TemplateResponse
from django.utils.translation import ugettext as _
from xadmin.sites import site
from xadmin.views.base import BaseAdminPlugin, BaseAdminView, csrf_protect_m
from xadmin.views.website import LoginView
class ResetPasswordSendView(BaseAdminView):
need_site_permission = False
password_reset_form = PasswordResetForm
password_reset_template = 'xadmin/auth/password_reset/form.html'
password_reset_done_template = 'xadmin/auth/password_reset/done.html'
password_reset_token_generator = default_token_generator
password_reset_from_email = None
password_reset_email_template = 'xadmin/auth/password_reset/email.html'
password_reset_subject_template = None
def get(self, request, *args, **kwargs):
context = super(ResetPasswordSendView, self).get_context()
context['form'] = kwargs.get('form', self.password_reset_form())
return TemplateResponse(request, self.password_reset_template, context,
current_app=self.admin_site.name)
@csrf_protect_m
def post(self, request, *args, **kwargs):
form = self.password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': self.password_reset_token_generator,
'email_template_name': self.password_reset_email_template,
'request': request,
'domain_override': request.get_host()
}
if self.password_reset_from_email:
opts['from_email'] = self.password_reset_from_email
if self.password_reset_subject_template:
opts['subject_template_name'] = self.password_reset_subject_template
form.save(**opts)
context = super(ResetPasswordSendView, self).get_context()
return TemplateResponse(request, self.password_reset_done_template, context,
current_app=self.admin_site.name)
else:
return self.get(request, form=form)
site.register_view(r'^xadmin/password_reset/$', ResetPasswordSendView, name='xadmin_password_reset')
class ResetLinkPlugin(BaseAdminPlugin):
def block_form_bottom(self, context, nodes):
reset_link = self.get_admin_url('xadmin_password_reset')
return '<div class="text-info" style="margin-top:15px;"><a href="%s"><i class="icon-question-sign"></i> %s</a></div>' % (reset_link, _('Forgotten your password or username?'))
site.register_plugin(ResetLinkPlugin, LoginView)
class ResetPasswordComfirmView(BaseAdminView):
need_site_permission = False
password_reset_set_form = SetPasswordForm
password_reset_confirm_template = 'xadmin/auth/password_reset/confirm.html'
password_reset_token_generator = default_token_generator
def do_view(self, request, uidb36, token, *args, **kwargs):
context = super(ResetPasswordComfirmView, self).get_context()
return password_reset_confirm(request, uidb36, token,
template_name=self.password_reset_confirm_template,
token_generator=self.password_reset_token_generator,
set_password_form=self.password_reset_set_form,
post_reset_redirect=self.get_admin_url('xadmin_password_reset_complete'),
current_app=self.admin_site.name, extra_context=context)
def get(self, request, uidb36, token, *args, **kwargs):
return self.do_view(request, uidb36, token)
def post(self, request, uidb36, token, *args, **kwargs):
return self.do_view(request, uidb36, token)
def get_media(self):
return super(ResetPasswordComfirmView, self).get_media() + \
self.vendor('xadmin.page.form.js', 'xadmin.form.css')
site.register_view(r'^xadmin/password_reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
ResetPasswordComfirmView, name='xadmin_password_reset_confirm')
class ResetPasswordCompleteView(BaseAdminView):
need_site_permission = False
password_reset_complete_template = 'xadmin/auth/password_reset/complete.html'
def get(self, request, *args, **kwargs):
context = super(ResetPasswordCompleteView, self).get_context()
context['login_url'] = self.get_admin_url('index')
return TemplateResponse(request, self.password_reset_complete_template, context,
current_app=self.admin_site.name)
site.register_view(r'^xadmin/password_reset/complete/$', ResetPasswordCompleteView, name='xadmin_password_reset_complete')
| true
| true
|
f7154e6ded9574e14e9389f664090a3155de4514
| 18,588
|
py
|
Python
|
Lib/idlelib/idle_test/test_pyparse.py
|
fongchinghinunsw/cpython
|
19926d058dc33856631c6c6b3fcb45b04fcab666
|
[
"CNRI-Python-GPL-Compatible"
] | 120
|
2019-11-12T19:22:44.000Z
|
2020-05-17T12:17:25.000Z
|
Lib/idlelib/idle_test/test_pyparse.py
|
fongchinghinunsw/cpython
|
19926d058dc33856631c6c6b3fcb45b04fcab666
|
[
"CNRI-Python-GPL-Compatible"
] | 19
|
2021-02-18T05:59:03.000Z
|
2022-01-13T01:00:52.000Z
|
Lib/idlelib/idle_test/test_pyparse.py
|
fongchinghinunsw/cpython
|
19926d058dc33856631c6c6b3fcb45b04fcab666
|
[
"CNRI-Python-GPL-Compatible"
] | 18
|
2021-02-22T13:32:56.000Z
|
2022-01-22T12:38:29.000Z
|
"Test pyparse, coverage 96%."
from idlelib import pyparse
import unittest
from collections import namedtuple
class ParseMapTest(unittest.TestCase):
def test_parsemap(self):
keepwhite = {ord(c): ord(c) for c in ' \t\n\r'}
mapping = pyparse.ParseMap(keepwhite)
self.assertEqual(mapping[ord('\t')], ord('\t'))
self.assertEqual(mapping[ord('a')], ord('x'))
self.assertEqual(mapping[1000], ord('x'))
def test_trans(self):
# trans is the production instance of ParseMap, used in _study1
parser = pyparse.Parser(4, 4)
self.assertEqual('\t a([{b}])b"c\'d\n'.translate(pyparse.trans),
'xxx(((x)))x"x\'x\n')
class PyParseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = pyparse.Parser(indentwidth=4, tabwidth=4)
@classmethod
def tearDownClass(cls):
del cls.parser
def test_init(self):
self.assertEqual(self.parser.indentwidth, 4)
self.assertEqual(self.parser.tabwidth, 4)
def test_set_code(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
# Not empty and doesn't end with newline.
with self.assertRaises(AssertionError):
setcode('a')
tests = ('',
'a\n')
for string in tests:
with self.subTest(string=string):
setcode(string)
eq(p.code, string)
eq(p.study_level, 0)
def test_find_good_parse_start(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
start = p.find_good_parse_start
# Split def across lines.
setcode('"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a,\n'
' b=True):\n'
' pass\n'
)
# No value sent for is_char_in_string().
self.assertIsNone(start())
# Make text look like a string. This returns pos as the start
# position, but it's set to None.
self.assertIsNone(start(is_char_in_string=lambda index: True))
# Make all text look like it's not in a string. This means that it
# found a good start position.
eq(start(is_char_in_string=lambda index: False), 44)
# If the beginning of the def line is not in a string, then it
# returns that as the index.
eq(start(is_char_in_string=lambda index: index > 44), 44)
# If the beginning of the def line is in a string, then it
# looks for a previous index.
eq(start(is_char_in_string=lambda index: index >= 44), 33)
# If everything before the 'def' is in a string, then returns None.
# The non-continuation def line returns 44 (see below).
eq(start(is_char_in_string=lambda index: index < 44), None)
# Code without extra line break in def line - mostly returns the same
# values.
setcode('"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a, b=True):\n'
' pass\n'
)
eq(start(is_char_in_string=lambda index: False), 44)
eq(start(is_char_in_string=lambda index: index > 44), 44)
eq(start(is_char_in_string=lambda index: index >= 44), 33)
# When the def line isn't split, this returns 44, which doesn't match the
# split-line test above.
eq(start(is_char_in_string=lambda index: index < 44), 44)
def test_set_lo(self):
code = (
'"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a,\n'
' b=True):\n'
' pass\n'
)
p = self.parser
p.set_code(code)
# Previous character is not a newline.
with self.assertRaises(AssertionError):
p.set_lo(5)
# A value of 0 doesn't change self.code.
p.set_lo(0)
self.assertEqual(p.code, code)
# An index that is preceded by a newline.
p.set_lo(44)
self.assertEqual(p.code, code[44:])
def test_study1(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
study = p._study1
(NONE, BACKSLASH, FIRST, NEXT, BRACKET) = range(5)
TestInfo = namedtuple('TestInfo', ['string', 'goodlines',
'continuation'])
tests = (
TestInfo('', [0], NONE),
# Docstrings.
TestInfo('"""This is a complete docstring."""\n', [0, 1], NONE),
TestInfo("'''This is a complete docstring.'''\n", [0, 1], NONE),
TestInfo('"""This is a continued docstring.\n', [0, 1], FIRST),
TestInfo("'''This is a continued docstring.\n", [0, 1], FIRST),
TestInfo('"""Closing quote does not match."\n', [0, 1], FIRST),
TestInfo('"""Bracket in docstring [\n', [0, 1], FIRST),
TestInfo("'''Incomplete two line docstring.\n\n", [0, 2], NEXT),
# Single-quoted strings.
TestInfo('"This is a complete string."\n', [0, 1], NONE),
TestInfo('"This is an incomplete string.\n', [0, 1], NONE),
TestInfo("'This is more incomplete.\n\n", [0, 1, 2], NONE),
# Comment (backslash does not continue comments).
TestInfo('# Comment\\\n', [0, 1], NONE),
# Brackets.
TestInfo('("""Complete string in bracket"""\n', [0, 1], BRACKET),
TestInfo('("""Open string in bracket\n', [0, 1], FIRST),
TestInfo('a = (1 + 2) - 5 *\\\n', [0, 1], BACKSLASH), # No bracket.
TestInfo('\n def function1(self, a,\n b):\n',
[0, 1, 3], NONE),
TestInfo('\n def function1(self, a,\\\n', [0, 1, 2], BRACKET),
TestInfo('\n def function1(self, a,\n', [0, 1, 2], BRACKET),
TestInfo('())\n', [0, 1], NONE), # Extra closer.
TestInfo(')(\n', [0, 1], BRACKET), # Extra closer.
# For the mismatched example, it doesn't look like continuation.
TestInfo('{)(]\n', [0, 1], NONE), # Mismatched.
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string) # resets study_level
study()
eq(p.study_level, 1)
eq(p.goodlines, test.goodlines)
eq(p.continuation, test.continuation)
# Called again, just returns without reprocessing.
self.assertIsNone(study())
def test_get_continuation_type(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
gettype = p.get_continuation_type
(NONE, BACKSLASH, FIRST, NEXT, BRACKET) = range(5)
TestInfo = namedtuple('TestInfo', ['string', 'continuation'])
tests = (
TestInfo('', NONE),
TestInfo('"""This is a continuation docstring.\n', FIRST),
TestInfo("'''This is a multiline-continued docstring.\n\n", NEXT),
TestInfo('a = (1 + 2) - 5 *\\\n', BACKSLASH),
TestInfo('\n def function1(self, a,\\\n', BRACKET)
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(gettype(), test.continuation)
def test_study2(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
study = p._study2
TestInfo = namedtuple('TestInfo', ['string', 'start', 'end', 'lastch',
'openbracket', 'bracketing'])
tests = (
TestInfo('', 0, 0, '', None, ((0, 0),)),
TestInfo("'''This is a multiline continuation docstring.\n\n",
0, 48, "'", None, ((0, 0), (0, 1), (48, 0))),
TestInfo(' # Comment\\\n',
0, 12, '', None, ((0, 0), (1, 1), (12, 0))),
# A comment without a space is a special case
TestInfo(' #Comment\\\n',
0, 0, '', None, ((0, 0),)),
# Backslash continuation.
TestInfo('a = (1 + 2) - 5 *\\\n',
0, 19, '*', None, ((0, 0), (4, 1), (11, 0))),
# Bracket continuation with close.
TestInfo('\n def function1(self, a,\n b):\n',
1, 48, ':', None, ((1, 0), (17, 1), (46, 0))),
# Bracket continuation with unneeded backslash.
TestInfo('\n def function1(self, a,\\\n',
1, 28, ',', 17, ((1, 0), (17, 1))),
# Bracket continuation.
TestInfo('\n def function1(self, a,\n',
1, 27, ',', 17, ((1, 0), (17, 1))),
# Bracket continuation with comment at end of line with text.
TestInfo('\n def function1(self, a, # End of line comment.\n',
1, 51, ',', 17, ((1, 0), (17, 1), (28, 2), (51, 1))),
# Multi-line statement with comment line in between code lines.
TestInfo(' a = ["first item",\n # Comment line\n "next item",\n',
0, 55, ',', 6, ((0, 0), (6, 1), (7, 2), (19, 1),
(23, 2), (38, 1), (42, 2), (53, 1))),
TestInfo('())\n',
0, 4, ')', None, ((0, 0), (0, 1), (2, 0), (3, 0))),
TestInfo(')(\n', 0, 3, '(', 1, ((0, 0), (1, 0), (1, 1))),
# Wrong closers still decrement stack level.
TestInfo('{)(]\n',
0, 5, ']', None, ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
# Character after backslash.
TestInfo(':\\a\n', 0, 4, '\\a', None, ((0, 0),)),
TestInfo('\n', 0, 0, '', None, ((0, 0),)),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
study()
eq(p.study_level, 2)
eq(p.stmt_start, test.start)
eq(p.stmt_end, test.end)
eq(p.lastch, test.lastch)
eq(p.lastopenbracketpos, test.openbracket)
eq(p.stmt_bracketing, test.bracketing)
# Called again, just returns without reprocessing.
self.assertIsNone(study())
def test_get_num_lines_in_stmt(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
getlines = p.get_num_lines_in_stmt
TestInfo = namedtuple('TestInfo', ['string', 'lines'])
tests = (
TestInfo('[x for x in a]\n', 1), # Closed on one line.
TestInfo('[x\nfor x in a\n', 2), # Not closed.
TestInfo('[x\\\nfor x in a\\\n', 2), # "", unneeded backslashes.
TestInfo('[x\nfor x in a\n]\n', 3), # Closed on multi-line.
TestInfo('\n"""Docstring comment L1"""\nL2\nL3\nL4\n', 1),
TestInfo('\n"""Docstring comment L1\nL2"""\nL3\nL4\n', 1),
TestInfo('\n"""Docstring comment L1\\\nL2\\\nL3\\\nL4\\\n', 4),
TestInfo('\n\n"""Docstring comment L1\\\nL2\\\nL3\\\nL4\\\n"""\n', 5)
)
# Blank string doesn't have enough elements in goodlines.
setcode('')
with self.assertRaises(IndexError):
getlines()
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(getlines(), test.lines)
def test_compute_bracket_indent(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
indent = p.compute_bracket_indent
TestInfo = namedtuple('TestInfo', ['string', 'spaces'])
tests = (
TestInfo('def function1(self, a,\n', 14),
# Characters after bracket.
TestInfo('\n def function1(self, a,\n', 18),
TestInfo('\n\tdef function1(self, a,\n', 18),
# No characters after bracket.
TestInfo('\n def function1(\n', 8),
TestInfo('\n\tdef function1(\n', 8),
TestInfo('\n def function1( \n', 8), # Ignore extra spaces.
TestInfo('[\n"first item",\n # Comment line\n "next item",\n', 0),
TestInfo('[\n "first item",\n # Comment line\n "next item",\n', 2),
TestInfo('["first item",\n # Comment line\n "next item",\n', 1),
TestInfo('(\n', 4),
TestInfo('(a\n', 1),
)
# Must be C_BRACKET continuation type.
setcode('def function1(self, a, b):\n')
with self.assertRaises(AssertionError):
indent()
for test in tests:
setcode(test.string)
eq(indent(), test.spaces)
def test_compute_backslash_indent(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
indent = p.compute_backslash_indent
# Must be C_BACKSLASH continuation type.
errors = (('def function1(self, a, b\\\n'), # Bracket.
(' """ (\\\n'), # Docstring.
('a = #\\\n'), # Inline comment.
)
for string in errors:
with self.subTest(string=string):
setcode(string)
with self.assertRaises(AssertionError):
indent()
TestInfo = namedtuple('TestInfo', ('string', 'spaces'))
tests = (TestInfo('a = (1 + 2) - 5 *\\\n', 4),
TestInfo('a = 1 + 2 - 5 *\\\n', 4),
TestInfo(' a = 1 + 2 - 5 *\\\n', 8),
TestInfo(' a = "spam"\\\n', 6),
TestInfo(' a = \\\n"a"\\\n', 4),
TestInfo(' a = #\\\n"a"\\\n', 5),
TestInfo('a == \\\n', 2),
TestInfo('a != \\\n', 2),
# Difference between containing = and those not.
TestInfo('\\\n', 2),
TestInfo(' \\\n', 6),
TestInfo('\t\\\n', 6),
TestInfo('a\\\n', 3),
TestInfo('{}\\\n', 4),
TestInfo('(1 + 2) - 5 *\\\n', 3),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(indent(), test.spaces)
def test_get_base_indent_string(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
baseindent = p.get_base_indent_string
TestInfo = namedtuple('TestInfo', ['string', 'indent'])
tests = (TestInfo('', ''),
TestInfo('def a():\n', ''),
TestInfo('\tdef a():\n', '\t'),
TestInfo(' def a():\n', ' '),
TestInfo(' def a(\n', ' '),
TestInfo('\t\n def a(\n', ' '),
TestInfo('\t\n # Comment.\n', ' '),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(baseindent(), test.indent)
def test_is_block_opener(self):
yes = self.assertTrue
no = self.assertFalse
p = self.parser
setcode = p.set_code
opener = p.is_block_opener
TestInfo = namedtuple('TestInfo', ['string', 'assert_'])
tests = (
TestInfo('def a():\n', yes),
TestInfo('\n def function1(self, a,\n b):\n', yes),
TestInfo(':\n', yes),
TestInfo('a:\n', yes),
TestInfo('):\n', yes),
TestInfo('(:\n', yes),
TestInfo('":\n', no),
TestInfo('\n def function1(self, a,\n', no),
TestInfo('def function1(self, a):\n pass\n', no),
TestInfo('# A comment:\n', no),
TestInfo('"""A docstring:\n', no),
TestInfo('"""A docstring:\n', no),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
test.assert_(opener())
def test_is_block_closer(self):
yes = self.assertTrue
no = self.assertFalse
p = self.parser
setcode = p.set_code
closer = p.is_block_closer
TestInfo = namedtuple('TestInfo', ['string', 'assert_'])
tests = (
TestInfo('return\n', yes),
TestInfo('\tbreak\n', yes),
TestInfo(' continue\n', yes),
TestInfo(' raise\n', yes),
TestInfo('pass \n', yes),
TestInfo('pass\t\n', yes),
TestInfo('return #\n', yes),
TestInfo('raised\n', no),
TestInfo('returning\n', no),
TestInfo('# return\n', no),
TestInfo('"""break\n', no),
TestInfo('"continue\n', no),
TestInfo('def function1(self, a):\n pass\n', yes),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
test.assert_(closer())
def test_get_last_stmt_bracketing(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
bracketing = p.get_last_stmt_bracketing
TestInfo = namedtuple('TestInfo', ['string', 'bracket'])
tests = (
TestInfo('', ((0, 0),)),
TestInfo('a\n', ((0, 0),)),
TestInfo('()()\n', ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
TestInfo('(\n)()\n', ((0, 0), (0, 1), (3, 0), (3, 1), (5, 0))),
TestInfo('()\n()\n', ((3, 0), (3, 1), (5, 0))),
TestInfo('()(\n)\n', ((0, 0), (0, 1), (2, 0), (2, 1), (5, 0))),
TestInfo('(())\n', ((0, 0), (0, 1), (1, 2), (3, 1), (4, 0))),
TestInfo('(\n())\n', ((0, 0), (0, 1), (2, 2), (4, 1), (5, 0))),
# Same as matched test.
TestInfo('{)(]\n', ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
TestInfo('(((())\n',
((0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (5, 3), (6, 2))),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(bracketing(), test.bracket)
if __name__ == '__main__':
unittest.main(verbosity=2)
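# Illustrative invocation (not from the original file): the suite can be run
# directly as a module, assuming a standard CPython checkout or install:
#
#   python -m idlelib.idle_test.test_pyparse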
| 39.802998
| 84
| 0.48074
|
from idlelib import pyparse
import unittest
from collections import namedtuple
class ParseMapTest(unittest.TestCase):
def test_parsemap(self):
keepwhite = {ord(c): ord(c) for c in ' \t\n\r'}
mapping = pyparse.ParseMap(keepwhite)
self.assertEqual(mapping[ord('\t')], ord('\t'))
self.assertEqual(mapping[ord('a')], ord('x'))
self.assertEqual(mapping[1000], ord('x'))
def test_trans(self):
parser = pyparse.Parser(4, 4)
self.assertEqual('\t a([{b}])b"c\'d\n'.translate(pyparse.trans),
'xxx(((x)))x"x\'x\n')
class PyParseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = pyparse.Parser(indentwidth=4, tabwidth=4)
@classmethod
def tearDownClass(cls):
del cls.parser
def test_init(self):
self.assertEqual(self.parser.indentwidth, 4)
self.assertEqual(self.parser.tabwidth, 4)
def test_set_code(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
with self.assertRaises(AssertionError):
setcode('a')
tests = ('',
'a\n')
for string in tests:
with self.subTest(string=string):
setcode(string)
eq(p.code, string)
eq(p.study_level, 0)
def test_find_good_parse_start(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
start = p.find_good_parse_start
# Split def across lines.
setcode('"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a,\n'
' b=True):\n'
' pass\n'
)
# No value sent for is_char_in_string().
self.assertIsNone(start())
# Make text look like a string. This returns pos as the start
# position, but it's set to None.
self.assertIsNone(start(is_char_in_string=lambda index: True))
# found a good start position.
eq(start(is_char_in_string=lambda index: False), 44)
# If the beginning of the def line is not in a string, then it
# returns that as the index.
eq(start(is_char_in_string=lambda index: index > 44), 44)
# If the beginning of the def line is in a string, then it
# looks for a previous index.
eq(start(is_char_in_string=lambda index: index >= 44), 33)
# If everything before the 'def' is in a string, then returns None.
# The non-continuation def line returns 44 (see below).
eq(start(is_char_in_string=lambda index: index < 44), None)
# Code without extra line break in def line - mostly returns the same
# values.
setcode('"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a, b=True):\n'
' pass\n'
)
eq(start(is_char_in_string=lambda index: False), 44)
eq(start(is_char_in_string=lambda index: index > 44), 44)
eq(start(is_char_in_string=lambda index: index >= 44), 33)
# When the def line isn't split, this returns 44, which doesn't match the
# split-line test above.
eq(start(is_char_in_string=lambda index: index < 44), 44)
def test_set_lo(self):
code = (
'"""This is a module docstring"""\n'
'class C():\n'
' def __init__(self, a,\n'
' b=True):\n'
' pass\n'
)
p = self.parser
p.set_code(code)
# Previous character is not a newline.
with self.assertRaises(AssertionError):
p.set_lo(5)
# A value of 0 doesn't change self.code.
p.set_lo(0)
self.assertEqual(p.code, code)
p.set_lo(44)
self.assertEqual(p.code, code[44:])
def test_study1(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
study = p._study1
(NONE, BACKSLASH, FIRST, NEXT, BRACKET) = range(5)
TestInfo = namedtuple('TestInfo', ['string', 'goodlines',
'continuation'])
tests = (
TestInfo('', [0], NONE),
TestInfo('"""This is a complete docstring."""\n', [0, 1], NONE),
TestInfo("'''This is a complete docstring.'''\n", [0, 1], NONE),
TestInfo('"""This is a continued docstring.\n', [0, 1], FIRST),
TestInfo("'''This is a continued docstring.\n", [0, 1], FIRST),
TestInfo('"""Closing quote does not match."\n', [0, 1], FIRST),
TestInfo('"""Bracket in docstring [\n', [0, 1], FIRST),
TestInfo("'''Incomplete two line docstring.\n\n", [0, 2], NEXT),
# Single-quoted strings.
TestInfo('"This is a complete string."\n', [0, 1], NONE),
TestInfo('"This is an incomplete string.\n', [0, 1], NONE),
TestInfo("'This is more incomplete.\n\n", [0, 1, 2], NONE),
# Comment (backslash does not continue comments).
TestInfo('# Comment\\\n', [0, 1], NONE),
# Brackets.
TestInfo('("""Complete string in bracket"""\n', [0, 1], BRACKET),
TestInfo('("""Open string in bracket\n', [0, 1], FIRST),
TestInfo('a = (1 + 2) - 5 *\\\n', [0, 1], BACKSLASH), # No bracket.
TestInfo('\n def function1(self, a,\n b):\n',
[0, 1, 3], NONE),
TestInfo('\n def function1(self, a,\\\n', [0, 1, 2], BRACKET),
TestInfo('\n def function1(self, a,\n', [0, 1, 2], BRACKET),
TestInfo('())\n', [0, 1], NONE), # Extra closer.
TestInfo(')(\n', [0, 1], BRACKET), # Extra closer.
# For the mismatched example, it doesn't look like continuation.
TestInfo('{)(]\n', [0, 1], NONE),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
study()
eq(p.study_level, 1)
eq(p.goodlines, test.goodlines)
eq(p.continuation, test.continuation)
self.assertIsNone(study())
def test_get_continuation_type(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
gettype = p.get_continuation_type
(NONE, BACKSLASH, FIRST, NEXT, BRACKET) = range(5)
TestInfo = namedtuple('TestInfo', ['string', 'continuation'])
tests = (
TestInfo('', NONE),
TestInfo('"""This is a continuation docstring.\n', FIRST),
TestInfo("'''This is a multiline-continued docstring.\n\n", NEXT),
TestInfo('a = (1 + 2) - 5 *\\\n', BACKSLASH),
TestInfo('\n def function1(self, a,\\\n', BRACKET)
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(gettype(), test.continuation)
def test_study2(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
study = p._study2
TestInfo = namedtuple('TestInfo', ['string', 'start', 'end', 'lastch',
'openbracket', 'bracketing'])
tests = (
TestInfo('', 0, 0, '', None, ((0, 0),)),
TestInfo("'''This is a multiline continuation docstring.\n\n",
0, 48, "'", None, ((0, 0), (0, 1), (48, 0))),
TestInfo(' # Comment\\\n',
0, 12, '', None, ((0, 0), (1, 1), (12, 0))),
# A comment without a space is a special case
TestInfo(' #Comment\\\n',
0, 0, '', None, ((0, 0),)),
# Backslash continuation.
TestInfo('a = (1 + 2) - 5 *\\\n',
0, 19, '*', None, ((0, 0), (4, 1), (11, 0))),
# Bracket continuation with close.
TestInfo('\n def function1(self, a,\n b):\n',
1, 48, ':', None, ((1, 0), (17, 1), (46, 0))),
# Bracket continuation with unneeded backslash.
TestInfo('\n def function1(self, a,\\\n',
1, 28, ',', 17, ((1, 0), (17, 1))),
# Bracket continuation.
TestInfo('\n def function1(self, a,\n',
1, 27, ',', 17, ((1, 0), (17, 1))),
# Bracket continuation with comment at end of line with text.
TestInfo('\n def function1(self, a, # End of line comment.\n',
1, 51, ',', 17, ((1, 0), (17, 1), (28, 2), (51, 1))),
# Multi-line statement with comment line in between code lines.
TestInfo(' a = ["first item",\n # Comment line\n "next item",\n',
0, 55, ',', 6, ((0, 0), (6, 1), (7, 2), (19, 1),
(23, 2), (38, 1), (42, 2), (53, 1))),
TestInfo('())\n',
0, 4, ')', None, ((0, 0), (0, 1), (2, 0), (3, 0))),
TestInfo(')(\n', 0, 3, '(', 1, ((0, 0), (1, 0), (1, 1))),
# Wrong closers still decrement stack level.
TestInfo('{)(]\n',
0, 5, ']', None, ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
# Character after backslash.
TestInfo(':\\a\n', 0, 4, '\\a', None, ((0, 0),)),
TestInfo('\n', 0, 0, '', None, ((0, 0),)),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
study()
eq(p.study_level, 2)
eq(p.stmt_start, test.start)
eq(p.stmt_end, test.end)
eq(p.lastch, test.lastch)
eq(p.lastopenbracketpos, test.openbracket)
eq(p.stmt_bracketing, test.bracketing)
# Called again, just returns without reprocessing.
self.assertIsNone(study())
def test_get_num_lines_in_stmt(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
getlines = p.get_num_lines_in_stmt
TestInfo = namedtuple('TestInfo', ['string', 'lines'])
tests = (
TestInfo('[x for x in a]\n', 1), # Closed on one line.
TestInfo('[x\nfor x in a\n', 2), # Not closed.
            TestInfo('[x\\\nfor x in a\\\n', 2),  # Same, with unneeded backslashes.
TestInfo('[x\nfor x in a\n]\n', 3), # Closed on multi-line.
TestInfo('\n"""Docstring comment L1"""\nL2\nL3\nL4\n', 1),
TestInfo('\n"""Docstring comment L1\nL2"""\nL3\nL4\n', 1),
TestInfo('\n"""Docstring comment L1\\\nL2\\\nL3\\\nL4\\\n', 4),
TestInfo('\n\n"""Docstring comment L1\\\nL2\\\nL3\\\nL4\\\n"""\n', 5)
)
# Blank string doesn't have enough elements in goodlines.
setcode('')
with self.assertRaises(IndexError):
getlines()
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(getlines(), test.lines)
def test_compute_bracket_indent(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
indent = p.compute_bracket_indent
TestInfo = namedtuple('TestInfo', ['string', 'spaces'])
tests = (
TestInfo('def function1(self, a,\n', 14),
TestInfo('\n def function1(self, a,\n', 18),
TestInfo('\n\tdef function1(self, a,\n', 18),
TestInfo('\n def function1(\n', 8),
TestInfo('\n\tdef function1(\n', 8),
TestInfo('\n def function1( \n', 8),
TestInfo('[\n"first item",\n # Comment line\n "next item",\n', 0),
TestInfo('[\n "first item",\n # Comment line\n "next item",\n', 2),
TestInfo('["first item",\n # Comment line\n "next item",\n', 1),
TestInfo('(\n', 4),
TestInfo('(a\n', 1),
)
setcode('def function1(self, a, b):\n')
with self.assertRaises(AssertionError):
indent()
for test in tests:
setcode(test.string)
eq(indent(), test.spaces)
def test_compute_backslash_indent(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
indent = p.compute_backslash_indent
errors = (('def function1(self, a, b\\\n'),
(' """ (\\\n'), # Docstring.
('a = #\\\n'), # Inline comment.
)
for string in errors:
with self.subTest(string=string):
setcode(string)
with self.assertRaises(AssertionError):
indent()
TestInfo = namedtuple('TestInfo', ('string', 'spaces'))
tests = (TestInfo('a = (1 + 2) - 5 *\\\n', 4),
TestInfo('a = 1 + 2 - 5 *\\\n', 4),
TestInfo(' a = 1 + 2 - 5 *\\\n', 8),
TestInfo(' a = "spam"\\\n', 6),
TestInfo(' a = \\\n"a"\\\n', 4),
TestInfo(' a = #\\\n"a"\\\n', 5),
TestInfo('a == \\\n', 2),
TestInfo('a != \\\n', 2),
                 # Difference between lines containing '=' and those without.
TestInfo('\\\n', 2),
TestInfo(' \\\n', 6),
TestInfo('\t\\\n', 6),
TestInfo('a\\\n', 3),
TestInfo('{}\\\n', 4),
TestInfo('(1 + 2) - 5 *\\\n', 3),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(indent(), test.spaces)
def test_get_base_indent_string(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
baseindent = p.get_base_indent_string
TestInfo = namedtuple('TestInfo', ['string', 'indent'])
tests = (TestInfo('', ''),
TestInfo('def a():\n', ''),
TestInfo('\tdef a():\n', '\t'),
TestInfo(' def a():\n', ' '),
TestInfo(' def a(\n', ' '),
TestInfo('\t\n def a(\n', ' '),
TestInfo('\t\n # Comment.\n', ' '),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(baseindent(), test.indent)
def test_is_block_opener(self):
yes = self.assertTrue
no = self.assertFalse
p = self.parser
setcode = p.set_code
opener = p.is_block_opener
TestInfo = namedtuple('TestInfo', ['string', 'assert_'])
tests = (
TestInfo('def a():\n', yes),
TestInfo('\n def function1(self, a,\n b):\n', yes),
TestInfo(':\n', yes),
TestInfo('a:\n', yes),
TestInfo('):\n', yes),
TestInfo('(:\n', yes),
TestInfo('":\n', no),
TestInfo('\n def function1(self, a,\n', no),
TestInfo('def function1(self, a):\n pass\n', no),
TestInfo('# A comment:\n', no),
TestInfo('"""A docstring:\n', no),
            TestInfo("'''A docstring:\n", no),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
test.assert_(opener())
def test_is_block_closer(self):
yes = self.assertTrue
no = self.assertFalse
p = self.parser
setcode = p.set_code
closer = p.is_block_closer
TestInfo = namedtuple('TestInfo', ['string', 'assert_'])
tests = (
TestInfo('return\n', yes),
TestInfo('\tbreak\n', yes),
TestInfo(' continue\n', yes),
TestInfo(' raise\n', yes),
TestInfo('pass \n', yes),
TestInfo('pass\t\n', yes),
TestInfo('return #\n', yes),
TestInfo('raised\n', no),
TestInfo('returning\n', no),
TestInfo('# return\n', no),
TestInfo('"""break\n', no),
TestInfo('"continue\n', no),
TestInfo('def function1(self, a):\n pass\n', yes),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
test.assert_(closer())
def test_get_last_stmt_bracketing(self):
eq = self.assertEqual
p = self.parser
setcode = p.set_code
bracketing = p.get_last_stmt_bracketing
TestInfo = namedtuple('TestInfo', ['string', 'bracket'])
tests = (
TestInfo('', ((0, 0),)),
TestInfo('a\n', ((0, 0),)),
TestInfo('()()\n', ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
TestInfo('(\n)()\n', ((0, 0), (0, 1), (3, 0), (3, 1), (5, 0))),
TestInfo('()\n()\n', ((3, 0), (3, 1), (5, 0))),
TestInfo('()(\n)\n', ((0, 0), (0, 1), (2, 0), (2, 1), (5, 0))),
TestInfo('(())\n', ((0, 0), (0, 1), (1, 2), (3, 1), (4, 0))),
TestInfo('(\n())\n', ((0, 0), (0, 1), (2, 2), (4, 1), (5, 0))),
TestInfo('{)(]\n', ((0, 0), (0, 1), (2, 0), (2, 1), (4, 0))),
TestInfo('(((())\n',
((0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (5, 3), (6, 2))),
)
for test in tests:
with self.subTest(string=test.string):
setcode(test.string)
eq(bracketing(), test.bracket)
if __name__ == '__main__':
unittest.main(verbosity=2)
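# A minimal usage sketch of the class under test (assumes the stdlib
# idlelib is importable; the constants mirror the range(5) unpacking
# used in the tests above).
from idlelib import pyparse as _pyparse
_p = _pyparse.Parser(indentwidth=4, tabwidth=8)
_p.set_code('a = (1 +\n')          # set_code expects a trailing newline
assert _p.get_continuation_type() == _pyparse.C_BRACKET
_p.set_code('a = 1 + \\\n')
assert _p.get_continuation_type() == _pyparse.C_BACKSLASH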
| true
| true
|
f715502ca425f6aa72c39448be881ca66c5df2be
| 1,703
|
py
|
Python
|
game.py
|
zty111/tonghua
|
71b0ecc857f72ab9bb7882358c15587117cdcd6a
|
[
"MIT"
] | null | null | null |
game.py
|
zty111/tonghua
|
71b0ecc857f72ab9bb7882358c15587117cdcd6a
|
[
"MIT"
] | null | null | null |
game.py
|
zty111/tonghua
|
71b0ecc857f72ab9bb7882358c15587117cdcd6a
|
[
"MIT"
] | null | null | null |
from keras.saving.save import load_model
from board import GameState, Player
from encoder import Encoder
from agent import Agent
import scoring
from board import Move, Point
from tiaocan import bot_name
class My():
def select_move(self, game_state):
print("请输入点坐标和方向(或弃权):")
x, y, d = input().split(' ')
x, y, d = int(x), int(y), int(d)
move = Move(Point(x, y), d)
if game_state.is_valid_move(move): return move
else: return Move.pass_turn()
def simulate_game(black_agent, white_agent):
print('Starting the game!')
game = GameState.new_game()
agents = {
Player.black: black_agent,
Player.white: white_agent
}
while not game.is_over():
game.print()
if game.next_player == Player.black: next_move = agents[game.next_player].greedy_move(game)
else: next_move = agents[game.next_player].select_move(game, False)
if next_move.is_pass: print("Pass!")
else: print(next_move.point, next_move.direction)
game = game.apply_move(next_move)
game_result = scoring.compute_game_result(game)
if game_result == Player.black:
print("You win!")
else:
print("Bot Zero win!")
encoder = Encoder()
model = load_model(bot_name)
black_agent = Agent(model, encoder, rounds_per_move = 160, c = 2.0)
white_agent = Agent(model, encoder, rounds_per_move = 160, c = 2.0)
print()
print("欢迎对局!")
print("输入为3个以空格隔开的数字")
print("前2个为点坐标(1~7)")
print("第3个为方向(0~23),具体如下")
print("0\t1\t2\t3\t4")
print("5\t6\t7\t8\t9")
print("10\t11\t棋子\t12\t13")
print("14\t15\t16\t17\t18")
print("19\t20\t21\t22\t23")
print("不要输错哦!")
simulate_game(black_agent, white_agent)
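# A small sketch of the input protocol printed above, assuming the same
# local `board` module: "4 4 12" means the piece at point (4, 4) moves
# in direction 12 (the cell immediately to its right in the 5x5 grid).
def parse_move(text):
    x, y, d = map(int, text.split())
    return Move(Point(x, y), d)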
| 28.864407
| 99
| 0.664709
|
from keras.saving.save import load_model
from board import GameState, Player
from encoder import Encoder
from agent import Agent
import scoring
from board import Move, Point
from tiaocan import bot_name
class My():
def select_move(self, game_state):
print("请输入点坐标和方向(或弃权):")
x, y, d = input().split(' ')
x, y, d = int(x), int(y), int(d)
move = Move(Point(x, y), d)
if game_state.is_valid_move(move): return move
else: return Move.pass_turn()
def simulate_game(black_agent, white_agent):
print('Starting the game!')
game = GameState.new_game()
agents = {
Player.black: black_agent,
Player.white: white_agent
}
while not game.is_over():
game.print()
if game.next_player == Player.black: next_move = agents[game.next_player].greedy_move(game)
else: next_move = agents[game.next_player].select_move(game, False)
if next_move.is_pass: print("Pass!")
else: print(next_move.point, next_move.direction)
game = game.apply_move(next_move)
game_result = scoring.compute_game_result(game)
if game_result == Player.black:
print("You win!")
else:
print("Bot Zero win!")
encoder = Encoder()
model = load_model(bot_name)
black_agent = Agent(model, encoder, rounds_per_move = 160, c = 2.0)
white_agent = Agent(model, encoder, rounds_per_move = 160, c = 2.0)
print()
print("欢迎对局!")
print("输入为3个以空格隔开的数字")
print("前2个为点坐标(1~7)")
print("第3个为方向(0~23),具体如下")
print("0\t1\t2\t3\t4")
print("5\t6\t7\t8\t9")
print("10\t11\t棋子\t12\t13")
print("14\t15\t16\t17\t18")
print("19\t20\t21\t22\t23")
print("不要输错哦!")
simulate_game(black_agent, white_agent)
| true
| true
|
f715517d413224bd0e232c087a3dc3de8fac5148
| 2,409
|
py
|
Python
|
DATA/10_64_64_64_1E7/analy.py
|
Aieener/SUS_3D
|
8fc5a768a2339238939522baf96bce98bf61902e
|
[
"MIT"
] | null | null | null |
DATA/10_64_64_64_1E7/analy.py
|
Aieener/SUS_3D
|
8fc5a768a2339238939522baf96bce98bf61902e
|
[
"MIT"
] | null | null | null |
DATA/10_64_64_64_1E7/analy.py
|
Aieener/SUS_3D
|
8fc5a768a2339238939522baf96bce98bf61902e
|
[
"MIT"
] | null | null | null |
# analy.py
# A python program to analyze the SUS weighting function in order to reach the following goals:
# 1. plot the weight function
# 2. generate the normalized distribution for Z=1
# 3. extrapolate the N distribution for different Zs given by the user.
# Author: Yuding Ai
# Date: 2015 Oct 23
import math
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
def PN():
WF = [] # a list of my target Weighting function
PN = [] # a list of number distribution
with open("SUSWeight_function.txt","r") as file:
for line in file:
words = line.split()
n = float(words[0]) #take the value
WF.append(n); #append value into my WF list
maxi = max(WF)
    if maxi > 500:
        # Shift so the largest weight is 500, avoiding overflow in exp().
        for i in range(len(WF)):
            WF[i] = WF[i] - maxi + 500
    for i in range(len(WF)):
        PN.append(math.exp(WF[i]))
    PN = [float(i)/sum(PN) for i in PN]
return WF,PN
def Pplot(PN,z):
fig = plt.figure()
plt.plot(PN,'+b',markersize=3)
Z = str(z)
ylabel = 'P(N;Z='+ Z + ')'
plt.ylabel(ylabel)
plt.xlabel('N')
title = 'P(N;Z='+ Z + ').png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def enlargePplot(PN,z):
fig = plt.figure()
plt.plot(PN,'+b-',markersize=3,linewidth = 0.1)
plt.xlim(8600,9600)
plt.ylim(0,0.007)
Z = str(z)
ylabel = 'P(N;Z='+ Z + ')'
plt.ylabel(ylabel)
plt.xlabel('N')
title = 'ENLP(N;Z='+ Z + ').png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def Wplot(WN):
fig = plt.figure()
plt.plot(WN,'+r',markersize=1,)
plt.ylabel('Weighting Function')
plt.xlabel('N')
title = 'WeightingFunc.png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def exploPN(W,z):
P = [] # a list of number distribution
for i in range(len(W)):
W[i] = W[i] + i*math.log(z)
maxi = max(W)
    if maxi > 500:
        for j in range(len(W)):
            W[j] = W[j] - maxi + 500
    for j in range(len(W)):
        P.append(math.exp(W[j]))
    P = [float(k)/sum(P) for k in P]
return P
def main():
    W, P = PN()  # read the file once: W is the raw weighting function, P is P(N;z=1)
Wplot(W)
# Pplot(P,"1")
# Pe = exploPN(W,4.44)
# enlargePplot(Pe,4.44)
# for i in range(10):
# W = PN()[0] # take the original weighting function
# t = 3.83 + 0.02*i
# Pe = exploPN(W,t)
# # Pplot(Pe,t)
# enlargePplot(Pe,t)
main()
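# Numerical check of the identity behind exploPN: adding N*ln(z) to the
# weights in log space corresponds to P(N; z) ~ P(N; 1) * z**N. The
# weights below are hypothetical values for N = 0, 1, 2.
def _check_reweight(z=2.0):
    W_demo = [0.0, 1.0, 2.5]
    logp = [w + n * math.log(z) for n, w in enumerate(W_demo)]
    norm = sum(math.exp(v) for v in logp)
    P_demo = [math.exp(v) / norm for v in logp]
    assert abs(sum(P_demo) - 1.0) < 1e-12
    return P_demo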
| 23.38835
| 95
| 0.632213
|
import math
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
def PN():
WF = []
PN = []
with open("SUSWeight_function.txt","r") as file:
for line in file:
words = line.split()
n = float(words[0])
WF.append(n);
maxi = max(WF)
    if maxi > 500:
        for i in range(len(WF)):
            WF[i] = WF[i] - maxi + 500
    for i in range(len(WF)):
        PN.append(math.exp(WF[i]))
    PN = [float(i)/sum(PN) for i in PN]
return WF,PN
def Pplot(PN,z):
fig = plt.figure()
plt.plot(PN,'+b',markersize=3)
Z = str(z)
ylabel = 'P(N;Z='+ Z + ')'
plt.ylabel(ylabel)
plt.xlabel('N')
title = 'P(N;Z='+ Z + ').png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def enlargePplot(PN,z):
fig = plt.figure()
plt.plot(PN,'+b-',markersize=3,linewidth = 0.1)
plt.xlim(8600,9600)
plt.ylim(0,0.007)
Z = str(z)
ylabel = 'P(N;Z='+ Z + ')'
plt.ylabel(ylabel)
plt.xlabel('N')
title = 'ENLP(N;Z='+ Z + ').png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def Wplot(WN):
fig = plt.figure()
plt.plot(WN,'+r',markersize=1,)
plt.ylabel('Weighting Function')
plt.xlabel('N')
title = 'WeightingFunc.png'
fig.savefig(title, dpi=300, bbox_inches='tight')
def exploPN(W,z):
P = []
for i in range(len(W)):
W[i] = W[i] + i*math.log(z)
maxi = max(W)
    if maxi > 500:
        for j in range(len(W)):
            W[j] = W[j] - maxi + 500
    for j in range(len(W)):
        P.append(math.exp(W[j]))
    P = [float(k)/sum(P) for k in P]
return P
def main():
    W, P = PN()
    Wplot(W)
main()
| true
| true
|
f71552f33127dfdd46d5834de303cbaed5612835
| 1,326
|
py
|
Python
|
asset/lambda/index.py
|
jialechan/cdk-elasticache-monitor
|
584d1f583e934e32d80f1abea7fdc100c226b348
|
[
"Apache-2.0"
] | 1
|
2020-07-27T09:15:41.000Z
|
2020-07-27T09:15:41.000Z
|
asset/lambda/index.py
|
jialechan/cdk-elasticache-monitor
|
584d1f583e934e32d80f1abea7fdc100c226b348
|
[
"Apache-2.0"
] | 509
|
2020-08-04T07:02:41.000Z
|
2022-03-28T15:05:51.000Z
|
asset/lambda/index.py
|
jialechan/cdk-elasticache-monitor
|
584d1f583e934e32d80f1abea7fdc100c226b348
|
[
"Apache-2.0"
] | 1
|
2020-08-28T01:13:15.000Z
|
2020-08-28T01:13:15.000Z
|
import os
import json
import time
import urllib.parse
import urllib.request
def handler(event, context):
"""
alarm to slack
"""
print(json.dumps(event))
slack_webhook_url = os.environ['SLACK_WEBHOOK_URL']
channel = os.environ['CHANNEL']
username = os.environ['USERNAME']
icon_emoji = os.environ['ICON_EMOJI']
for record in event.get("Records"):
message = json.loads(record.get("Sns").get("Message"))
title = message.get("AlarmName")
info = message.get("AlarmDescription")
newStateReason = message.get("NewStateReason")
region = os.environ['AWS_REGION']
log = "https://" + region + ".console.aws.amazon.com/cloudwatch/home?region=" + \
region + "#alarmsV2:alarm/" + title + "?~(alarmStateFilter~'ALARM)"
values = {
"channel": channel,
"username": username,
"text": title + "\n" + info + "\n" + newStateReason + "\n" + "<" + log + "|AlarmState>",
"icon_emoji": icon_emoji
}
params = json.dumps(values).encode('utf8')
req = urllib.request.Request(slack_webhook_url, data=params, headers={
'content-type': 'application/json'})
response = urllib.request.urlopen(req)
print(response.read())
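# A minimal local-test sketch (hypothetical alarm values) mirroring the
# SNS record shape consumed by handler() above; the call itself is left
# commented out because it needs the SLACK_* environment variables.
if __name__ == '__main__':
    sample_event = {
        "Records": [{
            "Sns": {
                "Message": json.dumps({
                    "AlarmName": "redis-cpu-high",
                    "AlarmDescription": "Engine CPU above threshold",
                    "NewStateReason": "Threshold Crossed: 1 datapoint",
                })
            }
        }]
    }
    # handler(sample_event, None)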
| 28.826087
| 100
| 0.585973
|
import os
import json
import time
import urllib.parse
import urllib.request
def handler(event, context):
print(json.dumps(event))
slack_webhook_url = os.environ['SLACK_WEBHOOK_URL']
channel = os.environ['CHANNEL']
username = os.environ['USERNAME']
icon_emoji = os.environ['ICON_EMOJI']
for record in event.get("Records"):
message = json.loads(record.get("Sns").get("Message"))
title = message.get("AlarmName")
info = message.get("AlarmDescription")
newStateReason = message.get("NewStateReason")
region = os.environ['AWS_REGION']
log = "https://" + region + ".console.aws.amazon.com/cloudwatch/home?region=" + \
region + "#alarmsV2:alarm/" + title + "?~(alarmStateFilter~'ALARM)"
values = {
"channel": channel,
"username": username,
"text": title + "\n" + info + "\n" + newStateReason + "\n" + "<" + log + "|AlarmState>",
"icon_emoji": icon_emoji
}
params = json.dumps(values).encode('utf8')
req = urllib.request.Request(slack_webhook_url, data=params, headers={
'content-type': 'application/json'})
response = urllib.request.urlopen(req)
print(response.read())
| true
| true
|
f715530ab61caacad8fa0ce7435869a1c5c114aa
| 417
|
py
|
Python
|
codestorm_e_learning/asgi.py
|
Sahiladiv/PSST_CSHTN-08
|
0cd4a5b27f16d17a410b1e7cd2596038925f7070
|
[
"MIT"
] | null | null | null |
codestorm_e_learning/asgi.py
|
Sahiladiv/PSST_CSHTN-08
|
0cd4a5b27f16d17a410b1e7cd2596038925f7070
|
[
"MIT"
] | null | null | null |
codestorm_e_learning/asgi.py
|
Sahiladiv/PSST_CSHTN-08
|
0cd4a5b27f16d17a410b1e7cd2596038925f7070
|
[
"MIT"
] | null | null | null |
"""
ASGI config for codestorm_e_learning project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'codestorm_e_learning.settings')
application = get_asgi_application()
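# A minimal sketch of serving the callable above (assumes the `uvicorn`
# package is installed; any ASGI server works the same way).
if __name__ == '__main__':
    import uvicorn
    uvicorn.run('codestorm_e_learning.asgi:application',
                host='127.0.0.1', port=8000)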
| 24.529412
| 80
| 0.798561
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'codestorm_e_learning.settings')
application = get_asgi_application()
| true
| true
|
f71553a353729b9fcc5938672ff20758e7d08a89
| 1,886
|
py
|
Python
|
ramjet/data_interface/tess_eclipsing_binary_metadata_manager.py
|
golmschenk/ramjet
|
77fb4481a15088923308fda09804d80455d1a9cf
|
[
"Apache-2.0"
] | 3
|
2020-11-23T18:47:37.000Z
|
2021-08-05T17:45:51.000Z
|
ramjet/data_interface/tess_eclipsing_binary_metadata_manager.py
|
golmschenk/ramjet
|
77fb4481a15088923308fda09804d80455d1a9cf
|
[
"Apache-2.0"
] | 5
|
2021-08-19T00:54:57.000Z
|
2022-02-10T00:15:40.000Z
|
ramjet/data_interface/tess_eclipsing_binary_metadata_manager.py
|
golmschenk/ramjet
|
77fb4481a15088923308fda09804d80455d1a9cf
|
[
"Apache-2.0"
] | 3
|
2019-07-12T21:00:57.000Z
|
2020-06-03T22:18:13.000Z
|
"""
Code for managing the TESS eclipsing binary metadata.
"""
import pandas as pd
from pathlib import Path
from peewee import IntegerField, SchemaManager
from ramjet.data_interface.metadatabase import MetadatabaseModel, metadatabase
brian_powell_eclipsing_binary_csv_path = Path('data/tess_eclipsing_binaries/TESS_EB_catalog_23Jun.csv')
class TessEclipsingBinaryMetadata(MetadatabaseModel):
"""
A model for the TESS eclipsing binary metadatabase table.
"""
tic_id = IntegerField(index=True, unique=True)
class TessEclipsingBinaryMetadataManager:
"""
A class for managing the TESS eclipsing binary metadata.
"""
@staticmethod
def build_table():
"""
Builds the TESS eclipsing binary metadata table.
"""
print('Building TESS eclipsing binary metadata table...')
eclipsing_binary_data_frame = pd.read_csv(brian_powell_eclipsing_binary_csv_path, usecols=['ID'])
row_count = 0
metadatabase.drop_tables([TessEclipsingBinaryMetadata])
metadatabase.create_tables([TessEclipsingBinaryMetadata])
SchemaManager(TessEclipsingBinaryMetadata).drop_indexes()
rows = []
for index, tic_id in enumerate(eclipsing_binary_data_frame['ID'].values):
row = {'tic_id': tic_id}
rows.append(row)
row_count += 1
if row_count % 1000 == 0:
with metadatabase.atomic():
TessEclipsingBinaryMetadata.insert_many(rows).execute()
rows = []
with metadatabase.atomic():
TessEclipsingBinaryMetadata.insert_many(rows).execute()
SchemaManager(TessEclipsingBinaryMetadata).create_indexes()
print(f'Table built. {row_count} rows added.')
if __name__ == '__main__':
metadata_manager = TessEclipsingBinaryMetadataManager()
metadata_manager.build_table()
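# A read-side sketch (assumes build_table() has been run): check whether
# a TIC ID is in the catalogue.
def is_eclipsing_binary(tic_id: int) -> bool:
    return (TessEclipsingBinaryMetadata
            .select()
            .where(TessEclipsingBinaryMetadata.tic_id == tic_id)
            .exists())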
| 34.925926
| 105
| 0.696713
|
import pandas as pd
from pathlib import Path
from peewee import IntegerField, SchemaManager
from ramjet.data_interface.metadatabase import MetadatabaseModel, metadatabase
brian_powell_eclipsing_binary_csv_path = Path('data/tess_eclipsing_binaries/TESS_EB_catalog_23Jun.csv')
class TessEclipsingBinaryMetadata(MetadatabaseModel):
tic_id = IntegerField(index=True, unique=True)
class TessEclipsingBinaryMetadataManager:
@staticmethod
def build_table():
print('Building TESS eclipsing binary metadata table...')
eclipsing_binary_data_frame = pd.read_csv(brian_powell_eclipsing_binary_csv_path, usecols=['ID'])
row_count = 0
metadatabase.drop_tables([TessEclipsingBinaryMetadata])
metadatabase.create_tables([TessEclipsingBinaryMetadata])
SchemaManager(TessEclipsingBinaryMetadata).drop_indexes()
rows = []
for index, tic_id in enumerate(eclipsing_binary_data_frame['ID'].values):
row = {'tic_id': tic_id}
rows.append(row)
row_count += 1
if row_count % 1000 == 0:
with metadatabase.atomic():
TessEclipsingBinaryMetadata.insert_many(rows).execute()
rows = []
with metadatabase.atomic():
TessEclipsingBinaryMetadata.insert_many(rows).execute()
SchemaManager(TessEclipsingBinaryMetadata).create_indexes()
print(f'Table built. {row_count} rows added.')
if __name__ == '__main__':
metadata_manager = TessEclipsingBinaryMetadataManager()
metadata_manager.build_table()
| true
| true
|
f715544b39ad2bd0403cdd0c656584e7498e39cb
| 508
|
py
|
Python
|
instagram_profile/settings.py
|
barseghyanartur/django-instagram-profile
|
1bb36551114d26e7c75f0ddf8f79db68fc02101e
|
[
"BSD-3-Clause"
] | 1
|
2020-12-03T22:01:27.000Z
|
2020-12-03T22:01:27.000Z
|
instagram_profile/settings.py
|
barseghyanartur/django-instagram-profile
|
1bb36551114d26e7c75f0ddf8f79db68fc02101e
|
[
"BSD-3-Clause"
] | null | null | null |
instagram_profile/settings.py
|
barseghyanartur/django-instagram-profile
|
1bb36551114d26e7c75f0ddf8f79db68fc02101e
|
[
"BSD-3-Clause"
] | null | null | null |
from configparser import RawConfigParser
from django.conf import settings
env = RawConfigParser()
env.read(settings.BASE_DIR + '/env.ini')
INSTAGRAM_ACCOUNT = env['instagram']['account']
INSTAGRAM_AUTH_URL = env['instagram']['auth_url']
INSTAGRAM_ACCESS_TOKEN_URL = env['instagram']['access_token_url']
INSTAGRAM_APP_ID = env['instagram']['app_id']
INSTAGRAM_SECRET = env['instagram']['secret']
INSTAGRAM_REDIRECT_URL = env['instagram']['redirect_url']
INSTAGRAM_MEDIA_URL = env['instagram']['media_url']
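# A sketch of the env.ini section these settings expect; the keys come
# from the lookups above, and every value is a placeholder:
#
#     [instagram]
#     account = your_account
#     auth_url = https://api.instagram.com/oauth/authorize
#     access_token_url = https://api.instagram.com/oauth/access_token
#     app_id = 1234567890
#     secret = replace-me
#     redirect_url = https://example.com/instagram/callback
#     media_url = https://graph.instagram.com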
| 33.866667
| 65
| 0.775591
|
from configparser import RawConfigParser
from django.conf import settings
env = RawConfigParser()
env.read(settings.BASE_DIR + '/env.ini')
INSTAGRAM_ACCOUNT = env['instagram']['account']
INSTAGRAM_AUTH_URL = env['instagram']['auth_url']
INSTAGRAM_ACCESS_TOKEN_URL = env['instagram']['access_token_url']
INSTAGRAM_APP_ID = env['instagram']['app_id']
INSTAGRAM_SECRET = env['instagram']['secret']
INSTAGRAM_REDIRECT_URL = env['instagram']['redirect_url']
INSTAGRAM_MEDIA_URL = env['instagram']['media_url']
| true
| true
|
f7155533198f2d3d8e3cde89ddd32d5f3fb20652
| 3,413
|
py
|
Python
|
extra/convert_all.py
|
Goten87/BLPConverter
|
a4baed2b76b0c23e28e4ac9066b2823304f4b498
|
[
"MIT"
] | null | null | null |
extra/convert_all.py
|
Goten87/BLPConverter
|
a4baed2b76b0c23e28e4ac9066b2823304f4b498
|
[
"MIT"
] | null | null | null |
extra/convert_all.py
|
Goten87/BLPConverter
|
a4baed2b76b0c23e28e4ac9066b2823304f4b498
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python2
import os
import sys
import subprocess
import select
from optparse import OptionParser
# Setup of the command-line arguments parser
text = "Usage: %prog [options] <root-folder>\n\nConvert (in-place) all the BLP files in <root-folder> and its subdirectories"
parser = OptionParser(text, version="%prog 1.0")
parser.add_option("--converter", action="store", default="BLPConverter", type="string",
dest="converter", metavar="CONVERTER",
help="Path to the BLPConverter executable")
parser.add_option("--remove", action="store_true", default=False,
dest="remove", help="Remove the BLP files successfully converted")
parser.add_option("--verbose", action="store_true", default=False,
dest="verbose", help="Verbose output")
# Handling of the arguments
(options, args) = parser.parse_args()
# Check the parameters
if len(args) != 1:
print "No root folder provided"
sys.exit(-1)
root_folder = args[0]
if root_folder[-1] != os.path.sep:
root_folder += os.path.sep
try:
subprocess.Popen('%s --help' % options.converter, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
except:
print "Can't execute BLPConverter at '%s'" % options.converter
sys.exit(-1)
# Walk the root folder
counter_success_total = 0
failed_total = []
for root, dirs, files in os.walk(root_folder):
if root == root_folder:
print "Processing '.'..."
else:
print "Processing '%s'..." % root[len(root_folder):]
blps = filter(lambda x: x.lower().endswith('.blp'), files)
counter_failed = 0
if len(blps) > 0:
current = os.getcwd()
os.chdir(root)
to_convert = blps
while len(to_convert) > 0:
p = subprocess.Popen('%s %s' % (options.converter, ' '.join([ '"%s"' % image for image in to_convert[0:10] ])), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
p.wait()
output = p.stdout.read()
failed = filter(lambda x: not(x.endswith(': OK')) and (len(x) > 0), output.split('\n'))
counter_failed += len(failed)
failed_total.extend(failed)
if options.verbose:
print ' * ' + output[:-1].replace('\n', '\n * ')
if options.remove:
failed2 = map(lambda x: x[0:x.find(':')], failed)
done = filter(lambda x: (x not in failed2) and (len(x) > 0), to_convert[0:10])
p = subprocess.Popen('rm -f %s' % (' '.join([ '"%s"' % image for image in done ])), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
p.wait()
to_convert = to_convert[10:]
os.chdir(current)
if counter_failed > 0:
print '%d images converted, %d images not converted' % (len(blps) - counter_failed, counter_failed)
else:
print '%d images converted' % (len(blps) - counter_failed)
print
counter_success_total += len(blps) - counter_failed
print '----------------------------------------------------------'
if len(failed_total) > 0:
print 'TOTAL: %d images converted, %d images not converted' % (counter_success_total, len(failed_total))
print
print 'Images not converted:'
for image in failed_total:
print ' * ' + image
else:
print 'TOTAL: %d images converted' % counter_success_total
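# The batching idiom above (10 files per BLPConverter invocation),
# isolated as a Python 3 sketch; `run_converter` is a hypothetical
# stand-in for the Popen call:
#
#     def in_batches(items, size=10):
#         for start in range(0, len(items), size):
#             yield items[start:start + size]
#
#     for batch in in_batches(blps):
#         run_converter(batch)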
| 34.474747
| 185
| 0.607091
|
import os
import sys
import subprocess
import select
from optparse import OptionParser
text = "Usage: %prog [options] <root-folder>\n\nConvert (in-place) all the BLP files in <root-folder> and its subdirectories"
parser = OptionParser(text, version="%prog 1.0")
parser.add_option("--converter", action="store", default="BLPConverter", type="string",
dest="converter", metavar="CONVERTER",
help="Path to the BLPConverter executable")
parser.add_option("--remove", action="store_true", default=False,
dest="remove", help="Remove the BLP files successfully converted")
parser.add_option("--verbose", action="store_true", default=False,
dest="verbose", help="Verbose output")
(options, args) = parser.parse_args()
if len(args) != 1:
print "No root folder provided"
sys.exit(-1)
root_folder = args[0]
if root_folder[-1] != os.path.sep:
root_folder += os.path.sep
try:
subprocess.Popen('%s --help' % options.converter, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
except:
print "Can't execute BLPConverter at '%s'" % options.converter
sys.exit(-1)
# Walk the root folder
counter_success_total = 0
failed_total = []
for root, dirs, files in os.walk(root_folder):
if root == root_folder:
print "Processing '.'..."
else:
print "Processing '%s'..." % root[len(root_folder):]
blps = filter(lambda x: x.lower().endswith('.blp'), files)
counter_failed = 0
if len(blps) > 0:
current = os.getcwd()
os.chdir(root)
to_convert = blps
while len(to_convert) > 0:
p = subprocess.Popen('%s %s' % (options.converter, ' '.join([ '"%s"' % image for image in to_convert[0:10] ])), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
p.wait()
output = p.stdout.read()
failed = filter(lambda x: not(x.endswith(': OK')) and (len(x) > 0), output.split('\n'))
counter_failed += len(failed)
failed_total.extend(failed)
if options.verbose:
print ' * ' + output[:-1].replace('\n', '\n * ')
if options.remove:
failed2 = map(lambda x: x[0:x.find(':')], failed)
done = filter(lambda x: (x not in failed2) and (len(x) > 0), to_convert[0:10])
p = subprocess.Popen('rm -f %s' % (' '.join([ '"%s"' % image for image in done ])), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
p.wait()
to_convert = to_convert[10:]
os.chdir(current)
if counter_failed > 0:
print '%d images converted, %d images not converted' % (len(blps) - counter_failed, counter_failed)
else:
print '%d images converted' % (len(blps) - counter_failed)
print
counter_success_total += len(blps) - counter_failed
print '----------------------------------------------------------'
if len(failed_total) > 0:
print 'TOTAL: %d images converted, %d images not converted' % (counter_success_total, len(failed_total))
print
print 'Images not converted:'
for image in failed_total:
print ' * ' + image
else:
print 'TOTAL: %d images converted' % counter_success_total
| false
| true
|
f715555b72413c60a579b8d7d74d37a54ae765af
| 1,832
|
py
|
Python
|
sorting/merge-sort-recursive.py
|
thehimalayanleo/Algorithm-Practice
|
aa63d90de7a3a72910ead1017574e2cca485009f
|
[
"MIT"
] | null | null | null |
sorting/merge-sort-recursive.py
|
thehimalayanleo/Algorithm-Practice
|
aa63d90de7a3a72910ead1017574e2cca485009f
|
[
"MIT"
] | null | null | null |
sorting/merge-sort-recursive.py
|
thehimalayanleo/Algorithm-Practice
|
aa63d90de7a3a72910ead1017574e2cca485009f
|
[
"MIT"
] | null | null | null |
## A recursive implementation of merge sort.
## Author: AJ
## test case 1 45 849 904 79 48942 7
class sorting:
def __init__(self):
self.arr = []
def get_data(self):
self.arr = list(map(int, input().split()))
return self.arr
def merge_sort(self, array):
        if len(array) <= 1:  # also handles the empty-input edge case
            return array
mid = len(array)//2 # Find the approximate middle point
# Separate the arrays using the middle point
left = self.merge_sort(array[:mid])
right = self.merge_sort(array[mid:])
left_indx = 0
right_indx = 0
complete_arr = []
# Iteratively combine the two arrays by sorting them appropriately
for indx in range(len(left) + len(right)):
if (left_indx < len(left)) and (right_indx < len(right)):
if (left[left_indx] < right[right_indx]):
complete_arr.append(left[left_indx])
left_indx+=1
else:
complete_arr.append(right[right_indx])
right_indx += 1
elif left_indx == len(left):
for indx2 in range(right_indx, len(right)):
complete_arr.append(right[indx2])
right_indx = len(right)
else:
for indx2 in range(left_indx, len(left)):
complete_arr.append(left[indx2])
left_indx = len(left)
#print(len(left)+len(right), len(complete_arr))
return complete_arr
def runner(self):
self.arr = self.merge_sort(self.arr)
def print_arr(self):
for ele in self.arr:
print(str(ele) + ' ', end='')
print('')
array = sorting()
array.get_data()
array.print_arr()
array.runner()
array.print_arr()
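# Expected behaviour for the test case in the header comment: the input
# "1 45 849 904 79 48942 7" prints sorted as "1 7 45 79 849 904 48942".
# merge_sort is pure, so it can also be checked directly:
#     assert sorting().merge_sort([1, 45, 849, 904, 79, 48942, 7]) == \
#         [1, 7, 45, 79, 849, 904, 48942]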
| 29.548387
| 74
| 0.554039
|
class sorting:
    def __init__(self):
        self.arr = []
    def get_data(self):
        self.arr = list(map(int, input().split()))
return self.arr
def merge_sort(self, array):
        if len(array) <= 1:
            return array
mid = len(array)//2
left = self.merge_sort(array[:mid])
right = self.merge_sort(array[mid:])
left_indx = 0
right_indx = 0
complete_arr = []
for indx in range(len(left) + len(right)):
if (left_indx < len(left)) and (right_indx < len(right)):
if (left[left_indx] < right[right_indx]):
complete_arr.append(left[left_indx])
left_indx+=1
else:
complete_arr.append(right[right_indx])
right_indx += 1
elif left_indx == len(left):
for indx2 in range(right_indx, len(right)):
complete_arr.append(right[indx2])
right_indx = len(right)
else:
for indx2 in range(left_indx, len(left)):
complete_arr.append(left[indx2])
left_indx = len(left)
return complete_arr
def runner(self):
self.arr = self.merge_sort(self.arr)
def print_arr(self):
for ele in self.arr:
print(str(ele) + ' ', end='')
print('')
array = sorting()
array.get_data()
array.print_arr()
array.runner()
array.print_arr()
| true
| true
|
f71555608a70f602ee61e7b668ed75d79fe49531
| 2,393
|
py
|
Python
|
pynmodl/tests/parsing/test_scoping.py
|
tjbanks/pynmodl
|
b7d6bb378711ce19cd651561c08146e3571d986a
|
[
"MIT"
] | 9
|
2017-06-03T19:33:46.000Z
|
2019-10-27T22:19:37.000Z
|
pynmodl/tests/parsing/test_scoping.py
|
tjbanks/pynmodl
|
b7d6bb378711ce19cd651561c08146e3571d986a
|
[
"MIT"
] | 11
|
2017-10-13T16:09:10.000Z
|
2019-05-08T16:37:11.000Z
|
pynmodl/tests/parsing/test_scoping.py
|
tjbanks/pynmodl
|
b7d6bb378711ce19cd651561c08146e3571d986a
|
[
"MIT"
] | 2
|
2017-08-29T14:29:45.000Z
|
2019-12-31T19:45:02.000Z
|
import os
from textx.metamodel import metamodel_from_file
from textx.model import children_of_type
from pynmodl.nmodl import NModlCompiler
mm = metamodel_from_file(
os.path.join(os.path.dirname(__file__), '../../grammar/nmodl.tx'))
mm.register_obj_processors({'VarRef': NModlCompiler().handle_varref})
def refs_in(node):
return children_of_type('VarRef', node)
def test_scoping():
p = """
PARAMETER {
v (mV)
}
STATE { x }
INITIAL {
LOCAL v
v = 10
x = -v : v is local
}
FUNCTION f(v) {
if(2 > 1){
LOCAL v
v = 123
f = v : v is local
}
else{
f = -v : v is funcpar
}
}
DERIVATIVE dx {
x' = f(x) + v : v is par
}
"""
blocks = mm.model_from_str(p).blocks
(parameter, state, initial, function_f, derivative) = blocks
locals_in_init = children_of_type('Local', initial)
assert refs_in(initial)[0].var == locals_in_init[0]
locals_in_function_f = children_of_type('Local', function_f)
assert refs_in(function_f)[0].var == locals_in_function_f[0]
assert refs_in(function_f)[2].var == locals_in_function_f[0]
assert type(refs_in(function_f)[-1].var).__name__ == 'FuncPar'
assert refs_in(derivative)[-1].var == parameter.parameters[0]
def test_multiple_locals():
p = """
PARAMETER {
v (mV)
}
STATE { n }
FUNCTION alpha(x)(/ms){
LOCAL a
a = 0.1
if(fabs(x) > a){
alpha=a*x/(1-exp(-x))
}else{
alpha=a/(1-0.5*x)
}
}
DERIVATIVE dn {
LOCAL a
a = 10
n' = alpha((v + 55)/a)}
"""
blocks = mm.model_from_str(p).blocks
(parameter, state, alpha, dn) = blocks
locals_in_alpha = children_of_type('Local', alpha)
alpha_a = locals_in_alpha[0]
alpha_x = alpha.pars[0]
assert refs_in(alpha)[0].var == alpha_a # _a_ = 0.1
assert refs_in(alpha)[1].var == alpha_x # fabs(_x_) > a
assert refs_in(alpha)[2].var == alpha_a # fabs(x) > _a_
assert refs_in(alpha)[3].var == alpha # _alpha_=a*x/(1-exp(-x))
assert refs_in(alpha)[4].var == alpha_a # alpha=_a_*x/(1-exp(-x))
assert refs_in(alpha)[5].var == alpha_x # alpha=a*_x_/(1-exp(-x))
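# In short, the lookup order these assertions encode: a VarRef binds to
# the innermost LOCAL first, then to an enclosing FUNCTION parameter,
# then to a block-level PARAMETER. The final `v` in test_scoping's
# DERIVATIVE block has neither a LOCAL nor a parameter in scope, so it
# resolves to the PARAMETER block's `v`.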
| 26.588889
| 70
| 0.55328
|
import os
from textx.metamodel import metamodel_from_file
from textx.model import children_of_type
from pynmodl.nmodl import NModlCompiler
mm = metamodel_from_file(
os.path.join(os.path.dirname(__file__), '../../grammar/nmodl.tx'))
mm.register_obj_processors({'VarRef': NModlCompiler().handle_varref})
def refs_in(node):
return children_of_type('VarRef', node)
def test_scoping():
p = """
PARAMETER {
v (mV)
}
STATE { x }
INITIAL {
LOCAL v
v = 10
x = -v : v is local
}
FUNCTION f(v) {
if(2 > 1){
LOCAL v
v = 123
f = v : v is local
}
else{
f = -v : v is funcpar
}
}
DERIVATIVE dx {
x' = f(x) + v : v is par
}
"""
blocks = mm.model_from_str(p).blocks
(parameter, state, initial, function_f, derivative) = blocks
locals_in_init = children_of_type('Local', initial)
assert refs_in(initial)[0].var == locals_in_init[0]
locals_in_function_f = children_of_type('Local', function_f)
assert refs_in(function_f)[0].var == locals_in_function_f[0]
assert refs_in(function_f)[2].var == locals_in_function_f[0]
assert type(refs_in(function_f)[-1].var).__name__ == 'FuncPar'
assert refs_in(derivative)[-1].var == parameter.parameters[0]
def test_multiple_locals():
p = """
PARAMETER {
v (mV)
}
STATE { n }
FUNCTION alpha(x)(/ms){
LOCAL a
a = 0.1
if(fabs(x) > a){
alpha=a*x/(1-exp(-x))
}else{
alpha=a/(1-0.5*x)
}
}
DERIVATIVE dn {
LOCAL a
a = 10
n' = alpha((v + 55)/a)}
"""
blocks = mm.model_from_str(p).blocks
(parameter, state, alpha, dn) = blocks
locals_in_alpha = children_of_type('Local', alpha)
alpha_a = locals_in_alpha[0]
alpha_x = alpha.pars[0]
assert refs_in(alpha)[0].var == alpha_a
assert refs_in(alpha)[1].var == alpha_x
assert refs_in(alpha)[2].var == alpha_a
assert refs_in(alpha)[3].var == alpha
assert refs_in(alpha)[4].var == alpha_a
assert refs_in(alpha)[5].var == alpha_x
| true
| true
|
f715558281aaabbc79dcce8d745a1065f13cec44
| 27,919
|
py
|
Python
|
chalice/cli/__init__.py
|
sw33tr0ll/chalice
|
8c48771ef0fe2ae97a00e337ca5828e709b132d3
|
[
"Apache-2.0"
] | 1
|
2020-12-19T07:34:28.000Z
|
2020-12-19T07:34:28.000Z
|
chalice/cli/__init__.py
|
sw33tr0ll/chalice
|
8c48771ef0fe2ae97a00e337ca5828e709b132d3
|
[
"Apache-2.0"
] | 1
|
2020-12-20T21:08:52.000Z
|
2020-12-20T21:08:52.000Z
|
chalice/cli/__init__.py
|
sw33tr0ll/chalice
|
8c48771ef0fe2ae97a00e337ca5828e709b132d3
|
[
"Apache-2.0"
] | null | null | null |
"""Command line interface for chalice.
Contains commands for deploying chalice.
"""
import logging
import os
import platform
import sys
import tempfile
import shutil
import traceback
import functools
import json
import botocore.exceptions
import click
from typing import Dict, Any, Optional, cast # noqa
from chalice import __version__ as chalice_version
from chalice.app import Chalice # noqa
from chalice.awsclient import TypedAWSClient
from chalice.awsclient import ReadTimeout
from chalice.cli.factory import CLIFactory
from chalice.cli.factory import NoSuchFunctionError
from chalice.config import Config # noqa
from chalice.logs import display_logs, LogRetrieveOptions
from chalice.utils import create_zip_file
from chalice.deploy.validate import validate_routes, validate_python_version
from chalice.deploy.validate import ExperimentalFeatureError
from chalice.utils import getting_started_prompt, UI, serialize_to_json
from chalice.constants import CONFIG_VERSION, TEMPLATE_APP, GITIGNORE
from chalice.constants import DEFAULT_STAGE_NAME
from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME
from chalice.local import LocalDevServer # noqa
from chalice.constants import DEFAULT_HANDLER_NAME
from chalice.invoke import UnhandledLambdaError
from chalice.deploy.swagger import TemplatedSwaggerGenerator
from chalice.deploy.planner import PlanEncoder
from chalice.deploy.appgraph import ApplicationGraphBuilder, GraphPrettyPrint
def _configure_logging(level, format_string=None):
# type: (int, Optional[str]) -> None
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger('')
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def create_new_project_skeleton(project_name, profile=None):
# type: (str, Optional[str]) -> None
chalice_dir = os.path.join(project_name, '.chalice')
os.makedirs(chalice_dir)
config = os.path.join(project_name, '.chalice', 'config.json')
cfg = {
'version': CONFIG_VERSION,
'app_name': project_name,
'stages': {
DEFAULT_STAGE_NAME: {
'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME,
}
}
}
if profile is not None:
cfg['profile'] = profile
with open(config, 'w') as f:
f.write(serialize_to_json(cfg))
with open(os.path.join(project_name, 'requirements.txt'), 'w'):
pass
with open(os.path.join(project_name, 'app.py'), 'w') as f:
f.write(TEMPLATE_APP % project_name)
with open(os.path.join(project_name, '.gitignore'), 'w') as f:
f.write(GITIGNORE)
def get_system_info():
# type: () -> str
python_info = "python {}.{}.{}".format(sys.version_info[0],
sys.version_info[1],
sys.version_info[2])
platform_system = platform.system().lower()
platform_release = platform.release()
platform_info = "{} {}".format(platform_system, platform_release)
return "{}, {}".format(python_info, platform_info)
@click.group()
@click.version_option(version=chalice_version,
message='%(prog)s %(version)s, {}'
.format(get_system_info()))
@click.option('--project-dir',
help='The project directory path (absolute or relative).'
'Defaults to CWD')
@click.option('--debug/--no-debug',
default=False,
help='Print debug logs to stderr.')
@click.pass_context
def cli(ctx, project_dir, debug=False):
# type: (click.Context, str, bool) -> None
if project_dir is None:
project_dir = os.getcwd()
elif not os.path.isabs(project_dir):
project_dir = os.path.abspath(project_dir)
if debug is True:
_configure_logging(logging.DEBUG)
_configure_cli_env_vars()
ctx.obj['project_dir'] = project_dir
ctx.obj['debug'] = debug
ctx.obj['factory'] = CLIFactory(project_dir, debug, environ=os.environ)
os.chdir(project_dir)
def _configure_cli_env_vars():
# type: () -> None
# This will set chalice specific env vars so users can detect if
# we're running a Chalice CLI command. This is useful if you want
# conditional behavior only when we're actually running in Lambda
# in your app.py file.
os.environ['AWS_CHALICE_CLI_MODE'] = 'true'
@cli.command()
@click.option('--host', default='127.0.0.1')
@click.option('--port', default=8000, type=click.INT)
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage for the local server to use.')
@click.option('--autoreload/--no-autoreload',
default=True,
help='Automatically restart server when code changes.')
@click.pass_context
def local(ctx, host='127.0.0.1', port=8000, stage=DEFAULT_STAGE_NAME,
autoreload=True):
# type: (click.Context, str, int, str, bool) -> None
factory = ctx.obj['factory'] # type: CLIFactory
from chalice.cli import reloader
# We don't create the server here because that will bind the
# socket and we only want to do this in the worker process.
server_factory = functools.partial(
create_local_server, factory, host, port, stage)
# When running `chalice local`, a stdout logger is configured
# so you'll see the same stdout logging as you would when
# running in lambda. This is configuring the root logger.
# The app-specific logger (app.log) will still continue
# to work.
logging.basicConfig(
stream=sys.stdout, level=logging.INFO, format='%(message)s')
if autoreload:
project_dir = factory.create_config_obj(
chalice_stage_name=stage).project_dir
rc = reloader.run_with_reloader(
server_factory, os.environ, project_dir)
        # Click doesn't sys.exit() with the RC this function returns. The
# recommended way to do this is to use sys.exit() directly,
# see: https://github.com/pallets/click/issues/747
sys.exit(rc)
run_local_server(factory, host, port, stage)
def create_local_server(factory, host, port, stage):
# type: (CLIFactory, str, int, str) -> LocalDevServer
config = factory.create_config_obj(
chalice_stage_name=stage
)
app_obj = config.chalice_app
# Check that `chalice deploy` would let us deploy these routes, otherwise
# there is no point in testing locally.
routes = config.chalice_app.routes
validate_routes(routes)
server = factory.create_local_server(app_obj, config, host, port)
return server
def run_local_server(factory, host, port, stage):
# type: (CLIFactory, str, int, str) -> None
server = create_local_server(factory, host, port, stage)
server.serve_forever()
@cli.command()
@click.option('--autogen-policy/--no-autogen-policy',
default=None,
help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.option('--connection-timeout',
type=int,
help=('Overrides the default botocore connection '
'timeout.'))
@click.pass_context
def deploy(ctx, autogen_policy, profile, api_gateway_stage, stage,
connection_timeout):
# type: (click.Context, Optional[bool], str, str, str, int) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
session = factory.create_botocore_session(
connection_timeout=connection_timeout)
ui = UI()
d = factory.create_default_deployer(session=session,
config=config,
ui=ui)
deployed_values = d.deploy(config, chalice_stage_name=stage)
reporter = factory.create_deployment_reporter(ui=ui)
reporter.display_report(deployed_values)
@cli.group()
def dev():
# type: () -> None
"""Development and debugging commands for chalice.
All the commands under the "chalice dev" namespace are provided
to help chalice developers introspect the internals of chalice.
They are also useful for users to better understand the chalice
deployment process.
These commands are provided for informational purposes only.
There is NO guarantee of backwards compatibility for any
"chalice dev" commands. Do not rely on the output of these commands.
These commands allow introspection of chalice internals, and the
internals of chalice are subject to change as needed.
"""
@dev.command()
@click.option('--autogen-policy/--no-autogen-policy',
default=None,
help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.pass_context
def plan(ctx, autogen_policy, profile, api_gateway_stage, stage):
# type: (click.Context, Optional[bool], str, str, str) -> None
"""Generate and display deployment plan.
This command will calculate and pretty print the deployment plan
without actually executing the plan. It's primarily used to better
understand the chalice deployment process.
"""
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
session = factory.create_botocore_session()
ui = UI()
d = factory.create_plan_only_deployer(
session=session, config=config, ui=ui)
d.deploy(config, chalice_stage_name=stage)
@dev.command()
@click.option('--autogen-policy/--no-autogen-policy',
default=None,
help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.pass_context
def appgraph(ctx, autogen_policy, profile, api_gateway_stage, stage):
# type: (click.Context, Optional[bool], str, str, str) -> None
"""Generate and display the application graph."""
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
graph_build = ApplicationGraphBuilder()
graph = graph_build.build(config, stage)
ui = UI()
GraphPrettyPrint(ui).display_graph(graph)
@cli.command('invoke')
@click.option('-n', '--name', metavar='NAME', required=True,
help=('The name of the function to invoke. '
'This is the logical name of the function. If the '
'function is decorated by app.route use the name '
'api_handler instead.'))
@click.option('--profile', metavar='PROFILE',
help='Override profile at deploy time.')
@click.option('--stage', metavar='STAGE', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.pass_context
def invoke(ctx, name, profile, stage):
# type: (click.Context, str, str, str) -> None
"""Invoke the deployed lambda function NAME.
Reads payload from STDIN.
"""
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
try:
invoke_handler = factory.create_lambda_invoke_handler(name, stage)
payload = factory.create_stdin_reader().read()
invoke_handler.invoke(payload)
except NoSuchFunctionError as e:
err = click.ClickException(
"could not find a lambda function named %s." % e.name)
err.exit_code = 2
raise err
except botocore.exceptions.ClientError as e:
error = e.response['Error']
err = click.ClickException(
"got '%s' exception back from Lambda\n%s"
% (error['Code'], error['Message']))
err.exit_code = 1
raise err
except UnhandledLambdaError:
err = click.ClickException(
"Unhandled exception in Lambda function, details above.")
err.exit_code = 1
raise err
except ReadTimeout as e:
err = click.ClickException(e.message)
err.exit_code = 1
raise err
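# A minimal in-process sketch of `invoke` using click's test runner; the
# payload is fed via stdin, mirroring the STDIN behaviour documented
# above (assumes a function deployed under the logical name api_handler):
#
#     from click.testing import CliRunner
#     result = CliRunner().invoke(cli, ['invoke', '-n', 'api_handler'],
#                                 input='{"hello": "world"}', obj={})
#     print(result.output)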
@cli.command('delete')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to delete.')
@click.pass_context
def delete(ctx, profile, stage):
# type: (click.Context, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(chalice_stage_name=stage)
session = factory.create_botocore_session()
d = factory.create_deletion_deployer(session=session, ui=UI())
d.deploy(config, chalice_stage_name=stage)
@cli.command()
@click.option('--num-entries', default=None, type=int,
help='Max number of log entries to show.')
@click.option('--include-lambda-messages/--no-include-lambda-messages',
default=False,
help='Controls whether or not lambda log messages are included.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to get logs for.')
@click.option('-n', '--name',
help='The name of the lambda function to retrieve logs from.',
default=DEFAULT_HANDLER_NAME)
@click.option('-s', '--since',
help=('Only display logs since the provided time. If the '
'-f/--follow option is specified, then this value will '
'default to 10 minutes from the current time. Otherwise '
'by default all log messages are displayed. This value '
'can either be an ISO8601 formatted timestamp or a '
'relative time. For relative times provide a number '
'and a single unit. Units can be "s" for seconds, '
'"m" for minutes, "h" for hours, "d" for days, and "w" '
'for weeks. For example "5m" would indicate to display '
'logs starting five minutes in the past.'),
default=None)
@click.option('-f', '--follow/--no-follow',
default=False,
help=('Continuously poll for new log messages. Note that this '
'is a best effort attempt, and in certain cases can '
'miss log messages. This option is intended for '
'interactive usage only.'))
@click.option('--profile', help='The profile to use for fetching logs.')
@click.pass_context
def logs(ctx, num_entries, include_lambda_messages, stage,
name, since, follow, profile):
# type: (click.Context, int, bool, str, str, str, bool, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(stage, False)
deployed = config.deployed_resources(stage)
if name in deployed.resource_names():
lambda_arn = deployed.resource_values(name)['lambda_arn']
session = factory.create_botocore_session()
retriever = factory.create_log_retriever(
session, lambda_arn, follow)
options = LogRetrieveOptions.create(
max_entries=num_entries,
since=since,
include_lambda_messages=include_lambda_messages,
)
display_logs(retriever, sys.stdout, options)
@cli.command('gen-policy')
@click.option('--filename',
help='The filename to analyze. Otherwise app.py is assumed.')
@click.pass_context
def gen_policy(ctx, filename):
# type: (click.Context, str) -> None
from chalice import policy
if filename is None:
filename = os.path.join(ctx.obj['project_dir'], 'app.py')
if not os.path.isfile(filename):
click.echo("App file does not exist: %s" % filename, err=True)
raise click.Abort()
with open(filename) as f:
contents = f.read()
generated = policy.policy_from_source_code(contents)
click.echo(serialize_to_json(generated))
@cli.command('new-project')
@click.argument('project_name', required=False)
@click.option('--profile', required=False)
def new_project(project_name, profile):
# type: (str, str) -> None
if project_name is None:
project_name = getting_started_prompt(click)
if os.path.isdir(project_name):
click.echo("Directory already exists: %s" % project_name, err=True)
raise click.Abort()
create_new_project_skeleton(project_name, profile)
validate_python_version(Config.create())
@cli.command('url')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to get the deployed URL for.')
@click.pass_context
def url(ctx, stage):
# type: (click.Context, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
deployed = config.deployed_resources(stage)
if deployed is not None and 'rest_api' in deployed.resource_names():
click.echo(deployed.resource_values('rest_api')['rest_api_url'])
else:
e = click.ClickException(
"Could not find a record of a Rest API in chalice stage: '%s'"
% stage)
e.exit_code = 2
raise e
@cli.command('generate-sdk')
@click.option('--sdk-type', default='javascript',
type=click.Choice(['javascript']))
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to generate an SDK for.')
@click.argument('outdir')
@click.pass_context
def generate_sdk(ctx, sdk_type, stage, outdir):
# type: (click.Context, str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
session = factory.create_botocore_session()
client = TypedAWSClient(session)
deployed = config.deployed_resources(stage)
if deployed is not None and 'rest_api' in deployed.resource_names():
rest_api_id = deployed.resource_values('rest_api')['rest_api_id']
api_gateway_stage = config.api_gateway_stage
client.download_sdk(rest_api_id, outdir,
api_gateway_stage=api_gateway_stage,
sdk_type=sdk_type)
else:
click.echo("Could not find API ID, has this application "
"been deployed?", err=True)
raise click.Abort()
@cli.command('generate-models')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help="Chalice Stage for which to generate models.")
@click.pass_context
def generate_models(ctx, stage):
# type: (click.Context, str) -> None
"""Generate a model from Chalice routes.
Currently only supports generating Swagger 2.0 models.
"""
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
if not config.chalice_app.routes:
click.echo('No REST API found to generate model from.')
raise click.Abort()
swagger_generator = TemplatedSwaggerGenerator()
model = swagger_generator.generate_swagger(
config.chalice_app,
)
ui = UI()
ui.write(json.dumps(model, indent=4, cls=PlanEncoder))
ui.write('\n')
@cli.command('package')
@click.option('--pkg-format', default='cloudformation',
help=('Specify the provisioning engine to use for '
'template output. Chalice supports both '
'CloudFormation and Terraform. Default '
'is CloudFormation.'),
type=click.Choice(['cloudformation', 'terraform']))
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help="Chalice Stage to package.")
@click.option('--single-file', is_flag=True,
default=False,
help=("Create a single packaged file. "
"By default, the 'out' argument "
"specifies a directory in which the "
"package assets will be placed. If "
"this argument is specified, a single "
"zip file will be created instead. CloudFormation Only."))
@click.option('--merge-template',
help=('Specify a JSON or YAML template to be merged '
'into the generated template. This is useful '
'for adding resources to a Chalice template or '
                    'modifying values in the template. CloudFormation Only.'))
@click.option('--template-format', default='json',
type=click.Choice(['json', 'yaml'], case_sensitive=False),
help=('Specify if the generated template should be serialized '
'as either JSON or YAML. CloudFormation only.'))
@click.option('--profile', help='Override profile at packaging time.')
@click.argument('out')
@click.pass_context
def package(ctx, single_file, stage, merge_template,
out, pkg_format, template_format, profile):
# type: (click.Context, bool, str, str, str, str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(stage)
options = factory.create_package_options()
packager = factory.create_app_packager(config, options,
pkg_format,
template_format,
merge_template)
if pkg_format == 'terraform' and (merge_template or
single_file or
template_format != 'json'):
# I don't see any reason we couldn't support --single-file for
# terraform if we wanted to.
click.echo((
"Terraform format does not support "
"--merge-template, --single-file, or --template-format"))
raise click.Abort()
if single_file:
dirname = tempfile.mkdtemp()
try:
packager.package_app(config, dirname, stage)
create_zip_file(source_dir=dirname, outfile=out)
finally:
shutil.rmtree(dirname)
else:
packager.package_app(config, out, stage)
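# Illustrative invocations of the `package` command above (paths and stage
# names are placeholders):
#
#     chalice package /tmp/packaged-app
#     chalice package --pkg-format terraform /tmp/tf-out
#     chalice package --single-file --stage prod /tmp/app.zip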
@cli.command('generate-pipeline')
@click.option('--pipeline-version',
default='v1',
type=click.Choice(['v1', 'v2']),
help='Which version of the pipeline template to generate.')
@click.option('-i', '--codebuild-image',
help=("Specify default codebuild image to use. "
"This option must be provided when using a python "
"version besides 2.7."))
@click.option('-s', '--source', default='codecommit',
type=click.Choice(['codecommit', 'github']),
help=("Specify the input source. The default value of "
"'codecommit' will create a CodeCommit repository "
"for you. The 'github' value allows you to "
"reference an existing GitHub repository."))
@click.option('-b', '--buildspec-file',
help=("Specify path for buildspec.yml file. "
"By default, the build steps are included in the "
"generated cloudformation template. If this option "
"is provided, a buildspec.yml will be generated "
"as a separate file and not included in the cfn "
"template. This allows you to make changes to how "
"the project is built without having to redeploy "
"a CloudFormation template. This file should be "
"named 'buildspec.yml' and placed in the root "
"directory of your app."))
@click.argument('filename')
@click.pass_context
def generate_pipeline(ctx, pipeline_version, codebuild_image, source,
buildspec_file, filename):
# type: (click.Context, str, str, str, str, str) -> None
"""Generate a cloudformation template for a starter CD pipeline.
This command will write a starter cloudformation template to
the filename you provide. It contains a CodeCommit repo,
a CodeBuild stage for packaging your chalice app, and a
CodePipeline stage to deploy your application using cloudformation.
You can use any AWS SDK or the AWS CLI to deploy this stack.
Here's an example using the AWS CLI:
\b
$ chalice generate-pipeline pipeline.json
    $ aws cloudformation deploy --stack-name mystack \
        --template-file pipeline.json --capabilities CAPABILITY_IAM
"""
from chalice import pipeline
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj()
p = cast(pipeline.BasePipelineTemplate, None)
if pipeline_version == 'v1':
p = pipeline.CreatePipelineTemplateLegacy()
else:
p = pipeline.CreatePipelineTemplateV2()
params = pipeline.PipelineParameters(
app_name=config.app_name,
lambda_python_version=config.lambda_python_version,
codebuild_image=codebuild_image,
code_source=source,
pipeline_version=pipeline_version,
)
output = p.create_template(params)
if buildspec_file:
extractor = pipeline.BuildSpecExtractor()
buildspec_contents = extractor.extract_buildspec(output)
with open(buildspec_file, 'w') as f:
f.write(buildspec_contents)
with open(filename, 'w') as f:
f.write(serialize_to_json(output))
def main():
# type: () -> int
# click's dynamic attrs will allow us to pass through
# 'obj' via the context object, so we're ignoring
# these error messages from pylint because we know it's ok.
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
try:
return cli(obj={})
except botocore.exceptions.NoRegionError:
click.echo("No region configured. "
"Either export the AWS_DEFAULT_REGION "
"environment variable or set the "
"region value in our ~/.aws/config file.", err=True)
return 2
except ExperimentalFeatureError as e:
click.echo(str(e))
return 2
except Exception:
click.echo(traceback.format_exc(), err=True)
return 2
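# A minimal entry-point sketch (an assumption, not part of the original
# module) for running this CLI directly rather than via the console script:
if __name__ == '__main__':
    sys.exit(main())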
| 41.732436
| 79
| 0.647767
|
import logging
import os
import platform
import sys
import tempfile
import shutil
import traceback
import functools
import json
import botocore.exceptions
import click
from typing import Dict, Any, Optional, cast
from chalice import __version__ as chalice_version
from chalice.app import Chalice
from chalice.awsclient import TypedAWSClient
from chalice.awsclient import ReadTimeout
from chalice.cli.factory import CLIFactory
from chalice.cli.factory import NoSuchFunctionError
from chalice.config import Config
from chalice.logs import display_logs, LogRetrieveOptions
from chalice.utils import create_zip_file
from chalice.deploy.validate import validate_routes, validate_python_version
from chalice.deploy.validate import ExperimentalFeatureError
from chalice.utils import getting_started_prompt, UI, serialize_to_json
from chalice.constants import CONFIG_VERSION, TEMPLATE_APP, GITIGNORE
from chalice.constants import DEFAULT_STAGE_NAME
from chalice.constants import DEFAULT_APIGATEWAY_STAGE_NAME
from chalice.local import LocalDevServer
from chalice.constants import DEFAULT_HANDLER_NAME
from chalice.invoke import UnhandledLambdaError
from chalice.deploy.swagger import TemplatedSwaggerGenerator
from chalice.deploy.planner import PlanEncoder
from chalice.deploy.appgraph import ApplicationGraphBuilder, GraphPrettyPrint
def _configure_logging(level, format_string=None):
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger('')
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def create_new_project_skeleton(project_name, profile=None):
chalice_dir = os.path.join(project_name, '.chalice')
os.makedirs(chalice_dir)
config = os.path.join(project_name, '.chalice', 'config.json')
cfg = {
'version': CONFIG_VERSION,
'app_name': project_name,
'stages': {
DEFAULT_STAGE_NAME: {
'api_gateway_stage': DEFAULT_APIGATEWAY_STAGE_NAME,
}
}
}
if profile is not None:
cfg['profile'] = profile
with open(config, 'w') as f:
f.write(serialize_to_json(cfg))
with open(os.path.join(project_name, 'requirements.txt'), 'w'):
pass
with open(os.path.join(project_name, 'app.py'), 'w') as f:
f.write(TEMPLATE_APP % project_name)
with open(os.path.join(project_name, '.gitignore'), 'w') as f:
f.write(GITIGNORE)
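# For reference, create_new_project_skeleton above produces this layout:
#
#     <project_name>/
#         .chalice/config.json
#         .gitignore
#         app.py
#         requirements.txt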
def get_system_info():
python_info = "python {}.{}.{}".format(sys.version_info[0],
sys.version_info[1],
sys.version_info[2])
platform_system = platform.system().lower()
platform_release = platform.release()
platform_info = "{} {}".format(platform_system, platform_release)
return "{}, {}".format(python_info, platform_info)
@click.group()
@click.version_option(version=chalice_version,
message='%(prog)s %(version)s, {}'
.format(get_system_info()))
@click.option('--project-dir',
help='The project directory path (absolute or relative).'
'Defaults to CWD')
@click.option('--debug/--no-debug',
default=False,
help='Print debug logs to stderr.')
@click.pass_context
def cli(ctx, project_dir, debug=False):
if project_dir is None:
project_dir = os.getcwd()
elif not os.path.isabs(project_dir):
project_dir = os.path.abspath(project_dir)
if debug is True:
_configure_logging(logging.DEBUG)
_configure_cli_env_vars()
ctx.obj['project_dir'] = project_dir
ctx.obj['debug'] = debug
ctx.obj['factory'] = CLIFactory(project_dir, debug, environ=os.environ)
os.chdir(project_dir)
def _configure_cli_env_vars():
    # Set a chalice-specific env var so application code can tell CLI runs
    # apart from actually running in Lambda.
os.environ['AWS_CHALICE_CLI_MODE'] = 'true'
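# Illustrative app-side check (a sketch, not part of this module): code can
# branch on the flag set by _configure_cli_env_vars to distinguish CLI runs
# from real Lambda executions, e.g.
#
#     if os.environ.get('AWS_CHALICE_CLI_MODE') == 'true':
#         ...  # running under a chalice CLI command, not in Lambda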
@cli.command()
@click.option('--host', default='127.0.0.1')
@click.option('--port', default=8000, type=click.INT)
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage for the local server to use.')
@click.option('--autoreload/--no-autoreload',
default=True,
help='Automatically restart server when code changes.')
@click.pass_context
def local(ctx, host='127.0.0.1', port=8000, stage=DEFAULT_STAGE_NAME,
autoreload=True):
factory = ctx.obj['factory']
from chalice.cli import reloader
    # Defer creating the server here: binding the socket should only happen
    # in the worker process.
server_factory = functools.partial(
create_local_server, factory, host, port, stage)
    # When running `chalice local`, a stdout logger is configured so you see
    # the same stdout logging the app would produce when running in Lambda.
logging.basicConfig(
stream=sys.stdout, level=logging.INFO, format='%(message)s')
if autoreload:
project_dir = factory.create_config_obj(
chalice_stage_name=stage).project_dir
rc = reloader.run_with_reloader(
server_factory, os.environ, project_dir)
        # Click will not propagate the reloader's return code for us; the
        # recommended way to do this is to use sys.exit() directly,
# see: https://github.com/pallets/click/issues/747
sys.exit(rc)
run_local_server(factory, host, port, stage)
def create_local_server(factory, host, port, stage):
# type: (CLIFactory, str, int, str) -> LocalDevServer
config = factory.create_config_obj(
chalice_stage_name=stage
)
app_obj = config.chalice_app
# Check that `chalice deploy` would let us deploy these routes, otherwise
# there is no point in testing locally.
routes = config.chalice_app.routes
validate_routes(routes)
server = factory.create_local_server(app_obj, config, host, port)
return server
def run_local_server(factory, host, port, stage):
# type: (CLIFactory, str, int, str) -> None
server = create_local_server(factory, host, port, stage)
server.serve_forever()
@cli.command()
@click.option('--autogen-policy/--no-autogen-policy',
default=None,
help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.option('--connection-timeout',
type=int,
help=('Overrides the default botocore connection '
'timeout.'))
@click.pass_context
def deploy(ctx, autogen_policy, profile, api_gateway_stage, stage,
connection_timeout):
# type: (click.Context, Optional[bool], str, str, str, int) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
session = factory.create_botocore_session(
connection_timeout=connection_timeout)
ui = UI()
d = factory.create_default_deployer(session=session,
config=config,
ui=ui)
deployed_values = d.deploy(config, chalice_stage_name=stage)
reporter = factory.create_deployment_reporter(ui=ui)
reporter.display_report(deployed_values)
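# Example CLI usage of the deploy command above (profile and stage names are
# placeholders):
#
#     chalice deploy --stage prod --profile my-profile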
@cli.group()
def dev():
    # type: () -> None
    pass
@dev.command()
@click.option('--autogen-policy/--no-autogen-policy',
default=None,
help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.pass_context
def plan(ctx, autogen_policy, profile, api_gateway_stage, stage):
# type: (click.Context, Optional[bool], str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
session = factory.create_botocore_session()
ui = UI()
d = factory.create_plan_only_deployer(
session=session, config=config, ui=ui)
d.deploy(config, chalice_stage_name=stage)
@dev.command()
@click.option('--autogen-policy/--no-autogen-policy',
default=None,
help='Automatically generate IAM policy for app code.')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--api-gateway-stage',
help='Name of the API gateway stage to deploy to.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.pass_context
def appgraph(ctx, autogen_policy, profile, api_gateway_stage, stage):
# type: (click.Context, Optional[bool], str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(
chalice_stage_name=stage, autogen_policy=autogen_policy,
api_gateway_stage=api_gateway_stage,
)
graph_build = ApplicationGraphBuilder()
graph = graph_build.build(config, stage)
ui = UI()
GraphPrettyPrint(ui).display_graph(graph)
@cli.command('invoke')
@click.option('-n', '--name', metavar='NAME', required=True,
help=('The name of the function to invoke. '
'This is the logical name of the function. If the '
'function is decorated by app.route use the name '
'api_handler instead.'))
@click.option('--profile', metavar='PROFILE',
help='Override profile at deploy time.')
@click.option('--stage', metavar='STAGE', default=DEFAULT_STAGE_NAME,
help=('Name of the Chalice stage to deploy to. '
'Specifying a new chalice stage will create '
'an entirely new set of AWS resources.'))
@click.pass_context
def invoke(ctx, name, profile, stage):
# type: (click.Context, str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
try:
invoke_handler = factory.create_lambda_invoke_handler(name, stage)
payload = factory.create_stdin_reader().read()
invoke_handler.invoke(payload)
except NoSuchFunctionError as e:
err = click.ClickException(
"could not find a lambda function named %s." % e.name)
err.exit_code = 2
raise err
except botocore.exceptions.ClientError as e:
error = e.response['Error']
err = click.ClickException(
"got '%s' exception back from Lambda\n%s"
% (error['Code'], error['Message']))
err.exit_code = 1
raise err
except UnhandledLambdaError:
err = click.ClickException(
"Unhandled exception in Lambda function, details above.")
err.exit_code = 1
raise err
except ReadTimeout as e:
err = click.ClickException(e.message)
err.exit_code = 1
raise err
@cli.command('delete')
@click.option('--profile', help='Override profile at deploy time.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to delete.')
@click.pass_context
def delete(ctx, profile, stage):
# type: (click.Context, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(chalice_stage_name=stage)
session = factory.create_botocore_session()
d = factory.create_deletion_deployer(session=session, ui=UI())
d.deploy(config, chalice_stage_name=stage)
@cli.command()
@click.option('--num-entries', default=None, type=int,
help='Max number of log entries to show.')
@click.option('--include-lambda-messages/--no-include-lambda-messages',
default=False,
help='Controls whether or not lambda log messages are included.')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to get logs for.')
@click.option('-n', '--name',
help='The name of the lambda function to retrieve logs from.',
default=DEFAULT_HANDLER_NAME)
@click.option('-s', '--since',
help=('Only display logs since the provided time. If the '
'-f/--follow option is specified, then this value will '
'default to 10 minutes from the current time. Otherwise '
'by default all log messages are displayed. This value '
'can either be an ISO8601 formatted timestamp or a '
'relative time. For relative times provide a number '
'and a single unit. Units can be "s" for seconds, '
'"m" for minutes, "h" for hours, "d" for days, and "w" '
'for weeks. For example "5m" would indicate to display '
'logs starting five minutes in the past.'),
default=None)
@click.option('-f', '--follow/--no-follow',
default=False,
help=('Continuously poll for new log messages. Note that this '
'is a best effort attempt, and in certain cases can '
'miss log messages. This option is intended for '
'interactive usage only.'))
@click.option('--profile', help='The profile to use for fetching logs.')
@click.pass_context
def logs(ctx, num_entries, include_lambda_messages, stage,
name, since, follow, profile):
# type: (click.Context, int, bool, str, str, str, bool, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(stage, False)
deployed = config.deployed_resources(stage)
if name in deployed.resource_names():
lambda_arn = deployed.resource_values(name)['lambda_arn']
session = factory.create_botocore_session()
retriever = factory.create_log_retriever(
session, lambda_arn, follow)
options = LogRetrieveOptions.create(
max_entries=num_entries,
since=since,
include_lambda_messages=include_lambda_messages,
)
display_logs(retriever, sys.stdout, options)
@cli.command('gen-policy')
@click.option('--filename',
help='The filename to analyze. Otherwise app.py is assumed.')
@click.pass_context
def gen_policy(ctx, filename):
# type: (click.Context, str) -> None
from chalice import policy
if filename is None:
filename = os.path.join(ctx.obj['project_dir'], 'app.py')
if not os.path.isfile(filename):
click.echo("App file does not exist: %s" % filename, err=True)
raise click.Abort()
with open(filename) as f:
contents = f.read()
generated = policy.policy_from_source_code(contents)
click.echo(serialize_to_json(generated))
@cli.command('new-project')
@click.argument('project_name', required=False)
@click.option('--profile', required=False)
def new_project(project_name, profile):
# type: (str, str) -> None
if project_name is None:
project_name = getting_started_prompt(click)
if os.path.isdir(project_name):
click.echo("Directory already exists: %s" % project_name, err=True)
raise click.Abort()
create_new_project_skeleton(project_name, profile)
validate_python_version(Config.create())
@cli.command('url')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to get the deployed URL for.')
@click.pass_context
def url(ctx, stage):
# type: (click.Context, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
deployed = config.deployed_resources(stage)
if deployed is not None and 'rest_api' in deployed.resource_names():
click.echo(deployed.resource_values('rest_api')['rest_api_url'])
else:
e = click.ClickException(
"Could not find a record of a Rest API in chalice stage: '%s'"
% stage)
e.exit_code = 2
raise e
@cli.command('generate-sdk')
@click.option('--sdk-type', default='javascript',
type=click.Choice(['javascript']))
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help='Name of the Chalice stage to generate an SDK for.')
@click.argument('outdir')
@click.pass_context
def generate_sdk(ctx, sdk_type, stage, outdir):
# type: (click.Context, str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
session = factory.create_botocore_session()
client = TypedAWSClient(session)
deployed = config.deployed_resources(stage)
if deployed is not None and 'rest_api' in deployed.resource_names():
rest_api_id = deployed.resource_values('rest_api')['rest_api_id']
api_gateway_stage = config.api_gateway_stage
client.download_sdk(rest_api_id, outdir,
api_gateway_stage=api_gateway_stage,
sdk_type=sdk_type)
else:
click.echo("Could not find API ID, has this application "
"been deployed?", err=True)
raise click.Abort()
@cli.command('generate-models')
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help="Chalice Stage for which to generate models.")
@click.pass_context
def generate_models(ctx, stage):
# type: (click.Context, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj(stage)
if not config.chalice_app.routes:
click.echo('No REST API found to generate model from.')
raise click.Abort()
swagger_generator = TemplatedSwaggerGenerator()
model = swagger_generator.generate_swagger(
config.chalice_app,
)
ui = UI()
ui.write(json.dumps(model, indent=4, cls=PlanEncoder))
ui.write('\n')
@cli.command('package')
@click.option('--pkg-format', default='cloudformation',
help=('Specify the provisioning engine to use for '
'template output. Chalice supports both '
'CloudFormation and Terraform. Default '
'is CloudFormation.'),
type=click.Choice(['cloudformation', 'terraform']))
@click.option('--stage', default=DEFAULT_STAGE_NAME,
help="Chalice Stage to package.")
@click.option('--single-file', is_flag=True,
default=False,
help=("Create a single packaged file. "
"By default, the 'out' argument "
"specifies a directory in which the "
"package assets will be placed. If "
"this argument is specified, a single "
"zip file will be created instead. CloudFormation Only."))
@click.option('--merge-template',
help=('Specify a JSON or YAML template to be merged '
'into the generated template. This is useful '
'for adding resources to a Chalice template or '
                    'modifying values in the template. CloudFormation Only.'))
@click.option('--template-format', default='json',
type=click.Choice(['json', 'yaml'], case_sensitive=False),
help=('Specify if the generated template should be serialized '
'as either JSON or YAML. CloudFormation only.'))
@click.option('--profile', help='Override profile at packaging time.')
@click.argument('out')
@click.pass_context
def package(ctx, single_file, stage, merge_template,
out, pkg_format, template_format, profile):
# type: (click.Context, bool, str, str, str, str, str, str) -> None
factory = ctx.obj['factory'] # type: CLIFactory
factory.profile = profile
config = factory.create_config_obj(stage)
options = factory.create_package_options()
packager = factory.create_app_packager(config, options,
pkg_format,
template_format,
merge_template)
if pkg_format == 'terraform' and (merge_template or
single_file or
template_format != 'json'):
# I don't see any reason we couldn't support --single-file for
# terraform if we wanted to.
click.echo((
"Terraform format does not support "
"--merge-template, --single-file, or --template-format"))
raise click.Abort()
if single_file:
dirname = tempfile.mkdtemp()
try:
packager.package_app(config, dirname, stage)
create_zip_file(source_dir=dirname, outfile=out)
finally:
shutil.rmtree(dirname)
else:
packager.package_app(config, out, stage)
@cli.command('generate-pipeline')
@click.option('--pipeline-version',
default='v1',
type=click.Choice(['v1', 'v2']),
help='Which version of the pipeline template to generate.')
@click.option('-i', '--codebuild-image',
help=("Specify default codebuild image to use. "
"This option must be provided when using a python "
"version besides 2.7."))
@click.option('-s', '--source', default='codecommit',
type=click.Choice(['codecommit', 'github']),
help=("Specify the input source. The default value of "
"'codecommit' will create a CodeCommit repository "
"for you. The 'github' value allows you to "
"reference an existing GitHub repository."))
@click.option('-b', '--buildspec-file',
help=("Specify path for buildspec.yml file. "
"By default, the build steps are included in the "
"generated cloudformation template. If this option "
"is provided, a buildspec.yml will be generated "
"as a separate file and not included in the cfn "
"template. This allows you to make changes to how "
"the project is built without having to redeploy "
"a CloudFormation template. This file should be "
"named 'buildspec.yml' and placed in the root "
"directory of your app."))
@click.argument('filename')
@click.pass_context
def generate_pipeline(ctx, pipeline_version, codebuild_image, source,
buildspec_file, filename):
# type: (click.Context, str, str, str, str, str) -> None
from chalice import pipeline
factory = ctx.obj['factory'] # type: CLIFactory
config = factory.create_config_obj()
p = cast(pipeline.BasePipelineTemplate, None)
if pipeline_version == 'v1':
p = pipeline.CreatePipelineTemplateLegacy()
else:
p = pipeline.CreatePipelineTemplateV2()
params = pipeline.PipelineParameters(
app_name=config.app_name,
lambda_python_version=config.lambda_python_version,
codebuild_image=codebuild_image,
code_source=source,
pipeline_version=pipeline_version,
)
output = p.create_template(params)
if buildspec_file:
extractor = pipeline.BuildSpecExtractor()
buildspec_contents = extractor.extract_buildspec(output)
with open(buildspec_file, 'w') as f:
f.write(buildspec_contents)
with open(filename, 'w') as f:
f.write(serialize_to_json(output))
def main():
# type: () -> int
    # click's dynamic attrs will allow us to pass through
    # 'obj' via the context object, so we're ignoring
    # these error messages from pylint because we know it's ok.
    # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
try:
return cli(obj={})
except botocore.exceptions.NoRegionError:
click.echo("No region configured. "
"Either export the AWS_DEFAULT_REGION "
"environment variable or set the "
"region value in our ~/.aws/config file.", err=True)
return 2
except ExperimentalFeatureError as e:
click.echo(str(e))
return 2
except Exception:
click.echo(traceback.format_exc(), err=True)
return 2
| true
| true
|
f715566e418c809bc95f0d6c57bc79a0f14a15b4
| 1,920
|
py
|
Python
|
nanobox_libcloud/tasks/azure_arm.py
|
mu-box/nanobox-adapter-libcloud
|
a8606799a4899c3e771f24467b61cc09a49f0d55
|
[
"MIT"
] | 4
|
2017-08-26T16:26:02.000Z
|
2017-11-10T02:20:32.000Z
|
nanobox_libcloud/tasks/azure_arm.py
|
mu-box/nanobox-adapter-libcloud
|
a8606799a4899c3e771f24467b61cc09a49f0d55
|
[
"MIT"
] | 9
|
2017-09-12T20:26:07.000Z
|
2019-04-01T18:08:28.000Z
|
nanobox_libcloud/tasks/azure_arm.py
|
mu-box/nanobox-adapter-libcloud
|
a8606799a4899c3e771f24467b61cc09a49f0d55
|
[
"MIT"
] | 7
|
2017-09-16T09:00:48.000Z
|
2021-03-01T04:28:39.000Z
|
from nanobox_libcloud import celery
from nanobox_libcloud import adapters
from time import sleep
import logging
from libcloud.common.exceptions import BaseHTTPError
@celery.task
def azure_destroy_arm(creds, name):
logger = logging.getLogger(__name__)
self = adapters.azure_arm.AzureARM()
driver = self._get_user_driver(**creds)
logger.info('Destroying server, NIC, public IP, and VHD...')
if driver.destroy_node(self._find_server(driver, name), ex_destroy_ip=True):
logger.info('Ensuring server was destroyed...')
while self._find_server(driver, name) is not None:
sleep(0.5)
app = name.rsplit('-', 1)[0]
if len(driver.list_nodes(app)) < 1:
logger.info('Destroying virtual network...')
net = self._find_network(driver, app)
while True:
try:
driver.ex_delete_network(net.id)
except BaseHTTPError as h:
if h.code == 202:
break
logging.info('%d: %s' % (h.code, h.message))
inuse = "is in use" in h.message
if h.code == 400 and inuse:
                        sleep(10)
break
while self._find_network(driver, app):
sleep(0.5)
logger.info('Destroying resource group...')
group = self._find_resource_group(driver, app)
while True:
try:
driver.ex_delete_resource_group(group.id)
except BaseHTTPError as h:
if h.code == 202:
break
logging.info('%d: %s' % (h.code, h.message))
inuse = "InUse" in h.message
if h.code == 400 and inuse:
                        sleep(10)
break
while self._find_resource_group(driver, app):
sleep(0.5)
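# Hypothetical dispatch sketch (the credential keys are assumptions, not the
# adapter's actual schema): the task is meant to be queued through Celery
# rather than called inline, e.g.
#
#     azure_destroy_arm.delay(
#         {'subscription_id': '...', 'key': '...', 'secret': '...'},
#         'myapp-web1',
#     )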
| 36.226415
| 80
| 0.519792
|
from nanobox_libcloud import celery
from nanobox_libcloud import adapters
from time import sleep
import logging
from libcloud.common.exceptions import BaseHTTPError
@celery.task
def azure_destroy_arm(creds, name):
logger = logging.getLogger(__name__)
self = adapters.azure_arm.AzureARM()
driver = self._get_user_driver(**creds)
logger.info('Destroying server, NIC, public IP, and VHD...')
if driver.destroy_node(self._find_server(driver, name), ex_destroy_ip=True):
logger.info('Ensuring server was destroyed...')
while self._find_server(driver, name) is not None:
sleep(0.5)
app = name.rsplit('-', 1)[0]
if len(driver.list_nodes(app)) < 1:
logger.info('Destroying virtual network...')
net = self._find_network(driver, app)
while True:
try:
driver.ex_delete_network(net.id)
except BaseHTTPError as h:
if h.code == 202:
break
logging.info('%d: %s' % (h.code, h.message))
inuse = "is in use" in h.message
if h.code == 400 and inuse:
                        sleep(10)
break
while self._find_network(driver, app):
sleep(0.5)
logger.info('Destroying resource group...')
group = self._find_resource_group(driver, app)
while True:
try:
driver.ex_delete_resource_group(group.id)
except BaseHTTPError as h:
if h.code == 202:
break
logging.info('%d: %s' % (h.code, h.message))
inuse = "InUse" in h.message
if h.code == 400 and inuse:
                        sleep(10)
break
while self._find_resource_group(driver, app):
sleep(0.5)
| true
| true
|
f715567b1c04fc53d84059088a5b453d461d55e7
| 1,293
|
py
|
Python
|
airbyte-integrations/connectors/source-hubspot/main_dev.py
|
luizgribeiro/airbyte
|
71a96f5417b678c39b34e2e92234d8a51529e086
|
[
"MIT"
] | 2
|
2021-03-02T09:17:41.000Z
|
2021-03-02T11:02:23.000Z
|
airbyte-integrations/connectors/source-hubspot/main_dev.py
|
luizgribeiro/airbyte
|
71a96f5417b678c39b34e2e92234d8a51529e086
|
[
"MIT"
] | 52
|
2021-06-11T12:39:05.000Z
|
2022-03-30T04:59:35.000Z
|
airbyte-integrations/connectors/source-hubspot/main_dev.py
|
luizgribeiro/airbyte
|
71a96f5417b678c39b34e2e92234d8a51529e086
|
[
"MIT"
] | 2
|
2021-12-14T17:15:40.000Z
|
2021-12-14T17:18:03.000Z
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
from base_python.entrypoint import launch
from source_hubspot import SourceHubspot
if __name__ == "__main__":
source = SourceHubspot()
launch(source, sys.argv[1:])
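# Typical connector invocations (the standard Airbyte protocol verbs; the
# config path is a placeholder):
#
#     python main_dev.py spec
#     python main_dev.py check --config secrets/config.json
#     python main_dev.py discover --config secrets/config.json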
| 38.029412
| 80
| 0.771075
|
import sys
from base_python.entrypoint import launch
from source_hubspot import SourceHubspot
if __name__ == "__main__":
source = SourceHubspot()
launch(source, sys.argv[1:])
| true
| true
|
f7155888da8319908c50672d71d366b286e97a8a
| 8,907
|
py
|
Python
|
apis/nb/clients/inventory_manager_client/models/LicenseInfoDTO.py
|
CiscoDevNet/APIC-EM-Generic-Scripts-
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 45
|
2016-06-09T15:41:25.000Z
|
2019-08-06T17:13:11.000Z
|
apis/nb/clients/inventory_manager_client/models/LicenseInfoDTO.py
|
CiscoDevNet/APIC-EM-Generic-Scripts
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 36
|
2016-06-12T03:03:56.000Z
|
2017-03-13T18:20:11.000Z
|
apis/nb/clients/inventory_manager_client/models/LicenseInfoDTO.py
|
CiscoDevNet/APIC-EM-Generic-Scripts
|
74211d9488f1e77cf56ef86dba20ec8e8eb49cc1
|
[
"ECL-2.0",
"Apache-2.0"
] | 15
|
2016-06-22T03:51:37.000Z
|
2019-07-10T10:06:02.000Z
|
#!/usr/bin/env python
#pylint: skip-file
# This source code is licensed under the Apache license found in the
# LICENSE file in the root directory of this project.
class LicenseInfoDTO(object):
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'name': 'str',
'priority': 'str',
'type': 'str',
'description': 'str',
'validityPeriodRemaining': 'int',
'maxUsageCount': 'int',
'eulaStatus': 'bool',
'validityPeriod': 'int',
'usageCount': 'int',
'physicalIndex': 'str',
'licenseIndex': 'int',
'featureVersion': 'str',
'counted': 'bool',
'totalCount': 'int',
'provisionState': 'int',
'parentId': 'int',
'expiredPeriod': 'int',
'usageCountRemaining': 'int',
'status': 'str',
'id': 'str',
'deployPending': 'int',
'hostId': 'str',
'evalPeriodLeft': 'str',
'evalPeriodUsed': 'str',
'expiredDate': 'str',
'isCounted': 'bool',
'isEulaAccepted': 'bool',
'isEulaApplicable': 'bool',
'isTechnologyLicense': 'bool',
'licenseFileCount': 'int',
'licenseFileName': 'str',
'storeName': 'str',
'storedUsed': 'int',
'attributeInfo': 'dict'
}
self.attributeMap = {
'name': 'name',
'priority': 'priority',
'type': 'type',
'description': 'description',
'validityPeriodRemaining': 'validityPeriodRemaining',
'maxUsageCount': 'maxUsageCount',
'eulaStatus': 'eulaStatus',
'validityPeriod': 'validityPeriod',
'usageCount': 'usageCount',
'physicalIndex': 'physicalIndex',
'licenseIndex': 'licenseIndex',
'featureVersion': 'featureVersion',
'counted': 'counted',
'totalCount': 'totalCount',
'provisionState': 'provisionState',
'parentId': 'parentId',
'expiredPeriod': 'expiredPeriod',
'usageCountRemaining': 'usageCountRemaining',
'status': 'status',
'id': 'id',
'deployPending': 'deployPending',
'hostId': 'hostId',
'evalPeriodLeft': 'evalPeriodLeft',
'evalPeriodUsed': 'evalPeriodUsed',
'expiredDate': 'expiredDate',
'isCounted': 'isCounted',
'isEulaAccepted': 'isEulaAccepted',
'isEulaApplicable': 'isEulaApplicable',
'isTechnologyLicense': 'isTechnologyLicense',
'licenseFileCount': 'licenseFileCount',
'licenseFileName': 'licenseFileName',
'storeName': 'storeName',
'storedUsed': 'storedUsed',
'attributeInfo': 'attributeInfo'
}
#Name of the feature that is using or can use this license. Ex: 'IPBASE', 'ADVIPSERVICE'
self.name = None # str
#License priority
self.priority = None # str
#Type of license based on the validity period
self.type = None # str
#Description about the license. It is populated with comments from the license file
self.description = None # str
#Time period remaining before the license expires or transitions to rightToUse(9) license. Value will be in milliseconds
self.validityPeriodRemaining = None # int
#Maximum number of entities that can use this license
self.maxUsageCount = None # int
#Whether the user accepted end user license agreement for this license. Values are true(1) - EULA accepted, false(2) - EULA not accepted
self.eulaStatus = None # bool
#Time period the license is valid for. Value will be in milliseconds
self.validityPeriod = None # int
#Number of current usages of this licensed feature
self.usageCount = None # int
#Physical entity index
self.physicalIndex = None # str
#Index of the license to uniquely identify a license within the device
self.licenseIndex = None # int
#Version of the feature that is using or can use this license. Ex: '1.0', '2.0'
self.featureVersion = None # str
#If license feature is counted as part of the license. Values are true(1) - counted license, false(2) - uncounted license
self.counted = None # bool
#Total number of this licensed feature
self.totalCount = None # int
#Provision state of the license feature
self.provisionState = None # int
#Parent Id of the license
self.parentId = None # int
#Time period after the license expires. Value will be in milliseconds
self.expiredPeriod = None # int
#Number of entities that can still use this license
self.usageCountRemaining = None # int
#Status of the license
self.status = None # str
#Id of the license
self.id = None # str
#Deploy Pending information of license
self.deployPending = None # int
#An administratively-assigned fully-qualified domain name for this managed node
self.hostId = None # str
#Number of days remaining in the eval period
self.evalPeriodLeft = None # str
#Number of days used in the eval period
self.evalPeriodUsed = None # str
#Expired date of the license
self.expiredDate = None # str
#Whether the license is counted license. Values are true(1) - counted license,false(2) - uncounted license
self.isCounted = None # bool
#This field is based on eulaStatus. Ex: If eulaStatus is true then it will be accepted else false
self.isEulaAccepted = None # bool
#This field is based on eulaStatus. Ex: If eulaStatus is true then it will be applicable else false
self.isEulaApplicable = None # bool
#Whether the license is technology license. Values are true(1) - technology license,false(2) - nontechnology license
self.isTechnologyLicense = None # bool
#Number of installed license file in this feature
self.licenseFileCount = None # int
#Installed License file name
self.licenseFileName = None # str
#Name of the license store within the device. Ex: 'disk1:lic_store_1.txt' or 'flash:lic_store_2.txt
self.storeName = None # str
#License store that is used for storing this license
self.storedUsed = None # int
self.attributeInfo = None # dict
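# Illustrative serialization sketch (hypothetical values): attributeMap maps
# Python attribute names to their JSON keys, so a payload can be built as
#
#     dto = LicenseInfoDTO()
#     dto.name = 'IPBASE'
#     payload = {dto.attributeMap[k]: getattr(dto, k)
#                for k in dto.swaggerTypes if getattr(dto, k) is not None}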
| 27.072948
| 145
| 0.461659
|
class LicenseInfoDTO(object):
def __init__(self):
self.swaggerTypes = {
'name': 'str',
'priority': 'str',
'type': 'str',
'description': 'str',
'validityPeriodRemaining': 'int',
'maxUsageCount': 'int',
'eulaStatus': 'bool',
'validityPeriod': 'int',
'usageCount': 'int',
'physicalIndex': 'str',
'licenseIndex': 'int',
'featureVersion': 'str',
'counted': 'bool',
'totalCount': 'int',
'provisionState': 'int',
'parentId': 'int',
'expiredPeriod': 'int',
'usageCountRemaining': 'int',
'status': 'str',
'id': 'str',
'deployPending': 'int',
'hostId': 'str',
'evalPeriodLeft': 'str',
'evalPeriodUsed': 'str',
'expiredDate': 'str',
'isCounted': 'bool',
'isEulaAccepted': 'bool',
'isEulaApplicable': 'bool',
'isTechnologyLicense': 'bool',
'licenseFileCount': 'int',
'licenseFileName': 'str',
'storeName': 'str',
'storedUsed': 'int',
'attributeInfo': 'dict'
}
self.attributeMap = {
'name': 'name',
'priority': 'priority',
'type': 'type',
'description': 'description',
'validityPeriodRemaining': 'validityPeriodRemaining',
'maxUsageCount': 'maxUsageCount',
'eulaStatus': 'eulaStatus',
'validityPeriod': 'validityPeriod',
'usageCount': 'usageCount',
'physicalIndex': 'physicalIndex',
'licenseIndex': 'licenseIndex',
'featureVersion': 'featureVersion',
'counted': 'counted',
'totalCount': 'totalCount',
'provisionState': 'provisionState',
'parentId': 'parentId',
'expiredPeriod': 'expiredPeriod',
'usageCountRemaining': 'usageCountRemaining',
'status': 'status',
'id': 'id',
'deployPending': 'deployPending',
'hostId': 'hostId',
'evalPeriodLeft': 'evalPeriodLeft',
'evalPeriodUsed': 'evalPeriodUsed',
'expiredDate': 'expiredDate',
'isCounted': 'isCounted',
'isEulaAccepted': 'isEulaAccepted',
'isEulaApplicable': 'isEulaApplicable',
'isTechnologyLicense': 'isTechnologyLicense',
'licenseFileCount': 'licenseFileCount',
'licenseFileName': 'licenseFileName',
'storeName': 'storeName',
'storedUsed': 'storedUsed',
'attributeInfo': 'attributeInfo'
}
        self.name = None
        self.priority = None
        self.type = None
self.description = None
self.validityPeriodRemaining = None
self.maxUsageCount = None
self.eulaStatus = None
self.validityPeriod = None
self.usageCount = None
self.physicalIndex = None
self.licenseIndex = None
        self.featureVersion = None
        self.counted = None
self.totalCount = None
self.provisionState = None
self.parentId = None
self.expiredPeriod = None
self.usageCountRemaining = None
self.status = None
self.id = None
self.deployPending = None
self.hostId = None
self.evalPeriodLeft = None
self.evalPeriodUsed = None
self.expiredDate = None
self.isCounted = None
self.isEulaAccepted = None
self.isEulaApplicable = None
self.isTechnologyLicense = None
self.licenseFileCount = None
self.licenseFileName = None
        self.storeName = None
        self.storedUsed = None
        self.attributeInfo = None
| true
| true
|
f715593dca93c7ea7889b286ec0ff7c88525f4e6
| 6,383
|
py
|
Python
|
flowvision/models/style_transfer/stylenet.py
|
Oneflow-Inc/vision
|
352e9240f63118112ea174bb2d0b502fa54be16f
|
[
"BSD-3-Clause"
] | 40
|
2021-10-19T02:34:56.000Z
|
2022-03-25T07:49:44.000Z
|
flowvision/models/style_transfer/stylenet.py
|
Oneflow-Inc/vision
|
352e9240f63118112ea174bb2d0b502fa54be16f
|
[
"BSD-3-Clause"
] | 53
|
2021-10-22T02:24:44.000Z
|
2022-03-31T04:20:47.000Z
|
flowvision/models/style_transfer/stylenet.py
|
Oneflow-Inc/vision
|
352e9240f63118112ea174bb2d0b502fa54be16f
|
[
"BSD-3-Clause"
] | 11
|
2022-01-06T02:57:07.000Z
|
2022-03-23T15:19:51.000Z
|
"""
Modified from https://github.com/Oneflow-Inc/models/blob/main/Vision/style_transform/fast_neural_style/neural_style/transformer_net.py
"""
from typing import Any
import oneflow as flow
from ..registry import ModelCreator
from ..utils import load_state_dict_from_url
__all__ = ["FastNeuralStyle", "fast_neural_style"]
style_model_urls = {
"sketch": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/sketch_oneflow.tar.gz",
"candy": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/candy_oneflow.tar.gz",
"mosaic": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/mosaic_oneflow.tar.gz",
"rain_princess": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/rain_princess_oneflow.tar.gz",
"udnie": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/udnie_oneflow.tar.gz",
}
class ConvLayer(flow.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = flow.nn.ReflectionPad2d(reflection_padding)
self.conv2d = flow.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(flow.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = flow.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = flow.nn.InstanceNorm2d(channels, affine=True)
self.relu = flow.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(flow.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
reflection_padding = kernel_size // 2
if self.upsample:
self.interpolate = flow.nn.UpsamplingNearest2d(scale_factor=upsample)
self.reflection_pad = flow.nn.ReflectionPad2d(reflection_padding)
self.conv2d = flow.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.interpolate(x_in)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
class FastNeuralStyle(flow.nn.Module):
def __init__(self):
super(FastNeuralStyle, self).__init__()
# Initial convolution layers
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = flow.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = flow.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = flow.nn.InstanceNorm2d(128, affine=True)
# Residual layers
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
# Upsampling Layers
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = flow.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = flow.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
# Non-linearities
self.relu = flow.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
y = flow.clamp(y, 0, 255)
return y
@ModelCreator.register_model
def fast_neural_style(
pretrained: bool = False,
progress: bool = True,
style_model: str = "sketch",
**kwargs: Any
) -> FastNeuralStyle:
"""
Constructs the Fast Neural Style Transfer model.
.. note::
`Perceptual Losses for Real-Time Style Transfer and Super-Resolution <https://arxiv.org/abs/1603.08155>`_.
The required minimum input size of the model is 256x256.
For more details for how to use this model, users can refer to: `neural_style_transfer project <https://github.com/Oneflow-Inc/vision/tree/main/projects/neural_style_transfer>`_.
Args:
pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
style_model (str): Which pretrained style model to download, user can choose from [sketch, candy, mosaic, rain_princess, udnie]. Default: ``sketch``
For example:
.. code-block:: python
>>> import flowvision
>>> stylenet = flowvision.models.style_transfer.fast_neural_style(pretrained=True, progress=True, style_model = "sketch")
"""
assert (
style_model in style_model_urls.keys()
), "`style_model` must choose from [sketch, candy, mosaic, rain_princess, udnie]"
model = FastNeuralStyle(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(
style_model_urls[style_model], progress=progress
)
model.load_state_dict(state_dict)
return model
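# Minimal inference sketch (assumed usage; the input is a random 4-D tensor
# scaled to the 0-255 range that the network clamps to above):
#
#     net = fast_neural_style(pretrained=True, style_model='candy')
#     net.eval()
#     with flow.no_grad():
#         styled = net(flow.rand(1, 3, 256, 256) * 255)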
| 39.159509
| 186
| 0.675858
|
from typing import Any
import oneflow as flow
from ..registry import ModelCreator
from ..utils import load_state_dict_from_url
__all__ = ["FastNeuralStyle", "fast_neural_style"]
style_model_urls = {
"sketch": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/sketch_oneflow.tar.gz",
"candy": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/candy_oneflow.tar.gz",
"mosaic": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/mosaic_oneflow.tar.gz",
"rain_princess": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/rain_princess_oneflow.tar.gz",
"udnie": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/neural_style_transfer/udnie_oneflow.tar.gz",
}
class ConvLayer(flow.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = flow.nn.ReflectionPad2d(reflection_padding)
self.conv2d = flow.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(flow.nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = flow.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = flow.nn.InstanceNorm2d(channels, affine=True)
self.relu = flow.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(flow.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
reflection_padding = kernel_size // 2
if self.upsample:
self.interpolate = flow.nn.UpsamplingNearest2d(scale_factor=upsample)
self.reflection_pad = flow.nn.ReflectionPad2d(reflection_padding)
self.conv2d = flow.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.interpolate(x_in)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
class FastNeuralStyle(flow.nn.Module):
def __init__(self):
super(FastNeuralStyle, self).__init__()
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = flow.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = flow.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = flow.nn.InstanceNorm2d(128, affine=True)
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = flow.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = flow.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
self.relu = flow.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
y = flow.clamp(y, 0, 255)
return y
@ModelCreator.register_model
def fast_neural_style(
pretrained: bool = False,
progress: bool = True,
style_model: str = "sketch",
**kwargs: Any
) -> FastNeuralStyle:
assert (
style_model in style_model_urls.keys()
), "`style_model` must choose from [sketch, candy, mosaic, rain_princess, udnie]"
model = FastNeuralStyle(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(
style_model_urls[style_model], progress=progress
)
model.load_state_dict(state_dict)
return model
| true
| true
|
f71559ccbf89d77b0fb702498ba29b015dc7c215
| 3,597
|
py
|
Python
|
examples/pytorch/nlp/huggingface_models/common/tests/test_tokenization_small_blenderbot.py
|
huggingface/neural-compressor
|
aaad4c357a86914ffa583753c9a26d949838a2a5
|
[
"Apache-2.0"
] | 172
|
2021-09-14T18:34:17.000Z
|
2022-03-30T06:49:53.000Z
|
examples/pytorch/nlp/huggingface_models/common/tests/test_tokenization_small_blenderbot.py
|
intel/lp-opt-tool
|
130eefa3586b38df6c0ff78cc8807ae273f6a63f
|
[
"Apache-2.0"
] | 40
|
2021-09-14T02:26:12.000Z
|
2022-03-29T08:34:04.000Z
|
examples/pytorch/nlp/huggingface_models/common/tests/test_tokenization_small_blenderbot.py
|
intel/neural-compressor
|
16a4a12045fcb468da4d33769aff2c1a5e2ba6ba
|
[
"Apache-2.0"
] | 33
|
2021-09-15T07:27:25.000Z
|
2022-03-25T08:30:57.000Z
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Blenderbot small tokenizer."""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from .test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BlenderbotSmallTokenizer
def setUp(self):
super().setUp()
vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "adapt act apte"
output_text = "adapt act apte"
return input_text, output_text
def test_full_blenderbot_small_tokenizer(self):
tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
text = "adapt act apte"
bpe_tokens = ["adapt", "act", "ap@@", "te"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
input_bpe_tokens = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def test_special_tokens_small_tok(self):
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
assert tok("sam").input_ids == [1384]
src_text = "I am a small frog."
encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def test_empty_word_small_tok(self):
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
src_text = "I am a small frog ."
src_text_dot = ""
encoded = tok(src_text)["input_ids"]
encoded_dot = tok(src_text_dot)["input_ids"]
assert encoded[-1] == encoded_dot[0]
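# These tests run under pytest; the invocation below is illustrative and
# assumes it is run from the directory containing this file:
#
#     python -m pytest test_tokenization_small_blenderbot.py -q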
| 41.344828
| 108
| 0.687517
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from .test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = BlenderbotSmallTokenizer
def setUp(self):
super().setUp()
vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "adapt act apte"
output_text = "adapt act apte"
return input_text, output_text
def test_full_blenderbot_small_tokenizer(self):
tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
text = "adapt act apte"
bpe_tokens = ["adapt", "act", "ap@@", "te"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
input_bpe_tokens = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def test_special_tokens_small_tok(self):
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
assert tok("sam").input_ids == [1384]
src_text = "I am a small frog."
encoded = tok([src_text], padding=False, truncation=False)["input_ids"]
decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
assert src_text != decoded
assert decoded == "i am a small frog ."
def test_empty_word_small_tok(self):
tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
src_text = "I am a small frog ."
src_text_dot = ""
encoded = tok(src_text)["input_ids"]
encoded_dot = tok(src_text_dot)["input_ids"]
assert encoded[-1] == encoded_dot[0]
| true
| true
|
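A standalone sketch of the round trip the test above exercises, assuming the same toy vocab and merges written to a temporary directory (the file names vocab.json and merges.txt are illustrative, not taken from the source):

# Hedged sketch: rebuild the toy tokenizer outside the test harness.
import json
import os
import tempfile
from transformers.models.blenderbot_small.tokenization_blenderbot_small import BlenderbotSmallTokenizer

tmpdir = tempfile.mkdtemp()
vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
vocab_file = os.path.join(tmpdir, "vocab.json")   # illustrative file name
merges_file = os.path.join(tmpdir, "merges.txt")  # illustrative file name
with open(vocab_file, "w", encoding="utf-8") as fp:
    fp.write(json.dumps(dict(zip(vocab, range(len(vocab))))) + "\n")
with open(merges_file, "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))

tok = BlenderbotSmallTokenizer(vocab_file, merges_file, unk_token="__unk__",
                               bos_token="__start__", eos_token="__end__")
print(tok.tokenize("adapt act apte"))  # expected: ['adapt', 'act', 'ap@@', 'te']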
f71559da0abd1737aa33927ca6ae4d82a909ed60
| 1,426
|
py
|
Python
|
enable-s3-encryption.py
|
thilinajayanath/s3-server-side-encryption
|
b1de6cc2785825df0c6f6769ff0693edd5d2e5f6
|
[
"MIT"
] | null | null | null |
enable-s3-encryption.py
|
thilinajayanath/s3-server-side-encryption
|
b1de6cc2785825df0c6f6769ff0693edd5d2e5f6
|
[
"MIT"
] | null | null | null |
enable-s3-encryption.py
|
thilinajayanath/s3-server-side-encryption
|
b1de6cc2785825df0c6f6769ff0693edd5d2e5f6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import boto3
from botocore.exceptions import ClientError
parser = argparse.ArgumentParser(description='Check all S3 buckets in the AWS account and enable default encryption with AES256')
parser.add_argument('aws_account_name', type=str, help='Named AWS user account')
args = parser.parse_args()
session = boto3.session.Session(profile_name=args.aws_account_name)
s3 = session.client(service_name='s3')
enc_config = {
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
},
]
}
for bucket in s3.list_buckets()['Buckets']:
try:
enc_algorithm = s3.get_bucket_encryption(Bucket=bucket['Name'])['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm']
print('Bucket %s has default server-side encryption enabled with %s' % (bucket['Name'],enc_algorithm))
except ClientError as e:
if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
print('Bucket: %s does not have default server-side encryption enabled' % bucket['Name'])
try:
s3.put_bucket_encryption(Bucket=bucket['Name'],ServerSideEncryptionConfiguration=enc_config)
print('Enabled encryption on bucket: %s' % bucket['Name'])
except ClientError as e:
print(e.response['Error']['Code'])
else:
print(e.response['Error']['Code'])
| 36.564103
| 170
| 0.718093
|
import argparse
import boto3
from botocore.exceptions import ClientError
parser = argparse.ArgumentParser(description='Check all S3 buckets in the AWS account and enable default encryption with AES256')
parser.add_argument('aws_account_name', type=str, help='Named AWS user account')
args = parser.parse_args()
session = boto3.session.Session(profile_name=args.aws_account_name)
s3 = session.client(service_name='s3')
enc_config = {
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'AES256'
}
},
]
}
for bucket in s3.list_buckets()['Buckets']:
try:
enc_algorithm = s3.get_bucket_encryption(Bucket=bucket['Name'])['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm']
print('Bucket %s has default server-side encryption enabled with %s' % (bucket['Name'],enc_algorithm))
except ClientError as e:
if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
print('Bucket: %s does not have default server-side encryption enabled' % bucket['Name'])
try:
s3.put_bucket_encryption(Bucket=bucket['Name'],ServerSideEncryptionConfiguration=enc_config)
print('Enabled encryption on bucket: %s' % bucket['Name'])
except ClientError as e:
print(e.response['Error']['Code'])
else:
print(e.response['Error']['Code'])
| true
| true
|
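A read-only variant of the loop above can be useful for auditing before mutating anything; this sketch reports status without ever calling put_bucket_encryption (the profile name is a placeholder):

# Hedged read-only audit: same checks as the script above, no writes.
import boto3
from botocore.exceptions import ClientError

s3 = boto3.session.Session(profile_name='my-profile').client('s3')  # placeholder profile
for bucket in s3.list_buckets()['Buckets']:
    try:
        enc = s3.get_bucket_encryption(Bucket=bucket['Name'])
        algo = enc['ServerSideEncryptionConfiguration']['Rules'][0]['ApplyServerSideEncryptionByDefault']['SSEAlgorithm']
        print('%s: default encryption %s' % (bucket['Name'], algo))
    except ClientError as e:
        if e.response['Error']['Code'] == 'ServerSideEncryptionConfigurationNotFoundError':
            print('%s: no default encryption' % bucket['Name'])
        else:
            raise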
f7155a0bc67e529b57bf0704543609089406c58a
| 1,557
|
py
|
Python
|
a1/__init__.py
|
o-ran-sc/ric-plt-a1
|
902771612ffcf0541ea27dce35eb6f20bf885cf3
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
a1/__init__.py
|
o-ran-sc/ric-plt-a1
|
902771612ffcf0541ea27dce35eb6f20bf885cf3
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
a1/__init__.py
|
o-ran-sc/ric-plt-a1
|
902771612ffcf0541ea27dce35eb6f20bf885cf3
|
[
"Apache-2.0",
"CC-BY-4.0"
] | 3
|
2020-05-24T05:51:03.000Z
|
2021-08-23T07:21:49.000Z
|
# ==================================================================================
# Copyright (c) 2019 Nokia
# Copyright (c) 2018-2019 AT&T Intellectual Property.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================
"""
contains the app; broken out here for ease of unit testing
"""
import connexion
from prometheus_client import CollectorRegistry, generate_latest, multiprocess
app = connexion.App(__name__, specification_dir=".")
app.add_api("openapi.yaml", arguments={"title": "My Title"})
# python decorators feel like black magic to me
@app.app.route('/a1-p/metrics', methods=['GET'])
def metrics(): # pylint: disable=unused-variable
# /metrics API shouldn't be visible in the API documentation,
# hence it's added here in the create_app step
# requires environment variable prometheus_multiproc_dir
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)
return generate_latest(registry)
| 42.081081
| 84
| 0.659602
|
import connexion
from prometheus_client import CollectorRegistry, generate_latest, multiprocess
app = connexion.App(__name__, specification_dir=".")
app.add_api("openapi.yaml", arguments={"title": "My Title"})
@app.app.route('/a1-p/metrics', methods=['GET'])
def metrics():
# hence it's added here in the create_app step
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)
return generate_latest(registry)
| true
| true
|
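The /a1-p/metrics route above depends on prometheus_client's multiprocess mode; a minimal sketch of exercising it with connexion's underlying Flask test client, assuming the package imports as a1 and a writable metrics directory (both are assumptions, not from the source):

# Hedged sketch: prometheus_multiproc_dir must be set before the app imports
# prometheus_client, and the directory must exist and be writable.
import os
import tempfile

os.environ["prometheus_multiproc_dir"] = tempfile.mkdtemp()

from a1 import app  # assumed import path for the connexion.App defined above

client = app.app.test_client()  # connexion.App exposes the Flask app as .app
resp = client.get("/a1-p/metrics")
print(resp.status_code)  # expected 200
print(resp.data[:80])    # Prometheus text exposition format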
f7155a5748b43f2a16ce87a6c062feb7b20551ab
| 234
|
py
|
Python
|
cieloApi3/__init__.py
|
thiagosm/API-3.0-Python
|
2fab59d5e0ce7191d3c458e8fab9b1d3c6298748
|
[
"MIT"
] | null | null | null |
cieloApi3/__init__.py
|
thiagosm/API-3.0-Python
|
2fab59d5e0ce7191d3c458e8fab9b1d3c6298748
|
[
"MIT"
] | null | null | null |
cieloApi3/__init__.py
|
thiagosm/API-3.0-Python
|
2fab59d5e0ce7191d3c458e8fab9b1d3c6298748
|
[
"MIT"
] | null | null | null |
from .environment import *
from .merchant import *
from .sale import *
from .customer import *
from .creditCard import *
from .debitCard import *
from .payment import *
from .recurrentPayment import *
from .cieloEcommerce import *
| 18
| 31
| 0.75641
|
from .environment import *
from .merchant import *
from .sale import *
from .customer import *
from .creditCard import *
from .debitCard import *
from .payment import *
from .recurrentPayment import *
from .cieloEcommerce import *
| true
| true
|
f7155a7ee909d8a5c4d0f88e8722b5abeabb44e3
| 13,929
|
py
|
Python
|
google/ads/google_ads/v3/proto/services/asset_service_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2019-11-30T23:42:39.000Z
|
2019-11-30T23:42:39.000Z
|
google/ads/google_ads/v3/proto/services/asset_service_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v3/proto/services/asset_service_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2020-09-30T17:04:06.000Z
|
2020-09-30T17:04:06.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/services/asset_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.resources import asset_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/services/asset_service.proto',
package='google.ads.googleads.v3.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v3.servicesB\021AssetServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V3.Services\312\002 Google\\Ads\\GoogleAds\\V3\\Services\352\002$Google::Ads::GoogleAds::V3::Services'),
serialized_pb=_b('\n:google/ads/googleads_v3/proto/services/asset_service.proto\x12 google.ads.googleads.v3.services\x1a\x33google/ads/googleads_v3/proto/resources/asset.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"P\n\x0fGetAssetRequest\x12=\n\rresource_name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1egoogleads.googleapis.com/Asset\"z\n\x13MutateAssetsRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12I\n\noperations\x18\x02 \x03(\x0b\x32\x30.google.ads.googleads.v3.services.AssetOperationB\x03\xe0\x41\x02\"Y\n\x0e\x41ssetOperation\x12:\n\x06\x63reate\x18\x01 \x01(\x0b\x32(.google.ads.googleads.v3.resources.AssetH\x00\x42\x0b\n\toperation\"\\\n\x14MutateAssetsResponse\x12\x44\n\x07results\x18\x02 \x03(\x0b\x32\x33.google.ads.googleads.v3.services.MutateAssetResult\"*\n\x11MutateAssetResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xa8\x03\n\x0c\x41ssetService\x12\xa9\x01\n\x08GetAsset\x12\x31.google.ads.googleads.v3.services.GetAssetRequest\x1a(.google.ads.googleads.v3.resources.Asset\"@\x82\xd3\xe4\x93\x02*\x12(/v3/{resource_name=customers/*/assets/*}\xda\x41\rresource_name\x12\xce\x01\n\x0cMutateAssets\x12\x35.google.ads.googleads.v3.services.MutateAssetsRequest\x1a\x36.google.ads.googleads.v3.services.MutateAssetsResponse\"O\x82\xd3\xe4\x93\x02\x30\"+/v3/customers/{customer_id=*}/assets:mutate:\x01*\xda\x41\x16\x63ustomer_id,operations\x1a\x1b\xca\x41\x18googleads.googleapis.comB\xf8\x01\n$com.google.ads.googleads.v3.servicesB\x11\x41ssetServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V3.Services\xca\x02 Google\\Ads\\GoogleAds\\V3\\Services\xea\x02$Google::Ads::GoogleAds::V3::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETASSETREQUEST = _descriptor.Descriptor(
name='GetAssetRequest',
full_name='google.ads.googleads.v3.services.GetAssetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.GetAssetRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002\372A \n\036googleads.googleapis.com/Asset'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=264,
serialized_end=344,
)
_MUTATEASSETSREQUEST = _descriptor.Descriptor(
name='MutateAssetsRequest',
full_name='google.ads.googleads.v3.services.MutateAssetsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operations', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=346,
serialized_end=468,
)
_ASSETOPERATION = _descriptor.Descriptor(
name='AssetOperation',
full_name='google.ads.googleads.v3.services.AssetOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v3.services.AssetOperation.create', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v3.services.AssetOperation.operation',
index=0, containing_type=None, fields=[]),
],
serialized_start=470,
serialized_end=559,
)
_MUTATEASSETSRESPONSE = _descriptor.Descriptor(
name='MutateAssetsResponse',
full_name='google.ads.googleads.v3.services.MutateAssetsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='google.ads.googleads.v3.services.MutateAssetsResponse.results', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=561,
serialized_end=653,
)
_MUTATEASSETRESULT = _descriptor.Descriptor(
name='MutateAssetResult',
full_name='google.ads.googleads.v3.services.MutateAssetResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.MutateAssetResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=655,
serialized_end=697,
)
_MUTATEASSETSREQUEST.fields_by_name['operations'].message_type = _ASSETOPERATION
_ASSETOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET
_ASSETOPERATION.oneofs_by_name['operation'].fields.append(
_ASSETOPERATION.fields_by_name['create'])
_ASSETOPERATION.fields_by_name['create'].containing_oneof = _ASSETOPERATION.oneofs_by_name['operation']
_MUTATEASSETSRESPONSE.fields_by_name['results'].message_type = _MUTATEASSETRESULT
DESCRIPTOR.message_types_by_name['GetAssetRequest'] = _GETASSETREQUEST
DESCRIPTOR.message_types_by_name['MutateAssetsRequest'] = _MUTATEASSETSREQUEST
DESCRIPTOR.message_types_by_name['AssetOperation'] = _ASSETOPERATION
DESCRIPTOR.message_types_by_name['MutateAssetsResponse'] = _MUTATEASSETSRESPONSE
DESCRIPTOR.message_types_by_name['MutateAssetResult'] = _MUTATEASSETRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAssetRequest = _reflection.GeneratedProtocolMessageType('GetAssetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETASSETREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.GetAsset][google.ads.googleads.v3.services.AssetService.GetAsset]
Attributes:
resource_name:
Required. The resource name of the asset to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.GetAssetRequest)
))
_sym_db.RegisterMessage(GetAssetRequest)
MutateAssetsRequest = _reflection.GeneratedProtocolMessageType('MutateAssetsRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.MutateAssets][google.ads.googleads.v3.services.AssetService.MutateAssets]
Attributes:
customer_id:
Required. The ID of the customer whose assets are being
modified.
operations:
Required. The list of operations to perform on individual
assets.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.MutateAssetsRequest)
))
_sym_db.RegisterMessage(MutateAssetsRequest)
AssetOperation = _reflection.GeneratedProtocolMessageType('AssetOperation', (_message.Message,), dict(
DESCRIPTOR = _ASSETOPERATION,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """A single operation to create an asset. Supported asset types are
YoutubeVideoAsset, MediaBundleAsset, ImageAsset, and LeadFormAsset.
TextAsset should be created with Ad inline.
Attributes:
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
asset.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.AssetOperation)
))
_sym_db.RegisterMessage(AssetOperation)
MutateAssetsResponse = _reflection.GeneratedProtocolMessageType('MutateAssetsResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSRESPONSE,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Response message for an asset mutate.
Attributes:
results:
All results for the mutate.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.MutateAssetsResponse)
))
_sym_db.RegisterMessage(MutateAssetsResponse)
MutateAssetResult = _reflection.GeneratedProtocolMessageType('MutateAssetResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETRESULT,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """The result for the asset mutate.
Attributes:
resource_name:
The resource name returned for successful operations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.MutateAssetResult)
))
_sym_db.RegisterMessage(MutateAssetResult)
DESCRIPTOR._options = None
_GETASSETREQUEST.fields_by_name['resource_name']._options = None
_MUTATEASSETSREQUEST.fields_by_name['customer_id']._options = None
_MUTATEASSETSREQUEST.fields_by_name['operations']._options = None
_ASSETSERVICE = _descriptor.ServiceDescriptor(
name='AssetService',
full_name='google.ads.googleads.v3.services.AssetService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=700,
serialized_end=1124,
methods=[
_descriptor.MethodDescriptor(
name='GetAsset',
full_name='google.ads.googleads.v3.services.AssetService.GetAsset',
index=0,
containing_service=None,
input_type=_GETASSETREQUEST,
output_type=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET,
serialized_options=_b('\202\323\344\223\002*\022(/v3/{resource_name=customers/*/assets/*}\332A\rresource_name'),
),
_descriptor.MethodDescriptor(
name='MutateAssets',
full_name='google.ads.googleads.v3.services.AssetService.MutateAssets',
index=1,
containing_service=None,
input_type=_MUTATEASSETSREQUEST,
output_type=_MUTATEASSETSRESPONSE,
serialized_options=_b('\202\323\344\223\0020\"+/v3/customers/{customer_id=*}/assets:mutate:\001*\332A\026customer_id,operations'),
),
])
_sym_db.RegisterServiceDescriptor(_ASSETSERVICE)
DESCRIPTOR.services_by_name['AssetService'] = _ASSETSERVICE
# @@protoc_insertion_point(module_scope)
| 41.332344
| 1,840
| 0.77414
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.resources import asset_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/services/asset_service.proto',
package='google.ads.googleads.v3.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v3.servicesB\021AssetServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V3.Services\312\002 Google\\Ads\\GoogleAds\\V3\\Services\352\002$Google::Ads::GoogleAds::V3::Services'),
serialized_pb=_b('\n:google/ads/googleads_v3/proto/services/asset_service.proto\x12 google.ads.googleads.v3.services\x1a\x33google/ads/googleads_v3/proto/resources/asset.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"P\n\x0fGetAssetRequest\x12=\n\rresource_name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1egoogleads.googleapis.com/Asset\"z\n\x13MutateAssetsRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12I\n\noperations\x18\x02 \x03(\x0b\x32\x30.google.ads.googleads.v3.services.AssetOperationB\x03\xe0\x41\x02\"Y\n\x0e\x41ssetOperation\x12:\n\x06\x63reate\x18\x01 \x01(\x0b\x32(.google.ads.googleads.v3.resources.AssetH\x00\x42\x0b\n\toperation\"\\\n\x14MutateAssetsResponse\x12\x44\n\x07results\x18\x02 \x03(\x0b\x32\x33.google.ads.googleads.v3.services.MutateAssetResult\"*\n\x11MutateAssetResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xa8\x03\n\x0c\x41ssetService\x12\xa9\x01\n\x08GetAsset\x12\x31.google.ads.googleads.v3.services.GetAssetRequest\x1a(.google.ads.googleads.v3.resources.Asset\"@\x82\xd3\xe4\x93\x02*\x12(/v3/{resource_name=customers/*/assets/*}\xda\x41\rresource_name\x12\xce\x01\n\x0cMutateAssets\x12\x35.google.ads.googleads.v3.services.MutateAssetsRequest\x1a\x36.google.ads.googleads.v3.services.MutateAssetsResponse\"O\x82\xd3\xe4\x93\x02\x30\"+/v3/customers/{customer_id=*}/assets:mutate:\x01*\xda\x41\x16\x63ustomer_id,operations\x1a\x1b\xca\x41\x18googleads.googleapis.comB\xf8\x01\n$com.google.ads.googleads.v3.servicesB\x11\x41ssetServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V3.Services\xca\x02 Google\\Ads\\GoogleAds\\V3\\Services\xea\x02$Google::Ads::GoogleAds::V3::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETASSETREQUEST = _descriptor.Descriptor(
name='GetAssetRequest',
full_name='google.ads.googleads.v3.services.GetAssetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.GetAssetRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002\372A \n\036googleads.googleapis.com/Asset'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=264,
serialized_end=344,
)
_MUTATEASSETSREQUEST = _descriptor.Descriptor(
name='MutateAssetsRequest',
full_name='google.ads.googleads.v3.services.MutateAssetsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operations', full_name='google.ads.googleads.v3.services.MutateAssetsRequest.operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=346,
serialized_end=468,
)
_ASSETOPERATION = _descriptor.Descriptor(
name='AssetOperation',
full_name='google.ads.googleads.v3.services.AssetOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v3.services.AssetOperation.create', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v3.services.AssetOperation.operation',
index=0, containing_type=None, fields=[]),
],
serialized_start=470,
serialized_end=559,
)
_MUTATEASSETSRESPONSE = _descriptor.Descriptor(
name='MutateAssetsResponse',
full_name='google.ads.googleads.v3.services.MutateAssetsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='results', full_name='google.ads.googleads.v3.services.MutateAssetsResponse.results', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=561,
serialized_end=653,
)
_MUTATEASSETRESULT = _descriptor.Descriptor(
name='MutateAssetResult',
full_name='google.ads.googleads.v3.services.MutateAssetResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.MutateAssetResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=655,
serialized_end=697,
)
_MUTATEASSETSREQUEST.fields_by_name['operations'].message_type = _ASSETOPERATION
_ASSETOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET
_ASSETOPERATION.oneofs_by_name['operation'].fields.append(
_ASSETOPERATION.fields_by_name['create'])
_ASSETOPERATION.fields_by_name['create'].containing_oneof = _ASSETOPERATION.oneofs_by_name['operation']
_MUTATEASSETSRESPONSE.fields_by_name['results'].message_type = _MUTATEASSETRESULT
DESCRIPTOR.message_types_by_name['GetAssetRequest'] = _GETASSETREQUEST
DESCRIPTOR.message_types_by_name['MutateAssetsRequest'] = _MUTATEASSETSREQUEST
DESCRIPTOR.message_types_by_name['AssetOperation'] = _ASSETOPERATION
DESCRIPTOR.message_types_by_name['MutateAssetsResponse'] = _MUTATEASSETSRESPONSE
DESCRIPTOR.message_types_by_name['MutateAssetResult'] = _MUTATEASSETRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAssetRequest = _reflection.GeneratedProtocolMessageType('GetAssetRequest', (_message.Message,), dict(
DESCRIPTOR = _GETASSETREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.GetAsset][google.ads.googleads.v3.services.AssetService.GetAsset]
Attributes:
resource_name:
Required. The resource name of the asset to fetch.
""",
))
_sym_db.RegisterMessage(GetAssetRequest)
MutateAssetsRequest = _reflection.GeneratedProtocolMessageType('MutateAssetsRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Request message for
[AssetService.MutateAssets][google.ads.googleads.v3.services.AssetService.MutateAssets]
Attributes:
customer_id:
Required. The ID of the customer whose assets are being
modified.
operations:
Required. The list of operations to perform on individual
assets.
""",
))
_sym_db.RegisterMessage(MutateAssetsRequest)
AssetOperation = _reflection.GeneratedProtocolMessageType('AssetOperation', (_message.Message,), dict(
DESCRIPTOR = _ASSETOPERATION,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """A single operation to create an asset. Supported asset types are
YoutubeVideoAsset, MediaBundleAsset, ImageAsset, and LeadFormAsset.
TextAsset should be created with Ad inline.
Attributes:
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
asset.
""",
))
_sym_db.RegisterMessage(AssetOperation)
MutateAssetsResponse = _reflection.GeneratedProtocolMessageType('MutateAssetsResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETSRESPONSE,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """Response message for an asset mutate.
Attributes:
results:
All results for the mutate.
""",
))
_sym_db.RegisterMessage(MutateAssetsResponse)
MutateAssetResult = _reflection.GeneratedProtocolMessageType('MutateAssetResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATEASSETRESULT,
__module__ = 'google.ads.googleads_v3.proto.services.asset_service_pb2'
,
__doc__ = """The result for the asset mutate.
Attributes:
resource_name:
The resource name returned for successful operations.
""",
))
_sym_db.RegisterMessage(MutateAssetResult)
DESCRIPTOR._options = None
_GETASSETREQUEST.fields_by_name['resource_name']._options = None
_MUTATEASSETSREQUEST.fields_by_name['customer_id']._options = None
_MUTATEASSETSREQUEST.fields_by_name['operations']._options = None
_ASSETSERVICE = _descriptor.ServiceDescriptor(
name='AssetService',
full_name='google.ads.googleads.v3.services.AssetService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=700,
serialized_end=1124,
methods=[
_descriptor.MethodDescriptor(
name='GetAsset',
full_name='google.ads.googleads.v3.services.AssetService.GetAsset',
index=0,
containing_service=None,
input_type=_GETASSETREQUEST,
output_type=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_asset__pb2._ASSET,
serialized_options=_b('\202\323\344\223\002*\022(/v3/{resource_name=customers/*/assets/*}\332A\rresource_name'),
),
_descriptor.MethodDescriptor(
name='MutateAssets',
full_name='google.ads.googleads.v3.services.AssetService.MutateAssets',
index=1,
containing_service=None,
input_type=_MUTATEASSETSREQUEST,
output_type=_MUTATEASSETSRESPONSE,
serialized_options=_b('\202\323\344\223\0020\"+/v3/customers/{customer_id=*}/assets:mutate:\001*\332A\026customer_id,operations'),
),
])
_sym_db.RegisterServiceDescriptor(_ASSETSERVICE)
DESCRIPTOR.services_by_name['AssetService'] = _ASSETSERVICE
# @@protoc_insertion_point(module_scope)
| true
| true
|
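The generated module above only declares descriptors and message classes; a hedged sketch of constructing one of those messages with the standard protobuf Python API (the customer ID is a placeholder):

# Hedged sketch: build a MutateAssetsRequest from the generated classes.
from google.ads.google_ads.v3.proto.services import asset_service_pb2

req = asset_service_pb2.MutateAssetsRequest()
req.customer_id = '1234567890'      # placeholder customer ID
op = req.operations.add()           # 'operations' is a repeated AssetOperation
op.create.SetInParent()             # select the 'create' branch of the oneof
print(op.WhichOneof('operation'))   # -> 'create'
wire = req.SerializeToString()      # wire-format bytes, ready for the RPC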
f7155b122a1e35ce2adb4b96a2aadaade4b5777f
| 567
|
py
|
Python
|
center/app/playback/parsers/ip_parser.py
|
netSensTeam/netSens
|
7ab5f41a7103e6c86aa6cb2eff3df68c301e48c1
|
[
"MIT"
] | null | null | null |
center/app/playback/parsers/ip_parser.py
|
netSensTeam/netSens
|
7ab5f41a7103e6c86aa6cb2eff3df68c301e48c1
|
[
"MIT"
] | 3
|
2021-05-10T13:50:55.000Z
|
2022-03-02T08:12:46.000Z
|
center/app/playback/parsers/ip_parser.py
|
netSensTeam/netSens
|
7ab5f41a7103e6c86aa6cb2eff3df68c301e48c1
|
[
"MIT"
] | null | null | null |
import dpkt
from parsers.utils import *
name = 'ip_parser'
def parseFunc(ts, eth):
if getMACString(eth.dst) == 'FF:FF:FF:FF:FF:FF':
return None
if isinstance(eth.data, dpkt.ip.IP):
return parseIPPacket(ts, eth)
def parseIPPacket(ts, eth):
ip = eth.data
tpa = getIPString(ip.dst)
tha = getMACString(eth.dst)
return {
'protocol': 'ip',
'layer': 3,
'time': ts,
'description': 'ip packet to (%s,%s)' % (tha, tpa),
'target': {
'ip': tpa,
'mac': tha
}
}
| 21.807692
| 59
| 0.527337
|
import dpkt
from parsers.utils import *
name = 'ip_parser'
def parseFunc(ts, eth):
if getMACString(eth.dst) == 'FF:FF:FF:FF:FF:FF':
return None
if isinstance(eth.data, dpkt.ip.IP):
return parseIPPacket(ts, eth)
def parseIPPacket(ts, eth):
ip = eth.data
tpa = getIPString(ip.dst)
tha = getMACString(eth.dst)
return {
'protocol': 'ip',
'layer': 3,
'time': ts,
'description': 'ip packet to (%s,%s)' % (tha, tpa),
'target': {
'ip': tpa,
'mac': tha
}
}
| true
| true
|
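A sketch of driving the parser above over a capture file with dpkt ('capture.pcap' is a placeholder path):

# Hedged driver for parseFunc: iterate an Ethernet pcap with dpkt.
import dpkt

with open('capture.pcap', 'rb') as f:   # placeholder path
    for ts, buf in dpkt.pcap.Reader(f):
        eth = dpkt.ethernet.Ethernet(buf)
        event = parseFunc(ts, eth)      # parseFunc as defined above
        if event is not None:
            print(event['description'])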
f7155c6b7863bc6529f4f6f16e61fb8e883beed8
| 6,035
|
py
|
Python
|
pandas_study/PandasTest.py
|
BreezeDawn/numpy-pandas-matplotlib-
|
e55dccb2442e57c2fccb2081966a7c19e731083a
|
[
"MIT"
] | null | null | null |
pandas_study/PandasTest.py
|
BreezeDawn/numpy-pandas-matplotlib-
|
e55dccb2442e57c2fccb2081966a7c19e731083a
|
[
"MIT"
] | null | null | null |
pandas_study/PandasTest.py
|
BreezeDawn/numpy-pandas-matplotlib-
|
e55dccb2442e57c2fccb2081966a7c19e731083a
|
[
"MIT"
] | 1
|
2018-10-24T07:33:51.000Z
|
2018-10-24T07:33:51.000Z
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def base():
    index = pd.date_range('20181023', periods=9)  # generate 9 row index labels
    column = ['a', 'b', 'c', 'd']  # 4 column labels
    a = np.random.randn(9, 4)  # random 9x4 data
df = pd.DataFrame(a, index=index, columns=column)
print(df)
    print(pd.DataFrame(np.arange(9).reshape((3, 3))))  # default row/column indexes are integers starting from 0
    print(df.dtypes)  # data type of each column
    print(df.index)  # row index
    print(df.columns)  # column index
    print(df.values)  # all values
    print(df.describe())  # per-column summary statistics: count/mean/...
    print(df.T)  # transpose
    print(df.sort_index(axis=1, ascending=False))  # sort by index; axis=1 sorts columns, axis=0 sorts rows; ascending=False sorts descending, True ascending
    print(df.sort_values(by='a'))  # sort by the values in column 'a', ascending by default
def select():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
print(df)
    print(df.A)  # select column A (with its index)
    print(df[2:3])  # slice rows by position
    print(df[2:3])  # slice rows by position
    print(df['2018-10-25':'2018-10-26'])  # slice rows by date label
    print(df.loc['2018-10-25', ['A', 'B']])  # select by label
    print(df.iloc[[1, 3, 5], 1:5])  # select by position
    print(df.ix['2018-10-25':'2018-10-26', 1:5])  # mixed label/position selection (.ix, deprecated)
    print(df[df.A > 8])  # rows where column A is greater than 8
def update():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
    df.iloc[2, 3] = -555  # modify a value: anything selected can be assigned to
    df.B[df.A > 8] = 0  # set B to 0 wherever A is greater than 8
    print(df)
    df['E'] = pd.Series(np.arange(6), pd.date_range('20181023', periods=6))  # add a new column
print(df)
def handle_NaN():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
df.iloc[1, 2] = np.nan
df.iloc[0, 1] = np.nan
print(df)
    print(df.dropna(axis=1, how='any'))  # drop missing values (returns a new object, original untouched); axis=1 drops columns, axis=0 drops rows; how='any' drops if any value is NaN, 'all' only if all are
    print(df.fillna(value=0))  # fill missing values with 0
    print(df.isnull())  # element-wise check for missing values, returns a boolean frame
    print(np.any(df.isnull()))  # np.any returns True if at least one element is True, i.e. at least one NaN
def read_save_data():
    data = pd.read_csv('./pand.csv')  # read CSV data (comma-separated)
    print(data)
    data.to_pickle('./pand.pickle')  # save the data to a pickle file
def merge_DataFrame():
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(2 * np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
print(df1)
print(df2)
print(df3)
    res = pd.concat([df1, df2, df3], axis=0)  # axis=0 concatenates vertically, axis=1 horizontally
    print(res)
    res = pd.concat([df1, df2, df3], axis=1, ignore_index=True)  # ignore_index discards the old index and renumbers
    print(res)
    df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'], index=[1, 2, 3])
    df2 = pd.DataFrame(np.ones((3, 4)), columns=['b', 'c', 'd', 'e'], index=[2, 3, 4])
    res = pd.concat([df1, df2], axis=0, join='outer', sort=True)  # vertical concat; join='outer' keeps mismatched columns, filling with NaN
    print(res)
    res = pd.concat([df1, df2], axis=0, join='inner', sort=True, ignore_index=True)  # vertical concat; join='inner' drops mismatched columns and keeps only the shared ones
    print(res)
    res = pd.concat([df1, df2], axis=1, )  # horizontal concat; mismatched row labels are filled with NaN
    print(res)
    res = pd.concat([df1, df2], axis=1, join_axes=[df1.index])  # horizontal concat aligned to df1's index; NaN fills gaps, rows absent from df1 are dropped
    print(res)
    df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])
    df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
    df3 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
    res = df1.append(df2, ignore_index=True)  # append df2 after df1
    print(res)
    res = df1.append([df2, df3], ignore_index=True)  # append df2 and df3 after df1
print(res)
sl = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
res = df1.append(sl, ignore_index=True)
print(res)
def merge():
left = pd.DataFrame({
'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']
})
right = pd.DataFrame({
'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']
})
print(left)
print(right)
    res = pd.merge(left, right, on='key')  # merge left and right on 'key'; the key column appears once
print(res)
left = pd.DataFrame({
'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']
})
right = pd.DataFrame({
'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']
})
    res = pd.merge(left, right, on=['key1', 'key2'], how='inner')  # inner join on both keys; see the output
    print(res)
    res = pd.merge(left, right, on=['key1', 'key2'], how='outer',indicator='indicator_column')  # outer join keeps every row; indicator adds a column recording each row's source (passing a string renames it)
    print(res)
    res = pd.merge(left, right, on=['key1', 'key2'], how='left')  # left join: keep the left frame's key rows unchanged
    print(res)
    res = pd.merge(left, right, on=['key1', 'key2'], how='right')  # right join: keep the right frame's key rows unchanged
    print(res)
    res = pd.merge(left, right, left_index=True,right_index=True, how='right')  # merge on the indexes
print(res)
def plot_test():
    # cumulative sum of 1000 one-dimensional samples
    data = pd.Series(np.random.randn(1000),index=np.arange(1000))
    data = data.cumsum()
    # data.plot()
    # plt.show()
    # a matrix of samples
    data = pd.DataFrame(np.random.randn(1000,4),index=np.arange(1000),columns=list('ABCD'))
    data = data.cumsum()
    print(data.head())  # head shows the first rows (5 by default)
    data.plot()  # line plot
    ax = data.plot.scatter(x='A',y='B',color='DarkBlue', label='Class 1')  # scatter plots data points and takes only x and y
    data.plot.scatter(x='A',y='C',color='DarkGreen', label='Class 2',ax=ax)  # passing ax draws on the same figure as the previous plot
    plt.show()
    # plot methods: bar, hist, box, kde, area, scatter, hexbin, pie
if __name__ == '__main__':
# base()
# select()
# update()
# handle_NaN()
# read_save_data()
# merge_DataFrame()
# merge()
plot_test()
| 35.922619
| 136
| 0.566031
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def base():
index = pd.date_range('20181023', periods=9)
column = ['a', 'b', 'c', 'd']
a = np.random.randn(9, 4)
df = pd.DataFrame(a, index=index, columns=column)
print(df)
print(pd.DataFrame(np.arange(9).reshape((3, 3))))
print(df.dtypes)
print(df.index)
print(df.columns)
print(df.values)
print(df.describe())
print(df.T)
print(df.sort_index(axis=1, ascending=False))
print(df.sort_values(by='a'))
def select():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
print(df)
print(df.A)
print(df[2:3])
print(df[2:3])
print(df['2018-10-25':'2018-10-26'])
print(df.loc['2018-10-25', ['A', 'B']])
print(df.iloc[[1, 3, 5], 1:5])
print(df.ix['2018-10-25':'2018-10-26', 1:5])
print(df[df.A > 8])
def update():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
df.iloc[2, 3] = -555
df.B[df.A > 8] = 0
print(df)
df['E'] = pd.Series(np.arange(6), pd.date_range('20181023', periods=6))
print(df)
def handle_NaN():
index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
df.iloc[1, 2] = np.nan
df.iloc[0, 1] = np.nan
print(df)
print(df.dropna(axis=1, how='any'))
print(df.fillna(value=0))
print(df.isnull())
print(np.any(df.isnull()))
def read_save_data():
data = pd.read_csv('./pand.csv')
print(data)
data.to_pickle('./pand.pickle')
def merge_DataFrame():
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(2 * np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
print(df1)
print(df2)
print(df3)
res = pd.concat([df1, df2, df3], axis=0)
print(res)
res = pd.concat([df1, df2, df3], axis=1, ignore_index=True)
print(res)
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'], index=[1, 2, 3])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['b', 'c', 'd', 'e'], index=[2, 3, 4])
res = pd.concat([df1, df2], axis=0, join='outer', sort=True)
print(res)
res = pd.concat([df1, df2], axis=0, join='inner', sort=True, ignore_index=True)
print(res)
res = pd.concat([df1, df2], axis=1, )
print(res)
res = pd.concat([df1, df2], axis=1, join_axes=[df1.index])
print(res)
df1 = pd.DataFrame(np.zeros((3, 4)), columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(np.ones((3, 4)), columns=['a', 'b', 'c', 'd'])
res = df1.append(df2, ignore_index=True)
print(res)
res = df1.append([df2, df3], ignore_index=True)
print(res)
sl = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
res = df1.append(sl, ignore_index=True)
print(res)
def merge():
left = pd.DataFrame({
'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']
})
right = pd.DataFrame({
'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']
})
print(left)
print(right)
res = pd.merge(left, right, on='key')
print(res)
left = pd.DataFrame({
'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']
})
right = pd.DataFrame({
'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']
})
res = pd.merge(left, right, on=['key1', 'key2'], how='inner')
print(res)
res = pd.merge(left, right, on=['key1', 'key2'], how='outer',indicator='indicator_column')
print(res)
res = pd.merge(left, right, on=['key1', 'key2'], how='left')
print(res)
res = pd.merge(left, right, on=['key1', 'key2'], how='right')
print(res)
res = pd.merge(left, right, left_index=True,right_index=True, how='right')
print(res)
def plot_test():
data = pd.Series(np.random.randn(1000),index=np.arange(1000))
data = data.cumsum()
data = pd.DataFrame(np.random.randn(1000,4),index=np.arange(1000),columns=list('ABCD'))
data = data.cumsum()
print(data.head())
data.plot()
ax = data.plot.scatter(x='A',y='B',color='DarkBlue', label='Class 1')
data.plot.scatter(x='A',y='C',color='DarkGreen', label='Class 2',ax=ax)
plt.show()
if __name__ == '__main__':
plot_test()
| true
| true
|
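Two calls in the file above (.ix and concat's join_axes) were deprecated at the time and have since been removed from pandas; a hedged sketch of the modern equivalents, assuming the same frames:

# Modern replacements for the deprecated calls used above (pandas >= 1.0).
import numpy as np
import pandas as pd

index = pd.date_range('20181023', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6, 4)), index=index, columns=['A', 'B', 'C', 'D'])
# df.ix['2018-10-25':'2018-10-26', 1:5]: label slice on rows, positions on columns
print(df.loc['2018-10-25':'2018-10-26', df.columns[1:5]])

df1 = pd.DataFrame(np.zeros((3, 4)), columns=list('abcd'), index=[1, 2, 3])
df2 = pd.DataFrame(np.ones((3, 4)), columns=list('bcde'), index=[2, 3, 4])
# pd.concat(..., join_axes=[df1.index]): concatenate, then reindex to df1's rows
print(pd.concat([df1, df2], axis=1).reindex(df1.index))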
f7155cefed90acb6d45f43fe242cbb9b2848c3cd
| 5,649
|
py
|
Python
|
recognition/ArcFace/sample_config.py
|
santapo/insightface
|
d61b09938bce244c4f775cee1d9d76ff641b7b0c
|
[
"MIT"
] | null | null | null |
recognition/ArcFace/sample_config.py
|
santapo/insightface
|
d61b09938bce244c4f775cee1d9d76ff641b7b0c
|
[
"MIT"
] | null | null | null |
recognition/ArcFace/sample_config.py
|
santapo/insightface
|
d61b09938bce244c4f775cee1d9d76ff641b7b0c
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
from easydict import EasyDict as edict
config = edict()
config.bn_mom = 0.9
config.workspace = 256
config.emb_size = 512
config.ckpt_embedding = True
config.net_se = 0
config.net_act = 'prelu'
config.net_unit = 3
config.net_input = 1
config.net_blocks = [1, 4, 6, 2]
config.net_output = 'E'
config.net_multiplier = 1.0
config.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
config.ce_loss = True
config.fc7_lr_mult = 1.0
config.fc7_wd_mult = 1.0
config.fc7_no_bias = False
config.max_steps = 0
config.data_rand_mirror = True
config.data_cutoff = False
config.data_color = 0
config.data_images_filter = 0
config.count_flops = False
config.memonger = False  # not working currently
# network settings
network = edict()
network.r100 = edict()
network.r100.net_name = 'fresnet'
network.r100.num_layers = 100
network.r100fc = edict()
network.r100fc.net_name = 'fresnet'
network.r100fc.num_layers = 100
network.r100fc.net_output = 'FC'
network.r50 = edict()
network.r50.net_name = 'fresnet'
network.r50.num_layers = 50
network.r50v1 = edict()
network.r50v1.net_name = 'fresnet'
network.r50v1.num_layers = 50
network.r50v1.net_unit = 1
network.d169 = edict()
network.d169.net_name = 'fdensenet'
network.d169.num_layers = 169
network.d169.per_batch_size = 64
network.d169.densenet_dropout = 0.0
network.d201 = edict()
network.d201.net_name = 'fdensenet'
network.d201.num_layers = 201
network.d201.per_batch_size = 64
network.d201.densenet_dropout = 0.0
network.y1 = edict()
network.y1.net_name = 'fmobilefacenet'
network.y1.emb_size = 128
network.y1.net_output = 'GDC'
network.y2 = edict()
network.y2.net_name = 'fmobilefacenet'
network.y2.emb_size = 256
network.y2.net_output = 'GDC'
network.y2.net_blocks = [2, 8, 16, 4]
network.m1 = edict()
network.m1.net_name = 'fmobilenet'
network.m1.emb_size = 256
network.m1.net_output = 'GDC'
network.m1.net_multiplier = 1.0
network.m05 = edict()
network.m05.net_name = 'fmobilenet'
network.m05.emb_size = 256
network.m05.net_output = 'GDC'
network.m05.net_multiplier = 0.5
network.mnas = edict()
network.mnas.net_name = 'fmnasnet'
network.mnas.emb_size = 256
network.mnas.net_output = 'GDC'
network.mnas.net_multiplier = 1.0
network.mnas05 = edict()
network.mnas05.net_name = 'fmnasnet'
network.mnas05.emb_size = 256
network.mnas05.net_output = 'GDC'
network.mnas05.net_multiplier = 0.5
network.mnas025 = edict()
network.mnas025.net_name = 'fmnasnet'
network.mnas025.emb_size = 256
network.mnas025.net_output = 'GDC'
network.mnas025.net_multiplier = 0.25
network.vargfacenet = edict()
network.vargfacenet.net_name = 'vargfacenet'
network.vargfacenet.net_multiplier = 1.25
network.vargfacenet.emb_size = 512
network.vargfacenet.net_output = 'J'
# dataset settings
dataset = edict()
dataset.emore = edict()
dataset.emore.dataset = 'emore'
dataset.emore.dataset_path = '../datasets/faces_emore'
dataset.emore.num_classes = 85742
dataset.emore.image_shape = (112, 112, 3)
dataset.emore.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
dataset.retina = edict()
dataset.retina.dataset = 'retina'
dataset.retina.dataset_path = '../datasets/ms1m-retinaface-t1'
dataset.retina.num_classes = 93431
dataset.retina.image_shape = (112, 112, 3)
dataset.retina.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
loss = edict()
loss.softmax = edict()
loss.softmax.loss_name = 'softmax'
loss.nsoftmax = edict()
loss.nsoftmax.loss_name = 'margin_softmax'
loss.nsoftmax.loss_s = 64.0
loss.nsoftmax.loss_m1 = 1.0
loss.nsoftmax.loss_m2 = 0.0
loss.nsoftmax.loss_m3 = 0.0
loss.arcface = edict()
loss.arcface.loss_name = 'margin_softmax'
loss.arcface.loss_s = 64.0
loss.arcface.loss_m1 = 1.0
loss.arcface.loss_m2 = 0.5
loss.arcface.loss_m3 = 0.0
loss.cosface = edict()
loss.cosface.loss_name = 'margin_softmax'
loss.cosface.loss_s = 64.0
loss.cosface.loss_m1 = 1.0
loss.cosface.loss_m2 = 0.0
loss.cosface.loss_m3 = 0.35
loss.combined = edict()
loss.combined.loss_name = 'margin_softmax'
loss.combined.loss_s = 64.0
loss.combined.loss_m1 = 1.0
loss.combined.loss_m2 = 0.3
loss.combined.loss_m3 = 0.2
loss.triplet = edict()
loss.triplet.loss_name = 'triplet'
loss.triplet.images_per_identity = 5
loss.triplet.triplet_alpha = 0.3
loss.triplet.triplet_bag_size = 7200
loss.triplet.triplet_max_ap = 0.0
loss.triplet.per_batch_size = 60
loss.triplet.lr = 0.05
loss.atriplet = edict()
loss.atriplet.loss_name = 'atriplet'
loss.atriplet.images_per_identity = 5
loss.atriplet.triplet_alpha = 0.35
loss.atriplet.triplet_bag_size = 7200
loss.atriplet.triplet_max_ap = 0.0
loss.atriplet.per_batch_size = 60
loss.atriplet.lr = 0.05
# default settings
default = edict()
# default network
default.network = 'r100'
default.pretrained = ''
default.pretrained_epoch = 1
# default dataset
default.dataset = 'emore'
default.loss = 'arcface'
default.frequent = 20
default.verbose = 2000
default.kvstore = 'device'
default.end_epoch = 10000
default.lr = 0.1
default.wd = 0.0005
default.mom = 0.9
default.per_batch_size = 128
default.ckpt = 3
default.lr_steps = '100000,160000,220000'
default.models_root = './models'
def generate_config(_network, _dataset, _loss):
for k, v in loss[_loss].items():
config[k] = v
if k in default:
default[k] = v
for k, v in network[_network].items():
config[k] = v
if k in default:
default[k] = v
for k, v in dataset[_dataset].items():
config[k] = v
if k in default:
default[k] = v
config.loss = _loss
config.network = _network
config.dataset = _dataset
config.num_workers = 1
if 'DMLC_NUM_WORKER' in os.environ:
config.num_workers = int(os.environ['DMLC_NUM_WORKER'])
| 25.561086
| 63
| 0.738007
|
import numpy as np
import os
from easydict import EasyDict as edict
config = edict()
config.bn_mom = 0.9
config.workspace = 256
config.emb_size = 512
config.ckpt_embedding = True
config.net_se = 0
config.net_act = 'prelu'
config.net_unit = 3
config.net_input = 1
config.net_blocks = [1, 4, 6, 2]
config.net_output = 'E'
config.net_multiplier = 1.0
config.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
config.ce_loss = True
config.fc7_lr_mult = 1.0
config.fc7_wd_mult = 1.0
config.fc7_no_bias = False
config.max_steps = 0
config.data_rand_mirror = True
config.data_cutoff = False
config.data_color = 0
config.data_images_filter = 0
config.count_flops = False
config.memonger = False
network = edict()
network.r100 = edict()
network.r100.net_name = 'fresnet'
network.r100.num_layers = 100
network.r100fc = edict()
network.r100fc.net_name = 'fresnet'
network.r100fc.num_layers = 100
network.r100fc.net_output = 'FC'
network.r50 = edict()
network.r50.net_name = 'fresnet'
network.r50.num_layers = 50
network.r50v1 = edict()
network.r50v1.net_name = 'fresnet'
network.r50v1.num_layers = 50
network.r50v1.net_unit = 1
network.d169 = edict()
network.d169.net_name = 'fdensenet'
network.d169.num_layers = 169
network.d169.per_batch_size = 64
network.d169.densenet_dropout = 0.0
network.d201 = edict()
network.d201.net_name = 'fdensenet'
network.d201.num_layers = 201
network.d201.per_batch_size = 64
network.d201.densenet_dropout = 0.0
network.y1 = edict()
network.y1.net_name = 'fmobilefacenet'
network.y1.emb_size = 128
network.y1.net_output = 'GDC'
network.y2 = edict()
network.y2.net_name = 'fmobilefacenet'
network.y2.emb_size = 256
network.y2.net_output = 'GDC'
network.y2.net_blocks = [2, 8, 16, 4]
network.m1 = edict()
network.m1.net_name = 'fmobilenet'
network.m1.emb_size = 256
network.m1.net_output = 'GDC'
network.m1.net_multiplier = 1.0
network.m05 = edict()
network.m05.net_name = 'fmobilenet'
network.m05.emb_size = 256
network.m05.net_output = 'GDC'
network.m05.net_multiplier = 0.5
network.mnas = edict()
network.mnas.net_name = 'fmnasnet'
network.mnas.emb_size = 256
network.mnas.net_output = 'GDC'
network.mnas.net_multiplier = 1.0
network.mnas05 = edict()
network.mnas05.net_name = 'fmnasnet'
network.mnas05.emb_size = 256
network.mnas05.net_output = 'GDC'
network.mnas05.net_multiplier = 0.5
network.mnas025 = edict()
network.mnas025.net_name = 'fmnasnet'
network.mnas025.emb_size = 256
network.mnas025.net_output = 'GDC'
network.mnas025.net_multiplier = 0.25
network.vargfacenet = edict()
network.vargfacenet.net_name = 'vargfacenet'
network.vargfacenet.net_multiplier = 1.25
network.vargfacenet.emb_size = 512
network.vargfacenet.net_output = 'J'
dataset = edict()
dataset.emore = edict()
dataset.emore.dataset = 'emore'
dataset.emore.dataset_path = '../datasets/faces_emore'
dataset.emore.num_classes = 85742
dataset.emore.image_shape = (112, 112, 3)
dataset.emore.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
dataset.retina = edict()
dataset.retina.dataset = 'retina'
dataset.retina.dataset_path = '../datasets/ms1m-retinaface-t1'
dataset.retina.num_classes = 93431
dataset.retina.image_shape = (112, 112, 3)
dataset.retina.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
loss = edict()
loss.softmax = edict()
loss.softmax.loss_name = 'softmax'
loss.nsoftmax = edict()
loss.nsoftmax.loss_name = 'margin_softmax'
loss.nsoftmax.loss_s = 64.0
loss.nsoftmax.loss_m1 = 1.0
loss.nsoftmax.loss_m2 = 0.0
loss.nsoftmax.loss_m3 = 0.0
loss.arcface = edict()
loss.arcface.loss_name = 'margin_softmax'
loss.arcface.loss_s = 64.0
loss.arcface.loss_m1 = 1.0
loss.arcface.loss_m2 = 0.5
loss.arcface.loss_m3 = 0.0
loss.cosface = edict()
loss.cosface.loss_name = 'margin_softmax'
loss.cosface.loss_s = 64.0
loss.cosface.loss_m1 = 1.0
loss.cosface.loss_m2 = 0.0
loss.cosface.loss_m3 = 0.35
loss.combined = edict()
loss.combined.loss_name = 'margin_softmax'
loss.combined.loss_s = 64.0
loss.combined.loss_m1 = 1.0
loss.combined.loss_m2 = 0.3
loss.combined.loss_m3 = 0.2
loss.triplet = edict()
loss.triplet.loss_name = 'triplet'
loss.triplet.images_per_identity = 5
loss.triplet.triplet_alpha = 0.3
loss.triplet.triplet_bag_size = 7200
loss.triplet.triplet_max_ap = 0.0
loss.triplet.per_batch_size = 60
loss.triplet.lr = 0.05
loss.atriplet = edict()
loss.atriplet.loss_name = 'atriplet'
loss.atriplet.images_per_identity = 5
loss.atriplet.triplet_alpha = 0.35
loss.atriplet.triplet_bag_size = 7200
loss.atriplet.triplet_max_ap = 0.0
loss.atriplet.per_batch_size = 60
loss.atriplet.lr = 0.05
default = edict()
default.network = 'r100'
default.pretrained = ''
default.pretrained_epoch = 1
default.dataset = 'emore'
default.loss = 'arcface'
default.frequent = 20
default.verbose = 2000
default.kvstore = 'device'
default.end_epoch = 10000
default.lr = 0.1
default.wd = 0.0005
default.mom = 0.9
default.per_batch_size = 128
default.ckpt = 3
default.lr_steps = '100000,160000,220000'
default.models_root = './models'
def generate_config(_network, _dataset, _loss):
for k, v in loss[_loss].items():
config[k] = v
if k in default:
default[k] = v
for k, v in network[_network].items():
config[k] = v
if k in default:
default[k] = v
for k, v in dataset[_dataset].items():
config[k] = v
if k in default:
default[k] = v
config.loss = _loss
config.network = _network
config.dataset = _dataset
config.num_workers = 1
if 'DMLC_NUM_WORKER' in os.environ:
config.num_workers = int(os.environ['DMLC_NUM_WORKER'])
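# Illustrative usage sketch (not part of the original file): merge one
# network/dataset/loss combination into the global `config`, e.g. with the
# defaults declared above:
#     generate_config(default.network, default.dataset, default.loss)
#     # afterwards, e.g., config.loss_m2 == 0.5 (arcface) and
#     # config.num_classes == 85742 (emore)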
| true
| true
|
f7155df326b76283c65218da1b03afe376a21473
| 540
|
py
|
Python
|
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/wonderworks-33344
|
42cb504b280e4ad33598ae0d5ac64f8654e28205
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/wonderworks-33344
|
42cb504b280e4ad33598ae0d5ac64f8654e28205
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/migrations/0001_load_initial_data.py
|
crowdbotics-apps/wonderworks-33344
|
42cb504b280e4ad33598ae0d5ac64f8654e28205
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "wonderworks-33344.botics.co"
site_params = {
"name": "Wonderworks",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
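# Note (illustrative, not part of the original migration): `RunPython` also
# accepts a reverse callable, so passing Django's `migrations.RunPython.noop`
# as a second argument would make this data migration reversible:
#     migrations.RunPython(create_site, migrations.RunPython.noop)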
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 20.769231
| 61
| 0.659259
|
from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "wonderworks-33344.botics.co"
site_params = {
"name": "Wonderworks",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| true
| true
|
f7155e52ae7e7e0208d80c721374978ecf74f3d8
| 6,483
|
py
|
Python
|
src/HCTool-sg-1.py
|
iShog/huaweicloudTool
|
56b231d4707570e7690b68d31b3bfd8920e995bc
|
[
"MIT"
] | null | null | null |
src/HCTool-sg-1.py
|
iShog/huaweicloudTool
|
56b231d4707570e7690b68d31b3bfd8920e995bc
|
[
"MIT"
] | null | null | null |
src/HCTool-sg-1.py
|
iShog/huaweicloudTool
|
56b231d4707570e7690b68d31b3bfd8920e995bc
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.http.http_config import HttpConfig
"""
# 导入指定云服务的库 huaweicloudsdk{service}
"""
from huaweicloudsdkvpc.v2 import *
from huaweicloudsdkvpc.v2.region.vpc_region import VpcRegion
"""
# 导入其它依赖库
"""
from urllib.request import urlopen
from json import load, loads
from Crypto.Cipher import AES
import time, os, base64, sys, getopt
"""
# 导入IPy
# --(Class and tools for handling of IPv4 and IPv6 addresses and networks)
#用于判断当前公网IP地址是IPv4 or IPv6
"""
import IPy
aes_key_from_cli = ''
ip_from_cli = ''
"""
# 从命令行获取解密秘钥、指定的IP地址等信息
"""
def start(argv):
if not argv:
print('Get usage info by # HCTool-XXX.py -h')
sys.exit(2)
try:
opts, args = getopt.getopt(argv, "hk:i:", ["help", "key=", "ip="])
except getopt.GetoptError:
print('Get usage info by # HCTool-XXX.py -h')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print('# HCTool-XXX.py -k <aes_key> -i <ip_addr> OR \n# HCTool-XXX.py --key=<aes_key> --ip=<ip_addr>')
sys.exit()
elif opt in ("-k", "--key"):
global aes_key_from_cli
aes_key_from_cli = arg
if aes_key_from_cli == '':
print({'create_security_group_rule_tool: error@start()': 'ERROR: key must not be NULL!'})
sys.exit(2)
else:
print({'create_security_group_rule_tool: message@start()': 'key is: ' + aes_key_from_cli})
elif opt in ("-i", "--ip"):
global ip_from_cli
ip_from_cli = arg
if ip_from_cli != '':
print({'create_security_group_rule_tool: message@start()': 'ip addr is: ' + ip_from_cli})
else:
print({'create_security_group_rule_tool: error@start()': 'ERROR: ip is NULL!'})
sys.exit(2)
"""
# en_val为经过base64编码后的密文string
"""
def decrypt_env(en_val):
(aes_key, aes_iv, aes_mode) = (aes_key_from_cli, 'knx5FQtE4XOQ', AES.MODE_GCM)
if aes_key_from_cli == '':
print({'create_security_group_rule_tool: error@decrypt_env()': 'ERROR: key must not be NULL!'})
sys.exit(2)
aes_de_instance = AES.new(aes_key.encode('utf-8'), aes_mode, aes_iv.encode('utf-8'))
plain_val = aes_de_instance.decrypt(base64.b64decode(en_val.encode('utf-8'))).decode('utf-8')
return plain_val
"""
# 获取个人云环境配置
# en_cred_dict = {'EN_AK':' ','EN_SK':' ','EN_ProjectID':' ','Region':' '}
"""
def get_cred_config():
en_env_data = os.getenv('EN_CRED_JSON_STR')
en_cred_dict = loads(en_env_data)
en_ak = en_cred_dict['EN_AK']
en_sk = en_cred_dict['EN_SK']
en_project_id = en_cred_dict['EN_ProjectID']
ak = decrypt_env(en_ak)
sk = decrypt_env(en_sk)
project_id = decrypt_env(en_project_id)
region = en_cred_dict['Region']
security_group_id = en_cred_dict['SecurityGroupID']
endpoint = "https://" + "vpc." + region + ".myhwclouds.com"
print({'create_security_group_rule_tool: message@get_cred_config()': 'current endpoint is: ' + endpoint})
return ak, sk, project_id, region, endpoint, security_group_id
"""
# demo 列出所有VPC
"""
def list_vpc(client):
try:
request = ListVpcsRequest()
response = client.list_vpcs(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
"""
# demo 列出所有SecurityGroupRules
"""
def list_sg(client):
try:
request = ListSecurityGroupRulesRequest()
response = client.list_security_group_rules(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
"""
# 创建放通通当前工具所在主机公网IP的安全组
"""
def get_pub_ip_from_inet():
ip_from_inet = ''
    for num in range(1, 4):
if num == 1:
ip_from_inet = load(urlopen('https://httpbin.org/ip'))['origin']
elif num == 2:
ip_from_inet = load(urlopen('https://api.ipify.org/?format=json'))['ip']
else:
ip_from_inet = load(urlopen('https://jsonip.com'))['ip']
if IPy.IP(ip_from_inet).version() == 4:
break
return ip_from_inet
"""
# 创建放通通当前工具所在主机公网IP的安全组
"""
def create_sg(client, security_group_id):
global ip_from_cli
cur_ip = ip_from_cli
if cur_ip == '':
cur_ip = get_pub_ip_from_inet()
print({'create_security_group_rule_tool: message@create_sg()': 'current public network IP is: ' + cur_ip})
try:
if IPy.IP(cur_ip).version() == 6:
ethertype = 'IPv6'
remote_ip_prefix = cur_ip
elif IPy.IP(cur_ip).version() == 4:
ethertype = 'IPv4'
remote_ip_prefix = cur_ip
else:
print({'create_security_group_rule_tool: error@create_sg()': 'not IPv4 nor IPv6: ' + cur_ip})
sys.exit(2)
except ValueError:
        print({'create_security_group_rule_tool: error@create_sg()': 'invalid IP addr: ' + cur_ip})
sys.exit(2)
loca_ltime = time.asctime(time.localtime(time.time()))
try:
rule = CreateSecurityGroupRuleOption(security_group_id, description=loca_ltime, direction="ingress",
ethertype=ethertype, remote_ip_prefix=remote_ip_prefix)
body = CreateSecurityGroupRuleRequestBody(rule)
request = CreateSecurityGroupRuleRequest(body)
response = client.create_security_group_rule(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
if __name__ == "__main__":
start(sys.argv[1:])
(ak, sk, project_id, region, endpoint, security_group_id) = get_cred_config()
config = HttpConfig.get_default_config()
config.ignore_ssl_verification = False
credentials = BasicCredentials(ak, sk, project_id)
vpc_client = VpcClient.new_builder(VpcClient) \
.with_http_config(config) \
.with_credentials(credentials) \
.with_region(VpcRegion.value_of(region)) \
.build()
# list_vpc(vpc_client)
# list_sg(vpc_client)
create_sg(vpc_client, security_group_id)
| 29.334842
| 114
| 0.637051
|
from huaweicloudsdkcore.auth.credentials import BasicCredentials
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.http.http_config import HttpConfig
from huaweicloudsdkvpc.v2 import *
from huaweicloudsdkvpc.v2.region.vpc_region import VpcRegion
from urllib.request import urlopen
from json import load, loads
from Crypto.Cipher import AES
import time, os, base64, sys, getopt
import IPy
aes_key_from_cli = ''
ip_from_cli = ''
def start(argv):
if not argv:
print('Get usage info by # HCTool-XXX.py -h')
sys.exit(2)
try:
opts, args = getopt.getopt(argv, "hk:i:", ["help", "key=", "ip="])
except getopt.GetoptError:
print('Get usage info by # HCTool-XXX.py -h')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print('# HCTool-XXX.py -k <aes_key> -i <ip_addr> OR \n# HCTool-XXX.py --key=<aes_key> --ip=<ip_addr>')
sys.exit()
elif opt in ("-k", "--key"):
global aes_key_from_cli
aes_key_from_cli = arg
if aes_key_from_cli == '':
print({'create_security_group_rule_tool: error@start()': 'ERROR: key must not be NULL!'})
sys.exit(2)
else:
print({'create_security_group_rule_tool: message@start()': 'key is: ' + aes_key_from_cli})
elif opt in ("-i", "--ip"):
global ip_from_cli
ip_from_cli = arg
if ip_from_cli != '':
print({'create_security_group_rule_tool: message@start()': 'ip addr is: ' + ip_from_cli})
else:
print({'create_security_group_rule_tool: error@start()': 'ERROR: ip is NULL!'})
sys.exit(2)
def decrypt_env(en_val):
(aes_key, aes_iv, aes_mode) = (aes_key_from_cli, 'knx5FQtE4XOQ', AES.MODE_GCM)
if aes_key_from_cli == '':
print({'create_security_group_rule_tool: error@decrypt_env()': 'ERROR: key must not be NULL!'})
sys.exit(2)
aes_de_instance = AES.new(aes_key.encode('utf-8'), aes_mode, aes_iv.encode('utf-8'))
plain_val = aes_de_instance.decrypt(base64.b64decode(en_val.encode('utf-8'))).decode('utf-8')
return plain_val
def get_cred_config():
en_env_data = os.getenv('EN_CRED_JSON_STR')
en_cred_dict = loads(en_env_data)
en_ak = en_cred_dict['EN_AK']
en_sk = en_cred_dict['EN_SK']
en_project_id = en_cred_dict['EN_ProjectID']
ak = decrypt_env(en_ak)
sk = decrypt_env(en_sk)
project_id = decrypt_env(en_project_id)
region = en_cred_dict['Region']
security_group_id = en_cred_dict['SecurityGroupID']
endpoint = "https://" + "vpc." + region + ".myhwclouds.com"
print({'create_security_group_rule_tool: message@get_cred_config()': 'current endpoint is: ' + endpoint})
return ak, sk, project_id, region, endpoint, security_group_id
def list_vpc(client):
try:
request = ListVpcsRequest()
response = client.list_vpcs(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
def list_sg(client):
try:
request = ListSecurityGroupRulesRequest()
response = client.list_security_group_rules(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
def get_pub_ip_from_inet():
ip_from_inet = ''
    for num in range(1, 4):
if num == 1:
ip_from_inet = load(urlopen('https://httpbin.org/ip'))['origin']
elif num == 2:
ip_from_inet = load(urlopen('https://api.ipify.org/?format=json'))['ip']
else:
ip_from_inet = load(urlopen('https://jsonip.com'))['ip']
if IPy.IP(ip_from_inet).version() == 4:
break
return ip_from_inet
def create_sg(client, security_group_id):
global ip_from_cli
cur_ip = ip_from_cli
if cur_ip == '':
cur_ip = get_pub_ip_from_inet()
print({'create_security_group_rule_tool: message@create_sg()': 'current public network IP is: ' + cur_ip})
try:
if IPy.IP(cur_ip).version() == 6:
ethertype = 'IPv6'
remote_ip_prefix = cur_ip
elif IPy.IP(cur_ip).version() == 4:
ethertype = 'IPv4'
remote_ip_prefix = cur_ip
else:
print({'create_security_group_rule_tool: error@create_sg()': 'not IPv4 nor IPv6: ' + cur_ip})
sys.exit(2)
except ValueError:
        print({'create_security_group_rule_tool: error@create_sg()': 'invalid IP addr: ' + cur_ip})
sys.exit(2)
loca_ltime = time.asctime(time.localtime(time.time()))
try:
rule = CreateSecurityGroupRuleOption(security_group_id, description=loca_ltime, direction="ingress",
ethertype=ethertype, remote_ip_prefix=remote_ip_prefix)
body = CreateSecurityGroupRuleRequestBody(rule)
request = CreateSecurityGroupRuleRequest(body)
response = client.create_security_group_rule(request)
print(response)
except exceptions.ClientRequestException as e:
print(e.status_code)
print(e.request_id)
print(e.error_code)
print(e.error_msg)
if __name__ == "__main__":
start(sys.argv[1:])
(ak, sk, project_id, region, endpoint, security_group_id) = get_cred_config()
config = HttpConfig.get_default_config()
config.ignore_ssl_verification = False
credentials = BasicCredentials(ak, sk, project_id)
vpc_client = VpcClient.new_builder(VpcClient) \
.with_http_config(config) \
.with_credentials(credentials) \
.with_region(VpcRegion.value_of(region)) \
.build()
create_sg(vpc_client, security_group_id)
| true
| true
|
f7155f2dc872416bc9d84bdcf46fa337e5c2a7ff
| 2,567
|
py
|
Python
|
aiida/orm/nodes/data/base.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | 1
|
2019-07-31T04:08:13.000Z
|
2019-07-31T04:08:13.000Z
|
aiida/orm/nodes/data/base.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/orm/nodes/data/base.py
|
PercivalN/aiida-core
|
b215ed5a7ce9342bb7f671b67e95c1f474cc5940
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`Data` sub class to be used as a base for data containers that represent base python data types."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import abc
import six
try:
from functools import singledispatch # Python 3.4+
except ImportError:
from singledispatch import singledispatch
from .data import Data
__all__ = ('BaseType', 'to_aiida_type')
@singledispatch
def to_aiida_type(value):
"""
Turns basic Python types (str, int, float, bool) into the corresponding AiiDA types.
"""
raise TypeError("Cannot convert value of type {} to AiiDA type.".format(type(value)))
@six.add_metaclass(abc.ABCMeta)
class BaseType(Data):
"""`Data` sub class to be used as a base for data containers that represent base python data types."""
def __init__(self, *args, **kwargs):
try:
getattr(self, '_type')
except AttributeError:
raise RuntimeError('Derived class must define the `_type` class member')
super(BaseType, self).__init__(**kwargs)
try:
value = args[0]
except IndexError:
value = self._type() # pylint: disable=no-member
self.value = value
@property
def value(self):
return self.get_attribute('value', None)
@value.setter
def value(self, value):
self.set_attribute('value', self._type(value)) # pylint: disable=no-member
def __str__(self):
return super(BaseType, self).__str__() + ' value: {}'.format(self.value)
def __eq__(self, other):
if isinstance(other, BaseType):
return self.value == other.value
return self.value == other
def __ne__(self, other):
if isinstance(other, BaseType):
return self.value != other.value
return self.value != other
def new(self, value=None):
return self.__class__(value)
| 32.910256
| 106
| 0.586287
| true
| true
|
|
f7155fab8edcad4a0881545f1487a35076b0b70c
| 1,983
|
py
|
Python
|
matchms/exporting/save_as_json.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
matchms/exporting/save_as_json.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
matchms/exporting/save_as_json.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
import json
from typing import List
import numpy
from ..Spectrum import Spectrum
def save_as_json(spectrums: List[Spectrum], filename: str):
"""Save spectrum(s) as json file.
:py:attr:`~matchms.Spectrum.losses` of spectrum will not be saved.
Example:
.. code-block:: python
import numpy
from matchms import Spectrum
from matchms.exporting import save_as_json
# Create dummy spectrum
spectrum = Spectrum(mz=numpy.array([100, 200, 300], dtype="float"),
intensities=numpy.array([10, 10, 500], dtype="float"),
metadata={"charge": -1,
"inchi": '"InChI=1S/C6H12"',
"precursor_mz": 222.2})
# Write spectrum to test file
save_as_json(spectrum, "test.json")
Parameters
----------
spectrums:
Expected input is a list of :py:class:`~matchms.Spectrum.Spectrum` objects.
filename:
Provide filename to save spectrum(s).
"""
if not isinstance(spectrums, list):
        # Assume that the input was a single Spectrum
spectrums = [spectrums]
# Write to json file
with open(filename, 'w', encoding="utf-8") as fout:
json.dump(spectrums, fout, cls=SpectrumJSONEncoder)
class SpectrumJSONEncoder(json.JSONEncoder):
# See https://github.com/PyCQA/pylint/issues/414 for reference
def default(self, o):
"""JSON Encoder which can encode a :py:class:`~matchms.Spectrum.Spectrum` object"""
if isinstance(o, Spectrum):
spec = o.clone()
peaks_list = numpy.vstack((spec.peaks.mz, spec.peaks.intensities)).T.tolist()
# Convert matchms.Spectrum() into dictionaries
spectrum_dict = {key: spec.metadata[key] for key in spec.metadata}
spectrum_dict["peaks_json"] = peaks_list
return spectrum_dict
return json.JSONEncoder.default(self, o)
| 33.610169
| 91
| 0.606657
|
import json
from typing import List
import numpy
from ..Spectrum import Spectrum
def save_as_json(spectrums: List[Spectrum], filename: str):
if not isinstance(spectrums, list):
spectrums = [spectrums]
with open(filename, 'w', encoding="utf-8") as fout:
json.dump(spectrums, fout, cls=SpectrumJSONEncoder)
class SpectrumJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Spectrum):
spec = o.clone()
peaks_list = numpy.vstack((spec.peaks.mz, spec.peaks.intensities)).T.tolist()
spectrum_dict = {key: spec.metadata[key] for key in spec.metadata}
spectrum_dict["peaks_json"] = peaks_list
return spectrum_dict
return json.JSONEncoder.default(self, o)
| true
| true
|
f7156064ea7c64a030e87c3aff9f1fc1fc6f9c9f
| 455
|
py
|
Python
|
cvat/apps/git/migrations/0002_auto_20190123_1305.py
|
raunilillemets/cvat
|
c083b5d3a60270121abc3f3fe596ff94ae0eb60f
|
[
"MIT"
] | 2
|
2020-03-16T03:41:27.000Z
|
2020-03-16T03:53:01.000Z
|
cvat/apps/git/migrations/0002_auto_20190123_1305.py
|
raunilillemets/cvat
|
c083b5d3a60270121abc3f3fe596ff94ae0eb60f
|
[
"MIT"
] | 29
|
2020-01-28T23:08:18.000Z
|
2022-03-12T00:05:33.000Z
|
cvat/apps/git/migrations/0002_auto_20190123_1305.py
|
raunilillemets/cvat
|
c083b5d3a60270121abc3f3fe596ff94ae0eb60f
|
[
"MIT"
] | 7
|
2021-07-27T09:15:22.000Z
|
2022-03-29T21:20:00.000Z
|
# Generated by Django 2.1.3 on 2019-01-23 10:05
import cvat.apps.git.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('git', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='gitdata',
name='status',
field=models.CharField(default=cvat.apps.git.models.GitStatusChoice('!sync'), max_length=20),
),
]
| 22.75
| 105
| 0.621978
|
import cvat.apps.git.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('git', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='gitdata',
name='status',
field=models.CharField(default=cvat.apps.git.models.GitStatusChoice('!sync'), max_length=20),
),
]
| true
| true
|
f71560690c6142fd8314899effc45720b4df6fdb
| 1,016
|
py
|
Python
|
Instagram/urls.py
|
samsoluoch/Instagram
|
ea6305c0592c8efe173cf3e6b5f1c477650678db
|
[
"MIT"
] | null | null | null |
Instagram/urls.py
|
samsoluoch/Instagram
|
ea6305c0592c8efe173cf3e6b5f1c477650678db
|
[
"MIT"
] | null | null | null |
Instagram/urls.py
|
samsoluoch/Instagram
|
ea6305c0592c8efe173cf3e6b5f1c477650678db
|
[
"MIT"
] | null | null | null |
"""instagram URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('clone.urls')),
url(r'^', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
url(r'^tinymce/', include('tinymce.urls')),
]
| 37.62963
| 79
| 0.687992
|
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('clone.urls')),
url(r'^', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
url(r'^tinymce/', include('tinymce.urls')),
]
| true
| true
|
f7156125912203ecb4aa0ec75b8a4e46334dc991
| 7,299
|
py
|
Python
|
setuper desktop app/gui/mainwindow/menubar.py
|
dragondjf/CloudSetuper
|
31aefe629f7f2d59d287981eda3e4e618ace9e9f
|
[
"MIT"
] | 22
|
2015-01-08T12:54:20.000Z
|
2021-05-16T04:15:45.000Z
|
setuper desktop app/gui/mainwindow/menubar.py
|
dragondjf/CloudSetuper
|
31aefe629f7f2d59d287981eda3e4e618ace9e9f
|
[
"MIT"
] | null | null | null |
setuper desktop app/gui/mainwindow/menubar.py
|
dragondjf/CloudSetuper
|
31aefe629f7f2d59d287981eda3e4e618ace9e9f
|
[
"MIT"
] | 11
|
2015-01-25T01:26:45.000Z
|
2021-08-18T01:40:40.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from .guiconfig import collectView
class MenuBar(QtWidgets.QMenuBar):
viewID = "MenuBar"
@collectView
def __init__(self, parent):
super(MenuBar, self).__init__()
self.parent = parent
self.actionlists = {}
self.menusettings = {
'visual': False,
'menus': [
{
'name': self.tr('File'),
'trigger': 'File',
'actions': [
{
'name': self.tr('Settings'),
'icon': u'',
'shortcut': u'',
'trigger': 'Settings',
},
{
'name': self.tr('Language'),
'trigger': 'Language',
'type': 'submenu',
'actions': [
{
'name': 'English',
'icon': u'',
'shortcut': u'',
'trigger': 'English',
"checkable": True
},
{
'name': 'Chinese',
'icon': u'',
'shortcut': u'',
'trigger': 'Chinese',
"checkable": True
},
]
},
{
'name': self.tr('Exit'),
'icon': u'',
'shortcut': u'',
'trigger': 'Exit',
},
]
},
{
'name': self.tr('Screen'),
'trigger': 'Screen',
'actions': [
{
'name': self.tr('MFD3'),
'icon': u'',
'shortcut': u'',
'trigger': 'MFD3',
"checkable": True
},
{
'name': self.tr('MFD4'),
'icon': u'',
'shortcut': u'',
'trigger': 'MFD4',
"checkable": True
},
]
},
{
'name': self.tr('Device'),
'trigger': 'Device',
'actions': [
# {
# 'name': self.tr('Enable Bluetooth'),
# 'icon': u'',
# 'shortcut': u'',
# 'trigger': 'EnableBluetooth',
# },
{
'name': self.tr('Search Devices'),
'icon': u'',
'shortcut': u'',
'trigger': 'SearchDevices',
},
]
},
{
'name': self.tr('View'),
'trigger': 'View',
'actions': [
]
},
{
'name': self.tr('Report'),
'trigger': 'Test Rig',
'actions': [
{
'name': self.tr('Report'),
'icon': u'',
'shortcut': u'',
'trigger': 'TestRigAll',
},
{
'name': self.tr('Start'),
'icon': u'',
'shortcut': u'',
'trigger': 'TestRig',
}
]
},
{
'name': self.tr(' Help '),
'trigger': 'Help',
'actions': [
{
'name': self.tr('About ALE'),
'icon': u'',
'shortcut': u'',
'trigger': 'About',
},
{
'name': self.tr('Feedback to us'),
'icon': u'',
'shortcut': u'',
'trigger': 'Feedbackus',
},
]
}
]
}
self.creatMenus(self.menusettings)
def creatMenus(self, menusettings):
self.setVisible(menusettings['visual'])
for menu in menusettings['menus']:
setattr(
self,
'%smenu' % menu['trigger'],
self.addMenu(u'%s' % menu['name'])
)
submenu = getattr(self, '%smenu' % menu['trigger'])
for menuaction in menu['actions']:
if 'type' in menuaction and menuaction['type'] == "submenu":
self.createSubAction(menu['trigger'], menuaction)
else:
self.creatAction(submenu, menuaction)
def createSubAction(self, pmenu_name, menu):
childmenu = getattr(self, '%smenu' % pmenu_name)
submenu = childmenu.addMenu(u'%s' % menu['name'])
setattr(
self,
'%smenu' % menu['trigger'],
submenu)
for menuaction in menu['actions']:
self.creatAction(submenu, menuaction)
def creatAction(self, submenu, menuaction):
if 'checkable' in menuaction:
setattr(
self,
'%sAction' % menuaction['trigger'],
QtWidgets.QAction(
QtGui.QIcon(QtGui.QPixmap(menuaction['icon'])),
u'%s' % menuaction['name'],
self,
checkable=menuaction['checkable']
)
)
else:
setattr(
self,
'%sAction' % menuaction['trigger'],
QtWidgets.QAction(
QtGui.QIcon(QtGui.QPixmap(menuaction['icon'])),
u'%s' % menuaction['name'],
self,
)
)
action = getattr(self, '%sAction' % menuaction['trigger'])
action.setShortcut(QtGui.QKeySequence(menuaction['shortcut']))
submenu.addAction(action)
self.actionlists.update({menuaction['trigger']: action})
if hasattr(self.parent, 'action%s' % menuaction['trigger']):
action.triggered.connect(
getattr(self.parent, 'action%s' % menuaction['trigger'])
)
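# Illustrative usage sketch (assumption, not part of the original file):
# the menu bar expects a parent window that defines matching `action<Trigger>`
# slots; `main_window` below is hypothetical:
#     menubar = MenuBar(main_window)      # connects e.g. main_window.actionExit
#     main_window.setMenuBar(menubar)     # QMainWindow.setMenuBar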
| 35.779412
| 76
| 0.307028
|
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from .guiconfig import collectView
class MenuBar(QtWidgets.QMenuBar):
viewID = "MenuBar"
@collectView
def __init__(self, parent):
super(MenuBar, self).__init__()
self.parent = parent
self.actionlists = {}
self.menusettings = {
'visual': False,
'menus': [
{
'name': self.tr('File'),
'trigger': 'File',
'actions': [
{
'name': self.tr('Settings'),
'icon': u'',
'shortcut': u'',
'trigger': 'Settings',
},
{
'name': self.tr('Language'),
'trigger': 'Language',
'type': 'submenu',
'actions': [
{
'name': 'English',
'icon': u'',
'shortcut': u'',
'trigger': 'English',
"checkable": True
},
{
'name': 'Chinese',
'icon': u'',
'shortcut': u'',
'trigger': 'Chinese',
"checkable": True
},
]
},
{
'name': self.tr('Exit'),
'icon': u'',
'shortcut': u'',
'trigger': 'Exit',
},
]
},
{
'name': self.tr('Screen'),
'trigger': 'Screen',
'actions': [
{
'name': self.tr('MFD3'),
'icon': u'',
'shortcut': u'',
'trigger': 'MFD3',
"checkable": True
},
{
'name': self.tr('MFD4'),
'icon': u'',
'shortcut': u'',
'trigger': 'MFD4',
"checkable": True
},
]
},
{
'name': self.tr('Device'),
'trigger': 'Device',
'actions': [
{
'name': self.tr('Search Devices'),
'icon': u'',
'shortcut': u'',
'trigger': 'SearchDevices',
},
]
},
{
'name': self.tr('View'),
'trigger': 'View',
'actions': [
]
},
{
'name': self.tr('Report'),
'trigger': 'Test Rig',
'actions': [
{
'name': self.tr('Report'),
'icon': u'',
'shortcut': u'',
'trigger': 'TestRigAll',
},
{
'name': self.tr('Start'),
'icon': u'',
'shortcut': u'',
'trigger': 'TestRig',
}
]
},
{
'name': self.tr(' Help '),
'trigger': 'Help',
'actions': [
{
'name': self.tr('About ALE'),
'icon': u'',
'shortcut': u'',
'trigger': 'About',
},
{
'name': self.tr('Feedback to us'),
'icon': u'',
'shortcut': u'',
'trigger': 'Feedbackus',
},
]
}
]
}
self.creatMenus(self.menusettings)
def creatMenus(self, menusettings):
self.setVisible(menusettings['visual'])
for menu in menusettings['menus']:
setattr(
self,
'%smenu' % menu['trigger'],
self.addMenu(u'%s' % menu['name'])
)
submenu = getattr(self, '%smenu' % menu['trigger'])
for menuaction in menu['actions']:
if 'type' in menuaction and menuaction['type'] == "submenu":
self.createSubAction(menu['trigger'], menuaction)
else:
self.creatAction(submenu, menuaction)
def createSubAction(self, pmenu_name, menu):
childmenu = getattr(self, '%smenu' % pmenu_name)
submenu = childmenu.addMenu(u'%s' % menu['name'])
setattr(
self,
'%smenu' % menu['trigger'],
submenu)
for menuaction in menu['actions']:
self.creatAction(submenu, menuaction)
def creatAction(self, submenu, menuaction):
if 'checkable' in menuaction:
setattr(
self,
'%sAction' % menuaction['trigger'],
QtWidgets.QAction(
QtGui.QIcon(QtGui.QPixmap(menuaction['icon'])),
u'%s' % menuaction['name'],
self,
checkable=menuaction['checkable']
)
)
else:
setattr(
self,
'%sAction' % menuaction['trigger'],
QtWidgets.QAction(
QtGui.QIcon(QtGui.QPixmap(menuaction['icon'])),
u'%s' % menuaction['name'],
self,
)
)
action = getattr(self, '%sAction' % menuaction['trigger'])
action.setShortcut(QtGui.QKeySequence(menuaction['shortcut']))
submenu.addAction(action)
self.actionlists.update({menuaction['trigger']: action})
if hasattr(self.parent, 'action%s' % menuaction['trigger']):
action.triggered.connect(
getattr(self.parent, 'action%s' % menuaction['trigger'])
)
| true
| true
|
f715614c9c22bf521a77e23832bea7384f69ed20
| 729
|
py
|
Python
|
var/spack/repos/builtin/packages/smartmontools/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/smartmontools/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/smartmontools/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Smartmontools(AutotoolsPackage):
"""S.M.A.R.T. utility toolset."""
homepage = "https://smartmontools.sourceforge.net"
url = "https://nchc.dl.sourceforge.net/project/smartmontools/smartmontools/6.6/smartmontools-6.6.tar.gz"
version('6.6', sha256='51f43d0fb064fccaf823bbe68cf0d317d0895ff895aa353b3339a3b316a53054')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix.sbin)
env.prepend_path('LD_LIBRARY_PATH', self.prefix.usr.lib)
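# Illustrative usage from the Spack CLI (assumption, not part of the recipe):
#     spack install smartmontools
#     spack load smartmontools    # setup_run_environment puts prefix/sbin on PATH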
| 36.45
| 113
| 0.742112
|
from spack.package import *
class Smartmontools(AutotoolsPackage):
homepage = "https://smartmontools.sourceforge.net"
url = "https://nchc.dl.sourceforge.net/project/smartmontools/smartmontools/6.6/smartmontools-6.6.tar.gz"
version('6.6', sha256='51f43d0fb064fccaf823bbe68cf0d317d0895ff895aa353b3339a3b316a53054')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix.sbin)
env.prepend_path('LD_LIBRARY_PATH', self.prefix.usr.lib)
| true
| true
|
f71563894bd2d01be507073cac15fd01a629492a
| 201,289
|
py
|
Python
|
pyfakefs/fake_filesystem.py
|
jcwilson/pyfakefs
|
95f15b7de426f6f6c75181f6d06abb6a75bba668
|
[
"Apache-2.0"
] | null | null | null |
pyfakefs/fake_filesystem.py
|
jcwilson/pyfakefs
|
95f15b7de426f6f6c75181f6d06abb6a75bba668
|
[
"Apache-2.0"
] | null | null | null |
pyfakefs/fake_filesystem.py
|
jcwilson/pyfakefs
|
95f15b7de426f6f6c75181f6d06abb6a75bba668
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A fake filesystem implementation for unit testing.
:Includes:
* :py:class:`FakeFile`: Provides the appearance of a real file.
* :py:class:`FakeDirectory`: Provides the appearance of a real directory.
* :py:class:`FakeFilesystem`: Provides the appearance of a real directory
hierarchy.
* :py:class:`FakeOsModule`: Uses :py:class:`FakeFilesystem` to provide a
fake :py:mod:`os` module replacement.
* :py:class:`FakeIoModule`: Uses :py:class:`FakeFilesystem` to provide a
fake ``io`` module replacement.
* :py:class:`FakePathModule`: Faked ``os.path`` module replacement.
* :py:class:`FakeFileOpen`: Faked ``file()`` and ``open()`` function
replacements.
:Usage:
>>> from pyfakefs import fake_filesystem
>>> filesystem = fake_filesystem.FakeFilesystem()
>>> os_module = fake_filesystem.FakeOsModule(filesystem)
>>> pathname = '/a/new/dir/new-file'
Create a new file object, creating parent directory objects as needed:
>>> os_module.path.exists(pathname)
False
>>> new_file = filesystem.create_file(pathname)
File objects can't be overwritten:
>>> os_module.path.exists(pathname)
True
>>> try:
... filesystem.create_file(pathname)
... except OSError as e:
... assert e.errno == errno.EEXIST, 'unexpected errno: %d' % e.errno
... assert e.strerror == 'File exists in the fake filesystem'
Remove a file object:
>>> filesystem.remove_object(pathname)
>>> os_module.path.exists(pathname)
False
Create a new file object at the previous path:
>>> beatles_file = filesystem.create_file(pathname,
... contents='Dear Prudence\\nWon\\'t you come out to play?\\n')
>>> os_module.path.exists(pathname)
True
Use the FakeFileOpen class to read fake file objects:
>>> file_module = fake_filesystem.FakeFileOpen(filesystem)
>>> for line in file_module(pathname):
... print(line.rstrip())
...
Dear Prudence
Won't you come out to play?
File objects cannot be treated like directory objects:
>>> try:
... os_module.listdir(pathname)
... except OSError as e:
... assert e.errno == errno.ENOTDIR, 'unexpected errno: %d' % e.errno
... assert e.strerror == 'Not a directory in the fake filesystem'
The FakeOsModule can list fake directory objects:
>>> os_module.listdir(os_module.path.dirname(pathname))
['new-file']
The FakeOsModule also supports stat operations:
>>> import stat
>>> stat.S_ISREG(os_module.stat(pathname).st_mode)
True
>>> stat.S_ISDIR(os_module.stat(os_module.path.dirname(pathname)).st_mode)
True
"""
import errno
import heapq
import io
import locale
import os
import sys
import time
import uuid
from collections import namedtuple
from stat import (
S_IFREG, S_IFDIR, S_ISLNK, S_IFMT, S_ISDIR, S_IFLNK, S_ISREG, S_IFSOCK
)
from pyfakefs.deprecator import Deprecator
from pyfakefs.extra_packages import use_scandir
from pyfakefs.fake_scandir import scandir, walk
from pyfakefs.helpers import (
FakeStatResult, FileBufferIO, NullFileBufferIO,
is_int_type, is_byte_string, is_unicode_string,
make_string_path, IS_WIN, to_string)
from pyfakefs import __version__ # noqa: F401 for upwards compatibility
__pychecker__ = 'no-reimportself'
PERM_READ = 0o400 # Read permission bit.
PERM_WRITE = 0o200 # Write permission bit.
PERM_EXE = 0o100 # Execute permission bit.
PERM_DEF = 0o777 # Default permission bits.
PERM_DEF_FILE = 0o666 # Default permission bits (regular file)
PERM_ALL = 0o7777 # All permission bits.
_OpenModes = namedtuple(
'open_modes',
'must_exist can_read can_write truncate append must_not_exist'
)
_OPEN_MODE_MAP = {
# mode name:(file must exist, can read, can write,
# truncate, append, must not exist)
'r': (True, True, False, False, False, False),
'w': (False, False, True, True, False, False),
'a': (False, False, True, False, True, False),
'r+': (True, True, True, False, False, False),
'w+': (False, True, True, True, False, False),
'a+': (False, True, True, False, True, False),
'x': (False, False, True, False, False, True),
'x+': (False, True, True, False, False, True)
}
if sys.platform.startswith('linux'):
    # on newer Linux systems, the default maximum recursion depth is 40
# we ignore older systems here
_MAX_LINK_DEPTH = 40
else:
# on MacOS and Windows, the maximum recursion depth is 32
_MAX_LINK_DEPTH = 32
NR_STD_STREAMS = 3
USER_ID = 1 if IS_WIN else os.getuid()
GROUP_ID = 1 if IS_WIN else os.getgid()
def set_uid(uid):
"""Set the global user id. This is used as st_uid for new files
and to differentiate between a normal user and the root user (uid 0).
For the root user, some permission restrictions are ignored.
Args:
uid: (int) the user ID of the user calling the file system functions.
"""
global USER_ID
USER_ID = uid
def set_gid(gid):
"""Set the global group id. This is only used to set st_gid for new files,
    no permission checks are performed.
Args:
gid: (int) the group ID of the user calling the file system functions.
"""
global GROUP_ID
GROUP_ID = gid
def reset_ids():
"""Set the global user ID and group ID back to default values."""
set_uid(1 if IS_WIN else os.getuid())
set_gid(1 if IS_WIN else os.getgid())
def is_root():
"""Return True if the current user is the root user."""
return USER_ID == 0
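# Illustrative sketch (not part of the original file): simulating the root
# user in a test and restoring the default ids afterwards:
#     set_uid(0)
#     assert is_root()       # permission restrictions are now mostly ignored
#     reset_ids()            # back to the real uid/gid (or 1 under Windows)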
class FakeLargeFileIoException(Exception):
"""Exception thrown on unsupported operations for fake large files.
Fake large files have a size with no real content.
"""
def __init__(self, file_path):
super(FakeLargeFileIoException, self).__init__(
'Read and write operations not supported for '
'fake large file: %s' % file_path)
def _copy_module(old):
"""Recompiles and creates new module object."""
saved = sys.modules.pop(old.__name__, None)
new = __import__(old.__name__)
sys.modules[old.__name__] = saved
return new
class FakeFile:
"""Provides the appearance of a real file.
Attributes currently faked out:
* `st_mode`: user-specified, otherwise S_IFREG
* `st_ctime`: the time.time() timestamp of the file change time (updated
          each time a file's attributes are modified).
* `st_atime`: the time.time() timestamp when the file was last accessed.
* `st_mtime`: the time.time() timestamp when the file was last modified.
* `st_size`: the size of the file
* `st_nlink`: the number of hard links to the file
* `st_ino`: the inode number - a unique number identifying the file
* `st_dev`: a unique number identifying the (fake) file system device
the file belongs to
* `st_uid`: always set to USER_ID, which can be changed globally using
`set_uid`
* `st_gid`: always set to GROUP_ID, which can be changed globally using
`set_gid`
.. note:: The resolution for `st_ctime`, `st_mtime` and `st_atime` in the
real file system depends on the used file system (for example it is
only 1s for HFS+ and older Linux file systems, but much higher for
ext4 and NTFS). This is currently ignored by pyfakefs, which uses
the resolution of `time.time()`.
Under Windows, `st_atime` is not updated for performance reasons by
default. pyfakefs never updates `st_atime` under Windows, assuming
the default setting.
"""
stat_types = (
'st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid',
'st_size', 'st_atime', 'st_mtime', 'st_ctime',
'st_atime_ns', 'st_mtime_ns', 'st_ctime_ns'
)
def __init__(self, name, st_mode=S_IFREG | PERM_DEF_FILE,
contents=None, filesystem=None, encoding=None, errors=None,
side_effect=None):
"""
Args:
name: Name of the file/directory, without parent path information
st_mode: The stat.S_IF* constant representing the file type (i.e.
stat.S_IFREG, stat.S_IFDIR)
contents: The contents of the filesystem object; should be a string
or byte object for regular files, and a list of other
FakeFile or FakeDirectory objects for FakeDirectory objects
filesystem: The fake filesystem where the file is created.
encoding: If contents is a unicode string, the encoding used
for serialization.
errors: The error mode used for encoding/decoding errors.
side_effect: function handle that is executed when file is written,
must accept the file object as an argument.
"""
# to be backwards compatible regarding argument order, we raise on None
if filesystem is None:
raise ValueError('filesystem shall not be None')
self.filesystem = filesystem
self._side_effect = side_effect
self.name = name
self.stat_result = FakeStatResult(
filesystem.is_windows_fs, USER_ID, GROUP_ID, time.time())
self.stat_result.st_mode = st_mode
self.encoding = encoding
self.errors = errors or 'strict'
self._byte_contents = self._encode_contents(contents)
self.stat_result.st_size = (
len(self._byte_contents) if self._byte_contents is not None else 0)
self.epoch = 0
self.parent_dir = None
# Linux specific: extended file system attributes
self.xattr = {}
@property
def byte_contents(self):
"""Return the contents as raw byte array."""
return self._byte_contents
@property
def contents(self):
"""Return the contents as string with the original encoding."""
if isinstance(self.byte_contents, bytes):
return self.byte_contents.decode(
self.encoding or locale.getpreferredencoding(False),
errors=self.errors)
return self.byte_contents
@property
def st_ctime(self):
"""Return the creation time of the fake file."""
return self.stat_result.st_ctime
@property
def st_atime(self):
"""Return the access time of the fake file."""
return self.stat_result.st_atime
@property
def st_mtime(self):
"""Return the modification time of the fake file."""
return self.stat_result.st_mtime
@st_ctime.setter
def st_ctime(self, val):
"""Set the creation time of the fake file."""
self.stat_result.st_ctime = val
@st_atime.setter
def st_atime(self, val):
"""Set the access time of the fake file."""
self.stat_result.st_atime = val
@st_mtime.setter
def st_mtime(self, val):
"""Set the modification time of the fake file."""
self.stat_result.st_mtime = val
def set_large_file_size(self, st_size):
"""Sets the self.st_size attribute and replaces self.content with None.
Provided specifically to simulate very large files without regards
to their content (which wouldn't fit in memory).
Note that read/write operations with such a file raise
:py:class:`FakeLargeFileIoException`.
Args:
st_size: (int) The desired file size
Raises:
OSError: if the st_size is not a non-negative integer,
or if st_size exceeds the available file system space
"""
self._check_positive_int(st_size)
if self.st_size:
self.size = 0
if self.filesystem:
self.filesystem.change_disk_usage(st_size, self.name, self.st_dev)
self.st_size = st_size
self._byte_contents = None
def _check_positive_int(self, size):
        # the size should be a non-negative integer value
if not is_int_type(size) or size < 0:
self.filesystem.raise_os_error(errno.ENOSPC, self.name)
def is_large_file(self):
"""Return `True` if this file was initialized with size but no contents.
"""
return self._byte_contents is None
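    # Illustrative sketch (not part of the original file): large files are
    # usually created via FakeFilesystem.create_file() with its `st_size`
    # argument (`fs` and `big` below are hypothetical):
    #     fs = FakeFilesystem()
    #     big = fs.create_file('/big', st_size=10**12)
    #     big.is_large_file()    # True; reads/writes raise
    #                            # FakeLargeFileIoException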
def _encode_contents(self, contents):
if is_unicode_string(contents):
contents = bytes(
contents,
self.encoding or locale.getpreferredencoding(False),
self.errors)
return contents
def _set_initial_contents(self, contents):
"""Sets the file contents and size.
Called internally after initial file creation.
Args:
contents: string, new content of file.
Returns:
True if the contents have been changed.
Raises:
OSError: if the st_size is not a non-negative integer,
or if st_size exceeds the available file system space
"""
contents = self._encode_contents(contents)
changed = self._byte_contents != contents
st_size = len(contents)
if self._byte_contents:
self.size = 0
current_size = self.st_size or 0
self.filesystem.change_disk_usage(
st_size - current_size, self.name, self.st_dev)
self._byte_contents = contents
self.st_size = st_size
self.epoch += 1
return changed
def set_contents(self, contents, encoding=None):
"""Sets the file contents and size and increases the modification time.
Also executes the side_effects if available.
Args:
contents: (str, bytes, unicode) new content of file.
encoding: (str) the encoding to be used for writing the contents
if they are a unicode string.
If not given, the locale preferred encoding is used.
Raises:
OSError: if `st_size` is not a non-negative integer,
or if it exceeds the available file system space.
"""
self.encoding = encoding
changed = self._set_initial_contents(contents)
if self._side_effect is not None:
self._side_effect(self)
return changed
@property
def size(self):
"""Return the size in bytes of the file contents.
"""
return self.st_size
@property
def path(self):
"""Return the full path of the current object."""
names = []
obj = self
while obj:
names.insert(0, obj.name)
obj = obj.parent_dir
sep = self.filesystem._path_separator(self.name)
if names[0] == sep:
names.pop(0)
dir_path = sep.join(names)
# Windows paths with drive have a root separator entry
# which should be removed
is_drive = names and len(names[0]) == 2 and names[0][1] == ':'
if not is_drive:
dir_path = sep + dir_path
else:
dir_path = sep.join(names)
dir_path = self.filesystem.absnormpath(dir_path)
return dir_path
@Deprecator('property path')
def GetPath(self):
return self.path
@Deprecator('property size')
def GetSize(self):
return self.size
@size.setter
def size(self, st_size):
"""Resizes file content, padding with nulls if new size exceeds the
old size.
Args:
st_size: The desired size for the file.
Raises:
OSError: if the st_size arg is not a non-negative integer
or if st_size exceeds the available file system space
"""
self._check_positive_int(st_size)
current_size = self.st_size or 0
self.filesystem.change_disk_usage(
st_size - current_size, self.name, self.st_dev)
if self._byte_contents:
if st_size < current_size:
self._byte_contents = self._byte_contents[:st_size]
else:
self._byte_contents += b'\0' * (st_size - current_size)
self.st_size = st_size
self.epoch += 1
@Deprecator('property size')
def SetSize(self, value):
self.size = value
@Deprecator('property st_atime')
def SetATime(self, st_atime):
"""Set the self.st_atime attribute.
Args:
st_atime: The desired access time.
"""
self.st_atime = st_atime
@Deprecator('property st_mtime')
def SetMTime(self, st_mtime):
"""Set the self.st_mtime attribute.
Args:
st_mtime: The desired modification time.
"""
self.st_mtime = st_mtime
@Deprecator('property st_ctime')
def SetCTime(self, st_ctime):
"""Set the self.st_ctime attribute.
Args:
st_ctime: The desired creation time.
"""
self.st_ctime = st_ctime
def __getattr__(self, item):
"""Forward some properties to stat_result."""
if item in self.stat_types:
return getattr(self.stat_result, item)
return super(FakeFile, self).__getattr__(item)
def __setattr__(self, key, value):
"""Forward some properties to stat_result."""
if key in self.stat_types:
return setattr(self.stat_result, key, value)
return super(FakeFile, self).__setattr__(key, value)
def __str__(self):
return '%s(%o)' % (self.name, self.st_mode)
@Deprecator('st_ino')
def SetIno(self, st_ino):
"""Set the self.st_ino attribute.
Note that a unique inode is assigned automatically to a new fake file.
This function does not guarantee uniqueness and should be used with
caution.
Args:
st_ino: (int) The desired inode.
"""
self.st_ino = st_ino
class FakeNullFile(FakeFile):
def __init__(self, filesystem):
        devnull = '/dev/nul' if filesystem.is_windows_fs else '/dev/null'
super(FakeNullFile, self).__init__(
devnull, filesystem=filesystem, contents=b'')
@property
def byte_contents(self):
return b''
def _set_initial_contents(self, contents):
pass
Deprecator.add(FakeFile, FakeFile.set_large_file_size, 'SetLargeFileSize')
Deprecator.add(FakeFile, FakeFile.set_contents, 'SetContents')
Deprecator.add(FakeFile, FakeFile.is_large_file, 'IsLargeFile')
class FakeFileFromRealFile(FakeFile):
"""Represents a fake file copied from the real file system.
The contents of the file are read on demand only.
"""
def __init__(self, file_path, filesystem, side_effect=None):
"""
Args:
file_path: Path to the existing file.
filesystem: The fake filesystem where the file is created.
Raises:
OSError: if the file does not exist in the real file system.
OSError: if the file already exists in the fake file system.
"""
super(FakeFileFromRealFile, self).__init__(
name=os.path.basename(file_path), filesystem=filesystem,
side_effect=side_effect)
self.contents_read = False
@property
def byte_contents(self):
if not self.contents_read:
self.contents_read = True
with io.open(self.file_path, 'rb') as f:
self._byte_contents = f.read()
# On MacOS and BSD, the above io.open() updates atime on the real file
self.st_atime = os.stat(self.file_path).st_atime
return self._byte_contents
def set_contents(self, contents, encoding=None):
self.contents_read = True
super(FakeFileFromRealFile, self).set_contents(contents, encoding)
def is_large_file(self):
"""The contents are never faked."""
return False
class FakeDirectory(FakeFile):
"""Provides the appearance of a real directory."""
def __init__(self, name, perm_bits=PERM_DEF, filesystem=None):
"""
Args:
name: name of the file/directory, without parent path information
perm_bits: permission bits. defaults to 0o777.
filesystem: if set, the fake filesystem where the directory
is created
"""
FakeFile.__init__(
self, name, S_IFDIR | perm_bits, {}, filesystem=filesystem)
# directories have the link count of contained entries,
        # including '.' and '..'
self.st_nlink += 1
def set_contents(self, contents, encoding=None):
raise self.filesystem.raise_os_error(errno.EISDIR, self.path)
@property
def contents(self):
"""Return the list of contained directory entries."""
return self.byte_contents
@property
def ordered_dirs(self):
"""Return the list of contained directory entry names ordered by
creation order.
"""
return [item[0] for item in sorted(
self.byte_contents.items(), key=lambda entry: entry[1].st_ino)]
def add_entry(self, path_object):
"""Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
"""
if (not is_root() and not self.st_mode & PERM_WRITE and
not self.filesystem.is_windows_fs):
raise OSError(errno.EACCES, 'Permission Denied', self.path)
path_object_name = to_string(path_object.name)
if path_object_name in self.contents:
self.filesystem.raise_os_error(errno.EEXIST, self.path)
self.contents[path_object_name] = path_object
path_object.parent_dir = self
if path_object.st_ino is None:
self.filesystem.last_ino += 1
path_object.st_ino = self.filesystem.last_ino
self.st_nlink += 1
path_object.st_nlink += 1
path_object.st_dev = self.st_dev
if path_object.st_nlink == 1:
self.filesystem.change_disk_usage(
path_object.size, path_object.name, self.st_dev)
def get_entry(self, pathname_name):
"""Retrieves the specified child file or directory entry.
Args:
pathname_name: The basename of the child object to retrieve.
Returns:
The fake file or directory object.
Raises:
KeyError: if no child exists by the specified name.
"""
pathname_name = self._normalized_entryname(pathname_name)
return self.contents[to_string(pathname_name)]
def _normalized_entryname(self, pathname_name):
if not self.filesystem.is_case_sensitive:
matching_names = [name for name in self.contents
if name.lower() == pathname_name.lower()]
if matching_names:
pathname_name = matching_names[0]
return pathname_name
def remove_entry(self, pathname_name, recursive=True):
"""Removes the specified child file or directory.
Args:
pathname_name: Basename of the child object to remove.
recursive: If True (default), the entries in contained directories
are deleted first. Used to propagate removal errors
(e.g. permission problems) from contained entries.
Raises:
KeyError: if no child exists by the specified name.
OSError: if user lacks permission to delete the file,
or (Windows only) the file is open.
"""
pathname_name = self._normalized_entryname(pathname_name)
entry = self.get_entry(pathname_name)
if self.filesystem.is_windows_fs:
if entry.st_mode & PERM_WRITE == 0:
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
if self.filesystem.has_open_file(entry):
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
else:
if (not is_root() and (self.st_mode & (PERM_WRITE | PERM_EXE) !=
PERM_WRITE | PERM_EXE)):
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
if recursive and isinstance(entry, FakeDirectory):
while entry.contents:
entry.remove_entry(list(entry.contents)[0])
elif entry.st_nlink == 1:
self.filesystem.change_disk_usage(
-entry.size, pathname_name, entry.st_dev)
self.st_nlink -= 1
entry.st_nlink -= 1
assert entry.st_nlink >= 0
del self.contents[to_string(pathname_name)]
@property
def size(self):
"""Return the total size of all files contained in this directory tree.
"""
return sum([item[1].size for item in self.contents.items()])
@Deprecator('property size')
def GetSize(self):
return self.size
def has_parent_object(self, dir_object):
"""Return `True` if dir_object is a direct or indirect parent
directory, or if both are the same object."""
obj = self
while obj:
if obj == dir_object:
return True
obj = obj.parent_dir
return False
def __str__(self):
description = super(FakeDirectory, self).__str__() + ':\n'
for item in self.contents:
item_desc = self.contents[item].__str__()
for line in item_desc.split('\n'):
if line:
description = description + ' ' + line + '\n'
return description
Deprecator.add(FakeDirectory, FakeDirectory.add_entry, 'AddEntry')
Deprecator.add(FakeDirectory, FakeDirectory.get_entry, 'GetEntry')
Deprecator.add(FakeDirectory, FakeDirectory.set_contents, 'SetContents')
Deprecator.add(FakeDirectory, FakeDirectory.remove_entry, 'RemoveEntry')
class FakeDirectoryFromRealDirectory(FakeDirectory):
"""Represents a fake directory copied from the real file system.
The contents of the directory are read on demand only.
"""
def __init__(self, source_path, filesystem, read_only,
target_path=None):
"""
Args:
source_path: Full directory path.
filesystem: The fake filesystem where the directory is created.
read_only: If set, all files under the directory are treated
as read-only, e.g. a write access raises an exception;
otherwise, writing to the files changes the fake files
only as usually.
target_path: If given, the target path of the directory,
otherwise the target is the same as `source_path`.
Raises:
OSError: if the directory does not exist in the real file system
"""
target_path = target_path or source_path
real_stat = os.stat(source_path)
super(FakeDirectoryFromRealDirectory, self).__init__(
name=os.path.split(target_path)[1],
perm_bits=real_stat.st_mode,
filesystem=filesystem)
self.st_ctime = real_stat.st_ctime
self.st_atime = real_stat.st_atime
self.st_mtime = real_stat.st_mtime
self.st_gid = real_stat.st_gid
self.st_uid = real_stat.st_uid
self.source_path = source_path
self.read_only = read_only
self.contents_read = False
@property
def contents(self):
"""Return the list of contained directory entries, loading them
if not already loaded."""
if not self.contents_read:
self.contents_read = True
base = self.path
for entry in os.listdir(self.source_path):
source_path = os.path.join(self.source_path, entry)
target_path = os.path.join(base, entry)
if os.path.islink(source_path):
self.filesystem.add_real_symlink(source_path, target_path)
elif os.path.isdir(source_path):
self.filesystem.add_real_directory(
source_path, self.read_only, target_path=target_path)
else:
self.filesystem.add_real_file(
source_path, self.read_only, target_path=target_path)
return self.byte_contents
@property
def size(self):
# we cannot get the size until the contents are loaded
if not self.contents_read:
return 0
return super(FakeDirectoryFromRealDirectory, self).size
class FakeFilesystem:
"""Provides the appearance of a real directory tree for unit testing.
Attributes:
path_separator: The path separator, corresponds to `os.path.sep`.
alternative_path_separator: Corresponds to `os.path.altsep`.
is_windows_fs: `True` in a real or faked Windows file system.
is_macos: `True` under MacOS, or if we are faking it.
is_case_sensitive: `True` if a case-sensitive file system is assumed.
root: The root :py:class:`FakeDirectory` entry of the file system.
cwd: The current working directory path.
umask: The umask used for newly created files, see `os.umask`.
patcher: Holds the Patcher object if created from it. Allows access
to the patcher object if using the pytest fs fixture.
"""
def __init__(self, path_separator=os.path.sep, total_size=None,
patcher=None):
"""
Args:
path_separator: optional substitute for os.path.sep
total_size: if not None, the total size in bytes of the
root filesystem.
Example usage to use the same path separator under all systems:
>>> filesystem = FakeFilesystem(path_separator='/')
"""
self.path_separator = path_separator
self.alternative_path_separator = os.path.altsep
self.patcher = patcher
if path_separator != os.sep:
self.alternative_path_separator = None
# is_windows_fs can be used to test the behavior of pyfakefs under
        # Windows fs on non-Windows systems and vice versa;
        # it is used to support drive letters, UNC paths and some other
# Windows-specific features
self.is_windows_fs = sys.platform == 'win32'
# can be used to test some MacOS-specific behavior under other systems
self.is_macos = sys.platform == 'darwin'
# is_case_sensitive can be used to test pyfakefs for case-sensitive
        # file systems on non-case-sensitive systems and vice versa
self.is_case_sensitive = not (self.is_windows_fs or self.is_macos)
self.root = FakeDirectory(self.path_separator, filesystem=self)
self.cwd = self.root.name
# We can't query the current value without changing it:
self.umask = os.umask(0o22)
os.umask(self.umask)
# A list of open file objects. Their position in the list is their
# file descriptor number
self.open_files = []
# A heap containing all free positions in self.open_files list
self._free_fd_heap = []
# last used numbers for inodes (st_ino) and devices (st_dev)
self.last_ino = 0
self.last_dev = 0
self.mount_points = {}
self.add_mount_point(self.root.name, total_size)
self._add_standard_streams()
self.dev_null = FakeNullFile(self)
@property
def is_linux(self):
return not self.is_windows_fs and not self.is_macos
def reset(self, total_size=None):
"""Remove all file system contents and reset the root."""
self.root = FakeDirectory(self.path_separator, filesystem=self)
self.cwd = self.root.name
self.open_files = []
self._free_fd_heap = []
self.last_ino = 0
self.last_dev = 0
self.mount_points = {}
self.add_mount_point(self.root.name, total_size)
self._add_standard_streams()
def pause(self):
"""Pause the patching of the file system modules until `resume` is
called. After that call, all file system calls are executed in the
real file system.
Calling pause() twice is silently ignored.
Only allowed if the file system object was created by a
Patcher object. This is also the case for the pytest `fs` fixture.
Raises:
RuntimeError: if the file system was not created by a Patcher.
"""
if self.patcher is None:
raise RuntimeError('pause() can only be called from a fake file '
'system object created by a Patcher object')
self.patcher.pause()
def resume(self):
"""Resume the patching of the file system modules if `pause` has
been called before. After that call, all file system calls are
executed in the fake file system.
Does nothing if patching is not paused.
Raises:
RuntimeError: if the file system has not been created by `Patcher`.
"""
if self.patcher is None:
raise RuntimeError('resume() can only be called from a fake file '
'system object created by a Patcher object')
self.patcher.resume()
def line_separator(self):
return '\r\n' if self.is_windows_fs else '\n'
def _error_message(self, errno):
return os.strerror(errno) + ' in the fake filesystem'
def raise_os_error(self, errno, filename=None, winerror=None):
"""Raises OSError.
The error message is constructed from the given error code and shall
start with the error string issued in the real system.
Note: this is not true under Windows if winerror is given - in this
case a localized message specific to winerror will be shown in the
real file system.
Args:
errno: A numeric error code from the C variable errno.
filename: The name of the affected file, if any.
winerror: Windows only - the specific Windows error code.
"""
message = self._error_message(errno)
if (winerror is not None and sys.platform == 'win32' and
self.is_windows_fs):
raise OSError(errno, message, filename, winerror)
raise OSError(errno, message, filename)
@staticmethod
def _matching_string(matched, string):
"""Return the string as byte or unicode depending
on the type of matched, assuming string is an ASCII string.
"""
if string is None:
return string
if isinstance(matched, bytes) and isinstance(string, str):
return string.encode(locale.getpreferredencoding(False))
return string
def _path_separator(self, path):
"""Return the path separator as the same type as path"""
return self._matching_string(path, self.path_separator)
def _alternative_path_separator(self, path):
"""Return the alternative path separator as the same type as path"""
return self._matching_string(path, self.alternative_path_separator)
def add_mount_point(self, path, total_size=None):
"""Add a new mount point for a filesystem device.
The mount point gets a new unique device number.
Args:
path: The root path for the new mount path.
total_size: The new total size of the added filesystem device
in bytes. Defaults to infinite size.
Returns:
The newly created mount point dict.
Raises:
OSError: if trying to mount an existing mount point again.
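        Example usage (a minimal sketch; the path and size are illustrative,
        and the size limit is given in bytes):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> mount = fs.add_mount_point('/mnt/data', total_size=100)
        >>> mount['total_size']
        100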
"""
path = self.absnormpath(path)
if path in self.mount_points:
self.raise_os_error(errno.EEXIST, path)
self.last_dev += 1
self.mount_points[path] = {
'idev': self.last_dev, 'total_size': total_size, 'used_size': 0
}
# special handling for root path: has been created before
if path == self.root.name:
root_dir = self.root
self.last_ino += 1
root_dir.st_ino = self.last_ino
else:
root_dir = self.create_dir(path)
root_dir.st_dev = self.last_dev
return self.mount_points[path]
def _auto_mount_drive_if_needed(self, path, force=False):
if (self.is_windows_fs and
(force or not self._mount_point_for_path(path))):
drive = self.splitdrive(path)[0]
if drive:
return self.add_mount_point(path=drive)
def _mount_point_for_path(self, path):
def to_str(string):
"""Convert the str, unicode or byte object to a str
using the default encoding."""
if string is None or isinstance(string, str):
return string
return string.decode(locale.getpreferredencoding(False))
path = self.absnormpath(self._original_path(path))
if path in self.mount_points:
return self.mount_points[path]
mount_path = self._matching_string(path, '')
drive = self.splitdrive(path)[:1]
for root_path in self.mount_points:
root_path = self._matching_string(path, root_path)
if drive and not root_path.startswith(drive):
continue
if path.startswith(root_path) and len(root_path) > len(mount_path):
mount_path = root_path
if mount_path:
return self.mount_points[to_str(mount_path)]
mount_point = self._auto_mount_drive_if_needed(path, force=True)
assert mount_point
return mount_point
def _mount_point_for_device(self, idev):
for mount_point in self.mount_points.values():
if mount_point['idev'] == idev:
return mount_point
def get_disk_usage(self, path=None):
"""Return the total, used and free disk space in bytes as named tuple,
or placeholder values simulating unlimited space if not set.
.. note:: This matches the return value of shutil.disk_usage().
Args:
path: The disk space is returned for the file system device where
`path` resides.
Defaults to the root path (e.g. '/' on Unix systems).
"""
DiskUsage = namedtuple('usage', 'total, used, free')
if path is None:
mount_point = self.mount_points[self.root.name]
else:
mount_point = self._mount_point_for_path(path)
if mount_point and mount_point['total_size'] is not None:
return DiskUsage(mount_point['total_size'],
mount_point['used_size'],
mount_point['total_size'] -
mount_point['used_size'])
return DiskUsage(
1024 * 1024 * 1024 * 1024, 0, 1024 * 1024 * 1024 * 1024)
def set_disk_usage(self, total_size, path=None):
"""Changes the total size of the file system, preserving the used space.
Example usage: set the size of an auto-mounted Windows drive.
Args:
total_size: The new total size of the filesystem in bytes.
path: The disk space is changed for the file system device where
`path` resides.
Defaults to the root path (e.g. '/' on Unix systems).
Raises:
OSError: if the new space is smaller than the used size.
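        Example usage (a minimal sketch; sets a 100-byte limit on the root
        device and reads it back via `get_disk_usage`):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> fs.set_disk_usage(100)
        >>> fs.get_disk_usage().total
        100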
"""
if path is None:
path = self.root.name
mount_point = self._mount_point_for_path(path)
if (mount_point['total_size'] is not None and
mount_point['used_size'] > total_size):
self.raise_os_error(errno.ENOSPC, path)
mount_point['total_size'] = total_size
def change_disk_usage(self, usage_change, file_path, st_dev):
"""Change the used disk space by the given amount.
Args:
usage_change: Number of bytes added to the used space.
If negative, the used space will be decreased.
file_path: The path of the object needing the disk space.
st_dev: The device ID for the respective file system.
Raises:
OSError: if usage_change exceeds the free file system space
"""
mount_point = self._mount_point_for_device(st_dev)
if mount_point:
total_size = mount_point['total_size']
if total_size is not None:
if total_size - mount_point['used_size'] < usage_change:
self.raise_os_error(errno.ENOSPC, file_path)
mount_point['used_size'] += usage_change
def stat(self, entry_path, follow_symlinks=True):
"""Return the os.stat-like tuple for the FakeFile object of entry_path.
Args:
entry_path: Path to filesystem object to retrieve.
follow_symlinks: If False and entry_path points to a symlink,
the link itself is inspected instead of the linked object.
Returns:
The FakeStatResult object corresponding to entry_path.
Raises:
OSError: if the filesystem object doesn't exist.
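        Example usage (a minimal sketch; the size of the fake file is
        reported just as `os.stat` would report it):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> f = fs.create_file('/foo/bar.txt', contents='test')
        >>> fs.stat('/foo/bar.txt').st_size
        4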
"""
# stat should return the tuple representing return value of os.stat
file_object = self.resolve(
entry_path, follow_symlinks,
allow_fd=True, check_read_perm=False)
if not is_root():
# make sure stat raises if a parent dir is not readable
parent_dir = file_object.parent_dir
if parent_dir:
self.get_object(parent_dir.path)
self.raise_for_filepath_ending_with_separator(
entry_path, file_object, follow_symlinks)
return file_object.stat_result.copy()
def raise_for_filepath_ending_with_separator(self, entry_path,
file_object,
follow_symlinks=True,
macos_handling=False):
if self.ends_with_path_separator(entry_path):
if S_ISLNK(file_object.st_mode):
try:
link_object = self.resolve(entry_path)
except OSError as exc:
if self.is_macos and exc.errno != errno.ENOENT:
return
if self.is_windows_fs:
self.raise_os_error(errno.EINVAL, entry_path)
raise
if not follow_symlinks or self.is_windows_fs or self.is_macos:
file_object = link_object
if self.is_windows_fs:
is_error = S_ISREG(file_object.st_mode)
elif self.is_macos and macos_handling:
is_error = not S_ISLNK(file_object.st_mode)
else:
is_error = not S_ISDIR(file_object.st_mode)
if is_error:
error_nr = (errno.EINVAL if self.is_windows_fs
else errno.ENOTDIR)
self.raise_os_error(error_nr, entry_path)
def chmod(self, path, mode, follow_symlinks=True):
"""Change the permissions of a file as encoded in integer mode.
Args:
path: (str) Path to the file.
mode: (int) Permissions.
follow_symlinks: If `False` and `path` points to a symlink,
the link itself is affected instead of the linked object.
"""
file_object = self.resolve(path, follow_symlinks, allow_fd=True)
if self.is_windows_fs:
if mode & PERM_WRITE:
file_object.st_mode = file_object.st_mode | 0o222
else:
file_object.st_mode = file_object.st_mode & 0o777555
else:
file_object.st_mode = ((file_object.st_mode & ~PERM_ALL) |
(mode & PERM_ALL))
file_object.st_ctime = time.time()
def utime(self, path, times=None, *, ns=None, follow_symlinks=True):
"""Change the access and modified times of a file.
Args:
path: (str) Path to the file.
times: 2-tuple of int or float numbers, of the form (atime, mtime)
which is used to set the access and modified times in seconds.
If None, both times are set to the current time.
ns: 2-tuple of int numbers, of the form (atime, mtime) which is
used to set the access and modified times in nanoseconds.
If `None`, both times are set to the current time.
follow_symlinks: If `False` and entry_path points to a symlink,
the link itself is queried instead of the linked object.
Raises:
TypeError: If anything other than the expected types is
specified in the passed `times` or `ns` tuple,
or if the tuple length is not equal to 2.
ValueError: If both times and ns are specified.
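        Example usage (a minimal sketch; sets the access time to 1.0 and
        the modification time to 2.0 seconds since the epoch):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> f = fs.create_file('/foo.txt')
        >>> fs.utime('/foo.txt', times=(1.0, 2.0))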
"""
self._handle_utime_arg_errors(ns, times)
file_object = self.resolve(path, follow_symlinks, allow_fd=True)
if times is not None:
for file_time in times:
if not isinstance(file_time, (int, float)):
raise TypeError('atime and mtime must be numbers')
file_object.st_atime = times[0]
file_object.st_mtime = times[1]
elif ns is not None:
for file_time in ns:
if not isinstance(file_time, int):
raise TypeError('atime and mtime must be ints')
file_object.st_atime_ns = ns[0]
file_object.st_mtime_ns = ns[1]
else:
current_time = time.time()
file_object.st_atime = current_time
file_object.st_mtime = current_time
def _handle_utime_arg_errors(self, ns, times):
if times is not None and ns is not None:
raise ValueError(
"utime: you may specify either 'times' or 'ns' but not both")
if times is not None and len(times) != 2:
raise TypeError(
"utime: 'times' must be either a tuple of two ints or None")
if ns is not None and len(ns) != 2:
raise TypeError("utime: 'ns' must be a tuple of two ints")
@Deprecator
def SetIno(self, path, st_ino):
"""Set the self.st_ino attribute of file at 'path'.
Note that a unique inode is assigned automatically to a new fake file.
        Using this function does not guarantee uniqueness and should be
        used with caution.
Args:
path: Path to file.
st_ino: The desired inode.
"""
self.get_object(path).st_ino = st_ino
def _add_open_file(self, file_obj):
"""Add file_obj to the list of open files on the filesystem.
Used internally to manage open files.
The position in the open_files array is the file descriptor number.
Args:
file_obj: File object to be added to open files list.
Returns:
File descriptor number for the file object.
"""
if self._free_fd_heap:
open_fd = heapq.heappop(self._free_fd_heap)
self.open_files[open_fd] = [file_obj]
return open_fd
self.open_files.append([file_obj])
return len(self.open_files) - 1
def _close_open_file(self, file_des):
"""Remove file object with given descriptor from the list
of open files.
Sets the entry in open_files to None.
Args:
file_des: Descriptor of file object to be removed from
open files list.
"""
self.open_files[file_des] = None
heapq.heappush(self._free_fd_heap, file_des)
def get_open_file(self, file_des):
"""Return an open file.
Args:
file_des: File descriptor of the open file.
Raises:
OSError: an invalid file descriptor.
TypeError: filedes is not an integer.
Returns:
Open file object.
"""
if not is_int_type(file_des):
raise TypeError('an integer is required')
if (file_des >= len(self.open_files) or
self.open_files[file_des] is None):
self.raise_os_error(errno.EBADF, str(file_des))
return self.open_files[file_des][0]
def has_open_file(self, file_object):
"""Return True if the given file object is in the list of open files.
Args:
file_object: The FakeFile object to be checked.
Returns:
`True` if the file is open.
"""
return (file_object in [wrappers[0].get_object()
for wrappers in self.open_files if wrappers])
def _normalize_path_sep(self, path):
if self.alternative_path_separator is None or not path:
return path
return path.replace(self._alternative_path_separator(path),
self._path_separator(path))
def normcase(self, path):
"""Replace all appearances of alternative path separator
with path separator.
Do nothing if no alternative separator is set.
Args:
path: The path to be normalized.
Returns:
The normalized path that will be used internally.
"""
path = make_string_path(path)
return self._normalize_path_sep(path)
def normpath(self, path):
"""Mimic os.path.normpath using the specified path_separator.
Mimics os.path.normpath using the path_separator that was specified
for this FakeFilesystem. Normalizes the path, but unlike the method
absnormpath, does not make it absolute. Eliminates dot components
(. and ..) and combines repeated path separators (//). Initial ..
components are left in place for relative paths.
If the result is an empty path, '.' is returned instead.
This also replaces alternative path separator with path separator.
That is, it behaves like the real os.path.normpath on Windows if
initialized with '\\' as path separator and '/' as alternative
separator.
Args:
path: (str) The path to normalize.
Returns:
(str) A copy of path with empty components and dot components
removed.
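        Example usage (a minimal sketch with '/' as path separator):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> fs.normpath('/a/./b/../c')
        '/a/c'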
"""
path = self.normcase(path)
drive, path = self.splitdrive(path)
sep = self._path_separator(path)
is_absolute_path = path.startswith(sep)
path_components = path.split(sep)
collapsed_path_components = []
dot = self._matching_string(path, '.')
dotdot = self._matching_string(path, '..')
for component in path_components:
if (not component) or (component == dot):
continue
if component == dotdot:
if collapsed_path_components and (
collapsed_path_components[-1] != dotdot):
# Remove an up-reference: directory/..
collapsed_path_components.pop()
continue
elif is_absolute_path:
# Ignore leading .. components if starting from the
# root directory.
continue
collapsed_path_components.append(component)
collapsed_path = sep.join(collapsed_path_components)
if is_absolute_path:
collapsed_path = sep + collapsed_path
return drive + collapsed_path or dot
def _original_path(self, path):
"""Return a normalized case version of the given path for
case-insensitive file systems. For case-sensitive file systems,
return path unchanged.
Args:
path: the file path to be transformed
Returns:
A version of path matching the case of existing path elements.
"""
def components_to_path():
if len(path_components) > len(normalized_components):
normalized_components.extend(
path_components[len(normalized_components):])
sep = self._path_separator(path)
normalized_path = sep.join(normalized_components)
if path.startswith(sep) and not normalized_path.startswith(sep):
normalized_path = sep + normalized_path
return normalized_path
if self.is_case_sensitive or not path:
return path
path_components = self._path_components(path)
normalized_components = []
current_dir = self.root
for component in path_components:
if not isinstance(current_dir, FakeDirectory):
return components_to_path()
dir_name, current_dir = self._directory_content(
current_dir, component)
if current_dir is None or (
isinstance(current_dir, FakeDirectory) and
current_dir._byte_contents is None and
current_dir.st_size == 0):
return components_to_path()
normalized_components.append(dir_name)
return components_to_path()
def absnormpath(self, path):
"""Absolutize and minimalize the given path.
Forces all relative paths to be absolute, and normalizes the path to
eliminate dot and empty components.
Args:
path: Path to normalize.
Returns:
The normalized path relative to the current working directory,
or the root directory if path is empty.
"""
path = self.normcase(path)
cwd = self._matching_string(path, self.cwd)
if not path:
path = self.path_separator
if path == self._matching_string(path, '.'):
path = cwd
elif not self._starts_with_root_path(path):
# Prefix relative paths with cwd, if cwd is not root.
root_name = self._matching_string(path, self.root.name)
empty = self._matching_string(path, '')
path = self._path_separator(path).join(
(cwd != root_name and cwd or empty, path))
if path == self._matching_string(path, '.'):
path = cwd
return self.normpath(path)
def splitpath(self, path):
"""Mimic os.path.splitpath using the specified path_separator.
Mimics os.path.splitpath using the path_separator that was specified
for this FakeFilesystem.
Args:
path: (str) The path to split.
Returns:
            (str) A tuple (pathname, basename) for which pathname does not
end with a slash, and basename does not contain a slash.
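        Example usage (a minimal sketch with '/' as path separator):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> fs.splitpath('/dir/file.txt')
        ('/dir', 'file.txt')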
"""
path = self.normcase(path)
sep = self._path_separator(path)
path_components = path.split(sep)
if not path_components:
return ('', '')
starts_with_drive = self._starts_with_drive_letter(path)
basename = path_components.pop()
colon = self._matching_string(path, ':')
if not path_components:
if starts_with_drive:
components = basename.split(colon)
return (components[0] + colon, components[1])
return ('', basename)
for component in path_components:
if component:
# The path is not the root; it contains a non-separator
# component. Strip all trailing separators.
while not path_components[-1]:
path_components.pop()
if starts_with_drive:
if not path_components:
components = basename.split(colon)
return (components[0] + colon, components[1])
if (len(path_components) == 1 and
path_components[0].endswith(colon)):
return (path_components[0] + sep, basename)
return (sep.join(path_components), basename)
# Root path. Collapse all leading separators.
return (sep, basename)
def splitdrive(self, path):
"""Splits the path into the drive part and the rest of the path.
        Taken from the Windows-specific implementation in Python 3.5
and slightly adapted.
Args:
            path: the full path to be split.
Returns:
A tuple of the drive part and the rest of the path, or of
an empty string and the full path if drive letters are
not supported or no drive is present.
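        Example usage (a minimal sketch; `is_windows_fs` is forced here
        only to demonstrate drive letter handling):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> fs.is_windows_fs = True
        >>> fs.splitdrive('C:/foo/bar')
        ('C:', '/foo/bar')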
"""
path = make_string_path(path)
if self.is_windows_fs:
if len(path) >= 2:
path = self.normcase(path)
sep = self._path_separator(path)
# UNC path handling
if (path[0:2] == sep * 2) and (
path[2:3] != sep):
# UNC path handling - splits off the mount point
# instead of the drive
sep_index = path.find(sep, 2)
if sep_index == -1:
return path[:0], path
sep_index2 = path.find(sep, sep_index + 1)
if sep_index2 == sep_index + 1:
return path[:0], path
if sep_index2 == -1:
sep_index2 = len(path)
return path[:sep_index2], path[sep_index2:]
if path[1:2] == self._matching_string(path, ':'):
return path[:2], path[2:]
return path[:0], path
def _join_paths_with_drive_support(self, *all_paths):
"""Taken from Python 3.5 os.path.join() code in ntpath.py
and slightly adapted"""
base_path = all_paths[0]
paths_to_add = all_paths[1:]
sep = self._path_separator(base_path)
seps = [sep, self._alternative_path_separator(base_path)]
result_drive, result_path = self.splitdrive(base_path)
for path in paths_to_add:
drive_part, path_part = self.splitdrive(path)
if path_part and path_part[:1] in seps:
# Second path is absolute
if drive_part or not result_drive:
result_drive = drive_part
result_path = path_part
continue
elif drive_part and drive_part != result_drive:
if (self.is_case_sensitive or
drive_part.lower() != result_drive.lower()):
# Different drives => ignore the first path entirely
result_drive = drive_part
result_path = path_part
continue
# Same drive in different case
result_drive = drive_part
# Second path is relative to the first
if result_path and result_path[-1:] not in seps:
result_path = result_path + sep
result_path = result_path + path_part
# add separator between UNC and non-absolute path
colon = self._matching_string(base_path, ':')
if (result_path and result_path[:1] not in seps and
result_drive and result_drive[-1:] != colon):
return result_drive + sep + result_path
return result_drive + result_path
def joinpaths(self, *paths):
"""Mimic os.path.join using the specified path_separator.
Args:
*paths: (str) Zero or more paths to join.
Returns:
(str) The paths joined by the path separator, starting with
the last absolute path in paths.
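        Example usage (a minimal sketch; an absolute component discards
        everything joined before it):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> fs.joinpaths('foo', 'bar')
        'foo/bar'
        >>> fs.joinpaths('foo', '/bar')
        '/bar'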
"""
if sys.version_info >= (3, 6):
paths = [os.fspath(path) for path in paths]
if len(paths) == 1:
return paths[0]
if self.is_windows_fs:
return self._join_paths_with_drive_support(*paths)
joined_path_segments = []
sep = self._path_separator(paths[0])
for path_segment in paths:
if self._starts_with_root_path(path_segment):
# An absolute path
joined_path_segments = [path_segment]
else:
if (joined_path_segments and
not joined_path_segments[-1].endswith(sep)):
joined_path_segments.append(sep)
if path_segment:
joined_path_segments.append(path_segment)
return self._matching_string(paths[0], '').join(joined_path_segments)
def _path_components(self, path):
"""Breaks the path into a list of component names.
Does not include the root directory as a component, as all paths
are considered relative to the root directory for the FakeFilesystem.
Callers should basically follow this pattern:
.. code:: python
file_path = self.absnormpath(file_path)
path_components = self._path_components(file_path)
current_dir = self.root
for component in path_components:
if component not in current_dir.contents:
raise OSError
_do_stuff_with_component(current_dir, component)
current_dir = current_dir.get_entry(component)
Args:
path: Path to tokenize.
Returns:
The list of names split from path.
"""
if not path or path == self._path_separator(path):
return []
drive, path = self.splitdrive(path)
path_components = path.split(self._path_separator(path))
assert drive or path_components
if not path_components[0]:
if len(path_components) > 1 and not path_components[1]:
path_components = []
else:
# This is an absolute path.
path_components = path_components[1:]
if drive:
path_components.insert(0, drive)
return path_components
def _starts_with_drive_letter(self, file_path):
"""Return True if file_path starts with a drive letter.
Args:
file_path: the full path to be examined.
Returns:
`True` if drive letter support is enabled in the filesystem and
the path starts with a drive letter.
"""
colon = self._matching_string(file_path, ':')
return (self.is_windows_fs and len(file_path) >= 2 and
                file_path[:1].isalpha() and file_path[1:2] == colon)
def _starts_with_root_path(self, file_path):
root_name = self._matching_string(file_path, self.root.name)
file_path = self._normalize_path_sep(file_path)
return (file_path.startswith(root_name) or
not self.is_case_sensitive and file_path.lower().startswith(
root_name.lower()) or
self._starts_with_drive_letter(file_path))
def _is_root_path(self, file_path):
root_name = self._matching_string(file_path, self.root.name)
return (file_path == root_name or not self.is_case_sensitive and
file_path.lower() == root_name.lower() or
2 <= len(file_path) <= 3 and
self._starts_with_drive_letter(file_path))
def ends_with_path_separator(self, file_path):
"""Return True if ``file_path`` ends with a valid path separator."""
if is_int_type(file_path):
return False
file_path = make_string_path(file_path)
return (file_path and
file_path not in (self.path_separator,
self.alternative_path_separator) and
(file_path.endswith(self._path_separator(file_path)) or
self.alternative_path_separator is not None and
file_path.endswith(
self._alternative_path_separator(file_path))))
def is_filepath_ending_with_separator(self, path):
if not self.ends_with_path_separator(path):
return False
return self.isfile(self._path_without_trailing_separators(path))
def _directory_content(self, directory, component):
if not isinstance(directory, FakeDirectory):
return None, None
if component in directory.contents:
return component, directory.contents[component]
if not self.is_case_sensitive:
matching_content = [(subdir, directory.contents[subdir]) for
subdir in directory.contents
if subdir.lower() == component.lower()]
if matching_content:
return matching_content[0]
return None, None
def exists(self, file_path, check_link=False):
"""Return true if a path points to an existing file system object.
Args:
file_path: The path to examine.
Returns:
(bool) True if the corresponding object exists.
Raises:
TypeError: if file_path is None.
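        Example usage (a minimal sketch):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> f = fs.create_file('/foo/bar.txt')
        >>> fs.exists('/foo/bar.txt')
        True
        >>> fs.exists('/foo/baz.txt')
        False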
"""
if check_link and self.islink(file_path):
return True
file_path = make_string_path(file_path)
if file_path is None:
raise TypeError
if not file_path:
return False
if file_path == self.dev_null.name:
return not self.is_windows_fs or sys.version_info >= (3, 8)
try:
if self.is_filepath_ending_with_separator(file_path):
return False
file_path = self.resolve_path(file_path)
except OSError:
return False
if file_path == self.root.name:
return True
path_components = self._path_components(file_path)
current_dir = self.root
for component in path_components:
current_dir = self._directory_content(current_dir, component)[1]
if not current_dir:
return False
return True
def resolve_path(self, file_path, allow_fd=False, raw_io=True):
"""Follow a path, resolving symlinks.
        `resolve_path` traverses the filesystem along the specified file path,
resolving file names and symbolic links until all elements of the path
are exhausted, or we reach a file which does not exist.
If all the elements are not consumed, they just get appended to the
path resolved so far.
This gives us the path which is as resolved as it can be, even if the
file does not exist.
This behavior mimics Unix semantics, and is best shown by example.
Given a file system that looks like this:
/a/b/
/a/b/c -> /a/b2 c is a symlink to /a/b2
/a/b2/x
/a/c -> ../d
/a/x -> y
Then:
/a/b/x => /a/b/x
              /a/c        =>  /d
/a/x => /a/y
/a/b/c/d/e => /a/b2/d/e
Args:
file_path: The path to examine.
allow_fd: If `True`, `file_path` may be open file descriptor.
raw_io: `True` if called from low-level I/O functions.
Returns:
The resolved_path (string) or None.
Raises:
TypeError: if `file_path` is `None`.
OSError: if `file_path` is '' or a part of the path doesn't exist.
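        Example usage (a minimal sketch; the symlink target does not need
        to exist for the path to be resolved):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> link = fs.create_symlink('/foo', '/bar')
        >>> fs.resolve_path('/foo')
        '/bar'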
"""
if allow_fd and isinstance(file_path, int):
return self.get_open_file(file_path).get_object().path
file_path = make_string_path(file_path)
if file_path is None:
# file.open(None) raises TypeError, so mimic that.
raise TypeError('Expected file system path string, received None')
if not file_path or not self._valid_relative_path(file_path):
# file.open('') raises OSError, so mimic that, and validate that
# all parts of a relative path exist.
self.raise_os_error(errno.ENOENT, file_path)
file_path = self.absnormpath(self._original_path(file_path))
if self._is_root_path(file_path):
return file_path
if file_path == self.dev_null.name:
return file_path
path_components = self._path_components(file_path)
resolved_components = self._resolve_components(path_components, raw_io)
return self._components_to_path(resolved_components)
def _components_to_path(self, component_folders):
sep = (self._path_separator(component_folders[0])
if component_folders else self.path_separator)
path = sep.join(component_folders)
if not self._starts_with_root_path(path):
path = sep + path
return path
def _resolve_components(self, path_components, raw_io):
current_dir = self.root
link_depth = 0
resolved_components = []
while path_components:
component = path_components.pop(0)
resolved_components.append(component)
current_dir = self._directory_content(current_dir, component)[1]
if current_dir is None:
# The component of the path at this point does not actually
# exist in the folder. We can't resolve the path any more.
# It is legal to link to a file that does not yet exist, so
# rather than raise an error, we just append the remaining
# components to what return path we have built so far and
# return that.
resolved_components.extend(path_components)
break
# Resolve any possible symlinks in the current path component.
if S_ISLNK(current_dir.st_mode):
# This link_depth check is not really meant to be an accurate
# check. It is just a quick hack to prevent us from looping
# forever on cycles.
if link_depth > _MAX_LINK_DEPTH:
self.raise_os_error(errno.ELOOP,
self._components_to_path(
resolved_components))
link_path = self._follow_link(resolved_components, current_dir)
# Following the link might result in the complete replacement
# of the current_dir, so we evaluate the entire resulting path.
target_components = self._path_components(link_path)
path_components = target_components + path_components
resolved_components = []
current_dir = self.root
link_depth += 1
return resolved_components
def _valid_relative_path(self, file_path):
if self.is_windows_fs:
return True
slash_dotdot = self._matching_string(
file_path, self.path_separator + '..')
while file_path and slash_dotdot in file_path:
file_path = file_path[:file_path.rfind(slash_dotdot)]
if not self.exists(self.absnormpath(file_path)):
return False
return True
def _follow_link(self, link_path_components, link):
"""Follow a link w.r.t. a path resolved so far.
The component is either a real file, which is a no-op, or a
symlink. In the case of a symlink, we have to modify the path
as built up so far
        /a/b => ../c should yield /a/../c (which will normalize to /c)
/a/b => x should yield /a/x
/a/b => /x/y/z should yield /x/y/z
The modified path may land us in a new spot which is itself a
link, so we may repeat the process.
Args:
link_path_components: The resolved path built up to the link
so far.
link: The link object itself.
Returns:
(string) The updated path resolved after following the link.
Raises:
OSError: if there are too many levels of symbolic link
"""
link_path = link.contents
# ignore UNC prefix for local files
if self.is_windows_fs and link_path.startswith('\\\\?\\'):
link_path = link_path[4:]
sep = self._path_separator(link_path)
# For links to absolute paths, we want to throw out everything
# in the path built so far and replace with the link. For relative
        # links, we have to append the link to what we have so far.
if not self._starts_with_root_path(link_path):
# Relative path. Append remainder of path to what we have
# processed so far, excluding the name of the link itself.
# /a/b => ../c should yield /a/../c
# (which will normalize to /c)
            # /a/b => d should yield /a/d
components = link_path_components[:-1]
components.append(link_path)
link_path = sep.join(components)
        # Don't call self.absnormpath(), as we don't want to prepend
# self.cwd.
return self.normpath(link_path)
def get_object_from_normpath(self, file_path, check_read_perm=True):
"""Search for the specified filesystem object within the fake
filesystem.
Args:
file_path: Specifies target FakeFile object to retrieve, with a
path that has already been normalized/resolved.
check_read_perm: If True, raises OSError if a parent directory
does not have read permission
Returns:
The FakeFile object corresponding to file_path.
Raises:
OSError: if the object is not found.
"""
file_path = make_string_path(file_path)
if file_path == self.root.name:
return self.root
if file_path == self.dev_null.name:
return self.dev_null
file_path = self._original_path(file_path)
path_components = self._path_components(file_path)
target_object = self.root
try:
for component in path_components:
if S_ISLNK(target_object.st_mode):
target_object = self.resolve(target_object.contents)
if not S_ISDIR(target_object.st_mode):
if not self.is_windows_fs:
self.raise_os_error(errno.ENOTDIR, file_path)
self.raise_os_error(errno.ENOENT, file_path)
target_object = target_object.get_entry(component)
if (not is_root() and check_read_perm and target_object and
not target_object.st_mode & PERM_READ):
self.raise_os_error(errno.EACCES, target_object.path)
except KeyError:
self.raise_os_error(errno.ENOENT, file_path)
return target_object
def get_object(self, file_path, check_read_perm=True):
"""Search for the specified filesystem object within the fake
filesystem.
Args:
file_path: Specifies the target FakeFile object to retrieve.
check_read_perm: If True, raises OSError if a parent directory
does not have read permission
Returns:
The FakeFile object corresponding to `file_path`.
Raises:
OSError: if the object is not found.
"""
file_path = make_string_path(file_path)
file_path = self.absnormpath(self._original_path(file_path))
return self.get_object_from_normpath(file_path, check_read_perm)
def resolve(self, file_path, follow_symlinks=True, allow_fd=False,
check_read_perm=True):
"""Search for the specified filesystem object, resolving all links.
Args:
file_path: Specifies the target FakeFile object to retrieve.
follow_symlinks: If `False`, the link itself is resolved,
otherwise the object linked to.
allow_fd: If `True`, `file_path` may be an open file descriptor
check_read_perm: If True, raises OSError if a parent directory
does not have read permission
Returns:
The FakeFile object corresponding to `file_path`.
Raises:
OSError: if the object is not found.
"""
if isinstance(file_path, int):
if allow_fd:
return self.get_open_file(file_path).get_object()
raise TypeError('path should be string, bytes or '
'os.PathLike (if supported), not int')
if follow_symlinks:
file_path = make_string_path(file_path)
return self.get_object_from_normpath(self.resolve_path(
file_path, check_read_perm), check_read_perm)
return self.lresolve(file_path)
def lresolve(self, path):
"""Search for the specified object, resolving only parent links.
This is analogous to the stat/lstat difference. This resolves links
*to* the object but not of the final object itself.
Args:
path: Specifies target FakeFile object to retrieve.
Returns:
The FakeFile object corresponding to path.
Raises:
OSError: if the object is not found.
"""
path = make_string_path(path)
if not path:
raise OSError(errno.ENOENT, path)
if path == self.root.name:
# The root directory will never be a link
return self.root
# remove trailing separator
path = self._path_without_trailing_separators(path)
if path == self._matching_string(path, '.'):
path = self.cwd
path = self._original_path(path)
parent_directory, child_name = self.splitpath(path)
if not parent_directory:
parent_directory = self.cwd
try:
parent_obj = self.resolve(parent_directory)
assert parent_obj
if not isinstance(parent_obj, FakeDirectory):
if not self.is_windows_fs and isinstance(parent_obj, FakeFile):
self.raise_os_error(errno.ENOTDIR, path)
self.raise_os_error(errno.ENOENT, path)
if not parent_obj.st_mode & PERM_READ:
self.raise_os_error(errno.EACCES, parent_directory)
return (parent_obj.get_entry(child_name) if child_name
else parent_obj)
except KeyError:
self.raise_os_error(errno.ENOENT, path)
def add_object(self, file_path, file_object):
"""Add a fake file or directory into the filesystem at file_path.
Args:
file_path: The path to the file to be added relative to self.
file_object: File or directory to add.
Raises:
OSError: if file_path does not correspond to a
directory.
"""
if not file_path:
target_directory = self.root
else:
target_directory = self.resolve(file_path)
if not S_ISDIR(target_directory.st_mode):
error = errno.ENOENT if self.is_windows_fs else errno.ENOTDIR
self.raise_os_error(error, file_path)
target_directory.add_entry(file_object)
def rename(self, old_file_path, new_file_path, force_replace=False):
"""Renames a FakeFile object at old_file_path to new_file_path,
preserving all properties.
Args:
old_file_path: Path to filesystem object to rename.
new_file_path: Path to where the filesystem object will live
after this call.
force_replace: If set and destination is an existing file, it
will be replaced even under Windows if the user has
permissions, otherwise replacement happens under Unix only.
Raises:
OSError: if old_file_path does not exist.
OSError: if new_file_path is an existing directory
(Windows, or Posix if old_file_path points to a regular file)
OSError: if old_file_path is a directory and new_file_path a file
OSError: if new_file_path is an existing file and force_replace
not set (Windows only).
OSError: if new_file_path is an existing file and could not be
removed (Posix, or Windows with force_replace set).
OSError: if dirname(new_file_path) does not exist.
OSError: if the file would be moved to another filesystem
(e.g. mount point).
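        Example usage (a minimal sketch):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> f = fs.create_file('/old.txt', contents='data')
        >>> fs.rename('/old.txt', '/new.txt')
        >>> fs.exists('/new.txt')
        True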
"""
ends_with_sep = self.ends_with_path_separator(old_file_path)
old_file_path = self.absnormpath(old_file_path)
new_file_path = self.absnormpath(new_file_path)
if not self.exists(old_file_path, check_link=True):
self.raise_os_error(errno.ENOENT, old_file_path, 2)
if ends_with_sep:
self._handle_broken_link_with_trailing_sep(old_file_path)
old_object = self.lresolve(old_file_path)
if not self.is_windows_fs:
self._handle_posix_dir_link_errors(
new_file_path, old_file_path, ends_with_sep)
if self.exists(new_file_path, check_link=True):
new_file_path = self._rename_to_existing_path(
force_replace, new_file_path, old_file_path,
old_object, ends_with_sep)
if not new_file_path:
return
old_dir, old_name = self.splitpath(old_file_path)
new_dir, new_name = self.splitpath(new_file_path)
if not self.exists(new_dir):
self.raise_os_error(errno.ENOENT, new_dir)
old_dir_object = self.resolve(old_dir)
new_dir_object = self.resolve(new_dir)
if old_dir_object.st_dev != new_dir_object.st_dev:
self.raise_os_error(errno.EXDEV, old_file_path)
if not S_ISDIR(new_dir_object.st_mode):
self.raise_os_error(
errno.EACCES if self.is_windows_fs else errno.ENOTDIR,
new_file_path)
if new_dir_object.has_parent_object(old_object):
self.raise_os_error(errno.EINVAL, new_file_path)
object_to_rename = old_dir_object.get_entry(old_name)
old_dir_object.remove_entry(old_name, recursive=False)
object_to_rename.name = new_name
new_name = new_dir_object._normalized_entryname(new_name)
if new_name in new_dir_object.contents:
# in case of overwriting remove the old entry first
new_dir_object.remove_entry(new_name)
new_dir_object.add_entry(object_to_rename)
def _handle_broken_link_with_trailing_sep(self, path):
# note that the check for trailing sep has to be done earlier
if self.islink(path):
if not self.exists(path):
error = (errno.ENOENT if self.is_macos else
errno.EINVAL if self.is_windows_fs else errno.ENOTDIR)
self.raise_os_error(error, path)
def _handle_posix_dir_link_errors(self, new_file_path, old_file_path,
ends_with_sep):
if (self.isdir(old_file_path, follow_symlinks=False) and
self.islink(new_file_path)):
self.raise_os_error(errno.ENOTDIR, new_file_path)
if (self.isdir(new_file_path, follow_symlinks=False) and
self.islink(old_file_path)):
if ends_with_sep and self.is_macos:
return
error = errno.ENOTDIR if ends_with_sep else errno.EISDIR
self.raise_os_error(error, new_file_path)
if (ends_with_sep and self.islink(old_file_path) and
old_file_path == new_file_path and not self.is_windows_fs):
self.raise_os_error(errno.ENOTDIR, new_file_path)
def _rename_to_existing_path(self, force_replace, new_file_path,
old_file_path, old_object, ends_with_sep):
new_object = self.get_object(new_file_path)
if old_file_path == new_file_path:
if not S_ISLNK(new_object.st_mode) and ends_with_sep:
error = errno.EINVAL if self.is_windows_fs else errno.ENOTDIR
self.raise_os_error(error, old_file_path)
return # Nothing to do here.
if old_object == new_object:
new_file_path = self._rename_same_object(
new_file_path, old_file_path)
elif (S_ISDIR(new_object.st_mode) or S_ISLNK(new_object.st_mode)):
self._handle_rename_error_for_dir_or_link(
force_replace, new_file_path,
new_object, old_object, ends_with_sep)
elif S_ISDIR(old_object.st_mode):
error = errno.EEXIST if self.is_windows_fs else errno.ENOTDIR
self.raise_os_error(error, new_file_path)
elif self.is_windows_fs and not force_replace:
self.raise_os_error(errno.EEXIST, new_file_path)
else:
self.remove_object(new_file_path)
return new_file_path
def _handle_rename_error_for_dir_or_link(self, force_replace,
new_file_path, new_object,
old_object, ends_with_sep):
if self.is_windows_fs:
if force_replace:
self.raise_os_error(errno.EACCES, new_file_path)
else:
self.raise_os_error(errno.EEXIST, new_file_path)
if not S_ISLNK(new_object.st_mode):
if new_object.contents:
if (not S_ISLNK(old_object.st_mode) or
not ends_with_sep or not self.is_macos):
self.raise_os_error(errno.ENOTEMPTY, new_file_path)
if S_ISREG(old_object.st_mode):
self.raise_os_error(errno.EISDIR, new_file_path)
def _rename_same_object(self, new_file_path, old_file_path):
do_rename = old_file_path.lower() == new_file_path.lower()
if not do_rename:
try:
real_old_path = self.resolve_path(old_file_path)
original_old_path = self._original_path(real_old_path)
real_new_path = self.resolve_path(new_file_path)
if (real_new_path == original_old_path and
(new_file_path == real_old_path) ==
(new_file_path.lower() ==
real_old_path.lower())):
real_object = self.resolve(old_file_path,
follow_symlinks=False)
do_rename = (os.path.basename(old_file_path) ==
real_object.name or not self.is_macos)
else:
do_rename = (real_new_path.lower() ==
real_old_path.lower())
if do_rename:
# only case is changed in case-insensitive file
# system - do the rename
parent, file_name = self.splitpath(new_file_path)
new_file_path = self.joinpaths(
self._original_path(parent), file_name)
except OSError:
# ResolvePath may fail due to symlink loop issues or
# similar - in this case just assume the paths
# to be different
pass
if not do_rename:
# hard links to the same file - nothing to do
new_file_path = None
return new_file_path
def remove_object(self, file_path):
"""Remove an existing file or directory.
Args:
file_path: The path to the file relative to self.
Raises:
OSError: if file_path does not correspond to an existing file, or
if part of the path refers to something other than a directory.
OSError: if the directory is in use (eg, if it is '/').
"""
file_path = self.absnormpath(self._original_path(file_path))
if self._is_root_path(file_path):
self.raise_os_error(errno.EBUSY, file_path)
try:
dirname, basename = self.splitpath(file_path)
target_directory = self.resolve(dirname, check_read_perm=False)
target_directory.remove_entry(basename)
except KeyError:
self.raise_os_error(errno.ENOENT, file_path)
except AttributeError:
self.raise_os_error(errno.ENOTDIR, file_path)
def make_string_path(self, path):
path = make_string_path(path)
os_sep = self._matching_string(path, os.sep)
fake_sep = self._matching_string(path, self.path_separator)
return path.replace(os_sep, fake_sep)
def create_dir(self, directory_path, perm_bits=PERM_DEF):
"""Create `directory_path`, and all the parent directories.
Helper method to set up your test faster.
Args:
directory_path: The full directory path to create.
perm_bits: The permission bits as set by `chmod`.
Returns:
The newly created FakeDirectory object.
Raises:
OSError: if the directory already exists.
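        Example usage (a minimal sketch; missing parent directories are
        created along the way):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> d = fs.create_dir('/foo/bar/baz')
        >>> fs.exists('/foo/bar')
        True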
"""
directory_path = self.make_string_path(directory_path)
directory_path = self.absnormpath(directory_path)
self._auto_mount_drive_if_needed(directory_path)
if self.exists(directory_path, check_link=True):
self.raise_os_error(errno.EEXIST, directory_path)
path_components = self._path_components(directory_path)
current_dir = self.root
new_dirs = []
for component in path_components:
directory = self._directory_content(current_dir, component)[1]
if not directory:
new_dir = FakeDirectory(component, filesystem=self)
new_dirs.append(new_dir)
current_dir.add_entry(new_dir)
current_dir = new_dir
else:
if S_ISLNK(directory.st_mode):
directory = self.resolve(directory.contents)
current_dir = directory
if directory.st_mode & S_IFDIR != S_IFDIR:
self.raise_os_error(errno.ENOTDIR, current_dir.path)
# set the permission after creating the directories
# to allow directory creation inside a read-only directory
for new_dir in new_dirs:
new_dir.st_mode = S_IFDIR | perm_bits
return current_dir
def create_file(self, file_path, st_mode=S_IFREG | PERM_DEF_FILE,
contents='', st_size=None, create_missing_dirs=True,
apply_umask=False, encoding=None, errors=None,
side_effect=None):
"""Create `file_path`, including all the parent directories along
the way.
This helper method can be used to set up tests more easily.
Args:
file_path: The path to the file to create.
st_mode: The stat constant representing the file type.
contents: the contents of the file. If not given and st_size is
None, an empty file is assumed.
st_size: file size; only valid if contents not given. If given,
the file is considered to be in "large file mode" and trying
to read from or write to the file will result in an exception.
create_missing_dirs: If `True`, auto create missing directories.
apply_umask: `True` if the current umask must be applied
on `st_mode`.
encoding: If `contents` is a unicode string, the encoding used
for serialization.
errors: The error mode used for encoding/decoding errors.
side_effect: function handle that is executed when file is written,
must accept the file object as an argument.
Returns:
The newly created FakeFile object.
Raises:
OSError: if the file already exists.
OSError: if the containing directory is required and missing.
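        Example usage (a minimal sketch for test setup; the path and
        contents are illustrative):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> f = fs.create_file('/etc/hosts', contents='127.0.0.1 localhost')
        >>> f.contents
        '127.0.0.1 localhost'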
"""
return self.create_file_internally(
file_path, st_mode, contents, st_size, create_missing_dirs,
apply_umask, encoding, errors, side_effect=side_effect)
def add_real_file(self, source_path, read_only=True, target_path=None):
"""Create `file_path`, including all the parent directories along the
way, for an existing real file. The contents of the real file are read
only on demand.
Args:
source_path: Path to an existing file in the real file system
read_only: If `True` (the default), writing to the fake file
raises an exception. Otherwise, writing to the file changes
the fake file only.
            target_path: If given, the path of the target file,
otherwise it is equal to `source_path`.
Returns:
the newly created FakeFile object.
Raises:
OSError: if the file does not exist in the real file system.
OSError: if the file already exists in the fake file system.
.. note:: On most systems, accessing the fake file's contents may
update both the real and fake files' `atime` (access time).
In this particular case, `add_real_file()` violates the rule
that `pyfakefs` must not modify the real file system.
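        Example usage (a sketch only; '/etc/hosts' stands in for any file
        that actually exists on the real file system):
        >>> fs = FakeFilesystem(path_separator='/')
        >>> fake_hosts = fs.add_real_file('/etc/hosts')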
"""
target_path = target_path or source_path
source_path = make_string_path(source_path)
target_path = self.make_string_path(target_path)
real_stat = os.stat(source_path)
fake_file = self.create_file_internally(target_path,
read_from_real_fs=True)
# for read-only mode, remove the write/executable permission bits
fake_file.stat_result.set_from_stat_result(real_stat)
if read_only:
fake_file.st_mode &= 0o777444
fake_file.file_path = source_path
self.change_disk_usage(fake_file.size, fake_file.name,
fake_file.st_dev)
return fake_file
def add_real_symlink(self, source_path, target_path=None):
"""Create a symlink at source_path (or target_path, if given). It will
point to the same path as the symlink on the real filesystem. Relative
symlinks will point relative to their new location. Absolute symlinks
will point to the same, absolute path as on the real filesystem.
Args:
source_path: The path to the existing symlink.
target_path: If given, the name of the symlink in the fake
                filesystem, otherwise the same as `source_path`.
Returns:
            the newly created FakeFile object.
        Raises:
            OSError: if the symlink does not exist in the real file system.
            OSError: if the symlink could not be created
                (see :py:meth:`create_file`).
            OSError: if the symlink already exists in the fake file system.
"""
source_path = self._path_without_trailing_separators(source_path)
if not os.path.exists(source_path) and not os.path.islink(source_path):
self.raise_os_error(errno.ENOENT, source_path)
target = os.readlink(source_path)
if target_path:
return self.create_symlink(target_path, target)
else:
return self.create_symlink(source_path, target)
def add_real_directory(self, source_path, read_only=True, lazy_read=True,
target_path=None):
"""Create a fake directory corresponding to the real directory at the
specified path. Add entries in the fake directory corresponding to
the entries in the real directory. Symlinks are supported.
Args:
source_path: The path to the existing directory.
read_only: If set, all files under the directory are treated as
read-only, e.g. a write access raises an exception;
otherwise, writing to the files changes the fake files only
as usually.
lazy_read: If set (default), directory contents are only read when
accessed, and only until the needed subdirectory level.
.. note:: This means that the file system size is only updated
at the time the directory contents are read; set this to
`False` only if you are dependent on accurate file system
size in your test
target_path: If given, the target directory, otherwise,
the target directory is the same as `source_path`.
Returns:
the newly created FakeDirectory object.
Raises:
OSError: if the directory does not exist in the real file system.
OSError: if the directory already exists in the fake file system.
"""
source_path = self._path_without_trailing_separators(source_path)
if not os.path.exists(source_path):
self.raise_os_error(errno.ENOENT, source_path)
target_path = target_path or source_path
if lazy_read:
parent_path = os.path.split(target_path)[0]
if self.exists(parent_path):
parent_dir = self.get_object(parent_path)
else:
parent_dir = self.create_dir(parent_path)
new_dir = FakeDirectoryFromRealDirectory(
source_path, self, read_only, target_path)
parent_dir.add_entry(new_dir)
else:
new_dir = self.create_dir(target_path)
for base, _, files in os.walk(source_path):
new_base = os.path.join(new_dir.path,
os.path.relpath(base, source_path))
for fileEntry in os.listdir(base):
abs_fileEntry = os.path.join(base, fileEntry)
if not os.path.islink(abs_fileEntry):
continue
self.add_real_symlink(
abs_fileEntry, os.path.join(new_base, fileEntry))
for fileEntry in files:
path = os.path.join(base, fileEntry)
if os.path.islink(path):
continue
self.add_real_file(path,
read_only,
os.path.join(new_base, fileEntry))
return new_dir
def add_real_paths(self, path_list, read_only=True, lazy_dir_read=True):
"""This convenience method adds multiple files and/or directories from
the real file system to the fake file system. See `add_real_file()` and
`add_real_directory()`.
Args:
path_list: List of file and directory paths in the real file
system.
            read_only: If set, all files and files under the directories
are treated as read-only, e.g. a write access raises an
exception; otherwise, writing to the files changes the fake
files only as usually.
lazy_dir_read: Uses lazy reading of directory contents if set
(see `add_real_directory`)
Raises:
OSError: if any of the files and directories in the list
does not exist in the real file system.
OSError: if any of the files and directories in the list
already exists in the fake file system.
"""
for path in path_list:
if os.path.isdir(path):
self.add_real_directory(path, read_only, lazy_dir_read)
else:
self.add_real_file(path, read_only)
def create_file_internally(self, file_path,
st_mode=S_IFREG | PERM_DEF_FILE,
contents='', st_size=None,
create_missing_dirs=True,
apply_umask=False, encoding=None, errors=None,
read_from_real_fs=False, raw_io=False,
side_effect=None):
"""Internal fake file creator that supports both normal fake files
and fake files based on real files.
Args:
file_path: path to the file to create.
st_mode: the stat.S_IF constant representing the file type.
contents: the contents of the file. If not given and st_size is
None, an empty file is assumed.
st_size: file size; only valid if contents not given. If given,
the file is considered to be in "large file mode" and trying
to read from or write to the file will result in an exception.
create_missing_dirs: if True, auto create missing directories.
apply_umask: whether or not the current umask must be applied
on st_mode.
encoding: if contents is a unicode string, the encoding used for
serialization.
errors: the error mode used for encoding/decoding errors
read_from_real_fs: if True, the contents are read from the real
file system on demand.
raw_io: `True` if called from low-level API (`os.open`)
side_effect: function handle that is executed when file is written,
must accept the file object as an argument.
"""
file_path = self.make_string_path(file_path)
file_path = self.absnormpath(file_path)
if not is_int_type(st_mode):
raise TypeError(
'st_mode must be of int type - did you mean to set contents?')
if self.exists(file_path, check_link=True):
self.raise_os_error(errno.EEXIST, file_path)
parent_directory, new_file = self.splitpath(file_path)
if not parent_directory:
parent_directory = self.cwd
self._auto_mount_drive_if_needed(parent_directory)
if not self.exists(parent_directory):
if not create_missing_dirs:
self.raise_os_error(errno.ENOENT, parent_directory)
self.create_dir(parent_directory)
else:
parent_directory = self._original_path(parent_directory)
if apply_umask:
st_mode &= ~self.umask
if read_from_real_fs:
file_object = FakeFileFromRealFile(file_path, filesystem=self,
side_effect=side_effect)
else:
file_object = FakeFile(new_file, st_mode, filesystem=self,
encoding=encoding, errors=errors,
side_effect=side_effect)
self.add_object(parent_directory, file_object)
if st_size is None and contents is None:
contents = ''
if (not read_from_real_fs and
(contents is not None or st_size is not None)):
try:
if st_size is not None:
file_object.set_large_file_size(st_size)
else:
file_object._set_initial_contents(contents)
except OSError:
self.remove_object(file_path)
raise
return file_object
# pylint: disable=unused-argument
def create_symlink(self, file_path, link_target, create_missing_dirs=True):
"""Create the specified symlink, pointed at the specified link target.
Args:
file_path: path to the symlink to create
link_target: the target of the symlink
create_missing_dirs: If `True`, any missing parent directories of
file_path will be created
Returns:
The newly created FakeFile object.
Raises:
OSError: if the symlink could not be created
(see :py:meth:`create_file`).
"""
# the link path cannot end with a path separator
file_path = self.make_string_path(file_path)
link_target = self.make_string_path(link_target)
file_path = self.normcase(file_path)
if self.ends_with_path_separator(file_path):
if self.exists(file_path):
self.raise_os_error(errno.EEXIST, file_path)
if self.exists(link_target):
if not self.is_windows_fs:
self.raise_os_error(errno.ENOENT, file_path)
else:
if self.is_windows_fs:
self.raise_os_error(errno.EINVAL, link_target)
if not self.exists(
self._path_without_trailing_separators(file_path),
check_link=True):
self.raise_os_error(errno.ENOENT, link_target)
if self.is_macos:
# to avoid EEXIST exception, remove the link
# if it already exists
if self.exists(file_path, check_link=True):
self.remove_object(file_path)
else:
self.raise_os_error(errno.EEXIST, link_target)
# resolve the link path only if it is not a link itself
if not self.islink(file_path):
file_path = self.resolve_path(file_path)
link_target = make_string_path(link_target)
return self.create_file_internally(
file_path, st_mode=S_IFLNK | PERM_DEF,
contents=link_target,
create_missing_dirs=create_missing_dirs,
raw_io=True)
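    # Illustrative sketch, not part of the implementation: `fs` is assumed
    # to be a FakeFilesystem instance and the paths are hypothetical.
    #
    #   fs.create_file('/target.txt', contents='data')
    #   fs.create_symlink('/link.txt', '/target.txt')
    #   assert fs.islink('/link.txt')
    #   assert fs.readlink('/link.txt') == '/target.txt'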
def link(self, old_path, new_path, follow_symlinks=True):
"""Create a hard link at new_path, pointing at old_path.
Args:
old_path: An existing link to the target file.
new_path: The destination path to create a new link at.
follow_symlinks: If False and old_path is a symlink, link the
symlink instead of the object it points to.
Returns:
The FakeFile object referred to by old_path.
Raises:
OSError: if something already exists at new_path.
OSError: if old_path is a directory.
OSError: if the parent directory doesn't exist.
"""
new_path_normalized = self.absnormpath(new_path)
if self.exists(new_path_normalized, check_link=True):
self.raise_os_error(errno.EEXIST, new_path)
new_parent_directory, new_basename = self.splitpath(
new_path_normalized)
if not new_parent_directory:
new_parent_directory = self.cwd
if not self.exists(new_parent_directory):
self.raise_os_error(errno.ENOENT, new_parent_directory)
if self.ends_with_path_separator(old_path):
error = errno.EINVAL if self.is_windows_fs else errno.ENOTDIR
self.raise_os_error(error, old_path)
if not self.is_windows_fs and self.ends_with_path_separator(new_path):
self.raise_os_error(errno.ENOENT, old_path)
# Retrieve the target file
try:
old_file = self.resolve(old_path, follow_symlinks=follow_symlinks)
except OSError:
self.raise_os_error(errno.ENOENT, old_path)
if old_file.st_mode & S_IFDIR:
self.raise_os_error(
errno.EACCES if self.is_windows_fs else errno.EPERM, old_path)
# abuse the name field to control the filename of the
# newly created link
old_file.name = new_basename
self.add_object(new_parent_directory, old_file)
return old_file
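    # Illustrative sketch (hypothetical paths, `fs` is a FakeFilesystem):
    # a hard link shares the underlying FakeFile object, so both paths
    # should resolve to the same inode.
    #
    #   fs.create_file('/a.txt', contents='x')
    #   fs.link('/a.txt', '/b.txt')
    #   assert fs.stat('/a.txt').st_ino == fs.stat('/b.txt').st_ino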
def _is_circular_link(self, link_obj):
try:
self.resolve_path(link_obj.contents)
except OSError as exc:
return exc.errno == errno.ELOOP
return False
def readlink(self, path):
"""Read the target of a symlink.
Args:
path: symlink to read the target of.
Returns:
the string representing the path to which the symbolic link points.
Raises:
TypeError: if path is None
OSError: (with errno=ENOENT) if path is not a valid path, or
(with errno=EINVAL) if path is valid, but is not a symlink,
or if the path ends with a path separator (Posix only)
"""
if path is None:
raise TypeError
link_obj = self.lresolve(path)
if S_IFMT(link_obj.st_mode) != S_IFLNK:
self.raise_os_error(errno.EINVAL, path)
if self.ends_with_path_separator(path):
if not self.is_windows_fs and self.exists(path):
self.raise_os_error(errno.EINVAL, path)
if not self.exists(link_obj.path):
if self.is_windows_fs:
error = errno.EINVAL
elif self._is_circular_link(link_obj):
if self.is_macos:
return link_obj.path
error = errno.ELOOP
else:
error = errno.ENOENT
self.raise_os_error(error, link_obj.path)
return link_obj.contents
def makedir(self, dir_name, mode=PERM_DEF):
"""Create a leaf Fake directory.
Args:
dir_name: (str) Name of directory to create.
Relative paths are assumed to be relative to '/'.
mode: (int) Mode to create directory with. This argument defaults
to 0o777. The umask is applied to this mode.
Raises:
OSError: if the directory name is invalid or parent directory is
read only or as per :py:meth:`add_object`.
"""
dir_name = make_string_path(dir_name)
ends_with_sep = self.ends_with_path_separator(dir_name)
dir_name = self._path_without_trailing_separators(dir_name)
if not dir_name:
self.raise_os_error(errno.ENOENT, '')
if self.is_windows_fs:
dir_name = self.absnormpath(dir_name)
parent_dir, _ = self.splitpath(dir_name)
if parent_dir:
base_dir = self.normpath(parent_dir)
ellipsis = self._matching_string(
parent_dir, self.path_separator + '..')
if parent_dir.endswith(ellipsis) and not self.is_windows_fs:
base_dir, dummy_dotdot, _ = parent_dir.partition(ellipsis)
if not self.exists(base_dir):
self.raise_os_error(errno.ENOENT, base_dir)
dir_name = self.absnormpath(dir_name)
if self.exists(dir_name, check_link=True):
if self.is_windows_fs and dir_name == self.path_separator:
error_nr = errno.EACCES
else:
error_nr = errno.EEXIST
if ends_with_sep and self.is_macos and not self.exists(dir_name):
# to avoid EEXIST exception, remove the link
self.remove_object(dir_name)
else:
self.raise_os_error(error_nr, dir_name)
head, tail = self.splitpath(dir_name)
self.add_object(
head, FakeDirectory(tail, mode & ~self.umask, filesystem=self))
def _path_without_trailing_separators(self, path):
while self.ends_with_path_separator(path):
path = path[:-1]
return path
def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=False):
"""Create a leaf Fake directory and create any non-existent
parent dirs.
Args:
dir_name: (str) Name of directory to create.
mode: (int) Mode to create directory (and any necessary parent
directories) with. This argument defaults to 0o777.
The umask is applied to this mode.
exist_ok: (boolean) If exist_ok is False (the default), an OSError is
raised if the target directory already exists.
Raises:
OSError: if the directory already exists and exist_ok=False,
or as per :py:meth:`create_dir`.
"""
if not dir_name:
self.raise_os_error(errno.ENOENT, '')
dir_name = to_string(dir_name)
ends_with_sep = self.ends_with_path_separator(dir_name)
dir_name = self.absnormpath(dir_name)
if (ends_with_sep and self.is_macos and
self.exists(dir_name, check_link=True) and
not self.exists(dir_name)):
# to avoid EEXIST exception, remove the link
self.remove_object(dir_name)
path_components = self._path_components(dir_name)
        # Raise a permission denied error if the first existing directory
        # is not writable.
current_dir = self.root
for component in path_components:
if (component not in current_dir.contents
or not isinstance(current_dir.contents, dict)):
break
else:
current_dir = current_dir.contents[component]
try:
self.create_dir(dir_name, mode & ~self.umask)
except OSError as e:
if e.errno == errno.EACCES:
# permission denied - propagate exception
raise
if (not exist_ok or
not isinstance(self.resolve(dir_name), FakeDirectory)):
if self.is_windows_fs and e.errno == errno.ENOTDIR:
e.errno = errno.ENOENT
self.raise_os_error(e.errno, e.filename)
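    # Illustrative sketch (hypothetical paths, `fs` is a FakeFilesystem):
    #
    #   fs.makedirs('/foo/bar/baz')                 # creates all three levels
    #   fs.makedirs('/foo/bar/baz', exist_ok=True)  # no error raised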
def _is_of_type(self, path, st_flag, follow_symlinks=True):
"""Helper function to implement isdir(), islink(), etc.
See the stat(2) man page for valid stat.S_I* flag values
Args:
path: Path to file to stat and test
st_flag: The stat.S_I* flag checked for the file's st_mode
Returns:
(boolean) `True` if the st_flag is set in path's st_mode.
Raises:
TypeError: if path is None
"""
path = make_string_path(path)
if path is None:
raise TypeError
try:
obj = self.resolve(path, follow_symlinks)
if obj:
self.raise_for_filepath_ending_with_separator(
path, obj, macos_handling=not follow_symlinks)
return S_IFMT(obj.st_mode) == st_flag
except OSError:
return False
return False
def isdir(self, path, follow_symlinks=True):
"""Determine if path identifies a directory.
Args:
path: Path to filesystem object.
Returns:
`True` if path points to a directory (following symlinks).
Raises:
TypeError: if path is None.
"""
return self._is_of_type(path, S_IFDIR, follow_symlinks)
def isfile(self, path, follow_symlinks=True):
"""Determine if path identifies a regular file.
Args:
path: Path to filesystem object.
Returns:
`True` if path points to a regular file (following symlinks).
Raises:
TypeError: if path is None.
"""
return self._is_of_type(path, S_IFREG, follow_symlinks)
def islink(self, path):
"""Determine if path identifies a symbolic link.
Args:
path: Path to filesystem object.
Returns:
`True` if path points to a symlink (S_IFLNK set in st_mode)
Raises:
TypeError: if path is None.
"""
return self._is_of_type(path, S_IFLNK, follow_symlinks=False)
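    # Illustrative sketch (hypothetical paths, `fs` is a FakeFilesystem):
    #
    #   fs.create_dir('/data')
    #   fs.create_file('/data/f.txt')
    #   assert fs.isdir('/data')
    #   assert fs.isfile('/data/f.txt')
    #   assert not fs.islink('/data/f.txt')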
def confirmdir(self, target_directory):
"""Test that the target is actually a directory, raising OSError
if not.
Args:
target_directory: Path to the target directory within the fake
filesystem.
Returns:
The FakeDirectory object corresponding to target_directory.
Raises:
OSError: if the target is not a directory.
"""
directory = self.resolve(target_directory)
if not directory.st_mode & S_IFDIR:
self.raise_os_error(errno.ENOTDIR, target_directory, 267)
return directory
def remove(self, path):
"""Remove the FakeFile object at the specified file path.
Args:
path: Path to file to be removed.
Raises:
OSError: if path points to a directory.
OSError: if path does not exist.
OSError: if removal failed.
"""
norm_path = self.absnormpath(path)
if self.ends_with_path_separator(path):
self._handle_broken_link_with_trailing_sep(norm_path)
if self.exists(norm_path):
obj = self.resolve(norm_path, check_read_perm=False)
if S_IFMT(obj.st_mode) == S_IFDIR:
link_obj = self.lresolve(norm_path)
if S_IFMT(link_obj.st_mode) != S_IFLNK:
if self.is_windows_fs:
error = errno.EACCES
elif self.is_macos:
error = errno.EPERM
else:
error = errno.EISDIR
self.raise_os_error(error, norm_path)
norm_path = make_string_path(norm_path)
if path.endswith(self.path_separator):
if self.is_windows_fs:
error = errno.EACCES
elif self.is_macos:
error = errno.EPERM
else:
error = errno.ENOTDIR
self.raise_os_error(error, norm_path)
else:
self.raise_for_filepath_ending_with_separator(path, obj)
self.remove_object(norm_path)
def rmdir(self, target_directory, allow_symlink=False):
"""Remove a leaf Fake directory.
Args:
target_directory: (str) Name of directory to remove.
allow_symlink: (bool) if `target_directory` is a symlink,
the function just returns, otherwise it raises (Posix only)
Raises:
OSError: if target_directory does not exist.
OSError: if target_directory does not point to a directory.
OSError: if removal failed per FakeFilesystem.RemoveObject.
Cannot remove '.'.
"""
if target_directory in (b'.', u'.'):
error_nr = errno.EACCES if self.is_windows_fs else errno.EINVAL
self.raise_os_error(error_nr, target_directory)
ends_with_sep = self.ends_with_path_separator(target_directory)
target_directory = self.absnormpath(target_directory)
if self.confirmdir(target_directory):
if not self.is_windows_fs and self.islink(target_directory):
if allow_symlink:
return
if not ends_with_sep or not self.is_macos:
self.raise_os_error(errno.ENOTDIR, target_directory)
dir_object = self.resolve(target_directory)
if dir_object.contents:
self.raise_os_error(errno.ENOTEMPTY, target_directory)
self.remove_object(target_directory)
def listdir(self, target_directory):
"""Return a list of file names in target_directory.
Args:
target_directory: Path to the target directory within the
fake filesystem.
Returns:
A list of file names within the target directory in arbitrary
order.
Raises:
OSError: if the target is not a directory.
"""
target_directory = self.resolve_path(target_directory, allow_fd=True)
directory = self.confirmdir(target_directory)
directory_contents = directory.contents
return list(directory_contents.keys())
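    # Illustrative sketch (hypothetical paths, `fs` is a FakeFilesystem):
    #
    #   fs.create_file('/tmp/a')
    #   fs.create_file('/tmp/b')
    #   assert sorted(fs.listdir('/tmp')) == ['a', 'b']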
def __str__(self):
return str(self.root)
def _add_standard_streams(self):
self._add_open_file(StandardStreamWrapper(sys.stdin))
self._add_open_file(StandardStreamWrapper(sys.stdout))
self._add_open_file(StandardStreamWrapper(sys.stderr))
Deprecator.add(FakeFilesystem, FakeFilesystem.get_disk_usage, 'GetDiskUsage')
Deprecator.add(FakeFilesystem, FakeFilesystem.set_disk_usage, 'SetDiskUsage')
Deprecator.add(FakeFilesystem,
FakeFilesystem.change_disk_usage, 'ChangeDiskUsage')
Deprecator.add(FakeFilesystem, FakeFilesystem.add_mount_point, 'AddMountPoint')
Deprecator.add(FakeFilesystem, FakeFilesystem.stat, 'GetStat')
Deprecator.add(FakeFilesystem, FakeFilesystem.chmod, 'ChangeMode')
Deprecator.add(FakeFilesystem, FakeFilesystem.utime, 'UpdateTime')
Deprecator.add(FakeFilesystem, FakeFilesystem._add_open_file, 'AddOpenFile')
Deprecator.add(FakeFilesystem,
FakeFilesystem._close_open_file, 'CloseOpenFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.has_open_file, 'HasOpenFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.get_open_file, 'GetOpenFile')
Deprecator.add(FakeFilesystem,
FakeFilesystem.normcase, 'NormalizePathSeparator')
Deprecator.add(FakeFilesystem, FakeFilesystem.normpath, 'CollapsePath')
Deprecator.add(FakeFilesystem, FakeFilesystem._original_path, 'NormalizeCase')
Deprecator.add(FakeFilesystem, FakeFilesystem.absnormpath, 'NormalizePath')
Deprecator.add(FakeFilesystem, FakeFilesystem.splitpath, 'SplitPath')
Deprecator.add(FakeFilesystem, FakeFilesystem.splitdrive, 'SplitDrive')
Deprecator.add(FakeFilesystem, FakeFilesystem.joinpaths, 'JoinPaths')
Deprecator.add(FakeFilesystem,
FakeFilesystem._path_components, 'GetPathComponents')
Deprecator.add(FakeFilesystem, FakeFilesystem._starts_with_drive_letter,
'StartsWithDriveLetter')
Deprecator.add(FakeFilesystem, FakeFilesystem.exists, 'Exists')
Deprecator.add(FakeFilesystem, FakeFilesystem.resolve_path, 'ResolvePath')
Deprecator.add(FakeFilesystem, FakeFilesystem.get_object_from_normpath,
'GetObjectFromNormalizedPath')
Deprecator.add(FakeFilesystem, FakeFilesystem.get_object, 'GetObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.resolve, 'ResolveObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.lresolve, 'LResolveObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.add_object, 'AddObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.remove_object, 'RemoveObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.rename, 'RenameObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.create_dir, 'CreateDirectory')
Deprecator.add(FakeFilesystem, FakeFilesystem.create_file, 'CreateFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.create_symlink, 'CreateLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.link, 'CreateHardLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.readlink, 'ReadLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.makedir, 'MakeDirectory')
Deprecator.add(FakeFilesystem, FakeFilesystem.makedirs, 'MakeDirectories')
Deprecator.add(FakeFilesystem, FakeFilesystem.isdir, 'IsDir')
Deprecator.add(FakeFilesystem, FakeFilesystem.isfile, 'IsFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.islink, 'IsLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.confirmdir, 'ConfirmDir')
Deprecator.add(FakeFilesystem, FakeFilesystem.remove, 'RemoveFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.rmdir, 'RemoveDirectory')
Deprecator.add(FakeFilesystem, FakeFilesystem.listdir, 'ListDir')
class FakePathModule:
"""Faked os.path module replacement.
FakePathModule should *only* be instantiated by FakeOsModule. See the
FakeOsModule docstring for details.
"""
_OS_PATH_COPY = _copy_module(os.path)
@staticmethod
def dir():
"""Return the list of patched function names. Used for patching
functions imported from the module.
"""
return [
'abspath', 'dirname', 'exists', 'expanduser', 'getatime',
'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile',
'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath',
'realpath', 'relpath', 'split', 'splitdrive', 'samefile'
]
def __init__(self, filesystem, os_module):
"""Init.
Args:
filesystem: FakeFilesystem used to provide file system information
"""
self.filesystem = filesystem
self._os_path = self._OS_PATH_COPY
self._os_path.os = self.os = os_module
self.sep = self.filesystem.path_separator
self.altsep = self.filesystem.alternative_path_separator
def exists(self, path):
"""Determine whether the file object exists within the fake filesystem.
Args:
path: The path to the file object.
Returns:
(bool) `True` if the file exists.
"""
return self.filesystem.exists(path)
def lexists(self, path):
"""Test whether a path exists. Returns True for broken symbolic links.
Args:
path: path to the symlink object.
Returns:
bool (if file exists).
"""
return self.filesystem.exists(path, check_link=True)
def getsize(self, path):
"""Return the file object size in bytes.
Args:
path: path to the file object.
Returns:
file size in bytes.
"""
file_obj = self.filesystem.resolve(path)
if (self.filesystem.ends_with_path_separator(path) and
S_IFMT(file_obj.st_mode) != S_IFDIR):
error_nr = (errno.EINVAL if self.filesystem.is_windows_fs
else errno.ENOTDIR)
self.filesystem.raise_os_error(error_nr, path)
return file_obj.st_size
def isabs(self, path):
"""Return True if path is an absolute pathname."""
if self.filesystem.is_windows_fs:
path = self.splitdrive(path)[1]
path = make_string_path(path)
sep = self.filesystem._path_separator(path)
altsep = self.filesystem._alternative_path_separator(path)
if self.filesystem.is_windows_fs:
return len(path) > 0 and path[:1] in (sep, altsep)
else:
return (path.startswith(sep) or
altsep is not None and path.startswith(altsep))
def isdir(self, path):
"""Determine if path identifies a directory."""
return self.filesystem.isdir(path)
def isfile(self, path):
"""Determine if path identifies a regular file."""
return self.filesystem.isfile(path)
def islink(self, path):
"""Determine if path identifies a symbolic link.
Args:
path: Path to filesystem object.
Returns:
`True` if path points to a symbolic link.
Raises:
TypeError: if path is None.
"""
return self.filesystem.islink(path)
def getmtime(self, path):
"""Returns the modification time of the fake file.
Args:
path: the path to fake file.
Returns:
(int, float) the modification time of the fake file
in number of seconds since the epoch.
Raises:
OSError: if the file does not exist.
"""
try:
file_obj = self.filesystem.resolve(path)
return file_obj.st_mtime
except OSError:
self.filesystem.raise_os_error(errno.ENOENT, winerror=3)
def getatime(self, path):
"""Returns the last access time of the fake file.
Note: Access time is not set automatically in fake filesystem
on access.
Args:
path: the path to fake file.
Returns:
(int, float) the access time of the fake file in number of seconds
since the epoch.
Raises:
OSError: if the file does not exist.
"""
try:
file_obj = self.filesystem.resolve(path)
except OSError:
self.filesystem.raise_os_error(errno.ENOENT)
return file_obj.st_atime
def getctime(self, path):
"""Returns the creation time of the fake file.
Args:
path: the path to fake file.
Returns:
(int, float) the creation time of the fake file in number of
seconds since the epoch.
Raises:
OSError: if the file does not exist.
"""
try:
file_obj = self.filesystem.resolve(path)
except OSError:
self.filesystem.raise_os_error(errno.ENOENT)
return file_obj.st_ctime
def abspath(self, path):
"""Return the absolute version of a path."""
def getcwd():
"""Return the current working directory."""
# pylint: disable=undefined-variable
if isinstance(path, bytes):
return self.os.getcwdb()
else:
return self.os.getcwd()
path = make_string_path(path)
sep = self.filesystem._path_separator(path)
altsep = self.filesystem._alternative_path_separator(path)
if not self.isabs(path):
path = self.join(getcwd(), path)
elif (self.filesystem.is_windows_fs and
path.startswith(sep) or altsep is not None and
path.startswith(altsep)):
cwd = getcwd()
if self.filesystem._starts_with_drive_letter(cwd):
path = self.join(cwd[:2], path)
return self.normpath(path)
def join(self, *p):
"""Return the completed path with a separator of the parts."""
return self.filesystem.joinpaths(*p)
def split(self, path):
"""Split the path into the directory and the filename of the path.
"""
return self.filesystem.splitpath(path)
def splitdrive(self, path):
"""Split the path into the drive part and the rest of the path, if
supported."""
return self.filesystem.splitdrive(path)
def normpath(self, path):
"""Normalize path, eliminating double slashes, etc."""
return self.filesystem.normpath(path)
def normcase(self, path):
"""Convert to lower case under windows, replaces additional path
separator."""
path = self.filesystem.normcase(path)
if self.filesystem.is_windows_fs:
path = path.lower()
return path
def relpath(self, path, start=None):
"""We mostly rely on the native implementation and adapt the
path separator."""
if not path:
raise ValueError("no path specified")
path = make_string_path(path)
if start is not None:
start = make_string_path(start)
else:
start = self.filesystem.cwd
if self.filesystem.alternative_path_separator is not None:
path = path.replace(self.filesystem.alternative_path_separator,
self._os_path.sep)
start = start.replace(self.filesystem.alternative_path_separator,
self._os_path.sep)
path = path.replace(self.filesystem.path_separator, self._os_path.sep)
start = start.replace(
self.filesystem.path_separator, self._os_path.sep)
path = self._os_path.relpath(path, start)
return path.replace(self._os_path.sep, self.filesystem.path_separator)
def realpath(self, filename):
"""Return the canonical path of the specified filename, eliminating any
symbolic links encountered in the path.
"""
if self.filesystem.is_windows_fs:
return self.abspath(filename)
filename = make_string_path(filename)
path, ok = self._joinrealpath(filename[:0], filename, {})
return self.abspath(path)
def samefile(self, path1, path2):
"""Return whether path1 and path2 point to the same file.
Args:
path1: first file path or path object (Python >=3.6)
path2: second file path or path object (Python >=3.6)
Raises:
OSError: if one of the paths does not point to an existing
file system object.
"""
stat1 = self.filesystem.stat(path1)
stat2 = self.filesystem.stat(path2)
return (stat1.st_ino == stat2.st_ino and
stat1.st_dev == stat2.st_dev)
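    # Illustrative sketch (hypothetical paths; `fake_os` is assumed to be
    # a FakeOsModule wrapping the same filesystem as this path module):
    #
    #   fake_os.filesystem.create_file('/a.txt')
    #   fake_os.link('/a.txt', '/b.txt')
    #   assert fake_os.path.samefile('/a.txt', '/b.txt')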
def _joinrealpath(self, path, rest, seen):
"""Join two paths, normalizing and eliminating any symbolic links
encountered in the second path.
Taken from Python source and adapted.
"""
curdir = self.filesystem._matching_string(path, '.')
pardir = self.filesystem._matching_string(path, '..')
sep = self.filesystem._path_separator(path)
if self.isabs(rest):
rest = rest[1:]
path = sep
while rest:
name, _, rest = rest.partition(sep)
if not name or name == curdir:
# current dir
continue
if name == pardir:
# parent dir
if path:
path, name = self.filesystem.splitpath(path)
if name == pardir:
path = self.filesystem.joinpaths(path, pardir, pardir)
else:
path = pardir
continue
newpath = self.filesystem.joinpaths(path, name)
if not self.filesystem.islink(newpath):
path = newpath
continue
# Resolve the symbolic link
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None:
# use cached value
continue
# The symlink is not resolved, so we must have a symlink loop.
# Return already resolved part + rest of the path unchanged.
return self.filesystem.joinpaths(newpath, rest), False
seen[newpath] = None # not resolved symlink
path, ok = self._joinrealpath(
path, self.filesystem.readlink(newpath), seen)
if not ok:
return self.filesystem.joinpaths(path, rest), False
seen[newpath] = path # resolved symlink
return path, True
def dirname(self, path):
"""Returns the first part of the result of `split()`."""
return self.split(path)[0]
def expanduser(self, path):
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
"""
return self._os_path.expanduser(path).replace(
self._os_path.sep, self.sep)
def ismount(self, path):
"""Return true if the given path is a mount point.
Args:
path: Path to filesystem object to be checked
Returns:
`True` if path is a mount point added to the fake file system.
Under Windows also returns True for drive and UNC roots
(independent of their existence).
"""
path = make_string_path(path)
if not path:
return False
normed_path = self.filesystem.absnormpath(path)
sep = self.filesystem._path_separator(path)
if self.filesystem.is_windows_fs:
if self.filesystem.alternative_path_separator is not None:
path_seps = (
sep, self.filesystem._alternative_path_separator(path)
)
else:
path_seps = (sep,)
drive, rest = self.filesystem.splitdrive(normed_path)
if drive and drive[:1] in path_seps:
return (not rest) or (rest in path_seps)
if rest in path_seps:
return True
for mount_point in self.filesystem.mount_points:
if normed_path.rstrip(sep) == mount_point.rstrip(sep):
return True
return False
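    # Illustrative sketch (hypothetical paths; `filesystem` is the
    # FakeFilesystem backing this path module, `fake_os` a FakeOsModule):
    #
    #   filesystem.add_mount_point('/mnt/data')
    #   assert fake_os.path.ismount('/mnt/data')
    #   assert not fake_os.path.ismount('/mnt/data/sub')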
def __getattr__(self, name):
"""Forwards any non-faked calls to the real os.path."""
return getattr(self._os_path, name)
class FakeOsModule:
"""Uses FakeFilesystem to provide a fake os module replacement.
Do not create os.path separately from os, as there is a necessary circular
dependency between os and os.path to replicate the behavior of the standard
Python modules. What you want to do is to just let FakeOsModule take care
of `os.path` setup itself.
# You always want to do this.
filesystem = fake_filesystem.FakeFilesystem()
my_os_module = fake_filesystem.FakeOsModule(filesystem)
"""
devnull = None
@staticmethod
def dir():
"""Return the list of patched function names. Used for patching
functions imported from the module.
"""
dir = [
'access', 'chdir', 'chmod', 'chown', 'close', 'fstat', 'fsync',
'getcwd', 'lchmod', 'link', 'listdir', 'lstat', 'makedirs',
'mkdir', 'mknod', 'open', 'read', 'readlink', 'remove',
'removedirs', 'rename', 'rmdir', 'stat', 'symlink', 'umask',
'unlink', 'utime', 'walk', 'write', 'getcwdb', 'replace'
]
if sys.platform.startswith('linux'):
dir += [
'fdatasync', 'getxattr', 'listxattr',
'removexattr', 'setxattr'
]
if use_scandir:
dir += ['scandir']
return dir
def __init__(self, filesystem):
"""Also exposes self.path (to fake os.path).
Args:
filesystem: FakeFilesystem used to provide file system information
"""
self.filesystem = filesystem
self.sep = filesystem.path_separator
self.altsep = filesystem.alternative_path_separator
self.linesep = filesystem.line_separator()
self._os_module = os
self.path = FakePathModule(self.filesystem, self)
        self.__class__.devnull = ('/dev/nul' if filesystem.is_windows_fs
                                  else '/dev/null')
def fdopen(self, fd, *args, **kwargs):
"""Redirector to open() builtin function.
Args:
fd: The file descriptor of the file to open.
*args: Pass through args.
**kwargs: Pass through kwargs.
Returns:
            File object corresponding to `fd`.
Raises:
TypeError: if file descriptor is not an integer.
"""
if not is_int_type(fd):
raise TypeError('an integer is required')
return FakeFileOpen(self.filesystem)(fd, *args, **kwargs)
def _umask(self):
"""Return the current umask."""
if self.filesystem.is_windows_fs:
# windows always returns 0 - it has no real notion of umask
return 0
if sys.platform == 'win32':
# if we are testing Unix under Windows we assume a default mask
return 0o002
else:
            # under Unix, we return the real umask;
            # since there is no pure getter for umask, we first set a
            # new mask to obtain the previous one and then restore it
mask = os.umask(0)
os.umask(mask)
return mask
def open(self, path, flags, mode=None, *, dir_fd=None):
"""Return the file descriptor for a FakeFile.
Args:
path: the path to the file
flags: low-level bits to indicate io operation
mode: bits to define default permissions
Note: only basic modes are supported, OS-specific modes are
ignored
dir_fd: If not `None`, the file descriptor of a directory,
with `file_path` being relative to this directory.
Returns:
A file descriptor.
Raises:
OSError: if the path cannot be found
ValueError: if invalid mode is given
NotImplementedError: if `os.O_EXCL` is used without `os.O_CREAT`
"""
path = self._path_with_dir_fd(path, self.open, dir_fd)
if mode is None:
if self.filesystem.is_windows_fs:
mode = 0o666
else:
mode = 0o777 & ~self._umask()
has_tmpfile_flag = (hasattr(os, 'O_TMPFILE') and
flags & getattr(os, 'O_TMPFILE'))
open_modes = _OpenModes(
must_exist=not flags & os.O_CREAT and not has_tmpfile_flag,
can_read=not flags & os.O_WRONLY,
can_write=flags & (os.O_RDWR | os.O_WRONLY) != 0,
truncate=flags & os.O_TRUNC != 0,
append=flags & os.O_APPEND != 0,
must_not_exist=flags & os.O_EXCL != 0
)
if open_modes.must_not_exist and open_modes.must_exist:
raise NotImplementedError(
'O_EXCL without O_CREAT mode is not supported')
if has_tmpfile_flag:
            # this is a workaround for tempfiles that do not have a filename;
            # as we do not support this directly, we just add a unique
            # filename and set the file to delete on close
path = self.filesystem.joinpaths(
path, str(uuid.uuid4()))
if (not self.filesystem.is_windows_fs and
self.filesystem.exists(path)):
# handle opening directory - only allowed under Posix
# with read-only mode
obj = self.filesystem.resolve(path)
if isinstance(obj, FakeDirectory):
if ((not open_modes.must_exist and
not self.filesystem.is_macos)
or open_modes.can_write):
self.filesystem.raise_os_error(errno.EISDIR, path)
dir_wrapper = FakeDirWrapper(obj, path, self.filesystem)
file_des = self.filesystem._add_open_file(dir_wrapper)
dir_wrapper.filedes = file_des
return file_des
# low level open is always binary
str_flags = 'b'
delete_on_close = has_tmpfile_flag
if hasattr(os, 'O_TEMPORARY'):
delete_on_close = flags & os.O_TEMPORARY == os.O_TEMPORARY
fake_file = FakeFileOpen(
self.filesystem, delete_on_close=delete_on_close, raw_io=True)(
path, str_flags, open_modes=open_modes)
if fake_file.file_object != self.filesystem.dev_null:
self.chmod(path, mode)
return fake_file.fileno()
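    # Illustrative sketch of the low-level API (hypothetical path;
    # `fake_os` is a FakeOsModule instance):
    #
    #   fd = fake_os.open('/new.txt', os.O_CREAT | os.O_WRONLY)
    #   fake_os.write(fd, b'payload')
    #   fake_os.close(fd)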
def close(self, fd):
"""Close a file descriptor.
Args:
fd: An integer file descriptor for the file object requested.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer.
"""
file_handle = self.filesystem.get_open_file(fd)
file_handle.close()
def read(self, fd, n):
"""Read number of bytes from a file descriptor, returns bytes read.
Args:
fd: An integer file descriptor for the file object requested.
n: Number of bytes to read from file.
Returns:
Bytes read from file.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer.
"""
file_handle = self.filesystem.get_open_file(fd)
file_handle.raw_io = True
return file_handle.read(n)
def write(self, fd, contents):
"""Write string to file descriptor, returns number of bytes written.
Args:
fd: An integer file descriptor for the file object requested.
contents: String of bytes to write to file.
Returns:
Number of bytes written.
Raises:
OSError: bad file descriptor.
TypeError: if file descriptor is not an integer.
"""
file_handle = self.filesystem.get_open_file(fd)
if isinstance(file_handle, FakeDirWrapper):
self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path)
if isinstance(file_handle, FakePipeWrapper):
return file_handle.write(contents)
file_handle.raw_io = True
file_handle._sync_io()
file_handle.update_flush_pos()
file_handle.write(contents)
file_handle.flush()
return len(contents)
def pipe(self):
read_fd, write_fd = os.pipe()
read_wrapper = FakePipeWrapper(self.filesystem, read_fd)
file_des = self.filesystem._add_open_file(read_wrapper)
read_wrapper.filedes = file_des
write_wrapper = FakePipeWrapper(self.filesystem, write_fd)
file_des = self.filesystem._add_open_file(write_wrapper)
write_wrapper.filedes = file_des
return read_wrapper.filedes, write_wrapper.filedes
@staticmethod
def stat_float_times(newvalue=None):
"""Determine whether a file's time stamps are reported as floats
or ints.
Calling without arguments returns the current value. The value is
shared by all instances of FakeOsModule.
Args:
newvalue: If `True`, mtime, ctime, atime are reported as floats.
Otherwise, they are returned as ints (rounding down).
"""
return FakeStatResult.stat_float_times(newvalue)
def fstat(self, fd):
"""Return the os.stat-like tuple for the FakeFile object of file_des.
Args:
fd: The file descriptor of filesystem object to retrieve.
Returns:
The FakeStatResult object corresponding to entry_path.
Raises:
OSError: if the filesystem object doesn't exist.
"""
# stat should return the tuple representing return value of os.stat
file_object = self.filesystem.get_open_file(fd).get_object()
return file_object.stat_result.copy()
def umask(self, mask):
"""Change the current umask.
Args:
mask: (int) The new umask value.
Returns:
The old umask.
Raises:
TypeError: if new_mask is of an invalid type.
"""
if not is_int_type(mask):
raise TypeError('an integer is required')
old_umask = self.filesystem.umask
self.filesystem.umask = mask
return old_umask
def chdir(self, path):
"""Change current working directory to target directory.
Args:
path: The path to new current working directory.
Raises:
OSError: if user lacks permission to enter the argument directory
or if the target is not a directory.
"""
path = self.filesystem.resolve_path(
path, allow_fd=True)
self.filesystem.confirmdir(path)
directory = self.filesystem.resolve(path)
# A full implementation would check permissions all the way
# up the tree.
        if not is_root() and not directory.st_mode & PERM_EXE:
self.filesystem.raise_os_error(errno.EACCES, directory)
self.filesystem.cwd = path
def getcwd(self):
"""Return current working directory."""
return self.filesystem.cwd
def getcwdb(self):
"""Return current working directory as bytes."""
return bytes(
self.filesystem.cwd, locale.getpreferredencoding(False))
def listdir(self, path):
"""Return a list of file names in target_directory.
Args:
path: Path to the target directory within the fake
filesystem.
Returns:
A list of file names within the target directory in arbitrary
order.
Raises:
OSError: if the target is not a directory.
"""
return self.filesystem.listdir(path)
XATTR_CREATE = 1
XATTR_REPLACE = 2
def getxattr(self, path, attribute, *, follow_symlinks=True):
"""Return the value of the given extended filesystem attribute for
`path`.
Args:
path: File path, file descriptor or path-like object (for
Python >= 3.6).
attribute: (str or bytes) The attribute name.
follow_symlinks: (bool) If True (the default), symlinks in the
path are traversed.
Returns:
The contents of the extended attribute as bytes or None if
the attribute does not exist.
Raises:
OSError: if the path does not exist.
"""
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'getxattr'")
if isinstance(attribute, bytes):
attribute = attribute.decode(sys.getfilesystemencoding())
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
return file_obj.xattr.get(attribute)
def listxattr(self, path=None, *, follow_symlinks=True):
"""Return a list of the extended filesystem attributes on `path`.
Args:
path: File path, file descriptor or path-like object (for
Python >= 3.6). If None, the current directory is used.
follow_symlinks: (bool) If True (the default), symlinks in the
path are traversed.
Returns:
A list of all attribute names for the given path as str.
Raises:
OSError: if the path does not exist.
"""
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'listxattr'")
if path is None:
path = self.getcwd()
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
return list(file_obj.xattr.keys())
def removexattr(self, path, attribute, *, follow_symlinks=True):
"""Removes the extended filesystem attribute attribute from `path`.
Args:
path: File path, file descriptor or path-like object (for
Python >= 3.6).
attribute: (str or bytes) The attribute name.
follow_symlinks: (bool) If True (the default), symlinks in the
path are traversed.
Raises:
OSError: if the path does not exist.
"""
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'removexattr'")
if isinstance(attribute, bytes):
attribute = attribute.decode(sys.getfilesystemencoding())
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
if attribute in file_obj.xattr:
del file_obj.xattr[attribute]
def setxattr(self, path, attribute, value,
flags=0, *, follow_symlinks=True):
"""Sets the value of the given extended filesystem attribute for
`path`.
Args:
path: File path, file descriptor or path-like object (for
Python >= 3.6).
attribute: The attribute name (str or bytes).
value: (byte-like) The value to be set.
follow_symlinks: (bool) If True (the default), symlinks in the
path are traversed.
Raises:
OSError: if the path does not exist.
TypeError: if `value` is not a byte-like object.
"""
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'setxattr'")
if isinstance(attribute, bytes):
attribute = attribute.decode(sys.getfilesystemencoding())
if not is_byte_string(value):
raise TypeError('a bytes-like object is required')
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
exists = attribute in file_obj.xattr
        if exists and flags == self.XATTR_CREATE:
            self.filesystem.raise_os_error(errno.EEXIST, file_obj.path)
        if not exists and flags == self.XATTR_REPLACE:
            self.filesystem.raise_os_error(errno.ENODATA, file_obj.path)
file_obj.xattr[attribute] = value
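    # Illustrative sketch of the xattr API (Linux only; hypothetical path
    # and attribute name, `fake_os` is a FakeOsModule instance):
    #
    #   fake_os.setxattr('/a.txt', 'user.tag', b'v1')
    #   assert fake_os.getxattr('/a.txt', 'user.tag') == b'v1'
    #   assert 'user.tag' in fake_os.listxattr('/a.txt')
    #   fake_os.removexattr('/a.txt', 'user.tag')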
if use_scandir:
def scandir(self, path='.'):
"""Return an iterator of DirEntry objects corresponding to the
entries in the directory given by path.
Args:
path: Path to the target directory within the fake filesystem.
Returns:
An iterator to an unsorted list of os.DirEntry objects for
each entry in path.
Raises:
OSError: if the target is not a directory.
"""
return scandir(self.filesystem, path)
def walk(self, top, topdown=True, onerror=None, followlinks=False):
"""Perform an os.walk operation over the fake filesystem.
Args:
top: The root directory from which to begin walk.
topdown: Determines whether to return the tuples with the root as
the first entry (`True`) or as the last, after all the child
directory tuples (`False`).
onerror: If not `None`, function which will be called to handle the
`os.error` instance provided when `os.listdir()` fails.
followlinks: If `True`, symbolic links are followed.
Yields:
(path, directories, nondirectories) for top and each of its
subdirectories. See the documentation for the builtin os module
for further details.
"""
return walk(self.filesystem, top, topdown, onerror, followlinks)
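    # Illustrative sketch (`fake_os` is a FakeOsModule instance):
    #
    #   for root, dirs, files in fake_os.walk('/'):
    #       print(root, dirs, files)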
def readlink(self, path, dir_fd=None):
"""Read the target of a symlink.
Args:
path: Symlink to read the target of.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Returns:
the string representing the path to which the symbolic link points.
Raises:
TypeError: if `path` is None
OSError: (with errno=ENOENT) if path is not a valid path, or
(with errno=EINVAL) if path is valid, but is not a symlink
"""
path = self._path_with_dir_fd(path, self.readlink, dir_fd)
return self.filesystem.readlink(path)
def stat(self, path, *, dir_fd=None, follow_symlinks=True):
"""Return the os.stat-like tuple for the FakeFile object of entry_path.
Args:
path: path to filesystem object to retrieve.
dir_fd: (int) If not `None`, the file descriptor of a directory,
with `entry_path` being relative to this directory.
follow_symlinks: (bool) If `False` and `entry_path` points to a
symlink, the link itself is changed instead of the linked
object.
Returns:
The FakeStatResult object corresponding to entry_path.
Raises:
OSError: if the filesystem object doesn't exist.
"""
path = self._path_with_dir_fd(path, self.stat, dir_fd)
return self.filesystem.stat(path, follow_symlinks)
def lstat(self, path, *, dir_fd=None):
"""Return the os.stat-like tuple for entry_path, not following symlinks.
Args:
path: path to filesystem object to retrieve.
dir_fd: If not `None`, the file descriptor of a directory, with
`path` being relative to this directory.
Returns:
the FakeStatResult object corresponding to `path`.
Raises:
OSError: if the filesystem object doesn't exist.
"""
# stat should return the tuple representing return value of os.stat
path = self._path_with_dir_fd(path, self.lstat, dir_fd)
return self.filesystem.stat(path, follow_symlinks=False)
def remove(self, path, dir_fd=None):
"""Remove the FakeFile object at the specified file path.
Args:
path: Path to file to be removed.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Raises:
OSError: if path points to a directory.
OSError: if path does not exist.
OSError: if removal failed.
"""
path = self._path_with_dir_fd(path, self.remove, dir_fd)
self.filesystem.remove(path)
def unlink(self, path, *, dir_fd=None):
"""Remove the FakeFile object at the specified file path.
Args:
path: Path to file to be removed.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Raises:
OSError: if path points to a directory.
OSError: if path does not exist.
OSError: if removal failed.
"""
path = self._path_with_dir_fd(path, self.unlink, dir_fd)
self.filesystem.remove(path)
def rename(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
"""Rename a FakeFile object at old_file_path to new_file_path,
preserving all properties.
Also replaces existing new_file_path object, if one existed
(Unix only).
Args:
src: Path to filesystem object to rename.
dst: Path to where the filesystem object will live
after this call.
src_dir_fd: If not `None`, the file descriptor of a directory,
with `src` being relative to this directory.
dst_dir_fd: If not `None`, the file descriptor of a directory,
with `dst` being relative to this directory.
        Raises:
            OSError: if `src` does not exist.
            OSError: if `dst` is an existing directory.
            OSError: if `dst` is an existing file (Windows only)
            OSError: if `dst` is an existing file and could not
                be removed (Unix)
            OSError: if `dirname(dst)` does not exist
            OSError: if the file would be moved to another filesystem
                (e.g. mount point)
        """
src = self._path_with_dir_fd(src, self.rename, src_dir_fd)
dst = self._path_with_dir_fd(dst, self.rename, dst_dir_fd)
self.filesystem.rename(src, dst)
def replace(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
"""Renames a FakeFile object at old_file_path to new_file_path,
preserving all properties.
Also replaces existing new_file_path object, if one existed.
Arg
src: Path to filesystem object to rename.
dst: Path to where the filesystem object will live
after this call.
src_dir_fd: If not `None`, the file descriptor of a directory,
with `src` being relative to this directory.
dst_dir_fd: If not `None`, the file descriptor of a directory,
with `dst` being relative to this directory.
        Raises:
            OSError: if `src` does not exist.
            OSError: if `dst` is an existing directory.
            OSError: if `dst` is an existing file and could
                not be removed
            OSError: if `dirname(dst)` does not exist
OSError: if the file would be moved to another filesystem
(e.g. mount point)
"""
src = self._path_with_dir_fd(src, self.rename, src_dir_fd)
dst = self._path_with_dir_fd(dst, self.rename, dst_dir_fd)
self.filesystem.rename(src, dst, force_replace=True)
def rmdir(self, path, *, dir_fd=None):
"""Remove a leaf Fake directory.
Args:
path: (str) Name of directory to remove.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Raises:
OSError: if `path` does not exist or is not a directory,
or as per FakeFilesystem.remove_object. Cannot remove '.'.
"""
path = self._path_with_dir_fd(path, self.rmdir, dir_fd)
self.filesystem.rmdir(path)
def removedirs(self, name):
"""Remove a leaf fake directory and all empty intermediate ones.
Args:
name: the directory to be removed.
Raises:
OSError: if target_directory does not exist or is not a directory.
OSError: if target_directory is not empty.
"""
name = self.filesystem.absnormpath(name)
directory = self.filesystem.confirmdir(name)
if directory.contents:
self.filesystem.raise_os_error(
errno.ENOTEMPTY, self.path.basename(name))
else:
self.rmdir(name)
head, tail = self.path.split(name)
if not tail:
head, tail = self.path.split(head)
while head and tail:
head_dir = self.filesystem.confirmdir(head)
if head_dir.contents:
break
# only the top-level dir may not be a symlink
self.filesystem.rmdir(head, allow_symlink=True)
head, tail = self.path.split(head)
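    # Illustrative sketch (hypothetical paths, `fake_os` is a
    # FakeOsModule instance):
    #
    #   fake_os.makedirs('/x/y/z')
    #   fake_os.removedirs('/x/y/z')  # removes z, then the empty y and x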
def mkdir(self, path, mode=PERM_DEF, *, dir_fd=None):
"""Create a leaf Fake directory.
Args:
path: (str) Name of directory to create.
Relative paths are assumed to be relative to '/'.
mode: (int) Mode to create directory with. This argument defaults
to 0o777. The umask is applied to this mode.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Raises:
OSError: if the directory name is invalid or parent directory is
read only or as per FakeFilesystem.add_object.
"""
path = self._path_with_dir_fd(path, self.mkdir, dir_fd)
try:
self.filesystem.makedir(path, mode)
except OSError as e:
if e.errno == errno.EACCES:
self.filesystem.raise_os_error(e.errno, path)
raise
def makedirs(self, name, mode=PERM_DEF, exist_ok=None):
"""Create a leaf Fake directory + create any non-existent parent dirs.
Args:
name: (str) Name of directory to create.
mode: (int) Mode to create directory (and any necessary parent
directories) with. This argument defaults to 0o777.
The umask is applied to this mode.
exist_ok: (boolean) If exist_ok is False (the default), an OSError
is raised if the target directory already exists.
Raises:
OSError: if the directory already exists and exist_ok=False, or as
per :py:meth:`FakeFilesystem.create_dir`.
"""
if exist_ok is None:
exist_ok = False
self.filesystem.makedirs(name, mode, exist_ok)
def _path_with_dir_fd(self, path, fct, dir_fd):
"""Return the path considering dir_fd. Raise on invalid parameters."""
path = to_string(path)
if dir_fd is not None:
# check if fd is supported for the built-in real function
real_fct = getattr(os, fct.__name__)
if real_fct not in self.supports_dir_fd:
raise NotImplementedError(
'dir_fd unavailable on this platform')
if isinstance(path, int):
raise ValueError("%s: Can't specify dir_fd without "
"matching path" % fct.__name__)
if not self.path.isabs(path):
return self.path.join(
self.filesystem.get_open_file(
dir_fd).get_object().path, path)
return path
def access(self, path, mode, *, dir_fd=None, follow_symlinks=True):
"""Check if a file exists and has the specified permissions.
Args:
path: (str) Path to the file.
mode: (int) Permissions represented as a bitwise-OR combination of
os.F_OK, os.R_OK, os.W_OK, and os.X_OK.
dir_fd: If not `None`, the file descriptor of a directory, with
`path` being relative to this directory.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
Returns:
bool, `True` if file is accessible, `False` otherwise.
"""
path = self._path_with_dir_fd(path, self.access, dir_fd)
try:
stat_result = self.stat(path, follow_symlinks=follow_symlinks)
except OSError as os_error:
if os_error.errno == errno.ENOENT:
return False
raise
if is_root():
mode &= ~os.W_OK
return (mode & ((stat_result.st_mode >> 6) & 7)) == mode
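    # Illustrative sketch (hypothetical path, `fake_os` is a FakeOsModule
    # instance; note that when running as root, write access is always
    # granted by this implementation):
    #
    #   fake_os.chmod('/a.txt', 0o400)
    #   assert fake_os.access('/a.txt', os.R_OK)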
def chmod(self, path, mode, *, dir_fd=None, follow_symlinks=True):
"""Change the permissions of a file as encoded in integer mode.
Args:
path: (str) Path to the file.
mode: (int) Permissions.
dir_fd: If not `None`, the file descriptor of a directory, with
`path` being relative to this directory.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
"""
path = self._path_with_dir_fd(path, self.chmod, dir_fd)
self.filesystem.chmod(path, mode, follow_symlinks)
def lchmod(self, path, mode):
"""Change the permissions of a file as encoded in integer mode.
If the file is a link, the permissions of the link are changed.
Args:
path: (str) Path to the file.
mode: (int) Permissions.
"""
if self.filesystem.is_windows_fs:
            raise NameError("name 'lchmod' is not defined")
self.filesystem.chmod(path, mode, follow_symlinks=False)
def utime(self, path, times=None, ns=None,
dir_fd=None, follow_symlinks=True):
"""Change the access and modified times of a file.
Args:
path: (str) Path to the file.
times: 2-tuple of int or float numbers, of the form (atime, mtime)
which is used to set the access and modified times in seconds.
If None, both times are set to the current time.
ns: 2-tuple of int numbers, of the form (atime, mtime) which is
used to set the access and modified times in nanoseconds.
If None, both times are set to the current time.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
Raises:
TypeError: If anything other than the expected types is
specified in the passed `times` or `ns` tuple,
or if the tuple length is not equal to 2.
ValueError: If both times and ns are specified.
"""
path = self._path_with_dir_fd(path, self.utime, dir_fd)
self.filesystem.utime(
path, times=times, ns=ns, follow_symlinks=follow_symlinks)
def chown(self, path, uid, gid, *, dir_fd=None, follow_symlinks=True):
"""Set ownership of a faked file.
Args:
path: (str) Path to the file or directory.
uid: (int) Numeric uid to set the file or directory to.
gid: (int) Numeric gid to set the file or directory to.
dir_fd: (int) If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
follow_symlinks: (bool) If `False` and path points to a symlink,
the link itself is changed instead of the linked object.
Raises:
OSError: if path does not exist.
`None` is also allowed for `uid` and `gid`. This permits `os.rename`
to use `os.chown` even when the source file `uid` and `gid` are
`None` (unset).
"""
path = self._path_with_dir_fd(path, self.chown, dir_fd)
file_object = self.filesystem.resolve(
path, follow_symlinks, allow_fd=True)
if not ((is_int_type(uid) or uid is None) and
(is_int_type(gid) or gid is None)):
raise TypeError("An integer is required")
if uid != -1:
file_object.st_uid = uid
if gid != -1:
file_object.st_gid = gid
def mknod(self, path, mode=None, device=0, *, dir_fd=None):
"""Create a filesystem node named 'filename'.
Does not support device special files or named pipes as the real os
module does.
Args:
path: (str) Name of the file to create
mode: (int) Permissions to use and type of file to be created.
Default permissions are 0o666. Only the stat.S_IFREG file type
is supported by the fake implementation. The umask is applied
to this mode.
device: not supported in fake implementation
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
Raises:
OSError: if called with unsupported options or the file can not be
created.
"""
if self.filesystem.is_windows_fs:
            raise AttributeError("module 'os' has no attribute 'mknod'")
if mode is None:
            # note that a default value of 0o600 without a device type is
            # documented, but this is not how the real os module behaves
mode = S_IFREG | 0o600
if device or not mode & S_IFREG and not is_root():
self.filesystem.raise_os_error(errno.EPERM)
path = self._path_with_dir_fd(path, self.mknod, dir_fd)
head, tail = self.path.split(path)
if not tail:
if self.filesystem.exists(head, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, path)
self.filesystem.raise_os_error(errno.ENOENT, path)
if tail in (b'.', u'.', b'..', u'..'):
self.filesystem.raise_os_error(errno.ENOENT, path)
if self.filesystem.exists(path, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, path)
self.filesystem.add_object(head, FakeFile(
tail, mode & ~self.filesystem.umask,
filesystem=self.filesystem))
def symlink(self, src, dst, *, dir_fd=None):
"""Creates the specified symlink, pointed at the specified link target.
Args:
src: The target of the symlink.
dst: Path to the symlink to create.
dir_fd: If not `None`, the file descriptor of a directory,
with `src` being relative to this directory.
Raises:
OSError: if the file already exists.
"""
src = self._path_with_dir_fd(src, self.symlink, dir_fd)
self.filesystem.create_symlink(
dst, src, create_missing_dirs=False)
def link(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
"""Create a hard link at new_path, pointing at old_path.
Args:
src: An existing path to the target file.
dst: The destination path to create a new link at.
src_dir_fd: If not `None`, the file descriptor of a directory,
with `src` being relative to this directory.
dst_dir_fd: If not `None`, the file descriptor of a directory,
with `dst` being relative to this directory.
Returns:
The FakeFile object referred to by `src`.
Raises:
            OSError: if something already exists at `dst`.
OSError: if the parent directory doesn't exist.
"""
src = self._path_with_dir_fd(src, self.link, src_dir_fd)
dst = self._path_with_dir_fd(dst, self.link, dst_dir_fd)
self.filesystem.link(src, dst)
def fsync(self, fd):
"""Perform fsync for a fake file (in other words, do nothing).
Args:
fd: The file descriptor of the open file.
Raises:
            OSError: `fd` is an invalid file descriptor.
            TypeError: `fd` is not an integer.
        """
        # raise an error if `fd` is one of the standard streams
if 0 <= fd < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
file_object = self.filesystem.get_open_file(fd)
if self.filesystem.is_windows_fs:
if (not hasattr(file_object, 'allow_update') or
not file_object.allow_update):
self.filesystem.raise_os_error(
errno.EBADF, file_object.file_path)
def fdatasync(self, fd):
"""Perform fdatasync for a fake file (in other words, do nothing).
Args:
fd: The file descriptor of the open file.
Raises:
OSError: `fd` is an invalid file descriptor.
TypeError: `fd` is not an integer.
"""
if self.filesystem.is_windows_fs or self.filesystem.is_macos:
raise AttributeError("module 'os' has no attribute 'fdatasync'")
        # raise an error if `fd` is one of the standard streams
if 0 <= fd < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
self.filesystem.get_open_file(fd)
def sendfile(self, fd_out, fd_in, offset, count):
"""Copy count bytes from file descriptor fd_in to file descriptor
fd_out starting at offset.
Args:
fd_out: The file descriptor of the destination file.
fd_in: The file descriptor of the source file.
offset: The offset in bytes where to start the copy in the
source file. If `None` (Linux only), copying is started at
the current position, and the position is updated.
count: The number of bytes to copy. If 0, all remaining bytes
are copied (MacOs only).
Raises:
OSError: If `fd_in` or `fd_out` is an invalid file descriptor.
TypeError: If `fd_in` or `fd_out` is not an integer.
TypeError: If `offset` is None under MacOs.
"""
if self.filesystem.is_windows_fs:
raise AttributeError("module 'os' has no attribute 'sendfile'")
if 0 <= fd_in < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
if 0 <= fd_out < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
source = self.filesystem.get_open_file(fd_in)
dest = self.filesystem.get_open_file(fd_out)
if self.filesystem.is_macos:
if dest.get_object().stat_result.st_mode & 0o777000 != S_IFSOCK:
raise OSError('Socket operation on non-socket')
if offset is None:
if self.filesystem.is_macos:
raise TypeError('None is not a valid offset')
contents = source.read(count)
else:
position = source.tell()
source.seek(offset)
if count == 0 and self.filesystem.is_macos:
contents = source.read()
else:
contents = source.read(count)
source.seek(position)
if contents:
written = dest.write(contents)
dest.flush()
return written
return 0
def __getattr__(self, name):
"""Forwards any unfaked calls to the standard os module."""
return getattr(self._os_module, name)
class FakeIoModule:
"""Uses FakeFilesystem to provide a fake io module replacement.
Currently only used to wrap `io.open()` which is an alias to `open()`.
You need a fake_filesystem to use this:
filesystem = fake_filesystem.FakeFilesystem()
my_io_module = fake_filesystem.FakeIoModule(filesystem)
"""
@staticmethod
def dir():
"""Return the list of patched function names. Used for patching
functions imported from the module.
"""
return 'open',
def __init__(self, filesystem):
"""
Args:
filesystem: FakeFilesystem used to provide file system information.
"""
self.filesystem = filesystem
self._io_module = io
def open(self, file, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None):
"""Redirect the call to FakeFileOpen.
See FakeFileOpen.call() for description.
"""
fake_open = FakeFileOpen(self.filesystem)
return fake_open(file, mode, buffering, encoding, errors,
newline, closefd, opener)
def __getattr__(self, name):
"""Forwards any unfaked calls to the standard io module."""
return getattr(self._io_module, name)
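# Illustrative sketch, not part of the implementation (hypothetical path):
#
#   filesystem = fake_filesystem.FakeFilesystem()
#   fake_io = fake_filesystem.FakeIoModule(filesystem)
#   with fake_io.open('/notes.txt', 'w') as f:
#       f.write('hello')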
class FakeFileWrapper:
"""Wrapper for a stream object for use by a FakeFile object.
If the wrapper has any data written to it, it will propagate to
the FakeFile object on close() or flush().
"""
def __init__(self, file_object, file_path, update=False, read=False,
append=False, delete_on_close=False, filesystem=None,
newline=None, binary=True, closefd=True, encoding=None,
errors=None, raw_io=False, is_stream=False):
self.file_object = file_object
self.file_path = file_path
self._append = append
self._read = read
self.allow_update = update
self._closefd = closefd
self._file_epoch = file_object.epoch
self.raw_io = raw_io
self._binary = binary
self.is_stream = is_stream
self._changed = False
contents = file_object.byte_contents
self._encoding = encoding or locale.getpreferredencoding(False)
errors = errors or 'strict'
buffer_class = (NullFileBufferIO if file_object == filesystem.dev_null
else FileBufferIO)
self._io = buffer_class(contents, linesep=filesystem.line_separator(),
binary=binary, encoding=encoding,
newline=newline, errors=errors)
self._read_whence = 0
self._read_seek = 0
self._flush_pos = 0
if contents:
self._flush_pos = len(contents)
if update:
if not append:
self._io.seek(0)
else:
self._io.seek(self._flush_pos)
self._read_seek = self._io.tell()
if delete_on_close:
assert filesystem, 'delete_on_close=True requires filesystem'
self._filesystem = filesystem
self.delete_on_close = delete_on_close
# override, don't modify FakeFile.name, as FakeFilesystem expects
# it to be the file name only, no directories.
self.name = file_object.opened_as
self.filedes = None
def __enter__(self):
"""To support usage of this fake file with the 'with' statement."""
return self
def __exit__(self, type, value, traceback):
"""To support usage of this fake file with the 'with' statement."""
self.close()
def _raise(self, message):
if self.raw_io:
self._filesystem.raise_os_error(errno.EBADF, self.file_path)
raise io.UnsupportedOperation(message)
def get_object(self):
"""Return the FakeFile object that is wrapped by the current instance.
"""
return self.file_object
def fileno(self):
"""Return the file descriptor of the file object."""
return self.filedes
def close(self):
"""Close the file."""
# ignore closing a closed file
if not self._is_open():
return
# for raw io, all writes are flushed immediately
if self.allow_update and not self.raw_io:
self.flush()
if self._filesystem.is_windows_fs and self._changed:
self.file_object.st_mtime = time.time()
if self._closefd:
self._filesystem._close_open_file(self.filedes)
else:
self._filesystem.open_files[self.filedes].remove(self)
if self.delete_on_close:
self._filesystem.remove_object(self.get_object().path)
@property
def closed(self):
"""Simulate the `closed` attribute on file."""
return not self._is_open()
def flush(self):
"""Flush file contents to 'disk'."""
self._check_open_file()
if self.allow_update and not self.is_stream:
contents = self._io.getvalue()
if self._append:
self._sync_io()
old_contents = (self.file_object.byte_contents
if is_byte_string(contents) else
self.file_object.contents)
contents = old_contents + contents[self._flush_pos:]
self._set_stream_contents(contents)
self.update_flush_pos()
else:
self._io.flush()
if self.file_object.set_contents(contents, self._encoding):
if self._filesystem.is_windows_fs:
self._changed = True
else:
current_time = time.time()
self.file_object.st_ctime = current_time
self.file_object.st_mtime = current_time
self._file_epoch = self.file_object.epoch
if not self.is_stream:
self._flush_related_files()
def update_flush_pos(self):
self._flush_pos = self._io.tell()
def _flush_related_files(self):
for open_files in self._filesystem.open_files[3:]:
if open_files is not None:
for open_file in open_files:
if (open_file is not self and
self.file_object == open_file.file_object and
not open_file._append):
open_file._sync_io()
def seek(self, offset, whence=0):
"""Move read/write pointer in 'file'."""
self._check_open_file()
if not self._append:
self._io.seek(offset, whence)
else:
self._read_seek = offset
self._read_whence = whence
if not self.is_stream:
self.flush()
def tell(self):
"""Return the file's current position.
Returns:
int, file's current position in bytes.
"""
self._check_open_file()
if not self.is_stream:
self.flush()
if not self._append:
return self._io.tell()
if self._read_whence:
write_seek = self._io.tell()
self._io.seek(self._read_seek, self._read_whence)
self._read_seek = self._io.tell()
self._read_whence = 0
self._io.seek(write_seek)
return self._read_seek
def _sync_io(self):
"""Update the stream with changes to the file object contents."""
if self._file_epoch == self.file_object.epoch:
return
if self._io.binary:
contents = self.file_object.byte_contents
else:
contents = self.file_object.contents
self._set_stream_contents(contents)
self._file_epoch = self.file_object.epoch
def _set_stream_contents(self, contents):
whence = self._io.tell()
self._io.seek(0)
self._io.truncate()
if not self._io.binary and is_byte_string(contents):
contents = contents.decode(self._encoding)
self._io.putvalue(contents)
if not self._append:
self._io.seek(whence)
def _read_wrappers(self, name):
"""Wrap a stream attribute in a read wrapper.
Returns a read_wrapper which tracks our own read pointer since the
stream object has no concept of a different read and write pointer.
Args:
name: The name of the attribute to wrap. Should be a read call.
Returns:
The read_wrapper function.
"""
io_attr = getattr(self._io, name)
def read_wrapper(*args, **kwargs):
"""Wrap all read calls to the stream object.
We do this to track the read pointer separate from the write
pointer. Anything that wants to read from the stream object
while we're in append mode goes through this.
Args:
*args: pass through args
**kwargs: pass through kwargs
Returns:
Wrapped stream object method
"""
self._io.seek(self._read_seek, self._read_whence)
ret_value = io_attr(*args, **kwargs)
self._read_seek = self._io.tell()
self._read_whence = 0
self._io.seek(0, 2)
return ret_value
return read_wrapper
def _other_wrapper(self, name, writing):
"""Wrap a stream attribute in an other_wrapper.
Args:
            name: the name of the stream attribute to wrap.
            writing: True if the wrapped call may write; currently unused
                here, but passed in by __getattr__ for symmetry.
Returns:
other_wrapper which is described below.
"""
io_attr = getattr(self._io, name)
def other_wrapper(*args, **kwargs):
"""Wrap all other calls to the stream Object.
We do this to track changes to the write pointer. Anything that
moves the write pointer in a file open for appending should move
the read pointer as well.
Args:
*args: Pass through args.
**kwargs: Pass through kwargs.
Returns:
Wrapped stream object method.
"""
write_seek = self._io.tell()
ret_value = io_attr(*args, **kwargs)
if write_seek != self._io.tell():
self._read_seek = self._io.tell()
self._read_whence = 0
return ret_value
return other_wrapper
def _adapt_size_for_related_files(self, size):
for open_files in self._filesystem.open_files[3:]:
if open_files is not None:
for open_file in open_files:
if (open_file is not self and
self.file_object == open_file.file_object and
open_file._append):
open_file._read_seek += size
def _truncate_wrapper(self):
"""Wrap truncate() to allow flush after truncate.
Returns:
Wrapper which is described below.
"""
io_attr = getattr(self._io, 'truncate')
def truncate_wrapper(*args, **kwargs):
"""Wrap truncate call to call flush after truncate."""
if self._append:
self._io.seek(self._read_seek, self._read_whence)
size = io_attr(*args, **kwargs)
self.flush()
if not self.is_stream:
self.file_object.size = size
buffer_size = len(self._io.getvalue())
if buffer_size < size:
self._io.seek(buffer_size)
self._io.write('\0' * (size - buffer_size))
self.file_object.set_contents(
self._io.getvalue(), self._encoding)
self._flush_pos = size
self._adapt_size_for_related_files(size - buffer_size)
self.flush()
return size
return truncate_wrapper
def size(self):
"""Return the content size in bytes of the wrapped file."""
return self.file_object.st_size
def __getattr__(self, name):
if self.file_object.is_large_file():
raise FakeLargeFileIoException(self.file_path)
reading = name.startswith('read') or name == 'next'
truncate = name == 'truncate'
writing = name.startswith('write') or truncate
if reading or writing:
self._check_open_file()
if not self._read and reading:
return self._read_error()
if not self.allow_update and writing:
return self._write_error()
if reading:
self._sync_io()
if not self.is_stream:
self.flush()
if not self._filesystem.is_windows_fs:
self.file_object.st_atime = time.time()
if truncate:
return self._truncate_wrapper()
if self._append:
if reading:
return self._read_wrappers(name)
else:
return self._other_wrapper(name, writing)
return getattr(self._io, name)
def _read_error(self):
def read_error(*args, **kwargs):
"""Throw an error unless the argument is zero."""
if args and args[0] == 0:
if self._filesystem.is_windows_fs and self.raw_io:
return b'' if self._binary else u''
self._raise('File is not open for reading.')
return read_error
def _write_error(self):
def write_error(*args, **kwargs):
"""Throw an error."""
if self.raw_io:
if (self._filesystem.is_windows_fs and args
and len(args[0]) == 0):
return 0
self._raise('File is not open for writing.')
return write_error
def _is_open(self):
return (self.filedes < len(self._filesystem.open_files) and
self._filesystem.open_files[self.filedes] is not None and
self in self._filesystem.open_files[self.filedes])
def _check_open_file(self):
if not self.is_stream and not self._is_open():
raise ValueError('I/O operation on closed file')
def __iter__(self):
if not self._read:
self._raise('File is not open for reading')
return self._io.__iter__()
def __next__(self):
if not self._read:
self._raise('File is not open for reading')
return next(self._io)
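# Sketch of the append-mode pointer bookkeeping implemented above: in 'a+'
# mode the wrapper tracks a read pointer separately from the write pointer,
# so a seek() before read() does not redirect where writes land.
def _example_append_pointers():  # documentation sketch, never called
    filesystem = FakeFilesystem()
    filesystem.create_file('/log.txt', contents='one\n')
    fake_open = FakeFileOpen(filesystem)
    with fake_open('/log.txt', 'a+') as f:
        f.seek(0)             # moves only the read pointer in append mode
        first = f.readline()  # -> 'one\n'
        f.write('two\n')      # still appended at the end of the file
    return first, filesystem.get_object('/log.txt').contents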
class StandardStreamWrapper:
"""Wrapper for a system standard stream to be used in open files list.
"""
def __init__(self, stream_object):
self._stream_object = stream_object
self.filedes = None
def get_object(self):
return self._stream_object
def fileno(self):
"""Return the file descriptor of the wrapped standard stream."""
return self.filedes
def close(self):
"""We do not support closing standard streams."""
pass
def is_stream(self):
return True
class FakeDirWrapper:
"""Wrapper for a FakeDirectory object to be used in open files list.
"""
def __init__(self, file_object, file_path, filesystem):
self.file_object = file_object
self.file_path = file_path
self._filesystem = filesystem
self.filedes = None
def get_object(self):
"""Return the FakeFile object that is wrapped by the current instance.
"""
return self.file_object
def fileno(self):
"""Return the file descriptor of the file object."""
return self.filedes
def close(self):
"""Close the directory."""
self._filesystem._close_open_file(self.filedes)
class FakePipeWrapper:
"""Wrapper for a read or write descriptor of a real pipe object to be
used in open files list.
"""
def __init__(self, filesystem, fd):
self._filesystem = filesystem
self.fd = fd # the real file descriptor
self.file_object = None
self.filedes = None
def get_object(self):
return self.file_object
def fileno(self):
"""Return the fake file descriptor of the pipe object."""
return self.filedes
def read(self, numBytes):
"""Read from the real pipe."""
return os.read(self.fd, numBytes)
def write(self, contents):
"""Write to the real pipe."""
return os.write(self.fd, contents)
def close(self):
"""Close the pipe descriptor."""
self._filesystem.open_files[self.filedes].remove(self)
os.close(self.fd)
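# Sketch of how FakePipeWrapper is meant to be used: it wraps the *real*
# descriptors returned by os.pipe(), so data still flows through the host
# OS while the fake fd table tracks the wrappers. Registering them via
# _add_open_file mirrors what the os.pipe fake does (an assumption here,
# since that fake is defined elsewhere in this module).
def _example_pipe_wrapper():  # documentation sketch, never called
    filesystem = FakeFilesystem()
    read_fd, write_fd = os.pipe()
    reader = FakePipeWrapper(filesystem, read_fd)
    writer = FakePipeWrapper(filesystem, write_fd)
    reader.filedes = filesystem._add_open_file(reader)
    writer.filedes = filesystem._add_open_file(writer)
    writer.write(b'ping')
    data = reader.read(4)  # -> b'ping'
    writer.close()
    reader.close()
    return data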
Deprecator.add(FakeFileWrapper, FakeFileWrapper.get_object, 'GetObject')
Deprecator.add(FakeFileWrapper, FakeFileWrapper.size, 'Size')
class FakeFileOpen:
"""Faked `file()` and `open()` function replacements.
Returns FakeFile objects in a FakeFilesystem in place of the `file()`
or `open()` function.
"""
__name__ = 'FakeFileOpen'
def __init__(self, filesystem, delete_on_close=False, raw_io=False):
"""
Args:
filesystem: FakeFilesystem used to provide file system information
            delete_on_close: optional boolean, deletes file on close()
            raw_io: True if called from low-level I/O functions rather
                than the open() builtin
"""
self.filesystem = filesystem
self._delete_on_close = delete_on_close
self.raw_io = raw_io
def __call__(self, *args, **kwargs):
"""Redirects calls to file() or open() to appropriate method."""
return self.call(*args, **kwargs)
def call(self, file_, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None,
open_modes=None):
"""Return a file-like object with the contents of the target
file object.
Args:
file_: Path to target file or a file descriptor.
mode: Additional file modes (all modes in `open()` are supported).
            buffering: ignored. (Used for signature compliance with
                builtins.open.)
encoding: The encoding used to encode unicode strings / decode
bytes.
errors: (str) Defines how encoding errors are handled.
newline: Controls universal newlines, passed to stream object.
closefd: If a file descriptor rather than file name is passed,
and this is set to `False`, then the file descriptor is kept
open when file is closed.
opener: not supported.
open_modes: Modes for opening files if called from low-level API.
Returns:
A file-like object containing the contents of the target file.
Raises:
OSError depending on Python version / call mode:
- if the target object is a directory
- on an invalid path
- if the file does not exist when it should
- if the file exists but should not
- if permission is denied
ValueError: for an invalid mode or mode combination
"""
binary = 'b' in mode
newline, open_modes = self._handle_file_mode(mode, newline, open_modes)
file_object, file_path, filedes, real_path = self._handle_file_arg(
file_)
if not filedes:
closefd = True
if (open_modes.must_not_exist and
(file_object or self.filesystem.islink(file_path) and
not self.filesystem.is_windows_fs)):
self.filesystem.raise_os_error(errno.EEXIST, file_path)
file_object = self._init_file_object(file_object,
file_path, open_modes,
real_path)
if S_ISDIR(file_object.st_mode):
if self.filesystem.is_windows_fs:
self.filesystem.raise_os_error(errno.EACCES, file_path)
else:
self.filesystem.raise_os_error(errno.EISDIR, file_path)
        # When obj.name is printed, it must show the original argument to
        # open(): not the abspath, not the file name, but the actual argument.
file_object.opened_as = file_path
if open_modes.truncate:
current_time = time.time()
file_object.st_mtime = current_time
if not self.filesystem.is_windows_fs:
file_object.st_ctime = current_time
fakefile = FakeFileWrapper(file_object,
file_path,
update=open_modes.can_write,
read=open_modes.can_read,
append=open_modes.append,
delete_on_close=self._delete_on_close,
filesystem=self.filesystem,
newline=newline,
binary=binary,
closefd=closefd,
encoding=encoding,
errors=errors,
raw_io=self.raw_io)
if filedes is not None:
fakefile.filedes = filedes
# replace the file wrapper
self.filesystem.open_files[filedes].append(fakefile)
else:
fakefile.filedes = self.filesystem._add_open_file(fakefile)
return fakefile
def _init_file_object(self, file_object, file_path,
open_modes, real_path):
if file_object:
if (not is_root() and
((open_modes.can_read and
not file_object.st_mode & PERM_READ)
or (open_modes.can_write and
not file_object.st_mode & PERM_WRITE))):
self.filesystem.raise_os_error(errno.EACCES, file_path)
if open_modes.can_write:
if open_modes.truncate:
file_object.set_contents('')
else:
if open_modes.must_exist:
self.filesystem.raise_os_error(errno.ENOENT, file_path)
if self.filesystem.islink(file_path):
link_object = self.filesystem.resolve(file_path,
follow_symlinks=False)
target_path = link_object.contents
else:
target_path = file_path
if self.filesystem.ends_with_path_separator(target_path):
error = (errno.EINVAL if self.filesystem.is_windows_fs
else errno.ENOENT if self.filesystem.is_macos
else errno.EISDIR)
self.filesystem.raise_os_error(error, file_path)
file_object = self.filesystem.create_file_internally(
real_path, create_missing_dirs=False,
apply_umask=True, raw_io=self.raw_io)
return file_object
def _handle_file_arg(self, file_):
file_object = None
if isinstance(file_, int):
# opening a file descriptor
filedes = file_
wrapper = self.filesystem.get_open_file(filedes)
self._delete_on_close = wrapper.delete_on_close
file_object = self.filesystem.get_open_file(filedes).get_object()
file_path = file_object.name
real_path = file_path
else:
            # open a file by path
filedes = None
file_path = file_
if file_path == self.filesystem.dev_null.name:
file_object = self.filesystem.dev_null
real_path = file_path
else:
real_path = self.filesystem.resolve_path(
file_path, raw_io=self.raw_io)
if self.filesystem.exists(file_path):
file_object = self.filesystem.get_object_from_normpath(
real_path, check_read_perm=False)
return file_object, file_path, filedes, real_path
def _handle_file_mode(self, mode, newline, open_modes):
orig_modes = mode # Save original modes for error messages.
# Normalize modes. Handle 't' and 'U'.
if 'b' in mode and 't' in mode:
raise ValueError('Invalid mode: ' + mode)
mode = mode.replace('t', '').replace('b', '')
mode = mode.replace('rU', 'r').replace('U', 'r')
if not self.raw_io:
if mode not in _OPEN_MODE_MAP:
raise ValueError('Invalid mode: %r' % orig_modes)
open_modes = _OpenModes(*_OPEN_MODE_MAP[mode])
return newline, open_modes
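# Sketch of the mode handling above: 't' and 'U' are normalized away before
# the lookup in _OPEN_MODE_MAP, and exclusive-creation modes set
# must_not_exist. Note that _handle_file_mode is private API; this only
# illustrates the mapping.
def _example_open_modes():  # documentation sketch, never called
    filesystem = FakeFilesystem()
    fake_open = FakeFileOpen(filesystem)
    _, modes = fake_open._handle_file_mode('rt', None, None)
    assert modes.can_read and not modes.can_write   # 'rt' -> 'r'
    _, modes = fake_open._handle_file_mode('x+', None, None)
    assert modes.must_not_exist and modes.can_write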
def _run_doctest():
import doctest
import pyfakefs
return doctest.testmod(pyfakefs.fake_filesystem)
if __name__ == '__main__':
_run_doctest()
import errno
import heapq
import io
import locale
import os
import sys
import time
import uuid
from collections import namedtuple
from stat import (
S_IFREG, S_IFDIR, S_ISLNK, S_IFMT, S_ISDIR, S_IFLNK, S_ISREG, S_IFSOCK
)
from pyfakefs.deprecator import Deprecator
from pyfakefs.extra_packages import use_scandir
from pyfakefs.fake_scandir import scandir, walk
from pyfakefs.helpers import (
FakeStatResult, FileBufferIO, NullFileBufferIO,
is_int_type, is_byte_string, is_unicode_string,
make_string_path, IS_WIN, to_string)
from pyfakefs import __version__
__pychecker__ = 'no-reimportself'
PERM_READ = 0o400
PERM_WRITE = 0o200
PERM_EXE = 0o100
PERM_DEF = 0o777
PERM_DEF_FILE = 0o666
PERM_ALL = 0o7777
_OpenModes = namedtuple(
'open_modes',
'must_exist can_read can_write truncate append must_not_exist'
)
_OPEN_MODE_MAP = {
'r': (True, True, False, False, False, False),
'w': (False, False, True, True, False, False),
'a': (False, False, True, False, True, False),
'r+': (True, True, True, False, False, False),
'w+': (False, True, True, True, False, False),
'a+': (False, True, True, False, True, False),
'x': (False, False, True, False, False, True),
'x+': (False, True, True, False, False, True)
}
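# Reading the table above (illustrative): each tuple follows the _OpenModes
# field order. For example:
#   _OpenModes(*_OPEN_MODE_MAP['w+'])
#   -> open_modes(must_exist=False, can_read=True, can_write=True,
#                 truncate=True, append=False, must_not_exist=False)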
if sys.platform.startswith('linux'):
_MAX_LINK_DEPTH = 40
else:
_MAX_LINK_DEPTH = 32
NR_STD_STREAMS = 3
USER_ID = 1 if IS_WIN else os.getuid()
GROUP_ID = 1 if IS_WIN else os.getgid()
def set_uid(uid):
global USER_ID
USER_ID = uid
def set_gid(gid):
global GROUP_ID
GROUP_ID = gid
def reset_ids():
set_uid(1 if IS_WIN else os.getuid())
set_gid(1 if IS_WIN else os.getgid())
def is_root():
return USER_ID == 0
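# Sketch of the fake user handling above: tests can pretend to be root (or
# any other uid) without touching the real process credentials.
def _example_fake_root():  # documentation sketch, never called
    set_uid(0)
    assert is_root()  # permission checks in this module are now skipped
    reset_ids()       # back to the real ids (or the Windows stubs)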
class FakeLargeFileIoException(Exception):
def __init__(self, file_path):
super(FakeLargeFileIoException, self).__init__(
'Read and write operations not supported for '
'fake large file: %s' % file_path)
def _copy_module(old):
saved = sys.modules.pop(old.__name__, None)
new = __import__(old.__name__)
sys.modules[old.__name__] = saved
return new
class FakeFile:
stat_types = (
'st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid',
'st_size', 'st_atime', 'st_mtime', 'st_ctime',
'st_atime_ns', 'st_mtime_ns', 'st_ctime_ns'
)
def __init__(self, name, st_mode=S_IFREG | PERM_DEF_FILE,
contents=None, filesystem=None, encoding=None, errors=None,
side_effect=None):
if filesystem is None:
raise ValueError('filesystem shall not be None')
self.filesystem = filesystem
self._side_effect = side_effect
self.name = name
self.stat_result = FakeStatResult(
filesystem.is_windows_fs, USER_ID, GROUP_ID, time.time())
self.stat_result.st_mode = st_mode
self.encoding = encoding
self.errors = errors or 'strict'
self._byte_contents = self._encode_contents(contents)
self.stat_result.st_size = (
len(self._byte_contents) if self._byte_contents is not None else 0)
self.epoch = 0
self.parent_dir = None
self.xattr = {}
@property
def byte_contents(self):
return self._byte_contents
@property
def contents(self):
if isinstance(self.byte_contents, bytes):
return self.byte_contents.decode(
self.encoding or locale.getpreferredencoding(False),
errors=self.errors)
return self.byte_contents
@property
def st_ctime(self):
return self.stat_result.st_ctime
@property
def st_atime(self):
return self.stat_result.st_atime
@property
def st_mtime(self):
return self.stat_result.st_mtime
@st_ctime.setter
def st_ctime(self, val):
self.stat_result.st_ctime = val
@st_atime.setter
def st_atime(self, val):
self.stat_result.st_atime = val
@st_mtime.setter
def st_mtime(self, val):
self.stat_result.st_mtime = val
def set_large_file_size(self, st_size):
self._check_positive_int(st_size)
if self.st_size:
self.size = 0
if self.filesystem:
self.filesystem.change_disk_usage(st_size, self.name, self.st_dev)
self.st_size = st_size
self._byte_contents = None
def _check_positive_int(self, size):
if not is_int_type(size) or size < 0:
self.filesystem.raise_os_error(errno.ENOSPC, self.name)
def is_large_file(self):
return self._byte_contents is None
def _encode_contents(self, contents):
if is_unicode_string(contents):
contents = bytes(
contents,
self.encoding or locale.getpreferredencoding(False),
self.errors)
return contents
def _set_initial_contents(self, contents):
contents = self._encode_contents(contents)
changed = self._byte_contents != contents
st_size = len(contents)
if self._byte_contents:
self.size = 0
current_size = self.st_size or 0
self.filesystem.change_disk_usage(
st_size - current_size, self.name, self.st_dev)
self._byte_contents = contents
self.st_size = st_size
self.epoch += 1
return changed
def set_contents(self, contents, encoding=None):
self.encoding = encoding
changed = self._set_initial_contents(contents)
if self._side_effect is not None:
self._side_effect(self)
return changed
@property
def size(self):
return self.st_size
@property
def path(self):
names = []
obj = self
while obj:
names.insert(0, obj.name)
obj = obj.parent_dir
sep = self.filesystem._path_separator(self.name)
if names[0] == sep:
names.pop(0)
dir_path = sep.join(names)
is_drive = names and len(names[0]) == 2 and names[0][1] == ':'
if not is_drive:
dir_path = sep + dir_path
else:
dir_path = sep.join(names)
dir_path = self.filesystem.absnormpath(dir_path)
return dir_path
@Deprecator('property path')
def GetPath(self):
return self.path
@Deprecator('property size')
def GetSize(self):
return self.size
@size.setter
def size(self, st_size):
self._check_positive_int(st_size)
current_size = self.st_size or 0
self.filesystem.change_disk_usage(
st_size - current_size, self.name, self.st_dev)
if self._byte_contents:
if st_size < current_size:
self._byte_contents = self._byte_contents[:st_size]
else:
self._byte_contents += b'\0' * (st_size - current_size)
self.st_size = st_size
self.epoch += 1
@Deprecator('property size')
def SetSize(self, value):
self.size = value
@Deprecator('property st_atime')
def SetATime(self, st_atime):
self.st_atime = st_atime
@Deprecator('property st_mtime')
def SetMTime(self, st_mtime):
self.st_mtime = st_mtime
@Deprecator('property st_ctime')
def SetCTime(self, st_ctime):
self.st_ctime = st_ctime
def __getattr__(self, item):
if item in self.stat_types:
return getattr(self.stat_result, item)
return super(FakeFile, self).__getattr__(item)
def __setattr__(self, key, value):
if key in self.stat_types:
return setattr(self.stat_result, key, value)
return super(FakeFile, self).__setattr__(key, value)
def __str__(self):
return '%s(%o)' % (self.name, self.st_mode)
@Deprecator('st_ino')
def SetIno(self, st_ino):
self.st_ino = st_ino
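# Sketch of the size bookkeeping above (files are normally created through
# FakeFilesystem.create_file rather than by constructing FakeFile directly):
def _example_fake_file_size():  # documentation sketch, never called
    filesystem = FakeFilesystem()
    fake_file = filesystem.create_file('/data.bin', contents=b'\x00\x01')
    assert fake_file.st_size == 2
    fake_file.size = 4              # pads with b'\0' and bumps the epoch
    return fake_file.byte_contents  # -> b'\x00\x01\x00\x00'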
class FakeNullFile(FakeFile):
def __init__(self, filesystem):
        devnull = '/dev/nul' if filesystem.is_windows_fs else '/dev/null'
super(FakeNullFile, self).__init__(
devnull, filesystem=filesystem, contents=b'')
@property
def byte_contents(self):
return b''
def _set_initial_contents(self, contents):
pass
Deprecator.add(FakeFile, FakeFile.set_large_file_size, 'SetLargeFileSize')
Deprecator.add(FakeFile, FakeFile.set_contents, 'SetContents')
Deprecator.add(FakeFile, FakeFile.is_large_file, 'IsLargeFile')
class FakeFileFromRealFile(FakeFile):
def __init__(self, file_path, filesystem, side_effect=None):
super(FakeFileFromRealFile, self).__init__(
name=os.path.basename(file_path), filesystem=filesystem,
side_effect=side_effect)
self.contents_read = False
@property
def byte_contents(self):
if not self.contents_read:
self.contents_read = True
with io.open(self.file_path, 'rb') as f:
self._byte_contents = f.read()
self.st_atime = os.stat(self.file_path).st_atime
return self._byte_contents
def set_contents(self, contents, encoding=None):
self.contents_read = True
super(FakeFileFromRealFile, self).set_contents(contents, encoding)
def is_large_file(self):
return False
class FakeDirectory(FakeFile):
def __init__(self, name, perm_bits=PERM_DEF, filesystem=None):
FakeFile.__init__(
self, name, S_IFDIR | perm_bits, {}, filesystem=filesystem)
self.st_nlink += 1
def set_contents(self, contents, encoding=None):
        self.filesystem.raise_os_error(errno.EISDIR, self.path)
@property
def contents(self):
return self.byte_contents
@property
def ordered_dirs(self):
return [item[0] for item in sorted(
self.byte_contents.items(), key=lambda entry: entry[1].st_ino)]
def add_entry(self, path_object):
if (not is_root() and not self.st_mode & PERM_WRITE and
not self.filesystem.is_windows_fs):
raise OSError(errno.EACCES, 'Permission Denied', self.path)
path_object_name = to_string(path_object.name)
if path_object_name in self.contents:
self.filesystem.raise_os_error(errno.EEXIST, self.path)
self.contents[path_object_name] = path_object
path_object.parent_dir = self
if path_object.st_ino is None:
self.filesystem.last_ino += 1
path_object.st_ino = self.filesystem.last_ino
self.st_nlink += 1
path_object.st_nlink += 1
path_object.st_dev = self.st_dev
if path_object.st_nlink == 1:
self.filesystem.change_disk_usage(
path_object.size, path_object.name, self.st_dev)
def get_entry(self, pathname_name):
pathname_name = self._normalized_entryname(pathname_name)
return self.contents[to_string(pathname_name)]
def _normalized_entryname(self, pathname_name):
if not self.filesystem.is_case_sensitive:
matching_names = [name for name in self.contents
if name.lower() == pathname_name.lower()]
if matching_names:
pathname_name = matching_names[0]
return pathname_name
def remove_entry(self, pathname_name, recursive=True):
pathname_name = self._normalized_entryname(pathname_name)
entry = self.get_entry(pathname_name)
if self.filesystem.is_windows_fs:
if entry.st_mode & PERM_WRITE == 0:
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
if self.filesystem.has_open_file(entry):
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
else:
if (not is_root() and (self.st_mode & (PERM_WRITE | PERM_EXE) !=
PERM_WRITE | PERM_EXE)):
self.filesystem.raise_os_error(errno.EACCES, pathname_name)
if recursive and isinstance(entry, FakeDirectory):
while entry.contents:
entry.remove_entry(list(entry.contents)[0])
elif entry.st_nlink == 1:
self.filesystem.change_disk_usage(
-entry.size, pathname_name, entry.st_dev)
self.st_nlink -= 1
entry.st_nlink -= 1
assert entry.st_nlink >= 0
del self.contents[to_string(pathname_name)]
@property
def size(self):
return sum([item[1].size for item in self.contents.items()])
@Deprecator('property size')
def GetSize(self):
return self.size
def has_parent_object(self, dir_object):
obj = self
while obj:
if obj == dir_object:
return True
obj = obj.parent_dir
return False
def __str__(self):
description = super(FakeDirectory, self).__str__() + ':\n'
for item in self.contents:
item_desc = self.contents[item].__str__()
for line in item_desc.split('\n'):
if line:
description = description + ' ' + line + '\n'
return description
Deprecator.add(FakeDirectory, FakeDirectory.add_entry, 'AddEntry')
Deprecator.add(FakeDirectory, FakeDirectory.get_entry, 'GetEntry')
Deprecator.add(FakeDirectory, FakeDirectory.set_contents, 'SetContents')
Deprecator.add(FakeDirectory, FakeDirectory.remove_entry, 'RemoveEntry')
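# Sketch of the entry bookkeeping above: adding an entry assigns an inode
# and adjusts link counts; removing it releases the disk usage again.
def _example_fake_directory():  # documentation sketch, never called
    filesystem = FakeFilesystem()
    dir_object = filesystem.create_dir('/sub')
    filesystem.create_file('/sub/a.txt', contents='a')
    assert 'a.txt' in dir_object.contents
    dir_object.remove_entry('a.txt')
    assert 'a.txt' not in dir_object.contents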
class FakeDirectoryFromRealDirectory(FakeDirectory):
def __init__(self, source_path, filesystem, read_only,
target_path=None):
target_path = target_path or source_path
real_stat = os.stat(source_path)
super(FakeDirectoryFromRealDirectory, self).__init__(
name=os.path.split(target_path)[1],
perm_bits=real_stat.st_mode,
filesystem=filesystem)
self.st_ctime = real_stat.st_ctime
self.st_atime = real_stat.st_atime
self.st_mtime = real_stat.st_mtime
self.st_gid = real_stat.st_gid
self.st_uid = real_stat.st_uid
self.source_path = source_path
self.read_only = read_only
self.contents_read = False
@property
def contents(self):
if not self.contents_read:
self.contents_read = True
base = self.path
for entry in os.listdir(self.source_path):
source_path = os.path.join(self.source_path, entry)
target_path = os.path.join(base, entry)
if os.path.islink(source_path):
self.filesystem.add_real_symlink(source_path, target_path)
elif os.path.isdir(source_path):
self.filesystem.add_real_directory(
source_path, self.read_only, target_path=target_path)
else:
self.filesystem.add_real_file(
source_path, self.read_only, target_path=target_path)
return self.byte_contents
@property
def size(self):
if not self.contents_read:
return 0
return super(FakeDirectoryFromRealDirectory, self).size
class FakeFilesystem:
def __init__(self, path_separator=os.path.sep, total_size=None,
patcher=None):
self.path_separator = path_separator
self.alternative_path_separator = os.path.altsep
self.patcher = patcher
if path_separator != os.sep:
self.alternative_path_separator = None
self.is_windows_fs = sys.platform == 'win32'
self.is_macos = sys.platform == 'darwin'
self.is_case_sensitive = not (self.is_windows_fs or self.is_macos)
self.root = FakeDirectory(self.path_separator, filesystem=self)
self.cwd = self.root.name
self.umask = os.umask(0o22)
os.umask(self.umask)
# A list of open file objects. Their position in the list is their
# file descriptor number
self.open_files = []
# A heap containing all free positions in self.open_files list
self._free_fd_heap = []
# last used numbers for inodes (st_ino) and devices (st_dev)
self.last_ino = 0
self.last_dev = 0
self.mount_points = {}
self.add_mount_point(self.root.name, total_size)
self._add_standard_streams()
self.dev_null = FakeNullFile(self)
@property
def is_linux(self):
return not self.is_windows_fs and not self.is_macos
def reset(self, total_size=None):
self.root = FakeDirectory(self.path_separator, filesystem=self)
self.cwd = self.root.name
self.open_files = []
self._free_fd_heap = []
self.last_ino = 0
self.last_dev = 0
self.mount_points = {}
self.add_mount_point(self.root.name, total_size)
self._add_standard_streams()
def pause(self):
if self.patcher is None:
raise RuntimeError('pause() can only be called from a fake file '
'system object created by a Patcher object')
self.patcher.pause()
def resume(self):
if self.patcher is None:
raise RuntimeError('resume() can only be called from a fake file '
'system object created by a Patcher object')
self.patcher.resume()
def line_separator(self):
return '\r\n' if self.is_windows_fs else '\n'
def _error_message(self, errno):
return os.strerror(errno) + ' in the fake filesystem'
def raise_os_error(self, errno, filename=None, winerror=None):
message = self._error_message(errno)
if (winerror is not None and sys.platform == 'win32' and
self.is_windows_fs):
raise OSError(errno, message, filename, winerror)
raise OSError(errno, message, filename)
@staticmethod
def _matching_string(matched, string):
if string is None:
return string
if isinstance(matched, bytes) and isinstance(string, str):
return string.encode(locale.getpreferredencoding(False))
return string
def _path_separator(self, path):
return self._matching_string(path, self.path_separator)
def _alternative_path_separator(self, path):
return self._matching_string(path, self.alternative_path_separator)
def add_mount_point(self, path, total_size=None):
path = self.absnormpath(path)
if path in self.mount_points:
self.raise_os_error(errno.EEXIST, path)
self.last_dev += 1
self.mount_points[path] = {
'idev': self.last_dev, 'total_size': total_size, 'used_size': 0
}
# special handling for root path: has been created before
if path == self.root.name:
root_dir = self.root
self.last_ino += 1
root_dir.st_ino = self.last_ino
else:
root_dir = self.create_dir(path)
root_dir.st_dev = self.last_dev
return self.mount_points[path]
def _auto_mount_drive_if_needed(self, path, force=False):
if (self.is_windows_fs and
(force or not self._mount_point_for_path(path))):
drive = self.splitdrive(path)[0]
if drive:
return self.add_mount_point(path=drive)
def _mount_point_for_path(self, path):
def to_str(string):
if string is None or isinstance(string, str):
return string
return string.decode(locale.getpreferredencoding(False))
path = self.absnormpath(self._original_path(path))
if path in self.mount_points:
return self.mount_points[path]
mount_path = self._matching_string(path, '')
drive = self.splitdrive(path)[:1]
for root_path in self.mount_points:
root_path = self._matching_string(path, root_path)
if drive and not root_path.startswith(drive):
continue
if path.startswith(root_path) and len(root_path) > len(mount_path):
mount_path = root_path
if mount_path:
return self.mount_points[to_str(mount_path)]
mount_point = self._auto_mount_drive_if_needed(path, force=True)
assert mount_point
return mount_point
def _mount_point_for_device(self, idev):
for mount_point in self.mount_points.values():
if mount_point['idev'] == idev:
return mount_point
def get_disk_usage(self, path=None):
DiskUsage = namedtuple('usage', 'total, used, free')
if path is None:
mount_point = self.mount_points[self.root.name]
else:
mount_point = self._mount_point_for_path(path)
if mount_point and mount_point['total_size'] is not None:
return DiskUsage(mount_point['total_size'],
mount_point['used_size'],
mount_point['total_size'] -
mount_point['used_size'])
return DiskUsage(
1024 * 1024 * 1024 * 1024, 0, 1024 * 1024 * 1024 * 1024)
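    # Sketch of the accounting above: without a size limit a pseudo 1 TiB
    # volume is reported; set_disk_usage (below) enables the limit, after
    # which writes exceeding it raise ENOSPC via change_disk_usage, e.g.:
    #   fs = FakeFilesystem()
    #   fs.set_disk_usage(100)
    #   fs.create_file('/big', st_size=60)
    #   fs.get_disk_usage().free   # -> 40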
def set_disk_usage(self, total_size, path=None):
if path is None:
path = self.root.name
mount_point = self._mount_point_for_path(path)
if (mount_point['total_size'] is not None and
mount_point['used_size'] > total_size):
self.raise_os_error(errno.ENOSPC, path)
mount_point['total_size'] = total_size
def change_disk_usage(self, usage_change, file_path, st_dev):
mount_point = self._mount_point_for_device(st_dev)
if mount_point:
total_size = mount_point['total_size']
if total_size is not None:
if total_size - mount_point['used_size'] < usage_change:
self.raise_os_error(errno.ENOSPC, file_path)
mount_point['used_size'] += usage_change
def stat(self, entry_path, follow_symlinks=True):
# stat should return the tuple representing return value of os.stat
file_object = self.resolve(
entry_path, follow_symlinks,
allow_fd=True, check_read_perm=False)
if not is_root():
# make sure stat raises if a parent dir is not readable
parent_dir = file_object.parent_dir
if parent_dir:
self.get_object(parent_dir.path)
self.raise_for_filepath_ending_with_separator(
entry_path, file_object, follow_symlinks)
return file_object.stat_result.copy()
def raise_for_filepath_ending_with_separator(self, entry_path,
file_object,
follow_symlinks=True,
macos_handling=False):
if self.ends_with_path_separator(entry_path):
if S_ISLNK(file_object.st_mode):
try:
link_object = self.resolve(entry_path)
except OSError as exc:
if self.is_macos and exc.errno != errno.ENOENT:
return
if self.is_windows_fs:
self.raise_os_error(errno.EINVAL, entry_path)
raise
if not follow_symlinks or self.is_windows_fs or self.is_macos:
file_object = link_object
if self.is_windows_fs:
is_error = S_ISREG(file_object.st_mode)
elif self.is_macos and macos_handling:
is_error = not S_ISLNK(file_object.st_mode)
else:
is_error = not S_ISDIR(file_object.st_mode)
if is_error:
error_nr = (errno.EINVAL if self.is_windows_fs
else errno.ENOTDIR)
self.raise_os_error(error_nr, entry_path)
def chmod(self, path, mode, follow_symlinks=True):
file_object = self.resolve(path, follow_symlinks, allow_fd=True)
if self.is_windows_fs:
if mode & PERM_WRITE:
file_object.st_mode = file_object.st_mode | 0o222
else:
file_object.st_mode = file_object.st_mode & 0o777555
else:
file_object.st_mode = ((file_object.st_mode & ~PERM_ALL) |
(mode & PERM_ALL))
file_object.st_ctime = time.time()
def utime(self, path, times=None, *, ns=None, follow_symlinks=True):
self._handle_utime_arg_errors(ns, times)
file_object = self.resolve(path, follow_symlinks, allow_fd=True)
if times is not None:
for file_time in times:
if not isinstance(file_time, (int, float)):
raise TypeError('atime and mtime must be numbers')
file_object.st_atime = times[0]
file_object.st_mtime = times[1]
elif ns is not None:
for file_time in ns:
if not isinstance(file_time, int):
raise TypeError('atime and mtime must be ints')
file_object.st_atime_ns = ns[0]
file_object.st_mtime_ns = ns[1]
else:
current_time = time.time()
file_object.st_atime = current_time
file_object.st_mtime = current_time
def _handle_utime_arg_errors(self, ns, times):
if times is not None and ns is not None:
raise ValueError(
"utime: you may specify either 'times' or 'ns' but not both")
if times is not None and len(times) != 2:
raise TypeError(
"utime: 'times' must be either a tuple of two ints or None")
if ns is not None and len(ns) != 2:
raise TypeError("utime: 'ns' must be a tuple of two ints")
@Deprecator
def SetIno(self, path, st_ino):
self.get_object(path).st_ino = st_ino
def _add_open_file(self, file_obj):
if self._free_fd_heap:
open_fd = heapq.heappop(self._free_fd_heap)
self.open_files[open_fd] = [file_obj]
return open_fd
self.open_files.append([file_obj])
return len(self.open_files) - 1
def _close_open_file(self, file_des):
self.open_files[file_des] = None
heapq.heappush(self._free_fd_heap, file_des)
def get_open_file(self, file_des):
if not is_int_type(file_des):
raise TypeError('an integer is required')
if (file_des >= len(self.open_files) or
self.open_files[file_des] is None):
self.raise_os_error(errno.EBADF, str(file_des))
return self.open_files[file_des][0]
def has_open_file(self, file_object):
return (file_object in [wrappers[0].get_object()
for wrappers in self.open_files if wrappers])
def _normalize_path_sep(self, path):
if self.alternative_path_separator is None or not path:
return path
return path.replace(self._alternative_path_separator(path),
self._path_separator(path))
def normcase(self, path):
path = make_string_path(path)
return self._normalize_path_sep(path)
def normpath(self, path):
path = self.normcase(path)
drive, path = self.splitdrive(path)
sep = self._path_separator(path)
is_absolute_path = path.startswith(sep)
path_components = path.split(sep)
collapsed_path_components = []
dot = self._matching_string(path, '.')
dotdot = self._matching_string(path, '..')
for component in path_components:
if (not component) or (component == dot):
continue
if component == dotdot:
if collapsed_path_components and (
collapsed_path_components[-1] != dotdot):
# Remove an up-reference: directory/..
collapsed_path_components.pop()
continue
elif is_absolute_path:
# Ignore leading .. components if starting from the
# root directory.
continue
collapsed_path_components.append(component)
collapsed_path = sep.join(collapsed_path_components)
if is_absolute_path:
collapsed_path = sep + collapsed_path
return drive + collapsed_path or dot
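    # Sketch of the collapsing above (pure string manipulation, the fake
    # filesystem contents are never consulted):
    #   fs.normpath('/a/b/../c/./d')   # -> '/a/c/d'
    #   fs.normpath('../x')            # -> '../x' (leading '..' is kept
    #                                  #    for relative paths)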
def _original_path(self, path):
def components_to_path():
if len(path_components) > len(normalized_components):
normalized_components.extend(
path_components[len(normalized_components):])
sep = self._path_separator(path)
normalized_path = sep.join(normalized_components)
if path.startswith(sep) and not normalized_path.startswith(sep):
normalized_path = sep + normalized_path
return normalized_path
if self.is_case_sensitive or not path:
return path
path_components = self._path_components(path)
normalized_components = []
current_dir = self.root
for component in path_components:
if not isinstance(current_dir, FakeDirectory):
return components_to_path()
dir_name, current_dir = self._directory_content(
current_dir, component)
if current_dir is None or (
isinstance(current_dir, FakeDirectory) and
current_dir._byte_contents is None and
current_dir.st_size == 0):
return components_to_path()
normalized_components.append(dir_name)
return components_to_path()
def absnormpath(self, path):
path = self.normcase(path)
cwd = self._matching_string(path, self.cwd)
if not path:
path = self.path_separator
if path == self._matching_string(path, '.'):
path = cwd
elif not self._starts_with_root_path(path):
# Prefix relative paths with cwd, if cwd is not root.
root_name = self._matching_string(path, self.root.name)
empty = self._matching_string(path, '')
path = self._path_separator(path).join(
(cwd != root_name and cwd or empty, path))
if path == self._matching_string(path, '.'):
path = cwd
return self.normpath(path)
def splitpath(self, path):
path = self.normcase(path)
sep = self._path_separator(path)
path_components = path.split(sep)
if not path_components:
return ('', '')
starts_with_drive = self._starts_with_drive_letter(path)
basename = path_components.pop()
colon = self._matching_string(path, ':')
if not path_components:
if starts_with_drive:
components = basename.split(colon)
return (components[0] + colon, components[1])
return ('', basename)
for component in path_components:
if component:
# The path is not the root; it contains a non-separator
# component. Strip all trailing separators.
while not path_components[-1]:
path_components.pop()
if starts_with_drive:
if not path_components:
components = basename.split(colon)
return (components[0] + colon, components[1])
if (len(path_components) == 1 and
path_components[0].endswith(colon)):
return (path_components[0] + sep, basename)
return (sep.join(path_components), basename)
# Root path. Collapse all leading separators.
return (sep, basename)
def splitdrive(self, path):
path = make_string_path(path)
if self.is_windows_fs:
if len(path) >= 2:
path = self.normcase(path)
sep = self._path_separator(path)
# UNC path handling
if (path[0:2] == sep * 2) and (
path[2:3] != sep):
# UNC path handling - splits off the mount point
# instead of the drive
sep_index = path.find(sep, 2)
if sep_index == -1:
return path[:0], path
sep_index2 = path.find(sep, sep_index + 1)
if sep_index2 == sep_index + 1:
return path[:0], path
if sep_index2 == -1:
sep_index2 = len(path)
return path[:sep_index2], path[sep_index2:]
if path[1:2] == self._matching_string(path, ':'):
return path[:2], path[2:]
return path[:0], path
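    # Sketch of the Windows-only splitting above:
    #   fs.splitdrive(r'C:\dir\file')       # -> ('C:', r'\dir\file')
    #   fs.splitdrive(r'\\host\share\f')    # -> (r'\\host\share', r'\f')
    # On a POSIX fake filesystem the drive part is always empty.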
def _join_paths_with_drive_support(self, *all_paths):
base_path = all_paths[0]
paths_to_add = all_paths[1:]
sep = self._path_separator(base_path)
seps = [sep, self._alternative_path_separator(base_path)]
result_drive, result_path = self.splitdrive(base_path)
for path in paths_to_add:
drive_part, path_part = self.splitdrive(path)
if path_part and path_part[:1] in seps:
# Second path is absolute
if drive_part or not result_drive:
result_drive = drive_part
result_path = path_part
continue
elif drive_part and drive_part != result_drive:
if (self.is_case_sensitive or
drive_part.lower() != result_drive.lower()):
# Different drives => ignore the first path entirely
result_drive = drive_part
result_path = path_part
continue
# Same drive in different case
result_drive = drive_part
# Second path is relative to the first
if result_path and result_path[-1:] not in seps:
result_path = result_path + sep
result_path = result_path + path_part
# add separator between UNC and non-absolute path
colon = self._matching_string(base_path, ':')
if (result_path and result_path[:1] not in seps and
result_drive and result_drive[-1:] != colon):
return result_drive + sep + result_path
return result_drive + result_path
def joinpaths(self, *paths):
if sys.version_info >= (3, 6):
paths = [os.fspath(path) for path in paths]
if len(paths) == 1:
return paths[0]
if self.is_windows_fs:
return self._join_paths_with_drive_support(*paths)
joined_path_segments = []
sep = self._path_separator(paths[0])
for path_segment in paths:
if self._starts_with_root_path(path_segment):
# An absolute path
joined_path_segments = [path_segment]
else:
if (joined_path_segments and
not joined_path_segments[-1].endswith(sep)):
joined_path_segments.append(sep)
if path_segment:
joined_path_segments.append(path_segment)
return self._matching_string(paths[0], '').join(joined_path_segments)
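    # Sketch of the joining above (mirrors os.path.join semantics):
    #   fs.joinpaths('a', 'b', 'c')    # -> 'a/b/c' (using the fake separator)
    #   fs.joinpaths('a', '/b', 'c')   # -> '/b/c' (an absolute segment
    #                                  #    discards what came before)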
def _path_components(self, path):
if not path or path == self._path_separator(path):
return []
drive, path = self.splitdrive(path)
path_components = path.split(self._path_separator(path))
assert drive or path_components
if not path_components[0]:
if len(path_components) > 1 and not path_components[1]:
path_components = []
else:
# This is an absolute path.
path_components = path_components[1:]
if drive:
path_components.insert(0, drive)
return path_components
def _starts_with_drive_letter(self, file_path):
colon = self._matching_string(file_path, ':')
        return (self.is_windows_fs and len(file_path) >= 2 and
                file_path[:1].isalpha() and file_path[1:2] == colon)
def _starts_with_root_path(self, file_path):
root_name = self._matching_string(file_path, self.root.name)
file_path = self._normalize_path_sep(file_path)
return (file_path.startswith(root_name) or
not self.is_case_sensitive and file_path.lower().startswith(
root_name.lower()) or
self._starts_with_drive_letter(file_path))
def _is_root_path(self, file_path):
root_name = self._matching_string(file_path, self.root.name)
return (file_path == root_name or not self.is_case_sensitive and
file_path.lower() == root_name.lower() or
2 <= len(file_path) <= 3 and
self._starts_with_drive_letter(file_path))
def ends_with_path_separator(self, file_path):
if is_int_type(file_path):
return False
file_path = make_string_path(file_path)
return (file_path and
file_path not in (self.path_separator,
self.alternative_path_separator) and
(file_path.endswith(self._path_separator(file_path)) or
self.alternative_path_separator is not None and
file_path.endswith(
self._alternative_path_separator(file_path))))
def is_filepath_ending_with_separator(self, path):
if not self.ends_with_path_separator(path):
return False
return self.isfile(self._path_without_trailing_separators(path))
def _directory_content(self, directory, component):
if not isinstance(directory, FakeDirectory):
return None, None
if component in directory.contents:
return component, directory.contents[component]
if not self.is_case_sensitive:
matching_content = [(subdir, directory.contents[subdir]) for
subdir in directory.contents
if subdir.lower() == component.lower()]
if matching_content:
return matching_content[0]
return None, None
def exists(self, file_path, check_link=False):
if check_link and self.islink(file_path):
return True
file_path = make_string_path(file_path)
if file_path is None:
raise TypeError
if not file_path:
return False
if file_path == self.dev_null.name:
return not self.is_windows_fs or sys.version_info >= (3, 8)
try:
if self.is_filepath_ending_with_separator(file_path):
return False
file_path = self.resolve_path(file_path)
except OSError:
return False
if file_path == self.root.name:
return True
path_components = self._path_components(file_path)
current_dir = self.root
for component in path_components:
current_dir = self._directory_content(current_dir, component)[1]
if not current_dir:
return False
return True
def resolve_path(self, file_path, allow_fd=False, raw_io=True):
if allow_fd and isinstance(file_path, int):
return self.get_open_file(file_path).get_object().path
file_path = make_string_path(file_path)
if file_path is None:
# file.open(None) raises TypeError, so mimic that.
raise TypeError('Expected file system path string, received None')
if not file_path or not self._valid_relative_path(file_path):
# file.open('') raises OSError, so mimic that, and validate that
# all parts of a relative path exist.
self.raise_os_error(errno.ENOENT, file_path)
file_path = self.absnormpath(self._original_path(file_path))
if self._is_root_path(file_path):
return file_path
if file_path == self.dev_null.name:
return file_path
path_components = self._path_components(file_path)
resolved_components = self._resolve_components(path_components, raw_io)
return self._components_to_path(resolved_components)
def _components_to_path(self, component_folders):
sep = (self._path_separator(component_folders[0])
if component_folders else self.path_separator)
path = sep.join(component_folders)
if not self._starts_with_root_path(path):
path = sep + path
return path
def _resolve_components(self, path_components, raw_io):
current_dir = self.root
link_depth = 0
resolved_components = []
while path_components:
component = path_components.pop(0)
resolved_components.append(component)
current_dir = self._directory_content(current_dir, component)[1]
if current_dir is None:
# The component of the path at this point does not actually
# exist in the folder. We can't resolve the path any more.
resolved_components.extend(path_components)
break
if S_ISLNK(current_dir.st_mode):
if link_depth > _MAX_LINK_DEPTH:
self.raise_os_error(errno.ELOOP,
self._components_to_path(
resolved_components))
link_path = self._follow_link(resolved_components, current_dir)
target_components = self._path_components(link_path)
path_components = target_components + path_components
resolved_components = []
current_dir = self.root
link_depth += 1
return resolved_components
def _valid_relative_path(self, file_path):
if self.is_windows_fs:
return True
slash_dotdot = self._matching_string(
file_path, self.path_separator + '..')
while file_path and slash_dotdot in file_path:
file_path = file_path[:file_path.rfind(slash_dotdot)]
if not self.exists(self.absnormpath(file_path)):
return False
return True
def _follow_link(self, link_path_components, link):
link_path = link.contents
if self.is_windows_fs and link_path.startswith('\\\\?\\'):
link_path = link_path[4:]
sep = self._path_separator(link_path)
if not self._starts_with_root_path(link_path):
components = link_path_components[:-1]
components.append(link_path)
link_path = sep.join(components)
return self.normpath(link_path)
def get_object_from_normpath(self, file_path, check_read_perm=True):
file_path = make_string_path(file_path)
if file_path == self.root.name:
return self.root
if file_path == self.dev_null.name:
return self.dev_null
file_path = self._original_path(file_path)
path_components = self._path_components(file_path)
target_object = self.root
try:
for component in path_components:
if S_ISLNK(target_object.st_mode):
target_object = self.resolve(target_object.contents)
if not S_ISDIR(target_object.st_mode):
if not self.is_windows_fs:
self.raise_os_error(errno.ENOTDIR, file_path)
self.raise_os_error(errno.ENOENT, file_path)
target_object = target_object.get_entry(component)
if (not is_root() and check_read_perm and target_object and
not target_object.st_mode & PERM_READ):
self.raise_os_error(errno.EACCES, target_object.path)
except KeyError:
self.raise_os_error(errno.ENOENT, file_path)
return target_object
def get_object(self, file_path, check_read_perm=True):
file_path = make_string_path(file_path)
file_path = self.absnormpath(self._original_path(file_path))
return self.get_object_from_normpath(file_path, check_read_perm)
def resolve(self, file_path, follow_symlinks=True, allow_fd=False,
check_read_perm=True):
if isinstance(file_path, int):
if allow_fd:
return self.get_open_file(file_path).get_object()
raise TypeError('path should be string, bytes or '
'os.PathLike (if supported), not int')
if follow_symlinks:
file_path = make_string_path(file_path)
return self.get_object_from_normpath(self.resolve_path(
file_path, check_read_perm), check_read_perm)
return self.lresolve(file_path)
def lresolve(self, path):
path = make_string_path(path)
if not path:
raise OSError(errno.ENOENT, path)
if path == self.root.name:
return self.root
path = self._path_without_trailing_separators(path)
if path == self._matching_string(path, '.'):
path = self.cwd
path = self._original_path(path)
parent_directory, child_name = self.splitpath(path)
if not parent_directory:
parent_directory = self.cwd
try:
parent_obj = self.resolve(parent_directory)
assert parent_obj
if not isinstance(parent_obj, FakeDirectory):
if not self.is_windows_fs and isinstance(parent_obj, FakeFile):
self.raise_os_error(errno.ENOTDIR, path)
self.raise_os_error(errno.ENOENT, path)
if not parent_obj.st_mode & PERM_READ:
self.raise_os_error(errno.EACCES, parent_directory)
return (parent_obj.get_entry(child_name) if child_name
else parent_obj)
except KeyError:
self.raise_os_error(errno.ENOENT, path)
def add_object(self, file_path, file_object):
if not file_path:
target_directory = self.root
else:
target_directory = self.resolve(file_path)
if not S_ISDIR(target_directory.st_mode):
error = errno.ENOENT if self.is_windows_fs else errno.ENOTDIR
self.raise_os_error(error, file_path)
target_directory.add_entry(file_object)
def rename(self, old_file_path, new_file_path, force_replace=False):
ends_with_sep = self.ends_with_path_separator(old_file_path)
old_file_path = self.absnormpath(old_file_path)
new_file_path = self.absnormpath(new_file_path)
if not self.exists(old_file_path, check_link=True):
self.raise_os_error(errno.ENOENT, old_file_path, 2)
if ends_with_sep:
self._handle_broken_link_with_trailing_sep(old_file_path)
old_object = self.lresolve(old_file_path)
if not self.is_windows_fs:
self._handle_posix_dir_link_errors(
new_file_path, old_file_path, ends_with_sep)
if self.exists(new_file_path, check_link=True):
new_file_path = self._rename_to_existing_path(
force_replace, new_file_path, old_file_path,
old_object, ends_with_sep)
if not new_file_path:
return
old_dir, old_name = self.splitpath(old_file_path)
new_dir, new_name = self.splitpath(new_file_path)
if not self.exists(new_dir):
self.raise_os_error(errno.ENOENT, new_dir)
old_dir_object = self.resolve(old_dir)
new_dir_object = self.resolve(new_dir)
if old_dir_object.st_dev != new_dir_object.st_dev:
self.raise_os_error(errno.EXDEV, old_file_path)
if not S_ISDIR(new_dir_object.st_mode):
self.raise_os_error(
errno.EACCES if self.is_windows_fs else errno.ENOTDIR,
new_file_path)
if new_dir_object.has_parent_object(old_object):
self.raise_os_error(errno.EINVAL, new_file_path)
object_to_rename = old_dir_object.get_entry(old_name)
old_dir_object.remove_entry(old_name, recursive=False)
object_to_rename.name = new_name
new_name = new_dir_object._normalized_entryname(new_name)
if new_name in new_dir_object.contents:
new_dir_object.remove_entry(new_name)
new_dir_object.add_entry(object_to_rename)
def _handle_broken_link_with_trailing_sep(self, path):
if self.islink(path):
if not self.exists(path):
error = (errno.ENOENT if self.is_macos else
errno.EINVAL if self.is_windows_fs else errno.ENOTDIR)
self.raise_os_error(error, path)
def _handle_posix_dir_link_errors(self, new_file_path, old_file_path,
ends_with_sep):
if (self.isdir(old_file_path, follow_symlinks=False) and
self.islink(new_file_path)):
self.raise_os_error(errno.ENOTDIR, new_file_path)
if (self.isdir(new_file_path, follow_symlinks=False) and
self.islink(old_file_path)):
if ends_with_sep and self.is_macos:
return
error = errno.ENOTDIR if ends_with_sep else errno.EISDIR
self.raise_os_error(error, new_file_path)
if (ends_with_sep and self.islink(old_file_path) and
old_file_path == new_file_path and not self.is_windows_fs):
self.raise_os_error(errno.ENOTDIR, new_file_path)
def _rename_to_existing_path(self, force_replace, new_file_path,
old_file_path, old_object, ends_with_sep):
new_object = self.get_object(new_file_path)
if old_file_path == new_file_path:
if not S_ISLNK(new_object.st_mode) and ends_with_sep:
error = errno.EINVAL if self.is_windows_fs else errno.ENOTDIR
self.raise_os_error(error, old_file_path)
return
if old_object == new_object:
new_file_path = self._rename_same_object(
new_file_path, old_file_path)
elif (S_ISDIR(new_object.st_mode) or S_ISLNK(new_object.st_mode)):
self._handle_rename_error_for_dir_or_link(
force_replace, new_file_path,
new_object, old_object, ends_with_sep)
elif S_ISDIR(old_object.st_mode):
error = errno.EEXIST if self.is_windows_fs else errno.ENOTDIR
self.raise_os_error(error, new_file_path)
elif self.is_windows_fs and not force_replace:
self.raise_os_error(errno.EEXIST, new_file_path)
else:
self.remove_object(new_file_path)
return new_file_path
def _handle_rename_error_for_dir_or_link(self, force_replace,
new_file_path, new_object,
old_object, ends_with_sep):
if self.is_windows_fs:
if force_replace:
self.raise_os_error(errno.EACCES, new_file_path)
else:
self.raise_os_error(errno.EEXIST, new_file_path)
if not S_ISLNK(new_object.st_mode):
if new_object.contents:
if (not S_ISLNK(old_object.st_mode) or
not ends_with_sep or not self.is_macos):
self.raise_os_error(errno.ENOTEMPTY, new_file_path)
if S_ISREG(old_object.st_mode):
self.raise_os_error(errno.EISDIR, new_file_path)
def _rename_same_object(self, new_file_path, old_file_path):
do_rename = old_file_path.lower() == new_file_path.lower()
if not do_rename:
try:
real_old_path = self.resolve_path(old_file_path)
original_old_path = self._original_path(real_old_path)
real_new_path = self.resolve_path(new_file_path)
if (real_new_path == original_old_path and
(new_file_path == real_old_path) ==
(new_file_path.lower() ==
real_old_path.lower())):
real_object = self.resolve(old_file_path,
follow_symlinks=False)
do_rename = (os.path.basename(old_file_path) ==
real_object.name or not self.is_macos)
else:
do_rename = (real_new_path.lower() ==
real_old_path.lower())
if do_rename:
parent, file_name = self.splitpath(new_file_path)
new_file_path = self.joinpaths(
self._original_path(parent), file_name)
except OSError:
pass
if not do_rename:
new_file_path = None
return new_file_path
def remove_object(self, file_path):
file_path = self.absnormpath(self._original_path(file_path))
if self._is_root_path(file_path):
self.raise_os_error(errno.EBUSY, file_path)
try:
dirname, basename = self.splitpath(file_path)
target_directory = self.resolve(dirname, check_read_perm=False)
target_directory.remove_entry(basename)
except KeyError:
self.raise_os_error(errno.ENOENT, file_path)
except AttributeError:
self.raise_os_error(errno.ENOTDIR, file_path)
def make_string_path(self, path):
path = make_string_path(path)
os_sep = self._matching_string(path, os.sep)
fake_sep = self._matching_string(path, self.path_separator)
return path.replace(os_sep, fake_sep)
def create_dir(self, directory_path, perm_bits=PERM_DEF):
directory_path = self.make_string_path(directory_path)
directory_path = self.absnormpath(directory_path)
self._auto_mount_drive_if_needed(directory_path)
if self.exists(directory_path, check_link=True):
self.raise_os_error(errno.EEXIST, directory_path)
path_components = self._path_components(directory_path)
current_dir = self.root
new_dirs = []
for component in path_components:
directory = self._directory_content(current_dir, component)[1]
if not directory:
new_dir = FakeDirectory(component, filesystem=self)
new_dirs.append(new_dir)
current_dir.add_entry(new_dir)
current_dir = new_dir
else:
if S_ISLNK(directory.st_mode):
directory = self.resolve(directory.contents)
current_dir = directory
if directory.st_mode & S_IFDIR != S_IFDIR:
self.raise_os_error(errno.ENOTDIR, current_dir.path)
for new_dir in new_dirs:
new_dir.st_mode = S_IFDIR | perm_bits
return current_dir
def create_file(self, file_path, st_mode=S_IFREG | PERM_DEF_FILE,
contents='', st_size=None, create_missing_dirs=True,
apply_umask=False, encoding=None, errors=None,
side_effect=None):
return self.create_file_internally(
file_path, st_mode, contents, st_size, create_missing_dirs,
apply_umask, encoding, errors, side_effect=side_effect)
def add_real_file(self, source_path, read_only=True, target_path=None):
target_path = target_path or source_path
source_path = make_string_path(source_path)
target_path = self.make_string_path(target_path)
real_stat = os.stat(source_path)
fake_file = self.create_file_internally(target_path,
read_from_real_fs=True)
fake_file.stat_result.set_from_stat_result(real_stat)
if read_only:
fake_file.st_mode &= 0o777444
fake_file.file_path = source_path
self.change_disk_usage(fake_file.size, fake_file.name,
fake_file.st_dev)
return fake_file
def add_real_symlink(self, source_path, target_path=None):
source_path = self._path_without_trailing_separators(source_path)
if not os.path.exists(source_path) and not os.path.islink(source_path):
self.raise_os_error(errno.ENOENT, source_path)
target = os.readlink(source_path)
if target_path:
return self.create_symlink(target_path, target)
else:
return self.create_symlink(source_path, target)
def add_real_directory(self, source_path, read_only=True, lazy_read=True,
target_path=None):
source_path = self._path_without_trailing_separators(source_path)
if not os.path.exists(source_path):
self.raise_os_error(errno.ENOENT, source_path)
target_path = target_path or source_path
if lazy_read:
parent_path = os.path.split(target_path)[0]
if self.exists(parent_path):
parent_dir = self.get_object(parent_path)
else:
parent_dir = self.create_dir(parent_path)
new_dir = FakeDirectoryFromRealDirectory(
source_path, self, read_only, target_path)
parent_dir.add_entry(new_dir)
else:
new_dir = self.create_dir(target_path)
for base, _, files in os.walk(source_path):
new_base = os.path.join(new_dir.path,
os.path.relpath(base, source_path))
for fileEntry in os.listdir(base):
abs_fileEntry = os.path.join(base, fileEntry)
if not os.path.islink(abs_fileEntry):
continue
self.add_real_symlink(
abs_fileEntry, os.path.join(new_base, fileEntry))
for fileEntry in files:
path = os.path.join(base, fileEntry)
if os.path.islink(path):
continue
self.add_real_file(path,
read_only,
os.path.join(new_base, fileEntry))
return new_dir
def add_real_paths(self, path_list, read_only=True, lazy_dir_read=True):
for path in path_list:
if os.path.isdir(path):
self.add_real_directory(path, read_only, lazy_dir_read)
else:
self.add_real_file(path, read_only)
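    # Hedged usage sketch for the add_real_* helpers above (paths are
    # illustrative, not part of the API):
    #     fs.add_real_file('/etc/hosts')                       # single file
    #     fs.add_real_directory('/usr/share/doc', lazy_read=True)
    # With lazy_read=True, file contents are copied only on first access.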
def create_file_internally(self, file_path,
st_mode=S_IFREG | PERM_DEF_FILE,
contents='', st_size=None,
create_missing_dirs=True,
apply_umask=False, encoding=None, errors=None,
read_from_real_fs=False, raw_io=False,
side_effect=None):
file_path = self.make_string_path(file_path)
file_path = self.absnormpath(file_path)
if not is_int_type(st_mode):
raise TypeError(
'st_mode must be of int type - did you mean to set contents?')
if self.exists(file_path, check_link=True):
self.raise_os_error(errno.EEXIST, file_path)
parent_directory, new_file = self.splitpath(file_path)
if not parent_directory:
parent_directory = self.cwd
self._auto_mount_drive_if_needed(parent_directory)
if not self.exists(parent_directory):
if not create_missing_dirs:
self.raise_os_error(errno.ENOENT, parent_directory)
self.create_dir(parent_directory)
else:
parent_directory = self._original_path(parent_directory)
if apply_umask:
st_mode &= ~self.umask
if read_from_real_fs:
file_object = FakeFileFromRealFile(file_path, filesystem=self,
side_effect=side_effect)
else:
file_object = FakeFile(new_file, st_mode, filesystem=self,
encoding=encoding, errors=errors,
side_effect=side_effect)
self.add_object(parent_directory, file_object)
if st_size is None and contents is None:
contents = ''
if (not read_from_real_fs and
(contents is not None or st_size is not None)):
try:
if st_size is not None:
file_object.set_large_file_size(st_size)
else:
file_object._set_initial_contents(contents)
except OSError:
self.remove_object(file_path)
raise
return file_object
def create_symlink(self, file_path, link_target, create_missing_dirs=True):
file_path = self.make_string_path(file_path)
link_target = self.make_string_path(link_target)
file_path = self.normcase(file_path)
if self.ends_with_path_separator(file_path):
if self.exists(file_path):
self.raise_os_error(errno.EEXIST, file_path)
if self.exists(link_target):
if not self.is_windows_fs:
self.raise_os_error(errno.ENOENT, file_path)
else:
if self.is_windows_fs:
self.raise_os_error(errno.EINVAL, link_target)
if not self.exists(
self._path_without_trailing_separators(file_path),
check_link=True):
self.raise_os_error(errno.ENOENT, link_target)
if self.is_macos:
if self.exists(file_path, check_link=True):
self.remove_object(file_path)
else:
self.raise_os_error(errno.EEXIST, link_target)
if not self.islink(file_path):
file_path = self.resolve_path(file_path)
link_target = make_string_path(link_target)
return self.create_file_internally(
file_path, st_mode=S_IFLNK | PERM_DEF,
contents=link_target,
create_missing_dirs=create_missing_dirs,
raw_io=True)
def link(self, old_path, new_path, follow_symlinks=True):
new_path_normalized = self.absnormpath(new_path)
if self.exists(new_path_normalized, check_link=True):
self.raise_os_error(errno.EEXIST, new_path)
new_parent_directory, new_basename = self.splitpath(
new_path_normalized)
if not new_parent_directory:
new_parent_directory = self.cwd
if not self.exists(new_parent_directory):
self.raise_os_error(errno.ENOENT, new_parent_directory)
if self.ends_with_path_separator(old_path):
error = errno.EINVAL if self.is_windows_fs else errno.ENOTDIR
self.raise_os_error(error, old_path)
if not self.is_windows_fs and self.ends_with_path_separator(new_path):
self.raise_os_error(errno.ENOENT, old_path)
try:
old_file = self.resolve(old_path, follow_symlinks=follow_symlinks)
except OSError:
self.raise_os_error(errno.ENOENT, old_path)
if old_file.st_mode & S_IFDIR:
self.raise_os_error(
errno.EACCES if self.is_windows_fs else errno.EPERM, old_path)
old_file.name = new_basename
self.add_object(new_parent_directory, old_file)
return old_file
def _is_circular_link(self, link_obj):
try:
self.resolve_path(link_obj.contents)
except OSError as exc:
return exc.errno == errno.ELOOP
return False
def readlink(self, path):
if path is None:
raise TypeError
link_obj = self.lresolve(path)
if S_IFMT(link_obj.st_mode) != S_IFLNK:
self.raise_os_error(errno.EINVAL, path)
if self.ends_with_path_separator(path):
if not self.is_windows_fs and self.exists(path):
self.raise_os_error(errno.EINVAL, path)
if not self.exists(link_obj.path):
if self.is_windows_fs:
error = errno.EINVAL
elif self._is_circular_link(link_obj):
if self.is_macos:
return link_obj.path
error = errno.ELOOP
else:
error = errno.ENOENT
self.raise_os_error(error, link_obj.path)
return link_obj.contents
def makedir(self, dir_name, mode=PERM_DEF):
dir_name = make_string_path(dir_name)
ends_with_sep = self.ends_with_path_separator(dir_name)
dir_name = self._path_without_trailing_separators(dir_name)
if not dir_name:
self.raise_os_error(errno.ENOENT, '')
if self.is_windows_fs:
dir_name = self.absnormpath(dir_name)
parent_dir, _ = self.splitpath(dir_name)
if parent_dir:
base_dir = self.normpath(parent_dir)
ellipsis = self._matching_string(
parent_dir, self.path_separator + '..')
if parent_dir.endswith(ellipsis) and not self.is_windows_fs:
base_dir, dummy_dotdot, _ = parent_dir.partition(ellipsis)
if not self.exists(base_dir):
self.raise_os_error(errno.ENOENT, base_dir)
dir_name = self.absnormpath(dir_name)
if self.exists(dir_name, check_link=True):
if self.is_windows_fs and dir_name == self.path_separator:
error_nr = errno.EACCES
else:
error_nr = errno.EEXIST
if ends_with_sep and self.is_macos and not self.exists(dir_name):
self.remove_object(dir_name)
else:
self.raise_os_error(error_nr, dir_name)
head, tail = self.splitpath(dir_name)
self.add_object(
head, FakeDirectory(tail, mode & ~self.umask, filesystem=self))
def _path_without_trailing_separators(self, path):
while self.ends_with_path_separator(path):
path = path[:-1]
return path
def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=False):
if not dir_name:
self.raise_os_error(errno.ENOENT, '')
dir_name = to_string(dir_name)
ends_with_sep = self.ends_with_path_separator(dir_name)
dir_name = self.absnormpath(dir_name)
if (ends_with_sep and self.is_macos and
self.exists(dir_name, check_link=True) and
not self.exists(dir_name)):
self.remove_object(dir_name)
path_components = self._path_components(dir_name)
current_dir = self.root
for component in path_components:
if (component not in current_dir.contents
or not isinstance(current_dir.contents, dict)):
break
else:
current_dir = current_dir.contents[component]
try:
self.create_dir(dir_name, mode & ~self.umask)
except OSError as e:
if e.errno == errno.EACCES:
raise
if (not exist_ok or
not isinstance(self.resolve(dir_name), FakeDirectory)):
if self.is_windows_fs and e.errno == errno.ENOTDIR:
e.errno = errno.ENOENT
self.raise_os_error(e.errno, e.filename)
def _is_of_type(self, path, st_flag, follow_symlinks=True):
path = make_string_path(path)
if path is None:
raise TypeError
try:
obj = self.resolve(path, follow_symlinks)
if obj:
self.raise_for_filepath_ending_with_separator(
path, obj, macos_handling=not follow_symlinks)
return S_IFMT(obj.st_mode) == st_flag
except OSError:
return False
return False
def isdir(self, path, follow_symlinks=True):
return self._is_of_type(path, S_IFDIR, follow_symlinks)
def isfile(self, path, follow_symlinks=True):
return self._is_of_type(path, S_IFREG, follow_symlinks)
def islink(self, path):
return self._is_of_type(path, S_IFLNK, follow_symlinks=False)
def confirmdir(self, target_directory):
directory = self.resolve(target_directory)
if not directory.st_mode & S_IFDIR:
self.raise_os_error(errno.ENOTDIR, target_directory, 267)
return directory
def remove(self, path):
norm_path = self.absnormpath(path)
if self.ends_with_path_separator(path):
self._handle_broken_link_with_trailing_sep(norm_path)
if self.exists(norm_path):
obj = self.resolve(norm_path, check_read_perm=False)
if S_IFMT(obj.st_mode) == S_IFDIR:
link_obj = self.lresolve(norm_path)
if S_IFMT(link_obj.st_mode) != S_IFLNK:
if self.is_windows_fs:
error = errno.EACCES
elif self.is_macos:
error = errno.EPERM
else:
error = errno.EISDIR
self.raise_os_error(error, norm_path)
norm_path = make_string_path(norm_path)
if path.endswith(self.path_separator):
if self.is_windows_fs:
error = errno.EACCES
elif self.is_macos:
error = errno.EPERM
else:
error = errno.ENOTDIR
self.raise_os_error(error, norm_path)
else:
self.raise_for_filepath_ending_with_separator(path, obj)
self.remove_object(norm_path)
def rmdir(self, target_directory, allow_symlink=False):
if target_directory in (b'.', u'.'):
error_nr = errno.EACCES if self.is_windows_fs else errno.EINVAL
self.raise_os_error(error_nr, target_directory)
ends_with_sep = self.ends_with_path_separator(target_directory)
target_directory = self.absnormpath(target_directory)
if self.confirmdir(target_directory):
if not self.is_windows_fs and self.islink(target_directory):
if allow_symlink:
return
if not ends_with_sep or not self.is_macos:
self.raise_os_error(errno.ENOTDIR, target_directory)
dir_object = self.resolve(target_directory)
if dir_object.contents:
self.raise_os_error(errno.ENOTEMPTY, target_directory)
self.remove_object(target_directory)
def listdir(self, target_directory):
target_directory = self.resolve_path(target_directory, allow_fd=True)
directory = self.confirmdir(target_directory)
directory_contents = directory.contents
return list(directory_contents.keys())
def __str__(self):
return str(self.root)
def _add_standard_streams(self):
self._add_open_file(StandardStreamWrapper(sys.stdin))
self._add_open_file(StandardStreamWrapper(sys.stdout))
self._add_open_file(StandardStreamWrapper(sys.stderr))
Deprecator.add(FakeFilesystem, FakeFilesystem.get_disk_usage, 'GetDiskUsage')
Deprecator.add(FakeFilesystem, FakeFilesystem.set_disk_usage, 'SetDiskUsage')
Deprecator.add(FakeFilesystem,
FakeFilesystem.change_disk_usage, 'ChangeDiskUsage')
Deprecator.add(FakeFilesystem, FakeFilesystem.add_mount_point, 'AddMountPoint')
Deprecator.add(FakeFilesystem, FakeFilesystem.stat, 'GetStat')
Deprecator.add(FakeFilesystem, FakeFilesystem.chmod, 'ChangeMode')
Deprecator.add(FakeFilesystem, FakeFilesystem.utime, 'UpdateTime')
Deprecator.add(FakeFilesystem, FakeFilesystem._add_open_file, 'AddOpenFile')
Deprecator.add(FakeFilesystem,
FakeFilesystem._close_open_file, 'CloseOpenFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.has_open_file, 'HasOpenFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.get_open_file, 'GetOpenFile')
Deprecator.add(FakeFilesystem,
FakeFilesystem.normcase, 'NormalizePathSeparator')
Deprecator.add(FakeFilesystem, FakeFilesystem.normpath, 'CollapsePath')
Deprecator.add(FakeFilesystem, FakeFilesystem._original_path, 'NormalizeCase')
Deprecator.add(FakeFilesystem, FakeFilesystem.absnormpath, 'NormalizePath')
Deprecator.add(FakeFilesystem, FakeFilesystem.splitpath, 'SplitPath')
Deprecator.add(FakeFilesystem, FakeFilesystem.splitdrive, 'SplitDrive')
Deprecator.add(FakeFilesystem, FakeFilesystem.joinpaths, 'JoinPaths')
Deprecator.add(FakeFilesystem,
FakeFilesystem._path_components, 'GetPathComponents')
Deprecator.add(FakeFilesystem, FakeFilesystem._starts_with_drive_letter,
'StartsWithDriveLetter')
Deprecator.add(FakeFilesystem, FakeFilesystem.exists, 'Exists')
Deprecator.add(FakeFilesystem, FakeFilesystem.resolve_path, 'ResolvePath')
Deprecator.add(FakeFilesystem, FakeFilesystem.get_object_from_normpath,
'GetObjectFromNormalizedPath')
Deprecator.add(FakeFilesystem, FakeFilesystem.get_object, 'GetObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.resolve, 'ResolveObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.lresolve, 'LResolveObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.add_object, 'AddObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.remove_object, 'RemoveObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.rename, 'RenameObject')
Deprecator.add(FakeFilesystem, FakeFilesystem.create_dir, 'CreateDirectory')
Deprecator.add(FakeFilesystem, FakeFilesystem.create_file, 'CreateFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.create_symlink, 'CreateLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.link, 'CreateHardLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.readlink, 'ReadLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.makedir, 'MakeDirectory')
Deprecator.add(FakeFilesystem, FakeFilesystem.makedirs, 'MakeDirectories')
Deprecator.add(FakeFilesystem, FakeFilesystem.isdir, 'IsDir')
Deprecator.add(FakeFilesystem, FakeFilesystem.isfile, 'IsFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.islink, 'IsLink')
Deprecator.add(FakeFilesystem, FakeFilesystem.confirmdir, 'ConfirmDir')
Deprecator.add(FakeFilesystem, FakeFilesystem.remove, 'RemoveFile')
Deprecator.add(FakeFilesystem, FakeFilesystem.rmdir, 'RemoveDirectory')
Deprecator.add(FakeFilesystem, FakeFilesystem.listdir, 'ListDir')
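# Hedged illustration (not a documented API guarantee): the Deprecator
# aliases above keep legacy CamelCase names working, e.g. a call such as
#     filesystem.CreateFile('/foo/bar')
# is expected to warn and forward to filesystem.create_file('/foo/bar').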
class FakePathModule:
_OS_PATH_COPY = _copy_module(os.path)
@staticmethod
def dir():
return [
'abspath', 'dirname', 'exists', 'expanduser', 'getatime',
'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile',
'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath',
'realpath', 'relpath', 'split', 'splitdrive', 'samefile'
]
def __init__(self, filesystem, os_module):
self.filesystem = filesystem
self._os_path = self._OS_PATH_COPY
self._os_path.os = self.os = os_module
self.sep = self.filesystem.path_separator
self.altsep = self.filesystem.alternative_path_separator
def exists(self, path):
return self.filesystem.exists(path)
def lexists(self, path):
return self.filesystem.exists(path, check_link=True)
def getsize(self, path):
file_obj = self.filesystem.resolve(path)
if (self.filesystem.ends_with_path_separator(path) and
S_IFMT(file_obj.st_mode) != S_IFDIR):
error_nr = (errno.EINVAL if self.filesystem.is_windows_fs
else errno.ENOTDIR)
self.filesystem.raise_os_error(error_nr, path)
return file_obj.st_size
def isabs(self, path):
if self.filesystem.is_windows_fs:
path = self.splitdrive(path)[1]
path = make_string_path(path)
sep = self.filesystem._path_separator(path)
altsep = self.filesystem._alternative_path_separator(path)
if self.filesystem.is_windows_fs:
return len(path) > 0 and path[:1] in (sep, altsep)
else:
return (path.startswith(sep) or
altsep is not None and path.startswith(altsep))
def isdir(self, path):
return self.filesystem.isdir(path)
def isfile(self, path):
return self.filesystem.isfile(path)
def islink(self, path):
return self.filesystem.islink(path)
def getmtime(self, path):
try:
file_obj = self.filesystem.resolve(path)
return file_obj.st_mtime
except OSError:
self.filesystem.raise_os_error(errno.ENOENT, winerror=3)
def getatime(self, path):
try:
file_obj = self.filesystem.resolve(path)
except OSError:
self.filesystem.raise_os_error(errno.ENOENT)
return file_obj.st_atime
def getctime(self, path):
try:
file_obj = self.filesystem.resolve(path)
except OSError:
self.filesystem.raise_os_error(errno.ENOENT)
return file_obj.st_ctime
def abspath(self, path):
def getcwd():
if isinstance(path, bytes):
return self.os.getcwdb()
else:
return self.os.getcwd()
path = make_string_path(path)
sep = self.filesystem._path_separator(path)
altsep = self.filesystem._alternative_path_separator(path)
if not self.isabs(path):
path = self.join(getcwd(), path)
elif (self.filesystem.is_windows_fs and
path.startswith(sep) or altsep is not None and
path.startswith(altsep)):
cwd = getcwd()
if self.filesystem._starts_with_drive_letter(cwd):
path = self.join(cwd[:2], path)
return self.normpath(path)
def join(self, *p):
return self.filesystem.joinpaths(*p)
def split(self, path):
return self.filesystem.splitpath(path)
def splitdrive(self, path):
return self.filesystem.splitdrive(path)
def normpath(self, path):
return self.filesystem.normpath(path)
def normcase(self, path):
path = self.filesystem.normcase(path)
if self.filesystem.is_windows_fs:
path = path.lower()
return path
def relpath(self, path, start=None):
if not path:
raise ValueError("no path specified")
path = make_string_path(path)
if start is not None:
start = make_string_path(start)
else:
start = self.filesystem.cwd
if self.filesystem.alternative_path_separator is not None:
path = path.replace(self.filesystem.alternative_path_separator,
self._os_path.sep)
start = start.replace(self.filesystem.alternative_path_separator,
self._os_path.sep)
path = path.replace(self.filesystem.path_separator, self._os_path.sep)
start = start.replace(
self.filesystem.path_separator, self._os_path.sep)
path = self._os_path.relpath(path, start)
return path.replace(self._os_path.sep, self.filesystem.path_separator)
def realpath(self, filename):
if self.filesystem.is_windows_fs:
return self.abspath(filename)
filename = make_string_path(filename)
path, ok = self._joinrealpath(filename[:0], filename, {})
return self.abspath(path)
def samefile(self, path1, path2):
stat1 = self.filesystem.stat(path1)
stat2 = self.filesystem.stat(path2)
return (stat1.st_ino == stat2.st_ino and
stat1.st_dev == stat2.st_dev)
def _joinrealpath(self, path, rest, seen):
curdir = self.filesystem._matching_string(path, '.')
pardir = self.filesystem._matching_string(path, '..')
sep = self.filesystem._path_separator(path)
if self.isabs(rest):
rest = rest[1:]
path = sep
while rest:
name, _, rest = rest.partition(sep)
if not name or name == curdir:
continue
if name == pardir:
if path:
path, name = self.filesystem.splitpath(path)
if name == pardir:
path = self.filesystem.joinpaths(path, pardir, pardir)
else:
path = pardir
continue
newpath = self.filesystem.joinpaths(path, name)
if not self.filesystem.islink(newpath):
path = newpath
continue
if newpath in seen:
path = seen[newpath]
if path is not None:
continue
return self.filesystem.joinpaths(newpath, rest), False
seen[newpath] = None
path, ok = self._joinrealpath(
path, self.filesystem.readlink(newpath), seen)
if not ok:
return self.filesystem.joinpaths(path, rest), False
seen[newpath] = path
return path, True
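    # Hedged note on _joinrealpath above: 'seen' caches each visited symlink;
    # revisiting a link whose cached value is still None marks a cycle, so
    # the remaining path is returned unresolved with ok=False, mirroring the
    # posixpath._joinrealpath algorithm.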
def dirname(self, path):
return self.split(path)[0]
def expanduser(self, path):
return self._os_path.expanduser(path).replace(
self._os_path.sep, self.sep)
def ismount(self, path):
path = make_string_path(path)
if not path:
return False
normed_path = self.filesystem.absnormpath(path)
sep = self.filesystem._path_separator(path)
if self.filesystem.is_windows_fs:
if self.filesystem.alternative_path_separator is not None:
path_seps = (
sep, self.filesystem._alternative_path_separator(path)
)
else:
path_seps = (sep,)
drive, rest = self.filesystem.splitdrive(normed_path)
if drive and drive[:1] in path_seps:
return (not rest) or (rest in path_seps)
if rest in path_seps:
return True
for mount_point in self.filesystem.mount_points:
if normed_path.rstrip(sep) == mount_point.rstrip(sep):
return True
return False
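    # Hedged examples for ismount above (paths illustrative, assuming the
    # default separators): on a fake Windows filesystem both 'C:\\' and
    # '//host/share' are expected to report True; on POSIX only paths that
    # match an entry in self.filesystem.mount_points (e.g. '/') do.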
def __getattr__(self, name):
return getattr(self._os_path, name)
class FakeOsModule:
devnull = None
@staticmethod
def dir():
dir = [
'access', 'chdir', 'chmod', 'chown', 'close', 'fstat', 'fsync',
'getcwd', 'lchmod', 'link', 'listdir', 'lstat', 'makedirs',
'mkdir', 'mknod', 'open', 'read', 'readlink', 'remove',
'removedirs', 'rename', 'rmdir', 'stat', 'symlink', 'umask',
'unlink', 'utime', 'walk', 'write', 'getcwdb', 'replace'
]
if sys.platform.startswith('linux'):
dir += [
'fdatasync', 'getxattr', 'listxattr',
'removexattr', 'setxattr'
]
if use_scandir:
dir += ['scandir']
return dir
def __init__(self, filesystem):
self.filesystem = filesystem
self.sep = filesystem.path_separator
self.altsep = filesystem.alternative_path_separator
self.linesep = filesystem.line_separator()
self._os_module = os
self.path = FakePathModule(self.filesystem, self)
        self.__class__.devnull = ('/dev/nul' if filesystem.is_windows_fs
                                  else '/dev/null')
def fdopen(self, fd, *args, **kwargs):
if not is_int_type(fd):
raise TypeError('an integer is required')
return FakeFileOpen(self.filesystem)(fd, *args, **kwargs)
def _umask(self):
if self.filesystem.is_windows_fs:
return 0
if sys.platform == 'win32':
return 0o002
else:
mask = os.umask(0)
os.umask(mask)
return mask
def open(self, path, flags, mode=None, *, dir_fd=None):
path = self._path_with_dir_fd(path, self.open, dir_fd)
if mode is None:
if self.filesystem.is_windows_fs:
mode = 0o666
else:
mode = 0o777 & ~self._umask()
has_tmpfile_flag = (hasattr(os, 'O_TMPFILE') and
flags & getattr(os, 'O_TMPFILE'))
open_modes = _OpenModes(
must_exist=not flags & os.O_CREAT and not has_tmpfile_flag,
can_read=not flags & os.O_WRONLY,
can_write=flags & (os.O_RDWR | os.O_WRONLY) != 0,
truncate=flags & os.O_TRUNC != 0,
append=flags & os.O_APPEND != 0,
must_not_exist=flags & os.O_EXCL != 0
)
if open_modes.must_not_exist and open_modes.must_exist:
raise NotImplementedError(
'O_EXCL without O_CREAT mode is not supported')
if has_tmpfile_flag:
path = self.filesystem.joinpaths(
path, str(uuid.uuid4()))
if (not self.filesystem.is_windows_fs and
self.filesystem.exists(path)):
obj = self.filesystem.resolve(path)
if isinstance(obj, FakeDirectory):
if ((not open_modes.must_exist and
not self.filesystem.is_macos)
or open_modes.can_write):
self.filesystem.raise_os_error(errno.EISDIR, path)
dir_wrapper = FakeDirWrapper(obj, path, self.filesystem)
file_des = self.filesystem._add_open_file(dir_wrapper)
dir_wrapper.filedes = file_des
return file_des
str_flags = 'b'
delete_on_close = has_tmpfile_flag
if hasattr(os, 'O_TEMPORARY'):
delete_on_close = flags & os.O_TEMPORARY == os.O_TEMPORARY
fake_file = FakeFileOpen(
self.filesystem, delete_on_close=delete_on_close, raw_io=True)(
path, str_flags, open_modes=open_modes)
if fake_file.file_object != self.filesystem.dev_null:
self.chmod(path, mode)
return fake_file.fileno()
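    # Hedged example of the flag decoding above (flag values illustrative):
    # flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC decodes to
    # _OpenModes(must_exist=False, can_read=False, can_write=True,
    #            truncate=True, append=False, must_not_exist=False),
    # i.e. the classic 'w' open mode.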
def close(self, fd):
file_handle = self.filesystem.get_open_file(fd)
file_handle.close()
def read(self, fd, n):
file_handle = self.filesystem.get_open_file(fd)
file_handle.raw_io = True
return file_handle.read(n)
def write(self, fd, contents):
file_handle = self.filesystem.get_open_file(fd)
if isinstance(file_handle, FakeDirWrapper):
self.filesystem.raise_os_error(errno.EBADF, file_handle.file_path)
if isinstance(file_handle, FakePipeWrapper):
return file_handle.write(contents)
file_handle.raw_io = True
file_handle._sync_io()
file_handle.update_flush_pos()
file_handle.write(contents)
file_handle.flush()
return len(contents)
def pipe(self):
read_fd, write_fd = os.pipe()
read_wrapper = FakePipeWrapper(self.filesystem, read_fd)
file_des = self.filesystem._add_open_file(read_wrapper)
read_wrapper.filedes = file_des
write_wrapper = FakePipeWrapper(self.filesystem, write_fd)
file_des = self.filesystem._add_open_file(write_wrapper)
write_wrapper.filedes = file_des
return read_wrapper.filedes, write_wrapper.filedes
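    # Hedged usage sketch: the fake pipe wraps a real os.pipe(), so
    #     r, w = fake_os.pipe()
    #     fake_os.write(w, b'x')
    #     fake_os.read(r, 1)   # expected to return b'x'
    # round-trips through the fake descriptor table ('fake_os' here is an
    # assumed FakeOsModule instance, not a name defined in this module).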
@staticmethod
def stat_float_times(newvalue=None):
return FakeStatResult.stat_float_times(newvalue)
def fstat(self, fd):
file_object = self.filesystem.get_open_file(fd).get_object()
return file_object.stat_result.copy()
def umask(self, mask):
if not is_int_type(mask):
raise TypeError('an integer is required')
old_umask = self.filesystem.umask
self.filesystem.umask = mask
return old_umask
def chdir(self, path):
path = self.filesystem.resolve_path(
path, allow_fd=True)
self.filesystem.confirmdir(path)
directory = self.filesystem.resolve(path)
        # chdir requires the execute (search) permission bit on the directory
        if not is_root() and not directory.st_mode & PERM_EXE:
self.filesystem.raise_os_error(errno.EACCES, directory)
self.filesystem.cwd = path
def getcwd(self):
return self.filesystem.cwd
def getcwdb(self):
return bytes(
self.filesystem.cwd, locale.getpreferredencoding(False))
def listdir(self, path):
return self.filesystem.listdir(path)
XATTR_CREATE = 1
XATTR_REPLACE = 2
def getxattr(self, path, attribute, *, follow_symlinks=True):
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'getxattr'")
if isinstance(attribute, bytes):
attribute = attribute.decode(sys.getfilesystemencoding())
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
return file_obj.xattr.get(attribute)
def listxattr(self, path=None, *, follow_symlinks=True):
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'listxattr'")
if path is None:
path = self.getcwd()
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
return list(file_obj.xattr.keys())
def removexattr(self, path, attribute, *, follow_symlinks=True):
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'removexattr'")
if isinstance(attribute, bytes):
attribute = attribute.decode(sys.getfilesystemencoding())
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
if attribute in file_obj.xattr:
del file_obj.xattr[attribute]
def setxattr(self, path, attribute, value,
flags=0, *, follow_symlinks=True):
if not self.filesystem.is_linux:
raise AttributeError(
"module 'os' has no attribute 'setxattr'")
if isinstance(attribute, bytes):
attribute = attribute.decode(sys.getfilesystemencoding())
if not is_byte_string(value):
raise TypeError('a bytes-like object is required')
file_obj = self.filesystem.resolve(path, follow_symlinks,
allow_fd=True)
exists = attribute in file_obj.xattr
if exists and flags == self.XATTR_CREATE:
self.filesystem.raise_os_error(errno.ENODATA, file_obj.path)
if not exists and flags == self.XATTR_REPLACE:
self.filesystem.raise_os_error(errno.EEXIST, file_obj.path)
file_obj.xattr[attribute] = value
if use_scandir:
def scandir(self, path='.'):
return scandir(self.filesystem, path)
def walk(self, top, topdown=True, onerror=None, followlinks=False):
return walk(self.filesystem, top, topdown, onerror, followlinks)
def readlink(self, path, dir_fd=None):
path = self._path_with_dir_fd(path, self.readlink, dir_fd)
return self.filesystem.readlink(path)
def stat(self, path, *, dir_fd=None, follow_symlinks=True):
path = self._path_with_dir_fd(path, self.stat, dir_fd)
return self.filesystem.stat(path, follow_symlinks)
def lstat(self, path, *, dir_fd=None):
path = self._path_with_dir_fd(path, self.lstat, dir_fd)
return self.filesystem.stat(path, follow_symlinks=False)
def remove(self, path, dir_fd=None):
path = self._path_with_dir_fd(path, self.remove, dir_fd)
self.filesystem.remove(path)
def unlink(self, path, *, dir_fd=None):
path = self._path_with_dir_fd(path, self.unlink, dir_fd)
self.filesystem.remove(path)
def rename(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
src = self._path_with_dir_fd(src, self.rename, src_dir_fd)
dst = self._path_with_dir_fd(dst, self.rename, dst_dir_fd)
self.filesystem.rename(src, dst)
def replace(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
src = self._path_with_dir_fd(src, self.rename, src_dir_fd)
dst = self._path_with_dir_fd(dst, self.rename, dst_dir_fd)
self.filesystem.rename(src, dst, force_replace=True)
def rmdir(self, path, *, dir_fd=None):
path = self._path_with_dir_fd(path, self.rmdir, dir_fd)
self.filesystem.rmdir(path)
def removedirs(self, name):
name = self.filesystem.absnormpath(name)
directory = self.filesystem.confirmdir(name)
if directory.contents:
self.filesystem.raise_os_error(
errno.ENOTEMPTY, self.path.basename(name))
else:
self.rmdir(name)
head, tail = self.path.split(name)
if not tail:
head, tail = self.path.split(head)
while head and tail:
head_dir = self.filesystem.confirmdir(head)
if head_dir.contents:
break
self.filesystem.rmdir(head, allow_symlink=True)
head, tail = self.path.split(head)
def mkdir(self, path, mode=PERM_DEF, *, dir_fd=None):
path = self._path_with_dir_fd(path, self.mkdir, dir_fd)
try:
self.filesystem.makedir(path, mode)
except OSError as e:
if e.errno == errno.EACCES:
self.filesystem.raise_os_error(e.errno, path)
raise
def makedirs(self, name, mode=PERM_DEF, exist_ok=None):
if exist_ok is None:
exist_ok = False
self.filesystem.makedirs(name, mode, exist_ok)
def _path_with_dir_fd(self, path, fct, dir_fd):
path = to_string(path)
if dir_fd is not None:
real_fct = getattr(os, fct.__name__)
if real_fct not in self.supports_dir_fd:
raise NotImplementedError(
'dir_fd unavailable on this platform')
if isinstance(path, int):
raise ValueError("%s: Can't specify dir_fd without "
"matching path" % fct.__name__)
if not self.path.isabs(path):
return self.path.join(
self.filesystem.get_open_file(
dir_fd).get_object().path, path)
return path
def access(self, path, mode, *, dir_fd=None, follow_symlinks=True):
path = self._path_with_dir_fd(path, self.access, dir_fd)
try:
stat_result = self.stat(path, follow_symlinks=follow_symlinks)
except OSError as os_error:
if os_error.errno == errno.ENOENT:
return False
raise
if is_root():
mode &= ~os.W_OK
return (mode & ((stat_result.st_mode >> 6) & 7)) == mode
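    # Hedged example for access above: only the owner permission bits are
    # consulted, so with st_mode 0o640 the check (mode & ((0o640 >> 6) & 7))
    # grants os.R_OK | os.W_OK (4 | 2 == 6) but denies os.X_OK.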
def chmod(self, path, mode, *, dir_fd=None, follow_symlinks=True):
path = self._path_with_dir_fd(path, self.chmod, dir_fd)
self.filesystem.chmod(path, mode, follow_symlinks)
def lchmod(self, path, mode):
if self.filesystem.is_windows_fs:
            raise NameError("name 'lchmod' is not defined")
self.filesystem.chmod(path, mode, follow_symlinks=False)
def utime(self, path, times=None, ns=None,
dir_fd=None, follow_symlinks=True):
path = self._path_with_dir_fd(path, self.utime, dir_fd)
self.filesystem.utime(
path, times=times, ns=ns, follow_symlinks=follow_symlinks)
def chown(self, path, uid, gid, *, dir_fd=None, follow_symlinks=True):
path = self._path_with_dir_fd(path, self.chown, dir_fd)
file_object = self.filesystem.resolve(
path, follow_symlinks, allow_fd=True)
if not ((is_int_type(uid) or uid is None) and
(is_int_type(gid) or gid is None)):
raise TypeError("An integer is required")
if uid != -1:
file_object.st_uid = uid
if gid != -1:
file_object.st_gid = gid
def mknod(self, path, mode=None, device=0, *, dir_fd=None):
if self.filesystem.is_windows_fs:
            raise AttributeError("module 'os' has no attribute 'mknod'")
if mode is None:
            # a default mode of 0o600 without a device type is documented,
            # but that is not how it behaves in practice
mode = S_IFREG | 0o600
        if device or (not mode & S_IFREG and not is_root()):
self.filesystem.raise_os_error(errno.EPERM)
path = self._path_with_dir_fd(path, self.mknod, dir_fd)
head, tail = self.path.split(path)
if not tail:
if self.filesystem.exists(head, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, path)
self.filesystem.raise_os_error(errno.ENOENT, path)
if tail in (b'.', u'.', b'..', u'..'):
self.filesystem.raise_os_error(errno.ENOENT, path)
if self.filesystem.exists(path, check_link=True):
self.filesystem.raise_os_error(errno.EEXIST, path)
self.filesystem.add_object(head, FakeFile(
tail, mode & ~self.filesystem.umask,
filesystem=self.filesystem))
def symlink(self, src, dst, *, dir_fd=None):
src = self._path_with_dir_fd(src, self.symlink, dir_fd)
self.filesystem.create_symlink(
dst, src, create_missing_dirs=False)
def link(self, src, dst, *, src_dir_fd=None, dst_dir_fd=None):
src = self._path_with_dir_fd(src, self.link, src_dir_fd)
dst = self._path_with_dir_fd(dst, self.link, dst_dir_fd)
self.filesystem.link(src, dst)
def fsync(self, fd):
        # raise for standard streams; get_open_file rejects invalid fds
if 0 <= fd < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
file_object = self.filesystem.get_open_file(fd)
if self.filesystem.is_windows_fs:
if (not hasattr(file_object, 'allow_update') or
not file_object.allow_update):
self.filesystem.raise_os_error(
errno.EBADF, file_object.file_path)
def fdatasync(self, fd):
if self.filesystem.is_windows_fs or self.filesystem.is_macos:
raise AttributeError("module 'os' has no attribute 'fdatasync'")
if 0 <= fd < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
self.filesystem.get_open_file(fd)
def sendfile(self, fd_out, fd_in, offset, count):
if self.filesystem.is_windows_fs:
raise AttributeError("module 'os' has no attribute 'sendfile'")
if 0 <= fd_in < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
if 0 <= fd_out < NR_STD_STREAMS:
self.filesystem.raise_os_error(errno.EINVAL)
source = self.filesystem.get_open_file(fd_in)
dest = self.filesystem.get_open_file(fd_out)
if self.filesystem.is_macos:
if dest.get_object().stat_result.st_mode & 0o777000 != S_IFSOCK:
raise OSError('Socket operation on non-socket')
if offset is None:
if self.filesystem.is_macos:
raise TypeError('None is not a valid offset')
contents = source.read(count)
else:
position = source.tell()
source.seek(offset)
if count == 0 and self.filesystem.is_macos:
contents = source.read()
else:
contents = source.read(count)
source.seek(position)
if contents:
written = dest.write(contents)
dest.flush()
return written
return 0
def __getattr__(self, name):
return getattr(self._os_module, name)
class FakeIoModule:
@staticmethod
def dir():
return 'open',
def __init__(self, filesystem):
self.filesystem = filesystem
self._io_module = io
def open(self, file, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None):
fake_open = FakeFileOpen(self.filesystem)
return fake_open(file, mode, buffering, encoding, errors,
newline, closefd, opener)
def __getattr__(self, name):
return getattr(self._io_module, name)
class FakeFileWrapper:
def __init__(self, file_object, file_path, update=False, read=False,
append=False, delete_on_close=False, filesystem=None,
newline=None, binary=True, closefd=True, encoding=None,
errors=None, raw_io=False, is_stream=False):
self.file_object = file_object
self.file_path = file_path
self._append = append
self._read = read
self.allow_update = update
self._closefd = closefd
self._file_epoch = file_object.epoch
self.raw_io = raw_io
self._binary = binary
self.is_stream = is_stream
self._changed = False
contents = file_object.byte_contents
self._encoding = encoding or locale.getpreferredencoding(False)
errors = errors or 'strict'
buffer_class = (NullFileBufferIO if file_object == filesystem.dev_null
else FileBufferIO)
self._io = buffer_class(contents, linesep=filesystem.line_separator(),
binary=binary, encoding=encoding,
newline=newline, errors=errors)
self._read_whence = 0
self._read_seek = 0
self._flush_pos = 0
if contents:
self._flush_pos = len(contents)
if update:
if not append:
self._io.seek(0)
else:
self._io.seek(self._flush_pos)
self._read_seek = self._io.tell()
if delete_on_close:
assert filesystem, 'delete_on_close=True requires filesystem'
self._filesystem = filesystem
self.delete_on_close = delete_on_close
        # override; don't modify FakeFile.name, as FakeFilesystem expects
        # it to be the bare file name, without directories
self.name = file_object.opened_as
self.filedes = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _raise(self, message):
if self.raw_io:
self._filesystem.raise_os_error(errno.EBADF, self.file_path)
raise io.UnsupportedOperation(message)
def get_object(self):
return self.file_object
def fileno(self):
return self.filedes
def close(self):
if not self._is_open():
return
if self.allow_update and not self.raw_io:
self.flush()
if self._filesystem.is_windows_fs and self._changed:
self.file_object.st_mtime = time.time()
if self._closefd:
self._filesystem._close_open_file(self.filedes)
else:
self._filesystem.open_files[self.filedes].remove(self)
if self.delete_on_close:
self._filesystem.remove_object(self.get_object().path)
@property
def closed(self):
return not self._is_open()
def flush(self):
self._check_open_file()
if self.allow_update and not self.is_stream:
contents = self._io.getvalue()
if self._append:
self._sync_io()
old_contents = (self.file_object.byte_contents
if is_byte_string(contents) else
self.file_object.contents)
contents = old_contents + contents[self._flush_pos:]
self._set_stream_contents(contents)
self.update_flush_pos()
else:
self._io.flush()
if self.file_object.set_contents(contents, self._encoding):
if self._filesystem.is_windows_fs:
self._changed = True
else:
current_time = time.time()
self.file_object.st_ctime = current_time
self.file_object.st_mtime = current_time
self._file_epoch = self.file_object.epoch
if not self.is_stream:
self._flush_related_files()
def update_flush_pos(self):
self._flush_pos = self._io.tell()
def _flush_related_files(self):
for open_files in self._filesystem.open_files[3:]:
if open_files is not None:
for open_file in open_files:
if (open_file is not self and
self.file_object == open_file.file_object and
not open_file._append):
open_file._sync_io()
def seek(self, offset, whence=0):
self._check_open_file()
if not self._append:
self._io.seek(offset, whence)
else:
self._read_seek = offset
self._read_whence = whence
if not self.is_stream:
self.flush()
def tell(self):
self._check_open_file()
if not self.is_stream:
self.flush()
if not self._append:
return self._io.tell()
if self._read_whence:
write_seek = self._io.tell()
self._io.seek(self._read_seek, self._read_whence)
self._read_seek = self._io.tell()
self._read_whence = 0
self._io.seek(write_seek)
return self._read_seek
def _sync_io(self):
if self._file_epoch == self.file_object.epoch:
return
if self._io.binary:
contents = self.file_object.byte_contents
else:
contents = self.file_object.contents
self._set_stream_contents(contents)
self._file_epoch = self.file_object.epoch
def _set_stream_contents(self, contents):
whence = self._io.tell()
self._io.seek(0)
self._io.truncate()
if not self._io.binary and is_byte_string(contents):
contents = contents.decode(self._encoding)
self._io.putvalue(contents)
if not self._append:
self._io.seek(whence)
def _read_wrappers(self, name):
io_attr = getattr(self._io, name)
def read_wrapper(*args, **kwargs):
self._io.seek(self._read_seek, self._read_whence)
ret_value = io_attr(*args, **kwargs)
self._read_seek = self._io.tell()
self._read_whence = 0
self._io.seek(0, 2)
return ret_value
return read_wrapper
def _other_wrapper(self, name, writing):
io_attr = getattr(self._io, name)
def other_wrapper(*args, **kwargs):
write_seek = self._io.tell()
ret_value = io_attr(*args, **kwargs)
if write_seek != self._io.tell():
self._read_seek = self._io.tell()
self._read_whence = 0
return ret_value
return other_wrapper
def _adapt_size_for_related_files(self, size):
for open_files in self._filesystem.open_files[3:]:
if open_files is not None:
for open_file in open_files:
if (open_file is not self and
self.file_object == open_file.file_object and
open_file._append):
open_file._read_seek += size
def _truncate_wrapper(self):
io_attr = getattr(self._io, 'truncate')
def truncate_wrapper(*args, **kwargs):
if self._append:
self._io.seek(self._read_seek, self._read_whence)
size = io_attr(*args, **kwargs)
self.flush()
if not self.is_stream:
self.file_object.size = size
buffer_size = len(self._io.getvalue())
if buffer_size < size:
self._io.seek(buffer_size)
self._io.write('\0' * (size - buffer_size))
self.file_object.set_contents(
self._io.getvalue(), self._encoding)
self._flush_pos = size
self._adapt_size_for_related_files(size - buffer_size)
self.flush()
return size
return truncate_wrapper
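    # Hedged note on the wrapper above: following POSIX truncate semantics,
    # growing a file is expected to zero-fill the gap, e.g. truncating a
    # 2-byte file to size 4 pads it with two NUL bytes.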
def size(self):
return self.file_object.st_size
def __getattr__(self, name):
if self.file_object.is_large_file():
raise FakeLargeFileIoException(self.file_path)
reading = name.startswith('read') or name == 'next'
truncate = name == 'truncate'
writing = name.startswith('write') or truncate
if reading or writing:
self._check_open_file()
if not self._read and reading:
return self._read_error()
if not self.allow_update and writing:
return self._write_error()
if reading:
self._sync_io()
if not self.is_stream:
self.flush()
if not self._filesystem.is_windows_fs:
self.file_object.st_atime = time.time()
if truncate:
return self._truncate_wrapper()
if self._append:
if reading:
return self._read_wrappers(name)
else:
return self._other_wrapper(name, writing)
return getattr(self._io, name)
def _read_error(self):
def read_error(*args, **kwargs):
if args and args[0] == 0:
if self._filesystem.is_windows_fs and self.raw_io:
return b'' if self._binary else u''
self._raise('File is not open for reading.')
return read_error
def _write_error(self):
def write_error(*args, **kwargs):
if self.raw_io:
if (self._filesystem.is_windows_fs and args
and len(args[0]) == 0):
return 0
self._raise('File is not open for writing.')
return write_error
def _is_open(self):
return (self.filedes < len(self._filesystem.open_files) and
self._filesystem.open_files[self.filedes] is not None and
self in self._filesystem.open_files[self.filedes])
def _check_open_file(self):
if not self.is_stream and not self._is_open():
raise ValueError('I/O operation on closed file')
def __iter__(self):
if not self._read:
self._raise('File is not open for reading')
return self._io.__iter__()
def __next__(self):
if not self._read:
self._raise('File is not open for reading')
return next(self._io)
class StandardStreamWrapper:
def __init__(self, stream_object):
self._stream_object = stream_object
self.filedes = None
def get_object(self):
return self._stream_object
def fileno(self):
return self.filedes
def close(self):
pass
def is_stream(self):
return True
class FakeDirWrapper:
def __init__(self, file_object, file_path, filesystem):
self.file_object = file_object
self.file_path = file_path
self._filesystem = filesystem
self.filedes = None
def get_object(self):
return self.file_object
def fileno(self):
return self.filedes
def close(self):
self._filesystem._close_open_file(self.filedes)
class FakePipeWrapper:
def __init__(self, filesystem, fd):
self._filesystem = filesystem
self.fd = fd
self.file_object = None
self.filedes = None
def get_object(self):
return self.file_object
def fileno(self):
return self.filedes
def read(self, numBytes):
return os.read(self.fd, numBytes)
def write(self, contents):
return os.write(self.fd, contents)
def close(self):
self._filesystem.open_files[self.filedes].remove(self)
os.close(self.fd)
Deprecator.add(FakeFileWrapper, FakeFileWrapper.get_object, 'GetObject')
Deprecator.add(FakeFileWrapper, FakeFileWrapper.size, 'Size')
class FakeFileOpen:
__name__ = 'FakeFileOpen'
def __init__(self, filesystem, delete_on_close=False, raw_io=False):
self.filesystem = filesystem
self._delete_on_close = delete_on_close
self.raw_io = raw_io
def __call__(self, *args, **kwargs):
return self.call(*args, **kwargs)
def call(self, file_, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True, opener=None,
open_modes=None):
binary = 'b' in mode
newline, open_modes = self._handle_file_mode(mode, newline, open_modes)
file_object, file_path, filedes, real_path = self._handle_file_arg(
file_)
if not filedes:
closefd = True
if (open_modes.must_not_exist and
(file_object or self.filesystem.islink(file_path) and
not self.filesystem.is_windows_fs)):
self.filesystem.raise_os_error(errno.EEXIST, file_path)
file_object = self._init_file_object(file_object,
file_path, open_modes,
real_path)
if S_ISDIR(file_object.st_mode):
if self.filesystem.is_windows_fs:
self.filesystem.raise_os_error(errno.EACCES, file_path)
else:
self.filesystem.raise_os_error(errno.EISDIR, file_path)
file_object.opened_as = file_path
if open_modes.truncate:
current_time = time.time()
file_object.st_mtime = current_time
if not self.filesystem.is_windows_fs:
file_object.st_ctime = current_time
fakefile = FakeFileWrapper(file_object,
file_path,
update=open_modes.can_write,
read=open_modes.can_read,
append=open_modes.append,
delete_on_close=self._delete_on_close,
filesystem=self.filesystem,
newline=newline,
binary=binary,
closefd=closefd,
encoding=encoding,
errors=errors,
raw_io=self.raw_io)
if filedes is not None:
fakefile.filedes = filedes
self.filesystem.open_files[filedes].append(fakefile)
else:
fakefile.filedes = self.filesystem._add_open_file(fakefile)
return fakefile
def _init_file_object(self, file_object, file_path,
open_modes, real_path):
if file_object:
if (not is_root() and
((open_modes.can_read and
not file_object.st_mode & PERM_READ)
or (open_modes.can_write and
not file_object.st_mode & PERM_WRITE))):
self.filesystem.raise_os_error(errno.EACCES, file_path)
if open_modes.can_write:
if open_modes.truncate:
file_object.set_contents('')
else:
if open_modes.must_exist:
self.filesystem.raise_os_error(errno.ENOENT, file_path)
if self.filesystem.islink(file_path):
link_object = self.filesystem.resolve(file_path,
follow_symlinks=False)
target_path = link_object.contents
else:
target_path = file_path
if self.filesystem.ends_with_path_separator(target_path):
error = (errno.EINVAL if self.filesystem.is_windows_fs
else errno.ENOENT if self.filesystem.is_macos
else errno.EISDIR)
self.filesystem.raise_os_error(error, file_path)
file_object = self.filesystem.create_file_internally(
real_path, create_missing_dirs=False,
apply_umask=True, raw_io=self.raw_io)
return file_object
def _handle_file_arg(self, file_):
file_object = None
if isinstance(file_, int):
filedes = file_
wrapper = self.filesystem.get_open_file(filedes)
self._delete_on_close = wrapper.delete_on_close
file_object = self.filesystem.get_open_file(filedes).get_object()
file_path = file_object.name
real_path = file_path
else:
filedes = None
file_path = file_
if file_path == self.filesystem.dev_null.name:
file_object = self.filesystem.dev_null
real_path = file_path
else:
real_path = self.filesystem.resolve_path(
file_path, raw_io=self.raw_io)
if self.filesystem.exists(file_path):
file_object = self.filesystem.get_object_from_normpath(
real_path, check_read_perm=False)
return file_object, file_path, filedes, real_path
def _handle_file_mode(self, mode, newline, open_modes):
orig_modes = mode
if 'b' in mode and 't' in mode:
raise ValueError('Invalid mode: ' + mode)
mode = mode.replace('t', '').replace('b', '')
mode = mode.replace('rU', 'r').replace('U', 'r')
if not self.raw_io:
if mode not in _OPEN_MODE_MAP:
raise ValueError('Invalid mode: %r' % orig_modes)
open_modes = _OpenModes(*_OPEN_MODE_MAP[mode])
return newline, open_modes
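    # Hedged example (assumes the module-level _OPEN_MODE_MAP, not shown
    # here, covers 'r+'): mode 'rb+' is stripped to 'r+' above and would
    # map to _OpenModes(must_exist=True, can_read=True, can_write=True,
    #                   truncate=False, append=False, must_not_exist=False).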
def _run_doctest():
import doctest
import pyfakefs
return doctest.testmod(pyfakefs.fake_filesystem)
if __name__ == '__main__':
_run_doctest()
| true
| true
|
f71563a5ae0890e106de48a27661337660c13119
| 5,139
|
py
|
Python
|
Lib/wsgiref/simple_server.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 11,058
|
2018-05-29T07:40:06.000Z
|
2022-03-31T11:38:42.000Z
|
Lib/wsgiref/simple_server.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 2,105
|
2018-06-01T10:07:16.000Z
|
2022-03-31T14:56:42.000Z
|
Lib/wsgiref/simple_server.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 914
|
2018-07-27T09:36:14.000Z
|
2022-03-31T19:56:34.000Z
|
"""BaseHTTPServer that implements the Python WSGI protocol (PEP 3333)
This is both an example of how WSGI can be implemented, and a basis for running
simple web applications on a local machine, such as might be done when testing
or debugging an application. It has not been reviewed for security issues,
however, and we strongly recommend that you use a "real" web server for
production use.
For example usage, see the 'if __name__=="__main__"' block at the end of the
module. See also the BaseHTTPServer module docs for other API information.
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import sys
import urllib.parse
from wsgiref.handlers import SimpleHandler
from platform import python_implementation
__version__ = "0.2"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
server_version = "WSGIServer/" + __version__
sys_version = python_implementation() + "/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
server_software = software_version
def close(self):
try:
self.request_handler.log_request(
self.status.split(' ',1)[0], self.bytes_sent
)
finally:
SimpleHandler.close(self)
class WSGIServer(HTTPServer):
"""BaseHTTPServer that implements the Python WSGI protocol"""
application = None
def server_bind(self):
"""Override server_bind to store the server name."""
HTTPServer.server_bind(self)
self.setup_environ()
def setup_environ(self):
# Set up base environment
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['SERVER_SOFTWARE'] = self.server_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.parse.unquote(path, 'iso-8859-1')
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.get('content-type') is None:
env['CONTENT_TYPE'] = self.headers.get_content_type()
else:
env['CONTENT_TYPE'] = self.headers['content-type']
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
for k, v in self.headers.items():
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
                continue  # skip content length, type, etc.
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v # comma-separate multiple headers
else:
env['HTTP_'+k] = v
return env
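    # Hedged example for get_environ above: a request line
    # 'GET /xyz?abc HTTP/1.1' yields REQUEST_METHOD='GET',
    # PATH_INFO='/xyz' and QUERY_STRING='abc'.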
def get_stderr(self):
return sys.stderr
def handle(self):
"""Handle a single HTTP request"""
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.parse_request(): # An error code has been sent, just exit
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self # backpointer for logging
handler.run(self.server.get_app())
def demo_app(environ,start_response):
from io import StringIO
stdout = StringIO()
print("Hello world!", file=stdout)
print(file=stdout)
h = sorted(environ.items())
for k,v in h:
print(k,'=',repr(v), file=stdout)
start_response("200 OK", [('Content-Type','text/plain; charset=utf-8')])
return [stdout.getvalue().encode("utf-8")]
def make_server(
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
"""Create a new WSGI server listening on `host` and `port` for `app`"""
server = server_class((host, port), handler_class)
server.set_app(app)
return server
if __name__ == '__main__':
with make_server('', 8000, demo_app) as httpd:
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
import webbrowser
webbrowser.open('http://localhost:8000/xyz?abc')
httpd.handle_request() # serve one request, then exit
| 31.145455
| 79
| 0.627359
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import sys
import urllib.parse
from wsgiref.handlers import SimpleHandler
from platform import python_implementation
__version__ = "0.2"
__all__ = ['WSGIServer', 'WSGIRequestHandler', 'demo_app', 'make_server']
server_version = "WSGIServer/" + __version__
sys_version = python_implementation() + "/" + sys.version.split()[0]
software_version = server_version + ' ' + sys_version
class ServerHandler(SimpleHandler):
server_software = software_version
def close(self):
try:
self.request_handler.log_request(
self.status.split(' ',1)[0], self.bytes_sent
)
finally:
SimpleHandler.close(self)
class WSGIServer(HTTPServer):
application = None
def server_bind(self):
HTTPServer.server_bind(self)
self.setup_environ()
def setup_environ(self):
env = self.base_environ = {}
env['SERVER_NAME'] = self.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PORT'] = str(self.server_port)
env['REMOTE_HOST']=''
env['CONTENT_LENGTH']=''
env['SCRIPT_NAME'] = ''
def get_app(self):
return self.application
def set_app(self,application):
self.application = application
class WSGIRequestHandler(BaseHTTPRequestHandler):
server_version = "WSGIServer/" + __version__
def get_environ(self):
env = self.server.base_environ.copy()
env['SERVER_PROTOCOL'] = self.request_version
env['SERVER_SOFTWARE'] = self.server_version
env['REQUEST_METHOD'] = self.command
if '?' in self.path:
path,query = self.path.split('?',1)
else:
path,query = self.path,''
env['PATH_INFO'] = urllib.parse.unquote(path, 'iso-8859-1')
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
if self.headers.get('content-type') is None:
env['CONTENT_TYPE'] = self.headers.get_content_type()
else:
env['CONTENT_TYPE'] = self.headers['content-type']
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
for k, v in self.headers.items():
k=k.replace('-','_').upper(); v=v.strip()
if k in env:
continue
if 'HTTP_'+k in env:
env['HTTP_'+k] += ','+v
else:
env['HTTP_'+k] = v
return env
def get_stderr(self):
return sys.stderr
def handle(self):
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(414)
return
if not self.parse_request():
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self
handler.run(self.server.get_app())
def demo_app(environ,start_response):
from io import StringIO
stdout = StringIO()
print("Hello world!", file=stdout)
print(file=stdout)
h = sorted(environ.items())
for k,v in h:
print(k,'=',repr(v), file=stdout)
start_response("200 OK", [('Content-Type','text/plain; charset=utf-8')])
return [stdout.getvalue().encode("utf-8")]
def make_server(
host, port, app, server_class=WSGIServer, handler_class=WSGIRequestHandler
):
server = server_class((host, port), handler_class)
server.set_app(app)
return server
if __name__ == '__main__':
with make_server('', 8000, demo_app) as httpd:
sa = httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
import webbrowser
webbrowser.open('http://localhost:8000/xyz?abc')
httpd.handle_request()
| true
| true
|
f71563a970e54d91f082ae73af7abad4a8b23fdf
| 205
|
py
|
Python
|
exe.curso em video/def 20.py
|
Lorenzo-Lopes/Python-Estudo
|
7ee623ce29b6a0e9fac48189fbd9c641be84d418
|
[
"MIT"
] | null | null | null |
exe.curso em video/def 20.py
|
Lorenzo-Lopes/Python-Estudo
|
7ee623ce29b6a0e9fac48189fbd9c641be84d418
|
[
"MIT"
] | null | null | null |
exe.curso em video/def 20.py
|
Lorenzo-Lopes/Python-Estudo
|
7ee623ce29b6a0e9fac48189fbd9c641be84d418
|
[
"MIT"
] | null | null | null |
import random
n1 = str(input('name 1='))
n2 = str(input('name 2='))
n3 = str(input('name 3='))
n4 = str(input('name 4='))
lista = [n1, n2, n3, n4]
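# random.shuffle reorders the list in place and returns None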
random.shuffle(lista)
print('new order {}'.format(lista))
| 22.777778
| 35
| 0.62439
|
import random
n1 = str(input('name 1='))
n2 = str(input('name 2='))
n3 = str(input('name 3='))
n4 = str(input('name 4='))
lista = [n1, n2, n3, n4]
random.shuffle(lista)
print('new order {}'.format(lista))
| true
| true
|
f715643f43f6a40626ee3423dc028f67fe0c8522
| 113
|
py
|
Python
|
mass/views.py
|
lsapan/channels-mass-broadcast
|
4f60e059ea9e3a861cc47250347900a4d0b8bd7f
|
[
"MIT"
] | null | null | null |
mass/views.py
|
lsapan/channels-mass-broadcast
|
4f60e059ea9e3a861cc47250347900a4d0b8bd7f
|
[
"MIT"
] | null | null | null |
mass/views.py
|
lsapan/channels-mass-broadcast
|
4f60e059ea9e3a861cc47250347900a4d0b8bd7f
|
[
"MIT"
] | null | null | null |
from django.views.generic.base import TemplateView
class AppView(TemplateView):
template_name = 'app.html'
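# A minimal sketch of wiring this view into a URLconf (the URL pattern and name
# are assumptions, not part of this repository):
#     from django.urls import path
#     from .views import AppView
#     urlpatterns = [path('', AppView.as_view(), name='app')]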
| 18.833333
| 50
| 0.778761
|
from django.views.generic.base import TemplateView
class AppView(TemplateView):
template_name = 'app.html'
| true
| true
|
f715657d3665bb54ea1ce0ca055aa8948d82a119
| 17,134
|
py
|
Python
|
Lib/test/test_dict.py
|
weimingtom/wpython
|
d51bfe48ec4c0ade1514f1351dff700c63ca112a
|
[
"PSF-2.0"
] | 5
|
2020-06-30T05:06:40.000Z
|
2021-05-24T08:38:33.000Z
|
Lib/test/test_dict.py
|
weimingtom/wpython
|
d51bfe48ec4c0ade1514f1351dff700c63ca112a
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_dict.py
|
weimingtom/wpython
|
d51bfe48ec4c0ade1514f1351dff700c63ca112a
|
[
"PSF-2.0"
] | 2
|
2015-10-01T18:28:20.000Z
|
2020-09-09T16:25:27.000Z
|
import unittest
from test import test_support
import UserDict, random, string
class DictTest(unittest.TestCase):
def test_constructor(self):
# calling built-in types without argument must return empty
self.assertEqual(dict(), {})
self.assert_(dict() is not {})
def test_literal_constructor(self):
        # check literal constructor for different sized dicts (to exercise the BUILD_MAP oparg)
for n in (0, 1, 6, 256, 400):
items = [(''.join([random.choice(string.letters)
for j in range(8)]),
i)
for i in range(n)]
random.shuffle(items)
dictliteral = '{' + ', '.join('%r: %d' % item for item in items) + '}'
self.assertEqual(eval(dictliteral), dict(items))
def test_bool(self):
self.assert_(not {})
self.assert_({1: 2})
self.assert_(bool({}) is False)
self.assert_(bool({1: 2}) is True)
def test_keys(self):
d = {}
self.assertEqual(d.keys(), [])
d = {'a': 1, 'b': 2}
k = d.keys()
self.assert_(d.has_key('a'))
self.assert_(d.has_key('b'))
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = {}
self.assertEqual(d.values(), [])
d = {1:2}
self.assertEqual(d.values(), [2])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = {}
self.assertEqual(d.items(), [])
d = {1:2}
self.assertEqual(d.items(), [(1, 2)])
self.assertRaises(TypeError, d.items, None)
def test_has_key(self):
d = {}
self.assert_(not d.has_key('a'))
d = {'a': 1, 'b': 2}
k = d.keys()
k.sort()
self.assertEqual(k, ['a', 'b'])
self.assertRaises(TypeError, d.has_key)
def test_contains(self):
d = {}
self.assert_(not ('a' in d))
self.assert_('a' not in d)
d = {'a': 1, 'b': 2}
self.assert_('a' in d)
self.assert_('b' in d)
self.assert_('c' not in d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = {}
self.assertEqual(len(d), 0)
d = {'a': 1, 'b': 2}
self.assertEqual(len(d), 2)
def test_getitem(self):
d = {'a': 1, 'b': 2}
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = {}
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = {1:1, 2:2, 3:3}
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
d = {}
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
class Exc(Exception): pass
d.clear()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, {}.update, badseq())
self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
def test_fromkeys(self):
self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = {}
self.assert_(not(d.fromkeys('abc') is d))
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(dict): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assert_(type(dictlike.fromkeys('a')) is dictlike)
self.assert_(type(dictlike().fromkeys('a')) is dictlike)
class mydict(dict):
def __new__(cls):
return UserDict.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assert_(isinstance(ud, UserDict.UserDict))
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(dict):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, dict.fromkeys, BadSeq())
class baddict2(dict):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
# test fast path for dictionary inputs
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
def test_copy(self):
d = {1:1, 2:2, 3:3}
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
self.assertEqual({}.copy(), {})
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
d = {}
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
d = {'a' : 1, 'b' : 2}
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
# dict.setdefault()
d = {}
self.assert_(d.setdefault('key0') is None)
d.setdefault('key0', [])
self.assert_(d.setdefault('key0') is None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = {}
b = {}
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assert_(not(copymode < 0 and ta != tb))
self.assert_(not a)
self.assert_(not b)
d = {}
self.assertRaises(KeyError, d.popitem)
def test_pop(self):
# Tests for pop with specified key
d = {}
k, v = 'abc', 'def'
d[k] = v
self.assertRaises(KeyError, d.pop, 'ghi')
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
# verify longs/ints get same value when key > 32 bits (for 64-bit archs)
# see SF bug #689659
x = 4503599627370496L
y = 4503599627370496
h = {x: 'anything', y: 'something else'}
self.assertEqual(h[x], h[y])
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
self.assertRaises(TypeError, d.pop)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutatingiteration(self):
d = {}
d[1] = 1
try:
for i in d:
d[i+1] = 1
except RuntimeError:
pass
else:
self.fail("changing dict size during iteration doesn't raise Error")
def test_repr(self):
d = {}
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = {}
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = {1: BadRepr()}
self.assertRaises(Exc, repr, d)
def test_le(self):
self.assert_(not ({} < {}))
self.assert_(not ({1: 2} < {1L: 2L}))
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 42
d1 = {BadCmp(): 1}
d2 = {1: 1}
try:
d1 < d2
except Exc:
pass
else:
self.fail("< didn't raise Exc")
def test_missing(self):
# Make sure dict doesn't have a __missing__ method
self.assertEqual(hasattr(dict, "__missing__"), False)
self.assertEqual(hasattr({}, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
class D(dict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assert_(2 not in d)
self.assert_(2 not in d.keys())
self.assertEqual(d[2], 42)
class E(dict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(dict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
f = F()
try:
f[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(dict):
pass
g = G()
try:
g[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
def test_tuple_keyerror(self):
# SF #1576657
d = {}
try:
d[(1,)]
except KeyError, e:
self.assertEqual(e.args, ((1,),))
else:
self.fail("missing KeyError")
def test_bad_key(self):
# Dictionary lookups should fail if __cmp__() raises an exception.
class CustomException(Exception):
pass
class BadDictKey:
def __hash__(self):
return hash(self.__class__)
def __cmp__(self, other):
if isinstance(other, self.__class__):
raise CustomException
return other
d = {}
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
for stmt in ['d[x2] = 2',
'z = d[x2]',
'x2 in d',
'd.has_key(x2)',
'd.get(x2)',
'd.setdefault(x2, 42)',
'd.pop(x2)',
'd.update({x2: 2})']:
try:
exec stmt in locals()
except CustomException:
pass
else:
self.fail("Statement didn't raise exception")
def test_resize1(self):
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = {}
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
def test_resize2(self):
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = {}
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
def test_empty_presized_dict_in_freelist(self):
# Bug #3537: if an empty but presized dict with a size larger
# than 7 was in the freelist, it triggered an assertion failure
try:
d = {'a': 1/0, 'b': None, 'c': None, 'd': None, 'e': None,
'f': None, 'g': None, 'h': None}
except ZeroDivisionError:
pass
d = {}
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = dict
class Dict(dict):
pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = Dict
def test_main():
test_support.run_unittest(
DictTest,
GeneralMappingTests,
SubclassMappingTests,
)
if __name__ == "__main__":
test_main()
| 29.592401
| 94
| 0.487744
|
import unittest
from test import test_support
import UserDict, random, string
class DictTest(unittest.TestCase):
def test_constructor(self):
self.assertEqual(dict(), {})
self.assert_(dict() is not {})
def test_literal_constructor(self):
for n in (0, 1, 6, 256, 400):
items = [(''.join([random.choice(string.letters)
for j in range(8)]),
i)
for i in range(n)]
random.shuffle(items)
dictliteral = '{' + ', '.join('%r: %d' % item for item in items) + '}'
self.assertEqual(eval(dictliteral), dict(items))
def test_bool(self):
self.assert_(not {})
self.assert_({1: 2})
self.assert_(bool({}) is False)
self.assert_(bool({1: 2}) is True)
def test_keys(self):
d = {}
self.assertEqual(d.keys(), [])
d = {'a': 1, 'b': 2}
k = d.keys()
self.assert_(d.has_key('a'))
self.assert_(d.has_key('b'))
self.assertRaises(TypeError, d.keys, None)
def test_values(self):
d = {}
self.assertEqual(d.values(), [])
d = {1:2}
self.assertEqual(d.values(), [2])
self.assertRaises(TypeError, d.values, None)
def test_items(self):
d = {}
self.assertEqual(d.items(), [])
d = {1:2}
self.assertEqual(d.items(), [(1, 2)])
self.assertRaises(TypeError, d.items, None)
def test_has_key(self):
d = {}
self.assert_(not d.has_key('a'))
d = {'a': 1, 'b': 2}
k = d.keys()
k.sort()
self.assertEqual(k, ['a', 'b'])
self.assertRaises(TypeError, d.has_key)
def test_contains(self):
d = {}
self.assert_(not ('a' in d))
self.assert_('a' not in d)
d = {'a': 1, 'b': 2}
self.assert_('a' in d)
self.assert_('b' in d)
self.assert_('c' not in d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = {}
self.assertEqual(len(d), 0)
d = {'a': 1, 'b': 2}
self.assertEqual(len(d), 2)
def test_getitem(self):
d = {'a': 1, 'b': 2}
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = {}
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = {1:1, 2:2, 3:3}
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
d = {}
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
self.assertEqual(d, {1:1, 2:2, 3:3})
d.update()
self.assertEqual(d, {1:1, 2:2, 3:3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1:1, 2:2, 3:3})
class Exc(Exception): pass
d.clear()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, {}.update, badseq())
self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
def test_fromkeys(self):
self.assertEqual(dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = {}
self.assert_(not(d.fromkeys('abc') is d))
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(dict): pass
self.assertEqual(dictlike.fromkeys('a'), {'a':None})
self.assertEqual(dictlike().fromkeys('a'), {'a':None})
self.assert_(type(dictlike.fromkeys('a')) is dictlike)
self.assert_(type(dictlike().fromkeys('a')) is dictlike)
class mydict(dict):
def __new__(cls):
return UserDict.UserDict()
ud = mydict.fromkeys('ab')
self.assertEqual(ud, {'a':None, 'b':None})
self.assert_(isinstance(ud, UserDict.UserDict))
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception): pass
class baddict1(dict):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, dict.fromkeys, BadSeq())
class baddict2(dict):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0]*6)))
def test_copy(self):
d = {1:1, 2:2, 3:3}
self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
self.assertEqual({}.copy(), {})
self.assertRaises(TypeError, d.copy, None)
def test_get(self):
d = {}
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
d = {'a' : 1, 'b' : 2}
self.assert_(d.get('c') is None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
d = {}
self.assert_(d.setdefault('key0') is None)
d.setdefault('key0', [])
self.assert_(d.setdefault('key0') is None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
def test_popitem(self):
for copymode in -1, +1:
for log2size in range(12):
size = 2**log2size
a = {}
b = {}
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, int(ka))
kb, vb = tb = b.popitem()
self.assertEqual(vb, int(kb))
self.assert_(not(copymode < 0 and ta != tb))
self.assert_(not a)
self.assert_(not b)
d = {}
self.assertRaises(KeyError, d.popitem)
def test_pop(self):
d = {}
k, v = 'abc', 'def'
d[k] = v
self.assertRaises(KeyError, d.pop, 'ghi')
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
x = 4503599627370496L
y = 4503599627370496
h = {x: 'anything', y: 'something else'}
self.assertEqual(h[x], h[y])
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
self.assertRaises(TypeError, d.pop)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutatingiteration(self):
d = {}
d[1] = 1
try:
for i in d:
d[i+1] = 1
except RuntimeError:
pass
else:
self.fail("changing dict size during iteration doesn't raise Error")
def test_repr(self):
d = {}
self.assertEqual(repr(d), '{}')
d[1] = 2
self.assertEqual(repr(d), '{1: 2}')
d = {}
d[1] = d
self.assertEqual(repr(d), '{1: {...}}')
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = {1: BadRepr()}
self.assertRaises(Exc, repr, d)
def test_le(self):
self.assert_(not ({} < {}))
self.assert_(not ({1: 2} < {1L: 2L}))
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 42
d1 = {BadCmp(): 1}
d2 = {1: 1}
try:
d1 < d2
except Exc:
pass
else:
self.fail("< didn't raise Exc")
def test_missing(self):
self.assertEqual(hasattr(dict, "__missing__"), False)
self.assertEqual(hasattr({}, "__missing__"), False)
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
        # (G) subclass doesn't define __missing__ at all
class D(dict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assert_(2 not in d)
self.assert_(2 not in d.keys())
self.assertEqual(d[2], 42)
class E(dict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
try:
e[42]
except RuntimeError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("e[42] didn't raise RuntimeError")
class F(dict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
f = F()
try:
f[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("f[42] didn't raise KeyError")
class G(dict):
pass
g = G()
try:
g[42]
except KeyError, err:
self.assertEqual(err.args, (42,))
else:
self.fail("g[42] didn't raise KeyError")
def test_tuple_keyerror(self):
# SF #1576657
d = {}
try:
d[(1,)]
except KeyError, e:
self.assertEqual(e.args, ((1,),))
else:
self.fail("missing KeyError")
def test_bad_key(self):
# Dictionary lookups should fail if __cmp__() raises an exception.
class CustomException(Exception):
pass
class BadDictKey:
def __hash__(self):
return hash(self.__class__)
def __cmp__(self, other):
if isinstance(other, self.__class__):
raise CustomException
return other
d = {}
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
for stmt in ['d[x2] = 2',
'z = d[x2]',
'x2 in d',
'd.has_key(x2)',
'd.get(x2)',
'd.setdefault(x2, 42)',
'd.pop(x2)',
'd.update({x2: 2})']:
try:
exec stmt in locals()
except CustomException:
pass
else:
self.fail("Statement didn't raise exception")
def test_resize1(self):
# that would be *likely* to hit a failing case in reasonable time.
d = {}
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
def test_resize2(self):
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = {}
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
def test_empty_presized_dict_in_freelist(self):
# Bug #3537: if an empty but presized dict with a size larger
# than 7 was in the freelist, it triggered an assertion failure
try:
d = {'a': 1/0, 'b': None, 'c': None, 'd': None, 'e': None,
'f': None, 'g': None, 'h': None}
except ZeroDivisionError:
pass
d = {}
from test import mapping_tests
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = dict
class Dict(dict):
pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
type2test = Dict
def test_main():
test_support.run_unittest(
DictTest,
GeneralMappingTests,
SubclassMappingTests,
)
if __name__ == "__main__":
test_main()
| false
| true
|
f71565b7be10bec97c68c9837cc952d9b34744fe
| 886
|
py
|
Python
|
s5/local/extract_text.py
|
cadia-lvl/althingi-asr
|
8830f40b8302834fc8176727a36ca8136cd9eedc
|
[
"Apache-2.0"
] | 4
|
2017-11-10T19:43:43.000Z
|
2019-07-03T12:12:09.000Z
|
egs/althingi/s5/local/extract_text.py
|
ingarun/kaldi
|
362ad3235c9740c0dfb0481cfbff81ca4604222d
|
[
"Apache-2.0"
] | null | null | null |
egs/althingi/s5/local/extract_text.py
|
ingarun/kaldi
|
362ad3235c9740c0dfb0481cfbff81ca4604222d
|
[
"Apache-2.0"
] | 1
|
2017-11-06T18:28:53.000Z
|
2017-11-06T18:28:53.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
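# Usage (inferred from the sys.argv reads below):
#     python extract_text.py <dir_with_xml_files> <output_text_file>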
import sys
import glob
import os
import codecs
import re
#from bs4 import BeautifulSoup
with codecs.open(sys.argv[2],'w',encoding='utf-8') as fout:
xmlpaths = glob.glob(os.path.join(sys.argv[1],'*.xml'))
for file in xmlpaths:
file_base = os.path.splitext(os.path.basename(file))[0]
with codecs.open(file,'r',encoding='utf-8') as fin:
#soup = BeautifulSoup(fin, 'lxml-xml')
#speech=soup.find('ræðutexti')
data=fin.read().replace('\n', ' ')
            match = re.search('<ræðutexti>(.*)</ræðutexti>', data)
            if match is None:
                print(file_base, file=fout)
            else:
                body_txt = match.group()
text = ' '.join([file_base, body_txt]).strip().replace('\n', ' ')
print(text, file=fout)
| 35.44
| 81
| 0.565463
|
import sys
import glob
import os
import codecs
import re
with codecs.open(sys.argv[2],'w',encoding='utf-8') as fout:
xmlpaths = glob.glob(os.path.join(sys.argv[1],'*.xml'))
for file in xmlpaths:
file_base = os.path.splitext(os.path.basename(file))[0]
with codecs.open(file,'r',encoding='utf-8') as fin:
data=fin.read().replace('\n', ' ')
            match = re.search('<ræðutexti>(.*)</ræðutexti>', data)
            if match is None:
                print(file_base, file=fout)
            else:
                body_txt = match.group()
text = ' '.join([file_base, body_txt]).strip().replace('\n', ' ')
print(text, file=fout)
| true
| true
|
f715670072d1d78f8599fbbeeae887a4a8b445cc
| 2,970
|
py
|
Python
|
news/views.py
|
serg1ua/today-ua
|
6a3ad99c924884db81ecbdb5d3dc2255dd927b4e
|
[
"MIT"
] | null | null | null |
news/views.py
|
serg1ua/today-ua
|
6a3ad99c924884db81ecbdb5d3dc2255dd927b4e
|
[
"MIT"
] | null | null | null |
news/views.py
|
serg1ua/today-ua
|
6a3ad99c924884db81ecbdb5d3dc2255dd927b4e
|
[
"MIT"
] | null | null | null |
import json
from django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from .models import Article
get_articles = 10
# Create your views here.
def index(request):
# print(dir(request))
return HttpResponseRedirect(reverse("main"))
def main(request):
try:
global get_articles
db_len = Article.objects.all().count()
counter = db_get_len(db_len, get_articles)
articles = Article.objects.all().order_by('-id')[:counter]
except Article.DoesNotExist:
raise Http404("Articles does not exist")
context = {
"title": "Головна",
"articles_3": articles[:3],
"articles_7": articles[3:10]
}
return render(request, "news/articles.html", context)
def section(request, selector):
try:
global get_articles
db_len = Article.objects.filter(tag__iexact=selector).count()
counter = db_get_len(db_len, get_articles)
articles = Article.objects.filter(tag__iexact=selector).order_by('-id')[:counter]
except Article.DoesNotExist:
raise Http404("Articles does not exist")
context = {
"title": selector,
"articles_3": articles[:3],
"articles_7": articles[4:10]
}
return render(request, "news/articles.html", context)
def article(request, selector, article):
# print(f"{selector} & {article}")
try:
article = Article.objects.get(header__iexact=article)
except Article.DoesNotExist:
raise Http404("Articles not found")
context = {
"article": article
}
return render(request, "news/article.html", context)
def api_articles(request, params):
params = params.split('&')
tag = params[0]
count = int(params[1])
try:
if tag == 'Головна':
db_length = db_get_len(Article.objects.all().count(), count)
get_ten = ten_getter(db_length, count)
articles = Article.objects.all().values()[count-10:get_ten]
else:
db_length = db_get_len(Article.objects.filter(tag__iexact=tag).count(), count)
get_ten = ten_getter(db_length, count)
articles = Article.objects.filter(tag__iexact=tag).values()[count-10:get_ten]
except Article.DoesNotExist:
raise Http404("Articles not found")
return JsonResponse(list(articles), content_type='application/json', safe=False)
def api_article(request, tag, param):
try:
article = Article.objects.values().get(header__iexact=param)
except Article.DoesNotExist:
raise Http404("Articles not found")
return JsonResponse(article, content_type='application/json', safe=False)
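# Note: db_get_len(db, artcls) is equivalent to min(db, artcls): it caps the
# requested article count at the number of matching rows in the table.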
def db_get_len(db, artcls):
if db < artcls:
return artcls - (artcls - db)
else:
return artcls
def ten_getter(length, count):
if (length - count) > 10:
return 10
else:
return length
| 27.757009
| 90
| 0.653535
|
import json
from django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from .models import Article
get_articles = 10
def index(request):
return HttpResponseRedirect(reverse("main"))
def main(request):
try:
global get_articles
db_len = Article.objects.all().count()
counter = db_get_len(db_len, get_articles)
articles = Article.objects.all().order_by('-id')[:counter]
except Article.DoesNotExist:
raise Http404("Articles does not exist")
context = {
"title": "Головна",
"articles_3": articles[:3],
"articles_7": articles[3:10]
}
return render(request, "news/articles.html", context)
def section(request, selector):
try:
global get_articles
db_len = Article.objects.filter(tag__iexact=selector).count()
counter = db_get_len(db_len, get_articles)
articles = Article.objects.filter(tag__iexact=selector).order_by('-id')[:counter]
except Article.DoesNotExist:
raise Http404("Articles does not exist")
context = {
"title": selector,
"articles_3": articles[:3],
"articles_7": articles[4:10]
}
return render(request, "news/articles.html", context)
def article(request, selector, article):
try:
article = Article.objects.get(header__iexact=article)
except Article.DoesNotExist:
raise Http404("Articles not found")
context = {
"article": article
}
return render(request, "news/article.html", context)
def api_articles(request, params):
params = params.split('&')
tag = params[0]
count = int(params[1])
try:
if tag == 'Головна':
db_length = db_get_len(Article.objects.all().count(), count)
get_ten = ten_getter(db_length, count)
articles = Article.objects.all().values()[count-10:get_ten]
else:
db_length = db_get_len(Article.objects.filter(tag__iexact=tag).count(), count)
get_ten = ten_getter(db_length, count)
articles = Article.objects.filter(tag__iexact=tag).values()[count-10:get_ten]
except Article.DoesNotExist:
raise Http404("Articles not found")
return JsonResponse(list(articles), content_type='application/json', safe=False)
def api_article(request, tag, param):
try:
article = Article.objects.values().get(header__iexact=param)
except Article.DoesNotExist:
raise Http404("Articles not found")
return JsonResponse(article, content_type='application/json', safe=False)
def db_get_len(db, artcls):
if db < artcls:
return artcls - (artcls - db)
else:
return artcls
def ten_getter(length, count):
if (length - count) > 10:
return 10
else:
return length
| true
| true
|
f715672884f4a836d1e1572dda38ea12b14cfb27
| 8,758
|
py
|
Python
|
lib/rucio/tests/conftest.py
|
chrisburr/rucio
|
735f628231cd9fae64adc31c9f548b14d5ca01d3
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/tests/conftest.py
|
chrisburr/rucio
|
735f628231cd9fae64adc31c9f548b14d5ca01d3
|
[
"Apache-2.0"
] | null | null | null |
lib/rucio/tests/conftest.py
|
chrisburr/rucio
|
735f628231cd9fae64adc31c9f548b14d5ca01d3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
# - Radu Carpa <radu.carpa@cern.ch>, 2021
# - Mayank Sharma <mayank.sharma@cern.ch>, 2021
from __future__ import print_function
import traceback
import pytest
# local imports in the fixtures to make this file loadable in e.g. client tests
@pytest.fixture(scope='session')
def vo():
from rucio.common.config import config_get_bool, config_get
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
return config_get('client', 'vo', raise_exception=False, default='tst')
else:
return 'def'
@pytest.fixture(scope='module')
def replica_client():
from rucio.client.replicaclient import ReplicaClient
return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
from rucio.client import Client
return Client()
@pytest.fixture(scope='module')
def did_client():
from rucio.client.didclient import DIDClient
return DIDClient()
@pytest.fixture
def rest_client():
from rucio.tests.common import print_response
from flask.testing import FlaskClient
from rucio.web.rest.flaskapi.v1.main import application
class WrappedFlaskClient(FlaskClient):
def __init__(self, *args, **kwargs):
super(WrappedFlaskClient, self).__init__(*args, **kwargs)
def open(self, path='/', *args, **kwargs):
print(kwargs.get('method', 'GET'), path)
response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
try:
print_response(response)
except Exception:
traceback.print_exc()
return response
_testing = application.testing
application.testing = True
application.test_client_class = WrappedFlaskClient
with application.test_client() as client:
yield client
application.test_client_class = None
application.testing = _testing
@pytest.fixture
def auth_token(rest_client, vo):
from rucio.tests.common import vohdr, headers, loginhdr
auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(vo)))
assert auth_response.status_code == 200
token = auth_response.headers.get('X-Rucio-Auth-Token')
assert token
return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def test_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('root', vo=vo)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
"""
    Detects whether containerized XRootD RSEs are available in the testing environment.
:return: A list of (rse_name, rse_id) tuples.
"""
from rucio.common.exception import InvalidRSEExpression
rses = []
try:
xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if "xrd" in rse_obj['rse'].lower()]
xrd_containerized_rses.sort()
rses.extend(xrd_containerized_rses)
except InvalidRSEExpression as invalid_rse_expression:
print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
.format(ex=invalid_rse_expression))
traceback.print_exc()
return rses
@pytest.fixture
def rse_factory(vo):
from rucio.tests.temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo) as factory:
yield factory
@pytest.fixture
def did_factory(vo, mock_scope):
from rucio.tests.temp_factories import TemporaryDidFactory
with TemporaryDidFactory(vo=vo, default_scope=mock_scope) as factory:
yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
from rucio.tests.temp_factories import TemporaryFileFactory
with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
yield factory
def __get_fixture_param(request):
fixture_param = getattr(request, "param", None)
if not fixture_param:
# Parametrize support is incomplete for legacy unittest test cases
# Manually retrieve the parameters from the list of marks:
mark = next(iter(filter(lambda m: m.name == 'parametrize', request.instance.pytestmark)), None)
if mark:
fixture_param = mark.args[1][0]
return fixture_param
@pytest.fixture
def core_config_mock(request):
"""
Fixture to allow having per-test core.config tables without affecting the other parallel tests.
This override works only in tests which use core function calls directly, not in the ones working
via the API, because the normal config table is not touched and the rucio instance answering API
calls is not aware of this mock.
This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
"""
from unittest import mock
from rucio.common.utils import generate_uuid
from sqlalchemy.pool import StaticPool
from rucio.db.sqla.models import ModelBase, BASE, Column, String, PrimaryKeyConstraint
from rucio.db.sqla.session import get_session, get_maker, get_engine, create_engine, declarative_base
# Get the fixture parameters
table_content = []
params = __get_fixture_param(request)
if params:
table_content = params.get("table_content", table_content)
# Create an in-memory dropdown replacement table for the "models.Config" table
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
InMemoryBase = declarative_base(bind=engine)
class InMemoryConfig(InMemoryBase, ModelBase):
__tablename__ = 'configs_' + generate_uuid()
section = Column(String(128))
opt = Column(String(128))
value = Column(String(4000))
_table_args = (PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'), )
InMemoryBase.metadata.create_all()
# Register the new table with the associated engine into the sqlalchemy sessionmaker
    # In theory, this code must be protected by rucio.db.sqla.session._LOCK, but this code is executed
    # during test case initialization, so there is no risk of concurrent calls from within the
    # same process
current_engine = get_engine()
get_maker().configure(binds={BASE: current_engine, InMemoryBase: engine})
# Fill the table with the requested mock data
session = get_session()()
for section, option, value in (table_content or []):
InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
session.commit()
with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
yield
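# A sketch of how a test might parametrize this fixture (the section/option/value
# strings below are placeholders, not real Rucio config entries):
#     @pytest.mark.parametrize("core_config_mock", [{"table_content": [
#         ("some_section", "some_option", "some_value"),
#     ]}], indirect=True)
#     def test_with_mocked_config(core_config_mock):
#         ...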
@pytest.fixture
def caches_mock(request):
"""
Fixture which overrides the different internal caches with in-memory ones for the duration
of a particular test.
This override works only in tests which use core function calls directly, not in the ones
working via API.
    The fixture acts by mock.patching the REGION object in each of the provided modules.
"""
from unittest import mock
from contextlib import ExitStack
from dogpile.cache import make_region
caches_to_mock = []
params = __get_fixture_param(request)
if params:
caches_to_mock = params.get("caches_to_mock", caches_to_mock)
with ExitStack() as stack:
for module in caches_to_mock:
region = make_region().configure('dogpile.cache.memory', expiration_time=600)
stack.enter_context(mock.patch('{}.{}'.format(module, 'REGION'), new=region))
yield
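# A sketch of parametrizing this fixture (the module path below is a placeholder
# for any module that defines a dogpile REGION object):
#     @pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
#         "rucio.core.some_module",
#     ]}], indirect=True)
#     def test_with_mocked_caches(caches_mock):
#         ...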
| 33.945736
| 125
| 0.716716
|
from __future__ import print_function
import traceback
import pytest
@pytest.fixture(scope='session')
def vo():
from rucio.common.config import config_get_bool, config_get
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
return config_get('client', 'vo', raise_exception=False, default='tst')
else:
return 'def'
@pytest.fixture(scope='module')
def replica_client():
from rucio.client.replicaclient import ReplicaClient
return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
from rucio.client import Client
return Client()
@pytest.fixture(scope='module')
def did_client():
from rucio.client.didclient import DIDClient
return DIDClient()
@pytest.fixture
def rest_client():
from rucio.tests.common import print_response
from flask.testing import FlaskClient
from rucio.web.rest.flaskapi.v1.main import application
class WrappedFlaskClient(FlaskClient):
def __init__(self, *args, **kwargs):
super(WrappedFlaskClient, self).__init__(*args, **kwargs)
def open(self, path='/', *args, **kwargs):
print(kwargs.get('method', 'GET'), path)
response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
try:
print_response(response)
except Exception:
traceback.print_exc()
return response
_testing = application.testing
application.testing = True
application.test_client_class = WrappedFlaskClient
with application.test_client() as client:
yield client
application.test_client_class = None
application.testing = _testing
@pytest.fixture
def auth_token(rest_client, vo):
from rucio.tests.common import vohdr, headers, loginhdr
auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(vo)))
assert auth_response.status_code == 200
token = auth_response.headers.get('X-Rucio-Auth-Token')
assert token
return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def test_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('root', vo=vo)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
from rucio.common.exception import InvalidRSEExpression
rses = []
try:
xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if "xrd" in rse_obj['rse'].lower()]
xrd_containerized_rses.sort()
rses.extend(xrd_containerized_rses)
except InvalidRSEExpression as invalid_rse_expression:
print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
.format(ex=invalid_rse_expression))
traceback.print_exc()
return rses
@pytest.fixture
def rse_factory(vo):
from rucio.tests.temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo) as factory:
yield factory
@pytest.fixture
def did_factory(vo, mock_scope):
from rucio.tests.temp_factories import TemporaryDidFactory
with TemporaryDidFactory(vo=vo, default_scope=mock_scope) as factory:
yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
from rucio.tests.temp_factories import TemporaryFileFactory
with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
yield factory
def __get_fixture_param(request):
fixture_param = getattr(request, "param", None)
if not fixture_param:
mark = next(iter(filter(lambda m: m.name == 'parametrize', request.instance.pytestmark)), None)
if mark:
fixture_param = mark.args[1][0]
return fixture_param
@pytest.fixture
def core_config_mock(request):
from unittest import mock
from rucio.common.utils import generate_uuid
from sqlalchemy.pool import StaticPool
from rucio.db.sqla.models import ModelBase, BASE, Column, String, PrimaryKeyConstraint
from rucio.db.sqla.session import get_session, get_maker, get_engine, create_engine, declarative_base
table_content = []
params = __get_fixture_param(request)
if params:
table_content = params.get("table_content", table_content)
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
InMemoryBase = declarative_base(bind=engine)
class InMemoryConfig(InMemoryBase, ModelBase):
__tablename__ = 'configs_' + generate_uuid()
section = Column(String(128))
opt = Column(String(128))
value = Column(String(4000))
_table_args = (PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'), )
InMemoryBase.metadata.create_all()
current_engine = get_engine()
get_maker().configure(binds={BASE: current_engine, InMemoryBase: engine})
session = get_session()()
for section, option, value in (table_content or []):
InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
session.commit()
with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
yield
@pytest.fixture
def caches_mock(request):
from unittest import mock
from contextlib import ExitStack
from dogpile.cache import make_region
caches_to_mock = []
params = __get_fixture_param(request)
if params:
caches_to_mock = params.get("caches_to_mock", caches_to_mock)
with ExitStack() as stack:
for module in caches_to_mock:
region = make_region().configure('dogpile.cache.memory', expiration_time=600)
stack.enter_context(mock.patch('{}.{}'.format(module, 'REGION'), new=region))
yield
| true
| true
|
f71567dd29ec05db09822d985693d9e42b0f36ec
| 394
|
py
|
Python
|
src/param.py
|
xiajing10/akec
|
239fdda923c8a0743f56dbf0a009fa2235b85451
|
[
"MIT"
] | 14
|
2021-01-28T07:13:25.000Z
|
2022-02-10T06:41:32.000Z
|
src/param.py
|
xiajing10/akec
|
239fdda923c8a0743f56dbf0a009fa2235b85451
|
[
"MIT"
] | 2
|
2021-04-14T15:24:30.000Z
|
2021-05-06T07:02:08.000Z
|
src/param.py
|
xiajing10/akec
|
239fdda923c8a0743f56dbf0a009fa2235b85451
|
[
"MIT"
] | 1
|
2021-07-09T02:52:59.000Z
|
2021-07-09T02:52:59.000Z
|
# -*- coding: utf-8 -*-
"""
@author: eilxaix
"""
param = {
'data_path': '../dataset/ieee_xai.csv',
'terms_path': '../dataset/domain_terms.txt',
'conceptnet_emb': './embed_data/numberbatch-en-19.08.txt',
'elmo_options':'./embed_data/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json',
'elmo_weight':'./embed_data/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5'
}
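# Example consumption (a sketch; assumes the module is imported as `param`):
#     from param import param
#     emb_path = param['conceptnet_emb']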
| 32.833333
| 87
| 0.690355
|
param = {
'data_path': '../dataset/ieee_xai.csv',
'terms_path': '../dataset/domain_terms.txt',
'conceptnet_emb': './embed_data/numberbatch-en-19.08.txt',
'elmo_options':'./embed_data/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json',
'elmo_weight':'./embed_data/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5'
}
| true
| true
|
f71568ebb68f9faa9d28c57f4a9204630dfb7a43
| 18,797
|
py
|
Python
|
transfer_subnet/xiaoketransfer2.py
|
LenKerr/Colorization-1
|
bcfcdb24fc8ab107d34644d5a63b018f86784e21
|
[
"MIT"
] | 30
|
2020-06-21T09:29:51.000Z
|
2022-03-26T07:32:52.000Z
|
transfer_subnet/xiaoketransfer2.py
|
LenKerr/Colorization-1
|
bcfcdb24fc8ab107d34644d5a63b018f86784e21
|
[
"MIT"
] | 5
|
2020-09-27T09:45:44.000Z
|
2021-10-20T11:45:04.000Z
|
transfer_subnet/xiaoketransfer2.py
|
xuzhongyou/Colorization
|
bcfcdb24fc8ab107d34644d5a63b018f86784e21
|
[
"MIT"
] | 7
|
2020-07-03T02:55:25.000Z
|
2021-12-18T10:38:41.000Z
|
"""
Copyright (c) 2019 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
os.environ['CUDA_VISIBLE_DEVICES']='1'
import sys
sys.path.append('./segmentation')
import os
import tqdm
import argparse
import torch
from torchvision.utils import save_image
import torch.nn as nn
# from model import WaveEncoder, WaveDecoder
from utils.core import feature_wct
from utils.core import feature_adin
from utils.core import feature_adin_without_segment
from utils.core import feature_wct_without_segment
from utils.io import Timer, open_image, load_segment, compute_label_info
from xiaokemodel import XiaoKeEncoder, XiaoKeDecoder
import numpy as np
import torchvision.transforms as transforms
from scipy.io import loadmat
from PIL import Image
from scipy.misc import imread, imresize
import cv2
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy, mark_volatile
import datetime
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
class WCT2:
def __init__(self, model_path='./model_checkpoints', transfer_at=['encoder', 'skip', 'decoder'], option_unpool='cat5', device='cuda:0', verbose=False):
self.transfer_at = set(transfer_at)
assert not(self.transfer_at - set(['encoder', 'decoder', 'skip'])), 'invalid transfer_at: {}'.format(transfer_at)
assert self.transfer_at, 'empty transfer_at'
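        # NOTE: the video-checkpoint paths assigned first below are dead code; they
        # are immediately overwritten by the image-checkpoint paths that follow.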
model_path = './xiaoke_video_checkpoints/'
encoder_path = 'xiaoke_encoder.pth'
decoder_path = 'xiaoke_decoder_0.0001_4.pth'
model_path = './xiaoke_checkpoints/'
encoder_path = 'xiaoke_encoder.pth'
decoder_path = 'xiaoke_decoder_87.pth'
self.device = torch.device(device)
self.verbose = verbose
# self.encoder = WaveEncoder(option_unpool).to(self.device)
# self.decoder = WaveDecoder(option_unpool).to(self.device)
# self.encoder.load_state_dict(torch.load(os.path.join(model_path, 'wave_encoder_{}_l4.pth'.format(option_unpool)), map_location=lambda storage, loc: storage))
# self.decoder.load_state_dict(torch.load(os.path.join(model_path, 'wave_decoder_{}_l4.pth'.format(option_unpool)), map_location=lambda storage, loc: storage))
self.encoder = XiaoKeEncoder(option_unpool).to(self.device)
self.decoder = XiaoKeDecoder(option_unpool).to(self.device)
self.encoder.load_state_dict(torch.load(os.path.join(model_path,encoder_path),map_location=lambda storage, loc: storage))
self.decoder.load_state_dict(torch.load(os.path.join(model_path,decoder_path),map_location=lambda storage, loc: storage))
def print_(self, msg):
if self.verbose:
print(msg)
def encode(self, x, skips, level):
return self.encoder.encode(x, skips, level)
def decode(self, x, skips, level):
return self.decoder.decode(x, skips, level)
def get_all_feature(self, x):
skips = {}
feats = {'encoder': {}, 'decoder': {}}
for level in [1, 2, 3, 4]:
x = self.encode(x, skips, level)
if 'encoder' in self.transfer_at:
feats['encoder'][level] = x
if 'encoder' not in self.transfer_at:
feats['decoder'][4] = x
for level in [4, 3, 2]:
x = self.decode(x, skips, level)
if 'decoder' in self.transfer_at:
feats['decoder'][level - 1] = x
return feats, skips
def transfer(self, content, style, content_segment, style_segment, alpha=1,is_wct=False):
content_feat, content_skips = content, {}
style_feats, style_skips = self.get_all_feature(style)
wct2_enc_level = [1, 2, 3, 4]
wct2_dec_level = [1, 2, 3, 4]
wct2_skip_level = ['pool1', 'pool2', 'pool3']
label_set,label_indicator = None, None
for level in [1, 2, 3, 4]:
content_feat = self.encode(content_feat, content_skips, level)
if 'encoder' in self.transfer_at and level in wct2_enc_level:
if is_wct:
content_feat = feature_wct(content_feat, style_feats['encoder'][level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
else:
content_feat = feature_adin_without_segment(content_feat, style_feats['encoder'][level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
self.print_('transfer at encoder {}'.format(level))
if 'skip' in self.transfer_at:
for skip_level in wct2_skip_level:
if is_wct:
content_skips[skip_level] = feature_wct(content_skips[skip_level], style_skips[skip_level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
else :
content_skips[skip_level] = feature_adin_without_segment(content_skips[skip_level], style_skips[skip_level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
self.print_('transfer at skip {}'.format(skip_level))
for level in [4, 3, 2, 1]:
if 'decoder' in self.transfer_at and level in style_feats['decoder'] and level in wct2_dec_level:
if is_wct:
content_feat = feature_wct(content_feat, style_feats['decoder'][level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
else :
content_feat = feature_adin_without_segment(content_feat, style_feats['decoder'][level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
self.print_('transfer at decoder {}'.format(level))
content_feat = self.decode(content_feat, content_skips, level)
return content_feat
def get_all_transfer():
ret = []
for e in ['encoder']:
for d in ['decoder']:
for s in ['skip']:
_ret = set([e, d, s]) & set(['encoder', 'decoder', 'skip'])
if _ret:
ret.append(_ret)
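    # With a single candidate at each position, this returns the one combination
    # [{'encoder', 'decoder', 'skip'}].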
return ret
# def get_single_transfer():
# return ['encoder', 'decoder', 'skip']
def run_bulk():
accurate_segment = True
device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'
device = torch.device(device)
transfer_at = set()
if config.transfer_at_encoder:
transfer_at.add('encoder')
if config.transfer_at_decoder:
transfer_at.add('decoder')
if config.transfer_at_skip:
transfer_at.add('skip')
# cw, ch = 640,360
cw, ch = 640,400
# The filenames of the content and style pair should match
c_transforms = transforms.Compose([transforms.Resize((ch,cw), interpolation=Image.NEAREST),transforms.CenterCrop((ch // 16 * 16, cw // 16 * 16)),transforms.ToTensor()])
fnames = os.listdir(config.content)
fnames.sort()
print('transfer at ~~~~',transfer_at)
style = Image.open(config.style).convert('RGB')
style = c_transforms(style).unsqueeze(0).to(device)
sample_fnames = fnames[:50]
for fname in tqdm.tqdm(sample_fnames):
if not is_image_file(fname):
print('invalid file (is not image), ', fname)
continue
print('config.wct is ',config.is_wct)
# content
_content = os.path.join(config.content, fname)
        content = Image.open(_content).convert('RGB') # don't forget the .to(device) here
content = c_transforms(content).unsqueeze(0).to(device)
print('current frame {} and shape is {}'.format(fname,content.shape))
# _content_segment = os.path.join(config.content_segment, fname) if config.content_segment else None
# _style_segment = os.path.join(config.style_segment, fname) if config.style_segment else None
_output = os.path.join(config.output, fname)
content_segment,style_segment = None,None
if not config.transfer_all:
with Timer('Elapsed time in whole WCT: {}', config.verbose):
postfix = '_'.join(sorted(list(transfer_at)))
fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
print('------ transfer:', _output)
wct2 = WCT2(transfer_at=transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
with torch.no_grad():
img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)
save_image(img.clamp_(0, 1), fname_output, padding=0)
else:
for _transfer_at in get_all_transfer():
print('location for transfer at~~~~',_transfer_at)
with Timer('Elapsed time in whole WCT: {}', config.verbose):
postfix = '_'.join(sorted(list(_transfer_at)))
fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
print('------ transfer:', fname,'-',_transfer_at)
wct2 = WCT2(transfer_at=_transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
# print('wct2 model encoder ',wct2.encoder)
# print('wcr2 model decoder ',wct2.decoder)
with torch.no_grad():
starttime = datetime.datetime.now()
img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)
endtime = datetime.datetime.now()
                        print('xiaoke with adain elapsed time ----', (endtime - starttime))
save_image(img.clamp_(0, 1), fname_output, padding=0)
# break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--content', type=str, default='./examples/content')
parser.add_argument('--content_segment', type=str, default='./examples/content_segment')
parser.add_argument('--style', type=str, default='./examples/style')
parser.add_argument('--style_segment', type=str, default='./examples/style_segment')
parser.add_argument('--output', type=str, default='./outputs')
parser.add_argument('--image_size', type=int, default=512)
parser.add_argument('--alpha', type=float, default=1)
parser.add_argument('--option_unpool', type=str, default='cat5', choices=['sum', 'cat5'])
parser.add_argument('-e', '--transfer_at_encoder', action='store_true')
parser.add_argument('-d', '--transfer_at_decoder', action='store_true')
parser.add_argument('-s', '--transfer_at_skip', action='store_true')
parser.add_argument('-a', '--transfer_all', action='store_true')
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--is_wct',action='store_true')
parser.add_argument('--label_mapping', type=str, default='ade20k_semantic_rel.npy')
parser.add_argument('--model_path', help='folder to model path', default='baseline-resnet50_dilated8-ppm_bilinear_deepsup')
parser.add_argument('--arch_encoder', default='resnet50_dilated8', help="architecture of net_encoder")
parser.add_argument('--arch_decoder', default='ppm_bilinear_deepsup', help="architecture of net_decoder")
parser.add_argument('--suffix', default='_epoch_20.pth', help="which snapshot to load")
parser.add_argument('--fc_dim', default=2048, type=int, help='number of features between encoder and decoder')
parser.add_argument('--num_class', default=150, type=int, help='number of classes')
    parser.add_argument('--padding_constant', default=8, type=int, help='maximum downsampling rate of the network')
parser.add_argument('--gpu_id', default=0, type=int, help='gpu_id for evaluation')
    parser.add_argument('--imgSize', default=[300, 400, 500, 600], nargs='+', type=int, help='list of input image sizes. ' 'for multiscale testing, e.g. 300 400 500')
    # the same argument name must not be defined twice
config = parser.parse_args()
transform = transforms.Compose([transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.])])
print(config)
if not os.path.exists(os.path.join(config.output)):
os.makedirs(os.path.join(config.output))
run_bulk()
# forest clips
# 171124_D1_HD_01
# 170216A_122_ForestTrail_1080
# 170216A_070_LookingUpThroughForest_1080
# 180705_01_0
# 190416_10_Drone1_0
# Forest_15_1_Videv
# Forest_15_4_Videv
# on
# WalkingThroughTreesatSunsetVidev
# leaves
# Autumn_leaves_in_motion_0
# autumn_leaves
# autumn-leaves-blowing-in-the-wind-H264
# 180705_01_0
# ocean waves
# 46234354
# walking_on_the_beac
# snow mountains
# 180607_A_00
# driving
# 180607_A_10
# airplanes
# Airbus_A380_Landing_2__Videv
# Evening_landin
# PlaneLand
# beach yoga
# Ao_Nang_Beach_Yoga_MP4_HDV_1080p25__TanuriX_Stock_Footage_N
# MVI_126
# rice paddies / grain fields
# Barley_3_Videv
# HandStrokin
# wild_gras
# windygrassnoaudi-
# boats
# beach1
# sailing_boa
# sky
# Becco_di_Filadonna_su_Vall
# Blue_Sky_and_Clouds_Timelapse_0892__Videv
# mice
# CotswoldSequence
# cows
# cow
# Cow_Mother_and_cal
# Cows_
# Limousin_Cows_1__VIdev
# Limousin_Cows_2__Videv
# sunset
# Lonely_tree_at_sunset_CCBY_NatureCli
# MilkyWaywithTreeVidev
# SilhouetteJogge
# Sun_to_Sea_Model__Pan_Down_MP4_HDV_1080p25__TanuriX_Stock_Footage_W
# Sunris
# TimelapseSunse
# Wakeboarding_on_the_Lak
# horses
# Dirty_Hors
# black-and-white birds
# Pigeon-Stock-Vide
# Red_fod
# Weave
# seagulls
# seagul-H264
# seagulls_on_the_beac
# buildings
# Run_5_wo_metadata_h264420_720p_UH
# ducks
# SeaBirdsSwimming_
# Swans__1287_
# sheep
# Shee
'''
CUDA_VISIBLE_DEVICES=6 python transfer.py --content ./examples/content --style ./examples/style --content_segment ./examples/content_segment --style_segment ./examples/style_segment/ --output ./outputs/ --verbose --image_size 512 -a
'''
'''
python xiaoketransfer.py --content ./examples/demo_content/ --style ./examples/demo_style/ -a --output ./examples/demo_stylization --is_wct --image_size 400
CUDA_VISIBLE_DEVICES=1 python xiaoketransfer.py --content ./examples/dataset/alley_2/ --style ./examples/dataset/fangao.png -a --output ./examples/stylization
CUDA_VISIBLE_DEVICES=1 python xiaoketransfer2.py --content ./examples/data/MPI-Sintel-complete/training/clean/temple_2 --style ./examples/data/fangao.png -a --output ./examples/stylization
CUDA_VISIBLE_DEVICES=1 python xiaoketransfer2.py --content ./examples/data/MPI-Sintel-complete/training/clean/mountain_1 --style ./examples/data/fangao.png -a --output ./examples/stylization
CUDA_VISIBLE_DEVICES=1 python xiaoketransfer2.py --content ./examples/data/MPI-Sintel-complete/training/clean/temple_2 --style ./examples/data/fangao.png -a --output ./examples/stylization --is_wct
'''
'''
'../data/video-picture/160825_26_WindTurbines4_1080'
python xiaoketransfer2.py --content ../data/video-picture/160825_26_WindTurbines4_1080 --style ./examples/data/fangao.png -a --output ./examples/160825_26_WindTurbines4_1080_adain
'''
'''
'../data/video-picture/xxx'
python xiaoketransfer2.py --content ../data/video-picture/180705_01_0 --style ../data/reference/tar0056_orange_forest.png -a --output ./examples/Forest_15_4_Videv
python xiaoketransfer.py --content ../data/video-picture/Red_fod --style ../data/video-picture/Weave/frame_0001.png -a --output ./examples/Red_fod_seg
python xiaoketransfer.py --content ../data/video-picture/seagulls_on_the_beac --style ../data/video-picture/seagul-H264/frame_0001.png -a --output ./examples/seagulls_on_the_beac_seg
python xiaoketransfer2.py --content ../data/video-picture/HandStrokin --style ../data/video-picture/Barley_3_Videv/frame_0001.png -a --output ./examples/HandStrokin
python xiaoketransfer.py --content ../data/video-picture/Swans__1287_ --style ../data/video-picture/SeaBirdsSwimming_/frame_0001.png -a --output ./examples/Swans__1287_
python xiaoketransfer.py --content ../data/video-picture/Becco_di_Filadonna_su_Vall --style ../data/video-picture/Blue_Sky_and_Clouds_Timelapse_0892__Videv/frame_0001.png -a --output ./examples/Becco_di_Filadonna_su_Vall
python xiaoketransfer.py --content ../data/video-picture/Sun_to_Sea_Model__Pan_Down_MP4_HDV_1080p25__TanuriX_Stock_Footage_W --style ../data/video-picture/Lonely_tree_at_sunset_CCBY_NatureCli/frame_0004.png -a --output ./examples/Sun_to_Sea_Model__Pan_Down_MP4_HDV_1080p25__TanuriX_Stock_Footage_W
python xiaoketransfer2.py --content ../data/video-picture/Wakeboarding_on_the_Lak --style ../data/video-picture/Sunris/frame_0004.png -a --output ./examples/Wakeboarding_on_the_Lak
# Barley_3_Videv
# HandStrokin
# Pigeon-Stock-Vide
# Red_fod
'''
| 42.623583
| 297
| 0.665638
|
import os
os.environ['CUDA_VISIBLE_DEVICES']='1'
import sys
sys.path.append('./segmentation')
import os
import tqdm
import argparse
import torch
from torchvision.utils import save_image
import torch.nn as nn
from utils.core import feature_wct
from utils.core import feature_adin
from utils.core import feature_adin_without_segment
from utils.core import feature_wct_without_segment
from utils.io import Timer, open_image, load_segment, compute_label_info
from xiaokemodel import XiaoKeEncoder, XiaoKeDecoder
import numpy as np
import torchvision.transforms as transforms
from scipy.io import loadmat
from PIL import Image
from scipy.misc import imread, imresize
import cv2
from lib.nn import user_scattered_collate, async_copy_to
from lib.utils import as_numpy, mark_volatile
import datetime
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
class WCT2:
def __init__(self, model_path='./model_checkpoints', transfer_at=['encoder', 'skip', 'decoder'], option_unpool='cat5', device='cuda:0', verbose=False):
self.transfer_at = set(transfer_at)
assert not(self.transfer_at - set(['encoder', 'decoder', 'skip'])), 'invalid transfer_at: {}'.format(transfer_at)
assert self.transfer_at, 'empty transfer_at'
model_path = './xiaoke_video_checkpoints/'
encoder_path = 'xiaoke_encoder.pth'
decoder_path = 'xiaoke_decoder_0.0001_4.pth'
model_path = './xiaoke_checkpoints/'
encoder_path = 'xiaoke_encoder.pth'
decoder_path = 'xiaoke_decoder_87.pth'
self.device = torch.device(device)
self.verbose = verbose
self.encoder = XiaoKeEncoder(option_unpool).to(self.device)
self.decoder = XiaoKeDecoder(option_unpool).to(self.device)
self.encoder.load_state_dict(torch.load(os.path.join(model_path,encoder_path),map_location=lambda storage, loc: storage))
self.decoder.load_state_dict(torch.load(os.path.join(model_path,decoder_path),map_location=lambda storage, loc: storage))
def print_(self, msg):
if self.verbose:
print(msg)
def encode(self, x, skips, level):
return self.encoder.encode(x, skips, level)
def decode(self, x, skips, level):
return self.decoder.decode(x, skips, level)
def get_all_feature(self, x):
skips = {}
feats = {'encoder': {}, 'decoder': {}}
for level in [1, 2, 3, 4]:
x = self.encode(x, skips, level)
if 'encoder' in self.transfer_at:
feats['encoder'][level] = x
if 'encoder' not in self.transfer_at:
feats['decoder'][4] = x
for level in [4, 3, 2]:
x = self.decode(x, skips, level)
if 'decoder' in self.transfer_at:
feats['decoder'][level - 1] = x
return feats, skips
def transfer(self, content, style, content_segment, style_segment, alpha=1,is_wct=False):
content_feat, content_skips = content, {}
style_feats, style_skips = self.get_all_feature(style)
wct2_enc_level = [1, 2, 3, 4]
wct2_dec_level = [1, 2, 3, 4]
wct2_skip_level = ['pool1', 'pool2', 'pool3']
label_set,label_indicator = None, None
for level in [1, 2, 3, 4]:
content_feat = self.encode(content_feat, content_skips, level)
if 'encoder' in self.transfer_at and level in wct2_enc_level:
if is_wct:
content_feat = feature_wct(content_feat, style_feats['encoder'][level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
else:
content_feat = feature_adin_without_segment(content_feat, style_feats['encoder'][level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
self.print_('transfer at encoder {}'.format(level))
if 'skip' in self.transfer_at:
for skip_level in wct2_skip_level:
if is_wct:
content_skips[skip_level] = feature_wct(content_skips[skip_level], style_skips[skip_level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
else :
content_skips[skip_level] = feature_adin_without_segment(content_skips[skip_level], style_skips[skip_level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
self.print_('transfer at skip {}'.format(skip_level))
for level in [4, 3, 2, 1]:
if 'decoder' in self.transfer_at and level in style_feats['decoder'] and level in wct2_dec_level:
if is_wct:
content_feat = feature_wct(content_feat, style_feats['decoder'][level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
else :
content_feat = feature_adin_without_segment(content_feat, style_feats['decoder'][level],
content_segment, style_segment,
label_set, label_indicator,
alpha=alpha, device=self.device)
self.print_('transfer at decoder {}'.format(level))
content_feat = self.decode(content_feat, content_skips, level)
return content_feat
def get_all_transfer():
ret = []
for e in ['encoder']:
for d in ['decoder']:
for s in ['skip']:
_ret = set([e, d, s]) & set(['encoder', 'decoder', 'skip'])
if _ret:
ret.append(_ret)
return ret
def run_bulk():
accurate_segment = True
device = 'cpu' if config.cpu or not torch.cuda.is_available() else 'cuda:0'
device = torch.device(device)
transfer_at = set()
if config.transfer_at_encoder:
transfer_at.add('encoder')
if config.transfer_at_decoder:
transfer_at.add('decoder')
if config.transfer_at_skip:
transfer_at.add('skip')
cw, ch = 640,400
c_transforms = transforms.Compose([transforms.Resize((ch,cw), interpolation=Image.NEAREST),transforms.CenterCrop((ch // 16 * 16, cw // 16 * 16)),transforms.ToTensor()])
fnames = os.listdir(config.content)
fnames.sort()
print('transfer at ~~~~',transfer_at)
style = Image.open(config.style).convert('RGB')
style = c_transforms(style).unsqueeze(0).to(device)
sample_fnames = fnames[:50]
for fname in tqdm.tqdm(sample_fnames):
if not is_image_file(fname):
print('invalid file (is not image), ', fname)
continue
        print('config.is_wct is ', config.is_wct)
_content = os.path.join(config.content, fname)
content = Image.open(_content).convert('RGB')
content = c_transforms(content).unsqueeze(0).to(device)
print('current frame {} and shape is {}'.format(fname,content.shape))
_output = os.path.join(config.output, fname)
content_segment,style_segment = None,None
if not config.transfer_all:
with Timer('Elapsed time in whole WCT: {}', config.verbose):
postfix = '_'.join(sorted(list(transfer_at)))
fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
print('------ transfer:', _output)
wct2 = WCT2(transfer_at=transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
with torch.no_grad():
img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)
save_image(img.clamp_(0, 1), fname_output, padding=0)
else:
for _transfer_at in get_all_transfer():
print('location for transfer at~~~~',_transfer_at)
with Timer('Elapsed time in whole WCT: {}', config.verbose):
postfix = '_'.join(sorted(list(_transfer_at)))
fname_output = _output.replace('.png', '_{}_{}.png'.format(config.option_unpool, postfix))
print('------ transfer:', fname,'-',_transfer_at)
wct2 = WCT2(transfer_at=_transfer_at, option_unpool=config.option_unpool, device=device, verbose=config.verbose)
with torch.no_grad():
starttime = datetime.datetime.now()
img = wct2.transfer(content, style, content_segment, style_segment, alpha=config.alpha,is_wct=config.is_wct)
endtime = datetime.datetime.now()
                        print('xiaoke with adain elapsed time ----', (endtime - starttime))
save_image(img.clamp_(0, 1), fname_output, padding=0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--content', type=str, default='./examples/content')
parser.add_argument('--content_segment', type=str, default='./examples/content_segment')
parser.add_argument('--style', type=str, default='./examples/style')
parser.add_argument('--style_segment', type=str, default='./examples/style_segment')
parser.add_argument('--output', type=str, default='./outputs')
parser.add_argument('--image_size', type=int, default=512)
parser.add_argument('--alpha', type=float, default=1)
parser.add_argument('--option_unpool', type=str, default='cat5', choices=['sum', 'cat5'])
parser.add_argument('-e', '--transfer_at_encoder', action='store_true')
parser.add_argument('-d', '--transfer_at_decoder', action='store_true')
parser.add_argument('-s', '--transfer_at_skip', action='store_true')
parser.add_argument('-a', '--transfer_all', action='store_true')
parser.add_argument('--cpu', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--is_wct',action='store_true')
parser.add_argument('--label_mapping', type=str, default='ade20k_semantic_rel.npy')
parser.add_argument('--model_path', help='folder to model path', default='baseline-resnet50_dilated8-ppm_bilinear_deepsup')
parser.add_argument('--arch_encoder', default='resnet50_dilated8', help="architecture of net_encoder")
parser.add_argument('--arch_decoder', default='ppm_bilinear_deepsup', help="architecture of net_decoder")
parser.add_argument('--suffix', default='_epoch_20.pth', help="which snapshot to load")
parser.add_argument('--fc_dim', default=2048, type=int, help='number of features between encoder and decoder')
parser.add_argument('--num_class', default=150, type=int, help='number of classes')
    parser.add_argument('--padding_constant', default=8, type=int, help='maximum downsampling rate of the network')
parser.add_argument('--gpu_id', default=0, type=int, help='gpu_id for evaluation')
    parser.add_argument('--imgSize', default=[300, 400, 500, 600], nargs='+', type=int, help='list of input image sizes. ' 'for multiscale testing, e.g. 300 400 500')
config = parser.parse_args()
transform = transforms.Compose([transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.])])
print(config)
if not os.path.exists(os.path.join(config.output)):
os.makedirs(os.path.join(config.output))
run_bulk()
| true
| true
|
f7156985db791f120b85f1fb241fe315d40a1d08
| 16,415
|
py
|
Python
|
retrain_yolo.py
|
mukulbhave/YAD2K
|
a6174285e036f95df83783b7b4d951094cbb08c8
|
[
"MIT"
] | null | null | null |
retrain_yolo.py
|
mukulbhave/YAD2K
|
a6174285e036f95df83783b7b4d951094cbb08c8
|
[
"MIT"
] | null | null | null |
retrain_yolo.py
|
mukulbhave/YAD2K
|
a6174285e036f95df83783b7b4d951094cbb08c8
|
[
"MIT"
] | null | null | null |
"""
This is a script that can be used to retrain the YOLOv2 model for your own dataset.
"""
import argparse
import os
import warnings
from PIL import ImageOps
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from keras import regularizers
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from yad2k.models.keras_yolo import (preprocess_true_boxes, yolo_body,
yolo_eval, yolo_head, yolo_loss)
from yad2k.utils.draw_boxes import draw_boxes
import h5py
import io
from yolo_data_gen import *
# Args
argparser = argparse.ArgumentParser(
description="Retrain or 'fine-tune' a pretrained YOLOv2 model for your own data.")
argparser.add_argument(
'-d',
'--data_path',
help="path to numpy data file (.npz) containing np.object array 'boxes' and np.uint8 array 'images'",
default=os.path.join('..', 'DATA', 'underwater_data.npz'))
argparser.add_argument(
'-a',
'--anchors_path',
help='path to anchors file, defaults to yolo_anchors.txt',
default=os.path.join('model_data', 'yolo_anchors.txt'))
argparser.add_argument(
'-c',
'--classes_path',
help='path to classes file, defaults to pascal_classes.txt',
default=os.path.join('..', 'DATA', 'underwater_classes.txt'))
# Default anchor boxes
YOLO_ANCHORS = np.array(
((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
(7.88282, 3.52778), (9.77052, 9.16828)))
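# In YOLOv2 these are (width, height) anchor priors in units of the final
# 13x13 feature-grid cells (416 input / 32 stride); one row per anchor box.
# preprocess_true_boxes assigns each ground-truth box to its best-matching
# anchor at the grid cell containing the box center.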
def _main(args):
data_path = os.path.expanduser(args.data_path)
classes_path = os.path.expanduser(args.classes_path)
anchors_path = os.path.expanduser(args.anchors_path)
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
dataset = h5py.File(data_path,'r+')
anchors = YOLO_ANCHORS
#detectors_mask, matching_true_boxes = get_detector_mask(boxes, anchors)
model_body, model = create_model(anchors, class_names)
train( model, class_names, anchors, dataset) # image_data, boxes, detectors_mask, matching_true_boxes )
# TODO use data generator for draw as well
# draw(model_body,
# class_names,
# anchors,
# image_data,
# image_set='all', # assumes test set is 0.9
# weights_name='trained_stage_3_best.h5',
# save_all=True)
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
if os.path.isfile(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
else:
Warning("Could not open anchors file, using default.")
return YOLO_ANCHORS
# Exactly the same as process_data, but handles images of different sizes in the dataset
def scale_data(images, boxes=None):
'''processes the data'''
img_shape = (416,416)
images = [PIL.Image.open(io.BytesIO(i)) for i in images]
# Box preprocessing.
if boxes is not None:
# Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
boxes = [box.reshape((-1, 5)) for box in boxes]
# Get box parameters as x_center, y_center, box_width, box_height, class.
boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
        # get original size of each image and convert the coordinates and w/h
processed_images = []
for i,img in enumerate(images):
orig_size = np.array([images[i].width, images[i].height])
boxes_xy[i] = boxes_xy[i] / orig_size
boxes_wh[i] = boxes_wh[i] / orig_size
images_i = images[i].resize(img_shape, PIL.Image.BICUBIC)
images_i = np.array(images_i, dtype=np.float)
processed_images.append(images_i/255)
boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]
# find the max number of boxes
max_boxes = 0
for boxz in boxes:
if boxz.shape[0] > max_boxes:
max_boxes = boxz.shape[0]
# add zero pad for training
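        # (A common box count lets the per-image box lists stack into a single
        # ndarray per batch; the all-zero rows act as "no box" entries.)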
for i, boxz in enumerate(boxes):
if boxz.shape[0] < max_boxes:
zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
boxes[i] = np.vstack((boxz, zero_padding))
return np.array(processed_images), np.array(boxes)
else:
processed_images = [resize_image(i,img_shape[0],img_shape[1],False) for i in images]
processed_images = [np.array(image, dtype=np.float) for image in processed_images]
processed_images = [image/255. for image in processed_images]
return np.array(processed_images)
def process_data(images, boxes=None):
'''processes the data'''
#images = [PIL.Image.fromarray(i) for i in images]
images = [PIL.Image.open(io.BytesIO(i)) for i in images]
orig_size = np.array([images[0].width, images[0].height])
orig_size = np.expand_dims(orig_size, axis=0)
print(type(images[0]))
# Image preprocessing.
processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images]
#processed_images = [resize_image(i,416,416,False) for i in images]
processed_images = [np.array(image, dtype=np.float) for image in processed_images]
processed_images = [image/255. for image in processed_images]
if boxes is not None:
# Box preprocessing.
# Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max.
boxes = [box.reshape((-1, 5)) for box in boxes]
        # Get extents as y_min, x_min, y_max, x_max, class for comparison with
# model output.
#boxes_extents = [box[:, [2, 1, 4, 3, 0]] for box in boxes]
# Get box parameters as x_center, y_center, box_width, box_height, class.
boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]
boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]
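        # Worked example: for a 416x416 source image, a raw box
        # [class, x_min, y_min, x_max, y_max] = [0, 100, 50, 300, 250] becomes
        # center (200, 150) with size (200, 200), which normalizes to
        # xy ~ (0.481, 0.361) and wh ~ (0.481, 0.481).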
boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]
# find the max number of boxes
max_boxes = 0
for boxz in boxes:
if boxz.shape[0] > max_boxes:
max_boxes = boxz.shape[0]
# add zero pad for training
for i, boxz in enumerate(boxes):
if boxz.shape[0] < max_boxes:
zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
boxes[i] = np.vstack((boxz, zero_padding))
return np.array(processed_images), np.array(boxes)
else:
return np.array(processed_images)
def get_detector_mask(boxes, anchors):
'''
Precompute detectors_mask and matching_true_boxes for training.
Detectors mask is 1 for each spatial position in the final conv layer and
anchor that should be active for the given boxes and 0 otherwise.
Matching true boxes gives the regression targets for the ground truth box
that caused a detector to be active or 0 otherwise.
'''
detectors_mask = [0 for i in range(len(boxes))]
matching_true_boxes = [0 for i in range(len(boxes))]
for i, box in enumerate(boxes):
detectors_mask[i], matching_true_boxes[i] = preprocess_true_boxes(box, anchors, [416, 416])
return np.array(detectors_mask), np.array(matching_true_boxes)
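# With 416x416 inputs and 5 anchors this yields, per image, detectors_mask of
# shape (13, 13, 5, 1) and matching_true_boxes of shape (13, 13, 5, 5),
# matching detectors_mask_shape/matching_boxes_shape in create_model below.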
def create_model(anchors, class_names, load_pretrained=True, freeze_body=True):
'''
returns the body of the model and the model
# Params:
load_pretrained: whether or not to load the pretrained model or initialize all weights
freeze_body: whether or not to freeze all weights except for the last layer's
# Returns:
model_body: YOLOv2 with new output layer
model: YOLOv2 with custom loss Lambda layer
'''
detectors_mask_shape = (13, 13, 5, 1)
matching_boxes_shape = (13, 13, 5, 5)
# Create model input layers.
image_input = Input(shape=(416, 416, 3))
boxes_input = Input(shape=(None, 5))
detectors_mask_input = Input(shape=detectors_mask_shape)
matching_boxes_input = Input(shape=matching_boxes_shape)
# Create model body.
yolo_model = yolo_body(image_input, len(anchors), len(class_names))
topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output)
if load_pretrained:
# Save topless yolo:
topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5')
if not os.path.exists(topless_yolo_path):
print("CREATING TOPLESS WEIGHTS FILE")
yolo_path = os.path.join('model_data', 'yolo.h5')
model_body = load_model(yolo_path)
model_body = Model(model_body.inputs, model_body.layers[-2].output)
model_body.save_weights(topless_yolo_path)
topless_yolo.load_weights(topless_yolo_path)
if freeze_body:
for layer in topless_yolo.layers:
layer.trainable = False
final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear',kernel_regularizer= regularizers.l2(5e-4))(topless_yolo.output)
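    # Output channels = num_anchors * (5 + num_classes): for each of the 5
    # anchors at every grid cell, the layer predicts x, y, w, h, objectness
    # plus one score per class.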
model_body = Model(image_input, final_layer)
# Place model loss on CPU to reduce GPU memory usage.
with tf.device('/cpu:0'):
# TODO: Replace Lambda with custom Keras layer for loss.
model_loss = Lambda(
yolo_loss,
output_shape=(1, ),
name='yolo_loss',
arguments={'anchors': anchors,
'num_classes': len(class_names)})([
model_body.output, boxes_input,
detectors_mask_input, matching_boxes_input
])
model = Model(
[model_body.input, boxes_input, detectors_mask_input,
matching_boxes_input], model_loss)
return model_body, model
def train(model, class_names, anchors, dataset):#image_data, boxes, detectors_mask, matching_true_boxes, validation_split=0.1):
'''
retrain/fine-tune the model
logs training with tensorboard
saves training weights in current directory
    best weights according to val_loss are saved as trained_stage_3_best.h5
'''
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
}) # This is a hack to use the custom loss function in the last layer.
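    # The actual YOLO loss is computed inside the graph by the 'yolo_loss'
    # Lambda layer, so the Keras-level loss simply forwards y_pred; the
    # training targets are dummy zeros (see the commented-out fit call below).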
logging = TensorBoard()#log_dir='./train_logs', histogram_freq=1, write_graph=False, write_images=True)
checkpoint = ModelCheckpoint("trained_stage_3_best.h5", monitor='val_loss',
save_weights_only=True, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')
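    # Training runs in three stages: (1) 5 epochs with the pretrained body
    # frozen, (2) 30 epochs with all layers trainable, and (3) 30 more epochs
    # with checkpointing and early stopping; weights are saved after each stage.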
batch_size = 8
dataTrain = dataset['train']
dataVal= dataset['val']
train_set_size =dataTrain.attrs['dataset_size']
val_set_size =dataVal.attrs['dataset_size']
training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
# model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
# np.zeros(len(image_data)),
# validation_split=validation_split,
# batch_size=8,
# epochs=5,
# callbacks=[logging])
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=5,verbose = 1, callbacks=[logging])
model.save_weights('trained_stage_1.h5')
model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)
model.load_weights('trained_stage_1.h5')
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
}) # This is a hack to use the custom loss function in the last layer.
# model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
# np.zeros(len(image_data)),
# validation_split=validation_split,
# batch_size=8,
# epochs=30,
# callbacks=[logging])
training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=30,verbose = 1, callbacks=[logging])
model.save_weights('trained_stage_2.h5')
training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=30,verbose = 1, callbacks=[logging, checkpoint, early_stopping])
# model.fit([image_data, boxes, detectors_mask, matching_true_boxes],
# np.zeros(len(image_data)),
# validation_split=validation_split,
# batch_size=8,
# epochs=30,
# callbacks=[logging, checkpoint, early_stopping])
model.save_weights('trained_stage_3.h5')
def draw(model_body, class_names, anchors, image_data, image_set='val',
weights_name='trained_stage_3_best.h5', out_path="output_images", save_all=True):
'''
Draw bounding boxes on image data
'''
if image_set == 'train':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[:int(len(image_data)*.9)]])
elif image_set == 'val':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[int(len(image_data)*.9):]])
elif image_set == 'all':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data])
else:
ValueError("draw argument image_set must be 'train', 'val', or 'all'")
# model.load_weights(weights_name)
print(image_data.shape)
model_body.load_weights(weights_name)
# Create output variables for prediction.
yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs, input_image_shape, score_threshold=0.7, iou_threshold=0.7)
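    # yolo_eval filters detections by confidence (score_threshold=0.7) and
    # applies non-max suppression with IoU threshold 0.7 before returning
    # boxes, scores and class indices.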
# Run prediction on overfit image.
sess = K.get_session() # TODO: Remove dependence on Tensorflow session.
if not os.path.exists(out_path):
os.makedirs(out_path)
for i in range(len(image_data)):
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
model_body.input: image_data[i],
input_image_shape: [image_data.shape[2], image_data.shape[3]],
K.learning_phase(): 0
})
print('Found {} boxes for image.'.format(len(out_boxes)))
print(out_boxes)
# Plot image with predicted boxes.
image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
class_names, out_scores,out_path+"\\"+str(i)+'.jpg')
# Save the image:
if save_all :
image = PIL.Image.fromarray(image_with_boxes)
image.save(os.path.join(out_path,str(i)+'.jpg'))
# To display (pauses the program):
plt.imshow(image_with_boxes, interpolation='nearest')
plt.show()
if __name__ == '__main__':
args = argparser.parse_args()
_main(args)
| 39.176611
| 157
| 0.648005
|
import argparse
import os
import warnings
from PIL import ImageOps
import matplotlib.pyplot as plt
import numpy as np
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from keras import regularizers
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping
from yad2k.models.keras_yolo import (preprocess_true_boxes, yolo_body,
yolo_eval, yolo_head, yolo_loss)
from yad2k.utils.draw_boxes import draw_boxes
import h5py
import io
from yolo_data_gen import *
argparser = argparse.ArgumentParser(
description="Retrain or 'fine-tune' a pretrained YOLOv2 model for your own data.")
argparser.add_argument(
'-d',
'--data_path',
help="path to numpy data file (.npz) containing np.object array 'boxes' and np.uint8 array 'images'",
default=os.path.join('..', 'DATA', 'underwater_data.npz'))
argparser.add_argument(
'-a',
'--anchors_path',
help='path to anchors file, defaults to yolo_anchors.txt',
default=os.path.join('model_data', 'yolo_anchors.txt'))
argparser.add_argument(
'-c',
'--classes_path',
help='path to classes file, defaults to pascal_classes.txt',
default=os.path.join('..', 'DATA', 'underwater_classes.txt'))
YOLO_ANCHORS = np.array(
((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),
(7.88282, 3.52778), (9.77052, 9.16828)))
def _main(args):
data_path = os.path.expanduser(args.data_path)
classes_path = os.path.expanduser(args.classes_path)
anchors_path = os.path.expanduser(args.anchors_path)
class_names = get_classes(classes_path)
anchors = get_anchors(anchors_path)
dataset = h5py.File(data_path,'r+')
anchors = YOLO_ANCHORS
model_body, model = create_model(anchors, class_names)
train( model, class_names, anchors, dataset)
def get_classes(classes_path):
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
if os.path.isfile(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
else:
Warning("Could not open anchors file, using default.")
return YOLO_ANCHORS
def scale_data(images, boxes=None):
img_shape = (416,416)
images = [PIL.Image.open(io.BytesIO(i)) for i in images]
if boxes is not None:
boxes = [box.reshape((-1, 5)) for box in boxes]
boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
processed_images = []
for i,img in enumerate(images):
orig_size = np.array([images[i].width, images[i].height])
boxes_xy[i] = boxes_xy[i] / orig_size
boxes_wh[i] = boxes_wh[i] / orig_size
images_i = images[i].resize(img_shape, PIL.Image.BICUBIC)
images_i = np.array(images_i, dtype=np.float)
processed_images.append(images_i/255)
boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]
max_boxes = 0
for boxz in boxes:
if boxz.shape[0] > max_boxes:
max_boxes = boxz.shape[0]
for i, boxz in enumerate(boxes):
if boxz.shape[0] < max_boxes:
zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
boxes[i] = np.vstack((boxz, zero_padding))
return np.array(processed_images), np.array(boxes)
else:
processed_images = [resize_image(i,img_shape[0],img_shape[1],False) for i in images]
processed_images = [np.array(image, dtype=np.float) for image in processed_images]
processed_images = [image/255. for image in processed_images]
return np.array(processed_images)
def process_data(images, boxes=None):
images = [PIL.Image.open(io.BytesIO(i)) for i in images]
orig_size = np.array([images[0].width, images[0].height])
orig_size = np.expand_dims(orig_size, axis=0)
print(type(images[0]))
processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images]
processed_images = [np.array(image, dtype=np.float) for image in processed_images]
processed_images = [image/255. for image in processed_images]
if boxes is not None:
boxes = [box.reshape((-1, 5)) for box in boxes]
boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes]
boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes]
boxes_xy = [boxxy / orig_size for boxxy in boxes_xy]
boxes_wh = [boxwh / orig_size for boxwh in boxes_wh]
boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)]
max_boxes = 0
for boxz in boxes:
if boxz.shape[0] > max_boxes:
max_boxes = boxz.shape[0]
for i, boxz in enumerate(boxes):
if boxz.shape[0] < max_boxes:
zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32)
boxes[i] = np.vstack((boxz, zero_padding))
return np.array(processed_images), np.array(boxes)
else:
return np.array(processed_images)
def get_detector_mask(boxes, anchors):
detectors_mask = [0 for i in range(len(boxes))]
matching_true_boxes = [0 for i in range(len(boxes))]
for i, box in enumerate(boxes):
detectors_mask[i], matching_true_boxes[i] = preprocess_true_boxes(box, anchors, [416, 416])
return np.array(detectors_mask), np.array(matching_true_boxes)
def create_model(anchors, class_names, load_pretrained=True, freeze_body=True):
detectors_mask_shape = (13, 13, 5, 1)
matching_boxes_shape = (13, 13, 5, 5)
image_input = Input(shape=(416, 416, 3))
boxes_input = Input(shape=(None, 5))
detectors_mask_input = Input(shape=detectors_mask_shape)
matching_boxes_input = Input(shape=matching_boxes_shape)
yolo_model = yolo_body(image_input, len(anchors), len(class_names))
topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output)
if load_pretrained:
topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5')
if not os.path.exists(topless_yolo_path):
print("CREATING TOPLESS WEIGHTS FILE")
yolo_path = os.path.join('model_data', 'yolo.h5')
model_body = load_model(yolo_path)
model_body = Model(model_body.inputs, model_body.layers[-2].output)
model_body.save_weights(topless_yolo_path)
topless_yolo.load_weights(topless_yolo_path)
if freeze_body:
for layer in topless_yolo.layers:
layer.trainable = False
final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear',kernel_regularizer= regularizers.l2(5e-4))(topless_yolo.output)
model_body = Model(image_input, final_layer)
with tf.device('/cpu:0'):
model_loss = Lambda(
yolo_loss,
output_shape=(1, ),
name='yolo_loss',
arguments={'anchors': anchors,
'num_classes': len(class_names)})([
model_body.output, boxes_input,
detectors_mask_input, matching_boxes_input
])
model = Model(
[model_body.input, boxes_input, detectors_mask_input,
matching_boxes_input], model_loss)
return model_body, model
def train(model, class_names, anchors, dataset):
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
})
logging = TensorBoard()
checkpoint = ModelCheckpoint("trained_stage_3_best.h5", monitor='val_loss',
save_weights_only=True, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, verbose=1, mode='auto')
batch_size = 8
dataTrain = dataset['train']
dataVal= dataset['val']
train_set_size =dataTrain.attrs['dataset_size']
val_set_size =dataVal.attrs['dataset_size']
training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=5,verbose = 1, callbacks=[logging])
model.save_weights('trained_stage_1.h5')
model_body, model = create_model(anchors, class_names, load_pretrained=False, freeze_body=False)
model.load_weights('trained_stage_1.h5')
model.compile(
optimizer='adam', loss={
'yolo_loss': lambda y_true, y_pred: y_pred
})
training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=30,verbose = 1, callbacks=[logging])
model.save_weights('trained_stage_2.h5')
training_generator = DataGenerator(dataTrain, train_set_size,batch_size=batch_size)
validation_generator = DataGenerator(dataVal, val_set_size,batch_size=batch_size,is_train=0)
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
use_multiprocessing=False,
epochs=30,verbose = 1, callbacks=[logging, checkpoint, early_stopping])
model.save_weights('trained_stage_3.h5')
def draw(model_body, class_names, anchors, image_data, image_set='val',
weights_name='trained_stage_3_best.h5', out_path="output_images", save_all=True):
if image_set == 'train':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[:int(len(image_data)*.9)]])
elif image_set == 'val':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data[int(len(image_data)*.9):]])
elif image_set == 'all':
image_data = np.array([np.expand_dims(image, axis=0)
for image in image_data])
else:
ValueError("draw argument image_set must be 'train', 'val', or 'all'")
print(image_data.shape)
model_body.load_weights(weights_name)
yolo_outputs = yolo_head(model_body.output, anchors, len(class_names))
input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = yolo_eval(
yolo_outputs, input_image_shape, score_threshold=0.7, iou_threshold=0.7)
sess = K.get_session()
if not os.path.exists(out_path):
os.makedirs(out_path)
for i in range(len(image_data)):
out_boxes, out_scores, out_classes = sess.run(
[boxes, scores, classes],
feed_dict={
model_body.input: image_data[i],
input_image_shape: [image_data.shape[2], image_data.shape[3]],
K.learning_phase(): 0
})
print('Found {} boxes for image.'.format(len(out_boxes)))
print(out_boxes)
image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,
class_names, out_scores,out_path+"\\"+str(i)+'.jpg')
if save_all :
image = PIL.Image.fromarray(image_with_boxes)
image.save(os.path.join(out_path,str(i)+'.jpg'))
plt.imshow(image_with_boxes, interpolation='nearest')
plt.show()
if __name__ == '__main__':
args = argparser.parse_args()
_main(args)
| true
| true
|
f7156a419031a548f8c6765c306917d7d0a579d2
| 399
|
py
|
Python
|
chilled-vibes.py
|
bcgreen24/ten-lines-or-less
|
7a34ff7d7222fd3946e9cbb418afc992bc84e5e5
|
[
"MIT"
] | 44
|
2018-08-15T08:32:43.000Z
|
2022-02-15T20:25:03.000Z
|
chilled-vibes.py
|
bcgreen24/ten-lines-or-less
|
7a34ff7d7222fd3946e9cbb418afc992bc84e5e5
|
[
"MIT"
] | null | null | null |
chilled-vibes.py
|
bcgreen24/ten-lines-or-less
|
7a34ff7d7222fd3946e9cbb418afc992bc84e5e5
|
[
"MIT"
] | 7
|
2018-09-08T20:05:58.000Z
|
2021-11-22T12:46:15.000Z
|
Clock.bpm=100; Scale.default="minor"
p1 >> pulse([0,-1,-2,-3], dur=8, lpf=600, lpr=0.2, crush=8) + (0,2,4,const(6))
p3 >> blip(p1.pitch, dur=8, sus=4, room=1, oct=6) + [0,0,0,P*(2,4,3,-1)]
p2 >> saw(P[:5][:9][:16], dur=1/4, oct=var([3,4],[12,4])).penta()
d1 >> play("(x )( x)o{ vx[xx]}", crush=16, rate=.8).every([24,5,3], "stutter", 4, dur=3)
d2 >> play("<-s>< ~*~>").every(30.5, "jump", cycle=32)
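# A brief gloss, assuming standard FoxDot semantics: Clock.bpm and
# Scale.default set global tempo and scale; "p1 >> pulse(...)" routes a degree
# pattern to a synth player; play() sequences samples, one character per
# sound; .every(n, "stutter"/"jump", ...) re-applies a pattern method every n
# beats.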
| 57
| 88
| 0.526316
|
Clock.bpm=100; Scale.default="minor"
p1 >> pulse([0,-1,-2,-3], dur=8, lpf=600, lpr=0.2, crush=8) + (0,2,4,const(6))
p3 >> blip(p1.pitch, dur=8, sus=4, room=1, oct=6) + [0,0,0,P*(2,4,3,-1)]
p2 >> saw(P[:5][:9][:16], dur=1/4, oct=var([3,4],[12,4])).penta()
d1 >> play("(x )( x)o{ vx[xx]}", crush=16, rate=.8).every([24,5,3], "stutter", 4, dur=3)
d2 >> play("<-s>< ~*~>").every(30.5, "jump", cycle=32)
| true
| true
|