hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c45d8033a8532a5310eb7b5b6868e1725ae9e8a | 1,188 | py | Python | profiles_api/serializers.py | vikrantgautam/profiles-rest-api | 68abd9398f04de6eb87357b997dd438b6503f8ea | [
"MIT"
] | null | null | null | profiles_api/serializers.py | vikrantgautam/profiles-rest-api | 68abd9398f04de6eb87357b997dd438b6503f8ea | [
"MIT"
] | null | null | null | profiles_api/serializers.py | vikrantgautam/profiles-rest-api | 68abd9398f04de6eb87357b997dd438b6503f8ea | [
"MIT"
] | null | null | null | from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
    """Minimal serializer exposing a single ``name`` field.

    Used to exercise the test APIView; accepts names of at most ten
    characters.
    """

    # Short free-text field; the cap keeps test payloads tiny.
    name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
    """ModelSerializer for ``UserProfile`` accounts.

    Exposes id, email, name and a write-only password.  Creation is
    routed through the model manager's ``create_user`` so the password
    is hashed rather than stored verbatim.
    """

    class Meta:
        model = models.UserProfile
        fields = ('id', 'email', 'name', 'password')
        # Never echo the password back, and render it as a password
        # input in the browsable API.
        extra_kwargs = {
            'password': {'write_only': True, 'style': {'input_type': 'password'}},
        }

    def create(self, validated_data):
        """Create a new user via the custom manager and return it."""
        return models.UserProfile.objects.create_user(
            email=validated_data['email'],
            name=validated_data['name'],
            password=validated_data['password'],
        )
class ProfileFeedItemSerializer(serializers.ModelSerializer):
    """Serializes profile feed items.

    ``user_profile`` is read-only so the authenticated user is always
    set server-side (e.g. in ``perform_create``) and cannot be spoofed
    in the request body.
    """

    class Meta:
        model = models.ProfileFeedItem
        fields = ('id', 'user_profile', 'status_text', 'created_on')
        # BUG FIX: this was misspelled ``extra_kwards``, which DRF
        # silently ignores -- leaving user_profile writable by clients.
        extra_kwargs = {'user_profile': {'read_only': True}}
| 28.285714 | 68 | 0.625421 | from rest_framework import serializers
from profiles_api import models
class HelloSerializer(serializers.Serializer):
name = serializers.CharField(max_length=10)
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = models.UserProfile
fields = ('id', 'email', 'name', 'password')
extra_kwargs = {
'password': {
'write_only': True,
'style': {'input_type': 'password'}
}
}
def create(self, validated_data):
user = models.UserProfile.objects.create_user(
email=validated_data['email'],
name=validated_data['name'],
password=validated_data['password'],
)
return user
class ProfileFeedItemSerializer(serializers.ModelSerializer):
class Meta:
model = models.ProfileFeedItem
fields = ('id', 'user_profile', 'status_text', 'created_on')
extra_kwards = {'user_profile': {'read_only': True}}
| true | true |
1c45d82e9f5994d25e1d89ebc33ba778c614bf38 | 1,850 | py | Python | weasyl/cron.py | theSeracen/weasyl | c13b4f61f559ce44bfaee027ffc59a1379d25f3e | [
"Apache-2.0"
] | null | null | null | weasyl/cron.py | theSeracen/weasyl | c13b4f61f559ce44bfaee027ffc59a1379d25f3e | [
"Apache-2.0"
] | 148 | 2021-03-16T07:40:05.000Z | 2022-03-21T08:14:46.000Z | weasyl/cron.py | theSeracen/weasyl | c13b4f61f559ce44bfaee027ffc59a1379d25f3e | [
"Apache-2.0"
] | null | null | null | import arrow
from twisted.python import log
from weasyl.define import engine
from weasyl import index, submission
def run_periodic_tasks():
    """Run the scheduled maintenance tasks that are due this minute.

    Takes a transaction-scoped advisory lock so only one process runs
    the tasks at a time, skips the run entirely if another run already
    happened within the same minute, then executes every task whose
    schedule matches the current UTC time.
    """
    # Current UTC time drives all of the schedule checks below.
    now = arrow.utcnow()
    db = engine.connect()
    with db.begin():
        # Only one worker may run the cron tasks at a time.
        if not db.scalar("SELECT pg_try_advisory_xact_lock(0)"):
            return
        last_run = arrow.get(db.scalar("SELECT last_run FROM cron_runs"))
        # Bail out unless roughly a minute has passed since the last run.
        if not last_run or now < last_run.replace(second=59):
            return

        # Recache the latest submissions (every 2 minutes).
        if now.minute % 2 == 0:
            index.recent_submissions.refresh()
            log.msg('refreshed recent submissions')

        # Recalculate recently popular submissions (every 10 minutes).
        if now.minute % 10 == 0:
            submission.select_recently_popular.refresh()
            log.msg('refreshed recently popular submissions')

        # Clear out the views table (every 15 minutes).
        if now.minute % 15 == 0:
            db.execute("DELETE FROM views")
            log.msg('cleared views')

        # Daily housekeeping at 00:00 UTC.
        if now.hour == 0 and now.minute == 0:
            # Email reset requests expire after two days.
            db.execute("""
                DELETE FROM emailverify
                WHERE createtimestamp < (NOW() - INTERVAL '2 days')
            """)
            log.msg('cleared stale email change records')

            # Unfinished account registrations expire after two days.
            db.execute("""
                DELETE FROM logincreate
                WHERE created_at < (NOW() - INTERVAL '2 days')
            """)
            log.msg('cleared stale account creation records')

        db.execute("UPDATE cron_runs SET last_run = %(now)s", now=now.naive)
| 33.035714 | 76 | 0.58973 | import arrow
from twisted.python import log
from weasyl.define import engine
from weasyl import index, submission
def run_periodic_tasks():
now = arrow.utcnow()
db = engine.connect()
with db.begin():
locked = db.scalar("SELECT pg_try_advisory_xact_lock(0)")
if not locked:
return
last_run = arrow.get(db.scalar("SELECT last_run FROM cron_runs"))
if not last_run or now < last_run.replace(second=59):
return
if now.minute % 2 == 0:
index.recent_submissions.refresh()
log.msg('refreshed recent submissions')
if now.minute % 10 == 0:
submission.select_recently_popular.refresh()
log.msg('refreshed recently popular submissions')
if now.minute % 15 == 0:
db.execute("DELETE FROM views")
log.msg('cleared views')
if now.hour == 0 and now.minute == 0:
db.execute("""
DELETE FROM emailverify
WHERE createtimestamp < (NOW() - INTERVAL '2 days')
""")
log.msg('cleared stale email change records')
db.execute("""
DELETE FROM logincreate
WHERE created_at < (NOW() - INTERVAL '2 days')
""")
log.msg('cleared stale account creation records')
db.execute("UPDATE cron_runs SET last_run = %(now)s", now=now.naive)
| true | true |
1c45d908aa737c8cc2b138b76e17ef1a9a3d56e4 | 871 | py | Python | evennia/commands/default/cmdset_unloggedin.py | fermuch/evennia | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | [
"BSD-3-Clause"
] | 3 | 2019-08-08T16:58:25.000Z | 2019-10-12T07:31:36.000Z | evennia/commands/default/cmdset_unloggedin.py | fermuch/evennia | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | [
"BSD-3-Clause"
] | 9 | 2019-09-06T18:21:59.000Z | 2022-01-13T03:04:11.000Z | evennia/commands/default/cmdset_unloggedin.py | fermuch/evennia | 8961baa0a5b9b5419f864a144f080acc68a7ad0f | [
"BSD-3-Clause"
] | 2 | 2019-09-02T08:39:24.000Z | 2019-09-02T18:39:32.000Z | """
This module describes the unlogged state of the default game.
The setting STATE_UNLOGGED should be set to the python path
of the state instance in this module.
"""
from evennia.commands.cmdset import CmdSet
from evennia.commands.default import unloggedin
class UnloggedinCmdSet(CmdSet):
    """Command set active before a session has authenticated."""

    key = "DefaultUnloggedin"
    priority = 0

    def at_cmdset_creation(self):
        """Populate the cmdset with the pre-login commands."""
        # Instantiate and add each command; order matches the original
        # hand-written add() sequence.
        for command_class in (
            unloggedin.CmdUnconnectedConnect,
            unloggedin.CmdUnconnectedCreate,
            unloggedin.CmdUnconnectedQuit,
            unloggedin.CmdUnconnectedLook,
            unloggedin.CmdUnconnectedHelp,
            unloggedin.CmdUnconnectedEncoding,
            unloggedin.CmdUnconnectedScreenreader,
            unloggedin.CmdUnconnectedInfo,
        ):
            self.add(command_class())
| 32.259259 | 61 | 0.72101 | from evennia.commands.cmdset import CmdSet
from evennia.commands.default import unloggedin
class UnloggedinCmdSet(CmdSet):
key = "DefaultUnloggedin"
priority = 0
def at_cmdset_creation(self):
self.add(unloggedin.CmdUnconnectedConnect())
self.add(unloggedin.CmdUnconnectedCreate())
self.add(unloggedin.CmdUnconnectedQuit())
self.add(unloggedin.CmdUnconnectedLook())
self.add(unloggedin.CmdUnconnectedHelp())
self.add(unloggedin.CmdUnconnectedEncoding())
self.add(unloggedin.CmdUnconnectedScreenreader())
self.add(unloggedin.CmdUnconnectedInfo())
| true | true |
1c45db6d3cecdc6f61c22f43e5bb581f20cf7a6b | 2,437 | py | Python | naeval/ner/models/tomita.py | sdspieg/naeval | 52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f | [
"MIT"
] | 36 | 2020-03-22T09:37:10.000Z | 2022-01-17T14:49:30.000Z | naeval/ner/models/tomita.py | sdspieg/naeval | 52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f | [
"MIT"
] | 11 | 2020-03-25T09:39:45.000Z | 2020-08-16T05:37:02.000Z | naeval/ner/models/tomita.py | sdspieg/naeval | 52c4a508bf212b95d4e610cfe1b5e23b8ca94d2f | [
"MIT"
] | 6 | 2020-05-16T05:52:04.000Z | 2022-01-16T06:45:29.000Z |
from naeval.const import TOMITA, PER
from naeval.record import Record
from naeval.io import parse_xml
from naeval.span import Span
from ..adapt import adapt_tomita
from ..markup import Markup
from .base import Model, post
# Docker image bundling the Tomita parser with the algfio fact grammars.
TOMITA_IMAGE = 'natasha/tomita-algfio'
# Port the Tomita HTTP wrapper listens on inside the container.
TOMITA_CONTAINER_PORT = 8080
# Endpoint template; filled in by call_tomita().
TOMITA_URL = 'http://{host}:{port}/'
class TomitaFact(Record):
    """A single person mention extracted by Tomita.

    ``start``/``stop`` are character offsets into the source text
    (``stop`` is exclusive); the remaining fields are the recognised
    name parts.
    """

    __attributes__ = [
        'start',
        'stop',
        'first',
        'last',
        'middle',
        'known_surname',
    ]

    def __init__(self, start, stop,
                 first, last, middle, known_surname):
        """Store the span boundaries and the recognised name parts."""
        self.start = start
        self.stop = stop
        self.first = first
        self.last = last
        self.middle = middle
        self.known_surname = known_surname
class TomitaMarkup(Markup):
    """Markup holding Tomita person spans for a piece of text."""

    @property
    def adapted(self):
        """Return this markup converted to the common evaluation format."""
        return adapt_tomita(self)
def parse_facts(xml):
    """Yield a TomitaFact for every <Person> element under ``xml``.

    ``xml`` is the <facts> element of a Tomita response, or None (in
    which case nothing is yielded).
    """
    if xml is None:
        return
    for person in xml.findall('Person'):
        # Character span of the mention: pos is the start offset and
        # len the length, so the end offset is their sum.
        begin = int(person.get('pos'))
        end = begin + int(person.get('len'))

        surname = person.find('Name_Surname')
        if surname is not None:
            # Empty surname values are normalised to None.
            surname = surname.get('val') or None

        first_name = person.find('Name_FirstName')
        if first_name is not None:
            first_name = first_name.get('val')

        patronymic = person.find('Name_Patronymic')
        if patronymic is not None:
            patronymic = patronymic.get('val')

        # "SurnameIsDictionary" arrives as "0"/"1"; convert to bool.
        known = person.find('Name_SurnameIsDictionary')
        if known is not None:
            known = bool(int(known.get('val')))

        yield TomitaFact(begin, end, first_name, surname, patronymic, known)
def fact_spans(facts):
    """Convert person facts into PER-labelled spans."""
    for item in facts:
        yield Span(item.start, item.stop, PER)
def parse_tomita(text, xml):
    """Build a TomitaMarkup from a parsed <document> response element."""
    assert xml.tag == 'document', xml.tag
    spans = list(fact_spans(parse_facts(xml.find('facts'))))
    return TomitaMarkup(text, spans)
def call_tomita(text, host, port):
    """POST ``text`` to a running Tomita container and parse the reply."""
    endpoint = TOMITA_URL.format(host=host, port=port)
    # Tomita expects the raw UTF-8 bytes of the text as the body.
    response = post(endpoint, data=text.encode('utf8'))
    return parse_tomita(text, parse_xml(response.text))
class TomitaModel(Model):
    """Dockerised Tomita person-name extractor."""

    name = TOMITA
    image = TOMITA_IMAGE
    container_port = TOMITA_CONTAINER_PORT

    def __call__(self, text):
        """Annotate ``text`` by querying the running container."""
        return call_tomita(text, self.host, self.port)
| 24.867347 | 61 | 0.622487 |
from naeval.const import TOMITA, PER
from naeval.record import Record
from naeval.io import parse_xml
from naeval.span import Span
from ..adapt import adapt_tomita
from ..markup import Markup
from .base import Model, post
TOMITA_IMAGE = 'natasha/tomita-algfio'
TOMITA_CONTAINER_PORT = 8080
TOMITA_URL = 'http://{host}:{port}/'
class TomitaFact(Record):
__attributes__ = [
'start', 'stop',
'first', 'last', 'middle', 'known_surname'
]
def __init__(self, start, stop,
first, last, middle, known_surname):
self.start = start
self.stop = stop
self.first = first
self.last = last
self.middle = middle
self.known_surname = known_surname
class TomitaMarkup(Markup):
@property
def adapted(self):
return adapt_tomita(self)
def parse_facts(xml):
if xml is None:
return
for item in xml.findall('Person'):
start = int(item.get('pos'))
size = int(item.get('len'))
stop = start + size
last = item.find('Name_Surname')
if last is not None:
last = last.get('val') or None
first = item.find('Name_FirstName')
if first is not None:
first = first.get('val')
middle = item.find('Name_Patronymic')
if middle is not None:
middle = middle.get('val')
known_surname = item.find('Name_SurnameIsDictionary')
if known_surname is not None:
known_surname = int(known_surname.get('val'))
known_surname = bool(known_surname)
yield TomitaFact(
start, stop,
first, last, middle, known_surname
)
def fact_spans(facts):
for fact in facts:
yield Span(fact.start, fact.stop, PER)
def parse_tomita(text, xml):
assert xml.tag == 'document', xml.tag
facts = xml.find('facts')
facts = parse_facts(facts)
spans = list(fact_spans(facts))
return TomitaMarkup(text, spans)
def call_tomita(text, host, port):
url = TOMITA_URL.format(
host=host,
port=port
)
payload = text.encode('utf8')
response = post(url, data=payload)
xml = parse_xml(response.text)
return parse_tomita(text, xml)
class TomitaModel(Model):
name = TOMITA
image = TOMITA_IMAGE
container_port = TOMITA_CONTAINER_PORT
def __call__(self, text):
return call_tomita(text, self.host, self.port)
| true | true |
1c45dbebdbe4e22104a31a6023c49fc2d26290c2 | 10,611 | py | Python | src/cargrid.py | Potgront/ABM | 76fef2c7ded7e362ecf72fffd82512b9d7926700 | [
"BSD-3-Clause"
] | null | null | null | src/cargrid.py | Potgront/ABM | 76fef2c7ded7e362ecf72fffd82512b9d7926700 | [
"BSD-3-Clause"
] | null | null | null | src/cargrid.py | Potgront/ABM | 76fef2c7ded7e362ecf72fffd82512b9d7926700 | [
"BSD-3-Clause"
] | null | null | null | """
Module which defines the car agents
"""
from mesa import Agent
import numpy as np
class Car(Agent):
    """
    Class which defines the individual car agents.

    Each car has a specific unique_id and an index which is the
    unique_id modulo the resolution of the LaneSpace.

    Attributes:
        unique_id (int): An integer value which uniquely defines each agent
        model (obj): An instance of the model class
        index (int): The unique_id modulo the resolution of the LaneSpace
            defined in the model instance.
        pos (tuple): The position of the car agent. The first argument of the
            tuple defines the position along the horizontal continuous axis,
            the second value specifies the current lane of the car.
        max_speed (float): A constant value which defines the upper limit of
            the cars' speed.
        speed (float): A variable value which specifies the current speed of
            the car, cannot exceed the value defined in max_speed.
        agression (float): Defines the aggression of the car, between [0,1].
            (Attribute name keeps the original spelling for compatibility.)
            Results in the car switching lanes more often as it increases.
            Also makes acceleration more likely if possible.
        gap (float): Defines the space cars keep between each other, relative
            to their speeds, e.g. a value of 2 would result in a gap equal to
            twice the cars' speed, so 20 meters at a speed of 10 m/s.
            Higher aggression can occasionally override this specified gap.
        switch_delay (int): Defines the number of time steps a car waits
            between switching lanes. High aggression lowers this value.
        switched (int): Counter for the number of time steps since last
            switching a lane. Decreases by one each timestep and allows the
            car to switch lanes if at zero. Is set to the value of
            switch_delay if the car switches a lane.
    """
    # pylint: disable=too-many-instance-attributes
    # pylint: disable=too-many-arguments
    def __init__(self, unique_id, model, start_lane,
                 speed, agression, min_gap):
        """
        Args:
            unique_id (int): The unique id of the current agent, generated by
                agent scheduler.
            model (obj): Instance of the LaneSpace model
            start_lane (int): The lane in which the car should start.
            speed (float): Initial speed of the car, also used to set the
                maximum speed of the car.
            agression (float): Aggression of the car, bounded between [0,1].
            min_gap (float): The absolute minimum space the car should
                maintain, relative to its own speed.
        """
        super().__init__(unique_id, model)
        self.start_lane = start_lane
        self.index = self.unique_id % model.grid.length
        self.pos = (0.0, start_lane)
        # Maximum speed gets a random, aggression-scaled bonus.
        self.max_speed = speed + (abs(np.random.randn()) * agression)
        self.speed = self.max_speed
        self.agression = agression
        # More aggressive cars accept smaller gaps and shorter delays.
        self.gap = np.random.rand() / agression + min_gap
        self.switch_delay = int(5 / agression / self.model.time_step)
        self.switched = self.switch_delay

    def compute_pars(self, FRONT, BACK):
        """compute_pars
        Method which determines whether a car can switch to another lane or
        maintain its current speed.

        Args:
            FRONT (list): Positions of the cars in front of the current car,
                on the right, middle and left. In that specific order.
            BACK (list): Positions of the cars behind the current car, on the
                right, middle and left. In that specific order.
        Returns:
            can_left (bool): Can the car switch to the left?
            can_middle (bool): Can the car go forward?
            can_right (bool): Can the car switch to the right?
        """
        rf, mf, lf = FRONT  # right_front, middle_front, left_front
        rb, _, lb = BACK    # right_back, (middle_back unused), left_back

        # Can the car turn left?
        # The gap to the left-front car must exceed the car's speed times
        # its minimum gap; the gap to the left-back car only needs to be
        # half that, as that car is also moving forward. Also requires a
        # lane to the left and recovery from the previous lane switch.
        can_left = (lf - self.pos[0] > self.gap * self.speed and
                    self.pos[0] - lb > 0.5 * self.gap * self.speed and
                    self.pos[1] < (self.model.lanes - 1) and
                    self.switched == 0)

        # Can the car turn right?
        # Same gap requirements mirrored to the right, requires not being
        # in the rightmost lane and recovery from the previous switch.
        can_right = (rf - self.pos[0] > self.gap * self.speed and
                     self.pos[0] - rb > 0.5 * self.gap * self.speed and
                     self.pos[1] > 0 and
                     self.switched == 0)

        # Can the car go forward (to the middle)?
        # The gap ahead must exceed the minimum gap times current speed.
        can_middle = mf - self.pos[0] > self.gap * self.speed

        return can_left, can_middle, can_right

    def get_move(self):
        """
        Method which determines the best possible move of the car,
        dependent on the aggression, current speed, minimal gap, and
        positions of the six surrounding cars.

        This move is determined in a loop which first determines which
        moves are initially possible. It then uses the aggression of the
        car to decide if the car should keep right, overtake, maintain
        in the current lane, or slow down.

        Returns:
            Integer of the best possible move: -1 if right, 0 if forward,
            1 if left.
        """
        self.switched -= 1
        self.switched = max(0, self.switched)
        FRONT, BACK = self.model.grid.get_neighbors(self)
        rf, mf, lf = FRONT  # right_front, middle_front, left_front
        # NOTE: BACK is only consumed inside compute_pars().
        while True:
            # can_left, can_middle, can_right; recomputed each iteration
            # because self.speed may have been lowered below.
            cl, cm, cr = self.compute_pars(FRONT, BACK)
            if cm:  # Can i go forward at current speed?
                if cr and np.random.rand() > self.agression:
                    # Keep right if possible; probability decreases with
                    # increasing aggression.
                    self.switched = self.switch_delay
                    return -1
                if (self.speed < self.max_speed) and\
                        (np.random.rand() < self.agression):
                    # Speed up if slowed down, probability increases with
                    # aggression. Also overtake to the left if possible.
                    self.check_speed(mf - self.pos[0])
                    if cl:
                        return 1
                return 0
            if cl and cr:  # Can i go left and right?
                if (self.speed < self.max_speed) and\
                        (np.random.rand() < self.agression):
                    # Overtake on the left if slowed and aggression allows.
                    # Speed up relative to the aggression of the car.
                    self.speed += np.random.rand()/self.agression*self.model.time_step
                    self.switched = self.switch_delay
                    return 1
                if np.random.rand() > self.agression:
                    # Hold right if aggression is low; slow down a bit so
                    # as not to overtake on the right (scales with
                    # aggression).
                    self.speed -= np.random.rand()/self.agression*self.model.time_step
                    self.switched = self.switch_delay
                    return -1
                if rf > lf:
                    # Otherwise go to the lane with most space in front.
                    return -1
                return 1
            if cl and (np.random.rand() < self.agression):  # Can i go left?
                # Move to the left if aggression allows.
                self.switched = self.switch_delay
                return 1
            if cr:  # Can i go right?
                # Move right and slow down so as not to overtake on the
                # right; deceleration decreases with aggression, so high
                # aggression may still overtake on the right.
                self.switched = self.switch_delay
                self.speed -= np.random.rand()/self.agression*self.model.time_step
                return -1
            # Slow down if none of the moves are possible and retry all
            # possibilities, recomputing the boolean values each loop.
            self.speed -= np.random.rand()*self.model.time_step

    def check_speed(self, gap):
        """
        Check how much the car is slowed down and the gap to the car in
        front. Accelerate faster if aggression is high, gap is large, or
        the car is slowed down a lot.

        Args:
            gap (float): Gap to the car in front
        """
        diff = self.max_speed - self.speed
        space = (gap - self.speed) / self.speed / self.gap / self.agression
        # BUG FIX: np.log of a non-positive argument yields NaN/-inf plus
        # a RuntimeWarning. In either case max() falls back to the random
        # floor, so guard explicitly for the same result without warnings.
        headroom = diff * space
        log_term = np.log(headroom) if headroom > 0 else float('-inf')
        speedup = max(np.random.rand(), log_term) * self.model.time_step
        self.speed += speedup

    def step(self):
        """
        Method used by the mesa scheduler to advance each car.

        Obtains a move from the get_move method and applies this
        to the overall LaneSpace model. Also performs a global check
        that the car is not exceeding its maximum speed.
        """
        move = self.get_move()
        self.model.move(self, move)
        if self.speed > self.max_speed:
            self.speed -= np.random.rand()*self.model.time_step
| 43.310204 | 90 | 0.577985 | from mesa import Agent
import numpy as np
class Car(Agent):
def __init__(self, unique_id, model, start_lane,
speed, agression, min_gap):
super().__init__(unique_id, model)
self.start_lane = start_lane
self.index = self.unique_id % model.grid.length
self.pos = (0.0, start_lane)
self.max_speed = speed+(abs(np.random.randn())*agression)
self.speed = self.max_speed
self.agression = agression
self.gap = np.random.rand() / agression + min_gap
self.switch_delay = int(5 / agression / self.model.time_step)
self.switched = self.switch_delay
def compute_pars(self, FRONT, BACK):
rf, mf, lf = FRONT
rb, mb, lb = BACK
can_left = lf-self.pos[0] > self.gap * self.speed and\
self.pos[0]-lb > 0.5*self.gap * self.speed and\
self.pos[1] < (self.model.lanes - 1) and\
self.switched == 0
can_right = rf-self.pos[0] > self.gap * self.speed and\
self.pos[0]-rb > 0.5*self.gap * self.speed and\
self.pos[1] > 0 and\
self.switched == 0
can_middle = mf - self.pos[0] > self.gap*self.speed
return can_left, can_middle, can_right
def get_move(self):
self.switched -= 1
self.switched = max(0, self.switched)
FRONT, BACK = self.model.grid.get_neighbors(self)
rf, mf, lf = FRONT
rb, mb, lb = BACK
while True:
cl, cm, cr = self.compute_pars(FRONT, BACK)
if cm:
if cr and np.random.rand() > self.agression:
self.switched = self.switch_delay
return -1
if (self.speed < self.max_speed) and\
(np.random.rand() < self.agression):
self.check_speed(FRONT[1]-self.pos[0])
if cl:
return 1
return 0
if cl and cr:
if (self.speed < self.max_speed) and\
(np.random.rand() < self.agression):
self.speed += np.random.rand()/self.agression*self.model.time_step
self.switched = self.switch_delay
return 1
if np.random.rand() > self.agression:
self.speed -= np.random.rand()/self.agression*self.model.time_step
self.switched = self.switch_delay
return -1
if rf > lf:
return -1
return 1
if cl and (np.random.rand() < self.agression):
self.switched = self.switch_delay
return 1
if cr:
self.switched = self.switch_delay
self.speed -= np.random.rand()/self.agression*self.model.time_step
return -1
self.speed -= np.random.rand()*self.model.time_step
def check_speed(self, gap):
diff = self.max_speed - self.speed
space = (gap-self.speed)/self.speed/self.gap/self.agression
speedup = max(np.random.rand(), np.log(diff*space))*self.model.time_step
self.speed += speedup
def step(self):
move = self.get_move()
self.model.move(self, move)
if self.speed > self.max_speed:
self.speed -= np.random.rand()*self.model.time_step
| true | true |
1c45dc41168fc46b895c51c21cd20daa9a2082ba | 5,247 | py | Python | splink/intuition.py | rubensmau/splink | da4f5d5bc09753b6c6974af308dd1bad324d9b4b | [
"MIT"
] | 176 | 2020-03-16T15:19:39.000Z | 2022-03-30T06:38:29.000Z | splink/intuition.py | rubensmau/splink | da4f5d5bc09753b6c6974af308dd1bad324d9b4b | [
"MIT"
] | 194 | 2020-03-01T21:32:26.000Z | 2022-03-30T14:58:38.000Z | splink/intuition.py | rubensmau/splink | da4f5d5bc09753b6c6974af308dd1bad324d9b4b | [
"MIT"
] | 25 | 2020-03-07T00:09:22.000Z | 2022-03-11T16:28:06.000Z | from .model import Model
from .charts import load_chart_definition, altair_if_installed_else_json
import pandas as pd
from math import log2
initial_template = """
Initial probability of match (prior) = λ = {lam:.4g}
"""
col_template = [
("Comparison of {column_name}. Values are:", ""),
("{column_name}_l:", "{value_l}"),
("{column_name}_r:", "{value_r}"),
("Comparison has:", "{num_levels} levels"),
("Level for this comparison:", "{gamma_column_name} = {gamma_index}"),
("m probability = P(level|match):", "{m_probability:.4g}"),
("u probability = P(level|non-match):", "{u_probability:.4g}"),
("Bayes factor = m/u:", "{bayes_factor:.4g}"),
("New probability of match (updated belief):", "{updated_belief:.4g}"),
]
end_template = """
Final probability of match = {final:.4g}
Reminder:
The m probability for a given level is the proportion of matches which are in this level.
We would generally expect the highest similarity level to have the largest proportion of matches.
For example, we would expect first name field to match exactly amongst most matching records, except where nicknames, aliases or typos have occurred.
For a comparison column that changes through time, like address, we may expect a lower proportion of comparisons to be in the highest similarity level.
The u probability for a given level is the proportion of non-matches which are in this level.
We would generally expect the lowest similarity level to have the highest proportion of non-matches, but the magnitude depends on the cardinality of the field.
For example, we would expect that in the vast majority of non-matching records, the date of birth field would not match. However, we would expect it to be common for gender to match amongst non-matches.
"""
def intuition_report(row_dict: dict, model: Model):
"""Generate a text summary of a row in the comparison table which explains how the match_probability was computed
Args:
row_dict (dict): A python dictionary representing the comparison row
model (Model): splink Model object
Returns:
string: The intuition report
"""
lam = model.current_settings_obj["proportion_of_matches"]
report = initial_template.format(lam=lam)
current_prob = lam
for cc in model.current_settings_obj.comparison_columns_list:
d = cc.describe_row_dict(row_dict)
bf = d["bayes_factor"]
a = bf * current_prob
new_p = a / (a + (1 - current_prob))
d["updated_belief"] = new_p
current_prob = new_p
col_report = []
col_report.append("------")
for (blurb, value) in col_template:
blurb_fmt = blurb.format(**d)
value_fmt = value.format(**d)
col_report.append(f"{blurb_fmt:<50} {value_fmt}")
col_report.append("\n")
col_report = "\n".join(col_report)
report += col_report
report += end_template.format(final=new_p)
if len(model.current_settings_obj["blocking_rules"]) > 1:
match_key = int(row_dict["match_key"])
br = model.current_settings_obj["blocking_rules"][match_key]
br = f"\nThis comparison was generated by the blocking rule: {br}"
report += br
return report
def _get_bayes_factors(row_dict, model):
bayes_factors = []
lam = model.current_settings_obj["proportion_of_matches"]
for cc in model.current_settings_obj.comparison_columns_list:
row_desc = cc.describe_row_dict(row_dict, lam)
bayes_factors.append(row_desc)
return bayes_factors
def bayes_factor_chart(row_dict, model):
chart_path = "bayes_factor_chart_def.json"
bayes_factor_chart_def = load_chart_definition(chart_path)
bayes_factor_chart_def["data"]["values"] = _get_bayes_factors(row_dict, model)
bayes_factor_chart_def["encoding"]["y"]["field"] = "column_name"
del bayes_factor_chart_def["encoding"]["row"]
return altair_if_installed_else_json(bayes_factor_chart_def)
def bayes_factor_intuition_chart(row_dict, model):
chart_path = "bayes_factor_intuition_chart_def.json"
bayes_factor_intuition_chart_def = load_chart_definition(chart_path)
data = _get_bayes_factors(row_dict, model)
# Get initial and final bayes factors
lam = model.current_settings_obj["proportion_of_matches"]
bf_init = lam/(1-lam)
bf_final = sum([d['log2_bayes_factor'] for d in data]) + log2(bf_init)
# Sort records in descending order of influence
# with start and end positions added
df = pd.DataFrame(data)\
.sort_values(by="log2_bayes_factor", key=abs, ascending=False)\
.reset_index(drop=True)\
.append({
'bayes_factor': 2**bf_final,
'log2_bayes_factor': bf_final,
'column_name': 'Final score'
},
ignore_index=True
)
df = pd.DataFrame({
'bayes_factor': bf_init,
'log2_bayes_factor': log2(bf_init),
'column_name': 'Prior lambda'
},
index=[0]
).append(df, ignore_index=True).reset_index()
bayes_factor_intuition_chart_def["data"]["values"] = df.to_dict('records')
return altair_if_installed_else_json(bayes_factor_intuition_chart_def)
| 37.478571 | 203 | 0.692014 | from .model import Model
from .charts import load_chart_definition, altair_if_installed_else_json
import pandas as pd
from math import log2
initial_template = """
Initial probability of match (prior) = λ = {lam:.4g}
"""
col_template = [
("Comparison of {column_name}. Values are:", ""),
("{column_name}_l:", "{value_l}"),
("{column_name}_r:", "{value_r}"),
("Comparison has:", "{num_levels} levels"),
("Level for this comparison:", "{gamma_column_name} = {gamma_index}"),
("m probability = P(level|match):", "{m_probability:.4g}"),
("u probability = P(level|non-match):", "{u_probability:.4g}"),
("Bayes factor = m/u:", "{bayes_factor:.4g}"),
("New probability of match (updated belief):", "{updated_belief:.4g}"),
]
end_template = """
Final probability of match = {final:.4g}
Reminder:
The m probability for a given level is the proportion of matches which are in this level.
We would generally expect the highest similarity level to have the largest proportion of matches.
For example, we would expect first name field to match exactly amongst most matching records, except where nicknames, aliases or typos have occurred.
For a comparison column that changes through time, like address, we may expect a lower proportion of comparisons to be in the highest similarity level.
The u probability for a given level is the proportion of non-matches which are in this level.
We would generally expect the lowest similarity level to have the highest proportion of non-matches, but the magnitude depends on the cardinality of the field.
For example, we would expect that in the vast majority of non-matching records, the date of birth field would not match. However, we would expect it to be common for gender to match amongst non-matches.
"""
def intuition_report(row_dict: dict, model: Model):
lam = model.current_settings_obj["proportion_of_matches"]
report = initial_template.format(lam=lam)
current_prob = lam
for cc in model.current_settings_obj.comparison_columns_list:
d = cc.describe_row_dict(row_dict)
bf = d["bayes_factor"]
a = bf * current_prob
new_p = a / (a + (1 - current_prob))
d["updated_belief"] = new_p
current_prob = new_p
col_report = []
col_report.append("------")
for (blurb, value) in col_template:
blurb_fmt = blurb.format(**d)
value_fmt = value.format(**d)
col_report.append(f"{blurb_fmt:<50} {value_fmt}")
col_report.append("\n")
col_report = "\n".join(col_report)
report += col_report
report += end_template.format(final=new_p)
if len(model.current_settings_obj["blocking_rules"]) > 1:
match_key = int(row_dict["match_key"])
br = model.current_settings_obj["blocking_rules"][match_key]
br = f"\nThis comparison was generated by the blocking rule: {br}"
report += br
return report
def _get_bayes_factors(row_dict, model):
    """Return the per-column row descriptions (incl. Bayes factors) for a row."""
    prior = model.current_settings_obj["proportion_of_matches"]
    columns = model.current_settings_obj.comparison_columns_list
    return [cc.describe_row_dict(row_dict, prior) for cc in columns]
def bayes_factor_chart(row_dict, model):
    """Render a bar chart of the Bayes factor contributed by each column."""
    chart_def = load_chart_definition("bayes_factor_chart_def.json")
    chart_def["data"]["values"] = _get_bayes_factors(row_dict, model)
    # Single-row layout: put column names on the y axis and drop row faceting.
    encoding = chart_def["encoding"]
    encoding["y"]["field"] = "column_name"
    del encoding["row"]
    return altair_if_installed_else_json(chart_def)
def bayes_factor_intuition_chart(row_dict, model):
    """Render a waterfall-style chart of per-column Bayes factors.

    Rows are ordered by the magnitude of their log2 Bayes factor, preceded by
    the prior ("Prior lambda") and followed by the combined "Final score".
    """
    chart_path = "bayes_factor_intuition_chart_def.json"
    bayes_factor_intuition_chart_def = load_chart_definition(chart_path)

    data = _get_bayes_factors(row_dict, model)
    lam = model.current_settings_obj["proportion_of_matches"]
    bf_init = lam / (1 - lam)  # prior odds of a match
    # Log Bayes factors are additive: final log2 odds = prior log2 odds plus
    # the sum of every column's contribution.
    bf_final = sum(d["log2_bayes_factor"] for d in data) + log2(bf_init)

    prior_row = pd.DataFrame(
        {
            "bayes_factor": bf_init,
            "log2_bayes_factor": log2(bf_init),
            "column_name": "Prior lambda",
        },
        index=[0],
    )
    column_rows = (
        pd.DataFrame(data)
        .sort_values(by="log2_bayes_factor", key=abs, ascending=False)
        .reset_index(drop=True)
    )
    final_row = pd.DataFrame(
        {
            "bayes_factor": 2 ** bf_final,
            "log2_bayes_factor": bf_final,
            "column_name": "Final score",
        },
        index=[0],
    )

    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent and preserves the same row and
    # column ordering here.
    df = pd.concat([prior_row, column_rows, final_row], ignore_index=True).reset_index()

    bayes_factor_intuition_chart_def["data"]["values"] = df.to_dict("records")
    return altair_if_installed_else_json(bayes_factor_intuition_chart_def)
| true | true |
1c45dd33925b9b9be524163ed7fb322778cad0d2 | 1,395 | py | Python | graph_scripts/identification_graph.py | karannewatia/Mycelium | c20deab29d97025d7623af4bbf97f79f3132b415 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 3 | 2022-01-19T18:14:42.000Z | 2022-02-07T19:16:17.000Z | graph_scripts/identification_graph.py | karannewatia/Mycelium | c20deab29d97025d7623af4bbf97f79f3132b415 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | graph_scripts/identification_graph.py | karannewatia/Mycelium | c20deab29d97025d7623af4bbf97f79f3132b415 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
malice_vals = [0.005, 0.01, 0.02, 0.04]
ind = np.arange(4)
#replace these with the data obtained from identification.py
k2r1 = [0.0, 0.0, 0.0008000000000000229, 0.0015999999999999348]
k2r2 = [0.0, 0.0, 0.0008000000000000229, 0.0033000000000004137]
k2r3 = [0.0, 0.000200000000000089, 0.001200000000000201, 0.0049000000000002375]
k3r1 = [0.0, 0.0, 0.0, 0.0]
k3r2 = [0.0, 0.0, 0.0, 9.999999999987796e-05]
k3r3 = [0.0, 0.0, 0.0, 0.000200000000000089]
font = {'size' : 17}
plt.rc('font', **font)
plt.gcf().subplots_adjust(bottom=0.15)
plt.gcf().subplots_adjust(left=0.20)
plt.plot(ind, k2r1, label = "k=2,r=1", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k2r2, label = "k=2,r=2", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k2r3, label = "k=2,r=3", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k3r1, label = "k=3,r=1", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k3r2, label = "k=3,r=2", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k3r3, label = "k=3,r=3", marker="X", markersize=10, linewidth=5)
plt.xticks(ind, ('0.5', '1', '2', '4'))
plt.xlabel('Malice rate (%)', fontsize='large')
plt.ylabel('Probability of identification', fontsize='large')
plt.legend()
plt.savefig('../new_graphs/Identification.pdf', format='pdf')
| 36.710526 | 79 | 0.682437 | import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
malice_vals = [0.005, 0.01, 0.02, 0.04]
ind = np.arange(4)
k2r1 = [0.0, 0.0, 0.0008000000000000229, 0.0015999999999999348]
k2r2 = [0.0, 0.0, 0.0008000000000000229, 0.0033000000000004137]
k2r3 = [0.0, 0.000200000000000089, 0.001200000000000201, 0.0049000000000002375]
k3r1 = [0.0, 0.0, 0.0, 0.0]
k3r2 = [0.0, 0.0, 0.0, 9.999999999987796e-05]
k3r3 = [0.0, 0.0, 0.0, 0.000200000000000089]
font = {'size' : 17}
plt.rc('font', **font)
plt.gcf().subplots_adjust(bottom=0.15)
plt.gcf().subplots_adjust(left=0.20)
plt.plot(ind, k2r1, label = "k=2,r=1", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k2r2, label = "k=2,r=2", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k2r3, label = "k=2,r=3", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k3r1, label = "k=3,r=1", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k3r2, label = "k=3,r=2", marker="X", markersize=10, linewidth=5)
plt.plot(ind, k3r3, label = "k=3,r=3", marker="X", markersize=10, linewidth=5)
plt.xticks(ind, ('0.5', '1', '2', '4'))
plt.xlabel('Malice rate (%)', fontsize='large')
plt.ylabel('Probability of identification', fontsize='large')
plt.legend()
plt.savefig('../new_graphs/Identification.pdf', format='pdf')
| true | true |
1c45dd47cd5d01f117d4d2dabd7d739958d96331 | 1,591 | py | Python | setup.py | arpitban/integrate | c991a50546229c2341ad5d8571c72c819c06c4b2 | [
"MIT"
] | null | null | null | setup.py | arpitban/integrate | c991a50546229c2341ad5d8571c72c819c06c4b2 | [
"MIT"
] | null | null | null | setup.py | arpitban/integrate | c991a50546229c2341ad5d8571c72c819c06c4b2 | [
"MIT"
] | null | null | null | """
integrate
Package to integrate functions
"""
from setuptools import setup
import versioneer
# The module docstring doubles as package metadata: its first line is the
# short description, everything after the blank line is the long description.
DOCLINES = __doc__.split("\n")
setup(
    # Self-descriptive entries which should always be present
    name='integrate',
    author='Arpit Bansal',
    description=DOCLINES[0],
    long_description="\n".join(DOCLINES[2:]),
    # Version strings are managed by versioneer (derived from git tags).
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    license='MIT',
    install_requires=[
        'numpy',
    ],
    # Which Python importable modules should be included when your package is installed
    packages=['integrate', "integrate.tests"],
    # Optional: include package data to ship with your package.
    # Comment out this line to prevent the files from being packaged with your software.
    # Extend/modify the list to include/exclude other items as need be.
    package_data={'integrate': ["data/*.dat"]
                  },
    # Additional entries you may want -- simply uncomment the lines you want
    # and fill in the data:
    # author_email='me@place.org',      # Author email
    # url='http://www.my_package.com',  # Website
    # install_requires=[],              # Required packages, pulls from pip if needed; do not use for Conda deployment
    # platforms=['Linux',
    #            'Mac OS-X',
    #            'Unix',
    #            'Windows'],            # Valid platforms your code works on, adjust to your flavor
    # python_requires=">=3.5",          # Python version restrictions
    # Manual control if final package is compressible or not, set False to prevent the .egg from being made
    # zip_safe=False,
)
| 34.586957 | 118 | 0.657448 | from setuptools import setup
import versioneer
DOCLINES = __doc__.split("\n")
setup(
name='integrate',
author='Arpit Bansal',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='MIT',
install_requires=[
'numpy',
],
packages=['integrate', "integrate.tests"],
package_data={'integrate': ["data/*.dat"]
},
| true | true |
1c45dd9a6f5f623f74d785fbdbad56a08b56d3f5 | 2,746 | py | Python | xknx/remote_value/remote_value_control.py | magicbear/xknx | e6fe7bbd292e0fee29b2c4f210aff3031d76539d | [
"MIT"
] | 1 | 2021-01-24T21:08:36.000Z | 2021-01-24T21:08:36.000Z | xknx/remote_value/remote_value_control.py | magicbear/xknx | e6fe7bbd292e0fee29b2c4f210aff3031d76539d | [
"MIT"
] | 54 | 2021-10-01T17:42:16.000Z | 2022-03-31T09:22:46.000Z | xknx/remote_value/remote_value_control.py | crazyfx1/xknx | 87666cc9bd9da64a84305baeff84486097346111 | [
"MIT"
] | null | null | null | """
Module for managing a control remote value.
Examples are switching commands with priority control, relative dimming or blinds control commands.
DPT 2.yyy and DPT 3.yyy
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from xknx.dpt import DPTArray, DPTBinary, DPTControlStepCode
from xknx.exceptions import ConversionError
from .remote_value import AsyncCallbackType, GroupAddressesType, RemoteValue
if TYPE_CHECKING:
from xknx.xknx import XKNX
class RemoteValueControl(RemoteValue[DPTBinary, Any]):
    """Remote value for control datapoints (DPT 2.yyy and DPT 3.yyy)."""

    def __init__(
        self,
        xknx: XKNX,
        group_address: GroupAddressesType | None = None,
        group_address_state: GroupAddressesType | None = None,
        sync_state: bool | int | float | str = True,
        value_type: str | None = None,
        device_name: str | None = None,
        feature_name: str = "Control",
        after_update_cb: AsyncCallbackType | None = None,
    ):
        """Initialize control remote value."""
        if value_type is None:
            raise ConversionError("no value type given", device_name=device_name)
        transcoder = DPTControlStepCode.parse_transcoder(value_type)
        if transcoder is None:
            raise ConversionError(
                "invalid value type", value_type=value_type, device_name=device_name
            )
        # Resolved DPT transcoder class; used by to_knx()/from_knx() below.
        self.dpt_class: type[DPTControlStepCode] = transcoder
        super().__init__(
            xknx,
            group_address,
            group_address_state,
            sync_state=sync_state,
            device_name=device_name,
            feature_name=feature_name,
            after_update_cb=after_update_cb,
        )

    def payload_valid(self, payload: DPTArray | DPTBinary | None) -> DPTBinary | None:
        """Test if telegram payload may be parsed."""
        # pylint: disable=no-self-use
        if isinstance(payload, DPTBinary):
            return payload
        return None

    def to_knx(self, value: Any) -> DPTBinary:
        """Convert value to payload."""
        return DPTBinary(self.dpt_class.to_knx(value))

    def from_knx(self, payload: DPTBinary) -> Any:
        """Convert current payload to value."""
        # The transcoder expects a tuple of raw byte values, while
        # DPTBinary carries a single int.
        return self.dpt_class.from_knx((payload.value,))

    @property
    def unit_of_measurement(self) -> str | None:
        """Return the unit of measurement."""
        return self.dpt_class.unit

    @property
    def ha_device_class(self) -> str | None:
        """Return a string representing the home assistant device class."""
        return getattr(self.dpt_class, "ha_device_class", None)  # type: ignore
| 36.131579 | 100 | 0.664967 | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from xknx.dpt import DPTArray, DPTBinary, DPTControlStepCode
from xknx.exceptions import ConversionError
from .remote_value import AsyncCallbackType, GroupAddressesType, RemoteValue
if TYPE_CHECKING:
from xknx.xknx import XKNX
class RemoteValueControl(RemoteValue[DPTBinary, Any]):
def __init__(
self,
xknx: XKNX,
group_address: GroupAddressesType | None = None,
group_address_state: GroupAddressesType | None = None,
sync_state: bool | int | float | str = True,
value_type: str | None = None,
device_name: str | None = None,
feature_name: str = "Control",
after_update_cb: AsyncCallbackType | None = None,
):
if value_type is None:
raise ConversionError("no value type given", device_name=device_name)
_dpt_class = DPTControlStepCode.parse_transcoder(value_type)
if _dpt_class is None:
raise ConversionError(
"invalid value type", value_type=value_type, device_name=device_name
)
self.dpt_class: type[DPTControlStepCode] = _dpt_class
super().__init__(
xknx,
group_address,
group_address_state,
sync_state=sync_state,
device_name=device_name,
feature_name=feature_name,
after_update_cb=after_update_cb,
)
def payload_valid(self, payload: DPTArray | DPTBinary | None) -> DPTBinary | None:
return payload if isinstance(payload, DPTBinary) else None
def to_knx(self, value: Any) -> DPTBinary:
return DPTBinary(self.dpt_class.to_knx(value))
def from_knx(self, payload: DPTBinary) -> Any:
return self.dpt_class.from_knx((payload.value,))
@property
def unit_of_measurement(self) -> str | None:
return self.dpt_class.unit
@property
def ha_device_class(self) -> str | None:
return getattr(self.dpt_class, "ha_device_class", None)
| true | true |
1c45dde6f471e6c02e753f85eac93a9d22fbde55 | 3,277 | py | Python | sphinx/pocketsphinx-5prealpha/swig/python/test/decoder_test.py | anshsarkar/TailBench | 25845756aee9a892229c25b681051591c94daafd | [
"MIT"
] | 2 | 2021-01-13T21:17:42.000Z | 2021-01-13T21:17:42.000Z | sphinx/pocketsphinx-5prealpha/swig/python/test/decoder_test.py | anshsarkar/TailBench | 25845756aee9a892229c25b681051591c94daafd | [
"MIT"
] | null | null | null | sphinx/pocketsphinx-5prealpha/swig/python/test/decoder_test.py | anshsarkar/TailBench | 25845756aee9a892229c25b681051591c94daafd | [
"MIT"
] | null | null | null | # ====================================================================
# Copyright (c) 2013 Carnegie Mellon University. All rights
# reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# This work was supported in part by funding from the Defense Advanced
# Research Projects Agency and the National Science Foundation of the
# United States of America, and the CMU Sphinx Speech Consortium.
#
# THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY
# NOR ITS EMPLOYEES BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ====================================================================
from os import environ, path
from pocketsphinx.pocketsphinx import *
from sphinxbase.sphinxbase import *
# Paths are relative to this test script's location in the source tree.
MODELDIR = "../../../model"
DATADIR = "../../../test/data"
# Create a decoder configured with the US-English acoustic model, language
# model, and pronunciation dictionary.
config = Decoder.default_config()
config.set_string('-hmm', path.join(MODELDIR, 'en-us/en-us'))
config.set_string('-lm', path.join(MODELDIR, 'en-us/en-us.lm.bin'))
config.set_string('-dict', path.join(MODELDIR, 'en-us/cmudict-en-us.dict'))
# Decode streaming data.
decoder = Decoder(config)
# Dictionary lookup: known word returns its phonemes, unknown returns None.
print ("Pronunciation for word 'hello' is ", decoder.lookup_word("hello"))
print ("Pronunciation for word 'abcdf' is ", decoder.lookup_word("abcdf"))
# Feed raw 16 kHz PCM audio to the decoder in 1024-byte chunks.
decoder.start_utt()
stream = open(path.join(DATADIR, 'goforward.raw'), 'rb')
while True:
    buf = stream.read(1024)
    if buf:
        decoder.process_raw(buf, False, False)
    else:
        break
decoder.end_utt()
hypothesis = decoder.hyp()
print ('Best hypothesis: ', hypothesis.hypstr, " model score: ", hypothesis.best_score, " confidence: ", hypothesis.prob)
print ('Best hypothesis segments: ', [seg.word for seg in decoder.seg()])
# Access N best decodings.
print ('Best 10 hypothesis: ')
for best, i in zip(decoder.nbest(), range(10)):
    print (best.hypstr, best.score)
# Decode again from precomputed MFCC features (skip the 4-byte header).
stream = open(path.join(DATADIR, 'goforward.mfc'), 'rb')
stream.read(4)
buf = stream.read(13780)
decoder.start_utt()
decoder.process_cep(buf, False, True)
decoder.end_utt()
hypothesis = decoder.hyp()
print ('Best hypothesis: ', hypothesis.hypstr, " model score: ", hypothesis.best_score, " confidence: ", hypothesis.prob)
| 38.552941 | 121 | 0.712237 |
from os import environ, path
from pocketsphinx.pocketsphinx import *
from sphinxbase.sphinxbase import *
MODELDIR = "../../../model"
DATADIR = "../../../test/data"
config = Decoder.default_config()
config.set_string('-hmm', path.join(MODELDIR, 'en-us/en-us'))
config.set_string('-lm', path.join(MODELDIR, 'en-us/en-us.lm.bin'))
config.set_string('-dict', path.join(MODELDIR, 'en-us/cmudict-en-us.dict'))
decoder = Decoder(config)
print ("Pronunciation for word 'hello' is ", decoder.lookup_word("hello"))
print ("Pronunciation for word 'abcdf' is ", decoder.lookup_word("abcdf"))
decoder.start_utt()
stream = open(path.join(DATADIR, 'goforward.raw'), 'rb')
while True:
buf = stream.read(1024)
if buf:
decoder.process_raw(buf, False, False)
else:
break
decoder.end_utt()
hypothesis = decoder.hyp()
print ('Best hypothesis: ', hypothesis.hypstr, " model score: ", hypothesis.best_score, " confidence: ", hypothesis.prob)
print ('Best hypothesis segments: ', [seg.word for seg in decoder.seg()])
print ('Best 10 hypothesis: ')
for best, i in zip(decoder.nbest(), range(10)):
print (best.hypstr, best.score)
stream = open(path.join(DATADIR, 'goforward.mfc'), 'rb')
stream.read(4)
buf = stream.read(13780)
decoder.start_utt()
decoder.process_cep(buf, False, True)
decoder.end_utt()
hypothesis = decoder.hyp()
print ('Best hypothesis: ', hypothesis.hypstr, " model score: ", hypothesis.best_score, " confidence: ", hypothesis.prob)
| true | true |
1c45ddeda1ba48e329cee4025e6983697c5c5850 | 512 | py | Python | scipy/sparse/linalg/setup.py | lorentzenchr/scipy | 393a05ee927883ad6316b7092c851afea8f16816 | [
"BSD-3-Clause"
] | 9,095 | 2015-01-02T18:24:23.000Z | 2022-03-31T20:35:31.000Z | scipy/sparse/linalg/setup.py | lorentzenchr/scipy | 393a05ee927883ad6316b7092c851afea8f16816 | [
"BSD-3-Clause"
] | 11,500 | 2015-01-01T01:15:30.000Z | 2022-03-31T23:07:35.000Z | scipy/sparse/linalg/setup.py | lorentzenchr/scipy | 393a05ee927883ad6316b7092c851afea8f16816 | [
"BSD-3-Clause"
] | 5,838 | 2015-01-05T11:56:42.000Z | 2022-03-31T23:21:19.000Z |
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for scipy.sparse.linalg."""
    from numpy.distutils.misc_util import Configuration

    cfg = Configuration('linalg', parent_package, top_path)
    for subpackage in ('_isolve', '_dsolve', '_eigen'):
        cfg.add_subpackage(subpackage)
    cfg.add_data_dir('tests')
    # PROPACK
    cfg.add_subpackage('_propack')
    return cfg


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 23.272727 | 62 | 0.71875 |
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linalg', parent_package, top_path)
config.add_subpackage('_isolve')
config.add_subpackage('_dsolve')
config.add_subpackage('_eigen')
config.add_data_dir('tests')
config.add_subpackage('_propack')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| true | true |
1c45de15d7b5493f204a2593fa32d8e68e6eccbf | 612 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/GREMEDY/string_marker.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/GREMEDY/string_marker.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/raw/GL/GREMEDY/string_marker.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | '''Autogenerated by xml_generate script, do not edit!'''
# NOTE: this module wraps the GL_GREMEDY_string_marker extension and is
# produced by the xml_generate script -- regenerate rather than hand-edit.
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
# Name of the GL extension provided by this module.
_EXTENSION_NAME = 'GL_GREMEDY_string_marker'
def _f( function ):
    # Bind the raw GL entry point lazily through the platform loader,
    # attaching the standard GL error checker.
    return _p.createFunction( function,_p.PLATFORM.GL,'GL_GREMEDY_string_marker',error_checker=_errors._error_checker)
# glStringMarkerGREMEDY(len, string): inserts a marker string into the GL
# command stream (useful for graphics debuggers); void return.
@_f
@_p.types(None,_cs.GLsizei,ctypes.c_void_p)
def glStringMarkerGREMEDY(len,string):pass
| 34 | 119 | 0.776144 | from OpenGL import platform as _p, arrays
from OpenGL.raw.GL import _types as _cs
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_GREMEDY_string_marker'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_GREMEDY_string_marker',error_checker=_errors._error_checker)
@_f
@_p.types(None,_cs.GLsizei,ctypes.c_void_p)
def glStringMarkerGREMEDY(len,string):pass
| true | true |
1c45decc0c487ca311ecba9f0e1ff22edc6b5a52 | 10,437 | bzl | Python | tools/build_defs/repo/git.bzl | FengRillian/bazel | c962975f152e30741a3affb1d41dd885543bbea6 | [
"Apache-2.0"
] | 3 | 2019-03-18T23:49:16.000Z | 2021-05-30T09:44:18.000Z | tools/build_defs/repo/git.bzl | FengRillian/bazel | c962975f152e30741a3affb1d41dd885543bbea6 | [
"Apache-2.0"
] | null | null | null | tools/build_defs/repo/git.bzl | FengRillian/bazel | c962975f152e30741a3affb1d41dd885543bbea6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for cloning external git repositories."""
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "patch", "update_attrs", "workspace_and_buildfile")
def _clone_or_update(ctx):
    """Clone the remote git repository, or update an existing checkout.

    Exactly one of the `commit`, `tag`, or `branch` attributes must be set.
    Returns a dict with the resolved `commit` id and its `shallow_since` date
    so the rule invocation can be reproduced exactly.
    """
    if ((not ctx.attr.tag and not ctx.attr.commit and not ctx.attr.branch) or
        (ctx.attr.tag and ctx.attr.commit) or
        (ctx.attr.tag and ctx.attr.branch) or
        (ctx.attr.commit and ctx.attr.branch)):
        fail("Exactly one of commit, tag, or branch must be provided")
    shallow = ""
    if ctx.attr.commit:
        ref = ctx.attr.commit
    elif ctx.attr.tag:
        ref = "tags/" + ctx.attr.tag
        shallow = "--depth=1"
    else:
        ref = ctx.attr.branch
        shallow = "--depth=1"
    directory = str(ctx.path("."))
    if ctx.attr.strip_prefix:
        # Clone into a sibling "-tmp" directory; the real repository directory
        # becomes a symlink to the stripped subdirectory further below.
        directory = directory + "-tmp"
    if ctx.attr.shallow_since:
        if ctx.attr.tag:
            fail("shallow_since not allowed if a tag is specified; --depth=1 will be used for tags")
        if ctx.attr.branch:
            fail("shallow_since not allowed if a branch is specified; --depth=1 will be used for branches")
        shallow = "--shallow-since=%s" % ctx.attr.shallow_since
    ctx.report_progress("Cloning %s of %s" % (ref, ctx.attr.remote))
    if (ctx.attr.verbose):
        print("git.bzl: Cloning or updating %s repository %s using strip_prefix of [%s]" %
              (
                  " (%s)" % shallow if shallow else "",
                  ctx.name,
                  ctx.attr.strip_prefix if ctx.attr.strip_prefix else "None",
              ))
    bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
    # Clone if the target is not already a git checkout, then hard-reset to
    # the requested ref (fetching it if missing) and clean untracked files.
    # The "|| git clone" fallback handles servers that reject the shallow
    # option as well as the empty-shallow case.
    st = ctx.execute([bash_exe, "-c", """
cd {working_dir}
set -ex
( cd {working_dir} &&
if ! ( cd '{dir_link}' && [[ "$(git rev-parse --git-dir)" == '.git' ]] ) >/dev/null 2>&1; then
rm -rf '{directory}' '{dir_link}'
git clone '{shallow}' '{remote}' '{directory}' || git clone '{remote}' '{directory}'
fi
git -C '{directory}' reset --hard {ref} || \
((git -C '{directory}' fetch '{shallow}' origin {ref}:{ref} || \
git -C '{directory}' fetch origin {ref}:{ref}) && git -C '{directory}' reset --hard {ref})
git -C '{directory}' clean -xdf )
""".format(
        working_dir = ctx.path(".").dirname,
        dir_link = ctx.path("."),
        directory = directory,
        remote = ctx.attr.remote,
        ref = ref,
        shallow = shallow,
    )], environment = ctx.os.environ)
    if st.return_code:
        fail("error cloning %s:\n%s" % (ctx.name, st.stderr))
    if ctx.attr.strip_prefix:
        dest_link = "{}/{}".format(directory, ctx.attr.strip_prefix)
        if not ctx.path(dest_link).exists:
            fail("strip_prefix at {} does not exist in repo".format(ctx.attr.strip_prefix))
        # Expose only the stripped subdirectory as the repository root.
        ctx.symlink(dest_link, ctx.path("."))
    if ctx.attr.init_submodules:
        ctx.report_progress("Updating submodules")
        st = ctx.execute([bash_exe, "-c", """
set -ex
( git -C '{directory}' submodule update --init --checkout --force )
""".format(
            directory = ctx.path("."),
        )], environment = ctx.os.environ)
        if st.return_code:
            fail("error updating submodules %s:\n%s" % (ctx.name, st.stderr))
    ctx.report_progress("Recording actual commit")
    # After the fact, determine the actual commit and its date
    actual_commit = ctx.execute([
        bash_exe,
        "-c",
        "(git -C '{directory}' log -n 1 --pretty='format:%H')".format(
            directory = ctx.path("."),
        ),
    ]).stdout
    shallow_date = ctx.execute([
        bash_exe,
        "-c",
        "(git -C '{directory}' log -n 1 --pretty='format:%cd' --date=raw)".format(
            directory = ctx.path("."),
        ),
    ]).stdout
    return {"commit": actual_commit, "shallow_since": shallow_date}
def _remove_dot_git(ctx):
    """Delete the checkout's .git directory so the fetched tree is hermetic."""
    if "BAZEL_SH" in ctx.os.environ:
        shell = ctx.os.environ["BAZEL_SH"]
    else:
        shell = "bash"
    command = "rm -rf '{directory}'".format(directory = ctx.path(".git"))
    ctx.execute([shell, "-c", command])
def _update_git_attrs(orig, keys, override):
    """Merge `override` into the rule attributes for reproducibility.

    Once the concrete commit id is known, `tag` and `branch` are dropped so
    that only a single way of pinning the revision remains.
    """
    updated = update_attrs(orig, keys, override)
    if "commit" in updated:
        for redundant in ("tag", "branch"):
            updated.pop(redundant, None)
    return updated
# Attributes understood by both git_repository and new_git_repository.
# Exactly one of commit/tag/branch must be set (validated in _clone_or_update).
_common_attrs = {
    "remote": attr.string(mandatory = True),
    "commit": attr.string(default = ""),
    "shallow_since": attr.string(default = ""),
    "tag": attr.string(default = ""),
    "branch": attr.string(default = ""),
    "init_submodules": attr.bool(default = False),
    "verbose": attr.bool(default = False),
    "strip_prefix": attr.string(default = ""),
    # Patch files and commands applied after checkout.
    "patches": attr.label_list(default = []),
    "patch_tool": attr.string(default = "patch"),
    "patch_args": attr.string_list(default = ["-p0"]),
    "patch_cmds": attr.string_list(default = []),
}

# new_git_repository additionally takes BUILD/WORKSPACE file attributes;
# exactly one of build_file / build_file_content must be given.
_new_git_repository_attrs = dict(_common_attrs.items() + {
    "build_file": attr.label(allow_single_file = True),
    "build_file_content": attr.string(),
    "workspace_file": attr.label(),
    "workspace_file_content": attr.string(),
}.items())
def _new_git_repository_implementation(ctx):
    """Implementation of new_git_repository: clone, add BUILD/WORKSPACE, patch."""
    has_build_file = bool(ctx.attr.build_file)
    has_build_file_content = bool(ctx.attr.build_file_content)
    if has_build_file == has_build_file_content:
        fail("Exactly one of build_file and build_file_content must be provided.")
    update = _clone_or_update(ctx)
    workspace_and_buildfile(ctx)
    patch(ctx)
    _remove_dot_git(ctx)
    return _update_git_attrs(ctx.attr, _new_git_repository_attrs.keys(), update)
def _git_repository_implementation(ctx):
    """Implementation of git_repository: clone, apply patches, strip .git.

    Returns the reproducible attribute overrides from _clone_or_update.
    """
    update = _clone_or_update(ctx)
    patch(ctx)
    _remove_dot_git(ctx)
    return _update_git_attrs(ctx.attr, _common_attrs.keys(), update)
new_git_repository = repository_rule(
    implementation = _new_git_repository_implementation,
    attrs = _new_git_repository_attrs,
)
"""Clone an external git repository.

Clones a Git repository, checks out the specified tag, or commit, and
makes its targets available for binding. Also determine the id of the
commit actually checked out and its date, and return a dict with parameters
that provide a reproducible version of this rule (which a tag not necessarily
is).

Args:
  name: A unique name for this repository.
  build_file: The file to use as the BUILD file for this repository.
    Either build_file or build_file_content must be specified.

    This attribute is an absolute label (use '@//' for the main repo). The file
    does not need to be named BUILD, but can be (something like
    BUILD.new-repo-name may work well for distinguishing it from the
    repository's actual BUILD files.

  build_file_content: The content for the BUILD file for this repository.
    Either build_file or build_file_content must be specified.
  workspace_file: The file to use as the `WORKSPACE` file for this repository.
    Either `workspace_file` or `workspace_file_content` can be specified, or
    neither, but not both.
  workspace_file_content: The content for the WORKSPACE file for this repository.
    Either `workspace_file` or `workspace_file_content` can be specified, or
    neither, but not both.
  branch: branch in the remote repository to be checked out
  tag: tag in the remote repository to be checked out
  commit: specific commit to be checked out
    Precisely one of branch, tag, or commit must be specified.
  shallow_since: an optional date, not after the specified commit; the
    argument is not allowed if a tag is specified (which allows cloning
    with depth 1). Setting such a date close to the specified commit
    allows for a more shallow clone of the repository, saving bandwidth and
    wall-clock time.
  init_submodules: Whether to clone submodules in the repository.
  remote: The URI of the remote Git repository.
  strip_prefix: A directory prefix to strip from the extracted files.
  patches: A list of files that are to be applied as patches after extracting
    the archive.
  patch_tool: the patch(1) utility to use.
  patch_args: arguments given to the patch tool, defaults to ["-p0"]
  patch_cmds: sequence of commands to be applied after patches are applied.
"""

git_repository = repository_rule(
    implementation = _git_repository_implementation,
    attrs = _common_attrs,
)
"""Clone an external git repository.

Clones a Git repository, checks out the specified tag, or commit, and
makes its targets available for binding. Also determine the id of the
commit actually checked out and its date, and return a dict with parameters
that provide a reproducible version of this rule (which a tag not necessarily
is).

Args:
  name: A unique name for this repository.
  init_submodules: Whether to clone submodules in the repository.
  remote: The URI of the remote Git repository.
  branch: branch in the remote repository to be checked out
  tag: tag in the remote repository to be checked out
  commit: specific commit to be checked out
    Precisely one of branch, tag, or commit must be specified.
  shallow_since: an optional date in the form YYYY-MM-DD, not after
    the specified commit; the argument is not allowed if a tag is specified
    (which allows cloning with depth 1). Setting such a date close to the
    specified commit allows for a more shallow clone of the repository, saving
    bandwidth and wall-clock time.
  strip_prefix: A directory prefix to strip from the extracted files.
  patches: A list of files that are to be applied as patches after extracting
    the archive.
  patch_tool: the patch(1) utility to use.
  patch_args: arguments given to the patch tool, defaults to ["-p0"]
  patch_cmds: sequence of commands to be applied after patches are applied.
"""
| 38.655556 | 107 | 0.678547 |
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "patch", "update_attrs", "workspace_and_buildfile")
def _clone_or_update(ctx):
    """Check out `ctx.attr.remote` at the requested commit, tag, or branch.

    Optionally performs a shallow clone, strips a directory prefix, and
    initializes submodules.  Returns {"commit": ..., "shallow_since": ...}
    describing the revision actually checked out, so the rule invocation
    can be reproduced exactly.
    """
    # Exactly one of commit/tag/branch may name the revision.
    if ((not ctx.attr.tag and not ctx.attr.commit and not ctx.attr.branch) or
        (ctx.attr.tag and ctx.attr.commit) or
        (ctx.attr.tag and ctx.attr.branch) or
        (ctx.attr.commit and ctx.attr.branch)):
        fail("Exactly one of commit, tag, or branch must be provided")
    shallow = ""
    if ctx.attr.commit:
        ref = ctx.attr.commit
    elif ctx.attr.tag:
        ref = "tags/" + ctx.attr.tag
        # A tag pins the revision, so a depth-1 clone suffices.
        shallow = "--depth=1"
    else:
        ref = ctx.attr.branch
        shallow = "--depth=1"
    directory = str(ctx.path("."))
    if ctx.attr.strip_prefix:
        # Clone into a temporary sibling directory; the stripped
        # subdirectory is symlinked into place further below.
        directory = directory + "-tmp"
    if ctx.attr.shallow_since:
        if ctx.attr.tag:
            fail("shallow_since not allowed if a tag is specified; --depth=1 will be used for tags")
        if ctx.attr.branch:
            fail("shallow_since not allowed if a branch is specified; --depth=1 will be used for branches")
        shallow = "--shallow-since=%s" % ctx.attr.shallow_since
    ctx.report_progress("Cloning %s of %s" % (ref, ctx.attr.remote))
    if (ctx.attr.verbose):
        print("git.bzl: Cloning or updating %s repository %s using strip_prefix of [%s]" %
              (
                  " (%s)" % shallow if shallow else "",
                  ctx.name,
                  ctx.attr.strip_prefix if ctx.attr.strip_prefix else "None",
              ))
    bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
    # The shell fragment below reuses an existing checkout when possible:
    # it re-clones only when the target is not already a git checkout, then
    # hard-resets to the requested ref (fetching it first if necessary) and
    # removes untracked files.
    st = ctx.execute([bash_exe, "-c", """
cd {working_dir}
set -ex
( cd {working_dir} &&
    if ! ( cd '{dir_link}' && [[ "$(git rev-parse --git-dir)" == '.git' ]] ) >/dev/null 2>&1; then
      rm -rf '{directory}' '{dir_link}'
      git clone '{shallow}' '{remote}' '{directory}' || git clone '{remote}' '{directory}'
    fi
    git -C '{directory}' reset --hard {ref} || \
    ((git -C '{directory}' fetch '{shallow}' origin {ref}:{ref} || \
      git -C '{directory}' fetch origin {ref}:{ref}) && git -C '{directory}' reset --hard {ref})
    git -C '{directory}' clean -xdf )
  """.format(
        working_dir = ctx.path(".").dirname,
        dir_link = ctx.path("."),
        directory = directory,
        remote = ctx.attr.remote,
        ref = ref,
        shallow = shallow,
    )], environment = ctx.os.environ)
    if st.return_code:
        fail("error cloning %s:\n%s" % (ctx.name, st.stderr))
    if ctx.attr.strip_prefix:
        dest_link = "{}/{}".format(directory, ctx.attr.strip_prefix)
        if not ctx.path(dest_link).exists:
            fail("strip_prefix at {} does not exist in repo".format(ctx.attr.strip_prefix))
        # Make the stripped subdirectory appear as the repository root.
        ctx.symlink(dest_link, ctx.path("."))
    if ctx.attr.init_submodules:
        ctx.report_progress("Updating submodules")
        st = ctx.execute([bash_exe, "-c", """
set -ex
( git -C '{directory}' submodule update --init --checkout --force )
  """.format(
            directory = ctx.path("."),
        )], environment = ctx.os.environ)
        if st.return_code:
            fail("error updating submodules %s:\n%s" % (ctx.name, st.stderr))
    ctx.report_progress("Recording actual commit")
    # The requested ref may have been a tag or branch name; record the
    # commit id and committer date actually checked out for reproducibility.
    actual_commit = ctx.execute([
        bash_exe,
        "-c",
        "(git -C '{directory}' log -n 1 --pretty='format:%H')".format(
            directory = ctx.path("."),
        ),
    ]).stdout
    shallow_date = ctx.execute([
        bash_exe,
        "-c",
        "(git -C '{directory}' log -n 1 --pretty='format:%cd' --date=raw)".format(
            directory = ctx.path("."),
        ),
    ]).stdout
    return {"commit": actual_commit, "shallow_since": shallow_date}
def _remove_dot_git(ctx):
    """Delete the checkout's .git directory so the repository tree is hermetic."""
    shell = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
    cleanup_cmd = "rm -rf '{directory}'".format(directory = ctx.path(".git"))
    ctx.execute([shell, "-c", cleanup_cmd])
def _update_git_attrs(orig, keys, override):
    """Merge `override` into the rule's attrs; once a concrete commit is
    recorded, drop the other ways of naming the revision (tag/branch)."""
    merged = update_attrs(orig, keys, override)
    if "commit" in merged:
        merged.pop("tag", None)
        merged.pop("branch", None)
    return merged
# Attributes shared by git_repository and new_git_repository.
# Exactly one of "commit", "tag", or "branch" must be set (checked at
# fetch time in _clone_or_update).
_common_attrs = {
    "remote": attr.string(mandatory = True),
    "commit": attr.string(default = ""),
    "shallow_since": attr.string(default = ""),
    "tag": attr.string(default = ""),
    "branch": attr.string(default = ""),
    "init_submodules": attr.bool(default = False),
    "verbose": attr.bool(default = False),
    "strip_prefix": attr.string(default = ""),
    "patches": attr.label_list(default = []),
    "patch_tool": attr.string(default = "patch"),
    "patch_args": attr.string_list(default = ["-p0"]),
    "patch_cmds": attr.string_list(default = []),
}
# Attributes accepted by new_git_repository: the common clone attributes
# plus the caller-supplied BUILD/WORKSPACE file overlays.
_new_git_repository_attrs = dict(_common_attrs.items() + {
    "build_file": attr.label(allow_single_file = True),
    "build_file_content": attr.string(),
    "workspace_file": attr.label(),
    "workspace_file_content": attr.string(),
}.items())
def _new_git_repository_implementation(ctx):
    """Fetch the repository, overlay the WORKSPACE/BUILD files, apply patches,
    and return reproducible attributes."""
    has_file = bool(ctx.attr.build_file)
    has_content = bool(ctx.attr.build_file_content)
    # Exactly one of the two BUILD-file sources must be given.
    if has_file == has_content:
        fail("Exactly one of build_file and build_file_content must be provided.")
    update = _clone_or_update(ctx)
    workspace_and_buildfile(ctx)
    patch(ctx)
    _remove_dot_git(ctx)
    return _update_git_attrs(ctx.attr, _new_git_repository_attrs.keys(), update)
def _git_repository_implementation(ctx):
    """Fetch the repository, apply patches, and return reproducible attributes."""
    reproducible_attrs = _clone_or_update(ctx)
    patch(ctx)
    _remove_dot_git(ctx)
    return _update_git_attrs(ctx.attr, _common_attrs.keys(), reproducible_attrs)
# `new_git_repository` clones an external Git repository and overlays the
# caller-supplied WORKSPACE/BUILD files onto the checkout.
new_git_repository = repository_rule(
    implementation = _new_git_repository_implementation,
    attrs = _new_git_repository_attrs,
)
# `git_repository` clones an external Git repository at a pinned
# commit/tag/branch and makes its targets available for binding
# (full attribute documentation is in the rule docstring above).
git_repository = repository_rule(
    implementation = _git_repository_implementation,
    attrs = _common_attrs,
)
| true | true |
"""Read a KGTK node or edge file in TSV format.
Normally, results are obtained as rows of string values obtained by iteration
on the KgtkReader object. Alternative iterators are available to return the results
as:
* concise_rows: lists of strings with empty fields converted to None
* kgtk_values: lists of KgtkValue objects
* concise_kgtk_values: lists of KgtkValue objects with empty fields converted to None
* dicts: dicts of strings
* dicts(concise=True): dicts of strings with empty fields omitted
* kgtk_value_dicts: dicts of KgtkValue objects
* kgtk_value_dicts(concise=True): dicts of KgtkValue objects with empty fields omitted
TODO: Add support for alternative envelope formats, such as JSON.
"""
from argparse import ArgumentParser, _ArgumentGroup, Namespace, SUPPRESS
import attr
import bz2
from enum import Enum
import gzip
import lz4 # type: ignore
import lzma
from multiprocessing import Process, Queue
from pathlib import Path
import sys
import typing
from kgtk.kgtkformat import KgtkFormat
from kgtk.io.kgtkbase import KgtkBase
from kgtk.utils.argparsehelpers import optional_bool
from kgtk.utils.closableiter import ClosableIter, ClosableIterTextIOWrapper
from kgtk.utils.enumnameaction import EnumNameAction
from kgtk.utils.gzipprocess import GunzipProcess
from kgtk.utils.validationaction import ValidationAction
from kgtk.value.kgtkvalue import KgtkValue
from kgtk.value.kgtkvalueoptions import KgtkValueOptions, DEFAULT_KGTK_VALUE_OPTIONS
class KgtkReaderMode(Enum):
    """
    There are four file reading modes, controlling which required columns
    are enforced when the header is validated:
    """
    NONE = 0 # Enforce neither edge nor node file required columns
    EDGE = 1 # Enforce edge file required columns
    NODE = 2 # Enforce node file required columns
    AUTO = 3 # Automatically decide whether to enforce edge or node file required columns
@attr.s(slots=True, frozen=True)
class KgtkReaderOptions():
    """Immutable options controlling how a KGTK file is read.

    Covers header handling, pre-validation record sampling, line/value
    validation actions, line repair, and input compression.  Build
    instances directly, or from parsed command-line arguments with
    `from_args(...)` / `from_dict(...)`.
    """
    ERROR_LIMIT_DEFAULT: int = 1000
    GZIP_QUEUE_SIZE_DEFAULT: int = GunzipProcess.GZIP_QUEUE_SIZE_DEFAULT

    mode: KgtkReaderMode = attr.ib(validator=attr.validators.instance_of(KgtkReaderMode), default=KgtkReaderMode.AUTO)

    # The column separator is normally tab.
    column_separator: str = attr.ib(validator=attr.validators.instance_of(str), default=KgtkFormat.COLUMN_SEPARATOR)

    # supply a missing header record or override an existing header record.
    force_column_names: typing.Optional[typing.List[str]] = attr.ib(validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str),
                                                                                                                                     iterable_validator=attr.validators.instance_of(list))),
                                                                    default=None)
    skip_header_record: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)

    # Data record sampling, pre-validation.
    #
    # 1) Optionally read and skip a specific number of initial records, or record_limit - tail_count,
    #    whichever is greater.
    # 2) Optionally pass through every nth record relative to the number of records read.
    # 3) Optionally limit the total number of records read.
    initial_skip_count: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    every_nth_record: int = attr.ib(validator=attr.validators.instance_of(int), default=1)
    record_limit: typing.Optional[int] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(int)), default=None)
    tail_count: typing.Optional[int] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(int)), default=None)

    # How do we handle errors?
    error_limit: int = attr.ib(validator=attr.validators.instance_of(int), default=ERROR_LIMIT_DEFAULT) # >0 ==> limit error reports

    # Top-level validation controls:
    repair_and_validate_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    repair_and_validate_values: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)

    # Ignore empty lines, comments, and all whitespace lines, etc.?
    empty_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE)
    comment_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE)
    whitespace_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE)

    # Ignore records with empty values in certain fields:
    blank_required_field_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE)

    # Ignore records with too many or too few fields?
    short_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN)
    long_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN)

    # How should header errors be processed?
    header_error_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXIT)
    unsafe_column_name_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.REPORT)

    # Validate data cell values?
    invalid_value_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN)
    prohibited_list_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN)

    # Repair records with too many or too few fields?
    fill_short_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    truncate_long_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)

    # Other implementation options?
    compression_type: typing.Optional[str] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(str)), default=None) # TODO: use an Enum
    gzip_in_parallel: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    gzip_queue_size: int = attr.ib(validator=attr.validators.instance_of(int), default=GZIP_QUEUE_SIZE_DEFAULT)

    @classmethod
    def add_arguments(cls,
                      parser: ArgumentParser,
                      mode_options: bool = False,
                      default_mode: KgtkReaderMode = KgtkReaderMode.AUTO,
                      validate_by_default: bool = False,
                      expert: bool = False,
                      defaults: bool = True,
                      who: str = "",
    ):
        """Declare this class's options on `parser`.

        When `who` is nonempty, option flags and destinations are prefixed
        with it (e.g. --left-mode / left_mode) so several readers can share
        one parser; `from_dict(..., fallback=True)` then falls back to the
        unprefixed values.
        """
        # This helper function makes it easy to suppress options from
        # The help message.  The options are still there, and initialize
        # what they need to initialize.
        def h(msg: str)->str:
            if expert:
                return msg
            else:
                return SUPPRESS

        # This helper function decides whether or not to include defaults
        # in argument declarations. If we plan to make arguments with
        # prefixes and fallbacks, the fallbacks (the ones without prefixes)
        # should get default values, while the prefixed arguments should
        # not get defaults.
        #
        # Note: In obscure circumstances (EnumNameAction, I'm looking at you),
        # explicitly setting "default=None" may fail, whereas omitting the
        # "default=" phrase succeeds.
        #
        # TODO: continue researching these issues.
        def d(default: typing.Any)->typing.Mapping[str, typing.Any]:
            if defaults:
                return {"default": default}
            else:
                return { }

        prefix1: str = "--" if len(who) == 0 else "--" + who + "-"
        prefix2: str = "" if len(who) == 0 else who + "_"
        prefix3: str = "" if len(who) == 0 else who + ": "
        prefix4: str = "" if len(who) == 0 else who + " file "

        fgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "File options"),
                                                           h("Options affecting " + prefix4 + "processing."))

        fgroup.add_argument(prefix1 + "column-separator",
                            dest=prefix2 + "column_separator",
                            help=h(prefix3 + "Column separator (default=<TAB>)."), # TODO: provide the default with escapes, e.g. \t
                            type=str, **d(default=KgtkFormat.COLUMN_SEPARATOR))

        # TODO: use an Enum or add choices.
        fgroup.add_argument(prefix1 + "compression-type",
                            dest=prefix2 + "compression_type",
                            help=h(prefix3 + "Specify the compression type (default=%(default)s)."))

        fgroup.add_argument(prefix1 + "error-limit",
                            dest=prefix2 + "error_limit",
                            help=h(prefix3 + "The maximum number of errors to report before failing (default=%(default)s)"),
                            type=int, **d(default=cls.ERROR_LIMIT_DEFAULT))

        fgroup.add_argument(prefix1 + "gzip-in-parallel",
                            dest=prefix2 + "gzip_in_parallel",
                            metavar="optional True|False",
                            help=h(prefix3 + "Execute gzip in parallel (default=%(default)s)."),
                            type=optional_bool, nargs='?', const=True, **d(default=False))

        fgroup.add_argument(prefix1 + "gzip-queue-size",
                            dest=prefix2 + "gzip_queue_size",
                            help=h(prefix3 + "Queue size for parallel gzip (default=%(default)s)."),
                            type=int, **d(default=cls.GZIP_QUEUE_SIZE_DEFAULT))

        if mode_options:
            fgroup.add_argument(prefix1 + "mode",
                                dest=prefix2 + "mode",
                                help=h(prefix3 + "Determine the KGTK file mode (default=%(default)s)."),
                                type=KgtkReaderMode, action=EnumNameAction, **d(default_mode))

        hgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Header parsing"),
                                                           h("Options affecting " + prefix4 + "header parsing."))

        hgroup.add_argument(prefix1 + "force-column-names",
                            dest=prefix2 + "force_column_names",
                            help=h(prefix3 + "Force the column names (default=None)."),
                            nargs='+')

        hgroup.add_argument(prefix1 + "header-error-action",
                            dest=prefix2 + "header_error_action",
                            help=h(prefix3 + "The action to take when a header error is detected.  Only ERROR or EXIT are supported (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXIT))

        hgroup.add_argument(prefix1 + "skip-header-record",
                            dest=prefix2 + "skip_header_record",
                            metavar="optional True|False",
                            help=h(prefix3 + "Skip the first record when forcing column names (default=%(default)s)."),
                            type=optional_bool, nargs='?', const=True, **d(default=False))

        hgroup.add_argument(prefix1 + "unsafe-column-name-action",
                            dest=prefix2 + "unsafe_column_name_action",
                            help=h(prefix3 + "The action to take when a column name is unsafe (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.REPORT))

        sgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Pre-validation sampling"),
                                                           h("Options affecting " + prefix4 + "pre-validation data line sampling."))

        sgroup.add_argument(prefix1 + "initial-skip-count",
                            dest=prefix2 + "initial_skip_count",
                            help=h(prefix3 + "The number of data records to skip initially (default=do not skip)."),
                            type=int, **d(default=0))

        sgroup.add_argument(prefix1 + "every-nth-record",
                            dest=prefix2 + "every_nth_record",
                            help=h(prefix3 + "Pass every nth record (default=pass all records)."),
                            type=int, **d(default=1))

        sgroup.add_argument(prefix1 + "record-limit",
                            dest=prefix2 + "record_limit",
                            help=h(prefix3 + "Limit the number of records read (default=no limit)."),
                            type=int, **d(default=None))

        sgroup.add_argument(prefix1 + "tail-count",
                            dest=prefix2 + "tail_count",
                            help=h(prefix3 + "Pass this number of records (default=no tail processing)."),
                            type=int, **d(default=None))

        lgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Line parsing"),
                                                           h("Options affecting " + prefix4 + "data line parsing."))

        lgroup.add_argument(prefix1 + "repair-and-validate-lines",
                            dest=prefix2 + "repair_and_validate_lines",
                            metavar="optional True|False",
                            help=h(prefix3 + "Repair and validate lines (default=%(default)s)."),
                            type=optional_bool, nargs='?', const=True, **d(default=validate_by_default))

        lgroup.add_argument(prefix1 + "repair-and-validate-values",
                            dest=prefix2 + "repair_and_validate_values",
                            metavar="optional True|False",
                            help=h(prefix3 + "Repair and validate values (default=%(default)s)."),
                            type=optional_bool, nargs='?', const=True, **d(default=validate_by_default))

        lgroup.add_argument(prefix1 + "blank-required-field-line-action",
                            dest=prefix2 + "blank_required_field_line_action",
                            help=h(prefix3 + "The action to take when a line with a blank node1, node2, or id field (per mode) is detected (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE))

        lgroup.add_argument(prefix1 + "comment-line-action",
                            dest=prefix2 + "comment_line_action",
                            help=h(prefix3 + "The action to take when a comment line is detected (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE))

        lgroup.add_argument(prefix1 + "empty-line-action",
                            dest=prefix2 + "empty_line_action",
                            help=h(prefix3 + "The action to take when an empty line is detected (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE))

        lgroup.add_argument(prefix1 + "fill-short-lines",
                            dest=prefix2 + "fill_short_lines",
                            metavar="optional True|False",
                            help=h(prefix3 + "Fill missing trailing columns in short lines with empty values (default=%(default)s)."),
                            type=optional_bool, nargs='?', const=True, **d(default=False))

        lgroup.add_argument(prefix1 + "invalid-value-action",
                            dest=prefix2 + "invalid_value_action",
                            help=h(prefix3 + "The action to take when a data cell value is invalid (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN))

        lgroup.add_argument(prefix1 + "long-line-action",
                            dest=prefix2 + "long_line_action",
                            help=h(prefix3 + "The action to take when a long line is detected (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN))

        # Bug fix: the dest formerly contained a space ("prohibited list_action"),
        # so the parsed value was stored under an attribute name that
        # from_dict()'s "prohibited_list_action" lookup could never find.
        lgroup.add_argument(prefix1 + "prohibited-list-action",
                            dest=prefix2 + "prohibited_list_action",
                            help=h(prefix3 + "The action to take when a data cell contains a prohibited list (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN))

        lgroup.add_argument(prefix1 + "short-line-action",
                            dest=prefix2 + "short_line_action",
                            help=h(prefix3 + "The action to take when a short line is detected (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN))

        lgroup.add_argument(prefix1 + "truncate-long-lines",
                            dest=prefix2 + "truncate_long_lines",
                            metavar="optional True|False",
                            help=h(prefix3 + "Remove excess trailing columns in long lines (default=%(default)s)."),
                            type=optional_bool, nargs='?', const=True, **d(default=False))

        lgroup.add_argument(prefix1 + "whitespace-line-action",
                            dest=prefix2 + "whitespace_line_action",
                            help=h(prefix3 + "The action to take when a whitespace line is detected (default=%(default)s)."),
                            type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE))

    @classmethod
    # Build the value parsing option structure.
    def from_dict(cls,
                  d: dict,
                  who: str = "",
                  mode: typing.Optional[KgtkReaderMode] = None,
                  fallback: bool = False,
    )->'KgtkReaderOptions':
        """Build a KgtkReaderOptions from a dict of parsed arguments.

        `who` selects prefixed argument names; when `fallback` is True,
        unprefixed names are consulted when the prefixed one is absent.
        An explicit `mode` argument overrides any mode found in `d`.
        """
        prefix: str = ""   # The destination name prefix.
        if len(who) > 0:
            prefix = who + "_"

        # TODO: Figure out how to type check this method.
        def lookup(name: str, default):
            prefixed_name = prefix + name
            if prefixed_name in d and d[prefixed_name] is not None:
                return d[prefixed_name]
            elif fallback and name in d and d[name] is not None:
                return d[name]
            else:
                return default

        reader_mode: KgtkReaderMode
        if mode is not None:
            reader_mode = mode
        else:
            reader_mode = lookup("mode", KgtkReaderMode.AUTO)

        # NOTE(review): several lookup fallbacks below (e.g. header_error_action
        # EXCLUDE vs. the attr.ib default EXIT, invalid_value_action REPORT vs.
        # COMPLAIN) disagree with the attribute defaults above -- confirm
        # whether that is intentional.
        return cls(
            blank_required_field_line_action=lookup("blank_required_field_line_action", ValidationAction.EXCLUDE),
            column_separator=lookup("column_separator", KgtkFormat.COLUMN_SEPARATOR),
            comment_line_action=lookup("comment_line_action", ValidationAction.EXCLUDE),
            compression_type=lookup("compression_type", None),
            empty_line_action=lookup("empty_line_action", ValidationAction.EXCLUDE),
            error_limit=lookup("error_limit", cls.ERROR_LIMIT_DEFAULT),
            every_nth_record=lookup("every_nth_record", 1),
            fill_short_lines=lookup("fill_short_lines", False),
            force_column_names=lookup("force_column_names", None),
            gzip_in_parallel=lookup("gzip_in_parallel", False),
            gzip_queue_size=lookup("gzip_queue_size", KgtkReaderOptions.GZIP_QUEUE_SIZE_DEFAULT),
            header_error_action=lookup("header_error_action", ValidationAction.EXCLUDE),
            initial_skip_count=lookup("initial_skip_count", 0),
            invalid_value_action=lookup("invalid_value_action", ValidationAction.REPORT),
            long_line_action=lookup("long_line_action", ValidationAction.EXCLUDE),
            mode=reader_mode,
            prohibited_list_action=lookup("prohibited_list_action", ValidationAction.REPORT),
            record_limit=lookup("record_limit", None),
            repair_and_validate_lines=lookup("repair_and_validate_lines", False),
            repair_and_validate_values=lookup("repair_and_validate_values", False),
            short_line_action=lookup("short_line_action", ValidationAction.EXCLUDE),
            # Bug fix: the lookup key was misspelled "skip_header_recordb",
            # so a user-supplied value was silently ignored.
            skip_header_record=lookup("skip_header_record", False),
            tail_count=lookup("tail_count", None),
            truncate_long_lines=lookup("truncate_long_lines", False),
            unsafe_column_name_action=lookup("unsafe_column_name_action", ValidationAction.REPORT),
            whitespace_line_action=lookup("whitespace_line_action", ValidationAction.EXCLUDE),
        )

    # Build the value parsing option structure.
    @classmethod
    def from_args(cls,
                  args: Namespace,
                  who: str = "",
                  mode: typing.Optional[KgtkReaderMode] = None,
                  fallback: bool = False,
    )->'KgtkReaderOptions':
        """Build a KgtkReaderOptions from an argparse Namespace (see from_dict)."""
        return cls.from_dict(vars(args), who=who, mode=mode, fallback=fallback)

    def show(self, who: str="", out: typing.TextIO=sys.stderr):
        """Print the current option values, one per line, to `out`."""
        prefix: str = "--" if len(who) == 0 else "--" + who + "-"
        print("%smode=%s" % (prefix, self.mode.name), file=out)
        print("%scolumn-separator=%s" % (prefix, repr(self.column_separator)), file=out)
        if self.force_column_names is not None:
            print("%sforce-column-names=%s" % (prefix, " ".join(self.force_column_names)), file=out)
        print("%sskip-header-record=%s" % (prefix, str(self.skip_header_record)), file=out)
        print("%serror-limit=%s" % (prefix, str(self.error_limit)), file=out)
        print("%srepair-and-validate-lines=%s" % (prefix, str(self.repair_and_validate_lines)), file=out)
        print("%srepair-and-validate-values=%s" % (prefix, str(self.repair_and_validate_values)), file=out)
        print("%sempty-line-action=%s" % (prefix, self.empty_line_action.name), file=out)
        print("%scomment-line-action=%s" % (prefix, self.comment_line_action.name), file=out)
        print("%swhitespace-line-action=%s" % (prefix, self.whitespace_line_action.name), file=out)
        print("%sblank-required-field-line-action=%s" % (prefix, self.blank_required_field_line_action.name), file=out)
        print("%sshort-line-action=%s" % (prefix, self.short_line_action.name), file=out)
        print("%slong-line-action=%s" % (prefix, self.long_line_action.name), file=out)
        print("%sheader-error-action=%s" % (prefix, self.header_error_action.name), file=out)
        print("%sunsafe-column-name-action=%s" % (prefix, self.unsafe_column_name_action.name), file=out)
        print("%sinvalid-value-action=%s" % (prefix, self.invalid_value_action.name), file=out)
        print("%sinitial-skip-count=%s" % (prefix, str(self.initial_skip_count)), file=out)
        print("%severy-nth-record=%s" % (prefix, str(self.every_nth_record)), file=out)
        if self.record_limit is not None:
            print("%srecord-limit=%s" % (prefix, str(self.record_limit)), file=out)
        if self.tail_count is not None:
            print("%stail-count=%s" % (prefix, str(self.tail_count)), file=out)
        # Bug fix: initial-skip-count was printed a second time here.
        print("%sprohibited-list-action=%s" % (prefix, self.prohibited_list_action.name), file=out)
        print("%sfill-short-lines=%s" % (prefix, str(self.fill_short_lines)), file=out)
        print("%struncate-long-lines=%s" % (prefix, str(self.truncate_long_lines)), file=out)
        if self.compression_type is not None:
            print("%scompression-type=%s" % (prefix, str(self.compression_type)), file=out)
        print("%sgzip-in-parallel=%s" % (prefix, str(self.gzip_in_parallel)), file=out)
        print("%sgzip-queue-size=%s" % (prefix, str(self.gzip_queue_size)), file=out)
# Shared all-defaults options instance, used when a caller does not supply
# a KgtkReaderOptions object (see KgtkReader._default_options).
DEFAULT_KGTK_READER_OPTIONS: KgtkReaderOptions = KgtkReaderOptions()
@attr.s(slots=True, frozen=False)
class KgtkReader(KgtkBase, ClosableIter[typing.List[str]]):
    """Read a KGTK file, yielding each data record as a list of strings.

    Construct instances with the `open(...)` class method, which selects an
    edge- or node-oriented subclass based on the header and mode.
    """
    # The input file path; None is used when reading standard input.
    file_path: typing.Optional[Path] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Path)))
    # The (possibly decompressed) line source being iterated.
    source: ClosableIter[str] = attr.ib() # Todo: validate

    # TODO: Fix this validator:
    # options: KgtkReaderOptions = attr.ib(validator=attr.validators.instance_of(KgtkReaderOptions))
    options: KgtkReaderOptions = attr.ib()

    value_options: KgtkValueOptions = attr.ib(validator=attr.validators.instance_of(KgtkValueOptions))

    # The column names from the header (possibly forced by the options).
    column_names: typing.List[str] = attr.ib(validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str),
                                                                                     iterable_validator=attr.validators.instance_of(list)))
    # For convenience, the count of columns. This is the same as len(column_names).
    column_count: int = attr.ib(validator=attr.validators.instance_of(int))

    # Maps each column name to its index in a data record.
    column_name_map: typing.Mapping[str, int] = attr.ib(validator=attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str),
                                                                                               value_validator=attr.validators.instance_of(int)))

    # The actual mode used.
    #
    # TODO: fix the validator.
    # mode: KgtkReaderMode = attr.ib(validator=attr.validators.instance_of(KgtkReaderMode), default=KgtkReaderMode.NONE)
    mode: KgtkReaderMode = attr.ib(default=KgtkReaderMode.NONE)

    # The index of the mandatory/aliased columns. -1 means missing:
    node1_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file
    label_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file
    node2_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file
    id_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # node file

    # Running counters maintained while iterating over data lines.
    data_lines_read: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    data_lines_skipped: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    data_lines_passed: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    data_lines_ignored: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    data_errors_reported: int = attr.ib(validator=attr.validators.instance_of(int), default=0)

    # Is this an edge file or a node file?
    is_edge_file: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    is_node_file: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)

    # Feedback and error output:
    error_file: typing.TextIO = attr.ib(default=sys.stderr)
    verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    very_verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
@classmethod
def _default_options(
cls,
options: typing.Optional[KgtkReaderOptions] = None,
value_options: typing.Optional[KgtkValueOptions] = None,
)->typing.Tuple[KgtkReaderOptions, KgtkValueOptions]:
# Supply the default reader and value options:
if options is None:
options = DEFAULT_KGTK_READER_OPTIONS
if value_options is None:
value_options = DEFAULT_KGTK_VALUE_OPTIONS
return (options, value_options)
    @classmethod
    def open(cls,
             file_path: typing.Optional[Path],
             who: str = "input",
             error_file: typing.TextIO = sys.stderr,
             mode: typing.Optional[KgtkReaderMode] = None,
             options: typing.Optional[KgtkReaderOptions] = None,
             value_options: typing.Optional[KgtkValueOptions] = None,
             verbose: bool = False,
             very_verbose: bool = False)->"KgtkReader":
        """
        Opens a KGTK file, which may be an edge file or a node file.  The appropriate reader is returned.

        Args:
            file_path: the input path; None (or "-") reads standard input.
            who: a short name for this input, used in error messages.
            error_file: stream for feedback and error messages.
            mode: when supplied, overrides options.mode.
            options: reader options (defaults to DEFAULT_KGTK_READER_OPTIONS).
            value_options: value parsing options (defaults to DEFAULT_KGTK_VALUE_OPTIONS).
            verbose, very_verbose: feedback levels.

        Returns:
            an EdgeReader, NodeReader, or plain KgtkReader instance,
            depending on the effective mode and the columns in the header.
        """
        # Supply the default reader and value options:
        (options, value_options) = cls._default_options(options, value_options)

        source: ClosableIter[str] = cls._openfile(file_path, options=options, error_file=error_file, verbose=verbose)

        # Read the kgtk file header and split it into column names. We get the
        # header back, too, for use in debugging and error messages.
        header: str
        column_names: typing.List[str]
        (header, column_names) = cls._build_column_names(source, options, error_file=error_file, verbose=verbose)

        # Check for unsafe column names.
        cls.check_column_names(column_names,
                               header_line=header,
                               who=who,
                               error_action=options.unsafe_column_name_action,
                               error_file=error_file)

        # Build a map from column name to column index.
        column_name_map: typing.Mapping[str, int] = cls.build_column_name_map(column_names,
                                                                              header_line=header,
                                                                              who=who,
                                                                              error_action=options.header_error_action,
                                                                              error_file=error_file)

        # Should we automatically determine if this is an edge file or a node file?
        if mode is None:
            mode = options.mode
        is_edge_file: bool = False
        is_node_file: bool = False
        if mode is KgtkReaderMode.AUTO:
            # If we have a node1 (or alias) column, then this must be an edge file. Otherwise, assume it is a node file.
            node1_idx: int = cls.get_column_idx(cls.NODE1_COLUMN_NAMES,
                                                column_name_map,
                                                header_line=header,
                                                who=who,
                                                error_action=options.header_error_action,
                                                error_file=error_file,
                                                is_optional=True)
            if node1_idx >= 0:
                is_edge_file = True
                is_node_file = False
                if verbose:
                    print("%s column found, this is a KGTK edge file" % column_names[node1_idx], file=error_file, flush=True)
            else:
                is_edge_file = False
                is_node_file = True
                if verbose:
                    print("node1 column not found, assuming this is a KGTK node file", file=error_file, flush=True)
        elif mode is KgtkReaderMode.EDGE:
            is_edge_file = True
        elif mode is KgtkReaderMode.NODE:
            is_node_file = True
        elif mode is KgtkReaderMode.NONE:
            pass

        # Get the indices of the special columns.
        node1_column_idx: int
        label_column_idx: int
        node2_column_idx: int
        id_column_idx: int
        (node1_column_idx,
         label_column_idx,
         node2_column_idx,
         id_column_idx) = cls.get_special_columns(column_name_map,
                                                  header_line=header,
                                                  who=who,
                                                  error_action=options.header_error_action,
                                                  error_file=error_file,
                                                  is_edge_file=is_edge_file,
                                                  is_node_file=is_node_file)
        if verbose:
            print("KgtkReader: Special columns: node1=%d label=%d node2=%d id=%d" % (node1_column_idx,
                                                                                     label_column_idx,
                                                                                     node2_column_idx,
                                                                                     id_column_idx), file=error_file, flush=True)

        if is_edge_file:
            # We'll instantiate an EdgeReader, which is a subclass of KgtkReader.
            # The EdgeReader import is deferred to avoid circular imports.
            from kgtk.io.edgereader import EdgeReader
            if verbose:
                print("KgtkReader: Reading an edge file.", file=error_file, flush=True)
            # Rebind `cls` so the constructor call below builds the subclass.
            cls = EdgeReader
        elif is_node_file:
            # We'll instantiate an NodeReader, which is a subclass of KgtkReader.
            # The NodeReader import is deferred to avoid circular imports.
            from kgtk.io.nodereader import NodeReader
            if verbose:
                print("KgtkReader: Reading an node file.", file=error_file, flush=True)
            cls = NodeReader

        return cls(file_path=file_path,
                   source=source,
                   column_names=column_names,
                   column_name_map=column_name_map,
                   column_count=len(column_names),
                   mode=mode,
                   node1_column_idx=node1_column_idx,
                   label_column_idx=label_column_idx,
                   node2_column_idx=node2_column_idx,
                   id_column_idx=id_column_idx,
                   error_file=error_file,
                   options=options,
                   value_options=value_options,
                   is_edge_file=is_edge_file,
                   is_node_file=is_node_file,
                   verbose=verbose,
                   very_verbose=very_verbose,
                   )
@classmethod
def _open_compressed_file(cls,
compression_type: str,
file_name: str,
file_or_path: typing.Union[Path, typing.TextIO],
who: str,
error_file: typing.TextIO,
verbose: bool)->typing.TextIO:
# TODO: find a better way to coerce typing.IO[Any] to typing.TextIO
if compression_type in [".gz", "gz"]:
if verbose:
print("%s: reading gzip %s" % (who, file_name), file=error_file, flush=True)
return gzip.open(file_or_path, mode="rt") # type: ignore
elif compression_type in [".bz2", "bz2"]:
if verbose:
print("%s: reading bz2 %s" % (who, file_name), file=error_file, flush=True)
return bz2.open(file_or_path, mode="rt") # type: ignore
elif compression_type in [".xz", "xz"]:
if verbose:
print("%s: reading lzma %s" % (who, file_name), file=error_file, flush=True)
return lzma.open(file_or_path, mode="rt") # type: ignore
elif compression_type in [".lz4", "lz4"]:
if verbose:
print("%s: reading lz4 %s" % (who, file_name), file=error_file, flush=True)
return lz4.frame.open(file_or_path, mode="rt") # type: ignore
else:
# TODO: throw a better exception.
raise ValueError("%s: Unexpected compression_type '%s'" % (who, compression_type))
@classmethod
def _openfile(cls,
file_path: typing.Optional[Path],
options: KgtkReaderOptions,
error_file: typing.TextIO,
verbose: bool)->ClosableIter[str]:
who: str = cls.__name__
if file_path is None or str(file_path) == "-":
if options.compression_type is not None and len(options.compression_type) > 0:
return ClosableIterTextIOWrapper(cls._open_compressed_file(options.compression_type, "-", sys.stdin, who, error_file, verbose))
else:
if verbose:
print("%s: reading stdin" % who, file=error_file, flush=True)
return ClosableIterTextIOWrapper(sys.stdin)
if verbose:
print("%s: File_path.suffix: %s" % (who, file_path.suffix), file=error_file, flush=True)
gzip_file: typing.TextIO
if options.compression_type is not None and len(options.compression_type) > 0:
gzip_file = cls._open_compressed_file(options.compression_type, str(file_path), file_path, who, error_file, verbose)
elif file_path.suffix in [".bz2", ".gz", ".lz4", ".xz"]:
gzip_file = cls._open_compressed_file(file_path.suffix, str(file_path), file_path, who, error_file, verbose)
else:
if verbose:
print("%s: reading file %s" % (who, str(file_path)), file=error_file, flush=True)
return ClosableIterTextIOWrapper(open(file_path, "r"))
if options.gzip_in_parallel:
gzip_thread: GunzipProcess = GunzipProcess(gzip_file, Queue(options.gzip_queue_size))
gzip_thread.start()
return gzip_thread
else:
return ClosableIterTextIOWrapper(gzip_file)
@classmethod
def _build_column_names(cls,
source: ClosableIter[str],
options: KgtkReaderOptions,
error_file: typing.TextIO,
verbose: bool = False,
)->typing.Tuple[str, typing.List[str]]:
"""
Read the kgtk file header and split it into column names.
"""
column_names: typing.List[str]
if options.force_column_names is None:
# Read the column names from the first line, stripping end-of-line characters.
#
# TODO: if the read fails, throw a more useful exception with the line number.
try:
header: str = next(source).rstrip("\r\n")
except StopIteration:
raise ValueError("No header line in file")
if verbose:
print("header: %s" % header, file=error_file, flush=True)
# Split the first line into column names.
return header, header.split(options.column_separator)
else:
# Skip the first record to override the column names in the file.
# Do not skip the first record if the file does not hae a header record.
if options.skip_header_record:
try:
next(source)
except StopIteration:
raise ValueError("No header line to skip")
# Use the forced column names.
return options.column_separator.join(options.force_column_names), options.force_column_names
def close(self):
self.source.close()
def exclude_line(self, action: ValidationAction, msg: str, line: str)->bool:
"""
Take a validation action. Returns True if the line should be excluded.
"""
result: bool
if action == ValidationAction.PASS:
return False # Silently pass the line through
elif action == ValidationAction.REPORT:
result= False # Report the issue then pass the line.
elif action == ValidationAction.EXCLUDE:
return True # Silently exclude the line
elif action == ValidationAction.COMPLAIN:
result = True # Report the issue then exclude the line.
elif action == ValidationAction.ERROR:
# Immediately raise an exception.
raise ValueError("In input data line %d, %s: %s" % (self.data_lines_read, msg, line))
elif action == ValidationAction.EXIT:
print("Data line %d:\n%s\n%s" % (self.data_lines_read, line, msg), file=self.error_file, flush=True)
sys.exit(1)
# print("In input data line %d, %s: %s" % (self.data_lines_read, msg, line), file=self.error_file, flush=True)
print("Data line %d:\n%s\n%s" % (self.data_lines_read, line, msg), file=self.error_file, flush=True)
self.data_errors_reported += 1
if self.options.error_limit > 0 and self.data_errors_reported >= self.options.error_limit:
raise ValueError("Too many data errors, exiting.")
return result
    # Get the next edge values as a list of strings.
    def nextrow(self)-> typing.List[str]:
        """
        Return the next data row as a list of field strings, applying the
        configured sampling (skip count, every-nth, record limit / tail count),
        line repair (fill/truncate), and validation options.  Raises
        StopIteration when the source is exhausted or the record limit is hit.
        """
        row: typing.List[str]
        repair_and_validate_lines: bool = self.options.repair_and_validate_lines
        repair_and_validate_values: bool = self.options.repair_and_validate_values
        # Compute the initial skip count
        skip_count: int = self.options.initial_skip_count
        if self.options.record_limit is not None and self.options.tail_count is not None:
            # Compute the tail count.
            tail_skip_count: int = self.options.record_limit - self.options.tail_count
            if tail_skip_count > skip_count:
                skip_count = tail_skip_count # Take the larger skip count.
        # This loop accomodates lines that are ignored.
        while (True):
            # Has a record limit been specified and have we reached it?
            if self.options.record_limit is not None:
                if self.data_lines_read >= self.options.record_limit:
                    # Close the source and stop the iteration.
                    self.source.close() # Do we need to guard against repeating this call?
                    raise StopIteration
            # Read a line from the source
            line: str
            try:
                line = next(self.source) # Will throw StopIteration
            except StopIteration as e:
                # Close the input file!
                #
                # TODO: implement a close() routine and/or whatever it takes to support "with".
                self.source.close() # Do we need to guard against repeating this call?
                raise e
            # Count the data line read.
            self.data_lines_read += 1
            # Data sampling: skipped lines are counted but never returned.
            if self.data_lines_read <= skip_count:
                self.data_lines_skipped += 1
                continue
            if self.options.every_nth_record > 1:
                if self.data_lines_read % self.options.every_nth_record != 0:
                    self.data_lines_skipped += 1
                    continue
            # Strip the end-of-line characters:
            line = line.rstrip("\r\n")
            if repair_and_validate_lines:
                # TODO: Use a sepearate option to control this.
                if self.very_verbose:
                    print("'%s'" % line, file=self.error_file, flush=True)
                # Ignore empty lines.
                if self.options.empty_line_action != ValidationAction.PASS and len(line) == 0:
                    if self.exclude_line(self.options.empty_line_action, "saw an empty line", line):
                        continue
                # Ignore comment lines:
                if self.options.comment_line_action != ValidationAction.PASS and line[0] == self.COMMENT_INDICATOR:
                    if self.exclude_line(self.options.comment_line_action, "saw a comment line", line):
                        continue
                # Ignore whitespace lines
                if self.options.whitespace_line_action != ValidationAction.PASS and line.isspace():
                    if self.exclude_line(self.options.whitespace_line_action, "saw a whitespace line", line):
                        continue
            row = line.split(self.options.column_separator)
            if repair_and_validate_lines:
                # Optionally fill missing trailing columns with empty row:
                if self.options.fill_short_lines and len(row) < self.column_count:
                    while len(row) < self.column_count:
                        row.append("")
                # Optionally remove extra trailing columns:
                if self.options.truncate_long_lines and len(row) > self.column_count:
                    row = row[:self.column_count]
                # Optionally validate that the line contained the right number of columns:
                #
                # When we report line numbers in error messages, line 1 is the first line after the header line.
                if self.options.short_line_action != ValidationAction.PASS and len(row) < self.column_count:
                    if self.exclude_line(self.options.short_line_action,
                                         "Required %d columns, saw %d: '%s'" % (self.column_count,
                                                                                len(row),
                                                                                line),
                                         line):
                        continue
                if self.options.long_line_action != ValidationAction.PASS and len(row) > self.column_count:
                    if self.exclude_line(self.options.long_line_action,
                                         "Required %d columns, saw %d (%d extra): '%s'" % (self.column_count,
                                                                                           len(row),
                                                                                           len(row) - self.column_count,
                                                                                           line),
                                         line):
                        continue
                if self._ignore_if_blank_fields(row, line):
                    continue
            if repair_and_validate_values:
                if self.options.invalid_value_action != ValidationAction.PASS:
                    # TODO: find a way to optionally cache the KgtkValue objects
                    # so we don't have to create them a second time in the conversion
                    # and iterator methods below.
                    if self._ignore_invalid_values(row, line):
                        continue
                if self.options.prohibited_list_action != ValidationAction.PASS:
                    if self._ignore_prohibited_lists(row, line):
                        continue
            self.data_lines_passed += 1
            # TODO: User a seperate option to control this.
            # if self.very_verbose:
            #     self.error_file.write(".")
            #     self.error_file.flush()
            return row
# This is both and iterable and an iterator object.
def __iter__(self)->typing.Iterator[typing.List[str]]:
return self
# Get the next row values as a list of strings.
# TODO: Convert integers, coordinates, etc. to Python types
def __next__(self)-> typing.List[str]:
return self.nextrow()
def concise_rows(self)->typing.Iterator[typing.List[typing.Optional[str]]]:
"""
Using a generator function, create an iterator that returns rows of fields
as strings. Empty fields will be returned as None.
"""
while True:
try:
row: typing.List[str] = self.nextrow()
except StopIteration:
return
# Copy the row, converting empty fields into None:
results: typing.List[typing.Optional[str]] = [ ]
field: str
for field in row:
if len(field) == 0:
results.append(None)
else:
results.append(field)
yield results
def to_kgtk_values(self, row: typing.List[str],
validate: bool = False,
parse_fields: bool = False)->typing.List[KgtkValue]:
"""
Convert an input row into a list of KgtkValue instances.
When validate is True, validate each KgtkValue object.
"""
results: typing.List[KgtkValue] = [ ]
field: str
for field in row:
kv = KgtkValue(field, options=self.value_options, parse_fields=parse_fields)
if validate:
kv.validate()
results.append(kv)
return results
def kgtk_values(self,
validate: bool = False,
parse_fields: bool = False
)->typing.Iterator[typing.List[KgtkValue]]:
"""
Using a generator function, create an iterator that returns rows of fields
as KgtkValue objects.
When validate is True, validate each KgtkValue object.
"""
while True:
try:
yield self.to_kgtk_values(self.nextrow(), validate=validate, parse_fields=parse_fields)
except StopIteration:
return
def to_concise_kgtk_values(self,
row: typing.List[str],
validate: bool = False,
parse_fields: bool = False
)->typing.List[typing.Optional[KgtkValue]]:
"""
Convert an input row into a list of KgtkValue instances. Empty fields will be returned as None.
When validate is True, validate each KgtkValue object.
"""
results: typing.List[typing.Optional[KgtkValue]] = [ ]
field: str
for field in row:
if len(field) == 0:
results.append(None)
else:
kv = KgtkValue(field, options=self.value_options, parse_fields=parse_fields)
if validate:
kv.validate()
results.append(kv)
return results
def concise_kgtk_values(self,
validate: bool = False,
parse_fields: bool = False
)->typing.Iterator[typing.List[typing.Optional[KgtkValue]]]:
"""
Using a generator function, create an iterator that returns rows of fields
as KgtkValue objects, with empty fields returned as None.
When validate is True, validate each KgtkValue object.
"""
while True:
try:
yield self.to_concise_kgtk_values(self.nextrow(), validate=validate)
except StopIteration:
return
def to_dict(self, row: typing.List[str], concise: bool=False
)->typing.Mapping[str, str]:
"""
Convert an input row into a dict of named fields.
If concise is True, then empty fields will be skipped.
"""
results: typing.MutableMapping[str, str] = { }
field: str
idx: int = 0
# We'll use two seperate loops in anticipation of a modest
# efficiency gain.
if concise:
for field in row:
if len(field) > 0:
results[self.column_names[idx]] = field
idx += 1
else:
for field in row:
results[self.column_names[idx]] = field
idx += 1
return results
def dicts(self, concise: bool=False
)->typing.Iterator[typing.Mapping[str, str]]:
"""
Using a generator function, create an iterator that returns each row as a dict of named fields.
If concise is True, then empty fields will be skipped.
"""
while True:
try:
yield self.to_dict(self.nextrow(), concise=concise)
except StopIteration:
return
def to_kgtk_value_dict(self,
row: typing.List[str],
validate: bool=False,
parse_fields: bool=False,
concise: bool=False
)->typing.Mapping[str, KgtkValue]:
"""
Convert an input row into a dict of named fields.
If concise is True, then empty fields will be skipped.
When validate is True, validate each KgtkValue object.
"""
results: typing.MutableMapping[str, KgtkValue] = { }
idx: int = 0
field: str
for field in row:
if concise and len(field) == 0:
pass # Skip the empty field.
else:
kv = KgtkValue(field, options=self.value_options, parse_fields=parse_fields)
if validate:
kv.validate()
results[self.column_names[idx]] = kv
idx += 1
return results
def kgtk_value_dicts(self,
validate: bool=False,
parse_fields: bool=False,
concise: bool=False
)->typing.Iterator[typing.Mapping[str, KgtkValue]]:
"""
Using a generator function, create an iterator that returns each row as a
dict of named KgtkValue objects.
If concise is True, then empty fields will be skipped.
When validate is True, validate each KgtkValue object.
"""
while True:
try:
yield self.to_kgtk_value_dict(self.nextrow(), validate=validate, parse_fields=parse_fields, concise=concise)
except StopIteration:
return
def _ignore_invalid_values(self, row: typing.List[str], line: str)->bool:
"""Give a row of values, validate each value. If we find one or more
validation problems, we might want to emit error messages and we might
want to ignore the entire row.
Returns True to indicate that the row should be ignored (skipped).
"""
problems: typing.List[str] = [ ] # Build a list of problems.
idx: int
item: str
for idx, item in enumerate(row):
if len(item) > 0: # Optimize the common case of empty columns.
kv: KgtkValue = KgtkValue(item, options=self.value_options)
if not kv.is_valid():
problems.append("col %d (%s) value '%s'is an %s" % (idx, self.column_names[idx], item, kv.describe()))
if kv.repaired:
# If this value was repaired, update the item in the row.
#
# Warning: We expect this change to be seen by the caller.
row[idx] = kv.value
if len(problems) == 0:
return False
return self.exclude_line(self.options.invalid_value_action,
"\n".join(problems),
line)
def _ignore_prohibited_list(self,
idx: int,
row: typing.List[str],
line: str,
problems: typing.List[str],
):
if idx < 0:
return
item: str = row[idx]
if KgtkFormat.LIST_SEPARATOR not in item:
return
if len(KgtkValue.split_list(item)) == 1:
return
problems.append("col %d (%s) value '%s'is a prohibited list" % (idx, self.column_names[idx], item))
def _ignore_prohibited_lists(self, row: typing.List[str], line: str)->bool:
"""
KGTK File Format v2 prohibits "|" lists in the node1, label, and node2 columns.
"""
problems: typing.List[str] = [ ] # Build a list of problems.
self._ignore_prohibited_list(self.node1_column_idx, row, line, problems)
self._ignore_prohibited_list(self.label_column_idx, row, line, problems)
self._ignore_prohibited_list(self.node2_column_idx, row, line, problems)
if len(problems) == 0:
return False
return self.exclude_line(self.options.invalid_value_action,
"\n".join(problems),
line)
# May be overridden
def _ignore_if_blank_fields(self, values: typing.List[str], line: str)->bool:
return False
# May be overridden
def _skip_reserved_fields(self, column_name)->bool:
return False
def additional_column_names(self)->typing.List[str]:
if self.is_edge_file:
return KgtkBase.additional_edge_columns(self.column_names)
elif self.is_node_file:
return KgtkBase.additional_node_columns(self.column_names)
else:
# TODO: throw a better exception.
raise ValueError("KgtkReader: Unknown Kgtk file type.")
def merge_columns(self, additional_columns: typing.List[str])->typing.List[str]:
"""
Return a list that merges the current column names with an additional set
of column names.
"""
merged_columns: typing.List[str] = self.column_names.copy()
column_name: str
for column_name in additional_columns:
if column_name in self.column_name_map:
continue
merged_columns.append(column_name)
return merged_columns
def get_node1_column_index(self, column_name: typing.Optional[str] = None)->int:
"""
Get the node1 column index, unless an overriding column
name is provided. Returns -1 if no column found.
"""
if column_name is None or len(column_name) == 0:
return self.node1_column_idx
else:
return self.column_name_map.get(column_name, -1)
def get_node1_canonical_name(self, column_name: typing.Optional[str]=None)->str:
"""
Get the canonical name for the node1 column, unless an
overriding name is provided.
"""
if column_name is not None and len(column_name) > 0:
return column_name
else:
return KgtkFormat.NODE1
def get_node1_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
"""
Get the actual name for the node1 column or its overriding column.
Return an empty string if the column was not found.
"""
idx: int = self.get_node1_column_index(column_name)
if idx >= 0:
return self.column_names[idx]
else:
return ""
def get_label_column_index(self, column_name: typing.Optional[str] = None)->int:
"""
Get the label column index, unless an overriding column
name is provided. Returns -1 if no column found.
"""
if column_name is None or len(column_name) == 0:
return self.label_column_idx
else:
return self.column_name_map.get(column_name, -1)
def get_label_canonical_name(self, column_name: typing.Optional[str]=None)->str:
"""
Get the canonical name for the label column, unless an
overriding name is provided.
"""
if column_name is not None and len(column_name) > 0:
return column_name
else:
return KgtkFormat.LABEL
def get_label_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
"""
Get the actual name for the label column or its overriding column.
Return an empty string if the column was not found.
"""
idx: int = self.get_label_column_index(column_name)
if idx >= 0:
return self.column_names[idx]
else:
return ""
def get_node2_column_index(self, column_name: typing.Optional[str] = None)->int:
"""
Get the node2 column index, unless an overriding column
name is provided. Returns -1 if no column found.
"""
if column_name is None or len(column_name) == 0:
return self.node2_column_idx
else:
return self.column_name_map.get(column_name, -1)
def get_node2_canonical_name(self, column_name: typing.Optional[str]=None)->str:
"""
Get the canonical name for the node2 column, unless an
overriding name is provided.
"""
if column_name is not None and len(column_name) > 0:
return column_name
else:
return KgtkFormat.NODE2
def get_node2_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
"""
Get the actual name for the node2 column or its overriding column.
Return an empty string if the column was not found.
"""
idx: int = self.get_node2_column_index(column_name)
if idx >= 0:
return self.column_names[idx]
else:
return ""
def get_id_column_index(self, column_name: typing.Optional[str] = None)->int:
"""
Get the id column index, unless an overriding column
name is provided. Returns -1 if no column found.
"""
if column_name is None or len(column_name) == 0:
return self.id_column_idx
else:
return self.column_name_map.get(column_name, -1)
def get_id_canonical_name(self, column_name: typing.Optional[str]=None)->str:
"""
Get the canonical name for the id column, unless an
overriding name is provided.
"""
if column_name is not None and len(column_name) > 0:
return column_name
else:
return KgtkFormat.ID
def get_id_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
"""
Get the actual name for the id column or its overriding column.
Return an empty string if the column was not found.
"""
idx: int = self.get_id_column_index(column_name)
if idx >= 0:
return self.column_names[idx]
else:
return ""
    @classmethod
    def add_debug_arguments(cls, parser: ArgumentParser, expert: bool = False):
        """
        Register the error/feedback command-line options on `parser`.
        When `expert` is False, most options are hidden from --help (via
        SUPPRESS) but remain functional.
        """
        # This helper function makes it easy to suppress options from
        # The help message. The options are still there, and initialize
        # what they need to initialize.
        def h(msg: str)->str:
            if expert:
                return msg
            else:
                return SUPPRESS
        egroup: _ArgumentGroup = parser.add_argument_group(h("Error and feedback messages"),
                                                          h("Send error messages and feedback to stderr or stdout, " +
                                                            "control the amount of feedback and debugging messages."))
        # Avoid the argparse bug that prevents these two arguments from having
        # their help messages suppressed directly.
        #
        # TODO: Is there a better fix?
        #
        # TODO: replace --errors-to-stdout and --errors-to-stderr with
        # --errors-to=stdout and --errors-to=stderr, using either an enum
        # or choices.  That will avoid the argparse bug, too.
        if expert:
            # Expert mode: make the two destinations mutually exclusive.
            errors_to = egroup.add_mutually_exclusive_group()
            errors_to.add_argument(      "--errors-to-stdout", dest="errors_to_stdout",
                                         help="Send errors to stdout instead of stderr",
                                         action="store_true")
            errors_to.add_argument(      "--errors-to-stderr", dest="errors_to_stderr",
                                         help="Send errors to stderr instead of stdout",
                                         action="store_true")
        else:
            # Non-expert mode: both options exist but their help is suppressed.
            egroup.add_argument(      "--errors-to-stderr", dest="errors_to_stderr",
                                      help=h("Send errors to stderr instead of stdout"),
                                      action="store_true")
            egroup.add_argument(      "--errors-to-stdout", dest="errors_to_stdout",
                                      help=h("Send errors to stdout instead of stderr"),
                                      action="store_true")
        egroup.add_argument(      "--show-options", dest="show_options", help=h("Print the options selected (default=%(default)s)."), action='store_true')
        egroup.add_argument("-v", "--verbose", dest="verbose", help="Print additional progress messages (default=%(default)s).", action='store_true')
        egroup.add_argument(      "--very-verbose", dest="very_verbose",
                                  help=h("Print additional progress messages (default=%(default)s)."),
                                  action='store_true')
def main():
    """
    Test the KGTK file reader: read a file with one of several iteration
    styles and report the number of lines read.
    """
    # The EdgeReader import is deferred to avoid circular imports.
    from kgtk.io.edgereader import EdgeReader
    # The NodeReader import is deferred to avoid circular imports.
    from kgtk.io.nodereader import NodeReader
    parser = ArgumentParser()
    parser.add_argument(dest="kgtk_file", help="The KGTK file to read", type=Path, nargs="?")
    KgtkReader.add_debug_arguments(parser, expert=True)
    parser.add_argument(      "--test", dest="test_method", help="The test to perform (default=%(default)s).",
                              choices=["rows", "concise-rows",
                                       "kgtk-values", "concise-kgtk-values",
                                       "dicts", "concise-dicts",
                                       "kgtk-value-dicts", "concise-kgtk-value-dicts"],
                              default="rows")
    parser.add_argument(      "--test-validate", dest="test_validate", help="Validate KgtkValue objects in test (default=%(default)s).",
                              type=optional_bool, nargs='?', const=True, default=False)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, validate_by_default=True, expert=True)
    KgtkValueOptions.add_arguments(parser, expert=True)
    args: Namespace = parser.parse_args()
    error_file: typing.TextIO = sys.stdout if args.errors_to_stdout else sys.stderr
    # Build the option structures.
    reader_options: KgtkReaderOptions = KgtkReaderOptions.from_args(args)
    value_options: KgtkValueOptions = KgtkValueOptions.from_args(args)
    if args.show_options:
        # Bug fix: the dest for --test is "test_method"; args.test does not
        # exist and raised AttributeError here.
        print("--test=%s" % str(args.test_method), file=error_file)
        print("--test-validate=%s" % str(args.test_validate), file=error_file)
        reader_options.show(out=error_file)
        value_options.show(out=error_file)
        print("=======", file=error_file, flush=True)
    kr: KgtkReader = KgtkReader.open(args.kgtk_file,
                                     error_file = error_file,
                                     options=reader_options,
                                     value_options=value_options,
                                     verbose=args.verbose,
                                     very_verbose=args.very_verbose)
    line_count: int = 0
    row: typing.List[str]
    kgtk_values: typing.List[KgtkValue]
    concise_kgtk_values: typing.List[typing.Optional[KgtkValue]]
    dict_row: typing.Mapping[str, str]
    # Bug fix: this dict holds KgtkValue objects, not strings.
    kgtk_value_dict: typing.Mapping[str, KgtkValue]
    if args.test_method == "rows":
        if args.verbose:
            print("Testing iterating over rows.", file=error_file, flush=True)
        for row in kr:
            line_count += 1
    elif args.test_method == "concise-rows":
        if args.verbose:
            print("Testing iterating over concise rows.", file=error_file, flush=True)
        for row in kr.concise_rows():
            line_count += 1
    elif args.test_method == "kgtk-values":
        if args.verbose:
            print("Testing iterating over KgtkValue rows.", file=error_file, flush=True)
        for kgtk_values in kr.kgtk_values(validate=args.test_validate):
            line_count += 1
    elif args.test_method == "concise-kgtk-values":
        if args.verbose:
            print("Testing iterating over concise KgtkValue rows.", file=error_file, flush=True)
        for kgtk_values in kr.concise_kgtk_values(validate=args.test_validate):
            line_count += 1
    elif args.test_method == "dicts":
        if args.verbose:
            print("Testing iterating over dicts.", file=error_file, flush=True)
        for dict_row in kr.dicts():
            line_count += 1
    elif args.test_method == "concise-dicts":
        if args.verbose:
            print("Testing iterating over concise dicts.", file=error_file, flush=True)
        for dict_row in kr.dicts(concise=True):
            line_count += 1
    elif args.test_method == "kgtk-value-dicts":
        if args.verbose:
            print("Testing iterating over KgtkValue dicts.", file=error_file, flush=True)
        for kgtk_value_dict in kr.kgtk_value_dicts(validate=args.test_validate):
            line_count += 1
    elif args.test_method == "concise-kgtk-value-dicts":
        if args.verbose:
            print("Testing iterating over concise KgtkValue dicts.", file=error_file, flush=True)
        for kgtk_value_dict in kr.kgtk_value_dicts(concise=True, validate=args.test_validate):
            line_count += 1
    print("Read %d lines" % line_count, file=error_file, flush=True)
if __name__ == "__main__":
    main()
| 49.238701 | 188 | 0.586486 |
from argparse import ArgumentParser, _ArgumentGroup, Namespace, SUPPRESS
import attr
import bz2
from enum import Enum
import gzip
import lz4
import lzma
from multiprocessing import Process, Queue
from pathlib import Path
import sys
import typing
from kgtk.kgtkformat import KgtkFormat
from kgtk.io.kgtkbase import KgtkBase
from kgtk.utils.argparsehelpers import optional_bool
from kgtk.utils.closableiter import ClosableIter, ClosableIterTextIOWrapper
from kgtk.utils.enumnameaction import EnumNameAction
from kgtk.utils.gzipprocess import GunzipProcess
from kgtk.utils.validationaction import ValidationAction
from kgtk.value.kgtkvalue import KgtkValue
from kgtk.value.kgtkvalueoptions import KgtkValueOptions, DEFAULT_KGTK_VALUE_OPTIONS
class KgtkReaderMode(Enum):
    """
    Selects how the reader treats the input with respect to the KGTK edge
    and node file layouts.
    """
    NONE = 0 # Treat the file as neither an edge file nor a node file.
    EDGE = 1 # Treat the file as a KGTK edge file.
    NODE = 2 # Treat the file as a KGTK node file.
    AUTO = 3 # Decide automatically, presumably from the header columns (node1 present => edge file) -- confirm in KgtkReader.open().
@attr.s(slots=True, frozen=True)
class KgtkReaderOptions():
    """
    Immutable bundle of options controlling how a KGTK file is read:
    header handling, data sampling, line/value repair and validation
    actions, and (de)compression settings.
    """
    ERROR_LIMIT_DEFAULT: int = 1000
    GZIP_QUEUE_SIZE_DEFAULT: int = GunzipProcess.GZIP_QUEUE_SIZE_DEFAULT
    # File-type selection (edge/node/auto-detect).
    mode: KgtkReaderMode = attr.ib(validator=attr.validators.instance_of(KgtkReaderMode), default=KgtkReaderMode.AUTO)
    column_separator: str = attr.ib(validator=attr.validators.instance_of(str), default=KgtkFormat.COLUMN_SEPARATOR)
    # Header handling: optionally override the file's column names.
    force_column_names: typing.Optional[typing.List[str]] = attr.ib(validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str),
                                                                                                                                     iterable_validator=attr.validators.instance_of(list))),
                                                                    default=None)
    skip_header_record: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    # Data sampling: skip leading records, take every nth, cap the total,
    # or keep only the tail.
    initial_skip_count: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    every_nth_record: int = attr.ib(validator=attr.validators.instance_of(int), default=1)
    record_limit: typing.Optional[int] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(int)), default=None)
    tail_count: typing.Optional[int] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(int)), default=None)
    error_limit: int = attr.ib(validator=attr.validators.instance_of(int), default=ERROR_LIMIT_DEFAULT)
    # Master switches for the line-level and value-level checks below.
    repair_and_validate_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    repair_and_validate_values: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    # Per-condition validation actions (pass/report/exclude/complain/error/exit).
    empty_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE)
    comment_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE)
    whitespace_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE)
    blank_required_field_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXCLUDE)
    short_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN)
    long_line_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN)
    header_error_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.EXIT)
    unsafe_column_name_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.REPORT)
    invalid_value_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN)
    prohibited_list_action: ValidationAction = attr.ib(validator=attr.validators.instance_of(ValidationAction), default=ValidationAction.COMPLAIN)
    # Line repair options.
    fill_short_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    truncate_long_lines: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    # Decompression options.
    compression_type: typing.Optional[str] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(str)), default=None)
    gzip_in_parallel: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    gzip_queue_size: int = attr.ib(validator=attr.validators.instance_of(int), default=GZIP_QUEUE_SIZE_DEFAULT)
@classmethod
def add_arguments(cls,
                  parser: ArgumentParser,
                  mode_options: bool = False,
                  default_mode: KgtkReaderMode = KgtkReaderMode.AUTO,
                  validate_by_default: bool = False,
                  expert: bool = False,
                  defaults: bool = True,
                  who: str = "",
):
    """Add the KgtkReaderOptions command-line arguments to a parser.

    parser: the ArgumentParser to extend.
    mode_options: when True, also add the --mode argument.
    default_mode: the default value for --mode.
    validate_by_default: the default for the repair-and-validate switches.
    expert: when False, suppress the help text for all of these arguments.
    defaults: when False, omit "default=" so unsupplied arguments appear
              as None (pairs with from_dict(..., fallback=True)).
    who: optional name prefix distinguishing multiple input files.
    """

    # Hide help text unless expert mode was requested.
    def h(msg: str)->str:
        if expert:
            return msg
        else:
            return SUPPRESS

    # Optionally build a "default=" keyword mapping.  Explicitly setting
    # "default=None" may fail, whereas omitting the "default=" phrase
    # succeeds.
    #
    # TODO: continue researching these issues.
    def d(default: typing.Any)->typing.Mapping[str, typing.Any]:
        if defaults:
            return {"default": default}
        else:
            return { }

    prefix1: str = "--" if len(who) == 0 else "--" + who + "-"  # Option name prefix.
    prefix2: str = "" if len(who) == 0 else who + "_"           # dest prefix.
    prefix3: str = "" if len(who) == 0 else who + ": "          # Help text prefix.
    prefix4: str = "" if len(who) == 0 else who + " file "      # Group description prefix.

    fgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "File options"),
                                                       h("Options affecting " + prefix4 + "processing."))

    fgroup.add_argument(prefix1 + "column-separator",
                        dest=prefix2 + "column_separator",
                        help=h(prefix3 + "Column separator (default=<TAB>)."), # TODO: provide the default with escapes, e.g. \t
                        type=str, **d(default=KgtkFormat.COLUMN_SEPARATOR))

    # TODO: use an Enum or add choices.
    fgroup.add_argument(prefix1 + "compression-type",
                        dest=prefix2 + "compression_type",
                        help=h(prefix3 + "Specify the compression type (default=%(default)s)."))

    fgroup.add_argument(prefix1 + "error-limit",
                        dest=prefix2 + "error_limit",
                        help=h(prefix3 + "The maximum number of errors to report before failing (default=%(default)s)"),
                        type=int, **d(default=cls.ERROR_LIMIT_DEFAULT))

    fgroup.add_argument(prefix1 + "gzip-in-parallel",
                        dest=prefix2 + "gzip_in_parallel",
                        metavar="optional True|False",
                        help=h(prefix3 + "Execute gzip in parallel (default=%(default)s)."),
                        type=optional_bool, nargs='?', const=True, **d(default=False))

    fgroup.add_argument(prefix1 + "gzip-queue-size",
                        dest=prefix2 + "gzip_queue_size",
                        help=h(prefix3 + "Queue size for parallel gzip (default=%(default)s)."),
                        type=int, **d(default=cls.GZIP_QUEUE_SIZE_DEFAULT))

    if mode_options:
        fgroup.add_argument(prefix1 + "mode",
                            dest=prefix2 + "mode",
                            help=h(prefix3 + "Determine the KGTK file mode (default=%(default)s)."),
                            type=KgtkReaderMode, action=EnumNameAction, **d(default_mode))

    hgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Header parsing"),
                                                       h("Options affecting " + prefix4 + "header parsing."))

    hgroup.add_argument(prefix1 + "force-column-names",
                        dest=prefix2 + "force_column_names",
                        help=h(prefix3 + "Force the column names (default=None)."),
                        nargs='+')

    hgroup.add_argument(prefix1 + "header-error-action",
                        dest=prefix2 + "header_error_action",
                        help=h(prefix3 + "The action to take when a header error is detected. Only ERROR or EXIT are supported (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXIT))

    hgroup.add_argument(prefix1 + "skip-header-record",
                        dest=prefix2 + "skip_header_record",
                        metavar="optional True|False",
                        help=h(prefix3 + "Skip the first record when forcing column names (default=%(default)s)."),
                        type=optional_bool, nargs='?', const=True, **d(default=False))

    hgroup.add_argument(prefix1 + "unsafe-column-name-action",
                        dest=prefix2 + "unsafe_column_name_action",
                        help=h(prefix3 + "The action to take when a column name is unsafe (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.REPORT))

    sgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Pre-validation sampling"),
                                                       h("Options affecting " + prefix4 + "pre-validation data line sampling."))

    sgroup.add_argument(prefix1 + "initial-skip-count",
                        dest=prefix2 + "initial_skip_count",
                        help=h(prefix3 + "The number of data records to skip initially (default=do not skip)."),
                        type=int, **d(default=0))

    sgroup.add_argument(prefix1 + "every-nth-record",
                        dest=prefix2 + "every_nth_record",
                        help=h(prefix3 + "Pass every nth record (default=pass all records)."),
                        type=int, **d(default=1))

    sgroup.add_argument(prefix1 + "record-limit",
                        dest=prefix2 + "record_limit",
                        help=h(prefix3 + "Limit the number of records read (default=no limit)."),
                        type=int, **d(default=None))

    sgroup.add_argument(prefix1 + "tail-count",
                        dest=prefix2 + "tail_count",
                        help=h(prefix3 + "Pass this number of records (default=no tail processing)."),
                        type=int, **d(default=None))

    lgroup: _ArgumentGroup = parser.add_argument_group(h(prefix3 + "Line parsing"),
                                                       h("Options affecting " + prefix4 + "data line parsing."))

    lgroup.add_argument(prefix1 + "repair-and-validate-lines",
                        dest=prefix2 + "repair_and_validate_lines",
                        metavar="optional True|False",
                        help=h(prefix3 + "Repair and validate lines (default=%(default)s)."),
                        type=optional_bool, nargs='?', const=True, **d(default=validate_by_default))

    lgroup.add_argument(prefix1 + "repair-and-validate-values",
                        dest=prefix2 + "repair_and_validate_values",
                        metavar="optional True|False",
                        help=h(prefix3 + "Repair and validate values (default=%(default)s)."),
                        type=optional_bool, nargs='?', const=True, **d(default=validate_by_default))

    lgroup.add_argument(prefix1 + "blank-required-field-line-action",
                        dest=prefix2 + "blank_required_field_line_action",
                        help=h(prefix3 + "The action to take when a line with a blank node1, node2, or id field (per mode) is detected (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE))

    lgroup.add_argument(prefix1 + "comment-line-action",
                        dest=prefix2 + "comment_line_action",
                        help=h(prefix3 + "The action to take when a comment line is detected (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE))

    lgroup.add_argument(prefix1 + "empty-line-action",
                        dest=prefix2 + "empty_line_action",
                        help=h(prefix3 + "The action to take when an empty line is detected (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE))

    lgroup.add_argument(prefix1 + "fill-short-lines",
                        dest=prefix2 + "fill_short_lines",
                        metavar="optional True|False",
                        help=h(prefix3 + "Fill missing trailing columns in short lines with empty values (default=%(default)s)."),
                        type=optional_bool, nargs='?', const=True, **d(default=False))

    lgroup.add_argument(prefix1 + "invalid-value-action",
                        dest=prefix2 + "invalid_value_action",
                        help=h(prefix3 + "The action to take when a data cell value is invalid (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN))

    lgroup.add_argument(prefix1 + "long-line-action",
                        dest=prefix2 + "long_line_action",
                        help=h(prefix3 + "The action to take when a long line is detected (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN))

    # Bug fix: the dest previously contained a space ("prohibited list_action"),
    # so the parsed value was stored under a name from_dict() never looks up.
    lgroup.add_argument(prefix1 + "prohibited-list-action",
                        dest=prefix2 + "prohibited_list_action",
                        help=h(prefix3 + "The action to take when a data cell contains a prohibited list (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN))

    lgroup.add_argument(prefix1 + "short-line-action",
                        dest=prefix2 + "short_line_action",
                        help=h(prefix3 + "The action to take when a short line is detected (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.COMPLAIN))

    lgroup.add_argument(prefix1 + "truncate-long-lines",
                        dest=prefix2 + "truncate_long_lines",
                        metavar="optional True|False", # Added for consistency with the other optional_bool switches.
                        help=h(prefix3 + "Remove excess trailing columns in long lines (default=%(default)s)."),
                        type=optional_bool, nargs='?', const=True, **d(default=False))

    lgroup.add_argument(prefix1 + "whitespace-line-action",
                        dest=prefix2 + "whitespace_line_action",
                        help=h(prefix3 + "The action to take when a whitespace line is detected (default=%(default)s)."),
                        type=ValidationAction, action=EnumNameAction, **d(default=ValidationAction.EXCLUDE))
# Build the value parsing option structure.
@classmethod
def from_dict(cls,
              d: dict,
              who: str = "",
              mode: typing.Optional[KgtkReaderMode] = None,
              fallback: bool = False,
)->'KgtkReaderOptions':
    """Build a KgtkReaderOptions from a dict (typically vars(args)).

    d: the source dict.
    who: when nonempty, prefer keys prefixed with "<who>_".
    mode: when supplied, overrides any mode found in d.
    fallback: when True, fall back to the unprefixed key before the default.
    """
    prefix: str = ""   # The destination name prefix.
    if len(who) > 0:
        prefix = who + "_"

    # TODO: Figure out how to type check this method.
    def lookup(name: str, default):
        # Prefer the prefixed key; optionally fall back to the bare key;
        # otherwise use the supplied default.  None values are treated as absent.
        prefixed_name = prefix + name
        if prefixed_name in d and d[prefixed_name] is not None:
            return d[prefixed_name]
        elif fallback and name in d and d[name] is not None:
            return d[name]
        else:
            return default

    reader_mode: KgtkReaderMode
    if mode is not None:
        reader_mode = mode
    else:
        reader_mode = lookup("mode", KgtkReaderMode.AUTO)

    # The lookup defaults below match the attrs field defaults and the
    # add_arguments() defaults.  (Previously header/short/long/invalid/
    # prohibited action defaults disagreed with both.)
    return cls(
        blank_required_field_line_action=lookup("blank_required_field_line_action", ValidationAction.EXCLUDE),
        column_separator=lookup("column_separator", KgtkFormat.COLUMN_SEPARATOR),
        comment_line_action=lookup("comment_line_action", ValidationAction.EXCLUDE),
        compression_type=lookup("compression_type", None),
        empty_line_action=lookup("empty_line_action", ValidationAction.EXCLUDE),
        error_limit=lookup("error_limit", cls.ERROR_LIMIT_DEFAULT),
        every_nth_record=lookup("every_nth_record", 1),
        fill_short_lines=lookup("fill_short_lines", False),
        force_column_names=lookup("force_column_names", None),
        gzip_in_parallel=lookup("gzip_in_parallel", False),
        gzip_queue_size=lookup("gzip_queue_size", KgtkReaderOptions.GZIP_QUEUE_SIZE_DEFAULT),
        header_error_action=lookup("header_error_action", ValidationAction.EXIT),
        initial_skip_count=lookup("initial_skip_count", 0),
        invalid_value_action=lookup("invalid_value_action", ValidationAction.COMPLAIN),
        long_line_action=lookup("long_line_action", ValidationAction.COMPLAIN),
        mode=reader_mode,
        prohibited_list_action=lookup("prohibited_list_action", ValidationAction.COMPLAIN),
        record_limit=lookup("record_limit", None),
        repair_and_validate_lines=lookup("repair_and_validate_lines", False),
        repair_and_validate_values=lookup("repair_and_validate_values", False),
        short_line_action=lookup("short_line_action", ValidationAction.COMPLAIN),
        skip_header_record=lookup("skip_header_record", False), # Bug fix: key was misspelled "skip_header_recordb".
        tail_count=lookup("tail_count", None),
        truncate_long_lines=lookup("truncate_long_lines", False),
        unsafe_column_name_action=lookup("unsafe_column_name_action", ValidationAction.REPORT),
        whitespace_line_action=lookup("whitespace_line_action", ValidationAction.EXCLUDE),
    )
# Build the value parsing option structure.
@classmethod
def from_args(cls,
              args: Namespace,
              who: str = "",
              mode: typing.Optional[KgtkReaderMode] = None,
              fallback: bool = False,
)->'KgtkReaderOptions':
    """Build a KgtkReaderOptions from parsed command-line arguments by
    delegating to from_dict() on the namespace's attribute dict."""
    arg_map: dict = vars(args)
    return cls.from_dict(arg_map, who=who, mode=mode, fallback=fallback)
def show(self, who: str="", out: typing.TextIO=sys.stderr):
    """Print the current option settings, one per line, spelled as their
    command-line options (prefixed with who when given).

    who: optional option-name prefix.
    out: destination stream (default stderr).
    """
    prefix: str = "--" if len(who) == 0 else "--" + who + "-"
    print("%smode=%s" % (prefix, self.mode.name), file=out)
    print("%scolumn-separator=%s" % (prefix, repr(self.column_separator)), file=out)
    if self.force_column_names is not None:
        print("%sforce-column-names=%s" % (prefix, " ".join(self.force_column_names)), file=out)
    print("%sskip-header-record=%s" % (prefix, str(self.skip_header_record)), file=out)
    print("%serror-limit=%s" % (prefix, str(self.error_limit)), file=out)
    print("%srepair-and-validate-lines=%s" % (prefix, str(self.repair_and_validate_lines)), file=out)
    print("%srepair-and-validate-values=%s" % (prefix, str(self.repair_and_validate_values)), file=out)
    print("%sempty-line-action=%s" % (prefix, self.empty_line_action.name), file=out)
    print("%scomment-line-action=%s" % (prefix, self.comment_line_action.name), file=out)
    print("%swhitespace-line-action=%s" % (prefix, self.whitespace_line_action.name), file=out)
    print("%sblank-required-field-line-action=%s" % (prefix, self.blank_required_field_line_action.name), file=out)
    print("%sshort-line-action=%s" % (prefix, self.short_line_action.name), file=out)
    print("%slong-line-action=%s" % (prefix, self.long_line_action.name), file=out)
    print("%sheader-error-action=%s" % (prefix, self.header_error_action.name), file=out)
    print("%sunsafe-column-name-action=%s" % (prefix, self.unsafe_column_name_action.name), file=out)
    print("%sinvalid-value-action=%s" % (prefix, self.invalid_value_action.name), file=out)
    print("%sinitial-skip-count=%s" % (prefix, str(self.initial_skip_count)), file=out)
    print("%severy-nth-record=%s" % (prefix, str(self.every_nth_record)), file=out)
    if self.record_limit is not None:
        print("%srecord-limit=%s" % (prefix, str(self.record_limit)), file=out)
    if self.tail_count is not None:
        print("%stail-count=%s" % (prefix, str(self.tail_count)), file=out)
    # Bug fix: a duplicate initial-skip-count line was printed here.
    print("%sprohibited-list-action=%s" % (prefix, self.prohibited_list_action.name), file=out)
    print("%sfill-short-lines=%s" % (prefix, str(self.fill_short_lines)), file=out)
    print("%struncate-long-lines=%s" % (prefix, str(self.truncate_long_lines)), file=out)
    if self.compression_type is not None:
        print("%scompression-type=%s" % (prefix, str(self.compression_type)), file=out)
    print("%sgzip-in-parallel=%s" % (prefix, str(self.gzip_in_parallel)), file=out)
    print("%sgzip-queue-size=%s" % (prefix, str(self.gzip_queue_size)), file=out)
DEFAULT_KGTK_READER_OPTIONS: KgtkReaderOptions = KgtkReaderOptions()
@attr.s(slots=True, frozen=False)
class KgtkReader(KgtkBase, ClosableIter[typing.List[str]]):
    """Read a KGTK edge or node file, iterating over its data rows.

    Instances are normally created through KgtkReader.open(), which picks
    the EdgeReader or NodeReader subclass as appropriate.
    """
    # Where the data comes from; None means standard input (see _openfile).
    file_path: typing.Optional[Path] = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Path)))
    source: ClosableIter[str] = attr.ib() # Todo: validate

    # TODO: Fix this validator:
    # options: KgtkReaderOptions = attr.ib(validator=attr.validators.instance_of(KgtkReaderOptions))
    options: KgtkReaderOptions = attr.ib()

    value_options: KgtkValueOptions = attr.ib(validator=attr.validators.instance_of(KgtkValueOptions))

    # The header's column names, in file order:
    column_names: typing.List[str] = attr.ib(validator=attr.validators.deep_iterable(member_validator=attr.validators.instance_of(str),
                                                                                     iterable_validator=attr.validators.instance_of(list)))
    # For convenience, the count of columns. This is the same as len(column_names).
    column_count: int = attr.ib(validator=attr.validators.instance_of(int))

    # Map from column name to column index:
    column_name_map: typing.Mapping[str, int] = attr.ib(validator=attr.validators.deep_mapping(key_validator=attr.validators.instance_of(str),
                                                                                               value_validator=attr.validators.instance_of(int)))

    # The actual mode used.
    #
    # TODO: fix the validator.
    # mode: KgtkReaderMode = attr.ib(validator=attr.validators.instance_of(KgtkReaderMode), default=KgtkReaderMode.NONE)
    mode: KgtkReaderMode = attr.ib(default=KgtkReaderMode.NONE)

    # The index of the mandatory/aliased columns. -1 means missing:
    node1_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file
    label_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file
    node2_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # edge file
    id_column_idx: int = attr.ib(validator=attr.validators.instance_of(int), default=-1) # node file

    # Counters maintained by nextrow() and exclude_line():
    data_lines_read: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    data_lines_skipped: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    data_lines_passed: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    data_lines_ignored: int = attr.ib(validator=attr.validators.instance_of(int), default=0)
    data_errors_reported: int = attr.ib(validator=attr.validators.instance_of(int), default=0)

    # Is this an edge file or a node file?
    is_edge_file: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    is_node_file: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)

    # Feedback and error output:
    error_file: typing.TextIO = attr.ib(default=sys.stderr)
    verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
    very_verbose: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
@classmethod
def _default_options(
    cls,
    options: typing.Optional[KgtkReaderOptions] = None,
    value_options: typing.Optional[KgtkValueOptions] = None,
)->typing.Tuple[KgtkReaderOptions, KgtkValueOptions]:
    """Replace a None options argument with the shared default instance,
    returning (reader options, value options)."""
    resolved_options: KgtkReaderOptions = DEFAULT_KGTK_READER_OPTIONS if options is None else options
    resolved_value_options: KgtkValueOptions = DEFAULT_KGTK_VALUE_OPTIONS if value_options is None else value_options
    return (resolved_options, resolved_value_options)
@classmethod
def open(cls,
         file_path: typing.Optional[Path],
         who: str = "input",
         error_file: typing.TextIO = sys.stderr,
         mode: typing.Optional[KgtkReaderMode] = None,
         options: typing.Optional[KgtkReaderOptions] = None,
         value_options: typing.Optional[KgtkValueOptions] = None,
         verbose: bool = False,
         very_verbose: bool = False)->"KgtkReader":
    """Open a KGTK file and return a reader for it.

    Reads and parses the header, checks the column names, determines
    whether this is an edge or node file (per mode, AUTO by default),
    and returns an EdgeReader, NodeReader, or plain KgtkReader.

    file_path: the input path; None or "-" reads standard input.
    who: a name for this input, used in error messages.
    mode: overrides options.mode when supplied.
    options / value_options: None selects the shared defaults.
    """
    # Supply the default reader and value options:
    (options, value_options) = cls._default_options(options, value_options)

    source: ClosableIter[str] = cls._openfile(file_path, options=options, error_file=error_file, verbose=verbose)

    # Read the kgtk file header and split it into column names. We get the
    # header back, too, for use in debugging and error messages.
    header: str
    column_names: typing.List[str]
    (header, column_names) = cls._build_column_names(source, options, error_file=error_file, verbose=verbose)

    # Check for unsafe column names.
    cls.check_column_names(column_names,
                           header_line=header,
                           who=who,
                           error_action=options.unsafe_column_name_action,
                           error_file=error_file)

    # Build a map from column name to column index.
    column_name_map: typing.Mapping[str, int] = cls.build_column_name_map(column_names,
                                                                          header_line=header,
                                                                          who=who,
                                                                          error_action=options.header_error_action,
                                                                          error_file=error_file)

    # Should we automatically determine if this is an edge file or a node file?
    if mode is None:
        mode = options.mode
    is_edge_file: bool = False
    is_node_file: bool = False
    if mode is KgtkReaderMode.AUTO:
        # If we have a node1 (or alias) column, then this must be an edge file. Otherwise, assume it is a node file.
        node1_idx: int = cls.get_column_idx(cls.NODE1_COLUMN_NAMES,
                                            column_name_map,
                                            header_line=header,
                                            who=who,
                                            error_action=options.header_error_action,
                                            error_file=error_file,
                                            is_optional=True)
        if node1_idx >= 0:
            is_edge_file = True
            is_node_file = False
            if verbose:
                print("%s column found, this is a KGTK edge file" % column_names[node1_idx], file=error_file, flush=True)
        else:
            is_edge_file = False
            is_node_file = True
            if verbose:
                print("node1 column not found, assuming this is a KGTK node file", file=error_file, flush=True)
    elif mode is KgtkReaderMode.EDGE:
        is_edge_file = True
    elif mode is KgtkReaderMode.NODE:
        is_node_file = True
    elif mode is KgtkReaderMode.NONE:
        pass

    # Get the indices of the special columns.
    node1_column_idx: int
    label_column_idx: int
    node2_column_idx: int
    id_column_idx: int
    (node1_column_idx,
     label_column_idx,
     node2_column_idx,
     id_column_idx) = cls.get_special_columns(column_name_map,
                                              header_line=header,
                                              who=who,
                                              error_action=options.header_error_action,
                                              error_file=error_file,
                                              is_edge_file=is_edge_file,
                                              is_node_file=is_node_file)
    if verbose:
        print("KgtkReader: Special columns: node1=%d label=%d node2=%d id=%d" % (node1_column_idx,
                                                                                 label_column_idx,
                                                                                 node2_column_idx,
                                                                                 id_column_idx), file=error_file, flush=True)

    # Rebind cls to the appropriate subclass; the imports are deferred
    # to avoid circular imports.
    if is_edge_file:
        # We'll instantiate an EdgeReader, which is a subclass of KgtkReader.
        from kgtk.io.edgereader import EdgeReader
        if verbose:
            print("KgtkReader: Reading an edge file.", file=error_file, flush=True)
        cls = EdgeReader
    elif is_node_file:
        from kgtk.io.nodereader import NodeReader
        if verbose:
            # Bug fix: message previously read "an node file".
            print("KgtkReader: Reading a node file.", file=error_file, flush=True)
        cls = NodeReader

    return cls(file_path=file_path,
               source=source,
               column_names=column_names,
               column_name_map=column_name_map,
               column_count=len(column_names),
               mode=mode,
               node1_column_idx=node1_column_idx,
               label_column_idx=label_column_idx,
               node2_column_idx=node2_column_idx,
               id_column_idx=id_column_idx,
               error_file=error_file,
               options=options,
               value_options=value_options,
               is_edge_file=is_edge_file,
               is_node_file=is_node_file,
               verbose=verbose,
               very_verbose=very_verbose,
               )
@classmethod
def _open_compressed_file(cls,
                          compression_type: str,
                          file_name: str,
                          file_or_path: typing.Union[Path, typing.TextIO],
                          who: str,
                          error_file: typing.TextIO,
                          verbose: bool)->typing.TextIO:
    """Open file_or_path for text reading through the decompressor named
    by compression_type (gz, bz2, xz, or lz4, with or without a leading
    dot).  Raises ValueError for an unrecognized type."""
    # Normalize ".gz" and "gz" to a single spelling.
    suffix: str = compression_type[1:] if compression_type.startswith(".") else compression_type

    # TODO: find a better way to coerce typing.IO[Any] to typing.TextIO
    if suffix == "gz":
        if verbose:
            print("%s: reading gzip %s" % (who, file_name), file=error_file, flush=True)
        return gzip.open(file_or_path, mode="rt") # type: ignore
    if suffix == "bz2":
        if verbose:
            print("%s: reading bz2 %s" % (who, file_name), file=error_file, flush=True)
        return bz2.open(file_or_path, mode="rt") # type: ignore
    if suffix == "xz":
        if verbose:
            print("%s: reading lzma %s" % (who, file_name), file=error_file, flush=True)
        return lzma.open(file_or_path, mode="rt") # type: ignore
    if suffix == "lz4":
        if verbose:
            print("%s: reading lz4 %s" % (who, file_name), file=error_file, flush=True)
        return lz4.frame.open(file_or_path, mode="rt") # type: ignore

    # TODO: throw a better exception.
    raise ValueError("%s: Unexpected compression_type '%s'" % (who, compression_type))
@classmethod
def _openfile(cls,
              file_path: typing.Optional[Path],
              options: KgtkReaderOptions,
              error_file: typing.TextIO,
              verbose: bool)->ClosableIter[str]:
    """Open the input as a closable line iterator.

    None or "-" selects standard input.  An explicit
    options.compression_type takes precedence over the file suffix;
    otherwise .gz/.bz2/.xz/.lz4 suffixes select a decompressor.
    When options.gzip_in_parallel is set, decompression runs in a
    separate GunzipProcess feeding a bounded queue.
    """
    who: str = cls.__name__

    # Standard input?
    if file_path is None or str(file_path) == "-":
        if options.compression_type is not None and len(options.compression_type) > 0:
            return ClosableIterTextIOWrapper(cls._open_compressed_file(options.compression_type, "-", sys.stdin, who, error_file, verbose))
        else:
            if verbose:
                print("%s: reading stdin" % who, file=error_file, flush=True)
            return ClosableIterTextIOWrapper(sys.stdin)

    if verbose:
        print("%s: File_path.suffix: %s" % (who, file_path.suffix), file=error_file, flush=True)

    gzip_file: typing.TextIO
    if options.compression_type is not None and len(options.compression_type) > 0:
        # An explicit compression type overrides the file suffix.
        gzip_file = cls._open_compressed_file(options.compression_type, str(file_path), file_path, who, error_file, verbose)
    elif file_path.suffix in [".bz2", ".gz", ".lz4", ".xz"]:
        gzip_file = cls._open_compressed_file(file_path.suffix, str(file_path), file_path, who, error_file, verbose)
    else:
        # Uncompressed file: no parallel-gzip handling applies.
        if verbose:
            print("%s: reading file %s" % (who, str(file_path)), file=error_file, flush=True)
        return ClosableIterTextIOWrapper(open(file_path, "r"))

    if options.gzip_in_parallel:
        # Decompress in a separate process, passing lines through a queue.
        gzip_thread: GunzipProcess = GunzipProcess(gzip_file, Queue(options.gzip_queue_size))
        gzip_thread.start()
        return gzip_thread
    else:
        return ClosableIterTextIOWrapper(gzip_file)
@classmethod
def _build_column_names(cls,
                        source: ClosableIter[str],
                        options: KgtkReaderOptions,
                        error_file: typing.TextIO,
                        verbose: bool = False,
)->typing.Tuple[str, typing.List[str]]:
    """Determine the column names, either forced via the options or read
    from the file's first line.  Returns (header line, column names).

    Raises ValueError when the input has no header line to read or skip.
    """
    if options.force_column_names is not None:
        # The column names are supplied by the options.  Optionally skip
        # the file's own header record; a file without a header record
        # should leave skip_header_record unset.
        if options.skip_header_record:
            try:
                next(source)
            except StopIteration:
                raise ValueError("No header line to skip")
        return options.column_separator.join(options.force_column_names), options.force_column_names

    # Read the header from the first line, stripping end-of-line characters.
    #
    # TODO: if the read fails, throw a more useful exception with the line number.
    try:
        header: str = next(source).rstrip("\r\n")
    except StopIteration:
        raise ValueError("No header line in file")
    if verbose:
        print("header: %s" % header, file=error_file, flush=True)

    # Split the header line into column names.
    return header, header.split(options.column_separator)
def close(self):
    """Close the underlying input source."""
    self.source.close()
def exclude_line(self, action: ValidationAction, msg: str, line: str)->bool:
    """Apply a validation action to a problematic line.

    Returns True when the caller should exclude the line, False when it
    should be passed through.  REPORT and COMPLAIN also print the
    problem and count it against options.error_limit (raising ValueError
    when the limit is reached); ERROR raises immediately; EXIT prints
    and terminates the process.
    """
    result: bool
    if action == ValidationAction.PASS:
        return False # Silently pass the line through
    elif action == ValidationAction.REPORT:
        result = False # Report the issue then pass the line.
    elif action == ValidationAction.EXCLUDE:
        return True # Silently exclude the line
    elif action == ValidationAction.COMPLAIN:
        result = True # Report the issue then exclude the line.
    elif action == ValidationAction.ERROR:
        # Immediately raise an exception.
        raise ValueError("In input data line %d, %s: %s" % (self.data_lines_read, msg, line))
    elif action == ValidationAction.EXIT:
        print("Data line %d:\n%s\n%s" % (self.data_lines_read, line, msg), file=self.error_file, flush=True)
        sys.exit(1)

    # REPORT and COMPLAIN fall through to here.
    # (A stale commented-out duplicate of this print was removed.)
    print("Data line %d:\n%s\n%s" % (self.data_lines_read, line, msg), file=self.error_file, flush=True)
    self.data_errors_reported += 1
    if self.options.error_limit > 0 and self.data_errors_reported >= self.options.error_limit:
        raise ValueError("Too many data errors, exiting.")
    return result
# Get the next edge values as a list of strings.
def nextrow(self)-> typing.List[str]:
    """Return the next data row as a list of strings, applying the
    configured sampling, line-repair, and validation options.

    Raises StopIteration (after closing the source) when the input is
    exhausted or options.record_limit is reached.  Updates the
    data_lines_read/skipped/passed counters.
    """
    row: typing.List[str]

    # Cache frequently consulted options in locals.
    repair_and_validate_lines: bool = self.options.repair_and_validate_lines
    repair_and_validate_values: bool = self.options.repair_and_validate_values

    # Compute the initial skip count.
    skip_count: int = self.options.initial_skip_count
    if self.options.record_limit is not None and self.options.tail_count is not None:
        # Compute the tail count: skip everything before the final
        # tail_count records of the limited range.
        tail_skip_count: int = self.options.record_limit - self.options.tail_count
        if tail_skip_count > skip_count:
            skip_count = tail_skip_count # Take the larger skip count.

    # This loop accommodates lines that are ignored.
    while (True):
        # Has a record limit been specified and have we reached it?
        if self.options.record_limit is not None:
            if self.data_lines_read >= self.options.record_limit:
                # Close the source and stop the iteration.
                self.source.close() # Do we need to guard against repeating this call?
                raise StopIteration

        # Read a line from the source.
        line: str
        try:
            line = next(self.source) # Will throw StopIteration
        except StopIteration as e:
            # Close the input file!
            #
            # TODO: implement a close() routine and/or whatever it takes to support "with".
            self.source.close() # Do we need to guard against repeating this call?
            raise e

        # Count the data line read.
        self.data_lines_read += 1

        # Data sampling: initial skip, then every-nth filtering.
        if self.data_lines_read <= skip_count:
            self.data_lines_skipped += 1
            continue
        if self.options.every_nth_record > 1:
            if self.data_lines_read % self.options.every_nth_record != 0:
                self.data_lines_skipped += 1
                continue

        # Strip the end-of-line characters:
        line = line.rstrip("\r\n")

        if repair_and_validate_lines:
            # TODO: Use a separate option to control this.
            if self.very_verbose:
                print("'%s'" % line, file=self.error_file, flush=True)

            # Ignore empty lines.
            if self.options.empty_line_action != ValidationAction.PASS and len(line) == 0:
                if self.exclude_line(self.options.empty_line_action, "saw an empty line", line):
                    continue

            # Ignore comment lines:
            if self.options.comment_line_action != ValidationAction.PASS and line[0] == self.COMMENT_INDICATOR:
                if self.exclude_line(self.options.comment_line_action, "saw a comment line", line):
                    continue

            # Ignore whitespace lines:
            if self.options.whitespace_line_action != ValidationAction.PASS and line.isspace():
                if self.exclude_line(self.options.whitespace_line_action, "saw a whitespace line", line):
                    continue

        row = line.split(self.options.column_separator)

        if repair_and_validate_lines:
            # Optionally fill missing trailing columns with empty values:
            if self.options.fill_short_lines and len(row) < self.column_count:
                while len(row) < self.column_count:
                    row.append("")

            # Optionally remove extra trailing columns:
            if self.options.truncate_long_lines and len(row) > self.column_count:
                row = row[:self.column_count]

            # Optionally validate that the line contained the right number of columns:
            #
            # When we report line numbers in error messages, line 1 is the first line after the header line.
            if self.options.short_line_action != ValidationAction.PASS and len(row) < self.column_count:
                if self.exclude_line(self.options.short_line_action,
                                     "Required %d columns, saw %d: '%s'" % (self.column_count,
                                                                            len(row),
                                                                            line),
                                     line):
                    continue

            if self.options.long_line_action != ValidationAction.PASS and len(row) > self.column_count:
                if self.exclude_line(self.options.long_line_action,
                                     "Required %d columns, saw %d (%d extra): '%s'" % (self.column_count,
                                                                                       len(row),
                                                                                       len(row) - self.column_count,
                                                                                       line),
                                     line):
                    continue

            # Subclasses may exclude lines with blank required fields:
            if self._ignore_if_blank_fields(row, line):
                continue

        if repair_and_validate_values:
            if self.options.invalid_value_action != ValidationAction.PASS:
                # TODO: find a way to optionally cache the KgtkValue objects
                # so we don't have to create them a second time in the conversion
                # methods.
                if self._ignore_invalid_values(row, line):
                    continue

            if self.options.prohibited_list_action != ValidationAction.PASS:
                if self._ignore_prohibited_lists(row, line):
                    continue

        self.data_lines_passed += 1
        return row
def __iter__(self)->typing.Iterator[typing.List[str]]:
    # A KgtkReader is its own iterator.
    return self
def __next__(self)-> typing.List[str]:
    # Iteration yields data rows as lists of strings.
    return self.nextrow()
def concise_rows(self)->typing.Iterator[typing.List[typing.Optional[str]]]:
    """Iterate over data rows with empty fields replaced by None."""
    while True:
        try:
            next_row: typing.List[str] = self.nextrow()
        except StopIteration:
            return
        yield [value if len(value) > 0 else None for value in next_row]
def to_kgtk_values(self, row: typing.List[str],
                   validate: bool = False,
                   parse_fields: bool = False)->typing.List[KgtkValue]:
    """Convert a row of strings into KgtkValue objects, optionally
    validating each one."""
    kgtk_values: typing.List[KgtkValue] = [
        KgtkValue(value, options=self.value_options, parse_fields=parse_fields)
        for value in row
    ]
    if validate:
        kgtk_value: KgtkValue
        for kgtk_value in kgtk_values:
            kgtk_value.validate()
    return kgtk_values
def kgtk_values(self,
                validate: bool = False,
                parse_fields: bool = False
)->typing.Iterator[typing.List[KgtkValue]]:
    """Iterate over data rows as lists of KgtkValue objects."""
    while True:
        try:
            next_row: typing.List[str] = self.nextrow()
        except StopIteration:
            return
        yield self.to_kgtk_values(next_row, validate=validate, parse_fields=parse_fields)
def to_concise_kgtk_values(self,
                           row: typing.List[str],
                           validate: bool = False,
                           parse_fields: bool = False
)->typing.List[typing.Optional[KgtkValue]]:
    """Convert a row into KgtkValue objects, mapping empty fields to None
    and optionally validating the non-empty ones."""
    converted: typing.List[typing.Optional[KgtkValue]] = [ ]
    value: str
    for value in row:
        if len(value) == 0:
            converted.append(None)
            continue
        kgtk_value = KgtkValue(value, options=self.value_options, parse_fields=parse_fields)
        if validate:
            kgtk_value.validate()
        converted.append(kgtk_value)
    return converted
def concise_kgtk_values(self,
                        validate: bool = False,
                        parse_fields: bool = False
)->typing.Iterator[typing.List[typing.Optional[KgtkValue]]]:
    """Iterate over data rows as lists of optional KgtkValues, with
    empty fields mapped to None.

    validate: when True, validate each non-empty value.
    parse_fields: when True, parse each value's fields.
    """
    while True:
        try:
            # Bug fix: parse_fields was previously accepted but not
            # forwarded, so it was silently ignored.
            yield self.to_concise_kgtk_values(self.nextrow(), validate=validate, parse_fields=parse_fields)
        except StopIteration:
            return
def to_dict(self, row: typing.List[str], concise: bool=False
)->typing.Mapping[str, str]:
    """Map this reader's column names to the values of a row.  When
    concise is True, empty fields are omitted from the result."""
    results: typing.MutableMapping[str, str] = { }
    column_idx: int
    value: str
    # Branch once on "concise" rather than testing it for every field.
    if concise:
        for column_idx, value in enumerate(row):
            if len(value) > 0:
                results[self.column_names[column_idx]] = value
    else:
        for column_idx, value in enumerate(row):
            results[self.column_names[column_idx]] = value
    return results
def dicts(self, concise: bool=False
)->typing.Iterator[typing.Mapping[str, str]]:
    """Iterate over data rows as column-name -> value mappings."""
    while True:
        try:
            next_row: typing.List[str] = self.nextrow()
        except StopIteration:
            return
        yield self.to_dict(next_row, concise=concise)
def to_kgtk_value_dict(self,
                       row: typing.List[str],
                       validate: bool=False,
                       parse_fields: bool=False,
                       concise: bool=False
)->typing.Mapping[str, KgtkValue]:
    """Map column names to KgtkValue objects for a row.  When concise is
    True, empty fields are omitted from the result."""
    results: typing.MutableMapping[str, KgtkValue] = { }
    column_idx: int
    value: str
    for column_idx, value in enumerate(row):
        if not (concise and len(value) == 0): # Skip empty fields in concise mode.
            kgtk_value = KgtkValue(value, options=self.value_options, parse_fields=parse_fields)
            if validate:
                kgtk_value.validate()
            results[self.column_names[column_idx]] = kgtk_value
    return results
def kgtk_value_dicts(self,
                     validate: bool=False,
                     parse_fields: bool=False,
                     concise: bool=False
)->typing.Iterator[typing.Mapping[str, KgtkValue]]:
    """Iterate over data rows as column-name -> KgtkValue mappings."""
    while True:
        try:
            next_row: typing.List[str] = self.nextrow()
        except StopIteration:
            return
        yield self.to_kgtk_value_dict(next_row, validate=validate, parse_fields=parse_fields, concise=concise)
def _ignore_invalid_values(self, row: typing.List[str], line: str)->bool:
    """Validate each non-empty field of a row, repairing values in place
    where possible.

    Returns True when the line should be excluded per
    options.invalid_value_action.  Note: repaired values are written
    back into row, and we expect the caller to see that change.
    """
    problems: typing.List[str] = [ ] # Build a list of problems.
    idx: int
    item: str
    for idx, item in enumerate(row):
        if len(item) > 0: # Optimize the common case of empty columns.
            kv: KgtkValue = KgtkValue(item, options=self.value_options)
            if not kv.is_valid():
                # Bug fix: the message previously lacked a space after the quoted value.
                problems.append("col %d (%s) value '%s' is an %s" % (idx, self.column_names[idx], item, kv.describe()))
            if kv.repaired:
                # If this value was repaired, update the item in the row.
                row[idx] = kv.value
    if len(problems) == 0:
        return False
    return self.exclude_line(self.options.invalid_value_action,
                             "\n".join(problems),
                             line)
def _ignore_prohibited_list(self,
idx: int,
row: typing.List[str],
line: str,
problems: typing.List[str],
):
if idx < 0:
return
item: str = row[idx]
if KgtkFormat.LIST_SEPARATOR not in item:
return
if len(KgtkValue.split_list(item)) == 1:
return
problems.append("col %d (%s) value '%s'is a prohibited list" % (idx, self.column_names[idx], item))
def _ignore_prohibited_lists(self, row: typing.List[str], line: str)->bool:
problems: typing.List[str] = [ ] # Build a list of problems.
self._ignore_prohibited_list(self.node1_column_idx, row, line, problems)
self._ignore_prohibited_list(self.label_column_idx, row, line, problems)
self._ignore_prohibited_list(self.node2_column_idx, row, line, problems)
if len(problems) == 0:
return False
return self.exclude_line(self.options.invalid_value_action,
"\n".join(problems),
line)
# May be overridden
    def _ignore_if_blank_fields(self, values: typing.List[str], line: str)->bool:
        """Hook for subclasses: return True to exclude a line because of
        blank required fields.  The base implementation keeps every line."""
        return False
# May be overridden
    def _skip_reserved_fields(self, column_name)->bool:
        """Hook for subclasses: return True to skip the named column as
        reserved.  The base implementation never skips a column."""
        return False
def additional_column_names(self)->typing.List[str]:
if self.is_edge_file:
return KgtkBase.additional_edge_columns(self.column_names)
elif self.is_node_file:
return KgtkBase.additional_node_columns(self.column_names)
else:
# TODO: throw a better exception.
raise ValueError("KgtkReader: Unknown Kgtk file type.")
def merge_columns(self, additional_columns: typing.List[str])->typing.List[str]:
merged_columns: typing.List[str] = self.column_names.copy()
column_name: str
for column_name in additional_columns:
if column_name in self.column_name_map:
continue
merged_columns.append(column_name)
return merged_columns
def get_node1_column_index(self, column_name: typing.Optional[str] = None)->int:
if column_name is None or len(column_name) == 0:
return self.node1_column_idx
else:
return self.column_name_map.get(column_name, -1)
def get_node1_canonical_name(self, column_name: typing.Optional[str]=None)->str:
if column_name is not None and len(column_name) > 0:
return column_name
else:
return KgtkFormat.NODE1
def get_node1_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
idx: int = self.get_node1_column_index(column_name)
if idx >= 0:
return self.column_names[idx]
else:
return ""
def get_label_column_index(self, column_name: typing.Optional[str] = None)->int:
if column_name is None or len(column_name) == 0:
return self.label_column_idx
else:
return self.column_name_map.get(column_name, -1)
def get_label_canonical_name(self, column_name: typing.Optional[str]=None)->str:
if column_name is not None and len(column_name) > 0:
return column_name
else:
return KgtkFormat.LABEL
def get_label_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
idx: int = self.get_label_column_index(column_name)
if idx >= 0:
return self.column_names[idx]
else:
return ""
def get_node2_column_index(self, column_name: typing.Optional[str] = None)->int:
if column_name is None or len(column_name) == 0:
return self.node2_column_idx
else:
return self.column_name_map.get(column_name, -1)
def get_node2_canonical_name(self, column_name: typing.Optional[str]=None)->str:
if column_name is not None and len(column_name) > 0:
return column_name
else:
return KgtkFormat.NODE2
def get_node2_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
idx: int = self.get_node2_column_index(column_name)
if idx >= 0:
return self.column_names[idx]
else:
return ""
def get_id_column_index(self, column_name: typing.Optional[str] = None)->int:
if column_name is None or len(column_name) == 0:
return self.id_column_idx
else:
return self.column_name_map.get(column_name, -1)
def get_id_canonical_name(self, column_name: typing.Optional[str]=None)->str:
if column_name is not None and len(column_name) > 0:
return column_name
else:
return KgtkFormat.ID
def get_id_column_actual_name(self, column_name: typing.Optional[str]=None)->str:
idx: int = self.get_id_column_index(column_name)
if idx >= 0:
return self.column_names[idx]
else:
return ""
    @classmethod
    def add_debug_arguments(cls, parser: ArgumentParser, expert: bool = False):
        """Add the error/feedback command-line options to `parser`.

        When `expert` is False, most of these options are hidden from the
        help message (their help text becomes argparse.SUPPRESS) but they
        are still parsed and still establish their defaults.
        """
        # This helper function makes it easy to suppress options from
        # the help message. The options are still there, and initialize
        # what they need to initialize.
        def h(msg: str)->str:
            if expert:
                return msg
            else:
                return SUPPRESS
        egroup: _ArgumentGroup = parser.add_argument_group(h("Error and feedback messages"),
                                                           h("Send error messages and feedback to stderr or stdout, " +
                                                             "control the amount of feedback and debugging messages."))
        # Avoid the argparse bug that prevents these two arguments from having
        # their help messages suppressed directly.
        #
        # TODO: Is there a better fix?
        #
        # TODO: replace --errors-to-stdout and --errors-to-stderr with
        # --errors-to=stdout and --errors-to=stderr, using either an enum
        # or choices. That will avoid the argparse bug, too.
        if expert:
            errors_to = egroup.add_mutually_exclusive_group()
            errors_to.add_argument(      "--errors-to-stdout", dest="errors_to_stdout",
                                         help="Send errors to stdout instead of stderr",
                                         action="store_true")
            errors_to.add_argument(      "--errors-to-stderr", dest="errors_to_stderr",
                                         help="Send errors to stderr instead of stdout",
                                         action="store_true")
        else:
            egroup.add_argument(      "--errors-to-stderr", dest="errors_to_stderr",
                                      help=h("Send errors to stderr instead of stdout"),
                                      action="store_true")
            egroup.add_argument(      "--errors-to-stdout", dest="errors_to_stdout",
                                      help=h("Send errors to stdout instead of stderr"),
                                      action="store_true")
        egroup.add_argument(      "--show-options", dest="show_options", help=h("Print the options selected (default=%(default)s)."), action='store_true')
        egroup.add_argument("-v", "--verbose", dest="verbose", help="Print additional progress messages (default=%(default)s).", action='store_true')
        egroup.add_argument(      "--very-verbose", dest="very_verbose",
                                  help=h("Print additional progress messages (default=%(default)s)."),
                                  action='store_true')
def main():
    """Test the KgtkReader interfaces from the command line.

    Reads a KGTK file using the iteration style chosen by --test and
    reports the number of lines read.
    """
    # The EdgeReader import is deferred to avoid circular imports.
    from kgtk.io.edgereader import EdgeReader
    # The NodeReader import is deferred to avoid circular imports.
    from kgtk.io.nodereader import NodeReader
    parser = ArgumentParser()
    parser.add_argument(dest="kgtk_file", help="The KGTK file to read", type=Path, nargs="?")
    KgtkReader.add_debug_arguments(parser, expert=True)
    parser.add_argument(      "--test", dest="test_method", help="The test to perform (default=%(default)s).",
                              choices=["rows", "concise-rows",
                                       "kgtk-values", "concise-kgtk-values",
                                       "dicts", "concise-dicts",
                                       "kgtk-value-dicts", "concise-kgtk-value-dicts"],
                              default="rows")
    parser.add_argument(      "--test-validate", dest="test_validate", help="Validate KgtkValue objects in test (default=%(default)s).",
                              type=optional_bool, nargs='?', const=True, default=False)
    KgtkReaderOptions.add_arguments(parser, mode_options=True, validate_by_default=True, expert=True)
    KgtkValueOptions.add_arguments(parser, expert=True)
    args: Namespace = parser.parse_args()
    error_file: typing.TextIO = sys.stdout if args.errors_to_stdout else sys.stderr
    # Build the option structures.
    reader_options: KgtkReaderOptions = KgtkReaderOptions.from_args(args)
    value_options: KgtkValueOptions = KgtkValueOptions.from_args(args)
    if args.show_options:
        # Bug fix: the --test option stores into args.test_method (its
        # "dest"), so the former "args.test" raised AttributeError here.
        print("--test=%s" % str(args.test_method), file=error_file)
        print("--test-validate=%s" % str(args.test_validate), file=error_file)
        reader_options.show(out=error_file)
        value_options.show(out=error_file)
        print("=======", file=error_file, flush=True)
    kr: KgtkReader = KgtkReader.open(args.kgtk_file,
                                     error_file = error_file,
                                     options=reader_options,
                                     value_options=value_options,
                                     verbose=args.verbose,
                                     very_verbose=args.very_verbose)
    line_count: int = 0
    row: typing.List[str]
    kgtk_values: typing.List[KgtkValue]
    dict_row: typing.Mapping[str, str]
    # Corrected annotation: these dicts map column names to KgtkValue objects.
    kgtk_value_dict: typing.Mapping[str, KgtkValue]
    if args.test_method == "rows":
        if args.verbose:
            print("Testing iterating over rows.", file=error_file, flush=True)
        for row in kr:
            line_count += 1
    elif args.test_method == "concise-rows":
        if args.verbose:
            print("Testing iterating over concise rows.", file=error_file, flush=True)
        for row in kr.concise_rows():
            line_count += 1
    elif args.test_method == "kgtk-values":
        if args.verbose:
            print("Testing iterating over KgtkValue rows.", file=error_file, flush=True)
        for kgtk_values in kr.kgtk_values(validate=args.test_validate):
            line_count += 1
    elif args.test_method == "concise-kgtk-values":
        if args.verbose:
            print("Testing iterating over concise KgtkValue rows.", file=error_file, flush=True)
        for kgtk_values in kr.concise_kgtk_values(validate=args.test_validate):
            line_count += 1
    elif args.test_method == "dicts":
        if args.verbose:
            print("Testing iterating over dicts.", file=error_file, flush=True)
        for dict_row in kr.dicts():
            line_count += 1
    elif args.test_method == "concise-dicts":
        if args.verbose:
            print("Testing iterating over concise dicts.", file=error_file, flush=True)
        for dict_row in kr.dicts(concise=True):
            line_count += 1
    elif args.test_method == "kgtk-value-dicts":
        if args.verbose:
            print("Testing iterating over KgtkValue dicts.", file=error_file, flush=True)
        for kgtk_value_dict in kr.kgtk_value_dicts(validate=args.test_validate):
            line_count += 1
    elif args.test_method == "concise-kgtk-value-dicts":
        if args.verbose:
            print("Testing iterating over concise KgtkValue dicts.", file=error_file, flush=True)
        for kgtk_value_dict in kr.kgtk_value_dicts(concise=True, validate=args.test_validate):
            line_count += 1
    print("Read %d lines" % line_count, file=error_file, flush=True)
# Allow this module to be run directly as a script for self-testing.
if __name__ == "__main__":
    main()
| true | true |
1c45e033fc674af02e4beaed1d9dd8f01d305f9a | 686 | py | Python | app/core/management/commands/wait_for_db.py | amir-rz/recipe-app-api | 69f8c2ee801cd4bf909979bd246b8fe1bf9e2d60 | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | amir-rz/recipe-app-api | 69f8c2ee801cd4bf909979bd246b8fe1bf9e2d60 | [
"MIT"
] | null | null | null | app/core/management/commands/wait_for_db.py | amir-rz/recipe-app-api | 69f8c2ee801cd4bf909979bd246b8fe1bf9e2d60 | [
"MIT"
] | null | null | null | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
""" Django command to pause excuation until database is available """
def handle(self, *args, **options):
self.stdout.write("Waiting for database...")
db_conn = None
while not db_conn:
try:
db_conn = connections["default"]
except OperationalError:
self.stdout.write("Database unavailable, waiting 1 second...")
time.sleep(1)
self.stdout.write(self.style.SUCCESS("Database available!")) # noqa: W391 | 34.3 | 82 | 0.650146 | import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Waiting for database...")
db_conn = None
while not db_conn:
try:
db_conn = connections["default"]
except OperationalError:
self.stdout.write("Database unavailable, waiting 1 second...")
time.sleep(1)
self.stdout.write(self.style.SUCCESS("Database available!")) | true | true |
1c45e09129bfd3cd066e0bd3ca94b7df369924d0 | 962 | py | Python | atriage/collectors/flatdir.py | Ayrx/atriage | 6e928da0d673260e61e089f69cb56555c7d9cdf6 | [
"Apache-2.0"
] | 11 | 2017-12-17T12:18:56.000Z | 2021-05-10T23:11:29.000Z | atriage/collectors/flatdir.py | Ayrx/atriage | 6e928da0d673260e61e089f69cb56555c7d9cdf6 | [
"Apache-2.0"
] | 7 | 2018-10-01T08:46:24.000Z | 2021-06-01T21:48:44.000Z | atriage/collectors/flatdir.py | Ayrx/atriage | 6e928da0d673260e61e089f69cb56555c7d9cdf6 | [
"Apache-2.0"
] | 3 | 2017-12-17T12:19:00.000Z | 2019-03-25T09:31:52.000Z | from atriage.collectors.exceptions import NoopException
from atriage.collectors.interface import CollectorInterface
from pathlib import Path
import click
class FlatDirCollector(object):
    """Collector that reads crash files from a single flat directory."""

    name = "flat-dir-collector"

    def __init__(self, results):
        # `results` is the store of previously recorded crashes.
        self._results = results

    def parse_directory(self, directory):
        """Record any crash files in `directory` that are not already known."""
        click.echo("Reading {}...".format(directory))
        new = self._read_directory(directory)
        # NOTE(review): each entry of all_crashes is indexed at [1] for the
        # crash path — confirm against the results store's record layout.
        old = set([i[1] for i in self._results.all_crashes])
        diff = new - old
        if len(diff) != 0:
            click.echo("Adding {} crashes.".format(len(diff)))
            self._results.save_crashes(diff)

    def gather_all_samples(self, directory):
        """Not supported by this collector; always raises NoopException."""
        raise NoopException

    def _read_directory(self, directory):
        """Return the set of entry paths (as strings) directly inside `directory`."""
        crashes = set()
        p = Path(directory)
        for crash in p.iterdir():
            crashes.add(str(crash))
        return crashes

CollectorInterface.register(FlatDirCollector)
| 24.05 | 62 | 0.660083 | from atriage.collectors.exceptions import NoopException
from atriage.collectors.interface import CollectorInterface
from pathlib import Path
import click
class FlatDirCollector(object):
name = "flat-dir-collector"
def __init__(self, results):
self._results = results
def parse_directory(self, directory):
click.echo("Reading {}...".format(directory))
new = self._read_directory(directory)
old = set([i[1] for i in self._results.all_crashes])
diff = new - old
if len(diff) != 0:
click.echo("Adding {} crashes.".format(len(diff)))
self._results.save_crashes(diff)
def gather_all_samples(self, directory):
raise NoopException
def _read_directory(self, directory):
crashes = set()
p = Path(directory)
for crash in p.iterdir():
crashes.add(str(crash))
return crashes
CollectorInterface.register(FlatDirCollector)
| true | true |
1c45e18204e4eaacee40b946650e58bb8faf467c | 2,264 | py | Python | email/sphinxcontrib/email.py | Tommy1969/sphinx-contrib | d479ece0fe6c2f33bbebcc52677035d5003b7b35 | [
"BSD-2-Clause"
] | 14 | 2016-02-22T12:06:54.000Z | 2021-01-05T07:01:43.000Z | email/sphinxcontrib/email.py | SuperKogito/sphinx-contrib | 3b643bffb90a27ae378717ae6335873a0e73cf9d | [
"BSD-2-Clause"
] | 8 | 2015-03-06T13:46:49.000Z | 2019-10-09T08:53:14.000Z | email/sphinxcontrib/email.py | SuperKogito/sphinx-contrib | 3b643bffb90a27ae378717ae6335873a0e73cf9d | [
"BSD-2-Clause"
] | 16 | 2015-05-25T02:51:05.000Z | 2020-01-17T05:49:47.000Z | # E-mail obfuscation role for Sphinx.
from docutils import nodes
# The obfuscation code was taken from
#
# http://pypi.python.org/pypi/bud.nospam
#
# and was was released by Kevin Teague <kevin at bud ca> under
# a BSD license.
import re
try:
maketrans = ''.maketrans
except AttributeError:
# fallback for Python 2
from string import maketrans
rot_13_trans = maketrans(
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
'NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm'
)
def rot_13_encrypt(line):
"""Rotate 13 encryption.
"""
line = line.translate(rot_13_trans)
line = re.sub('(?=[\\"])', r'\\', line)
line = re.sub('\n', r'\n', line)
line = re.sub('@', r'\\100', line)
line = re.sub('\.', r'\\056', line)
line = re.sub('/', r'\\057', line)
return line
def js_obfuscated_text(text):
"""ROT 13 encryption with embedded in Javascript code to decrypt
in the browser.
"""
return """<script type="text/javascript">document.write(
"%s".replace(/[a-zA-Z]/g,
function(c){
return String.fromCharCode(
(c<="Z"?90:122)>=(c=c.charCodeAt(0)+13)?c:c-26);}));
</script>""" % rot_13_encrypt(text)
def js_obfuscated_mailto(email, displayname=None):
"""ROT 13 encryption within an Anchor tag w/ a mailto: attribute
"""
if not displayname:
displayname = email
return js_obfuscated_text("""<a href="mailto:%s">%s</a>""" % (
email, displayname
))
# -- end bud.nospam
def email_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
"""
Role to obfuscate e-mail addresses.
"""
text = text.decode('utf-8').encode('utf-8')
# Handle addresses of the form "Name <name@domain.org>"
if '<' in text and '>' in text:
name, email = text.split('<')
email = email.split('>')[0]
elif '(' in text and ')' in text:
name, email = text.split('(')
email = email.split(')')[0]
else:
name = text
email = name
obfuscated = js_obfuscated_mailto(email, displayname=name)
node = nodes.raw('', obfuscated, format='html')
return [node], []
def setup(app):
app.add_role('email', email_role)
| 27.277108 | 76 | 0.605124 |
from docutils import nodes
import re
try:
maketrans = ''.maketrans
except AttributeError:
from string import maketrans
rot_13_trans = maketrans(
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
'NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm'
)
def rot_13_encrypt(line):
line = line.translate(rot_13_trans)
line = re.sub('(?=[\\"])', r'\\', line)
line = re.sub('\n', r'\n', line)
line = re.sub('@', r'\\100', line)
line = re.sub('\.', r'\\056', line)
line = re.sub('/', r'\\057', line)
return line
def js_obfuscated_text(text):
return """<script type="text/javascript">document.write(
"%s".replace(/[a-zA-Z]/g,
function(c){
return String.fromCharCode(
(c<="Z"?90:122)>=(c=c.charCodeAt(0)+13)?c:c-26);}));
</script>""" % rot_13_encrypt(text)
def js_obfuscated_mailto(email, displayname=None):
if not displayname:
displayname = email
return js_obfuscated_text("""<a href="mailto:%s">%s</a>""" % (
email, displayname
))
# -- end bud.nospam
def email_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = text.decode('utf-8').encode('utf-8')
# Handle addresses of the form "Name <name@domain.org>"
if '<' in text and '>' in text:
name, email = text.split('<')
email = email.split('>')[0]
elif '(' in text and ')' in text:
name, email = text.split('(')
email = email.split(')')[0]
else:
name = text
email = name
obfuscated = js_obfuscated_mailto(email, displayname=name)
node = nodes.raw('', obfuscated, format='html')
return [node], []
def setup(app):
app.add_role('email', email_role)
| true | true |
1c45e1cfb9f789049e129d5f970c555b0f0ffd3c | 15,669 | py | Python | distributed/stealing.py | dchudz/distributed | 591bca00af4fc07d0c5cac5189fc3b08ef8a93cd | [
"BSD-3-Clause"
] | null | null | null | distributed/stealing.py | dchudz/distributed | 591bca00af4fc07d0c5cac5189fc3b08ef8a93cd | [
"BSD-3-Clause"
] | null | null | null | distributed/stealing.py | dchudz/distributed | 591bca00af4fc07d0c5cac5189fc3b08ef8a93cd | [
"BSD-3-Clause"
] | null | null | null | import logging
from collections import defaultdict, deque
from math import log2
from time import time
from tlz import topk
from tornado.ioloop import PeriodicCallback
import dask
from dask.utils import parse_timedelta
from .comm.addressing import get_address_host
from .core import CommClosedError
from .diagnostics.plugin import SchedulerPlugin
from .utils import log_errors
LATENCY = 10e-3
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
class WorkStealing(SchedulerPlugin):
def __init__(self, scheduler):
self.scheduler = scheduler
# { level: { task states } }
self.stealable_all = [set() for i in range(15)]
# { worker: { level: { task states } } }
self.stealable = dict()
# { task state: (worker, level) }
self.key_stealable = dict()
self.cost_multipliers = [1 + 2 ** (i - 6) for i in range(15)]
self.cost_multipliers[0] = 1
for worker in scheduler.workers:
self.add_worker(worker=worker)
callback_time = parse_timedelta(
dask.config.get("distributed.scheduler.work-stealing-interval"),
default="ms",
)
# `callback_time` is in milliseconds
pc = PeriodicCallback(callback=self.balance, callback_time=callback_time * 1000)
self._pc = pc
self.scheduler.periodic_callbacks["stealing"] = pc
self.scheduler.plugins.append(self)
self.scheduler.extensions["stealing"] = self
self.scheduler.events["stealing"] = deque(maxlen=100000)
self.count = 0
# { task state: <stealing info dict> }
self.in_flight = dict()
# { worker state: occupancy }
self.in_flight_occupancy = defaultdict(lambda: 0)
self.scheduler.stream_handlers["steal-response"] = self.move_task_confirm
def log(self, msg):
return self.scheduler.log_event("stealing", msg)
def add_worker(self, scheduler=None, worker=None):
self.stealable[worker] = [set() for i in range(15)]
def remove_worker(self, scheduler=None, worker=None):
del self.stealable[worker]
def teardown(self):
self._pc.stop()
def transition(
self, key, start, finish, compute_start=None, compute_stop=None, *args, **kwargs
):
if finish == "processing":
ts = self.scheduler.tasks[key]
self.put_key_in_stealable(ts)
elif start == "processing":
ts = self.scheduler.tasks[key]
self.remove_key_from_stealable(ts)
if finish != "memory":
self.in_flight.pop(ts, None)
def put_key_in_stealable(self, ts):
cost_multiplier, level = self.steal_time_ratio(ts)
if cost_multiplier is not None:
ws = ts.processing_on
worker = ws.address
self.stealable_all[level].add(ts)
self.stealable[worker][level].add(ts)
self.key_stealable[ts] = (worker, level)
def remove_key_from_stealable(self, ts):
result = self.key_stealable.pop(ts, None)
if result is None:
return
worker, level = result
try:
self.stealable[worker][level].remove(ts)
except KeyError:
pass
try:
self.stealable_all[level].remove(ts)
except KeyError:
pass
def steal_time_ratio(self, ts):
"""The compute to communication time ratio of a key
Returns
-------
cost_multiplier: The increased cost from moving this task as a factor.
For example a result of zero implies a task without dependencies.
level: The location within a stealable list to place this value
"""
if not ts.dependencies: # no dependencies fast path
return 0, 0
split = ts.prefix.name
if split in fast_tasks:
return None, None
ws = ts.processing_on
compute_time = ws.processing[ts]
if compute_time < 0.005: # 5ms, just give up
return None, None
nbytes = ts.get_nbytes_deps()
transfer_time = nbytes / self.scheduler.bandwidth + LATENCY
cost_multiplier = transfer_time / compute_time
if cost_multiplier > 100:
return None, None
level = int(round(log2(cost_multiplier) + 6))
if level < 1:
level = 1
return cost_multiplier, level
def move_task_request(self, ts, victim, thief):
try:
if self.scheduler.validate:
if victim is not ts.processing_on and LOG_PDB:
import pdb
pdb.set_trace()
key = ts.key
self.remove_key_from_stealable(ts)
logger.debug(
"Request move %s, %s: %2f -> %s: %2f",
key,
victim,
victim.occupancy,
thief,
thief.occupancy,
)
victim_duration = victim.processing[ts]
thief_duration = self.scheduler.get_task_duration(
ts
) + self.scheduler.get_comm_cost(ts, thief)
self.scheduler.stream_comms[victim.address].send(
{"op": "steal-request", "key": key}
)
self.in_flight[ts] = {
"victim": victim,
"thief": thief,
"victim_duration": victim_duration,
"thief_duration": thief_duration,
}
self.in_flight_occupancy[victim] -= victim_duration
self.in_flight_occupancy[thief] += thief_duration
except CommClosedError:
logger.info("Worker comm %r closed while stealing: %r", victim, ts)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
async def move_task_confirm(self, key=None, worker=None, state=None):
try:
try:
ts = self.scheduler.tasks[key]
except KeyError:
logger.debug("Key released between request and confirm: %s", key)
return
try:
d = self.in_flight.pop(ts)
except KeyError:
return
thief = d["thief"]
victim = d["victim"]
logger.debug(
"Confirm move %s, %s -> %s. State: %s", key, victim, thief, state
)
self.in_flight_occupancy[thief] -= d["thief_duration"]
self.in_flight_occupancy[victim] += d["victim_duration"]
if not self.in_flight:
self.in_flight_occupancy = defaultdict(lambda: 0)
if ts.state != "processing" or ts.processing_on is not victim:
old_thief = thief.occupancy
new_thief = sum(thief.processing.values())
old_victim = victim.occupancy
new_victim = sum(victim.processing.values())
thief.occupancy = new_thief
victim.occupancy = new_victim
self.scheduler.total_occupancy += (
new_thief - old_thief + new_victim - old_victim
)
return
# One of the pair has left, punt and reschedule
if (
thief.address not in self.scheduler.workers
or victim.address not in self.scheduler.workers
):
self.scheduler.reschedule(key)
return
# Victim had already started execution, reverse stealing
if state in ("memory", "executing", "long-running", None):
self.log(("already-computing", key, victim.address, thief.address))
self.scheduler.check_idle_saturated(thief)
self.scheduler.check_idle_saturated(victim)
# Victim was waiting, has given up task, enact steal
elif state in ("waiting", "ready", "constrained"):
self.remove_key_from_stealable(ts)
ts.processing_on = thief
duration = victim.processing.pop(ts)
victim.occupancy -= duration
self.scheduler.total_occupancy -= duration
if not victim.processing:
self.scheduler.total_occupancy -= victim.occupancy
victim.occupancy = 0
thief.processing[ts] = d["thief_duration"]
thief.occupancy += d["thief_duration"]
self.scheduler.total_occupancy += d["thief_duration"]
self.put_key_in_stealable(ts)
try:
self.scheduler.send_task_to_worker(thief.address, ts)
except CommClosedError:
await self.scheduler.remove_worker(thief.address)
self.log(("confirm", key, victim.address, thief.address))
else:
raise ValueError("Unexpected task state: %s" % state)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
finally:
try:
self.scheduler.check_idle_saturated(thief)
except Exception:
pass
try:
self.scheduler.check_idle_saturated(victim)
except Exception:
pass
def balance(self):
s = self.scheduler
def combined_occupancy(ws):
return ws.occupancy + self.in_flight_occupancy[ws]
def maybe_move_task(level, ts, sat, idl, duration, cost_multiplier):
occ_idl = combined_occupancy(idl)
occ_sat = combined_occupancy(sat)
if occ_idl + cost_multiplier * duration <= occ_sat - duration / 2:
self.move_task_request(ts, sat, idl)
log.append(
(
start,
level,
ts.key,
duration,
sat.address,
occ_sat,
idl.address,
occ_idl,
)
)
s.check_idle_saturated(sat, occ=occ_sat)
s.check_idle_saturated(idl, occ=occ_idl)
with log_errors():
i = 0
idle = s.idle.values()
saturated = s.saturated
if not idle or len(idle) == len(s.workers):
return
log = []
start = time()
if not s.saturated:
saturated = topk(10, s.workers.values(), key=combined_occupancy)
saturated = [
ws
for ws in saturated
if combined_occupancy(ws) > 0.2 and len(ws.processing) > ws.nthreads
]
elif len(s.saturated) < 20:
saturated = sorted(saturated, key=combined_occupancy, reverse=True)
if len(idle) < 20:
idle = sorted(idle, key=combined_occupancy)
for level, cost_multiplier in enumerate(self.cost_multipliers):
if not idle:
break
for sat in list(saturated):
stealable = self.stealable[sat.address][level]
if not stealable or not idle:
continue
for ts in list(stealable):
if ts not in self.key_stealable or ts.processing_on is not sat:
stealable.discard(ts)
continue
i += 1
if not idle:
break
if _has_restrictions(ts):
thieves = [ws for ws in idle if _can_steal(ws, ts, sat)]
else:
thieves = idle
if not thieves:
break
thief = thieves[i % len(thieves)]
duration = sat.processing.get(ts)
if duration is None:
stealable.discard(ts)
continue
maybe_move_task(
level, ts, sat, thief, duration, cost_multiplier
)
if self.cost_multipliers[level] < 20: # don't steal from public at cost
stealable = self.stealable_all[level]
for ts in list(stealable):
if not idle:
break
if ts not in self.key_stealable:
stealable.discard(ts)
continue
sat = ts.processing_on
if sat is None:
stealable.discard(ts)
continue
if combined_occupancy(sat) < 0.2:
continue
if len(sat.processing) <= sat.nthreads:
continue
i += 1
if _has_restrictions(ts):
thieves = [ws for ws in idle if _can_steal(ws, ts, sat)]
else:
thieves = idle
if not thieves:
continue
thief = thieves[i % len(thieves)]
duration = sat.processing[ts]
maybe_move_task(
level, ts, sat, thief, duration, cost_multiplier
)
if log:
self.log(log)
self.count += 1
stop = time()
if s.digests:
s.digests["steal-duration"].add(stop - start)
def restart(self, scheduler):
for stealable in self.stealable.values():
for s in stealable:
s.clear()
for s in self.stealable_all:
s.clear()
self.key_stealable.clear()
def story(self, *keys):
keys = set(keys)
out = []
for _, L in self.scheduler.get_event("stealing"):
if not isinstance(L, list):
L = [L]
for t in L:
if any(x in keys for x in t):
out.append(t)
return out
def _has_restrictions(ts):
"""Determine whether the given task has restrictions and whether these
restrictions are strict.
"""
return not ts.loose_restrictions and (
ts.host_restrictions or ts.worker_restrictions or ts.resource_restrictions
)
def _can_steal(thief, ts, victim):
"""Determine whether worker ``thief`` can steal task ``ts`` from worker
``victim``.
Assumes that `ts` has some restrictions.
"""
if (
ts.host_restrictions
and get_address_host(thief.address) not in ts.host_restrictions
):
return False
elif ts.worker_restrictions and thief.address not in ts.worker_restrictions:
return False
if victim.resources is None:
return True
for resource, value in victim.resources.items():
try:
supplied = thief.resources[resource]
except KeyError:
return False
else:
if supplied < value:
return False
return True
fast_tasks = {"split-shuffle"}
| 34.286652 | 88 | 0.521029 | import logging
from collections import defaultdict, deque
from math import log2
from time import time
from tlz import topk
from tornado.ioloop import PeriodicCallback
import dask
from dask.utils import parse_timedelta
from .comm.addressing import get_address_host
from .core import CommClosedError
from .diagnostics.plugin import SchedulerPlugin
from .utils import log_errors
LATENCY = 10e-3
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
class WorkStealing(SchedulerPlugin):
def __init__(self, scheduler):
self.scheduler = scheduler
self.stealable_all = [set() for i in range(15)]
self.stealable = dict()
self.key_stealable = dict()
self.cost_multipliers = [1 + 2 ** (i - 6) for i in range(15)]
self.cost_multipliers[0] = 1
for worker in scheduler.workers:
self.add_worker(worker=worker)
callback_time = parse_timedelta(
dask.config.get("distributed.scheduler.work-stealing-interval"),
default="ms",
)
pc = PeriodicCallback(callback=self.balance, callback_time=callback_time * 1000)
self._pc = pc
self.scheduler.periodic_callbacks["stealing"] = pc
self.scheduler.plugins.append(self)
self.scheduler.extensions["stealing"] = self
self.scheduler.events["stealing"] = deque(maxlen=100000)
self.count = 0
self.in_flight = dict()
self.in_flight_occupancy = defaultdict(lambda: 0)
self.scheduler.stream_handlers["steal-response"] = self.move_task_confirm
def log(self, msg):
return self.scheduler.log_event("stealing", msg)
def add_worker(self, scheduler=None, worker=None):
self.stealable[worker] = [set() for i in range(15)]
def remove_worker(self, scheduler=None, worker=None):
del self.stealable[worker]
def teardown(self):
self._pc.stop()
def transition(
self, key, start, finish, compute_start=None, compute_stop=None, *args, **kwargs
):
if finish == "processing":
ts = self.scheduler.tasks[key]
self.put_key_in_stealable(ts)
elif start == "processing":
ts = self.scheduler.tasks[key]
self.remove_key_from_stealable(ts)
if finish != "memory":
self.in_flight.pop(ts, None)
def put_key_in_stealable(self, ts):
cost_multiplier, level = self.steal_time_ratio(ts)
if cost_multiplier is not None:
ws = ts.processing_on
worker = ws.address
self.stealable_all[level].add(ts)
self.stealable[worker][level].add(ts)
self.key_stealable[ts] = (worker, level)
def remove_key_from_stealable(self, ts):
result = self.key_stealable.pop(ts, None)
if result is None:
return
worker, level = result
try:
self.stealable[worker][level].remove(ts)
except KeyError:
pass
try:
self.stealable_all[level].remove(ts)
except KeyError:
pass
def steal_time_ratio(self, ts):
if not ts.dependencies:
return 0, 0
split = ts.prefix.name
if split in fast_tasks:
return None, None
ws = ts.processing_on
compute_time = ws.processing[ts]
if compute_time < 0.005:
return None, None
nbytes = ts.get_nbytes_deps()
transfer_time = nbytes / self.scheduler.bandwidth + LATENCY
cost_multiplier = transfer_time / compute_time
if cost_multiplier > 100:
return None, None
level = int(round(log2(cost_multiplier) + 6))
if level < 1:
level = 1
return cost_multiplier, level
    def move_task_request(self, ts, victim, thief):
        """Ask *victim* to hand task *ts* over to *thief*.

        Sends a "steal-request" message to the victim and records the
        pending move in ``self.in_flight`` until the victim answers
        (handled by :meth:`move_task_confirm`).
        """
        try:
            if self.scheduler.validate:
                if victim is not ts.processing_on and LOG_PDB:
                    import pdb
                    pdb.set_trace()
            key = ts.key
            self.remove_key_from_stealable(ts)
            logger.debug(
                "Request move %s, %s: %2f -> %s: %2f",
                key,
                victim,
                victim.occupancy,
                thief,
                thief.occupancy,
            )
            # Estimated durations on each side: victim's current estimate
            # vs. the thief's compute plus communication cost.
            victim_duration = victim.processing[ts]
            thief_duration = self.scheduler.get_task_duration(
                ts
            ) + self.scheduler.get_comm_cost(ts, thief)
            self.scheduler.stream_comms[victim.address].send(
                {"op": "steal-request", "key": key}
            )
            self.in_flight[ts] = {
                "victim": victim,
                "thief": thief,
                "victim_duration": victim_duration,
                "thief_duration": thief_duration,
            }
            # Track the anticipated occupancy shift until confirmation.
            self.in_flight_occupancy[victim] -= victim_duration
            self.in_flight_occupancy[thief] += thief_duration
        except CommClosedError:
            logger.info("Worker comm %r closed while stealing: %r", victim, ts)
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
    async def move_task_confirm(self, key=None, worker=None, state=None):
        """Handle a worker's "steal-response" stream message.

        Depending on the task ``state`` reported by the victim, the steal
        is either abandoned (task already running or finished) or
        committed by moving the task and its occupancy from victim to
        thief.
        """
        try:
            try:
                ts = self.scheduler.tasks[key]
            except KeyError:
                logger.debug("Key released between request and confirm: %s", key)
                return
            try:
                d = self.in_flight.pop(ts)
            except KeyError:
                # No matching in-flight request (e.g. duplicate response).
                return
            thief = d["thief"]
            victim = d["victim"]
            logger.debug(
                "Confirm move %s, %s -> %s.  State: %s", key, victim, thief, state
            )
            # Undo the anticipated occupancy shift recorded at request time.
            self.in_flight_occupancy[thief] -= d["thief_duration"]
            self.in_flight_occupancy[victim] += d["victim_duration"]
            if not self.in_flight:
                # Reset the defaultdict wholesale so it cannot grow without
                # bound across many steal rounds.
                self.in_flight_occupancy = defaultdict(lambda: 0)
            if ts.state != "processing" or ts.processing_on is not victim:
                # Task moved on while the request was in flight:
                # resynchronise both workers' occupancy and bail out.
                old_thief = thief.occupancy
                new_thief = sum(thief.processing.values())
                old_victim = victim.occupancy
                new_victim = sum(victim.processing.values())
                thief.occupancy = new_thief
                victim.occupancy = new_victim
                self.scheduler.total_occupancy += (
                    new_thief - old_thief + new_victim - old_victim
                )
                return
            if (
                thief.address not in self.scheduler.workers
                or victim.address not in self.scheduler.workers
            ):
                # One side left the cluster; let the scheduler re-place it.
                self.scheduler.reschedule(key)
                return
            if state in ("memory", "executing", "long-running", None):
                # Victim already started (or finished) the task: abandon.
                self.log(("already-computing", key, victim.address, thief.address))
                self.scheduler.check_idle_saturated(thief)
                self.scheduler.check_idle_saturated(victim)
            elif state in ("waiting", "ready", "constrained"):
                # Commit the steal: move the task and its occupancy.
                self.remove_key_from_stealable(ts)
                ts.processing_on = thief
                duration = victim.processing.pop(ts)
                victim.occupancy -= duration
                self.scheduler.total_occupancy -= duration
                if not victim.processing:
                    # Clamp accumulated floating-point drift to zero.
                    self.scheduler.total_occupancy -= victim.occupancy
                    victim.occupancy = 0
                thief.processing[ts] = d["thief_duration"]
                thief.occupancy += d["thief_duration"]
                self.scheduler.total_occupancy += d["thief_duration"]
                self.put_key_in_stealable(ts)
                try:
                    self.scheduler.send_task_to_worker(thief.address, ts)
                except CommClosedError:
                    await self.scheduler.remove_worker(thief.address)
                self.log(("confirm", key, victim.address, thief.address))
            else:
                raise ValueError("Unexpected task state: %s" % state)
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb
                pdb.set_trace()
            raise
        finally:
            # NOTE: ``thief``/``victim`` are unbound when the early returns
            # above fired; the broad excepts below deliberately swallow the
            # resulting NameError as well.
            try:
                self.scheduler.check_idle_saturated(thief)
            except Exception:
                pass
            try:
                self.scheduler.check_idle_saturated(victim)
            except Exception:
                pass
    def balance(self):
        """Periodically move stealable tasks from busy workers to idle ones.

        Walks the stealability levels from cheapest to most expensive and
        issues steal requests (via ``maybe_move_task``) whenever moving a
        task would leave the thief clearly less loaded than the victim.
        """
        s = self.scheduler
        def combined_occupancy(ws):
            # Occupancy adjusted for steals requested but not yet confirmed.
            return ws.occupancy + self.in_flight_occupancy[ws]
        def maybe_move_task(level, ts, sat, idl, duration, cost_multiplier):
            occ_idl = combined_occupancy(idl)
            occ_sat = combined_occupancy(sat)
            # Steal only if the thief (idl) stays comfortably below the
            # victim (sat) after paying the transfer cost.
            if occ_idl + cost_multiplier * duration <= occ_sat - duration / 2:
                self.move_task_request(ts, sat, idl)
                log.append(
                    (
                        start,
                        level,
                        ts.key,
                        duration,
                        sat.address,
                        occ_sat,
                        idl.address,
                        occ_idl,
                    )
                )
                s.check_idle_saturated(sat, occ=occ_sat)
                s.check_idle_saturated(idl, occ=occ_idl)
        with log_errors():
            i = 0
            idle = s.idle.values()
            saturated = s.saturated
            if not idle or len(idle) == len(s.workers):
                # Nothing to balance: no idle workers, or everyone is idle.
                return
            log = []
            start = time()
            if not s.saturated:
                # No formally saturated workers: fall back to the busiest
                # workers that still look oversubscribed.
                saturated = topk(10, s.workers.values(), key=combined_occupancy)
                saturated = [
                    ws
                    for ws in saturated
                    if combined_occupancy(ws) > 0.2 and len(ws.processing) > ws.nthreads
                ]
            elif len(s.saturated) < 20:
                saturated = sorted(saturated, key=combined_occupancy, reverse=True)
            if len(idle) < 20:
                idle = sorted(idle, key=combined_occupancy)
            for level, cost_multiplier in enumerate(self.cost_multipliers):
                if not idle:
                    break
                for sat in list(saturated):
                    stealable = self.stealable[sat.address][level]
                    if not stealable or not idle:
                        continue
                    for ts in list(stealable):
                        if ts not in self.key_stealable or ts.processing_on is not sat:
                            # Stale entry; drop it lazily.
                            stealable.discard(ts)
                            continue
                        i += 1
                        if not idle:
                            break
                        if _has_restrictions(ts):
                            thieves = [ws for ws in idle if _can_steal(ws, ts, sat)]
                        else:
                            thieves = idle
                        if not thieves:
                            break
                        # Round-robin over the eligible thieves.
                        thief = thieves[i % len(thieves)]
                        duration = sat.processing.get(ts)
                        if duration is None:
                            stealable.discard(ts)
                            continue
                        maybe_move_task(
                            level, ts, sat, thief, duration, cost_multiplier
                        )
                if self.cost_multipliers[level] < 20:
                    # For cheap levels also consider tasks on merely busy
                    # (not formally saturated) workers.
                    stealable = self.stealable_all[level]
                    for ts in list(stealable):
                        if not idle:
                            break
                        if ts not in self.key_stealable:
                            stealable.discard(ts)
                            continue
                        sat = ts.processing_on
                        if sat is None:
                            stealable.discard(ts)
                            continue
                        if combined_occupancy(sat) < 0.2:
                            continue
                        if len(sat.processing) <= sat.nthreads:
                            continue
                        i += 1
                        if _has_restrictions(ts):
                            thieves = [ws for ws in idle if _can_steal(ws, ts, sat)]
                        else:
                            thieves = idle
                        if not thieves:
                            continue
                        thief = thieves[i % len(thieves)]
                        duration = sat.processing[ts]
                        maybe_move_task(
                            level, ts, sat, thief, duration, cost_multiplier
                        )
            if log:
                self.log(log)
                self.count += 1
            stop = time()
            if s.digests:
                s.digests["steal-duration"].add(stop - start)
def restart(self, scheduler):
for stealable in self.stealable.values():
for s in stealable:
s.clear()
for s in self.stealable_all:
s.clear()
self.key_stealable.clear()
def story(self, *keys):
keys = set(keys)
out = []
for _, L in self.scheduler.get_event("stealing"):
if not isinstance(L, list):
L = [L]
for t in L:
if any(x in keys for x in t):
out.append(t)
return out
def _has_restrictions(ts):
return not ts.loose_restrictions and (
ts.host_restrictions or ts.worker_restrictions or ts.resource_restrictions
)
def _can_steal(thief, ts, victim):
if (
ts.host_restrictions
and get_address_host(thief.address) not in ts.host_restrictions
):
return False
elif ts.worker_restrictions and thief.address not in ts.worker_restrictions:
return False
if victim.resources is None:
return True
for resource, value in victim.resources.items():
try:
supplied = thief.resources[resource]
except KeyError:
return False
else:
if supplied < value:
return False
return True
fast_tasks = {"split-shuffle"}
| true | true |
1c45e29f710037c91f2e554142171caeedf3bf05 | 4,923 | py | Python | src/lidar.py | steviet91/furmulaone_source | ca738b271fa346da4234c5ffc781abc12a5ac49f | [
"MIT"
] | null | null | null | src/lidar.py | steviet91/furmulaone_source | ca738b271fa346da4234c5ffc781abc12a5ac49f | [
"MIT"
] | 24 | 2020-04-14T12:38:07.000Z | 2020-04-29T08:18:33.000Z | src/lidar.py | steviet91/furmulaone_source | ca738b271fa346da4234c5ffc781abc12a5ac49f | [
"MIT"
] | null | null | null | import numpy as np
from .track import TrackHandler
from .geom import Line
from .geom import Circle
from .geom import get_intersection_point_lineseg_lineseg
from .geom import calc_euclid_distance_2d
from .geom import rotate_point
import time
#from multiprocessing import Pool
class Lidar(object):
    """
    Object for the LIDAR containing the rays and intersect data.

    The lidar is modelled as ``_NRays`` rays fanned evenly across a field
    of view ``aFov`` centred on the heading ``a0``, plus a collision
    circle of radius ``_xLidarRange`` used to cheaply pre-filter track
    segments before the per-ray intersection tests.
    """
    _NRays = 3  # number of rays across the field of view
    _xLidarRange = float(200)  # maximum sensing range (track units)
    def __init__(self, track: TrackHandler, a0: float, x0: float, y0: float, aFov: float):
        """
        Initialise the LIDAR object.

        Args:
            track: handler providing the track geometry to cast against.
            a0: nominal angle of the lidar (the centre ray); fed to
                np.cos/np.sin so presumably radians -- TODO confirm units
                (the __main__ demo passes aFov=20).
            x0: the x position of the lidar.
            y0: the y position of the lidar.
            aFov: the total field of view spanned by the rays.
        """
        # save the arguments
        self.track = track
        self.a0 = a0 # the initial nominal angle of the lidar (the centre ray)
        self.x0 = x0 # the x position of the lidar
        self.y0 = y0 # the y position of the lidar
        self.aFov = aFov # the field of view of the lidar
        # initialise the lidar rays
        self.initialise_rays()
        # initialise the lidar collision circle
        self.collisionCircle = Circle(self.x0, self.y0, Lidar._xLidarRange)
        # initialise the collision array
        self.initialise_collision_array()
        # initialise the mp pool
        #self.pool = Pool(4)
    def initialise_rays(self):
        """
        Set up the rays that represent the lidar: unit vectors spread
        evenly from a0 - aFov/2 to a0 + aFov/2, rooted at (x0, y0).
        """
        self.rays = []
        for i in range(0,Lidar._NRays):
            a = self.a0 - self.aFov / 2 + i * self.aFov / (Lidar._NRays - 1) # angle of this ray
            # going to work as a unit vector
            x = np.cos(a)
            y = np.sin(a)
            # instantiate the ray as a line
            self.rays.append(Line((self.x0, self.y0), (self.x0 + x, self.y0 + y)))
    def initialise_collision_array(self):
        """
        Collision array contains the distance of the collision for each ray.
        A negative number (-1) infers no collision found
        """
        self.collision_array = -1.0 * np.ones(Lidar._NRays, dtype=np.float64)
    def rotate_lidar_by_delta(self, daRot: float, cX: float, cY: float):
        """
        Rotate the lidars about a pivot point (cX, cY) by daRot.

        NOTE(review): unlike translate_lidars_by_delta, this does not
        update self.collisionCircle's centre even though (x0, y0) moves
        when the pivot is not the lidar position -- confirm whether the
        pivot is always the circle centre or this is a latent bug.
        """
        for r in self.rays:
            r.rotate_line_by_delta(daRot, cX, cY)
        pNew = rotate_point(cX, cY, daRot, np.array([self.x0, self.y0]))
        self.x0 = pNew[0]
        self.y0 = pNew[1]
    def translate_lidars_by_delta(self, dX: float, dY: float):
        """
        Translate the lidars (rays, collision circle and origin) by the
        given deltas
        """
        for r in self.rays:
            r.translate_line_by_delta(dX, dY)
        self.collisionCircle.update_centre_by_delta(dX, dY)
        self.x0 += dX
        self.y0 += dY
    def reset_lidar(self):
        """
        Reset the lidars to their previous position/angles and clear the
        collision distances
        """
        for r in self.rays:
            r.reset_line()
        self.collisionCircle.update_centre_to_new_pos(self.x0, self.y0)
        self.initialise_collision_array()
    def fire_lidar(self):
        """
        Cast every ray against nearby track segments and populate
        self.collision_array with the distance to the closest hit per ray
        (-1 where nothing is hit within range)
        """
        # find the indexes of the track segments to check collision for
        in_idxs, out_idxs = self.track.get_line_idxs_for_collision(self.collisionCircle)
        # get the objects of the lines that should be checked for collision
        check_lines = []
        if len(in_idxs) > 0:
            check_lines.extend([self.track.data.in_lines[i] for i in in_idxs])
        if len(out_idxs) > 0:
            check_lines.extend([self.track.data.out_lines[i] for i in out_idxs])
        # calculate the collision array
        self.collision_array = np.array([self.check_rays(r, check_lines) for r in self.rays])
    def check_rays(self, r: Line, check_lines: list):
        """
        Calculate the minimum collision distance of ray *r* against each of
        the provided lines (-1 if no line is hit)
        """
        ds = np.array([self.cast_ray(r, l) for l in check_lines])
        # if the ray scored a collision then find the minimum distance (-1 is no collision)
        if np.max(ds) > 0:
            # get the minimun distance
            return np.min(ds[np.where(ds > 0)[0]])
        else:
            return float(-1)
    def cast_ray(self, r: Line, l: Line):
        """
        Cast the ray r and return the distance to the line l if less than
        lidar range (-1 when there is no hit, or the hit is out of range)
        """
        pInt = get_intersection_point_lineseg_lineseg(l, r, l2_is_ray=True)
        if pInt is None:
            return float(-1)
        else:
            d = float(calc_euclid_distance_2d(tuple(r.p1), tuple(pInt)))
            if d <= Lidar._xLidarRange:
                return d
            else:
                # collision is out of range
                return float(-1)
if __name__ == "__main__":
    # Smoke test: time repeated LIDAR casts against the octo track.
    import time
    # Fix: the original reused ``t`` for both the TrackHandler and the
    # start timestamp, shadowing the first -- use distinct names.
    track = TrackHandler('octo_track')
    lidar = Lidar(track, 0, 0, 0, 20)
    n = 1000
    t_start = time.time()
    for _ in range(0, n):
        lidar.fire_lidar()
    # Average milliseconds per full lidar sweep.
    print((time.time() - t_start) * 1000 / n, 'ms')
| 33.719178 | 97 | 0.589681 | import numpy as np
from .track import TrackHandler
from .geom import Line
from .geom import Circle
from .geom import get_intersection_point_lineseg_lineseg
from .geom import calc_euclid_distance_2d
from .geom import rotate_point
import time
class Lidar(object):
_NRays = 3
_xLidarRange = float(200)
def __init__(self, track: TrackHandler, a0: float, x0: float, y0: float, aFov: float):
self.track = track
self.a0 = a0
self.x0 = x0
self.y0 = y0
self.aFov = aFov
self.initialise_rays()
self.collisionCircle = Circle(self.x0, self.y0, Lidar._xLidarRange)
self.initialise_collision_array()
def initialise_rays(self):
self.rays = []
for i in range(0,Lidar._NRays):
a = self.a0 - self.aFov / 2 + i * self.aFov / (Lidar._NRays - 1)
x = np.cos(a)
y = np.sin(a)
self.rays.append(Line((self.x0, self.y0), (self.x0 + x, self.y0 + y)))
def initialise_collision_array(self):
self.collision_array = -1.0 * np.ones(Lidar._NRays, dtype=np.float64)
def rotate_lidar_by_delta(self, daRot: float, cX: float, cY: float):
for r in self.rays:
r.rotate_line_by_delta(daRot, cX, cY)
pNew = rotate_point(cX, cY, daRot, np.array([self.x0, self.y0]))
self.x0 = pNew[0]
self.y0 = pNew[1]
def translate_lidars_by_delta(self, dX: float, dY: float):
for r in self.rays:
r.translate_line_by_delta(dX, dY)
self.collisionCircle.update_centre_by_delta(dX, dY)
self.x0 += dX
self.y0 += dY
def reset_lidar(self):
for r in self.rays:
r.reset_line()
self.collisionCircle.update_centre_to_new_pos(self.x0, self.y0)
self.initialise_collision_array()
def fire_lidar(self):
in_idxs, out_idxs = self.track.get_line_idxs_for_collision(self.collisionCircle)
check_lines = []
if len(in_idxs) > 0:
check_lines.extend([self.track.data.in_lines[i] for i in in_idxs])
if len(out_idxs) > 0:
check_lines.extend([self.track.data.out_lines[i] for i in out_idxs])
self.collision_array = np.array([self.check_rays(r, check_lines) for r in self.rays])
def check_rays(self, r: Line, check_lines: list):
ds = np.array([self.cast_ray(r, l) for l in check_lines])
if np.max(ds) > 0:
return np.min(ds[np.where(ds > 0)[0]])
else:
return float(-1)
def cast_ray(self, r: Line, l: Line):
pInt = get_intersection_point_lineseg_lineseg(l, r, l2_is_ray=True)
if pInt is None:
return float(-1)
else:
d = float(calc_euclid_distance_2d(tuple(r.p1), tuple(pInt)))
if d <= Lidar._xLidarRange:
return d
else:
return float(-1)
if __name__ == "__main__":
import time
t = TrackHandler('octo_track')
l = Lidar(t, 0, 0, 0, 20)
n = 1000
t = time.time()
for i in range(0,n):
l.fire_lidar()
print((time.time()-t) * 1000 / n,'ms')
| true | true |
1c45e2b8f9e20b2f3b7d0e052c8fd311816a6a16 | 1,984 | py | Python | carball/analysis2/stats/demo_stats.py | twobackfromtheend/carball | 6dcc3f7f0f2266cc3e0a3de24deaac2aec392b73 | [
"Apache-2.0"
] | null | null | null | carball/analysis2/stats/demo_stats.py | twobackfromtheend/carball | 6dcc3f7f0f2266cc3e0a3de24deaac2aec392b73 | [
"Apache-2.0"
] | null | null | null | carball/analysis2/stats/demo_stats.py | twobackfromtheend/carball | 6dcc3f7f0f2266cc3e0a3de24deaac2aec392b73 | [
"Apache-2.0"
] | null | null | null | from collections import Counter
from typing import Dict, List
import numpy as np
import pandas as pd
from api.analysis.stats_pb2 import PlayerStats
from api.events.demo_pb2 import Demo
from api.game.game_pb2 import Game
from carball.analysis2.constants.constants import FIELD_Y_LIM, FIELD_X_LIM
def set_demo_stats(player_stats: Dict[str, PlayerStats],
                   game: Game, demos: List[Demo],
                   player_blue_data_frames: Dict[str, pd.DataFrame]):
    """Populate demo-related fields on each player's ``PlayerStats``.

    For every demo that happened during an active frame, tallies demos
    given and taken per player, plus demos that occurred close to the
    victim's own (blue-oriented) goal, then writes the counts onto the
    matching ``PlayerStats`` protos.

    Args:
        player_stats: map of player id -> PlayerStats proto to mutate.
        game: parsed game proto, used to map player ids to names.
        demos: demo events to tally.
        player_blue_data_frames: per-player frame data, presumably
            normalised so every player attacks the orange goal (victim
            positions are read from the frame before the demo).
    """
    player_id_to_name: Dict[str, str] = {player.id.id: player.name for player in game.players}

    demo_counts = Counter()
    demoed_counts = Counter()
    demos_near_opponent_goal_counts = Counter()
    demoed_near_own_goal_counts = Counter()

    # Frames present in the (already filtered) data frames; demos outside
    # these frames are ignored.
    active_frames = list(player_blue_data_frames.values())[0].index
    # Hoisted out of the loop (loop-invariant): the blue goal sits at
    # (0, -FIELD_Y_LIM) in this orientation.
    blue_goal_position = np.array([0, -FIELD_Y_LIM])
    for demo in demos:
        frame_number = demo.frame_number
        if frame_number not in active_frames:
            continue
        attacker_id = demo.attacker_id.id
        victim_id = demo.victim_id.id
        demo_counts[attacker_id] += 1
        demoed_counts[victim_id] += 1

        # Victim position one frame before the demo landed.
        victim_blue_df = player_blue_data_frames[victim_id]
        victim_name = player_id_to_name[victim_id]
        victim_position_at_demo = victim_blue_df.loc[frame_number - 1, (victim_name, ['pos_x', 'pos_y'])].values
        victim_distance_from_goal = ((victim_position_at_demo - blue_goal_position) ** 2).sum() ** 0.5
        # "Near goal" threshold: half of the field's half-width.
        if victim_distance_from_goal < FIELD_X_LIM / 2:
            demos_near_opponent_goal_counts[attacker_id] += 1
            demoed_near_own_goal_counts[victim_id] += 1

    for player_id, _player_stats in player_stats.items():
        _player_stats.demos = demo_counts[player_id]
        _player_stats.demoed = demoed_counts[player_id]
        _player_stats.demos_near_opponent_goal = demos_near_opponent_goal_counts[player_id]
        _player_stats.demoed_near_own_goal = demoed_near_own_goal_counts[player_id]
| 39.68 | 112 | 0.720766 | from collections import Counter
from typing import Dict, List
import numpy as np
import pandas as pd
from api.analysis.stats_pb2 import PlayerStats
from api.events.demo_pb2 import Demo
from api.game.game_pb2 import Game
from carball.analysis2.constants.constants import FIELD_Y_LIM, FIELD_X_LIM
def set_demo_stats(player_stats: Dict[str, PlayerStats],
game: Game, demos: List[Demo],
player_blue_data_frames: Dict[str, pd.DataFrame]):
player_id_to_name: Dict[str, str] = {player.id.id: player.name for player in game.players}
demo_counts = Counter()
demoed_counts = Counter()
demos_near_opponent_goal_counts = Counter()
demoed_near_own_goal_counts = Counter()
active_frames = list(player_blue_data_frames.values())[0].index
for demo in demos:
frame_number = demo.frame_number
if frame_number not in active_frames:
continue
attacker_id = demo.attacker_id.id
victim_id = demo.victim_id.id
demo_counts[attacker_id] += 1
demoed_counts[victim_id] += 1
victim_blue_df = player_blue_data_frames[victim_id]
victim_name = player_id_to_name[victim_id]
victim_position_at_demo = victim_blue_df.loc[frame_number - 1, (victim_name, ['pos_x', 'pos_y'])].values
BLUE_GOAL_POSITION = np.array([0, -FIELD_Y_LIM])
victim_distance_from_goal = ((victim_position_at_demo - BLUE_GOAL_POSITION) ** 2).sum() ** 0.5
if victim_distance_from_goal < FIELD_X_LIM / 2:
demos_near_opponent_goal_counts[attacker_id] += 1
demoed_near_own_goal_counts[victim_id] += 1
for player_id, _player_stats in player_stats.items():
_player_stats.demos = demo_counts[player_id]
_player_stats.demoed = demoed_counts[player_id]
_player_stats.demos_near_opponent_goal = demos_near_opponent_goal_counts[player_id]
_player_stats.demoed_near_own_goal = demoed_near_own_goal_counts[player_id]
| true | true |
1c45e33ca52708a7eea05a4c0e1c5e724e56f6ba | 3,556 | py | Python | deploytesting/settings.py | mouaadBenAllal/deploytest | 1011152d3a00879f450db3deaef64dee7c9009c0 | [
"Apache-2.0"
] | null | null | null | deploytesting/settings.py | mouaadBenAllal/deploytest | 1011152d3a00879f450db3deaef64dee7c9009c0 | [
"Apache-2.0"
] | null | null | null | deploytesting/settings.py | mouaadBenAllal/deploytest | 1011152d3a00879f450db3deaef64dee7c9009c0 | [
"Apache-2.0"
] | null | null | null | """
Django settings for deploytesting project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '!5+^n3g458fs0#x8=142wf+5xqkw2)nb5^_zkry-g2fl&8@rn_')
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = '!5+^n3g458fs0#x8=142wf+5xqkw2)nb5^_zkry-g2fl&8@rn_'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = True
DEBUG = bool(os.environ.get('DJANGO_DEBUG', True))
ALLOWED_HOSTS = ['Mouaadben.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'deploytesting',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'deploytesting.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'deploytesting.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/home/Mouaadben/deploytest.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/' | 28.448 | 102 | 0.708661 |
import os
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '!5+^n3g458fs0#x8=142wf+5xqkw2)nb5^_zkry-g2fl&8@rn_')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# DEBUG = True
DEBUG = bool(os.environ.get('DJANGO_DEBUG', True))
ALLOWED_HOSTS = ['Mouaadben.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'deploytesting',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'deploytesting.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'deploytesting.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/home/Mouaadben/deploytest.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/' | true | true |
1c45e454061369b7ef00827dbb014ceacea5b029 | 212 | py | Python | lambdata_andrewarnett/__init__.py | AndrewArnett/lambdata | fe7e2694a0a099f9df88807f744556c230e9f18d | [
"MIT"
] | null | null | null | lambdata_andrewarnett/__init__.py | AndrewArnett/lambdata | fe7e2694a0a099f9df88807f744556c230e9f18d | [
"MIT"
] | null | null | null | lambdata_andrewarnett/__init__.py | AndrewArnett/lambdata | fe7e2694a0a099f9df88807f744556c230e9f18d | [
"MIT"
] | 1 | 2020-08-04T19:20:50.000Z | 2020-08-04T19:20:50.000Z | """
lambdata - a collection of Data Science helper functions
"""
import pandas as pd
import numpy as np
from lambdata_andrewarnett.dataframe_helper import shape_head, baseline
TEST = pd.DataFrame(np.ones(10))
| 19.272727 | 71 | 0.783019 |
import pandas as pd
import numpy as np
from lambdata_andrewarnett.dataframe_helper import shape_head, baseline
TEST = pd.DataFrame(np.ones(10))
| true | true |
1c45e47eae4c91731680b35fa0da9f2d7d523680 | 10,928 | py | Python | mindmeld/components/_elasticsearch_helpers.py | jre21/mindmeld | 6a88e4b0dfc7971f6bf9ae406b89dbc76f68af81 | [
"Apache-2.0"
] | 1 | 2021-01-06T23:39:57.000Z | 2021-01-06T23:39:57.000Z | mindmeld/components/_elasticsearch_helpers.py | jre21/mindmeld | 6a88e4b0dfc7971f6bf9ae406b89dbc76f68af81 | [
"Apache-2.0"
] | 1 | 2021-02-02T22:53:01.000Z | 2021-02-02T22:53:01.000Z | mindmeld/components/_elasticsearch_helpers.py | jre21/mindmeld | 6a88e4b0dfc7971f6bf9ae406b89dbc76f68af81 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains helper methods for consuming Elasticsearch."""
import os
import logging
from elasticsearch5 import (Elasticsearch, ImproperlyConfigured, ElasticsearchException,
ConnectionError as EsConnectionError, TransportError)
from elasticsearch5.helpers import streaming_bulk
from tqdm import tqdm
from ._config import DEFAULT_ES_INDEX_TEMPLATE, DEFAULT_ES_INDEX_TEMPLATE_NAME
from ..exceptions import KnowledgeBaseConnectionError, KnowledgeBaseError
logger = logging.getLogger(__name__)
INDEX_TYPE_SYNONYM = 'syn'
INDEX_TYPE_KB = 'kb'
def get_scoped_index_name(app_namespace, index_name):
    """Return the index name scoped to the app namespace: ``"<ns>$<index>"``."""
    return '%s$%s' % (app_namespace, index_name)
def create_es_client(es_host=None, es_user=None, es_pass=None):
    """Creates a new Elasticsearch client

    Args:
        es_host (str): The Elasticsearch host server; falls back to the
            MM_ES_HOST environment variable.
        es_user (str): The Elasticsearch username for http auth
            (falls back to MM_ES_USERNAME).
        es_pass (str): The Elasticsearch password for http auth
            (falls back to MM_ES_PASSWORD).

    Returns:
        Elasticsearch: a configured client.

    Raises:
        KnowledgeBaseError: if the client cannot be constructed.
    """
    es_host = es_host or os.environ.get('MM_ES_HOST')
    es_user = es_user or os.environ.get('MM_ES_USERNAME')
    es_pass = es_pass or os.environ.get('MM_ES_PASSWORD')

    try:
        # Only send http_auth when both credentials are present.
        http_auth = (es_user, es_pass) if es_user and es_pass else None
        return Elasticsearch(es_host, http_auth=http_auth)
    except (ElasticsearchException, ImproperlyConfigured) as exc:
        # Fix: the original had two identical except branches and dropped
        # the original exception; chain it for easier debugging.
        raise KnowledgeBaseError from exc
def does_index_exist(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2):
    """Return boolean flag to indicate whether the specified index exists.

    Args:
        app_namespace (str): The namespace of the app.
        index_name (str): The name of the index to check.
        es_host (str): The Elasticsearch host server.
        es_client (Elasticsearch): An existing client to reuse; one is
            created from ``es_host`` when omitted.
        connect_timeout (int, optional): Timeout in seconds for the initial
            cluster-health ping.

    Raises:
        KnowledgeBaseConnectionError: if Elasticsearch is unreachable.
        KnowledgeBaseError: on any other Elasticsearch failure.
    """
    es_client = es_client or create_es_client(es_host)
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    try:
        # Confirm ES connection with a shorter timeout
        es_client.cluster.health(request_timeout=connect_timeout)
        return es_client.indices.exists(index=scoped_index_name)
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError
    except ElasticsearchException:
        raise KnowledgeBaseError
def get_field_names(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2):
    """Return a list of field names available in the specified index.

    Args:
        app_namespace (str): The namespace of the app.
        index_name (str): The name of the index to inspect.
        es_host (str): The Elasticsearch host server.
        es_client (Elasticsearch): An existing client to reuse; one is
            created from ``es_host`` when omitted.
        connect_timeout (int, optional): Timeout in seconds for the initial
            connection check.

    Returns:
        list: the field names defined in the index's document mapping.

    Raises:
        ValueError: if the index does not exist.
        KnowledgeBaseConnectionError: if Elasticsearch is unreachable.
        KnowledgeBaseError: on any other Elasticsearch failure.
    """
    es_client = es_client or create_es_client(es_host)
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    try:
        if not does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout):
            raise ValueError('Elasticsearch index \'{}\' does not exist.'.format(index_name))
        res = es_client.indices.get(index=scoped_index_name)
        all_field_info = res[scoped_index_name]['mappings']['document']['properties']
        # Fix: return a concrete list (as documented) rather than a
        # dict-keys view tied to the response object's lifetime.
        return list(all_field_info)
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError
    except ElasticsearchException:
        raise KnowledgeBaseError
def create_index(app_namespace, index_name, mapping, es_host=None, es_client=None,
                 connect_timeout=2):
    """Creates a new index.

    Args:
        app_namespace (str): The namespace of the app
        index_name (str): The name of the new index to be created
        mapping (str): The Elasticsearch index mapping to use
        es_host (str): The Elasticsearch host server
        es_client: The Elasticsearch client
        connect_timeout (int, optional): The amount of time for a connection to the
            Elasticsearch host

    Raises:
        KnowledgeBaseConnectionError: if Elasticsearch is unreachable.
        KnowledgeBaseError: on any other Elasticsearch failure.
    """
    es_client = es_client or create_es_client(es_host)
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    try:
        if not does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout):
            # checks the existence of default index template, if not then creates it.
            if not es_client.indices.exists_template(name=DEFAULT_ES_INDEX_TEMPLATE_NAME):
                es_client.indices.put_template(name=DEFAULT_ES_INDEX_TEMPLATE_NAME,
                                               body=DEFAULT_ES_INDEX_TEMPLATE)
            logger.info('Creating index %r', index_name)
            es_client.indices.create(scoped_index_name, body=mapping)
        else:
            # Deliberately non-fatal: an existing index is logged, not raised.
            logger.error('Index %r already exists.', index_name)
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError('Unexpected error occurred when sending requests to '
                                 'Elasticsearch: {} Status code: {} details: '
                                 '{}'.format(e.error, e.status_code, e.info))
    except ElasticsearchException:
        raise KnowledgeBaseError
def delete_index(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2):
    """Deletes an index.

    Args:
        app_namespace (str): The namespace of the app
        index_name (str): The name of the index to be deleted
        es_host (str): The Elasticsearch host server
        es_client: The Elasticsearch client
        connect_timeout (int, optional): The amount of time for a connection to the
            Elasticsearch host

    Raises:
        ValueError: if the index does not exist (unlike create_index, a
            missing index here is treated as an error).
        KnowledgeBaseConnectionError: if Elasticsearch is unreachable.
        KnowledgeBaseError: on any other Elasticsearch failure.
    """
    es_client = es_client or create_es_client(es_host)
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    try:
        if does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout):
            logger.info('Deleting index %r', index_name)
            es_client.indices.delete(scoped_index_name)
        else:
            raise ValueError('Elasticsearch index \'{}\' for application \'{}\' does not exist.'
                             .format(index_name, app_namespace))
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError
    except ElasticsearchException:
        raise KnowledgeBaseError
def load_index(app_namespace, index_name, docs, docs_count, mapping, doc_type, es_host=None,
               es_client=None, connect_timeout=2):
    """Loads documents from data into the specified index. If an index with the specified name
    doesn't exist, a new index with that name will be created.

    Args:
        app_namespace (str): The namespace of the app
        index_name (str): The name of the new index to be created
        docs (iterable): An iterable which contains a collection of documents in the correct format
            which should be imported into the index
        docs_count (int): The number of documents in doc
        mapping (str): The Elasticsearch index mapping to use
        doc_type (str): The document type
        es_host (str): The Elasticsearch host server
        es_client (Elasticsearch): The Elasticsearch client
        connect_timeout (int, optional): The amount of time for a connection to the
            Elasticsearch host
    """
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    es_client = es_client or create_es_client(es_host)
    try:
        # create index if specified index does not exist
        if does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout):
            logger.warning('Elasticsearch index \'%s\' for application \'%s\' already exists!',
                           index_name, app_namespace)
            logger.info('Loading index %r', index_name)
        else:
            create_index(app_namespace, index_name, mapping, es_host=es_host, es_client=es_client)
        count = 0
        # create the progress bar with docs count
        pbar = tqdm(total=docs_count)
        try:
            for okay, result in streaming_bulk(es_client, docs,
                                               index=scoped_index_name, doc_type=doc_type,
                                               chunk_size=50, raise_on_error=False):
                action, result = result.popitem()
                doc_id = '/%s/%s/%s' % (index_name, doc_type, result['_id'])
                # process the information from ES whether the document has been
                # successfully indexed
                if not okay:
                    logger.error('Failed to %s document %s: %r', action, doc_id, result)
                else:
                    count += 1
                pbar.update(1)
        finally:
            # close the progress bar and flush all output; previously the bar
            # was left open when streaming_bulk raised mid-load
            pbar.close()
        logger.info('Loaded %s document%s', count, '' if count == 1 else 's')
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError
    except ElasticsearchException:
        raise KnowledgeBaseError
| 46.901288 | 99 | 0.685212 |
import os
import logging
from elasticsearch5 import (Elasticsearch, ImproperlyConfigured, ElasticsearchException,
ConnectionError as EsConnectionError, TransportError)
from elasticsearch5.helpers import streaming_bulk
from tqdm import tqdm
from ._config import DEFAULT_ES_INDEX_TEMPLATE, DEFAULT_ES_INDEX_TEMPLATE_NAME
from ..exceptions import KnowledgeBaseConnectionError, KnowledgeBaseError
logger = logging.getLogger(__name__)
INDEX_TYPE_SYNONYM = 'syn'
INDEX_TYPE_KB = 'kb'
def get_scoped_index_name(app_namespace, index_name):
    """Return the index name scoped (prefixed) with the app namespace."""
    # '<namespace>$<index>' — same result as '{}${}'.format(...)
    return '%s$%s' % (app_namespace, index_name)
def create_es_client(es_host=None, es_user=None, es_pass=None):
    """Create an Elasticsearch client.

    Args:
        es_host (str, optional): The Elasticsearch host; falls back to the
            MM_ES_HOST environment variable.
        es_user (str, optional): User name; falls back to MM_ES_USERNAME.
        es_pass (str, optional): Password; falls back to MM_ES_PASSWORD.

    Returns:
        Elasticsearch: The configured client.

    Raises:
        KnowledgeBaseError: If the client cannot be created or is
            improperly configured.
    """
    es_host = es_host or os.environ.get('MM_ES_HOST')
    es_user = es_user or os.environ.get('MM_ES_USERNAME')
    es_pass = es_pass or os.environ.get('MM_ES_PASSWORD')
    try:
        # only send basic auth when both credentials are available
        http_auth = (es_user, es_pass) if es_user and es_pass else None
        es_client = Elasticsearch(es_host, http_auth=http_auth)
        return es_client
    except ElasticsearchException:
        raise KnowledgeBaseError
    except ImproperlyConfigured:
        raise KnowledgeBaseError
def does_index_exist(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2):
    """Check whether the scoped index exists on the Elasticsearch host.

    Args:
        app_namespace (str): The namespace of the app
        index_name (str): The name of the index to check
        es_host (str): The Elasticsearch host server
        es_client: The Elasticsearch client
        connect_timeout (int, optional): The amount of time for a connection to the
            Elasticsearch host

    Returns:
        bool: True if the scoped index exists.
    """
    es_client = es_client or create_es_client(es_host)
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    try:
        # probe cluster health first so an unreachable host surfaces as a
        # connection error rather than a missing index
        es_client.cluster.health(request_timeout=connect_timeout)
        return es_client.indices.exists(index=scoped_index_name)
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError
    except ElasticsearchException:
        raise KnowledgeBaseError
def get_field_names(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2):
    """Return the top-level field names from the index's document mapping.

    Args:
        app_namespace (str): The namespace of the app
        index_name (str): The name of the index to inspect
        es_host (str): The Elasticsearch host server
        es_client: The Elasticsearch client
        connect_timeout (int, optional): The amount of time for a connection to the
            Elasticsearch host

    Returns:
        The keys of the 'document' mapping's properties.

    Raises:
        ValueError: If the index does not exist.
    """
    es_client = es_client or create_es_client(es_host)
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    try:
        if not does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout):
            raise ValueError('Elasticsearch index \'{}\' does not exist.'.format(index_name))
        res = es_client.indices.get(index=scoped_index_name)
        # pull the property names out of the 'document' type mapping
        all_field_info = res[scoped_index_name]['mappings']['document']['properties']
        return all_field_info.keys()
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError
    except ElasticsearchException:
        raise KnowledgeBaseError
def create_index(app_namespace, index_name, mapping, es_host=None, es_client=None,
                 connect_timeout=2):
    """Create a new index with the given mapping.

    Args:
        app_namespace (str): The namespace of the app
        index_name (str): The name of the new index to be created
        mapping (str): The Elasticsearch index mapping to use
        es_host (str): The Elasticsearch host server
        es_client: The Elasticsearch client
        connect_timeout (int, optional): The amount of time for a connection to the
            Elasticsearch host
    """
    es_client = es_client or create_es_client(es_host)
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    try:
        if not does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout):
            # register the default index template once before creating the
            # first index that relies on it
            if not es_client.indices.exists_template(name=DEFAULT_ES_INDEX_TEMPLATE_NAME):
                es_client.indices.put_template(name=DEFAULT_ES_INDEX_TEMPLATE_NAME,
                                               body=DEFAULT_ES_INDEX_TEMPLATE)
            logger.info('Creating index %r', index_name)
            es_client.indices.create(scoped_index_name, body=mapping)
        else:
            logger.error('Index %r already exists.', index_name)
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError('Unexpected error occurred when sending requests to '
                                 'Elasticsearch: {} Status code: {} details: '
                                 '{}'.format(e.error, e.status_code, e.info))
    except ElasticsearchException:
        raise KnowledgeBaseError
def delete_index(app_namespace, index_name, es_host=None, es_client=None, connect_timeout=2):
    """Deletes an index.

    Args:
        app_namespace (str): The namespace of the app
        index_name (str): The name of the index to be deleted
        es_host (str): The Elasticsearch host server
        es_client: The Elasticsearch client
        connect_timeout (int, optional): The amount of time for a connection to the
            Elasticsearch host

    Raises:
        ValueError: If the scoped index does not exist.
    """
    es_client = es_client or create_es_client(es_host)
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    try:
        if does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout):
            logger.info('Deleting index %r', index_name)
            es_client.indices.delete(scoped_index_name)
        else:
            raise ValueError('Elasticsearch index \'{}\' for application \'{}\' does not exist.'
                             .format(index_name, app_namespace))
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError
    except ElasticsearchException:
        raise KnowledgeBaseError
def load_index(app_namespace, index_name, docs, docs_count, mapping, doc_type, es_host=None,
               es_client=None, connect_timeout=2):
    """Loads documents from data into the specified index. If an index with the specified name
    doesn't exist, a new index with that name will be created.

    Args:
        app_namespace (str): The namespace of the app
        index_name (str): The name of the new index to be created
        docs (iterable): An iterable which contains a collection of documents in the correct format
            which should be imported into the index
        docs_count (int): The number of documents in doc
        mapping (str): The Elasticsearch index mapping to use
        doc_type (str): The document type
        es_host (str): The Elasticsearch host server
        es_client (Elasticsearch): The Elasticsearch client
        connect_timeout (int, optional): The amount of time for a connection to the
            Elasticsearch host
    """
    scoped_index_name = get_scoped_index_name(app_namespace, index_name)
    es_client = es_client or create_es_client(es_host)
    try:
        # create the index first if it does not already exist
        if does_index_exist(app_namespace, index_name, es_host, es_client, connect_timeout):
            logger.warning('Elasticsearch index \'%s\' for application \'%s\' already exists!',
                           index_name, app_namespace)
            logger.info('Loading index %r', index_name)
        else:
            create_index(app_namespace, index_name, mapping, es_host=es_host, es_client=es_client)
        count = 0
        # progress bar sized by the expected document count
        pbar = tqdm(total=docs_count)
        for okay, result in streaming_bulk(es_client, docs,
                                           index=scoped_index_name, doc_type=doc_type,
                                           chunk_size=50, raise_on_error=False):
            action, result = result.popitem()
            doc_id = '/%s/%s/%s' % (index_name, doc_type, result['_id'])
            # report per-document failures without aborting the bulk load
            if not okay:
                logger.error('Failed to %s document %s: %r', action, doc_id, result)
            else:
                count += 1
            pbar.update(1)
        # close the progress bar and flush all output
        pbar.close()
        logger.info('Loaded %s document%s', count, '' if count == 1 else 's')
    except EsConnectionError as e:
        logger.debug('Unable to connect to Elasticsearch: %s details: %s', e.error, e.info)
        raise KnowledgeBaseConnectionError(es_host=es_client.transport.hosts)
    except TransportError as e:
        logger.error('Unexpected error occurred when sending requests to Elasticsearch: %s '
                     'Status code: %s details: %s', e.error, e.status_code, e.info)
        raise KnowledgeBaseError
    except ElasticsearchException:
        raise KnowledgeBaseError
| true | true |
1c45e48a779bc28e0a8409233594b33b5c303ad5 | 376 | py | Python | test/test_ynab.py | quinnhosler/ynab-sdk-python | 4ef8040bb44216212a84c8990329dcf63972e0fa | [
"Apache-2.0"
] | null | null | null | test/test_ynab.py | quinnhosler/ynab-sdk-python | 4ef8040bb44216212a84c8990329dcf63972e0fa | [
"Apache-2.0"
] | null | null | null | test/test_ynab.py | quinnhosler/ynab-sdk-python | 4ef8040bb44216212a84c8990329dcf63972e0fa | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from test.support.dummy_client import DummyClient
from ynab_sdk import YNAB
class YNABTest(TestCase):
    """Tests for the top-level YNAB client wrapper."""

    # populated in setUp
    ynab: YNAB
    client: DummyClient

    def setUp(self):
        """Create a YNAB instance backed by a dummy API client."""
        self.client = DummyClient()
        self.ynab = YNAB(client=self.client)

    def test_client_requires_key_or_client(self):
        """Constructing YNAB with neither an API key nor a client must fail."""
        self.assertRaises(AssertionError, YNAB)
| 22.117647 | 49 | 0.728723 | from unittest import TestCase
from test.support.dummy_client import DummyClient
from ynab_sdk import YNAB
class YNABTest(TestCase):
ynab: YNAB
client: DummyClient
def setUp(self):
self.client = DummyClient()
self.ynab = YNAB(client=self.client)
def test_client_requires_key_or_client(self):
self.assertRaises(AssertionError, YNAB)
| true | true |
1c45e570f903cc0c5df4101b24907713afacfe1b | 1,034 | py | Python | test/app/all_tests.py | chuyqa/pydoop | 575f56cc66381fef08981a2452acde02bddf0363 | [
"Apache-2.0"
] | 1 | 2021-03-22T02:22:30.000Z | 2021-03-22T02:22:30.000Z | test/app/all_tests.py | chuyqa/pydoop | 575f56cc66381fef08981a2452acde02bddf0363 | [
"Apache-2.0"
] | null | null | null | test/app/all_tests.py | chuyqa/pydoop | 575f56cc66381fef08981a2452acde02bddf0363 | [
"Apache-2.0"
] | null | null | null | # BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
import unittest
from pydoop.test_utils import get_module
TEST_MODULE_NAMES = [
'test_submit',
]
def suite(path=None):
    """Assemble a TestSuite from every configured test module."""
    # TestSuite accepts any iterable of sub-suites, so build it lazily.
    return unittest.TestSuite(
        get_module(name, path).suite() for name in TEST_MODULE_NAMES
    )
if __name__ == '__main__':
    import sys
    # Exit 0 on success, 1 on any failure/error (same as `not wasSuccessful()`).
    outcome = unittest.TextTestRunner(verbosity=2).run(suite())
    sys.exit(0 if outcome.wasSuccessful() else 1)
| 26.512821 | 77 | 0.737911 |
import unittest
from pydoop.test_utils import get_module
TEST_MODULE_NAMES = [
'test_submit',
]
def suite(path=None):
    """Return a TestSuite aggregating the suites of all configured modules.

    Args:
        path: Optional search path forwarded to get_module.
    """
    suites = []
    for module in TEST_MODULE_NAMES:
        suites.append(get_module(module, path).suite())
    return unittest.TestSuite(suites)
if __name__ == '__main__':
    import sys
    # run the aggregate suite; exit non-zero if any test failed
    _RESULT = unittest.TextTestRunner(verbosity=2).run(suite())
    sys.exit(not _RESULT.wasSuccessful())
| true | true |
1c45e61545b2b62f4211d66f7c74da507f7af9e4 | 15,352 | py | Python | tests/unit/test_hooks.py | i386x/tox-lsr | 22f4d63d58050b1c1bee2e91eb239c31f35cfd13 | [
"MIT"
] | null | null | null | tests/unit/test_hooks.py | i386x/tox-lsr | 22f4d63d58050b1c1bee2e91eb239c31f35cfd13 | [
"MIT"
] | null | null | null | tests/unit/test_hooks.py | i386x/tox-lsr | 22f4d63d58050b1c1bee2e91eb239c31f35cfd13 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
#
"""Tests for tox_lsr hooks."""
import os
import shutil
import tempfile
try:
from unittest import mock as unittest_mock
from unittest.mock import MagicMock, Mock, patch
except ImportError:
import mock as unittest_mock
from mock import MagicMock, Mock, patch
from copy import deepcopy
import pkg_resources
# I have no idea why pylint complains about this. This works:
# command = python -c 'import py; print(dir(py.iniconfig))'
# bug in pylint? anyway, just ignore it
# in addition - pylint does not allow me to disable it
# on the same line, so I have to disable it before the line
# pylint: disable=no-member,no-name-in-module,import-error
import py.iniconfig
import unittest2
from tox_lsr.hooks import (
CONFIG_FILES_SUBDIR,
LSR_CONFIG_SECTION,
LSR_ENABLE,
LSR_ENABLE_ENV,
SCRIPT_NAME,
TOX_DEFAULT_INI,
_LSRPath,
is_lsr_enabled,
merge_config,
merge_envconf,
merge_ini,
merge_prop_values,
prop_is_set,
set_prop_values_ini,
tox_addoption,
tox_configure,
)
from .utils import MockConfig
# code uses some protected members such as _cfg, _parser, _reader
# pylint: disable=protected-access
class HooksTestCase(unittest2.TestCase):
    """Unit tests for the tox_lsr plugin hooks."""

    def setUp(self):
        """Create a scratch tox workdir and stub out resource lookups."""
        self.toxworkdir = tempfile.mkdtemp()
        patch(
            "pkg_resources.resource_filename",
            return_value=self.toxworkdir + "/" + SCRIPT_NAME,
        ).start()
        self.default_tox_ini_b = pkg_resources.resource_string(
            "tox_lsr", CONFIG_FILES_SUBDIR + "/" + TOX_DEFAULT_INI
        )
        self.default_tox_ini_raw = self.default_tox_ini_b.decode()
        # e.g. __file__ is tests/unit/something.py -
        # fixture_path is tests/fixtures
        self.tests_path = os.path.dirname(
            os.path.dirname(os.path.abspath(__file__))
        )
        self.fixture_path = os.path.join(
            self.tests_path, "fixtures", self.id().split(".")[-1]
        )

    def tearDown(self):
        """Remove the scratch workdir and stop all active patches."""
        shutil.rmtree(self.toxworkdir)
        patch.stopall()

    def test_tox_addoption(self):
        """Test tox_addoption."""
        parser = Mock(add_argument=Mock())
        tox_addoption(parser)
        self.assertEqual(1, parser.add_argument.call_count)

    def test_tox_configure(self):
        """Test tox_configure."""
        config = MockConfig(toxworkdir=self.toxworkdir)
        # when the plugin is disabled, tox_configure is a no-op
        with patch(
            "tox_lsr.hooks.is_lsr_enabled", return_value=False
        ) as mock_ile:
            tox_configure(config)
            self.assertEqual(1, mock_ile.call_count)
        setattr(config.option, LSR_ENABLE, True)
        default_config = MockConfig(toxworkdir=self.toxworkdir)
        # Config/ParseIni first raise TypeError to exercise the fallback
        # construction path, hence the expected call counts of 2.
        with patch(
            "pkg_resources.resource_string",
            return_value=self.default_tox_ini_b,
        ) as mock_rs:
            with patch("tox_lsr.hooks.merge_config") as mock_mc:
                with patch(
                    "tox_lsr.hooks.merge_ini",
                    return_value=self.default_tox_ini_raw,
                ) as mock_mi:
                    with patch(
                        "tox_lsr.hooks.Config",
                        side_effect=[TypeError(), default_config],
                    ) as mock_cfg:
                        with patch(
                            "tox_lsr.hooks.ParseIni",
                            side_effect=[TypeError(), None],
                        ) as mock_pi:
                            tox_configure(config)
                            self.assertEqual(1, mock_rs.call_count)
                            self.assertEqual(2, mock_pi.call_count)
                            self.assertEqual(1, mock_mc.call_count)
                            self.assertEqual(1, mock_mi.call_count)
                            self.assertEqual(2, mock_cfg.call_count)

    def test_tox_merge_ini(self):
        """Test that given config is merged with default config ini."""
        config = MockConfig(toxworkdir=self.toxworkdir)
        tox_ini_file = os.path.join(self.fixture_path, "tox.ini")
        config._cfg = py.iniconfig.IniConfig(tox_ini_file)
        result = merge_ini(config, self.default_tox_ini_raw)
        # check the result
        expected_file = os.path.join(self.fixture_path, "result.ini")
        expected_ini = py.iniconfig.IniConfig(expected_file)
        result_ini = py.iniconfig.IniConfig("", result)
        self.assertDictEqual(expected_ini.sections, result_ini.sections)

    def test_tox_prop_is_set(self):
        """Test prop_is_set."""
        tec = Mock(envname="prop")
        tec._reader = Mock()
        tec._reader._cfg = Mock()
        cfgdict = {
            "empty_str_prop": "",
            "str_prop": "str_prop",
            "int_prop": 0,
            "bool_prop": False,
            "float_prop": 0.0,
            "list_prop": [1, 2, 3],
            "empty_list_prop": [],
            "dict_prop": {"a": "a"},
            "empty_dict_prop": {},
            "obj_prop": object(),
            "none_prop": None,
        }
        # props present in the ini sections count as "set", even falsy ones
        tec._reader._cfg.sections = deepcopy({"testenv": cfgdict})
        for prop in cfgdict:
            self.assertTrue(prop_is_set(tec, prop))
        tec._reader._cfg.sections["testenv:prop"] = deepcopy(cfgdict)
        for prop in cfgdict:
            self.assertTrue(prop_is_set(tec, prop))
        del tec._reader._cfg.sections["testenv"]
        del tec._reader._cfg.sections["testenv:prop"]
        # attributes only on the envconfig object do not count as "set"
        tec.configure_mock(**deepcopy(cfgdict))
        for prop in cfgdict:
            self.assertFalse(prop_is_set(tec, prop))

    def test_tox_merge_prop_values(self):
        """Test merge_prop_values."""
        # assert that code ignores properties it does not handle
        tec = MagicMock()
        def_tec = MagicMock()
        merge_prop_values("nosuchprop", tec, def_tec)
        self.assertFalse(tec.mock_calls)
        self.assertFalse(def_tec.mock_calls)
        # test empty tec
        tec = MagicMock()
        def_tec = MagicMock()
        propnames = ["setenv", "deps", "passenv", "whitelist_externals"]
        empty_attrs = {
            "setenv": {},
            "deps": [],
            "passenv": set(),
            "whitelist_externals": [],
        }
        tec.configure_mock(**deepcopy(empty_attrs))
        full_attrs = {
            "setenv": {"a": "a", "b": "b"},
            "deps": ["a", "b"],
            "passenv": set(["a", "b"]),
            "whitelist_externals": ["a", "b"],
        }
        def_tec.configure_mock(**deepcopy(full_attrs))
        for prop in propnames:
            merge_prop_values(prop, tec, def_tec)
        for prop in propnames:
            val = getattr(tec, prop)
            exp_val = full_attrs[prop]
            # list order is not guaranteed by the merge, so compare as sets
            if isinstance(val, list):
                self.assertEqual(set(exp_val), set(val))
            else:
                self.assertEqual(exp_val, val)
        # test empty def_tec
        tec = MagicMock()
        def_tec = MagicMock()
        tec.configure_mock(**deepcopy(full_attrs))
        def_tec.configure_mock(**deepcopy(empty_attrs))
        for prop in propnames:
            merge_prop_values(prop, tec, def_tec)
        for prop in propnames:
            val = getattr(tec, prop)
            exp_val = full_attrs[prop]
            if isinstance(val, list):
                self.assertEqual(set(exp_val), set(val))
            else:
                self.assertEqual(exp_val, val)
        # test merging
        more_attrs = {
            "setenv": {"a": "a", "c": "c"},
            "deps": ["a", "c"],
            "passenv": set(["a", "c"]),
            "whitelist_externals": ["a", "c"],
        }
        result_attrs = {
            "setenv": {"a": "a", "b": "b", "c": "c"},
            "deps": ["a", "b", "c"],
            "passenv": set(["a", "b", "c"]),
            "whitelist_externals": ["a", "b", "c"],
        }
        tec = MagicMock()
        def_tec = MagicMock()
        tec.configure_mock(**deepcopy(full_attrs))
        def_tec.configure_mock(**deepcopy(more_attrs))
        for prop in propnames:
            merge_prop_values(prop, tec, def_tec)
        for prop in propnames:
            val = getattr(tec, prop)
            exp_val = result_attrs[prop]
            if isinstance(val, list):
                self.assertEqual(set(exp_val), set(val))
            else:
                self.assertEqual(exp_val, val)

    def test_tox_merge_envconf(self):
        """Test the merge_envconf method."""
        # test exception handling
        prop = "unsettable"

        def mock_unsettable_is_set(envconf, propname):
            # "set" only on the default envconf, to force a copy attempt
            if propname != prop:
                return False
            if envconf == def_tec:
                return True
            return False

        def_tec = Mock(unsettable="unsettable")
        tec = Mock()
        with patch(
            "tox_lsr.hooks.prop_is_set", side_effect=mock_unsettable_is_set
        ):
            with patch("tox_lsr.hooks.setattr", side_effect=AttributeError()):
                merge_envconf(tec, def_tec)
                self.assertNotEqual(tec.unsettable, "unsettable")
        # test setting an unset property
        prop = "propa"

        def mock_prop_is_set(envconf, propname):
            if propname != prop:
                return False
            if envconf == def_tec:
                return True
            return False

        unittest_mock.FILTER_DIR = (
            False  # for handling attributes that start with underscore
        )
        def_tec = Mock(spec=[prop], propa=prop, _ignoreme="ignoreme")
        tec = Mock(spec=[prop])
        with patch("tox_lsr.hooks.prop_is_set", side_effect=mock_prop_is_set):
            merge_envconf(tec, def_tec)
        unittest_mock.FILTER_DIR = True  # reset to default
        self.assertEqual(prop, tec.propa)
        # test that it tries to merge if both props are set
        # pylint: disable=unused-argument

        def mock_prop_is_set2(envconf, propname):
            if propname != prop:
                return False
            return True

        def_tec = Mock(spec=[prop], propa=prop)
        tec = Mock(spec=[prop], propa="someothervalue")
        with patch("tox_lsr.hooks.prop_is_set", side_effect=mock_prop_is_set2):
            with patch("tox_lsr.hooks.merge_prop_values") as mock_mpv:
                merge_envconf(tec, def_tec)
                self.assertEqual(1, mock_mpv.call_count)
                self.assertEqual("someothervalue", tec.propa)

    def test_tox_merge_config(self):
        """Test the merge_config method."""
        tox_attrs = {
            "a": "a",
            "b": "b",
        }
        tec = Mock()
        tec._cfg = Mock()
        tec._cfg.sections = deepcopy({"tox": tox_attrs})
        tec.configure_mock(**deepcopy(tox_attrs))
        tec.envlist_explicit = False
        tec.envlist = ["a", "b"]
        tec.envlist_default = ["a", "b"]
        enva = {}
        envb = {}
        tec.envconfigs = {"a": enva, "b": envb}
        # default config overlaps on "a"/"b" and adds "c"; "_skip" must be ignored
        def_tox_attrs = {"a": "b", "b": "c", "c": "d", "_skip": "skip"}
        unittest_mock.FILTER_DIR = (
            False  # for handling attributes that start with underscore
        )
        def_tec = Mock()
        def_tec._cfg = Mock()
        def_tec._cfg.sections = deepcopy({"tox": def_tox_attrs})
        def_tec.configure_mock(**deepcopy(def_tox_attrs))
        def_tec.envlist = ["b", "c"]
        def_tec.envlist_default = ["b", "c"]
        envc = {}
        def_tec.envconfigs = {"b": {}, "c": envc}
        with patch("tox_lsr.hooks.merge_envconf") as mock_me:
            merge_config(tec, def_tec)
        # only env "b" overlaps, so merge_envconf runs exactly once
        self.assertEqual(1, mock_me.call_count)
        self.assertIs(enva, tec.envconfigs["a"])
        self.assertIs(envb, tec.envconfigs["b"])
        self.assertIs(envc, tec.envconfigs["c"])
        # user-set values win; default-only values are copied in
        self.assertEqual("a", tec.a)
        self.assertEqual("b", tec.b)
        self.assertEqual("d", tec.c)
        self.assertEqual(set(["a", "b", "c"]), set(tec.envlist))
        self.assertEqual(set(["a", "b", "c"]), set(tec.envlist_default))
        unittest_mock.FILTER_DIR = True  # reset

    def test_tox_set_set_prop_values_ini(self):
        """Test set_prop_values_ini."""
        conf = {"a": "a", "b": "b"}
        def_conf = {}
        set_prop_values_ini("a", def_conf, conf)
        self.assertEqual({"a": "a"}, def_conf)
        # repeated calls are idempotent for plain values
        set_prop_values_ini("a", def_conf, conf)
        self.assertEqual({"a": "a"}, def_conf)
        set_prop_values_ini("b", def_conf, conf)
        self.assertEqual({"a": "a", "b": "b"}, def_conf)
        set_prop_values_ini("a", def_conf, conf)
        self.assertEqual({"a": "a", "b": "b"}, def_conf)
        # mergeable list-like ini values are concatenated with a newline
        conf = {
            "setenv": "a\nb",
            "deps": "a",
            "passenv": "TEST_*",
            "whitelist_externals": "mycmd\nmyothercmd",
        }
        def_conf = {
            "setenv": "c\nd",
            "deps": "b",
            "passenv": "*",
            "whitelist_externals": "bash",
        }
        set_prop_values_ini("setenv", def_conf, conf)
        self.assertEqual("c\nd\na\nb", def_conf["setenv"])
        set_prop_values_ini("deps", def_conf, conf)
        self.assertEqual("b\na", def_conf["deps"])
        set_prop_values_ini("passenv", def_conf, conf)
        self.assertEqual("*\nTEST_*", def_conf["passenv"])
        set_prop_values_ini("whitelist_externals", def_conf, conf)
        self.assertEqual(
            "bash\nmycmd\nmyothercmd", def_conf["whitelist_externals"]
        )

    def test_lsr_path(self):
        """Test the _LSRPath class."""
        real = "/no/such/path/to/realfile"
        temp = "/no/such/path/to/temp"
        stack_plain = (("myfile", 1, "myfunc", "text"),)
        stack_iniconfig = (("/path/to/iniconfig.py", 1, "__init__", "text"),)
        # ordinary callers see the real path ...
        with patch("traceback.extract_stack", return_value=stack_plain):
            lsr = _LSRPath(real, temp)
            self.assertEqual(real, str(lsr))
        # ... but iniconfig's __init__ is redirected to the temp path
        with patch("traceback.extract_stack", return_value=stack_iniconfig):
            lsr = _LSRPath(real, temp)
            self.assertEqual(temp, str(lsr))

    def test_is_lsr_enabled(self):
        """Test is_lsr_enabled."""
        config = MockConfig({})
        config._cfg.get = Mock(return_value="false")
        self.assertFalse(is_lsr_enabled(config))
        config._cfg.sections[LSR_CONFIG_SECTION] = {}
        self.assertFalse(is_lsr_enabled(config))
        self.assertFalse(is_lsr_enabled(config))
        config._cfg.get = Mock(return_value="true")
        self.assertTrue(is_lsr_enabled(config))
        # the environment variable overrides the ini setting
        config._cfg.get = Mock(return_value="true")
        os.environ[LSR_ENABLE_ENV] = "false"
        self.assertFalse(is_lsr_enabled(config))
        config._cfg.get = Mock(return_value="false")
        os.environ[LSR_ENABLE_ENV] = "true"
        self.assertTrue(is_lsr_enabled(config))
        # the command-line option overrides both ini and environment
        config = MockConfig()
        config._cfg.get = Mock(return_value="false")
        os.environ[LSR_ENABLE_ENV] = "false"
        setattr(config.option, LSR_ENABLE, True)
        self.assertTrue(is_lsr_enabled(config))
        config._cfg.get = Mock(return_value="true")
        os.environ[LSR_ENABLE_ENV] = "true"
        setattr(config.option, LSR_ENABLE, False)
        self.assertFalse(is_lsr_enabled(config))
        del os.environ[LSR_ENABLE_ENV]
| 36.293144 | 79 | 0.572108 |
import os
import shutil
import tempfile
try:
from unittest import mock as unittest_mock
from unittest.mock import MagicMock, Mock, patch
except ImportError:
import mock as unittest_mock
from mock import MagicMock, Mock, patch
from copy import deepcopy
import pkg_resources
import py.iniconfig
import unittest2
from tox_lsr.hooks import (
CONFIG_FILES_SUBDIR,
LSR_CONFIG_SECTION,
LSR_ENABLE,
LSR_ENABLE_ENV,
SCRIPT_NAME,
TOX_DEFAULT_INI,
_LSRPath,
is_lsr_enabled,
merge_config,
merge_envconf,
merge_ini,
merge_prop_values,
prop_is_set,
set_prop_values_ini,
tox_addoption,
tox_configure,
)
from .utils import MockConfig
class HooksTestCase(unittest2.TestCase):
def setUp(self):
self.toxworkdir = tempfile.mkdtemp()
patch(
"pkg_resources.resource_filename",
return_value=self.toxworkdir + "/" + SCRIPT_NAME,
).start()
self.default_tox_ini_b = pkg_resources.resource_string(
"tox_lsr", CONFIG_FILES_SUBDIR + "/" + TOX_DEFAULT_INI
)
self.default_tox_ini_raw = self.default_tox_ini_b.decode()
self.tests_path = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
self.fixture_path = os.path.join(
self.tests_path, "fixtures", self.id().split(".")[-1]
)
def tearDown(self):
shutil.rmtree(self.toxworkdir)
patch.stopall()
def test_tox_addoption(self):
parser = Mock(add_argument=Mock())
tox_addoption(parser)
self.assertEqual(1, parser.add_argument.call_count)
def test_tox_configure(self):
config = MockConfig(toxworkdir=self.toxworkdir)
with patch(
"tox_lsr.hooks.is_lsr_enabled", return_value=False
) as mock_ile:
tox_configure(config)
self.assertEqual(1, mock_ile.call_count)
setattr(config.option, LSR_ENABLE, True)
default_config = MockConfig(toxworkdir=self.toxworkdir)
with patch(
"pkg_resources.resource_string",
return_value=self.default_tox_ini_b,
) as mock_rs:
with patch("tox_lsr.hooks.merge_config") as mock_mc:
with patch(
"tox_lsr.hooks.merge_ini",
return_value=self.default_tox_ini_raw,
) as mock_mi:
with patch(
"tox_lsr.hooks.Config",
side_effect=[TypeError(), default_config],
) as mock_cfg:
with patch(
"tox_lsr.hooks.ParseIni",
side_effect=[TypeError(), None],
) as mock_pi:
tox_configure(config)
self.assertEqual(1, mock_rs.call_count)
self.assertEqual(2, mock_pi.call_count)
self.assertEqual(1, mock_mc.call_count)
self.assertEqual(1, mock_mi.call_count)
self.assertEqual(2, mock_cfg.call_count)
def test_tox_merge_ini(self):
config = MockConfig(toxworkdir=self.toxworkdir)
tox_ini_file = os.path.join(self.fixture_path, "tox.ini")
config._cfg = py.iniconfig.IniConfig(tox_ini_file)
result = merge_ini(config, self.default_tox_ini_raw)
expected_file = os.path.join(self.fixture_path, "result.ini")
expected_ini = py.iniconfig.IniConfig(expected_file)
result_ini = py.iniconfig.IniConfig("", result)
self.assertDictEqual(expected_ini.sections, result_ini.sections)
def test_tox_prop_is_set(self):
tec = Mock(envname="prop")
tec._reader = Mock()
tec._reader._cfg = Mock()
cfgdict = {
"empty_str_prop": "",
"str_prop": "str_prop",
"int_prop": 0,
"bool_prop": False,
"float_prop": 0.0,
"list_prop": [1, 2, 3],
"empty_list_prop": [],
"dict_prop": {"a": "a"},
"empty_dict_prop": {},
"obj_prop": object(),
"none_prop": None,
}
tec._reader._cfg.sections = deepcopy({"testenv": cfgdict})
for prop in cfgdict:
self.assertTrue(prop_is_set(tec, prop))
tec._reader._cfg.sections["testenv:prop"] = deepcopy(cfgdict)
for prop in cfgdict:
self.assertTrue(prop_is_set(tec, prop))
del tec._reader._cfg.sections["testenv"]
del tec._reader._cfg.sections["testenv:prop"]
tec.configure_mock(**deepcopy(cfgdict))
for prop in cfgdict:
self.assertFalse(prop_is_set(tec, prop))
def test_tox_merge_prop_values(self):
tec = MagicMock()
def_tec = MagicMock()
merge_prop_values("nosuchprop", tec, def_tec)
self.assertFalse(tec.mock_calls)
self.assertFalse(def_tec.mock_calls)
tec = MagicMock()
def_tec = MagicMock()
propnames = ["setenv", "deps", "passenv", "whitelist_externals"]
empty_attrs = {
"setenv": {},
"deps": [],
"passenv": set(),
"whitelist_externals": [],
}
tec.configure_mock(**deepcopy(empty_attrs))
full_attrs = {
"setenv": {"a": "a", "b": "b"},
"deps": ["a", "b"],
"passenv": set(["a", "b"]),
"whitelist_externals": ["a", "b"],
}
def_tec.configure_mock(**deepcopy(full_attrs))
for prop in propnames:
merge_prop_values(prop, tec, def_tec)
for prop in propnames:
val = getattr(tec, prop)
exp_val = full_attrs[prop]
if isinstance(val, list):
self.assertEqual(set(exp_val), set(val))
else:
self.assertEqual(exp_val, val)
tec = MagicMock()
def_tec = MagicMock()
tec.configure_mock(**deepcopy(full_attrs))
def_tec.configure_mock(**deepcopy(empty_attrs))
for prop in propnames:
merge_prop_values(prop, tec, def_tec)
for prop in propnames:
val = getattr(tec, prop)
exp_val = full_attrs[prop]
if isinstance(val, list):
self.assertEqual(set(exp_val), set(val))
else:
self.assertEqual(exp_val, val)
more_attrs = {
"setenv": {"a": "a", "c": "c"},
"deps": ["a", "c"],
"passenv": set(["a", "c"]),
"whitelist_externals": ["a", "c"],
}
result_attrs = {
"setenv": {"a": "a", "b": "b", "c": "c"},
"deps": ["a", "b", "c"],
"passenv": set(["a", "b", "c"]),
"whitelist_externals": ["a", "b", "c"],
}
tec = MagicMock()
def_tec = MagicMock()
tec.configure_mock(**deepcopy(full_attrs))
def_tec.configure_mock(**deepcopy(more_attrs))
for prop in propnames:
merge_prop_values(prop, tec, def_tec)
for prop in propnames:
val = getattr(tec, prop)
exp_val = result_attrs[prop]
if isinstance(val, list):
self.assertEqual(set(exp_val), set(val))
else:
self.assertEqual(exp_val, val)
def test_tox_merge_envconf(self):
prop = "unsettable"
def mock_unsettable_is_set(envconf, propname):
if propname != prop:
return False
if envconf == def_tec:
return True
return False
def_tec = Mock(unsettable="unsettable")
tec = Mock()
with patch(
"tox_lsr.hooks.prop_is_set", side_effect=mock_unsettable_is_set
):
with patch("tox_lsr.hooks.setattr", side_effect=AttributeError()):
merge_envconf(tec, def_tec)
self.assertNotEqual(tec.unsettable, "unsettable")
prop = "propa"
def mock_prop_is_set(envconf, propname):
if propname != prop:
return False
if envconf == def_tec:
return True
return False
unittest_mock.FILTER_DIR = (
False
)
def_tec = Mock(spec=[prop], propa=prop, _ignoreme="ignoreme")
tec = Mock(spec=[prop])
with patch("tox_lsr.hooks.prop_is_set", side_effect=mock_prop_is_set):
merge_envconf(tec, def_tec)
unittest_mock.FILTER_DIR = True
self.assertEqual(prop, tec.propa)
def mock_prop_is_set2(envconf, propname):
if propname != prop:
return False
return True
def_tec = Mock(spec=[prop], propa=prop)
tec = Mock(spec=[prop], propa="someothervalue")
with patch("tox_lsr.hooks.prop_is_set", side_effect=mock_prop_is_set2):
with patch("tox_lsr.hooks.merge_prop_values") as mock_mpv:
merge_envconf(tec, def_tec)
self.assertEqual(1, mock_mpv.call_count)
self.assertEqual("someothervalue", tec.propa)
def test_tox_merge_config(self):
tox_attrs = {
"a": "a",
"b": "b",
}
tec = Mock()
tec._cfg = Mock()
tec._cfg.sections = deepcopy({"tox": tox_attrs})
tec.configure_mock(**deepcopy(tox_attrs))
tec.envlist_explicit = False
tec.envlist = ["a", "b"]
tec.envlist_default = ["a", "b"]
enva = {}
envb = {}
tec.envconfigs = {"a": enva, "b": envb}
def_tox_attrs = {"a": "b", "b": "c", "c": "d", "_skip": "skip"}
unittest_mock.FILTER_DIR = (
False
)
def_tec = Mock()
def_tec._cfg = Mock()
def_tec._cfg.sections = deepcopy({"tox": def_tox_attrs})
def_tec.configure_mock(**deepcopy(def_tox_attrs))
def_tec.envlist = ["b", "c"]
def_tec.envlist_default = ["b", "c"]
envc = {}
def_tec.envconfigs = {"b": {}, "c": envc}
with patch("tox_lsr.hooks.merge_envconf") as mock_me:
merge_config(tec, def_tec)
self.assertEqual(1, mock_me.call_count)
self.assertIs(enva, tec.envconfigs["a"])
self.assertIs(envb, tec.envconfigs["b"])
self.assertIs(envc, tec.envconfigs["c"])
self.assertEqual("a", tec.a)
self.assertEqual("b", tec.b)
self.assertEqual("d", tec.c)
self.assertEqual(set(["a", "b", "c"]), set(tec.envlist))
self.assertEqual(set(["a", "b", "c"]), set(tec.envlist_default))
unittest_mock.FILTER_DIR = True
def test_tox_set_set_prop_values_ini(self):
conf = {"a": "a", "b": "b"}
def_conf = {}
set_prop_values_ini("a", def_conf, conf)
self.assertEqual({"a": "a"}, def_conf)
set_prop_values_ini("a", def_conf, conf)
self.assertEqual({"a": "a"}, def_conf)
set_prop_values_ini("b", def_conf, conf)
self.assertEqual({"a": "a", "b": "b"}, def_conf)
set_prop_values_ini("a", def_conf, conf)
self.assertEqual({"a": "a", "b": "b"}, def_conf)
conf = {
"setenv": "a\nb",
"deps": "a",
"passenv": "TEST_*",
"whitelist_externals": "mycmd\nmyothercmd",
}
def_conf = {
"setenv": "c\nd",
"deps": "b",
"passenv": "*",
"whitelist_externals": "bash",
}
set_prop_values_ini("setenv", def_conf, conf)
self.assertEqual("c\nd\na\nb", def_conf["setenv"])
set_prop_values_ini("deps", def_conf, conf)
self.assertEqual("b\na", def_conf["deps"])
set_prop_values_ini("passenv", def_conf, conf)
self.assertEqual("*\nTEST_*", def_conf["passenv"])
set_prop_values_ini("whitelist_externals", def_conf, conf)
self.assertEqual(
"bash\nmycmd\nmyothercmd", def_conf["whitelist_externals"]
)
def test_lsr_path(self):
real = "/no/such/path/to/realfile"
temp = "/no/such/path/to/temp"
stack_plain = (("myfile", 1, "myfunc", "text"),)
stack_iniconfig = (("/path/to/iniconfig.py", 1, "__init__", "text"),)
with patch("traceback.extract_stack", return_value=stack_plain):
lsr = _LSRPath(real, temp)
self.assertEqual(real, str(lsr))
with patch("traceback.extract_stack", return_value=stack_iniconfig):
lsr = _LSRPath(real, temp)
self.assertEqual(temp, str(lsr))
    def test_is_lsr_enabled(self):
        """is_lsr_enabled: CLI option overrides env var overrides ini file.

        Walks the precedence chain: a missing/false ini setting disables,
        a true ini setting enables; LSR_ENABLE_ENV overrides the ini value
        in both directions; the command-line option (LSR_ENABLE) overrides
        both of the others in both directions.
        """
        config = MockConfig({})
        config._cfg.get = Mock(return_value="false")
        self.assertFalse(is_lsr_enabled(config))
        # Adding the lsr config section alone does not enable it.
        config._cfg.sections[LSR_CONFIG_SECTION] = {}
        self.assertFalse(is_lsr_enabled(config))
        self.assertFalse(is_lsr_enabled(config))
        config._cfg.get = Mock(return_value="true")
        self.assertTrue(is_lsr_enabled(config))
        # The environment variable beats the ini file setting.
        config._cfg.get = Mock(return_value="true")
        os.environ[LSR_ENABLE_ENV] = "false"
        self.assertFalse(is_lsr_enabled(config))
        config._cfg.get = Mock(return_value="false")
        os.environ[LSR_ENABLE_ENV] = "true"
        self.assertTrue(is_lsr_enabled(config))
        # The command-line option beats both the env var and the ini file.
        config = MockConfig()
        config._cfg.get = Mock(return_value="false")
        os.environ[LSR_ENABLE_ENV] = "false"
        setattr(config.option, LSR_ENABLE, True)
        self.assertTrue(is_lsr_enabled(config))
        config._cfg.get = Mock(return_value="true")
        os.environ[LSR_ENABLE_ENV] = "true"
        setattr(config.option, LSR_ENABLE, False)
        self.assertFalse(is_lsr_enabled(config))
        # Clean up the process environment for subsequent tests.
        del os.environ[LSR_ENABLE_ENV]
| true | true |
1c45e7af81826e60de1299cc184fcbaf42464f56 | 3,170 | py | Python | lists/tests/test_models.py | brendanodwyer/python-tdd-book | ff3a8a8254a3112937ce9924dfa05ba52069c8bf | [
"Apache-2.0"
] | null | null | null | lists/tests/test_models.py | brendanodwyer/python-tdd-book | ff3a8a8254a3112937ce9924dfa05ba52069c8bf | [
"Apache-2.0"
] | null | null | null | lists/tests/test_models.py | brendanodwyer/python-tdd-book | ff3a8a8254a3112937ce9924dfa05ba52069c8bf | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.test import TestCase
from lists.models import Item
from lists.models import List
User = get_user_model()
class ItemModelTest(TestCase):
    """Unit tests for the Item model."""

    def test_default_text(self):
        """A freshly constructed Item has empty text."""
        self.assertEqual(Item().text, "")
class ListModelTest(TestCase):
    """Unit tests for the List model and its relationship with Item."""

    def test_item_is_related_to_list(self):
        """Saving an item with its list set adds it to that list's item_set."""
        parent = List.objects.create()
        item = Item()
        item.list = parent
        item.save()
        self.assertIn(item, parent.item_set.all())

    def test_cannot_save_empty_list_items(self):
        """Items with empty text fail model validation."""
        parent = List.objects.create()
        blank_item = Item(list=parent, text="")
        with self.assertRaises(ValidationError):
            blank_item.save()
            blank_item.full_clean()

    def test_get_absolute_url(self):
        """A list's canonical URL embeds its primary key."""
        parent = List.objects.create()
        self.assertEqual(f"/lists/{parent.id}/", parent.get_absolute_url())

    def test_duplicate_items_are_invalid(self):
        """Two items with the same text in one list are rejected."""
        parent = List.objects.create()
        Item.objects.create(list=parent, text="bla")
        with self.assertRaises(ValidationError):
            duplicate = Item(list=parent, text="bla")
            duplicate.full_clean()

    def test_CAN_save_same_item_to_different_lists(self):
        """The duplicate-text constraint is scoped per list, not global."""
        first_list = List.objects.create()
        second_list = List.objects.create()
        Item.objects.create(list=first_list, text="bla")
        twin = Item(list=second_list, text="bla")
        twin.full_clean()  # should not raise

    def test_list_ordering(self):
        """Items come back in creation order."""
        parent = List.objects.create()
        created = [
            Item.objects.create(list=parent, text=text)
            for text in ("i1", "i2", "i3")
        ]
        self.assertEqual(created, list(Item.objects.all()))

    def test_string_representation(self):
        """str(item) is the item's text."""
        self.assertEqual("some text", str(Item(text="some text")))

    def test_create_new_creates_lists_and_first_item(self):
        """List.create_new makes a list plus its first item in one call."""
        List.create_new(first_item_text="new item text")
        created_item = Item.objects.first()
        self.assertEqual("new item text", created_item.text)
        self.assertEqual(List.objects.first(), created_item.list)

    def test_create_new_optionally_saves_owner(self):
        """List.create_new records the owner when one is supplied."""
        owner = User.objects.create()
        List.create_new(first_item_text="new item text", owner=owner)
        self.assertEqual(owner, List.objects.first().owner)

    def test_lists_can_have_owners(self):
        """Constructing a List with an owner is valid."""
        List(owner=User())  # should not raise

    def test_lists_owner_is_optinal(self):
        """A List without an owner passes validation."""
        List().full_clean()  # should not raise

    def test_create_returns_new_list_object(self):
        """List.create_new returns the list it created."""
        returned = List.create_new(first_item_text="new item text")
        self.assertEqual(List.objects.first(), returned)

    def test_list_name_is_first_item_text(self):
        """A list's name is the text of its first item."""
        parent = List.objects.create()
        Item.objects.create(list=parent, text="first item")
        Item.objects.create(list=parent, text="second item")
        self.assertEqual("first item", parent.name)
| 34.086022 | 73 | 0.667508 | from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.test import TestCase
from lists.models import Item
from lists.models import List
User = get_user_model()
class ItemModelTest(TestCase):
    """Unit tests for the Item model."""
    def test_default_text(self):
        """A freshly constructed Item defaults to empty text."""
        item = Item()
        self.assertEqual(item.text, "")
class ListModelTest(TestCase):
    """Unit tests for the List model and its relationship with Item."""
    def test_item_is_related_to_list(self):
        """An item saved with a list appears in that list's item_set."""
        list_ = List.objects.create()
        item = Item()
        item.list = list_
        item.save()
        self.assertIn(item, list_.item_set.all())
    def test_cannot_save_empty_list_items(self):
        """Items with empty text fail full_clean() validation."""
        list_ = List.objects.create()
        item = Item(list=list_, text="")
        with self.assertRaises(ValidationError):
            item.save()
            item.full_clean()
    def test_get_absolute_url(self):
        """A list's canonical URL embeds its primary key."""
        list_ = List.objects.create()
        self.assertEqual(list_.get_absolute_url(), f"/lists/{list_.id}/")
    def test_duplicate_items_are_invalid(self):
        """Two items with the same text in one list fail validation."""
        list_ = List.objects.create()
        Item.objects.create(list=list_, text="bla")
        with self.assertRaises(ValidationError):
            item = Item(list=list_, text="bla")
            item.full_clean()
    def test_CAN_save_same_item_to_different_lists(self):
        """The duplicate-text constraint is scoped per list, not global."""
        list1 = List.objects.create()
        list2 = List.objects.create()
        Item.objects.create(list=list1, text="bla")
        item = Item(list=list2, text="bla")
        item.full_clean()  # should not raise
    def test_list_ordering(self):
        """Items are returned in creation order."""
        list1 = List.objects.create()
        item1 = Item.objects.create(list=list1, text="i1")
        item2 = Item.objects.create(list=list1, text="i2")
        item3 = Item.objects.create(list=list1, text="i3")
        self.assertEqual(list(Item.objects.all()), [item1, item2, item3])
    def test_string_representation(self):
        """str(item) is the item's text."""
        item = Item(text="some text")
        self.assertEqual(str(item), "some text")
    def test_create_new_creates_lists_and_first_item(self):
        """List.create_new makes a list plus its first item in one call."""
        List.create_new(first_item_text="new item text")
        new_item = Item.objects.first()
        self.assertEqual(new_item.text, "new item text")
        new_list = List.objects.first()
        self.assertEqual(new_item.list, new_list)
    def test_create_new_optionally_saves_owner(self):
        """List.create_new records the owner when one is supplied."""
        user = User.objects.create()
        List.create_new(first_item_text="new item text", owner=user)
        new_list = List.objects.first()
        self.assertEqual(new_list.owner, user)
    def test_lists_can_have_owners(self):
        """Constructing a List with an owner is valid."""
        List(owner=User())  # should not raise
    def test_lists_owner_is_optinal(self):
        """A List without an owner passes validation."""
        List().full_clean()  # should not raise
    def test_create_returns_new_list_object(self):
        """List.create_new returns the list it created."""
        returned = List.create_new(first_item_text="new item text")
        new_list = List.objects.first()
        self.assertEqual(returned, new_list)
    def test_list_name_is_first_item_text(self):
        """A list's name is the text of its first item."""
        new_list = List.objects.create()
        Item.objects.create(list=new_list, text="first item")
        Item.objects.create(list=new_list, text="second item")
        self.assertEqual(new_list.name, "first item")
| true | true |
1c45e7bb27a310e77f8849bbd05bd8e62fc757bb | 12,127 | py | Python | core/domain/caching_services.py | prayutsu/oppia | e82da7653f7bbfb9ded0e1ba16cd9f481ff5a786 | [
"Apache-2.0"
] | 2 | 2020-10-13T12:59:08.000Z | 2020-10-13T17:10:26.000Z | core/domain/caching_services.py | gitter-badger/oppia | 7d8e659264582d7ce74bc6c139e597b82bca0e04 | [
"Apache-2.0"
] | 35 | 2019-02-23T20:31:21.000Z | 2019-08-19T12:32:13.000Z | core/domain/caching_services.py | gitter-badger/oppia | 7d8e659264582d7ce74bc6c139e597b82bca0e04 | [
"Apache-2.0"
] | 1 | 2021-08-13T07:54:56.000Z | 2021-08-13T07:54:56.000Z | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Service functions to set and retrieve data from the memory cache."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import json
from core.domain import collection_domain
from core.domain import exp_domain
from core.domain import platform_parameter_domain
from core.domain import skill_domain
from core.domain import story_domain
from core.domain import topic_domain
from core.platform import models
import python_utils
memory_cache_services = models.Registry.import_cache_services()
# NOTE: Namespaces and sub-namespaces cannot contain ':' because this is used as
# an internal delimiter for cache keys that separates the namespace, the
# sub-namespace, and the id in the cache keys.
MEMCACHE_KEY_DELIMITER = ':'
# This namespace supports sub-namespaces which are identified by the stringified
# version number of the explorations within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of an
# Exploration. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the exploration.
CACHE_NAMESPACE_EXPLORATION = 'exploration'
# This namespace supports sub-namespaces which are identified by the stringified
# version number of the collections within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of a
# Collection. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the collection.
CACHE_NAMESPACE_COLLECTION = 'collection'
# This namespace supports sub-namespaces which are identified by the stringified
# version number of the skills within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of a
# Skill. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the skill.
CACHE_NAMESPACE_SKILL = 'skill'
# This namespace supports sub-namespaces which are identified by the stringified
# version number of the stories within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of a
# Story. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the story.
CACHE_NAMESPACE_STORY = 'story'
# This namespace supports sub-namespaces which are identified by the stringified
# version number of the topics within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of a
# Topic. There is also a special sub-namespace represented by the empty
# string; this sub-namespace stores the latest version of the topic.
CACHE_NAMESPACE_TOPIC = 'topic'
# This namespace supports sub-namespaces which are identified by the stringified
# version number of the topics within the sub-namespace. The value for
# each key in this namespace should be a serialized representation of a
# Platform Parameter. This namespace does not support sub-namespaces.
CACHE_NAMESPACE_PLATFORM_PARAMETER = 'platform'
# The value for each key in this namespace should be a serialized representation
# of a ConfigPropertyModel value (the 'value' attribute of a ConfigPropertyModel
# object). This namespace does not support sub-namespaces.
CACHE_NAMESPACE_CONFIG = 'config'
# The sub-namespace is not necessary for the default namespace. The namespace
# handles default datatypes allowed by Redis including Strings, Lists, Sets,
# and Hashes. More details can be found at: https://redis.io/topics/data-types.
CACHE_NAMESPACE_DEFAULT = 'default'
DESERIALIZATION_FUNCTIONS = {
CACHE_NAMESPACE_COLLECTION: collection_domain.Collection.deserialize,
CACHE_NAMESPACE_EXPLORATION: exp_domain.Exploration.deserialize,
CACHE_NAMESPACE_SKILL: skill_domain.Skill.deserialize,
CACHE_NAMESPACE_STORY: story_domain.Story.deserialize,
CACHE_NAMESPACE_TOPIC: topic_domain.Topic.deserialize,
CACHE_NAMESPACE_PLATFORM_PARAMETER: (
platform_parameter_domain.PlatformParameter.deserialize),
CACHE_NAMESPACE_CONFIG: lambda x: json.loads(x.decode('utf-8')),
CACHE_NAMESPACE_DEFAULT: lambda x: json.loads(x.decode('utf-8'))
}
SERIALIZATION_FUNCTIONS = {
CACHE_NAMESPACE_COLLECTION: lambda x: x.serialize(),
CACHE_NAMESPACE_EXPLORATION: lambda x: x.serialize(),
CACHE_NAMESPACE_SKILL: lambda x: x.serialize(),
CACHE_NAMESPACE_STORY: lambda x: x.serialize(),
CACHE_NAMESPACE_TOPIC: lambda x: x.serialize(),
CACHE_NAMESPACE_PLATFORM_PARAMETER: lambda x: x.serialize(),
CACHE_NAMESPACE_CONFIG: lambda x: json.dumps(x).encode('utf-8'),
CACHE_NAMESPACE_DEFAULT: lambda x: json.dumps(x).encode('utf-8')
}
def _get_memcache_key(namespace, sub_namespace, obj_id):
    """Returns a memcache key for the class under the corresponding
    namespace and sub_namespace.

    Args:
        namespace: str. The namespace under which the values associated with
            the id lie. Use CACHE_NAMESPACE_DEFAULT as the namespace for ids
            that are not associated with a conceptual domain-layer entity and
            therefore don't require serialization.
        sub_namespace: str|None. The sub-namespace further differentiates the
            values. For Explorations, Skills, Stories, Topics, and
            Collections, the sub-namespace is the stringified version number
            of the objects.
        obj_id: str. The id of the value to store in the memory cache.

    Raises:
        ValueError. The sub-namespace contains a ':' (the key delimiter).

    Returns:
        str. The generated key for use in the memory cache in order to
        differentiate a passed-in key based on namespace and sub-namespace.
    """
    # A missing sub-namespace collapses to the empty string, yielding keys
    # of the form 'namespace::obj_id'.
    sub_namespace_key_string = (sub_namespace or '')
    if MEMCACHE_KEY_DELIMITER in sub_namespace_key_string:
        raise ValueError(
            'Sub-namespace %s cannot contain \':\'.' % sub_namespace_key_string)
    return '%s%s%s%s%s' % (
        namespace, MEMCACHE_KEY_DELIMITER,
        sub_namespace_key_string, MEMCACHE_KEY_DELIMITER, obj_id)
def flush_memory_cache():
    """Flushes the memory cache by wiping all of the data."""
    # Thin pass-through; the platform cache service performs the flush.
    memory_cache_services.flush_cache()
def get_multi(namespace, sub_namespace, obj_ids):
    """Fetch and deserialize multiple values from the memory cache.

    Args:
        namespace: str. The namespace that determines how the cached strings
            are decoded. Use CACHE_NAMESPACE_DEFAULT for values that are not
            associated with a conceptual domain-layer entity.
        sub_namespace: str|None. Optional sub-namespace (e.g. the stringified
            version number for versioned entities); pass None when unused.
        obj_ids: list(str). Ids of the values to look up.

    Raises:
        ValueError. The namespace is not recognized.

    Returns:
        dict(str, Exploration|Skill|Story|Topic|Collection|str). Mapping from
        each id found in the cache to its decoded value; ids that miss the
        cache are omitted.
    """
    # An empty query short-circuits before namespace validation, matching
    # the behavior of the other cache operations.
    if not obj_ids:
        return {}
    if namespace not in DESERIALIZATION_FUNCTIONS:
        raise ValueError('Invalid namespace: %s.' % namespace)
    decode = DESERIALIZATION_FUNCTIONS[namespace]
    cache_keys = [
        _get_memcache_key(namespace, sub_namespace, obj_id)
        for obj_id in obj_ids]
    cached_values = memory_cache_services.get_multi(cache_keys)
    # Cache misses come back falsy and are simply dropped from the result.
    return {
        obj_id: decode(value)
        for obj_id, value in python_utils.ZIP(obj_ids, cached_values)
        if value
    }
def set_multi(namespace, sub_namespace, id_value_mapping):
    """Serialize and store multiple id/value pairs in the memory cache.

    Args:
        namespace: str. The namespace that selects the serializer; use
            CACHE_NAMESPACE_DEFAULT for plain JSON-serializable values.
        sub_namespace: str|None. Optional sub-namespace (e.g. the stringified
            version number for versioned entities); pass None when unused.
        id_value_mapping: dict(str, Exploration|Skill|Story|Topic|Collection
            |str). The id/value pairs to cache.

    Raises:
        ValueError. The namespace is not recognized.

    Returns:
        bool. Whether all set operations completed successfully.
    """
    # Nothing to store; report success without validating the namespace,
    # mirroring the empty-input fast path of the other cache operations.
    if not id_value_mapping:
        return True
    if namespace not in SERIALIZATION_FUNCTIONS:
        raise ValueError('Invalid namespace: %s.' % namespace)
    encode = SERIALIZATION_FUNCTIONS[namespace]
    encoded_mapping = {}
    for obj_id, value in id_value_mapping.items():
        cache_key = _get_memcache_key(namespace, sub_namespace, obj_id)
        encoded_mapping[cache_key] = encode(value)
    return memory_cache_services.set_multi(encoded_mapping)
def delete_multi(namespace, sub_namespace, obj_ids):
    """Delete multiple ids from the memory cache.

    Args:
        namespace: str. The namespace under which the values associated with
            the ids lie; use CACHE_NAMESPACE_DEFAULT for non-domain values.
        sub_namespace: str|None. Optional sub-namespace (e.g. the stringified
            version number for versioned entities); pass None when unused.
        obj_ids: list(str). The ids to delete from the cache.

    Raises:
        ValueError. The namespace is not recognized.

    Returns:
        bool. True iff every requested key was deleted.
    """
    if not obj_ids:
        return True
    if namespace not in DESERIALIZATION_FUNCTIONS:
        raise ValueError('Invalid namespace: %s.' % namespace)
    cache_keys = [
        _get_memcache_key(namespace, sub_namespace, obj_id)
        for obj_id in obj_ids]
    # The platform service reports how many keys it removed; success means
    # all of them were present and deleted.
    deleted_count = memory_cache_services.delete_multi(cache_keys)
    return deleted_count == len(obj_ids)
def get_memory_cache_stats():
    """Get a memory profile of the cache in a dictionary dependent on how the
    caching service profiles its own cache.

    Returns:
        MemoryCacheStats. MemoryCacheStats object containing the total allocated
        memory in bytes, peak memory usage in bytes, and the total number of
        keys stored as values.
    """
    # Thin pass-through to the platform cache service.
    return memory_cache_services.get_memory_cache_stats()
| 45.762264 | 80 | 0.740002 |
from __future__ import absolute_import
from __future__ import unicode_literals
import json
from core.domain import collection_domain
from core.domain import exp_domain
from core.domain import platform_parameter_domain
from core.domain import skill_domain
from core.domain import story_domain
from core.domain import topic_domain
from core.platform import models
import python_utils
memory_cache_services = models.Registry.import_cache_services()
MEMCACHE_KEY_DELIMITER = ':'
CACHE_NAMESPACE_EXPLORATION = 'exploration'
CACHE_NAMESPACE_COLLECTION = 'collection'
CACHE_NAMESPACE_SKILL = 'skill'
CACHE_NAMESPACE_STORY = 'story'
CACHE_NAMESPACE_TOPIC = 'topic'
CACHE_NAMESPACE_PLATFORM_PARAMETER = 'platform'
CACHE_NAMESPACE_CONFIG = 'config'
CACHE_NAMESPACE_DEFAULT = 'default'
DESERIALIZATION_FUNCTIONS = {
CACHE_NAMESPACE_COLLECTION: collection_domain.Collection.deserialize,
CACHE_NAMESPACE_EXPLORATION: exp_domain.Exploration.deserialize,
CACHE_NAMESPACE_SKILL: skill_domain.Skill.deserialize,
CACHE_NAMESPACE_STORY: story_domain.Story.deserialize,
CACHE_NAMESPACE_TOPIC: topic_domain.Topic.deserialize,
CACHE_NAMESPACE_PLATFORM_PARAMETER: (
platform_parameter_domain.PlatformParameter.deserialize),
CACHE_NAMESPACE_CONFIG: lambda x: json.loads(x.decode('utf-8')),
CACHE_NAMESPACE_DEFAULT: lambda x: json.loads(x.decode('utf-8'))
}
SERIALIZATION_FUNCTIONS = {
CACHE_NAMESPACE_COLLECTION: lambda x: x.serialize(),
CACHE_NAMESPACE_EXPLORATION: lambda x: x.serialize(),
CACHE_NAMESPACE_SKILL: lambda x: x.serialize(),
CACHE_NAMESPACE_STORY: lambda x: x.serialize(),
CACHE_NAMESPACE_TOPIC: lambda x: x.serialize(),
CACHE_NAMESPACE_PLATFORM_PARAMETER: lambda x: x.serialize(),
CACHE_NAMESPACE_CONFIG: lambda x: json.dumps(x).encode('utf-8'),
CACHE_NAMESPACE_DEFAULT: lambda x: json.dumps(x).encode('utf-8')
}
def _get_memcache_key(namespace, sub_namespace, obj_id):
    """Builds the 'namespace:sub_namespace:obj_id' memory cache key.

    Raises:
        ValueError. The sub-namespace contains the ':' delimiter.
    """
    # A missing sub-namespace collapses to the empty string.
    sub_namespace_key_string = (sub_namespace or '')
    if MEMCACHE_KEY_DELIMITER in sub_namespace_key_string:
        raise ValueError(
            'Sub-namespace %s cannot contain \':\'.' % sub_namespace_key_string)
    return '%s%s%s%s%s' % (
        namespace, MEMCACHE_KEY_DELIMITER,
        sub_namespace_key_string, MEMCACHE_KEY_DELIMITER, obj_id)
def flush_memory_cache():
    """Flushes the memory cache by wiping all of the data."""
    memory_cache_services.flush_cache()
def get_multi(namespace, sub_namespace, obj_ids):
    """Returns {obj_id: deserialized value} for the ids found in the cache.

    Raises:
        ValueError. The namespace is not recognized.
    """
    result_dict = {}
    if len(obj_ids) == 0:
        return result_dict
    if namespace not in DESERIALIZATION_FUNCTIONS:
        raise ValueError('Invalid namespace: %s.' % namespace)
    memcache_keys = [
        _get_memcache_key(namespace, sub_namespace, obj_id)
        for obj_id in obj_ids]
    values = memory_cache_services.get_multi(memcache_keys)
    for obj_id, value in python_utils.ZIP(obj_ids, values):
        # Falsy values indicate cache misses and are omitted.
        if value:
            result_dict[obj_id] = DESERIALIZATION_FUNCTIONS[namespace](value)
    return result_dict
def set_multi(namespace, sub_namespace, id_value_mapping):
    """Serializes and stores the given id/value pairs; returns success.

    Raises:
        ValueError. The namespace is not recognized.
    """
    if len(id_value_mapping) == 0:
        return True
    if namespace not in SERIALIZATION_FUNCTIONS:
        raise ValueError('Invalid namespace: %s.' % namespace)
    memory_cache_id_value_mapping = (
        {
            _get_memcache_key(namespace, sub_namespace, obj_id):
            SERIALIZATION_FUNCTIONS[namespace](value)
            for obj_id, value in id_value_mapping.items()
        })
    return memory_cache_services.set_multi(memory_cache_id_value_mapping)
def delete_multi(namespace, sub_namespace, obj_ids):
    """Deletes the given ids from the cache; True iff all were deleted.

    Raises:
        ValueError. The namespace is not recognized.
    """
    if len(obj_ids) == 0:
        return True
    if namespace not in DESERIALIZATION_FUNCTIONS:
        raise ValueError('Invalid namespace: %s.' % namespace)
    memcache_keys = [
        _get_memcache_key(namespace, sub_namespace, obj_id)
        for obj_id in obj_ids]
    return memory_cache_services.delete_multi(memcache_keys) == len(obj_ids)
def get_memory_cache_stats():
    """Returns the memory profile reported by the platform cache service."""
    return memory_cache_services.get_memory_cache_stats()
| true | true |
1c45e7bf89b8194eb005c56d95a2d6c85d741f03 | 326 | py | Python | pa05_find_max.py | jpch89/picalgo | 73aa98e477c68bb39d337914065c0fe1b4bad756 | [
"MIT"
] | null | null | null | pa05_find_max.py | jpch89/picalgo | 73aa98e477c68bb39d337914065c0fe1b4bad756 | [
"MIT"
] | null | null | null | pa05_find_max.py | jpch89/picalgo | 73aa98e477c68bb39d337914065c0fe1b4bad756 | [
"MIT"
def find_max(arr):
    """Recursively return the largest element of a non-empty sequence.

    Args:
        arr: a non-empty indexable sequence of mutually comparable values.

    Returns:
        The maximum element of arr.

    Raises:
        ValueError: if arr is empty.
    """
    # Guard: the original fell through to arr[0] on [] and raised IndexError.
    if not arr:
        raise ValueError('find_max() arg is an empty sequence')
    # Base case: a single element is its own maximum.
    if len(arr) == 1:
        return arr[0]
    # Recursive case: the max is the larger of the head and the max of the
    # tail. (This also covers the redundant len == 2 branch of the original.)
    rest_max = find_max(arr[1:])
    return arr[0] if arr[0] > rest_max else rest_max
if __name__ == '__main__':
    # Demo run; prints 8, the maximum of the sample list.
    arr = [1, 7, 4, 8]
    print(find_max(arr))
"""
8
"""
def find_max(arr):
    """Recursively return the largest element of a non-empty sequence.

    Args:
        arr: a non-empty indexable sequence of mutually comparable values.

    Returns:
        The maximum element of arr.

    Raises:
        ValueError: if arr is empty.
    """
    # Guard: the original fell through to arr[0] on [] and raised IndexError.
    if not arr:
        raise ValueError('find_max() arg is an empty sequence')
    # Base case: a single element is its own maximum.
    if len(arr) == 1:
        return arr[0]
    # Recursive case: the max is the larger of the head and the max of the
    # tail. (This also covers the redundant len == 2 branch of the original.)
    rest_max = find_max(arr[1:])
    return arr[0] if arr[0] > rest_max else rest_max
if __name__ == '__main__':
    # Demo run; prints 8, the maximum of the sample list.
    arr = [1, 7, 4, 8]
    print(find_max(arr))
| true | true |
1c45e9e5f64d477f9fcc29374315d1729650f609 | 7,784 | py | Python | tensorflow/tools/pip_package/setup.py | jjzhang166/tensorflow | 61c0b39011671628ee85c2b49bc8845520018aa2 | [
"Apache-2.0"
] | 3 | 2017-05-31T01:33:48.000Z | 2020-02-18T17:12:56.000Z | tensorflow/tools/pip_package/setup.py | jjzhang166/tensorflow | 61c0b39011671628ee85c2b49bc8845520018aa2 | [
"Apache-2.0"
] | null | null | null | tensorflow/tools/pip_package/setup.py | jjzhang166/tensorflow | 61c0b39011671628ee85c2b49bc8845520018aa2 | [
"Apache-2.0"
] | 1 | 2018-12-28T12:55:11.000Z | 2018-12-28T12:55:11.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
# This version string is semver compatible, but incompatible with pip.
# For pip, we will remove all '-' characters from this string, and use the
# result for pip.
_VERSION = '1.3.0'
REQUIRED_PACKAGES = [
'enum34 >= 1.1.6',
'numpy >= 1.12.1',
'six >= 1.10.0',
'protobuf >= 3.3.0',
'tensorflow-tensorboard >= 0.1.0, < 0.2.0',
'autograd >= 1.1.11',
]
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
# python3 requires wheel 0.26
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
# mock comes with unittest.mock for python3, need to install for python2
REQUIRED_PACKAGES.append('mock >= 2.0.0')
# remove tensorboard from tf-nightly packages
if 'tf_nightly' in project_name:
for package in REQUIRED_PACKAGES:
if 'tensorflow-tensorboard' in package:
REQUIRED_PACKAGES.remove(package)
break
# weakref.finalize was introduced in Python 3.4
if sys.version_info < (3, 4):
REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1')
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
# We need to keep the TensorBoard command, even though the console script
# is now declared by the tensorboard pip package. If we remove the
# TensorBoard command, pip will inappropriately remove it during install,
# even though the command is not removed, just moved to a different wheel.
'tensorboard = tensorboard.main:main',
]
# pylint: enable=line-too-long
# remove the tensorboard console script if building tf_nightly
if 'tf_nightly' in project_name:
CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:main')
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
  """Distribution that reports it contains compiled extension modules.

  Returning True from has_ext_modules makes setuptools/wheel treat the
  package as platform-specific (binary) rather than pure Python.
  """
  def has_ext_modules(self):
    return True
class InstallCommand(InstallCommandBase):
  """Override the dir where the headers go."""

  def finalize_options(self):
    """Redirect header installation into <purelib>/tensorflow/include.

    Headers land inside the installed package so users can compile
    against them with a single -I flag.
    """
    ret = InstallCommandBase.finalize_options(self)
    self.install_headers = os.path.join(self.install_purelib,
                                        'tensorflow', 'include')
    return ret
class InstallHeaders(Command):
  """Override how headers are copied.
  The install_headers that comes with setuptools copies all files to
  the same directory. But we need the files to be in a specific directory
  hierarchy for -I <include_dir> to work correctly.
  """
  description = 'install C/C++ header files'
  user_options = [('install-dir=', 'd',
                   'directory to install header files to'),
                  ('force', 'f',
                   'force installation (overwrite existing files)'),
                 ]
  boolean_options = ['force']
  def initialize_options(self):
    """Set the initial (unset) values for the command's options."""
    self.install_dir = None
    self.force = 0
    self.outfiles = []
  def finalize_options(self):
    """Inherit install_dir/force from the parent 'install' command."""
    self.set_undefined_options('install',
                               ('install_headers', 'install_dir'),
                               ('force', 'force'))
  def mkdir_and_copy_file(self, header):
    """Copy one header into install_dir, preserving its directory layout.

    Returns the (dest_name, copied) pair from distutils' copy_file.
    """
    install_dir = os.path.join(self.install_dir, os.path.dirname(header))
    # Get rid of some extra intervening directories so we can have fewer
    # directories for -I
    install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
    # Copy eigen code into tensorflow/include.
    # A symlink would do, but the wheel file that gets created ignores
    # symlink within the directory hierarchy.
    # NOTE(keveman): Figure out how to customize bdist_wheel package so
    # we can do the symlink.
    if 'external/eigen_archive/' in install_dir:
      extra_dir = install_dir.replace('external/eigen_archive', '')
      if not os.path.exists(extra_dir):
        self.mkpath(extra_dir)
      self.copy_file(header, extra_dir)
    if not os.path.exists(install_dir):
      self.mkpath(install_dir)
    return self.copy_file(header, install_dir)
  def run(self):
    """Copy every declared header, recording outputs for setuptools."""
    hdrs = self.distribution.headers
    if not hdrs:
      return
    self.mkpath(self.install_dir)
    for header in hdrs:
      (out, _) = self.mkdir_and_copy_file(header)
      self.outfiles.append(out)
  def get_inputs(self):
    """Return the list of headers this command would install."""
    return self.distribution.headers or []
  def get_outputs(self):
    """Return the files actually installed by run()."""
    return self.outfiles
def find_files(pattern, root):
  """Yield the path of every file below `root` whose basename matches
  `pattern` (an fnmatch-style glob)."""
  for dirpath, _, filenames in os.walk(root):
    matched = fnmatch.filter(filenames, pattern)
    for name in matched:
      yield os.path.join(dirpath, name)
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]
matches += ['../' + x for x in find_files('*', '_solib_k8') if '.py' not in x]
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (list(find_files('*.h', 'tensorflow/core')) +
list(find_files('*.h', 'tensorflow/stream_executor')) +
list(find_files('*.h', 'google/protobuf_archive/src')) +
list(find_files('*', 'third_party/eigen3')) +
list(find_files('*', 'external/eigen_archive')) +
list(find_files('*.h', 'external/nsync/public')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description='TensorFlow helps the tensors flow',
long_description='',
url='http://tensorflow.org/',
author='Google Inc.',
author_email='opensource@google.com',
# Contained modules and scripts.
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
# Add in any packaged data.
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',)
| 33.264957 | 80 | 0.675745 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
_VERSION = '1.3.0'
REQUIRED_PACKAGES = [
'enum34 >= 1.1.6',
'numpy >= 1.12.1',
'six >= 1.10.0',
'protobuf >= 3.3.0',
'tensorflow-tensorboard >= 0.1.0, < 0.2.0',
'autograd >= 1.1.11',
]
project_name = 'tensorflow'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
if sys.version_info.major == 3:
REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
REQUIRED_PACKAGES.append('wheel')
REQUIRED_PACKAGES.append('mock >= 2.0.0')
if 'tf_nightly' in project_name:
for package in REQUIRED_PACKAGES:
if 'tensorflow-tensorboard' in package:
REQUIRED_PACKAGES.remove(package)
break
if sys.version_info < (3, 4):
REQUIRED_PACKAGES.append('backports.weakref >= 1.0rc1')
CONSOLE_SCRIPTS = [
'saved_model_cli = tensorflow.python.tools.saved_model_cli:main',
'tensorboard = tensorboard.main:main',
]
if 'tf_nightly' in project_name:
CONSOLE_SCRIPTS.remove('tensorboard = tensorboard.main:main')
TEST_PACKAGES = [
'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
    """Distribution that always reports extension modules.

    Claiming an ext module makes the packaging machinery treat the wheel as
    platform-specific even though all extensions are pre-built -- presumably
    the intent here; confirm against the build pipeline.
    """
    def has_ext_modules(self):
        # always binary: the package ships the pre-compiled EXTENSION_NAME
        return True
class InstallCommand(InstallCommandBase):
    """`install` command variant that redirects header installation."""
    def finalize_options(self):
        # run the base class logic first, then point the header install
        # directory inside the installed tensorflow package itself
        ret = InstallCommandBase.finalize_options(self)
        self.install_headers = os.path.join(self.install_purelib,
                                            'tensorflow', 'include')
        return ret
class InstallHeaders(Command):
    """Command to install C/C++ header files listed in setup(headers=...).

    Follows the standard distutils Command protocol:
    initialize_options / finalize_options / run.
    """
    description = 'install C/C++ header files'
    user_options = [('install-dir=', 'd',
                     'directory to install header files to'),
                    ('force', 'f',
                     'force installation (overwrite existing files)'),
                   ]
    boolean_options = ['force']
    def initialize_options(self):
        # defaults; install_dir is resolved from the 'install' command later
        self.install_dir = None
        self.force = 0
        self.outfiles = []
    def finalize_options(self):
        # inherit install_dir/force from the enclosing 'install' command
        self.set_undefined_options('install',
                                   ('install_headers', 'install_dir'),
                                   ('force', 'force'))
    def mkdir_and_copy_file(self, header):
        """Copy one header, rewriting vendored-source paths on the way."""
        install_dir = os.path.join(self.install_dir, os.path.dirname(header))
        # strip the protobuf archive prefix so headers land at their
        # canonical include path
        install_dir = re.sub('/google/protobuf_archive/src', '', install_dir)
        if 'external/eigen_archive/' in install_dir:
            # eigen headers are additionally copied without the
            # external/eigen_archive prefix
            extra_dir = install_dir.replace('external/eigen_archive', '')
            if not os.path.exists(extra_dir):
                self.mkpath(extra_dir)
            self.copy_file(header, extra_dir)
        if not os.path.exists(install_dir):
            self.mkpath(install_dir)
        return self.copy_file(header, install_dir)
    def run(self):
        # nothing to do when no headers were declared
        hdrs = self.distribution.headers
        if not hdrs:
            return
        self.mkpath(self.install_dir)
        for header in hdrs:
            (out, _) = self.mkdir_and_copy_file(header)
            self.outfiles.append(out)
    def get_inputs(self):
        return self.distribution.headers or []
    def get_outputs(self):
        return self.outfiles
def find_files(pattern, root):
    """Yield the path of every file under *root* whose name matches *pattern*."""
    for dirpath, _, filenames in os.walk(root):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]
matches += ['../' + x for x in find_files('*', '_solib_k8') if '.py' not in x]
if os.name == 'nt':
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'
headers = (list(find_files('*.h', 'tensorflow/core')) +
list(find_files('*.h', 'tensorflow/stream_executor')) +
list(find_files('*.h', 'google/protobuf_archive/src')) +
list(find_files('*', 'third_party/eigen3')) +
list(find_files('*', 'external/eigen_archive')) +
list(find_files('*.h', 'external/nsync/public')))
setup(
name=project_name,
version=_VERSION.replace('-', ''),
description='TensorFlow helps the tensors flow',
long_description='',
url='http://tensorflow.org/',
author='Google Inc.',
author_email='opensource@google.com',
packages=find_packages(),
entry_points={
'console_scripts': CONSOLE_SCRIPTS,
},
headers=headers,
install_requires=REQUIRED_PACKAGES,
tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
include_package_data=True,
package_data={
'tensorflow': [
EXTENSION_NAME,
] + matches,
},
zip_safe=False,
distclass=BinaryDistribution,
cmdclass={
'install_headers': InstallHeaders,
'install': InstallCommand,
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
license='Apache 2.0',
keywords='tensorflow tensor machine learning',)
| true | true |
1c45eac1149605f449ce85664d9f605d2ddc6d4b | 14,968 | py | Python | release/python/l2tester/interface.py | gringolito/l2tester | eb8eddad6a9fee33e05e0d8d601229cd704e5464 | [
"MIT"
] | 8 | 2018-04-05T12:05:42.000Z | 2021-07-01T10:44:29.000Z | release/python/l2tester/interface.py | gringolito/l2tester | eb8eddad6a9fee33e05e0d8d601229cd704e5464 | [
"MIT"
] | 6 | 2018-04-05T10:36:31.000Z | 2021-08-08T08:06:13.000Z | release/python/l2tester/interface.py | gringolito/l2tester | eb8eddad6a9fee33e05e0d8d601229cd704e5464 | [
"MIT"
] | 9 | 2018-04-04T19:15:49.000Z | 2021-08-07T10:17:10.000Z |
try:
from pyroute2 import IPRoute
except:
raise Exception("""
l2tester.interface depends on the following module:
* pyroute2 : available at https://pypi.python.org/pypi/pyroute2
Download .tar.gz, extract it, enter folder and run 'sudo python setup.py install' to install this module.
""")
import socket
import struct
import fcntl
import ctypes
import os
import re
import logging
from select import select
# Linux kernel constants not exposed by the Python socket module.
# From <linux/if_ether.h>
ETH_P_ALL = 0x0003               # match every ethernet protocol (Interface default)
# From <linux/socket.h>
SOL_PACKET = 263                 # setsockopt() level for packet-socket options
# From <linux/if_packet.h>
PACKET_MR_PROMISC = 1            # membership type: promiscuous mode
PACKET_ADD_MEMBERSHIP = 0x0001   # enable a membership (Interface.set_promiscuous)
PACKET_DROP_MEMBERSHIP = 0x0002  # disable a membership
## Ethtool ########################################################################################
# From <linux/ethtool.h>
ETHTOOL_GSET = 0x00000001        # read current link settings (Ethtool.get)
ETHTOOL_SSET = 0x00000002        # write link settings (Ethtool.set)
# From <linux/sockios.h>
SIOCETHTOOL = 0x8946             # ioctl request number for all ethtool calls
class Ethtool():
    """ Implement ethtool functionality by ioctl with struct ethtool_cmd from <linux/ethtool.h>
    struct ethtool_cmd {
        u32 cmd;
        u32 supported; /* Features this interface supports */
        u32 advertising; /* Features this interface advertises */
        u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */
        u8 duplex; /* Duplex, half or full */
        u8 port; /* Which connector port */
        u8 phy_address;
        u8 transceiver; /* Which transceiver to use */
        u8 autoneg; /* Enable or disable autonegotiation */
        u32 maxtxpkt; /* Tx pkts before generating tx int */
        u32 maxrxpkt; /* Rx pkts before generating rx int */
        u32 reserved[4];
    };
    """
    # native-order struct layout; the trailing '16x' skips reserved[4]
    st_format = "IIIHBBBBBII16x"
    def __init__(self, if_name):
        """ Initialize ethtool.
        @param if_name Name of interface.
        """
        self.data = ctypes.create_string_buffer(44) # sizeof(struct ethtool_cmd)
        self.__unpack()
        # struct ifreq: interface name followed by a pointer to the
        # ethtool_cmd buffer the kernel reads/writes in place
        self.ifreq_input = struct.pack('16sI12x', if_name, ctypes.addressof(self.data))
        # any ordinary socket works as the ioctl target
        self.sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
    def get(self):
        """ Request ethtool information using ioctl. Update object parameters.
        """
        self.cmd = ETHTOOL_GSET
        self.__pack()
        fcntl.ioctl(self.sockfd, SIOCETHTOOL, self.ifreq_input)
        self.__unpack()
    def set(self):
        """ Configure ethtool information using ioctl.
        Must be preceded by a 'get' if not all fields are changed.
        """
        self.cmd = ETHTOOL_SSET
        self.__pack()
        fcntl.ioctl(self.sockfd, SIOCETHTOOL, self.ifreq_input)
    def __unpack(self):
        """ [private] Extract fields from buffer.
        """
        unpacked = struct.unpack(self.st_format, self.data[:])
        self.cmd = unpacked[0]
        self.supported = unpacked[1]
        self.advertising = unpacked[2]
        self.speed = unpacked[3]
        self.duplex = unpacked[4]
        self.port = unpacked[5]
        self.phy_address = unpacked[6]
        self.transceiver = unpacked[7]
        self.autoneg = unpacked[8]
        self.maxtxpkt = unpacked[9]
        self.maxrxpkt = unpacked[10]
    def __pack(self):
        """ [private] Updated buffer with current fields.
        """
        packed = struct.pack(self.st_format,
                             self.cmd,
                             self.supported,
                             self.advertising,
                             self.speed,
                             self.duplex,
                             self.port,
                             self.phy_address,
                             self.transceiver,
                             self.autoneg,
                             self.maxtxpkt,
                             self.maxrxpkt)
        # NOTE(review): xrange is Python 2 only -- this module predates py3
        for i in xrange(44):
            self.data[i] = packed[i]
## Interface ######################################################################################
class Interface():
    """ Define ethernet interface using low level RAW sockets.
    NOTE: To create RAW sockets, you must be superuser or have 'cap_net_raw' capabilities.
    You can set the capabilities to python using:
    $ sudo setcap cap_mac_admin,cap_net_raw,cap_net_admin=eip /usr/bin/python2.6
    """
    # class attribute: a single netlink socket shared by all Interface instances
    netlink = IPRoute()
    def __init__(self, name, eth_type=ETH_P_ALL):
        """ Initialize interface. Open socket and set interface in promiscuous mode.
        @param name Name of the interface. Ex: 'eth0'
        @param eth_type Ethernet protocols read by this interface. Default: ALL PROTOCOLS.
        """
        self.logger = logging.getLogger("PC eth")
        self.eth_type = eth_type
        self.name = name
        self.added_ips = []
        self.is_vlan = False
        # If the interface is not part of IPDB, it can be a VLAN
        if not self.netlink.link_lookup(ifname=self.name):
            # only names shaped like ethN.VID (VID 10..9999 or 1..9?) are
            # accepted as VLAN sub-interfaces -- see the regex below
            vlan_match = re.match(
                "^(?P<base_interface>eth\d+)\.(?P<vlan_id>[1-9]\d{1,3})$", self.name)
            if vlan_match is None:
                raise Exception("Invalid interface name " + self.name)
            base = vlan_match.group('base_interface')
            vid = int(vlan_match.group('vlan_id'))
            base_idx = self.netlink.link_lookup(ifname=base)
            if not base_idx:
                raise Exception("Invalid base interface name " + self.name)
            try:
                request = {
                    'index': 0,
                    'ipaddr': [],
                    'link': base_idx[0],
                    'flags': 0,
                    'ifname': self.name,
                    'ports': [],
                    'IFLA_LINKINFO': {
                        'attrs': [
                            ['IFLA_INFO_DATA', {
                                'attrs': [['IFLA_VLAN_ID', vid]]
                            }],
                            ['IFLA_INFO_KIND', 'vlan']
                        ]
                    }
                }
                # Send request to create new interface with VLAN
                self.netlink.link('add', **request)
                self.is_vlan = True
            # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit;
            # 'except Exception' would be safer (the error is re-raised anyway)
            except:
                self.logger.critical("Couldn't create interface %s", self.name)
                raise
        # Get Interface Index, set to UP, get MTU and MAC Address
        self.if_index = self.netlink.link_lookup(ifname=self.name)[0]
        self.netlink.link('set', index=self.if_index, state='up')
        info = dict(self.netlink.get_links(self.if_index)[0]['attrs'])
        self.mac_address = info['IFLA_ADDRESS'].upper()
        self.mtu = info['IFLA_MTU']
        # Create socket to receive/send frames:
        self.sockfd = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(self.eth_type))
        self.sockfd.bind((self.name, self.eth_type))
        # Enable promiscuous mode:
        self.set_promiscuous(True)
        # By default, start using auto-negotiation
        self.using_forced_speed_duplex = False
    def __del__(self):
        """ Destructor. Disable promiscuous mode on interface.
        Undoes, in order: added IPs, created VLAN, promiscuous mode,
        forced speed/duplex.
        """
        # Clean added IP addresses.
        for ip in self.added_ips:
            self.__set_ip_address(ip[0], ip[1], 'delete')
        # Remove VLAN if it was created.
        if self.is_vlan:
            self.netlink.link('delete', index=self.if_index)
        # Disable promiscuous mode:
        self.set_promiscuous(False)
        # Leave interface with auto-negotiation enabled:
        if self.using_forced_speed_duplex:
            self.enable_auto_negotiation()
    def recv(self):
        """ Receive a packet. If it's an outgoing packet ignore it (returns None).
        """
        packet, address = self.sockfd.recvfrom(self.mtu)
        # address[2] is the packet type; locally originated frames are skipped
        return packet if address[2] != socket.PACKET_OUTGOING else None
    def send(self, packet):
        """ Send a packet through this interface.
        """
        # NOTE(review): str(packet) relies on Python 2 string semantics;
        # on Python 3 this would corrupt binary frames
        self.sockfd.sendto(str(packet), 0, (self.name, self.eth_type))
    def flush(self):
        """ Remove all packets from read buffer.
        """
        # shrink the kernel receive buffer, drain whatever is readable,
        # then restore a large buffer (1 GiB request; kernel will clamp it)
        self.sockfd.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 0)
        while True:
            r, w, e = select([self.sockfd.fileno()], [], [], 0)
            if r:
                os.read(self.sockfd.fileno(), self.mtu)
            else:
                break
        self.sockfd.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
    def set_promiscuous(self, enable):
        """ Enable/Disable promiscuous mode on interface.
        @param enable True to enable, False to disable.
        """
        cmd = PACKET_ADD_MEMBERSHIP if enable else PACKET_DROP_MEMBERSHIP
        # struct packet_mreq: ifindex, type, alen, address
        mreq = struct.pack('IHH8s', self.if_index, PACKET_MR_PROMISC, 0, "")
        self.sockfd.setsockopt(SOL_PACKET, cmd, mreq)
    def add_ip_address(self, ip_address):
        """ Adds an IP address/network mask (the default prefix is 24)
        @param ip_address The IP address followed optionally by mask size. Ex: 192.168.0.24/24
        """
        self.__set_ip_address(ip_address, socket.AF_INET, 'add')
        self.added_ips.append((ip_address, socket.AF_INET))
    def del_ip_address(self, ip_address):
        """ Deletes an IP address/network mask (the default prefix is 24)
        @param ip_address The IP address followed optionally by mask size. Ex: 192.168.0.24/24
        """
        self.__set_ip_address(ip_address, socket.AF_INET, 'delete')
        self.added_ips.remove((ip_address, socket.AF_INET))
    def add_ipv6_address(self, ipv6_address):
        """ Adds an IPv6 address/network mask (the default prefix is 24)
        @param ipv6_address The IPv6 address followed optionally by mask size. Ex: 56::1/24
        """
        self.__set_ip_address(ipv6_address, socket.AF_INET6, 'add')
        self.added_ips.append((ipv6_address, socket.AF_INET6))
    def del_ipv6_address(self, ipv6_address):
        """ Deletes an IPv6 address/network mask (the default prefix is 24)
        @param ipv6_address The IPv6 address followed optionally by mask size. Ex: 56::1/24
        """
        self.__set_ip_address(ipv6_address, socket.AF_INET6, 'delete')
        self.added_ips.remove((ipv6_address, socket.AF_INET6))
    def enable_auto_negotiation(self):
        """ Enable auto-negotiation for Ethernet link.
        """
        ethtool = Ethtool(self.name)
        ethtool.get()
        # advertise everything the hardware supports
        ethtool.advertising = ethtool.supported
        ethtool.autoneg = 1
        ethtool.set()
        self.using_forced_speed_duplex = False
        self.logger.info("[%s] Enabled Auto-negotiation.", self.name)
    def force_speed_duplex(self, speed, duplex):
        """ Configure interface speed/duplex disabling auto-negotiation.
        @param speed Set forced speed. Values: 10, 100, 1000, 2500, 10000.
        @param duplex Set forced duplex (truthy = full, falsy = half).
        """
        if not speed in [10, 100, 1000, 2500, 10000]:
            raise ValueError("Speed can only be: 10, 100, 1000, 2500 or 10000 Mbps.")
        ethtool = Ethtool(self.name)
        ethtool.get()
        ethtool.speed = speed
        ethtool.duplex = 1 if duplex else 0
        ethtool.autoneg = 0
        ethtool.set()
        self.using_forced_speed_duplex = True
        self.logger.info("[%s] Configured forced speed: %d Mbps / %s duplex",
                         self.name, speed, "full" if duplex else "half")
    def has_ip_address(self, ip_address):
        """ Returns True if the address is already configured in the interface, and False otherwise
        @param ip_address The IP address to be checked
        """
        return (self.__check_ip_address(ip_address, socket.AF_INET)
                or self.__check_ip_address(ip_address, socket.AF_INET6))
    def set_mtu(self, mtu):
        """ Configure interface MTU.
        @param mtu New value for MTU.
        """
        self.netlink.link('set', index=self.if_index, mtu=mtu)
    def set_mac_address(self, mac_address):
        """ Configure a new MAC address at this interface
        @param mac_address The MAC address to be set
        """
        self.netlink.link('set', index=self.if_index, address=mac_address)
        self.mac_address = mac_address
    def __check_ip_address(self, ip_address, ip_family):
        """ Returns True if the address is already configured in the interface, and False otherwise
        @param ip_address The IP address to be checked
        @param ip_family socket.AF_INET if ip_address is an IPv4 address; socket.AF_INET6 otherwise
        """
        address_types = ['IFA_ADDRESS', 'IFA_LOCAL',
                         'IFA_BROADCAST', 'IFA_ANYCAST', 'IFA_MULTICAST']
        # scan all configured addresses, keeping only this interface's entries
        for interface in self.netlink.get_addr(family=ip_family):
            if interface['index'] != self.if_index:
                continue
            for address in interface['attrs']:
                if address[0] in address_types and address[1] == ip_address:
                    return True
        return False
    def __set_ip_address(self, ip_address, ip_family, action):
        """ Adds or deletes an IP address/network mask (optional)
        @param ip_address The IP address followed optionally by mask size. Ex: 192.168.0.24/24; 56::1/24
        @param ip_family socket.AF_INET to IPv4 addresses; socket.AF_INET6 to IPv6 addresses
        @param action 'add' or 'delete', to add or delete an IP address, respectively
        """
        ip_and_mask = ip_address.split('/')
        ip_version = 4 if ip_family == socket.AF_INET else 6
        network_mask = 24 if len(ip_and_mask) < 2 else int(ip_and_mask[1])
        # skip the netlink call when it would be a no-op
        exists = self.__check_ip_address(ip_and_mask[0], ip_family)
        if (action == 'add' and exists) or (action == 'delete' and not exists):
            self.logger.info('No need to %s the IP%d address %s/%d from/to %s because it already %sexists',
                             action, ip_version, ip_and_mask[0], network_mask, self.name, '' if exists else 'does not ')
            return
        self.logger.info("%s IPv%d address %s/%d in %s", action, ip_version,
                         ip_and_mask[0], network_mask, self.name)
        self.netlink.addr(action, self.if_index,
                          address=ip_and_mask[0], mask=network_mask, family=ip_family)
## Access PC interfaces ############################################################################
# Module-level cache: one Interface object per interface name, so the same
# interface never gets two raw sockets.
interface_instances = {}

def get_interface(if_name):
    """ Get interface reference. It's used to avoid multiple sockets for the same interface.
    """
    if if_name not in interface_instances:
        interface_instances[if_name] = Interface(if_name)
    return interface_instances[if_name]

def mac_address(if_name):
    """ Shortcut to get_interface(if_name).mac_address.
    """
    return get_interface(if_name).mac_address

def delete_interfaces():
    """ Delete all created interfaces.
    Iterates over a snapshot of the keys: deleting entries while iterating
    the live dict (view) raises RuntimeError on Python 3.
    """
    for if_name in list(interface_instances):
        del interface_instances[if_name]
| 38.183673 | 120 | 0.587654 |
try:
from pyroute2 import IPRoute
except:
raise Exception("""
l2tester.interface depends on the following module:
* pyroute2 : available at https://pypi.python.org/pypi/pyroute2
Download .tar.gz, extract it, enter folder and run 'sudo python setup.py install' to install this module.
""")
import socket
import struct
import fcntl
import ctypes
import os
import re
import logging
from select import select
ETH_P_ALL = 0x0003
SOL_PACKET = 263
PACKET_MR_PROMISC = 1
PACKET_ADD_MEMBERSHIP = 0x0001
PACKET_DROP_MEMBERSHIP = 0x0002
ICAST']
for interface in self.netlink.get_addr(family=ip_family):
if interface['index'] != self.if_index:
continue
for address in interface['attrs']:
if address[0] in address_types and address[1] == ip_address:
return True
return False
def __set_ip_address(self, ip_address, ip_family, action):
ip_and_mask = ip_address.split('/')
ip_version = 4 if ip_family == socket.AF_INET else 6
network_mask = 24 if len(ip_and_mask) < 2 else int(ip_and_mask[1])
exists = self.__check_ip_address(ip_and_mask[0], ip_family)
if (action == 'add' and exists) or (action == 'delete' and not exists):
self.logger.info('No need to %s the IP%d address %s/%d from/to %s because it already %sexists',
action, ip_version, ip_and_mask[0], network_mask, self.name, '' if exists else 'does not ')
return
self.logger.info("%s IPv%d address %s/%d in %s", action, ip_version,
ip_and_mask[0], network_mask, self.name)
self.netlink.addr(action, self.if_index,
address=ip_and_mask[0], mask=network_mask, family=ip_family)
## Access PC interfaces ############################################################################
interface_instances = {}
def get_interface(if_name):
if not if_name in interface_instances:
interface_instances[if_name] = Interface(if_name)
return interface_instances[if_name]
def mac_address(if_name):
return get_interface(if_name).mac_address
def delete_interfaces():
for if_name in interface_instances.keys():
del interface_instances[if_name]
| true | true |
1c45eb2407679dda457ec86794fdeaee8e70ab96 | 90,227 | py | Python | src/reportlab/pdfbase/pdfdoc.py | radjkarl/reportlab | 48cafb6d64ff92fd9d4f9a4dd888be6f7d55b765 | [
"BSD-3-Clause"
] | 51 | 2015-01-20T19:50:34.000Z | 2022-03-05T21:23:32.000Z | src/reportlab/pdfbase/pdfdoc.py | radjkarl/reportlab | 48cafb6d64ff92fd9d4f9a4dd888be6f7d55b765 | [
"BSD-3-Clause"
] | 16 | 2015-11-15T04:23:43.000Z | 2021-09-27T14:14:20.000Z | src/reportlab/pdfbase/pdfdoc.py | radjkarl/reportlab | 48cafb6d64ff92fd9d4f9a4dd888be6f7d55b765 | [
"BSD-3-Clause"
] | 46 | 2015-03-28T10:18:14.000Z | 2021-12-16T15:57:47.000Z | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/pdfdoc.py
__version__=''' $Id$ '''
__doc__="""
The module pdfdoc.py handles the 'outer structure' of PDF documents, ensuring that
all objects are properly cross-referenced and indexed to the nearest byte. The
'inner structure' - the page descriptions - are presumed to be generated before
each page is saved.
pdfgen.py calls this and provides a 'canvas' object to handle page marking operators.
piddlePDF calls pdfgen and offers a high-level interface.
The classes within this generally mirror structures in the PDF file
and are not part of any public interface. Instead, canvas and font
classes are made available elsewhere for users to manipulate.
"""
import types, binascii, codecs
from collections import OrderedDict
from reportlab.pdfbase import pdfutils
from reportlab import rl_config
from reportlab.lib.utils import import_zlib, open_for_read, makeFileName, isSeq, isBytes, isUnicode, _digester, isStr, bytestr, isPy3, annotateException
from reportlab.lib.rl_accel import escapePDF, fp_str, asciiBase85Encode, asciiBase85Decode
from reportlab.pdfbase import pdfmetrics
from hashlib import md5
from sys import platform
from sys import version_info
from sys import stderr
if platform[:4] == 'java' and version_info[:2] == (2, 1):
    # workaround for list()-bug in Jython 2.1 (should be fixed in 2.2)
    # NOTE: the previous version called the replacement ``list`` from inside
    # itself (``return list(map(f, sequence))``), recursing forever. Build
    # the copy directly; the contract (a new list with the same items) is
    # unchanged.
    def list(sequence):
        return [x for x in sequence]
class PDFError(Exception):
    """Raised for PDF document construction problems (e.g. unknown or dynamic fonts)."""
    pass
# __InternalName__ is a special attribute that can only be set by the Document arbitrator
__InternalName__ = "__InternalName__"
# __RefOnly__ marks reference only elements that must be formatted on top level
__RefOnly__ = "__RefOnly__"
# __Comment__ provides a (one line) comment to inline with an object ref, if present
# if it is more than one line then percentize it...
__Comment__ = "__Comment__"
# name for standard font dictionary
BasicFonts = "BasicFonts"
# name for the pages object
Pages = "Pages"
# PDF specification version the generated file claims by default;
# raised on demand by PDFDocument.ensureMinPdfVersion
PDF_VERSION_DEFAULT = (1, 3)
PDF_SUPPORT_VERSION = dict( #map keyword to min version that supports it
    transparency = (1, 4),
    )
if isPy3:
    def pdfdocEnc(x):
        """Encode a native str via the 'extpdfdoc' codec; pass anything else through."""
        if isinstance(x, str):
            return x.encode('extpdfdoc')
        return x
else:
    def pdfdocEnc(x):
        """Encode a unicode string via the 'extpdfdoc' codec; pass anything else through."""
        if isinstance(x, unicode):
            return x.encode('extpdfdoc')
        return x
def format(element, document, toplevel=0):
    """Indirection step for formatting.
    Ensures that document parameters alter behaviour
    of formatting for all elements.
    """
    if isinstance(element, PDFObject):
        if not toplevel and hasattr(element, __RefOnly__):
            # reference-only objects may not appear inline below the top
            # level: emit an indirect reference to them instead
            return document.Reference(element).format(document)
        formatted = element.format(document)
        if not rl_config.invariant and rl_config.pdfComments and hasattr(element, __Comment__):
            # prepend the object's one-line comment to its serialised form
            formatted = pdfdocEnc("%% %s\r\n" % element.__Comment__) + formatted
        return formatted
    # exact type check (not isinstance) so e.g. bool falls through to str();
    # controlled numeric formatting keeps output identical across interpreters
    if type(element) in (float, int):
        return pdfdocEnc(fp_str(element))
    if isBytes(element):
        return element
    if isUnicode(element):
        return pdfdocEnc(element)
    return pdfdocEnc(str(element))
def xObjectName(externalname):
    """Map an external form name into the internal 'FormXob.' namespace."""
    return "FormXob.%s" % (externalname,)

# backwards compatibility
formName = xObjectName
# no encryption
class NoEncryption:
    """Identity encrypter installed when a document has no encryption."""
    def encode(self, t):
        "encode a string, stream, text"
        # nothing to encrypt: hand the data straight back
        return t
    def prepare(self, document):
        # no setup is needed for an unencrypted document
        pass
    def register(self, objnum, version):
        # registering direct objects is a no-op without encryption
        pass
    def info(self):
        # no /Encrypt dictionary is written for an unencrypted file
        return None
class PDFObject(object):
    """Marker base class for everything that knows how to format itself into PDF."""
    pass
class DummyDoc(PDFObject):
    "used to bypass encryption when required"
    # minimal stand-in document exposing only a no-op .encrypt;
    # used e.g. by PDFDocument.ID() when formatting the ID string
    encrypt = NoEncryption()
### the global document structure manager
class PDFDocument(PDFObject):
    """Top-level arbitrator for a PDF file: tracks every object by internal
    name, assigns object numbers, and serialises the whole file in format().
    """
    # set this to define filters
    defaultStreamFilters = None
    encrypt = NoEncryption() # default no encryption
    def __init__(self,
                 dummyoutline=0,
                 compression=rl_config.pageCompression,
                 invariant=rl_config.invariant,
                 filename=None,
                 pdfVersion=PDF_VERSION_DEFAULT,
                 ):
        """Set up an empty document: catalog, page tree, outline, info and
        the signature used to derive the file ID.

        dummyoutline -- use a placeholder outline object instead of a real one
        compression  -- page stream compression on/off
        invariant    -- when true, suppress time-varying output (stable IDs)
        filename     -- unused here; kept for interface compatibility
        pdfVersion   -- (major, minor) tuple written to the file header
        """
        self._ID = None
        self.objectcounter = 0
        self.shadingCounter = 0
        self.inObject = None
        self.pageCounter = 1
        # allow None value to be passed in to mean 'give system defaults'
        if invariant is None:
            self.invariant = rl_config.invariant
        else:
            self.invariant = invariant
        self.setCompression(compression)
        self._pdfVersion = pdfVersion
        # signature for creating PDF ID
        sig = self.signature = md5()
        sig.update(b"a reportlab document")
        if not self.invariant:
            # NOTE(review): _getTimeStamp is not imported in this chunk --
            # presumably defined/imported elsewhere in the module; confirm
            cat = _getTimeStamp()
        else:
            # fixed timestamp (2000-01-01 UTC) so invariant builds are stable
            cat = 946684800.0
        cat = ascii(cat)
        sig.update(bytestr(cat)) # initialize with timestamp digest
        # mapping of internal identifier ("Page001") to PDF objectnumber and generation number (34, 0)
        self.idToObjectNumberAndVersion = {}
        # mapping of internal identifier ("Page001") to PDF object (PDFPage instance)
        self.idToObject = {}
        # internal id to file location
        self.idToOffset = {}
        # number to id
        self.numberToId = {}
        cat = self.Catalog = self._catalog = PDFCatalog()
        pages = self.Pages = PDFPages()
        cat.Pages = pages
        if dummyoutline:
            outlines = PDFOutlines0()
        else:
            outlines = PDFOutlines()
        self.Outlines = self.outline = outlines
        cat.Outlines = outlines
        self.info = PDFInfo()
        self.info.invariant = self.invariant
        #self.Reference(self.Catalog)
        #self.Reference(self.Info)
        self.fontMapping = {}
        #make an empty font dictionary
        DD = PDFDictionary({})
        DD.__Comment__ = "The standard fonts dictionary"
        self.Reference(DD, BasicFonts)
        self.delayedFonts = []
def setCompression(self, onoff):
# XXX: maybe this should also set self.defaultStreamFilters?
self.compression = onoff
def ensureMinPdfVersion(self, *keys):
"Ensure that the pdf version is greater than or equal to that specified by the keys"
for k in keys:
self._pdfVersion = max(self._pdfVersion, PDF_SUPPORT_VERSION[k])
def updateSignature(self, thing):
"add information to the signature"
if self._ID: return # but not if its used already!
self.signature.update(bytestr(thing))
def ID(self):
"A unique fingerprint for the file (unless in invariant mode)"
if self._ID:
return self._ID
digest = self.signature.digest()
doc = DummyDoc()
IDs = PDFString(digest,enc='raw').format(doc)
self._ID = (b'\r\n % ReportLab generated PDF document -- digest (http://www.reportlab.com)\r\n ['
+IDs+b' '+IDs+b']\r\n')
return self._ID
def SaveToFile(self, filename, canvas):
if hasattr(getattr(filename, "write",None),'__call__'):
myfile = 0
f = filename
filename = makeFileName(getattr(filename,'name',''))
else :
myfile = 1
filename = makeFileName(filename)
f = open(filename, "wb")
data = self.GetPDFData(canvas)
if isUnicode(data):
data = data.encode('latin1')
f.write(data)
if myfile:
f.close()
import os
if os.name=='mac':
from reportlab.lib.utils import markfilename
markfilename(filename) # do platform specific file junk
if getattr(canvas,'_verbosity',None): print('saved %s' % (filename,))
    def GetPDFData(self, canvas):
        """Finalise fonts, info and outline, then return the serialised PDF."""
        # realize delayed fonts
        for fnt in self.delayedFonts:
            fnt.addObjects(self)
        # add info stuff to signature
        self.info.invariant = self.invariant
        self.info.digest(self.signature)
        ### later: maybe add more info to sig?
        # prepare outline
        self.Reference(self.Catalog)
        self.Reference(self.info)
        outline = self.outline
        outline.prepare(self, canvas)
        return self.format()
def inPage(self):
"""specify the current object as a page (enables reference binding and other page features)"""
if self.inObject is not None:
if self.inObject=="page": return
raise ValueError("can't go in page already in object %s" % self.inObject)
self.inObject = "page"
def inForm(self):
"""specify that we are in a form xobject (disable page features, etc)"""
# don't need this check anymore since going in a form pushes old context at canvas level.
#if self.inObject not in ["form", None]:
# raise ValueError("can't go in form already in object %s" % self.inObject)
self.inObject = "form"
# don't need to do anything else, I think...
def getInternalFontName(self, psfontname):
fm = self.fontMapping
if psfontname in fm:
return fm[psfontname]
else:
try:
# does pdfmetrics know about it? if so, add
fontObj = pdfmetrics.getFont(psfontname)
if fontObj._dynamicFont:
raise PDFError("getInternalFontName(%s) called for a dynamic font" % repr(psfontname))
fontObj.addObjects(self)
return fm[psfontname]
except KeyError:
raise PDFError("Font %s not known!" % repr(psfontname))
def thisPageName(self):
return "Page"+repr(self.pageCounter)
def thisPageRef(self):
return PDFObjectReference(self.thisPageName())
def addPage(self, page):
name = self.thisPageName()
self.Reference(page, name)
self.Pages.addPage(page)
self.pageCounter += 1
self.inObject = None
def addForm(self, name, form):
"""add a Form XObject."""
# XXX should check that name is a legal PDF name
if self.inObject != "form":
self.inForm()
self.Reference(form, xObjectName(name))
self.inObject = None
def annotationName(self, externalname):
return "Annot.%s"%externalname
def addAnnotation(self, name, annotation):
self.Reference(annotation, self.annotationName(name))
def refAnnotation(self, name):
internalname = self.annotationName(name)
return PDFObjectReference(internalname)
def addShading(self, shading):
name = "Sh%d" % self.shadingCounter
self.Reference(shading, name)
self.shadingCounter += 1
return name
def addColor(self,cmyk):
sname = cmyk.spotName
if not sname:
if cmyk.cyan==0 and cmyk.magenta==0 and cmyk.yellow==0:
sname = 'BLACK'
elif cmyk.black==0 and cmyk.magenta==0 and cmyk.yellow==0:
sname = 'CYAN'
elif cmyk.cyan==0 and cmyk.black==0 and cmyk.yellow==0:
sname = 'MAGENTA'
elif cmyk.cyan==0 and cmyk.magenta==0 and cmyk.black==0:
sname = 'YELLOW'
if not sname:
raise ValueError("CMYK colour %r used without a spotName" % cmyk)
else:
cmyk = cmyk.clone(spotName = sname)
name = PDFName(sname)[1:]
if name not in self.idToObject:
sep = PDFSeparationCMYKColor(cmyk).value() #PDFArray([/Separation /name /DeviceCMYK tint_tf])
self.Reference(sep,name)
return name,sname
def setTitle(self, title):
"embeds in PDF file"
if title is None:
self.info.title = '(anonymous)'
else:
self.info.title = title
def setAuthor(self, author):
"embedded in PDF file"
#allow resetting to clear it
if author is None:
self.info.author = '(anonymous)'
else:
self.info.author = author
def setSubject(self, subject):
"embeds in PDF file"
#allow resetting to clear it
if subject is None:
self.info.subject = '(unspecified)'
else:
self.info.subject = subject
def setCreator(self, creator):
"embeds in PDF file"
#allow resetting to clear it
if creator is None:
self.info.creator = '(unspecified)'
else:
self.info.creator = creator
def setKeywords(self, keywords):
"embeds a string containing keywords in PDF file"
#allow resetting to clear it but ensure it's a string
if keywords is None:
self.info.keywords = ''
else:
self.info.keywords = keywords
def setDateFormatter(self, dateFormatter):
self.info._dateFormatter = dateFormatter
def getAvailableFonts(self):
fontnames = list(self.fontMapping.keys())
# the standard 14 are also always available! (even if not initialized yet)
from reportlab.pdfbase import _fontdata
for name in _fontdata.standardFonts:
if name not in fontnames:
fontnames.append(name)
fontnames.sort()
return fontnames
    def format(self):
        """Serialise the whole document and return the PDF file contents.

        Objects are emitted in object-number order; formatting one object may
        register further objects, so the loop runs until the next number has
        no entry."""
        # register the Catalog/Info and then format the objects one by one until exhausted
        # (possible infinite loop if there is a bug that continually makes new objects/refs...)
        # Prepare encryption
        self.encrypt.prepare(self)
        cat = self.Catalog
        info = self.info
        self.Reference(self.Catalog)
        self.Reference(self.info)
        # register the encryption dictionary if present
        encryptref = None
        encryptinfo = self.encrypt.info()
        if encryptinfo:
            encryptref = self.Reference(encryptinfo)
        # make std fonts (this could be made optional)
        counter = 0 # start at first object (object 1 after preincrement)
        ids = [] # the collection of object ids in object number order
        numbertoid = self.numberToId
        idToNV = self.idToObjectNumberAndVersion # (kept for symmetry; not used below)
        idToOb = self.idToObject
        idToOf = self.idToOffset
        ### note that new entries may be "appended" DURING FORMATTING
        done = None
        # __accum__ allows objects to know where they are in the file etc etc
        self.__accum__ = File = PDFFile(self._pdfVersion) # output collector
        while done is None:
            counter += 1 # do next object...
            if counter in numbertoid:
                id = numbertoid[counter]
                # debug aid: print(idToOb)
                obj = idToOb[id]
                IO = PDFIndirectObject(id, obj)
                # register object number and version
                #encrypt.register(id,
                IOf = IO.format(self)
                # add a comment to the PDF output
                if not rl_config.invariant and rl_config.pdfComments:
                    try:
                        classname = obj.__class__.__name__
                    except:
                        classname = ascii(obj)
                    File.add("%% %s: class %s \r\n" % (ascii(id), classname[:50]))
                offset = File.add(IOf)
                idToOf[id] = offset
                ids.append(id)
            else:
                done = 1
        del self.__accum__
        # sanity checks (must happen AFTER formatting)
        lno = len(numbertoid)
        if counter-1!=lno:
            raise ValueError("counter %s doesn't match number to id dictionary %s" %(counter, lno))
        # now add the xref
        xref = PDFCrossReferenceTable()
        xref.addsection(0, ids)
        xreff = xref.format(self)
        xrefoffset = File.add(xreff)
        # now add the trailer
        trailer = PDFTrailer(
            startxref = xrefoffset,
            Size = lno+1,
            Root = self.Reference(cat),
            Info = self.Reference(info),
            Encrypt = encryptref,
            ID = self.ID(),
            )
        trailerf = trailer.format(self)
        File.add(trailerf)
        # append any registered digital signatures at the very end
        for ds in getattr(self,'_digiSigs',[]):
            ds.sign(File)
        # return string format for pdf file
        return File.format(self)
def hasForm(self, name):
"""test for existence of named form"""
internalname = xObjectName(name)
return internalname in self.idToObject
def getFormBBox(self, name, boxType="MediaBox"):
"""get the declared bounding box of the form as a list.
If you specify a different PDF box definition (e.g. the
ArtBox) and it has one, that's what you'll get."""
internalname = xObjectName(name)
if internalname in self.idToObject:
theform = self.idToObject[internalname]
if hasattr(theform,'_extra_pageCatcher_info'):
return theform._extra_pageCatcher_info[boxType]
if isinstance(theform, PDFFormXObject):
# internally defined form
return theform.BBoxList()
elif isinstance(theform, PDFStream):
# externally defined form
return list(theform.dictionary.dict[boxType].sequence)
else:
raise ValueError("I don't understand the form instance %s" % repr(name))
def getXObjectName(self, name):
"""Lets canvas find out what form is called internally.
Never mind whether it is defined yet or not."""
return xObjectName(name)
def xobjDict(self, formnames):
"""construct an xobject dict (for inclusion in a resource dict, usually)
from a list of form names (images not yet supported)"""
D = {}
for name in formnames:
internalname = xObjectName(name)
reference = PDFObjectReference(internalname)
D[internalname] = reference
#print "xobjDict D", D
return PDFDictionary(D)
    def Reference(self, obj, name=None):
        """Register obj (if needed) and return a PDFObjectReference to it.

        Unnamed plain values and existing references pass straight
        through.  Raises ValueError on name collisions or on attempts
        to re-register an object under a different name.
        """
        ### note references may "grow" during the final formatting pass: don't use d.keys()!
        # don't make references to other references, or non instances, unless they are named!
        iob = isinstance(obj,PDFObject)
        idToObject = self.idToObject
        if name is None and (not iob or obj.__class__ is PDFObjectReference):
            return obj
        # NOTE(review): __InternalName__ is used here as a bare name --
        # presumably a module-level constant equal to "__InternalName__"
        # defined earlier in this file; confirm, otherwise this line
        # would raise NameError at runtime.
        if hasattr(obj, __InternalName__):
            # already registered
            intname = obj.__InternalName__
            if name is not None and name!=intname:
                raise ValueError("attempt to reregister object %s with new name %s" % (
                    repr(intname), repr(name)))
            if intname not in idToObject:
                raise ValueError("object of type %s named as %s, but not registered" % (type(obj),ascii(intname)))
            return PDFObjectReference(intname)
        # otherwise register the new object
        objectcounter = self.objectcounter = self.objectcounter+1
        if name is None:
            name = "R"+repr(objectcounter)
        if name in idToObject:
            other = idToObject[name]
            if other!=obj:
                raise ValueError("redefining named object: "+repr(name))
            return PDFObjectReference(name)
        if iob:
            obj.__InternalName__ = name
        #print "name", name, "counter", objectcounter
        self.idToObjectNumberAndVersion[name] = (objectcounter, 0)
        self.numberToId[objectcounter] = name
        idToObject[name] = obj
        return PDFObjectReference(name)
### chapter 4 Objects
# the three literal PDF keyword objects, written verbatim into the file
PDFtrue = "true"
PDFfalse = "false"
PDFnull = "null"
class PDFText(PDFObject):
    """A PDF hexadecimal string object, rendered as <hexdigits>."""
    def __init__(self, t):
        self.t = t
    def format(self, document):
        text = self.t
        if isUnicode(text):
            text = text.encode('utf-8')
        encoded = document.encrypt.encode(text)
        return b"<" + binascii.hexlify(encoded) + b">"
    def __str__(self):
        # formats against a dummy document (note: result is bytes)
        return self.format(DummyDoc())
def PDFnumber(n):
    # numbers are used as-is; no wrapper object is needed
    return n
import re
_re_cleanparens=re.compile('[^()]')
del re
def _isbalanced(s):
'''test whether a string is balanced in parens'''
s = _re_cleanparens.sub('',s)
n = 0
for c in s:
if c=='(': n+=1
else:
n -= 1
if n<0: return 0
return not n and 1 or 0
def _checkPdfdoc(utext):
    '''Return 1 if utext encodes cleanly with the pdfdoc codec, else 0.'''
    try:
        utext.encode('pdfdoc')
    except UnicodeEncodeError:
        return 0
    return 1
class PDFString(PDFObject):
    '''A PDF (literal) string object.

    s can be unicode/utf8 bytes or another PDFString (copy construction).
    If escape is true the output is passed through escapePDF; bit 2 turns
    escaped \\012 back into newlines and bit 4 unescapes balanced parens.
    If enc is 'raw' the bytes are left alone; with 'auto' the string is
    stored in pdfdoc encoding when possible, otherwise as BOM-prefixed
    utf_16_be.
    '''
    def __init__(self, s, escape=1, enc='auto'):
        if isinstance(s, PDFString):
            # copy constructor
            self.s = s.s
            self.escape = s.escape
            self.enc = s.enc
        else:
            self.s = s
            self.escape = escape
            self.enc = enc
    def format(self, document):
        s = self.s
        enc = getattr(self, 'enc', 'auto')
        if isBytes(s):
            # bug fix: was `enc is 'auto'` -- identity comparison with a
            # string literal only works by CPython interning accident
            if enc == 'auto':
                try:
                    u = s.decode(s.startswith(codecs.BOM_UTF16_BE) and 'utf16' or 'utf8')
                    if _checkPdfdoc(u):
                        s = u.encode('pdfdoc')
                    else:
                        s = codecs.BOM_UTF16_BE + u.encode('utf_16_be')
                except:
                    # not utf8/utf16: accept raw bytes if they are valid pdfdoc
                    try:
                        s.decode('pdfdoc')
                    except:
                        stderr.write('Error in %s' % (repr(s),))
                        raise
        elif isUnicode(s):
            if enc == 'auto':    # bug fix: was `enc is 'auto'`
                if _checkPdfdoc(s):
                    s = s.encode('pdfdoc')
                else:
                    s = codecs.BOM_UTF16_BE + s.encode('utf_16_be')
            else:
                s = codecs.BOM_UTF16_BE + s.encode('utf_16_be')
        else:
            raise ValueError('PDFString argument must be str/unicode not %s' % type(s))
        escape = getattr(self, 'escape', 1)
        if not isinstance(document.encrypt, NoEncryption):
            # encrypted bytes may contain delimiters, so force escaping
            s = document.encrypt.encode(s)
            escape = 1
        if escape:
            try:
                es = "(%s)" % escapePDF(s)
            except:
                raise ValueError("cannot escape %s %s" % (s, repr(s)))
            if escape & 2:
                es = es.replace('\\012', '\n')
            if escape & 4 and _isbalanced(es):
                es = es.replace('\\(', '(').replace('\\)', ')')
            return pdfdocEnc(es)
        else:
            return b'(' + s + b')'
    def __str__(self):
        return "(%s)" % escapePDF(self.s)
def PDFName(data, lo=chr(0x21), hi=chr(0x7e)):
    """Convert a string into a PDF name: '/' prefix, unsafe chars as #hh.

    Characters outside [lo, hi] and PDF delimiters are hex-escaped.
    NOTE: the result must always support meaningful equality and hashing
    (it is a plain str, so it does).
    """
    delimiters = "%()<>{}[]#"
    out = []
    for ch in data:
        if ch < lo or ch > hi or ch in delimiters:
            out.append("#" + hex(ord(ch))[2:])  # drop the '0x' prefix
        else:
            out.append(ch)
    return "/" + "".join(out)
class PDFDictionary(PDFObject):
    """A PDF dictionary << /Key value ... >> keyed by plain name strings."""
    multiline = True
    def __init__(self, dict=None):
        """dict should be namestring to value eg "a": 122 NOT pdfname to value NOT "/a":122"""
        self.dict = {} if dict is None else dict.copy()
    def __setitem__(self, name, value):
        self.dict[name] = value
    def __getitem__(self, a):
        return self.dict[a]
    def __contains__(self, a):
        return a in self.dict
    def Reference(self, name, document):
        """Replace the entry's value with an indirect reference to it."""
        self.dict[name] = document.Reference(self.dict[name])
    def format(self, document, IND=b'\r\n '):
        d = self.dict
        try:
            keys = list(d.keys())
        except:
            print(ascii(d))
            raise
        # ordered dicts keep their insertion order; others are sorted
        if not isinstance(d, OrderedDict):
            keys.sort()
        entries = [format(PDFName(k), document) + b" " + format(d[k], document) for k in keys]
        if self.multiline and rl_config.pdfMultiLine:
            body = IND.join(entries)
        else:
            # insert a soft line break after every sixth element
            for pos in reversed(range(6, len(entries), 6)):
                entries.insert(pos, b'\r\n ')
            body = b" ".join(entries)
        return b'<< ' + body + b' >>'
    def copy(self):
        return PDFDictionary(self.dict)
    def normalize(self):
        """Rename '/Name' style keys to the RL-standard bare 'Name' form."""
        d = self.dict
        for k in [k for k in d.keys() if k.startswith('/')]:
            d[k[1:]] = d.pop(k)
class checkPDFNames:
    """Validator: accepts only values among a fixed set of PDF names."""
    def __init__(self, *names):
        self.names = [PDFName(n) for n in names]
    def __call__(self, value):
        if not value.startswith('/'):
            value = PDFName(value)
        return value if value in self.names else None
def checkPDFBoolean(value):
    """Validator: pass 'true'/'false' through; anything else yields None."""
    return value if value in ('true', 'false') else None
class CheckedPDFDictionary(PDFDictionary):
    """A PDFDictionary whose keys and values are vetted by a validator map."""
    validate = {}
    def __init__(self, dict=None, validate=None):
        PDFDictionary.__init__(self, dict)
        if validate:
            self.validate = validate
    def __setitem__(self, name, value):
        if name not in self.validate:
            raise ValueError('invalid key, %r' % name)
        checked = self.validate[name](value)
        if checked is None:
            raise ValueError('Bad value %r for key %r' % (value, name))
        PDFDictionary.__setitem__(self, name, checked)
class ViewerPreferencesPDFDictionary(CheckedPDFDictionary):
    # validator table: each /ViewerPreferences key maps to a checker that
    # returns the normalized value, or None for invalid input
    validate=dict(
                HideToolbar=checkPDFBoolean,
                HideMenubar=checkPDFBoolean,
                HideWindowUI=checkPDFBoolean,
                FitWindow=checkPDFBoolean,
                CenterWindow=checkPDFBoolean,
                DisplayDocTitle=checkPDFBoolean, #contributed by mark Erbaugh
                NonFullScreenPageMode=checkPDFNames(*'UseNone UseOutlines UseThumbs UseOC'.split()),
                Direction=checkPDFNames(*'L2R R2L'.split()),
                ViewArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
                ViewClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
                PrintArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
                PrintClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
                PrintScaling=checkPDFNames(*'None AppDefault'.split()),
                )
# stream filters are objects to support round trip and
# possibly in the future also support parameters
class PDFStreamFilterZCompress:
    """Stream filter pair for zlib (Flate) compression."""
    pdfname = "FlateDecode"
    def encode(self, text):
        from reportlab.lib.utils import import_zlib
        zlib = import_zlib()
        if not zlib:
            raise ImportError("cannot z-compress zlib unavailable")
        if isUnicode(text):
            text = text.encode('utf8')
        return zlib.compress(text)
    def decode(self, encoded):
        from reportlab.lib.utils import import_zlib
        zlib = import_zlib()
        if not zlib:
            raise ImportError("cannot z-decompress zlib unavailable")
        return zlib.decompress(encoded)

# a single shared instance suffices until filters grow parameters
PDFZCompress = PDFStreamFilterZCompress()
class PDFStreamFilterBase85Encode:
    """Stream filter pair for ASCII base-85 encoding."""
    pdfname = "ASCII85Decode"
    def encode(self, text):
        from reportlab.pdfbase.pdfutils import _wrap
        encoded = asciiBase85Encode(text)
        # optionally wrap long lines for readability of the output file
        return _wrap(encoded) if rl_config.wrapA85 else encoded
    def decode(self, text):
        return asciiBase85Decode(text)

# a single shared instance suffices here too
PDFBase85Encode = PDFStreamFilterBase85Encode()
class PDFStream(PDFObject):
    '''set dictionary elements explicitly stream.dictionary[name]=value'''
    ### compression stuff not implemented yet
    __RefOnly__ = 1 # streams must be top-level indirect objects
    def __init__(self, dictionary=None, content=None, filters=None):
        self.dictionary = PDFDictionary() if dictionary is None else dictionary
        self.content = content
        self.filters = filters
    def format(self, document):
        if self.content is None:
            raise ValueError("stream content not set")
        # work on a copy so the stored dictionary is never mutated
        dictionary = PDFDictionary(self.dictionary.dict.copy())
        content = self.content
        filters = self.filters
        if filters is None:
            filters = document.defaultStreamFilters
        # only apply filters if they haven't been applied elsewhere
        if filters is not None and "Filter" not in dictionary.dict:
            # encode with the last filter first; /Filter lists decode order
            fnames = []
            for f in reversed(list(filters)):
                content = f.encode(content)
                fnames.insert(0, PDFName(f.pdfname))
            dictionary["Filter"] = PDFArray(fnames)
        # "stream encoding is done after all filters have been applied"
        content = document.encrypt.encode(content)
        fc = format(content, document)
        # /Length is the byte count of the fully filtered+encrypted content
        dictionary["Length"] = len(content)
        fd = format(dictionary, document)
        return fd + b'\r\nstream\r\n' + fc + b'endstream\r\n'
def teststream(content=None):
    """Build a small compressed PDFStream for testing.

    Defaults to the module-level teststreamcontent.
    """
    if content is None:
        content = teststreamcontent
    text = content.strip().replace("\n", '\n\r') + '\n\r'
    filters = [PDFBase85Encode, PDFZCompress] if rl_config.useA85 else [PDFZCompress]
    S = PDFStream(content=text, filters=filters)
    # nothing else needed...
    S.__Comment__ = "test stream"
    return S
teststreamcontent = """
1 0 0 1 0 0 cm BT /F9 12 Tf 14.4 TL ET
1.00 0.00 1.00 rg
n 72.00 72.00 432.00 648.00 re B*
"""
class PDFArray(PDFObject):
    """A PDF array [ ... ]; elements are formatted recursively."""
    multiline = True
    def __init__(self, sequence):
        self.sequence = list(sequence)
    def References(self, document):
        """make all objects in sequence references"""
        self.sequence = [document.Reference(e) for e in self.sequence]
    def format(self, document, IND=b'\r\n '):
        parts = [format(e, document) for e in self.sequence]
        if self.multiline and rl_config.pdfMultiLine:
            body = IND.join(parts)
        else:
            if len(parts) > 10:
                # insert a soft line break after every tenth element
                for pos in reversed(range(10, len(parts), 10)):
                    parts.insert(pos, b'\r\n ')
            body = b' '.join(parts)
        return b'[ ' + body + b' ]'
class PDFArrayCompact(PDFArray):
    # identical to PDFArray but always rendered without line breaks
    multiline=False
class PDFIndirectObject(PDFObject):
    """Wraps content as a top-level 'n v obj ... endobj' construct."""
    __RefOnly__ = 1
    def __init__(self, name, content):
        self.name = name
        self.content = content
    def format(self, document):
        n, v = document.idToObjectNumberAndVersion[self.name]
        # encryption keys depend on the (object number, version) pair
        document.encrypt.register(n, v)
        body = format(self.content, document, toplevel=1) # yes this is at top level
        tail = b'' if body.endswith(b'\r\n') else b'\r\n'
        return pdfdocEnc("%s %s obj\r\n" % (n, v)) + body + tail + b'endobj\r\n'
class PDFObjectReference(PDFObject):
    """An indirect reference to a registered object, serialized as 'n v R'."""
    def __init__(self, name):
        self.name = name
    def format(self, document):
        try:
            numver = document.idToObjectNumberAndVersion[self.name]
            return pdfdocEnc("%s %s R" % numver)
        except:
            raise KeyError("forward reference to %s not resolved upon final formatting" % repr(self.name))
class PDFFile(PDFObject):
    ### just accumulates strings: keeps track of current offset
    """Accumulates the output byte strings and tracks the running offset."""
    def __init__(self, pdfVersion=PDF_VERSION_DEFAULT):
        self.strings = []
        self.write = self.strings.append
        self.offset = 0
        ### chapter 5
        # Header per Ken Lunde's advice and the PDF spec: include some
        # high-order bytes (the characters for Tokyo in Shift-JIS, which
        # cannot be mistaken for any other encoding) so we can tell if the
        # file has been run through a dodgy Unicode conversion.
        self.add((pdfdocEnc("%%PDF-%s.%s" % pdfVersion) +
                  b'\r\n%\223\214\213\236 ReportLab Generated PDF document http://www.reportlab.com\r\n'
                  ))
    def closeOrReset(self):
        pass
    def add(self, s):
        """Append s (pdfdoc-encoded) and return the offset where it starts."""
        s = pdfdocEnc(s)
        placed_at = self.offset
        self.offset = placed_at + len(s)
        self.write(s)
        return placed_at
    def format(self, document):
        return b''.join(self.strings)
# xref entry format: 10-digit byte offset, 5-digit generation, 'n' (in use)
XREFFMT = '%0.10d %0.5d n'
class PDFCrossReferenceSubsection(PDFObject):
    """One contiguous subsection of the cross-reference ('xref') table."""
    def __init__(self, firstentrynumber, idsequence):
        self.firstentrynumber = firstentrynumber
        self.idsequence = idsequence
    def format(self, document):
        """id sequence should represent contiguous object nums else error. free numbers not supported (yet)"""
        firstentrynumber = self.firstentrynumber
        idsequence = self.idsequence
        entries = list(idsequence)
        nentries = len(idsequence)
        # special case: object number 0 is always free
        taken = {}
        if firstentrynumber==0:
            taken[0] = "standard free entry"
            nentries = nentries+1
            entries.insert(0, "0000000000 65535 f")
        idToNV = document.idToObjectNumberAndVersion
        idToOffset = document.idToOffset
        lastentrynumber = firstentrynumber+nentries-1
        for id in idsequence:
            (num, version) = idToNV[id]
            if num in taken:
                # bug fix: the message formerly formatted taken[id], but
                # `taken` is keyed by object number -- that raised KeyError
                # while reporting the collision
                raise ValueError("object number collision %s %s %s" % (num, repr(id), repr(taken[num])))
            if num>lastentrynumber or num<firstentrynumber:
                raise ValueError("object number %s not in range %s..%s" % (num, firstentrynumber, lastentrynumber))
            taken[num] = id
            offset = idToOffset[id]
            # bug fix: index relative to firstentrynumber (was entries[num],
            # which is only correct when firstentrynumber == 0)
            entries[num-firstentrynumber] = XREFFMT % (offset, version)
        # subsection header line: first object number and entry count
        firstline = "%s %s" % (firstentrynumber, nentries)
        entries.insert(0, firstline)
        # make sure it ends with \r\n
        entries.append("")
        return pdfdocEnc('\r\n'.join(entries))
class PDFCrossReferenceTable(PDFObject):
    """The PDF 'xref' table: one or more cross-reference subsections."""
    def __init__(self):
        self.sections = []
    def addsection(self, firstentry, ids):
        self.sections.append(PDFCrossReferenceSubsection(firstentry, ids))
    def format(self, document):
        if not self.sections:
            raise ValueError("no crossref sections")
        parts = [b"xref\r\n"]
        for section in self.sections:
            parts.append(format(section, document))
        return pdfdocEnc(b''.join(parts))
class PDFTrailer(PDFObject):
    """The file trailer: dictionary, startxref offset and %%EOF marker."""
    def __init__(self, startxref, Size=None, Prev=None, Root=None, Info=None, ID=None, Encrypt=None):
        self.startxref = startxref
        if Size is None or Root is None:
            raise ValueError("Size and Root keys required")
        self.dict = PDFDictionary()
        for key, val in (("Size", Size), ("Prev", Prev), ("Root", Root),
                         ("Info", Info), ("ID", ID), ("Encrypt", Encrypt)):
            if val is not None:
                self.dict[key] = val
    def format(self, document):
        return b''.join([
            b'trailer\r\n',
            format(self.dict, document),
            b'\r\nstartxref\r\n',
            pdfdocEnc(str(self.startxref)),
            b'\r\n%%EOF\r\n',
            ])
#### XXXX skipping incremental update,
#### encryption
#### chapter 6, doc structure
class PDFCatalog(PDFObject):
    """The document catalog (PDF root object); also the base class for
    several other dictionary-like document structures."""
    __Comment__ = "Document Root"
    __RefOnly__ = 1
    # defaults; override by setting instance attributes of the same name
    __Defaults__ = {"Type": PDFName("Catalog"),
                "PageMode": PDFName("UseNone"),
                "Lang": None,
                }
    __NoDefault__ = """
        Dests Outlines Pages Threads AcroForm Names OpenAction PageMode URI
        ViewerPreferences PageLabels PageLayout JavaScript StructTreeRoot SpiderInfo""".split()
    __Refs__ = __NoDefault__ # make these all into references, if present
    def format(self, document):
        self.check_format(document)
        entries = {}
        for key, default in self.__Defaults__.items():
            val = getattr(self, key, default)
            if val is not None:
                entries[key] = val
        for key in self.__NoDefault__:
            val = getattr(self, key, None)
            if val is not None:
                entries[key] = val
        # the keys listed in __Refs__ must be emitted as indirect references
        for key in self.__Refs__:
            if key in entries:
                entries[key] = document.Reference(entries[key])
        return format(PDFDictionary(entries), document)
    def showOutline(self):
        self.setPageMode("UseOutlines")
    def showFullScreen(self):
        self.setPageMode("FullScreen")
    def setPageLayout(self, layout):
        if layout:
            self.PageLayout = PDFName(layout)
    def setPageMode(self, mode):
        if mode:
            self.PageMode = PDFName(mode)
    def check_format(self, document):
        """Hook for subclasses to prepare attributes before formatting."""
        pass
class PDFPages(PDFCatalog):
    """Pages tree with one internal node (kept simple; no balancing)."""
    __Comment__ = "page tree"
    __RefOnly__ = 1
    # note: page attribute inheritance is not implemented
    __Defaults__ = {"Type": PDFName("Pages"),
                    }
    __NoDefault__ = "Kids Count Parent".split()
    __Refs__ = ["Parent"]
    def __init__(self):
        self.pages = []
    def __getitem__(self, item):
        return self.pages[item]
    def addPage(self, page):
        self.pages.append(page)
    def check_format(self, document):
        """Fill Kids/Count from the collected pages, as references."""
        kids = PDFArray(self.pages)
        kids.References(document)
        self.Kids = kids
        self.Count = len(self.pages)
class PDFPage(PDFCatalog):
    """A single page object.

    All PDF attributes can be set explicitly; if the
    Override_default_compilation flag is set, the usual stream/resource
    compilation in check_format is suppressed entirely.
    """
    __Comment__ = "Page dictionary"
    Override_default_compilation = 0
    __RefOnly__ = 1
    __Defaults__ = {"Type": PDFName("Page"),
                    # "Parent": PDFObjectReference(Pages), # no! use document.Pages
                    }
    __NoDefault__ = """Parent
        MediaBox Resources Contents CropBox Rotate Thumb Annots B Dur Hid Trans AA
        PieceInfo LastModified SeparationInfo ArtBox TrimBox BleedBox ID PZ
        Trans""".split()
    __Refs__ = """Contents Parent ID""".split()
    pagewidth = 595   # default page size in points
    pageheight = 842
    stream = None
    hasImages = 0
    compression = 0
    XObjects = None
    # default so check_format's `if self.ExtGState:` test cannot raise
    # AttributeError when nothing has assigned it
    ExtGState = None
    _colorsUsed = {}
    _shadingsUsed = {}
    Trans = None
    # transitionstring?
    # xobjects?
    # annotations
    def __init__(self):
        # set all nodefaults to None
        for name in self.__NoDefault__:
            setattr(self, name, None)
    def setCompression(self, onoff):
        """Enable/disable compression of the page content stream."""
        self.compression = onoff
    def setStream(self, code):
        """Set the page content; a sequence of lines is joined with CRLF."""
        if self.Override_default_compilation:
            raise ValueError("overridden! must set stream explicitly")
        if isSeq(code):
            code = '\r\n'.join(code)+'\r\n'
        self.stream = code
    def setPageTransition(self, tranDict):
        """Attach a /Trans page-transition dictionary."""
        self.Trans = PDFDictionary(tranDict)
    def check_format(self, document):
        """Fill in MediaBox/Annots/Contents/Resources/Parent before formatting."""
        # set up parameters unless usual behaviour is suppressed
        if self.Override_default_compilation:
            return
        # swap width/height when the page is rotated a quarter turn
        self.MediaBox = self.MediaBox or PDFArray(self.Rotate in (90,270) and [0,0,self.pageheight,self.pagewidth] or [0, 0, self.pagewidth, self.pageheight])
        if not self.Annots:
            self.Annots = None
        else:
            if not isinstance(self.Annots,PDFObject):
                self.Annots = PDFArray(self.Annots)
        if not self.Contents:
            stream = self.stream
            if not stream:
                self.Contents = teststream()
            else:
                S = PDFStream()
                if self.compression:
                    S.filters = rl_config.useA85 and [PDFBase85Encode, PDFZCompress] or [PDFZCompress]
                S.content = stream
                S.__Comment__ = "page stream"
                self.Contents = S
        if not self.Resources:
            resources = PDFResourceDictionary()
            # fonts!
            resources.basicFonts()
            if self.hasImages:
                resources.allProcs()
            else:
                resources.basicProcs()
            if self.XObjects:
                resources.XObject = self.XObjects
            if self.ExtGState:
                resources.ExtGState = self.ExtGState
            # bug fix: was self._shadingUsed, which is never defined
            # (the class attribute above is _shadingsUsed)
            resources.setShading(self._shadingsUsed)
            resources.setColorSpace(self._colorsUsed)
            self.Resources = resources
        if not self.Parent:
            pages = document.Pages
            self.Parent = document.Reference(pages)
#this code contributed by Christian Jacobs <cljacobsen@gmail.com>
class DuplicatePageLabelPage(Exception):
    """Raised while sorting page labels when two labels share a page
    number (see PDFPageLabel.__lt__)."""
    pass
class PDFPageLabels(PDFCatalog):
    """Catalog of PDFPageLabel entries, emitted as the /PageLabels tree."""
    __comment__ = None
    __RefOnly__ = 0
    __Defaults__ = {}
    __NoDefault__ = ["Nums"]
    __Refs__ = []
    def __init__(self):
        # list of (page_index, PDFPageLabel) pairs, in insertion order
        self.labels = []
    def addPageLabel(self, page, label):
        """ Adds a new PDFPageLabel to this catalog.
        The 'page' argument, an integer, is the page number in the PDF document
        with which the 'label' should be associated. Page numbering in the PDF
        starts at zero! Thus, to change the label on the first page, '0' should be
        provided as an argument, and to change the 6th page, '5' should be provided
        as the argument.
        The 'label' argument should be a PDFPageLabel instance, which describes the
        format of the labels starting on page 'page' in the PDF and continuing
        until the next encounter of a PDFPageLabel.
        The order in which labels are added is not important.
        """
        self.labels.append((page, label))
    def format(self, document):
        try:
            # tuple sort only compares the PDFPageLabel objects when two
            # entries share a page number; PDFPageLabel.__lt__ may then raise
            self.labels.sort()
        except DuplicatePageLabelPage:
            tmp = sorted([x[0] for x in self.labels])
            # NOTE(review): annotateException presumably re-raises with the
            # extra text attached -- the code below only runs if it returns
            annotateException('\n\n!!!!! Duplicate PageLabel seen for pages %r' % list(set([x for x in tmp if tmp.count(x)>1])))
        labels = []
        for page, label in self.labels:
            labels.append(page)
            labels.append(label)
        self.Nums = PDFArray(labels) #PDFArray makes a copy with list()
        return PDFCatalog.format(self, document)
class PDFPageLabel(PDFCatalog):
    """Changes the style of the 'logical' page numbers a PDF viewer displays.

    Page labels are independent of any 'physical' page numbers drawn on a
    canvas; they control only what the viewer shows.  A PDFPageLabel takes
    effect at the page it is attached to (see PDFPageLabels) and applies to
    all subsequent pages until another PDFPageLabel is encountered.

    Arguments (all optional, may be None):

    * style -- the numbering style:
        - None: no numbering; can be used to show only the prefix.
        - PDFPageLabel.ARABIC: 1, 2, 3, 4...
        - PDFPageLabel.ROMAN_UPPER: I, II, III...
        - PDFPageLabel.ROMAN_LOWER: i, ii, iii...
        - PDFPageLabel.LETTERS_UPPER: A, B, C, D...
        - PDFPageLabel.LETTERS_LOWER: a, b, c, d...
    * start -- positive integer to restart numbering at (e.g. reset to one
      when switching from roman front matter to arabic body pages); with
      None the effect is presumably that numbering continues -- check the
      PDF spec to be sure.
    * prefix -- string prefixed to each number; useful for appendices
      ('A.1', 'A.2', ... 'B.1', ...) or, with style None, for plain text
      labels such as 'Cover' or 'Back'.
    """
    __Comment__ = None
    __RefOnly__ = 0
    __Defaults__ = {}
    __NoDefault__ = "Type S P St".split()
    __convertible__ = 'ARABIC ROMAN_UPPER ROMAN_LOWER LETTERS_UPPER LETTERS_LOWER'
    ARABIC = 'D'
    ROMAN_UPPER = 'R'
    ROMAN_LOWER = 'r'
    LETTERS_UPPER = 'A'
    LETTERS_LOWER = 'a'
    def __init__(self, style=None, start=None, prefix=None):
        if style:
            # accept either the symbolic name ('ARABIC') or the PDF code ('D')
            key = style.upper()
            if key in self.__convertible__:
                style = getattr(self, key)
            self.S = PDFName(style)
        if start:
            self.St = PDFnumber(start)
        if prefix:
            self.P = PDFString(prefix)
    def __lt__(self, oth):
        # labels are only compared when two entries share a page number;
        # optionally treat that as an error
        if rl_config.errorOnDuplicatePageLabelPage:
            raise DuplicatePageLabelPage()
        return False
#ends code contributed by Christian Jacobs <cljacobsen@gmail.com>
def testpage(document):
    """Append a simple test page (teststream content) to the document."""
    page = PDFPage()
    page.Contents = teststream()
    pages = document.Pages
    page.Parent = document.Reference(pages)
    page.MediaBox = PDFArray([0, 0, 595, 841])
    resources = PDFResourceDictionary()
    resources.allProcs() # enable all procsets
    resources.basicFonts()
    page.Resources = resources
    pages.addPage(page)
#### DUMMY OUTLINES IMPLEMENTATION FOR testing
DUMMYOUTLINE = """
<<
/Count
0
/Type
/Outlines
>>"""
class PDFOutlines0(PDFObject):
    """Dummy (empty) outline object, kept for testing only."""
    __Comment__ = "TEST OUTLINE!"
    text = DUMMYOUTLINE.replace("\n", '\r\n')
    __RefOnly__ = 1
    def format(self, document):
        return pdfdocEnc(self.text)
class OutlineEntryObject(PDFObject):
    """A single entry in the document outline (bookmark tree)."""
    Title = Dest = Parent = Prev = Next = First = Last = Count = None
    def format(self, document):
        entries = {
            "Title": PDFString(self.Title),
            "Parent": self.Parent,
            "Dest": self.Dest,
        }
        # the linkage/count attributes are optional
        for key in ("Prev", "Next", "First", "Last", "Count"):
            val = getattr(self, key)
            if val is not None:
                entries[key] = val
        return PDFDictionary(entries).format(document)
class PDFOutlines(PDFObject):
    """
    Builds the document outline (bookmarks) from a recursive list of
    destination names, e.g.::

        out = PDFOutline1()
        out.setNames(canvas, # requires canvas for name resolution
            "chapter1dest",
            ("chapter2dest",
             ["chapter2section1dest",
              "chapter2section2dest",
              "chapter2conclusiondest"]
            ), # end of chapter2 description
            "chapter3dest",
            ("chapter4dest", ["c4s1", "c4s2"])
            )

    A plain string is a leaf; a (name, sublist) tuple is an entry with
    children.  Higher layers may also build the structure incrementally
    via addOutlineEntry.  KISS at base level.
    """
    # first attempt, many possible features missing.
    #no init for now
    mydestinations = ready = None
    counter = 0
    currentlevel = -1 # ie, no levels yet
    def __init__(self):
        self.destinationnamestotitles = {}
        self.destinationstotitles = {}
        self.levelstack = []
        self.buildtree = []
        self.closedict = {} # dictionary of "closed" destinations in the outline
    def addOutlineEntry(self, destinationname, level=0, title=None, closed=None):
        """Add one outline entry at the given nesting level.

        destinationname of None means "close the tree".  Levels may only
        increase one step at a time; decreasing folds the finished deeper
        levels into (name, sectionlist) tuples on the level above.
        """
        if destinationname is None and level!=0:
            raise ValueError("close tree must have level of 0")
        if not isinstance(level,int): raise ValueError("level must be integer, got %s" % type(level))
        if level<0: raise ValueError("negative levels not allowed")
        if title is None: title = destinationname
        currentlevel = self.currentlevel
        stack = self.levelstack
        tree = self.buildtree
        # adjust currentlevel and stack to match level
        if level>currentlevel:
            if level>currentlevel+1:
                raise ValueError("can't jump from outline level %s to level %s, need intermediates (destinationname=%r, title=%r)" %(currentlevel, level, destinationname, title))
            level = currentlevel = currentlevel+1
            stack.append([])
        while level<currentlevel:
            # pop off levels to match
            current = stack[-1]
            del stack[-1]
            previous = stack[-1]
            lastinprevious = previous[-1]
            if isinstance(lastinprevious,tuple):
                (name, sectionlist) = lastinprevious
                raise ValueError("cannot reset existing sections: " + repr(lastinprevious))
            else:
                name = lastinprevious
                sectionlist = current
                previous[-1] = (name, sectionlist)
            #sectionlist.append(current)
            currentlevel = currentlevel-1
        if destinationname is None: return
        stack[-1].append(destinationname)
        self.destinationnamestotitles[destinationname] = title
        if closed: self.closedict[destinationname] = 1
        self.currentlevel = level
    def setDestinations(self, destinationtree):
        self.mydestinations = destinationtree
    def format(self, document):
        """Emit the /Outlines dictionary; prepare() must have run first
        (it sets self.count/self.first/self.last)."""
        D = {}
        D["Type"] = PDFName("Outlines")
        c = self.count
        D["Count"] = c
        if c!=0:
            D["First"] = self.first
            D["Last"] = self.last
        PD = PDFDictionary(D)
        return PD.format(document)
    def setNames(self, canvas, *nametree):
        """Set destinations from varargs of names (canvas resolves them)."""
        desttree = self.translateNames(canvas, nametree)
        self.setDestinations(desttree)
    def setNameList(self, canvas, nametree):
        "Explicit list so I don't need to do in the caller"
        desttree = self.translateNames(canvas, nametree)
        self.setDestinations(desttree)
    def translateNames(self, canvas, object):
        "recursively translate tree of names into tree of destinations"
        destinationnamestotitles = self.destinationnamestotitles
        destinationstotitles = self.destinationstotitles
        closedict = self.closedict
        if isStr(object):
            if not isUnicode(object): object = object.decode('utf8')
            destination = canvas._bookmarkReference(object)
            title = object
            if object in destinationnamestotitles:
                title = destinationnamestotitles[object]
            else:
                destinationnamestotitles[title] = title
            destinationstotitles[destination] = title
            if object in closedict:
                closedict[destination] = 1 # mark destination closed
            return {object: canvas._bookmarkReference(object)} # name-->ref
        if isSeq(object):
            L = []
            for o in object:
                L.append(self.translateNames(canvas, o))
            if isinstance(object,tuple):
                return tuple(L)
            return L
        # bug contributed by Benjamin Dumke <reportlab@benjamin-dumke.de>
        raise TypeError("in outline, destination name must be string: got a %s"%type(object))
    def prepare(self, document, canvas):
        """prepare all data structures required for save operation (create related objects)"""
        if self.mydestinations is None:
            if self.levelstack:
                self.addOutlineEntry(None) # close the tree
                destnames = self.levelstack[0]
                #from pprint import pprint; pprint(destnames); stop
                self.mydestinations = self.translateNames(canvas, destnames)
            else:
                # no outline entries at all: emit an empty outline
                self.first = self.last = None
                self.count = 0
                self.ready = 1
                return
        #self.first = document.objectReference("Outline.First")
        #self.last = document.objectReference("Outline.Last")
        # XXXX this needs to be generalized for closed entries!
        self.count = count(self.mydestinations, self.closedict)
        (self.first, self.last) = self.maketree(document, self.mydestinations, toplevel=1)
        self.ready = 1
    def maketree(self, document, destinationtree, Parent=None, toplevel=0):
        """Recursively register OutlineEntryObjects for the destination
        tree; returns (firstref, lastref) for the sibling chain."""
        if toplevel:
            levelname = "Outline"
            Parent = document.Reference(document.Outlines)
        else:
            self.count = self.count+1
            levelname = "Outline.%s" % self.count
            if Parent is None:
                raise ValueError("non-top level outline elt parent must be specified")
        if not isSeq(destinationtree):
            raise ValueError("destinationtree must be list or tuple, got %s")
        nelts = len(destinationtree)
        lastindex = nelts-1
        lastelt = firstref = lastref = None
        destinationnamestotitles = self.destinationnamestotitles
        closedict = self.closedict
        for index in range(nelts):
            eltobj = OutlineEntryObject()
            eltobj.Parent = Parent
            eltname = "%s.%s" % (levelname, index)
            eltref = document.Reference(eltobj, eltname)
            #document.add(eltname, eltobj)
            if lastelt is not None:
                # link the sibling chain in both directions
                lastelt.Next = eltref
                eltobj.Prev = lastref
            if firstref is None:
                firstref = eltref
            lastref = eltref
            lastelt = eltobj # advance eltobj
            lastref = eltref
            elt = destinationtree[index]
            if isinstance(elt,dict):
                # simple leaf {name: dest}
                leafdict = elt
            elif isinstance(elt,tuple):
                # leaf with subsections: ({name: ref}, subsections) XXXX should clean up (see count(...))
                try:
                    (leafdict, subsections) = elt
                except:
                    raise ValueError("destination tree elt tuple should have two elts, got %s" % len(elt))
                eltobj.Count = count(subsections, closedict)
                (eltobj.First, eltobj.Last) = self.maketree(document, subsections, eltref)
            else:
                raise ValueError("destination tree elt should be dict or tuple, got %s" % type(elt))
            try:
                [(Title, Dest)] = list(leafdict.items())
            except:
                raise ValueError("bad outline leaf dictionary, should have one entry "+bytestr(elt))
            eltobj.Title = destinationnamestotitles[Title]
            eltobj.Dest = Dest
            if isinstance(elt,tuple) and Dest in closedict:
                # closed subsection, count should be negative
                eltobj.Count = -eltobj.Count
        return (firstref, lastref)
def count(tree, closedict=None):
    """utility for outline: recursively count leaves in a tuple/list tree

    A tuple element is a leaf with subsections ``(leafdict, subsections)``;
    when its destination appears in ``closedict`` the whole subtree counts
    as a single (collapsed) entry.  An open tuple falls through to the
    sequence branch, yielding 1 (its leaf dict) plus the subsection count.
    """
    if isinstance(tree,tuple):
        # leaf with subsections XXXX should clean up this structural usage
        (leafdict, subsections) = tree
        [(Title, Dest)] = list(leafdict.items())
        if closedict and Dest in closedict:
            return 1 # closed tree element counts as one visible entry
    if isSeq(tree):
        # sum the counts of the children (was an append loop + sum;
        # the unused `from operator import add` has been dropped)
        return sum(count(e, closedict) for e in tree)
    return 1
class PDFInfo(PDFObject):
    """PDF documents can have basic information embedded, viewable from
    File | Document Info in Acrobat Reader. If this is wrong, you get
    Postscript errors while printing, even though it does not print."""
    # class-level defaults, overridable per instance
    producer = "ReportLab PDF Library - www.reportlab.com"
    creator = "ReportLab PDF Library - www.reportlab.com"
    title = "untitled"
    author = "anonymous"
    subject = "unspecified"
    keywords = ""
    _dateFormatter = None
    def __init__(self):
        self.invariant = rl_config.invariant
        self.trapped = 'False' #could be 'True' or 'Unknown'
    def digest(self, md5object):
        # add self information to signature
        for x in (self.title, self.author, self.subject, self.keywords):
            md5object.update(bytestr(x))
    def format(self, document):
        """Return the formatted /Info dictionary bytes."""
        D = {}
        D["Title"] = PDFString(self.title)
        D["Author"] = PDFString(self.author)
        D['ModDate'] = D["CreationDate"] = PDFDate(invariant=self.invariant,dateFormatter=self._dateFormatter)
        D["Producer"] = PDFString(self.producer)
        D["Creator"] = PDFString(self.creator)
        D["Subject"] = PDFString(self.subject)
        D["Keywords"] = PDFString(self.keywords)
        D["Trapped"] = PDFName(self.trapped)
        PD = PDFDictionary(D)
        return PD.format(document)
    def copy(self):
        "shallow copy - useful in pagecatchering"
        # bug fix: was `self.__klass__()` -- no such attribute exists, so any
        # call raised AttributeError; the intended spelling is `__class__`
        thing = self.__class__()
        for k, v in self.__dict__.items():
            setattr(thing, k, v)
        return thing
# skipping thumbnails, etc
class Annotation(PDFObject):
    """superclass for all annotations.

    Subclasses provide Dict(); this class validates the dictionary keys
    against `required` and `permitted` and handles value conversion.
    """
    defaults = [("Type", PDFName("Annot"),)]
    required = ("Type", "Rect", "Contents", "Subtype")
    permitted = required+(
        "Border", "C", "T", "M", "F", "H", "BS", "AA", "AS", "Popup", "P", "AP")
    def cvtdict(self, d, escape=1):
        """transform dict args from python form to pdf string rep as needed"""
        rect = d["Rect"]
        if not isStr(rect):
            d["Rect"] = PDFArray(rect)
        d["Contents"] = PDFString(d["Contents"], escape)
        return d
    def AnnotationDict(self, **kw):
        """Merge defaults with kw, validate keys, and return a PDFDictionary."""
        escape = kw.pop('escape', 1)
        d = dict(self.defaults)
        d.update(kw)
        for name in self.required:
            if name not in d:
                raise ValueError("keyword argument %s missing" % name)
        d = self.cvtdict(d, escape=escape)
        allowed = self.permitted
        for name in d.keys():
            if name not in allowed:
                raise ValueError("bad annotation dictionary name %s" % name)
        return PDFDictionary(d)
    def Dict(self):
        raise ValueError("DictString undefined for virtual superclass Annotation, must overload")
        # but usually
        #return self.AnnotationDict(self, Rect=(a,b,c,d)) or whatever
    def format(self, document):
        return self.Dict().format(document)
class TextAnnotation(Annotation):
    """A sticky-note style /Text annotation at Rect showing Contents."""
    permitted = Annotation.permitted + (
        "Open", "Name")
    def __init__(self, Rect, Contents, **kw):
        self.Rect = Rect
        self.Contents = Contents
        self.otherkw = kw
    def Dict(self):
        entries = dict(self.otherkw)
        entries["Rect"] = self.Rect
        entries["Contents"] = self.Contents
        entries["Subtype"] = "/Text"
        return self.AnnotationDict(**entries)
class FreeTextAnnotation(Annotation):
    """A /FreeText annotation; DA is the default-appearance string."""
    permitted = Annotation.permitted + ("DA",)
    def __init__(self, Rect, Contents, DA, **kw):
        self.Rect = Rect
        self.Contents = Contents
        self.DA = DA
        self.otherkw = kw
    def Dict(self):
        entries = dict(self.otherkw)
        entries["Rect"] = self.Rect
        entries["Contents"] = self.Contents
        entries["DA"] = self.DA
        entries["Subtype"] = "/FreeText"
        return self.AnnotationDict(**entries)
class LinkAnnotation(Annotation):
    """A /Link annotation jumping to Destination when Rect is clicked."""
    permitted = Annotation.permitted + (
        "Dest", "A", "PA")
    def __init__(self, Rect, Contents, Destination, Border="[0 0 1]", **kw):
        self.Border = Border
        self.Rect = Rect
        self.Contents = Contents
        self.Destination = Destination
        self.otherkw = kw
    def dummyDictString(self): # old, testing
        return """
        << /Type /Annot /Subtype /Link /Rect [71 717 190 734] /Border [16 16 1]
        /Dest [23 0 R /Fit] >>
        """
    def Dict(self):
        entries = dict(self.otherkw)
        entries["Border"] = self.Border
        entries["Rect"] = self.Rect
        entries["Contents"] = self.Contents
        entries["Subtype"] = "/Link"
        entries["Dest"] = self.Destination
        return self.AnnotationDict(**entries)
class HighlightAnnotation(Annotation):
    """
    HighlightAnnotation is an annotation that highlights the selected area.

    Rect is the mouseover area that will show the contents.

    QuadPoints is a list of points to highlight, you can have many groups of
    four QuadPoints to allow highlighting many lines.
    """
    permitted = Annotation.permitted + ("QuadPoints", )
    def __init__(self, Rect, Contents, QuadPoints, Color=[0.83, 0.89, 0.95], **kw):
        # NOTE(review): Color's mutable default list is shared across calls;
        # harmless as long as no caller mutates it -- confirm.
        self.Rect = Rect
        self.Contents = Contents
        self.otherkw = kw
        self.QuadPoints = QuadPoints
        self.Color = Color
    def cvtdict(self, d, escape=1):
        """transform dict args from python form to pdf string rep as needed"""
        Rect = d["Rect"]
        Quad = d["QuadPoints"]
        Color = d["C"]
        # The arrays are pre-formatted to strings here.  Note `d` (a plain
        # dict) is passed where a document is normally expected; this works
        # because plain numeric elements never consult the document (see the
        # numeric branch of the module-level format()).
        if not isinstance(Rect, str):
            d["Rect"] = PDFArray(Rect).format(d, IND=b" ")
        if not isinstance(Quad, str):
            d["QuadPoints"] = PDFArray(Quad).format(d, IND=b" ")
        if not isinstance(Color, str):
            d["C"] = PDFArray(Color).format(d, IND=b" ")
        d["Contents"] = PDFString(d["Contents"], escape)
        return d
    def Dict(self):
        d = {}
        d.update(self.otherkw)
        d["Rect"] = self.Rect
        d["Contents"] = self.Contents
        d["Subtype"] = "/Highlight"
        d["QuadPoints"] = self.QuadPoints
        d["C"] = self.Color
        return self.AnnotationDict(**d)
def rect_to_quad(Rect):
    """
    Utility method to convert a Rect to a QuadPoint
    """
    x0, y0, x1, y1 = Rect[0], Rect[1], Rect[2], Rect[3]
    # order: lower-left, lower-right, upper-left, upper-right
    return [x0, y0, x1, y0, x0, y1, x1, y1]
# skipping names tree
# skipping actions
# skipping names trees
# skipping to chapter 7
class PDFRectangle(PDFObject):
    # A rectangle formatted as a 4-element PDF array [llx lly urx ury].
    def __init__(self, llx, lly, urx, ury):
        # NOTE(review): the attribute is spelled `ulx` though the parameter is
        # `urx`; kept as-is since external code may read `.ulx`.
        self.llx, self.lly, self.ulx, self.ury = llx, lly, urx, ury
    def format(self, document):
        A = PDFArray([self.llx, self.lly, self.ulx, self.ury])
        return format(A, document)
_NOWT=None
def _getTimeStamp():
global _NOWT
if not _NOWT:
import time
_NOWT = time.time()
return _NOWT
class PDFDate(PDFObject):
    """A date value formatted per the PDF convention D:YYYYMMDDHHmmSS+HH'mm'.

    With invariant=True a fixed date (2000-01-01, zero offset) is used so
    output is reproducible; otherwise the cached process timestamp is used.
    """
    # gmt offset now suppported properly
    def __init__(self, invariant=rl_config.invariant, dateFormatter=None):
        if invariant:
            now = (2000,1,1,0,0,0,0)
            self.dhh = 0
            self.dmm = 0
        else:
            import time
            now = tuple(time.localtime(_getTimeStamp())[:6])
            from time import timezone
            # NOTE(review): `timezone` ignores DST; time.altzone would apply
            # when daylight saving is in effect -- confirm intended.
            self.dhh = int(timezone / (3600.0))
            # bug fix: was `(timezone % 3600) % 60`, which is seconds-mod-60
            # (always 0 for half-hour offsets such as +05:30); the minutes
            # part of the offset is (remaining seconds) // 60
            self.dmm = (timezone % 3600) // 60
        self.date = now[:6]
        self.dateFormatter = dateFormatter
    def format(self, doc):
        dfmt = self.dateFormatter or (
                lambda yyyy,mm,dd,hh,m,s:
                    "D:%04d%02d%02d%02d%02d%02d%+03d'%02d'"
                        % (yyyy,mm,dd,hh,m,s,self.dhh,self.dmm))
        return format(PDFString(dfmt(*self.date)), doc)
class Destination(PDFObject):
    """
    not a PDFObject!  This is a placeholder that can delegates
    to a pdf object only after it has been defined by the methods
    below.

    EG a Destination can refer to Appendix A before it has been
    defined, but only if Appendix A is explicitly noted as a destination
    and resolved before the document is generated...

    For example the following sequence causes resolution before doc generation.
        d = Destination()
        d.fit() # or other format defining method call
        d.setPage(p)
        (at present setPageRef is called on generation of the page).
    """
    # NOTE(review): despite the docstring's first line, this class DOES
    # subclass PDFObject.  The class attribute below pre-declares
    # representation/format/page as None; the `format` method definition
    # following it overrides the None placeholder for `format`.
    representation = format = page = None
    def __init__(self,name):
        self.name = name
        self.fmt = self.page = None
    def format(self, document):
        # both the view format (set by one of the fit/xyz methods) and the
        # page must have been resolved before the document is generated
        f = self.fmt
        if f is None: raise ValueError("format not resolved, probably missing URL scheme or undefined destination target for '%s'" % self.name)
        p = self.page
        if p is None: raise ValueError("Page not bound, probably missing URL scheme or undefined destination target for '%s'" % self.name)
        f.page = p
        return f.format(document)
    def xyz(self, left, top, zoom):  # see pdfspec mar 11 99 pp184+
        self.fmt = PDFDestinationXYZ(None, left, top, zoom)
    def fit(self):
        self.fmt = PDFDestinationFit(None)
    def fitb(self):
        self.fmt = PDFDestinationFitB(None)
    def fith(self, top):
        self.fmt = PDFDestinationFitH(None,top)
    def fitv(self, left):
        self.fmt = PDFDestinationFitV(None, left)
    def fitbh(self, top):
        self.fmt = PDFDestinationFitBH(None, top)
    def fitbv(self, left):
        self.fmt = PDFDestinationFitBV(None, left)
    def fitr(self, left, bottom, right, top):
        self.fmt = PDFDestinationFitR(None, left, bottom, right, top)
    def setPage(self, page):
        self.page = page
        #self.fmt.page = page # may not yet be defined!
class PDFDestinationXYZ(PDFObject):
    """Destination [page /XYZ left top zoom]: position and magnification."""
    typename = "XYZ"
    def __init__(self, page, left, top, zoom):
        self.page = page
        self.left = left
        self.top = top
        self.zoom = zoom
    def format(self, document):
        pageref = document.Reference(self.page)
        entries = [pageref, PDFName(self.typename), self.left, self.top, self.zoom]
        return format(PDFArray(entries), document)
class PDFDestinationFit(PDFObject):
    """Destination [page /Fit]: fit the whole page in the window."""
    typename = "Fit"
    def __init__(self, page):
        self.page = page
    def format(self, document):
        pageref = document.Reference(self.page)
        entries = [pageref, PDFName(self.typename)]
        return format(PDFArray(entries), document)
class PDFDestinationFitB(PDFDestinationFit):
    # same array shape as /Fit; fits the page's bounding box instead
    typename = "FitB"
class PDFDestinationFitH(PDFObject):
    """Destination [page /FitH top]: fit the page width at vertical position top."""
    typename = "FitH"
    def __init__(self, page, top):
        self.page = page
        self.top = top
    def format(self, document):
        pageref = document.Reference(self.page)
        entries = [pageref, PDFName(self.typename), self.top]
        return format(PDFArray(entries), document)
class PDFDestinationFitBH(PDFDestinationFitH):
    # same array shape as /FitH; fits the bounding-box width instead
    typename = "FitBH"
class PDFDestinationFitV(PDFObject):
    """Destination [page /FitV left]: fit the page height at horizontal position left."""
    typename = "FitV"
    def __init__(self, page, left):
        self.page = page
        self.left = left
    def format(self, document):
        pageref = document.Reference(self.page)
        entries = [pageref, PDFName(self.typename), self.left]
        return format(PDFArray(entries), document)
class PDFDestinationFitBV(PDFDestinationFitV):
    # same array shape as /FitV; fits the bounding-box height instead
    typename = "FitBV"
class PDFDestinationFitR(PDFObject):
    """Destination [page /FitR left bottom right top]: fit the given rectangle."""
    typename = "FitR"
    def __init__(self, page, left, bottom, right, top):
        self.page = page
        self.left = left
        self.bottom = bottom
        self.right = right
        self.top = top
    def format(self, document):
        pageref = document.Reference(self.page)
        entries = [pageref, PDFName(self.typename),
                   self.left, self.bottom, self.right, self.top]
        return format(PDFArray(entries), document)
# named destinations need nothing
# skipping filespecs
class PDFResourceDictionary(PDFObject):
    """each element *could* be reset to a reference if desired

    Holds the per-page/per-form /Resources entries (fonts, xobjects,
    colour spaces, procsets, ...); format() emits only non-empty slots.
    """
    def __init__(self):
        self.ColorSpace = {}
        self.XObject = {}
        self.ExtGState = {}
        self.Font = {}
        self.Pattern = {}
        self.ProcSet = []
        self.Properties = {}
        self.Shading = {}
        # ?by default define the basicprocs
        self.basicProcs()
    # class attributes; although written after __init__ textually, lookup
    # happens at call time so basicProcs() above can already use stdprocs
    stdprocs = [PDFName(s) for s in "PDF Text ImageB ImageC ImageI".split()]
    dict_attributes = ("ColorSpace", "XObject", "ExtGState", "Font", "Pattern", "Properties", "Shading")
    def allProcs(self):
        # define all standard procsets
        self.ProcSet = self.stdprocs
    def basicProcs(self):
        self.ProcSet = self.stdprocs[:2] # just PDF and Text
    def basicFonts(self):
        self.Font = PDFObjectReference(BasicFonts)
    def setColorSpace(self,colorsUsed):
        for c,s in colorsUsed.items():
            self.ColorSpace[s] = PDFObjectReference(c)
    def setShading(self,shadingUsed):
        for c,s in shadingUsed.items():
            self.Shading[s] = PDFObjectReference(c)
    def format(self, document):
        D = {}
        for dname in self.dict_attributes:
            v = getattr(self, dname)
            if isinstance(v,dict):
                # only emit non-empty dictionaries
                if v:
                    dv = PDFDictionary(v)
                    D[dname] = dv
            else:
                # attribute was replaced wholesale (e.g. basicFonts sets a reference)
                D[dname] = v
        v = self.ProcSet
        dname = "ProcSet"
        if isSeq(v):
            if v:
                dv = PDFArray(v)
                D[dname] = dv
        else:
            D[dname] = v
        DD = PDFDictionary(D)
        return format(DD, document)
##############################################################################
#
# Font objects - the PDFDocument.addFont() method knows which of these
# to construct when given a user-facing Font object
#
##############################################################################
class PDFType1Font(PDFObject):
    """no init: set attributes explicitly"""
    __RefOnly__ = 1
    # note! /Name appears to be an undocumented attribute....
    name_attributes = "Type Subtype BaseFont Name".split()
    Type = "Font"
    Subtype = "Type1"
    # these attributes are assumed to already be of the right type
    local_attributes = "FirstChar LastChar Widths Encoding ToUnicode FontDescriptor".split()
    def format(self, document):
        """Emit the font dictionary: name-valued attrs wrapped in PDFName,
        the rest passed through unchanged.  Absent attributes are skipped."""
        D = {}
        for attr in self.name_attributes:
            if hasattr(self, attr):
                D[attr] = PDFName(getattr(self, attr))
        for attr in self.local_attributes:
            if hasattr(self, attr):
                D[attr] = getattr(self, attr)
        return PDFDictionary(D).format(document)
## These attribute listings will be useful in future, even if we
## put them elsewhere
class PDFTrueTypeFont(PDFType1Font):
    # identical dictionary layout to Type1; only the /Subtype differs
    Subtype = "TrueType"
    #local_attributes = "FirstChar LastChar Widths Encoding ToUnicode FontDescriptor".split() #same
##class PDFMMType1Font(PDFType1Font):
## Subtype = "MMType1"
##
##class PDFType3Font(PDFType1Font):
## Subtype = "Type3"
## local_attributes = "FirstChar LastChar Widths CharProcs FontBBox FontMatrix Resources Encoding".split()
##
##class PDFType0Font(PDFType1Font):
## Subtype = "Type0"
## local_attributes = "DescendantFonts Encoding".split(
##
##class PDFCIDFontType0(PDFType1Font):
## Subtype = "CIDFontType0"
## local_attributes = "CIDSystemInfo FontDescriptor DW W DW2 W2 Registry Ordering Supplement".split()
##
##class PDFCIDFontType0(PDFType1Font):
## Subtype = "CIDFontType2"
## local_attributes = "BaseFont CIDToGIDMap CIDSystemInfo FontDescriptor DW W DW2 W2".split()
##
##class PDFEncoding(PDFType1Font):
## Type = "Encoding"
## name_attributes = "Type BaseEncoding".split()
## # these attributes are assumed to already be of the right type
## local_attributes = ["Differences"]
##
# UGLY ALERT - this needs turning into something O-O, it was hacked
# across from the pdfmetrics.Encoding class to avoid circularity
# skipping CMaps
class PDFFormXObject(PDFObject):
    """A /Form XObject: reusable content stream with its own BBox/Resources."""
    # like page requires .info set by some higher level (doc)
    # XXXX any resource used in a form must be propagated up to the page that (recursively) uses
    #   the form!! (not implemented yet).
    XObjects = Annots = BBox = Matrix = Contents = stream = Resources = None
    hasImages = 1 # probably should change
    compression = 0
    def __init__(self, lowerx, lowery, upperx, uppery):
        #not done
        self.lowerx = lowerx; self.lowery=lowery; self.upperx=upperx; self.uppery=uppery
    def setStreamList(self, data):
        # accept a sequence of stream fragments or a single string
        if isSeq(data):
            data = '\r\n'.join(data)
        self.stream = pdfdocEnc(data)
    def BBoxList(self):
        "get the declared bounding box for the form as a list"
        if self.BBox:
            return list(self.BBox.sequence)
        else:
            return [self.lowerx, self.lowery, self.upperx, self.uppery]
    def format(self, document):
        # NOTE: this mutates self (BBox/Matrix/Contents/Resources defaults)
        # the first time it runs, then formats the resulting stream
        self.BBox = self.BBox or PDFArray([self.lowerx, self.lowery, self.upperx, self.uppery])
        self.Matrix = self.Matrix or PDFArray([1, 0, 0, 1, 0, 0])
        if not self.Annots:
            self.Annots = None
        else:
            #these must be transferred to the page when the form is used
            raise ValueError("annotations don't work in PDFFormXObjects yet")
        if not self.Contents:
            stream = self.stream
            if not stream:
                self.Contents = teststream()
            else:
                S = PDFStream()
                S.content = stream
                # need to add filter stuff (?)
                S.__Comment__ = "xobject form stream"
                self.Contents = S
        if not self.Resources:
            resources = PDFResourceDictionary()
            # fonts!
            resources.basicFonts()
            if self.hasImages:
                resources.allProcs()
            else:
                resources.basicProcs()
            if self.XObjects:
                #print "XObjects", self.XObjects.dict
                resources.XObject = self.XObjects
            self.Resources=resources
        if self.compression:
            self.Contents.filters = rl_config.useA85 and [PDFBase85Encode, PDFZCompress] or [PDFZCompress]
        sdict = self.Contents.dictionary
        sdict["Type"] = PDFName("XObject")
        sdict["Subtype"] = PDFName("Form")
        sdict["FormType"] = 1
        sdict["BBox"] = self.BBox
        sdict["Matrix"] = self.Matrix
        sdict["Resources"] = self.Resources
        return self.Contents.format(document)
class PDFPostScriptXObject(PDFObject):
    "For embedding PD (e.g. tray commands) in PDF"
    def __init__(self, content=None):
        self.content = content
    def format(self, document):
        stream = PDFStream()
        stream.content = self.content
        stream.__Comment__ = "xobject postscript stream"
        entries = stream.dictionary
        entries["Type"] = PDFName("XObject")
        entries["Subtype"] = PDFName("PS")
        return stream.format(document)
# map an image mode string ('RGB'/'L'/'CMYK', as read from im.mode) to the
# corresponding PDF colour space name
_mode2CS={'RGB':'DeviceRGB', 'L':'DeviceGray', 'CMYK':'DeviceCMYK'}
class PDFImageXObject(PDFObject):
    # first attempts at a hard-coded one
    # in the file, Image XObjects are stream objects. We already
    # have a PDFStream object with 3 attributes: dictionary, content
    # and filters. So the job of this thing is to construct the
    # right PDFStream instance and ask it to format itself.
    def __init__(self, name, source=None, mask=None):
        # defaults describe the canned 24x23 1-bit test bitmap below
        self.name = name
        self.width = 24
        self.height = 23
        self.bitsPerComponent = 1
        self.colorSpace = 'DeviceGray'
        self._filters = rl_config.useA85 and ('ASCII85Decode',) or ()
        self.streamContent = """
            003B00 002700 002480 0E4940 114920 14B220 3CB650
            75FE88 17FF8C 175F14 1C07E2 3803C4 703182 F8EDFC
            B2BBC2 BB6F84 31BFC2 18EA3C 0E3E00 07FC00 03F800
            1E1800 1FF800>
            """
        self.mask = mask
        if source is None:
            pass # use the canned one.
        elif hasattr(source,'jpeg_fh'):
            self.loadImageFromSRC(source)   #it is already a PIL Image
        else:
            # it is a filename
            import os
            ext = os.path.splitext(source)[1].lower()
            src = open_for_read(source)
            try:
                # JPEGs embed directly; everything else goes through the
                # A85/raw helpers (choice driven by rl_config.useA85)
                if not(ext in ('.jpg', '.jpeg') and self.loadImageFromJPEG(src)):
                    if rl_config.useA85:
                        self.loadImageFromA85(src)
                    else:
                        self.loadImageFromRaw(src)
            finally:
                src.close()
    def loadImageFromA85(self,source):
        """Load from a pre-encoded A85 image cache; falls back to SRC loading
        when the helper detects a JPEG and returns it via IMG instead."""
        IMG=[]
        imagedata = pdfutils.makeA85Image(source,IMG=IMG,detectJpeg=True)
        if not imagedata:
            return self.loadImageFromSRC(IMG[0])
        imagedata = [s.strip() for s in imagedata]
        words = imagedata[1].split()
        # header line carries width/height/colour space tokens
        self.width, self.height = (int(words[1]),int(words[3]))
        self.colorSpace = {'/RGB':'DeviceRGB', '/G':'DeviceGray', '/CMYK':'DeviceCMYK'}[words[7]]
        self.bitsPerComponent = 8
        self._filters = 'ASCII85Decode','FlateDecode' #'A85','Fl'
        if IMG: self._checkTransparency(IMG[0])
        elif self.mask=='auto': self.mask = None
        self.streamContent = ''.join(imagedata[3:-1])
    def loadImageFromJPEG(self,imageFile):
        """Try to embed imageFile as a JPEG; returns False if it isn't one."""
        try:
            try:
                info = pdfutils.readJPEGInfo(imageFile)
            finally:
                imageFile.seek(0) #reset file pointer
        except:
            return False
        self.width, self.height = info[0], info[1]
        self.bitsPerComponent = 8
        if info[2] == 1:
            self.colorSpace = 'DeviceGray'
        elif info[2] == 3:
            self.colorSpace = 'DeviceRGB'
        else: #maybe should generate an error, is this right for CMYK?
            self.colorSpace = 'DeviceCMYK'
            self._dotrans = 1
        self.streamContent = imageFile.read()
        if rl_config.useA85:
            self.streamContent = asciiBase85Encode(self.streamContent)
            self._filters = 'ASCII85Decode','DCTDecode' #'A85','DCT'
        else:
            self._filters = 'DCTDecode', #'DCT'
        self.mask = None
        return True
    def loadImageFromRaw(self,source):
        """Load from a raw (Flate-only) image cache; same fallback as A85."""
        IMG=[]
        imagedata = pdfutils.makeRawImage(source,IMG=IMG,detectJpeg=True)
        if not imagedata:
            return self.loadImageFromSRC(IMG[0])
        words = imagedata[1].split()
        self.width = int(words[1])
        self.height = int(words[3])
        self.colorSpace = {'/RGB':'DeviceRGB', '/G':'DeviceGray', '/CMYK':'DeviceCMYK'}[words[7]]
        self.bitsPerComponent = 8
        self._filters = 'FlateDecode', #'Fl'
        if IMG: self._checkTransparency(IMG[0])
        elif self.mask=='auto': self.mask = None
        self.streamContent = ''.join(imagedata[3:-1])
    def _checkTransparency(self,im):
        """Resolve self.mask: 'auto' derives a soft mask or colour-key mask
        from the image; an object with .rgb() becomes a colour-key range."""
        if self.mask=='auto':
            if im._dataA:
                # alpha channel present: build a separate soft-mask image
                self.mask = None
                self._smask = PDFImageXObject(_digester(im._dataA.getRGBData()),im._dataA,mask=None)
                self._smask._decode = [0,1]
            else:
                tc = im.getTransparent()
                if tc:
                    # single transparent colour -> colour-key mask ranges
                    self.mask = (tc[0], tc[0], tc[1], tc[1], tc[2], tc[2])
                else:
                    self.mask = None
        elif hasattr(self.mask,'rgb'):
            _ = self.mask.rgb()
            self.mask = _[0],_[0],_[1],_[1],_[2],_[2]
    def loadImageFromSRC(self, im):
        "Extracts the stream, width and height"
        fp = im.jpeg_fh()
        if fp:
            self.loadImageFromJPEG(fp)
        else:
            zlib = import_zlib()
            if not zlib: return
            self.width, self.height = im.getSize()
            raw = im.getRGBData()
            #assert len(raw) == self.width*self.height, "Wrong amount of data for image expected %sx%s=%s got %s" % (self.width,self.height,self.width*self.height,len(raw))
            self.streamContent = zlib.compress(raw)
            if rl_config.useA85:
                self.streamContent = asciiBase85Encode(self.streamContent)
                self._filters = 'ASCII85Decode','FlateDecode' #'A85','Fl'
            else:
                self._filters = 'FlateDecode', #'Fl'
            self.colorSpace= _mode2CS[im.mode]
            self.bitsPerComponent = 8
            self._checkTransparency(im)
    def format(self, document):
        """Build the image stream dictionary and format the stream."""
        S = PDFStream(content = self.streamContent)
        dict = S.dictionary
        dict["Type"] = PDFName("XObject")
        dict["Subtype"] = PDFName("Image")
        dict["Width"] = self.width
        dict["Height"] = self.height
        dict["BitsPerComponent"] = self.bitsPerComponent
        dict["ColorSpace"] = PDFName(self.colorSpace)
        if self.colorSpace=='DeviceCMYK' and getattr(self,'_dotrans',0):
            # Adobe-style inverted CMYK JPEG: invert via /Decode
            dict["Decode"] = PDFArray([1,0,1,0,1,0,1,0])
        elif getattr(self,'_decode',None):
            dict["Decode"] = PDFArray(self._decode)
        dict["Filter"] = PDFArray(map(PDFName,self._filters))
        dict["Length"] = len(self.streamContent)
        if self.mask: dict["Mask"] = PDFArray(self.mask)
        # NOTE(review): _checkTransparency stores the soft mask as
        # self._smask, but this checks self.smask -- confirm the /SMask is
        # attached by other machinery before relying on it here.
        if getattr(self,'smask',None): dict["SMask"] = self.smask
        return S.format(document)
class PDFSeparationCMYKColor:
    """Builds the /Separation colour-space array for a CMYK spot colour."""
    def __init__(self, cmyk):
        from reportlab.lib.colors import CMYKColor
        if not isinstance(cmyk,CMYKColor):
            raise ValueError('%s needs a CMYKColor argument' % self.__class__.__name__)
        elif not cmyk.spotName:
            raise ValueError('%s needs a CMYKColor argument with a spotName' % self.__class__.__name__)
        self.cmyk = cmyk
    def _makeFuncPS(self):
        '''create the postscript code for the tint transfer function
        effectively this is tint*c, tint*y, ... tint*k'''
        # the function receives the tint on the stack and must leave the
        # four scaled CMYK components; R accumulates the PS tokens
        R = [].append
        for i,v in enumerate(self.cmyk.cmyk()):
            v=float(v)
            if i==3:
                # last (K) component: consume the tint directly
                if v==0.0:
                    R('pop')
                    R('0.0')
                else:
                    R(str(v))
                    R('mul')
            else:
                # C/M/Y: duplicate the tint before scaling, keep it on top
                if v==0:
                    R('0.0')
                else:
                    R('dup')
                    R(str(v))
                    R('mul')
                R('exch')
        return '{%s}' % (' '.join(R.__self__))
    def value(self):
        # [/Separation /SpotName /DeviceCMYK <type-4 function stream>]
        return PDFArrayCompact((
                    PDFName('Separation'),
                    PDFName(self.cmyk.spotName),
                    PDFName('DeviceCMYK'),
                    PDFStream(
                        dictionary=PDFDictionary(dict(
                            FunctionType=4,
                            Domain=PDFArrayCompact((0,1)),
                            Range=PDFArrayCompact((0,1,0,1,0,1,0,1))
                            )),
                        content=self._makeFuncPS(),
                        filters=None,#[PDFBase85Encode, PDFZCompress],
                        )
                    ))
class PDFFunction(PDFObject):
    """superclass for all function types."""
    defaults = []
    required = ("FunctionType", "Domain")
    permitted = required+("Range",)
    def FunctionDict(self, **kw):
        """Merge defaults with kw, validate keys, and return a PDFDictionary."""
        d = {}
        for (name,val) in self.defaults:
            d[name] = val
        d.update(kw)
        for name in self.required:
            if name not in d:
                raise ValueError("keyword argument %s missing" % name)
        permitted = self.permitted
        for name in d.keys():
            if name not in permitted:
                # fix: message said "bad annotation dictionary name",
                # copy-pasted from Annotation.AnnotationDict
                raise ValueError("bad function dictionary name %s" % name)
        return PDFDictionary(d)
    def Dict(self, document):
        # fix: message previously claimed the superclass was PDFShading
        raise ValueError("Dict undefined for virtual superclass PDFFunction, must overload")
        # but usually
        #return self.FunctionDict(self, ...)
    def format(self, document):
        D = self.Dict(document)
        return D.format(document)
class PDFExponentialFunction(PDFFunction):
    """Type 2 function: exponential interpolation between C0 and C1 with exponent N."""
    defaults = PDFFunction.defaults + [("Domain", PDFArrayCompact((0.0, 1.0)))]
    required = PDFFunction.required + ("N",)
    permitted = PDFFunction.permitted + ("C0", "C1", "N")
    def __init__(self, C0, C1, N, **kw):
        self.C0 = C0
        self.C1 = C1
        self.N = N
        self.otherkw = kw
    def Dict(self, document):
        params = dict(self.otherkw)
        params["FunctionType"] = 2
        params["C0"] = PDFArrayCompact(self.C0)
        params["C1"] = PDFArrayCompact(self.C1)
        params["N"] = self.N
        return self.FunctionDict(**params)
class PDFStitchingFunction(PDFFunction):
    """Type 3 function: stitches subfunctions over Bounds with Encode mapping."""
    required = PDFFunction.required + ("Functions", "Bounds", "Encode")
    permitted = PDFFunction.permitted + ("Functions", "Bounds", "Encode")
    def __init__(self, Functions, Bounds, Encode, **kw):
        self.Functions = Functions
        self.Bounds = Bounds
        self.Encode = Encode
        self.otherkw = kw
    def Dict(self, document):
        params = dict(self.otherkw)
        params["FunctionType"] = 3
        # subfunctions are emitted as indirect references
        params["Functions"] = PDFArray([document.Reference(fn) for fn in self.Functions])
        params["Bounds"] = PDFArray(self.Bounds)
        params["Encode"] = PDFArray(self.Encode)
        return self.FunctionDict(**params)
class PDFShading(PDFObject):
    """superclass for all shading types."""
    required = ("ShadingType", "ColorSpace")
    permitted = required+("Background", "BBox", "AntiAlias")
    def ShadingDict(self, **kw):
        """Validate kw against required/permitted keys and return a PDFDictionary."""
        d = {}
        d.update(kw)
        for name in self.required:
            if name not in d:
                raise ValueError("keyword argument %s missing" % name)
        permitted = self.permitted
        for name in d.keys():
            if name not in permitted:
                # fix: message said "bad annotation dictionary name",
                # copy-pasted from Annotation.AnnotationDict
                raise ValueError("bad shading dictionary name %s" % name)
        return PDFDictionary(d)
    def Dict(self, document):
        raise ValueError("Dict undefined for virtual superclass PDFShading, must overload")
        # but usually
        #return self.ShadingDict(self, ...)
    def format(self, document):
        D = self.Dict(document)
        return D.format(document)
class PDFFunctionShading(PDFShading):
    """ShadingType 1: colour given by a function over the domain."""
    required = PDFShading.required + ("Function",)
    permitted = PDFShading.permitted + ("Domain", "Matrix", "Function")
    def __init__(self, Function, ColorSpace, **kw):
        self.Function = Function
        self.ColorSpace = ColorSpace
        self.otherkw = kw
    def Dict(self, document):
        params = dict(self.otherkw)
        params["ShadingType"] = 1
        params["ColorSpace"] = PDFName(self.ColorSpace)
        params["Function"] = document.Reference(self.Function)
        return self.ShadingDict(**params)
class PDFAxialShading(PDFShading):
    """ShadingType 2: axial (linear) gradient along the axis (x0,y0)-(x1,y1)."""
    required = PDFShading.required + ("Coords", "Function")
    permitted = PDFShading.permitted + (
        "Coords", "Domain", "Function", "Extend")
    def __init__(self, x0, y0, x1, y1, Function, ColorSpace, **kw):
        self.Coords = (x0, y0, x1, y1)
        self.Function = Function
        self.ColorSpace = ColorSpace
        self.otherkw = kw
    def Dict(self, document):
        params = dict(self.otherkw)
        params["ShadingType"] = 2
        params["ColorSpace"] = PDFName(self.ColorSpace)
        params["Coords"] = PDFArrayCompact(self.Coords)
        params["Function"] = document.Reference(self.Function)
        return self.ShadingDict(**params)
class PDFRadialShading(PDFShading):
    """ShadingType 3: radial gradient between circles (x0,y0,r0) and (x1,y1,r1)."""
    required = PDFShading.required + ("Coords", "Function")
    permitted = PDFShading.permitted + (
        "Coords", "Domain", "Function", "Extend")
    def __init__(self, x0, y0, r0, x1, y1, r1, Function, ColorSpace, **kw):
        self.Coords = (x0, y0, r0, x1, y1, r1)
        self.Function = Function
        self.ColorSpace = ColorSpace
        self.otherkw = kw
    def Dict(self, document):
        params = dict(self.otherkw)
        params["ShadingType"] = 3
        params["ColorSpace"] = PDFName(self.ColorSpace)
        params["Coords"] = PDFArrayCompact(self.Coords)
        params["Function"] = document.Reference(self.Function)
        return self.ShadingDict(**params)
if __name__=="__main__":
    # library module: nothing useful to run directly
    print("There is no script interpretation for pdfdoc.")
| 37.438589 | 178 | 0.594501 |
__version__=''' $Id$ '''
__doc__="""
The module pdfdoc.py handles the 'outer structure' of PDF documents, ensuring that
all objects are properly cross-referenced and indexed to the nearest byte. The
'inner structure' - the page descriptions - are presumed to be generated before
each page is saved.
pdfgen.py calls this and provides a 'canvas' object to handle page marking operators.
piddlePDF calls pdfgen and offers a high-level interface.
The classes within this generally mirror structures in the PDF file
and are not part of any public interface. Instead, canvas and font
classes are made available elsewhere for users to manipulate.
"""
import types, binascii, codecs
from collections import OrderedDict
from reportlab.pdfbase import pdfutils
from reportlab import rl_config
from reportlab.lib.utils import import_zlib, open_for_read, makeFileName, isSeq, isBytes, isUnicode, _digester, isStr, bytestr, isPy3, annotateException
from reportlab.lib.rl_accel import escapePDF, fp_str, asciiBase85Encode, asciiBase85Decode
from reportlab.pdfbase import pdfmetrics
from hashlib import md5
from sys import platform
from sys import version_info
from sys import stderr
if platform[:4] == 'java' and version_info[:2] == (2, 1):
    # Jython 2.1 compatibility shim whose list() could not consume arbitrary
    # iterables.  Bug fix: the previous body did `return list(map(f, sequence))`,
    # but inside this def the name `list` resolves to this very function, so
    # any call recursed forever; build the list with a comprehension instead.
    def list(sequence):
        return [x for x in sequence]
class PDFError(Exception):
    """Exception type raised by this module for PDF construction failures."""
    pass
# marker attribute names consulted by the formatting machinery
__InternalName__ = "__InternalName__"
__RefOnly__ = "__RefOnly__"
__Comment__ = "__Comment__"
# well-known internal object names
BasicFonts = "BasicFonts"
Pages = "Pages"
# lowest PDF version emitted by default; features can raise it
PDF_VERSION_DEFAULT = (1, 3)
# minimum PDF version required per feature (see ensureMinPdfVersion)
PDF_SUPPORT_VERSION = dict(
    transparency = (1, 4),
    )
# encode text to bytes with the registered 'extpdfdoc' codec; bytes (and, on
# py2, non-unicode values) pass through unchanged
if isPy3:
    def pdfdocEnc(x):
        return x.encode('extpdfdoc') if isinstance(x,str) else x
else:
    def pdfdocEnc(x):
        return x.encode('extpdfdoc') if isinstance(x,unicode) else x
def format(element, document, toplevel=0):
    """Format any element into PDF file bytes in the context of document.

    PDFObjects marked __RefOnly__ become indirect references unless at top
    level; numbers use the controlled fp_str formatter; text is encoded via
    the pdfdoc codec; anything else is stringified then encoded.
    """
    if isinstance(element,PDFObject):
        if not toplevel and hasattr(element, __RefOnly__):
            # non-top level: emit an indirect reference instead of the object
            return document.Reference(element).format(document)
        else:
            f = element.format(document)
            # optionally prefix a debugging comment (suppressed when invariant)
            if not rl_config.invariant and rl_config.pdfComments and hasattr(element, __Comment__):
                f = pdfdocEnc("%% %s\r\n" % element.__Comment__)+f
            return f
    elif type(element) in (float, int):
        #use a controlled number formatting routine
        #instead of str, so Jython/Python etc do not differ
        return pdfdocEnc(fp_str(element))
    elif isBytes(element):
        return element
    elif isUnicode(element):
        return pdfdocEnc(element)
    else:
        return pdfdocEnc(str(element))
def xObjectName(externalname):
    """Map a user-supplied form name into the internal XObject namespace."""
    return "FormXob.%s" % (externalname,)

# backwards compatibility
formName = xObjectName
# no encryption
class NoEncryption:
    """Identity encrypter installed when a document has no encryption."""
    def encode(self, t):
        # pass data through unchanged
        return t
    def prepare(self, document):
        # nothing to set up before writing
        pass
    def register(self, objnum, version):
        # registering a new direct object is a no-op
        pass
    def info(self):
        # no /Encrypt representation is emitted (should be None or PDFDict)
        return None
class PDFObject(object):
    # marker base class for everything formattable into the PDF file
    pass
class DummyDoc(PDFObject):
    # minimal stand-in where a format(...) call needs a document but no real
    # PDFDocument exists (used by PDFDocument.ID to format the digest string)
    encrypt = NoEncryption()
### the global document structure manager
class PDFDocument(PDFObject):
# set this to define filters
defaultStreamFilters = None
encrypt = NoEncryption() # default no encryption
    def __init__(self,
                 dummyoutline=0,
                 compression=rl_config.pageCompression,
                 invariant=rl_config.invariant,
                 filename=None,
                 pdfVersion=PDF_VERSION_DEFAULT,
                 ):
        """Set up the empty document skeleton: catalog, page tree, outline,
        info dictionary, the running md5 signature and the id/object maps."""
        self._ID = None
        self.objectcounter = 0
        self.shadingCounter = 0
        self.inObject = None
        self.pageCounter = 1
        # allow None value to be passed in to mean 'give system defaults'
        if invariant is None:
            self.invariant = rl_config.invariant
        else:
            self.invariant = invariant
        self.setCompression(compression)
        self._pdfVersion = pdfVersion
        # signature for creating PDF ID
        sig = self.signature = md5()
        sig.update(b"a reportlab document")
        # invariant documents use a fixed timestamp for reproducible output
        if not self.invariant:
            cat = _getTimeStamp()
        else:
            cat = 946684800.0
        cat = ascii(cat)
        sig.update(bytestr(cat)) # initialize with timestamp digest
        # mapping of internal identifier ("Page001") to PDF objectnumber and generation number (34, 0)
        self.idToObjectNumberAndVersion = {}
        # mapping of internal identifier ("Page001") to PDF object (PDFPage instance)
        self.idToObject = {}
        # internal id to file location
        self.idToOffset = {}
        # number to id
        self.numberToId = {}
        cat = self.Catalog = self._catalog = PDFCatalog()
        pages = self.Pages = PDFPages()
        cat.Pages = pages
        if dummyoutline:
            outlines = PDFOutlines0()
        else:
            outlines = PDFOutlines()
        self.Outlines = self.outline = outlines
        cat.Outlines = outlines
        self.info = PDFInfo()
        self.info.invariant = self.invariant
        #self.Reference(self.Catalog)
        #self.Reference(self.Info)
        self.fontMapping = {}
        #make an empty font dictionary
        DD = PDFDictionary({})
        DD.__Comment__ = "The standard fonts dictionary"
        self.Reference(DD, BasicFonts)
        self.delayedFonts = []
def setCompression(self, onoff):
# XXX: maybe this should also set self.defaultStreamFilters?
self.compression = onoff
def ensureMinPdfVersion(self, *keys):
for k in keys:
self._pdfVersion = max(self._pdfVersion, PDF_SUPPORT_VERSION[k])
def updateSignature(self, thing):
if self._ID: return # but not if its used already!
self.signature.update(bytestr(thing))
def ID(self):
if self._ID:
return self._ID
digest = self.signature.digest()
doc = DummyDoc()
IDs = PDFString(digest,enc='raw').format(doc)
self._ID = (b'\r\n % ReportLab generated PDF document -- digest (http://www.reportlab.com)\r\n ['
+IDs+b' '+IDs+b']\r\n')
return self._ID
def SaveToFile(self, filename, canvas):
if hasattr(getattr(filename, "write",None),'__call__'):
myfile = 0
f = filename
filename = makeFileName(getattr(filename,'name',''))
else :
myfile = 1
filename = makeFileName(filename)
f = open(filename, "wb")
data = self.GetPDFData(canvas)
if isUnicode(data):
data = data.encode('latin1')
f.write(data)
if myfile:
f.close()
import os
if os.name=='mac':
from reportlab.lib.utils import markfilename
markfilename(filename) # do platform specific file junk
if getattr(canvas,'_verbosity',None): print('saved %s' % (filename,))
def GetPDFData(self, canvas):
# realize delayed fonts
for fnt in self.delayedFonts:
fnt.addObjects(self)
# add info stuff to signature
self.info.invariant = self.invariant
self.info.digest(self.signature)
### later: maybe add more info to sig?
# prepare outline
self.Reference(self.Catalog)
self.Reference(self.info)
outline = self.outline
outline.prepare(self, canvas)
return self.format()
def inPage(self):
if self.inObject is not None:
if self.inObject=="page": return
raise ValueError("can't go in page already in object %s" % self.inObject)
self.inObject = "page"
def inForm(self):
#if self.inObject not in ["form", None]:
# raise ValueError("can't go in form already in object %s" % self.inObject)
self.inObject = "form"
def getInternalFontName(self, psfontname):
fm = self.fontMapping
if psfontname in fm:
return fm[psfontname]
else:
try:
# does pdfmetrics know about it? if so, add
fontObj = pdfmetrics.getFont(psfontname)
if fontObj._dynamicFont:
raise PDFError("getInternalFontName(%s) called for a dynamic font" % repr(psfontname))
fontObj.addObjects(self)
return fm[psfontname]
except KeyError:
raise PDFError("Font %s not known!" % repr(psfontname))
def thisPageName(self):
return "Page"+repr(self.pageCounter)
def thisPageRef(self):
return PDFObjectReference(self.thisPageName())
def addPage(self, page):
name = self.thisPageName()
self.Reference(page, name)
self.Pages.addPage(page)
self.pageCounter += 1
self.inObject = None
def addForm(self, name, form):
# XXX should check that name is a legal PDF name
if self.inObject != "form":
self.inForm()
self.Reference(form, xObjectName(name))
self.inObject = None
def annotationName(self, externalname):
return "Annot.%s"%externalname
def addAnnotation(self, name, annotation):
self.Reference(annotation, self.annotationName(name))
def refAnnotation(self, name):
internalname = self.annotationName(name)
return PDFObjectReference(internalname)
def addShading(self, shading):
name = "Sh%d" % self.shadingCounter
self.Reference(shading, name)
self.shadingCounter += 1
return name
def addColor(self,cmyk):
sname = cmyk.spotName
if not sname:
if cmyk.cyan==0 and cmyk.magenta==0 and cmyk.yellow==0:
sname = 'BLACK'
elif cmyk.black==0 and cmyk.magenta==0 and cmyk.yellow==0:
sname = 'CYAN'
elif cmyk.cyan==0 and cmyk.black==0 and cmyk.yellow==0:
sname = 'MAGENTA'
elif cmyk.cyan==0 and cmyk.magenta==0 and cmyk.black==0:
sname = 'YELLOW'
if not sname:
raise ValueError("CMYK colour %r used without a spotName" % cmyk)
else:
cmyk = cmyk.clone(spotName = sname)
name = PDFName(sname)[1:]
if name not in self.idToObject:
sep = PDFSeparationCMYKColor(cmyk).value() #PDFArray([/Separation /name /DeviceCMYK tint_tf])
self.Reference(sep,name)
return name,sname
def setTitle(self, title):
if title is None:
self.info.title = '(anonymous)'
else:
self.info.title = title
def setAuthor(self, author):
#allow resetting to clear it
if author is None:
self.info.author = '(anonymous)'
else:
self.info.author = author
def setSubject(self, subject):
#allow resetting to clear it
if subject is None:
self.info.subject = '(unspecified)'
else:
self.info.subject = subject
def setCreator(self, creator):
#allow resetting to clear it
if creator is None:
self.info.creator = '(unspecified)'
else:
self.info.creator = creator
def setKeywords(self, keywords):
#allow resetting to clear it but ensure it's a string
if keywords is None:
self.info.keywords = ''
else:
self.info.keywords = keywords
def setDateFormatter(self, dateFormatter):
self.info._dateFormatter = dateFormatter
def getAvailableFonts(self):
fontnames = list(self.fontMapping.keys())
from reportlab.pdfbase import _fontdata
for name in _fontdata.standardFonts:
if name not in fontnames:
fontnames.append(name)
fontnames.sort()
return fontnames
def format(self):
self.encrypt.prepare(self)
cat = self.Catalog
info = self.info
self.Reference(self.Catalog)
self.Reference(self.info)
encryptref = None
encryptinfo = self.encrypt.info()
if encryptinfo:
encryptref = self.Reference(encryptinfo)
counter = 0
ids = []
numbertoid = self.numberToId
idToNV = self.idToObjectNumberAndVersion
idToOb = self.idToObject
idToOf = self.idToOffset
counter += 1
if counter in numbertoid:
id = numbertoid[counter]
obj = idToOb[id]
IO = PDFIndirectObject(id, obj)
IOf = IO.format(self)
if not rl_config.invariant and rl_config.pdfComments:
try:
classname = obj.__class__.__name__
except:
classname = ascii(obj)
File.add("%% %s: class %s \r\n" % (ascii(id), classname[:50]))
offset = File.add(IOf)
idToOf[id] = offset
ids.append(id)
else:
done = 1
del self.__accum__
lno = len(numbertoid)
if counter-1!=lno:
raise ValueError("counter %s doesn't match number to id dictionary %s" %(counter, lno))
# now add the xref
xref = PDFCrossReferenceTable()
xref.addsection(0, ids)
xreff = xref.format(self)
xrefoffset = File.add(xreff)
# now add the trailer
trailer = PDFTrailer(
startxref = xrefoffset,
Size = lno+1,
Root = self.Reference(cat),
Info = self.Reference(info),
Encrypt = encryptref,
ID = self.ID(),
)
trailerf = trailer.format(self)
File.add(trailerf)
for ds in getattr(self,'_digiSigs',[]):
ds.sign(File)
# return string format for pdf file
return File.format(self)
def hasForm(self, name):
internalname = xObjectName(name)
return internalname in self.idToObject
def getFormBBox(self, name, boxType="MediaBox"):
internalname = xObjectName(name)
if internalname in self.idToObject:
theform = self.idToObject[internalname]
if hasattr(theform,'_extra_pageCatcher_info'):
return theform._extra_pageCatcher_info[boxType]
if isinstance(theform, PDFFormXObject):
# internally defined form
return theform.BBoxList()
elif isinstance(theform, PDFStream):
# externally defined form
return list(theform.dictionary.dict[boxType].sequence)
else:
raise ValueError("I don't understand the form instance %s" % repr(name))
def getXObjectName(self, name):
return xObjectName(name)
def xobjDict(self, formnames):
D = {}
for name in formnames:
internalname = xObjectName(name)
reference = PDFObjectReference(internalname)
D[internalname] = reference
return PDFDictionary(D)
def Reference(self, obj, name=None):
Object
if name is None and (not iob or obj.__class__ is PDFObjectReference):
return obj
if hasattr(obj, __InternalName__):
intname = obj.__InternalName__
if name is not None and name!=intname:
raise ValueError("attempt to reregister object %s with new name %s" % (
repr(intname), repr(name)))
if intname not in idToObject:
raise ValueError("object of type %s named as %s, but not registered" % (type(obj),ascii(intname)))
return PDFObjectReference(intname)
objectcounter = self.objectcounter = self.objectcounter+1
if name is None:
name = "R"+repr(objectcounter)
if name in idToObject:
other = idToObject[name]
if other!=obj:
raise ValueError("redefining named object: "+repr(name))
return PDFObjectReference(name)
if iob:
obj.__InternalName__ = name
self.idToObjectNumberAndVersion[name] = (objectcounter, 0)
self.numberToId[objectcounter] = name
idToObject[name] = obj
return PDFObjectReference(name)
Fnull = "null"  # literal used for the PDF null object
class PDFText(PDFObject):
    """A PDF string written in hexadecimal form: <48656c6c6f>."""
    def __init__(self, t):
        self.t = t
    def format(self, document):
        """Encrypt (if applicable) and hex-encode the text."""
        payload = self.t
        if isUnicode(payload):
            payload = payload.encode('utf-8')
        encoded = document.encrypt.encode(payload)
        return b"<" + binascii.hexlify(encoded) + b">"
    def __str__(self):
        # format against a dummy document (no encryption)
        return self.format(DummyDoc())
def PDFnumber(n):
    # identity: Python numbers format as themselves in PDF output
    return n
import re
_re_cleanparens=re.compile('[^()]')
del re
def _isbalanced(s):
s = _re_cleanparens.sub('',s)
n = 0
for c in s:
if c=='(': n+=1
else:
n -= 1
if n<0: return 0
return not n and 1 or 0
def _checkPdfdoc(utext):
    """Return 1 if utext is representable in the 'pdfdoc' codec, else 0."""
    try:
        utext.encode('pdfdoc')
    except UnicodeEncodeError:
        return 0
    return 1
class PDFString(PDFObject):
    """A PDF string object.

    Bytes are auto-detected as utf16/utf8 and re-encoded as pdfdoc when
    representable, else BOM-prefixed UTF-16BE; unicode likewise.  escape is
    a bitmask: 1=escape specials, 2=keep literal newlines, 4=unescape
    balanced parentheses.  enc may be 'auto' or 'raw' (no re-encoding).
    """
    def __init__(self, s, escape=1, enc='auto'):
        if isinstance(s,PDFString):
            # copy-construct from another PDFString
            self.s = s.s
            self.escape = s.escape
            self.enc = s.enc
        else:
            self.s = s
            self.escape = escape
            self.enc = enc
    def format(self, document):
        s = self.s
        enc = getattr(self,'enc','auto')
        if (isBytes(s)):
            # bugfix: was "enc is 'auto'" -- identity test against a string
            # literal is not guaranteed and raises SyntaxWarning on 3.8+
            if enc == 'auto':
                try:
                    u = s.decode(s.startswith(codecs.BOM_UTF16_BE) and 'utf16' or 'utf8')
                    if _checkPdfdoc(u):
                        s = u.encode('pdfdoc')
                    else:
                        s = codecs.BOM_UTF16_BE+u.encode('utf_16_be')
                except:
                    # not decodable as utf16/utf8: accept raw pdfdoc bytes only
                    try:
                        s.decode('pdfdoc')
                    except:
                        stderr.write('Error in %s' % (repr(s),))
                        raise
        elif isUnicode(s):
            if enc == 'auto':
                if _checkPdfdoc(s):
                    s = s.encode('pdfdoc')
                else:
                    s = codecs.BOM_UTF16_BE+s.encode('utf_16_be')
            else:
                s = codecs.BOM_UTF16_BE+s.encode('utf_16_be')
        else:
            raise ValueError('PDFString argument must be str/unicode not %s' % type(s))
        escape = getattr(self,'escape',1)
        if not isinstance(document.encrypt,NoEncryption):
            # encrypted output must always be escaped
            s = document.encrypt.encode(s)
            escape = 1
        if escape:
            try:
                es = "(%s)" % escapePDF(s)
            except:
                raise ValueError("cannot escape %s %s" % (s, repr(s)))
            if escape&2:
                es = es.replace('\\012','\n')
            if escape&4 and _isbalanced(es):
                es = es.replace('\\(','(').replace('\\)',')')
            return pdfdocEnc(es)
        else:
            return b'(' + s + b')'
    def __str__(self):
        return "(%s)" % escapePDF(self.s)
def PDFName(data, lo=chr(0x21), hi=chr(0x7e)):
    """Convert a string to a PDF name literal ("/Name").

    Characters outside the printable range lo..hi, and PDF delimiter
    characters, are hex-escaped as #xx.
    """
    specials = "%()<>{}[]#"
    out = []
    for ch in data:
        if ch < lo or ch > hi or ch in specials:
            out.append("#" + hex(ord(ch))[2:])
        else:
            out.append(ch)
    return "/" + "".join(out)
class PDFDictionary(PDFObject):
    """A PDF dictionary << /Key value ... >>.

    Keys are plain strings (converted with PDFName at output time);
    values are any formattable objects.
    """
    multiline = True
    def __init__(self, dict=None):
        # note: parameter shadows the builtin "dict"; kept for API compatibility
        if dict is None:
            self.dict = {}
        else:
            self.dict = dict.copy()
    def __setitem__(self, name, value):
        self.dict[name] = value
    def __getitem__(self, a):
        return self.dict[a]
    def __contains__(self,a):
        return a in self.dict
    def Reference(self, name, document):
        # replace the named entry, in place, with an indirect reference to it
        self.dict[name] = document.Reference(self.dict[name])
    def format(self, document,IND=b'\r\n '):
        dict = self.dict
        try:
            keys = list(dict.keys())
        except:
            print(ascii(dict))
            raise
        # OrderedDict keeps insertion order; plain dicts are sorted for stable output
        if not isinstance(dict,OrderedDict): keys.sort()
        L = [(format(PDFName(k),document)+b" "+format(dict[k],document)) for k in keys]
        if self.multiline and rl_config.pdfMultiLine:
            L = IND.join(L)
        else:
            # single-line mode: still break after every 6 entries to keep lines short
            t=L.insert
            for i in reversed(range(6, len(L), 6)):
                t(i,b'\r\n ')
            L = b" ".join(L)
        return b'<< '+L+b' >>'
    def copy(self):
        # shallow copy (shares the value objects)
        return PDFDictionary(self.dict)
    def normalize(self):
        # strip the leading '/' from any keys that carry one
        D = self.dict
        K = [k for k in D.keys() if k.startswith('/')]
        for k in K:
            D[k[1:]] = D.pop(k)
class checkPDFNames:
    """Validator callable: accept a value only if, converted to a PDF name,
    it is one of the names given at construction; otherwise return None."""
    def __init__(self, *names):
        self.names = [PDFName(n) for n in names]
    def __call__(self, value):
        candidate = value if value.startswith('/') else PDFName(value)
        return candidate if candidate in self.names else None
def checkPDFBoolean(value):
    """Validator: pass 'true'/'false' through unchanged, anything else -> None."""
    return value if value in ('true', 'false') else None
class CheckedPDFDictionary(PDFDictionary):
    """PDFDictionary that runs every assignment through a validator table.

    validate maps key name -> callable returning the cleaned value, or None
    to reject it.
    """
    validate = {}
    def __init__(self, dict=None, validate=None):
        PDFDictionary.__init__(self, dict)
        if validate:
            self.validate = validate
    def __setitem__(self, name, value):
        try:
            checker = self.validate[name]
        except KeyError:
            raise ValueError('invalid key, %r' % name)
        cvalue = checker(value)
        if cvalue is None:
            raise ValueError('Bad value %r for key %r' % (value, name))
        PDFDictionary.__setitem__(self, name, cvalue)
class ViewerPreferencesPDFDictionary(CheckedPDFDictionary):
    """/ViewerPreferences dictionary: only the keys below are accepted,
    each value checked by the associated validator."""
    validate=dict(
                HideToolbar=checkPDFBoolean,
                HideMenubar=checkPDFBoolean,
                HideWindowUI=checkPDFBoolean,
                FitWindow=checkPDFBoolean,
                CenterWindow=checkPDFBoolean,
                DisplayDocTitle=checkPDFBoolean,
                NonFullScreenPageMode=checkPDFNames(*'UseNone UseOutlines UseThumbs UseOC'.split()),
                Direction=checkPDFNames(*'L2R R2L'.split()),
                ViewArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
                ViewClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
                PrintArea=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
                PrintClip=checkPDFNames(*'MediaBox CropBox BleedBox TrimBox ArtBox'.split()),
                PrintScaling=checkPDFNames(*'None AppDefault'.split()),
                )
class PDFStreamFilterZCompress:
    """Stream filter pair implementing /FlateDecode (zlib) compression."""
    pdfname = "FlateDecode"
    def encode(self, text):
        """zlib-compress text (unicode is utf8-encoded first)."""
        from reportlab.lib.utils import import_zlib
        zlib = import_zlib()
        if not zlib:
            raise ImportError("cannot z-compress zlib unavailable")
        if isUnicode(text):
            text = text.encode('utf8')
        return zlib.compress(text)
    def decode(self, encoded):
        """Inverse of encode: zlib-decompress."""
        from reportlab.lib.utils import import_zlib
        zlib = import_zlib()
        if not zlib:
            raise ImportError("cannot z-decompress zlib unavailable")
        return zlib.decompress(encoded)

# shared singleton used when building stream filter lists
PDFZCompress = PDFStreamFilterZCompress()
class PDFStreamFilterBase85Encode:
    """Stream filter pair implementing /ASCII85Decode encoding."""
    pdfname = "ASCII85Decode"
    def encode(self, text):
        """Ascii-base-85 encode text, line-wrapping when rl_config.wrapA85 is set."""
        from reportlab.pdfbase.pdfutils import _wrap
        encoded = asciiBase85Encode(text)
        if rl_config.wrapA85:
            encoded = _wrap(encoded)
        return encoded
    def decode(self, text):
        """Inverse of encode."""
        return asciiBase85Decode(text)

# shared singleton used when building stream filter lists
PDFBase85Encode = PDFStreamFilterBase85Encode()
class PDFStream(PDFObject):
    """A PDF stream object: dictionary + raw content, with optional
    encoding filters applied at format time."""
    # bugfix(corruption): the constructor line was truncated ("ters=None):");
    # restored the three parameters the body actually assigns
    def __init__(self, dictionary=None, content=None, filters=None):
        if dictionary is None:
            dictionary = PDFDictionary()
        self.dictionary = dictionary
        self.content = content
        self.filters = filters
    def format(self, document):
        dictionary = self.dictionary
        # work on a copy so Filter/Length entries don't stick to self
        dictionary = PDFDictionary(dictionary.dict.copy())
        content = self.content
        filters = self.filters
        if self.content is None:
            raise ValueError("stream content not set")
        if filters is None:
            filters = document.defaultStreamFilters
        # skip filtering when a /Filter entry was supplied explicitly
        if filters is not None and "Filter" not in dictionary.dict:
            # apply filters in reverse order listed
            rf = list(filters)
            rf.reverse()
            fnames = []
            for f in rf:
                content = f.encode(content)
                fnames.insert(0, PDFName(f.pdfname))
            dictionary["Filter"] = PDFArray(fnames)
        # "stream encoding is done after all filters have been applied"
        content = document.encrypt.encode(content)
        fc = format(content, document)
        dictionary["Length"] = len(content)
        fd = format(dictionary, document)
        return fd+b'\r\nstream\r\n'+fc+b'endstream\r\n'
def teststream(content=None):
    """Build a small compressed PDFStream for testing.

    Defaults to the module-level teststreamcontent.
    """
    if content is None:
        content = teststreamcontent
    # normalise line endings to \n\r and ensure a trailing one
    normalized = content.strip().replace("\n", '\n\r') + '\n\r'
    filter_chain = [PDFBase85Encode, PDFZCompress] if rl_config.useA85 else [PDFZCompress]
    S = PDFStream(content=normalized, filters=filter_chain)
    # nothing else needed...
    S.__Comment__ = "test stream"
    return S
# fixed page-stream content used by teststream() when no content is supplied
teststreamcontent = """
1 0 0 1 0 0 cm BT /F9 12 Tf 14.4 TL ET
1.00 0.00 1.00 rg
n 72.00 72.00 432.00 648.00 re B*
"""
class PDFArray(PDFObject):
    """A PDF array [ e1 e2 ... ]; elements are any formattable objects."""
    multiline = True
    def __init__(self, sequence):
        # take a copy so later mutation of the argument cannot affect us
        self.sequence = list(sequence)
    def References(self, document):
        # convert all elements to indirect references, in place
        self.sequence = list(map(document.Reference, self.sequence))
    def format(self, document, IND=b'\r\n '):
        L = [format(e, document) for e in self.sequence]
        if self.multiline and rl_config.pdfMultiLine:
            L = IND.join(L)
        else:
            n=len(L)
            if n>10:
                # break up every 10 elements anyway
                t=L.insert
                for i in reversed(range(10, n, 10)):
                    t(i,b'\r\n ')
                L = b' '.join(L)
            else:
                L = b' '.join(L)
        return b'[ ' + L + b' ]'
class PDFArrayCompact(PDFArray):
    # array variant that never uses the multi-line layout
    multiline=False
class PDFIndirectObject(PDFObject):
    """Wrapper that serializes its content as "n v obj ... endobj"."""
    __RefOnly__ = 1
    def __init__(self, name, content):
        # name is the internal id registered with the document
        self.name = name
        self.content = content
    def format(self, document):
        name = self.name
        n, v = document.idToObjectNumberAndVersion[name]
        # set encryption parameters
        document.encrypt.register(n, v)
        fcontent = format(self.content, document, toplevel=1) # yes this is at top level
        return (pdfdocEnc("%s %s obj\r\n"%(n,v))
                +fcontent+ (b'' if fcontent.endswith(b'\r\n') else b'\r\n')
                +b'endobj\r\n')
class PDFObjectReference(PDFObject):
    """An indirect reference ("n v R") resolved by internal name at format time."""
    def __init__(self, name):
        self.name = name
    def format(self, document):
        try:
            numbering = document.idToObjectNumberAndVersion[self.name]
            return pdfdocEnc("%s %s R" % numbering)
        except:
            # the named object was never registered before final output
            raise KeyError("forward reference to %s not resolved upon final formatting" % repr(self.name))
class PDFFile(PDFObject):
    ### just accumulates strings: keeps track of current offset
    def __init__(self,pdfVersion=PDF_VERSION_DEFAULT):
        self.strings = []
        self.write = self.strings.append
        self.offset = 0
        ### chapter 5
        # The header is the %PDF-x.y line followed by a comment line of
        # high-bit bytes, so transfer tools treat the file as binary and do
        # not mangle it with text/Unicode conversions.
        self.add((pdfdocEnc("%%PDF-%s.%s" % pdfVersion) +
                  b'\r\n%\223\214\213\236 ReportLab Generated PDF document http://www.reportlab.com\r\n'
                  ))
    def closeOrReset(self):
        # no external resources to release
        pass
    def add(self, s):
        # append s (pdfdoc-encoded) and return the offset at which it begins
        s = pdfdocEnc(s)
        result = self.offset
        self.offset = result+len(s)
        self.write(s)
        return result
    def format(self, document):
        # concatenate everything accumulated so far
        return b''.join(self.strings)
XREFFMT = '%0.10d %0.5d n'  # xref entry: 10-digit offset, 5-digit generation, in-use flag (20 bytes with \r\n)
class PDFCrossReferenceSubsection(PDFObject):
    """One subsection of the cross-reference table: a contiguous run of
    object numbers starting at firstentrynumber, one entry per id."""
    def __init__(self, firstentrynumber, idsequence):
        self.firstentrynumber = firstentrynumber
        self.idsequence = idsequence
    def format(self, document):
        """Format this subsection.

        The document must already know the object number, version and file
        offset of every id.  Raises ValueError on number collisions or
        out-of-range object numbers."""
        firstentrynumber = self.firstentrynumber
        idsequence = self.idsequence
        entries = list(idsequence)
        nentries = len(idsequence)
        # special case: object number 0 is always free
        taken = {}
        if firstentrynumber==0:
            taken[0] = "standard free entry"
            nentries = nentries+1
            entries.insert(0, "0000000000 65535 f")
        idToNV = document.idToObjectNumberAndVersion
        idToOffset = document.idToOffset
        lastentrynumber = firstentrynumber+nentries-1
        for id in idsequence:
            (num, version) = idToNV[id]
            if num in taken:
                # bugfix: report the previous claimant -- taken is keyed by
                # object number, so taken[id] raised a spurious KeyError here
                raise ValueError("object number collision %s %s %s" % (num, repr(id), repr(taken[num])))
            if num>lastentrynumber or num<firstentrynumber:
                raise ValueError("object number %s not in range %s..%s" % (num, firstentrynumber, lastentrynumber))
            # compute position in list
            rnum = num-firstentrynumber
            taken[num] = id
            offset = idToOffset[id]
            entries[num] = XREFFMT % (offset, version)
        # now add the initial line
        firstline = "%s %s" % (firstentrynumber, nentries)
        entries.insert(0, firstline)
        # make sure it ends with \r\n
        entries.append("")
        return pdfdocEnc('\r\n'.join(entries))
class PDFCrossReferenceTable(PDFObject):
    """The xref table: the "xref" keyword plus one or more subsections."""
    def __init__(self):
        self.sections = []
    def addsection(self, firstentry, ids):
        """Append a subsection covering ids starting at object number firstentry."""
        self.sections.append(PDFCrossReferenceSubsection(firstentry, ids))
    def format(self, document):
        if not self.sections:
            raise ValueError("no crossref sections")
        parts = [b"xref\r\n"]
        parts.extend(format(section, document) for section in self.sections)
        return pdfdocEnc(b''.join(parts))
class PDFTrailer(PDFObject):
    """The file trailer: trailer dictionary, startxref offset, %%EOF marker."""
    def __init__(self, startxref, Size=None, Prev=None, Root=None, Info=None, ID=None, Encrypt=None):
        self.startxref = startxref
        # Size and Root are mandatory in a PDF trailer dictionary
        if Size is None or Root is None:
            raise ValueError("Size and Root keys required")
        self.dict = PDFDictionary()
        for key, value in (("Size", Size), ("Prev", Prev), ("Root", Root),
                           ("Info", Info), ("ID", ID), ("Encrypt", Encrypt)):
            if value is not None:
                self.dict[key] = value
    def format(self, document):
        fdict = format(self.dict, document)
        return b''.join([
            b'trailer\r\n',
            fdict,
            b'\r\nstartxref\r\n',
            pdfdocEnc(str(self.startxref)),
            b'\r\n%%EOF\r\n',
            ])
#### XXXX skipping incremental update,
#### encryption
#### chapter 6, doc structure
class PDFCatalog(PDFObject):
    """Document root (/Catalog).

    Also the base class for the dictionary-with-defaults machinery reused by
    PDFPages/PDFPage/PDFPageLabels: __Defaults__ entries may be overridden by
    instance attributes, __NoDefault__ names are included only when set, and
    __Refs__ names are converted to indirect references at format time.
    """
    __Comment__ = "Document Root"
    __RefOnly__ = 1
    # to override, set as attributes
    __Defaults__ = {"Type": PDFName("Catalog"),
                    "PageMode": PDFName("UseNone"),
                    "Lang": None,
                    }
    __NoDefault__ = """
        Dests Outlines Pages Threads AcroForm Names OpenAction PageMode URI
        ViewerPreferences PageLabels PageLayout JavaScript StructTreeRoot SpiderInfo""".split()
    __Refs__ = __NoDefault__ # make these all into references, if present
    def format(self, document):
        # let subclasses fix up attributes first
        self.check_format(document)
        defaults = self.__Defaults__
        Refs = self.__Refs__
        D = {}
        for k,v in defaults.items():
            v = getattr(self,k,v)
            if v is not None:
                D[k] = v
        for k in self.__NoDefault__:
            v = getattr(self,k,None)
            if v is not None:
                D[k] = v
        # force objects to be references where required
        for k in Refs:
            if k in D:
                #print"k is", k, "value", D[k]
                D[k] = document.Reference(D[k])
        dict = PDFDictionary(D)
        return format(dict, document)
    def showOutline(self):
        self.setPageMode("UseOutlines")
    def showFullScreen(self):
        self.setPageMode("FullScreen")
    def setPageLayout(self,layout):
        if layout:
            self.PageLayout = PDFName(layout)
    def setPageMode(self,mode):
        if mode:
            self.PageMode = PDFName(mode)
    def check_format(self, document):
        # hook for subclasses; base catalog needs no fix-up
        pass
class PDFPages(PDFCatalog):
    """The page tree (/Pages) object: collects pages and, at format time,
    publishes them as the /Kids reference array with /Count."""
    __Comment__ = "page tree"
    __RefOnly__ = 1
    # note: could implement page attribute inheritance...
    __Defaults__ = {"Type": PDFName("Pages"),
                    }
    __NoDefault__ = "Kids Count Parent".split()
    __Refs__ = ["Parent"]
    def __init__(self):
        self.pages = []
    def __getitem__(self, item):
        return self.pages[item]
    def addPage(self, page):
        self.pages.append(page)
    def check_format(self, document):
        # convert all pages to page references
        pages = self.pages
        kids = PDFArray(pages)
        # make sure all pages are references
        kids.References(document)
        self.Kids = kids
        self.Count = len(pages)
class PDFPage(PDFCatalog):
    """One page (/Page) object.

    check_format derives MediaBox/Contents/Resources/Parent from the simpler
    stream/XObjects/compression attributes unless the caller set them
    explicitly (or suppressed everything via Override_default_compilation).
    """
    __Comment__ = "Page dictionary"
    # all PDF attributes can be set explicitly
    # if this flag is set, the "usual" behavior will be suppressed
    Override_default_compilation = 0
    __RefOnly__ = 1
    __Defaults__ = {"Type": PDFName("Page"),
                   # "Parent": PDFObjectReference(Pages), # no! use document.Pages
                    }
    __NoDefault__ = """Parent
        MediaBox Resources Contents CropBox Rotate Thumb Annots B Dur Hid Trans AA
        PieceInfo LastModified SeparationInfo ArtBox TrimBox BleedBox ID PZ
        Trans""".split()
    __Refs__ = """Contents Parent ID""".split()
    # default page size (A4-ish portrait, in points)
    pagewidth = 595
    pageheight = 842
    stream = None
    hasImages = 0
    compression = 0
    XObjects = None
    # NOTE(review): class-level mutable dicts are shared across instances
    # unless rebound -- presumably rebound per page by the canvas; confirm
    _colorsUsed = {}
    _shadingsUsed = {}
    Trans = None
    # transitionstring?
    # xobjects?
    # annotations
    def __init__(self):
        # set all nodefaults to None
        for name in self.__NoDefault__:
            setattr(self, name, None)
    def setCompression(self, onoff):
        self.compression = onoff
    def setStream(self, code):
        # accept a string or a sequence of lines
        if self.Override_default_compilation:
            raise ValueError("overridden! must set stream explicitly")
        if isSeq(code):
            code = '\r\n'.join(code)+'\r\n'
        self.stream = code
    def setPageTransition(self, tranDict):
        self.Trans = PDFDictionary(tranDict)
    def check_format(self, document):
        # set up parameters unless usual behaviour is suppressed
        if self.Override_default_compilation:
            return
        # swap width/height for 90/270 degree rotation
        self.MediaBox = self.MediaBox or PDFArray(self.Rotate in (90,270) and [0,0,self.pageheight,self.pagewidth] or [0, 0, self.pagewidth, self.pageheight])
        if not self.Annots:
            self.Annots = None
        else:
            #print self.Annots
            #raise ValueError("annotations not reimplemented yet")
            if not isinstance(self.Annots,PDFObject):
                self.Annots = PDFArray(self.Annots)
        if not self.Contents:
            stream = self.stream
            if not stream:
                self.Contents = teststream()
            else:
                S = PDFStream()
                if self.compression:
                    S.filters = rl_config.useA85 and [PDFBase85Encode, PDFZCompress] or [PDFZCompress]
                S.content = stream
                S.__Comment__ = "page stream"
                self.Contents = S
        if not self.Resources:
            resources = PDFResourceDictionary()
            # fonts!
            resources.basicFonts()
            if self.hasImages:
                resources.allProcs()
            else:
                resources.basicProcs()
            if self.XObjects:
                #print "XObjects", self.XObjects.dict
                resources.XObject = self.XObjects
            # NOTE(review): ExtGState is not in __NoDefault__, so __init__
            # never initialises it -- presumably assigned externally before
            # formatting; confirm against the canvas code
            if self.ExtGState:
                resources.ExtGState = self.ExtGState
            # NOTE(review): reads _shadingUsed while the class attribute
            # above is _shadingsUsed -- confirm which name callers set
            resources.setShading(self._shadingUsed)
            resources.setColorSpace(self._colorsUsed)
            self.Resources = resources
        if not self.Parent:
            pages = document.Pages
            self.Parent = document.Reference(pages)
#this code contributed by Christian Jacobs <cljacobsen@gmail.com>
class DuplicatePageLabelPage(Exception):
    # raised while sorting page labels when two entries share a page number
    # (see PDFPageLabel.__lt__ and rl_config.errorOnDuplicatePageLabelPage)
    pass
class PDFPageLabels(PDFCatalog):
    """/PageLabels number tree: maps page indices to PDFPageLabel entries
    through the flattened /Nums array."""
    __comment__ = None
    __RefOnly__ = 0
    __Defaults__ = {}
    __NoDefault__ = ["Nums"]
    __Refs__ = []
    def __init__(self):
        self.labels = []
    def addPageLabel(self, page, label):
        """Add a label for the (zero-based) page number."""
        self.labels.append((page, label))
    def format(self, document):
        try:
            self.labels.sort()
        except DuplicatePageLabelPage:
            # PDFPageLabel.__lt__ raises when two labels share a page number
            tmp = sorted([x[0] for x in self.labels])
            annotateException('\n\n!!!!! Duplicate PageLabel seen for pages %r' % list(set([x for x in tmp if tmp.count(x)>1])))
        # flatten [(page, label), ...] into the /Nums [page label page label ...]
        labels = []
        for page, label in self.labels:
            labels.append(page)
            labels.append(label)
        self.Nums = PDFArray(labels) #PDFArray makes a copy with list()
        return PDFCatalog.format(self, document)
class PDFPageLabel(PDFCatalog):
    """One page-label range entry: numbering style (S), start number (St)
    and text prefix (P)."""
    __Comment__ = None
    __RefOnly__ = 0
    __Defaults__ = {}
    __NoDefault__ = "Type S P St".split()
    # note: the membership test in __init__ is a *substring* test on this string
    __convertible__ = 'ARABIC ROMAN_UPPER ROMAN_LOWER LETTERS_UPPER LETTERS_LOWER'
    ARABIC = 'D'
    ROMAN_UPPER = 'R'
    ROMAN_LOWER = 'r'
    LETTERS_UPPER = 'A'
    LETTERS_LOWER = 'a'
    def __init__(self, style=None, start=None, prefix=None):
        if style:
            key = style.upper()
            if key in self.__convertible__:
                # symbolic name -> single-letter PDF style code
                style = getattr(self, key)
            self.S = PDFName(style)
        if start:
            self.St = PDFnumber(start)
        if prefix:
            self.P = PDFString(prefix)
    def __lt__(self, oth):
        # only reached when two labels share a page number: either an error
        # or a tie (stable sort keeps insertion order)
        if rl_config.errorOnDuplicatePageLabelPage:
            raise DuplicatePageLabelPage()
        return False
#ends code contributed by Christian Jacobs <cljacobsen@gmail.com>
def testpage(document):
    # build a single test page using the default test stream and register it
    P = PDFPage()
    P.Contents = teststream()
    pages = document.Pages
    P.Parent = document.Reference(pages)
    P.MediaBox = PDFArray([0, 0, 595, 841])
    resources = PDFResourceDictionary()
    resources.allProcs() # enable all procsets
    resources.basicFonts()
    P.Resources = resources
    pages.addPage(P)
#### DUMMY OUTLINES IMPLEMENTATION FOR testing
# static empty /Outlines dictionary used by PDFOutlines0 for testing
DUMMYOUTLINE = """
<<
/Count
0
/Type
/Outlines
>>"""
class PDFOutlines0(PDFObject):
    # trivial outlines object emitting the static dummy dictionary;
    # used when PDFDocument is constructed with dummyoutline=1
    __Comment__ = "TEST OUTLINE!"
    text = DUMMYOUTLINE.replace("\n", '\r\n')
    __RefOnly__ = 1
    def format(self, document):
        return pdfdocEnc(self.text)
class OutlineEntryObject(PDFObject):
    """One entry (bookmark) in the document outline tree."""
    Title = Dest = Parent = Prev = Next = First = Last = Count = None
    def format(self, document):
        D = {
            "Title": PDFString(self.Title),
            "Parent": self.Parent,
            "Dest": self.Dest,
        }
        # optional linkage/count entries are emitted only when set
        for key in ("Prev", "Next", "First", "Last", "Count"):
            value = getattr(self, key)
            if value is not None:
                D[key] = value
        return PDFDictionary(D).format(document)
class PDFOutlines(PDFObject):
    """Builds the document outline (bookmark) tree.

    Entries arrive incrementally via addOutlineEntry (or wholesale via
    setDestinations/setNames); prepare() converts the accumulated structure
    into linked OutlineEntryObject records before final formatting.
    """
    # first attempt, many possible features missing.
    #no init for now
    mydestinations = ready = None
    counter = 0
    currentlevel = -1 # ie, no levels yet
    def __init__(self):
        self.destinationnamestotitles = {}
        self.destinationstotitles = {}
        self.levelstack = []
        self.buildtree = []
        self.closedict = {} # dictionary of "closed" destinations in the outline
    def addOutlineEntry(self, destinationname, level=0, title=None, closed=None):
        """Add an entry at the given nesting level.

        A destinationname of None (with level 0) closes the tree."""
        if destinationname is None and level!=0:
            raise ValueError("close tree must have level of 0")
        if not isinstance(level,int): raise ValueError("level must be integer, got %s" % type(level))
        if level<0: raise ValueError("negative levels not allowed")
        if title is None: title = destinationname
        currentlevel = self.currentlevel
        stack = self.levelstack
        tree = self.buildtree
        # adjust currentlevel and stack to match level
        if level>currentlevel:
            if level>currentlevel+1:
                raise ValueError("can't jump from outline level %s to level %s, need intermediates (destinationname=%r, title=%r)" %(currentlevel, level, destinationname, title))
            level = currentlevel = currentlevel+1
            stack.append([])
        while level<currentlevel:
            # close off the deeper level: attach it as a subsection of the
            # last entry of the enclosing level
            current = stack[-1]
            del stack[-1]
            previous = stack[-1]
            lastinprevious = previous[-1]
            if isinstance(lastinprevious,tuple):
                (name, sectionlist) = lastinprevious
                raise ValueError("cannot reset existing sections: " + repr(lastinprevious))
            else:
                name = lastinprevious
                sectionlist = current
                previous[-1] = (name, sectionlist)
            currentlevel = currentlevel-1
        if destinationname is None: return
        stack[-1].append(destinationname)
        self.destinationnamestotitles[destinationname] = title
        if closed: self.closedict[destinationname] = 1
        self.currentlevel = level
    def setDestinations(self, destinationtree):
        self.mydestinations = destinationtree
    def format(self, document):
        # emit the root /Outlines dictionary (First/Last/Count set by prepare)
        D = {}
        D["Type"] = PDFName("Outlines")
        c = self.count
        D["Count"] = c
        if c!=0:
            D["First"] = self.first
            D["Last"] = self.last
        PD = PDFDictionary(D)
        return PD.format(document)
    def setNames(self, canvas, *nametree):
        desttree = self.translateNames(canvas, nametree)
        self.setDestinations(desttree)
    def setNameList(self, canvas, nametree):
        """Explicit list so I don't need to do in the caller"""
        desttree = self.translateNames(canvas, nametree)
        self.setDestinations(desttree)
    def translateNames(self, canvas, object):
        """Recursively translate tree of bookmark names into tree of destinations."""
        destinationnamestotitles = self.destinationnamestotitles
        destinationstotitles = self.destinationstotitles
        closedict = self.closedict
        if isStr(object):
            if not isUnicode(object): object = object.decode('utf8')
            destination = canvas._bookmarkReference(object)
            title = object
            if object in destinationnamestotitles:
                title = destinationnamestotitles[object]
            else:
                destinationnamestotitles[title] = title
            destinationstotitles[destination] = title
            if object in closedict:
                # mark the destination closed as well as the name
                closedict[destination] = 1
            return {object: canvas._bookmarkReference(object)}
        if isSeq(object):
            L = []
            for o in object:
                L.append(self.translateNames(canvas, o))
            if isinstance(object,tuple):
                return tuple(L)
            return L
        raise TypeError("in outline, destination name must be string: got a %s"%type(object))
    def prepare(self, document, canvas):
        """Prepare all data structures required for save operation (create related objects)."""
        if self.mydestinations is None:
            if self.levelstack:
                # close the tree and translate the accumulated names
                self.addOutlineEntry(None)
                destnames = self.levelstack[0]
                self.mydestinations = self.translateNames(canvas, destnames)
            else:
                # no outline at all
                self.first = self.last = None
                self.count = 0
                self.ready = 1
                return
        self.count = count(self.mydestinations, self.closedict)
        (self.first, self.last) = self.maketree(document, self.mydestinations, toplevel=1)
        self.ready = 1
    def maketree(self, document, destinationtree, Parent=None, toplevel=0):
        """Recursively build the linked OutlineEntryObject records; returns
        (first, last) references for the level."""
        if toplevel:
            levelname = "Outline"
            Parent = document.Reference(document.Outlines)
        else:
            self.count = self.count+1
            levelname = "Outline.%s" % self.count
            if Parent is None:
                raise ValueError("non-top level outline elt parent must be specified")
        if not isSeq(destinationtree):
            raise ValueError("destinationtree must be list or tuple, got %s")
        nelts = len(destinationtree)
        lastindex = nelts-1
        lastelt = firstref = lastref = None
        destinationnamestotitles = self.destinationnamestotitles
        closedict = self.closedict
        for index in range(nelts):
            eltobj = OutlineEntryObject()
            eltobj.Parent = Parent
            eltname = "%s.%s" % (levelname, index)
            eltref = document.Reference(eltobj, eltname)
            # doubly link the siblings
            if lastelt is not None:
                lastelt.Next = eltref
                eltobj.Prev = lastref
            if firstref is None:
                firstref = eltref
            lastref = eltref
            lastelt = eltobj
            lastref = eltref
            elt = destinationtree[index]
            if isinstance(elt,dict):
                # leaf: {title-name: destination}
                leafdict = elt
            elif isinstance(elt,tuple):
                # interior node: (leafdict, subsections)
                try:
                    (leafdict, subsections) = elt
                except:
                    raise ValueError("destination tree elt tuple should have two elts, got %s" % len(elt))
                eltobj.Count = count(subsections, closedict)
                (eltobj.First, eltobj.Last) = self.maketree(document, subsections, eltref)
            else:
                raise ValueError("destination tree elt should be dict or tuple, got %s" % type(elt))
            try:
                [(Title, Dest)] = list(leafdict.items())
            except:
                raise ValueError("bad outline leaf dictionary, should have one entry "+bytestr(elt))
            eltobj.Title = destinationnamestotitles[Title]
            eltobj.Dest = Dest
            if isinstance(elt,tuple) and Dest in closedict:
                # closed subsections carry a negative count by convention
                eltobj.Count = -eltobj.Count
        return (firstref, lastref)
def count(tree, closedict=None):
    """Count the outline nodes in tree (a nested dict/list/tuple structure).

    A (leafdict, subsections) tuple whose destination appears in closedict
    is collapsed and counts as a single node; otherwise a sequence counts
    the sum of its children and anything else (a leaf dict) counts 1.
    """
    # (removed an unused legacy "from operator import add")
    if isinstance(tree,tuple):
        (leafdict, subsections) = tree
        [(Title, Dest)] = list(leafdict.items())
        if closedict and Dest in closedict:
            # closed section: don't count the children
            return 1
    if isSeq(tree):
        return sum(count(e, closedict) for e in tree)
    return 1
class PDFInfo(PDFObject):
    """The PDF document information dictionary (/Info entry of the trailer).

    Holds title/author/subject/keywords plus producer/creator strings.
    """
    producer = "ReportLab PDF Library - www.reportlab.com"
    creator = "ReportLab PDF Library - www.reportlab.com"
    title = "untitled"
    author = "anonymous"
    subject = "unspecified"
    keywords = ""
    _dateFormatter = None
    def __init__(self):
        self.invariant = rl_config.invariant
        self.trapped = 'False'
    def digest(self, md5object):
        """Fold the identifying text fields into *md5object* (document ID)."""
        for x in (self.title, self.author, self.subject, self.keywords):
            md5object.update(bytestr(x))
    def format(self, document):
        """Serialise as a PDFDictionary."""
        D = {}
        D["Title"] = PDFString(self.title)
        D["Author"] = PDFString(self.author)
        D['ModDate'] = D["CreationDate"] = PDFDate(invariant=self.invariant,dateFormatter=self._dateFormatter)
        D["Producer"] = PDFString(self.producer)
        D["Creator"] = PDFString(self.creator)
        D["Subject"] = PDFString(self.subject)
        D["Keywords"] = PDFString(self.keywords)
        D["Trapped"] = PDFName(self.trapped)
        PD = PDFDictionary(D)
        return PD.format(document)
    def copy(self):
        """Shallow copy of this PDFInfo.

        BUG FIX: previously used ``self.__klass__()`` -- that attribute
        never exists, so copy() always raised AttributeError; ``__class__``
        is the intended constructor.
        """
        thing = self.__class__()
        for k, v in self.__dict__.items():
            setattr(thing, k, v)
        return thing
class Annotation(PDFObject):
    """Abstract base for PDF annotation objects.

    Concrete subclasses override Dict() to supply the annotation dictionary.
    """
    # ("Type", "/Annot") is always present unless a caller overrides it.
    defaults = [("Type", PDFName("Annot"),)]
    required = ("Type", "Rect", "Contents", "Subtype")
    permitted = required+(
        "Border", "C", "T", "M", "F", "H", "BS", "AA", "AS", "Popup", "P", "AP")
    def cvtdict(self, d, escape=1):
        """Convert plain-python entries of *d* to their PDF object form."""
        rect = d["Rect"]
        if not isStr(rect):
            d["Rect"] = PDFArray(rect)
        d["Contents"] = PDFString(d["Contents"], escape)
        return d
    def AnnotationDict(self, **kw):
        """Merge defaults with *kw*, validate, and wrap in a PDFDictionary."""
        escape = kw.pop('escape', 1)
        d = dict(self.defaults)
        d.update(kw)
        for name in self.required:
            if name not in d:
                raise ValueError("keyword argument %s missing" % name)
        d = self.cvtdict(d, escape=escape)
        allowed = self.permitted
        for name in d:
            if name not in allowed:
                raise ValueError("bad annotation dictionary name %s" % name)
        return PDFDictionary(d)
    def Dict(self):
        """Virtual; concrete subclasses must override."""
        raise ValueError("DictString undefined for virtual superclass Annotation, must overload")
    def format(self, document):
        """Serialise via the subclass-provided dictionary."""
        return self.Dict().format(document)
class TextAnnotation(Annotation):
    """A sticky-note style /Text annotation."""
    permitted = Annotation.permitted + (
        "Open", "Name")
    def __init__(self, Rect, Contents, **kw):
        self.Rect = Rect
        self.Contents = Contents
        self.otherkw = kw
    def Dict(self):
        """Assemble the /Text annotation dictionary."""
        entries = dict(self.otherkw)
        entries["Rect"] = self.Rect
        entries["Contents"] = self.Contents
        entries["Subtype"] = "/Text"
        return self.AnnotationDict(**entries)
class FreeTextAnnotation(Annotation):
    """A /FreeText annotation: text drawn directly on the page."""
    permitted = Annotation.permitted + ("DA",)
    def __init__(self, Rect, Contents, DA, **kw):
        self.Rect = Rect
        self.Contents = Contents
        self.DA = DA
        self.otherkw = kw
    def Dict(self):
        """Assemble the /FreeText annotation dictionary."""
        entries = dict(self.otherkw)
        entries["Rect"] = self.Rect
        entries["Contents"] = self.Contents
        entries["DA"] = self.DA
        entries["Subtype"] = "/FreeText"
        return self.AnnotationDict(**entries)
class LinkAnnotation(Annotation):
    """A clickable /Link annotation pointing at a destination."""
    permitted = Annotation.permitted + (
        "Dest", "A", "PA")
    def __init__(self, Rect, Contents, Destination, Border="[0 0 1]", **kw):
        self.Border = Border
        self.Rect = Rect
        self.Contents = Contents
        self.Destination = Destination
        self.otherkw = kw
    def dummyDictString(self):
        # retained sample output, used historically for testing
        return """
          << /Type /Annot /Subtype /Link /Rect [71 717 190 734] /Border [16 16 1]
             /Dest [23 0 R /Fit] >>
             """
    def Dict(self):
        """Assemble the /Link annotation dictionary."""
        entries = dict(self.otherkw)
        entries["Border"] = self.Border
        entries["Rect"] = self.Rect
        entries["Contents"] = self.Contents
        entries["Subtype"] = "/Link"
        entries["Dest"] = self.Destination
        return self.AnnotationDict(**entries)
class HighlightAnnotation(Annotation):
    """A text-markup /Highlight annotation covering the given QuadPoints."""
    permitted = Annotation.permitted + ("QuadPoints", )
    def __init__(self, Rect, Contents, QuadPoints, Color=None, **kw):
        self.Rect = Rect
        self.Contents = Contents
        self.otherkw = kw
        self.QuadPoints = QuadPoints
        # BUG FIX: Color previously defaulted to a shared mutable list
        # ([0.83, 0.89, 0.95]); None now stands in for that same default.
        self.Color = [0.83, 0.89, 0.95] if Color is None else Color
    def cvtdict(self, d, escape=1):
        """Serialise Rect/QuadPoints/C to their PDF array string form."""
        Rect = d["Rect"]
        Quad = d["QuadPoints"]
        Color = d["C"]
        # NOTE(review): *d* is passed where PDFArray.format expects a
        # document -- looks suspicious but preserved; confirm with callers.
        if not isinstance(Rect, str):
            d["Rect"] = PDFArray(Rect).format(d, IND=b" ")
        if not isinstance(Quad, str):
            d["QuadPoints"] = PDFArray(Quad).format(d, IND=b" ")
        if not isinstance(Color, str):
            d["C"] = PDFArray(Color).format(d, IND=b" ")
        d["Contents"] = PDFString(d["Contents"], escape)
        return d
    def Dict(self):
        """Assemble the /Highlight annotation dictionary."""
        d = {}
        d.update(self.otherkw)
        d["Rect"] = self.Rect
        d["Contents"] = self.Contents
        d["Subtype"] = "/Highlight"
        d["QuadPoints"] = self.QuadPoints
        d["C"] = self.Color
        return self.AnnotationDict(**d)
def rect_to_quad(Rect):
    """Convert a rectangle [x1, y1, x2, y2] into an 8-item QuadPoints list."""
    x1, y1, x2, y2 = Rect[0], Rect[1], Rect[2], Rect[3]
    return [x1, y1, x2, y1, x1, y2, x2, y2]
class PDFRectangle(PDFObject):
    """A PDF rectangle array [llx lly urx ury]."""
    def __init__(self, llx, lly, urx, ury):
        # NOTE(review): the third attribute is named `ulx` although it stores
        # the *urx* argument; kept as-is because format() reads `self.ulx`.
        self.llx, self.lly, self.ulx, self.ury = llx, lly, urx, ury
    def format(self, document):
        # serialise as a four-element PDFArray
        A = PDFArray([self.llx, self.lly, self.ulx, self.ury])
        return format(A, document)
_NOWT=None
def _getTimeStamp():
global _NOWT
if not _NOWT:
import time
_NOWT = time.time()
return _NOWT
class PDFDate(PDFObject):
    """A PDF date string (D:YYYYMMDDHHmmSS+HH'mm') for the /Info dictionary.

    With invariant=True a fixed date is used so output is reproducible.
    """
    def __init__(self, invariant=rl_config.invariant, dateFormatter=None):
        if invariant:
            now = (2000,1,1,0,0,0,0)
            self.dhh = 0
            self.dmm = 0
        else:
            import time
            now = tuple(time.localtime(_getTimeStamp())[:6])
            from time import timezone
            self.dhh = int(timezone / (3600.0))
            # BUG FIX: minutes component of the UTC offset.  The old
            # `(timezone % 3600) % 60` computed a seconds remainder and
            # yielded 0 for half-hour zones such as UTC+05:30.
            self.dmm = (timezone % 3600) // 60
        self.date = now[:6]
        self.dateFormatter = dateFormatter
    def format(self, doc):
        """Render the date, honouring a caller-supplied dateFormatter."""
        dfmt = self.dateFormatter or (
                lambda yyyy,mm,dd,hh,m,s:
                    "D:%04d%02d%02d%02d%02d%02d%+03d'%02d'"
                        % (yyyy,mm,dd,hh,m,s,self.dhh,self.dmm))
        return format(PDFString(dfmt(*self.date)), doc)
class Destination(PDFObject):
    """A late-bound named destination.

    The display style (self.fmt) and the target page are bound after
    construction; format() fails loudly if either is still unresolved.
    """
    # class-level sentinels; the format *method* below shadows the class attr
    representation = format = page = None
    def __init__(self,name):
        self.name = name
        self.fmt = self.page = None
    def format(self, document):
        f = self.fmt
        if f is None: raise ValueError("format not resolved, probably missing URL scheme or undefined destination target for '%s'" % self.name)
        p = self.page
        if p is None: raise ValueError("Page not bound, probably missing URL scheme or undefined destination target for '%s'" % self.name)
        f.page = p
        return f.format(document)
    # each setter below picks one of the PDF destination display flavours
    def xyz(self, left, top, zoom):
        self.fmt = PDFDestinationXYZ(None, left, top, zoom)
    def fit(self):
        self.fmt = PDFDestinationFit(None)
    def fitb(self):
        self.fmt = PDFDestinationFitB(None)
    def fith(self, top):
        self.fmt = PDFDestinationFitH(None,top)
    def fitv(self, left):
        self.fmt = PDFDestinationFitV(None, left)
    def fitbh(self, top):
        self.fmt = PDFDestinationFitBH(None, top)
    def fitbv(self, left):
        self.fmt = PDFDestinationFitBV(None, left)
    def fitr(self, left, bottom, right, top):
        self.fmt = PDFDestinationFitR(None, left, bottom, right, top)
    def setPage(self, page):
        # bind the target page (a PDFPage or reference) for format()
        self.page = page
class PDFDestinationXYZ(PDFObject):
    """Destination displaying (left, top) at the given zoom factor.

    BUG FIX: the ``class PDFDestinationXYZ`` header line was missing
    (only ``(PDFObject):`` remained), leaving the file syntactically
    broken; restored from the reference in Destination.xyz().
    """
    typename = "XYZ"
    def __init__(self, page, left, top, zoom):
        self.page = page
        self.top = top
        self.zoom = zoom
        self.left = left
    def format(self, document):
        pageref = document.Reference(self.page)
        A = PDFArray( [ pageref, PDFName(self.typename), self.left, self.top, self.zoom ] )
        return format(A, document)
class PDFDestinationFit(PDFObject):
    """Destination that fits the whole page into the window."""
    typename = "Fit"
    def __init__(self, page):
        self.page = page
    def format(self, document):
        """Render as [pageref /Fit]."""
        arr = PDFArray([document.Reference(self.page), PDFName(self.typename)])
        return format(arr, document)
class PDFDestinationFitB(PDFDestinationFit):
    # /FitB: like /Fit but fits the page's bounding box instead
    typename = "FitB"
class PDFDestinationFitH(PDFObject):
    """Fit the page width, with *top* at the top of the window."""
    typename = "FitH"
    def __init__(self, page, top):
        self.page = page
        self.top = top
    def format(self, document):
        """Render as [pageref /FitH top]."""
        arr = PDFArray([document.Reference(self.page), PDFName(self.typename), self.top])
        return format(arr, document)
class PDFDestinationFitBH(PDFDestinationFitH):
    # /FitBH: like /FitH but relative to the bounding box
    typename = "FitBH"
class PDFDestinationFitV(PDFObject):
    """Fit the page height, with *left* at the left of the window."""
    typename = "FitV"
    def __init__(self, page, left):
        self.page = page
        self.left = left
    def format(self, document):
        """Render as [pageref /FitV left]."""
        arr = PDFArray([document.Reference(self.page), PDFName(self.typename), self.left])
        return format(arr, document)
class PDFDestinationFitBV(PDFDestinationFitV):
    # /FitBV: like /FitV but relative to the bounding box
    typename = "FitBV"
class PDFDestinationFitR(PDFObject):
    """Fit the rectangle (left, bottom, right, top) inside the window."""
    typename = "FitR"
    def __init__(self, page, left, bottom, right, top):
        self.page = page
        self.left = left
        self.bottom = bottom
        self.right = right
        self.top = top
    def format(self, document):
        """Render as [pageref /FitR left bottom right top]."""
        arr = PDFArray([document.Reference(self.page), PDFName(self.typename),
                        self.left, self.bottom, self.right, self.top])
        return format(arr, document)
class PDFResourceDictionary(PDFObject):
    """A page /Resources dictionary: fonts, xobjects, colour spaces, etc."""
    def __init__(self):
        self.ColorSpace = {}
        self.XObject = {}
        self.ExtGState = {}
        self.Font = {}
        self.Pattern = {}
        self.ProcSet = []
        self.Properties = {}
        self.Shading = {}
        # start with the basic (PDF + Text) procedure set
        self.basicProcs()
    # the full standard procedure-set names, wrapped as PDFNames
    stdprocs = [PDFName(s) for s in "PDF Text ImageB ImageC ImageI".split()]
    dict_attributes = ("ColorSpace", "XObject", "ExtGState", "Font", "Pattern", "Properties", "Shading")
    def allProcs(self):
        # enable every standard procedure set
        self.ProcSet = self.stdprocs
    def basicProcs(self):
        # just PDF and Text
        self.ProcSet = self.stdprocs[:2]
    def basicFonts(self):
        self.Font = PDFObjectReference(BasicFonts)
    def setColorSpace(self,colorsUsed):
        # colorsUsed maps colour objects to their resource names
        for c,s in colorsUsed.items():
            self.ColorSpace[s] = PDFObjectReference(c)
    def setShading(self,shadingUsed):
        # shadingUsed maps shading objects to their resource names
        for c,s in shadingUsed.items():
            self.Shading[s] = PDFObjectReference(c)
    def format(self, document):
        """Serialise, skipping empty sub-dictionaries and an empty ProcSet."""
        D = {}
        for dname in self.dict_attributes:
            v = getattr(self, dname)
            if isinstance(v,dict):
                if v:
                    dv = PDFDictionary(v)
                    D[dname] = dv
            else:
                # already a PDF object/reference; pass through untouched
                D[dname] = v
        v = self.ProcSet
        dname = "ProcSet"
        if isSeq(v):
            if v:
                dv = PDFArray(v)
                D[dname] = dv
        else:
            D[dname] = v
        DD = PDFDictionary(D)
        return format(DD, document)
', '/G':'DeviceGray', '/CMYK':'DeviceCMYK'}[words[7]]
self.bitsPerComponent = 8
self._filters = 'FlateDecode', #'Fl'
if IMG: self._checkTransparency(IMG[0])
elif self.mask=='auto': self.mask = None
self.streamContent = ''.join(imagedata[3:-1])
    def _checkTransparency(self,im):
        """Resolve self.mask for image *im* when it is 'auto' or a colour.

        With an alpha channel a soft mask (/SMask) image is built instead;
        otherwise a colour-key mask tuple (min,max per channel) is derived
        from the image's transparent colour, if any.
        """
        if self.mask=='auto':
            if im._dataA:
                # alpha channel present: build a separate soft-mask XObject
                self.mask = None
                self._smask = PDFImageXObject(_digester(im._dataA.getRGBData()),im._dataA,mask=None)
                self._smask._decode = [0,1]
            else:
                tc = im.getTransparent()
                if tc:
                    # colour-key masking: each channel gets a [min max] pair
                    self.mask = (tc[0], tc[0], tc[1], tc[1], tc[2], tc[2])
                else:
                    self.mask = None
        elif hasattr(self.mask,'rgb'):
            # a colour object was supplied; expand it to the range form
            _ = self.mask.rgb()
            self.mask = _[0],_[0],_[1],_[1],_[2],_[2]
    def loadImageFromSRC(self, im):
        """Populate this XObject from an ImageReader-like object *im*.

        JPEG sources are embedded as-is; anything else is stored as
        zlib-compressed raw pixel data (optionally ASCII-85 encoded).
        """
        fp = im.jpeg_fh()
        if fp:
            # native JPEG: embed the file bytes with a /DCTDecode filter
            self.loadImageFromJPEG(fp)
        else:
            zlib = import_zlib()
            if not zlib: return
            self.width, self.height = im.getSize()
            raw = im.getRGBData()
            #assert len(raw) == self.width*self.height, "Wrong amount of data for image expected %sx%s=%s got %s" % (self.width,self.height,self.width*self.height,len(raw))
            self.streamContent = zlib.compress(raw)
            if rl_config.useA85:
                # ASCII-85 wrap keeps the stream 7-bit clean
                self.streamContent = asciiBase85Encode(self.streamContent)
                self._filters = 'ASCII85Decode','FlateDecode' #'A85','Fl'
            else:
                self._filters = 'FlateDecode', #'Fl'
            self.colorSpace= _mode2CS[im.mode]
            self.bitsPerComponent = 8
            self._checkTransparency(im)
def format(self, document):
S = PDFStream(content = self.streamContent)
dict = S.dictionary
dict["Type"] = PDFName("XObject")
dict["Subtype"] = PDFName("Image")
dict["Width"] = self.width
dict["Height"] = self.height
dict["BitsPerComponent"] = self.bitsPerComponent
dict["ColorSpace"] = PDFName(self.colorSpace)
if self.colorSpace=='DeviceCMYK' and getattr(self,'_dotrans',0):
dict["Decode"] = PDFArray([1,0,1,0,1,0,1,0])
elif getattr(self,'_decode',None):
dict["Decode"] = PDFArray(self._decode)
dict["Filter"] = PDFArray(map(PDFName,self._filters))
dict["Length"] = len(self.streamContent)
if self.mask: dict["Mask"] = PDFArray(self.mask)
if getattr(self,'smask',None): dict["SMask"] = self.smask
return S.format(document)
class PDFSeparationCMYKColor:
    """A /Separation colour space backed by a CMYK spot colour.

    The tint transform is emitted as a type-4 PostScript calculator
    function scaling each process component by the spot's CMYK values.
    """
    def __init__(self, cmyk):
        from reportlab.lib.colors import CMYKColor
        if not isinstance(cmyk, CMYKColor):
            raise ValueError('%s needs a CMYKColor argument' % self.__class__.__name__)
        if not cmyk.spotName:
            raise ValueError('%s needs a CMYKColor argument with a spotName' % self.__class__.__name__)
        self.cmyk = cmyk
    def _makeFuncPS(self):
        """Build the PostScript tint-transform body for this colour."""
        ops = []
        for i, v in enumerate(self.cmyk.cmyk()):
            v = float(v)
            if i == 3:
                # K is last on the stack: consume the tint directly
                if v == 0.0:
                    ops.append('pop')
                    ops.append('0.0')
                else:
                    ops.append(str(v))
                    ops.append('mul')
            else:
                # C/M/Y: duplicate the tint, scale it, rotate it away
                if v == 0:
                    ops.append('0.0')
                else:
                    ops.append('dup')
                    ops.append(str(v))
                    ops.append('mul')
                ops.append('exch')
        return '{%s}' % ' '.join(ops)
    def value(self):
        """Return the [/Separation name /DeviceCMYK fn] colour-space array."""
        tint_fn = PDFStream(
            dictionary=PDFDictionary(dict(
                FunctionType=4,
                Domain=PDFArrayCompact((0,1)),
                Range=PDFArrayCompact((0,1,0,1,0,1,0,1))
            )),
            content=self._makeFuncPS(),
            filters=None,
        )
        return PDFArrayCompact((
            PDFName('Separation'),
            PDFName(self.cmyk.spotName),
            PDFName('DeviceCMYK'),
            tint_fn,
        ))
class PDFFunction(PDFObject):
    """Abstract base for the PDF /Function dictionary family."""
    defaults = []
    required = ("FunctionType", "Domain")
    permitted = required+("Range",)
    def FunctionDict(self, **kw):
        """Merge class defaults with *kw* and validate the entry names."""
        d = {}
        for (name,val) in self.defaults:
            d[name] = val
        d.update(kw)
        for name in self.required:
            if name not in d:
                raise ValueError("keyword argument %s missing" % name)
        permitted = self.permitted
        for name in d.keys():
            if name not in permitted:
                raise ValueError("bad annotation dictionary name %s" % name)
        return PDFDictionary(d)
    def Dict(self, document):
        """Virtual; subclasses must override (usually via FunctionDict)."""
        # BUG FIX: the message previously said "PDFShading" -- a copy/paste
        # slip from the PDFShading class below.
        raise ValueError("Dict undefined for virtual superclass PDFFunction, must overload")
    def format(self, document):
        D = self.Dict(document)
        return D.format(document)
class PDFExponentialFunction(PDFFunction):
    """FunctionType 2: exponential interpolation between C0 and C1."""
    defaults = PDFFunction.defaults + [("Domain", PDFArrayCompact((0.0, 1.0)))]
    required = PDFFunction.required + ("N",)
    permitted = PDFFunction.permitted + ("C0", "C1", "N")
    def __init__(self, C0, C1, N, **kw):
        self.C0 = C0
        self.C1 = C1
        self.N = N
        self.otherkw = kw
    def Dict(self, document):
        """Assemble the type-2 function dictionary."""
        entries = dict(self.otherkw)
        entries["FunctionType"] = 2
        entries["C0"] = PDFArrayCompact(self.C0)
        entries["C1"] = PDFArrayCompact(self.C1)
        entries["N"] = self.N
        return self.FunctionDict(**entries)
class PDFStitchingFunction(PDFFunction):
    """FunctionType 3: stitches several subfunctions over subdomains."""
    required = PDFFunction.required + ("Functions", "Bounds", "Encode")
    permitted = PDFFunction.permitted + ("Functions", "Bounds", "Encode")
    def __init__(self, Functions, Bounds, Encode, **kw):
        self.Functions = Functions
        self.Bounds = Bounds
        self.Encode = Encode
        self.otherkw = kw
    def Dict(self, document):
        """Assemble the type-3 function dictionary."""
        entries = dict(self.otherkw)
        entries["FunctionType"] = 3
        entries["Functions"] = PDFArray([document.Reference(x) for x in self.Functions])
        entries["Bounds"] = PDFArray(self.Bounds)
        entries["Encode"] = PDFArray(self.Encode)
        return self.FunctionDict(**entries)
class PDFShading(PDFObject):
    """Abstract base for the PDF /Shading dictionary family."""
    required = ("ShadingType", "ColorSpace")
    permitted = required+("Background", "BBox", "AntiAlias")
    def ShadingDict(self, **kw):
        """Validate *kw* against required/permitted names and wrap it."""
        d = dict(kw)
        for name in self.required:
            if name not in d:
                raise ValueError("keyword argument %s missing" % name)
        allowed = self.permitted
        for name in d:
            if name not in allowed:
                raise ValueError("bad annotation dictionary name %s" % name)
        return PDFDictionary(d)
    def Dict(self, document):
        """Virtual; subclasses must override (usually via ShadingDict)."""
        raise ValueError("Dict undefined for virtual superclass PDFShading, must overload")
    def format(self, document):
        return self.Dict(document).format(document)
class PDFFunctionShading(PDFShading):
    """ShadingType 1: colour defined by a function over a 2-D domain."""
    required = PDFShading.required + ("Function",)
    permitted = PDFShading.permitted + ("Domain", "Matrix", "Function")
    def __init__(self, Function, ColorSpace, **kw):
        self.Function = Function
        self.ColorSpace = ColorSpace
        self.otherkw = kw
    def Dict(self, document):
        """Assemble the type-1 shading dictionary."""
        entries = dict(self.otherkw)
        entries["ShadingType"] = 1
        entries["ColorSpace"] = PDFName(self.ColorSpace)
        entries["Function"] = document.Reference(self.Function)
        return self.ShadingDict(**entries)
class PDFAxialShading(PDFShading):
    """ShadingType 2: axial (linear) gradient between two points."""
    required = PDFShading.required + ("Coords", "Function")
    permitted = PDFShading.permitted + (
        "Coords", "Domain", "Function", "Extend")
    def __init__(self, x0, y0, x1, y1, Function, ColorSpace, **kw):
        self.Coords = (x0, y0, x1, y1)
        self.Function = Function
        self.ColorSpace = ColorSpace
        self.otherkw = kw
    def Dict(self, document):
        """Assemble the type-2 shading dictionary."""
        entries = dict(self.otherkw)
        entries["ShadingType"] = 2
        entries["ColorSpace"] = PDFName(self.ColorSpace)
        entries["Coords"] = PDFArrayCompact(self.Coords)
        entries["Function"] = document.Reference(self.Function)
        return self.ShadingDict(**entries)
class PDFRadialShading(PDFShading):
    """ShadingType 3: radial gradient between two circles."""
    required = PDFShading.required + ("Coords", "Function")
    permitted = PDFShading.permitted + (
        "Coords", "Domain", "Function", "Extend")
    def __init__(self, x0, y0, r0, x1, y1, r1, Function, ColorSpace, **kw):
        self.Coords = (x0, y0, r0, x1, y1, r1)
        self.Function = Function
        self.ColorSpace = ColorSpace
        self.otherkw = kw
    def Dict(self, document):
        """Assemble the type-3 shading dictionary."""
        entries = dict(self.otherkw)
        entries["ShadingType"] = 3
        entries["ColorSpace"] = PDFName(self.ColorSpace)
        entries["Coords"] = PDFArrayCompact(self.Coords)
        entries["Function"] = document.Reference(self.Function)
        return self.ShadingDict(**entries)
if __name__=="__main__":
    # Library module: nothing useful to execute directly.
    print("There is no script interpretation for pdfdoc.")
| true | true |
1c45ebcafc988d6417656fc2f57c14e952094419 | 591 | py | Python | kombu/asynchronous/http/__init__.py | kaiix/kombu | 580b5219cc50cad278c4b664d0e0f85e37a5e9ea | [
"BSD-3-Clause"
] | 1,920 | 2015-01-03T15:43:23.000Z | 2022-03-30T19:30:35.000Z | kombu/asynchronous/http/__init__.py | kaiix/kombu | 580b5219cc50cad278c4b664d0e0f85e37a5e9ea | [
"BSD-3-Clause"
] | 949 | 2015-01-02T18:56:00.000Z | 2022-03-31T23:14:59.000Z | kombu/asynchronous/http/__init__.py | kaiix/kombu | 580b5219cc50cad278c4b664d0e0f85e37a5e9ea | [
"BSD-3-Clause"
] | 833 | 2015-01-07T23:56:35.000Z | 2022-03-31T22:04:11.000Z | from kombu.asynchronous import get_event_loop
from .base import Headers, Request, Response
__all__ = ('Client', 'Headers', 'Response', 'Request')
def Client(hub=None, **kwargs):
    """Create new HTTP client.

    Imported lazily so the curl backend is only required when used.
    """
    from .curl import CurlClient
    return CurlClient(hub, **kwargs)
def get_client(hub=None, **kwargs):
    """Get or create HTTP client bound to the current event loop.

    The client is cached on the hub so repeat calls reuse one instance.
    """
    hub = hub or get_event_loop()
    try:
        return hub._current_http_client
    except AttributeError:
        pass
    client = Client(hub, **kwargs)
    hub._current_http_client = client
    return client
| 26.863636 | 68 | 0.685279 | from kombu.asynchronous import get_event_loop
from .base import Headers, Request, Response
__all__ = ('Client', 'Headers', 'Response', 'Request')
def Client(hub=None, **kwargs):
    """Create a new HTTP client backed by the curl transport."""
    from .curl import CurlClient
    return CurlClient(hub, **kwargs)
def get_client(hub=None, **kwargs):
    """Return the HTTP client cached on *hub*, creating it on first use."""
    hub = hub or get_event_loop()
    try:
        return hub._current_http_client
    except AttributeError:
        # first use on this hub: build and memoize a client
        client = hub._current_http_client = Client(hub, **kwargs)
        return client
| true | true |
1c45ec2cae11560444aa0d63d936a8e946da8104 | 1,851 | py | Python | setup.py | lassejaco/pretix-eth-payment-plugin | be514a7387de8399cb11c9dd8971f286ccc9a72c | [
"Apache-2.0"
] | null | null | null | setup.py | lassejaco/pretix-eth-payment-plugin | be514a7387de8399cb11c9dd8971f286ccc9a72c | [
"Apache-2.0"
] | null | null | null | setup.py | lassejaco/pretix-eth-payment-plugin | be514a7387de8399cb11c9dd8971f286ccc9a72c | [
"Apache-2.0"
] | null | null | null | import os
from distutils.command.build import build # type: ignore
from setuptools import setup, find_packages
# The PyPI long description comes straight from the README.
with open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
class CustomBuild(build):
    """Build command that also compiles gettext message catalogs."""
    def run(self):
        from django.core import management
        management.call_command('compilemessages', verbosity=1)
        build.run(self)
# Hook the translation-compiling build step into setuptools.
cmdclass = {
    'build': CustomBuild
}
extras_require = {
    'test': [
        'pytest>=5.1,<6',
        'pytest-django>=3.5,<4',
    ],
    'lint': [
        'flake8>=3.7,<4',
        'mypy==0.720',
    ],
    'dev': [
        'tox>=3.14.5,<4',
    ],
}
# 'dev' pulls in the test and lint toolchains as well.
extras_require['dev'] = (
    extras_require['dev']
    + extras_require['test']
    + extras_require['lint']
)
# Package metadata and dependency pins for the pretix Ethereum plugin.
setup(
    name='pretix-eth-payment-plugin',
    version='2.0.4-dev',
    description='Ethereum payment provider plugin for pretix software',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/esPass/pretix-eth-payment-plugin',
    author='Pretix Ethereum Plugin Developers',
    author_email='pretix-eth-payment-plugin@ethereum.org',
    license='Apache Software License',
    install_requires=[
        "pretix>=3.8.0",
        "web3>=5.7.0",
        "eth-abi>=2.1.1,<3",
        "eth-typing>=2.2.1,<3",
        "eth-utils>=1.8.4,<2",
        "eth-hash[pycryptodome]>=0.3.1,<0.4",
        # Requests requires urllib3 <1.26.0. Can delete this later after
        # requests gets its act together.
        "urllib3<1.26.0",
    ],
    python_requires='>=3.6, <4',
    extras_require=extras_require,
    packages=find_packages(exclude=['tests', 'tests.*']),
    include_package_data=True,
    cmdclass=cmdclass,
    entry_points="""
[pretix.plugin]
pretix_eth=pretix_eth:PretixPluginMeta
""",
)
| 24.68 | 87 | 0.622907 | import os
from distutils.command.build import build
from setuptools import setup, find_packages
# The PyPI long description comes straight from the README.
with open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
class CustomBuild(build):
    """Build command that also compiles gettext message catalogs."""
    def run(self):
        from django.core import management
        management.call_command('compilemessages', verbosity=1)
        build.run(self)
# Hook the translation-compiling build step into setuptools.
cmdclass = {
    'build': CustomBuild
}
extras_require = {
    'test': [
        'pytest>=5.1,<6',
        'pytest-django>=3.5,<4',
    ],
    'lint': [
        'flake8>=3.7,<4',
        'mypy==0.720',
    ],
    'dev': [
        'tox>=3.14.5,<4',
    ],
}
# 'dev' pulls in the test and lint toolchains as well.
extras_require['dev'] = (
    extras_require['dev']
    + extras_require['test']
    + extras_require['lint']
)
# Package metadata and dependency pins for the pretix Ethereum plugin.
setup(
    name='pretix-eth-payment-plugin',
    version='2.0.4-dev',
    description='Ethereum payment provider plugin for pretix software',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/esPass/pretix-eth-payment-plugin',
    author='Pretix Ethereum Plugin Developers',
    author_email='pretix-eth-payment-plugin@ethereum.org',
    license='Apache Software License',
    install_requires=[
        "pretix>=3.8.0",
        "web3>=5.7.0",
        "eth-abi>=2.1.1,<3",
        "eth-typing>=2.2.1,<3",
        "eth-utils>=1.8.4,<2",
        "eth-hash[pycryptodome]>=0.3.1,<0.4",
        "urllib3<1.26.0",
    ],
    python_requires='>=3.6, <4',
    extras_require=extras_require,
    packages=find_packages(exclude=['tests', 'tests.*']),
    include_package_data=True,
    cmdclass=cmdclass,
    entry_points="""
[pretix.plugin]
pretix_eth=pretix_eth:PretixPluginMeta
""",
)
| true | true |
1c45ef8254822d3c204624c76142cdc54dcca2e2 | 457 | py | Python | dedomeno/houses/migrations/0098_auto_20170117_1650.py | ginopalazzo/dedomeno | e43df365849102016c8819b2082d2cde9109360f | [
"MIT"
] | 38 | 2018-03-19T12:52:17.000Z | 2022-02-17T14:45:57.000Z | dedomeno/houses/migrations/0098_auto_20170117_1650.py | ginopalazzo/dedomeno | e43df365849102016c8819b2082d2cde9109360f | [
"MIT"
] | 7 | 2020-02-11T23:01:40.000Z | 2020-08-06T13:30:58.000Z | dedomeno/houses/migrations/0098_auto_20170117_1650.py | ginopalazzo/dedomeno | e43df365849102016c8819b2082d2cde9109360f | [
"MIT"
] | 12 | 2019-02-23T22:10:34.000Z | 2022-03-24T12:01:38.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-17 15:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make RealEstate.desc optional (blank/null allowed)."""
    dependencies = [
        ('houses', '0097_property_online'),
    ]
    operations = [
        migrations.AlterField(
            model_name='realestate',
            name='desc',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| 21.761905 | 58 | 0.61488 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make RealEstate.desc optional (blank/null allowed)."""
    dependencies = [
        ('houses', '0097_property_online'),
    ]
    operations = [
        migrations.AlterField(
            model_name='realestate',
            name='desc',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| true | true |
1c45f025c99e2b8c21037589076efa5e71227813 | 17,668 | py | Python | graphsage/unsupervised_train.py | LiTszOn/GraphSAGE | dbeb50d52e8d242b3c4ad3e4264c168a2c406e70 | [
"MIT"
] | null | null | null | graphsage/unsupervised_train.py | LiTszOn/GraphSAGE | dbeb50d52e8d242b3c4ad3e4264c168a2c406e70 | [
"MIT"
] | null | null | null | graphsage/unsupervised_train.py | LiTszOn/GraphSAGE | dbeb50d52e8d242b3c4ad3e4264c168a2c406e70 | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
from graphsage.models import SampleAndAggregate, SAGEInfo, Node2VecModel
from graphsage.minibatch import EdgeMinibatchIterator
from graphsage.neigh_samplers import UniformNeighborSampler
from graphsage.utils import load_data
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# Set random seed
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
# Settings
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
#core params..
flags.DEFINE_string('model', 'graphsage', 'model names. See README for possible values.')
flags.DEFINE_float('learning_rate', 0.00001, 'initial learning rate.')
flags.DEFINE_string("model_size", "small", "Can be big or small; model specific def'ns")
flags.DEFINE_string('train_prefix', '', 'name of the object file that stores the training data. must be specified.')
# left to default values in main experiments
flags.DEFINE_integer('epochs', 1, 'number of epochs to train.')
flags.DEFINE_float('dropout', 0.0, 'dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 0.0, 'weight for l2 loss on embedding matrix.')
flags.DEFINE_integer('max_degree', 100, 'maximum node degree.')
flags.DEFINE_integer('samples_1', 25, 'number of samples in layer 1')
flags.DEFINE_integer('samples_2', 10, 'number of users samples in layer 2')
flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')
flags.DEFINE_integer('dim_2', 128, 'Size of output dim (final is 2x this, if using concat)')
flags.DEFINE_boolean('random_context', True, 'Whether to use random context or direct edges')
flags.DEFINE_integer('neg_sample_size', 20, 'number of negative samples')
flags.DEFINE_integer('batch_size', 512, 'minibatch size.')
flags.DEFINE_integer('n2v_test_epochs', 1, 'Number of new SGD epochs for n2v.')
flags.DEFINE_integer('identity_dim', 0, 'Set to positive value to use identity embedding features of that dimension. Default 0.')
#logging, saving, validation settings etc.
flags.DEFINE_boolean('save_embeddings', True, 'whether to save embeddings for all nodes after training')
flags.DEFINE_string('base_log_dir', '.', 'base directory for logging and saving embeddings')
flags.DEFINE_integer('validate_iter', 5000, "how often to run a validation minibatch.")
flags.DEFINE_integer('validate_batch_size', 256, "how many nodes per validation sample.")
flags.DEFINE_integer('gpu', 1, "which gpu to use.")
flags.DEFINE_integer('print_every', 50, "How often to print training info.")
flags.DEFINE_integer('max_total_steps', 10**10, "Maximum total number of iterations")
os.environ["CUDA_VISIBLE_DEVICES"]=str(FLAGS.gpu)
GPU_MEM_FRACTION = 0.8
def log_dir():
    """Build (and create if missing) the log/embedding output directory."""
    dataset = FLAGS.train_prefix.split("/")[-2]
    path = "{base}/unsup-{data}/{model:s}_{model_size:s}_{lr:0.6f}/".format(
        base=FLAGS.base_log_dir,
        data=dataset,
        model=FLAGS.model,
        model_size=FLAGS.model_size,
        lr=FLAGS.learning_rate)
    if not os.path.exists(path):
        os.makedirs(path)
    return path
# Define model evaluation function
def evaluate(sess, model, minibatch_iter, size=None):
    """Run one validation minibatch; return (loss, ranks, mrr, seconds)."""
    start = time.time()
    feed_dict_val = minibatch_iter.val_feed_dict(size)
    loss, ranks, mrr = sess.run([model.loss, model.ranks, model.mrr],
                                feed_dict=feed_dict_val)
    return loss, ranks, mrr, (time.time() - start)
def incremental_evaluate(sess, model, minibatch_iter, size):
    """Evaluate over the full validation set in minibatches of *size*.

    Returns (mean loss, mean MRR, elapsed seconds).
    """
    start = time.time()
    losses = []
    mrrs = []
    batch_num = 0
    done = False
    while not done:
        feed_dict_val, done, _ = minibatch_iter.incremental_val_feed_dict(size, batch_num)
        batch_num += 1
        loss, _ranks, mrr = sess.run([model.loss, model.ranks, model.mrr],
                                     feed_dict=feed_dict_val)
        losses.append(loss)
        mrrs.append(mrr)
    return np.mean(losses), np.mean(mrrs), (time.time() - start)
def save_val_embeddings(sess, model, minibatch_iter, size, out_dir, mod=""):
    """Compute embeddings for all validation nodes and save them to disk.

    Writes out_dir/val<mod>.npy (embedding matrix) and a matching
    val<mod>.txt listing one node id per row.
    """
    val_embeddings = []
    finished = False
    seen = set([])
    nodes = []
    iter_num = 0
    name = "val"
    while not finished:
        feed_dict_val, finished, edges = minibatch_iter.incremental_embed_feed_dict(size, iter_num)
        iter_num += 1
        outs_val = sess.run([model.loss, model.mrr, model.outputs1],
                            feed_dict=feed_dict_val)
        #ONLY SAVE FOR embeds1 because of planetoid
        for i, edge in enumerate(edges):
            if not edge[0] in seen:
                # keep the first embedding computed for each distinct node
                val_embeddings.append(outs_val[-1][i,:])
                nodes.append(edge[0])
                seen.add(edge[0])
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    val_embeddings = np.vstack(val_embeddings)
    np.save(out_dir + name + mod + ".npy", val_embeddings)
    with open(out_dir + name + mod + ".txt", "w") as fp:
        fp.write("\n".join(map(str,nodes)))
def construct_placeholders():
    """Create the TF placeholders fed by the edge minibatch iterator."""
    batch1 = tf.placeholder(tf.int32, shape=(None), name='batch1')
    batch2 = tf.placeholder(tf.int32, shape=(None), name='batch2')
    # Negative samples shared by every node in the batch.
    neg_samples = tf.placeholder(tf.int32, shape=(None,), name='neg_sample_size')
    dropout = tf.placeholder_with_default(0., shape=(), name='dropout')
    batch_size = tf.placeholder(tf.int32, name='batch_size')
    return {
        'batch1': batch1,
        'batch2': batch2,
        'neg_samples': neg_samples,
        'dropout': dropout,
        'batch_size': batch_size,
    }
def train(train_data, test_data=None):
G = train_data[0]
features = train_data[1]
id_map = train_data[2]
print("G: " + str(G))
print("features: " + str(features))
print("id_map: " + str(id_map))
if not features is None:
# pad with dummy zero vector
features = np.vstack([features, np.zeros((features.shape[1],))])
context_pairs = train_data[3] if FLAGS.random_context else None
placeholders = construct_placeholders() #returns a dictionary of placeholder
minibatch = EdgeMinibatchIterator(G, #produce a bunch of minibatch
id_map,
placeholders, batch_size=FLAGS.batch_size,
max_degree=FLAGS.max_degree,
num_neg_samples=FLAGS.neg_sample_size,
context_pairs = context_pairs) #a useful object
adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape)
adj_info = tf.Variable(adj_info_ph, trainable=False, name="adj_info")
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
# print("adj_info: " + str(sess.run(adj_info)))
if FLAGS.model == 'graphsage_mean':
# Create model
sampler = UniformNeighborSampler(adj_info)#to wrap the lookup function
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
model_size=FLAGS.model_size,
identity_dim = FLAGS.identity_dim,
logging=True) #set training parameters and define loss function etc
elif FLAGS.model == 'gcn':
# Create model
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, 2*FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, 2*FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
aggregator_type="gcn",
model_size=FLAGS.model_size,
identity_dim = FLAGS.identity_dim,
concat=False,
logging=True)
elif FLAGS.model == 'graphsage_seq':
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
identity_dim = FLAGS.identity_dim,
aggregator_type="seq",
model_size=FLAGS.model_size,
logging=True)
elif FLAGS.model == 'graphsage_maxpool':
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
aggregator_type="maxpool",
model_size=FLAGS.model_size,
identity_dim = FLAGS.identity_dim,
logging=True)
elif FLAGS.model == 'graphsage_meanpool':
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
aggregator_type="meanpool",
model_size=FLAGS.model_size,
identity_dim = FLAGS.identity_dim,
logging=True)
elif FLAGS.model == 'n2v':
model = Node2VecModel(placeholders, features.shape[0],
minibatch.deg,
#2x because graphsage uses concat
nodevec_dim=2*FLAGS.dim_1,
lr=FLAGS.learning_rate)
else:
raise Exception('Error: model name unrecognized.')
config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
config.gpu_options.allow_growth = True
#config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION
config.allow_soft_placement = True
# Initialize session
sess = tf.Session(config=config)
merged = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(log_dir(), sess.graph)
# Init variables
sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph: minibatch.adj})
# Train model
train_shadow_mrr = None
shadow_mrr = None
total_steps = 0
avg_time = 0.0
epoch_val_costs = []
train_adj_info = tf.assign(adj_info, minibatch.adj)
val_adj_info = tf.assign(adj_info, minibatch.test_adj)
for epoch in range(FLAGS.epochs):
minibatch.shuffle()
iter = 0
print('Epoch: %04d' % (epoch + 1))
epoch_val_costs.append(0)
while not minibatch.end():
# Construct feed dictionary
feed_dict = minibatch.next_minibatch_feed_dict()
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
t = time.time()
# Training step
outs = sess.run([merged, model.opt_op, model.loss, model.ranks, model.aff_all,
model.mrr, model.outputs1], feed_dict=feed_dict)
train_cost = outs[2]
train_mrr = outs[5]
if train_shadow_mrr is None:
train_shadow_mrr = train_mrr#
else:
train_shadow_mrr -= (1-0.99) * (train_shadow_mrr - train_mrr)
if iter % FLAGS.validate_iter == 0:
# Validation
sess.run(val_adj_info.op)
val_cost, ranks, val_mrr, duration = evaluate(sess, model, minibatch, size=FLAGS.validate_batch_size)
sess.run(train_adj_info.op)
epoch_val_costs[-1] += val_cost
if shadow_mrr is None:
shadow_mrr = val_mrr
else:
shadow_mrr -= (1-0.99) * (shadow_mrr - val_mrr)
if total_steps % FLAGS.print_every == 0:
summary_writer.add_summary(outs[0], total_steps)
# Print results
avg_time = (avg_time * total_steps + time.time() - t) / (total_steps + 1)
if total_steps % FLAGS.print_every == 0:
print("Iter:", '%04d' % iter,
"train_loss=", "{:.5f}".format(train_cost),
"train_mrr=", "{:.5f}".format(train_mrr),#Mean reciprocal rank
"train_mrr_ema=", "{:.5f}".format(train_shadow_mrr), # exponential moving average
"val_loss=", "{:.5f}".format(val_cost),
"val_mrr=", "{:.5f}".format(val_mrr),
"val_mrr_ema=", "{:.5f}".format(shadow_mrr), # exponential moving average
"time=", "{:.5f}".format(avg_time))
iter += 1
total_steps += 1
if total_steps > FLAGS.max_total_steps:
break
if total_steps > FLAGS.max_total_steps:
break
print("Optimization Finished!")
if FLAGS.save_embeddings:
sess.run(val_adj_info.op)
save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir())
if FLAGS.model == "n2v":
# stopping the gradient for the already trained nodes
train_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if not G.node[n]['val'] and not G.node[n]['test']],
dtype=tf.int32)
test_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if G.node[n]['val'] or G.node[n]['test']],
dtype=tf.int32)
update_nodes = tf.nn.embedding_lookup(model.context_embeds, tf.squeeze(test_ids))
no_update_nodes = tf.nn.embedding_lookup(model.context_embeds,tf.squeeze(train_ids))
update_nodes = tf.scatter_nd(test_ids, update_nodes, tf.shape(model.context_embeds))
no_update_nodes = tf.stop_gradient(tf.scatter_nd(train_ids, no_update_nodes, tf.shape(model.context_embeds)))
model.context_embeds = update_nodes + no_update_nodes
sess.run(model.context_embeds)
# run random walks
from graphsage.utils import run_random_walks
nodes = [n for n in G.nodes_iter() if G.node[n]["val"] or G.node[n]["test"]]
start_time = time.time()
pairs = run_random_walks(G, nodes, num_walks=50)
walk_time = time.time() - start_time
test_minibatch = EdgeMinibatchIterator(G,
id_map,
placeholders, batch_size=FLAGS.batch_size,
max_degree=FLAGS.max_degree,
num_neg_samples=FLAGS.neg_sample_size,
context_pairs = pairs,
n2v_retrain=True,
fixed_n2v=True)
start_time = time.time()
print("Doing test training for n2v.")
test_steps = 0
for epoch in range(FLAGS.n2v_test_epochs):
test_minibatch.shuffle()
while not test_minibatch.end():
feed_dict = test_minibatch.next_minibatch_feed_dict()
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
outs = sess.run([model.opt_op, model.loss, model.ranks, model.aff_all,
model.mrr, model.outputs1], feed_dict=feed_dict)
if test_steps % FLAGS.print_every == 0:
print("Iter:", '%04d' % test_steps,
"train_loss=", "{:.5f}".format(outs[1]),
"train_mrr=", "{:.5f}".format(outs[-2]))
test_steps += 1
train_time = time.time() - start_time
save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir(), mod="-test")
print("Total time: ", train_time+walk_time)
print("Walk time: ", walk_time)
print("Train time: ", train_time)
def main(argv=None):
print("Loading training data..")
train_data = load_data(FLAGS.train_prefix, load_walks=True) # for processing Redit's data (Reddit's data is a bit wired)
print("Done loading training data..")
train(train_data)
if __name__ == '__main__':
tf.app.run()
| 45.302564 | 129 | 0.586258 | from __future__ import division
from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
from graphsage.models import SampleAndAggregate, SAGEInfo, Node2VecModel
from graphsage.minibatch import EdgeMinibatchIterator
from graphsage.neigh_samplers import UniformNeighborSampler
from graphsage.utils import load_data
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
seed = 123
np.random.seed(seed)
tf.set_random_seed(seed)
flags = tf.app.flags
FLAGS = flags.FLAGS
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
flags.DEFINE_string('model', 'graphsage', 'model names. See README for possible values.')
flags.DEFINE_float('learning_rate', 0.00001, 'initial learning rate.')
flags.DEFINE_string("model_size", "small", "Can be big or small; model specific def'ns")
flags.DEFINE_string('train_prefix', '', 'name of the object file that stores the training data. must be specified.')
# left to default values in main experiments
flags.DEFINE_integer('epochs', 1, 'number of epochs to train.')
flags.DEFINE_float('dropout', 0.0, 'dropout rate (1 - keep probability).')
flags.DEFINE_float('weight_decay', 0.0, 'weight for l2 loss on embedding matrix.')
flags.DEFINE_integer('max_degree', 100, 'maximum node degree.')
flags.DEFINE_integer('samples_1', 25, 'number of samples in layer 1')
flags.DEFINE_integer('samples_2', 10, 'number of users samples in layer 2')
flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')
flags.DEFINE_integer('dim_2', 128, 'Size of output dim (final is 2x this, if using concat)')
flags.DEFINE_boolean('random_context', True, 'Whether to use random context or direct edges')
flags.DEFINE_integer('neg_sample_size', 20, 'number of negative samples')
flags.DEFINE_integer('batch_size', 512, 'minibatch size.')
flags.DEFINE_integer('n2v_test_epochs', 1, 'Number of new SGD epochs for n2v.')
flags.DEFINE_integer('identity_dim', 0, 'Set to positive value to use identity embedding features of that dimension. Default 0.')
#logging, saving, validation settings etc.
flags.DEFINE_boolean('save_embeddings', True, 'whether to save embeddings for all nodes after training')
flags.DEFINE_string('base_log_dir', '.', 'base directory for logging and saving embeddings')
flags.DEFINE_integer('validate_iter', 5000, "how often to run a validation minibatch.")
flags.DEFINE_integer('validate_batch_size', 256, "how many nodes per validation sample.")
flags.DEFINE_integer('gpu', 1, "which gpu to use.")
flags.DEFINE_integer('print_every', 50, "How often to print training info.")
flags.DEFINE_integer('max_total_steps', 10**10, "Maximum total number of iterations")
os.environ["CUDA_VISIBLE_DEVICES"]=str(FLAGS.gpu)
GPU_MEM_FRACTION = 0.8
def log_dir():
log_dir = FLAGS.base_log_dir + "/unsup-" + FLAGS.train_prefix.split("/")[-2]
log_dir += "/{model:s}_{model_size:s}_{lr:0.6f}/".format(
model=FLAGS.model,
model_size=FLAGS.model_size,
lr=FLAGS.learning_rate)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
return log_dir
# Define model evaluation function
def evaluate(sess, model, minibatch_iter, size=None):
t_test = time.time()
feed_dict_val = minibatch_iter.val_feed_dict(size)
outs_val = sess.run([model.loss, model.ranks, model.mrr],
feed_dict=feed_dict_val)
return outs_val[0], outs_val[1], outs_val[2], (time.time() - t_test)
def incremental_evaluate(sess, model, minibatch_iter, size):
t_test = time.time()
finished = False
val_losses = []
val_mrrs = []
iter_num = 0
while not finished:
feed_dict_val, finished, _ = minibatch_iter.incremental_val_feed_dict(size, iter_num)
iter_num += 1
outs_val = sess.run([model.loss, model.ranks, model.mrr],
feed_dict=feed_dict_val)
val_losses.append(outs_val[0])
val_mrrs.append(outs_val[2])
return np.mean(val_losses), np.mean(val_mrrs), (time.time() - t_test)
def save_val_embeddings(sess, model, minibatch_iter, size, out_dir, mod=""):
val_embeddings = []
finished = False
seen = set([])
nodes = []
iter_num = 0
name = "val"
while not finished:
feed_dict_val, finished, edges = minibatch_iter.incremental_embed_feed_dict(size, iter_num)
iter_num += 1
outs_val = sess.run([model.loss, model.mrr, model.outputs1],
feed_dict=feed_dict_val)
#ONLY SAVE FOR embeds1 because of planetoid
for i, edge in enumerate(edges):
if not edge[0] in seen:
val_embeddings.append(outs_val[-1][i,:])
nodes.append(edge[0])
seen.add(edge[0])
if not os.path.exists(out_dir):
os.makedirs(out_dir)
val_embeddings = np.vstack(val_embeddings)
np.save(out_dir + name + mod + ".npy", val_embeddings)
with open(out_dir + name + mod + ".txt", "w") as fp:
fp.write("\n".join(map(str,nodes)))
def construct_placeholders():
# Define placeholders
placeholders = {
'batch1' : tf.placeholder(tf.int32, shape=(None), name='batch1'),
'batch2' : tf.placeholder(tf.int32, shape=(None), name='batch2'),
# negative samples for all nodes in the batch
'neg_samples': tf.placeholder(tf.int32, shape=(None,),
name='neg_sample_size'),
'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
'batch_size' : tf.placeholder(tf.int32, name='batch_size'),
}
return placeholders
def train(train_data, test_data=None):
G = train_data[0]
features = train_data[1]
id_map = train_data[2]
print("G: " + str(G))
print("features: " + str(features))
print("id_map: " + str(id_map))
if not features is None:
# pad with dummy zero vector
features = np.vstack([features, np.zeros((features.shape[1],))])
context_pairs = train_data[3] if FLAGS.random_context else None
placeholders = construct_placeholders() #returns a dictionary of placeholder
minibatch = EdgeMinibatchIterator(G, #produce a bunch of minibatch
id_map,
placeholders, batch_size=FLAGS.batch_size,
max_degree=FLAGS.max_degree,
num_neg_samples=FLAGS.neg_sample_size,
context_pairs = context_pairs) #a useful object
adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape)
adj_info = tf.Variable(adj_info_ph, trainable=False, name="adj_info")
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# sess.run(tf.local_variables_initializer())
# print("adj_info: " + str(sess.run(adj_info)))
if FLAGS.model == 'graphsage_mean':
# Create model
sampler = UniformNeighborSampler(adj_info)#to wrap the lookup function
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
model_size=FLAGS.model_size,
identity_dim = FLAGS.identity_dim,
logging=True) #set training parameters and define loss function etc
elif FLAGS.model == 'gcn':
# Create model
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, 2*FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, 2*FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
aggregator_type="gcn",
model_size=FLAGS.model_size,
identity_dim = FLAGS.identity_dim,
concat=False,
logging=True)
elif FLAGS.model == 'graphsage_seq':
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
identity_dim = FLAGS.identity_dim,
aggregator_type="seq",
model_size=FLAGS.model_size,
logging=True)
elif FLAGS.model == 'graphsage_maxpool':
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
aggregator_type="maxpool",
model_size=FLAGS.model_size,
identity_dim = FLAGS.identity_dim,
logging=True)
elif FLAGS.model == 'graphsage_meanpool':
sampler = UniformNeighborSampler(adj_info)
layer_infos = [SAGEInfo("node", sampler, FLAGS.samples_1, FLAGS.dim_1),
SAGEInfo("node", sampler, FLAGS.samples_2, FLAGS.dim_2)]
model = SampleAndAggregate(placeholders,
features,
adj_info,
minibatch.deg,
layer_infos=layer_infos,
aggregator_type="meanpool",
model_size=FLAGS.model_size,
identity_dim = FLAGS.identity_dim,
logging=True)
elif FLAGS.model == 'n2v':
model = Node2VecModel(placeholders, features.shape[0],
minibatch.deg,
#2x because graphsage uses concat
nodevec_dim=2*FLAGS.dim_1,
lr=FLAGS.learning_rate)
else:
raise Exception('Error: model name unrecognized.')
config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
config.gpu_options.allow_growth = True
#config.gpu_options.per_process_gpu_memory_fraction = GPU_MEM_FRACTION
config.allow_soft_placement = True
# Initialize session
sess = tf.Session(config=config)
merged = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(log_dir(), sess.graph)
# Init variables
sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph: minibatch.adj})
# Train model
train_shadow_mrr = None
shadow_mrr = None
total_steps = 0
avg_time = 0.0
epoch_val_costs = []
train_adj_info = tf.assign(adj_info, minibatch.adj)
val_adj_info = tf.assign(adj_info, minibatch.test_adj)
for epoch in range(FLAGS.epochs):
minibatch.shuffle()
iter = 0
print('Epoch: %04d' % (epoch + 1))
epoch_val_costs.append(0)
while not minibatch.end():
# Construct feed dictionary
feed_dict = minibatch.next_minibatch_feed_dict()
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
t = time.time()
# Training step
outs = sess.run([merged, model.opt_op, model.loss, model.ranks, model.aff_all,
model.mrr, model.outputs1], feed_dict=feed_dict)
train_cost = outs[2]
train_mrr = outs[5]
if train_shadow_mrr is None:
train_shadow_mrr = train_mrr#
else:
train_shadow_mrr -= (1-0.99) * (train_shadow_mrr - train_mrr)
if iter % FLAGS.validate_iter == 0:
# Validation
sess.run(val_adj_info.op)
val_cost, ranks, val_mrr, duration = evaluate(sess, model, minibatch, size=FLAGS.validate_batch_size)
sess.run(train_adj_info.op)
epoch_val_costs[-1] += val_cost
if shadow_mrr is None:
shadow_mrr = val_mrr
else:
shadow_mrr -= (1-0.99) * (shadow_mrr - val_mrr)
if total_steps % FLAGS.print_every == 0:
summary_writer.add_summary(outs[0], total_steps)
# Print results
avg_time = (avg_time * total_steps + time.time() - t) / (total_steps + 1)
if total_steps % FLAGS.print_every == 0:
print("Iter:", '%04d' % iter,
"train_loss=", "{:.5f}".format(train_cost),
"train_mrr=", "{:.5f}".format(train_mrr),#Mean reciprocal rank
"train_mrr_ema=", "{:.5f}".format(train_shadow_mrr), # exponential moving average
"val_loss=", "{:.5f}".format(val_cost),
"val_mrr=", "{:.5f}".format(val_mrr),
"val_mrr_ema=", "{:.5f}".format(shadow_mrr), # exponential moving average
"time=", "{:.5f}".format(avg_time))
iter += 1
total_steps += 1
if total_steps > FLAGS.max_total_steps:
break
if total_steps > FLAGS.max_total_steps:
break
print("Optimization Finished!")
if FLAGS.save_embeddings:
sess.run(val_adj_info.op)
save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir())
if FLAGS.model == "n2v":
# stopping the gradient for the already trained nodes
train_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if not G.node[n]['val'] and not G.node[n]['test']],
dtype=tf.int32)
test_ids = tf.constant([[id_map[n]] for n in G.nodes_iter() if G.node[n]['val'] or G.node[n]['test']],
dtype=tf.int32)
update_nodes = tf.nn.embedding_lookup(model.context_embeds, tf.squeeze(test_ids))
no_update_nodes = tf.nn.embedding_lookup(model.context_embeds,tf.squeeze(train_ids))
update_nodes = tf.scatter_nd(test_ids, update_nodes, tf.shape(model.context_embeds))
no_update_nodes = tf.stop_gradient(tf.scatter_nd(train_ids, no_update_nodes, tf.shape(model.context_embeds)))
model.context_embeds = update_nodes + no_update_nodes
sess.run(model.context_embeds)
# run random walks
from graphsage.utils import run_random_walks
nodes = [n for n in G.nodes_iter() if G.node[n]["val"] or G.node[n]["test"]]
start_time = time.time()
pairs = run_random_walks(G, nodes, num_walks=50)
walk_time = time.time() - start_time
test_minibatch = EdgeMinibatchIterator(G,
id_map,
placeholders, batch_size=FLAGS.batch_size,
max_degree=FLAGS.max_degree,
num_neg_samples=FLAGS.neg_sample_size,
context_pairs = pairs,
n2v_retrain=True,
fixed_n2v=True)
start_time = time.time()
print("Doing test training for n2v.")
test_steps = 0
for epoch in range(FLAGS.n2v_test_epochs):
test_minibatch.shuffle()
while not test_minibatch.end():
feed_dict = test_minibatch.next_minibatch_feed_dict()
feed_dict.update({placeholders['dropout']: FLAGS.dropout})
outs = sess.run([model.opt_op, model.loss, model.ranks, model.aff_all,
model.mrr, model.outputs1], feed_dict=feed_dict)
if test_steps % FLAGS.print_every == 0:
print("Iter:", '%04d' % test_steps,
"train_loss=", "{:.5f}".format(outs[1]),
"train_mrr=", "{:.5f}".format(outs[-2]))
test_steps += 1
train_time = time.time() - start_time
save_val_embeddings(sess, model, minibatch, FLAGS.validate_batch_size, log_dir(), mod="-test")
print("Total time: ", train_time+walk_time)
print("Walk time: ", walk_time)
print("Train time: ", train_time)
def main(argv=None):
print("Loading training data..")
train_data = load_data(FLAGS.train_prefix, load_walks=True) # for processing Redit's data (Reddit's data is a bit wired)
print("Done loading training data..")
train(train_data)
if __name__ == '__main__':
tf.app.run()
| true | true |
1c45f085e004e34a83549f22c405ac311d6001c4 | 48,492 | bzl | Python | examples/crate_universe/vendor_remote_pkgs/crates/defs.bzl | cfredric/rules_rust | 521e649ff44e9711fe3c45b0ec1e792f7e1d361e | [
"Apache-2.0"
] | null | null | null | examples/crate_universe/vendor_remote_pkgs/crates/defs.bzl | cfredric/rules_rust | 521e649ff44e9711fe3c45b0ec1e792f7e1d361e | [
"Apache-2.0"
] | null | null | null | examples/crate_universe/vendor_remote_pkgs/crates/defs.bzl | cfredric/rules_rust | 521e649ff44e9711fe3c45b0ec1e792f7e1d361e | [
"Apache-2.0"
] | null | null | null | ###############################################################################
# @generated
# This file is auto-generated by the cargo-bazel tool.
#
# DO NOT MODIFY: Local changes may be replaced in future executions.
###############################################################################
"""
# `crates_repository` API
- [aliases](#aliases)
- [crate_deps](#crate_deps)
- [all_crate_deps](#all_crate_deps)
- [crate_repositories](#crate_repositories)
"""
load("@bazel_skylib//lib:selects.bzl", "selects")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
###############################################################################
# MACROS API
###############################################################################
# An identifier that represent common dependencies (unconditional).
_COMMON_CONDITION = ""
def _flatten_dependency_maps(all_dependency_maps):
"""Flatten a list of dependency maps into one dictionary.
Dependency maps have the following structure:
```python
DEPENDENCIES_MAP = {
# The first key in the map is a Bazel package
# name of the workspace this file is defined in.
"workspace_member_package": {
# Not all dependnecies are supported for all platforms.
# the condition key is the condition required to be true
# on the host platform.
"condition": {
# An alias to a crate target. # The label of the crate target the
# Aliases are only crate names. # package name refers to.
"package_name": "@full//:label",
}
}
}
```
Args:
all_dependency_maps (list): A list of dicts as described above
Returns:
dict: A dictionary as described above
"""
dependencies = {}
for workspace_deps_map in all_dependency_maps:
for pkg_name, conditional_deps_map in workspace_deps_map.items():
if pkg_name not in dependencies:
non_frozen_map = dict()
for key, values in conditional_deps_map.items():
non_frozen_map.update({key: dict(values.items())})
dependencies.setdefault(pkg_name, non_frozen_map)
continue
for condition, deps_map in conditional_deps_map.items():
# If the condition has not been recorded, do so and continue
if condition not in dependencies[pkg_name]:
dependencies[pkg_name].setdefault(condition, dict(deps_map.items()))
continue
# Alert on any miss-matched dependencies
inconsistent_entries = []
for crate_name, crate_label in deps_map.items():
existing = dependencies[pkg_name][condition].get(crate_name)
if existing and existing != crate_label:
inconsistent_entries.append((crate_name, existing, crate_label))
dependencies[pkg_name][condition].update({crate_name: crate_label})
return dependencies
def crate_deps(deps, package_name = None):
"""Finds the fully qualified label of the requested crates for the package where this macro is called.
Args:
deps (list): The desired list of crate targets.
package_name (str, optional): The package name of the set of dependencies to look up.
Defaults to `native.package_name()`.
Returns:
list: A list of labels to generated rust targets (str)
"""
if not deps:
return []
if package_name == None:
package_name = native.package_name()
# Join both sets of dependencies
dependencies = _flatten_dependency_maps([
_NORMAL_DEPENDENCIES,
_NORMAL_DEV_DEPENDENCIES,
_PROC_MACRO_DEPENDENCIES,
_PROC_MACRO_DEV_DEPENDENCIES,
_BUILD_DEPENDENCIES,
_BUILD_PROC_MACRO_DEPENDENCIES,
]).pop(package_name, {})
# Combine all conditional packages so we can easily index over a flat list
# TODO: Perhaps this should actually return select statements and maintain
# the conditionals of the dependencies
flat_deps = {}
for deps_set in dependencies.values():
for crate_name, crate_label in deps_set.items():
flat_deps.update({crate_name: crate_label})
missing_crates = []
crate_targets = []
for crate_target in deps:
if crate_target not in flat_deps:
missing_crates.append(crate_target)
else:
crate_targets.append(flat_deps[crate_target])
if missing_crates:
fail("Could not find crates `{}` among dependencies of `{}`. Available dependencies were `{}`".format(
missing_crates,
package_name,
dependencies,
))
return crate_targets
def all_crate_deps(
normal = False,
normal_dev = False,
proc_macro = False,
proc_macro_dev = False,
build = False,
build_proc_macro = False,
package_name = None):
"""Finds the fully qualified label of all requested direct crate dependencies \
for the package where this macro is called.
If no parameters are set, all normal dependencies are returned. Setting any one flag will
otherwise impact the contents of the returned list.
Args:
normal (bool, optional): If True, normal dependencies are included in the
output list.
normal_dev (bool, optional): If True, normla dev dependencies will be
included in the output list..
proc_macro (bool, optional): If True, proc_macro dependencies are included
in the output list.
proc_macro_dev (bool, optional): If True, dev proc_macro dependencies are
included in the output list.
build (bool, optional): If True, build dependencies are included
in the output list.
build_proc_macro (bool, optional): If True, build proc_macro dependencies are
included in the output list.
package_name (str, optional): The package name of the set of dependencies to look up.
Defaults to `native.package_name()` when unset.
Returns:
list: A list of labels to generated rust targets (str)
"""
if package_name == None:
package_name = native.package_name()
# Determine the relevant maps to use
all_dependency_maps = []
if normal:
all_dependency_maps.append(_NORMAL_DEPENDENCIES)
if normal_dev:
all_dependency_maps.append(_NORMAL_DEV_DEPENDENCIES)
if proc_macro:
all_dependency_maps.append(_PROC_MACRO_DEPENDENCIES)
if proc_macro_dev:
all_dependency_maps.append(_PROC_MACRO_DEV_DEPENDENCIES)
if build:
all_dependency_maps.append(_BUILD_DEPENDENCIES)
if build_proc_macro:
all_dependency_maps.append(_BUILD_PROC_MACRO_DEPENDENCIES)
# Default to always using normal dependencies
if not all_dependency_maps:
all_dependency_maps.append(_NORMAL_DEPENDENCIES)
dependencies = _flatten_dependency_maps(all_dependency_maps).pop(package_name, None)
if not dependencies:
if dependencies == None:
fail("Tried to get all_crate_deps for package " + package_name + " but that package had no Cargo.toml file")
else:
return []
crate_deps = list(dependencies.pop(_COMMON_CONDITION, {}).values())
for condition, deps in dependencies.items():
crate_deps += selects.with_or({_CONDITIONS[condition]: deps.values()})
return crate_deps
def aliases(
normal = False,
normal_dev = False,
proc_macro = False,
proc_macro_dev = False,
build = False,
build_proc_macro = False,
package_name = None):
"""Produces a map of Crate alias names to their original label
If no dependency kinds are specified, `normal` and `proc_macro` are used by default.
Setting any one flag will otherwise determine the contents of the returned dict.
Args:
normal (bool, optional): If True, normal dependencies are included in the
output list.
normal_dev (bool, optional): If True, normla dev dependencies will be
included in the output list..
proc_macro (bool, optional): If True, proc_macro dependencies are included
in the output list.
proc_macro_dev (bool, optional): If True, dev proc_macro dependencies are
included in the output list.
build (bool, optional): If True, build dependencies are included
in the output list.
build_proc_macro (bool, optional): If True, build proc_macro dependencies are
included in the output list.
package_name (str, optional): The package name of the set of dependencies to look up.
Defaults to `native.package_name()` when unset.
Returns:
dict: The aliases of all associated packages
"""
if package_name == None:
package_name = native.package_name()
# Determine the relevant maps to use
all_aliases_maps = []
if normal:
all_aliases_maps.append(_NORMAL_ALIASES)
if normal_dev:
all_aliases_maps.append(_NORMAL_DEV_ALIASES)
if proc_macro:
all_aliases_maps.append(_PROC_MACRO_ALIASES)
if proc_macro_dev:
all_aliases_maps.append(_PROC_MACRO_DEV_ALIASES)
if build:
all_aliases_maps.append(_BUILD_ALIASES)
if build_proc_macro:
all_aliases_maps.append(_BUILD_PROC_MACRO_ALIASES)
# Default to always using normal aliases
if not all_aliases_maps:
all_aliases_maps.append(_NORMAL_ALIASES)
all_aliases_maps.append(_PROC_MACRO_ALIASES)
aliases = _flatten_dependency_maps(all_aliases_maps).pop(package_name, None)
if not aliases:
return dict()
common_items = aliases.pop(_COMMON_CONDITION, {}).items()
# If there are only common items in the dictionary, immediately return them
if not len(aliases.keys()) == 1:
return dict(common_items)
# Build a single select statement where each conditional has accounted for the
# common set of aliases.
crate_aliases = {"//conditions:default": common_items}
for condition, deps in aliases.items():
condition_triples = _CONDITIONS[condition]
if condition_triples in crate_aliases:
crate_aliases[condition_triples].update(deps)
else:
crate_aliases.update({_CONDITIONS[condition]: dict(deps.items() + common_items)})
return selects.with_or(crate_aliases)
###############################################################################
# WORKSPACE MEMBER DEPS AND ALIASES
###############################################################################
_NORMAL_DEPENDENCIES = {
"": {
_COMMON_CONDITION: {
"axum": "@crates_vendor_pkgs__axum-0.4.8//:axum",
"hyper": "@crates_vendor_pkgs__hyper-0.14.18//:hyper",
"mime": "@crates_vendor_pkgs__mime-0.3.16//:mime",
"serde_json": "@crates_vendor_pkgs__serde_json-1.0.81//:serde_json",
"tokio": "@crates_vendor_pkgs__tokio-1.16.1//:tokio",
"tower": "@crates_vendor_pkgs__tower-0.4.12//:tower",
"tower-http": "@crates_vendor_pkgs__tower-http-0.2.5//:tower_http",
"tracing": "@crates_vendor_pkgs__tracing-0.1.34//:tracing",
"tracing-subscriber": "@crates_vendor_pkgs__tracing-subscriber-0.3.11//:tracing_subscriber",
},
},
}
_NORMAL_ALIASES = {
"": {
_COMMON_CONDITION: {
},
},
}
_NORMAL_DEV_DEPENDENCIES = {
"": {
},
}
_NORMAL_DEV_ALIASES = {
"": {
},
}
_PROC_MACRO_DEPENDENCIES = {
"": {
},
}
_PROC_MACRO_ALIASES = {
"": {
},
}
_PROC_MACRO_DEV_DEPENDENCIES = {
"": {
},
}
_PROC_MACRO_DEV_ALIASES = {
"": {
},
}
_BUILD_DEPENDENCIES = {
"": {
},
}
_BUILD_ALIASES = {
"": {
},
}
_BUILD_PROC_MACRO_DEPENDENCIES = {
"": {
},
}
_BUILD_PROC_MACRO_ALIASES = {
"": {
},
}
_CONDITIONS = {
"cfg(all(any(target_arch = \"x86_64\", target_arch = \"aarch64\"), target_os = \"hermit\"))": [],
"cfg(not(windows))": ["aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", "aarch64-linux-android", "aarch64-unknown-linux-gnu", "arm-unknown-linux-gnueabi", "armv7-unknown-linux-gnueabi", "i686-apple-darwin", "i686-linux-android", "i686-unknown-freebsd", "i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "riscv32imc-unknown-none-elf", "s390x-unknown-linux-gnu", "wasm32-unknown-unknown", "wasm32-wasi", "x86_64-apple-darwin", "x86_64-apple-ios", "x86_64-linux-android", "x86_64-unknown-freebsd", "x86_64-unknown-linux-gnu"],
"cfg(target_os = \"redox\")": [],
"cfg(target_os = \"windows\")": ["i686-pc-windows-msvc", "x86_64-pc-windows-msvc"],
"cfg(tracing_unstable)": [],
"cfg(unix)": ["aarch64-apple-darwin", "aarch64-apple-ios", "aarch64-apple-ios-sim", "aarch64-linux-android", "aarch64-unknown-linux-gnu", "arm-unknown-linux-gnueabi", "armv7-unknown-linux-gnueabi", "i686-apple-darwin", "i686-linux-android", "i686-unknown-freebsd", "i686-unknown-linux-gnu", "powerpc-unknown-linux-gnu", "s390x-unknown-linux-gnu", "x86_64-apple-darwin", "x86_64-apple-ios", "x86_64-linux-android", "x86_64-unknown-freebsd", "x86_64-unknown-linux-gnu"],
"cfg(windows)": ["i686-pc-windows-msvc", "x86_64-pc-windows-msvc"],
"i686-pc-windows-gnu": [],
"x86_64-pc-windows-gnu": [],
}
###############################################################################
def crate_repositories():
    """Define an `http_archive` repository for every vendored crate.

    NOTE(review): this file appears to be machine-generated (rules_rust
    crate_universe / `crates_vendor` remote-vendor mode) — prefer
    regenerating it over editing it by hand.

    Each entry pins an exact crate version and its sha256 digest, fetches
    the `.crate` tarball from crates.io, and attaches a checked-in BUILD
    file from `@examples//vendor_remote_pkgs/crates`. Wrapping each
    declaration in `maybe()` makes the macro idempotent: a repository is
    only defined if one with the same name does not already exist, so the
    macro can be called more than once and individual crates can be
    overridden by declaring them before calling this macro.
    """

    # Entries are sorted alphabetically by crate name; every block follows
    # the same shape (name / sha256 / type / urls / strip_prefix / build_file).
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__ansi_term-0.12.1",
        sha256 = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/ansi_term/0.12.1/download"],
        strip_prefix = "ansi_term-0.12.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ansi_term-0.12.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__async-trait-0.1.53",
        sha256 = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/async-trait/0.1.53/download"],
        strip_prefix = "async-trait-0.1.53",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.async-trait-0.1.53.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__autocfg-1.1.0",
        sha256 = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/autocfg/1.1.0/download"],
        strip_prefix = "autocfg-1.1.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.autocfg-1.1.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__axum-0.4.8",
        sha256 = "c9f346c92c1e9a71d14fe4aaf7c2a5d9932cc4e5e48d8fb6641524416eb79ddd",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/axum/0.4.8/download"],
        strip_prefix = "axum-0.4.8",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.axum-0.4.8.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__axum-core-0.1.2",
        sha256 = "6dbcda393bef9c87572779cb8ef916f12d77750b27535dd6819fa86591627a51",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/axum-core/0.1.2/download"],
        strip_prefix = "axum-core-0.1.2",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.axum-core-0.1.2.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__bitflags-1.3.2",
        sha256 = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/bitflags/1.3.2/download"],
        strip_prefix = "bitflags-1.3.2",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.bitflags-1.3.2.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__bytes-1.1.0",
        sha256 = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/bytes/1.1.0/download"],
        strip_prefix = "bytes-1.1.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.bytes-1.1.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__cfg-if-1.0.0",
        sha256 = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/cfg-if/1.0.0/download"],
        strip_prefix = "cfg-if-1.0.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.cfg-if-1.0.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__fnv-1.0.7",
        sha256 = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/fnv/1.0.7/download"],
        strip_prefix = "fnv-1.0.7",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.fnv-1.0.7.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__form_urlencoded-1.0.1",
        sha256 = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/form_urlencoded/1.0.1/download"],
        strip_prefix = "form_urlencoded-1.0.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.form_urlencoded-1.0.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__futures-channel-0.3.21",
        sha256 = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/futures-channel/0.3.21/download"],
        strip_prefix = "futures-channel-0.3.21",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-channel-0.3.21.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__futures-core-0.3.21",
        sha256 = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/futures-core/0.3.21/download"],
        strip_prefix = "futures-core-0.3.21",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-core-0.3.21.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__futures-sink-0.3.21",
        sha256 = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/futures-sink/0.3.21/download"],
        strip_prefix = "futures-sink-0.3.21",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-sink-0.3.21.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__futures-task-0.3.21",
        sha256 = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/futures-task/0.3.21/download"],
        strip_prefix = "futures-task-0.3.21",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-task-0.3.21.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__futures-util-0.3.21",
        sha256 = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/futures-util/0.3.21/download"],
        strip_prefix = "futures-util-0.3.21",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.futures-util-0.3.21.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__h2-0.3.13",
        sha256 = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/h2/0.3.13/download"],
        strip_prefix = "h2-0.3.13",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.h2-0.3.13.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__hashbrown-0.11.2",
        sha256 = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/hashbrown/0.11.2/download"],
        strip_prefix = "hashbrown-0.11.2",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.hashbrown-0.11.2.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__hermit-abi-0.1.19",
        sha256 = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/hermit-abi/0.1.19/download"],
        strip_prefix = "hermit-abi-0.1.19",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.hermit-abi-0.1.19.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__http-0.2.7",
        sha256 = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/http/0.2.7/download"],
        strip_prefix = "http-0.2.7",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.http-0.2.7.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__http-body-0.4.5",
        sha256 = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/http-body/0.4.5/download"],
        strip_prefix = "http-body-0.4.5",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.http-body-0.4.5.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__http-range-header-0.3.0",
        sha256 = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/http-range-header/0.3.0/download"],
        strip_prefix = "http-range-header-0.3.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.http-range-header-0.3.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__httparse-1.7.1",
        sha256 = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/httparse/1.7.1/download"],
        strip_prefix = "httparse-1.7.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.httparse-1.7.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__httpdate-1.0.2",
        sha256 = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/httpdate/1.0.2/download"],
        strip_prefix = "httpdate-1.0.2",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.httpdate-1.0.2.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__hyper-0.14.18",
        sha256 = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/hyper/0.14.18/download"],
        strip_prefix = "hyper-0.14.18",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.hyper-0.14.18.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__indexmap-1.8.1",
        sha256 = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/indexmap/1.8.1/download"],
        strip_prefix = "indexmap-1.8.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.indexmap-1.8.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__instant-0.1.12",
        sha256 = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/instant/0.1.12/download"],
        strip_prefix = "instant-0.1.12",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.instant-0.1.12.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__itoa-1.0.2",
        sha256 = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/itoa/1.0.2/download"],
        strip_prefix = "itoa-1.0.2",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.itoa-1.0.2.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__lazy_static-1.4.0",
        sha256 = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/lazy_static/1.4.0/download"],
        strip_prefix = "lazy_static-1.4.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.lazy_static-1.4.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__libc-0.2.126",
        sha256 = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/libc/0.2.126/download"],
        strip_prefix = "libc-0.2.126",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.libc-0.2.126.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__lock_api-0.4.7",
        sha256 = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/lock_api/0.4.7/download"],
        strip_prefix = "lock_api-0.4.7",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.lock_api-0.4.7.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__log-0.4.17",
        sha256 = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/log/0.4.17/download"],
        strip_prefix = "log-0.4.17",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.log-0.4.17.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__matches-0.1.9",
        sha256 = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/matches/0.1.9/download"],
        strip_prefix = "matches-0.1.9",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.matches-0.1.9.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__matchit-0.4.6",
        sha256 = "9376a4f0340565ad675d11fc1419227faf5f60cd7ac9cb2e7185a471f30af833",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/matchit/0.4.6/download"],
        strip_prefix = "matchit-0.4.6",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.matchit-0.4.6.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__memchr-2.5.0",
        sha256 = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/memchr/2.5.0/download"],
        strip_prefix = "memchr-2.5.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.memchr-2.5.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__mime-0.3.16",
        sha256 = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/mime/0.3.16/download"],
        strip_prefix = "mime-0.3.16",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.mime-0.3.16.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__mio-0.7.14",
        sha256 = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/mio/0.7.14/download"],
        strip_prefix = "mio-0.7.14",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.mio-0.7.14.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__miow-0.3.7",
        sha256 = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/miow/0.3.7/download"],
        strip_prefix = "miow-0.3.7",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.miow-0.3.7.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__ntapi-0.3.7",
        sha256 = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/ntapi/0.3.7/download"],
        strip_prefix = "ntapi-0.3.7",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ntapi-0.3.7.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__num_cpus-1.13.1",
        sha256 = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/num_cpus/1.13.1/download"],
        strip_prefix = "num_cpus-1.13.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.num_cpus-1.13.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__once_cell-1.12.0",
        sha256 = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/once_cell/1.12.0/download"],
        strip_prefix = "once_cell-1.12.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.once_cell-1.12.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__parking_lot-0.11.2",
        sha256 = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/parking_lot/0.11.2/download"],
        strip_prefix = "parking_lot-0.11.2",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.parking_lot-0.11.2.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__parking_lot_core-0.8.5",
        sha256 = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/parking_lot_core/0.8.5/download"],
        strip_prefix = "parking_lot_core-0.8.5",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.parking_lot_core-0.8.5.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__percent-encoding-2.1.0",
        sha256 = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/percent-encoding/2.1.0/download"],
        strip_prefix = "percent-encoding-2.1.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.percent-encoding-2.1.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__pin-project-1.0.10",
        sha256 = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/pin-project/1.0.10/download"],
        strip_prefix = "pin-project-1.0.10",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-1.0.10.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__pin-project-internal-1.0.10",
        sha256 = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/pin-project-internal/1.0.10/download"],
        strip_prefix = "pin-project-internal-1.0.10",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-internal-1.0.10.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__pin-project-lite-0.2.9",
        sha256 = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/pin-project-lite/0.2.9/download"],
        strip_prefix = "pin-project-lite-0.2.9",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-lite-0.2.9.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__pin-utils-0.1.0",
        sha256 = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/pin-utils/0.1.0/download"],
        strip_prefix = "pin-utils-0.1.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-utils-0.1.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__proc-macro2-1.0.39",
        sha256 = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/proc-macro2/1.0.39/download"],
        strip_prefix = "proc-macro2-1.0.39",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.proc-macro2-1.0.39.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__quote-1.0.18",
        sha256 = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/quote/1.0.18/download"],
        strip_prefix = "quote-1.0.18",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.quote-1.0.18.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__redox_syscall-0.2.13",
        sha256 = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/redox_syscall/0.2.13/download"],
        strip_prefix = "redox_syscall-0.2.13",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.redox_syscall-0.2.13.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__ryu-1.0.10",
        sha256 = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/ryu/1.0.10/download"],
        strip_prefix = "ryu-1.0.10",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ryu-1.0.10.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__scopeguard-1.1.0",
        sha256 = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/scopeguard/1.1.0/download"],
        strip_prefix = "scopeguard-1.1.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.scopeguard-1.1.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__serde-1.0.137",
        sha256 = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/serde/1.0.137/download"],
        strip_prefix = "serde-1.0.137",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde-1.0.137.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__serde_json-1.0.81",
        sha256 = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/serde_json/1.0.81/download"],
        strip_prefix = "serde_json-1.0.81",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde_json-1.0.81.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__serde_urlencoded-0.7.1",
        sha256 = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/serde_urlencoded/0.7.1/download"],
        strip_prefix = "serde_urlencoded-0.7.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde_urlencoded-0.7.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__sharded-slab-0.1.4",
        sha256 = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/sharded-slab/0.1.4/download"],
        strip_prefix = "sharded-slab-0.1.4",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.sharded-slab-0.1.4.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__signal-hook-registry-1.4.0",
        sha256 = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/signal-hook-registry/1.4.0/download"],
        strip_prefix = "signal-hook-registry-1.4.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.signal-hook-registry-1.4.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__slab-0.4.6",
        sha256 = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/slab/0.4.6/download"],
        strip_prefix = "slab-0.4.6",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.slab-0.4.6.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__smallvec-1.8.0",
        sha256 = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/smallvec/1.8.0/download"],
        strip_prefix = "smallvec-1.8.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.smallvec-1.8.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__socket2-0.4.4",
        sha256 = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/socket2/0.4.4/download"],
        strip_prefix = "socket2-0.4.4",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.socket2-0.4.4.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__syn-1.0.95",
        sha256 = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/syn/1.0.95/download"],
        strip_prefix = "syn-1.0.95",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.syn-1.0.95.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__sync_wrapper-0.1.1",
        sha256 = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/sync_wrapper/0.1.1/download"],
        strip_prefix = "sync_wrapper-0.1.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.sync_wrapper-0.1.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__thread_local-1.1.4",
        sha256 = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/thread_local/1.1.4/download"],
        strip_prefix = "thread_local-1.1.4",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.thread_local-1.1.4.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tokio-1.16.1",
        sha256 = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tokio/1.16.1/download"],
        strip_prefix = "tokio-1.16.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-1.16.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tokio-macros-1.7.0",
        sha256 = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tokio-macros/1.7.0/download"],
        strip_prefix = "tokio-macros-1.7.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-macros-1.7.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tokio-util-0.7.2",
        sha256 = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tokio-util/0.7.2/download"],
        strip_prefix = "tokio-util-0.7.2",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-util-0.7.2.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tower-0.4.12",
        sha256 = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tower/0.4.12/download"],
        strip_prefix = "tower-0.4.12",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-0.4.12.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tower-http-0.2.5",
        sha256 = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tower-http/0.2.5/download"],
        strip_prefix = "tower-http-0.2.5",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-http-0.2.5.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tower-layer-0.3.1",
        sha256 = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tower-layer/0.3.1/download"],
        strip_prefix = "tower-layer-0.3.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-layer-0.3.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tower-service-0.3.1",
        sha256 = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tower-service/0.3.1/download"],
        strip_prefix = "tower-service-0.3.1",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-service-0.3.1.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tracing-0.1.34",
        sha256 = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tracing/0.1.34/download"],
        strip_prefix = "tracing-0.1.34",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-0.1.34.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tracing-attributes-0.1.21",
        sha256 = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tracing-attributes/0.1.21/download"],
        strip_prefix = "tracing-attributes-0.1.21",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-attributes-0.1.21.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tracing-core-0.1.26",
        sha256 = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tracing-core/0.1.26/download"],
        strip_prefix = "tracing-core-0.1.26",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-core-0.1.26.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tracing-log-0.1.3",
        sha256 = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tracing-log/0.1.3/download"],
        strip_prefix = "tracing-log-0.1.3",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-log-0.1.3.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__tracing-subscriber-0.3.11",
        sha256 = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/tracing-subscriber/0.3.11/download"],
        strip_prefix = "tracing-subscriber-0.3.11",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-subscriber-0.3.11.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__try-lock-0.2.3",
        sha256 = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/try-lock/0.2.3/download"],
        strip_prefix = "try-lock-0.2.3",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.try-lock-0.2.3.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__unicode-ident-1.0.0",
        sha256 = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/unicode-ident/1.0.0/download"],
        strip_prefix = "unicode-ident-1.0.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.unicode-ident-1.0.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__valuable-0.1.0",
        sha256 = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/valuable/0.1.0/download"],
        strip_prefix = "valuable-0.1.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.valuable-0.1.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__want-0.3.0",
        sha256 = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/want/0.3.0/download"],
        strip_prefix = "want-0.3.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.want-0.3.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__winapi-0.3.9",
        sha256 = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/winapi/0.3.9/download"],
        strip_prefix = "winapi-0.3.9",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-0.3.9.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__winapi-i686-pc-windows-gnu-0.4.0",
        sha256 = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/winapi-i686-pc-windows-gnu/0.4.0/download"],
        strip_prefix = "winapi-i686-pc-windows-gnu-0.4.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-i686-pc-windows-gnu-0.4.0.bazel"),
    )
    maybe(
        http_archive,
        name = "crates_vendor_pkgs__winapi-x86_64-pc-windows-gnu-0.4.0",
        sha256 = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f",
        type = "tar.gz",
        urls = ["https://crates.io/api/v1/crates/winapi-x86_64-pc-windows-gnu/0.4.0/download"],
        strip_prefix = "winapi-x86_64-pc-windows-gnu-0.4.0",
        build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-x86_64-pc-windows-gnu-0.4.0.bazel"),
    )
| 40.477462 | 552 | 0.644086 | es.io/api/v1/crates/matches/0.1.9/download"],
strip_prefix = "matches-0.1.9",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.matches-0.1.9.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__matchit-0.4.6",
sha256 = "9376a4f0340565ad675d11fc1419227faf5f60cd7ac9cb2e7185a471f30af833",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/matchit/0.4.6/download"],
strip_prefix = "matchit-0.4.6",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.matchit-0.4.6.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__memchr-2.5.0",
sha256 = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/memchr/2.5.0/download"],
strip_prefix = "memchr-2.5.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.memchr-2.5.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__mime-0.3.16",
sha256 = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/mime/0.3.16/download"],
strip_prefix = "mime-0.3.16",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.mime-0.3.16.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__mio-0.7.14",
sha256 = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/mio/0.7.14/download"],
strip_prefix = "mio-0.7.14",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.mio-0.7.14.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__miow-0.3.7",
sha256 = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/miow/0.3.7/download"],
strip_prefix = "miow-0.3.7",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.miow-0.3.7.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__ntapi-0.3.7",
sha256 = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/ntapi/0.3.7/download"],
strip_prefix = "ntapi-0.3.7",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ntapi-0.3.7.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__num_cpus-1.13.1",
sha256 = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/num_cpus/1.13.1/download"],
strip_prefix = "num_cpus-1.13.1",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.num_cpus-1.13.1.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__once_cell-1.12.0",
sha256 = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/once_cell/1.12.0/download"],
strip_prefix = "once_cell-1.12.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.once_cell-1.12.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__parking_lot-0.11.2",
sha256 = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/parking_lot/0.11.2/download"],
strip_prefix = "parking_lot-0.11.2",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.parking_lot-0.11.2.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__parking_lot_core-0.8.5",
sha256 = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/parking_lot_core/0.8.5/download"],
strip_prefix = "parking_lot_core-0.8.5",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.parking_lot_core-0.8.5.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__percent-encoding-2.1.0",
sha256 = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/percent-encoding/2.1.0/download"],
strip_prefix = "percent-encoding-2.1.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.percent-encoding-2.1.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__pin-project-1.0.10",
sha256 = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/pin-project/1.0.10/download"],
strip_prefix = "pin-project-1.0.10",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-1.0.10.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__pin-project-internal-1.0.10",
sha256 = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/pin-project-internal/1.0.10/download"],
strip_prefix = "pin-project-internal-1.0.10",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-internal-1.0.10.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__pin-project-lite-0.2.9",
sha256 = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/pin-project-lite/0.2.9/download"],
strip_prefix = "pin-project-lite-0.2.9",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-project-lite-0.2.9.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__pin-utils-0.1.0",
sha256 = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/pin-utils/0.1.0/download"],
strip_prefix = "pin-utils-0.1.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.pin-utils-0.1.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__proc-macro2-1.0.39",
sha256 = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/proc-macro2/1.0.39/download"],
strip_prefix = "proc-macro2-1.0.39",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.proc-macro2-1.0.39.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__quote-1.0.18",
sha256 = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/quote/1.0.18/download"],
strip_prefix = "quote-1.0.18",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.quote-1.0.18.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__redox_syscall-0.2.13",
sha256 = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/redox_syscall/0.2.13/download"],
strip_prefix = "redox_syscall-0.2.13",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.redox_syscall-0.2.13.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__ryu-1.0.10",
sha256 = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/ryu/1.0.10/download"],
strip_prefix = "ryu-1.0.10",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.ryu-1.0.10.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__scopeguard-1.1.0",
sha256 = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/scopeguard/1.1.0/download"],
strip_prefix = "scopeguard-1.1.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.scopeguard-1.1.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__serde-1.0.137",
sha256 = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/serde/1.0.137/download"],
strip_prefix = "serde-1.0.137",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde-1.0.137.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__serde_json-1.0.81",
sha256 = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/serde_json/1.0.81/download"],
strip_prefix = "serde_json-1.0.81",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde_json-1.0.81.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__serde_urlencoded-0.7.1",
sha256 = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/serde_urlencoded/0.7.1/download"],
strip_prefix = "serde_urlencoded-0.7.1",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.serde_urlencoded-0.7.1.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__sharded-slab-0.1.4",
sha256 = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/sharded-slab/0.1.4/download"],
strip_prefix = "sharded-slab-0.1.4",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.sharded-slab-0.1.4.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__signal-hook-registry-1.4.0",
sha256 = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/signal-hook-registry/1.4.0/download"],
strip_prefix = "signal-hook-registry-1.4.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.signal-hook-registry-1.4.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__slab-0.4.6",
sha256 = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/slab/0.4.6/download"],
strip_prefix = "slab-0.4.6",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.slab-0.4.6.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__smallvec-1.8.0",
sha256 = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/smallvec/1.8.0/download"],
strip_prefix = "smallvec-1.8.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.smallvec-1.8.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__socket2-0.4.4",
sha256 = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/socket2/0.4.4/download"],
strip_prefix = "socket2-0.4.4",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.socket2-0.4.4.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__syn-1.0.95",
sha256 = "fbaf6116ab8924f39d52792136fb74fd60a80194cf1b1c6ffa6453eef1c3f942",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/syn/1.0.95/download"],
strip_prefix = "syn-1.0.95",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.syn-1.0.95.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__sync_wrapper-0.1.1",
sha256 = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/sync_wrapper/0.1.1/download"],
strip_prefix = "sync_wrapper-0.1.1",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.sync_wrapper-0.1.1.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__thread_local-1.1.4",
sha256 = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/thread_local/1.1.4/download"],
strip_prefix = "thread_local-1.1.4",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.thread_local-1.1.4.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tokio-1.16.1",
sha256 = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tokio/1.16.1/download"],
strip_prefix = "tokio-1.16.1",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-1.16.1.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tokio-macros-1.7.0",
sha256 = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tokio-macros/1.7.0/download"],
strip_prefix = "tokio-macros-1.7.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-macros-1.7.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tokio-util-0.7.2",
sha256 = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tokio-util/0.7.2/download"],
strip_prefix = "tokio-util-0.7.2",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tokio-util-0.7.2.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tower-0.4.12",
sha256 = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tower/0.4.12/download"],
strip_prefix = "tower-0.4.12",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-0.4.12.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tower-http-0.2.5",
sha256 = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tower-http/0.2.5/download"],
strip_prefix = "tower-http-0.2.5",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-http-0.2.5.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tower-layer-0.3.1",
sha256 = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tower-layer/0.3.1/download"],
strip_prefix = "tower-layer-0.3.1",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-layer-0.3.1.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tower-service-0.3.1",
sha256 = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tower-service/0.3.1/download"],
strip_prefix = "tower-service-0.3.1",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tower-service-0.3.1.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tracing-0.1.34",
sha256 = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tracing/0.1.34/download"],
strip_prefix = "tracing-0.1.34",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-0.1.34.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tracing-attributes-0.1.21",
sha256 = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tracing-attributes/0.1.21/download"],
strip_prefix = "tracing-attributes-0.1.21",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-attributes-0.1.21.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tracing-core-0.1.26",
sha256 = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tracing-core/0.1.26/download"],
strip_prefix = "tracing-core-0.1.26",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-core-0.1.26.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tracing-log-0.1.3",
sha256 = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tracing-log/0.1.3/download"],
strip_prefix = "tracing-log-0.1.3",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-log-0.1.3.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__tracing-subscriber-0.3.11",
sha256 = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/tracing-subscriber/0.3.11/download"],
strip_prefix = "tracing-subscriber-0.3.11",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.tracing-subscriber-0.3.11.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__try-lock-0.2.3",
sha256 = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/try-lock/0.2.3/download"],
strip_prefix = "try-lock-0.2.3",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.try-lock-0.2.3.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__unicode-ident-1.0.0",
sha256 = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/unicode-ident/1.0.0/download"],
strip_prefix = "unicode-ident-1.0.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.unicode-ident-1.0.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__valuable-0.1.0",
sha256 = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/valuable/0.1.0/download"],
strip_prefix = "valuable-0.1.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.valuable-0.1.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__want-0.3.0",
sha256 = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/want/0.3.0/download"],
strip_prefix = "want-0.3.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.want-0.3.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__winapi-0.3.9",
sha256 = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/winapi/0.3.9/download"],
strip_prefix = "winapi-0.3.9",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-0.3.9.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__winapi-i686-pc-windows-gnu-0.4.0",
sha256 = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/winapi-i686-pc-windows-gnu/0.4.0/download"],
strip_prefix = "winapi-i686-pc-windows-gnu-0.4.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-i686-pc-windows-gnu-0.4.0.bazel"),
)
maybe(
http_archive,
name = "crates_vendor_pkgs__winapi-x86_64-pc-windows-gnu-0.4.0",
sha256 = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f",
type = "tar.gz",
urls = ["https://crates.io/api/v1/crates/winapi-x86_64-pc-windows-gnu/0.4.0/download"],
strip_prefix = "winapi-x86_64-pc-windows-gnu-0.4.0",
build_file = Label("@examples//vendor_remote_pkgs/crates:BUILD.winapi-x86_64-pc-windows-gnu-0.4.0.bazel"),
)
| true | true |
1c45f0f1abed23f91b8387f0e241a1dcfc49a84b | 3,035 | py | Python | examples/neurospin/histogram_fits.py | fperez/nipy | 559f17150bd9fa8ead4fd088b330d7cf7db7aa79 | [
"BSD-3-Clause"
] | 1 | 2015-05-07T16:53:33.000Z | 2015-05-07T16:53:33.000Z | examples/neurospin/histogram_fits.py | fperez/nipy | 559f17150bd9fa8ead4fd088b330d7cf7db7aa79 | [
"BSD-3-Clause"
] | null | null | null | examples/neurospin/histogram_fits.py | fperez/nipy | 559f17150bd9fa8ead4fd088b330d7cf7db7aa79 | [
"BSD-3-Clause"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
__doc__ = \
"""
Example of a script that perfoms histogram analysis of an activation
image, to estimate activation Z-score with various heuristics:
* Gamma-Gaussian model
* Gaussian mixture model
* Empirical normal null
This example is based on a (simplistic) simulated image.
"""
# Author : Bertrand Thirion, Gael Varoquaux 2008-2009
print __doc__
import numpy as np
import nipy.neurospin.utils.simul_multisubject_fmri_dataset as simul
import nipy.neurospin.utils.emp_null as en
################################################################################
# simulate the data
dimx = 60
dimy = 60
pos = 2*np.array([[6,7],[10,10],[15,10]])
ampli = np.array([3,4,4])
dataset = simul.surrogate_2d_dataset(nbsubj=1, dimx=dimx, dimy=dimy, pos=pos,
ampli=ampli, width=10.0).squeeze()
import pylab as pl
fig = pl.figure(figsize=(12, 10))
pl.subplot(3, 3, 1)
pl.imshow(dataset, cmap=pl.cm.hot)
pl.colorbar()
pl.title('Raw data')
Beta = dataset.ravel().squeeze()
################################################################################
# fit Beta's histogram with a Gamma-Gaussian mixture
gam_gaus_pp = en.Gamma_Gaussian_fit(Beta, Beta)
gam_gaus_pp = np.reshape(gam_gaus_pp, (dimx, dimy, 3))
pl.figure(fig.number)
pl.subplot(3, 3, 4)
pl.imshow(gam_gaus_pp[..., 0], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n first component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 5)
pl.imshow(gam_gaus_pp[..., 1], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n second component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 6)
pl.imshow(gam_gaus_pp[..., 2], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n third component posterior proba.')
pl.colorbar()
################################################################################
# fit Beta's histogram with a mixture of Gaussians
alpha = 0.01
gaus_mix_pp = en.three_classes_GMM_fit(Beta, None,
alpha, prior_strength=100)
gaus_mix_pp = np.reshape(gaus_mix_pp, (dimx, dimy, 3))
pl.figure(fig.number)
pl.subplot(3, 3, 7)
pl.imshow(gaus_mix_pp[..., 0], cmap=pl.cm.hot)
pl.title('Gaussian mixture,\n first component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 8)
pl.imshow(gaus_mix_pp[..., 1], cmap=pl.cm.hot)
pl.title('Gaussian mixture,\n second component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 9)
pl.imshow(gaus_mix_pp[..., 2], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n third component posterior proba.')
pl.colorbar()
################################################################################
# Fit the null mode of Beta with an empirical normal null
efdr = en.ENN(Beta)
emp_null_fdr = efdr.fdr(Beta)
emp_null_fdr = emp_null_fdr.reshape((dimx, dimy))
pl.subplot(3, 3, 3)
pl.imshow(1-emp_null_fdr, cmap=pl.cm.hot)
pl.colorbar()
pl.title('Empirical FDR\n ')
#efdr.plot()
#pl.title('Empirical FDR fit')
pl.show()
| 30.969388 | 80 | 0.629984 |
__doc__ = \
"""
Example of a script that perfoms histogram analysis of an activation
image, to estimate activation Z-score with various heuristics:
* Gamma-Gaussian model
* Gaussian mixture model
* Empirical normal null
This example is based on a (simplistic) simulated image.
"""
print __doc__
import numpy as np
import nipy.neurospin.utils.simul_multisubject_fmri_dataset as simul
import nipy.neurospin.utils.emp_null as en
| false | true |
1c45f11c3e11537796edf6b58ae0fb0bad91f88e | 10,785 | py | Python | code/scene.py | NoOneZero/coppelia | 14f589b361025506bf1dc2733edc5cf3ce27f45a | [
"Apache-2.0"
] | 1 | 2021-01-09T20:14:11.000Z | 2021-01-09T20:14:11.000Z | code/scene.py | NoOneZero/coppelia | 14f589b361025506bf1dc2733edc5cf3ce27f45a | [
"Apache-2.0"
] | null | null | null | code/scene.py | NoOneZero/coppelia | 14f589b361025506bf1dc2733edc5cf3ce27f45a | [
"Apache-2.0"
] | null | null | null | from code.character import Character
from code.spider import Spider
from code.neuro import Neuro
from code.excel import ExcelManager
from code.csv_manager import CscManager
import b0RemoteApi
import code.config as config
import time
from random import choices
import random
class Scene:
def __init__(self):
self.__set_connetcion_variables_to_lib_files()
self.__create_spiders()
self.__create_or_connect_to_file()
self.__create_neuro()
self.__set_parameters_of_neuro()
self.__set_parameters_for_loop_work()
def __set_connetcion_variables_to_lib_files(self):
self.python_client = 'b0RemoteApi_pythonClient'
self.remote_api = 'b0RemoteApi_first'
self.client = None
def __create_spiders(self):
# self.characters = []
# for i in range(config.NUMBER_OF_SPIDERS):
# self.characters.append(Character())
self.spiders = [Spider()]
for i in range(1, config.NUMBER_OF_SPIDERS):
self.spiders.append(Spider("#{}".format(i - 1)))
def __create_or_connect_to_file(self):
# self.excel = ExcelManager(name=config.FILE_NAME, size=len(self.spiders))
self.csv_manager = CscManager(name=config.FILE_NAME)
def __create_neuro(self):
self.neuro = []
self.neuro_father = Neuro()
self.neuro_mother = Neuro()
self.neuro.append(Neuro())
self.fitnes = [0] * len(self.spiders)
self.fitnes_radical = [0] * len(self.spiders)
for i in range(1, len(self.spiders)):
self.neuro.append(Neuro(mutant_power = 1))
# high, weigh = self.excel.read(0)
# if (high != None):
#
# count = 0
# for i in range(len(self.neuro[0].axon_weigh)):
# to_add = count
# for j in range(len(self.neuro[0].axon_weigh[i])):
# for k in range(len(self.neuro[0].axon_weigh[i][j])):
# count += 1
# self.neuro[0].axon_weigh[i][j][k] \
# = weigh[to_add + k + j * len(self.neuro[0].axon_weigh[i][j])]
#
# for w in range(1, len(self.spiders)):
# high, weigh = self.excel.read(w)
# self.neuro.append(Neuro())
# count = 0
# for i in range(len(self.neuro[w].axon_weigh)):
# to_add = count
# for j in range(len(self.neuro[w].axon_weigh[i])):
# for k in range(len(self.neuro[w].axon_weigh[i][j])):
# count += 1
# self.neuro[w].axon_weigh[i][j][k] \
# = weigh[to_add + k + j * len(self.neuro[w].axon_weigh[i][j])]
# else:
# for i in range(1, len(self.spiders)):
# self.neuro.append(Neuro(mutant_power=1))
def __set_parameters_of_neuro(self):
self.life_time = config.CYCLE_TIME
self.count_of_alive = config.COUNT_OF_ALIVE
self.mutation_power = config.MUTATION_POWER
def __set_parameters_for_loop_work(self):
self.do_next_step = True
self.flag = True
self.counter = 0
def start(self):
while True:
with b0RemoteApi.RemoteApiClient(self.python_client, self.remote_api) as self.client:
self.__add_method()
self.__add_objects()
self.__start_simulation()
self.__loop()
self.__finish_simulation()
self.__remake_neural_network()
time.sleep(1)
def __add_method(self):
self.client.simxSynchronous(True)
self.client.simxGetSimulationStepStarted(self.client.simxDefaultSubscriber(self.simulationStepStarted))
self.client.simxGetSimulationStepDone(self.client.simxDefaultSubscriber(self.simulationStepDone))
def __add_objects(self):
#err_hand_cube, self.obj_hund_cube = self.client.simxGetObjectHandle('Cuboid', self.client.simxServiceCall())
for spider in self.spiders:
spider.set_robot(self.client)
def __start_simulation(self):
self.client.simxStartSimulation(self.client.simxDefaultPublisher())
def __loop(self):
while self.flag:
if self.do_next_step:
self.do_next_step = False
self.client.simxSynchronousTrigger()
self.client.simxSpinOnce()
def __finish_simulation(self):
self.client.simxStopSimulation(self.client.simxDefaultPublisher())
def __remake_neural_network(self):
self.remake_neural()
def remake_neural(self):
self.counter = 0
self.flag = True
# self.fitnes = []
# self.fitnes_radical = []
# for i in range(len(self.spiders)):
# self.fitnes[i] += ((self.spiders[i].get_position()[1] + 10) / 20.0) * self.life_time
# self.fitnes_radical[i] += (self.spiders[i].get_position()[1]) * 10 * self.life_time
# if self.fitnes_radical[i] <= 0: self.fitnes_radical[i] = 0.001
# if self.fitnes[i] <= 0: self.fitnes[i] = 0.001
# print(self.fitnes[i], self.fitnes_radical[i])
#
#
# self.max = 0
# for i in range(1, len(self.spiders)):
# if (self.fitnes[i]> self.fitnes[self.max]):
# self.max = i
# print(max, self.spiders[self.max].get_position()[1])
# print("best: ", max, "//", len(self.neuro))
# for i in range(len(neuro_best.axon_weigh)):
# for j in range(len(neuro_best.axon_weigh[i])):
# print(i, j, neuro_best.axon_weigh[i][j])
# self.__make_parents()
# self.__make_who_not_die()
# self.__make_new_population()
# self.__make_mutation()
# self.__save_to_db()
def __make_parents(self):
self.__roulette()
def __tournament(self): pass
def __roulette(self):
index = []
for i in range(len(self.neuro)):
index.append(i)
print(index)
self.index_father = choices(index, weights = self.fitnes_radical, k = 1)[0]
self.index_mother = choices(index, weights = self.fitnes_radical, k = 1)[0]
while self.index_father == self.index_mother:
self.index_mother = choices(index, weights=self.fitnes_radical, k=1)[0]
self.neuro_father = self.neuro[self.index_father]
self.neuro_mother = self.neuro[self.index_mother]
def __make_who_not_die(self):
self.alive = []
index = []
for i in range(len(self.neuro)):
index.append(i)
print(index)
self.alive.append(choices(index, weights = self.fitnes_radical, k = 1)[0])
for i in range(self.count_of_alive - 1):
else_number = choices(index, weights = self.fitnes_radical, k = 1)[0]
while else_number in self.alive:
print("Same {}".format(else_number))
else_number = choices(index, weights=self.fitnes_radical, k=1)[0]
print("new {}, all {}".format(else_number, self.alive))
self.alive.append(else_number)
def __make_new_population(self):
neuro_new = []
for i in range(self.count_of_alive):
neuro_new.append(self.neuro[self.alive[i]])
self.neuro = neuro_new
for i in range(self.count_of_alive, len(self.spiders)):
if random.random() > 0.5:
self.neuro.append(Neuro.randomize_new(self.neuro_father, self.neuro_mother))
else:
self.neuro.append(Neuro.randomize_new(self.neuro_mother, self.neuro_father))
for i in range(len(self.spiders)):
self.spiders[i].reset_position()
def __make_mutation(self):
for i in range(len(self.spiders)):
self.neuro[i].make_mutation(self.mutation_power)
print("Зроблена мутація")
def __save_to_db(self):
print("Почався запис в ексель")
# self.excel.write_data2D_best(self.fitnes[self.max], self.neuro[self.max].axon_weigh)
# self.excel.write_data2D_father(self.fitnes[self.index_father], self.neuro_father.axon_weigh)
# self.excel.write_data2D_mother(self.fitnes[self.index_mother], self.neuro_mother.axon_weigh)
dict_of_data = {"max fitnes" : self.fitnes[self.max]}
for i in range(len(self.spiders)):
dict_of_data["fit {}".format(i)] = self.fitnes[i]
for i in range(len(self.spiders)):
axon_line = self.neuro[i].axon_line()
for j in range(len(axon_line)):
dict_of_data["s{}a{}".format(i, j)] = axon_line[j]
self.csv_manager.extend_row_by_dicts()
self.csv_manager.write_sometimes()
print("Завершився запис в ексель")
def simulationStepStarted(self, msg):
simTime = msg[1][b'simulationTime']
print('Simulation step started', simTime)
counter = 0
normal_angle = (0, -1.5707963705062866, 0)
normal_z = 0.088
for spider in self.spiders:
spider.receive_position(self.client)
self.fitnes[counter] += 5 + \
- abs(self.spiders[counter].get_rotation()[0] - normal_angle[0]) \
- abs(self.spiders[counter].get_rotation()[1] - normal_angle[2]) \
- abs(self.spiders[counter].get_rotation()[2] - normal_angle[2]) \
- 5 * abs(self.spiders[counter].get_position()[2] - normal_z)
self.fitnes_radical[counter] += 5 + \
- 1.2 * abs(self.spiders[counter].get_rotation()[0] - normal_angle[0]) \
- 1.2 * abs(self.spiders[counter].get_rotation()[1] - normal_angle[2]) \
- 1.2 * abs(self.spiders[counter].get_rotation()[2] - normal_angle[2]) \
- 6 * abs(self.spiders[counter].get_position()[2] - normal_z)
print("spin", self.spiders[counter].get_rotation(), self.fitnes[counter], self.fitnes_radical[counter])
counter += 1
def simulationStepDone(self, msg):
simTime = msg[1][b'simulationTime']
print('Simulation step done. Simulation time: ', simTime)
for i in range(len(self.spiders)):
self.spiders[i].move(self.client, output_data = self.neuro[i]._calculate(self.spiders[i].get_all()))
self.do_next_step = True
self.fitnes = [0] * len(self.spiders)
self.fitnes_radical = [0] * len(self.spiders)
self.__timer()
def __timer(self):
self.counter += 1
if self.counter > self.life_time:
self.flag = False
print(self.counter, "//", self.life_time)
| 41.964981 | 117 | 0.590357 | from code.character import Character
from code.spider import Spider
from code.neuro import Neuro
from code.excel import ExcelManager
from code.csv_manager import CscManager
import b0RemoteApi
import code.config as config
import time
from random import choices
import random
class Scene:
def __init__(self):
self.__set_connetcion_variables_to_lib_files()
self.__create_spiders()
self.__create_or_connect_to_file()
self.__create_neuro()
self.__set_parameters_of_neuro()
self.__set_parameters_for_loop_work()
def __set_connetcion_variables_to_lib_files(self):
self.python_client = 'b0RemoteApi_pythonClient'
self.remote_api = 'b0RemoteApi_first'
self.client = None
def __create_spiders(self):
self.spiders = [Spider()]
for i in range(1, config.NUMBER_OF_SPIDERS):
self.spiders.append(Spider("#{}".format(i - 1)))
def __create_or_connect_to_file(self):
self.csv_manager = CscManager(name=config.FILE_NAME)
def __create_neuro(self):
self.neuro = []
self.neuro_father = Neuro()
self.neuro_mother = Neuro()
self.neuro.append(Neuro())
self.fitnes = [0] * len(self.spiders)
self.fitnes_radical = [0] * len(self.spiders)
for i in range(1, len(self.spiders)):
self.neuro.append(Neuro(mutant_power = 1))
def __set_parameters_of_neuro(self):
self.life_time = config.CYCLE_TIME
self.count_of_alive = config.COUNT_OF_ALIVE
self.mutation_power = config.MUTATION_POWER
def __set_parameters_for_loop_work(self):
self.do_next_step = True
self.flag = True
self.counter = 0
def start(self):
while True:
with b0RemoteApi.RemoteApiClient(self.python_client, self.remote_api) as self.client:
self.__add_method()
self.__add_objects()
self.__start_simulation()
self.__loop()
self.__finish_simulation()
self.__remake_neural_network()
time.sleep(1)
def __add_method(self):
self.client.simxSynchronous(True)
self.client.simxGetSimulationStepStarted(self.client.simxDefaultSubscriber(self.simulationStepStarted))
self.client.simxGetSimulationStepDone(self.client.simxDefaultSubscriber(self.simulationStepDone))
def __add_objects(self):
for spider in self.spiders:
spider.set_robot(self.client)
def __start_simulation(self):
self.client.simxStartSimulation(self.client.simxDefaultPublisher())
def __loop(self):
while self.flag:
if self.do_next_step:
self.do_next_step = False
self.client.simxSynchronousTrigger()
self.client.simxSpinOnce()
def __finish_simulation(self):
self.client.simxStopSimulation(self.client.simxDefaultPublisher())
def __remake_neural_network(self):
self.remake_neural()
def remake_neural(self):
self.counter = 0
self.flag = True
def __make_parents(self):
self.__roulette()
def __tournament(self): pass
def __roulette(self):
index = []
for i in range(len(self.neuro)):
index.append(i)
print(index)
self.index_father = choices(index, weights = self.fitnes_radical, k = 1)[0]
self.index_mother = choices(index, weights = self.fitnes_radical, k = 1)[0]
while self.index_father == self.index_mother:
self.index_mother = choices(index, weights=self.fitnes_radical, k=1)[0]
self.neuro_father = self.neuro[self.index_father]
self.neuro_mother = self.neuro[self.index_mother]
def __make_who_not_die(self):
self.alive = []
index = []
for i in range(len(self.neuro)):
index.append(i)
print(index)
self.alive.append(choices(index, weights = self.fitnes_radical, k = 1)[0])
for i in range(self.count_of_alive - 1):
else_number = choices(index, weights = self.fitnes_radical, k = 1)[0]
while else_number in self.alive:
print("Same {}".format(else_number))
else_number = choices(index, weights=self.fitnes_radical, k=1)[0]
print("new {}, all {}".format(else_number, self.alive))
self.alive.append(else_number)
def __make_new_population(self):
neuro_new = []
for i in range(self.count_of_alive):
neuro_new.append(self.neuro[self.alive[i]])
self.neuro = neuro_new
for i in range(self.count_of_alive, len(self.spiders)):
if random.random() > 0.5:
self.neuro.append(Neuro.randomize_new(self.neuro_father, self.neuro_mother))
else:
self.neuro.append(Neuro.randomize_new(self.neuro_mother, self.neuro_father))
for i in range(len(self.spiders)):
self.spiders[i].reset_position()
def __make_mutation(self):
for i in range(len(self.spiders)):
self.neuro[i].make_mutation(self.mutation_power)
print("Зроблена мутація")
def __save_to_db(self):
print("Почався запис в ексель")
dict_of_data = {"max fitnes" : self.fitnes[self.max]}
for i in range(len(self.spiders)):
dict_of_data["fit {}".format(i)] = self.fitnes[i]
for i in range(len(self.spiders)):
axon_line = self.neuro[i].axon_line()
for j in range(len(axon_line)):
dict_of_data["s{}a{}".format(i, j)] = axon_line[j]
self.csv_manager.extend_row_by_dicts()
self.csv_manager.write_sometimes()
print("Завершився запис в ексель")
def simulationStepStarted(self, msg):
simTime = msg[1][b'simulationTime']
print('Simulation step started', simTime)
counter = 0
normal_angle = (0, -1.5707963705062866, 0)
normal_z = 0.088
for spider in self.spiders:
spider.receive_position(self.client)
self.fitnes[counter] += 5 + \
- abs(self.spiders[counter].get_rotation()[0] - normal_angle[0]) \
- abs(self.spiders[counter].get_rotation()[1] - normal_angle[2]) \
- abs(self.spiders[counter].get_rotation()[2] - normal_angle[2]) \
- 5 * abs(self.spiders[counter].get_position()[2] - normal_z)
self.fitnes_radical[counter] += 5 + \
- 1.2 * abs(self.spiders[counter].get_rotation()[0] - normal_angle[0]) \
- 1.2 * abs(self.spiders[counter].get_rotation()[1] - normal_angle[2]) \
- 1.2 * abs(self.spiders[counter].get_rotation()[2] - normal_angle[2]) \
- 6 * abs(self.spiders[counter].get_position()[2] - normal_z)
print("spin", self.spiders[counter].get_rotation(), self.fitnes[counter], self.fitnes_radical[counter])
counter += 1
def simulationStepDone(self, msg):
simTime = msg[1][b'simulationTime']
print('Simulation step done. Simulation time: ', simTime)
for i in range(len(self.spiders)):
self.spiders[i].move(self.client, output_data = self.neuro[i]._calculate(self.spiders[i].get_all()))
self.do_next_step = True
self.fitnes = [0] * len(self.spiders)
self.fitnes_radical = [0] * len(self.spiders)
self.__timer()
def __timer(self):
self.counter += 1
if self.counter > self.life_time:
self.flag = False
print(self.counter, "//", self.life_time)
| true | true |
1c45f2188bc7857583ffee040cd434578399dbd7 | 24,516 | py | Python | modules/webgrid2.py | dedebf/trilhas-poeticas-web2py-application | 61b28a60143a8bdce84a9fd8511f6b4504a34f33 | [
"MIT"
] | null | null | null | modules/webgrid2.py | dedebf/trilhas-poeticas-web2py-application | 61b28a60143a8bdce84a9fd8511f6b4504a34f33 | [
"MIT"
] | null | null | null | modules/webgrid2.py | dedebf/trilhas-poeticas-web2py-application | 61b28a60143a8bdce84a9fd8511f6b4504a34f33 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
WebGrid for web2py
Developed by Nathan Freeze (Copyright © 2009)
Email <nathan@freezable.com>
License: GPL v2
This file contains code to build a table that supports
paging, sorting, editing and totals.
"""
##################----- WEBGRID2: created to alter the page_total function so an image is placed instead of a link -----##############
from gluon.sql import Rows, Field, Set
from gluon.sqlhtml import *
from gluon.html import *
from gluon.storage import *
class Webgrid2(object):
def __init__(self, crud, name=None, datasource=None):
self.crud = crud
self.environment = crud.environment
self.name = name
self.css_prefix = None
self.id = None
self.datasource = datasource
self.crud_function = 'data'
self.download_function = 'download'
self.messages = Messages(self.crud.environment.T)
self.messages.confirm_delete = 'Are you sure?'
self.messages.no_records = 'No records'
self.messages.add_link = '[add %s]'
self.messages.edit_link = 'edit'
self.messages.delete_link = 'delete'
self.messages.view_link = 'view'
self.messages.file_link = 'anexo'
self.messages.page_info = 'page %(pagenum)s of %(pagecount)s (total records: %(total)s)'
self.messages.page_total = "Total:"
self.messages.filter = 'Filtrar'
self.messages.pagesize = ' pagesize: '
self.messages.previous_page = '<-prev-'
self.messages.next_page = '-next->'
self.action_links = ['view', 'edit', 'delete']
self.action_headers = ['view', 'edit', 'delete']
self.field_headers = self.fields = self.totals = []
self.enabled_rows = ['header', 'filter', 'pager', 'totals',
'footer', 'add_links']
self.allowed_vars = ['pagesize', 'pagenum', 'sortby',
'ascending', 'groupby', 'totals']
self.pagenum = self.pagecount = self.pagesize = 0
self.sortby = self.groupby = self.page_total = self.filters = None
self.view_link = self.edit_link = self.delete_link = None
self.add_links = self.action_header = None
self.header = self.filter = self.footer = None
self.pager = self.datarow = None
self.pageinfo_separator = ' - '
self.pagesizes = [10,20,30,40,50]
self.ascending = False
self.row_created = None
self.filter_query = lambda field,value: field==value
self.filter_items_query = lambda field: field['id'] > 0
self.filter_cache = None
self.total_function = lambda fieldvalues: sum(fieldvalues)
def get_header(self, c):
try:
return self.field_headers[self.fields.index(c)]
except:
return c
def get_value(self, f, r):
(_t, _f) = f.split('.')
v = r[_t][_f] if self.joined else r[_f]
return v
def update_filters(self,vrs,flt):
if not flt:
return
for k, v in flt.items():
vrs[self.name + '_filter-'+k] = v
    def __call__(self):
        """Build and return the grid as a web2py FORM.

        Reads paging/sorting/filter state from request vars, fetches matching
        rows from ``self.datasource`` (Rows, Set, Table or list of Tables) and
        renders header, filter, data, totals, pager and footer rows.
        NOTE: this is legacy Python-2 web2py code (uses ``xrange``).
        """
        request = self.crud.environment.request
        db = self.crud.db
        datasource = self.datasource
        # Name/css/id default to the current controller function name.
        if not self.name:
            self.name = self.crud.environment.request.function
        if not self.css_prefix:
            self.css_prefix = self.name
        if not self.id:
            self.id = self.name
        # Set defaults
        # Pull grid state (<name>_pagesize, <name>_pagenum, ...) from the URL,
        # honoring only the keys listed in self.allowed_vars.
        vars = request.get_vars
        allowed = self.allowed_vars
        name = self.name
        if getattr(vars,name+'_pagesize') and 'pagesize' in allowed:
            self.pagesize = int(vars[name+'_pagesize'])
        if not self.pagesize:
            self.pagesize = 10
        if getattr(vars,name+'_pagenum') and 'pagenum' in allowed:
            self.pagenum = int(vars[name+'_pagenum'])
        if not self.pagenum:
            self.pagenum = 1
        if getattr(vars,name+'_sortby') and 'sortby' in allowed:
            self.sortby = vars[name+'_sortby']
        if getattr(vars,name+'_groupby') and 'groupby' in allowed:
            self.groupby = vars[name+'_groupby']
        if getattr(vars,name+'_totals') and 'totals' in allowed:
            self.totals = vars[name+'_totals']
        if getattr(vars,name+'_ascending') and 'ascending' in allowed:
            self.ascending = vars[name+'_ascending'] == "True"
        page = sortby = groupby = query = None
        filters = dict()
        #Build filters
        # Collect '<name>_filter-<table.field>' request vars into `filters`
        # and AND the corresponding field==value queries together.
        if 'filter' in self.enabled_rows:
            if request.post_vars:
                request.vars.update(request.post_vars)
            for k, v in request.vars.items():
                if isinstance(v,list):
                    v = v[0]
                if name + '_filter-' in k:
                    tf = k.split('-')[-1]
                    filters[tf] = v
            for k, v in filters.items():
                # '0' is the sentinel "no filter" option of the SELECT.
                if v=='0': continue
                (ft,ff) = k.split('.')
                fld = db[ft][ff]
                if query:
                    query &= self.filter_query(fld,v)
                else:
                    query = self.filter_query(fld,v)
            # Jump back to page 1 when the filter form was just submitted.
            if filters and request.vars.get(name+'_submit_filter'):
                self.pagenum = 1
        # Build limitby
        if self.pagesize > 0:
            pagenum = self.pagenum - 1
            page = (self.pagesize * pagenum,
                    self.pagesize * pagenum + self.pagesize)
        else:
            self.pagenum = 0
        # Build sortby
        if self.sortby:
            if isinstance(self.sortby, Field):
                (ts, fs) = (self.sortby._tablename, self.sortby.name)
            else:
                (ts, fs) = self.sortby.split('.')
            if self.ascending:
                sortby = db[ts][fs]
            else:
                sortby = ~db[ts][fs]
        if self.groupby:
            if isinstance(self.groupby, Field):
                (tg, fg) = (self.groupby._tablename, self.groupby.name)
            else:
                (tg, fg) = self.groupby.split('.')
            groupby = db[tg][fg]
        # Get rows
        # Each datasource kind is filtered/sorted/paged differently:
        # Rows in Python, Set/Table/list-of-Tables at the DAL level.
        rows = total = None
        if isinstance(datasource, Rows):
            rows = datasource
            joined = len(set(map(lambda c: c.split('.')[0], rows.colnames))) > 1
            for k,v in filters.items():
                if v=='0': continue
                (flt_t,flt_f) = k.split('.')
                if joined:
                    rows = rows.find(lambda row: row[flt_t][flt_f]==v)
                else:
                    rows = rows.find(lambda row: row[flt_f]==v)
            total = len(rows)
            if sortby and joined:
                rows = rows.sort(lambda row: row[ts][fs], reverse=self.ascending)
            elif sortby:
                rows = rows.sort(lambda row: row[fs], reverse=self.ascending)
            if self.pagesize > 0:
                rows = rows[page[0]:page[1]]
        elif isinstance(datasource, Set):
            if query:
                datasource = datasource(query)
            # An id column is required for the action links; add it
            # temporarily when the caller did not request one.
            id_in_fields = [f for f in self.fields if
                            f.split('.')[-1] == 'id']
            idfield = self.fields[0].split('.')[0] +'.id'
            if not id_in_fields:
                self.fields.append(idfield)
            rows = datasource.select(limitby=page, orderby=sortby,
                                     groupby=groupby, *self.fields)
            if not id_in_fields:
                self.fields.remove(idfield)
            total = datasource.count()
        elif isinstance(datasource, Table):
            rows = db(query).select(datasource.ALL, limitby=page,
                                    orderby=sortby, groupby=groupby)
            total = db(datasource.id > 0).count()
        elif isinstance(datasource, list) and isinstance(datasource[0], Table):
            rows = db(query).select(limitby=page, orderby=sortby, groupby=groupby,
                                    *[t.ALL for t in datasource])
            total = db(datasource[0].id > 0).count()
        else:
            raise AttributeError("Invalid datasource for WebGrid")
        self.tablenames = list(set(map(lambda c: c.split('.')[0], rows.colnames)))
        joined = len(self.tablenames) > 1
        self.response = rows
        self.colnames = rows.colnames
        self.joined = joined
        self.total = total
        # Normalize fields/filters/totals to 'table.field' strings and
        # derive headers from field labels when not supplied.
        if not self.fields:
            self.fields = rows.colnames
        if isinstance(self.fields[0], Field):
            self.fields = ['%s.%s' % (f._tablename, f.name) for f in self.fields]
        if self.filters and isinstance(self.filters[0],Field):
            self.filters = ['%s.%s' % (f._tablename, f.name) for f in self.filters]
        if self.totals and isinstance(self.totals[0], Field):
            self.totals = ['%s.%s' % (f._tablename, f.name) for f in self.totals]
        if not self.filters:
            self.filters = self.fields
        if not self.field_headers:
            self.field_headers = []
            for f in self.fields:
                (t,f) = f.split('.')
                field = db[t][f]
                if hasattr(field,'label'):
                    self.field_headers.append(field.label)
                else:
                    lbl = f.split('.')[1].replace("_", " ").capitalize()
                    self.field_headers.append(lbl)
        if not self.action_headers:
            self.action_headers = self.action_links
        # Default CRUD action links (view/edit/delete) point at the crud
        # controller with the first table's record id.
        if not self.view_link and 'view' in self.action_links:
            self.view_link = lambda row: A(self.messages.view_link, _href=self.crud.url(f=self.crud_function,
                                            args=['read', self.tablenames[0],
                                            row[self.tablenames[0]]['id'] \
                                            if self.joined else row['id']]))
        if not self.edit_link and 'edit' in self.action_links:
            self.edit_link = lambda row: A(self.messages.edit_link, _href=self.crud.url(f=self.crud_function,
                                            args=['update', self.tablenames[0],
                                            row[self.tablenames[0]]['id'] \
                                            if self.joined else row['id']]))
        if not self.delete_link and 'delete' in self.action_links:
            self.delete_link = lambda row: A(self.messages.delete_link, _href=self.crud.url(f=self.crud_function,
                                            args=['delete', self.tablenames[0],
                                            row[self.tablenames[0]]['id'] \
                                            if self.joined else row['id']]),
                                            _onclick="return confirm('%s');" % \
                                            self.messages.confirm_delete)
        if not self.add_links and 'add_links' in self.enabled_rows:
            self.add_links = lambda tables: TR(TD([A(self.messages.add_link % t,
                                             _href=self.crud.url(f=self.crud_function,
                                             args=['create', t])) for t in self.tablenames],
                                             _colspan=len(self.action_headers)+
                                             len(self.field_headers)),
                                             _class='-webgrid add_links')
        # Default header row: one sort link per column, toggling direction.
        if not self.header and 'header' in self.enabled_rows:
            def header(fields):
                thead = TR([TH(c) for c in self.action_headers],
                           _class='-webgrid header')
                for f in fields:
                    vars = dict(request.get_vars)
                    self.update_filters(vars,filters)
                    vars[name+'_pagenum'] = 1
                    vars[name+'_sortby'] = f
                    vars[name+'_ascending'] = not self.ascending
                    href = URL(r=request,vars=vars,args=request.args)
                    th = TH(A(self.get_header(f),_href=href))
                    thead.components.append(th)
                return thead
            self.header = header
        # Default filter row: a SELECT of distinct values per filterable column.
        if not self.filter and 'filter' in self.enabled_rows:
            def filter(fields):
                tr = TR([TD('') for c in self.action_links],
                        _class='-webgrid filter')
                if self.action_links:
                    tr.components[-1] = TD(INPUT(_type='submit',
                                                 _value=self.messages.filter,
                                                 _name=name+'_submit_filter',
                                                 _class="btn-sm")) #fix this
                for f in fields:
                    if not f in self.filters:
                        tr.components.append(TD(''))
                        continue
                    (tf,ff) = f.split('.')
                    curfld = db[tf][ff]
                    # Binary/file columns cannot be filtered.
                    if curfld.type=='upload' or curfld.type=='blob':
                        continue
                    vals = db(self.filter_items_query(db[tf])).select(db[tf]['id'],curfld,
                                                     cache=self.filter_cache)
                    dval = filters.get(f)
                    prev = []
                    opts = []
                    # Build de-duplicated OPTIONs, using field.represent for
                    # display when available.
                    for v in vals:
                        opt = None
                        if curfld.type.startswith('reference '):
                            if curfld.represent:
                                rp = curfld.represent(v[ff])
                                if rp and not rp in prev:
                                    opt = OPTION(rp, _value=v[ff])
                                    prev.append(rp)
                            else:
                                v = v[ff]
                                if v and not v in prev:
                                    opt = OPTION(v,_value=v)
                                    prev.append(v)
                        elif curfld.represent:
                            rp = curfld.represent(v[ff])
                            if rp and not rp in prev:
                                opt = OPTION(rp, _value=rp)
                                prev.append(rp)
                        else:
                            if v[ff] and not v[ff] in prev:
                                opt = OPTION(v[ff], _value=v[ff])
                                prev.append(v[ff])
                        if opt:
                            opts.append(opt)
                    opts.sort(key=lambda x: x.components[0])
                    inp = SELECT(opts, _name = name+'_filter-'+f,value=dval)
                    # '0' sentinel = no filtering on this column.
                    inp.components.insert(0,OPTION('',_value='0'))
                    tr.components.append(TD(inp))
                return tr
            self.filter = filter
        # Default footer: page info plus pagesize links.
        if not self.footer and 'footer' in self.enabled_rows:
            def footer(fields):
                pageinfo = pagesize = ''
                pagelinks = SPAN(self.messages.pagesize)
                if not self.groupby:
                    vars = dict(request.get_vars)
                    self.update_filters(vars,filters)
                    for p in self.pagesizes:
                        vars[name+'_pagesize'] = p
                        vars[name+'_pagenum'] = 1
                        lnk = A(str(p),' ',_href=URL(r=request,args=request.args,
                                                     vars=vars))
                        pagelinks.components.append(lnk)
                    pageinfo = self.messages.page_info % {'pagenum':self.pagenum,
                                                          'pagecount':self.pagecount,
                                                          'total':self.total}
                tr = TR(_class='-webgrid footer')
                td = TD(pageinfo,self.pageinfo_separator,pagelinks,
                        _colspan=len(self.fields) + len(self.action_links))
                tr.components.append(td)
                return tr
            self.footer = footer
        # Default pager: prev/next plus one link per page.
        if not self.pager and 'pager' in self.enabled_rows:
            def pager(pagecount):
                vars = dict(request.get_vars)
                self.update_filters(vars,filters)
                prev = A(self.messages.previous_page, _href="#")
                next = A(self.messages.next_page, _href="#")
                # `pagenum` here is the zero-based value from the enclosing
                # scope; only defined when pagesize > 0, so the short-circuit
                # order of this condition matters.
                if self.pagesize > 0 and pagenum > 0:
                    vars[name+'_pagenum'] = self.pagenum - 1
                    prev = A(B(self.messages.previous_page),
                             _href=URL(r=request,vars=vars,args=request.args))
                if self.pagesize > 0 and self.pagenum < pagecount and \
                    len(self.response) >= self.pagesize:
                    vars[name+'_pagenum'] = self.pagenum + 1
                    next = A(B(self.messages.next_page),
                             _href=URL(r=request,vars=vars,args=request.args))
                tr = TR(_class='-webgrid pager')
                td = TD(prev,_colspan=len(self.fields) + len(self.action_links) )
                for x in xrange(1, pagecount + 1):
                    if not self.groupby:
                        vars[name+'_pagenum'] = x
                    href = URL(r=request,vars=vars,args=request.args)
                    td.components.append(A(x,'-',_href=href))
                td.components.append(next)
                tr.components.append(td)
                return tr
            self.pager = pager
        # Default totals row: sum (via self.total_function) of each column
        # listed in self.totals over the rows of the current page.
        if not self.page_total and 'totals' in self.enabled_rows:
            def page_total():
                pagetotal = TR(['' for l in self.action_links],
                               _class='-webgrid totals')
                if self.action_links:
                    pagetotal.components[-1] = TD(self.messages.page_total)
                for f in self.fields:
                    if f in self.totals:
                        fieldvalues = [self.get_value(f, r) for r in self.response]
                        fieldtotal = self.total_function(fieldvalues)
                        pagetotal.components.append(TD(fieldtotal))
                    else:
                        pagetotal.components.append(TD())
                return pagetotal
            self.page_total = page_total
        if not self.action_links:
            if self.totals or self.filters:
                self.action_links = ['delete']
                self.action_headers = ['']
                self.delete_link = lambda row: ' '
        table_field = re.compile('[\w_]+\.[\w_]+')
        table = TABLE(_id=self.id, _class="table-striped")
        # Assemble the table: header, filter, data rows, then footer rows.
        if 'header' in self.enabled_rows:
            _row = self.header(self.fields)
            if self.row_created:
                self.row_created(_row,'header',None)
            table.components.append(THEAD(_row))
        if 'filter' in self.enabled_rows:
            _row = self.filter(self.fields)
            if self.row_created:
                self.row_created(_row,'filter',None)
            table.components.append(_row)
        if len(rows) == 0:
            table.components.append(TR(TD(self.messages.no_records,
                                      _colspan=len(self.fields) + len(self.action_links),
                                      _style="text-align:center;")))
        for (rc, row) in enumerate(rows):
            # A custom datarow callback completely replaces the default cell
            # rendering for this row.
            if self.datarow:
                _row = self.datarow(row)
                if self.row_created:
                    self.row_created(_row,'datarow',row)
                table.components.append(_row)
                continue
            _class = 'even' if rc % 2 == 0 else 'odd'
            tr = TR(_class='-webgrid-row %s' % _class)
            if 'view' in self.action_links:
                tr.components.append(TD(self.view_link(row),
                                        _class='-webgrid view_link'))
            if 'edit' in self.action_links:
                tr.components.append(TD(self.edit_link(row),
                                        _class='-webgrid edit_link'))
            if 'delete' in self.action_links:
                tr.components.append(TD(self.delete_link(row),
                                        _class='-webgrid delete_link'))
            for colname in self.fields:
                # Computed/extra columns are not 'table.field' shaped.
                if not table_field.match(colname):
                    r = row._extra[colname]
                    tr.components.append(TD(r))
                    continue
                (tablename, fieldname) = colname.split('.')
                field = rows.db[tablename][fieldname]
                r = row[tablename][fieldname] if joined else row[fieldname]
                if field.represent:
                    r = field.represent(r)
                    tr.components.append(TD(r))
                    continue
                if field.type == 'blob' and r:
                    tr.components.append(TD('DATA'))
                    continue
                r = str(field.formatter(r))
                # Upload fields render an inline thumbnail linking to the
                # download action (the WEBGRID2 customization).
                if field.type == 'upload':
                    if r:
                        tr.components.append(TD(A(IMG(_src=URL(r=self.environment.request,
                              f=self.download_function, args=r), _width='82px'),
                              _href=URL(r=self.environment.request,
                              f=self.download_function, args=r))))
                    else:
                        tr.components.append(TD(self.messages.file_link))
                    continue
                tr.components.append(TD(r))
            if self.row_created:
                self.row_created(tr,'datarow',row)
            table.components.append(tr)
        # Ceil(total / pagesize) pages; a single page when paging is off.
        if self.pagesize > 0:
            pagecount = int(total / self.pagesize)
            if total % self.pagesize != 0: pagecount += 1
        else:
            pagecount = 1
        self.pagecount = pagecount
        footer_wrap = TFOOT()
        if 'totals' in self.enabled_rows and len(rows):
            _row = self.page_total()
            if self.row_created:
                self.row_created(_row,'totals',None)
            footer_wrap.components.append(_row)
        if 'add_links' in self.enabled_rows:
            _row = self.add_links(self.tablenames)
            if self.row_created:
                self.row_created(_row,'add_links',None)
            footer_wrap.components.append(_row)
        if 'pager' in self.enabled_rows and len(rows):
            _row = self.pager(pagecount)
            if self.row_created:
                self.row_created(_row,'pager',None)
            footer_wrap.components.append(_row)
        if 'footer' in self.enabled_rows and len(rows):
            _row = self.footer(self.fields)
            if self.row_created:
                self.row_created(_row,'footer',None)
            footer_wrap.components.append(_row)
        table.components.append(footer_wrap)
        return FORM(table,_class='webgrid',_name=name+'-webgrid-form')
def links_right(tablerow, rowtype, rowdata):
    """Move the three leading action cells of *tablerow* to the end.

    Pager rows are left untouched; *rowdata* is accepted for the
    row_created callback signature but unused.
    """
    if rowtype == 'pager':
        return
    action_cells = tablerow.components[:3]
    del tablerow.components[:3]
    tablerow.components.extend(action_cells)
| 45.653631 | 132 | 0.465206 |
=value
self.filter_items_query = lambda field: field['id'] > 0
self.filter_cache = None
self.total_function = lambda fieldvalues: sum(fieldvalues)
def get_header(self, c):
try:
return self.field_headers[self.fields.index(c)]
except:
return c
def get_value(self, f, r):
(_t, _f) = f.split('.')
v = r[_t][_f] if self.joined else r[_f]
return v
def update_filters(self,vrs,flt):
if not flt:
return
for k, v in flt.items():
vrs[self.name + '_filter-'+k] = v
def __call__(self):
request = self.crud.environment.request
db = self.crud.db
datasource = self.datasource
if not self.name:
self.name = self.crud.environment.request.function
if not self.css_prefix:
self.css_prefix = self.name
if not self.id:
self.id = self.name
vars = request.get_vars
allowed = self.allowed_vars
name = self.name
if getattr(vars,name+'_pagesize') and 'pagesize' in allowed:
self.pagesize = int(vars[name+'_pagesize'])
if not self.pagesize:
self.pagesize = 10
if getattr(vars,name+'_pagenum') and 'pagenum' in allowed:
self.pagenum = int(vars[name+'_pagenum'])
if not self.pagenum:
self.pagenum = 1
if getattr(vars,name+'_sortby') and 'sortby' in allowed:
self.sortby = vars[name+'_sortby']
if getattr(vars,name+'_groupby') and 'groupby' in allowed:
self.groupby = vars[name+'_groupby']
if getattr(vars,name+'_totals') and 'totals' in allowed:
self.totals = vars[name+'_totals']
if getattr(vars,name+'_ascending') and 'ascending' in allowed:
self.ascending = vars[name+'_ascending'] == "True"
page = sortby = groupby = query = None
filters = dict()
if 'filter' in self.enabled_rows:
if request.post_vars:
request.vars.update(request.post_vars)
for k, v in request.vars.items():
if isinstance(v,list):
v = v[0]
if name + '_filter-' in k:
tf = k.split('-')[-1]
filters[tf] = v
for k, v in filters.items():
if v=='0': continue
(ft,ff) = k.split('.')
fld = db[ft][ff]
if query:
query &= self.filter_query(fld,v)
else:
query = self.filter_query(fld,v)
if filters and request.vars.get(name+'_submit_filter'):
self.pagenum = 1
if self.pagesize > 0:
pagenum = self.pagenum - 1
page = (self.pagesize * pagenum,
self.pagesize * pagenum + self.pagesize)
else:
self.pagenum = 0
if self.sortby:
if isinstance(self.sortby, Field):
(ts, fs) = (self.sortby._tablename, self.sortby.name)
else:
(ts, fs) = self.sortby.split('.')
if self.ascending:
sortby = db[ts][fs]
else:
sortby = ~db[ts][fs]
if self.groupby:
if isinstance(self.groupby, Field):
(tg, fg) = (self.groupby._tablename, self.groupby.name)
else:
(tg, fg) = self.groupby.split('.')
groupby = db[tg][fg]
rows = total = None
if isinstance(datasource, Rows):
rows = datasource
joined = len(set(map(lambda c: c.split('.')[0], rows.colnames))) > 1
for k,v in filters.items():
if v=='0': continue
(flt_t,flt_f) = k.split('.')
if joined:
rows = rows.find(lambda row: row[flt_t][flt_f]==v)
else:
rows = rows.find(lambda row: row[flt_f]==v)
total = len(rows)
if sortby and joined:
rows = rows.sort(lambda row: row[ts][fs], reverse=self.ascending)
elif sortby:
rows = rows.sort(lambda row: row[fs], reverse=self.ascending)
if self.pagesize > 0:
rows = rows[page[0]:page[1]]
elif isinstance(datasource, Set):
if query:
datasource = datasource(query)
id_in_fields = [f for f in self.fields if
f.split('.')[-1] == 'id']
idfield = self.fields[0].split('.')[0] +'.id'
if not id_in_fields:
self.fields.append(idfield)
rows = datasource.select(limitby=page, orderby=sortby,
groupby=groupby, *self.fields)
if not id_in_fields:
self.fields.remove(idfield)
total = datasource.count()
elif isinstance(datasource, Table):
rows = db(query).select(datasource.ALL, limitby=page,
orderby=sortby, groupby=groupby)
total = db(datasource.id > 0).count()
elif isinstance(datasource, list) and isinstance(datasource[0], Table):
rows = db(query).select(limitby=page, orderby=sortby, groupby=groupby,
*[t.ALL for t in datasource])
total = db(datasource[0].id > 0).count()
else:
raise AttributeError("Invalid datasource for WebGrid")
self.tablenames = list(set(map(lambda c: c.split('.')[0], rows.colnames)))
joined = len(self.tablenames) > 1
self.response = rows
self.colnames = rows.colnames
self.joined = joined
self.total = total
if not self.fields:
self.fields = rows.colnames
if isinstance(self.fields[0], Field):
self.fields = ['%s.%s' % (f._tablename, f.name) for f in self.fields]
if self.filters and isinstance(self.filters[0],Field):
self.filters = ['%s.%s' % (f._tablename, f.name) for f in self.filters]
if self.totals and isinstance(self.totals[0], Field):
self.totals = ['%s.%s' % (f._tablename, f.name) for f in self.totals]
if not self.filters:
self.filters = self.fields
if not self.field_headers:
self.field_headers = []
for f in self.fields:
(t,f) = f.split('.')
field = db[t][f]
if hasattr(field,'label'):
self.field_headers.append(field.label)
else:
lbl = f.split('.')[1].replace("_", " ").capitalize()
self.field_headers.append(lbl)
if not self.action_headers:
self.action_headers = self.action_links
if not self.view_link and 'view' in self.action_links:
self.view_link = lambda row: A(self.messages.view_link, _href=self.crud.url(f=self.crud_function,
args=['read', self.tablenames[0],
row[self.tablenames[0]]['id'] \
if self.joined else row['id']]))
if not self.edit_link and 'edit' in self.action_links:
self.edit_link = lambda row: A(self.messages.edit_link, _href=self.crud.url(f=self.crud_function,
args=['update', self.tablenames[0],
row[self.tablenames[0]]['id'] \
if self.joined else row['id']]))
if not self.delete_link and 'delete' in self.action_links:
self.delete_link = lambda row: A(self.messages.delete_link, _href=self.crud.url(f=self.crud_function,
args=['delete', self.tablenames[0],
row[self.tablenames[0]]['id'] \
if self.joined else row['id']]),
_onclick="return confirm('%s');" % \
self.messages.confirm_delete)
if not self.add_links and 'add_links' in self.enabled_rows:
self.add_links = lambda tables: TR(TD([A(self.messages.add_link % t,
_href=self.crud.url(f=self.crud_function,
args=['create', t])) for t in self.tablenames],
_colspan=len(self.action_headers)+
len(self.field_headers)),
_class='-webgrid add_links')
if not self.header and 'header' in self.enabled_rows:
def header(fields):
thead = TR([TH(c) for c in self.action_headers],
_class='-webgrid header')
for f in fields:
vars = dict(request.get_vars)
self.update_filters(vars,filters)
vars[name+'_pagenum'] = 1
vars[name+'_sortby'] = f
vars[name+'_ascending'] = not self.ascending
href = URL(r=request,vars=vars,args=request.args)
th = TH(A(self.get_header(f),_href=href))
thead.components.append(th)
return thead
self.header = header
if not self.filter and 'filter' in self.enabled_rows:
def filter(fields):
tr = TR([TD('') for c in self.action_links],
_class='-webgrid filter')
if self.action_links:
tr.components[-1] = TD(INPUT(_type='submit',
_value=self.messages.filter,
_name=name+'_submit_filter',
_class="btn-sm"))
for f in fields:
if not f in self.filters:
tr.components.append(TD(''))
continue
(tf,ff) = f.split('.')
curfld = db[tf][ff]
if curfld.type=='upload' or curfld.type=='blob':
continue
vals = db(self.filter_items_query(db[tf])).select(db[tf]['id'],curfld,
cache=self.filter_cache)
dval = filters.get(f)
prev = []
opts = []
for v in vals:
opt = None
if curfld.type.startswith('reference '):
if curfld.represent:
rp = curfld.represent(v[ff])
if rp and not rp in prev:
opt = OPTION(rp, _value=v[ff])
prev.append(rp)
else:
v = v[ff]
if v and not v in prev:
opt = OPTION(v,_value=v)
prev.append(v)
elif curfld.represent:
rp = curfld.represent(v[ff])
if rp and not rp in prev:
opt = OPTION(rp, _value=rp)
prev.append(rp)
else:
if v[ff] and not v[ff] in prev:
opt = OPTION(v[ff], _value=v[ff])
prev.append(v[ff])
if opt:
opts.append(opt)
opts.sort(key=lambda x: x.components[0])
inp = SELECT(opts, _name = name+'_filter-'+f,value=dval)
inp.components.insert(0,OPTION('',_value='0'))
tr.components.append(TD(inp))
return tr
self.filter = filter
if not self.footer and 'footer' in self.enabled_rows:
def footer(fields):
pageinfo = pagesize = ''
pagelinks = SPAN(self.messages.pagesize)
if not self.groupby:
vars = dict(request.get_vars)
self.update_filters(vars,filters)
for p in self.pagesizes:
vars[name+'_pagesize'] = p
vars[name+'_pagenum'] = 1
lnk = A(str(p),' ',_href=URL(r=request,args=request.args,
vars=vars))
pagelinks.components.append(lnk)
pageinfo = self.messages.page_info % {'pagenum':self.pagenum,
'pagecount':self.pagecount,
'total':self.total}
tr = TR(_class='-webgrid footer')
td = TD(pageinfo,self.pageinfo_separator,pagelinks,
_colspan=len(self.fields) + len(self.action_links))
tr.components.append(td)
return tr
self.footer = footer
if not self.pager and 'pager' in self.enabled_rows:
def pager(pagecount):
vars = dict(request.get_vars)
self.update_filters(vars,filters)
prev = A(self.messages.previous_page, _href="#")
next = A(self.messages.next_page, _href="#")
if self.pagesize > 0 and pagenum > 0:
vars[name+'_pagenum'] = self.pagenum - 1
prev = A(B(self.messages.previous_page),
_href=URL(r=request,vars=vars,args=request.args))
if self.pagesize > 0 and self.pagenum < pagecount and \
len(self.response) >= self.pagesize:
vars[name+'_pagenum'] = self.pagenum + 1
next = A(B(self.messages.next_page),
_href=URL(r=request,vars=vars,args=request.args))
tr = TR(_class='-webgrid pager')
td = TD(prev,_colspan=len(self.fields) + len(self.action_links) )
for x in xrange(1, pagecount + 1):
if not self.groupby:
vars[name+'_pagenum'] = x
href = URL(r=request,vars=vars,args=request.args)
td.components.append(A(x,'-',_href=href))
td.components.append(next)
tr.components.append(td)
return tr
self.pager = pager
if not self.page_total and 'totals' in self.enabled_rows:
def page_total():
pagetotal = TR(['' for l in self.action_links],
_class='-webgrid totals')
if self.action_links:
pagetotal.components[-1] = TD(self.messages.page_total)
for f in self.fields:
if f in self.totals:
fieldvalues = [self.get_value(f, r) for r in self.response]
fieldtotal = self.total_function(fieldvalues)
pagetotal.components.append(TD(fieldtotal))
else:
pagetotal.components.append(TD())
return pagetotal
self.page_total = page_total
if not self.action_links:
if self.totals or self.filters:
self.action_links = ['delete']
self.action_headers = ['']
self.delete_link = lambda row: ' '
table_field = re.compile('[\w_]+\.[\w_]+')
table = TABLE(_id=self.id, _class="table-striped")
if 'header' in self.enabled_rows:
_row = self.header(self.fields)
if self.row_created:
self.row_created(_row,'header',None)
table.components.append(THEAD(_row))
if 'filter' in self.enabled_rows:
_row = self.filter(self.fields)
if self.row_created:
self.row_created(_row,'filter',None)
table.components.append(_row)
if len(rows) == 0:
table.components.append(TR(TD(self.messages.no_records,
_colspan=len(self.fields) + len(self.action_links),
_style="text-align:center;")))
for (rc, row) in enumerate(rows):
if self.datarow:
_row = self.datarow(row)
if self.row_created:
self.row_created(_row,'datarow',row)
table.components.append(_row)
continue
_class = 'even' if rc % 2 == 0 else 'odd'
tr = TR(_class='-webgrid-row %s' % _class)
if 'view' in self.action_links:
tr.components.append(TD(self.view_link(row),
_class='-webgrid view_link'))
if 'edit' in self.action_links:
tr.components.append(TD(self.edit_link(row),
_class='-webgrid edit_link'))
if 'delete' in self.action_links:
tr.components.append(TD(self.delete_link(row),
_class='-webgrid delete_link'))
for colname in self.fields:
if not table_field.match(colname):
r = row._extra[colname]
tr.components.append(TD(r))
continue
(tablename, fieldname) = colname.split('.')
field = rows.db[tablename][fieldname]
r = row[tablename][fieldname] if joined else row[fieldname]
if field.represent:
r = field.represent(r)
tr.components.append(TD(r))
continue
if field.type == 'blob' and r:
tr.components.append(TD('DATA'))
continue
r = str(field.formatter(r))
if field.type == 'upload':
if r:
tr.components.append(TD(A(IMG(_src=URL(r=self.environment.request,
f=self.download_function, args=r), _width='82px'),
_href=URL(r=self.environment.request,
f=self.download_function, args=r))))
else:
tr.components.append(TD(self.messages.file_link))
continue
tr.components.append(TD(r))
if self.row_created:
self.row_created(tr,'datarow',row)
table.components.append(tr)
if self.pagesize > 0:
pagecount = int(total / self.pagesize)
if total % self.pagesize != 0: pagecount += 1
else:
pagecount = 1
self.pagecount = pagecount
footer_wrap = TFOOT()
if 'totals' in self.enabled_rows and len(rows):
_row = self.page_total()
if self.row_created:
self.row_created(_row,'totals',None)
footer_wrap.components.append(_row)
if 'add_links' in self.enabled_rows:
_row = self.add_links(self.tablenames)
if self.row_created:
self.row_created(_row,'add_links',None)
footer_wrap.components.append(_row)
if 'pager' in self.enabled_rows and len(rows):
_row = self.pager(pagecount)
if self.row_created:
self.row_created(_row,'pager',None)
footer_wrap.components.append(_row)
if 'footer' in self.enabled_rows and len(rows):
_row = self.footer(self.fields)
if self.row_created:
self.row_created(_row,'footer',None)
footer_wrap.components.append(_row)
table.components.append(footer_wrap)
return FORM(table,_class='webgrid',_name=name+'-webgrid-form')
def links_right(tablerow,rowtype,rowdata):
if rowtype != 'pager':
links = tablerow.components[:3]
del tablerow.components[:3]
tablerow.components.extend(links)
| true | true |
1c45f5a3f7513812d906fa04e329c3a1e9236159 | 1,065 | py | Python | edmunds/foundation/concerns/serviceproviders.py | LowieHuyghe/edmunds-python | 236d087746cb8802a8854b2706b8d3ff009e9209 | [
"Apache-2.0"
] | 4 | 2017-09-07T13:39:50.000Z | 2018-05-31T16:14:50.000Z | edmunds/foundation/concerns/serviceproviders.py | LowieHuyghe/edmunds-python | 236d087746cb8802a8854b2706b8d3ff009e9209 | [
"Apache-2.0"
] | 103 | 2017-03-19T15:58:21.000Z | 2018-07-11T20:36:17.000Z | edmunds/foundation/concerns/serviceproviders.py | LowieHuyghe/edmunds-python | 236d087746cb8802a8854b2706b8d3ff009e9209 | [
"Apache-2.0"
] | 2 | 2017-10-14T15:20:11.000Z | 2018-04-20T09:55:44.000Z |
from threading import Lock
class ServiceProviders(object):
    """
    Mixin giving the Application service-provider registration support.
    """

    def register(self, class_):
        """
        Register a Service Provider
        :param class_: The class of the provider
        :type class_: ServiceProvider
        """
        lock_key = 'edmunds.serviceprovider.lock'
        providers_key = 'edmunds.serviceprovider.providers'
        extensions = self.extensions
        # Lazily create the lock guarding first-time registry creation.
        if lock_key not in extensions:
            extensions[lock_key] = Lock()
        # Double-checked creation of the provider registry list.
        if providers_key not in extensions:
            with extensions[lock_key]:
                if providers_key not in extensions:
                    extensions[providers_key] = []
        registered = extensions[providers_key]
        # Each provider class is registered at most once.
        if class_ in registered:
            return
        registered.append(class_)
        # Instantiate the provider with this app and let it register itself.
        class_(self).register()
| 28.783784 | 77 | 0.629108 |
from threading import Lock
class ServiceProviders(object):
def register(self, class_):
lock_key = 'edmunds.serviceprovider.lock'
providers_key = 'edmunds.serviceprovider.providers'
if lock_key not in self.extensions:
self.extensions[lock_key] = Lock()
if providers_key not in self.extensions:
with self.extensions[lock_key]:
if providers_key not in self.extensions:
self.extensions[providers_key] = []
if class_ in self.extensions[providers_key]:
return
self.extensions[providers_key].append(class_)
service_provider = class_(self)
service_provider.register()
| true | true |
1c45f686a688b7c613282c5f90a0c54d646b4457 | 4,988 | py | Python | mmdet/distillation/distillers/csd_distiller.py | Senwang98/Lightweight-Detection-and-KD | 7d6a4c02d922d4ed0920c9108f1f06dd63c5e90b | [
"Apache-2.0"
] | 8 | 2021-12-28T02:47:16.000Z | 2022-03-28T13:13:49.000Z | mmdet/distillation/distillers/csd_distiller.py | Senwang98/Lightweight-Detection-and-KD | 7d6a4c02d922d4ed0920c9108f1f06dd63c5e90b | [
"Apache-2.0"
] | 1 | 2022-03-29T10:52:49.000Z | 2022-03-31T01:28:01.000Z | mmdet/distillation/distillers/csd_distiller.py | Senwang98/Lightweight-Detection-and-KD | 7d6a4c02d922d4ed0920c9108f1f06dd63c5e90b | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
import torch
from mmdet.models.detectors.base import BaseDetector
from mmdet.models import build_detector
from mmcv.runner import load_checkpoint, _load_checkpoint, load_state_dict
from ..builder import DISTILLER, build_distill_loss
from collections import OrderedDict
@DISTILLER.register_module()
class CSD_DetectionDistiller(BaseDetector):
    """Base distiller for detectors.
    It typically consists of teacher_model and student_model.
    """
    def __init__(self,
                 teacher_cfg,
                 student_cfg,
                 distill_cfg=None,
                 teacher_pretrained=None,
                 init_student=False):
        # teacher_cfg / student_cfg: mmdet detector configs; the teacher is
        # built, loaded from `teacher_pretrained`, and frozen in eval mode
        # (forward_train also wraps it in torch.no_grad()).
        super(CSD_DetectionDistiller, self).__init__()
        self.teacher = build_detector(teacher_cfg.model,
                                      train_cfg=teacher_cfg.get('train_cfg'),
                                      test_cfg=teacher_cfg.get('test_cfg'))
        self.init_weights_teacher(teacher_pretrained)
        self.teacher.eval()
        self.student = build_detector(student_cfg.model,
                                      train_cfg=student_cfg.get('train_cfg'),
                                      test_cfg=student_cfg.get('test_cfg'))
        # inheriting strategy: optionally seed the student with every
        # non-backbone weight from the teacher checkpoint.
        if init_student:
            t_checkpoint = _load_checkpoint(teacher_pretrained)
            all_name = []
            for name, v in t_checkpoint["state_dict"].items():
                if name.startswith("backbone."):
                    continue
                else:
                    all_name.append((name, v))
            state_dict = OrderedDict(all_name)
            load_state_dict(self.student, state_dict)
        # One distillation-loss module per configured method, keyed by its
        # name (looked up again in forward_train).
        self.distill_losses = nn.ModuleDict()
        self.distill_cfg = distill_cfg
        for item_loc in distill_cfg:
            for item_loss in item_loc.methods:
                loss_name = item_loss.name
                self.distill_losses[loss_name] = build_distill_loss(item_loss)

    def base_parameters(self):
        # Modules to be optimized: the student plus the (possibly
        # parameterized) distill losses; the frozen teacher is excluded.
        return nn.ModuleList([self.student, self.distill_losses])

    @property
    def with_neck(self):
        """bool: whether the detector has a neck"""
        return hasattr(self.student, 'neck') and self.student.neck is not None

    @property
    def with_shared_head(self):
        """bool: whether the detector has a shared head in the RoI Head"""
        return hasattr(self.student, 'roi_head') and self.student.roi_head.with_shared_head

    @property
    def with_bbox(self):
        """bool: whether the detector has a bbox head"""
        return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_bbox)
                or (hasattr(self.student, 'bbox_head') and self.student.bbox_head is not None))

    @property
    def with_mask(self):
        """bool: whether the detector has a mask head"""
        return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_mask)
                or (hasattr(self.student, 'mask_head') and self.student.mask_head is not None))

    def init_weights_teacher(self, path=None):
        """Load the pretrained model in teacher detector.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        checkpoint = load_checkpoint(self.teacher, path, map_location='cpu')

    def forward_train(self, img, img_metas, **kwargs):
        """
        Args:
            img (Tensor): Input images of shape (N, C, H, W).
                Typically these should be mean centered and std scaled.
            img_metas (list[dict]): A List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                :class:`mmdet.datasets.pipelines.Collect`.
        Returns:
            dict[str, Tensor]: A dictionary of loss components(student's losses and distiller's losses).
        """
        with torch.no_grad():
            self.teacher.eval()
            fea_t = self.teacher.extract_feat(img)
        student_feat = self.student.extract_feat(img)
        student_loss = self.student.bbox_head.forward_train(
            student_feat, img_metas, **kwargs)
        # One CSD loss per feature level; assumes distill_cfg declared a loss
        # named 'loss_csd_fpn_<i>' for every level — TODO confirm configs match.
        for i in range(len(student_feat)):
            loss_name = 'loss_csd_fpn_'+str(i)
            student_loss[loss_name] = self.distill_losses[loss_name](
                student_feat[i], fea_t[i].detach(), kwargs['gt_bboxes'], img_metas)
        return student_loss

    def simple_test(self, img, img_metas, **kwargs):
        # Inference delegates entirely to the student.
        return self.student.simple_test(img, img_metas, **kwargs)

    def aug_test(self, imgs, img_metas, **kwargs):
        # Augmented-test inference also delegates to the student.
        return self.student.aug_test(imgs, img_metas, **kwargs)

    def extract_feat(self, imgs):
        """Extract features from images."""
        return self.student.extract_feat(imgs)
| 38.666667 | 104 | 0.621893 | import torch.nn as nn
import torch.nn.functional as F
import torch
from mmdet.models.detectors.base import BaseDetector
from mmdet.models import build_detector
from mmcv.runner import load_checkpoint, _load_checkpoint, load_state_dict
from ..builder import DISTILLER, build_distill_loss
from collections import OrderedDict
@DISTILLER.register_module()
class CSD_DetectionDistiller(BaseDetector):
    """Distiller pairing a frozen teacher detector with a trainable student."""

    def __init__(self,
                 teacher_cfg,
                 student_cfg,
                 distill_cfg=None,
                 teacher_pretrained=None,
                 init_student=False):
        # Build the teacher, load its checkpoint, and freeze it in eval mode.
        super(CSD_DetectionDistiller, self).__init__()
        self.teacher = build_detector(teacher_cfg.model,
                                      train_cfg=teacher_cfg.get('train_cfg'),
                                      test_cfg=teacher_cfg.get('test_cfg'))
        self.init_weights_teacher(teacher_pretrained)
        self.teacher.eval()
        self.student = build_detector(student_cfg.model,
                                      train_cfg=student_cfg.get('train_cfg'),
                                      test_cfg=student_cfg.get('test_cfg'))
        # Optionally copy every non-backbone weight from the teacher
        # checkpoint into the student before training starts.
        if init_student:
            t_checkpoint = _load_checkpoint(teacher_pretrained)
            all_name = []
            for name, v in t_checkpoint["state_dict"].items():
                if name.startswith("backbone."):
                    continue
                else:
                    all_name.append((name, v))
            state_dict = OrderedDict(all_name)
            load_state_dict(self.student, state_dict)
        # One distillation-loss module per configured method, keyed by name.
        self.distill_losses = nn.ModuleDict()
        self.distill_cfg = distill_cfg
        for item_loc in distill_cfg:
            for item_loss in item_loc.methods:
                loss_name = item_loss.name
                self.distill_losses[loss_name] = build_distill_loss(item_loss)

    def base_parameters(self):
        # Optimizable modules: student + distill losses (teacher excluded).
        return nn.ModuleList([self.student, self.distill_losses])

    @property
    def with_neck(self):
        # bool: whether the student detector has a neck.
        return hasattr(self.student, 'neck') and self.student.neck is not None

    @property
    def with_shared_head(self):
        # bool: whether the student has a shared head in the RoI Head.
        return hasattr(self.student, 'roi_head') and self.student.roi_head.with_shared_head

    @property
    def with_bbox(self):
        # bool: whether the student has a bbox head (RoI- or dense-style).
        return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_bbox)
                or (hasattr(self.student, 'bbox_head') and self.student.bbox_head is not None))

    @property
    def with_mask(self):
        # bool: whether the student has a mask head.
        return ((hasattr(self.student, 'roi_head') and self.student.roi_head.with_mask)
                or (hasattr(self.student, 'mask_head') and self.student.mask_head is not None))

    def init_weights_teacher(self, path=None):
        """Load pretrained weights (from `path`) into the teacher detector."""
        checkpoint = load_checkpoint(self.teacher, path, map_location='cpu')

    def forward_train(self, img, img_metas, **kwargs):
        """Return the student's detection losses plus one CSD distillation
        loss per feature level (names 'loss_csd_fpn_<i>')."""
        with torch.no_grad():
            self.teacher.eval()
            fea_t = self.teacher.extract_feat(img)
        student_feat = self.student.extract_feat(img)
        student_loss = self.student.bbox_head.forward_train(
            student_feat, img_metas, **kwargs)
        # Assumes distill_cfg declared a loss for every level — TODO confirm.
        for i in range(len(student_feat)):
            loss_name = 'loss_csd_fpn_'+str(i)
            student_loss[loss_name] = self.distill_losses[loss_name](
                student_feat[i], fea_t[i].detach(), kwargs['gt_bboxes'], img_metas)
        return student_loss

    def simple_test(self, img, img_metas, **kwargs):
        # Inference delegates entirely to the student.
        return self.student.simple_test(img, img_metas, **kwargs)

    def aug_test(self, imgs, img_metas, **kwargs):
        # Augmented-test inference also delegates to the student.
        return self.student.aug_test(imgs, img_metas, **kwargs)

    def extract_feat(self, imgs):
        """Extract features from images (via the student)."""
        return self.student.extract_feat(imgs)
| true | true |
1c45f6fcf56f16cac02a648a17e1f72c3d8a6b99 | 270 | py | Python | tests/basics/bytearray_construct.py | peterson79/pycom-micropython-sigfox | 3f93fc2c02567c96f18cff4af9125db8fd7a6fb4 | [
"MIT"
] | 37 | 2017-12-07T15:49:29.000Z | 2022-03-16T16:01:38.000Z | tests/basics/bytearray_construct.py | peterson79/pycom-micropython-sigfox | 3f93fc2c02567c96f18cff4af9125db8fd7a6fb4 | [
"MIT"
] | 27 | 2015-01-02T16:17:37.000Z | 2015-09-07T19:21:26.000Z | tests/basics/bytearray_construct.py | peterson79/pycom-micropython-sigfox | 3f93fc2c02567c96f18cff4af9125db8fd7a6fb4 | [
"MIT"
] | 22 | 2016-08-01T01:35:30.000Z | 2022-03-22T18:12:23.000Z | # test construction of bytearray from different objects
from array import array

# bytes, tuple, list: bytearray copies byte values / small-int items directly.
print(bytearray(b'123'))
print(bytearray((1, 2)))
print(bytearray([1, 2]))

# arrays: construction goes through the buffer protocol, so the raw machine
# bytes are copied — 'h' items contribute two bytes each (byte order is the
# platform's; the expected output presumably assumes little-endian — confirm).
print(bytearray(array('b', [1, 2])))
print(bytearray(array('h', [0x101, 0x202])))
| 20.769231 | 55 | 0.7 |
from array import array

# bytearray from bytes, tuple, and list of small ints.
print(bytearray(b'123'))
print(bytearray((1, 2)))
print(bytearray([1, 2]))
# bytearray from array objects copies the raw element bytes ('h' = 2 bytes
# per item, platform byte order — expected output presumably little-endian).
print(bytearray(array('b', [1, 2])))
print(bytearray(array('h', [0x101, 0x202])))
| true | true |
1c45fb3f361b4037f9e9310bf53c677582ab3001 | 2,473 | py | Python | boto/pyami/startup.py | rectalogic/boto | 1ac79d0c984bfd83f26e7c3af4877a731a63ecc2 | [
"MIT"
] | 1 | 2019-06-22T23:31:13.000Z | 2019-06-22T23:31:13.000Z | boto/pyami/startup.py | rectalogic/boto | 1ac79d0c984bfd83f26e7c3af4877a731a63ecc2 | [
"MIT"
] | null | null | null | boto/pyami/startup.py | rectalogic/boto | 1ac79d0c984bfd83f26e7c3af4877a731a63ecc2 | [
"MIT"
] | null | null | null | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import sys
import boto
from boto.utils import find_class
from boto import config
from boto.pyami.scriptbase import ScriptBase
class Startup(ScriptBase):
    """Runs the scripts configured for a Pyami instance at startup."""

    def run_scripts(self):
        """
        Instantiate and run each class named in the [Pyami] 'scripts' option.

        The option is a comma-separated list of dotted names
        ('package.module.ClassName'); each class is instantiated with no
        arguments and its main() method is invoked. Any exception halts the
        whole startup sequence after being logged.
        """
        scripts = config.get('Pyami', 'scripts')
        if scripts:
            for script in scripts.split(','):
                script = script.strip(" ")
                try:
                    # Split 'package.module.Class' at the LAST dot into a
                    # module path and a class name.
                    pos = script.rfind('.')
                    if pos > 0:
                        mod_name = script[0:pos]
                        cls_name = script[pos + 1:]
                        cls = find_class(mod_name, cls_name)
                        boto.log.info('Running Script: %s' % script)
                        s = cls()
                        s.main()
                    else:
                        # No dot means we can't tell module from class;
                        # warn and move on to the next entry.
                        boto.log.warning('Trouble parsing script: %s' % script)
                except Exception:
                    boto.log.exception('Problem Running Script: %s. Startup process halting.' % script)
                    # Bare 'raise' preserves the original traceback; the old
                    # 'raise e' re-raised from this frame instead.
                    raise

    def main(self):
        """Run all configured startup scripts, then send the completion notification."""
        self.run_scripts()
        self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id'))
if __name__ == "__main__":
    # Fall back to a file logger unless logging was configured explicitly.
    if not config.has_section('loggers'):
        boto.set_file_logger('startup', '/var/log/boto.log')
    # Make the instance's working directory importable for user scripts.
    sys.path.append(config.get('Pyami', 'working_dir'))
    su = Startup()
    su.main()
| 40.540984 | 103 | 0.630004 |
import sys
import boto
from boto.utils import find_class
from boto import config
from boto.pyami.scriptbase import ScriptBase
class Startup(ScriptBase):
    """Runs the scripts listed in the [Pyami] 'scripts' config at startup."""

    def run_scripts(self):
        """Instantiate and run each configured 'package.module.ClassName';
        any exception is logged and halts the startup sequence."""
        scripts = config.get('Pyami', 'scripts')
        if scripts:
            for script in scripts.split(','):
                script = script.strip(" ")
                try:
                    # Split the dotted name at the last '.' into module + class.
                    pos = script.rfind('.')
                    if pos > 0:
                        mod_name = script[0:pos]
                        cls_name = script[pos+1:]
                        cls = find_class(mod_name, cls_name)
                        boto.log.info('Running Script: %s' % script)
                        s = cls()
                        s.main()
                    else:
                        # No dot: can't tell module from class; warn and skip.
                        boto.log.warning('Trouble parsing script: %s' % script)
                except Exception as e:
                    boto.log.exception('Problem Running Script: %s. Startup process halting.' % script)
                    raise e

    def main(self):
        # Run every script, then notify that startup finished.
        self.run_scripts()
        self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id'))
if __name__ == "__main__":
    # Default to a file logger when no logging config is present.
    if not config.has_section('loggers'):
        boto.set_file_logger('startup', '/var/log/boto.log')
    # User scripts live in the instance working directory; make it importable.
    sys.path.append(config.get('Pyami', 'working_dir'))
    su = Startup()
    su.main()
| true | true |
1c45fb42c9cea7abcaef7ad6b5250326ab3d502e | 73,700 | py | Python | ambassador/tests/t_tls.py | jhsiaomei/ambassador | c2726366612e31b74c177329f51265b5ad0f8df7 | [
"Apache-2.0"
] | null | null | null | ambassador/tests/t_tls.py | jhsiaomei/ambassador | c2726366612e31b74c177329f51265b5ad0f8df7 | [
"Apache-2.0"
] | null | null | null | ambassador/tests/t_tls.py | jhsiaomei/ambassador | c2726366612e31b74c177329f51265b5ad0f8df7 | [
"Apache-2.0"
] | null | null | null | from kat.harness import Query
from abstract_tests import AmbassadorTest, HTTP, ServiceType
class TLSContextsTest(AmbassadorTest):
    """
    This test makes sure that TLS is not turned on when it's not intended to. For example, when an 'upstream'
    TLS configuration is passed, the port is not supposed to switch to 443
    """

    # Plain-HTTP backend the Mapping below routes to.
    target: ServiceType

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # Ship a Secret carrying only a certificate (tls.crt, no key),
        # referenced by the 'upstream' TLS block in config().
        return super().manifests() + """
---
apiVersion: v1
metadata:
  name: test-tlscontexts-secret
  labels:
    kat-ambassador-id: tlscontextstest
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR1RENDQXFDZ0F3SUJBZ0lKQUowWDU3ZXlwQk5UTUEwR0NTcUdTSWIzRFFFQkN3VUFNSEV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4R3pBWkJnTlZCQU1NRW0xaGMzUmxjaTVrCllYUmhkMmx5WlM1cGJ6QWVGdzB4T1RBeE1UQXhPVEF6TXpCYUZ3MHlOREF4TURreE9UQXpNekJhTUhFeEN6QUoKQmdOVkJBWVRBbFZUTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRSwpEQWhFWVhSaGQybHlaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEd6QVpCZ05WQkFNTUVtMWhjM1JsCmNpNWtZWFJoZDJseVpTNXBiekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPdlEKVjVad1NmcmQ1Vndtelo5SmNoOTdyUW40OXA2b1FiNkVIWjF5T2EyZXZBNzE2NWpkMHFqS1BPMlgyRk80MVg4QgpwQWFLZExnMmltaC9wL2NXN2JncjNHNnRHVEZVMVZHanllTE1EV0Q1MGV2TTYydnpYOFRuYVV6ZFRHTjFOdTM2CnJaM2JnK0VLcjhFYjI1b2RabEpyMm1mNktSeDdTcjZzT1N4NlE1VHhSb3NycmZ0d0tjejI5cHZlMGQ4b0NiZGkKRFJPVlZjNXpBaW0zc2Nmd3VwRUJrQzYxdlpKMzhmaXYwRENYOVpna3BMdEZKUTllTEVQSEdKUGp5ZmV3alNTeQovbk52L21Sc2J6aUNtQ3R3Z3BmbFRtODljK3EzSWhvbUE1YXhZQVFjQ0NqOXBvNUhVZHJtSUJKR0xBTVZ5OWJ5CkZnZE50aFdBeHZCNHZmQXl4OXNDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRkdUOVAvOHBQeGI3UVJVeFcvV2gKaXpkMnNnbEtNQjhHQTFVZEl3UVlNQmFBRkdUOVAvOHBQeGI3UVJVeFcvV2hpemQyc2dsS01BOEdBMVVkRXdFQgovd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS3NWT2Fyc01aSXhLOUpLUzBHVHNnRXNjYThqCllhTDg1YmFsbndBbnBxMllSMGNIMlhvd2dLYjNyM3VmbVRCNERzWS9RMGllaENKeTMzOUJyNjVQMVBKMGgvemYKZEZOcnZKNGlvWDVMWnc5YkowQVFORCtZUTBFK010dFppbE9DbHNPOVBCdm1tUEp1dWFlYVdvS2pWZnNOL1RjMAoycUxVM1pVMHo5bmhYeDZlOWJxYUZLSU1jYnFiVk9nS2p3V0ZpbDlkRG4vQ29KbGFUUzRJWjlOaHFjUzhYMXd0ClQybWQvSUtaaEtKc3A3VlBGeDU5ZWhuZ0VPakZocGhzd20xdDhnQWVxL1A3SkhaUXlBUGZYbDNyZDFSQVJuRVIKQUpmVUxET2tzWFNFb2RTZittR0NrVWh1b2QvaDhMTUdXTFh6Q2d0SHBKMndaVHA5a1ZWVWtKdkpqSVU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
kind: Secret
type: Opaque
"""

    def config(self):
        # Only the 'upstream' half of the TLS module is enabled; this must
        # NOT make Ambassador start terminating TLS on 443.
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
name: tls
ambassador_id: {self.ambassador_id}
config:
  upstream:
    enabled: True
    secret: test-tlscontexts-secret
""")
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.target.path.k8s}
prefix: /{self.name}/
service: {self.target.path.fqdn}
""")

    def scheme(self) -> str:
        # Probe over HTTPS on purpose: with no server TLS context configured,
        # the connection is expected to FAIL (see queries()).
        return "https"

    def queries(self):
        # Any of these errors means nothing is serving TLS on 443, which is
        # exactly the desired outcome of this test.
        yield Query(self.url(self.name + "/"), error=['connection refused', 'connection reset by peer', 'EOF', 'request canceled'])

    def requirements(self):
        # Readiness checks must stay on plain HTTP, since HTTPS is expected
        # to be unreachable here.
        yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("http://"))
class ClientCertificateAuthentication(AmbassadorTest):
    """
    Verifies mutual-TLS: with client.cert_required set, a request presenting
    the client certificate below succeeds, and one without it is rejected
    with a TLS handshake failure.
    """

    # Client certificate presented by the test client (CN=presto.example.com),
    # signed by the CA in ca_cert below.
    presto_crt = """
-----BEGIN CERTIFICATE-----
MIIDYTCCAkkCCQCrK74a3GFhijANBgkqhkiG9w0BAQsFADBxMQswCQYDVQQGEwJV
UzELMAkGA1UECAwCTUExDzANBgNVBAcMBkJvc3RvbjERMA8GA1UECgwIRGF0YXdp
cmUxFDASBgNVBAsMC0VuZ2luZWVyaW5nMRswGQYDVQQDDBJtYXN0ZXIuZGF0YXdp
cmUuaW8wIBcNMTkwMTEwMTkxOTUyWhgPMjExODEyMTcxOTE5NTJaMHIxCzAJBgNV
BAYTAklOMQswCQYDVQQIDAJLQTESMBAGA1UEBwwJQmFuZ2Fsb3JlMQ8wDQYDVQQK
DAZQcmVzdG8xFDASBgNVBAsMC0VuZ2luZWVyaW5nMRswGQYDVQQDDBJwcmVzdG8u
ZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvPcFp
hw5Ja67z23L4YCYTgNdw4eVh7EHyzOpmf3VGhvx/UtNMVOH7Dcf+I7QEyxtQeBiZ
HOcThgr/k/wrAbMjdThRS8yJxRZgj79Li92pKkJbhLGsBeTuw8lBhtwyn85vEZrt
TOWEjlXHHLlz1OHiSAfYChIGjenPu5sT++O1AAs15b/0STBxkrZHGVimCU6qEWqB
PYVcGYqXdb90mbsuY5GAdAzUBCGQH/RLZAl8ledT+uzkcgHcF30gUT5Ik5Ks4l/V
t+C6I52Y0S4aCkT38XMYKMiBh7XzpjJUnR0pW5TYS37wq6nnVFsNReaMKmbOWp1X
5wEjoRJqDrHtVvjDAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAI3LR5fS6D6yFa6b
yl6+U/i44R3VYJP1rkee0s4C4WbyXHURTqQ/0z9wLU+0Hk57HI+7f5HO/Sr0q3B3
wuZih+TUbbsx5jZW5e++FKydFWpx7KY4MUJmePydEMoUaSQjHWnlAuv9PGp5ZZ30
t0lP/mVGNAeiXsILV8gRHnP6aV5XywK8c+828BQDRfizJ+uKYvnAJmqpn4aOOJh9
csjrK52+RNebMT0VxZF4JYGd0k00au9CaciWpPk69C+A/7K/xtV4ZFtddVP9SldF
ahmIu2g3fI5G+/2Oz8J+qX2B+QqT21/pOPKnMQU54BQ6bmI3fBM9B+2zm92FfgYH
9wgA5+Y=
-----END CERTIFICATE-----
"""

    # Private key matching presto_crt; used by the test client only.
    presto_key = """
-----BEGIN RSA PRIVATE KEY-----
MIIEoQIBAAKCAQEArz3BaYcOSWuu89ty+GAmE4DXcOHlYexB8szqZn91Rob8f1LT
TFTh+w3H/iO0BMsbUHgYmRznE4YK/5P8KwGzI3U4UUvMicUWYI+/S4vdqSpCW4Sx
rAXk7sPJQYbcMp/ObxGa7UzlhI5Vxxy5c9Th4kgH2AoSBo3pz7ubE/vjtQALNeW/
9EkwcZK2RxlYpglOqhFqgT2FXBmKl3W/dJm7LmORgHQM1AQhkB/0S2QJfJXnU/rs
5HIB3Bd9IFE+SJOSrOJf1bfguiOdmNEuGgpE9/FzGCjIgYe186YyVJ0dKVuU2Et+
8Kup51RbDUXmjCpmzlqdV+cBI6ESag6x7Vb4wwIDAQABAoIBAHfXwPS9Mw0NAoms
kzS+9Gs0GqINKoTMQNGeR9Mu6XIBEJ62cuBp0F2TsCjiG9OHXzep2hCkDndwnQbq
GnMC55KhMJGQR+IUEdiZldZBYaa1ysmxtpwRL94FsRYJ9377gP6+SHhutSvw90KD
J2TKumu4nPym7mrjFHpHL6f8BF6b9dJftE2o27TX04+39kPiX4d+4CLfG7YFteYR
98qYHwAk58+s3jJxk7gaDehb0PvOIma02eLF7dNA7h0BtB2h2rfPLNlgKv2MN7k3
NxRHwXEzSCfK8rL8yxQLo4gOy3up+LU7LRERBIkpOyS5tkKcIGoG1w5zEB4sqJZC
Me2ZbUkCgYEA4RGHtfYkecTIBwSCgdCqJYa1zEr35xbgqxOWF7DfjjMwfxeitdh+
U487SpDpoH68Rl/pnqQcHToQWRfLGXv0NZxsQDH5UulK2dLy2JfQSlFMWc0rQ210
v8F35GXohB3vi4Tfrl8wrkEBbCBoZDmp7MPZEGVGb0KVl+gU2u19CwUCgYEAx1Mt
w6M8+bj3ZQ9Va9tcHSk9IVRKx0fklWY0/cmoGw5P2q/Yudd3CGupINGEA/lHqqW3
boxfdneYijOmTQO9/od3/NQRDdTrCRKOautts5zeJw7fUvls5/Iip5ZryR5mYqEz
Q/yMffzZPYVPXR0E/HEnCjf8Vs+0dDa2QwAhDycCf0j4ZgeYxjq0kiW0UJvGC2Qf
SNHzfGxv/md48jC8J77y2cZa42YRyuNMjOygDx75+BDZB+VnT7YqHSLFlBOvHH5F
ONOXYD6BZMM6oYGXtvBha1+yJVS3KCMDltt2LuymyAN0ERF3y1CzwsJLv4y/JVie
JsIqE6v+6oFVvW09kk0CgYEAuazRL7ILJfDYfAqJnxxLNVrp9/cmZXaiB02bRWIp
N3Lgji1KbOu6lVx8wvaIzI7U5LDUK6WVc6y6qtqsKoe237hf3GPLsx/JBb2EbzL6
ENuq0aV4AToZ6gLTp1tm8oVgCLZzI/zI/r+fukBJispyj5n0LP+0D0YSqkMhC06+
fPcCgYB85vDLHorvbb8CYcIOvJxogMjXVasOfSLqtCkzICg4i6qCmLkXbs0qmDIz
bIpIFzUdXu3tu+gPV6ab9dPmpj1M77yu7+QLL7zRy/1/EJaY/tFjWzcuF5tP7jKT
UZCMWuBXFwTbeSQHESs5IWpSDxBGJbSNFmCeyo52Dw/fSYxUEg==
-----END RSA PRIVATE KEY-----
"""

    # CA certificate the client uses to verify Ambassador's server cert; it
    # is the same CA that signed presto_crt.
    ca_cert = """
-----BEGIN CERTIFICATE-----
MIIDuDCCAqCgAwIBAgIJAJ0X57eypBNTMA0GCSqGSIb3DQEBCwUAMHExCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJNQTEPMA0GA1UEBwwGQm9zdG9uMREwDwYDVQQKDAhE
YXRhd2lyZTEUMBIGA1UECwwLRW5naW5lZXJpbmcxGzAZBgNVBAMMEm1hc3Rlci5k
YXRhd2lyZS5pbzAeFw0xOTAxMTAxOTAzMzBaFw0yNDAxMDkxOTAzMzBaMHExCzAJ
BgNVBAYTAlVTMQswCQYDVQQIDAJNQTEPMA0GA1UEBwwGQm9zdG9uMREwDwYDVQQK
DAhEYXRhd2lyZTEUMBIGA1UECwwLRW5naW5lZXJpbmcxGzAZBgNVBAMMEm1hc3Rl
ci5kYXRhd2lyZS5pbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOvQ
V5ZwSfrd5VwmzZ9Jch97rQn49p6oQb6EHZ1yOa2evA7165jd0qjKPO2X2FO41X8B
pAaKdLg2imh/p/cW7bgr3G6tGTFU1VGjyeLMDWD50evM62vzX8TnaUzdTGN1Nu36
rZ3bg+EKr8Eb25odZlJr2mf6KRx7Sr6sOSx6Q5TxRosrrftwKcz29pve0d8oCbdi
DROVVc5zAim3scfwupEBkC61vZJ38fiv0DCX9ZgkpLtFJQ9eLEPHGJPjyfewjSSy
/nNv/mRsbziCmCtwgpflTm89c+q3IhomA5axYAQcCCj9po5HUdrmIBJGLAMVy9by
FgdNthWAxvB4vfAyx9sCAwEAAaNTMFEwHQYDVR0OBBYEFGT9P/8pPxb7QRUxW/Wh
izd2sglKMB8GA1UdIwQYMBaAFGT9P/8pPxb7QRUxW/Whizd2sglKMA8GA1UdEwEB
/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAKsVOarsMZIxK9JKS0GTsgEsca8j
YaL85balnwAnpq2YR0cH2XowgKb3r3ufmTB4DsY/Q0iehCJy339Br65P1PJ0h/zf
dFNrvJ4ioX5LZw9bJ0AQND+YQ0E+MttZilOClsO9PBvmmPJuuaeaWoKjVfsN/Tc0
2qLU3ZU0z9nhXx6e9bqaFKIMcbqbVOgKjwWFil9dDn/CoJlaTS4IZ9NhqcS8X1wt
T2md/IKZhKJsp7VPFx59ehngEOjFhphswm1t8gAeq/P7JHZQyAPfXl3rd1RARnER
AJfULDOksXSEodSf+mGCkUhuod/h8LMGWLXzCgtHpJ2wZTp9kVVUkJvJjIU=
-----END CERTIFICATE-----
"""

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # Two secrets: the 'client' secret holds only the CA cert Ambassador
        # uses to validate presented client certificates; the 'server' secret
        # is a normal kubernetes.io/tls pair for terminating TLS.
        return super().manifests() + """
---
apiVersion: v1
metadata:
  name: test-clientcert-client-secret
  labels:
    kat-ambassador-id: clientcertificateauthentication
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR1RENDQXFDZ0F3SUJBZ0lKQUowWDU3ZXlwQk5UTUEwR0NTcUdTSWIzRFFFQkN3VUFNSEV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4R3pBWkJnTlZCQU1NRW0xaGMzUmxjaTVrCllYUmhkMmx5WlM1cGJ6QWVGdzB4T1RBeE1UQXhPVEF6TXpCYUZ3MHlOREF4TURreE9UQXpNekJhTUhFeEN6QUoKQmdOVkJBWVRBbFZUTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRSwpEQWhFWVhSaGQybHlaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEd6QVpCZ05WQkFNTUVtMWhjM1JsCmNpNWtZWFJoZDJseVpTNXBiekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPdlEKVjVad1NmcmQ1Vndtelo5SmNoOTdyUW40OXA2b1FiNkVIWjF5T2EyZXZBNzE2NWpkMHFqS1BPMlgyRk80MVg4QgpwQWFLZExnMmltaC9wL2NXN2JncjNHNnRHVEZVMVZHanllTE1EV0Q1MGV2TTYydnpYOFRuYVV6ZFRHTjFOdTM2CnJaM2JnK0VLcjhFYjI1b2RabEpyMm1mNktSeDdTcjZzT1N4NlE1VHhSb3NycmZ0d0tjejI5cHZlMGQ4b0NiZGkKRFJPVlZjNXpBaW0zc2Nmd3VwRUJrQzYxdlpKMzhmaXYwRENYOVpna3BMdEZKUTllTEVQSEdKUGp5ZmV3alNTeQovbk52L21Sc2J6aUNtQ3R3Z3BmbFRtODljK3EzSWhvbUE1YXhZQVFjQ0NqOXBvNUhVZHJtSUJKR0xBTVZ5OWJ5CkZnZE50aFdBeHZCNHZmQXl4OXNDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRkdUOVAvOHBQeGI3UVJVeFcvV2gKaXpkMnNnbEtNQjhHQTFVZEl3UVlNQmFBRkdUOVAvOHBQeGI3UVJVeFcvV2hpemQyc2dsS01BOEdBMVVkRXdFQgovd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS3NWT2Fyc01aSXhLOUpLUzBHVHNnRXNjYThqCllhTDg1YmFsbndBbnBxMllSMGNIMlhvd2dLYjNyM3VmbVRCNERzWS9RMGllaENKeTMzOUJyNjVQMVBKMGgvemYKZEZOcnZKNGlvWDVMWnc5YkowQVFORCtZUTBFK010dFppbE9DbHNPOVBCdm1tUEp1dWFlYVdvS2pWZnNOL1RjMAoycUxVM1pVMHo5bmhYeDZlOWJxYUZLSU1jYnFiVk9nS2p3V0ZpbDlkRG4vQ29KbGFUUzRJWjlOaHFjUzhYMXd0ClQybWQvSUtaaEtKc3A3VlBGeDU5ZWhuZ0VPakZocGhzd20xdDhnQWVxL1A3SkhaUXlBUGZYbDNyZDFSQVJuRVIKQUpmVUxET2tzWFNFb2RTZittR0NrVWh1b2QvaDhMTUdXTFh6Q2d0SHBKMndaVHA5a1ZWVWtKdkpqSVU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
kind: Secret
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  name: test-clientcert-server-secret
  labels:
    kat-ambassador-id: clientcertificateauthentication
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaekNDQWs4Q0NRQ3JLNzRhM0dGaGlUQU5CZ2txaGtpRzl3MEJBUXNGQURCeE1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1RVRXhEekFOQmdOVkJBY01Ca0p2YzNSdmJqRVJNQThHQTFVRUNnd0lSR0YwWVhkcApjbVV4RkRBU0JnTlZCQXNNQzBWdVoybHVaV1Z5YVc1bk1Sc3dHUVlEVlFRRERCSnRZWE4wWlhJdVpHRjBZWGRwCmNtVXVhVzh3SGhjTk1Ua3dNVEV3TVRrd056TTRXaGNOTWprd01UQTNNVGt3TnpNNFdqQjZNUXN3Q1FZRFZRUUcKRXdKSlRqRUxNQWtHQTFVRUNBd0NTMEV4RWpBUUJnTlZCQWNNQ1VKaGJtZGhiRzl5WlRFVE1CRUdBMVVFQ2d3SwpRVzFpWVhOellXUnZjakVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEh6QWRCZ05WQkFNTUZtRnRZbUZ6CmMyRmtiM0l1WlhoaGJYQnNaUzVqYjIwd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUIKQVFDN1liY3o5SkZOSHVYY3pvZERrTURvUXd0M1pmQnpjaElwTFlkeHNDZnB1UUYybGNmOGxXMEJKNnZlNU0xTAovMjNZalFYeEFsV25VZ3FZdFlEL1hiZGh3RCtyRWx3RXZWUzR1US9IT2EyUTUwVkF6SXNYa0lxWm00dVA1QzNECk8rQ0NncXJ3UUgzYS8vdlBERldYWkUyeTJvcUdZdE1Xd20zVXQrYnFWSFEzOThqcTNoaGt3MmNXL0pLTjJkR2UKRjk0OWxJWG15NHMrbGE3b21RWldWY0JFcWdQVzJDL1VrZktSbVdsVkRwK0duSk8vZHFobDlMN3d2a2hhc2JETAphbVkweXdiOG9LSjFRdmlvV1JxcjhZZnQ5NzVwaGgzazRlRVdMMUNFTmxFK09vUWNTNVRPUEdndko3WlMyaU43CllVTDRBK0gydCt1WWdUdnFSYVNqcTdnckFnTUJBQUV3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUJURGJ4MzkKUGpoT2JpVW1Rdm9vbVhOVjJ1TG1FZkxJcGlKQUhWOTM0VTlmMnhVUS93eExkcElhVXM0WTlRSzhOR2h2U3dSSAp4Y2w4R2hGYzBXRDRoNEJTdmNhdUdVS21LRzh5ZVFhdGhGVjBzcGFHYjUvaFBqUVdDWnNYK3crbjU4WDROOHBrCmx5YkE4akZGdUZlb3R3Z1l6UUhzQUppU29DbW9OQ0ZkaE4xT05FS1FMY1gxT2NRSUFUd3JVYzRBRkw2Y0hXZ1MKb1FOc3BTMlZIbENsVkpVN0E3Mkh4R3E5RFVJOWlaMmYxVnc1Rmpod0dxalBQMDJVZms1Tk9RNFgzNWlrcjlDcApyQWtJSnh1NkZPUUgwbDBmZ3VNUDlsUFhJZndlMUowQnNLZHRtd2wvcHp0TVV5dW5TbURVWEgyR1l5YmdQTlQyCnNMVFF1RFZaR0xmbFJUdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdTJHM00vU1JUUjdsM002SFE1REE2RU1MZDJYd2MzSVNLUzJIY2JBbjZia0JkcFhICi9KVnRBU2VyM3VUTlMvOXQySTBGOFFKVnAxSUttTFdBLzEyM1ljQS9xeEpjQkwxVXVMa1B4em10a09kRlFNeUwKRjVDS21adUxqK1F0d3p2Z2dvS3E4RUI5MnYvN3p3eFZsMlJOc3RxS2htTFRGc0p0MUxmbTZsUjBOL2ZJNnQ0WQpaTU5uRnZ5U2pkblJuaGZlUFpTRjVzdUxQcFd1NkprR1ZsWEFSS29EMXRndjFKSHlrWmxwVlE2ZmhweVR2M2FvClpmUys4TDVJV3JHd3kycG1OTXNHL0tDaWRVTDRxRmthcS9HSDdmZSthWVlkNU9IaEZpOVFoRFpSUGpxRUhFdVUKemp4b0x5ZTJVdG9qZTJGQytBUGg5cmZybUlFNzZrV2tvNnU0S3dJREFRQUJBb0lCQVFDbmZrZjViQko1Z2pYcgpzcnliKzRkRDFiSXBMdmpJNk4wczY2S1hUK1BOZW03QlprOVdDdWRkMGUxQ2x2aWZoeG5VS1BKM3BTT1ZKYk9OCkh5aklteWV4ZTl3dGVZTEJSYysyTXMzVXdrelFLcm52bXlaMWtPRWpQek40RW5tSmV6dEt6YXdvaHkwNGxmcXEKNzVhT2RiMHlNMEVCc05LSkZKQ0NSVVJtajhrMndJQXIwbHFhV0ZNcGlYT3FzTXBvWTZMY3plaGlMZHU0bUFaSQpRRHhCM3dLVGpmdGNIdzcxTmFKZlg5V2t2OFI4ZWlqeWpNOUl2Y1cwZmRQem9YVTBPZEFTa09ZRlFIZHlCUFNiCjllNWhDSGFJczZia1hBOEs4YmZRazBSL0d6STcyVXArd0JrbnJnTlhZTXFudHJSa0ljNURER1g0b3VOc2lqUkoKSWtrWER2TjVBb0dCQU8veFQrNTYyQ2hwc3R2NUpvMi9ycFdGb05tZ3ZJT0RMRGxiamhHZEpqKytwNk1BdjFQWgo2d042WnozMmppUG1OYzdCK2hrQm40RFQvVkFpU3NLRG1SK09tUkg1TVNzQXh6aWRxU3lNcldxdG1lMDNBVzd6Cklja0FNTGdwWHhDdW1HMzRCM2Jxb3VUdGVRdm5WcmRlR2hvdUJ5OUJSMVpXbnRtWHVscVhyNUFmQW9HQkFNZnIKN29NVGwzdUVVeml5a0IzYmkxb0RYdUNjN01Qc3h0c1IwdElqZXc3RStwTGoyaUxXZUZuMGVhdnJYaHQ1ODRJbwpDZG90a1ZMMHhrZ1g3M2ZremxEd1hobTJVTXBaQmxzSzBnR09SaUYzd0ZMU0hJNmxRUmJkaXRIb0JqcDRGTEZzCitlanZKUDZ1ZitBekZ5cjBLTnc3TnpyaCthbFhFQ09RS2NqUXJlWjFBb0dBQXRLZzhScEszcmJYbnRUZ2lqeGUKRG01REJTeHA2MVlvdUFnR3ROaFhjZHFKV0ZhUzZhYWZxQ3ZSZVI0a2IvR3VZbDlQMU9sNitlWUVqZVBKWTE1dQo5N3NTdSs1bGtLN3lxUXpaeDZka0J1UkI4bE42VmRiUVorL3pvc2NCMGsxcmg2ZXFWdEROMThtZmFlOXZ5cnAxCnJpY3FlSGpaSVAvbDRJTnpjc3RrQ2xzQ2dZQmh5TVZkZVZ5emZuS1NIY3lkdmY5MzVJUW9pcmpIeiswbnc1MEIKU1hkc0x1NThvRlBXakY1TGFXZUZybGJXUzV6T1FiVW44UGZPd29pbFJJZk5kYTF3SzFGcmRDQXFDTWN5Q3FYVApPdnFVYmhVMHJTNW9tdTJ1T0dnbzZUcjZxRGMrM1JXVFdEMFpFTkxkSDBBcXMwZTFDSVdvR0ZWYi9ZaVlUSEFUCmwvWW03UUtCZ1FEcFYvSjRMakY5VzBlUlNXenFBaDMxTStCdzNNN2NEMUxnUlZ6ZWxGS2w2ZzRBMWNvdU8wbHAKalpkMkVMZDlzTHhBVENVeFhQZ0dDTjY0RVNZSi92ZUozUmJzMTMrU2xqdjRleTVKck1ieEhNRC9CU1ovY2VjaAp4aFNWNkJsMHVKb2tlMTRPMEJ3OHJzSUlxZTVZSUxqSlMwL2E2eTllSlJtaGZJVG9PZU5PTUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
"""

    def config(self):
        # Server-side termination plus mandatory client-cert validation.
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
ambassador_id: {self.ambassador_id}
name: tls
config:
  server:
    enabled: True
    secret: test-clientcert-server-secret
  client:
    enabled: True
    secret: test-clientcert-client-secret
    cert_required: True
""")
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.target.path.k8s}
prefix: /{self.name}/
service: {self.target.path.fqdn}
""")

    def scheme(self) -> str:
        return "https"

    def queries(self):
        # With the client certificate: the request must succeed.
        yield Query(self.url(self.name + "/"), insecure=True, client_crt=self.presto_crt, client_key=self.presto_key, client_cert_required=True, ca_cert=self.ca_cert)
        # Without it: the TLS handshake itself must fail.
        yield Query(self.url(self.name + "/"), insecure=True, error="handshake failure")

    def requirements(self):
        # Readiness probes must also present the client certificate, or the
        # cert-requiring listener would reject them.
        for r in super().requirements():
            query = r[1]
            query.insecure = True
            # NOTE(review): queries() passes this as the 'client_crt' keyword
            # while the attribute set here is 'client_cert' — presumably the
            # kat harness stores the kwarg under this attribute name; confirm
            # against kat.harness.Query.
            query.client_cert = self.presto_crt
            query.client_key = self.presto_key
            query.client_cert_required = True
            query.ca_cert = self.ca_cert
            yield (r[0], query)
class TLSOriginationSecret(AmbassadorTest):
    """
    Verifies TLS origination to upstreams using a client cert/key pair,
    configured two ways: from a Kubernetes Secret ('upstream') and from
    files already on disk ('upstream-files'). check() asserts the backend
    actually saw a TLS connection in both cases.
    """

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # The cert/key pair used for originating TLS to the upstream.
        return super().manifests() + """
---
apiVersion: v1
kind: Secret
metadata:
  name: test-origination-secret
  labels:
    kat-ambassador-id: tlsoriginationsecret
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
"""

    def config(self):
        # Two origination contexts: 'upstream' references the Secret;
        # 'upstream-files' points at the decoded files Ambassador writes out
        # (path includes the secret's fingerprint — brittle if that changes).
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
ambassador_id: {self.ambassador_id}
name: tls
config:
  upstream:
    secret: test-origination-secret
  upstream-files:
    cert_chain_file: /tmp/ambassador/snapshots/default/secrets-decoded/test-origination-secret/F94E4DCF30ABC50DEF240AA8024599B67CC03991.crt
    private_key_file: /tmp/ambassador/snapshots/default/secrets-decoded/test-origination-secret/F94E4DCF30ABC50DEF240AA8024599B67CC03991.key
""")
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.target.path.k8s}
prefix: /{self.name}/
service: {self.target.path.fqdn}
tls: upstream
""")
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.target.path.k8s}-files
prefix: /{self.name}-files/
service: {self.target.path.fqdn}
tls: upstream-files
""")

    def queries(self):
        # One request through each origination context.
        yield Query(self.url(self.name + "/"))
        yield Query(self.url(self.name + "-files/"))

    def check(self):
        # The kat backend echoes connection details; every request must have
        # arrived over TLS.
        for r in self.results:
            assert r.backend.request.tls.enabled
class TLS(AmbassadorTest):
    """Terminate TLS at the Ambassador edge.

    Installs a ``kubernetes.io/tls`` secret (``test-tls-secret``) and points
    the ``tls`` module's ``server`` block at it, then verifies that a plain
    Mapping is reachable over HTTPS.  An ``ambassador-certs`` secret carrying
    the same self-signed localhost certificate is installed as well.
    """

    # Backend service the Mapping routes to; created in init().
    target: ServiceType

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # Two secrets, both carrying the same self-signed certificate for
        # CN=localhost: `test-tls-secret` (referenced by the tls module's
        # server block below) and `ambassador-certs`.
        return super().manifests() + """
---
apiVersion: v1
kind: Secret
metadata:
  name: test-tls-secret
  labels:
    kat-ambassador-id: tls
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
---
apiVersion: v1
kind: Secret
metadata:
  name: ambassador-certs
  labels:
    kat-ambassador-id: tls
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
"""

    def config(self):
        # Use self here, not self.target, because we want the TLS module to
        # be annotated on the Ambassador itself.
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
name: tls
ambassador_id: {self.ambassador_id}
config:
  server:
    enabled: True
    secret: test-tls-secret
""")

        # Use self.target _here_, because we want the httpbin mapping to
        # be annotated on the service, not the Ambassador. Also, you don't
        # need to include the ambassador_id unless you need some special
        # ambassador_id that isn't something that kat already knows about.
        #
        # If the test were more complex, we'd probably need to do some sort
        # of mangling for the mapping name and prefix. For this simple test,
        # it's not necessary.
        yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tls_target_mapping
prefix: /tls-target/
service: {self.target.path.fqdn}
""")

    def scheme(self) -> str:
        # All queries in this test are made over HTTPS.
        return "https"

    def queries(self):
        # insecure=True because the server certificate is self-signed, so the
        # client cannot validate it against a trusted CA.
        yield Query(self.url("tls-target/"), insecure=True)
class TLSInvalidSecret(AmbassadorTest):
    """Feed Ambassador a TLS module full of invalid contexts.

    The ``server`` and ``validation-without-termination`` contexts reference a
    secret that doesn't exist, ``bad-path-info`` points at nonexistent files,
    and ``missing-secret-key`` supplies a cert chain with no private key.
    Ambassador must keep serving (over plain HTTP, since termination can't be
    configured) and must surface one diagnostics error per problem.
    """

    # Backend service the Mapping routes to; created in init().
    target: ServiceType

    def init(self):
        self.target = HTTP()

    def config(self):
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
name: tls
ambassador_id: {self.ambassador_id}
config:
  server:
    enabled: True
    secret: test-certs-secret-invalid
  missing-secret-key:
    cert_chain_file: /nonesuch
  bad-path-info:
    cert_chain_file: /nonesuch
    private_key_file: /nonesuch
  validation-without-termination:
    enabled: True
    secret: test-certs-secret-invalid
    ca_secret: ambassador-certs
""")

        yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tls_target_mapping
prefix: /tls-target/
service: {self.target.path.fqdn}
""")

    def scheme(self) -> str:
        # The broken TLS config means Ambassador can't terminate TLS, so we
        # talk to it over plain HTTP.
        return "http"

    def queries(self):
        # phase=2 delays this query until after a reconfiguration cycle, so
        # the diagnostics service has had a chance to record the errors.
        yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), phase=2)

    def check(self):
        # The diag response body is a list of (error_source, error_text) pairs.
        errors = self.results[0].backend.response

        # Idiom cleanup vs. the original: plain set literal instead of the
        # redundant set({...}) wrapper, and a set comprehension instead of a
        # manual accumulation loop. Semantics are unchanged: every expected
        # error must appear among the reported ones (extras are tolerated).
        expected = {
            "TLSContext server found no certificate in secret test-certs-secret-invalid in namespace default, ignoring...",
            "TLSContext bad-path-info found no cert_chain_file '/nonesuch'",
            "TLSContext bad-path-info found no private_key_file '/nonesuch'",
            "TLSContext validation-without-termination found no certificate in secret test-certs-secret-invalid in namespace default, ignoring...",
            "TLSContext missing-secret-key: 'cert_chain_file' requires 'private_key_file' as well",
        }

        current = {errtext for _errsvc, errtext in errors}

        diff = expected - current

        assert len(diff) == 0, f'expected {len(expected)} errors, got {len(errors)}: Missing {diff}'
class TLSContextTest(AmbassadorTest):
    """Exercise SNI via TLSContext resources.

    Two TLSContexts serve different certificates for ``tls-context-host-1``
    (from a secret in a non-default namespace) and ``tls-context-host-2``,
    both covering the same ``/tls-context-same/`` prefix; a ``tls`` Module
    supplies the fallback certificate for requests matching neither host.
    Also verifies that a TLSContext with hosts but no certificate info
    produces exactly the expected diagnostics errors.
    """
    # debug = True

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # Three TLS secrets: secret-0 (fallback cert, CN=localhost) used by
        # the tls Module; secret-1 (CN=tls-context-host-1) deliberately placed
        # in `secret-namespace` to exercise cross-namespace secret refs; and
        # secret-2 (CN=tls-context-host-2) in the default namespace.
        return super().manifests() + """
---
apiVersion: v1
kind: Namespace
metadata:
  name: secret-namespace
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
  name: test-tlscontext-secret-0
  labels:
    kat-ambassador-id: tlscontext
type: kubernetes.io/tls
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
  name: test-tlscontext-secret-1
  namespace: secret-namespace
  labels:
    kat-ambassador-id: tlscontext
type: kubernetes.io/tls
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUlIWTY3cFNoZ3NyTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB5TUI0WERURTRNVEV3TVRFME1EUXhObG9YCkRUSTRNVEF5T1RFME1EUXhObG93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRJd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURjQThZdGgvUFdhT0dTCm9ObXZFSFoyNGpRN1BLTitENG93TEhXZWl1UmRtaEEwWU92VTN3cUczVnFZNFpwbFpBVjBQS2xELysyWlNGMTQKejh3MWVGNFFUelphWXh3eTkrd2ZITmtUREVwTWpQOEpNMk9FYnlrVVJ4VVJ2VzQrN0QzMEUyRXo1T1BseG1jMApNWU0vL0pINUVEUWhjaURybFlxZTFTUk1SQUxaZVZta2FBeXU2TkhKVEJ1ajBTSVB1ZExUY2grOTBxK3Jkd255CmZrVDF4M09UYW5iV2pub21FSmU3TXZ5NG12dnFxSUh1NDhTOUM4WmQxQkdWUGJ1OFYvVURyU1dROXpZQ1g0U0cKT2FzbDhDMFhtSDZrZW1oUERsRC9UdjB4dnlINXE1TVVjSGk0bUp0Titnem9iNTREd3pWR0VqZWY1TGVTMVY1RgowVEFQMGQrWEFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFmCkJnTlZIU01FR0RBV2dCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBbUFLYkNsdUhFZS9JRmJ1QWJneDBNenV6aTkwd2xtQVBiOGdtTwpxdmJwMjl1T1ZzVlNtUUFkZFBuZEZhTVhWcDFaaG1UVjVDU1F0ZFgyQ1ZNVyswVzQ3Qy9DT0Jkb1NFUTl5akJmCmlGRGNseG04QU4yUG1hR1FhK3hvT1hnWkxYZXJDaE5LV0JTWlIrWktYTEpTTTlVYUVTbEhmNXVuQkxFcENqK2oKZEJpSXFGY2E3eElGUGtyKzBSRW9BVmMveFBubnNhS2pMMlV5Z0dqUWZGTnhjT042Y3VjYjZMS0pYT1pFSVRiNQpINjhKdWFSQ0tyZWZZK0l5aFFWVk5taWk3dE1wY1UyS2pXNXBrVktxVTNkS0l0RXEyVmtTZHpNVUtqTnhZd3FGCll6YnozNFQ1MENXbm9HbU5SQVdKc0xlVmlPWVUyNmR3YkFXZDlVYitWMDFRam43OAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktrd2dnU2xBZ0VBQW9JQkFRRGNBOFl0aC9QV2FPR1MKb05tdkVIWjI0alE3UEtOK0Q0b3dMSFdlaXVSZG1oQTBZT3ZVM3dxRzNWcVk0WnBsWkFWMFBLbEQvKzJaU0YxNAp6OHcxZUY0UVR6WmFZeHd5OSt3ZkhOa1RERXBNalA4Sk0yT0VieWtVUnhVUnZXNCs3RDMwRTJFejVPUGx4bWMwCk1ZTS8vSkg1RURRaGNpRHJsWXFlMVNSTVJBTFplVm1rYUF5dTZOSEpUQnVqMFNJUHVkTFRjaCs5MHErcmR3bnkKZmtUMXgzT1RhbmJXam5vbUVKZTdNdnk0bXZ2cXFJSHU0OFM5QzhaZDFCR1ZQYnU4Vi9VRHJTV1E5ellDWDRTRwpPYXNsOEMwWG1INmtlbWhQRGxEL1R2MHh2eUg1cTVNVWNIaTRtSnROK2d6b2I1NER3elZHRWplZjVMZVMxVjVGCjBUQVAwZCtYQWdNQkFBRUNnZ0VCQUk2U3I0anYwZForanJhN0gzVnZ3S1RYZnl0bjV6YVlrVjhZWUh3RjIyakEKbm9HaTBSQllIUFU2V2l3NS9oaDRFWVM2anFHdkptUXZYY3NkTldMdEJsK2hSVUtiZVRtYUtWd2NFSnRrV24xeQozUTQwUytnVk5OU2NINDRvYUZuRU0zMklWWFFRZnBKMjJJZ2RFY1dVUVcvWnpUNWpPK3dPTXc4c1plSTZMSEtLCkdoOENsVDkrRGUvdXFqbjNCRnQwelZ3cnFLbllKSU1DSWFrb2lDRmtIcGhVTURFNVkyU1NLaGFGWndxMWtLd0sKdHFvWFpKQnlzYXhnUTFRa21mS1RnRkx5WlpXT01mRzVzb1VrU1RTeURFRzFsYnVYcHpUbTlVSTlKU2lsK01yaAp1LzVTeXBLOHBCSHhBdFg5VXdiTjFiRGw3Sng1SWJyMnNoM0F1UDF4OUpFQ2dZRUE4dGNTM09URXNOUFpQZlptCk9jaUduOW9STTdHVmVGdjMrL05iL3JodHp1L1RQUWJBSzhWZ3FrS0dPazNGN1krY2txS1NTWjFnUkF2SHBsZEIKaTY0Y0daT1dpK01jMWZVcEdVV2sxdnZXbG1nTUlQVjVtbFpvOHowMlNTdXhLZTI1Y2VNb09oenFlay9vRmFtdgoyTmxFeTh0dEhOMUxMS3grZllhMkpGcWVycThDZ1lFQTUvQUxHSXVrU3J0K0dkektJLzV5cjdSREpTVzIzUTJ4CkM5ZklUTUFSL1Q4dzNsWGhyUnRXcmlHL3l0QkVPNXdTMVIwdDkydW1nVkhIRTA5eFFXbzZ0Tm16QVBNb1RSekMKd08yYnJqQktBdUJkQ0RISjZsMlFnOEhPQWovUncrK2x4bEN0VEI2YS8xWEZIZnNHUGhqMEQrWlJiWVZzaE00UgpnSVVmdmpmQ1Y1a0NnWUVBMzdzL2FieHJhdThEaTQ3a0NBQ3o1N3FsZHBiNk92V2d0OFF5MGE5aG0vSmhFQ3lVCkNML0VtNWpHeWhpMWJuV05yNXVRWTdwVzR0cG5pdDJCU2d1VFlBMFYrck8zOFhmNThZcTBvRTFPR3l5cFlBUkoKa09SanRSYUVXVTJqNEJsaGJZZjNtL0xnSk9oUnp3T1RPNXFSUTZHY1dhZVlod1ExVmJrelByTXUxNGtDZ1lCbwp4dEhjWnNqelVidm5wd3hTTWxKUStaZ1RvZlAzN0lWOG1pQk1POEJrclRWQVczKzFtZElRbkFKdWRxTThZb2RICmF3VW03cVNyYXV3SjF5dU1wNWFadUhiYkNQMjl5QzVheFh3OHRtZlk0TTVtTTBmSjdqYW9ydGFId1pqYmNObHMKdTJsdUo2MVJoOGVpZ1pJU1gyZHgvMVB0ckFhWUFCZDcvYWVYWU0wVWtRS0JnUUNVbkFIdmRQUGhIVnJDWU1rTgpOOFBEK0t0YmhPRks2S3MvdlgyUkcyRnFmQkJPQWV3bEo1d0xWeFBLT1RpdytKS2FSeHhYMkcvREZVNzduOEQvCkR5V2RjM2ZCQWQ0a1lJamZVaGRGa1hHNEFMUDZBNVFIZVN4NzNScTFLNWxMVWhPbEZqc3VPZ0NKS28wVlFmRC8KT05paDB6SzN5Wmc3aDVQamZ1TUdGb09OQWc9PQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==
kind: Secret
metadata:
  name: test-tlscontext-secret-2
  labels:
    kat-ambassador-id: tlscontext
type: kubernetes.io/tls
"""

    def config(self):
        # Two Mappings share the /tls-context-same/ prefix but differ in the
        # `host` attribute; each host gets its own TLSContext (and thus its
        # own certificate) below.
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.name}-same-prefix-1
prefix: /tls-context-same/
service: http://{self.target.path.fqdn}
host: tls-context-host-1
""")
        # Note the cross-namespace secret reference: <name>.<namespace>.
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-1
hosts:
- tls-context-host-1
secret: test-tlscontext-secret-1.secret-namespace
min_tls_version: v1.0
max_tls_version: v1.3
""")
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: Mapping
name: {self.name}-same-prefix-2
prefix: /tls-context-same/
service: http://{self.target.path.fqdn}
host: tls-context-host-2
""")
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-2
hosts:
- tls-context-host-2
secret: test-tlscontext-secret-2
alpn_protocols: h2,http/1.1
""")
        # The tls Module supplies the fallback certificate used when the SNI
        # hostname matches none of the TLSContexts above.
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: Module
name: tls
config:
  server:
    enabled: True
    secret: test-tlscontext-secret-0
""")
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: Mapping
name: {self.name}-other-mapping
prefix: /{self.name}/
service: https://{self.target.path.fqdn}
""")
        # Ambassador should not return an error when hostname is not present.
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-no-secret
min_tls_version: v1.0
max_tls_version: v1.3
""")
        # Ambassador should return an error for this configuration: hosts
        # are given but no certificate information at all.
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-error
hosts:
- tls-context-host-1
""")

    def scheme(self) -> str:
        # Every query in this test goes over HTTPS.
        return "https"

    @staticmethod
    def _go_close_connection_error(url):
        """
        :param url: url passed to the query
        :return: error message string that Go's net/http package throws when server closes connection
        """
        return "Get {}: EOF".format(url)

    def queries(self):
        # NOTE: check() below indexes into self.results, so the order of
        # these queries matters.
        # 0
        yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"),
                    headers={"Host": "tls-context-host-2"},
                    insecure=True,
                    sni=True)

        # 1 - Correct host #1
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True)
        # 2 - Correct host #2
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-2"},
                    expected=200,
                    insecure=True,
                    sni=True)
        # 3 - Incorrect host
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-3"},
                    # error=self._go_close_connection_error(self.url("tls-context-same/")),
                    expected=404,
                    insecure=True)
        # 4 - Incorrect path, correct host
        yield Query(self.url("tls-context-different/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=404,
                    insecure=True,
                    sni=True)

        # Other mappings with no host will respond with the fallback cert.
        # 5 - no Host header, fallback cert from the TLS module
        yield Query(self.url(self.name + "/"),
                    # error=self._go_close_connection_error(self.url(self.name + "/")),
                    insecure=True)
        # 6 - explicit Host header, fallback cert
        yield Query(self.url(self.name + "/"),
                    # error=self._go_close_connection_error(self.url(self.name + "/")),
                    # sni=True,
                    headers={"Host": "tls-context-host-3"},
                    insecure=True)
        # 7 - explicit Host header 1 wins, we'll get the SNI cert for this overlapping path
        yield Query(self.url(self.name + "/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True)
        # 8 - explicit Host header 2 wins, we'll get the SNI cert for this overlapping path
        yield Query(self.url(self.name + "/"),
                    headers={"Host": "tls-context-host-2"},
                    expected=200,
                    insecure=True,
                    sni=True)

    def check(self):
        # First, the diag query (results[0]) must report exactly the two
        # errors produced by the -same-context-error TLSContext above.
        # XXX Ew. If self.results[0].json is empty, the harness won't convert it to a response.
        errors = self.results[0].json
        num_errors = len(errors)
        assert num_errors == 2, "expected 2 errors, got {} -\n{}".format(num_errors, errors)

        cert_err = errors[0]
        pkey_err = errors[1]

        assert cert_err[1] == 'TLSContext TLSContextTest-same-context-error is missing cert_chain_file'
        assert pkey_err[1] == 'TLSContext TLSContextTest-same-context-error is missing private_key_file'

        # Then, for every successful query that sent a Host header, the served
        # certificate's CN must match that Host (i.e. SNI picked the right
        # TLSContext).
        idx = 0

        for result in self.results:
            if result.status == 200 and result.query.headers:
                host_header = result.query.headers['Host']
                tls_common_name = result.tls[0]['Issuer']['CommonName']

                # XXX Weirdness with the fallback cert here! You see, if we use host
                # tls-context-host-3 (or, really, anything except -1 or -2), then the
                # fallback cert actually has CN 'localhost'. We should replace this with
                # a real fallback cert, but for now, just hack the host_header.
                #
                # Ew.
                if host_header == 'tls-context-host-3':
                    host_header = 'localhost'

                assert host_header == tls_common_name, "test %d wanted CN %s, but got %s" % (idx, host_header, tls_common_name)

            idx += 1

    def requirements(self):
        # We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True))
class TLSContextProtocolMaxVersion(AmbassadorTest):
    """
    Verify that a TLSContext's max_tls_version caps the protocol version a
    client can negotiate: v1.2 and v1.0-v1.1 handshakes must succeed, while a
    v1.3-only client must fail the handshake.
    """
    # Here we're testing that the client can't exceed the maximum TLS version
    # configured.
    #
    # XXX 2019-09-11: vet that the test client's support for TLS v1.3 is up-to-date.
    # It appears not to be.
    # debug = True

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # Install the TLS secret (cert + key for tls-context-host-1) that the
        # TLSContext below terminates with, on top of the standard manifests.
        return super().manifests() + """
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckp
    QSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
  name: secret.max-version
  labels:
    kat-ambassador-id: tlscontextprotocolmaxversion
type: kubernetes.io/tls
"""

    def config(self):
        # tls_secret_namespacing is disabled globally here so the dotted secret
        # name ("secret.max-version") isn't misread as "name.namespace".
        # The TLSContext pins the acceptable protocol window to [v1.1, v1.2].
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
name: ambassador
config:
  defaults:
    tls_secret_namespacing: False
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.name}-same-prefix-1
prefix: /tls-context-same/
service: http://{self.target.path.fqdn}
host: tls-context-host-1
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-1
hosts:
- tls-context-host-1
secret: secret.max-version
min_tls_version: v1.1
max_tls_version: v1.2
""")

    def scheme(self) -> str:
        return "https"

    @staticmethod
    def _go_close_connection_error(url):
        """
        :param url: url passed to the query
        :return: error message string that Go's net/http package throws when server closes connection
        """
        return "Get {}: EOF".format(url)

    def queries(self):
        # ----
        # XXX 2019-09-11
        # These aren't actually reporting the negotiated version, alhough correct
        # behavior can be verified with a custom log format. What, does the silly thing just not
        # report the negotiated version if it's the max you've requested??
        #
        # For now, we're checking for the None result, but, ew.
        # ----

        # This should negotiate TLS v1.2 (client pinned to exactly v1.2).
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.2",
                    maxTLSv="v1.2")

        # This should give us TLS v1.1
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.0",
                    maxTLSv="v1.1")

        # This should be an error: the client demands v1.3, above max_tls_version.
        # Any one of these messages is acceptable (they vary with client version).
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.3",
                    maxTLSv="v1.3",
                    error=[ "tls: server selected unsupported protocol version 303",
                            "tls: no supported versions satisfy MinVersion and MaxVersion",
                            "tls: protocol version not supported" ])

    def check(self):
        # Results are positional: results[0]/results[1] correspond to the first
        # two queries above (the error query produces no backend result to check).
        tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version
        tls_1_version = self.results[1].backend.request.tls.negotiated_protocol_version

        # See comment in queries for why these are None. They should be v1.2 and v1.1 respectively.
        # (PEP 8: compare against None with `is`, not `==`.)
        assert tls_0_version is None, f"requesting TLS v1.2 got TLS {tls_0_version}"
        assert tls_1_version is None, f"requesting TLS v1.0-v1.1 got TLS {tls_1_version}"

    def requirements(self):
        # We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True, minTLSv="v1.2"))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True, minTLSv="v1.2"))
class TLSContextProtocolMinVersion(AmbassadorTest):
    """
    Verify that a TLSContext's min_tls_version is a hard floor: v1.2-capable
    clients must succeed, while a v1.0-only client must fail the handshake.
    """
    # Here we're testing that the client can't drop below the minimum TLS version
    # configured.
    #
    # XXX 2019-09-11: vet that the test client's support for TLS v1.3 is up-to-date.
    # It appears not to be.
    # debug = True

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # Install the TLS secret (cert + key for tls-context-host-1) that the
        # TLSContext below terminates with, on top of the standard manifests.
        return super().manifests() + """
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckp
    QSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
  name: secret.min-version
  labels:
    kat-ambassador-id: tlscontextprotocolminversion
type: kubernetes.io/tls
"""

    def config(self):
        # Unlike the max-version test, secret_namespacing is disabled per-context
        # (not via Module defaults), and the Mapping originates TLS to the target
        # (service: https://...). The TLSContext pins the window to [v1.2, v1.3].
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.name}-same-prefix-1
prefix: /tls-context-same/
service: https://{self.target.path.fqdn}
host: tls-context-host-1
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-1
hosts:
- tls-context-host-1
secret: secret.min-version
secret_namespacing: False
min_tls_version: v1.2
max_tls_version: v1.3
""")

    def scheme(self) -> str:
        return "https"

    @staticmethod
    def _go_close_connection_error(url):
        """
        :param url: url passed to the query
        :return: error message string that Go's net/http package throws when server closes connection
        """
        return "Get {}: EOF".format(url)

    def queries(self):
        # This should give v1.3, but it currently seems to give 1.2.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.2",
                    maxTLSv="v1.3")

        # This should give v1.2
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.1",
                    maxTLSv="v1.2")

        # This should be an error: the client caps at v1.0, below min_tls_version.
        # Any one of these messages is acceptable (they vary with client version).
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.0",
                    maxTLSv="v1.0",
                    error=[ "tls: server selected unsupported protocol version 303",
                            "tls: no supported versions satisfy MinVersion and MaxVersion",
                            "tls: protocol version not supported" ])

    def check(self):
        # Results are positional: results[0]/results[1] correspond to the first
        # two queries above (the error query produces no backend result to check).
        tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version
        tls_1_version = self.results[1].backend.request.tls.negotiated_protocol_version

        # Hmmm. Why does Envoy prefer 1.2 to 1.3 here?? This may be a client thing -- have to
        # rebuild with Go 1.13.
        assert tls_0_version == "v1.2", f"requesting TLS v1.2-v1.3 got TLS {tls_0_version}"
        assert tls_1_version == "v1.2", f"requesting TLS v1.1-v1.2 got TLS {tls_1_version}"

    def requirements(self):
        # We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
class TLSContextCipherSuites(AmbassadorTest):
    """
    Verify that a TLSContext's cipher_suites and ecdh_curves restrictions are
    enforced: a client offering the configured RSA/P-256 combination succeeds,
    while a client limited to a different cipher or a different curve fails.
    """
    # debug = True

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # Install the TLS secret (cert + key for tls-context-host-1) that the
        # TLSContext below terminates with, on top of the standard manifests.
        return super().manifests() + """
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckp
    QSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
  name: secret.cipher-suites
  labels:
    kat-ambassador-id: tlscontextciphersuites
type: kubernetes.io/tls
"""

    def config(self):
        # The Mapping and the TLSContext are yielded as two separate config
        # snippets. The TLSContext allows exactly one cipher suite
        # (ECDHE-RSA-AES128-GCM-SHA256) and one ECDH curve (P-256), capped at
        # TLS v1.2 (TLS v1.3 cipher suites are not configurable this way).
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.name}-same-prefix-1
prefix: /tls-context-same/
service: https://{self.target.path.fqdn}
host: tls-context-host-1
""")
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-1
hosts:
- tls-context-host-1
secret: secret.cipher-suites
secret_namespacing: False
max_tls_version: v1.2
cipher_suites:
- ECDHE-RSA-AES128-GCM-SHA256
ecdh_curves:
- P-256
""")

    def scheme(self) -> str:
        return "https"

    @staticmethod
    def _go_close_connection_error(url):
        """
        :param url: url passed to the query
        :return: error message string that Go's net/http package throws when server closes connection
        """
        return "Get {}: EOF".format(url)

    def queries(self):
        # Client offers the one allowed suite (Go spelling of
        # ECDHE-RSA-AES128-GCM-SHA256): handshake must succeed.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    cipherSuites=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"],
                    maxTLSv="v1.2")

        # Client offers only an ECDSA suite, which the context doesn't allow:
        # handshake must fail.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    cipherSuites=["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"],
                    maxTLSv="v1.2",
                    error="tls: handshake failure",)

        # Right cipher suite but the wrong curve (X25519 vs the allowed P-256):
        # handshake must fail.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    cipherSuites=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"],
                    ecdhCurves=["X25519"],
                    maxTLSv="v1.2",
                    error="tls: handshake failure",)

    def check(self):
        # Only the first query completes a handshake; confirm it landed on v1.2.
        tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version
        assert tls_0_version == "v1.2", f"requesting TLS v1.2 got TLS {tls_0_version}"

    def requirements(self):
        # Readiness probes need the Host header + SNI to reach the TLS vhost.
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
| 76.136364 | 2,291 | 0.859824 | from kat.harness import Query
from abstract_tests import AmbassadorTest, HTTP, ServiceType
class TLSContextsTest(AmbassadorTest):
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return super().manifests() + """
---
apiVersion: v1
metadata:
name: test-tlscontexts-secret
labels:
kat-ambassador-id: tlscontextstest
data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR1RENDQXFDZ0F3SUJBZ0lKQUowWDU3ZXlwQk5UTUEwR0NTcUdTSWIzRFFFQkN3VUFNSEV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4R3pBWkJnTlZCQU1NRW0xaGMzUmxjaTVrCllYUmhkMmx5WlM1cGJ6QWVGdzB4T1RBeE1UQXhPVEF6TXpCYUZ3MHlOREF4TURreE9UQXpNekJhTUhFeEN6QUoKQmdOVkJBWVRBbFZUTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRSwpEQWhFWVhSaGQybHlaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEd6QVpCZ05WQkFNTUVtMWhjM1JsCmNpNWtZWFJoZDJseVpTNXBiekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPdlEKVjVad1NmcmQ1Vndtelo5SmNoOTdyUW40OXA2b1FiNkVIWjF5T2EyZXZBNzE2NWpkMHFqS1BPMlgyRk80MVg4QgpwQWFLZExnMmltaC9wL2NXN2JncjNHNnRHVEZVMVZHanllTE1EV0Q1MGV2TTYydnpYOFRuYVV6ZFRHTjFOdTM2CnJaM2JnK0VLcjhFYjI1b2RabEpyMm1mNktSeDdTcjZzT1N4NlE1VHhSb3NycmZ0d0tjejI5cHZlMGQ4b0NiZGkKRFJPVlZjNXpBaW0zc2Nmd3VwRUJrQzYxdlpKMzhmaXYwRENYOVpna3BMdEZKUTllTEVQSEdKUGp5ZmV3alNTeQovbk52L21Sc2J6aUNtQ3R3Z3BmbFRtODljK3EzSWhvbUE1YXhZQVFjQ0NqOXBvNUhVZHJtSUJKR0xBTVZ5OWJ5CkZnZE50aFdBeHZCNHZmQXl4OXNDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRkdUOVAvOHBQeGI3UVJVeFcvV2gKaXpkMnNnbEtNQjhHQTFVZEl3UVlNQmFBRkdUOVAvOHBQeGI3UVJVeFcvV2hpemQyc2dsS01BOEdBMVVkRXdFQgovd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS3NWT2Fyc01aSXhLOUpLUzBHVHNnRXNjYThqCllhTDg1YmFsbndBbnBxMllSMGNIMlhvd2dLYjNyM3VmbVRCNERzWS9RMGllaENKeTMzOUJyNjVQMVBKMGgvemYKZEZOcnZKNGlvWDVMWnc5YkowQVFORCtZUTBFK010dFppbE9DbHNPOVBCdm1tUEp1dWFlYVdvS2pWZnNOL1RjMAoycUxVM1pVMHo5bmhYeDZlOWJxYUZLSU1jYnFiVk9nS2p3V0ZpbDlkRG4vQ29KbGFUUzRJWjlOaHFjUzhYMXd0ClQybWQvSUtaaEtKc3A3VlBGeDU5ZWhuZ0VPakZocGhzd20xdDhnQWVxL1A3SkhaUXlBUGZYbDNyZDFSQVJuRVIKQUpmVUxET2tzWFNFb2RTZittR0NrVWh1b2QvaDhMTUdXTFh6Q2d0SHBKMndaVHA5a1ZWVWtKdkpqSVU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
kind: Secret
type: Opaque
"""
def config(self):
yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
name: tls
ambassador_id: {self.ambassador_id}
config:
upstream:
enabled: True
secret: test-tlscontexts-secret
""")
yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.target.path.k8s}
prefix: /{self.name}/
service: {self.target.path.fqdn}
""")
def scheme(self) -> str:
return "https"
def queries(self):
yield Query(self.url(self.name + "/"), error=['connection refused', 'connection reset by peer', 'EOF', 'request canceled'])
def requirements(self):
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("http://"))
class ClientCertificateAuthentication(AmbassadorTest):
presto_crt = """
-----BEGIN CERTIFICATE-----
MIIDYTCCAkkCCQCrK74a3GFhijANBgkqhkiG9w0BAQsFADBxMQswCQYDVQQGEwJV
UzELMAkGA1UECAwCTUExDzANBgNVBAcMBkJvc3RvbjERMA8GA1UECgwIRGF0YXdp
cmUxFDASBgNVBAsMC0VuZ2luZWVyaW5nMRswGQYDVQQDDBJtYXN0ZXIuZGF0YXdp
cmUuaW8wIBcNMTkwMTEwMTkxOTUyWhgPMjExODEyMTcxOTE5NTJaMHIxCzAJBgNV
BAYTAklOMQswCQYDVQQIDAJLQTESMBAGA1UEBwwJQmFuZ2Fsb3JlMQ8wDQYDVQQK
DAZQcmVzdG8xFDASBgNVBAsMC0VuZ2luZWVyaW5nMRswGQYDVQQDDBJwcmVzdG8u
ZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvPcFp
hw5Ja67z23L4YCYTgNdw4eVh7EHyzOpmf3VGhvx/UtNMVOH7Dcf+I7QEyxtQeBiZ
HOcThgr/k/wrAbMjdThRS8yJxRZgj79Li92pKkJbhLGsBeTuw8lBhtwyn85vEZrt
TOWEjlXHHLlz1OHiSAfYChIGjenPu5sT++O1AAs15b/0STBxkrZHGVimCU6qEWqB
PYVcGYqXdb90mbsuY5GAdAzUBCGQH/RLZAl8ledT+uzkcgHcF30gUT5Ik5Ks4l/V
t+C6I52Y0S4aCkT38XMYKMiBh7XzpjJUnR0pW5TYS37wq6nnVFsNReaMKmbOWp1X
5wEjoRJqDrHtVvjDAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAI3LR5fS6D6yFa6b
yl6+U/i44R3VYJP1rkee0s4C4WbyXHURTqQ/0z9wLU+0Hk57HI+7f5HO/Sr0q3B3
wuZih+TUbbsx5jZW5e++FKydFWpx7KY4MUJmePydEMoUaSQjHWnlAuv9PGp5ZZ30
t0lP/mVGNAeiXsILV8gRHnP6aV5XywK8c+828BQDRfizJ+uKYvnAJmqpn4aOOJh9
csjrK52+RNebMT0VxZF4JYGd0k00au9CaciWpPk69C+A/7K/xtV4ZFtddVP9SldF
ahmIu2g3fI5G+/2Oz8J+qX2B+QqT21/pOPKnMQU54BQ6bmI3fBM9B+2zm92FfgYH
9wgA5+Y=
-----END CERTIFICATE-----
"""
presto_key = """
-----BEGIN RSA PRIVATE KEY-----
MIIEoQIBAAKCAQEArz3BaYcOSWuu89ty+GAmE4DXcOHlYexB8szqZn91Rob8f1LT
TFTh+w3H/iO0BMsbUHgYmRznE4YK/5P8KwGzI3U4UUvMicUWYI+/S4vdqSpCW4Sx
rAXk7sPJQYbcMp/ObxGa7UzlhI5Vxxy5c9Th4kgH2AoSBo3pz7ubE/vjtQALNeW/
9EkwcZK2RxlYpglOqhFqgT2FXBmKl3W/dJm7LmORgHQM1AQhkB/0S2QJfJXnU/rs
5HIB3Bd9IFE+SJOSrOJf1bfguiOdmNEuGgpE9/FzGCjIgYe186YyVJ0dKVuU2Et+
8Kup51RbDUXmjCpmzlqdV+cBI6ESag6x7Vb4wwIDAQABAoIBAHfXwPS9Mw0NAoms
kzS+9Gs0GqINKoTMQNGeR9Mu6XIBEJ62cuBp0F2TsCjiG9OHXzep2hCkDndwnQbq
GnMC55KhMJGQR+IUEdiZldZBYaa1ysmxtpwRL94FsRYJ9377gP6+SHhutSvw90KD
J2TKumu4nPym7mrjFHpHL6f8BF6b9dJftE2o27TX04+39kPiX4d+4CLfG7YFteYR
98qYHwAk58+s3jJxk7gaDehb0PvOIma02eLF7dNA7h0BtB2h2rfPLNlgKv2MN7k3
NxRHwXEzSCfK8rL8yxQLo4gOy3up+LU7LRERBIkpOyS5tkKcIGoG1w5zEB4sqJZC
Me2ZbUkCgYEA4RGHtfYkecTIBwSCgdCqJYa1zEr35xbgqxOWF7DfjjMwfxeitdh+
U487SpDpoH68Rl/pnqQcHToQWRfLGXv0NZxsQDH5UulK2dLy2JfQSlFMWc0rQ210
v8F35GXohB3vi4Tfrl8wrkEBbCBoZDmp7MPZEGVGb0KVl+gU2u19CwUCgYEAx1Mt
w6M8+bj3ZQ9Va9tcHSk9IVRKx0fklWY0/cmoGw5P2q/Yudd3CGupINGEA/lHqqW3
boxfdneYijOmTQO9/od3/NQRDdTrCRKOautts5zeJw7fUvls5/Iip5ZryR5mYqEz
Q/yMffzZPYVPXR0E/HEnCjf8Vs+0dDa2QwAhDycCf0j4ZgeYxjq0kiW0UJvGC2Qf
SNHzfGxv/md48jC8J77y2cZa42YRyuNMjOygDx75+BDZB+VnT7YqHSLFlBOvHH5F
ONOXYD6BZMM6oYGXtvBha1+yJVS3KCMDltt2LuymyAN0ERF3y1CzwsJLv4y/JVie
JsIqE6v+6oFVvW09kk0CgYEAuazRL7ILJfDYfAqJnxxLNVrp9/cmZXaiB02bRWIp
N3Lgji1KbOu6lVx8wvaIzI7U5LDUK6WVc6y6qtqsKoe237hf3GPLsx/JBb2EbzL6
ENuq0aV4AToZ6gLTp1tm8oVgCLZzI/zI/r+fukBJispyj5n0LP+0D0YSqkMhC06+
fPcCgYB85vDLHorvbb8CYcIOvJxogMjXVasOfSLqtCkzICg4i6qCmLkXbs0qmDIz
bIpIFzUdXu3tu+gPV6ab9dPmpj1M77yu7+QLL7zRy/1/EJaY/tFjWzcuF5tP7jKT
UZCMWuBXFwTbeSQHESs5IWpSDxBGJbSNFmCeyo52Dw/fSYxUEg==
-----END RSA PRIVATE KEY-----
"""
ca_cert = """
-----BEGIN CERTIFICATE-----
MIIDuDCCAqCgAwIBAgIJAJ0X57eypBNTMA0GCSqGSIb3DQEBCwUAMHExCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJNQTEPMA0GA1UEBwwGQm9zdG9uMREwDwYDVQQKDAhE
YXRhd2lyZTEUMBIGA1UECwwLRW5naW5lZXJpbmcxGzAZBgNVBAMMEm1hc3Rlci5k
YXRhd2lyZS5pbzAeFw0xOTAxMTAxOTAzMzBaFw0yNDAxMDkxOTAzMzBaMHExCzAJ
BgNVBAYTAlVTMQswCQYDVQQIDAJNQTEPMA0GA1UEBwwGQm9zdG9uMREwDwYDVQQK
DAhEYXRhd2lyZTEUMBIGA1UECwwLRW5naW5lZXJpbmcxGzAZBgNVBAMMEm1hc3Rl
ci5kYXRhd2lyZS5pbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOvQ
V5ZwSfrd5VwmzZ9Jch97rQn49p6oQb6EHZ1yOa2evA7165jd0qjKPO2X2FO41X8B
pAaKdLg2imh/p/cW7bgr3G6tGTFU1VGjyeLMDWD50evM62vzX8TnaUzdTGN1Nu36
rZ3bg+EKr8Eb25odZlJr2mf6KRx7Sr6sOSx6Q5TxRosrrftwKcz29pve0d8oCbdi
DROVVc5zAim3scfwupEBkC61vZJ38fiv0DCX9ZgkpLtFJQ9eLEPHGJPjyfewjSSy
/nNv/mRsbziCmCtwgpflTm89c+q3IhomA5axYAQcCCj9po5HUdrmIBJGLAMVy9by
FgdNthWAxvB4vfAyx9sCAwEAAaNTMFEwHQYDVR0OBBYEFGT9P/8pPxb7QRUxW/Wh
izd2sglKMB8GA1UdIwQYMBaAFGT9P/8pPxb7QRUxW/Whizd2sglKMA8GA1UdEwEB
/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAKsVOarsMZIxK9JKS0GTsgEsca8j
YaL85balnwAnpq2YR0cH2XowgKb3r3ufmTB4DsY/Q0iehCJy339Br65P1PJ0h/zf
dFNrvJ4ioX5LZw9bJ0AQND+YQ0E+MttZilOClsO9PBvmmPJuuaeaWoKjVfsN/Tc0
2qLU3ZU0z9nhXx6e9bqaFKIMcbqbVOgKjwWFil9dDn/CoJlaTS4IZ9NhqcS8X1wt
T2md/IKZhKJsp7VPFx59ehngEOjFhphswm1t8gAeq/P7JHZQyAPfXl3rd1RARnER
AJfULDOksXSEodSf+mGCkUhuod/h8LMGWLXzCgtHpJ2wZTp9kVVUkJvJjIU=
-----END CERTIFICATE-----
"""
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return super().manifests() + """
---
apiVersion: v1
metadata:
name: test-clientcert-client-secret
labels:
kat-ambassador-id: clientcertificateauthentication
data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR1RENDQXFDZ0F3SUJBZ0lKQUowWDU3ZXlwQk5UTUEwR0NTcUdTSWIzRFFFQkN3VUFNSEV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4R3pBWkJnTlZCQU1NRW0xaGMzUmxjaTVrCllYUmhkMmx5WlM1cGJ6QWVGdzB4T1RBeE1UQXhPVEF6TXpCYUZ3MHlOREF4TURreE9UQXpNekJhTUhFeEN6QUoKQmdOVkJBWVRBbFZUTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRSwpEQWhFWVhSaGQybHlaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEd6QVpCZ05WQkFNTUVtMWhjM1JsCmNpNWtZWFJoZDJseVpTNXBiekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFPdlEKVjVad1NmcmQ1Vndtelo5SmNoOTdyUW40OXA2b1FiNkVIWjF5T2EyZXZBNzE2NWpkMHFqS1BPMlgyRk80MVg4QgpwQWFLZExnMmltaC9wL2NXN2JncjNHNnRHVEZVMVZHanllTE1EV0Q1MGV2TTYydnpYOFRuYVV6ZFRHTjFOdTM2CnJaM2JnK0VLcjhFYjI1b2RabEpyMm1mNktSeDdTcjZzT1N4NlE1VHhSb3NycmZ0d0tjejI5cHZlMGQ4b0NiZGkKRFJPVlZjNXpBaW0zc2Nmd3VwRUJrQzYxdlpKMzhmaXYwRENYOVpna3BMdEZKUTllTEVQSEdKUGp5ZmV3alNTeQovbk52L21Sc2J6aUNtQ3R3Z3BmbFRtODljK3EzSWhvbUE1YXhZQVFjQ0NqOXBvNUhVZHJtSUJKR0xBTVZ5OWJ5CkZnZE50aFdBeHZCNHZmQXl4OXNDQXdFQUFhTlRNRkV3SFFZRFZSME9CQllFRkdUOVAvOHBQeGI3UVJVeFcvV2gKaXpkMnNnbEtNQjhHQTFVZEl3UVlNQmFBRkdUOVAvOHBQeGI3UVJVeFcvV2hpemQyc2dsS01BOEdBMVVkRXdFQgovd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBS3NWT2Fyc01aSXhLOUpLUzBHVHNnRXNjYThqCllhTDg1YmFsbndBbnBxMllSMGNIMlhvd2dLYjNyM3VmbVRCNERzWS9RMGllaENKeTMzOUJyNjVQMVBKMGgvemYKZEZOcnZKNGlvWDVMWnc5YkowQVFORCtZUTBFK010dFppbE9DbHNPOVBCdm1tUEp1dWFlYVdvS2pWZnNOL1RjMAoycUxVM1pVMHo5bmhYeDZlOWJxYUZLSU1jYnFiVk9nS2p3V0ZpbDlkRG4vQ29KbGFUUzRJWjlOaHFjUzhYMXd0ClQybWQvSUtaaEtKc3A3VlBGeDU5ZWhuZ0VPakZocGhzd20xdDhnQWVxL1A3SkhaUXlBUGZYbDNyZDFSQVJuRVIKQUpmVUxET2tzWFNFb2RTZittR0NrVWh1b2QvaDhMTUdXTFh6Q2d0SHBKMndaVHA5a1ZWVWtKdkpqSVU9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
kind: Secret
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
name: test-clientcert-server-secret
labels:
kat-ambassador-id: clientcertificateauthentication
type: kubernetes.io/tls
data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURaekNDQWs4Q0NRQ3JLNzRhM0dGaGlUQU5CZ2txaGtpRzl3MEJBUXNGQURCeE1Rc3dDUVlEVlFRR0V3SlYKVXpFTE1Ba0dBMVVFQ0F3Q1RVRXhEekFOQmdOVkJBY01Ca0p2YzNSdmJqRVJNQThHQTFVRUNnd0lSR0YwWVhkcApjbVV4RkRBU0JnTlZCQXNNQzBWdVoybHVaV1Z5YVc1bk1Sc3dHUVlEVlFRRERCSnRZWE4wWlhJdVpHRjBZWGRwCmNtVXVhVzh3SGhjTk1Ua3dNVEV3TVRrd056TTRXaGNOTWprd01UQTNNVGt3TnpNNFdqQjZNUXN3Q1FZRFZRUUcKRXdKSlRqRUxNQWtHQTFVRUNBd0NTMEV4RWpBUUJnTlZCQWNNQ1VKaGJtZGhiRzl5WlRFVE1CRUdBMVVFQ2d3SwpRVzFpWVhOellXUnZjakVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEh6QWRCZ05WQkFNTUZtRnRZbUZ6CmMyRmtiM0l1WlhoaGJYQnNaUzVqYjIwd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUIKQVFDN1liY3o5SkZOSHVYY3pvZERrTURvUXd0M1pmQnpjaElwTFlkeHNDZnB1UUYybGNmOGxXMEJKNnZlNU0xTAovMjNZalFYeEFsV25VZ3FZdFlEL1hiZGh3RCtyRWx3RXZWUzR1US9IT2EyUTUwVkF6SXNYa0lxWm00dVA1QzNECk8rQ0NncXJ3UUgzYS8vdlBERldYWkUyeTJvcUdZdE1Xd20zVXQrYnFWSFEzOThqcTNoaGt3MmNXL0pLTjJkR2UKRjk0OWxJWG15NHMrbGE3b21RWldWY0JFcWdQVzJDL1VrZktSbVdsVkRwK0duSk8vZHFobDlMN3d2a2hhc2JETAphbVkweXdiOG9LSjFRdmlvV1JxcjhZZnQ5NzVwaGgzazRlRVdMMUNFTmxFK09vUWNTNVRPUEdndko3WlMyaU43CllVTDRBK0gydCt1WWdUdnFSYVNqcTdnckFnTUJBQUV3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUJURGJ4MzkKUGpoT2JpVW1Rdm9vbVhOVjJ1TG1FZkxJcGlKQUhWOTM0VTlmMnhVUS93eExkcElhVXM0WTlRSzhOR2h2U3dSSAp4Y2w4R2hGYzBXRDRoNEJTdmNhdUdVS21LRzh5ZVFhdGhGVjBzcGFHYjUvaFBqUVdDWnNYK3crbjU4WDROOHBrCmx5YkE4akZGdUZlb3R3Z1l6UUhzQUppU29DbW9OQ0ZkaE4xT05FS1FMY1gxT2NRSUFUd3JVYzRBRkw2Y0hXZ1MKb1FOc3BTMlZIbENsVkpVN0E3Mkh4R3E5RFVJOWlaMmYxVnc1Rmpod0dxalBQMDJVZms1Tk9RNFgzNWlrcjlDcApyQWtJSnh1NkZPUUgwbDBmZ3VNUDlsUFhJZndlMUowQnNLZHRtd2wvcHp0TVV5dW5TbURVWEgyR1l5YmdQTlQyCnNMVFF1RFZaR0xmbFJUdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdTJHM00vU1JUUjdsM002SFE1REE2RU1MZDJYd2MzSVNLUzJIY2JBbjZia0JkcFhICi9KVnRBU2VyM3VUTlMvOXQySTBGOFFKVnAxSUttTFdBLzEyM1ljQS9xeEpjQkwxVXVMa1B4em10a09kRlFNeUwKRjVDS21adUxqK1F0d3p2Z2dvS3E4RUI5MnYvN3p3eFZsMlJOc3RxS2htTFRGc0p0MUxmbTZsUjBOL2ZJNnQ0WQpaTU5uRnZ5U2pkblJuaGZlUFpTRjVzdUxQcFd1NkprR1ZsWEFSS29EMXRndjFKSHlrWmxwVlE2ZmhweVR2M2FvClpmUys4TDVJV3JHd3kycG1OTXNHL0tDaWRVTDRxRmthcS9HSDdmZSthWVlkNU9IaEZpOVFoRFpSUGpxRUhFdVUKemp4b0x5ZTJVdG9qZTJGQytBUGg5cmZybUlFNzZrV2tvNnU0S3dJREFRQUJBb0lCQVFDbmZrZjViQko1Z2pYcgpzcnliKzRkRDFiSXBMdmpJNk4wczY2S1hUK1BOZW03QlprOVdDdWRkMGUxQ2x2aWZoeG5VS1BKM3BTT1ZKYk9OCkh5aklteWV4ZTl3dGVZTEJSYysyTXMzVXdrelFLcm52bXlaMWtPRWpQek40RW5tSmV6dEt6YXdvaHkwNGxmcXEKNzVhT2RiMHlNMEVCc05LSkZKQ0NSVVJtajhrMndJQXIwbHFhV0ZNcGlYT3FzTXBvWTZMY3plaGlMZHU0bUFaSQpRRHhCM3dLVGpmdGNIdzcxTmFKZlg5V2t2OFI4ZWlqeWpNOUl2Y1cwZmRQem9YVTBPZEFTa09ZRlFIZHlCUFNiCjllNWhDSGFJczZia1hBOEs4YmZRazBSL0d6STcyVXArd0JrbnJnTlhZTXFudHJSa0ljNURER1g0b3VOc2lqUkoKSWtrWER2TjVBb0dCQU8veFQrNTYyQ2hwc3R2NUpvMi9ycFdGb05tZ3ZJT0RMRGxiamhHZEpqKytwNk1BdjFQWgo2d042WnozMmppUG1OYzdCK2hrQm40RFQvVkFpU3NLRG1SK09tUkg1TVNzQXh6aWRxU3lNcldxdG1lMDNBVzd6Cklja0FNTGdwWHhDdW1HMzRCM2Jxb3VUdGVRdm5WcmRlR2hvdUJ5OUJSMVpXbnRtWHVscVhyNUFmQW9HQkFNZnIKN29NVGwzdUVVeml5a0IzYmkxb0RYdUNjN01Qc3h0c1IwdElqZXc3RStwTGoyaUxXZUZuMGVhdnJYaHQ1ODRJbwpDZG90a1ZMMHhrZ1g3M2ZremxEd1hobTJVTXBaQmxzSzBnR09SaUYzd0ZMU0hJNmxRUmJkaXRIb0JqcDRGTEZzCitlanZKUDZ1ZitBekZ5cjBLTnc3TnpyaCthbFhFQ09RS2NqUXJlWjFBb0dBQXRLZzhScEszcmJYbnRUZ2lqeGUKRG01REJTeHA2MVlvdUFnR3ROaFhjZHFKV0ZhUzZhYWZxQ3ZSZVI0a2IvR3VZbDlQMU9sNitlWUVqZVBKWTE1dQo5N3NTdSs1bGtLN3lxUXpaeDZka0J1UkI4bE42VmRiUVorL3pvc2NCMGsxcmg2ZXFWdEROMThtZmFlOXZ5cnAxCnJpY3FlSGpaSVAvbDRJTnpjc3RrQ2xzQ2dZQmh5TVZkZVZ5emZuS1NIY3lkdmY5MzVJUW9pcmpIeiswbnc1MEIKU1hkc0x1NThvRlBXakY1TGFXZUZybGJXUzV6T1FiVW44UGZPd29pbFJJZk5kYTF3SzFGcmRDQXFDTWN5Q3FYVApPdnFVYmhVMHJTNW9tdTJ1T0dnbzZUcjZxRGMrM1JXVFdEMFpFTkxkSDBBcXMwZTFDSVdvR0ZWYi9ZaVlUSEFUCmwvWW03UUtCZ1FEcFYvSjRMakY5VzBlUlNXenFBaDN
1TStCdzNNN2NEMUxnUlZ6ZWxGS2w2ZzRBMWNvdU8wbHAKalpkMkVMZDlzTHhBVENVeFhQZ0dDTjY0RVNZSi92ZUozUmJzMTMrU2xqdjRleTVKck1ieEhNRC9CU1ovY2VjaAp4aFNWNkJsMHVKb2tlMTRPMEJ3OHJzSUlxZTVZSUxqSlMwL2E2eTllSlJtaGZJVG9PZU5PTUE9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
"""
def config(self):
    """Yield Ambassador config: a TLS module requiring client certs, plus a mapping.

    The server block terminates TLS with the server secret; the client block
    demands a client certificate validated against the client secret.
    """
    yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
ambassador_id: {self.ambassador_id}
name: tls
config:
  server:
    enabled: True
    secret: test-clientcert-server-secret
  client:
    enabled: True
    secret: test-clientcert-client-secret
    cert_required: True
""")
    # A plain mapping so there is something routable behind the TLS listener.
    yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.target.path.k8s}
prefix: /{self.name}/
service: {self.target.path.fqdn}
""")
def scheme(self) -> str:
    """All queries in this test run over HTTPS (TLS is terminated here)."""
    return "https"
def queries(self):
    """Probe the mapping twice: once presenting the client cert, once without."""
    # With the expected client certificate, the handshake must succeed.
    yield Query(self.url(self.name + "/"), insecure=True,
                client_crt=self.presto_crt, client_key=self.presto_key,
                client_cert_required=True, ca_cert=self.ca_cert)
    # Without a client certificate, the server must abort the handshake.
    yield Query(self.url(self.name + "/"), insecure=True, error="handshake failure")
def requirements(self):
    """Augment the inherited readiness checks with client-certificate TLS settings."""
    for check_name, query in super().requirements():
        # Every readiness probe must present the client cert, since the
        # listener rejects handshakes without one.
        query.insecure = True
        query.client_cert = self.presto_crt
        query.client_key = self.presto_key
        query.client_cert_required = True
        query.ca_cert = self.ca_cert
        yield (check_name, query)
class TLSOriginationSecret(AmbassadorTest):
    """Check TLS origination to the upstream, configured two equivalent ways.

    The 'upstream' context references the certificate by Secret name; the
    'upstream-files' context points directly at the decoded files Ambassador
    writes for that same Secret. Both mappings must reach the backend over TLS.
    """

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # A kubernetes.io/tls Secret carrying the cert used when originating
        # TLS to the upstream service.
        return super().manifests() + """
---
apiVersion: v1
kind: Secret
metadata:
  name: test-origination-secret
  labels:
    kat-ambassador-id: tlsoriginationsecret
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
"""

    def config(self):
        # Two upstream TLS contexts for the same certificate: one resolved by
        # Secret name, one by the on-disk decoded paths for that Secret.
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
ambassador_id: {self.ambassador_id}
name: tls
config:
  upstream:
    secret: test-origination-secret
  upstream-files:
    cert_chain_file: /tmp/ambassador/snapshots/default/secrets-decoded/test-origination-secret/F94E4DCF30ABC50DEF240AA8024599B67CC03991.crt
    private_key_file: /tmp/ambassador/snapshots/default/secrets-decoded/test-origination-secret/F94E4DCF30ABC50DEF240AA8024599B67CC03991.key
""")
        # One mapping per context; both route to the same backend.
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.target.path.k8s}
prefix: /{self.name}/
service: {self.target.path.fqdn}
tls: upstream
""")
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.target.path.k8s}-files
prefix: /{self.name}-files/
service: {self.target.path.fqdn}
tls: upstream-files
""")

    def queries(self):
        yield Query(self.url(self.name + "/"))
        yield Query(self.url(self.name + "-files/"))

    def check(self):
        # Both routes must have originated TLS to the backend.
        for r in self.results:
            assert r.backend.request.tls.enabled
class TLS(AmbassadorTest):
    """Basic TLS termination at the Ambassador listener via the TLS module."""

    target: ServiceType

    def init(self):
        self.target = HTTP()

    def manifests(self) -> str:
        # Two Secrets: the one the TLS module names explicitly, plus the
        # legacy 'ambassador-certs' Secret that Ambassador also consumes.
        return super().manifests() + """
---
apiVersion: v1
kind: Secret
metadata:
  name: test-tls-secret
  labels:
    kat-ambassador-id: tls
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
---
apiVersion: v1
kind: Secret
metadata:
  name: ambassador-certs
  labels:
    kat-ambassador-id: tls
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
"""

    def config(self):
        # Terminate TLS with test-tls-secret.
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
name: tls
ambassador_id: {self.ambassador_id}
config:
  server:
    enabled: True
    secret: test-tls-secret
""")
        # NOTE(review): this mapping is yielded via self.target, so kat does
        # not apply ambassador_id mangling to the mapping name/prefix; an
        # explicit ambassador_id is only needed for a non-default ID, which
        # this simple test doesn't use.
        yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tls_target_mapping
prefix: /tls-target/
service: {self.target.path.fqdn}
""")

    def scheme(self) -> str:
        return "https"

    def queries(self):
        # Self-signed certificate, so skip verification.
        yield Query(self.url("tls-target/"), insecure=True)
class TLSInvalidSecret(AmbassadorTest):
    """Feed the TLS module deliberately broken contexts and assert on diag errors.

    No Secret named test-certs-secret-invalid ever exists, the file paths do
    not exist, and one context names a cert_chain_file without a key — every
    context here must surface a specific error in the diagnostics output.
    """

    target: ServiceType

    def init(self):
        self.target = HTTP()

    def config(self):
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
name: tls
ambassador_id: {self.ambassador_id}
config:
  server:
    enabled: True
    secret: test-certs-secret-invalid
  missing-secret-key:
    cert_chain_file: /nonesuch
  bad-path-info:
    cert_chain_file: /nonesuch
    private_key_file: /nonesuch
  validation-without-termination:
    enabled: True
    secret: test-certs-secret-invalid
    ca_secret: ambassador-certs
""")
        yield self.target, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: tls_target_mapping
prefix: /tls-target/
service: {self.target.path.fqdn}
""")

    def scheme(self) -> str:
        # TLS never comes up successfully, so query over plain HTTP.
        return "http"

    def queries(self):
        # phase 2: wait for Ambassador to have processed the broken config.
        yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), phase=2)

    def check(self):
        errors = self.results[0].backend.response
        expected = {
            "TLSContext server found no certificate in secret test-certs-secret-invalid in namespace default, ignoring...",
            "TLSContext bad-path-info found no cert_chain_file '/nonesuch'",
            "TLSContext bad-path-info found no private_key_file '/nonesuch'",
            "TLSContext validation-without-termination found no certificate in secret test-certs-secret-invalid in namespace default, ignoring...",
            "TLSContext missing-secret-key: 'cert_chain_file' requires 'private_key_file' as well",
        }
        # Each diag error arrives as an (errsvc, errtext) pair; only the
        # texts matter here. Extra errors are tolerated, missing ones fail.
        current = {errtext for _errsvc, errtext in errors}
        diff = expected - current
        assert len(diff) == 0, f'expected {len(expected)} errors, got {len(errors)}: Missing {diff}'
class TLSContextTest(AmbassadorTest):
def init(self):
    """Point all test traffic at a plain HTTP backend service."""
    self.target = HTTP()
def manifests(self) -> str:
    """K8s fixtures: an extra namespace plus three kubernetes.io/tls Secrets.

    secret-0 backs the TLS module's fallback server context, secret-1 (which
    lives in secret-namespace) backs tls-context-host-1, and secret-2 backs
    tls-context-host-2.
    """
    return super().manifests() + """
---
apiVersion: v1
kind: Namespace
metadata:
  name: secret-namespace
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
  name: test-tlscontext-secret-0
  labels:
    kat-ambassador-id: tlscontext
type: kubernetes.io/tls
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
  name: test-tlscontext-secret-1
  namespace: secret-namespace
  labels:
    kat-ambassador-id: tlscontext
type: kubernetes.io/tls
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUlIWTY3cFNoZ3NyTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB5TUI0WERURTRNVEV3TVRFME1EUXhObG9YCkRUSTRNVEF5T1RFME1EUXhObG93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRJd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURjQThZdGgvUFdhT0dTCm9ObXZFSFoyNGpRN1BLTitENG93TEhXZWl1UmRtaEEwWU92VTN3cUczVnFZNFpwbFpBVjBQS2xELysyWlNGMTQKejh3MWVGNFFUelphWXh3eTkrd2ZITmtUREVwTWpQOEpNMk9FYnlrVVJ4VVJ2VzQrN0QzMEUyRXo1T1BseG1jMApNWU0vL0pINUVEUWhjaURybFlxZTFTUk1SQUxaZVZta2FBeXU2TkhKVEJ1ajBTSVB1ZExUY2grOTBxK3Jkd255CmZrVDF4M09UYW5iV2pub21FSmU3TXZ5NG12dnFxSUh1NDhTOUM4WmQxQkdWUGJ1OFYvVURyU1dROXpZQ1g0U0cKT2FzbDhDMFhtSDZrZW1oUERsRC9UdjB4dnlINXE1TVVjSGk0bUp0Titnem9iNTREd3pWR0VqZWY1TGVTMVY1RgowVEFQMGQrWEFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFmCkJnTlZIU01FR0RBV2dCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBbUFLYkNsdUhFZS9JRmJ1QWJneDBNenV6aTkwd2xtQVBiOGdtTwpxdmJwMjl1T1ZzVlNtUUFkZFBuZEZhTVhWcDFaaG1UVjVDU1F0ZFgyQ1ZNVyswVzQ3Qy9DT0Jkb1NFUTl5akJmCmlGRGNseG04QU4yUG1hR1FhK3hvT1hnWkxYZXJDaE5LV0JTWlIrWktYTEpTTTlVYUVTbEhmNXVuQkxFcENqK2oKZEJpSXFGY2E3eElGUGtyKzBSRW9BVmMveFBubnNhS2pMMlV5Z0dqUWZGTnhjT042Y3VjYjZMS0pYT1pFSVRiNQpINjhKdWFSQ0tyZWZZK0l5aFFWVk5taWk3dE1wY1UyS2pXNXBrVktxVTNkS0l0RXEyVmtTZHpNVUtqTnhZd3FGCll6YnozNFQ1MENXbm9HbU5SQVdKc0xlVmlPWVUyNmR3YkFXZDlVYitWMDFRam43OAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktrd2dnU2xBZ0VBQW9JQkFRRGNBOFl0aC9QV2FPR1MKb05tdkVIWjI0alE3UEtOK0Q0b3dMSFdlaXVSZG1oQTBZT3ZVM3dxRzNWcVk0WnBsWkFWMFBLbEQvKzJaU0YxNAp6OHcxZUY0UVR6WmFZeHd5OSt3ZkhOa1RERXBNalA4Sk0yT0VieWtVUnhVUnZXNCs3RDMwRTJFejVPUGx4bWMwCk1ZTS8vSkg1RURRaGNpRHJsWXFlMVNSTVJBTFplVm1rYUF5dTZOSEpUQnVqMFNJUHVkTFRjaCs5MHErcmR3bnkKZmtUMXgzT1RhbmJXam5vbUVKZTdNdnk0bXZ2cXFJSHU0OFM5QzhaZDFCR1ZQYnU4Vi9VRHJTV1E5ellDWDRTRwpPYXNsOEMwWG1INmtlbWhQRGxEL1R2MHh2eUg1cTVNVWNIaTRtSnROK2d6b2I1NER3elZHRWplZjVMZVMxVjVGCjBUQVAwZCtYQWdNQkFBRUNnZ0VCQUk2U3I0anYwZForanJhN0gzVnZ3S1RYZnl0bjV6YVlrVjhZWUh3RjIyakEKbm9HaTBSQllIUFU2V2l3NS9oaDRFWVM2anFHdkptUXZYY3NkTldMdEJsK2hSVUtiZVRtYUtWd2NFSnRrV24xeQozUTQwUytnVk5OU2NINDRvYUZuRU0zMklWWFFRZnBKMjJJZ2RFY1dVUVcvWnpUNWpPK3dPTXc4c1plSTZMSEtLCkdoOENsVDkrRGUvdXFqbjNCRnQwelZ3cnFLbllKSU1DSWFrb2lDRmtIcGhVTURFNVkyU1NLaGFGWndxMWtLd0sKdHFvWFpKQnlzYXhnUTFRa21mS1RnRkx5WlpXT01mRzVzb1VrU1RTeURFRzFsYnVYcHpUbTlVSTlKU2lsK01yaAp1LzVTeXBLOHBCSHhBdFg5VXdiTjFiRGw3Sng1SWJyMnNoM0F1UDF4OUpFQ2dZRUE4dGNTM09URXNOUFpQZlptCk9jaUduOW9STTdHVmVGdjMrL05iL3JodHp1L1RQUWJBSzhWZ3FrS0dPazNGN1krY2txS1NTWjFnUkF2SHBsZEIKaTY0Y0daT1dpK01jMWZVcEdVV2sxdnZXbG1nTUlQVjVtbFpvOHowMlNTdXhLZTI1Y2VNb09oenFlay9vRmFtdgoyTmxFeTh0dEhOMUxMS3grZllhMkpGcWVycThDZ1lFQTUvQUxHSXVrU3J0K0dkektJLzV5cjdSREpTVzIzUTJ4CkM5ZklUTUFSL1Q4dzNsWGhyUnRXcmlHL3l0QkVPNXdTMVIwdDkydW1nVkhIRTA5eFFXbzZ0Tm16QVBNb1RSekMKd08yYnJqQktBdUJkQ0RISjZsMlFnOEhPQWovUncrK2x4bEN0VEI2YS8xWEZIZnNHUGhqMEQrWlJiWVZzaE00UgpnSVVmdmpmQ1Y1a0NnWUVBMzdzL2FieHJhdThEaTQ3a0NBQ3o1N3FsZHBiNk92V2d0OFF5MGE5aG0vSmhFQ3lVCkNML0VtNWpHeWhpMWJuV05yNXVRWTdwVzR0cG5pdDJCU2d1VFlBMFYrck8zOFhmNThZcTBvRTFPR3l5cFlBUkoKa09SanRSYUVXVTJqNEJsaGJZZjNtL0xnSk9oUnp3T1RPNXFSUTZHY1dhZVlod1ExVmJrelByTXUxNGtDZ1lCbwp4dEhjWnNqelVidm5wd3hTTWxKUStaZ1RvZlAzN0lWOG1pQk1POEJrclRWQVczKzFtZElRbkFKdWRxTThZb2RICmF3VW03cVNyYXV3SjF5dU1wNWFadUhiYkNQMjl5QzVheFh3OHRtZlk0TTVtTTBmSjdqYW9ydGFId1pqYmNObHMKdTJsdUo2MVJoOGVpZ1pJU1gyZHgvMVB0ckFhWUFCZDcvYWVYWU0wVWtRS0JnUUNVbkFIdmRQUGhIVnJDWU1rTgpOOFBEK0t0YmhPRks2S3MvdlgyUkcyRnFmQkJPQWV3bEo1d0xWeFBLT1RpdytKS2FSeHhYMkcvREZVNzduOEQvCkR5V2RjM2ZCQWQ0a1lJamZVaGRGa1hHNEFMUDZBNVFIZVN4NzNScTFLNWxMVWhPbEZqc3VPZ0NKS28wVlFmRC8KT05paDB6SzN5Wmc3aDVQamZ1TUdGb09OQWc9PQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==
kind: Secret
metadata:
  name: test-tlscontext-secret-2
  labels:
    kat-ambassador-id: tlscontext
type: kubernetes.io/tls
"""
def config(self):
    """Two SNI contexts on one prefix, a fallback TLS module, and two broken contexts."""
    # Host-1: mapping plus a TLSContext whose secret lives in another
    # namespace (secret.namespace reference form).
    yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.name}-same-prefix-1
prefix: /tls-context-same/
service: http://{self.target.path.fqdn}
host: tls-context-host-1
""")
    yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-1
hosts:
- tls-context-host-1
secret: test-tlscontext-secret-1.secret-namespace
min_tls_version: v1.0
max_tls_version: v1.3
""")
    # Host-2: the same prefix routed by a different Host, with its own
    # certificate and ALPN enabled.
    yield self, self.format("""
---
apiVersion: ambassador/v1
kind: Mapping
name: {self.name}-same-prefix-2
prefix: /tls-context-same/
service: http://{self.target.path.fqdn}
host: tls-context-host-2
""")
    yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-2
hosts:
- tls-context-host-2
secret: test-tlscontext-secret-2
alpn_protocols: h2,http/1.1
""")
    # Fallback termination context via the TLS module.
    yield self, self.format("""
---
apiVersion: ambassador/v1
kind: Module
name: tls
config:
  server:
    enabled: True
    secret: test-tlscontext-secret-0
""")
    yield self, self.format("""
---
apiVersion: ambassador/v1
kind: Mapping
name: {self.name}-other-mapping
prefix: /{self.name}/
service: https://{self.target.path.fqdn}
""")
    # Deliberately incomplete: a context without hosts or secret...
    yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-no-secret
min_tls_version: v1.0
max_tls_version: v1.3
""")
    # ...and one with hosts but no certificate at all, which must surface
    # cert_chain_file/private_key_file errors in diag.
    yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-error
hosts:
- tls-context-host-1
""")
def scheme(self) -> str:
    """Queries run over HTTPS so the SNI contexts actually terminate TLS."""
    return "https"
@staticmethod
def _go_close_connection_error(url):
return "Get {}: EOF".format(url)
    def queries(self):
        """Yield this test's queries.

        The order matters: check() indexes self.results positionally, and
        query 0 must be the diagnostics error fetch.
        """
        # 0: fetch the diag error list; check() asserts on its contents.
        yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"),
                    headers={"Host": "tls-context-host-2"},
                    insecure=True,
                    sni=True)
        # 1: shared prefix, SNI/Host tls-context-host-1 -> expect 200.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True)
        # 2: shared prefix, SNI/Host tls-context-host-2 -> expect 200.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-2"},
                    expected=200,
                    insecure=True,
                    sni=True)
        # 3: shared prefix with a host no Mapping claims -> expect 404.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-3"},
                    expected=404,
                    insecure=True)
        # 4: known host but an unmapped prefix -> expect 404.
        yield Query(self.url("tls-context-different/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=404,
                    insecure=True,
                    sni=True)
        # 5: the test's own prefix with no Host header at all.
        yield Query(self.url(self.name + "/"),
                    insecure=True)
        # 6: same prefix with a host no TLSContext claims.
        yield Query(self.url(self.name + "/"),
                    headers={"Host": "tls-context-host-3"},
                    insecure=True)
        # 7: host 1's SNI cert should be served on this overlapping path.
        yield Query(self.url(self.name + "/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True)
        # 8: explicit Host header 2 wins, we'll get the SNI cert for this
        # overlapping path.
        yield Query(self.url(self.name + "/"),
                    headers={"Host": "tls-context-host-2"},
                    expected=200,
                    insecure=True,
                    sni=True)
def check(self):
errors = self.results[0].json
num_errors = len(errors)
assert num_errors == 2, "expected 2 errors, got {} -\n{}".format(num_errors, errors)
cert_err = errors[0]
pkey_err = errors[1]
assert cert_err[1] == 'TLSContext TLSContextTest-same-context-error is missing cert_chain_file'
assert pkey_err[1] == 'TLSContext TLSContextTest-same-context-error is missing private_key_file'
idx = 0
for result in self.results:
if result.status == 200 and result.query.headers:
host_header = result.query.headers['Host']
tls_common_name = result.tls[0]['Issuer']['CommonName']
# XXX Weirdness with the fallback cert here! You see, if we use host
# tls-context-host-3 (or, really, anything except -1 or -2), then the
# fallback cert actually has CN 'localhost'. We should replace this with
# a real fallback cert, but for now, just hack the host_header.
#
# Ew.
if host_header == 'tls-context-host-3':
host_header = 'localhost'
assert host_header == tls_common_name, "test %d wanted CN %s, but got %s" % (idx, host_header, tls_common_name)
idx += 1
def requirements(self):
# We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True))
yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True))
class TLSContextProtocolMaxVersion(AmbassadorTest):
# It appears not to be.
# debug = True
    def init(self):
        # Single HTTP backend that this test's Mapping routes to.
        self.target = HTTP()
def manifests(self) -> str:
return super().manifests() + """
---
apiVersion: v1
data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckp
QSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
name: secret.max-version
labels:
kat-ambassador-id: tlscontextprotocolmaxversion
type: kubernetes.io/tls
"""
    def config(self):
        """Yield config pinning the server's TLS window to v1.1-v1.2.

        tls_secret_namespacing is disabled via the Module defaults so the
        secret name 'secret.max-version' is treated as a literal name rather
        than being split into name.namespace.
        """
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Module
name: ambassador
config:
  defaults:
    tls_secret_namespacing: False
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.name}-same-prefix-1
prefix: /tls-context-same/
service: http://{self.target.path.fqdn}
host: tls-context-host-1
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-1
hosts:
- tls-context-host-1
secret: secret.max-version
min_tls_version: v1.1
max_tls_version: v1.2
""")
def scheme(self) -> str:
return "https"
@staticmethod
def _go_close_connection_error(url):
return "Get {}: EOF".format(url)
    def queries(self):
        # ----
        # XXX 2019-09-11
        # These aren't actually reporting the negotiated version, although
        # correct
        # ----

        # 0: client pinned to exactly TLS v1.2 (inside the server's
        # v1.1-v1.2 window).
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.2",
                    maxTLSv="v1.2")

        # 1: This should give us TLS v1.1.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.0",
                    maxTLSv="v1.1")

        # 2: client insists on v1.3, above the server's max -- the handshake
        # must fail with one of these client-side errors.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.3",
                    maxTLSv="v1.3",
                    error=[ "tls: server selected unsupported protocol version 303",
                            "tls: no supported versions satisfy MinVersion and MaxVersion",
                            "tls: protocol version not supported" ])
def check(self):
tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version
tls_1_version = self.results[1].backend.request.tls.negotiated_protocol_version
# See comment in queries for why these are None. They should be v1.2 and v1.1 respectively.
assert tls_0_version == None, f"requesting TLS v1.2 got TLS {tls_0_version}"
assert tls_1_version == None, f"requesting TLS v1.0-v1.1 got TLS {tls_1_version}"
def requirements(self):
# We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True, minTLSv="v1.2"))
yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True, minTLSv="v1.2"))
class TLSContextProtocolMinVersion(AmbassadorTest):
# It appears not to be.
# debug = True
    def init(self):
        # Single HTTP backend that this test's Mapping routes to.
        self.target = HTTP()
def manifests(self) -> str:
return super().manifests() + """
---
apiVersion: v1
data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckp
QSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
name: secret.min-version
labels:
kat-ambassador-id: tlscontextprotocolminversion
type: kubernetes.io/tls
"""
    def config(self):
        """Yield config pinning the server's TLS window to v1.2-v1.3.

        Unlike the max-version test, secret_namespacing is disabled on the
        TLSContext itself rather than through Module defaults, so
        'secret.min-version' is read as a literal secret name.
        """
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.name}-same-prefix-1
prefix: /tls-context-same/
service: https://{self.target.path.fqdn}
host: tls-context-host-1
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-1
hosts:
- tls-context-host-1
secret: secret.min-version
secret_namespacing: False
min_tls_version: v1.2
max_tls_version: v1.3
""")
def scheme(self) -> str:
return "https"
@staticmethod
def _go_close_connection_error(url):
return "Get {}: EOF".format(url)
    def queries(self):
        # 0: This should give v1.3, but it currently seems to give 1.2.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.2",
                    maxTLSv="v1.3")

        # 1: This should give v1.2.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.1",
                    maxTLSv="v1.2")

        # 2: client capped at v1.0, below the server's min -- the handshake
        # must fail with one of these client-side errors.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    minTLSv="v1.0",
                    maxTLSv="v1.0",
                    error=[ "tls: server selected unsupported protocol version 303",
                            "tls: no supported versions satisfy MinVersion and MaxVersion",
                            "tls: protocol version not supported" ])
def check(self):
tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version
tls_1_version = self.results[1].backend.request.tls.negotiated_protocol_version
# Hmmm. Why does Envoy prefer 1.2 to 1.3 here?? This may be a client thing -- have to
# rebuild with Go 1.13.
assert tls_0_version == "v1.2", f"requesting TLS v1.2-v1.3 got TLS {tls_0_version}"
assert tls_1_version == "v1.2", f"requesting TLS v1.1-v1.2 got TLS {tls_1_version}"
def requirements(self):
# We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
class TLSContextCipherSuites(AmbassadorTest):
    def init(self):
        # Single HTTP backend that this test's Mapping routes to.
        self.target = HTTP()
def manifests(self) -> str:
return super().manifests() + """
---
apiVersion: v1
data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckp
QSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
name: secret.cipher-suites
labels:
kat-ambassador-id: tlscontextciphersuites
type: kubernetes.io/tls
"""
    def config(self):
        """Yield a Mapping plus a TLSContext restricted to one cipher/curve."""
        yield self, self.format("""
---
apiVersion: ambassador/v0
kind: Mapping
name: {self.name}-same-prefix-1
prefix: /tls-context-same/
service: https://{self.target.path.fqdn}
host: tls-context-host-1
""")
        # Only ECDHE-RSA-AES128-GCM-SHA256 over curve P-256 is allowed, and
        # max_tls_version caps handshakes at TLS v1.2.
        yield self, self.format("""
---
apiVersion: ambassador/v1
kind: TLSContext
name: {self.name}-same-context-1
hosts:
- tls-context-host-1
secret: secret.cipher-suites
secret_namespacing: False
max_tls_version: v1.2
cipher_suites:
- ECDHE-RSA-AES128-GCM-SHA256
ecdh_curves:
- P-256
""")
def scheme(self) -> str:
return "https"
@staticmethod
def _go_close_connection_error(url):
return "Get {}: EOF".format(url)
    def queries(self):
        # 0: client offers the allowed cipher -> handshake succeeds; check()
        # verifies TLS v1.2 was negotiated on this result.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    cipherSuites=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"],
                    maxTLSv="v1.2")
        # 1: client offers only an ECDSA suite the server doesn't allow.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    cipherSuites=["TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"],
                    maxTLSv="v1.2",
                    error="tls: handshake failure",)
        # 2: right cipher, but the client restricts ECDH to X25519, which is
        # not in the server's ecdh_curves list.
        yield Query(self.url("tls-context-same/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True,
                    cipherSuites=["TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"],
                    ecdhCurves=["X25519"],
                    maxTLSv="v1.2",
                    error="tls: handshake failure",)
def check(self):
tls_0_version = self.results[0].backend.request.tls.negotiated_protocol_version
assert tls_0_version == "v1.2", f"requesting TLS v1.2 got TLS {tls_0_version}"
def requirements(self):
yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
| true | true |
1c45fc1d3bc956ee5253c530f021440b4f006f32 | 45,154 | py | Python | hangups/ui/__main__.py | zetorian/hangups | 60715702fc23842a94c8d13e144a8bd0ce45654a | [
"MIT"
] | null | null | null | hangups/ui/__main__.py | zetorian/hangups | 60715702fc23842a94c8d13e144a8bd0ce45654a | [
"MIT"
] | null | null | null | hangups/ui/__main__.py | zetorian/hangups | 60715702fc23842a94c8d13e144a8bd0ce45654a | [
"MIT"
] | null | null | null | """Reference chat client for hangups."""
import appdirs
import asyncio
import configargparse
import contextlib
import logging
import os
import sys
import urwid
import readlike
import hangups
from hangups.ui.emoticon import replace_emoticons
from hangups.ui import notifier
from hangups.ui.utils import get_conv_name, add_color_to_scheme
# hangups used to require a fork of urwid called hangups-urwid which may still
# be installed and create a conflict with the 'urwid' package name. See #198.
if urwid.__version__ == '1.2.2-dev':
    sys.exit('error: hangups-urwid package is installed\n\n'
             'Please uninstall hangups-urwid and urwid, and reinstall '
             'hangups.')


# Format string handed to the logging module.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

# Terminal colour schemes, keyed by scheme name. Each entry is a
# (attribute-name, foreground, background) urwid palette triple.
COL_SCHEMES = {
    # Very basic scheme with no colour
    'default': {
        ('active_tab', '', ''),
        ('inactive_tab', 'standout', ''),
        ('msg_date', '', ''),
        ('msg_sender', '', ''),
        ('msg_self', '', ''),
        ('msg_text', '', ''),
        ('msg_text_self', '', ''),
        ('msg_selected', 'standout', ''),
        ('status_line', 'standout', ''),
        ('tab_background', 'standout', ''),
    },
    'solarized-dark': {
        ('active_tab', 'light gray', 'light blue'),
        ('inactive_tab', 'underline', 'light green'),
        ('msg_date', 'dark cyan', ''),
        ('msg_sender', 'dark blue', ''),
        ('msg_text_self', '', ''),
        ('msg_self', 'dark green', ''),
        ('msg_text', '', ''),
        ('msg_selected', 'standout', ''),
        ('status_line', 'standout', ''),
        ('tab_background', 'black,standout,underline', 'light green'),
    },
}

# NOTE(review): appears to be the set of palette attribute names that users
# may recolour individually -- confirm against the config/option handling.
COL_SCHEME_NAMES = (
    'active_tab', 'inactive_tab', 'msg_date', 'msg_sender', 'msg_self',
    'msg_text', 'msg_text_self', 'status_line', 'tab_background'
)

# Generic notification used when discreet notifications are enabled; it
# reveals neither the sender nor the message text (see ChatUI._on_event).
DISCREET_NOTIFICATION = notifier.Notification(
    'hangups', 'Conversation', 'New message'
)
class HangupsDisconnected(Exception):
    """Signals that the hangups client connection has ended."""
class ChatUI(object):
    """User interface for hangups.

    Constructing an instance logs in, starts urwid, and runs the asyncio
    event loop until disconnect -- __init__ does not return until the UI
    session is over.
    """

    def __init__(self, refresh_token_path, keybindings, palette,
                 palette_colors, datetimefmt, notifier_,
                 discreet_notifications):
        """Start the user interface and block until it exits."""
        self._keys = keybindings
        self._datetimefmt = datetimefmt
        self._notifier = notifier_
        self._discreet_notifications = discreet_notifications

        set_terminal_title('hangups')

        # These are populated by on_connect when it's called.
        self._conv_widgets = {}  # {conversation_id: ConversationWidget}
        self._tabbed_window = None  # TabbedWindowWidget
        self._conv_list = None  # hangups.ConversationList
        self._user_list = None  # hangups.UserList
        self._coroutine_queue = CoroutineQueue()
        self._exception = None

        # TODO Add urwid widget for getting auth.
        try:
            cookies = hangups.auth.get_auth_stdin(refresh_token_path)
        except hangups.GoogleAuthError as e:
            sys.exit('Login failed ({})'.format(e))

        self._client = hangups.Client(cookies)
        self._client.on_connect.add_observer(self._on_connect)

        loop = asyncio.get_event_loop()
        loop.set_exception_handler(self._exception_handler)
        try:
            self._urwid_loop = urwid.MainLoop(
                LoadingWidget(), palette, handle_mouse=False,
                input_filter=self._input_filter,
                event_loop=urwid.AsyncioEventLoop(loop=loop)
            )
        except urwid.AttrSpecError as e:
            # Fail gracefully for invalid colour options.
            sys.exit(e)
        self._urwid_loop.screen.set_terminal_properties(colors=palette_colors)
        self._urwid_loop.start()

        coros = [self._connect(), self._coroutine_queue.consume()]

        # Enable bracketed paste mode after the terminal has been switched to
        # the alternate screen (after MainLoop.start()) to work around bug
        # 729533 in VTE.
        with bracketed_paste_mode():
            try:
                # Run all the coros, until they all complete or one raises an
                # exception. In the normal case, HangupsDisconnected will be
                # raised.
                loop.run_until_complete(asyncio.gather(*coros))
            except HangupsDisconnected:
                pass
            finally:
                # Clean up urwid.
                self._urwid_loop.stop()
                # Cancel all of the coros, and wait for them to shut down.
                task = asyncio.gather(*coros, return_exceptions=True)
                task.cancel()
                try:
                    loop.run_until_complete(task)
                except asyncio.CancelledError:
                    # In Python 3.7, asyncio.gather no longer swallows
                    # CancelledError, so we need to ignore it.
                    pass
                loop.close()

        # If an exception was stored, raise it now. This is used for exceptions
        # originating in urwid callbacks.
        if self._exception:
            raise self._exception  # pylint: disable=raising-bad-type

    async def _connect(self):
        """Run the client; raise HangupsDisconnected once it returns."""
        await self._client.connect()
        raise HangupsDisconnected()

    def _exception_handler(self, _loop, context):
        """Handle exceptions from the asyncio loop."""
        # Start a graceful shutdown.
        self._coroutine_queue.put(self._client.disconnect())

        # Store the exception to be re-raised later. If the context doesn't
        # contain an exception, create one containing the error message.
        default_exception = Exception(context.get('message'))
        self._exception = context.get('exception', default_exception)

    def _input_filter(self, keys, _):
        """Handle global keybindings; pass other keys through to urwid."""
        if keys == [self._keys['menu']]:
            if self._urwid_loop.widget == self._tabbed_window:
                self._show_menu()
            else:
                self._hide_menu()
        elif keys == [self._keys['quit']]:
            self._coroutine_queue.put(self._client.disconnect())
        else:
            return keys

    def _show_menu(self):
        """Show the overlay menu."""
        # If the current widget in the TabbedWindowWidget has a menu,
        # overlay it on the TabbedWindowWidget.
        current_widget = self._tabbed_window.get_current_widget()
        if hasattr(current_widget, 'get_menu_widget'):
            menu_widget = current_widget.get_menu_widget(self._hide_menu)
            overlay = urwid.Overlay(menu_widget, self._tabbed_window,
                                    align='center', width=('relative', 80),
                                    valign='middle', height=('relative', 80))
            self._urwid_loop.widget = overlay

    def _hide_menu(self):
        """Hide the overlay menu."""
        self._urwid_loop.widget = self._tabbed_window

    def get_conv_widget(self, conv_id):
        """Return an existing or new ConversationWidget."""
        if conv_id not in self._conv_widgets:
            set_title_cb = (lambda widget, title:
                            self._tabbed_window.set_tab(widget, title=title))
            widget = ConversationWidget(
                self._client, self._coroutine_queue,
                self._conv_list.get(conv_id), set_title_cb, self._keys,
                self._datetimefmt
            )
            self._conv_widgets[conv_id] = widget
        return self._conv_widgets[conv_id]

    def add_conversation_tab(self, conv_id, switch=False):
        """Add conversation tab if not present, and optionally switch to it."""
        conv_widget = self.get_conv_widget(conv_id)
        self._tabbed_window.set_tab(conv_widget, switch=switch,
                                    title=conv_widget.title)

    def on_select_conversation(self, conv_id):
        """Called when the user selects a new conversation to listen to."""
        # switch to new or existing tab for the conversation
        self.add_conversation_tab(conv_id, switch=True)

    async def _on_connect(self):
        """Handle connecting for the first time."""
        self._user_list, self._conv_list = (
            await hangups.build_user_conversation_list(self._client)
        )
        self._conv_list.on_event.add_observer(self._on_event)

        # show the conversation menu
        conv_picker = ConversationPickerWidget(self._conv_list,
                                               self.on_select_conversation,
                                               self._keys)
        self._tabbed_window = TabbedWindowWidget(self._keys)
        self._tabbed_window.set_tab(conv_picker, switch=True,
                                    title='Conversations')
        self._urwid_loop.widget = self._tabbed_window

    def _on_event(self, conv_event):
        """Open conversation tab for new messages & pass events to notifier."""
        conv = self._conv_list.get(conv_event.conversation_id)
        user = conv.get_user(conv_event.user_id)
        # Only notify for chat messages from other people in non-quiet
        # conversations.
        show_notification = all((
            isinstance(conv_event, hangups.ChatMessageEvent),
            not user.is_self,
            not conv.is_quiet,
        ))
        if show_notification:
            self.add_conversation_tab(conv_event.conversation_id)
            if self._discreet_notifications:
                # Discreet mode: hide the sender and the message body.
                notification = DISCREET_NOTIFICATION
            else:
                notification = notifier.Notification(
                    user.full_name, get_conv_name(conv), conv_event.text
                )
            self._notifier.send(notification)
class CoroutineQueue:
    """Queue bridging urwid's callback world and asyncio coroutines.

    Urwid invokes plain callbacks for user input, so a callback that needs to
    run a coroutine has nowhere to await it. Firing it off with
    asyncio.ensure_future would swallow exceptions into the log instead of
    propagating them, which can obscure problems.

    Instead, callbacks enqueue coroutines here via put(), and a single
    long-running consume() coroutine awaits them one at a time, letting any
    exception propagate to consume's caller.
    """

    def __init__(self):
        self._queue = asyncio.Queue()

    def put(self, coro):
        """Queue a coroutine for execution by consume()."""
        # Deliberately unlogged: put() may fire on every keypress, and
        # logging each one would be pure spam.
        assert asyncio.iscoroutine(coro)
        self._queue.put_nowait(coro)

    async def consume(self):
        """Run queued coroutines one after another, forever."""
        while True:
            pending = await self._queue.get()
            assert asyncio.iscoroutine(pending)
            await pending
class WidgetBase(urwid.WidgetWrap):
    """Common base class for this UI's widgets.

    ``urwid.WidgetWrap`` defines ``keypress`` as a property; redefining it
    here as an ordinary method spares subclasses (and pylint) from dealing
    with the property machinery.

    Args:
        target: urwid.Widget instance
    """

    def keypress(self, size, key):
        """Delegate key handling to the wrapped widget."""
        # pylint:disable=not-callable, useless-super-delegation
        return super().keypress(size, key)
class LoadingWidget(WidgetBase):
    """Full-screen 'Connecting...' placeholder shown before login completes."""

    def __init__(self):
        # Centered horizontally by the Text alignment and vertically by the
        # Filler.
        message = urwid.Text('Connecting...', align='center')
        super().__init__(urwid.Filler(message))
class RenameConversationDialog(WidgetBase):
    """Simple dialog for editing a conversation's name."""

    def __init__(self, coroutine_queue, conversation, on_cancel, on_save,
                 keybindings):
        self._coroutine_queue = coroutine_queue
        self._conversation = conversation

        # Pre-fill the editor with the conversation's current name.
        name_edit = urwid.Edit(edit_text=get_conv_name(conversation))
        save_button = urwid.Button(
            'Save',
            on_press=lambda _: self._rename(name_edit.edit_text, on_save)
        )
        cancel_button = urwid.Button('Cancel', on_press=lambda _: on_cancel())
        walker = urwid.SimpleFocusListWalker([
            urwid.Text('Rename conversation:'),
            name_edit,
            save_button,
            cancel_button,
        ])
        super().__init__(ListBox(keybindings, walker))

    def _rename(self, name, callback):
        """Queue the rename request, then invoke the follow-up callback."""
        self._coroutine_queue.put(self._conversation.rename(name))
        callback()
class ConversationMenu(WidgetBase):
    """Menu for conversation actions."""
    def __init__(self, coroutine_queue, conversation, close_callback,
                 keybindings):
        # NOTE: the lambdas below close over ``frame`` and ``list_box``,
        # which are assigned further down in this method; late binding makes
        # this safe because the lambdas only run after __init__ completes.
        rename_dialog = RenameConversationDialog(
            coroutine_queue, conversation,
            # Cancelling the rename swaps the menu list back into the frame.
            lambda: frame.contents.__setitem__('body', (list_box, None)),
            close_callback, keybindings
        )
        items = [
            urwid.Text(
                'Conversation name: {}'.format(get_conv_name(conversation))
            ),
            urwid.Button(
                'Change Conversation Name',
                # Swap the rename dialog into the frame body.
                on_press=lambda _: frame.contents.__setitem__(
                    'body', (rename_dialog, None)
                )
            ),
            urwid.Divider('-'),
            urwid.Button('Back', on_press=lambda _: close_callback()),
        ]
        list_walker = urwid.SimpleFocusListWalker(items)
        list_box = ListBox(keybindings, list_walker)
        frame = urwid.Frame(list_box)
        padding = urwid.Padding(frame, left=1, right=1)
        line_box = urwid.LineBox(padding, title='Conversation Menu')
        super().__init__(line_box)
class ConversationButton(WidgetBase):
    """Button showing a conversation's name and unread message count."""

    def __init__(self, conversation, on_press):
        self._conversation = conversation
        conversation.on_event.add_observer(self._on_event)
        # Watermark notifications don't produce events, but marking
        # messages as read changes the unread count, so observe them too.
        conversation.on_watermark_notification.add_observer(self._on_event)
        self._button = urwid.Button(self._get_label(), on_press=on_press,
                                    user_data=conversation.id_)
        super().__init__(self._button)

    def _get_label(self):
        """Build the button label from the conversation state."""
        return get_conv_name(self._conversation, show_unread=True)

    def _on_event(self, _):
        """Refresh the label whenever the conversation changes."""
        self._button.set_label(self._get_label())

    @property
    def last_modified(self):
        """Conversation's last modified date; used as a sort key."""
        return self._conversation.last_modified
class ConversationListWalker(urwid.SimpleFocusListWalker):
    """ListWalker of ConversationButtons, most recent conversation first."""

    # pylint: disable=abstract-method

    def __init__(self, conversation_list, on_select):
        self._conversation_list = conversation_list
        self._conversation_list.on_event.add_observer(self._on_event)
        # Adapt urwid's (button, user_data) callback to on_select(conv_id).
        self._on_press = lambda button, conv_id: on_select(conv_id)
        ordered = sorted(conversation_list.get_all(),
                         key=lambda conv: conv.last_modified, reverse=True)
        super().__init__([
            ConversationButton(conv, on_press=self._on_press)
            for conv in ordered
        ])

    def _on_event(self, _):
        """Keep the buttons ordered by last modified date after each event."""
        # TODO: handle adding new conversations
        self.sort(key=lambda button: button.last_modified, reverse=True)
class ListBox(WidgetBase):
    """ListBox widget supporting alternate keybindings."""

    # (keybinding name, urwid command) pairs, checked in this order.
    _MOTIONS = (
        ('down', 'down'),
        ('up', 'up'),
        ('page_up', 'page up'),
        ('page_down', 'page down'),
    )

    def __init__(self, keybindings, list_walker):
        self._keybindings = keybindings
        super().__init__(urwid.ListBox(list_walker))

    def keypress(self, size, key):
        """Translate the alternate movement keys into urwid motions."""
        key = super().keypress(size, key)
        for name, motion in self._MOTIONS:
            if key == self._keybindings[name]:
                super().keypress(size, motion)
                break
        else:
            # Not one of our motions: report the key as unhandled.
            return key
class ConversationPickerWidget(WidgetBase):
    """ListBox widget for picking a conversation from a list."""

    def __init__(self, conversation_list, on_select, keybindings):
        walker = ConversationListWalker(conversation_list, on_select)
        padded = urwid.Padding(ListBox(keybindings, walker), left=2, right=2)
        super().__init__(padded)
class ReturnableEdit(urwid.Edit):
    """Edit widget that clears itself and calls a function on return.

    While the terminal reports bracketed paste mode, 'enter' is treated as
    a literal newline rather than a submission, so multi-line pastes work.
    """
    def __init__(self, on_return, keybindings, caption=None):
        super().__init__(caption=caption, multiline=True)
        self._on_return = on_return  # called with the text on submission
        self._keys = keybindings
        self._paste_mode = False  # inside a bracketed paste?
    def keypress(self, size, key):
        """Handle paste-mode toggling, submission, and readline editing."""
        if key == 'begin paste':
            self._paste_mode = True
        elif key == 'end paste':
            self._paste_mode = False
        elif key == 'enter' and not self._paste_mode:
            # Submit the message and reset the input field.
            self._on_return(self.get_edit_text())
            self.set_edit_text('')
        elif key not in self._keys.values() and key in readlike.keys():
            # Apply readline-style editing (ctrl-a, ctrl-k, ...) unless the
            # key is reserved by one of the configured keybindings.
            text, pos = readlike.edit(self.edit_text, self.edit_pos, key)
            self.set_edit_text(text)
            self.set_edit_pos(pos)
        else:
            return super().keypress(size, key)
class StatusLineWidget(WidgetBase):
    """Widget for showing status messages.

    If the client is disconnected, show a reconnecting message. If a
    temporary message is showing, show the temporary message. Otherwise, if
    someone is typing, show a typing message.
    """
    # Seconds a temporary message stays visible before being cleared.
    _MESSAGE_DELAY_SECS = 10
    def __init__(self, client, conversation):
        self._typing_statuses = {}  # {user_id: typing status}
        self._conversation = conversation
        self._conversation.on_event.add_observer(self._on_event)
        self._conversation.on_typing.add_observer(self._on_typing)
        self._widget = urwid.Text('', align='center')
        self._is_connected = True
        self._message = None  # current temporary message, if any
        self._message_handle = None  # timer handle used to clear it
        client.on_disconnect.add_observer(self._on_disconnect)
        client.on_reconnect.add_observer(self._on_reconnect)
        super().__init__(urwid.AttrMap(self._widget, 'status_line'))
    def show_message(self, message_str):
        """Show a temporary message for _MESSAGE_DELAY_SECS seconds."""
        # Replace any message currently showing by cancelling its timer.
        if self._message_handle is not None:
            self._message_handle.cancel()
        self._message_handle = asyncio.get_event_loop().call_later(
            self._MESSAGE_DELAY_SECS, self._clear_message
        )
        self._message = message_str
        self._update()
    def _clear_message(self):
        """Clear the temporary message."""
        self._message = None
        self._message_handle = None
        self._update()
    def _on_disconnect(self):
        """Show reconnecting message when disconnected."""
        self._is_connected = False
        self._update()
    def _on_reconnect(self):
        """Hide reconnecting message when reconnected."""
        self._is_connected = True
        self._update()
    def _on_event(self, conv_event):
        """Make users stop typing when they send a message."""
        if isinstance(conv_event, hangups.ChatMessageEvent):
            self._typing_statuses[conv_event.user_id] = (
                hangups.TYPING_TYPE_STOPPED
            )
            self._update()
    def _on_typing(self, typing_message):
        """Handle typing updates."""
        self._typing_statuses[typing_message.user_id] = typing_message.status
        self._update()
    def _update(self):
        """Recompute and display the status text.

        Display priority: reconnecting message, then temporary message,
        then typing indicator.
        """
        typing_users = [self._conversation.get_user(user_id)
                        for user_id, status in self._typing_statuses.items()
                        if status == hangups.TYPING_TYPE_STARTED]
        # Never show the user's own typing status.
        displayed_names = [user.first_name for user in typing_users
                           if not user.is_self]
        if displayed_names:
            typing_message = '{} {} typing...'.format(
                ', '.join(sorted(displayed_names)),
                'is' if len(displayed_names) == 1 else 'are'
            )
        else:
            typing_message = ''
        if not self._is_connected:
            self._widget.set_text("RECONNECTING...")
        elif self._message is not None:
            self._widget.set_text(self._message)
        else:
            self._widget.set_text(typing_message)
class MessageWidget(WidgetBase):
    """Widget for displaying a single message in a conversation."""
    def __init__(self, timestamp, text, datetimefmt, user=None,
                 show_date=False):
        # Save the timestamp as an attribute for sorting.
        self.timestamp = timestamp
        # Build urwid text markup: (attribute, text) pairs.
        text = [
            ('msg_date', self._get_date_str(timestamp, datetimefmt,
                                            show_date=show_date) + ' '),
            ('msg_text_self' if user is not None and user.is_self
             else 'msg_text', text)
        ]
        if user is not None:
            # Insert the sender's name between the date and the text.
            text.insert(1, ('msg_self' if user.is_self else 'msg_sender',
                            user.first_name + ': '))
        self._widget = urwid.SelectableIcon(text, cursor_position=0)
        super().__init__(urwid.AttrMap(
            self._widget, '', {
                # If the widget is focused, map every other display attribute
                # to 'msg_selected' so the entire message is highlighted.
                None: 'msg_selected',
                'msg_date': 'msg_selected',
                'msg_text_self': 'msg_selected',
                'msg_text': 'msg_selected',
                'msg_self': 'msg_selected',
                'msg_sender': 'msg_selected',
            }
        ))
    @staticmethod
    def _get_date_str(timestamp, datetimefmt, show_date=False):
        """Convert UTC datetime into a local-timezone display string."""
        fmt = ''
        if show_date:
            # Prepend the date on its own line when the day changed.
            fmt += '\n'+datetimefmt.get('date', '')+'\n'
        fmt += datetimefmt.get('time', '')
        return timestamp.astimezone(tz=None).strftime(fmt)
    def __lt__(self, other):
        # Messages sort chronologically by timestamp.
        return self.timestamp < other.timestamp
    @staticmethod
    def from_conversation_event(conversation, conv_event, prev_conv_event,
                                datetimefmt):
        """Return a MessageWidget representing a ConversationEvent.

        Every recognized event type gets a specific rendering; anything
        else falls through to a generic placeholder message.
        """
        user = conversation.get_user(conv_event.user_id)
        # Check whether the previous event occurred on the same day as this
        # event; if not, the widget also shows the date.
        if prev_conv_event is not None:
            is_new_day = (conv_event.timestamp.astimezone(tz=None).date() !=
                          prev_conv_event.timestamp.astimezone(tz=None).date())
        else:
            is_new_day = False
        if isinstance(conv_event, hangups.ChatMessageEvent):
            return MessageWidget(conv_event.timestamp, conv_event.text,
                                 datetimefmt, user, show_date=is_new_day)
        elif isinstance(conv_event, hangups.RenameEvent):
            if conv_event.new_name == '':
                text = ('{} cleared the conversation name'
                        .format(user.first_name))
            else:
                text = ('{} renamed the conversation to {}'
                        .format(user.first_name, conv_event.new_name))
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
        elif isinstance(conv_event, hangups.MembershipChangeEvent):
            # NOTE: the comprehension's ``user`` does not shadow the outer
            # ``user`` (Python 3 comprehensions have their own scope).
            event_users = [conversation.get_user(user_id) for user_id
                           in conv_event.participant_ids]
            names = ', '.join([user.full_name for user in event_users])
            if conv_event.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN:
                text = ('{} added {} to the conversation'
                        .format(user.first_name, names))
            else:  # LEAVE
                text = ('{} left the conversation'.format(names))
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
        elif isinstance(conv_event, hangups.HangoutEvent):
            text = {
                hangups.HANGOUT_EVENT_TYPE_START: (
                    'A Hangout call is starting.'
                ),
                hangups.HANGOUT_EVENT_TYPE_END: (
                    'A Hangout call ended.'
                ),
                hangups.HANGOUT_EVENT_TYPE_ONGOING: (
                    'A Hangout call is ongoing.'
                ),
            }.get(conv_event.event_type, 'Unknown Hangout call event.')
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
        elif isinstance(conv_event, hangups.GroupLinkSharingModificationEvent):
            status_on = hangups.GROUP_LINK_SHARING_STATUS_ON
            status_text = ('on' if conv_event.new_status == status_on
                           else 'off')
            text = '{} turned {} joining by link.'.format(user.first_name,
                                                          status_text)
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
        else:
            # conv_event is a generic hangups.ConversationEvent.
            text = 'Unknown conversation event'
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
class ConversationEventListWalker(urwid.ListWalker):
    """ListWalker for ConversationEvents.

    The position may be an event ID or POSITION_LOADING.
    """
    # Sentinel position for the loading indicator shown above the oldest
    # loaded event.
    POSITION_LOADING = 'loading'
    def __init__(self, coroutine_queue, conversation, datetimefmt):
        self._coroutine_queue = coroutine_queue  # CoroutineQueue
        self._conversation = conversation  # Conversation
        self._is_scrolling = False  # Whether the user is trying to scroll up
        self._is_loading = False  # Whether we're currently loading more events
        self._first_loaded = False  # Whether the first event is loaded
        self._datetimefmt = datetimefmt
        # Focus the newest event if any are loaded, else POSITION_LOADING.
        self._focus_position = (conversation.events[-1].id_
                                if conversation.events
                                else self.POSITION_LOADING)
        self._conversation.on_event.add_observer(self._handle_event)
    def _handle_event(self, conv_event):
        """Handle updating and scrolling when a new event is added.

        Automatically scroll down to show the new text if the bottom is
        showing. This allows the user to scroll up to read previous messages
        while new messages are arriving.
        """
        if not self._is_scrolling:
            self.set_focus(conv_event.id_)
        else:
            self._modified()
    async def _load(self):
        """Load more events for this conversation."""
        try:
            conv_events = await self._conversation.get_events(
                self._conversation.events[0].id_
            )
        # IndexError: no events are loaded yet; NetworkError: the request
        # failed. Either way, treat it as nothing more to load.
        except (IndexError, hangups.NetworkError):
            conv_events = []
        if not conv_events:
            self._first_loaded = True
        if self._focus_position == self.POSITION_LOADING and conv_events:
            # If the loading indicator is still focused, and we loaded more
            # events, set focus on the first new event so the loading
            # indicator is replaced.
            self.set_focus(conv_events[-1].id_)
        else:
            # Otherwise, still need to invalidate in case the loading
            # indicator is showing but not focused.
            self._modified()
        self._is_loading = False
    def __getitem__(self, position):
        """Return widget at position or raise IndexError."""
        if position == self.POSITION_LOADING:
            if self._first_loaded:
                # TODO: Show the full date the conversation was created.
                return urwid.Text('No more messages', align='center')
            else:
                # Don't try to load while we're already loading.
                if not self._is_loading and not self._first_loaded:
                    self._is_loading = True
                    self._coroutine_queue.put(self._load())
                return urwid.Text('Loading...', align='center')
        try:
            # When creating the widget, also pass the previous event so a
            # timestamp can be shown if this event occurred on a different day.
            # Get the previous event, or None if it isn't loaded or doesn't
            # exist.
            prev_position = self._get_position(position, prev=True)
            if prev_position == self.POSITION_LOADING:
                prev_event = None
            else:
                prev_event = self._conversation.get_event(prev_position)
            return MessageWidget.from_conversation_event(
                self._conversation, self._conversation.get_event(position),
                prev_event, self._datetimefmt
            )
        except KeyError:
            raise IndexError('Invalid position: {}'.format(position))
    def _get_position(self, position, prev=False):
        """Return the next/previous position or raise IndexError."""
        if position == self.POSITION_LOADING:
            if prev:
                raise IndexError('Reached last position')
            else:
                return self._conversation.events[0].id_
        else:
            ev = self._conversation.next_event(position, prev=prev)
            if ev is None:
                if prev:
                    # Above the oldest loaded event sits the loading
                    # indicator.
                    return self.POSITION_LOADING
                else:
                    raise IndexError('Reached first position')
            else:
                return ev.id_
    def next_position(self, position):
        """Return the position below position or raise IndexError."""
        return self._get_position(position)
    def prev_position(self, position):
        """Return the position above position or raise IndexError."""
        return self._get_position(position, prev=True)
    def set_focus(self, position):
        """Set the focus to position or raise IndexError."""
        self._focus_position = position
        self._modified()
        # If we set focus to anywhere but the last position, the user is
        # scrolling up:
        try:
            self.next_position(position)
        except IndexError:
            self._is_scrolling = False
        else:
            self._is_scrolling = True
    def get_focus(self):
        """Return (widget, position) tuple."""
        return (self[self._focus_position], self._focus_position)
class ConversationWidget(WidgetBase):
    """Widget for interacting with a conversation."""
    def __init__(self, client, coroutine_queue, conversation, set_title_cb,
                 keybindings, datetimefmt):
        self._client = client
        self._coroutine_queue = coroutine_queue
        self._conversation = conversation
        self._conversation.on_event.add_observer(self._on_event)
        self._conversation.on_watermark_notification.add_observer(
            self._on_watermark_notification
        )
        self._keys = keybindings
        self.title = ''  # tab title; kept current by _set_title
        self._set_title_cb = set_title_cb
        self._set_title()
        self._list_walker = ConversationEventListWalker(
            coroutine_queue, conversation, datetimefmt
        )
        self._list_box = ListBox(keybindings, self._list_walker)
        self._status_widget = StatusLineWidget(client, conversation)
        # Layout: message list on top, status line, then the input field.
        self._widget = urwid.Pile([
            ('weight', 1, self._list_box),
            ('pack', self._status_widget),
            ('pack', ReturnableEdit(self._on_return, keybindings,
                                    caption='Send message: ')),
        ])
        # focus the edit widget by default
        self._widget.focus_position = 2
        # Display any old ConversationEvents already attached to the
        # conversation.
        for event in self._conversation.events:
            self._on_event(event)
        super().__init__(self._widget)
    def get_menu_widget(self, close_callback):
        """Return the menu widget associated with this widget."""
        return ConversationMenu(
            self._coroutine_queue, self._conversation, close_callback,
            self._keys
        )
    def keypress(self, size, key):
        """Handle marking messages as read and keeping client active."""
        # Set the client as active.
        self._coroutine_queue.put(self._client.set_active())
        # Mark the newest event as read.
        self._coroutine_queue.put(self._conversation.update_read_timestamp())
        return super().keypress(size, key)
    def _set_title(self):
        """Update this conversation's tab title."""
        self.title = get_conv_name(self._conversation, show_unread=True,
                                   truncate=True)
        self._set_title_cb(self, self.title)
    def _on_return(self, text):
        """Called when the user presses return on the send message widget."""
        # Ignore if the user hasn't typed a message.
        if not text:
            return
        elif text.startswith('/image') and len(text.split(' ')) == 2:
            # Temporary UI for testing image uploads
            filename = text.split(' ')[1]
            # NOTE(review): the handle is never explicitly closed; it must
            # stay open until send_message has consumed it, so it is left
            # to the garbage collector — consider closing it after sending.
            # A missing file also raises OSError here; confirm intended.
            image_file = open(filename, 'rb')
            text = ''
        else:
            image_file = None
        text = replace_emoticons(text)
        segments = hangups.ChatMessageSegment.from_str(text)
        self._coroutine_queue.put(
            self._handle_send_message(
                self._conversation.send_message(
                    segments, image_file=image_file
                )
            )
        )
    async def _handle_send_message(self, coro):
        """Handle showing an error if a message fails to send."""
        try:
            await coro
        except hangups.NetworkError:
            self._status_widget.show_message('Failed to send message')
    def _on_watermark_notification(self, _):
        """Handle watermark changes for this conversation."""
        # Update the unread count in the title.
        self._set_title()
    def _on_event(self, _):
        """Display a new conversation message."""
        # Update the title in case unread count or conversation name changed.
        self._set_title()
class TabbedWindowWidget(WidgetBase):
    """A widget that displays a list of widgets via a tab bar."""

    def __init__(self, keybindings):
        self._widgets = []       # [urwid.Widget], in tab display order
        self._widget_title = {}  # {urwid.Widget: str}
        self._tab_index = None   # index of the selected tab, or None
        self._keys = keybindings
        self._tabs = urwid.Text('')
        self._frame = urwid.Frame(None)
        pile = urwid.Pile([
            ('pack', urwid.AttrMap(self._tabs, 'tab_background')),
            ('weight', 1, self._frame),
        ])
        super().__init__(pile)

    def get_current_widget(self):
        """Return the widget in the current tab."""
        return self._widgets[self._tab_index]

    def _update_tabs(self):
        """Redraw the tab bar and show the selected widget in the frame."""
        markup = []
        for index, widget in enumerate(self._widgets):
            attr = ('active_tab' if index == self._tab_index
                    else 'inactive_tab')
            markup.append((attr, ' {} '.format(self._widget_title[widget])))
            markup.append(('tab_background', ' '))
        self._tabs.set_text(markup)
        self._frame.contents['body'] = (self._widgets[self._tab_index], None)

    def keypress(self, size, key):
        """Handle keypresses for changing and closing tabs."""
        key = super().keypress(size, key)
        num_tabs = len(self._widgets)
        if key == self._keys['prev_tab']:
            self._tab_index = (self._tab_index - 1) % num_tabs
            self._update_tabs()
        elif key == self._keys['next_tab']:
            self._tab_index = (self._tab_index + 1) % num_tabs
            self._update_tabs()
        elif key == self._keys['close_tab']:
            # The first tab (the conversation list) can never be closed.
            if self._tab_index > 0:
                closed = self._widgets.pop(self._tab_index)
                del self._widget_title[closed]
                self._tab_index -= 1
                self._update_tabs()
        else:
            return key

    def set_tab(self, widget, switch=False, title=None):
        """Add or modify a tab.

        If widget is not a tab, it will be added. If switch is True, switch
        to this tab. If title is given, set the tab's title.
        """
        if widget not in self._widgets:
            self._widgets.append(widget)
            self._widget_title[widget] = ''
        if switch:
            self._tab_index = self._widgets.index(widget)
        if title:
            self._widget_title[widget] = title
        self._update_tabs()
def set_terminal_title(title):
    """Set the terminal window title via an xterm escape sequence."""
    sys.stdout.write('\x1b]2;' + title + '\x07')
@contextlib.contextmanager
def bracketed_paste_mode():
    """Context manager for enabling/disabling bracketed paste mode.

    Bracketed paste makes the terminal wrap pasted text in 'begin paste' /
    'end paste' markers, so pasted newlines aren't treated as submissions.
    """
    sys.stdout.write('\x1b[?2004h')
    try:
        yield
    finally:
        # Always disable on exit, even on error, so the terminal is not
        # left in paste mode.
        sys.stdout.write('\x1b[?2004l')
def dir_maker(path):
    """Create the parent directory of *path* if it does not exist.

    Does nothing when *path* has no directory component. Exits the
    program with an error message if creation fails.
    """
    directory = os.path.dirname(path)
    if directory:
        try:
            # exist_ok avoids the check-then-create race the previous
            # isdir() test had; a pre-existing non-directory still raises
            # FileExistsError (an OSError), preserving the old behavior.
            os.makedirs(directory, exist_ok=True)
        except OSError as e:
            sys.exit('Failed to create directory: {}'.format(e))
# Mapping of --notification-type values to notifier implementations;
# 'none' maps to the no-op base Notifier.
NOTIFIER_TYPES = {
    'none': notifier.Notifier,
    'default': notifier.DefaultNotifier,
    'bell': notifier.BellNotifier,
    'dbus': notifier.DbusNotifier,
    'apple': notifier.AppleNotifier,
}
def get_notifier(notification_type, disable_notifications):
    """Return the notifier instance selected by the configuration.

    The deprecated disable_notifications flag forces the no-op base
    Notifier regardless of notification_type.
    """
    if disable_notifications:
        return notifier.Notifier()
    return NOTIFIER_TYPES[notification_type]()
def main():
    """Main entry point: parse configuration and arguments, then run the UI."""
    # Build default paths for files.
    dirs = appdirs.AppDirs('hangups', 'hangups')
    default_log_path = os.path.join(dirs.user_log_dir, 'hangups.log')
    default_token_path = os.path.join(dirs.user_cache_dir, 'refresh_token.txt')
    default_config_path = 'hangups.conf'
    user_config_path = os.path.join(dirs.user_config_dir, 'hangups.conf')
    # Create a default empty config file if it does not exist.
    dir_maker(user_config_path)
    if not os.path.isfile(user_config_path):
        with open(user_config_path, 'a') as cfg:
            cfg.write("")
    # Options may come from the command line or either config file; the
    # local hangups.conf takes precedence over the user config.
    parser = configargparse.ArgumentParser(
        prog='hangups', default_config_files=[default_config_path,
                                              user_config_path],
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
        add_help=False,  # Disable help so we can add it to the correct group.
    )
    general_group = parser.add_argument_group('General')
    general_group.add('-h', '--help', action='help',
                      help='show this help message and exit')
    general_group.add('--token-path', default=default_token_path,
                      help='path used to store OAuth refresh token')
    general_group.add('--date-format', default='< %y-%m-%d >',
                      help='date format string')
    general_group.add('--time-format', default='(%I:%M:%S %p)',
                      help='time format string')
    general_group.add('-c', '--config', help='configuration file path',
                      is_config_file=True, default=user_config_path)
    general_group.add('-v', '--version', action='version',
                      version='hangups {}'.format(hangups.__version__))
    general_group.add('-d', '--debug', action='store_true',
                      help='log detailed debugging messages')
    general_group.add('--manual-login', action='store_true',
                      help='enable manual login method using browser')
    general_group.add('--log', default=default_log_path, help='log file path')
    key_group = parser.add_argument_group('Keybindings')
    key_group.add('--key-next-tab', default='ctrl d',
                  help='keybinding for next tab')
    key_group.add('--key-prev-tab', default='ctrl u',
                  help='keybinding for previous tab')
    key_group.add('--key-close-tab', default='ctrl w',
                  help='keybinding for close tab')
    key_group.add('--key-quit', default='ctrl e',
                  help='keybinding for quitting')
    key_group.add('--key-menu', default='ctrl n',
                  help='keybinding for context menu')
    key_group.add('--key-up', default='k',
                  help='keybinding for alternate up key')
    key_group.add('--key-down', default='j',
                  help='keybinding for alternate down key')
    key_group.add('--key-page-up', default='ctrl b',
                  help='keybinding for alternate page up')
    key_group.add('--key-page-down', default='ctrl f',
                  help='keybinding for alternate page down')
    notification_group = parser.add_argument_group('Notifications')
    # deprecated in favor of --notification-type=none:
    notification_group.add('-n', '--disable-notifications',
                           action='store_true',
                           help=configargparse.SUPPRESS)
    notification_group.add('-D', '--discreet-notifications',
                           action='store_true',
                           help='hide message details in notifications')
    notification_group.add('--notification-type',
                           choices=sorted(NOTIFIER_TYPES.keys()),
                           default='default',
                           help='type of notifications to create')
    # add color scheme options
    col_group = parser.add_argument_group('Colors')
    col_group.add('--col-scheme', choices=COL_SCHEMES.keys(),
                  default='default', help='colour scheme to use')
    col_group.add('--col-palette-colors', choices=('16', '88', '256'),
                  default=16, help='Amount of available colors')
    # Per-attribute foreground/background overrides for the scheme.
    for name in COL_SCHEME_NAMES:
        col_group.add('--col-' + name.replace('_', '-') + '-fg',
                      help=name + ' foreground color')
        col_group.add('--col-' + name.replace('_', '-') + '-bg',
                      help=name + ' background color')
    args = parser.parse_args()
    # Create all necessary directories.
    for path in [args.log, args.token_path]:
        dir_maker(path)
    logging.basicConfig(filename=args.log,
                        level=logging.DEBUG if args.debug else logging.WARNING,
                        format=LOG_FORMAT)
    # urwid makes asyncio's debugging logs VERY noisy, so adjust the log level:
    logging.getLogger('asyncio').setLevel(logging.WARNING)
    datetimefmt = {'date': args.date_format,
                   'time': args.time_format}
    # setup color scheme
    palette_colors = int(args.col_palette_colors)
    col_scheme = COL_SCHEMES[args.col_scheme]
    for name in COL_SCHEME_NAMES:
        col_scheme = add_color_to_scheme(col_scheme, name,
                                         getattr(args, 'col_' + name + '_fg'),
                                         getattr(args, 'col_' + name + '_bg'),
                                         palette_colors)
    keybindings = {
        'next_tab': args.key_next_tab,
        'prev_tab': args.key_prev_tab,
        'close_tab': args.key_close_tab,
        'quit': args.key_quit,
        'menu': args.key_menu,
        'up': args.key_up,
        'down': args.key_down,
        'page_up': args.key_page_up,
        'page_down': args.key_page_down,
    }
    notifier_ = get_notifier(
        args.notification_type, args.disable_notifications
    )
    if(args.manual_login):
        # NOTE(review): exits with status 1 even after a successful manual
        # login -- confirm this exit code is intended.
        hangups.auth.get_auth_manual(args.token_path)
        sys.exit(1)
    try:
        ChatUI(
            args.token_path, keybindings, col_scheme, palette_colors,
            datetimefmt, notifier_, args.discreet_notifications
        )
    except KeyboardInterrupt:
        sys.exit('Caught KeyboardInterrupt, exiting abnormally')
# Allow running the module directly as a script.
if __name__ == '__main__':
    main()
| 39.094372 | 79 | 0.607477 |
import appdirs
import asyncio
import configargparse
import contextlib
import logging
import os
import sys
import urwid
import readlike
import hangups
from hangups.ui.emoticon import replace_emoticons
from hangups.ui import notifier
from hangups.ui.utils import get_conv_name, add_color_to_scheme
rwid.__version__ == '1.2.2-dev':
sys.exit('error: hangups-urwid package is installed\n\n'
'Please uninstall hangups-urwid and urwid, and reinstall '
'hangups.')
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
COL_SCHEMES = {
'default': {
('active_tab', '', ''),
('inactive_tab', 'standout', ''),
('msg_date', '', ''),
('msg_sender', '', ''),
('msg_self', '', ''),
('msg_text', '', ''),
('msg_text_self', '', ''),
('msg_selected', 'standout', ''),
('status_line', 'standout', ''),
('tab_background', 'standout', ''),
},
'solarized-dark': {
('active_tab', 'light gray', 'light blue'),
('inactive_tab', 'underline', 'light green'),
('msg_date', 'dark cyan', ''),
('msg_sender', 'dark blue', ''),
('msg_text_self', '', ''),
('msg_self', 'dark green', ''),
('msg_text', '', ''),
('msg_selected', 'standout', ''),
('status_line', 'standout', ''),
('tab_background', 'black,standout,underline', 'light green'),
},
}
COL_SCHEME_NAMES = (
'active_tab', 'inactive_tab', 'msg_date', 'msg_sender', 'msg_self',
'msg_text', 'msg_text_self', 'status_line', 'tab_background'
)
DISCREET_NOTIFICATION = notifier.Notification(
'hangups', 'Conversation', 'New message'
)
class HangupsDisconnected(Exception):
class ChatUI(object):
def __init__(self, refresh_token_path, keybindings, palette,
palette_colors, datetimefmt, notifier_,
discreet_notifications):
self._keys = keybindings
self._datetimefmt = datetimefmt
self._notifier = notifier_
self._discreet_notifications = discreet_notifications
set_terminal_title('hangups')
self._conv_widgets = {} # {conversation_id: ConversationWidget}
self._tabbed_window = None # TabbedWindowWidget
self._conv_list = None # hangups.ConversationList
self._user_list = None # hangups.UserList
self._coroutine_queue = CoroutineQueue()
self._exception = None
# TODO Add urwid widget for getting auth.
try:
cookies = hangups.auth.get_auth_stdin(refresh_token_path)
except hangups.GoogleAuthError as e:
sys.exit('Login failed ({})'.format(e))
self._client = hangups.Client(cookies)
self._client.on_connect.add_observer(self._on_connect)
loop = asyncio.get_event_loop()
loop.set_exception_handler(self._exception_handler)
try:
self._urwid_loop = urwid.MainLoop(
LoadingWidget(), palette, handle_mouse=False,
input_filter=self._input_filter,
event_loop=urwid.AsyncioEventLoop(loop=loop)
)
except urwid.AttrSpecError as e:
# Fail gracefully for invalid colour options.
sys.exit(e)
self._urwid_loop.screen.set_terminal_properties(colors=palette_colors)
self._urwid_loop.start()
coros = [self._connect(), self._coroutine_queue.consume()]
# Enable bracketed paste mode after the terminal has been switched to
# the alternate screen (after MainLoop.start() to work around bug
# 729533 in VTE.
with bracketed_paste_mode():
try:
# Run all the coros, until they all complete or one raises an
# exception. In the normal case, HangupsDisconnected will be
# raised.
loop.run_until_complete(asyncio.gather(*coros))
except HangupsDisconnected:
pass
finally:
# Clean up urwid.
self._urwid_loop.stop()
# Cancel all of the coros, and wait for them to shut down.
task = asyncio.gather(*coros, return_exceptions=True)
task.cancel()
try:
loop.run_until_complete(task)
except asyncio.CancelledError:
# In Python 3.7, asyncio.gather no longer swallows
# CancelledError, so we need to ignore it.
pass
loop.close()
# If an exception was stored, raise it now. This is used for exceptions
# originating in urwid callbacks.
if self._exception:
raise self._exception # pylint: disable=raising-bad-type
async def _connect(self):
await self._client.connect()
raise HangupsDisconnected()
def _exception_handler(self, _loop, context):
# Start a graceful shutdown.
self._coroutine_queue.put(self._client.disconnect())
# Store the exception to be re-raised later. If the context doesn't
default_exception = Exception(context.get('message'))
self._exception = context.get('exception', default_exception)
def _input_filter(self, keys, _):
if keys == [self._keys['menu']]:
if self._urwid_loop.widget == self._tabbed_window:
self._show_menu()
else:
self._hide_menu()
elif keys == [self._keys['quit']]:
self._coroutine_queue.put(self._client.disconnect())
else:
return keys
def _show_menu(self):
current_widget = self._tabbed_window.get_current_widget()
if hasattr(current_widget, 'get_menu_widget'):
menu_widget = current_widget.get_menu_widget(self._hide_menu)
overlay = urwid.Overlay(menu_widget, self._tabbed_window,
align='center', width=('relative', 80),
valign='middle', height=('relative', 80))
self._urwid_loop.widget = overlay
def _hide_menu(self):
self._urwid_loop.widget = self._tabbed_window
def get_conv_widget(self, conv_id):
if conv_id not in self._conv_widgets:
set_title_cb = (lambda widget, title:
self._tabbed_window.set_tab(widget, title=title))
widget = ConversationWidget(
self._client, self._coroutine_queue,
self._conv_list.get(conv_id), set_title_cb, self._keys,
self._datetimefmt
)
self._conv_widgets[conv_id] = widget
return self._conv_widgets[conv_id]
def add_conversation_tab(self, conv_id, switch=False):
conv_widget = self.get_conv_widget(conv_id)
self._tabbed_window.set_tab(conv_widget, switch=switch,
title=conv_widget.title)
def on_select_conversation(self, conv_id):
self.add_conversation_tab(conv_id, switch=True)
async def _on_connect(self):
self._user_list, self._conv_list = (
await hangups.build_user_conversation_list(self._client)
)
self._conv_list.on_event.add_observer(self._on_event)
conv_picker = ConversationPickerWidget(self._conv_list,
self.on_select_conversation,
self._keys)
self._tabbed_window = TabbedWindowWidget(self._keys)
self._tabbed_window.set_tab(conv_picker, switch=True,
title='Conversations')
self._urwid_loop.widget = self._tabbed_window
def _on_event(self, conv_event):
conv = self._conv_list.get(conv_event.conversation_id)
user = conv.get_user(conv_event.user_id)
show_notification = all((
isinstance(conv_event, hangups.ChatMessageEvent),
not user.is_self,
not conv.is_quiet,
))
if show_notification:
self.add_conversation_tab(conv_event.conversation_id)
if self._discreet_notifications:
notification = DISCREET_NOTIFICATION
else:
notification = notifier.Notification(
user.full_name, get_conv_name(conv), conv_event.text
)
self._notifier.send(notification)
class CoroutineQueue:
    """FIFO queue of coroutine objects, awaited one at a time by consume()."""

    def __init__(self):
        self._queue = asyncio.Queue()

    def put(self, coro):
        """Enqueue a coroutine object without blocking.

        Only coroutine objects are accepted (asserted), since consume()
        awaits each item directly.
        """
        assert asyncio.iscoroutine(coro)
        self._queue.put_nowait(coro)

    async def consume(self):
        """Run forever, awaiting queued coroutines in arrival order."""
        while True:
            pending = await self._queue.get()
            assert asyncio.iscoroutine(pending)
            await pending
class WidgetBase(urwid.WidgetWrap):
    """Base class for this module's widgets.

    Makes keypress an explicit method so subclasses can reliably call
    super().keypress(...).
    """
    def keypress(self, size, key):
        # Forward unchanged to the wrapped widget.
        return super().keypress(size, key)
class LoadingWidget(WidgetBase):
    """Placeholder screen shown while the client is connecting."""

    def __init__(self):
        message = urwid.Text('Connecting...', align='center')
        super().__init__(urwid.Filler(message))
class RenameConversationDialog(WidgetBase):
    """Simple form for renaming a conversation.

    on_cancel is invoked when the user aborts; on_save is invoked right
    after the rename coroutine has been queued (not after it completes).
    """
    def __init__(self, coroutine_queue, conversation, on_cancel, on_save,
                 keybindings):
        self._coroutine_queue = coroutine_queue
        self._conversation = conversation
        # Pre-fill the edit box with the conversation's current name.
        edit = urwid.Edit(edit_text=get_conv_name(conversation))
        items = [
            urwid.Text('Rename conversation:'),
            edit,
            urwid.Button(
                'Save',
                on_press=lambda _: self._rename(edit.edit_text, on_save)
            ),
            urwid.Button('Cancel', on_press=lambda _: on_cancel()),
        ]
        list_walker = urwid.SimpleFocusListWalker(items)
        list_box = ListBox(keybindings, list_walker)
        super().__init__(list_box)
    def _rename(self, name, callback):
        # The rename happens asynchronously; the callback runs immediately
        # rather than waiting for the server round-trip.
        self._coroutine_queue.put(self._conversation.rename(name))
        callback()
class ConversationMenu(WidgetBase):
    """Menu overlay for one conversation (rename dialog, back button)."""
    def __init__(self, coroutine_queue, conversation, close_callback,
                 keybindings):
        # NOTE: the lambdas below close over frame/list_box, which are only
        # assigned further down in this method. This works because the
        # closures are not invoked until after __init__ completes (late
        # binding).
        rename_dialog = RenameConversationDialog(
            coroutine_queue, conversation,
            lambda: frame.contents.__setitem__('body', (list_box, None)),
            close_callback, keybindings
        )
        items = [
            urwid.Text(
                'Conversation name: {}'.format(get_conv_name(conversation))
            ),
            urwid.Button(
                'Change Conversation Name',
                on_press=lambda _: frame.contents.__setitem__(
                    'body', (rename_dialog, None)
                )
            ),
            urwid.Divider('-'),
            urwid.Button('Back', on_press=lambda _: close_callback()),
        ]
        list_walker = urwid.SimpleFocusListWalker(items)
        list_box = ListBox(keybindings, list_walker)
        frame = urwid.Frame(list_box)
        padding = urwid.Padding(frame, left=1, right=1)
        line_box = urwid.LineBox(padding, title='Conversation Menu')
        super().__init__(line_box)
class ConversationButton(WidgetBase):
    """Clickable list entry for one conversation.

    The label tracks the conversation name and unread count, updating on
    conversation events and watermark (read-state) notifications.
    """

    def __init__(self, conversation, on_press):
        self._conversation = conversation
        # Refresh the label whenever the conversation changes or is read.
        conversation.on_event.add_observer(self._on_event)
        conversation.on_watermark_notification.add_observer(self._on_event)
        self._button = urwid.Button(
            self._get_label(), on_press=on_press, user_data=conversation.id_
        )
        super().__init__(self._button)

    def _get_label(self):
        return get_conv_name(self._conversation, show_unread=True)

    def _on_event(self, _):
        # Any event may change the unread count shown in the label.
        self._button.set_label(self._get_label())

    @property
    def last_modified(self):
        """Sort key: when the conversation was last active."""
        return self._conversation.last_modified
class ConversationListWalker(urwid.SimpleFocusListWalker):
    """List of ConversationButtons kept sorted by most-recent activity."""

    def __init__(self, conversation_list, on_select):
        self._conversation_list = conversation_list
        self._conversation_list.on_event.add_observer(self._on_event)
        # Adapt the urwid button callback (button, user_data) to on_select.
        self._on_press = lambda button, conv_id: on_select(conv_id)
        ordered = sorted(conversation_list.get_all(),
                         key=lambda conv: conv.last_modified, reverse=True)
        super().__init__(
            [ConversationButton(conv, on_press=self._on_press)
             for conv in ordered]
        )

    def _on_event(self, _):
        # Re-sort in place so the most recently active conversation comes
        # first.
        self.sort(key=lambda button: button.last_modified, reverse=True)
class ListBox(WidgetBase):
    """urwid.ListBox wrapper adding configurable alternate movement keys."""

    def __init__(self, keybindings, list_walker):
        self._keybindings = keybindings
        super().__init__(urwid.ListBox(list_walker))

    def keypress(self, size, key):
        # Let the underlying ListBox handle the key first; remap only keys
        # it did not consume.
        key = super().keypress(size, key)
        # Inserted lowest-priority first so that, if two bindings share a
        # key, the same binding wins as in an if/elif chain starting with
        # 'down' (later insertions overwrite earlier ones).
        remap = {
            self._keybindings['page_down']: 'page down',
            self._keybindings['page_up']: 'page up',
            self._keybindings['up']: 'up',
            self._keybindings['down']: 'down',
        }
        if key in remap:
            super().keypress(size, remap[key])
            return None
        return key
class ConversationPickerWidget(WidgetBase):
    """Scrollable list of conversations, used as the initial tab."""

    def __init__(self, conversation_list, on_select, keybindings):
        walker = ConversationListWalker(conversation_list, on_select)
        padded = urwid.Padding(ListBox(keybindings, walker), left=2, right=2)
        super().__init__(padded)
class ReturnableEdit(urwid.Edit):
    """Multi-line Edit that invokes a callback on enter.

    Also supports readline-style editing keys (via the readlike module) and
    bracketed paste: while pasting, enter inserts a newline instead of
    sending the message.
    """
    def __init__(self, on_return, keybindings, caption=None):
        super().__init__(caption=caption, multiline=True)
        self._on_return = on_return
        self._keys = keybindings
        # True between 'begin paste'/'end paste' terminal events.
        self._paste_mode = False
    def keypress(self, size, key):
        if key == 'begin paste':
            self._paste_mode = True
        elif key == 'end paste':
            self._paste_mode = False
        elif key == 'enter' and not self._paste_mode:
            # Send the message and clear the input box.
            self._on_return(self.get_edit_text())
            self.set_edit_text('')
        elif key not in self._keys.values() and key in readlike.keys():
            # Readline-style editing (ctrl a, alt f, ...), unless the key is
            # reserved by one of our own keybindings.
            text, pos = readlike.edit(self.edit_text, self.edit_pos, key)
            self.set_edit_text(text)
            self.set_edit_pos(pos)
        else:
            return super().keypress(size, key)
class StatusLineWidget(WidgetBase):
    """Single-line status bar for a conversation.

    Displays, in priority order: the reconnecting banner, a transient
    message set via show_message, or the list of users currently typing.
    """
    # How long a show_message() notice stays visible before being cleared.
    _MESSAGE_DELAY_SECS = 10
    def __init__(self, client, conversation):
        # Maps user_id -> latest typing status for this conversation.
        self._typing_statuses = {}
        self._conversation = conversation
        self._conversation.on_event.add_observer(self._on_event)
        self._conversation.on_typing.add_observer(self._on_typing)
        self._widget = urwid.Text('', align='center')
        self._is_connected = True
        self._message = None
        self._message_handle = None
        client.on_disconnect.add_observer(self._on_disconnect)
        client.on_reconnect.add_observer(self._on_reconnect)
        super().__init__(urwid.AttrMap(self._widget, 'status_line'))
    def show_message(self, message_str):
        """Show a transient message for _MESSAGE_DELAY_SECS seconds."""
        # Replace any message already being displayed (cancel its timer).
        if self._message_handle is not None:
            self._message_handle.cancel()
        self._message_handle = asyncio.get_event_loop().call_later(
            self._MESSAGE_DELAY_SECS, self._clear_message
        )
        self._message = message_str
        self._update()
    def _clear_message(self):
        # Timer callback: drop the transient message and re-render.
        self._message = None
        self._message_handle = None
        self._update()
    def _on_disconnect(self):
        self._is_connected = False
        self._update()
    def _on_reconnect(self):
        self._is_connected = True
        self._update()
    def _on_event(self, conv_event):
        # A delivered message implies its sender stopped typing.
        if isinstance(conv_event, hangups.ChatMessageEvent):
            self._typing_statuses[conv_event.user_id] = (
                hangups.TYPING_TYPE_STOPPED
            )
            self._update()
    def _on_typing(self, typing_message):
        self._typing_statuses[typing_message.user_id] = typing_message.status
        self._update()
    def _update(self):
        """Re-render: connection state > transient message > typing list."""
        typing_users = [self._conversation.get_user(user_id)
                        for user_id, status in self._typing_statuses.items()
                        if status == hangups.TYPING_TYPE_STARTED]
        displayed_names = [user.first_name for user in typing_users
                           if not user.is_self]
        if displayed_names:
            typing_message = '{} {} typing...'.format(
                ', '.join(sorted(displayed_names)),
                'is' if len(displayed_names) == 1 else 'are'
            )
        else:
            typing_message = ''
        if not self._is_connected:
            self._widget.set_text("RECONNECTING...")
        elif self._message is not None:
            self._widget.set_text(self._message)
        else:
            self._widget.set_text(typing_message)
class MessageWidget(WidgetBase):
    """Renders one conversation event as a selectable line.

    Handles chat messages, renames, membership changes, Hangout call
    events, and link-sharing changes; anything else gets a generic label.
    """
    def __init__(self, timestamp, text, datetimefmt, user=None,
                 show_date=False):
        # Public so widgets can be ordered chronologically via __lt__.
        self.timestamp = timestamp
        text = [
            ('msg_date', self._get_date_str(timestamp, datetimefmt,
                                            show_date=show_date) + ' '),
            ('msg_text_self' if user is not None and user.is_self
             else 'msg_text', text)
        ]
        if user is not None:
            # Prefix the sender's name, styled by whether it is ourselves.
            text.insert(1, ('msg_self' if user.is_self else 'msg_sender',
                            user.first_name + ': '))
        self._widget = urwid.SelectableIcon(text, cursor_position=0)
        # Map every attribute to msg_selected when the line has focus.
        super().__init__(urwid.AttrMap(
            self._widget, '', {
                None: 'msg_selected',
                'msg_date': 'msg_selected',
                'msg_text_self': 'msg_selected',
                'msg_text': 'msg_selected',
                'msg_self': 'msg_selected',
                'msg_sender': 'msg_selected',
            }
        ))
    @staticmethod
    def _get_date_str(timestamp, datetimefmt, show_date=False):
        """Format a timestamp in local time; include the date only when
        show_date is True (first message of a new day)."""
        fmt = ''
        if show_date:
            fmt += '\n'+datetimefmt.get('date', '')+'\n'
        fmt += datetimefmt.get('time', '')
        return timestamp.astimezone(tz=None).strftime(fmt)
    def __lt__(self, other):
        return self.timestamp < other.timestamp
    @staticmethod
    def from_conversation_event(conversation, conv_event, prev_conv_event,
                                datetimefmt):
        """Build a MessageWidget describing conv_event.

        prev_conv_event (may be None) determines whether the event starts a
        new local day, in which case the date is shown above it.
        """
        user = conversation.get_user(conv_event.user_id)
        # Check whether the event occurred on a different day than the
        # previous event.
        if prev_conv_event is not None:
            is_new_day = (conv_event.timestamp.astimezone(tz=None).date() !=
                          prev_conv_event.timestamp.astimezone(tz=None).date())
        else:
            is_new_day = False
        if isinstance(conv_event, hangups.ChatMessageEvent):
            return MessageWidget(conv_event.timestamp, conv_event.text,
                                 datetimefmt, user, show_date=is_new_day)
        elif isinstance(conv_event, hangups.RenameEvent):
            if conv_event.new_name == '':
                text = ('{} cleared the conversation name'
                        .format(user.first_name))
            else:
                text = ('{} renamed the conversation to {}'
                        .format(user.first_name, conv_event.new_name))
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
        elif isinstance(conv_event, hangups.MembershipChangeEvent):
            event_users = [conversation.get_user(user_id) for user_id
                           in conv_event.participant_ids]
            names = ', '.join([user.full_name for user in event_users])
            if conv_event.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN:
                text = ('{} added {} to the conversation'
                        .format(user.first_name, names))
            else:
                text = ('{} left the conversation'.format(names))
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
        elif isinstance(conv_event, hangups.HangoutEvent):
            text = {
                hangups.HANGOUT_EVENT_TYPE_START: (
                    'A Hangout call is starting.'
                ),
                hangups.HANGOUT_EVENT_TYPE_END: (
                    'A Hangout call ended.'
                ),
                hangups.HANGOUT_EVENT_TYPE_ONGOING: (
                    'A Hangout call is ongoing.'
                ),
            }.get(conv_event.event_type, 'Unknown Hangout call event.')
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
        elif isinstance(conv_event, hangups.GroupLinkSharingModificationEvent):
            status_on = hangups.GROUP_LINK_SHARING_STATUS_ON
            status_text = ('on' if conv_event.new_status == status_on
                           else 'off')
            text = '{} turned {} joining by link.'.format(user.first_name,
                                                          status_text)
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
        else:
            text = 'Unknown conversation event'
            return MessageWidget(conv_event.timestamp, text, datetimefmt,
                                 show_date=is_new_day)
class ConversationEventListWalker(urwid.ListWalker):
    """Lazy urwid list walker over a conversation's events.

    Older events are fetched on demand when the loading indicator at the
    top of the list is rendered.
    """
    # Sentinel position for the "Loading..." / "No more messages" row.
    POSITION_LOADING = 'loading'
    def __init__(self, coroutine_queue, conversation, datetimefmt):
        self._coroutine_queue = coroutine_queue
        self._conversation = conversation
        self._is_scrolling = False
        self._is_loading = False
        self._first_loaded = False  # Whether the first event is loaded
        self._datetimefmt = datetimefmt
        # Focus position is the newest (last) event ID, or POSITION_LOADING
        # when no events are attached yet.
        self._focus_position = (conversation.events[-1].id_
                                if conversation.events
                                else self.POSITION_LOADING)
        self._conversation.on_event.add_observer(self._handle_event)
    def _handle_event(self, conv_event):
        # Follow the newest message unless the user has scrolled away.
        if not self._is_scrolling:
            self.set_focus(conv_event.id_)
        else:
            self._modified()
    async def _load(self):
        """Fetch a batch of events older than the oldest one loaded."""
        try:
            conv_events = await self._conversation.get_events(
                self._conversation.events[0].id_
            )
        except (IndexError, hangups.NetworkError):
            conv_events = []
        if not conv_events:
            self._first_loaded = True
        if self._focus_position == self.POSITION_LOADING and conv_events:
            # If the loading indicator is still focused, and we loaded more
            # events, set focus on the first new event so the loaded
            # indicator is replaced.
            self.set_focus(conv_events[-1].id_)
        else:
            # Otherwise, still need to invalidate in case the loading
            # indicator is showing but not focused.
            self._modified()
        self._is_loading = False
    def __getitem__(self, position):
        """Return the widget at position, triggering a load if needed."""
        if position == self.POSITION_LOADING:
            if self._first_loaded:
                # TODO: Show the full date the conversation was created.
                return urwid.Text('No more messages', align='center')
            else:
                # Don't try to load while we're already loading.
                if not self._is_loading and not self._first_loaded:
                    self._is_loading = True
                    self._coroutine_queue.put(self._load())
                return urwid.Text('Loading...', align='center')
        try:
            # When creating the widget, also pass the previous event so a
            # timestamp can be shown if this event occurred on a different day.
            # Get the previous event, or None if it isn't loaded or doesn't
            # exist.
            prev_position = self._get_position(position, prev=True)
            if prev_position == self.POSITION_LOADING:
                prev_event = None
            else:
                prev_event = self._conversation.get_event(prev_position)
            return MessageWidget.from_conversation_event(
                self._conversation, self._conversation.get_event(position),
                prev_event, self._datetimefmt
            )
        except KeyError:
            raise IndexError('Invalid position: {}'.format(position))
    def _get_position(self, position, prev=False):
        """Return the next (or previous) position, raising IndexError at
        the ends of the list."""
        if position == self.POSITION_LOADING:
            if prev:
                raise IndexError('Reached last position')
            else:
                return self._conversation.events[0].id_
        else:
            ev = self._conversation.next_event(position, prev=prev)
            if ev is None:
                if prev:
                    # Before the oldest loaded event comes the loading row.
                    return self.POSITION_LOADING
                else:
                    raise IndexError('Reached first position')
            else:
                return ev.id_
    def next_position(self, position):
        return self._get_position(position)
    def prev_position(self, position):
        return self._get_position(position, prev=True)
    def set_focus(self, position):
        self._focus_position = position
        self._modified()
        # If we set focus to anywhere but the last position, the user is
        # scrolling up:
        try:
            self.next_position(position)
        except IndexError:
            self._is_scrolling = False
        else:
            self._is_scrolling = True
    def get_focus(self):
        return (self[self._focus_position], self._focus_position)
class ConversationWidget(WidgetBase):
    """Widget for interacting with a single conversation.

    Lays out a scrollback of events, a status line, and a message entry
    box. Supports sending text (with emoticon replacement) and uploading
    an image via '/image <path>'.
    """

    def __init__(self, client, coroutine_queue, conversation, set_title_cb,
                 keybindings, datetimefmt):
        self._client = client
        self._coroutine_queue = coroutine_queue
        self._conversation = conversation
        self._conversation.on_event.add_observer(self._on_event)
        self._conversation.on_watermark_notification.add_observer(
            self._on_watermark_notification
        )
        self._keys = keybindings
        self.title = ''
        self._set_title_cb = set_title_cb
        self._set_title()
        self._list_walker = ConversationEventListWalker(
            coroutine_queue, conversation, datetimefmt
        )
        self._list_box = ListBox(keybindings, self._list_walker)
        self._status_widget = StatusLineWidget(client, conversation)
        self._widget = urwid.Pile([
            ('weight', 1, self._list_box),
            ('pack', self._status_widget),
            ('pack', ReturnableEdit(self._on_return, keybindings,
                                    caption='Send message: ')),
        ])
        # focus the edit widget by default
        self._widget.focus_position = 2
        # Display any old ConversationEvents already attached to the
        # conversation.
        for event in self._conversation.events:
            self._on_event(event)
        super().__init__(self._widget)

    def get_menu_widget(self, close_callback):
        """Return the menu overlay widget for this conversation."""
        return ConversationMenu(
            self._coroutine_queue, self._conversation, close_callback,
            self._keys
        )

    def keypress(self, size, key):
        """Handle a keypress; any interaction marks us active and read."""
        # Set the client as active.
        self._coroutine_queue.put(self._client.set_active())
        # Mark the newest event as read.
        self._coroutine_queue.put(self._conversation.update_read_timestamp())
        return super().keypress(size, key)

    def _set_title(self):
        """Recompute the tab title (includes the unread count)."""
        self.title = get_conv_name(self._conversation, show_unread=True,
                                   truncate=True)
        self._set_title_cb(self, self.title)

    def _on_return(self, text):
        """Send the typed message; '/image <path>' uploads an image."""
        # Ignore if the user hasn't typed a message.
        if not text:
            return
        elif text.startswith('/image') and len(text.split(' ')) == 2:
            filename = text.split(' ')[1]
            try:
                # The file object is handed to hangups for the async upload,
                # so it is deliberately not closed here (presumably hangups
                # reads it during send_message — TODO confirm).
                image_file = open(filename, 'rb')
            except OSError as e:
                # A bad path used to raise out of the urwid callback and
                # crash the UI; report it on the status line instead.
                self._status_widget.show_message(
                    'Failed to open image: {}'.format(e)
                )
                return
            text = ''
        else:
            image_file = None
            text = replace_emoticons(text)
        segments = hangups.ChatMessageSegment.from_str(text)
        self._coroutine_queue.put(
            self._handle_send_message(
                self._conversation.send_message(
                    segments, image_file=image_file
                )
            )
        )

    async def _handle_send_message(self, coro):
        """Await a send coroutine, reporting network failures."""
        try:
            await coro
        except hangups.NetworkError:
            self._status_widget.show_message('Failed to send message')

    def _on_watermark_notification(self, _):
        # Read-state changes affect the unread count in the title.
        self._set_title()

    def _on_event(self, _):
        # New events affect the unread count in the title.
        self._set_title()
class TabbedWindowWidget(WidgetBase):
    """Tab bar plus a frame showing the currently selected tab's widget."""
    def __init__(self, keybindings):
        # Parallel structures: ordered list of tab widgets and their titles.
        self._widgets = []
        self._widget_title = {}
        # Index of the selected tab; None until the first set_tab call.
        self._tab_index = None
        self._keys = keybindings
        self._tabs = urwid.Text('')
        self._frame = urwid.Frame(None)
        super().__init__(urwid.Pile([
            ('pack', urwid.AttrMap(self._tabs, 'tab_background')),
            ('weight', 1, self._frame),
        ]))
    def get_current_widget(self):
        """Return the widget of the currently selected tab."""
        return self._widgets[self._tab_index]
    def _update_tabs(self):
        """Redraw the tab bar and show the selected tab's widget."""
        text = []
        for num, widget in enumerate(self._widgets):
            palette = ('active_tab' if num == self._tab_index
                       else 'inactive_tab')
            text += [
                (palette, ' {} '.format(self._widget_title[widget])),
                ('tab_background', ' '),
            ]
        self._tabs.set_text(text)
        self._frame.contents['body'] = (self._widgets[self._tab_index], None)
    def keypress(self, size, key):
        """Handle prev/next/close tab keybindings."""
        key = super().keypress(size, key)
        num_tabs = len(self._widgets)
        if key == self._keys['prev_tab']:
            self._tab_index = (self._tab_index - 1) % num_tabs
            self._update_tabs()
        elif key == self._keys['next_tab']:
            self._tab_index = (self._tab_index + 1) % num_tabs
            self._update_tabs()
        elif key == self._keys['close_tab']:
            # Tab 0 (the conversation picker) can never be closed.
            if self._tab_index > 0:
                curr_tab = self._widgets[self._tab_index]
                self._widgets.remove(curr_tab)
                del self._widget_title[curr_tab]
                self._tab_index -= 1
                self._update_tabs()
        else:
            return key
    def set_tab(self, widget, switch=False, title=None):
        """Add or update a tab; optionally switch to it and/or retitle it."""
        if widget not in self._widgets:
            self._widgets.append(widget)
            self._widget_title[widget] = ''
        if switch:
            self._tab_index = self._widgets.index(widget)
        if title:
            self._widget_title[widget] = title
            self._update_tabs()
def set_terminal_title(title):
    """Use an xterm OSC escape sequence to set the terminal window title."""
    escape_seq = '\x1b]2;{}\x07'
    sys.stdout.write(escape_seq.format(title))
@contextlib.contextmanager
def bracketed_paste_mode():
    """Enable terminal bracketed paste mode for the duration of the block.

    Always disables it again on exit, even if the block raises.
    """
    enable, disable = '\x1b[?2004h', '\x1b[?2004l'
    sys.stdout.write(enable)
    try:
        yield
    finally:
        sys.stdout.write(disable)
def dir_maker(path):
    """Ensure the parent directory of ``path`` exists.

    A ``path`` with no directory component is a no-op. Exits the program
    with an error message if the directory cannot be created (e.g. a file
    already exists at that location, or permissions are insufficient).
    """
    directory = os.path.dirname(path)
    if directory == '':
        return
    try:
        # exist_ok avoids the TOCTOU race of the old isdir()-then-makedirs()
        # pattern when two hangups processes start at once.
        os.makedirs(directory, exist_ok=True)
    except OSError as e:
        sys.exit('Failed to create directory: {}'.format(e))
# Maps --notification-type choices to Notifier implementations. 'none' is
# the base Notifier, which produces no notifications.
NOTIFIER_TYPES = {
    'none': notifier.Notifier,
    'default': notifier.DefaultNotifier,
    'bell': notifier.BellNotifier,
    'dbus': notifier.DbusNotifier,
    'apple': notifier.AppleNotifier,
}
def get_notifier(notification_type, disable_notifications):
    """Return a Notifier instance for the given CLI options.

    The deprecated --disable-notifications flag overrides the selected
    notification type with the no-op base Notifier.
    """
    if disable_notifications:
        return notifier.Notifier()
    return NOTIFIER_TYPES[notification_type]()
def main():
    """Entry point: parse CLI/config options and start the chat UI."""
    # Build default paths for files.
    dirs = appdirs.AppDirs('hangups', 'hangups')
    default_log_path = os.path.join(dirs.user_log_dir, 'hangups.log')
    default_token_path = os.path.join(dirs.user_cache_dir, 'refresh_token.txt')
    default_config_path = 'hangups.conf'
    user_config_path = os.path.join(dirs.user_config_dir, 'hangups.conf')
    # Create a default empty config file if does not exist.
    dir_maker(user_config_path)
    if not os.path.isfile(user_config_path):
        with open(user_config_path, 'a') as cfg:
            cfg.write("")
    parser = configargparse.ArgumentParser(
        prog='hangups', default_config_files=[default_config_path,
                                              user_config_path],
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
        add_help=False,  # Disable help so we can add it to the correct group.
    )
    general_group = parser.add_argument_group('General')
    general_group.add('-h', '--help', action='help',
                      help='show this help message and exit')
    general_group.add('--token-path', default=default_token_path,
                      help='path used to store OAuth refresh token')
    general_group.add('--date-format', default='< %y-%m-%d >',
                      help='date format string')
    general_group.add('--time-format', default='(%I:%M:%S %p)',
                      help='time format string')
    general_group.add('-c', '--config', help='configuration file path',
                      is_config_file=True, default=user_config_path)
    general_group.add('-v', '--version', action='version',
                      version='hangups {}'.format(hangups.__version__))
    general_group.add('-d', '--debug', action='store_true',
                      help='log detailed debugging messages')
    general_group.add('--manual-login', action='store_true',
                      help='enable manual login method using browser')
    general_group.add('--log', default=default_log_path, help='log file path')
    key_group = parser.add_argument_group('Keybindings')
    key_group.add('--key-next-tab', default='ctrl d',
                  help='keybinding for next tab')
    key_group.add('--key-prev-tab', default='ctrl u',
                  help='keybinding for previous tab')
    key_group.add('--key-close-tab', default='ctrl w',
                  help='keybinding for close tab')
    key_group.add('--key-quit', default='ctrl e',
                  help='keybinding for quitting')
    key_group.add('--key-menu', default='ctrl n',
                  help='keybinding for context menu')
    key_group.add('--key-up', default='k',
                  help='keybinding for alternate up key')
    key_group.add('--key-down', default='j',
                  help='keybinding for alternate down key')
    key_group.add('--key-page-up', default='ctrl b',
                  help='keybinding for alternate page up')
    key_group.add('--key-page-down', default='ctrl f',
                  help='keybinding for alternate page down')
    notification_group = parser.add_argument_group('Notifications')
    # deprecated in favor of --notification-type=none:
    notification_group.add('-n', '--disable-notifications',
                           action='store_true',
                           help=configargparse.SUPPRESS)
    notification_group.add('-D', '--discreet-notifications',
                           action='store_true',
                           help='hide message details in notifications')
    notification_group.add('--notification-type',
                           choices=sorted(NOTIFIER_TYPES.keys()),
                           default='default',
                           help='type of notifications to create')
    # add color scheme options
    col_group = parser.add_argument_group('Colors')
    col_group.add('--col-scheme', choices=COL_SCHEMES.keys(),
                  default='default', help='colour scheme to use')
    col_group.add('--col-palette-colors', choices=('16', '88', '256'),
                  default=16, help='Amount of available colors')
    for name in COL_SCHEME_NAMES:
        col_group.add('--col-' + name.replace('_', '-') + '-fg',
                      help=name + ' foreground color')
        col_group.add('--col-' + name.replace('_', '-') + '-bg',
                      help=name + ' background color')
    args = parser.parse_args()
    # Create all necessary directories.
    for path in [args.log, args.token_path]:
        dir_maker(path)
    logging.basicConfig(filename=args.log,
                        level=logging.DEBUG if args.debug else logging.WARNING,
                        format=LOG_FORMAT)
    # urwid makes asyncio's debugging logs VERY noisy, so adjust the log level:
    logging.getLogger('asyncio').setLevel(logging.WARNING)
    datetimefmt = {'date': args.date_format,
                   'time': args.time_format}
    palette_colors = int(args.col_palette_colors)
    # Apply any per-name foreground/background overrides to the scheme.
    col_scheme = COL_SCHEMES[args.col_scheme]
    for name in COL_SCHEME_NAMES:
        col_scheme = add_color_to_scheme(col_scheme, name,
                                         getattr(args, 'col_' + name + '_fg'),
                                         getattr(args, 'col_' + name + '_bg'),
                                         palette_colors)
    keybindings = {
        'next_tab': args.key_next_tab,
        'prev_tab': args.key_prev_tab,
        'close_tab': args.key_close_tab,
        'quit': args.key_quit,
        'menu': args.key_menu,
        'up': args.key_up,
        'down': args.key_down,
        'page_up': args.key_page_up,
        'page_down': args.key_page_down,
    }
    notifier_ = get_notifier(
        args.notification_type, args.disable_notifications
    )
    # NOTE(review): exits with status 1 even after a successful manual
    # login — confirm whether status 0 was intended here.
    if(args.manual_login):
        hangups.auth.get_auth_manual(args.token_path)
        sys.exit(1)
    try:
        ChatUI(
            args.token_path, keybindings, col_scheme, palette_colors,
            datetimefmt, notifier_, args.discreet_notifications
        )
    except KeyboardInterrupt:
        sys.exit('Caught KeyboardInterrupt, exiting abnormally')
if __name__ == '__main__':
    main()
| true | true |
1c45fd021187544fbd3336d5d553e7bcdc31d6de | 2,477 | py | Python | ANPR.py | itcthienkhiem/myANPR | e0a76b2165d539c6a38f51f7485f37349a85a074 | [
"Apache-2.0"
] | null | null | null | ANPR.py | itcthienkhiem/myANPR | e0a76b2165d539c6a38f51f7485f37349a85a074 | [
"Apache-2.0"
] | null | null | null | ANPR.py | itcthienkhiem/myANPR | e0a76b2165d539c6a38f51f7485f37349a85a074 | [
"Apache-2.0"
] | null | null | null |
try:
import cv2
except ImportError:
print ("You must have OpenCV installed")
import matplotlib.pyplot as plt
import numpy as np
#Image(filename='../../../data/ANPR/sample_plates.png')
def showfig(image, ucmap):
    """Display an image with matplotlib, hiding both axes.

    Color images are converted from OpenCV's BGR channel ordering to the
    RGB ordering matplotlib expects; grayscale images are shown as-is.
    """
    is_color = len(image.shape) == 3
    if is_color:
        blue, green, red = cv2.split(image)
        image = cv2.merge([red, green, blue])
    axes_image = plt.imshow(image, ucmap)
    axes_image.axes.get_xaxis().set_visible(False)
    axes_image.axes.get_yaxis().set_visible(False)
    plt.show()
# Load the sample image and run the plate-localization pipeline:
# grayscale -> blur -> horizontal Sobel -> Otsu threshold -> morphological
# closing -> external contours.
plt.rcParams['figure.figsize'] = 10, 10
plt.title('Sample Car')
image_path="out.jpg"
carsample=cv2.imread(image_path)
showfig(carsample,None)
plt.rcParams['figure.figsize'] = 7,7
# convert into grayscale
gray_carsample=cv2.cvtColor(carsample, cv2.COLOR_BGR2GRAY)
showfig(gray_carsample, plt.get_cmap('gray'))
# blur the image
blur=cv2.GaussianBlur(gray_carsample,(5,5),0)
showfig(blur, plt.get_cmap('gray'))
# find the sobel gradient. use the kernel size to be 3
sobelx=cv2.Sobel(blur, cv2.CV_8U, 1, 0, ksize=3)
showfig(sobelx, plt.get_cmap('gray'))
#Otsu thresholding
_,th2=cv2.threshold(sobelx, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
showfig(th2, plt.get_cmap('gray'))
#Morphological Closing
# A wide, flat structuring element (23x2) joins the vertical character
# edges of a plate into one connected region.
se=cv2.getStructuringElement(cv2.MORPH_RECT,(23,2))
closing=cv2.morphologyEx(th2, cv2.MORPH_CLOSE, se)
showfig(closing, plt.get_cmap('gray'))
_,contours,_=cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# Draw every contour's min-area box in green.
# NOTE(review): np.int0 is a deprecated alias of np.intp and was removed
# in NumPy 2.0; consider np.intp for forward compatibility.
for cnt in contours:
    rect=cv2.minAreaRect(cnt)
    box=cv2.boxPoints(rect)
    box=np.int0(box)
    cv2.drawContours(carsample, [box], 0, (0,255,0),2)
showfig(carsample, None)
def validate(cnt):
    """Heuristically decide whether a contour could be a licence plate.

    The contour's min-area bounding rectangle must be elongated (its long
    side more than twice its short side, in either orientation) and have an
    area strictly between 1000 and 16000 pixels.

    Args:
        cnt: a contour as returned by cv2.findContours.

    Returns:
        True if the contour passes the plate heuristics, else False.
    """
    rect = cv2.minAreaRect(cnt)
    # rect is ((cx, cy), (width, height), angle); the dead boxPoints/int0
    # computation of the original was removed — the box was never used.
    width, height = rect[1]
    if width == 0 or height == 0:
        return False
    # h/w > 2 implies h > w for positive sizes, so the original compound
    # test reduces to this elongation check.
    elongated = height > 2 * width or width > 2 * height
    area = width * height
    return elongated and 1000 < area < 16000
# Re-draw only the contours that pass the plate heuristics, in red.
for cnt in contours:
    if validate(cnt):
        rect=cv2.minAreaRect(cnt)
        box=cv2.boxPoints(rect)
        box=np.int0(box)
        cv2.drawContours(carsample, [box], 0, (0,0,255),2)
showfig(carsample, None)
| 31.75641 | 88 | 0.669762 |
# NOTE(review): everything below is a comment-stripped duplicate of the
# ANPR script above, produced by dataset extraction (the
# "content_no_comment" column); consider removing it rather than
# maintaining two copies.
try:
    import cv2
except ImportError:
    print ("You must have OpenCV installed")
import matplotlib.pyplot as plt
import numpy as np
def showfig(image, ucmap):
    if len(image.shape)==3 :
        b,g,r = cv2.split(image)
        image = cv2.merge([r,g,b])
    imgplot=plt.imshow(image, ucmap)
    imgplot.axes.get_xaxis().set_visible(False)
    imgplot.axes.get_yaxis().set_visible(False)
    plt.show()
plt.rcParams['figure.figsize'] = 10, 10
plt.title('Sample Car')
image_path="out.jpg"
carsample=cv2.imread(image_path)
showfig(carsample,None)
plt.rcParams['figure.figsize'] = 7,7
gray_carsample=cv2.cvtColor(carsample, cv2.COLOR_BGR2GRAY)
showfig(gray_carsample, plt.get_cmap('gray'))
blur=cv2.GaussianBlur(gray_carsample,(5,5),0)
showfig(blur, plt.get_cmap('gray'))
sobelx=cv2.Sobel(blur, cv2.CV_8U, 1, 0, ksize=3)
showfig(sobelx, plt.get_cmap('gray'))
_,th2=cv2.threshold(sobelx, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
showfig(th2, plt.get_cmap('gray'))
se=cv2.getStructuringElement(cv2.MORPH_RECT,(23,2))
closing=cv2.morphologyEx(th2, cv2.MORPH_CLOSE, se)
showfig(closing, plt.get_cmap('gray'))
_,contours,_=cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    rect=cv2.minAreaRect(cnt)
    box=cv2.boxPoints(rect)
    box=np.int0(box)
    cv2.drawContours(carsample, [box], 0, (0,255,0),2)
showfig(carsample, None)
def validate(cnt):
    rect=cv2.minAreaRect(cnt)
    box=cv2.boxPoints(rect)
    box=np.int0(box)
    output=False
    width=rect[1][0]
    height=rect[1][1]
    if ((width!=0) & (height!=0)):
        if (((height/width>2) & (height>width)) | ((width/height>2) & (width>height))):
            if((height*width<16000) & (height*width>1000)):
                output=True
    return output
for cnt in contours:
    if validate(cnt):
        rect=cv2.minAreaRect(cnt)
        box=cv2.boxPoints(rect)
        box=np.int0(box)
        cv2.drawContours(carsample, [box], 0, (0,0,255),2)
showfig(carsample, None)
1c45fe0df1db49b180b71b556b709af501dbc80c | 86 | py | Python | tests/profiling/wrong_program_gevent.py | mykytarudenko/new-project | e06a912382239739dd3f93b54d545b9506102372 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/profiling/wrong_program_gevent.py | mykytarudenko/new-project | e06a912382239739dd3f93b54d545b9506102372 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-01-27T04:53:24.000Z | 2021-01-27T04:53:24.000Z | tests/profiling/wrong_program_gevent.py | mykytarudenko/new-project | e06a912382239739dd3f93b54d545b9506102372 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | from gevent import monkey
import ddtrace.profiling.auto # noqa
monkey.patch_all()
| 12.285714 | 37 | 0.77907 | from gevent import monkey
import ddtrace.profiling.auto
monkey.patch_all()
| true | true |
1c45fe43ea64a09d9242eb97f7af409854244804 | 10,694 | py | Python | readers.py | ankitshah009/youtube-8m-1 | a0f28c9ca05b72ca709322f2c4871a4345a69fbb | [
"Apache-2.0"
] | 2 | 2018-09-15T04:14:28.000Z | 2019-02-14T02:35:55.000Z | readers.py | ankitshah009/youtube-8m-1 | a0f28c9ca05b72ca709322f2c4871a4345a69fbb | [
"Apache-2.0"
] | null | null | null | readers.py | ankitshah009/youtube-8m-1 | a0f28c9ca05b72ca709322f2c4871a4345a69fbb | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides readers configured for different datasets."""
import tensorflow as tf
try:
# relative imports on gcloud (as a module)
from . import utils
except ImportError:
# relative imports locally (as a script)
import utils
from tensorflow import logging
def resize_axis(tensor, axis, new_size, fill_value=0):
  """Truncates or pads a tensor to new_size on a given axis.

  Truncate or extend tensor such that tensor.shape[axis] == new_size. If the
  size increases, the padding will be performed at the end, using fill_value.

  Args:
    tensor: The tensor to be resized.
    axis: An integer representing the dimension to be sliced.
    new_size: An integer or 0d tensor representing the new value for
      tensor.shape[axis].
    fill_value: Value to use to fill any new entries in the tensor. Will be
      cast to the type of tensor.

  Returns:
    The resized tensor.
  """
  tensor = tf.convert_to_tensor(tensor)
  shape = tf.unstack(tf.shape(tensor))
  # pad_shape[axis] = number of fill entries needed (0 when truncating).
  pad_shape = shape[:]
  pad_shape[axis] = tf.maximum(0, new_size - shape[axis])
  # Clamp the slice size so truncation keeps at most new_size entries.
  shape[axis] = tf.minimum(shape[axis], new_size)
  shape = tf.stack(shape)
  # Kept slice concatenated with the fill block along the target axis.
  resized = tf.concat([
    tf.slice(tensor, tf.zeros_like(shape), shape),
    tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
  ], axis)
  # Update shape.
  new_shape = tensor.get_shape().as_list()  # A copy is being made.
  new_shape[axis] = new_size
  resized.set_shape(new_shape)
  return resized
class BaseReader(object):
  """Inherit from this class when implementing new readers."""
  def prepare_reader(self, unused_filename_queue):
    """Create a thread for generating prediction and label tensors.

    Subclasses must override this to build their input-pipeline ops.
    """
    raise NotImplementedError()
class YT8MAggregatedFeatureReader(BaseReader):
  """Reads TFRecords of pre-aggregated Examples.

  The TFRecords must contain Examples with a sparse int64 'labels' feature
  and a fixed length float32 feature, obtained from the features in
  'feature_names'. The float features are assumed to be an average of
  dequantized values.
  """
  # NOTE(review): mutable default arguments below; safe only because the
  # lists are never mutated.
  def __init__(self,
               num_classes=3862,
               feature_sizes=[1024, 128],
               feature_names=["mean_rgb", "mean_audio"]):
    """Construct a YT8MAggregatedFeatureReader.

    Args:
      num_classes: a positive integer for the number of classes.
      feature_sizes: positive integer(s) for the feature dimensions as a list.
      feature_names: the feature name(s) in the tensorflow record as a list.
    """
    assert len(feature_names) == len(feature_sizes), \
    "length of feature_names (={}) != length of feature_sizes (={})".format( \
    len(feature_names), len(feature_sizes))
    self.num_classes = num_classes
    self.feature_sizes = feature_sizes
    self.feature_names = feature_names
  def prepare_reader(self, filename_queue, batch_size=1024):
    """Creates a single reader thread for pre-aggregated YouTube 8M Examples.

    Args:
      filename_queue: A tensorflow queue of filename locations.
      batch_size: maximum number of serialized Examples read per batch.

    Returns:
      A tuple of video indexes, features, labels, and padding data.
    """
    reader = tf.TFRecordReader()
    _, serialized_examples = reader.read_up_to(filename_queue, batch_size)
    tf.add_to_collection("serialized_examples", serialized_examples)
    return self.prepare_serialized_examples(serialized_examples)
  def prepare_serialized_examples(self, serialized_examples):
    """Parse serialized Examples into (ids, features, labels, weights)."""
    # set the mapping from the fields to data types in the proto
    num_features = len(self.feature_names)
    assert num_features > 0, "self.feature_names is empty!"
    assert len(self.feature_names) == len(self.feature_sizes), \
    "length of feature_names (={}) != length of feature_sizes (={})".format( \
    len(self.feature_names), len(self.feature_sizes))
    feature_map = {"id": tf.FixedLenFeature([], tf.string),
                   "labels": tf.VarLenFeature(tf.int64)}
    for feature_index in range(num_features):
      feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
          [self.feature_sizes[feature_index]], tf.float32)
    features = tf.parse_example(serialized_examples, features=feature_map)
    # Convert sparse label indices into a dense multi-hot indicator matrix.
    labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
    labels.set_shape([None, self.num_classes])
    # Concatenate the features in the order given by feature_names.
    concatenated_features = tf.concat([
        features[feature_name] for feature_name in self.feature_names], 1)
    return features["id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
class YT8MFrameFeatureReader(BaseReader):
  """Reads TFRecords of SequenceExamples.

  The TFRecords must contain SequenceExamples with the sparse in64 'labels'
  context feature and a fixed length byte-quantized feature vector, obtained
  from the features in 'feature_names'. The quantized features will be mapped
  back into a range between min_quantized_value and max_quantized_value.
  """

  def __init__(self,
               num_classes=3862,
               feature_sizes=None,
               feature_names=None,
               max_frames=300,
               float16_flag=False):
    """Construct a YT8MFrameFeatureReader.

    Args:
      num_classes: a positive integer for the number of classes.
      feature_sizes: positive integer(s) for the feature dimensions as a list.
        Defaults to [1024, 128].
      feature_names: the feature name(s) in the tensorflow record as a list.
        Defaults to ["rgb", "audio"].
      max_frames: the maximum number of frames to process.
      float16_flag: if True, decode the per-frame features as tf.float16
        instead of tf.float32.
    """
    # Avoid the mutable-default-argument pitfall: build fresh lists per
    # instance instead of sharing one list object across all instances.
    if feature_sizes is None:
      feature_sizes = [1024, 128]
    if feature_names is None:
      feature_names = ["rgb", "audio"]
    assert len(feature_names) == len(feature_sizes), \
    "length of feature_names (={}) != length of feature_sizes (={})".format( \
    len(feature_names), len(feature_sizes))

    self.num_classes = num_classes
    self.feature_sizes = feature_sizes
    self.feature_names = feature_names
    self.max_frames = max_frames
    self.float16_flag = float16_flag

  def get_video_matrix(self,
                       features,
                       feature_size,
                       max_frames,
                       max_quantized_value,
                       min_quantized_value):
    """Decodes features from an input string and quantizes it.

    Args:
      features: raw feature values
      feature_size: length of each frame feature vector
      max_frames: number of frames (rows) in the output feature_matrix
      max_quantized_value: the maximum of the quantized value.
      min_quantized_value: the minimum of the quantized value.

    Returns:
      feature_matrix: matrix of all frame-features
      num_frames: number of frames in the sequence
    """
    dtype = tf.float16 if self.float16_flag else tf.float32
    decoded_features = tf.reshape(
        tf.cast(tf.decode_raw(features, tf.uint8), dtype),
        [-1, feature_size])

    num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
    feature_matrix = utils.Dequantize(decoded_features,
                                      max_quantized_value,
                                      min_quantized_value)
    # Pad or truncate along the frame axis to a fixed number of rows.
    feature_matrix = resize_axis(feature_matrix, 0, max_frames)
    return feature_matrix, num_frames

  def prepare_reader(self,
                     filename_queue,
                     max_quantized_value=2,
                     min_quantized_value=-2):
    """Creates a single reader thread for YouTube8M SequenceExamples.

    Args:
      filename_queue: A tensorflow queue of filename locations.
      max_quantized_value: the maximum of the quantized value.
      min_quantized_value: the minimum of the quantized value.

    Returns:
      A tuple of video indexes, video features, labels, and padding data.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    return self.prepare_serialized_examples(serialized_example,
        max_quantized_value, min_quantized_value)

  def prepare_serialized_examples(self, serialized_example,
      max_quantized_value=2, min_quantized_value=-2):
    """Parses one serialized SequenceExample into batched tensors.

    Args:
      serialized_example: a scalar string tensor holding one serialized
        tf.SequenceExample.
      max_quantized_value: the maximum of the quantized value.
      min_quantized_value: the minimum of the quantized value.

    Returns:
      A tuple of (video ids, video features, labels, number of frames),
      each with a leading batch dimension of 1.
    """
    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={"id": tf.FixedLenFeature(
            [], tf.string),
                          "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)
            for feature_name in self.feature_names
        })

    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
            validate_indices=False),
        tf.bool))

    # loads (potentially) different types of features and concatenates them
    num_features = len(self.feature_names)
    assert num_features > 0, "No feature selected: feature_names is empty!"

    assert len(self.feature_names) == len(self.feature_sizes), \
    "length of feature_names (={}) != length of feature_sizes (={})".format( \
    len(self.feature_names), len(self.feature_sizes))

    num_frames = -1  # the number of frames in the video
    feature_matrices = [None] * num_features  # an array of different features
    for feature_index in range(num_features):
      feature_matrix, num_frames_in_this_feature = self.get_video_matrix(
          features[self.feature_names[feature_index]],
          self.feature_sizes[feature_index],
          self.max_frames,
          max_quantized_value,
          min_quantized_value)
      if num_frames == -1:
        num_frames = num_frames_in_this_feature
      else:
        # NOTE(review): in graph mode the assert op returned here is
        # discarded, so this check presumably never runs unless wired into a
        # control dependency — confirm whether the frame counts can diverge.
        tf.assert_equal(num_frames, num_frames_in_this_feature)

      feature_matrices[feature_index] = feature_matrix

    # cap the number of frames at self.max_frames
    num_frames = tf.minimum(num_frames, self.max_frames)

    # concatenate different features
    video_matrix = tf.concat(feature_matrices, 1)

    # convert to batch format.
    # TODO: Do proper batch reads to remove the IO bottleneck.
    batch_video_ids = tf.expand_dims(contexts["id"], 0)
    batch_video_matrix = tf.expand_dims(video_matrix, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)

    return batch_video_ids, batch_video_matrix, batch_labels, batch_frames
| 37.921986 | 101 | 0.696559 |
import tensorflow as tf
try:
from . import utils
except ImportError:
import utils
from tensorflow import logging
def resize_axis(tensor, axis, new_size, fill_value=0):
tensor = tf.convert_to_tensor(tensor)
shape = tf.unstack(tf.shape(tensor))
pad_shape = shape[:]
pad_shape[axis] = tf.maximum(0, new_size - shape[axis])
shape[axis] = tf.minimum(shape[axis], new_size)
shape = tf.stack(shape)
resized = tf.concat([
tf.slice(tensor, tf.zeros_like(shape), shape),
tf.fill(tf.stack(pad_shape), tf.cast(fill_value, tensor.dtype))
], axis)
new_shape = tensor.get_shape().as_list()
new_shape[axis] = new_size
resized.set_shape(new_shape)
return resized
class BaseReader(object):
def prepare_reader(self, unused_filename_queue):
raise NotImplementedError()
class YT8MAggregatedFeatureReader(BaseReader):
def __init__(self,
num_classes=3862,
feature_sizes=[1024, 128],
feature_names=["mean_rgb", "mean_audio"]):
assert len(feature_names) == len(feature_sizes), \
"length of feature_names (={}) != length of feature_sizes (={})".format( \
len(feature_names), len(feature_sizes))
self.num_classes = num_classes
self.feature_sizes = feature_sizes
self.feature_names = feature_names
def prepare_reader(self, filename_queue, batch_size=1024):
reader = tf.TFRecordReader()
_, serialized_examples = reader.read_up_to(filename_queue, batch_size)
tf.add_to_collection("serialized_examples", serialized_examples)
return self.prepare_serialized_examples(serialized_examples)
def prepare_serialized_examples(self, serialized_examples):
num_features = len(self.feature_names)
assert num_features > 0, "self.feature_names is empty!"
assert len(self.feature_names) == len(self.feature_sizes), \
"length of feature_names (={}) != length of feature_sizes (={})".format( \
len(self.feature_names), len(self.feature_sizes))
feature_map = {"id": tf.FixedLenFeature([], tf.string),
"labels": tf.VarLenFeature(tf.int64)}
for feature_index in range(num_features):
feature_map[self.feature_names[feature_index]] = tf.FixedLenFeature(
[self.feature_sizes[feature_index]], tf.float32)
features = tf.parse_example(serialized_examples, features=feature_map)
labels = tf.sparse_to_indicator(features["labels"], self.num_classes)
labels.set_shape([None, self.num_classes])
concatenated_features = tf.concat([
features[feature_name] for feature_name in self.feature_names], 1)
return features["id"], concatenated_features, labels, tf.ones([tf.shape(serialized_examples)[0]])
class YT8MFrameFeatureReader(BaseReader):
def __init__(self,
num_classes=3862,
feature_sizes=[1024, 128],
feature_names=["rgb", "audio"],
max_frames=300,
float16_flag=False):
assert len(feature_names) == len(feature_sizes), \
"length of feature_names (={}) != length of feature_sizes (={})".format( \
len(feature_names), len(feature_sizes))
self.num_classes = num_classes
self.feature_sizes = feature_sizes
self.feature_names = feature_names
self.max_frames = max_frames
self.float16_flag = float16_flag
def get_video_matrix(self,
features,
feature_size,
max_frames,
max_quantized_value,
min_quantized_value):
dtype = tf.float16 if self.float16_flag else tf.float32
decoded_features = tf.reshape(
tf.cast(tf.decode_raw(features, tf.uint8), dtype),
[-1, feature_size])
num_frames = tf.minimum(tf.shape(decoded_features)[0], max_frames)
feature_matrix = utils.Dequantize(decoded_features,
max_quantized_value,
min_quantized_value)
feature_matrix = resize_axis(feature_matrix, 0, max_frames)
return feature_matrix, num_frames
def prepare_reader(self,
filename_queue,
max_quantized_value=2,
min_quantized_value=-2):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
return self.prepare_serialized_examples(serialized_example,
max_quantized_value, min_quantized_value)
def prepare_serialized_examples(self, serialized_example,
max_quantized_value=2, min_quantized_value=-2):
contexts, features = tf.parse_single_sequence_example(
serialized_example,
context_features={"id": tf.FixedLenFeature(
[], tf.string),
"labels": tf.VarLenFeature(tf.int64)},
sequence_features={
feature_name : tf.FixedLenSequenceFeature([], dtype=tf.string)
for feature_name in self.feature_names
})
labels = (tf.cast(
tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
validate_indices=False),
tf.bool))
num_features = len(self.feature_names)
assert num_features > 0, "No feature selected: feature_names is empty!"
assert len(self.feature_names) == len(self.feature_sizes), \
"length of feature_names (={}) != length of feature_sizes (={})".format( \
len(self.feature_names), len(self.feature_sizes))
num_frames = -1
feature_matrices = [None] * num_features
for feature_index in range(num_features):
feature_matrix, num_frames_in_this_feature = self.get_video_matrix(
features[self.feature_names[feature_index]],
self.feature_sizes[feature_index],
self.max_frames,
max_quantized_value,
min_quantized_value)
if num_frames == -1:
num_frames = num_frames_in_this_feature
else:
tf.assert_equal(num_frames, num_frames_in_this_feature)
feature_matrices[feature_index] = feature_matrix
num_frames = tf.minimum(num_frames, self.max_frames)
video_matrix = tf.concat(feature_matrices, 1)
batch_video_ids = tf.expand_dims(contexts["id"], 0)
batch_video_matrix = tf.expand_dims(video_matrix, 0)
batch_labels = tf.expand_dims(labels, 0)
batch_frames = tf.expand_dims(num_frames, 0)
return batch_video_ids, batch_video_matrix, batch_labels, batch_frames
| true | true |
1c45ffd929e7bc233ccb8e5fdcdba952ce40321b | 6,303 | py | Python | userbot/plugins/updater.py | kumar451/CatUserbot | 44fab853232fad163fee63565cc4f3e645596527 | [
"MIT"
] | null | null | null | userbot/plugins/updater.py | kumar451/CatUserbot | 44fab853232fad163fee63565cc4f3e645596527 | [
"MIT"
] | null | null | null | userbot/plugins/updater.py | kumar451/CatUserbot | 44fab853232fad163fee63565cc4f3e645596527 | [
"MIT"
] | null | null | null | """Update UserBot code (for Xtra-Telegram)
Syntax: .update
\nAll Credits goes to © @Three_Cube_TeKnoways
\nFor this awasome plugin.\nPorted from PpaperPlane Extended"""
from os import remove, execle, path, makedirs, getenv, environ,execl
from shutil import rmtree
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from userbot import CMD_HELP, bot
from userbot.utils import admin_cmd
UPSTREAM_REPO_URL = "https://github.com/Jisan09/catuserbot.git"
HEROKU_API_KEY = Var.HEROKU_API_KEY
HEROKU_APP_NAME = Var.HEROKU_APP_NAME
requirements_path = path.join(
path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
async def gen_chlog(repo, diff):
ch_log = ''
d_form = "%d/%m/%y"
for c in repo.iter_commits(diff):
ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} by <{c.author}>\n'
return ch_log
async def update_requirements():
reqs = str(requirements_path)
try:
process = await asyncio.create_subprocess_shell(
' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
await process.communicate()
return process.returncode
except Exception as e:
return repr(e)
@borg.on(admin_cmd(pattern="update ?(.*)", outgoing=True))
async def upstream(ups):
"For .update command, check if the bot is up to date, update if specified"
conf = ups.pattern_match.group(1)
await ups.edit("Checking for updates, please wait....")
off_repo = UPSTREAM_REPO_URL
force_update = False
try:
txt = "Oops.. Updater cannot continue due to "
txt += "some problems occured`\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await ups.edit(f'{txt}\ndirectory {error} is not found')
repo.__del__()
return
except GitCommandError as error:
await ups.edit(f'{txt}\nEarly failure! {error}')
repo.__del__()
return
except InvalidGitRepositoryError as error:
if conf != "now":
await ups.edit(f"Unfortunately, the directory {error} does not seem to be a git repository.\
\nBut we can fix that by force updating the userbot using `.update now`.")
return
repo = Repo.init()
origin = repo.create_remote('upstream', off_repo)
origin.fetch()
force_update = True
repo.create_head('master', origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != 'master':
await ups.edit(
f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). '
'in that case, Updater is unable to identify '
'which branch is to be merged. '
'please checkout to any official branch`')
repo.__del__()
return
try:
repo.create_remote('upstream', off_repo)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog and not force_update:
await ups.edit(
f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n')
repo.__del__()
return
if conf != "now" and not force_update:
changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit("`Changelog is too big, view the file to see it.`")
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond("do `.update now` to update")
return
if force_update:
await ups.edit('Force-Syncing to latest stable userbot code, please wait...')
else:
await ups.edit('Updating userbot, please wait....')
if HEROKU_API_KEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_API_KEY)
heroku_app = None
heroku_applications = heroku.apps()
if not HEROKU_APP_NAME:
await ups.edit('CAT Please set up the `HEROKU_APP_NAME` variable to be able to update userbot.')
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APP_NAME:
heroku_app = app
break
if heroku_app is None:
await ups.edit(
f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`'
)
repo.__del__()
return
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_API_KEY + "@")
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
await ups.edit("Updating and Deploying New Update. Please wait for 5 minutes then use `.alive` to check if i'm working or not.")
remote.push(refspec="HEAD:refs/heads/master", force=True)
else:
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
reqs_upgrade = await update_requirements()
await ups.edit('`Successfully Updated!\n'
'Bot is restarting... Wait for a second!`')
# Spin a new instance of bot
args = [sys.executable, "-m", "userbot"]
execle(sys.executable, *args, environ)
return
CMD_HELP.update({
'update':
".update\
\nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\
\n\n.update now\
\nUsage: Updates your userbot, if there are any updates in the main userbot repository."
})
| 38.2 | 136 | 0.618912 |
from os import remove, execle, path, makedirs, getenv, environ,execl
from shutil import rmtree
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from userbot import CMD_HELP, bot
from userbot.utils import admin_cmd
UPSTREAM_REPO_URL = "https://github.com/Jisan09/catuserbot.git"
HEROKU_API_KEY = Var.HEROKU_API_KEY
HEROKU_APP_NAME = Var.HEROKU_APP_NAME
requirements_path = path.join(
path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
async def gen_chlog(repo, diff):
ch_log = ''
d_form = "%d/%m/%y"
for c in repo.iter_commits(diff):
ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} by <{c.author}>\n'
return ch_log
async def update_requirements():
reqs = str(requirements_path)
try:
process = await asyncio.create_subprocess_shell(
' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
await process.communicate()
return process.returncode
except Exception as e:
return repr(e)
@borg.on(admin_cmd(pattern="update ?(.*)", outgoing=True))
async def upstream(ups):
conf = ups.pattern_match.group(1)
await ups.edit("Checking for updates, please wait....")
off_repo = UPSTREAM_REPO_URL
force_update = False
try:
txt = "Oops.. Updater cannot continue due to "
txt += "some problems occured`\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await ups.edit(f'{txt}\ndirectory {error} is not found')
repo.__del__()
return
except GitCommandError as error:
await ups.edit(f'{txt}\nEarly failure! {error}')
repo.__del__()
return
except InvalidGitRepositoryError as error:
if conf != "now":
await ups.edit(f"Unfortunately, the directory {error} does not seem to be a git repository.\
\nBut we can fix that by force updating the userbot using `.update now`.")
return
repo = Repo.init()
origin = repo.create_remote('upstream', off_repo)
origin.fetch()
force_update = True
repo.create_head('master', origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != 'master':
await ups.edit(
f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). '
'in that case, Updater is unable to identify '
'which branch is to be merged. '
'please checkout to any official branch`')
repo.__del__()
return
try:
repo.create_remote('upstream', off_repo)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog and not force_update:
await ups.edit(
f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n')
repo.__del__()
return
if conf != "now" and not force_update:
changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit("`Changelog is too big, view the file to see it.`")
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond("do `.update now` to update")
return
if force_update:
await ups.edit('Force-Syncing to latest stable userbot code, please wait...')
else:
await ups.edit('Updating userbot, please wait....')
if HEROKU_API_KEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_API_KEY)
heroku_app = None
heroku_applications = heroku.apps()
if not HEROKU_APP_NAME:
await ups.edit('CAT Please set up the `HEROKU_APP_NAME` variable to be able to update userbot.')
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APP_NAME:
heroku_app = app
break
if heroku_app is None:
await ups.edit(
f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`'
)
repo.__del__()
return
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_API_KEY + "@")
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
await ups.edit("Updating and Deploying New Update. Please wait for 5 minutes then use `.alive` to check if i'm working or not.")
remote.push(refspec="HEAD:refs/heads/master", force=True)
else:
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
reqs_upgrade = await update_requirements()
await ups.edit('`Successfully Updated!\n'
'Bot is restarting... Wait for a second!`')
# Spin a new instance of bot
args = [sys.executable, "-m", "userbot"]
execle(sys.executable, *args, environ)
return
CMD_HELP.update({
'update':
".update\
\nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\
\n\n.update now\
\nUsage: Updates your userbot, if there are any updates in the main userbot repository."
})
| true | true |
1c460171229eb1de0dd2daccc2cae4a857668e41 | 6,051 | py | Python | util.py | bbitarello/ldpred | b84b99f23dc83dc164300b8dee6678207461a751 | [
"MIT"
] | 89 | 2016-06-03T14:31:21.000Z | 2022-02-22T02:15:45.000Z | util.py | bbitarello/ldpred | b84b99f23dc83dc164300b8dee6678207461a751 | [
"MIT"
] | 143 | 2016-08-10T14:06:53.000Z | 2021-07-04T10:29:26.000Z | util.py | bbitarello/ldpred | b84b99f23dc83dc164300b8dee6678207461a751 | [
"MIT"
] | 68 | 2016-08-05T14:56:39.000Z | 2021-12-10T15:43:35.000Z | """
Various general utility functions.
"""
import scipy as sp
from scipy import stats
import pickle
import gzip
import os
from itertools import takewhile
from itertools import repeat
import sys
import re
# LDpred currently ignores the Y and MT chromosomes.
ok_chromosomes = set(range(1, 24))
chromosomes_list = ['chrom_%d' % (chrom) for chrom in ok_chromosomes]
chrom_name_map = {'X':23,'chr_X':23,'chrom_X':23}
for chrom in ok_chromosomes:
chrom_name_map['%d' % (chrom)]=chrom
chrom_name_map['chrom_%d' % (chrom)]=chrom
chrom_name_map['chr_%d' % (chrom)]=chrom
def get_chrom_num(chrom):
return chrom_name_map.get(re.sub("chr", "", chrom),0)
#Various auxiliary variables
ambig_nts = set([('A', 'T'), ('T', 'A'), ('G', 'C'), ('C', 'G')])
opp_strand_dict = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'}
valid_nts = set(['A', 'T', 'C', 'G'])
# LDpred currently ignores the Y and MT chromosomes.
valid_chromosomes = ['%d' % (x) for x in range(1, 24)]
valid_chromosomes.append('X')
chromosomes_list = ['chrom_%s' % (chrom) for chrom in valid_chromosomes]
#Conversion sizes for strings (necessary for using h5py and python 3)
fids_dtype = '|S64'
iids_dtype = '|S64'
sids_dtype = "|S30"
nts_dtype = "|S1"
sids_u_dtype = '<U30'
nts_u_dtype = '<U1'
my_path = os.path.dirname(os.path.abspath(__file__))
hm3_file = os.path.join(my_path, 'reference','hm3_sids.txt.gz')
lrld_file = os.path.join(my_path, 'reference','long-range-ld-price-2008hg38.txt')
def check_chromosomes(missing_chromosomes):
if len(missing_chromosomes) > 0:
print('Ignored chromosomes:', ','.join(list(missing_chromosomes)))
print('Please note that only data on chromosomes 1-23, and X is parsed.')
def calc_auc(y_true, y_hat, show_plot=False):
"""
Calculate the Area Under the Curve (AUC) for a predicted and observed case-control phenotype.
"""
y_true = sp.copy(y_true)
if len(sp.unique(y_true)) == 2:
y_min = y_true.min()
y_max = y_true.max()
if y_min != 0 or y_max != 1:
print('Transforming back to a dichotomous trait')
y_true[y_true == y_min] = 0
y_true[y_true == y_max] = 1
else:
print('Warning: Calculating AUC for a quantitative phenotype.')
y_mean = sp.mean(y_true)
zero_filter = y_true <= y_mean
one_filter = y_true > y_mean
y_true[zero_filter] = 0
y_true[one_filter] = 1
num_cases = sp.sum(y_true == 1)
num_controls = sp.sum(y_true == 0)
assert num_cases + num_controls == len(y_true), 'The phenotype is not defined as expected. It is not binary (0 1 case-control status).'
print('%d cases, %d controls' % (num_cases, num_controls))
num_indivs = float(len(y_true))
tot_num_pos = float(sp.sum(y_true))
tot_num_neg = float(num_indivs - tot_num_pos)
l = y_hat.tolist()
l.sort(reverse=True)
roc_x = []
roc_y = []
auc = 0.0
prev_fpr = 0.0
for thres in l:
thres_filter = y_hat >= thres
y_t = y_true[thres_filter]
n = len(y_t)
tp = sp.sum(y_t)
fp = n - tp
fpr = fp / tot_num_neg
tpr = tp / tot_num_pos
roc_x.append(fpr)
roc_y.append(tpr)
delta_fpr = fpr - prev_fpr
auc += tpr * delta_fpr
prev_fpr = fpr
print('AUC: %0.4f' % auc)
if show_plot:
import pylab
pylab.plot(roc_x, roc_y)
pylab.show()
return auc
def obs_h2_to_liab(R2_osb,K=0.01,P=0.5):
"""
Transformation from observed to liability scale.
Lee et al. AJHG 2011 conversion?
For heritability only
"""
t = stats.norm.ppf(1-K)
z = stats.norm.pdf(t)
c = P*(1-P)*z**2/(K**2*(1-K)**2)
R2_liab = R2_osb/c
return R2_liab
def obs_r2_to_liab(R2_osb,K=0.01,P=0.5):
"""
Lee et al., Gen Epi 2012 conversion
For R2 only
"""
t = stats.norm.ppf(K)
z = stats.norm.pdf(t)
m = z/K
C = (K*(1-K))**2/((z**2)*(P*(1-P)))
d = m*((P-K)/(1-K))
theta =d**2 - d*t
R2_liab_cc = (R2_osb*C)/(1+(R2_osb*C*theta))
return R2_liab_cc
def load_hapmap_SNPs():
f = gzip.open(hm3_file, 'r')
hm3_sids = pickle.load(f)
f.close()
return hm3_sids
def load_lrld_dict():
#Load Price et al. AJHG 2008 long range LD table.
d = {}
for chrom in ok_chromosomes:
d[chrom] = {'reg_dict':{}}
with open(lrld_file, 'r') as f:
for line in f:
l = line.split()
d[chrom_name_map[l[0]]][l[3]] = {'start_pos':int(l[1]), 'end_pos':int(l[2])}
return d
def is_in_lrld(chrom, pos, lrld_dict):
if len(lrld_dict[chrom]['reg_dict'])==0:
return False
else:
for lrld_reg in lrld_dict[chrom]['reg_dict']:
if lrld_reg['start_pos'] < pos < lrld_reg['end_pos']:
return True
else:
return False
def get_snp_lrld_status(chromosome, positions, lrld_dict):
snp_lrld = sp.zeros(len(positions))
for snp_i in range(len(positions)):
snp_lrld[snp_i] = is_in_lrld(chromosome, positions[snp_i], lrld_dict)
return snp_lrld
def is_gz(name):
return name.lower().endswith(('.gz', '.gzip'))
def count_lines(filename):
if sys.version_info >= (3,0):
return count_lines_fast(filename)
else:
return count_lines_slow(filename)
def count_lines_fast(filename):
opener = open
if is_gz(filename):
opener = gzip.open
try:
with opener(filename, 'rb') as f:
bufgen = takewhile(lambda x: x, (f.raw.read(1024*1024) for _ in repeat(None)))
num_lines =sum( buf.count(b'\n') for buf in bufgen )
except Exception:
num_lines = -1
return num_lines
def count_lines_slow(filename):
opener = open
if is_gz(filename):
opener = gzip.open
try:
with opener(filename, 'rb') as f:
num_lines = sum(1 for line in f)
except Exception:
num_lines=-1
return num_lines
| 27.013393 | 139 | 0.605354 | import scipy as sp
from scipy import stats
import pickle
import gzip
import os
from itertools import takewhile
from itertools import repeat
import sys
import re
ok_chromosomes = set(range(1, 24))
chromosomes_list = ['chrom_%d' % (chrom) for chrom in ok_chromosomes]
chrom_name_map = {'X':23,'chr_X':23,'chrom_X':23}
for chrom in ok_chromosomes:
chrom_name_map['%d' % (chrom)]=chrom
chrom_name_map['chrom_%d' % (chrom)]=chrom
chrom_name_map['chr_%d' % (chrom)]=chrom
def get_chrom_num(chrom):
return chrom_name_map.get(re.sub("chr", "", chrom),0)
ambig_nts = set([('A', 'T'), ('T', 'A'), ('G', 'C'), ('C', 'G')])
opp_strand_dict = {'A': 'T', 'G': 'C', 'T': 'A', 'C': 'G'}
valid_nts = set(['A', 'T', 'C', 'G'])
valid_chromosomes = ['%d' % (x) for x in range(1, 24)]
valid_chromosomes.append('X')
chromosomes_list = ['chrom_%s' % (chrom) for chrom in valid_chromosomes]
fids_dtype = '|S64'
iids_dtype = '|S64'
sids_dtype = "|S30"
nts_dtype = "|S1"
sids_u_dtype = '<U30'
nts_u_dtype = '<U1'
my_path = os.path.dirname(os.path.abspath(__file__))
hm3_file = os.path.join(my_path, 'reference','hm3_sids.txt.gz')
lrld_file = os.path.join(my_path, 'reference','long-range-ld-price-2008hg38.txt')
def check_chromosomes(missing_chromosomes):
if len(missing_chromosomes) > 0:
print('Ignored chromosomes:', ','.join(list(missing_chromosomes)))
print('Please note that only data on chromosomes 1-23, and X is parsed.')
def calc_auc(y_true, y_hat, show_plot=False):
y_true = sp.copy(y_true)
if len(sp.unique(y_true)) == 2:
y_min = y_true.min()
y_max = y_true.max()
if y_min != 0 or y_max != 1:
print('Transforming back to a dichotomous trait')
y_true[y_true == y_min] = 0
y_true[y_true == y_max] = 1
else:
print('Warning: Calculating AUC for a quantitative phenotype.')
y_mean = sp.mean(y_true)
zero_filter = y_true <= y_mean
one_filter = y_true > y_mean
y_true[zero_filter] = 0
y_true[one_filter] = 1
num_cases = sp.sum(y_true == 1)
num_controls = sp.sum(y_true == 0)
assert num_cases + num_controls == len(y_true), 'The phenotype is not defined as expected. It is not binary (0 1 case-control status).'
print('%d cases, %d controls' % (num_cases, num_controls))
num_indivs = float(len(y_true))
tot_num_pos = float(sp.sum(y_true))
tot_num_neg = float(num_indivs - tot_num_pos)
l = y_hat.tolist()
l.sort(reverse=True)
roc_x = []
roc_y = []
auc = 0.0
prev_fpr = 0.0
for thres in l:
thres_filter = y_hat >= thres
y_t = y_true[thres_filter]
n = len(y_t)
tp = sp.sum(y_t)
fp = n - tp
fpr = fp / tot_num_neg
tpr = tp / tot_num_pos
roc_x.append(fpr)
roc_y.append(tpr)
delta_fpr = fpr - prev_fpr
auc += tpr * delta_fpr
prev_fpr = fpr
print('AUC: %0.4f' % auc)
if show_plot:
import pylab
pylab.plot(roc_x, roc_y)
pylab.show()
return auc
def obs_h2_to_liab(R2_osb,K=0.01,P=0.5):
t = stats.norm.ppf(1-K)
z = stats.norm.pdf(t)
c = P*(1-P)*z**2/(K**2*(1-K)**2)
R2_liab = R2_osb/c
return R2_liab
def obs_r2_to_liab(R2_osb,K=0.01,P=0.5):
t = stats.norm.ppf(K)
z = stats.norm.pdf(t)
m = z/K
C = (K*(1-K))**2/((z**2)*(P*(1-P)))
d = m*((P-K)/(1-K))
theta =d**2 - d*t
R2_liab_cc = (R2_osb*C)/(1+(R2_osb*C*theta))
return R2_liab_cc
def load_hapmap_SNPs():
f = gzip.open(hm3_file, 'r')
hm3_sids = pickle.load(f)
f.close()
return hm3_sids
def load_lrld_dict():
d = {}
for chrom in ok_chromosomes:
d[chrom] = {'reg_dict':{}}
with open(lrld_file, 'r') as f:
for line in f:
l = line.split()
d[chrom_name_map[l[0]]][l[3]] = {'start_pos':int(l[1]), 'end_pos':int(l[2])}
return d
def is_in_lrld(chrom, pos, lrld_dict):
if len(lrld_dict[chrom]['reg_dict'])==0:
return False
else:
for lrld_reg in lrld_dict[chrom]['reg_dict']:
if lrld_reg['start_pos'] < pos < lrld_reg['end_pos']:
return True
else:
return False
def get_snp_lrld_status(chromosome, positions, lrld_dict):
snp_lrld = sp.zeros(len(positions))
for snp_i in range(len(positions)):
snp_lrld[snp_i] = is_in_lrld(chromosome, positions[snp_i], lrld_dict)
return snp_lrld
def is_gz(name):
return name.lower().endswith(('.gz', '.gzip'))
def count_lines(filename):
if sys.version_info >= (3,0):
return count_lines_fast(filename)
else:
return count_lines_slow(filename)
def count_lines_fast(filename):
opener = open
if is_gz(filename):
opener = gzip.open
try:
with opener(filename, 'rb') as f:
bufgen = takewhile(lambda x: x, (f.raw.read(1024*1024) for _ in repeat(None)))
num_lines =sum( buf.count(b'\n') for buf in bufgen )
except Exception:
num_lines = -1
return num_lines
def count_lines_slow(filename):
opener = open
if is_gz(filename):
opener = gzip.open
try:
with opener(filename, 'rb') as f:
num_lines = sum(1 for line in f)
except Exception:
num_lines=-1
return num_lines
| true | true |
1c4601d6ca6b386fcd89245ffea8fedcc89875c1 | 2,924 | py | Python | test/functional/mempool_resurrect.py | patrykwnosuch/machinecoin-core | b6783c857f43f7f077de594d1e03d156f5295b9c | [
"MIT"
] | 1 | 2019-05-27T11:12:53.000Z | 2019-05-27T11:12:53.000Z | test/functional/mempool_resurrect.py | patrykwnosuch/machinecoin-core | b6783c857f43f7f077de594d1e03d156f5295b9c | [
"MIT"
] | null | null | null | test/functional/mempool_resurrect.py | patrykwnosuch/machinecoin-core | b6783c857f43f7f077de594d1e03d156f5295b9c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resurrection of mined transactions when the blockchain is re-organized."""
from test_framework.blocktools import create_raw_transaction
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.util import assert_equal
class MempoolCoinbaseTest(MachinecoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
# Spend block 1/2/3's coinbase transactions
# Mine a block.
# Create three more transactions, spending the spends
# Mine another block.
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again.
b = [self.nodes[0].getblockhash(n) for n in range(1, 4)]
coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
spends1_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids]
spends1_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw]
blocks = []
blocks.extend(self.nodes[0].generate(1))
spends2_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.98) for txid in spends1_id]
spends2_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw]
blocks.extend(self.nodes[0].generate(1))
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
# Use invalidateblock to re-org back
for node in self.nodes:
node.invalidateblock(blocks[0])
# All txns should be back in mempool with 0 confirmations
assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] == 0)
# Generate another block, they should all get mined
self.nodes[0].generate(1)
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| 41.183099 | 123 | 0.675103 |
from test_framework.blocktools import create_raw_transaction
from test_framework.test_framework import MachinecoinTestFramework
from test_framework.util import assert_equal
class MempoolCoinbaseTest(MachinecoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
# Mine a block.
# Create three more transactions, spending the spends
# Mine another block.
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again.
b = [self.nodes[0].getblockhash(n) for n in range(1, 4)]
coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
spends1_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids]
spends1_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw]
blocks = []
blocks.extend(self.nodes[0].generate(1))
spends2_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.98) for txid in spends1_id]
spends2_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw]
blocks.extend(self.nodes[0].generate(1))
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
# Use invalidateblock to re-org back
for node in self.nodes:
node.invalidateblock(blocks[0])
# All txns should be back in mempool with 0 confirmations
assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] == 0)
# Generate another block, they should all get mined
self.nodes[0].generate(1)
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| true | true |
1c4602bc2b8f35a6487314afa0547956c601bf34 | 3,032 | py | Python | estatisticas_facebook/users/migrations/0001_initial.py | danieldourado/estatisticas_facebook_django | 67274e647cf9e2261f1a7810cd9862a4040dfc06 | [
"MIT"
] | 2 | 2017-12-22T01:00:22.000Z | 2017-12-22T11:14:40.000Z | estatisticas_facebook/users/migrations/0001_initial.py | danieldourado/estatisticas_facebook_django | 67274e647cf9e2261f1a7810cd9862a4040dfc06 | [
"MIT"
] | 18 | 2017-12-14T12:04:45.000Z | 2022-03-11T23:23:05.000Z | estatisticas_facebook/users/migrations/0001_initial.py | danieldourado/estatisticas_facebook_django | 67274e647cf9e2261f1a7810cd9862a4040dfc06 | [
"MIT"
] | 1 | 2021-03-27T16:18:56.000Z | 2021-03-27T16:18:56.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-06 11:33
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('name', models.CharField(blank=True, max_length=255, verbose_name='Name of User')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| 63.166667 | 329 | 0.662929 |
from __future__ import unicode_literals
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('name', models.CharField(blank=True, max_length=255, verbose_name='Name of User')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| true | true |
1c460421d2d07bef9ca1d09f5ba7710449101a3e | 689 | py | Python | battleships/managers/GameOver.py | wkta/Python-Bataille-Navale | b6f725519cf1cf559e60ec766aa4f059463b1493 | [
"MIT"
] | null | null | null | battleships/managers/GameOver.py | wkta/Python-Bataille-Navale | b6f725519cf1cf559e60ec766aa4f059463b1493 | [
"MIT"
] | null | null | null | battleships/managers/GameOver.py | wkta/Python-Bataille-Navale | b6f725519cf1cf559e60ec766aa4f059463b1493 | [
"MIT"
] | 1 | 2019-12-03T15:42:38.000Z | 2019-12-03T15:42:38.000Z | # Copyright © 2019 CAILLAUD Jean-Baptiste.
# import the engine.
import engine
class GameOver(engine.LevelManager):
"""
Renders the game over screen.
"""
def begin(self):
# Add the close handler.
engine.Engine.input_handler.add_listener(engine.CloseOnEscapeOrQuit())
# Create the game over message.
text = engine.TextGameObject(
engine.Engine.scene,
"Futura",
48,
"You won !" if engine.Engine.game_manager.winner == 0 else "You lost ...",
(255, 255, 255)
)
text.transform.position = engine.math.Vector2(256, 256)
text.transform.offset = text.size / -2
| 26.5 | 86 | 0.596517 |
import engine
class GameOver(engine.LevelManager):
def begin(self):
engine.Engine.input_handler.add_listener(engine.CloseOnEscapeOrQuit())
text = engine.TextGameObject(
engine.Engine.scene,
"Futura",
48,
"You won !" if engine.Engine.game_manager.winner == 0 else "You lost ...",
(255, 255, 255)
)
text.transform.position = engine.math.Vector2(256, 256)
text.transform.offset = text.size / -2
| true | true |
1c460473d11c959ddd81ee921f32596dbd8e1e9e | 754 | py | Python | snakewatch/action/__init__.py | asoc/snakewatch | 347b94e0ca59cdb309a6950fa3b5464c8d0081f8 | [
"BSD-3-Clause"
] | null | null | null | snakewatch/action/__init__.py | asoc/snakewatch | 347b94e0ca59cdb309a6950fa3b5464c8d0081f8 | [
"BSD-3-Clause"
] | null | null | null | snakewatch/action/__init__.py | asoc/snakewatch | 347b94e0ca59cdb309a6950fa3b5464c8d0081f8 | [
"BSD-3-Clause"
] | null | null | null | """
This file is part of snakewatch.
snakewatch is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
snakewatch is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with snakewatch. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function, absolute_import, unicode_literals, division
| 37.7 | 82 | 0.795756 |
from __future__ import print_function, absolute_import, unicode_literals, division
| true | true |
1c4604bd0cd7e6cda5fbccf019cd6209998f11b6 | 2,134 | py | Python | tests/unit/pypyr/parser/jsonfile_test.py | pypyr/pypyr-cli | dc0f694ac0c0e3c2844c1a20788c9af586a8a16e | [
"Apache-2.0"
] | 31 | 2017-03-24T11:27:34.000Z | 2020-05-27T20:06:28.000Z | tests/unit/pypyr/parser/jsonfile_test.py | pypyr/pypyr-cli | dc0f694ac0c0e3c2844c1a20788c9af586a8a16e | [
"Apache-2.0"
] | 89 | 2017-04-12T09:50:32.000Z | 2020-08-13T13:18:36.000Z | tests/unit/pypyr/parser/jsonfile_test.py | pypyr/pypyr-cli | dc0f694ac0c0e3c2844c1a20788c9af586a8a16e | [
"Apache-2.0"
] | 6 | 2017-06-04T14:19:59.000Z | 2020-02-10T13:16:40.000Z | """jsonfile.py unit tests."""
from unittest.mock import patch
import pytest
import pypyr.parser.jsonfile
def test_json_file_open_fails_on_arbitrary_string():
"""Non path-y input string should fail."""
with pytest.raises(FileNotFoundError):
pypyr.parser.jsonfile.get_parsed_context('value 1,value 2, value3')
def test_json_file_open_fails_on_empty_string():
"""Non path-y input string should fail."""
with pytest.raises(AssertionError):
pypyr.parser.jsonfile.get_parsed_context(None)
def test_json_pass(fs):
"""Relative path to json should succeed."""
in_path = './tests/testfiles/test.json'
fs.create_file(in_path, contents="""{
"key1": "value1",
"key2": "value2",
"key3": "value3"
}
""")
context = pypyr.parser.jsonfile.get_parsed_context([in_path])
assert context, "context shouldn't be None"
assert len(context) == 3, "context should have 3 items"
assert context["key2"] == "value2", "key2 should be value2"
@patch('pypyr.config.config.default_encoding', new='utf-16')
def test_json_pass_with_encoding(fs):
"""Relative path to json should succeed with encoding."""
in_path = './tests/testfiles/test.json'
fs.create_file(in_path, contents="""{
"key1": "value1",
"key2": "value2",
"key3": "value3"
}
""", encoding='utf-16')
context = pypyr.parser.jsonfile.get_parsed_context([in_path])
assert context, "context shouldn't be None"
assert len(context) == 3, "context should have 3 items"
assert context["key2"] == "value2", "key2 should be value2"
def test_json_parse_not_mapping_at_root(fs):
"""Not mapping at root level raises."""
in_path = './tests/testfiles/singleliteral.json'
fs.create_file(in_path, contents='123')
with pytest.raises(TypeError) as err_info:
pypyr.parser.jsonfile.get_parsed_context([in_path])
assert str(err_info.value) == (
"json input should describe an object at the top "
"level. You should have something like\n"
"{\n\"key1\":\"value1\",\n\"key2\":\"value2\"\n}\n"
"at the json top-level, not an [array] or literal.")
| 31.850746 | 75 | 0.679944 | from unittest.mock import patch
import pytest
import pypyr.parser.jsonfile
def test_json_file_open_fails_on_arbitrary_string():
with pytest.raises(FileNotFoundError):
pypyr.parser.jsonfile.get_parsed_context('value 1,value 2, value3')
def test_json_file_open_fails_on_empty_string():
with pytest.raises(AssertionError):
pypyr.parser.jsonfile.get_parsed_context(None)
def test_json_pass(fs):
in_path = './tests/testfiles/test.json'
fs.create_file(in_path, contents="""{
"key1": "value1",
"key2": "value2",
"key3": "value3"
}
""")
context = pypyr.parser.jsonfile.get_parsed_context([in_path])
assert context, "context shouldn't be None"
assert len(context) == 3, "context should have 3 items"
assert context["key2"] == "value2", "key2 should be value2"
@patch('pypyr.config.config.default_encoding', new='utf-16')
def test_json_pass_with_encoding(fs):
in_path = './tests/testfiles/test.json'
fs.create_file(in_path, contents="""{
"key1": "value1",
"key2": "value2",
"key3": "value3"
}
""", encoding='utf-16')
context = pypyr.parser.jsonfile.get_parsed_context([in_path])
assert context, "context shouldn't be None"
assert len(context) == 3, "context should have 3 items"
assert context["key2"] == "value2", "key2 should be value2"
def test_json_parse_not_mapping_at_root(fs):
in_path = './tests/testfiles/singleliteral.json'
fs.create_file(in_path, contents='123')
with pytest.raises(TypeError) as err_info:
pypyr.parser.jsonfile.get_parsed_context([in_path])
assert str(err_info.value) == (
"json input should describe an object at the top "
"level. You should have something like\n"
"{\n\"key1\":\"value1\",\n\"key2\":\"value2\"\n}\n"
"at the json top-level, not an [array] or literal.")
| true | true |
1c46065a2d7cec80d32a5396991fd1b74b074e66 | 8,727 | py | Python | syncflux.py | nagylzs/syncflux | c070267065cad817708d0680e17bfe5f8942310f | [
"Apache-2.0"
] | null | null | null | syncflux.py | nagylzs/syncflux | c070267065cad817708d0680e17bfe5f8942310f | [
"Apache-2.0"
] | null | null | null | syncflux.py | nagylzs/syncflux | c070267065cad817708d0680e17bfe5f8942310f | [
"Apache-2.0"
] | null | null | null | import copy
import datetime
import sys
import os
import time
import argparse
import traceback
import pytz
import syncthing
from influxdb import InfluxDBClient
import yaml
from yaml2dataclass import Schema, SchemaPath
from typing import Optional, Dict, Type, List
from dataclasses import dataclass, asdict, field
@dataclass
class SyncthingConfiguration(Schema):
name: str
api_key: str
host: str = 'localhost'
port: int = field(default=8384)
timeout: float = field(default=10.0)
is_https: bool = field(default=False)
ssl_cert_file: Optional[str] = field(default=None)
tags: Optional[List[str]] = field(default_factory=lambda: [])
def get_client_params(self):
result = asdict(self)
if "name" in result:
del result["name"]
if "tags" in result:
del result["tags"]
return result
@dataclass
class InfluxDbConfiguration(Schema):
host: str
port: int # Common ports: 443
ssl: bool
verify_ssl: bool
database: str
username: str
password: str
def get_client_params(self):
result = asdict(self)
if "tags" in result:
del result["tags"]
return result
@dataclass
class MeasurementConfiguration(Schema):
devices: str
folders: str
@dataclass
class AppConfiguration(Schema):
syncthings: Dict[str, SyncthingConfiguration]
influxes: Dict[str, InfluxDbConfiguration]
measurements: MeasurementConfiguration
@classmethod
def _load_dict(cls, props_dict, dest_cls: Type[Schema], add_name: bool = False):
result = {}
for name, value in props_dict.items():
arguments = {}
arguments.update(value)
if add_name:
arguments["name"] = name
result[name] = dest_cls.scm_load_from_dict(arguments)
return result
@classmethod
def scm_convert(cls, values: dict, path: SchemaPath):
values["syncthings"] = cls._load_dict(values["syncthings"], SyncthingConfiguration, True)
values["influxes"] = cls._load_dict(values["influxes"], InfluxDbConfiguration)
return values
def load_app_config(stream) -> AppConfiguration:
"""Load application configuration from a stream."""
obj = yaml.safe_load(stream)
return AppConfiguration.scm_load_from_dict(obj)
def error(message: str):
sys.stderr.write("\nerror: " + message + "\n")
sys.stderr.flush()
raise SystemExit(-1)
def info(*values):
if not args.silent:
print(*values)
def main():
# Collect data
points = []
for sync in config.syncthings.values():
info(" Connect syncthing %s" % sync.name)
proto_tags = {"cfg_name": sync.name}
if sync.tags:
proto_tags.update(sync.tags)
conn_args = sync.get_client_params()
q_started = time.time()
conn = syncthing.Syncthing(**conn_args)
now = datetime.datetime.now(tz=pytz.UTC)
sync_cfg = conn.system.config()
# My own device id
my_device = sync_cfg["defaults"]["folder"]["devices"][0]
my_id = my_device["deviceID"]
proto_tags["my_id"] = my_id
# Collect device stats
device_stats = conn.stats.device()
# List all remote devices
remote_devices = []
for device in sync_cfg["devices"]:
device_id = device["deviceID"]
if device_id == my_id:
proto_tags["my_name"] = device["name"]
else:
stats = device_stats[device_id]
last_seen = syncthing.parse_datetime(stats["lastSeen"])
last_seen_since = now - last_seen
remote_devices.append({
"tags": {
"id": device["deviceID"], # Device ID
"name": device["name"], # Device Name
},
"fields": {
"last_seen_since_sec": last_seen_since.total_seconds(), # Number of seconds last seen
}
})
# Folders
folders = []
for folder in sync_cfg["folders"]:
# Get completion for my own device
completion = conn.database.completion(my_id, folder["id"])
folders.append({
"tags": {"id": folder["id"], "label": folder["label"], "path": folder["path"]},
"fields": {"completion": completion},
})
q_elapsed = time.time() - q_started
proto_fields = {"q_elapsed": q_elapsed}
# Create data points for devices
for device in remote_devices:
tags = copy.copy(proto_tags)
tags.update(device["tags"])
fields = copy.copy(proto_fields)
fields.update(device["fields"])
point = dict(measurement=config.measurements.devices, tags=tags, fields=fields)
points.append(point)
# Create points for folders
for folder in folders:
tags = copy.copy(proto_tags)
tags.update(folder["tags"])
fields = copy.copy(proto_fields)
fields.update(folder["fields"])
point = dict(measurement=config.measurements.folders, tags=tags, fields=fields)
points.append(point)
if not points:
return
for influx_name, influx in config.influxes.items():
info(" Sending %d point(s) to influxdb %s" % (len(points), influx_name))
try:
influx = config.influxes[influx_name]
client = InfluxDBClient(**asdict(influx))
client.write_points(points)
except:
if args.halt_on_send_error:
raise
else:
traceback.print_exc(file=sys.stderr)
parser = argparse.ArgumentParser(description='Monitor your Syncthing instances with influxdb.')
parser.add_argument('-c', "--config", dest="config", default=None,
help="Configuration file for application. Default is syncflux.yml. "
"See syncflux_example.yml for an example.")
parser.add_argument("--config-dir", dest="config_dir", default=None,
help="Configuration directory. All config files with .yml extension will be processed one by one.")
parser.add_argument('-n', "--count", dest="count", default=1, type=int,
help="Number of test runs. Default is one. Use -1 to run indefinitely.")
parser.add_argument('-w', "--wait", dest="wait", default=60, type=float,
help="Number of seconds between test runs.")
parser.add_argument("-s", "--silent", dest='silent', action="store_true", default=False,
help="Supress all messages except errors.")
parser.add_argument("-v", "--verbose", dest='verbose', action="store_true", default=False,
help="Be verbose."
)
parser.add_argument("--halt-on-send-error", dest="halt_on_send_error", default=False, action="store_true",
help="Halt when cannot send data to influxdb. The default is to ignore the error.")
args = parser.parse_args()
if args.silent and args.verbose:
parser.error("Cannot use --silent and --verbose at the same time.")
if args.config is None:
args.config = "syncflux.yml"
if (args.config is not None) and (args.config_dir is not None):
parser.error("You must give either --config or --config-dir (exactly one of them)")
if args.count == 0:
parser.error("Test run count cannot be zero.")
if args.wait <= 0:
parser.error("Wait time must be positive.")
if args.config:
config_files = [args.config]
else:
config_files = []
for file_name in sorted(os.listdir(args.config_dir)):
ext = os.path.splitext(file_name)[1]
if ext.lower() == ".yml":
fpath = os.path.join(args.config_dir, file_name)
config_files.append(fpath)
index = 0
while args.count < 0 or index < args.count:
if args.count != 1:
info("Pass #%d started" % (index + 1))
started = time.time()
for config_file in config_files:
if not os.path.isfile(config_file):
parser.error("Cannot open %s" % config_file)
config = load_app_config(open(config_file, "r"))
main()
elapsed = time.time() - started
index += 1
last_one = (args.count > 0) and (index == args.count)
if not last_one:
remaining = args.wait - elapsed
if remaining > 0:
if not args.silent:
info("Pass #%d elapsed %.2f sec, waiting %.2f sec for next." % (index, elapsed, remaining))
time.sleep(args.wait)
else:
info("Pass #%d elapsed %.2f sec" % (index, elapsed))
info("")
| 33.694981 | 119 | 0.605936 | import copy
import datetime
import sys
import os
import time
import argparse
import traceback
import pytz
import syncthing
from influxdb import InfluxDBClient
import yaml
from yaml2dataclass import Schema, SchemaPath
from typing import Optional, Dict, Type, List
from dataclasses import dataclass, asdict, field
@dataclass
class SyncthingConfiguration(Schema):
name: str
api_key: str
host: str = 'localhost'
port: int = field(default=8384)
timeout: float = field(default=10.0)
is_https: bool = field(default=False)
ssl_cert_file: Optional[str] = field(default=None)
tags: Optional[List[str]] = field(default_factory=lambda: [])
def get_client_params(self):
result = asdict(self)
if "name" in result:
del result["name"]
if "tags" in result:
del result["tags"]
return result
@dataclass
class InfluxDbConfiguration(Schema):
host: str
port: int
ssl: bool
verify_ssl: bool
database: str
username: str
password: str
def get_client_params(self):
result = asdict(self)
if "tags" in result:
del result["tags"]
return result
@dataclass
class MeasurementConfiguration(Schema):
devices: str
folders: str
@dataclass
class AppConfiguration(Schema):
syncthings: Dict[str, SyncthingConfiguration]
influxes: Dict[str, InfluxDbConfiguration]
measurements: MeasurementConfiguration
@classmethod
def _load_dict(cls, props_dict, dest_cls: Type[Schema], add_name: bool = False):
result = {}
for name, value in props_dict.items():
arguments = {}
arguments.update(value)
if add_name:
arguments["name"] = name
result[name] = dest_cls.scm_load_from_dict(arguments)
return result
@classmethod
def scm_convert(cls, values: dict, path: SchemaPath):
values["syncthings"] = cls._load_dict(values["syncthings"], SyncthingConfiguration, True)
values["influxes"] = cls._load_dict(values["influxes"], InfluxDbConfiguration)
return values
def load_app_config(stream) -> AppConfiguration:
obj = yaml.safe_load(stream)
return AppConfiguration.scm_load_from_dict(obj)
def error(message: str):
sys.stderr.write("\nerror: " + message + "\n")
sys.stderr.flush()
raise SystemExit(-1)
def info(*values):
if not args.silent:
print(*values)
def main():
points = []
for sync in config.syncthings.values():
info(" Connect syncthing %s" % sync.name)
proto_tags = {"cfg_name": sync.name}
if sync.tags:
proto_tags.update(sync.tags)
conn_args = sync.get_client_params()
q_started = time.time()
conn = syncthing.Syncthing(**conn_args)
now = datetime.datetime.now(tz=pytz.UTC)
sync_cfg = conn.system.config()
my_device = sync_cfg["defaults"]["folder"]["devices"][0]
my_id = my_device["deviceID"]
proto_tags["my_id"] = my_id
device_stats = conn.stats.device()
remote_devices = []
for device in sync_cfg["devices"]:
device_id = device["deviceID"]
if device_id == my_id:
proto_tags["my_name"] = device["name"]
else:
stats = device_stats[device_id]
last_seen = syncthing.parse_datetime(stats["lastSeen"])
last_seen_since = now - last_seen
remote_devices.append({
"tags": {
"id": device["deviceID"],
"name": device["name"],
},
"fields": {
"last_seen_since_sec": last_seen_since.total_seconds(),
}
})
folders = []
for folder in sync_cfg["folders"]:
completion = conn.database.completion(my_id, folder["id"])
folders.append({
"tags": {"id": folder["id"], "label": folder["label"], "path": folder["path"]},
"fields": {"completion": completion},
})
q_elapsed = time.time() - q_started
proto_fields = {"q_elapsed": q_elapsed}
for device in remote_devices:
tags = copy.copy(proto_tags)
tags.update(device["tags"])
fields = copy.copy(proto_fields)
fields.update(device["fields"])
point = dict(measurement=config.measurements.devices, tags=tags, fields=fields)
points.append(point)
for folder in folders:
tags = copy.copy(proto_tags)
tags.update(folder["tags"])
fields = copy.copy(proto_fields)
fields.update(folder["fields"])
point = dict(measurement=config.measurements.folders, tags=tags, fields=fields)
points.append(point)
if not points:
return
for influx_name, influx in config.influxes.items():
info(" Sending %d point(s) to influxdb %s" % (len(points), influx_name))
try:
influx = config.influxes[influx_name]
client = InfluxDBClient(**asdict(influx))
client.write_points(points)
except:
if args.halt_on_send_error:
raise
else:
traceback.print_exc(file=sys.stderr)
parser = argparse.ArgumentParser(description='Monitor your Syncthing instances with influxdb.')
parser.add_argument('-c', "--config", dest="config", default=None,
help="Configuration file for application. Default is syncflux.yml. "
"See syncflux_example.yml for an example.")
parser.add_argument("--config-dir", dest="config_dir", default=None,
help="Configuration directory. All config files with .yml extension will be processed one by one.")
parser.add_argument('-n', "--count", dest="count", default=1, type=int,
help="Number of test runs. Default is one. Use -1 to run indefinitely.")
parser.add_argument('-w', "--wait", dest="wait", default=60, type=float,
help="Number of seconds between test runs.")
parser.add_argument("-s", "--silent", dest='silent', action="store_true", default=False,
help="Supress all messages except errors.")
parser.add_argument("-v", "--verbose", dest='verbose', action="store_true", default=False,
help="Be verbose."
)
parser.add_argument("--halt-on-send-error", dest="halt_on_send_error", default=False, action="store_true",
help="Halt when cannot send data to influxdb. The default is to ignore the error.")
args = parser.parse_args()
if args.silent and args.verbose:
parser.error("Cannot use --silent and --verbose at the same time.")
if args.config is None:
args.config = "syncflux.yml"
if (args.config is not None) and (args.config_dir is not None):
parser.error("You must give either --config or --config-dir (exactly one of them)")
if args.count == 0:
parser.error("Test run count cannot be zero.")
if args.wait <= 0:
parser.error("Wait time must be positive.")
if args.config:
config_files = [args.config]
else:
config_files = []
for file_name in sorted(os.listdir(args.config_dir)):
ext = os.path.splitext(file_name)[1]
if ext.lower() == ".yml":
fpath = os.path.join(args.config_dir, file_name)
config_files.append(fpath)
index = 0
while args.count < 0 or index < args.count:
if args.count != 1:
info("Pass #%d started" % (index + 1))
started = time.time()
for config_file in config_files:
if not os.path.isfile(config_file):
parser.error("Cannot open %s" % config_file)
config = load_app_config(open(config_file, "r"))
main()
elapsed = time.time() - started
index += 1
last_one = (args.count > 0) and (index == args.count)
if not last_one:
remaining = args.wait - elapsed
if remaining > 0:
if not args.silent:
info("Pass #%d elapsed %.2f sec, waiting %.2f sec for next." % (index, elapsed, remaining))
time.sleep(args.wait)
else:
info("Pass #%d elapsed %.2f sec" % (index, elapsed))
info("")
| true | true |
1c46086c638aabe3f56e864c3f814d7d84d20949 | 9,687 | py | Python | lib/spack/spack/build_systems/cuda.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | lib/spack/spack/build_systems/cuda.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2022-02-28T11:32:57.000Z | 2022-03-02T11:37:37.000Z | lib/spack/spack/build_systems/cuda.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.multimethod import when
from spack.package import PackageBase
class CudaPackage(PackageBase):
"""Auxiliary class which contains CUDA variant, dependencies and conflicts
and is meant to unify and facilitate its usage.
Maintainers: ax3l, Rombur, davidbeckingsale
"""
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list
# https://developer.nvidia.com/cuda-gpus
# https://en.wikipedia.org/wiki/CUDA#GPUs_supported
cuda_arch_values = (
'10', '11', '12', '13',
'20', '21',
'30', '32', '35', '37',
'50', '52', '53',
'60', '61', '62',
'70', '72', '75',
'80', '86'
)
# FIXME: keep cuda and cuda_arch separate to make usage easier until
# Spack has depends_on(cuda, when='cuda_arch!=None') or alike
variant('cuda', default=False,
description='Build with CUDA')
variant('cuda_arch',
description='CUDA architecture',
values=spack.variant.any_combination_of(*cuda_arch_values),
when='+cuda')
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#nvcc-examples
# https://llvm.org/docs/CompileCudaWithLLVM.html#compiling-cuda-code
@staticmethod
def cuda_flags(arch_list):
return [('--generate-code arch=compute_{0},code=sm_{0} '
'--generate-code arch=compute_{0},code=compute_{0}').format(s)
for s in arch_list]
depends_on('cuda', when='+cuda')
# CUDA version vs Architecture
# https://en.wikipedia.org/wiki/CUDA#GPUs_supported
# https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#deprecated-features
depends_on('cuda@:6.0', when='cuda_arch=10')
depends_on('cuda@:6.5', when='cuda_arch=11')
depends_on('cuda@2.1:6.5', when='cuda_arch=12')
depends_on('cuda@2.1:6.5', when='cuda_arch=13')
depends_on('cuda@3.0:8.0', when='cuda_arch=20')
depends_on('cuda@3.2:8.0', when='cuda_arch=21')
depends_on('cuda@5.0:10.2', when='cuda_arch=30')
depends_on('cuda@5.0:10.2', when='cuda_arch=32')
depends_on('cuda@5.0:', when='cuda_arch=35')
depends_on('cuda@6.5:', when='cuda_arch=37')
depends_on('cuda@6.0:', when='cuda_arch=50')
depends_on('cuda@6.5:', when='cuda_arch=52')
depends_on('cuda@6.5:', when='cuda_arch=53')
depends_on('cuda@8.0:', when='cuda_arch=60')
depends_on('cuda@8.0:', when='cuda_arch=61')
depends_on('cuda@8.0:', when='cuda_arch=62')
depends_on('cuda@9.0:', when='cuda_arch=70')
depends_on('cuda@9.0:', when='cuda_arch=72')
depends_on('cuda@10.0:', when='cuda_arch=75')
depends_on('cuda@11.0:', when='cuda_arch=80')
depends_on('cuda@11.1:', when='cuda_arch=86')
# From the NVIDIA install guide we know of conflicts for particular
# platforms (linux, darwin), architectures (x86, powerpc) and compilers
# (gcc, clang). We don't restrict %gcc and %clang conflicts to
# platform=linux, since they should also apply to platform=cray, and may
# apply to platform=darwin. We currently do not provide conflicts for
# platform=darwin with %apple-clang.
# Linux x86_64 compiler conflicts from here:
# https://gist.github.com/ax3l/9489132
with when('^cuda~allow-unsupported-compilers'):
# GCC
# According to
# https://github.com/spack/spack/pull/25054#issuecomment-886531664
# these conflicts are valid independently from the architecture
# minimum supported versions
conflicts('%gcc@:4', when='+cuda ^cuda@11.0:')
conflicts('%gcc@:5', when='+cuda ^cuda@11.4:')
# maximum supported version
# NOTE:
# in order to not constrain future cuda version to old gcc versions,
# it has been decided to use an upper bound for the latest version.
# This implies that the last one in the list has to be updated at
# each release of a new cuda minor version.
conflicts('%gcc@10:', when='+cuda ^cuda@:11.0')
conflicts('%gcc@12:', when='+cuda ^cuda@:11.6')
conflicts('%clang@13:', when='+cuda ^cuda@:11.5')
conflicts('%clang@14:', when='+cuda ^cuda@:11.6')
# https://gist.github.com/ax3l/9489132#gistcomment-3860114
conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0')
conflicts('%gcc@5:', when='+cuda ^cuda@:7.5 target=x86_64:')
conflicts('%gcc@6:', when='+cuda ^cuda@:8 target=x86_64:')
conflicts('%gcc@7:', when='+cuda ^cuda@:9.1 target=x86_64:')
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=x86_64:')
conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89 target=x86_64:')
conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27 target=x86_64:')
conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5 target=x86_64:')
conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8 target=x86_64:')
conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1 target=x86_64:')
conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10 target=x86_64:')
conflicts('%pgi@:17,20:', when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:')
conflicts('%pgi@:17,21:', when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:')
conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5 target=x86_64:')
conflicts('%clang@:3.7,4:', when='+cuda ^cuda@8.0:9.0 target=x86_64:')
conflicts('%clang@:3.7,4.1:', when='+cuda ^cuda@9.1 target=x86_64:')
conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2 target=x86_64:')
conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130 target=x86_64:')
conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105 target=x86_64:')
conflicts('%clang@:3.7,8.1:',
when='+cuda ^cuda@10.1.105:10.1.243 target=x86_64:')
conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89 target=x86_64:')
conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=x86_64:')
conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:')
conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:')
# x86_64 vs. ppc64le differ according to NVidia docs
# Linux ppc64le compiler conflicts from Table from the docs below:
# https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html
# https://docs.nvidia.com/cuda/archive/9.2/cuda-installation-guide-linux/index.html
# https://docs.nvidia.com/cuda/archive/9.1/cuda-installation-guide-linux/index.html
# https://docs.nvidia.com/cuda/archive/9.0/cuda-installation-guide-linux/index.html
# https://docs.nvidia.com/cuda/archive/8.0/cuda-installation-guide-linux/index.html
# information prior to CUDA 9 difficult to find
conflicts('%gcc@6:', when='+cuda ^cuda@:9 target=ppc64le:')
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=ppc64le:')
conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243 target=ppc64le:')
# officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
conflicts('%pgi', when='+cuda ^cuda@:8 target=ppc64le:')
conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185 target=ppc64le:')
conflicts('%pgi@:17', when='+cuda ^cuda@:10 target=ppc64le:')
conflicts('%clang@4:', when='+cuda ^cuda@:9.0.176 target=ppc64le:')
conflicts('%clang@5:', when='+cuda ^cuda@:9.1 target=ppc64le:')
conflicts('%clang@6:', when='+cuda ^cuda@:9.2 target=ppc64le:')
conflicts('%clang@7:', when='+cuda ^cuda@10.0.130 target=ppc64le:')
conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105 target=ppc64le:')
conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89 target=ppc64le:')
conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=ppc64le:')
conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2 target=ppc64le:')
conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:')
# Intel is mostly relevant for x86_64 Linux, even though it also
# exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1
conflicts('%intel@:11.0', when='+cuda ^cuda@:3.1')
conflicts('%intel@:12.0', when='+cuda ^cuda@5.5:')
conflicts('%intel@:13.0', when='+cuda ^cuda@6.0:')
conflicts('%intel@:13.2', when='+cuda ^cuda@6.5:')
conflicts('%intel@:14.9', when='+cuda ^cuda@7:')
# Intel 15.x is compatible with CUDA 7 thru current CUDA
conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43')
conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
conflicts('%intel@19.2:', when='+cuda ^cuda@:11.1.0')
# XL is mostly relevant for ppc64le Linux
conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.1.0')
# Darwin.
# TODO: add missing conflicts for %apple-clang cuda@:10
conflicts('platform=darwin', when='+cuda ^cuda@11.0.2: ')
# Make sure cuda_arch can not be used without +cuda
for value in cuda_arch_values:
conflicts('~cuda', when='cuda_arch=' + value)
| 50.19171 | 92 | 0.617425 |
import spack.variant
from spack.directives import conflicts, depends_on, variant
from spack.multimethod import when
from spack.package import PackageBase
class CudaPackage(PackageBase):
= (
'10', '11', '12', '13',
'20', '21',
'30', '32', '35', '37',
'50', '52', '53',
'60', '61', '62',
'70', '72', '75',
'80', '86'
)
variant('cuda', default=False,
description='Build with CUDA')
variant('cuda_arch',
description='CUDA architecture',
values=spack.variant.any_combination_of(*cuda_arch_values),
when='+cuda')
da_flags(arch_list):
return [('--generate-code arch=compute_{0},code=sm_{0} '
'--generate-code arch=compute_{0},code=compute_{0}').format(s)
for s in arch_list]
depends_on('cuda', when='+cuda')
when='cuda_arch=10')
depends_on('cuda@:6.5', when='cuda_arch=11')
depends_on('cuda@2.1:6.5', when='cuda_arch=12')
depends_on('cuda@2.1:6.5', when='cuda_arch=13')
depends_on('cuda@3.0:8.0', when='cuda_arch=20')
depends_on('cuda@3.2:8.0', when='cuda_arch=21')
depends_on('cuda@5.0:10.2', when='cuda_arch=30')
depends_on('cuda@5.0:10.2', when='cuda_arch=32')
depends_on('cuda@5.0:', when='cuda_arch=35')
depends_on('cuda@6.5:', when='cuda_arch=37')
depends_on('cuda@6.0:', when='cuda_arch=50')
depends_on('cuda@6.5:', when='cuda_arch=52')
depends_on('cuda@6.5:', when='cuda_arch=53')
depends_on('cuda@8.0:', when='cuda_arch=60')
depends_on('cuda@8.0:', when='cuda_arch=61')
depends_on('cuda@8.0:', when='cuda_arch=62')
depends_on('cuda@9.0:', when='cuda_arch=70')
depends_on('cuda@9.0:', when='cuda_arch=72')
depends_on('cuda@10.0:', when='cuda_arch=75')
depends_on('cuda@11.0:', when='cuda_arch=80')
depends_on('cuda@11.1:', when='cuda_arch=86')
# platform=linux, since they should also apply to platform=cray, and may
# apply to platform=darwin. We currently do not provide conflicts for
# platform=darwin with %apple-clang.
# Linux x86_64 compiler conflicts from here:
# https://gist.github.com/ax3l/9489132
with when('^cuda~allow-unsupported-compilers'):
# GCC
# According to
# https://github.com/spack/spack/pull/25054#issuecomment-886531664
# these conflicts are valid independently from the architecture
# minimum supported versions
conflicts('%gcc@:4', when='+cuda ^cuda@11.0:')
conflicts('%gcc@:5', when='+cuda ^cuda@11.4:')
# maximum supported version
# NOTE:
# in order to not constrain future cuda version to old gcc versions,
# it has been decided to use an upper bound for the latest version.
# This implies that the last one in the list has to be updated at
# each release of a new cuda minor version.
conflicts('%gcc@10:', when='+cuda ^cuda@:11.0')
conflicts('%gcc@12:', when='+cuda ^cuda@:11.6')
conflicts('%clang@13:', when='+cuda ^cuda@:11.5')
conflicts('%clang@14:', when='+cuda ^cuda@:11.6')
# https://gist.github.com/ax3l/9489132#gistcomment-3860114
conflicts('%gcc@10', when='+cuda ^cuda@:11.4.0')
conflicts('%gcc@5:', when='+cuda ^cuda@:7.5 target=x86_64:')
conflicts('%gcc@6:', when='+cuda ^cuda@:8 target=x86_64:')
conflicts('%gcc@7:', when='+cuda ^cuda@:9.1 target=x86_64:')
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=x86_64:')
conflicts('%gcc@9:', when='+cuda ^cuda@:10.2.89 target=x86_64:')
conflicts('%pgi@:14.8', when='+cuda ^cuda@:7.0.27 target=x86_64:')
conflicts('%pgi@:15.3,15.5:', when='+cuda ^cuda@7.5 target=x86_64:')
conflicts('%pgi@:16.2,16.0:16.3', when='+cuda ^cuda@8 target=x86_64:')
conflicts('%pgi@:15,18:', when='+cuda ^cuda@9.0:9.1 target=x86_64:')
conflicts('%pgi@:16,19:', when='+cuda ^cuda@9.2.88:10 target=x86_64:')
conflicts('%pgi@:17,20:', when='+cuda ^cuda@10.1.105:10.2.89 target=x86_64:')
conflicts('%pgi@:17,21:', when='+cuda ^cuda@11.0.2:11.1.0 target=x86_64:')
conflicts('%clang@:3.4', when='+cuda ^cuda@:7.5 target=x86_64:')
conflicts('%clang@:3.7,4:', when='+cuda ^cuda@8.0:9.0 target=x86_64:')
conflicts('%clang@:3.7,4.1:', when='+cuda ^cuda@9.1 target=x86_64:')
conflicts('%clang@:3.7,5.1:', when='+cuda ^cuda@9.2 target=x86_64:')
conflicts('%clang@:3.7,6.1:', when='+cuda ^cuda@10.0.130 target=x86_64:')
conflicts('%clang@:3.7,7.1:', when='+cuda ^cuda@10.1.105 target=x86_64:')
conflicts('%clang@:3.7,8.1:',
when='+cuda ^cuda@10.1.105:10.1.243 target=x86_64:')
conflicts('%clang@:3.2,9:', when='+cuda ^cuda@10.2.89 target=x86_64:')
conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=x86_64:')
conflicts('%clang@10:', when='+cuda ^cuda@:11.0.3 target=x86_64:')
conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=x86_64:')
# x86_64 vs. ppc64le differ according to NVidia docs
# Linux ppc64le compiler conflicts from Table from the docs below:
# https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html
# https://docs.nvidia.com/cuda/archive/9.2/cuda-installation-guide-linux/index.html
# https://docs.nvidia.com/cuda/archive/9.1/cuda-installation-guide-linux/index.html
# https://docs.nvidia.com/cuda/archive/9.0/cuda-installation-guide-linux/index.html
# https://docs.nvidia.com/cuda/archive/8.0/cuda-installation-guide-linux/index.html
# information prior to CUDA 9 difficult to find
conflicts('%gcc@6:', when='+cuda ^cuda@:9 target=ppc64le:')
conflicts('%gcc@8:', when='+cuda ^cuda@:10.0.130 target=ppc64le:')
conflicts('%gcc@9:', when='+cuda ^cuda@:10.1.243 target=ppc64le:')
# officially, CUDA 11.0.2 only supports the system GCC 8.3 on ppc64le
conflicts('%pgi', when='+cuda ^cuda@:8 target=ppc64le:')
conflicts('%pgi@:16', when='+cuda ^cuda@:9.1.185 target=ppc64le:')
conflicts('%pgi@:17', when='+cuda ^cuda@:10 target=ppc64le:')
conflicts('%clang@4:', when='+cuda ^cuda@:9.0.176 target=ppc64le:')
conflicts('%clang@5:', when='+cuda ^cuda@:9.1 target=ppc64le:')
conflicts('%clang@6:', when='+cuda ^cuda@:9.2 target=ppc64le:')
conflicts('%clang@7:', when='+cuda ^cuda@10.0.130 target=ppc64le:')
conflicts('%clang@7.1:', when='+cuda ^cuda@:10.1.105 target=ppc64le:')
conflicts('%clang@8.1:', when='+cuda ^cuda@:10.2.89 target=ppc64le:')
conflicts('%clang@:5', when='+cuda ^cuda@11.0.2: target=ppc64le:')
conflicts('%clang@10:', when='+cuda ^cuda@:11.0.2 target=ppc64le:')
conflicts('%clang@11:', when='+cuda ^cuda@:11.1.0 target=ppc64le:')
# Intel is mostly relevant for x86_64 Linux, even though it also
# exists for Mac OS X. No information prior to CUDA 3.2 or Intel 11.1
conflicts('%intel@:11.0', when='+cuda ^cuda@:3.1')
conflicts('%intel@:12.0', when='+cuda ^cuda@5.5:')
conflicts('%intel@:13.0', when='+cuda ^cuda@6.0:')
conflicts('%intel@:13.2', when='+cuda ^cuda@6.5:')
conflicts('%intel@:14.9', when='+cuda ^cuda@7:')
# Intel 15.x is compatible with CUDA 7 thru current CUDA
conflicts('%intel@16.0:', when='+cuda ^cuda@:8.0.43')
conflicts('%intel@17.0:', when='+cuda ^cuda@:8.0.60')
conflicts('%intel@18.0:', when='+cuda ^cuda@:9.9')
conflicts('%intel@19.0:', when='+cuda ^cuda@:10.0')
conflicts('%intel@19.1:', when='+cuda ^cuda@:10.1')
conflicts('%intel@19.2:', when='+cuda ^cuda@:11.1.0')
# XL is mostly relevant for ppc64le Linux
conflicts('%xl@:12,14:', when='+cuda ^cuda@:9.1')
conflicts('%xl@:12,14:15,17:', when='+cuda ^cuda@9.2')
conflicts('%xl@:12,17:', when='+cuda ^cuda@:11.1.0')
# Darwin.
# TODO: add missing conflicts for %apple-clang cuda@:10
conflicts('platform=darwin', when='+cuda ^cuda@11.0.2: ')
# Make sure cuda_arch can not be used without +cuda
for value in cuda_arch_values:
conflicts('~cuda', when='cuda_arch=' + value)
| true | true |
1c460873137198c0c9f2771d470c8e55b0d5da3b | 33 | py | Python | step2.py | SirLonsevrot/Lesson_20.11.28 | eac91b46441bf641c60d7f5d1340d74f8665614b | [
"Apache-2.0"
] | null | null | null | step2.py | SirLonsevrot/Lesson_20.11.28 | eac91b46441bf641c60d7f5d1340d74f8665614b | [
"Apache-2.0"
] | null | null | null | step2.py | SirLonsevrot/Lesson_20.11.28 | eac91b46441bf641c60d7f5d1340d74f8665614b | [
"Apache-2.0"
] | null | null | null | print('Георгий ничего не умеет')
| 16.5 | 32 | 0.757576 | print('Георгий ничего не умеет')
| true | true |
1c4608abdf6f6b3a4ea765ffc6252d1d214de3d1 | 6,003 | py | Python | pro/views.py | iyerikuzwe/Award | a5ac352a7d05d23c92167022e00648caeab62590 | [
"Unlicense"
] | null | null | null | pro/views.py | iyerikuzwe/Award | a5ac352a7d05d23c92167022e00648caeab62590 | [
"Unlicense"
] | null | null | null | pro/views.py | iyerikuzwe/Award | a5ac352a7d05d23c92167022e00648caeab62590 | [
"Unlicense"
] | null | null | null | from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from .forms import ProjectForm, ProfileForm, DesignForm, ContentForm, UsabilityForm
from .models import Project, Profile
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework.views import APIView
# from .serializer import ProfSerializer, ProjectSerializer
from .permissions import IsAuthenticatedOrReadOnly
from rest_framework import status
@login_required(login_url='/accounts/login/')
def index(request):
projects = Project.objects.all().order_by('-posted_on')
form = DesignForm()
form = UsabilityForm()
form = ContentForm()
return render(request, 'index.html', locals())
@login_required(login_url='/accounts/login/')
def new_project(request):
"""
Function that enables one to upload projects
"""
profile = Profile.objects.all()
for profile in profile:
if request.method == 'POST':
form = ProjectForm(request.POST, request.FILES)
if form.is_valid():
pro = form.save(commit=False)
pro.profile = profile
pro.user = request.user
pro.save()
return redirect('landing')
else:
form = ProjectForm()
return render(request, 'new_pro.html', {"form": form})
@login_required(login_url='/accounts/login/')
def edit_profile(request):
"""
Function that enables one to edit their profile information
"""
current_user = request.user
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.user = current_user
profile.save()
return redirect('landing')
else:
form = ProfileForm()
return render(request, 'profile/edit-profile.html', {"form": form,})
@login_required(login_url='/accounts/login/')
def view_project(request, id):
"""
Function that enables one to view specific project
"""
title = "View Project"
project = Project.get_pro_by_id(id=id)
return render(request, 'view_project.html', locals())
@login_required(login_url='/accounts/login/')
def profile(request, user_id):
"""
Function that enables one to see their profile
"""
title = "Profile"
pros= Project.get_pro_by_user(id= user_id).order_by('-posted_on')
profiles = Profile.objects.get(user_id=user_id)
users = User.objects.get(id=user_id)
return render(request, 'profile/profile.html', locals())
def search_results(request):
if 'pro' in request.GET and request.GET["pro"]:
search_term = request.GET.get("pro")
searched_projects = Project.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"pros": searched_projects})
else:
message = "You didn't searched for any term"
return render(request, 'search.html',{"message":message})
class ProfList(APIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None):
all_merchprof = Profile.objects.all()
serializers = ProfSerializer(all_merchprof, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = ProfSerializer(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProjectList(APIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None):
all_merchproj = Project.objects.all()
serializers = ProjectSerializer(all_merchproj, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = ProjectSerializer(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
@login_required(login_url='/accounts/login/')
def add_design(request, id):
project = get_object_or_404(Project, pk=id)
if request.method == 'POST':
form = DesignForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.project = project
rate.user_name = request.user
rate.profile = request.user.profile
rate.save()
return redirect('landing')
else:
form = DesignForm()
return render(request, 'index.html',{'form': form})
@login_required(login_url='/accounts/login/')
def add_usability(request, id):
project = get_object_or_404(Project, pk=id)
if request.method == 'POST':
form = UsabilityForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.project = project
rate.user_name = request.user
rate.profile = request.user.profile
rate.save()
return redirect('landing')
else:
form = UsabilityForm()
return render(request, 'index.html',{'form': form})
@login_required(login_url='/accounts/login/')
def add_content(request, id):
project = get_object_or_404(Project, pk=id)
if request.method == 'POST':
form = ContentForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.project = project
rate.user_name = request.user
rate.profile = request.user.profile
rate.save()
return redirect('landing')
else:
form = ContentForm()
return render(request, 'index.html',{'form': form})
| 33.35 | 91 | 0.666167 | from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from .forms import ProjectForm, ProfileForm, DesignForm, ContentForm, UsabilityForm
from .models import Project, Profile
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework.views import APIView
from .permissions import IsAuthenticatedOrReadOnly
from rest_framework import status
@login_required(login_url='/accounts/login/')
def index(request):
projects = Project.objects.all().order_by('-posted_on')
form = DesignForm()
form = UsabilityForm()
form = ContentForm()
return render(request, 'index.html', locals())
@login_required(login_url='/accounts/login/')
def new_project(request):
profile = Profile.objects.all()
for profile in profile:
if request.method == 'POST':
form = ProjectForm(request.POST, request.FILES)
if form.is_valid():
pro = form.save(commit=False)
pro.profile = profile
pro.user = request.user
pro.save()
return redirect('landing')
else:
form = ProjectForm()
return render(request, 'new_pro.html', {"form": form})
@login_required(login_url='/accounts/login/')
def edit_profile(request):
current_user = request.user
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
if form.is_valid():
profile = form.save(commit=False)
profile.user = current_user
profile.save()
return redirect('landing')
else:
form = ProfileForm()
return render(request, 'profile/edit-profile.html', {"form": form,})
@login_required(login_url='/accounts/login/')
def view_project(request, id):
title = "View Project"
project = Project.get_pro_by_id(id=id)
return render(request, 'view_project.html', locals())
@login_required(login_url='/accounts/login/')
def profile(request, user_id):
title = "Profile"
pros= Project.get_pro_by_user(id= user_id).order_by('-posted_on')
profiles = Profile.objects.get(user_id=user_id)
users = User.objects.get(id=user_id)
return render(request, 'profile/profile.html', locals())
def search_results(request):
if 'pro' in request.GET and request.GET["pro"]:
search_term = request.GET.get("pro")
searched_projects = Project.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"pros": searched_projects})
else:
message = "You didn't searched for any term"
return render(request, 'search.html',{"message":message})
class ProfList(APIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None):
all_merchprof = Profile.objects.all()
serializers = ProfSerializer(all_merchprof, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = ProfSerializer(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
class ProjectList(APIView):
permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None):
all_merchproj = Project.objects.all()
serializers = ProjectSerializer(all_merchproj, many=True)
return Response(serializers.data)
def post(self, request, format=None):
serializers = ProjectSerializer(data=request.data)
if serializers.is_valid():
serializers.save()
return Response(serializers.data, status=status.HTTP_201_CREATED)
return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
@login_required(login_url='/accounts/login/')
def add_design(request, id):
project = get_object_or_404(Project, pk=id)
if request.method == 'POST':
form = DesignForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.project = project
rate.user_name = request.user
rate.profile = request.user.profile
rate.save()
return redirect('landing')
else:
form = DesignForm()
return render(request, 'index.html',{'form': form})
@login_required(login_url='/accounts/login/')
def add_usability(request, id):
project = get_object_or_404(Project, pk=id)
if request.method == 'POST':
form = UsabilityForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.project = project
rate.user_name = request.user
rate.profile = request.user.profile
rate.save()
return redirect('landing')
else:
form = UsabilityForm()
return render(request, 'index.html',{'form': form})
@login_required(login_url='/accounts/login/')
def add_content(request, id):
project = get_object_or_404(Project, pk=id)
if request.method == 'POST':
form = ContentForm(request.POST)
if form.is_valid():
rate = form.save(commit=False)
rate.project = project
rate.user_name = request.user
rate.profile = request.user.profile
rate.save()
return redirect('landing')
else:
form = ContentForm()
return render(request, 'index.html',{'form': form})
| true | true |
1c46098610964543336f1caf9a6c92cb98615a0c | 5,335 | py | Python | example/distill/nlp/reader.py | wangxicoding/edl | 75d651e72e5297aba2e597588cf958ea336deb4e | [
"Apache-2.0"
] | 90 | 2020-04-21T01:46:10.000Z | 2022-02-10T09:09:34.000Z | example/distill/nlp/reader.py | wangxicoding/edl | 75d651e72e5297aba2e597588cf958ea336deb4e | [
"Apache-2.0"
] | 37 | 2018-03-02T22:41:15.000Z | 2020-04-22T16:48:36.000Z | example/distill/nlp/reader.py | wangxicoding/edl | 75d651e72e5297aba2e597588cf958ea336deb4e | [
"Apache-2.0"
] | 34 | 2018-03-02T23:28:25.000Z | 2020-03-25T08:50:29.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import os
import csv
import sys
from paddlehub.dataset import InputExample
from paddlehub.common.dir import DATA_HOME
from paddlehub.dataset.base_nlp_dataset import BaseNLPDataset
import paddle as P
import paddle.fluid.dygraph as D
import numpy as np
def space_tokenizer(i):
return i.split()
def pad_batch_data(data, dtype, pad_idx=0, max_len=-1):
if max_len <= 0:
for s in data:
if len(s) > max_len:
max_len = len(s)
inst_data = np.array([
list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in data
])
return np.array(inst_data).astype(dtype)
class ChnSentiCorp(BaseNLPDataset):
def __init__(self):
base_path = "./data/"
super(ChnSentiCorp, self).__init__(
base_path=base_path,
train_file="train.part.0",
dev_file="dev.part.0",
test_file="test.part.0",
label_file=None,
label_list=["0", "1"], )
self._word_dict = None
def __read_file(self, input_file):
"""
data file format:
origin sentence\tword segment sentence\tlabel
"""
with codecs.open(input_file, "r", encoding="UTF-8") as f:
for line in f:
line = line.strip()
if len(line) <= 0:
continue
arr = line.split("\t")
#print("line:", len(arr))
yield arr
def _read_file(self, input_file, phase=None):
"""
[(seq_id,label,origin sentence)]
"""
seq_id = 0
examples = []
for t in self.__read_file(input_file):
if len(t) == 2:
#example = InputExample(
# guid=seq_id, label=t[1], text_a=t[0])
#print("t2", t[1])
assert len(t) != 2, "data format error:" + t
elif len(t) == 3:
example = InputExample(guid=seq_id, label=t[2], text_a=t[0])
#print("t3", t[2])
else:
assert False, 'invalid format'
seq_id += 1
examples.append(example)
return examples
def student_word_dict(self, vocab_file):
"""
{
word->word_idx
}
"""
with codecs.open(vocab_file, "r", encoding="UTF-8") as f:
self._word_dict = {
i.strip(): l
for l, i in enumerate(f.readlines())
}
return self._word_dict
def student_reader(self, input_file, word_dict):
"""
return [([segment_sentence_idxs], label, sentence), ()...]
"""
def reader():
input_files = []
if isinstance(input_file, str):
input_files.append(input_file)
else:
input_files = input_file
assert isinstance(input_file, list)
for data_file in input_files:
print("open file:", data_file)
for t in self.__read_file(data_file):
s = []
for word in space_tokenizer(t[1]):
idx = word_dict[
word] if word in word_dict else word_dict['[UNK]']
s.append(idx)
yield s, t[2], t[0]
return reader
def batch_reader(self, input_file, word_dict, batch_size, shuffle=True):
def reader():
if shuffle:
s_reader = P.reader.shuffle(
self.student_reader(input_file, word_dict),
buf_size=100000)
else:
s_reader = self.student_reader(input_file, word_dict)
b = [[], [], []]
for rec in s_reader():
if len(b[0]) == batch_size:
yield b
b = [[], [], []]
continue
for i in range(len(rec)):
b[i].append(rec[i])
if len(b[0]) > 0:
yield b
return reader
def pad_batch_reader(self, input_file, word_dict, batch_size,
shuffle=True):
def reader():
b_reader = self.batch_reader(
input_file, word_dict, batch_size, shuffle=shuffle)
for b in b_reader():
b[0] = D.base.to_variable(pad_batch_data(b[0], 'int64'))
b[1] = D.base.to_variable(np.array(b[1]).astype('int64'))
yield b
return reader
if __name__ == '__main__':
ds = ChnSentiCorp()
ds._read_file("./data/train.part.0")
ds.student_reader("./data/train.part.0", "./data/vocab.bow.txt")
| 30.485714 | 78 | 0.530834 |
import codecs
import os
import csv
import sys
from paddlehub.dataset import InputExample
from paddlehub.common.dir import DATA_HOME
from paddlehub.dataset.base_nlp_dataset import BaseNLPDataset
import paddle as P
import paddle.fluid.dygraph as D
import numpy as np
def space_tokenizer(i):
return i.split()
def pad_batch_data(data, dtype, pad_idx=0, max_len=-1):
if max_len <= 0:
for s in data:
if len(s) > max_len:
max_len = len(s)
inst_data = np.array([
list(inst) + list([pad_idx] * (max_len - len(inst))) for inst in data
])
return np.array(inst_data).astype(dtype)
class ChnSentiCorp(BaseNLPDataset):
def __init__(self):
base_path = "./data/"
super(ChnSentiCorp, self).__init__(
base_path=base_path,
train_file="train.part.0",
dev_file="dev.part.0",
test_file="test.part.0",
label_file=None,
label_list=["0", "1"], )
self._word_dict = None
def __read_file(self, input_file):
with codecs.open(input_file, "r", encoding="UTF-8") as f:
for line in f:
line = line.strip()
if len(line) <= 0:
continue
arr = line.split("\t")
yield arr
def _read_file(self, input_file, phase=None):
seq_id = 0
examples = []
for t in self.__read_file(input_file):
if len(t) == 2:
assert len(t) != 2, "data format error:" + t
elif len(t) == 3:
example = InputExample(guid=seq_id, label=t[2], text_a=t[0])
else:
assert False, 'invalid format'
seq_id += 1
examples.append(example)
return examples
def student_word_dict(self, vocab_file):
with codecs.open(vocab_file, "r", encoding="UTF-8") as f:
self._word_dict = {
i.strip(): l
for l, i in enumerate(f.readlines())
}
return self._word_dict
def student_reader(self, input_file, word_dict):
def reader():
input_files = []
if isinstance(input_file, str):
input_files.append(input_file)
else:
input_files = input_file
assert isinstance(input_file, list)
for data_file in input_files:
print("open file:", data_file)
for t in self.__read_file(data_file):
s = []
for word in space_tokenizer(t[1]):
idx = word_dict[
word] if word in word_dict else word_dict['[UNK]']
s.append(idx)
yield s, t[2], t[0]
return reader
def batch_reader(self, input_file, word_dict, batch_size, shuffle=True):
def reader():
if shuffle:
s_reader = P.reader.shuffle(
self.student_reader(input_file, word_dict),
buf_size=100000)
else:
s_reader = self.student_reader(input_file, word_dict)
b = [[], [], []]
for rec in s_reader():
if len(b[0]) == batch_size:
yield b
b = [[], [], []]
continue
for i in range(len(rec)):
b[i].append(rec[i])
if len(b[0]) > 0:
yield b
return reader
def pad_batch_reader(self, input_file, word_dict, batch_size,
shuffle=True):
def reader():
b_reader = self.batch_reader(
input_file, word_dict, batch_size, shuffle=shuffle)
for b in b_reader():
b[0] = D.base.to_variable(pad_batch_data(b[0], 'int64'))
b[1] = D.base.to_variable(np.array(b[1]).astype('int64'))
yield b
return reader
if __name__ == '__main__':
ds = ChnSentiCorp()
ds._read_file("./data/train.part.0")
ds.student_reader("./data/train.part.0", "./data/vocab.bow.txt")
| true | true |
1c460c1837c4e7c5359fc82cd3f26054a7ebdf50 | 179 | py | Python | needle/engines/base.py | VICEMedia/needle | c2d28ee07278f1d0bd7ace6a2cb65cfea24f2a7e | [
"BSD-3-Clause"
] | 144 | 2017-04-23T08:52:52.000Z | 2022-03-15T03:40:37.000Z | new_pytest_needle/engines/base.py | Gadzillion/new_pytest_needle | b86de146c443a8377cfab9750aff187c0cb0852d | [
"MIT"
] | 35 | 2015-01-16T15:24:35.000Z | 2017-04-02T22:35:05.000Z | new_pytest_needle/engines/base.py | Gadzillion/new_pytest_needle | b86de146c443a8377cfab9750aff187c0cb0852d | [
"MIT"
] | 24 | 2017-04-23T08:52:57.000Z | 2022-02-02T11:57:21.000Z | class EngineBase(object):
"""
Base class for diff engines.
"""
def assertSameFiles(self, output_file, baseline_file, threshold):
raise NotImplementedError | 25.571429 | 69 | 0.687151 | class EngineBase(object):
def assertSameFiles(self, output_file, baseline_file, threshold):
raise NotImplementedError | true | true |
1c460c5310e4d551ba33a840648e9aabd577f049 | 1,077 | py | Python | ESEC.FSE.2017.Experimental.Replication/Figure1/Table1GroupScore.py | austinatchley/Themis | 67d5e639e9445f1612249ae7939b3625fea138db | [
"BSD-4-Clause-UC"
] | 88 | 2017-08-14T19:44:21.000Z | 2021-11-20T00:48:01.000Z | ESEC.FSE.2017.Experimental.Replication/Figure1/Table1GroupScore.py | kavithacd/Themis | 67d5e639e9445f1612249ae7939b3625fea138db | [
"BSD-4-Clause-UC"
] | 25 | 2017-03-07T15:33:46.000Z | 2020-06-18T01:39:26.000Z | ESEC.FSE.2017.Experimental.Replication/Figure1/Table1GroupScore.py | kavithacd/Themis | 67d5e639e9445f1612249ae7939b3625fea138db | [
"BSD-4-Clause-UC"
] | 19 | 2017-10-11T15:25:12.000Z | 2021-08-16T01:47:43.000Z | '''
This script calculates the Group discrimination score for the particular input file towards race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
0 for tace
1 for gender
'''
import sys

# Group discrimination score for a test suite.
#   argv[1]: input test-suite CSV (last column = outcome; > 0 counts as positive)
#   argv[2]: 0 for race  (attribute in column 7, 5 groups)
#            1 for gender (attribute in column 8, 2 groups)
# Prints the percentage spread between the most- and least-favoured group's
# positive-outcome rate, floored at 0.01.
attr_kind = int(sys.argv[2])
n_groups = 5 if attr_kind == 0 else 2
pos = [0] * n_groups
neg = [0] * n_groups

# `with` closes the input file deterministically (the original leaked the
# handle); the original also shadowed the builtins `type`, `max` and `min`.
with open(sys.argv[1], "r") as f:
    for line in f:
        fields = line.strip().split(',')
        outcome = float(fields[-1])
        if attr_kind == 1:
            group = int(fields[8])
            if group >= len(pos):
                # Out-of-range gender codes are skipped, as before.
                continue
        else:
            # Race codes are used unchecked, matching the original behavior.
            group = int(fields[7])
        if outcome > 0:
            pos[group] += 1
        else:
            neg[group] += 1

# Spread between the highest and lowest positive-outcome rate over groups.
rates = [pos[i] * 1.0 / (pos[i] + neg[i]) for i in range(len(pos))]
val = 100 * (max(rates) - min(rates))
if val < 0.01:
    val = 0.01
print("%.2f" % val)
| 18.894737 | 107 | 0.499536 | '''
This script calculates the Group discrimination score for the particular input file towards race or gender.
USAGE :
argv[1] : Input test suite
argv[2] : 0/1
0 for tace
1 for gender
'''
import sys
f = open(sys.argv[1],"r")
type = int(sys.argv[2])
if(type==0):
pos = [0,0,0,0,0]
neg = [0,0,0,0,0]
else:
pos =[0,0]
neg =[0,0]
for line in f:
line = line.strip()
line =line.split(',')
if(float(line[-1])>0):
if(type==1):
if(int(line[8])<len(pos)):
pos[int(line[8])]+=1
else:
pos[int(line[7])]+=1
if(float(line[-1])<=0):
if(type==1):
if(int(line[8])<len(neg)):
neg[int(line[8])]+=1
else:
neg[int(line[7])]+=1
i =0
max = 0
min = 1
while i<len(pos):
ratio = pos[i]*1.0/(pos[i]+neg[i])
if(ratio >= max):
max = ratio
if(ratio < min):
min = ratio
i+=1
val = 100*(max-min)
if(val < 0.01):
val=0.01
print("%.2f" %val)
| false | true |
1c460cfe2369acdf089542529e5400b016579622 | 4,298 | py | Python | temboo/core/Library/LastFm/Artist/GetTopTracks.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/LastFm/Artist/GetTopTracks.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | null | null | null | temboo/core/Library/LastFm/Artist/GetTopTracks.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | [
"Apache-2.0"
] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | # -*- coding: utf-8 -*-
###############################################################################
#
# GetTopTracks
# Retrieves the top tracks by an artist on Last.fm, ordered by popularity.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetTopTracks(Choreography):
    """Choreo wrapper for retrieving an artist's top tracks from Last.fm."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the GetTopTracks Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetTopTracks, self).__init__(temboo_session, '/Library/LastFm/Artist/GetTopTracks')

    def new_input_set(self):
        """Return an empty GetTopTracksInputSet for specifying inputs."""
        return GetTopTracksInputSet()

    def _make_result_set(self, result, path):
        """Wrap a raw execution result in a GetTopTracksResultSet."""
        return GetTopTracksResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        """Build the execution handle used to poll for and fetch results."""
        return GetTopTracksChoreographyExecution(session, exec_id, path)
class GetTopTracksInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetTopTracks
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) Your Last.fm API Key.)
"""
super(GetTopTracksInputSet, self)._set_input('APIKey', value)
def set_Artist(self, value):
"""
Set the value of the Artist input for this Choreo. ((conditional, string) The artist name. Required unless providing MbID.)
"""
super(GetTopTracksInputSet, self)._set_input('Artist', value)
def set_AutoCorrect(self, value):
"""
Set the value of the AutoCorrect input for this Choreo. ((optional, boolean) Transform misspelled artist names into correct artist names. The corrected artist name will be returned in the response. Defaults to 0.)
"""
super(GetTopTracksInputSet, self)._set_input('AutoCorrect', value)
def set_Limit(self, value):
"""
Set the value of the Limit input for this Choreo. ((optional, integer) The number of results to fetch per page. Defaults to 50.)
"""
super(GetTopTracksInputSet, self)._set_input('Limit', value)
def set_MbID(self, value):
"""
Set the value of the MbID input for this Choreo. ((conditional, string) The musicbrainz id for the artist. Required unless providing Artist.)
"""
super(GetTopTracksInputSet, self)._set_input('MbID', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page number to fetch. Defaults to 1.)
"""
super(GetTopTracksInputSet, self)._set_input('Page', value)
class GetTopTracksResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetTopTracks Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        """Parse a JSON string into Python objects.

        NOTE(review): the parameter name `str` shadows the builtin; kept
        for interface compatibility.
        """
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Last.fm.)
        """
        return self._output.get('Response', None)
class GetTopTracksChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronous GetTopTracks Choreo run."""
    def _make_result_set(self, response, path):
        """Wrap the raw response in a GetTopTracksResultSet."""
        return GetTopTracksResultSet(response, path)
| 39.796296 | 221 | 0.676826 | true | true | |
1c460e435bc0e519d5da56e295c2516fae50f58a | 2,381 | py | Python | pgmpy/exceptions/Exceptions.py | NunoEdgarGFlowHub/pgmpy | ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce | [
"MIT"
] | 1 | 2016-08-27T18:30:57.000Z | 2016-08-27T18:30:57.000Z | pgmpy/exceptions/Exceptions.py | NunoEdgarGFlowHub/pgmpy | ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce | [
"MIT"
] | null | null | null | pgmpy/exceptions/Exceptions.py | NunoEdgarGFlowHub/pgmpy | ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce | [
"MIT"
] | 1 | 2016-08-27T18:31:00.000Z | 2016-08-27T18:31:00.000Z | #!/usr/bin/env python3
"""Contains all the user-defined exceptions created for PgmPy"""
class MissingParentsError(Exception):
    """Raised when required parent nodes are absent."""

    def __init__(self, *missing):
        self.missing = missing

    def __str__(self):
        message = "Parents are missing: " + str(self.missing)
        return repr(message)
class ExtraParentsError(Exception):
    """Raised when nodes that are not parents were supplied."""

    def __init__(self, *extra):
        self.extra = extra

    def __str__(self):
        message = "Following are not parents: " + str(self.extra)
        return repr(message)
class MissingStatesError(Exception):
    """Raised when required states are absent."""

    def __init__(self, *missing):
        self.missing = missing

    def __str__(self):
        message = "States are missing: " + str(self.missing)
        return repr(message)
class ExtraStatesError(Exception):
    """Raised when values that are not valid states were supplied."""

    def __init__(self, *extra):
        self.extra = extra

    def __str__(self):
        message = "Following are not states: " + str(self.extra)
        return repr(message)
class SelfLoopError(Exception):
    """Raised for edges that would loop a node back to itself."""

    def __init__(self, *extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
class CycleError(Exception):
    """Raised when an operation would introduce a cycle."""

    def __init__(self, *extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
class StateError(Exception):
    """Raised for invalid state values."""

    def __init__(self, *extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
class NodeNotFoundError(Exception):
    """Raised when a referenced node does not exist."""

    def __init__(self, *extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
class ScopeError(Exception):
    """Raised for invalid scopes."""

    def __init__(self, extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
class SizeError(Exception):
    """Raised for mismatched sizes."""

    def __init__(self, extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
class CardinalityError(Exception):
    """Raised for invalid or mismatched variable cardinalities."""

    def __init__(self, extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
class RequiredError(Exception):
    """Raised when a required value is missing."""

    def __init__(self, extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
class ModelError(Exception):
    """Raised for invalid model configurations."""

    def __init__(self, extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
class InvalidValueError(Exception):
    """Raised for values that are not permitted."""

    def __init__(self, extra):
        self.extra = extra

    def __str__(self):
        details = str(self.extra)
        return repr(details)
| 20.704348 | 68 | 0.642167 |
class MissingParentsError(Exception):
def __init__(self, *missing):
self.missing = missing
def __str__(self):
return repr("Parents are missing: " + str(self.missing))
class ExtraParentsError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr("Following are not parents: " + str(self.extra))
class MissingStatesError(Exception):
def __init__(self, *missing):
self.missing = missing
def __str__(self):
return repr("States are missing: " + str(self.missing))
class ExtraStatesError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr("Following are not states: " + str(self.extra))
class SelfLoopError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class CycleError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class StateError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class NodeNotFoundError(Exception):
def __init__(self, *extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class ScopeError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class SizeError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class CardinalityError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class RequiredError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class ModelError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
class InvalidValueError(Exception):
def __init__(self, extra):
self.extra = extra
def __str__(self):
return repr(str(self.extra))
| true | true |
1c460e9948c0b105e16e3c6be296155958f589a9 | 2,555 | py | Python | only_common.py | taotaotao3/only_common | 7dd3700d4bf3935c193b0b6f38a0dafa750ad01c | [
"MIT"
] | null | null | null | only_common.py | taotaotao3/only_common | 7dd3700d4bf3935c193b0b6f38a0dafa750ad01c | [
"MIT"
] | null | null | null | only_common.py | taotaotao3/only_common | 7dd3700d4bf3935c193b0b6f38a0dafa750ad01c | [
"MIT"
] | null | null | null | import sys
import io
import csv
import pprint
import pandas as pd
import pdb
def excommon(arg_1 = 'a.csv', arg_2 = 'b.csv', arg_3 = 'shift-jis'):
    """Print the characters common to the first cells of two CSV files.

    Reads cell (0, 0) of each file as a character sequence and repeatedly
    deletes mismatching characters from both sequences until they agree,
    then prints the surviving characters.

    Args:
        arg_1: path to the first CSV file.
        arg_2: path to the second CSV file.
        arg_3: encoding used to read both files.
    """
    print('sys.argv[1]:', arg_1)
    print('sys.argv[2]:', arg_2)
    print('sys.argv[3]:', arg_3)
    # First cell of each file, exploded into a list of characters.
    df_a = pd.read_csv(arg_1, encoding=arg_3, header=None)
    list_a = []
    list_a = list(df_a.loc[0][0])
    df_b = pd.read_csv(arg_2, encoding=arg_3, header=None)
    list_b = []
    list_b = list(df_b.loc[0][0])
    after_content = ""
    after_content2 = ""
    # flag_last == "1" signals that the reduction loop below must stop.
    flag_last = "0"
    def duplicate_delete_csv(content, content2, after_content, after_content2, flag_last):
        # One reduction step: scan both sequences in lockstep and either
        # stop (last characters match / one sequence is exhausted) or, at
        # the first mismatch, delete ahead in whichever sequence allows the
        # two to re-align, returning the shortened pair.
        after_content = content
        after_content2 = content2
        for i in range(len(content)):
            # content2 exhausted: truncate content to the compared prefix.
            if i > int(len(content2)-1):
                after_content = content[:i]
                flag_last = "1"
                return after_content, after_content2, flag_last
            # Reached the end of content with matching characters: done.
            if len(content) - 1 == i and content[i] == content2[i]:
                flag_last = "1"
                content2 = content
                after_content2 = content2
                after_content = content
                return after_content, after_content2, flag_last
            # Reached the end of content2 with matching characters: done.
            if len(content2) - 1 == i and content[i] == content2[i]:
                flag_last = "1"
                content = content2
                after_content = content
                after_content2 = content2
                return after_content, after_content2, flag_last
            if content[i] != content2[i]:
                # Look ahead in content for the character content2 expects;
                # if found, drop the intervening run from content.
                for num in range(len(content) - i):
                    if content2[i] == content[i+num]:
                        after_content = content[:i] + content[(i+num):]
                        if i == len(content2) - 1:
                            flag_last = "1"
                            after_content = content2[:i+1]
                            after_content2 = content2[:i+1]
                        return after_content, after_content2, flag_last
                # Not found in content: drop the offending char from content2.
                after_content2 = content2[:i] + content2[i+1:]
                if i == len(content2) - 1:
                    flag_last = "1"
                    after_content = content2[:i]
                    after_content2 = content2[:i]
                return after_content, after_content2, flag_last
    # Iterate reduction steps until both sequences are identical or a step
    # reports completion via flag_last.
    while list_a != list_b:
        list_a, list_b, flag_last = duplicate_delete_csv(list_a, list_b, after_content, after_content2, flag_last)
        if flag_last == "1":
            break
    StrA = "".join(list_a)
    print('Only common parts:', StrA)
    # NOTE(review): `sys.exit` is referenced but never called, so this line
    # is a no-op; use sys.exit() if terminating the process was intended.
    sys.exit
| 37.573529 | 115 | 0.535812 | import sys
import io
import csv
import pprint
import pandas as pd
import pdb
def excommon(arg_1 = 'a.csv', arg_2 = 'b.csv', arg_3 = 'shift-jis'):
print('sys.argv[1]:', arg_1)
print('sys.argv[2]:', arg_2)
print('sys.argv[3]:', arg_3)
df_a = pd.read_csv(arg_1, encoding=arg_3, header=None)
list_a = []
list_a = list(df_a.loc[0][0])
df_b = pd.read_csv(arg_2, encoding=arg_3, header=None)
list_b = []
list_b = list(df_b.loc[0][0])
after_content = ""
after_content2 = ""
flag_last = "0"
def duplicate_delete_csv(content, content2, after_content, after_content2, flag_last):
after_content = content
after_content2 = content2
for i in range(len(content)):
if i > int(len(content2)-1):
after_content = content[:i]
flag_last = "1"
return after_content, after_content2, flag_last
if len(content) - 1 == i and content[i] == content2[i]:
flag_last = "1"
content2 = content
after_content2 = content2
after_content = content
return after_content, after_content2, flag_last
if len(content2) - 1 == i and content[i] == content2[i]:
flag_last = "1"
content = content2
after_content = content
after_content2 = content2
return after_content, after_content2, flag_last
if content[i] != content2[i]:
for num in range(len(content) - i):
if content2[i] == content[i+num]:
after_content = content[:i] + content[(i+num):]
if i == len(content2) - 1:
flag_last = "1"
after_content = content2[:i+1]
after_content2 = content2[:i+1]
return after_content, after_content2, flag_last
after_content2 = content2[:i] + content2[i+1:]
if i == len(content2) - 1:
flag_last = "1"
after_content = content2[:i]
after_content2 = content2[:i]
return after_content, after_content2, flag_last
while list_a != list_b:
list_a, list_b, flag_last = duplicate_delete_csv(list_a, list_b, after_content, after_content2, flag_last)
if flag_last == "1":
break
StrA = "".join(list_a)
print('Only common parts:', StrA)
sys.exit
| true | true |
1c460f108d2d697a791df8a9c61f73dfc9837a9b | 2,840 | py | Python | test/functional/test_framework/address.py | IDC-Group/VHKD | 0256ddf1477439ebc84e97132d3673aa61c39b73 | [
"MIT"
] | 3 | 2018-06-23T10:04:45.000Z | 2018-06-25T02:22:01.000Z | test/functional/test_framework/address.py | IDC-Group/VHKD | 0256ddf1477439ebc84e97132d3673aa61c39b73 | [
"MIT"
] | null | null | null | test/functional/test_framework/address.py | IDC-Group/VHKD | 0256ddf1477439ebc84e97132d3673aa61c39b73 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016 The vhkdCoin Core vhkd
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Encode and decode BASE58, P2PKH and P2SH addresses."""
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import bytes_to_hex_str, hex_str_to_bytes
from . import segwit_addr
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
    """Base58Check-encode payload `b` with a one-byte `version` prefix.

    The payload is prefixed with the version byte, a 4-byte double-SHA256
    checksum is appended, the whole value is converted to base58, and each
    leading zero byte is preserved as a leading '1' character.
    """
    result = ''
    str = bytes_to_hex_str(b)
    str = bytes_to_hex_str(chr(version).encode('latin-1')) + str
    checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str)))
    # Append the first 4 checksum bytes (8 hex characters).
    str += checksum[:8]
    value = int('0x'+str,0)
    while value > 0:
        result = chars[value % 58] + result
        value //= 58
    # Each leading zero byte maps to the base58 digit '1' (chars[0]).
    while (str[:2] == '00'):
        result = chars[0] + result
        str = str[2:]
    return result
# TODO: def base58_decode
def keyhash_to_p2pkh(hash, main = False):
assert (len(hash) == 20)
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main = False):
assert (len(hash) == 20)
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main = False):
    """Encode a segwit witness `program` as a bech32 address.

    `version` is the witness version (0-16); version-0 programs must be
    20 (P2WPKH) or 32 (P2WSH) bytes. The human-readable part is "bc" for
    mainnet and "bcrt" for regtest.
    """
    if (type(program) is str):
        program = hex_str_to_bytes(program)
    assert 0 <= version <= 16
    assert 2 <= len(program) <= 40
    assert version > 0 or len(program) in [20, 32]
    return segwit_addr.encode("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main = False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main = False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
    """Normalize `key` to bytes and validate it as a public key.

    Accepts a hex string or bytes. Returns the key as bytes if it is
    33 (compressed) or 65 (uncompressed) bytes long; otherwise raises
    AssertionError.
    """
    if isinstance(key, str):
        key = hex_str_to_bytes(key)  # Assuming this is hex string
    if isinstance(key, bytes) and len(key) in (33, 65):
        return key
    # Raise explicitly: the original bare `assert(False)` is stripped under
    # python -O, which would make invalid keys silently return None.
    raise AssertionError("invalid pubkey: %r" % (key,))
def check_script(script):
    """Normalize `script` to bytes/CScript and validate its type.

    Accepts a hex string, bytes, or CScript. Returns the script as
    bytes/CScript; raises AssertionError for any other type.
    """
    if isinstance(script, str):
        script = hex_str_to_bytes(script)  # Assuming this is hex string
    # Short-circuit on bytes first so the CScript name is only needed for
    # actual CScript inputs (matches the original evaluation order).
    if isinstance(script, bytes) or isinstance(script, CScript):
        return script
    # Raise explicitly: the original bare `assert(False)` is stripped under
    # python -O, which would make invalid scripts silently return None.
    raise AssertionError("invalid script: %r" % (script,))
| 32.272727 | 73 | 0.68662 |
from .script import hash256, hash160, sha256, CScript, OP_0
from .util import bytes_to_hex_str, hex_str_to_bytes
from . import segwit_addr
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def byte_to_base58(b, version):
result = ''
str = bytes_to_hex_str(b)
str = bytes_to_hex_str(chr(version).encode('latin-1')) + str
checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str)))
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
def keyhash_to_p2pkh(hash, main = False):
assert (len(hash) == 20)
version = 0 if main else 111
return byte_to_base58(hash, version)
def scripthash_to_p2sh(hash, main = False):
assert (len(hash) == 20)
version = 5 if main else 196
return byte_to_base58(hash, version)
def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)
def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)
def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)
def program_to_witness(version, program, main = False):
if (type(program) is str):
program = hex_str_to_bytes(program)
assert 0 <= version <= 16
assert 2 <= len(program) <= 40
assert version > 0 or len(program) in [20, 32]
return segwit_addr.encode("bc" if main else "bcrt", version, program)
def script_to_p2wsh(script, main = False):
script = check_script(script)
return program_to_witness(0, sha256(script), main)
def key_to_p2wpkh(key, main = False):
key = check_key(key)
return program_to_witness(0, hash160(key), main)
def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key)
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert(False)
def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script)
if (type(script) is bytes or type(script) is CScript):
return script
assert(False)
| true | true |
1c460f4074ead61f00745adb8067544b72ddcdf8 | 7,593 | py | Python | tensor2tensor/rl/envs/simulated_batch_env.py | akshitj1/tensor2tensor | a76b0f0afe24c966e26d0112356eb66f5a8a37aa | [
"Apache-2.0"
] | 1 | 2022-03-25T03:07:28.000Z | 2022-03-25T03:07:28.000Z | tensor2tensor/rl/envs/simulated_batch_env.py | akshitj1/tensor2tensor | a76b0f0afe24c966e26d0112356eb66f5a8a37aa | [
"Apache-2.0"
] | 1 | 2022-01-05T06:08:00.000Z | 2022-01-05T06:08:29.000Z | tensor2tensor/rl/envs/simulated_batch_env.py | akshitj1/tensor2tensor | a76b0f0afe24c966e26d0112356eb66f5a8a37aa | [
"Apache-2.0"
] | 1 | 2021-07-15T07:25:08.000Z | 2021-07-15T07:25:08.000Z | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batch of environments inside the TensorFlow graph."""
# The code was based on Danijar Hafner's code from tf.agents:
# https://github.com/tensorflow/agents/blob/master/agents/tools/in_graph_batch_env.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_layers
from tensor2tensor.rl.envs import in_graph_batch_env
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
class HistoryBuffer(object):
  """FIFO buffer of the most recent `length` frames, held in a tf.Variable."""
  def __init__(self, input_dataset, length):
    # Batches of `length` frames from the dataset seed and re-seed the buffer.
    self.input_data_iterator = (
        input_dataset.batch(length).make_one_shot_iterator())
    self.length = length
    initial_frames = self.get_initial_observations()
    initial_shape = [length] + common_layers.shape_list(initial_frames)[1:]
    self._history_buff = tf.Variable(tf.zeros(initial_shape, tf.float32),
                                     trainable=False)
  def get_initial_observations(self):
    """Pull the next batch of seed frames from the dataset, cast to float32."""
    return tf.cast(self.input_data_iterator.get_next(), tf.float32)
  def get_all_elements(self):
    """Return the current contents of the buffer."""
    return self._history_buff.read_value()
  def move_by_one_element(self, element):
    """Drop the oldest frame, append `element`, and return the new buffer."""
    last_removed = self.get_all_elements()[:, 1:, ...]
    element = tf.expand_dims(element, dim=1)
    moved = tf.concat([last_removed, element], axis=1)
    # Control dependencies serialize: compute `moved`, assign it, then read.
    with tf.control_dependencies([moved]):
      with tf.control_dependencies([self._history_buff.assign(moved)]):
        return self._history_buff.read_value()
  def reset(self, indices):
    """Re-seed the buffer rows at `indices` with fresh initial frames."""
    initial_frames = tf.gather(self.get_initial_observations(), indices)
    scatter_op = tf.scatter_update(self._history_buff, indices, initial_frames)
    with tf.control_dependencies([scatter_op]):
      return self._history_buff.read_value()
def compute_uncertainty_reward(logits, predictions):
  """Uncertainty reward based on logits.

  For each predicted symbol, takes the negative log-probability the model
  assigned to it, thresholds values below 0.02 to zero, sums over all
  non-batch dimensions, and scales the sum down by 10.

  Args:
    logits: prediction logits whose last dimension is the vocabulary.
    predictions: indices of the predicted symbols.

  Returns:
    A per-batch-element tensor of uncertainty rewards.
  """
  # TODO(rsepassi): Add support for L1/L2 loss models. Current code only
  # works for softmax models.
  vocab_size = logits.shape[-1]
  assert vocab_size > 1
  log_probs = common_layers.log_prob_from_logits(logits)
  max_log_probs = common_layers.index_last_dim_with_indices(log_probs,
                                                            predictions)
  # Threshold
  neg_log_prob = tf.nn.relu(-max_log_probs - 0.02)
  # Sum across all but the batch dimension
  reduce_dims = list(range(len(neg_log_prob.shape)))[1:]
  summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims)
  return summed / 10
class SimulatedBatchEnv(in_graph_batch_env.InGraphBatchEnv):
  """Batch of environments inside the TensorFlow graph.
  The batch of environments will be stepped and reset inside of the graph using
  a tf.py_func(). The current batch of observations, actions, rewards, and done
  flags are held in according variables.
  """
  def __init__(self, environment_lambda, length, problem,
               simulation_random_starts=False, intrinsic_reward_scale=0.):
    """Batch of environments inside the TensorFlow graph."""
    self.length = length
    self._min_reward = problem.min_reward
    self._num_frames = problem.num_input_frames
    self._intrinsic_reward_scale = intrinsic_reward_scale
    # A real env instance is only used to copy the action-space metadata.
    initialization_env = environment_lambda()
    hparams = trainer_lib.create_hparams(
        FLAGS.hparams_set, problem_name=FLAGS.problem)
    hparams.force_full_predict = True
    # World model run in PREDICT mode; it replaces the real environment.
    self._model = registry.model(FLAGS.model)(
        hparams, tf.estimator.ModeKeys.PREDICT)
    self.action_space = initialization_env.action_space
    self.action_shape = list(initialization_env.action_space.shape)
    self.action_dtype = tf.int32
    # Random starts draw shuffled frames from the training data; otherwise a
    # single fixed example is reused for every reset.
    if simulation_random_starts:
      dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, FLAGS.data_dir,
                                shuffle_files=True, hparams=hparams)
      dataset = dataset.shuffle(buffer_size=100)
    else:
      dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, FLAGS.data_dir,
                                shuffle_files=False, hparams=hparams).take(1)
    dataset = dataset.map(lambda x: x["inputs"]).repeat()
    self.history_buffer = HistoryBuffer(dataset, self.length)
    shape = (self.length, problem.frame_height, problem.frame_width,
             problem.num_channels)
    self._observ = tf.Variable(tf.zeros(shape, tf.float32), trainable=False)
  def __len__(self):
    """Number of combined environments."""
    return self.length
  def simulate(self, action):
    """Step all environments with `action`; returns (reward, done) tensors."""
    with tf.name_scope("environment/simulate"):
      # The same action is tiled across the model's input-frame dimension.
      actions = tf.concat([tf.expand_dims(action, axis=1)] * self._num_frames,
                          axis=1)
      history = self.history_buffer.get_all_elements()
      with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        model_output = self._model.infer(
            {"inputs": history, "input_action": actions})
      observ = tf.to_float(tf.squeeze(model_output["targets"], axis=1))
      reward = tf.to_float(model_output["target_reward"])
      reward = tf.reshape(reward, shape=(self.length,)) + self._min_reward
      if self._intrinsic_reward_scale:
        # Use the model's uncertainty about its prediction as an intrinsic
        # reward. The uncertainty is measured by the log probability of the
        # predicted pixel value.
        if "targets_logits" not in model_output:
          raise ValueError("The use of intrinsic rewards requires access to "
                           "the logits. Ensure that model.infer returns "
                           "'targets_logits'")
        uncertainty_reward = compute_uncertainty_reward(
            model_output["targets_logits"], model_output["targets"])
        uncertainty_reward = tf.minimum(
            1., self._intrinsic_reward_scale * uncertainty_reward)
        uncertainty_reward = tf.Print(uncertainty_reward, [uncertainty_reward],
                                      message="uncertainty_reward", first_n=1,
                                      summarize=8)
        reward += uncertainty_reward
      # The simulated environment never terminates episodes on its own.
      done = tf.constant(False, tf.bool, shape=(self.length,))
      with tf.control_dependencies([observ]):
        with tf.control_dependencies(
            [self._observ.assign(observ),
             self.history_buffer.move_by_one_element(observ)]):
          return tf.identity(reward), tf.identity(done)
  def _reset_non_empty(self, indices):
    """Reset the batch of environments.
    Args:
      indices: The batch indices of the environments to reset; defaults to all.
    Returns:
      Batch tensor of the new observations.
    """
    with tf.control_dependencies([self.history_buffer.reset(indices)]):
      with tf.control_dependencies([self._observ.assign(
          self.history_buffer.get_all_elements()[:, -1, ...])]):
        return tf.identity(self._observ.read_value())
  @property
  def observ(self):
    """Access the variable holding the current observation."""
    return tf.identity(self._observ)
| 40.388298 | 85 | 0.703148 |
# https://github.com/tensorflow/agents/blob/master/agents/tools/in_graph_batch_env.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_layers
from tensor2tensor.rl.envs import in_graph_batch_env
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
class HistoryBuffer(object):
  """Graph-mode rolling buffer of the most recent frames per environment.

  The buffer is a TF variable of shape [batch, history, ...] seeded from
  `input_dataset` (assumes each dataset element is one per-example frame
  history — TODO confirm against the caller).
  """

  def __init__(self, input_dataset, length):
    # `length` is both the batch size drawn from the dataset and the leading
    # dimension of the buffer variable.
    self.input_data_iterator = (
        input_dataset.batch(length).make_one_shot_iterator())
    self.length = length
    initial_frames = self.get_initial_observations()
    initial_shape = [length] + common_layers.shape_list(initial_frames)[1:]
    self._history_buff = tf.Variable(tf.zeros(initial_shape, tf.float32),
                                     trainable=False)

  def get_initial_observations(self):
    """Draw the next batch of initial frames (advances the iterator)."""
    return tf.cast(self.input_data_iterator.get_next(), tf.float32)

  def get_all_elements(self):
    """Return the current buffer contents."""
    return self._history_buff.read_value()

  def move_by_one_element(self, element):
    """Drop the oldest frame (axis 1) and append `element` as the newest.

    Returns the updated buffer; the assign is ordered via control deps.
    NOTE(review): `dim=` is the deprecated alias of `axis=` in
    tf.expand_dims — works but emits warnings on newer TF 1.x.
    """
    last_removed = self.get_all_elements()[:, 1:, ...]
    element = tf.expand_dims(element, dim=1)
    moved = tf.concat([last_removed, element], axis=1)
    with tf.control_dependencies([moved]):
      with tf.control_dependencies([self._history_buff.assign(moved)]):
        return self._history_buff.read_value()

  def reset(self, indices):
    """Re-seed the buffer rows at `indices` with fresh initial frames.

    Note: this consumes new elements from the input dataset.
    """
    initial_frames = tf.gather(self.get_initial_observations(), indices)
    scatter_op = tf.scatter_update(self._history_buff, indices, initial_frames)
    with tf.control_dependencies([scatter_op]):
      return self._history_buff.read_value()
def compute_uncertainty_reward(logits, predictions):
  """Turn the model's (lack of) confidence in `predictions` into a reward.

  The reward is the thresholded negative log-probability the softmax model
  assigns to its own predictions, summed over every non-batch axis and
  scaled down by a factor of 10.

  TODO(rsepassi): Add support for L1/L2 loss models. Current code only
  works for softmax models.
  """
  assert logits.shape[-1] > 1  # softmax models only
  log_probs = common_layers.log_prob_from_logits(logits)
  predicted_log_probs = common_layers.index_last_dim_with_indices(
      log_probs, predictions)
  # Clip to zero anything the model assigns probability > e^-0.02 (~98%).
  thresholded = tf.nn.relu(-predicted_log_probs - 0.02)
  non_batch_axes = list(range(1, len(thresholded.shape)))
  return tf.reduce_sum(thresholded, axis=non_batch_axes) / 10
class SimulatedBatchEnv(in_graph_batch_env.InGraphBatchEnv):
  """Batch of environments simulated by a learned next-frame model.

  Instead of stepping real environments, `simulate` feeds the frame history
  to a prediction model and treats its predicted frame/reward as the
  environment transition.
  """

  def __init__(self, environment_lambda, length, problem,
               simulation_random_starts=False, intrinsic_reward_scale=0.):
    """Build the in-graph batch env.

    Args:
      environment_lambda: zero-arg callable returning a real env; used only
        to copy the action space.
      length: number of environments in the batch.
      problem: T2T problem providing frame sizes, min_reward and the dataset.
      simulation_random_starts: if True, sample random initial histories
        from the shuffled training set; otherwise reuse a single example.
      intrinsic_reward_scale: if non-zero, add a model-uncertainty bonus.
    """
    self.length = length
    self._min_reward = problem.min_reward
    self._num_frames = problem.num_input_frames
    self._intrinsic_reward_scale = intrinsic_reward_scale
    initialization_env = environment_lambda()
    hparams = trainer_lib.create_hparams(
        FLAGS.hparams_set, problem_name=FLAGS.problem)
    hparams.force_full_predict = True
    self._model = registry.model(FLAGS.model)(
        hparams, tf.estimator.ModeKeys.PREDICT)
    # Mirror the real environment's action space.
    self.action_space = initialization_env.action_space
    self.action_shape = list(initialization_env.action_space.shape)
    self.action_dtype = tf.int32
    if simulation_random_starts:
      dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, FLAGS.data_dir,
                                shuffle_files=True, hparams=hparams)
      dataset = dataset.shuffle(buffer_size=100)
    else:
      # Deterministic starts: every reset replays the same first example.
      dataset = problem.dataset(tf.estimator.ModeKeys.TRAIN, FLAGS.data_dir,
                                shuffle_files=False, hparams=hparams).take(1)
    dataset = dataset.map(lambda x: x["inputs"]).repeat()
    self.history_buffer = HistoryBuffer(dataset, self.length)
    shape = (self.length, problem.frame_height, problem.frame_width,
             problem.num_channels)
    self._observ = tf.Variable(tf.zeros(shape, tf.float32), trainable=False)

  def __len__(self):
    """Number of environments in the batch."""
    return self.length

  def simulate(self, action):
    """Step every environment with `action` via the learned model.

    Returns:
      (reward, done) tensors of shape (length,); `done` is always False.
    """
    with tf.name_scope("environment/simulate"):
      # Replicate the action once per input frame expected by the model.
      actions = tf.concat([tf.expand_dims(action, axis=1)] * self._num_frames,
                          axis=1)
      history = self.history_buffer.get_all_elements()
      with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        model_output = self._model.infer(
            {"inputs": history, "input_action": actions})
      observ = tf.to_float(tf.squeeze(model_output["targets"], axis=1))
      reward = tf.to_float(model_output["target_reward"])
      # Rewards are stored shifted by -min_reward in the dataset; undo that.
      reward = tf.reshape(reward, shape=(self.length,)) + self._min_reward
      if self._intrinsic_reward_scale:
        # Use the model's uncertainty about its prediction as an intrinsic
        # reward (capped at 1).
        if "targets_logits" not in model_output:
          raise ValueError("The use of intrinsic rewards requires access to "
                           "the logits. Ensure that model.infer returns "
                           "'targets_logits'")
        uncertainty_reward = compute_uncertainty_reward(
            model_output["targets_logits"], model_output["targets"])
        uncertainty_reward = tf.minimum(
            1., self._intrinsic_reward_scale * uncertainty_reward)
        uncertainty_reward = tf.Print(uncertainty_reward, [uncertainty_reward],
                                      message="uncertainty_reward", first_n=1,
                                      summarize=8)
        reward += uncertainty_reward
      # The simulated env never terminates on its own.
      done = tf.constant(False, tf.bool, shape=(self.length,))
      # Order the state updates (current observation + history) before
      # returning, so reading reward/done forces the transition.
      with tf.control_dependencies([observ]):
        with tf.control_dependencies(
            [self._observ.assign(observ),
             self.history_buffer.move_by_one_element(observ)]):
          return tf.identity(reward), tf.identity(done)

  def _reset_non_empty(self, indices):
    """Reset the environments at `indices`; returns the new observations."""
    with tf.control_dependencies([self.history_buffer.reset(indices)]):
      with tf.control_dependencies([self._observ.assign(
          self.history_buffer.get_all_elements()[:, -1, ...])]):
        return tf.identity(self._observ.read_value())

  @property
  def observ(self):
    """Access the variable holding the current observation."""
    return tf.identity(self._observ)
| true | true |
1c461034d0e13519aa62b7aed184a164629d184b | 4,234 | py | Python | scripts/py_featextr_server/wordembed_cosine_server.py | MokriyYuriy/FlexNeuART | 49f13e3f9f0b0ea1399ea558436caaedd5233f5c | [
"Apache-2.0"
] | null | null | null | scripts/py_featextr_server/wordembed_cosine_server.py | MokriyYuriy/FlexNeuART | 49f13e3f9f0b0ea1399ea558436caaedd5233f5c | [
"Apache-2.0"
] | null | null | null | scripts/py_featextr_server/wordembed_cosine_server.py | MokriyYuriy/FlexNeuART | 49f13e3f9f0b0ea1399ea558436caaedd5233f5c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import argparse
sys.path.append('.')
from scripts.py_featextr_server.base_server import BaseQueryHandler, startQueryServer
import numpy as np
from scripts.py_featextr_server.utils import loadEmbeddings, createEmbedMap, robustCosineSimil
# Exclusive==True means that only one getScores
# function is executed at at time
class CosineSimilQueryHandler(BaseQueryHandler):
    """Scores documents by cosine similarity of tf(-idf)-weighted embedding sums.

    NOTE(review): per the comment above, exclusive=True in the base class
    serializes getScores calls — confirm against BaseQueryHandler.
    """

    def __init__(self, queryEmbedFile, docEmbedFile, exclusive, debugPrint=False, useIDF=True):
        """Load document (and optionally separate query) word embeddings.

        Args:
            queryEmbedFile: path to query-side embeddings, or None to reuse
                the document embeddings for queries.
            docEmbedFile: path to document-side (answer) embeddings.
            exclusive: forwarded to BaseQueryHandler.
            debugPrint: if True, print verbose scoring diagnostics.
            useIDF: if True, weight each word vector by its IDF.
        """
        super().__init__(exclusive)

        self.debugPrint = debugPrint
        self.useIDF = useIDF

        print('Loading answer embeddings from: ' + docEmbedFile)
        answWords, self.answEmbed = loadEmbeddings(docEmbedFile)
        self.answEmbedMap = createEmbedMap(answWords)
        if queryEmbedFile is not None:
            print('Loading query embeddings from: ' + queryEmbedFile)
            queryWords, self.queryEmbed = loadEmbeddings(queryEmbedFile)
            self.queryEmbedMap = createEmbedMap(queryWords)
        else:
            # Share one embedding space between queries and answers.
            self.queryEmbed = self.answEmbed
            self.queryEmbedMap = self.answEmbedMap
        print('Loading is done!')

    def textEntryToStr(self, te):
        """Render a text entry for debug logging (word, IDF, count triples)."""
        arr = []
        if self.debugPrint:
            for winfo in te.entries:
                arr.append('%s %g %d ' % (winfo.word, winfo.IDF, winfo.qty))
        return 'docId=' + te.id + ' ' + ' '.join(arr)

    def createDocEmbed(self, isQuery, textEntry):
        """Sum the entry's word vectors, each scaled by term count (and IDF).

        Out-of-vocabulary words are silently skipped; an entry with no
        in-vocabulary words yields the all-zero vector.
        """
        if isQuery:
            embeds = self.queryEmbed
            embedMap = self.queryEmbedMap
        else:
            embeds = self.answEmbed
            embedMap = self.answEmbedMap

        zerov = np.zeros_like(embeds[0])
        res = zerov

        for winfo in textEntry.entries:
            vectMult = winfo.qty
            if self.useIDF:
                vectMult *= winfo.IDF
            word = winfo.word
            if word in embedMap:
                res += embeds[embedMap[word]] * vectMult

        return res

    # This function overrides the parent class
    def computeScoresFromParsedOverride(self, query, docs):
        """Score each candidate document against the query.

        Returns:
            dict mapping document id to a one-element feature list holding
            the cosine similarity between document and query embeddings.
        """
        if self.debugPrint:
            print('getScores', query.id, self.textEntryToStr(query))
        ret = {}
        queryEmbed = self.createDocEmbed(True, query)
        if self.debugPrint:
            print(queryEmbed)
        for d in docs:
            if self.debugPrint:
                print(self.textEntryToStr(d))
            docEmbed = self.createDocEmbed(False, d)
            if self.debugPrint:
                print(docEmbed)
            # Regular cosine deals poorly with all-zero vectors
            simil = robustCosineSimil(docEmbed, queryEmbed)

            # Note that each element must be an array, b/c
            # we can generate more than one feature per document!
            ret[d.id] = [simil]

        return ret
if __name__ == '__main__':
    # CLI entry point: parse embedding/server options and start a
    # multi-threaded query-scoring server.
    parser = argparse.ArgumentParser(description='Serving word-embedding models.')

    parser.add_argument('--query_embed', metavar='query embeddings',
                        default=None, type=str,
                        help='Optional query embeddings file')

    parser.add_argument('--doc_embed', metavar='doc embeddings',
                        required=True, type=str,
                        help='document embeddings file')

    parser.add_argument('--debug_print', action='store_true',
                        help='Provide debug output')

    parser.add_argument('--port', metavar='server port',
                        required=True, type=int,
                        help='Server port')

    parser.add_argument('--host', metavar='server host',
                        default='127.0.0.1', type=str,
                        help='server host addr to bind the port')

    args = parser.parse_args()

    multiThreaded = True
    # The handler itself is non-exclusive; concurrency is handled by the
    # multi-threaded server.
    startQueryServer(args.host, args.port, multiThreaded,
                     CosineSimilQueryHandler(exclusive=False,
                                             queryEmbedFile=args.query_embed,
                                             docEmbedFile=args.doc_embed,
                                             debugPrint=args.debug_print))
| 35.579832 | 95 | 0.593056 |
import sys
import argparse
sys.path.append('.')
from scripts.py_featextr_server.base_server import BaseQueryHandler, startQueryServer
import numpy as np
from scripts.py_featextr_server.utils import loadEmbeddings, createEmbedMap, robustCosineSimil
class CosineSimilQueryHandler(BaseQueryHandler):
def __init__(self, queryEmbedFile, docEmbedFile, exclusive, debugPrint=False, useIDF=True):
super().__init__(exclusive)
self.debugPrint = debugPrint
self.useIDF = useIDF
print('Loading answer embeddings from: ' + docEmbedFile)
answWords, self.answEmbed = loadEmbeddings(docEmbedFile)
self.answEmbedMap = createEmbedMap(answWords)
if queryEmbedFile is not None:
print('Loading query embeddings from: ' + queryEmbedFile)
queryWords, self.queryEmbed = loadEmbeddings(queryEmbedFile)
self.queryEmbedMap = createEmbedMap(queryWords)
else:
self.queryEmbed = self.answEmbed
self.queryEmbedMap = self.answEmbedMap
print('Loading is done!')
def textEntryToStr(self, te):
arr = []
if self.debugPrint:
for winfo in te.entries:
arr.append('%s %g %d ' % (winfo.word, winfo.IDF, winfo.qty))
return 'docId=' + te.id + ' ' + ' '.join(arr)
def createDocEmbed(self, isQuery, textEntry):
if isQuery:
embeds = self.queryEmbed
embedMap = self.queryEmbedMap
else:
embeds = self.answEmbed
embedMap = self.answEmbedMap
zerov = np.zeros_like(embeds[0])
res = zerov
for winfo in textEntry.entries:
vectMult = winfo.qty
if self.useIDF:
vectMult *= winfo.IDF
word = winfo.word
if word in embedMap:
res += embeds[embedMap[word]] * vectMult
return res
def computeScoresFromParsedOverride(self, query, docs):
if self.debugPrint:
print('getScores', query.id, self.textEntryToStr(query))
ret = {}
queryEmbed = self.createDocEmbed(True, query)
if self.debugPrint:
print(queryEmbed)
for d in docs:
if self.debugPrint:
print(self.textEntryToStr(d))
docEmbed = self.createDocEmbed(False, d)
if self.debugPrint:
print(docEmbed)
simil = robustCosineSimil(docEmbed, queryEmbed)
ret[d.id] = [simil]
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Serving word-embedding models.')
parser.add_argument('--query_embed', metavar='query embeddings',
default=None, type=str,
help='Optional query embeddings file')
parser.add_argument('--doc_embed', metavar='doc embeddings',
required=True, type=str,
help='document embeddings file')
parser.add_argument('--debug_print', action='store_true',
help='Provide debug output')
parser.add_argument('--port', metavar='server port',
required=True, type=int,
help='Server port')
parser.add_argument('--host', metavar='server host',
default='127.0.0.1', type=str,
help='server host addr to bind the port')
args = parser.parse_args()
multiThreaded = True
startQueryServer(args.host, args.port, multiThreaded,
CosineSimilQueryHandler(exclusive=False,
queryEmbedFile=args.query_embed,
docEmbedFile=args.doc_embed,
debugPrint=args.debug_print))
| true | true |
1c4610361f88087ecacad48415ecb6f130687e52 | 409 | py | Python | XiuxiuService/AliSDK/top/api/rest/OpenimChatlogsGetRequest.py | nightHearter/XiuxiuService | 281c2d5eef85936edcd0d9ec97c8d165078f444c | [
"MIT"
] | null | null | null | XiuxiuService/AliSDK/top/api/rest/OpenimChatlogsGetRequest.py | nightHearter/XiuxiuService | 281c2d5eef85936edcd0d9ec97c8d165078f444c | [
"MIT"
] | null | null | null | XiuxiuService/AliSDK/top/api/rest/OpenimChatlogsGetRequest.py | nightHearter/XiuxiuService | 281c2d5eef85936edcd0d9ec97c8d165078f444c | [
"MIT"
] | null | null | null | '''
Created by auto_sdk on 2015.06.16
'''
from top.api.base import RestApi
class OpenimChatlogsGetRequest(RestApi):
    """Request wrapper for the TOP API method taobao.openim.chatlogs.get."""

    # Request parameters; all start as None and are filled in by the caller.
    _PARAM_NAMES = ('begin', 'count', 'end', 'next_key', 'user1', 'user2')

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        for param in self._PARAM_NAMES:
            setattr(self, param, None)

    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'taobao.openim.chatlogs.get'
| 24.058824 | 56 | 0.696822 | from top.api.base import RestApi
class OpenimChatlogsGetRequest(RestApi):
    """Auto-generated TOP request for the taobao.openim.chatlogs.get API."""

    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters; populated by the caller before execution.
        self.begin = None
        self.count = None
        self.end = None
        self.next_key = None
        self.user1 = None
        self.user2 = None

    def getapiname(self):
        """Return the TOP API method name for this request."""
        return 'taobao.openim.chatlogs.get'
| true | true |
1c46117a8c4860a623124d64ceca53a37a0253a2 | 4,961 | py | Python | project/Code/video_stabilizer.py | OmerRe/video-processing-methods | 245a89aaa1e774a62da1f043058242841a4f53ee | [
"MIT"
] | 1 | 2022-03-23T13:07:28.000Z | 2022-03-23T13:07:28.000Z | project/Code/video_stabilizer.py | OmerRe/video-processing-methods | 245a89aaa1e774a62da1f043058242841a4f53ee | [
"MIT"
] | null | null | null | project/Code/video_stabilizer.py | OmerRe/video-processing-methods | 245a89aaa1e774a62da1f043058242841a4f53ee | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from Code.utils import fixBorder, convert_to_gray
def stabilize_video(video_frames: list, config: dict) -> list:
    """Create a stabilized copy of the input frame sequence.

    Args:
        video_frames: list of BGR frames to stabilize.
        config: dict of constants; must contain 'video_params' (with
            'n_frames', 'w', 'h') and 'SMOOTHING_RADIUS'.

    Returns:
        list of stabilized frames.
    """
    print("Starting Video Stabilization...")
    # 1) Estimate per-frame homographies, 2) smooth the cumulative motion,
    # 3) warp every frame with its smoothed transform.
    transforms = find_motion_between_frames(config['video_params'], video_frames, config)
    transforms_smooth = calc_smooth_transforms(config, transforms)
    stabilized_frames = apply_smooth_motion_to_frames(config['video_params'], video_frames, transforms_smooth)
    print("Video Stabilization Finished")
    return stabilized_frames
def find_motion_between_frames(video_params: dict, video_frames: list, config: dict) -> np.ndarray:
    """Estimate the homography between every pair of consecutive frames.

    Args:
        video_params: dict with at least 'n_frames'.
        video_frames: list of BGR frames.
        config: configuration dict (unused here; kept for interface
            compatibility with the other pipeline stages).

    Returns:
        np.ndarray of shape (n_frames - 1, 9): one flattened 3x3 homography
        per consecutive frame pair.
    """
    # Pre-define the transformation-store array.
    transforms = np.zeros((video_params['n_frames'] - 1, 9), np.float32)
    # The detector, descriptor and matcher are loop-invariant: build them
    # once instead of re-creating them for every frame (the original
    # rebuilt all three on each iteration).
    detector = cv2.FastFeatureDetector.create()
    orb = cv2.ORB_create()
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    prev_frame_gray = cv2.cvtColor(video_frames[0], cv2.COLOR_BGR2GRAY)

    for frame_idx, current_frame in enumerate(video_frames[1:]):
        current_frame_gray = convert_to_gray(current_frame)
        # FAST keypoints described with ORB, matched by Hamming distance.
        kp1 = detector.detect(prev_frame_gray, None)
        kp2 = detector.detect(current_frame_gray, None)
        kp1, des1 = orb.compute(prev_frame_gray, kp1)
        kp2, des2 = orb.compute(current_frame_gray, kp2)
        matches = sorted(matcher.match(des1, des2), key=lambda m: m.distance)

        prev_frame_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
        curr_frame_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

        # Robustly fit a homography between the matched point sets.
        transform_matrix, _ = cv2.findHomography(prev_frame_pts, curr_frame_pts, cv2.RANSAC, 5.0)
        transforms[frame_idx] = transform_matrix.flatten()

        print(f"Video Stabilizing: calculating transformation for frame: {frame_idx + 1} "
              f"/ {video_params['n_frames'] - 1} - Tracked points: {len(prev_frame_pts)}")
        prev_frame_gray = current_frame_gray

    return transforms
def apply_smooth_motion_to_frames(video_params: dict, video_frames: list, transforms_smooth: np.ndarray) -> list:
    """Warp each frame with its smoothed homography.

    Args:
        video_params: dict with 'n_frames', 'w' and 'h'.
        video_frames: list of input frames.
        transforms_smooth: (n_frames - 1, 9) array of flattened homographies.

    Returns:
        list of stabilized frames, same length as video_frames.

    NOTE(review): the output starts with an unwarped copy of frame 0 and the
    loop then warps frames[0..n-2], so frame 0 contributes twice and the
    last frame is never warped — verify this offset is intentional.
    """
    stabilized_frames = [fixBorder(video_frames[0])]
    # Write n_frames-1 transformed frames
    for frame_idx, current_frame in enumerate(video_frames[:-1]):
        print(f"Video Stabilizing: applying transformation to frame: {frame_idx + 1} "
              f"/ {video_params['n_frames'] - 1}")
        transform_matrix = transforms_smooth[frame_idx].reshape((3, 3))
        # Apply homography wrapping to the given frame
        frame_stabilized = cv2.warpPerspective(current_frame, transform_matrix, (video_params['w'], video_params['h']))
        # Fix border artifacts
        frame_stabilized = fixBorder(frame_stabilized)
        stabilized_frames.append(frame_stabilized)
    return stabilized_frames
def movingAverage(curve: np.ndarray, radius: int) -> np.ndarray:
    """Smooth a 1-D curve with a centered box filter of width 2*radius + 1.

    The curve is edge-padded by `radius` samples on each side so the output
    has the same length as the input.

    Args:
        curve: 1-D array of samples.
        radius: half-width of the averaging window (>= 0).

    Returns:
        Smoothed curve, same length as the input.
    """
    window_size = 2 * radius + 1
    # Uniform averaging kernel.
    kernel = np.ones(window_size) / window_size
    # Pad the boundaries with the edge values.
    curve_pad = np.pad(curve, (radius, radius), 'edge')
    curve_smoothed = np.convolve(curve_pad, kernel, mode='same')
    # Strip the padding. The original used curve_smoothed[radius:-radius],
    # which returns an EMPTY array when radius == 0 ([0:-0] == [0:0]);
    # an explicit end index handles radius == 0 correctly.
    return curve_smoothed[radius:len(curve_smoothed) - radius]
def smooth(trajectory: np.ndarray, config: dict) -> np.ndarray:
    """Return a smoothed copy of the trajectory, one moving average per column.

    The smoothing radius is read from config['SMOOTHING_RADIUS'].
    """
    radius = config['SMOOTHING_RADIUS']
    smoothed = trajectory.copy()
    for col in range(smoothed.shape[1]):
        smoothed[:, col] = movingAverage(trajectory[:, col], radius=radius)
    return smoothed
def calc_smooth_transforms(config: dict, transforms: np.ndarray) -> np.ndarray:
    """Smooth the cumulative motion and map it back onto per-frame transforms."""
    # Cumulative sum turns per-frame motion into an absolute trajectory.
    trajectory = np.cumsum(transforms, axis=0)
    smoothed_trajectory = smooth(trajectory, config)
    # Offset each raw transform by how far smoothing moved the trajectory.
    correction = smoothed_trajectory - trajectory
    return transforms + correction
| 45.513761 | 119 | 0.712961 | import cv2
import numpy as np
from Code.utils import fixBorder, convert_to_gray
def stabilize_video(video_frames: list, config: dict) -> list:
print("Starting Video Stabilization...")
transforms = find_motion_between_frames(config['video_params'], video_frames, config)
transforms_smooth = calc_smooth_transforms(config, transforms)
stabilized_frames = apply_smooth_motion_to_frames(config['video_params'], video_frames, transforms_smooth)
print("Video Stabilization Finished")
return stabilized_frames
def find_motion_between_frames(video_params: dict, video_frames: list, config: dict) -> np.ndarray:
transforms = np.zeros((video_params['n_frames'] - 1, 9), np.float32)
prev_frame_gray = cv2.cvtColor(video_frames[0], cv2.COLOR_BGR2GRAY)
for frame_idx, current_frame in enumerate(video_frames[1:]):
prev_frame_pts = []
curr_frame_pts = []
current_frame_gray = convert_to_gray(current_frame)
detector = cv2.FastFeatureDetector.create()
orb = cv2.ORB_create()
kp1 = detector.detect(prev_frame_gray, None)
kp2 = detector.detect(current_frame_gray, None)
kp1, des1 = orb.compute(prev_frame_gray, kp1)
kp2, des2 = orb.compute(current_frame_gray, kp2)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
prev_frame_pts.append(np.float32([kp1[match.queryIdx].pt for match in matches]).reshape(-1, 1, 2))
curr_frame_pts.append(np.float32([kp2[match.trainIdx].pt for match in matches]).reshape(-1, 1, 2))
prev_frame_pts = np.squeeze(np.array(prev_frame_pts))
curr_frame_pts = np.squeeze(np.array(curr_frame_pts))
transform_matrix, mask = cv2.findHomography(prev_frame_pts, curr_frame_pts, cv2.RANSAC, 5.0)
transforms[frame_idx] = transform_matrix.flatten()
print(f"Video Stabilizing: calculating transformation for frame: {frame_idx + 1} "
f"/ {video_params['n_frames'] - 1} - Tracked points: {len(prev_frame_pts)}")
prev_frame_gray = current_frame_gray
return transforms
def apply_smooth_motion_to_frames(video_params: dict, video_frames: list, transforms_smooth: np.ndarray) -> list:
stabilized_frames = [fixBorder(video_frames[0])]
for frame_idx, current_frame in enumerate(video_frames[:-1]):
print(f"Video Stabilizing: applying transformation to frame: {frame_idx + 1} "
f"/ {video_params['n_frames'] - 1}")
transform_matrix = transforms_smooth[frame_idx].reshape((3, 3))
frame_stabilized = cv2.warpPerspective(current_frame, transform_matrix, (video_params['w'], video_params['h']))
frame_stabilized = fixBorder(frame_stabilized)
stabilized_frames.append(frame_stabilized)
return stabilized_frames
def movingAverage(curve: np.ndarray, radius: int) -> np.ndarray:
    """Smooth a 1-D curve with a centered box filter of width 2*radius + 1.

    NOTE(review): the [radius:-radius] slice returns an empty array when
    radius == 0 ([0:-0] == [0:0]); callers must pass radius >= 1.
    """
    window_size = 2 * radius + 1
    f = np.ones(window_size)/window_size
    curve_pad = np.lib.pad(curve, (radius, radius), 'edge')
    curve_smoothed = np.convolve(curve_pad, f, mode='same')
    curve_smoothed = curve_smoothed[radius:-radius]
    return curve_smoothed
def smooth(trajectory: np.ndarray, config: dict) -> np.ndarray:
smoothed_trajectory = np.copy(trajectory)
for i in range(smoothed_trajectory.shape[1]):
smoothed_trajectory[:, i] = movingAverage(trajectory[:, i], radius=config['SMOOTHING_RADIUS'])
return smoothed_trajectory
def calc_smooth_transforms(config: dict, transforms: np.ndarray) -> np.ndarray:
trajectory = np.cumsum(transforms, axis=0)
smoothed_trajectory = smooth(trajectory, config)
difference = smoothed_trajectory - trajectory
transforms_smooth = transforms + difference
return transforms_smooth
| true | true |
1c4612a1484861de5941c466421c93898e7ec41d | 347 | py | Python | dashboard/main.py | BOJIT/pi-dashboard | 134c3d7b941a470630aceed4e69b8735bcfcebfd | [
"MIT"
] | null | null | null | dashboard/main.py | BOJIT/pi-dashboard | 134c3d7b941a470630aceed4e69b8735bcfcebfd | [
"MIT"
] | null | null | null | dashboard/main.py | BOJIT/pi-dashboard | 134c3d7b941a470630aceed4e69b8735bcfcebfd | [
"MIT"
] | null | null | null | """
Copyright (c)
Author: James Bennion-Pedley
Date: 2021 - present
Licence: MIT
"""
# from dashboard import app
from flask import Blueprint, render_template
from flask_login import login_required, current_user
main = Blueprint('main', __name__)
# Home page
@main.route('/')
@login_required
def index():
return render_template('index.html')
| 16.52381 | 52 | 0.752161 |
from flask import Blueprint, render_template
from flask_login import login_required, current_user
main = Blueprint('main', __name__)
@main.route('/')
@login_required
def index():
return render_template('index.html')
| true | true |
1c461452d26499a8ba2aa4b2b235a47f6a1e796d | 5,474 | py | Python | project/S17-IO-3012/code/bin/benchmark_replicas_import.py | suunni/sp17-i524 | 42dd11b914c03c741dad8a8505c3e091dc6ec412 | [
"Apache-2.0"
] | 2 | 2020-10-30T09:54:25.000Z | 2021-12-14T19:13:18.000Z | project/S17-IO-3012/code/bin/benchmark_replicas_import.py | cloudmesh/sp17-i524 | 42dd11b914c03c741dad8a8505c3e091dc6ec412 | [
"Apache-2.0"
] | 98 | 2017-01-19T04:24:02.000Z | 2017-10-27T11:30:50.000Z | project/S17-IO-3012/code/bin/benchmark_replicas_import.py | cloudmesh/sp17-i524 | 42dd11b914c03c741dad8a8505c3e091dc6ec412 | [
"Apache-2.0"
] | 294 | 2017-01-09T13:18:39.000Z | 2018-07-13T01:32:24.000Z | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
    """Return the first command-line argument (the benchmark CSV file name).

    Exits with a message when no argument was supplied.
    """
    try:
        return sys.argv[1]
    except IndexError:
        # Narrowed from a bare `except:` — only a missing argument should
        # trigger the usage message, not e.g. KeyboardInterrupt.
        print ('Must enter file name as parameter')
        exit()
def read_file(filename):
    """Read a CSV file into a pandas DataFrame.

    Args:
        filename: path of the CSV file to read.

    Returns:
        pandas.DataFrame with the file contents; exits the program with a
        message when the file cannot be read.
    """
    try:
        return pd.read_csv(filename)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print ('Error retrieving file')
        exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
    """Filter benchmark rows and average the metrics per cluster topology.

    Args:
        benchmark_df: pandas.DataFrame of raw benchmark observations.
        cloud: cloud name to keep, or 'X' for no filter.
        config_replicas, mongos_instances, shard_replicas,
        shards_per_replica: topology values to keep, or 'X' for no filter.

    Returns:
        DataFrame with one row per distinct topology (numeric columns
        averaged), sorted by shard_replicas ascending.
    """
    # Only mongo 3.4 runs on the large test set are charted.
    benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]
    benchmark_df = benchmark_df[benchmark_df.test_size == "large"]

    if cloud != 'X':
        benchmark_df = benchmark_df[benchmark_df.cloud == cloud]
    if config_replicas != 'X':
        benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
    if mongos_instances != 'X':
        benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
    if shard_replicas != 'X':
        benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
    if shards_per_replica != 'X':
        benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]

    # Average every numeric metric within each distinct topology.
    # numeric_only=True reproduces the old pandas<1.0 behaviour of silently
    # dropping non-numeric columns; pandas>=2.0 would raise on the string
    # `test_size` column otherwise.
    benchmark_df = benchmark_df.groupby(
        ['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'],
        as_index=False).mean(numeric_only=True)

    return benchmark_df.sort_values(by='shard_replicas', ascending=True)
def make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream):
    """Format and save a line chart of import runtime vs. replication factor.

    @param1: import_seconds_kilo Array with import_seconds from kilo
    @type: numpy array
    @param2: replicas_kilo Array with replicas from kilo
    @type: numpy array
    @param3: import_seconds_chameleon Array with import_seconds from chameleon
    @type: numpy array
    @param4: replicas_chameleon Array with replicas from chameleon
    @type: numpy array
    @param5: import_seconds_jetstream Array with import_seconds from jetstream
    @type: numpy array
    @param6: replicas_jetstream Array with replicas from jetstream
    @type: numpy array
    """
    fig = plt.figure()
    #plt.title('Average Mongoimport Runtime by Shard Replication Factor')
    plt.ylabel('Runtime in Seconds')
    plt.xlabel('Degree of Replication Per Set')

    # Make the chart
    plt.plot(replicas_kilo, import_seconds_kilo, label='Kilo Cloud')
    plt.plot(replicas_chameleon, import_seconds_chameleon, label='Chameleon Cloud')
    plt.plot(replicas_jetstream, import_seconds_jetstream, label='Jetstream Cloud')

    # Anchor the y-axis at zero but let the upper limit auto-scale.
    # NOTE(review): the `ymin` keyword was removed in matplotlib 3.0 in
    # favour of `bottom` — confirm the pinned matplotlib version.
    plt.ylim(ymin=0)
    plt.legend(loc='best')

    # Show the chart (for testing)
    # plt.show()

    # Save the chart
    fig.savefig('../report/replica_import.png')
# Run the program by calling the functions
if __name__ == "__main__":
    filename = get_parm()
    benchmark_df = read_file(filename)

    # Extract the (runtime, replication-factor) series for each cloud.
    # Column index 6 holds the mongoimport runtime, index 4 the
    # shards_per_replica group key (positions per select_data's output).
    # The triplicated per-cloud boilerplate of the original is collapsed
    # into one loop; DataFrame.as_matrix() (removed in pandas 1.0) is
    # replaced with the equivalent column-selection + .values.
    series = {}
    for cloud in ('kilo', 'chameleon', 'jetstream'):
        select_df = select_data(benchmark_df, cloud, 1, 1, 1, 'X')
        import_seconds = select_df[[select_df.columns[6]]].values
        replicas = select_df[[select_df.columns[4]]].values
        series[cloud] = (import_seconds, replicas)

    make_figure(series['kilo'][0], series['kilo'][1],
                series['chameleon'][0], series['chameleon'][1],
                series['jetstream'][0], series['jetstream'][1])
| 38.013889 | 144 | 0.735842 | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
if cloud != 'X':
benchmark_df = benchmark_df[benchmark_df.cloud == cloud]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
benchmark_df = benchmark_df.groupby(
['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream):
fig = plt.figure()
plt.ylabel('Runtime in Seconds')
plt.xlabel('Degree of Replication Per Set')
plt.plot(replicas_kilo, import_seconds_kilo, label='Kilo Cloud')
plt.plot(replicas_chameleon, import_seconds_chameleon, label='Chameleon Cloud')
plt.plot(replicas_jetstream, import_seconds_jetstream, label='Jetstream Cloud')
plt.ylim(ymin=0)
plt.legend(loc='best')
fig.savefig('../report/replica_import.png')
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
cloud = 'kilo'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
import_seconds_kilo = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_kilo = select_df.as_matrix(columns=[select_df.columns[4]])
cloud = 'chameleon'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
import_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_chameleon = select_df.as_matrix(columns=[select_df.columns[4]])
cloud = 'jetstream'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
import_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[6]])
replicas_jetstream = select_df.as_matrix(columns=[select_df.columns[4]])
make_figure(import_seconds_kilo, replicas_kilo, import_seconds_chameleon, replicas_chameleon, import_seconds_jetstream, replicas_jetstream)
| true | true |
1c461466a808f85ad09eb1de51759f22e737153d | 10,277 | py | Python | sdk/examples/intkey_python/dgt_intkey/client_cli/intkey_cli.py | DGT-Network/DGT-SDK | 3413ae22e79c13e71264271fa3f82203fd49f0b3 | [
"Apache-2.0"
] | null | null | null | sdk/examples/intkey_python/dgt_intkey/client_cli/intkey_cli.py | DGT-Network/DGT-SDK | 3413ae22e79c13e71264271fa3f82203fd49f0b3 | [
"Apache-2.0"
] | null | null | null | sdk/examples/intkey_python/dgt_intkey/client_cli/intkey_cli.py | DGT-Network/DGT-SDK | 3413ae22e79c13e71264271fa3f82203fd49f0b3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016, 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import argparse
import getpass
import logging
import os
import sys
import traceback
import pkg_resources
from colorlog import ColoredFormatter
from dgt_intkey.client_cli.generate import add_generate_parser
from dgt_intkey.client_cli.generate import do_generate
from dgt_intkey.client_cli.populate import add_populate_parser
from dgt_intkey.client_cli.populate import do_populate
from dgt_intkey.client_cli.create_batch import add_create_batch_parser
from dgt_intkey.client_cli.create_batch import do_create_batch
from dgt_intkey.client_cli.load import add_load_parser
from dgt_intkey.client_cli.load import do_load
from dgt_intkey.client_cli.intkey_workload import add_workload_parser
from dgt_intkey.client_cli.intkey_workload import do_workload
from dgt_intkey.client_cli.intkey_client import IntkeyClient
from dgt_intkey.client_cli.exceptions import IntKeyCliException
from dgt_intkey.client_cli.exceptions import IntkeyClientException
DISTRIBUTION_NAME = 'dgt-intkey'
DEFAULT_URL = 'http://127.0.0.1:8008'
def create_console_handler(verbose_level):
    """Build a colorized stream handler whose level reflects *verbose_level*.

    0 -> WARN, 1 -> INFO, 2 or more -> DEBUG.
    """
    handler = logging.StreamHandler()
    handler.setFormatter(ColoredFormatter(
        "%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s "
        "%(white)s%(message)s",
        datefmt="%H:%M:%S",
        reset=True,
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red',
        }))
    if verbose_level == 0:
        level = logging.WARN
    elif verbose_level == 1:
        level = logging.INFO
    else:
        level = logging.DEBUG
    handler.setLevel(level)
    return handler
def setup_loggers(verbose_level):
    """Attach a console handler at the requested verbosity to the root logger."""
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(create_console_handler(verbose_level))
def create_parent_parser(prog_name):
    """Build the parser holding the -v/-V options shared by all subcommands."""
    parent = argparse.ArgumentParser(prog=prog_name, add_help=False)
    parent.add_argument(
        '-v', '--verbose',
        action='count',
        help='enable more verbose output')
    # Resolve the installed package version; fall back when not installed.
    try:
        version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
    except pkg_resources.DistributionNotFound:
        version = 'UNKNOWN'
    parent.add_argument(
        '-V', '--version',
        action='version',
        version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
        .format(version),
        help='display version information')
    return parent
def create_parser(prog_name):
    """Assemble the top-level CLI parser with every subcommand registered."""
    parent_parser = create_parent_parser(prog_name)
    parser = argparse.ArgumentParser(
        parents=[parent_parser],
        formatter_class=argparse.RawDescriptionHelpFormatter)
    subparsers = parser.add_subparsers(title='subcommands', dest='command')
    # Register each subcommand in turn; order controls help output.
    for register in (
            add_set_parser,
            add_inc_parser,
            add_dec_parser,
            add_show_parser,
            add_list_parser,
            add_generate_parser,
            add_load_parser,
            add_populate_parser,
            add_create_batch_parser,
            add_workload_parser,
    ):
        register(subparsers, parent_parser)
    return parser
def add_set_parser(subparsers, parent_parser):
    """Register the 'set' subcommand (set <name> to <value>) on *subparsers*."""
    set_parser = subparsers.add_parser(
        'set',
        parents=[parent_parser],
        description='Sends an intkey transaction to set <name> to <value>.',
        help='Sets an intkey value')
    set_parser.add_argument('name', type=str, help='name of key to set')
    set_parser.add_argument('value', type=int, help='amount to set')
    set_parser.add_argument('--url', type=str, help='specify URL of REST API')
    set_parser.add_argument(
        '--keyfile', type=str,
        help="identify file containing user's private key")
    # --wait with no value means "wait forever" (sys.maxsize seconds).
    set_parser.add_argument(
        '--wait', nargs='?', const=sys.maxsize, type=int,
        help='set time, in seconds, to wait for transaction to commit')
def do_set(args):
    """Handle 'set': submit the transaction and print the REST API response."""
    client = _get_client(args)
    print(client.set(args.name, args.value, args.wait))
def add_inc_parser(subparsers, parent_parser):
    """Register the 'inc' subcommand (increment <name> by <value>)."""
    inc_parser = subparsers.add_parser(
        'inc',
        parents=[parent_parser],
        description='Sends an intkey transaction to increment <name> by <value>.',
        help='Increments an intkey value')
    inc_parser.add_argument(
        'name', type=str, help='identify name of key to increment')
    inc_parser.add_argument(
        'value', type=int, help='specify amount to increment')
    inc_parser.add_argument('--url', type=str, help='specify URL of REST API')
    inc_parser.add_argument(
        '--keyfile', type=str,
        help="identify file containing user's private key")
    # --wait with no value means "wait forever" (sys.maxsize seconds).
    inc_parser.add_argument(
        '--wait', nargs='?', const=sys.maxsize, type=int,
        help='set time, in seconds, to wait for transaction to commit')
def do_inc(args):
    """Handle 'inc': submit the increment and print the REST API response."""
    client = _get_client(args)
    print(client.inc(args.name, args.value, args.wait))
def add_dec_parser(subparsers, parent_parser):
    """Register the 'dec' subcommand (decrement <name> by <value>)."""
    dec_parser = subparsers.add_parser(
        'dec',
        parents=[parent_parser],
        description='Sends an intkey transaction to decrement <name> by <value>.',
        help='Decrements an intkey value')
    dec_parser.add_argument(
        'name', type=str, help='identify name of key to decrement')
    dec_parser.add_argument(
        'value', type=int, help='amount to decrement')
    dec_parser.add_argument('--url', type=str, help='specify URL of REST API')
    dec_parser.add_argument(
        '--keyfile', type=str,
        help="identify file containing user's private key")
    # --wait with no value means "wait forever" (sys.maxsize seconds).
    dec_parser.add_argument(
        '--wait', nargs='?', const=sys.maxsize, type=int,
        help='set time, in seconds, to wait for transaction to commit')
def do_dec(args):
    """Handle 'dec': submit the decrement and print the REST API response."""
    client = _get_client(args)
    print(client.dec(args.name, args.value, args.wait))
def add_show_parser(subparsers, parent_parser):
    """Register the 'show' subcommand (display one key's value)."""
    show_parser = subparsers.add_parser(
        'show',
        parents=[parent_parser],
        description='Shows the value of the key <name>.',
        help='Displays the specified intkey value')
    show_parser.add_argument('name', type=str, help='name of key to show')
    show_parser.add_argument('--url', type=str, help='specify URL of REST API')
def do_show(args):
    """Handle 'show': fetch a single key's value and print 'name: value'."""
    client = _get_client(args)
    print('{}: {}'.format(args.name, client.show(args.name)))
def add_list_parser(subparsers, parent_parser):
    """Register the 'list' subcommand (display every key/value pair)."""
    list_parser = subparsers.add_parser(
        'list',
        parents=[parent_parser],
        description='Shows the values of all keys in intkey state.',
        help='Displays all intkey values')
    list_parser.add_argument('--url', type=str, help='specify URL of REST API')
def do_list(args):
    """Handle 'list': print every key/value pair known to the REST API."""
    for entry in _get_client(args).list():
        for key, val in entry.items():
            print('{}: {}'.format(key, val))
def _get_client(args):
    """Build an IntkeyClient from parsed CLI arguments (default URL if unset)."""
    url = args.url if args.url is not None else DEFAULT_URL
    return IntkeyClient(url=url, keyfile=_get_keyfile(args))
def _get_keyfile(args):
try:
if args.keyfile is not None:
return args.keyfile
except AttributeError:
return None
real_user = getpass.getuser()
home = os.path.expanduser("~")
key_dir = os.path.join(home, ".sawtooth", "keys")
return '{}/{}.priv'.format(key_dir, real_user)
def main(prog_name=os.path.basename(sys.argv[0]), args=None):
    """Parse the command line, configure logging, and run the subcommand.

    Exits with status 1 (after printing help) when no subcommand is given;
    raises IntKeyCliException for an unrecognized command.
    """
    if args is None:
        args = sys.argv[1:]
    parser = create_parser(prog_name)
    args = parser.parse_args(args)
    # args.verbose is None when -v was never passed (action='count').
    verbose_level = 0 if args.verbose is None else args.verbose
    setup_loggers(verbose_level=verbose_level)
    if not args.command:
        parser.print_help()
        sys.exit(1)
    # Dispatch table replaces the former ten-branch if/elif chain; keys are
    # the subcommand names registered in create_parser().
    handlers = {
        'set': do_set,
        'inc': do_inc,
        'dec': do_dec,
        'show': do_show,
        'list': do_list,
        'generate': do_generate,
        'populate': do_populate,
        'load': do_load,
        'create_batch': do_create_batch,
        'workload': do_workload,
    }
    handler = handlers.get(args.command)
    if handler is None:
        raise IntKeyCliException("invalid command: {}".format(args.command))
    handler(args)
def main_wrapper():
    """CLI entry point: run main() and translate failures into exit codes."""
    # Handler order matters: specific CLI/client errors first, then
    # interactive interrupt, then SystemExit pass-through, then a
    # catch-all for unexpected bugs.
    # pylint: disable=bare-except
    try:
        main()
    except (IntKeyCliException, IntkeyClientException) as err:
        # Known CLI/client errors: short message, non-zero exit.
        print("Error: {}".format(err), file=sys.stderr)
        sys.exit(1)
    except KeyboardInterrupt:
        # Ctrl-C is not an error; exit quietly.
        pass
    except SystemExit as e:
        # Let explicit sys.exit() calls from main() propagate unchanged.
        raise e
    except:
        # Anything else is a bug: show the traceback and fail.
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)
| 26.763021 | 80 | 0.648827 |
import argparse
import getpass
import logging
import os
import sys
import traceback
import pkg_resources
from colorlog import ColoredFormatter
from dgt_intkey.client_cli.generate import add_generate_parser
from dgt_intkey.client_cli.generate import do_generate
from dgt_intkey.client_cli.populate import add_populate_parser
from dgt_intkey.client_cli.populate import do_populate
from dgt_intkey.client_cli.create_batch import add_create_batch_parser
from dgt_intkey.client_cli.create_batch import do_create_batch
from dgt_intkey.client_cli.load import add_load_parser
from dgt_intkey.client_cli.load import do_load
from dgt_intkey.client_cli.intkey_workload import add_workload_parser
from dgt_intkey.client_cli.intkey_workload import do_workload
from dgt_intkey.client_cli.intkey_client import IntkeyClient
from dgt_intkey.client_cli.exceptions import IntKeyCliException
from dgt_intkey.client_cli.exceptions import IntkeyClientException
DISTRIBUTION_NAME = 'dgt-intkey'
DEFAULT_URL = 'http://127.0.0.1:8008'
def create_console_handler(verbose_level):
clog = logging.StreamHandler()
formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s "
"%(white)s%(message)s",
datefmt="%H:%M:%S",
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
})
clog.setFormatter(formatter)
if verbose_level == 0:
clog.setLevel(logging.WARN)
elif verbose_level == 1:
clog.setLevel(logging.INFO)
else:
clog.setLevel(logging.DEBUG)
return clog
def setup_loggers(verbose_level):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(create_console_handler(verbose_level))
def create_parent_parser(prog_name):
parent_parser = argparse.ArgumentParser(prog=prog_name, add_help=False)
parent_parser.add_argument(
'-v', '--verbose',
action='count',
help='enable more verbose output')
try:
version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version
except pkg_resources.DistributionNotFound:
version = 'UNKNOWN'
parent_parser.add_argument(
'-V', '--version',
action='version',
version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')
.format(version),
help='display version information')
return parent_parser
def create_parser(prog_name):
parent_parser = create_parent_parser(prog_name)
parser = argparse.ArgumentParser(
parents=[parent_parser],
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(title='subcommands', dest='command')
add_set_parser(subparsers, parent_parser)
add_inc_parser(subparsers, parent_parser)
add_dec_parser(subparsers, parent_parser)
add_show_parser(subparsers, parent_parser)
add_list_parser(subparsers, parent_parser)
add_generate_parser(subparsers, parent_parser)
add_load_parser(subparsers, parent_parser)
add_populate_parser(subparsers, parent_parser)
add_create_batch_parser(subparsers, parent_parser)
add_workload_parser(subparsers, parent_parser)
return parser
def add_set_parser(subparsers, parent_parser):
message = 'Sends an intkey transaction to set <name> to <value>.'
parser = subparsers.add_parser(
'set',
parents=[parent_parser],
description=message,
help='Sets an intkey value')
parser.add_argument(
'name',
type=str,
help='name of key to set')
parser.add_argument(
'value',
type=int,
help='amount to set')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
parser.add_argument(
'--keyfile',
type=str,
help="identify file containing user's private key")
parser.add_argument(
'--wait',
nargs='?',
const=sys.maxsize,
type=int,
help='set time, in seconds, to wait for transaction to commit')
def do_set(args):
name, value, wait = args.name, args.value, args.wait
client = _get_client(args)
response = client.set(name, value, wait)
print(response)
def add_inc_parser(subparsers, parent_parser):
message = 'Sends an intkey transaction to increment <name> by <value>.'
parser = subparsers.add_parser(
'inc',
parents=[parent_parser],
description=message,
help='Increments an intkey value')
parser.add_argument(
'name',
type=str,
help='identify name of key to increment')
parser.add_argument(
'value',
type=int,
help='specify amount to increment')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
parser.add_argument(
'--keyfile',
type=str,
help="identify file containing user's private key")
parser.add_argument(
'--wait',
nargs='?',
const=sys.maxsize,
type=int,
help='set time, in seconds, to wait for transaction to commit')
def do_inc(args):
name, value, wait = args.name, args.value, args.wait
client = _get_client(args)
response = client.inc(name, value, wait)
print(response)
def add_dec_parser(subparsers, parent_parser):
message = 'Sends an intkey transaction to decrement <name> by <value>.'
parser = subparsers.add_parser(
'dec',
parents=[parent_parser],
description=message,
help='Decrements an intkey value')
parser.add_argument(
'name',
type=str,
help='identify name of key to decrement')
parser.add_argument(
'value',
type=int,
help='amount to decrement')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
parser.add_argument(
'--keyfile',
type=str,
help="identify file containing user's private key")
parser.add_argument(
'--wait',
nargs='?',
const=sys.maxsize,
type=int,
help='set time, in seconds, to wait for transaction to commit')
def do_dec(args):
name, value, wait = args.name, args.value, args.wait
client = _get_client(args)
response = client.dec(name, value, wait)
print(response)
def add_show_parser(subparsers, parent_parser):
message = 'Shows the value of the key <name>.'
parser = subparsers.add_parser(
'show',
parents=[parent_parser],
description=message,
help='Displays the specified intkey value')
parser.add_argument(
'name',
type=str,
help='name of key to show')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
def do_show(args):
name = args.name
client = _get_client(args)
value = client.show(name)
print('{}: {}'.format(name, value))
def add_list_parser(subparsers, parent_parser):
message = 'Shows the values of all keys in intkey state.'
parser = subparsers.add_parser(
'list',
parents=[parent_parser],
description=message,
help='Displays all intkey values')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API')
def do_list(args):
client = _get_client(args)
results = client.list()
for pair in results:
for name, value in pair.items():
print('{}: {}'.format(name, value))
def _get_client(args):
return IntkeyClient(
url=DEFAULT_URL if args.url is None else args.url,
keyfile=_get_keyfile(args))
def _get_keyfile(args):
try:
if args.keyfile is not None:
return args.keyfile
except AttributeError:
return None
real_user = getpass.getuser()
home = os.path.expanduser("~")
key_dir = os.path.join(home, ".sawtooth", "keys")
return '{}/{}.priv'.format(key_dir, real_user)
def main(prog_name=os.path.basename(sys.argv[0]), args=None):
if args is None:
args = sys.argv[1:]
parser = create_parser(prog_name)
args = parser.parse_args(args)
if args.verbose is None:
verbose_level = 0
else:
verbose_level = args.verbose
setup_loggers(verbose_level=verbose_level)
if not args.command:
parser.print_help()
sys.exit(1)
if args.command == 'set':
do_set(args)
elif args.command == 'inc':
do_inc(args)
elif args.command == 'dec':
do_dec(args)
elif args.command == 'show':
do_show(args)
elif args.command == 'list':
do_list(args)
elif args.command == 'generate':
do_generate(args)
elif args.command == 'populate':
do_populate(args)
elif args.command == 'load':
do_load(args)
elif args.command == 'create_batch':
do_create_batch(args)
elif args.command == 'workload':
do_workload(args)
else:
raise IntKeyCliException("invalid command: {}".format(args.command))
def main_wrapper():
# pylint: disable=bare-except
try:
main()
except (IntKeyCliException, IntkeyClientException) as err:
print("Error: {}".format(err), file=sys.stderr)
sys.exit(1)
except KeyboardInterrupt:
pass
except SystemExit as e:
raise e
except:
traceback.print_exc(file=sys.stderr)
sys.exit(1)
| true | true |
1c46148594b66e51e3b670cc5e04060e21b3f2a6 | 1,581 | py | Python | test_config.py | AshishMittal/watson-stt-wer-python | 62dea234665aa5c11a05327e49419d27b87f1b25 | [
"Apache-2.0"
] | 3 | 2021-06-17T14:19:44.000Z | 2022-02-27T18:13:51.000Z | test_config.py | AshishMittal/watson-stt-wer-python | 62dea234665aa5c11a05327e49419d27b87f1b25 | [
"Apache-2.0"
] | 22 | 2021-06-04T13:18:10.000Z | 2022-02-11T21:55:45.000Z | test_config.py | AshishMittal/watson-stt-wer-python | 62dea234665aa5c11a05327e49419d27b87f1b25 | [
"Apache-2.0"
] | 2 | 2021-07-15T19:43:36.000Z | 2022-02-23T09:56:47.000Z | import unittest, os
from config import Config
def getInstance():
    """Return a Config loaded from the sample ini file shared by all tests."""
    return Config('config.ini.sample')
class MyTest(unittest.TestCase):
    """Tests for Config, exercised against the shipped sample ini file."""
    def test_get_value(self):
        """A key present in the file returns its stored string value."""
        cfg = getInstance()
        self.assertEqual(cfg.getValue('SpeechToText', 'base_model_name'), 'en-US_NarrowbandModel')
    def test_get_missing_section(self):
        """Looking up a non-existent section yields None."""
        cfg = getInstance()
        self.assertEqual(cfg.getValue('NotARealSection', 'NotARealKey'), None)
    def test_get_missing_key(self):
        """Looking up a non-existent key in a real section yields None."""
        cfg = getInstance()
        self.assertEqual(cfg.getValue('SpeechToText', 'NotARealKey'), None)
    def test_get_boolean_false(self):
        """getBoolean parses a false-valued option as False."""
        cfg = getInstance()
        self.assertEqual(cfg.getBoolean('SpeechToText', 'use_bearer_token'), False)
    def test_get_boolean_true(self):
        """getBoolean parses a true-valued option as True."""
        cfg = getInstance()
        self.assertEqual(cfg.getBoolean('Transformations', 'remove_empty_strings'), True)
    def test_get_value_with_percent(self):
        """Values containing '%' come back verbatim (no interpolation)."""
        cfg = getInstance()
        self.assertEqual(cfg.getValue('Transformations', 'remove_word_list'), 'uh,uhuh,%hesitation,hesitation')
    def test_set_value_with_key(self):
        """setValue overwrites an option and getValue reads it back."""
        cfg = getInstance()
        cfg.setValue('SpeechToText', 'smart_formatting', 'True')
        self.assertEqual(cfg.getValue('SpeechToText', 'smart_formatting'), 'True')
    def test_write_file(self):
        """writeFile round-trips: the written file re-loads with the same values."""
        cfg = getInstance()
        temp_path = 'config.ini.unit_test'
        cfg.writeFile(temp_path)
        self.assertEqual(Config(temp_path).getValue('SpeechToText', 'base_model_name'), 'en-US_NarrowbandModel')
        os.remove(temp_path)
if __name__ == '__main__':
unittest.main()
| 33.638298 | 124 | 0.683112 | import unittest, os
from config import Config
def getInstance():
return Config('config.ini.sample')
class MyTest(unittest.TestCase):
def test_get_value(self):
c = getInstance()
self.assertEqual(c.getValue('SpeechToText','base_model_name'), 'en-US_NarrowbandModel')
def test_get_missing_section(self):
c = getInstance()
self.assertEqual(c.getValue('NotARealSection','NotARealKey'), None)
def test_get_missing_key(self):
c = getInstance()
self.assertEqual(c.getValue('SpeechToText', 'NotARealKey'), None)
def test_get_boolean_false(self):
c = getInstance()
self.assertEqual(c.getBoolean('SpeechToText', 'use_bearer_token'), False)
def test_get_boolean_true(self):
c = getInstance()
self.assertEqual(c.getBoolean('Transformations', 'remove_empty_strings'), True)
def test_get_value_with_percent(self):
c = getInstance()
self.assertEqual(c.getValue('Transformations','remove_word_list'), 'uh,uhuh,%hesitation,hesitation')
def test_set_value_with_key(self):
c = getInstance()
c.setValue('SpeechToText','smart_formatting', 'True')
self.assertEqual(c.getValue('SpeechToText', 'smart_formatting'), 'True')
def test_write_file(self):
c = getInstance()
c.writeFile('config.ini.unit_test')
self.assertEqual(Config('config.ini.unit_test').getValue('SpeechToText','base_model_name'), 'en-US_NarrowbandModel')
os.remove('config.ini.unit_test')
if __name__ == '__main__':
unittest.main()
| true | true |
1c4616639bab4e32664cf09fae71be8e8f78f138 | 1,117 | py | Python | ex6_yaml_json_write.py | ro8harp/pynet_test | 711ede69b43e42ae6c62f7224bd5a84dfc491d5e | [
"Apache-2.0"
] | null | null | null | ex6_yaml_json_write.py | ro8harp/pynet_test | 711ede69b43e42ae6c62f7224bd5a84dfc491d5e | [
"Apache-2.0"
] | null | null | null | ex6_yaml_json_write.py | ro8harp/pynet_test | 711ede69b43e42ae6c62f7224bd5a84dfc491d5e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env
,,,
Write a Python program that creates a list. One of the elements of the list
should be a dictionary with at least two keys. Write this list out to a file
using both YAML and JSON formats. The YAML file should be in the expanded form.
'''
import yaml
import json
def main():
    '''
    Write a Python program that creates a list. One of the elements of the list
    should be a dictionary with at least two keys. Write this list out to a file
    using both YAML and JSON formats. The YAML file should be in the expanded
    form.
    '''
    yaml_file = 'my_test.yml'
    json_file = 'my_test.json'
    my_dict = {
        'ip_addr': '172.31.200.1',
        'platform': 'cisco_ios',
        'vendor': 'cisco',
        'model': '1921'
    }
    # Include the dict object itself (not the string 'my_dict') so the list
    # actually contains a dictionary element, as the exercise requires.
    my_list = [
        'some string',
        '99',
        '18',
        my_dict,
        'another string',
        'final string'
    ]
    # default_flow_style=False makes yaml.dump emit block ("expanded") YAML.
    with open(yaml_file, "w") as f:
        f.write(yaml.dump(my_list, default_flow_style=False))
    with open(json_file, "w") as f:
        json.dump(my_list, f)
if __name__ == "__main__":
main()
| 23.765957 | 80 | 0.606088 |
,,,
Write a Python program that creates a list. One of the elements of the list
should be a dictionary with at least two keys. Write this list out to a file
using both YAML and JSON formats. The YAML file should be in the expanded form.
'''
import yaml
import json
def main():
,,,
Write a Python program that creates a list. One of the elements of the list
should be a dictionary with at least two keys. Write this list out to a file
using both YAML and JSON formats. The YAML file should be in the expanded
form.
'''
yaml_file = 'my_test.yml'
json_file = 'my_test.json'
my_dict = {
'ip_addr': '172.31.200.1',
'platform': 'cisco_ios',
'vendor': 'cisco',
'model': '1921'
}
my_list = [
'some string',
'99',
'18',
'my_dict',
'another string',
'final string'
]
with open(yaml_file, "w") as f:
f.write(yaml.dump(my_list, default_flow_style=False))
with open(json_file, "w") as f:
json.dump(my_list, f)
if __name__ == "__main__":
main()
| false | true |
1c46168ec175c99047459da65d3253be700bc914 | 601 | py | Python | list/stack.py | Knight0xFF/Data-Structures | 2a3c20f21f0340b3ef10be520a0429f36e1fa60f | [
"MIT"
] | null | null | null | list/stack.py | Knight0xFF/Data-Structures | 2a3c20f21f0340b3ef10be520a0429f36e1fa60f | [
"MIT"
] | null | null | null | list/stack.py | Knight0xFF/Data-Structures | 2a3c20f21f0340b3ef10be520a0429f36e1fa60f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Stack(object):
    """A simple LIFO stack backed by a Python list."""

    def __init__(self):
        # Top of the stack is the end of the list.
        self.items = []

    def push(self, item):
        """Add *item* to the top of the stack."""
        return self.items.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        # Negative indexing is the idiomatic way to read the last element.
        return self.items[-1]

    def get_size(self):
        """Return the number of items on the stack."""
        return len(self.items)

    def is_empty(self):
        """Return True if the stack holds no items."""
        # Truthiness of the list replaces the comparison against [].
        return not self.items
if __name__ == "__main__":
stack = Stack()
stack.push(3)
stack.push(142)
print stack.get_size()
print stack.peek()
print stack.pop()
print stack.is_empty()
| 18.78125 | 46 | 0.579035 |
class Stack(object):
def __init__(self):
self.items = []
def push(self, item):
return self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items) - 1]
def get_size(self):
return len(self.items)
def is_empty(self):
return self.items == []
if __name__ == "__main__":
stack = Stack()
stack.push(3)
stack.push(142)
print stack.get_size()
print stack.peek()
print stack.pop()
print stack.is_empty()
| false | true |
1c46177306b899ada2c53a4c9fa5cec25807641b | 12,569 | py | Python | harmonica/equivalent_layer/harmonic_spherical.py | RichardScottOZ/harmonica | ccb0437ea0ed528cfd144844edab98141c8d08da | [
"BSD-3-Clause"
] | null | null | null | harmonica/equivalent_layer/harmonic_spherical.py | RichardScottOZ/harmonica | ccb0437ea0ed528cfd144844edab98141c8d08da | [
"BSD-3-Clause"
] | 1 | 2022-01-19T03:02:22.000Z | 2022-01-19T20:47:19.000Z | harmonica/equivalent_layer/harmonic_spherical.py | RichardScottOZ/harmonica | ccb0437ea0ed528cfd144844edab98141c8d08da | [
"BSD-3-Clause"
] | 1 | 2022-01-17T23:15:18.000Z | 2022-01-17T23:15:18.000Z | """
Equivalent layer for generic harmonic functions in spherical coordinates
"""
import numpy as np
from numba import jit
from sklearn.utils.validation import check_is_fitted
import verde as vd
import verde.base as vdb
from .utils import jacobian_numba, predict_numba, pop_extra_coords
from ..forward.utils import distance_spherical
class EQLHarmonicSpherical(vdb.BaseGridder):
    r"""
    Equivalent-layer for generic harmonic functions in spherical coordinates
    This equivalent layer can be used for:
    * Spherical coordinates (geographic coordinates must be converted before
      use)
    * Regional or global data where Earth's curvature must be taken into
      account
    * Gravity and magnetic data (including derivatives)
    * Single data types
    * Interpolation
    * Upward continuation
    * Finite-difference based derivative calculations
    It cannot be used for:
    * Joint inversion of multiple data types (e.g., gravity + gravity
      gradients)
    * Reduction to the pole of magnetic total field anomaly data
    * Analytical derivative calculations
    Point sources are located beneath the observed potential-field measurement
    points by default [Cooper2000]_. Custom source locations can be used by
    specifying the *points* argument. Coefficients associated with each point
    source are estimated through linear least-squares with damping (Tikhonov
    0th order) regularization.
    The Green's function for point mass effects used is the inverse Euclidean
    distance between the grid coordinates and the point source:
    .. math::
        \phi(\bar{x}, \bar{x}') = \frac{1}{||\bar{x} - \bar{x}'||}
    where :math:`\bar{x}` and :math:`\bar{x}'` are the coordinate vectors of
    the observation point and the source, respectively.
    Parameters
    ----------
    damping : None or float
        The positive damping regularization parameter. Controls how much
        smoothness is imposed on the estimated coefficients.
        If None, no regularization is used.
    points : None or list of arrays (optional)
        List containing the coordinates of the point sources used as the
        equivalent layer. Coordinates are assumed to be in the following order:
        (``longitude``, ``latitude``, ``radius``). Both ``longitude`` and
        ``latitude`` must be in degrees and ``radius`` in meters.
        If None, will place one point source below each observation point at
        a fixed relative depth below the observation point [Cooper2000]_.
        Defaults to None.
    relative_depth : float
        Relative depth at which the point sources are placed beneath the
        observation points. Each source point will be set beneath each data
        point at a depth calculated as the radius of the data point minus
        this constant *relative_depth*. Use positive numbers (negative numbers
        would mean point sources are above the data points). Ignored if
        *points* is specified.
    Attributes
    ----------
    points_ : 2d-array
        Coordinates of the point sources used to build the equivalent layer.
    coefs_ : array
        Estimated coefficients of every point source.
    region_ : tuple
        The boundaries (``[W, E, S, N]``) of the data used to fit the
        interpolator. Used as the default region for the
        :meth:`~harmonica.EQLHarmonicSpherical.grid` method.
    """
    # Set the default dimension names for generated outputs
    # as xr.Dataset.
    dims = ("spherical_latitude", "longitude")
    # Overwrite the default name for the upward coordinate.
    extra_coords_name = "radius"
    def __init__(
        self,
        damping=None,
        points=None,
        relative_depth=500,
    ):
        self.damping = damping
        self.points = points
        self.relative_depth = relative_depth
        # Define Green's function for spherical coordinates
        self.greens_function = greens_func_spherical
    def fit(self, coordinates, data, weights=None):
        """
        Fit the coefficients of the equivalent layer.
        The data region is captured and used as default for the
        :meth:`~harmonica.EQLHarmonicSpherical.grid` method.
        All input arrays must have the same shape.
        Parameters
        ----------
        coordinates : tuple of arrays
            Arrays with the coordinates of each data point. Should be in the
            following order: (``longitude``, ``latitude``, ``radius``, ...).
            Only ``longitude``, ``latitude``, and ``radius`` will be used, all
            subsequent coordinates will be ignored.
        data : array
            The data values of each data point.
        weights : None or array
            If not None, then the weights assigned to each data point.
            Typically, this should be 1 over the data uncertainty squared.
        Returns
        -------
        self
            Returns this estimator instance for chaining operations.
        """
        coordinates, data, weights = vdb.check_fit_input(coordinates, data, weights)
        # Capture the data region to use as a default when gridding.
        self.region_ = vd.get_region(coordinates[:2])
        coordinates = vdb.n_1d_arrays(coordinates, 3)
        if self.points is None:
            # Default layout: one source directly under each data point, at a
            # constant depth (relative_depth) below the observation radius.
            self.points_ = (
                coordinates[0],
                coordinates[1],
                coordinates[2] - self.relative_depth,
            )
        else:
            self.points_ = vdb.n_1d_arrays(self.points, 3)
        jacobian = self.jacobian(coordinates, self.points_)
        self.coefs_ = vdb.least_squares(jacobian, data, weights, self.damping)
        return self
    def predict(self, coordinates):
        """
        Evaluate the estimated equivalent layer on the given set of points.
        Requires a fitted estimator
        (see :meth:`~harmonica.EQLHarmonicSpherical.fit`).
        Parameters
        ----------
        coordinates : tuple of arrays
            Arrays with the coordinates of each data point. Should be in the
            following order: (``longitude``, ``latitude``, ``radius``, ...).
            Only ``longitude``, ``latitude`` and ``radius`` will be used, all
            subsequent coordinates will be ignored.
        Returns
        -------
        data : array
            The data values evaluated on the given points.
        """
        # We know the gridder has been fitted if it has the coefs_
        check_is_fitted(self, ["coefs_"])
        shape = np.broadcast(*coordinates[:3]).shape
        size = np.broadcast(*coordinates[:3]).size
        # NOTE(review): assumes coordinates[0] is a numpy array (has .dtype);
        # a plain Python list would raise AttributeError here -- confirm
        # callers always pass arrays.
        dtype = coordinates[0].dtype
        coordinates = tuple(np.atleast_1d(i).ravel() for i in coordinates[:3])
        data = np.zeros(size, dtype=dtype)
        predict_numba(
            coordinates, self.points_, self.coefs_, data, self.greens_function
        )
        # Restore the broadcast shape of the input coordinates.
        return data.reshape(shape)
    def jacobian(
        self, coordinates, points, dtype="float64"
    ):  # pylint: disable=no-self-use
        """
        Make the Jacobian matrix for the equivalent layer.
        Each column of the Jacobian is the Green's function for a single point
        source evaluated on all observation points.
        Parameters
        ----------
        coordinates : tuple of arrays
            Arrays with the coordinates of each data point. Should be in the
            following order: (``longitude``, ``latitude``, ``radius``, ...).
            Only ``longitude``, ``latitude`` and ``radius`` will be used, all
            subsequent coordinates will be ignored.
        points : tuple of arrays
            Tuple of arrays containing the coordinates of the point sources
            used as equivalent layer in the following order:
            (``longitude``, ``latitude``, ``radius``).
        dtype : str or numpy dtype
            The type of the Jacobian array.
        Returns
        -------
        jacobian : 2D array
            The (n_data, n_points) Jacobian matrix.
        """
        # Compute Jacobian matrix
        n_data = coordinates[0].size
        n_points = points[0].size
        jac = np.zeros((n_data, n_points), dtype=dtype)
        jacobian_numba(coordinates, points, jac, self.greens_function)
        return jac
    def grid(
        self,
        upward,
        region=None,
        shape=None,
        spacing=None,
        dims=None,
        data_names=None,
        **kwargs
    ):  # pylint: disable=arguments-differ
        """
        Interpolate the data onto a regular grid.
        The grid can be specified by either the number of points in each
        dimension (the *shape*) or by the grid node spacing. See
        :func:`verde.grid_coordinates` for details. All grid points will be
        located at the same `upward` coordinate. Other arguments for
        :func:`verde.grid_coordinates` can be passed as extra keyword arguments
        (``kwargs``) to this method.
        If the interpolator collected the input data region, then it will be
        used if ``region=None``. Otherwise, you must specify the grid region.
        Use the *dims* and *data_names* arguments to set custom names for the
        dimensions and the data field(s) in the output :class:`xarray.Dataset`.
        Default names will be provided if none are given.
        Parameters
        ----------
        upward : float
            Upward coordinate of the grid points.
        region : list = [W, E, S, N]
            The west, east, south, and north boundaries of a given region.
        shape : tuple = (n_north, n_east) or None
            The number of points in the South-North and West-East directions,
            respectively.
        spacing : tuple = (s_north, s_east) or None
            The grid spacing in the South-North and West-East directions,
            respectively.
        dims : list or None
            The names of the northing and easting data dimensions,
            respectively, in the output grid. Default is determined from the
            ``dims`` attribute of the class. Must be defined in the following
            order: northing dimension, easting dimension.
            **NOTE: This is an exception to the "easting" then
            "northing" pattern but is required for compatibility with xarray.**
        data_names : list of None
            The name(s) of the data variables in the output grid. Defaults to
            ``['scalars']``.
        Returns
        -------
        grid : xarray.Dataset
            The interpolated grid. Metadata about the interpolator is written
            to the ``attrs`` attribute.
        """
        # We override the grid method from BaseGridder so it takes the upward
        # coordinate as a positional argument. We disable pylint
        # arguments-differ error because we intend to make this method
        # different from the inherited one.
        # Ignore extra_coords if passed
        pop_extra_coords(kwargs)
        # Grid data
        # We always pass projection=None because that argument is intended to
        # be used only with Cartesian gridders.
        grid = super().grid(
            region=region,
            shape=shape,
            spacing=spacing,
            dims=dims,
            data_names=data_names,
            projection=None,
            extra_coords=upward,
            **kwargs,
        )
        return grid
    def scatter(
        self,
        region=None,
        size=None,
        random_state=None,
        dims=None,
        data_names=None,
        projection=None,
        **kwargs
    ):
        """
        .. warning ::
            Not implemented method. The scatter method will be deprecated on
            Verde v2.0.0.
        """
        raise NotImplementedError
    def profile(
        self,
        point1,
        point2,
        size,
        dims=None,
        data_names=None,
        projection=None,
        **kwargs
    ):
        """
        .. warning ::
            Not implemented method. The profile on spherical coordinates should
            be done using great-circle distances through the Haversine formula.
        """
        raise NotImplementedError
@jit(nopython=True)
def greens_func_spherical(
    longitude, latitude, radius, point_longitude, point_latitude, point_radius
):
    """
    Green's function for the equivalent layer in spherical coordinates

    Returns the reciprocal of the distance between the computation point and
    the source point. Compiled with Numba to speed up evaluations.
    """
    return 1 / distance_spherical(
        (longitude, latitude, radius), (point_longitude, point_latitude, point_radius)
    )
| 36.32659 | 86 | 0.626701 | import numpy as np
from numba import jit
from sklearn.utils.validation import check_is_fitted
import verde as vd
import verde.base as vdb
from .utils import jacobian_numba, predict_numba, pop_extra_coords
from ..forward.utils import distance_spherical
class EQLHarmonicSpherical(vdb.BaseGridder):
dims = ("spherical_latitude", "longitude")
extra_coords_name = "radius"
def __init__(
self,
damping=None,
points=None,
relative_depth=500,
):
self.damping = damping
self.points = points
self.relative_depth = relative_depth
self.greens_function = greens_func_spherical
def fit(self, coordinates, data, weights=None):
coordinates, data, weights = vdb.check_fit_input(coordinates, data, weights)
# Capture the data region to use as a default when gridding.
self.region_ = vd.get_region(coordinates[:2])
coordinates = vdb.n_1d_arrays(coordinates, 3)
if self.points is None:
self.points_ = (
coordinates[0],
coordinates[1],
coordinates[2] - self.relative_depth,
)
else:
self.points_ = vdb.n_1d_arrays(self.points, 3)
jacobian = self.jacobian(coordinates, self.points_)
self.coefs_ = vdb.least_squares(jacobian, data, weights, self.damping)
return self
def predict(self, coordinates):
# We know the gridder has been fitted if it has the coefs_
check_is_fitted(self, ["coefs_"])
shape = np.broadcast(*coordinates[:3]).shape
size = np.broadcast(*coordinates[:3]).size
dtype = coordinates[0].dtype
coordinates = tuple(np.atleast_1d(i).ravel() for i in coordinates[:3])
data = np.zeros(size, dtype=dtype)
predict_numba(
coordinates, self.points_, self.coefs_, data, self.greens_function
)
return data.reshape(shape)
def jacobian(
self, coordinates, points, dtype="float64"
): # pylint: disable=no-self-use
# Compute Jacobian matrix
n_data = coordinates[0].size
n_points = points[0].size
jac = np.zeros((n_data, n_points), dtype=dtype)
jacobian_numba(coordinates, points, jac, self.greens_function)
return jac
def grid(
self,
upward,
region=None,
shape=None,
spacing=None,
dims=None,
data_names=None,
**kwargs
): # pylint: disable=arguments-differ
# We override the grid method from BaseGridder so it takes the upward
# coordinate as a positional argument. We disable pylint
# arguments-differ error because we intend to make this method
# different from the inherited one.
# Ignore extra_coords if passed
pop_extra_coords(kwargs)
# Grid data
# We always pass projection=None because that argument it's intended to
grid = super().grid(
region=region,
shape=shape,
spacing=spacing,
dims=dims,
data_names=data_names,
projection=None,
extra_coords=upward,
**kwargs,
)
return grid
def scatter(
self,
region=None,
size=None,
random_state=None,
dims=None,
data_names=None,
projection=None,
**kwargs
):
raise NotImplementedError
def profile(
self,
point1,
point2,
size,
dims=None,
data_names=None,
projection=None,
**kwargs
):
raise NotImplementedError
@jit(nopython=True)
def greens_func_spherical(
longitude, latitude, radius, point_longitude, point_latitude, point_radius
):
distance = distance_spherical(
(longitude, latitude, radius), (point_longitude, point_latitude, point_radius)
)
return 1 / distance
| true | true |
1c4618e45d73910b099a098744c5bee6d758142c | 18,581 | py | Python | dali/test/python/test_operator_slice.py | ancientmooner/DALI | 355e8db8130cee0d20e9ae3d698f195278544995 | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2020-05-09T03:07:07.000Z | 2021-06-15T14:48:04.000Z | dali/test/python/test_operator_slice.py | ancientmooner/DALI | 355e8db8130cee0d20e9ae3d698f195278544995 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | dali/test/python/test_operator_slice.py | ancientmooner/DALI | 355e8db8130cee0d20e9ae3d698f195278544995 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-04-26T14:59:51.000Z | 2020-04-26T14:59:51.000Z | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import os
from functools import partial
from test_utils import check_batch
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
from test_utils import RandomDataIterator
from math import floor
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
test_data_video = os.path.join(test_data_root, 'db', 'optical_flow', 'sintel_trailer')
class SliceSynthDataPipeline(Pipeline):
    """DALI pipeline applying the Slice operator to synthetic data.

    Raw batches come from ``iterator`` and the per-sample slice anchors and
    shapes come from ``pos_size_iter``, all fed through external sources.
    The sliced dimensions are selected either by ``axis_names`` (preferred
    when given) or by ``axes``; when neither is given the operator default
    is used.
    """

    def __init__(self, device, batch_size, layout, iterator, pos_size_iter,
                 num_threads=1, device_id=0, num_gpus=1,
                 axes=None, axis_names=None, normalized_anchor=True, normalized_shape=True):
        super(SliceSynthDataPipeline, self).__init__(
            batch_size, num_threads, device_id, seed=1234)
        self.device = device
        self.layout = layout
        self.iterator = iterator
        self.pos_size_iter = pos_size_iter
        self.inputs = ops.ExternalSource()
        self.input_crop_pos = ops.ExternalSource()
        self.input_crop_size = ops.ExternalSource()
        # Build the operator once; only the axis selection differs between
        # the three original construction branches.
        slice_args = {
            "device": self.device,
            "normalized_anchor": normalized_anchor,
            "normalized_shape": normalized_shape,
        }
        if axis_names:
            slice_args["axis_names"] = axis_names
        elif axes:
            slice_args["axes"] = axes
        self.slice = ops.Slice(**slice_args)

    def define_graph(self):
        self.data = self.inputs()
        self.crop_pos = self.input_crop_pos()
        self.crop_size = self.input_crop_size()
        data = self.data
        if self.device == 'gpu':
            data = data.gpu()
        return self.slice(data, self.crop_pos, self.crop_size)

    def iter_setup(self):
        batch = self.iterator.next()
        self.feed_input(self.data, batch, layout=self.layout)
        (pos, size) = self.pos_size_iter.next()
        self.feed_input(self.crop_pos, pos)
        self.feed_input(self.crop_size, size)
class SlicePipeline(Pipeline):
    """Pipeline that decodes LMDB-stored images and slices them.

    With ``is_fused_decoder=True`` decoding and slicing happen in one fused
    ``ImageDecoderSlice`` operator (CPU only). Otherwise the image is decoded
    first and cropped by a standalone ``Slice`` operator, optionally on the
    GPU. Slice anchors and shapes are fed through external sources.
    """

    def __init__(self, device, batch_size, pos_size_iter,
                 num_threads=1, device_id=0, is_fused_decoder=False,
                 axes=None, axis_names=None, normalized_anchor=True, normalized_shape=True):
        super(SlicePipeline, self).__init__(
            batch_size, num_threads, device_id, seed=1234)
        self.is_fused_decoder = is_fused_decoder
        self.pos_size_iter = pos_size_iter
        self.device = device
        self.input = ops.CaffeReader(path=caffe_db_folder, random_shuffle=False)
        self.input_crop_pos = ops.ExternalSource()
        self.input_crop_size = ops.ExternalSource()
        # Slice-window arguments shared by both flavors of the graph; only
        # the axis selection differs between the original branches.
        window_args = {
            "normalized_anchor": normalized_anchor,
            "normalized_shape": normalized_shape,
        }
        if axis_names:
            window_args["axis_names"] = axis_names
        elif axes:
            window_args["axes"] = axes
        if self.is_fused_decoder:
            self.decode = ops.ImageDecoderSlice(device="cpu",
                                                output_type=types.RGB,
                                                **window_args)
        else:
            self.decode = ops.ImageDecoder(device="cpu", output_type=types.RGB)
            self.slice = ops.Slice(device=self.device, **window_args)

    def define_graph(self):
        jpegs, labels = self.input(name="Reader")
        self.crop_pos = self.input_crop_pos()
        self.crop_size = self.input_crop_size()
        if self.is_fused_decoder:
            return self.decode(jpegs, self.crop_pos, self.crop_size)
        images = self.decode(jpegs)
        if self.device == 'gpu':
            images = images.gpu()
        return self.slice(images, self.crop_pos, self.crop_size)

    def iter_setup(self):
        (pos, size) = self.pos_size_iter.next()
        self.feed_input(self.crop_pos, pos)
        self.feed_input(self.crop_size, size)
class SliceArgsIterator(object):
    """Produces per-sample slice anchors and shapes for the Slice tests.

    Each ``next()`` returns ``(pos, size)``: two lists with one float32 array
    per sample, one entry per sliced axis. Values are drawn uniformly from
    the configured normalized ranges; when ``normalized_anchor`` or
    ``normalized_shape`` is False they are converted to absolute coordinates
    (floored) using ``image_shape``. The RNG is re-seeded on every call, so
    every batch of arguments is identical — this keeps paired pipelines fed
    from separate iterator instances in sync.
    """

    def __init__(self,
                 batch_size,
                 num_dims=3,
                 image_shape=None,  # needed when anchors/shapes are absolute
                 image_layout=None,  # needed when axis_names selects the axes
                 normalized_anchor=True,
                 normalized_shape=True,
                 axes=None,
                 axis_names=None,
                 min_norm_anchor=0.0,
                 max_norm_anchor=0.2,
                 min_norm_shape=0.4,
                 max_norm_shape=0.75,
                 seed=54643613):
        self.batch_size = batch_size
        self.num_dims = num_dims
        self.image_shape = image_shape
        self.image_layout = image_layout
        self.normalized_anchor = normalized_anchor
        self.normalized_shape = normalized_shape
        self.axes = axes
        self.axis_names = axis_names
        self.min_norm_anchor = min_norm_anchor
        self.max_norm_anchor = max_norm_anchor
        self.min_norm_shape = min_norm_shape
        self.max_norm_shape = max_norm_shape
        self.seed = seed
        # Default to slicing width and height when nothing was requested.
        if not self.axis_names and not self.axes:
            self.axis_names = "WH"
        # Axis names take precedence and are resolved against the layout.
        if self.axis_names:
            self.axes = []
            for name in self.axis_names:
                assert name in self.image_layout
                self.axes.append(self.image_layout.index(name))
        assert len(self.axes) > 0

    def __iter__(self):
        self.i = 0
        self.n = self.batch_size
        return self

    def __next__(self):
        # Re-seed so each batch of slice arguments is reproducible.
        np.random.seed(self.seed)
        anchor_span = self.max_norm_anchor - self.min_norm_anchor
        shape_span = self.max_norm_shape - self.min_norm_shape
        naxes = len(self.axes)
        pos = []
        size = []
        for _ in range(self.batch_size):
            norm_anchor = anchor_span * np.random.rand(naxes) + self.min_norm_anchor
            norm_shape = shape_span * np.random.rand(naxes) + self.min_norm_shape
            if self.normalized_anchor:
                anchor = norm_anchor
            else:
                anchor = [floor(norm_anchor[d] * self.image_shape[self.axes[d]])
                          for d in range(naxes)]
            if self.normalized_shape:
                shape = norm_shape
            else:
                shape = [floor(norm_shape[d] * self.image_shape[self.axes[d]])
                         for d in range(naxes)]
            pos.append(np.asarray(anchor, dtype=np.float32))
            size.append(np.asarray(shape, dtype=np.float32))
        self.i = (self.i + 1) % self.n
        return (pos, size)

    next = __next__
def slice_func_helper(axes, axis_names, layout, normalized_anchor, normalized_shape,
                      image, slice_anchor, slice_shape):
    """Numpy reference implementation of the DALI Slice operator.

    Parameters
    ----------
    axes : sequence of int or None
        Indices of the dimensions of ``image`` to be sliced.
    axis_names : str or None
        Names of the dimensions to be sliced; resolved against ``layout`` and
        taking precedence over ``axes``. When both are unset, defaults to
        "WH" (matching the operator's default).
    layout : str
        Layout string of ``image`` (e.g. "HWC").
    normalized_anchor, normalized_shape : bool
        Whether ``slice_anchor``/``slice_shape`` are fractions of the image
        extent (True) or absolute coordinates (False).
    image : numpy.ndarray
        Input array of any rank.
    slice_anchor, slice_shape : sequence of float
        Anchor and shape of the slice window, one entry per sliced axis, in
        the same order as ``axes``/``axis_names``.

    Returns
    -------
    numpy.ndarray
        The sliced view of ``image``.
    """
    # TODO(janton): remove this default once callers always pass the axes.
    if not axes and not axis_names:
        axis_names = "WH"
    if axis_names:
        axes = []
        for axis_name in axis_names:
            assert axis_name in layout
            axes.append(layout.find(axis_name))
    shape = image.shape
    # Expand the per-axis arguments to a full anchor/shape over all dims
    # (untouched dims keep anchor 0 and their full extent).
    full_slice_anchor = [0] * len(shape)
    full_slice_shape = list(shape)
    for idx, axis in enumerate(axes):
        full_slice_anchor[axis] = slice_anchor[idx]
        full_slice_shape[axis] = slice_shape[idx]
    # std::round behaves differently from np.round, so mimic the C++ kernel
    # by adding 0.5 (computed in float32) and truncating to int.
    if normalized_anchor and normalized_shape:
        # Both normalized: round anchor and anchor+shape, not anchor and
        # shape separately, to match the kernel's rounding of the end point.
        start = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]) + 0.5)
                 for i in range(len(shape))]
        end = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i] + full_slice_shape[i]) + 0.5)
               for i in range(len(shape))]
    else:
        if normalized_anchor:
            start = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]) + 0.5)
                     for i in range(len(shape))]
        else:
            start = [int(np.float32(full_slice_anchor[i]) + 0.5)
                     for i in range(len(shape))]
        if normalized_shape:
            end = [start[i] + int(np.float32(shape[i]) * np.float32(full_slice_shape[i]) + 0.5)
                   for i in range(len(shape))]
        else:
            end = [start[i] + int(np.float32(full_slice_shape[i]) + 0.5)
                   for i in range(len(shape))]
    # Build the index tuple dynamically: unlike the original hardcoded
    # 1D-4D chain (which hit assert(False) on higher ranks), this supports
    # arrays of any rank.
    return image[tuple(slice(b, e) for b, e in zip(start, end))]
class SliceSynthDataPipelinePythonOp(Pipeline):
    """CPU reference pipeline slicing synthetic data with plain numpy.

    Mirrors ``SliceSynthDataPipeline`` but routes the cropping through
    ``slice_func_helper`` wrapped in ``ops.PythonFunction`` so the DALI
    operator can be validated against numpy indexing.
    """

    def __init__(self, batch_size, layout, iterator, pos_size_iter,
                 num_threads=1, device_id=0, num_gpus=1,
                 axes=None, axis_names=None,
                 normalized_anchor=True, normalized_shape=True):
        # PythonFunction requires synchronous, non-pipelined execution.
        super(SliceSynthDataPipelinePythonOp, self).__init__(
            batch_size, num_threads, device_id,
            seed=12345, exec_async=False, exec_pipelined=False)
        self.device = "cpu"
        self.layout = layout
        self.iterator = iterator
        self.pos_size_iter = pos_size_iter
        self.inputs = ops.ExternalSource()
        self.input_crop_pos = ops.ExternalSource()
        self.input_crop_size = ops.ExternalSource()
        slice_fn = partial(
            slice_func_helper, axes, axis_names, self.layout,
            normalized_anchor, normalized_shape)
        self.slice = ops.PythonFunction(function=slice_fn)

    def define_graph(self):
        self.data = self.inputs()
        self.crop_pos = self.input_crop_pos()
        self.crop_size = self.input_crop_size()
        return self.slice(self.data, self.crop_pos, self.crop_size)

    def iter_setup(self):
        batch = self.iterator.next()
        self.feed_input(self.data, batch, layout=self.layout)
        (pos, size) = self.pos_size_iter.next()
        self.feed_input(self.crop_pos, pos)
        self.feed_input(self.crop_size, size)
class SlicePythonOp(Pipeline):
    """CPU reference pipeline: decode LMDB images, then slice with numpy.

    Counterpart of ``SlicePipeline`` that performs the cropping through
    ``slice_func_helper`` via ``ops.PythonFunction``.
    """

    def __init__(self, batch_size, pos_size_iter,
                 num_threads=1, device_id=0, num_gpus=1,
                 axes=None, axis_names=None,
                 normalized_anchor=True, normalized_shape=True):
        # PythonFunction requires synchronous, non-pipelined execution.
        super(SlicePythonOp, self).__init__(
            batch_size, num_threads, device_id,
            seed=12345, exec_async=False, exec_pipelined=False)
        self.device = "cpu"
        self.layout = "HWC"
        self.pos_size_iter = pos_size_iter
        self.input = ops.CaffeReader(path=caffe_db_folder, random_shuffle=False)
        self.decode = ops.ImageDecoder(device='cpu', output_type=types.RGB)
        self.input_crop_pos = ops.ExternalSource()
        self.input_crop_size = ops.ExternalSource()
        slice_fn = partial(
            slice_func_helper, axes, axis_names, self.layout,
            normalized_anchor, normalized_shape)
        self.slice = ops.PythonFunction(function=slice_fn)

    def define_graph(self):
        jpegs, _ = self.input()
        images = self.decode(jpegs)
        self.crop_pos = self.input_crop_pos()
        self.crop_size = self.input_crop_size()
        return self.slice(images, self.crop_pos, self.crop_size)

    def iter_setup(self):
        (pos, size) = self.pos_size_iter.next()
        self.feed_input(self.crop_pos, pos)
        self.feed_input(self.crop_size, size)
def check_slice_synth_data_vs_numpy(device, batch_size, input_shape, layout, axes, axis_names,
                                    normalized_anchor, normalized_shape):
    """Compare the Slice operator with the numpy reference on random data."""
    eiis = [RandomDataIterator(batch_size, shape=input_shape)
            for _ in range(2)]
    eii_args = [SliceArgsIterator(batch_size, len(input_shape), image_shape=input_shape,
                                  image_layout=layout, axes=axes, axis_names=axis_names,
                                  normalized_anchor=normalized_anchor,
                                  normalized_shape=normalized_shape)
                for _ in range(2)]
    # NOTE(review): both pipelines are fed from eiis[0]; eiis[1] is unused.
    # RandomDataIterator seems to replay the same batch so this keeps the two
    # pipelines in sync — confirm before rewiring to eiis[1].
    dali_pipe = SliceSynthDataPipeline(device, batch_size, layout,
                                       iter(eiis[0]), iter(eii_args[0]),
                                       axes=axes, axis_names=axis_names,
                                       normalized_anchor=normalized_anchor,
                                       normalized_shape=normalized_shape)
    ref_pipe = SliceSynthDataPipelinePythonOp(batch_size, layout,
                                              iter(eiis[0]), iter(eii_args[1]),
                                              axes=axes, axis_names=axis_names,
                                              normalized_anchor=normalized_anchor,
                                              normalized_shape=normalized_shape)
    compare_pipelines(dali_pipe, ref_pipe, batch_size=batch_size, N_iterations=5)
def test_slice_synth_data_vs_numpy():
    """Yield one operator-vs-numpy comparison per parameter combination."""
    slice_cases = [
        ((200, 400, 3), "HWC", None, "WH"),
        ((200, 400, 3), "HWC", None, "HW"),
        ((200, 400, 3), "HWC", None, "C"),
        ((200, 400, 3), "HWC", (1, 0), None),
        ((200, 400, 3), "HWC", (0, 1), None),
        ((200, 400, 3), "HWC", (2,), None),
        ((200,), "H", (0,), None),
        ((200,), "H", None, "H"),
        ((200, 400), "HW", (1,), None),
        ((200, 400), "HW", None, "W"),
        ((80, 30, 20, 3), "DHWC", (2, 1, 0), None),
        ((80, 30, 20, 3), "DHWC", (0, 1, 2), None),
        ((80, 30, 20, 3), "DHWC", (2, 1), None),
        ((80, 30, 20, 3), "DHWC", None, "WHD"),
        ((80, 30, 20, 3), "DHWC", None, "DHW"),
        ((80, 30, 20, 3), "DHWC", None, "WH"),
        ((80, 30, 20, 3), "DHWC", None, "C"),
    ]
    for device in ["cpu", "gpu"]:
        for batch_size in {1, 8}:
            for input_shape, layout, axes, axis_names in slice_cases:
                for normalized_anchor in [True, False]:
                    for normalized_shape in [True, False]:
                        yield (check_slice_synth_data_vs_numpy, device, batch_size,
                               input_shape, layout, axes, axis_names,
                               normalized_anchor, normalized_shape)
def check_slice_vs_fused_decoder(device, batch_size, axes, axis_names):
    """Slice after decode must match the fused ImageDecoderSlice output."""
    arg_iters = [SliceArgsIterator(batch_size, image_layout="HWC",
                                   axes=axes, axis_names=axis_names)
                 for _ in range(2)]
    pipe_separate = SlicePipeline(device, batch_size, iter(arg_iters[0]),
                                  axes=axes, axis_names=axis_names,
                                  is_fused_decoder=False)
    pipe_fused = SlicePipeline(device, batch_size, iter(arg_iters[1]),
                               axes=axes, axis_names=axis_names,
                               is_fused_decoder=True)
    compare_pipelines(pipe_separate, pipe_fused,
                      batch_size=batch_size, N_iterations=5)
def test_slice_vs_fused_decoder():
    """Exercise the fused-decoder comparison for a few axis selections."""
    axis_configs = [(None, "WH"), (None, "HW"), ((1, 0), None), ((0, 1), None)]
    for device in ["cpu", "gpu"]:
        for batch_size in {1}:
            for axes, axis_names in axis_configs:
                yield check_slice_vs_fused_decoder, device, batch_size, axes, axis_names
def check_slice_vs_numpy(device, batch_size, axes, axis_names):
    """The Slice operator on decoded images must match the numpy reference."""
    arg_iters = [SliceArgsIterator(batch_size, image_layout="HWC",
                                   axes=axes, axis_names=axis_names)
                 for _ in range(2)]
    dali_pipe = SlicePipeline(device, batch_size, iter(arg_iters[0]),
                              axes=axes, axis_names=axis_names)
    ref_pipe = SlicePythonOp(batch_size, iter(arg_iters[1]),
                             axes=axes, axis_names=axis_names)
    compare_pipelines(dali_pipe, ref_pipe, batch_size=batch_size, N_iterations=5)
def test_slice_vs_numpy():
    """Exercise the operator-vs-numpy comparison for a few axis selections."""
    axis_configs = [(None, "WH"), (None, "HW"), ((1, 0), None), ((0, 1), None)]
    for device in ["cpu", "gpu"]:
        for batch_size in {1}:
            for axes, axis_names in axis_configs:
                yield check_slice_vs_numpy, device, batch_size, axes, axis_names
| 44.135392 | 119 | 0.592379 |
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import os
from functools import partial
from test_utils import check_batch
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
from test_utils import RandomDataIterator
from math import floor
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
test_data_video = os.path.join(test_data_root, 'db', 'optical_flow', 'sintel_trailer')
class SliceSynthDataPipeline(Pipeline):
def __init__(self, device, batch_size, layout, iterator, pos_size_iter,
num_threads=1, device_id=0, num_gpus=1,
axes=None, axis_names=None, normalized_anchor=True, normalized_shape=True):
super(SliceSynthDataPipeline, self).__init__(
batch_size, num_threads, device_id, seed=1234)
self.device = device
self.layout = layout
self.iterator = iterator
self.pos_size_iter = pos_size_iter
self.inputs = ops.ExternalSource()
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
if axis_names:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axis_names = axis_names)
elif axes:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axes = axes)
else:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
)
def define_graph(self):
self.data = self.inputs()
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
data = self.data.gpu() if self.device == 'gpu' else self.data
out = self.slice(data, self.crop_pos, self.crop_size)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data, layout=self.layout)
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class SlicePipeline(Pipeline):
def __init__(self, device, batch_size, pos_size_iter,
num_threads=1, device_id=0, is_fused_decoder=False,
axes=None, axis_names=None, normalized_anchor=True, normalized_shape=True):
super(SlicePipeline, self).__init__(
batch_size, num_threads, device_id, seed=1234)
self.is_fused_decoder = is_fused_decoder
self.pos_size_iter = pos_size_iter
self.device = device
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle=False)
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
if self.is_fused_decoder:
if axis_names:
self.decode = ops.ImageDecoderSlice(device = "cpu",
output_type = types.RGB,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axis_names = axis_names)
elif axes:
self.decode = ops.ImageDecoderSlice(device = "cpu",
output_type = types.RGB,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axes = axes)
else:
self.decode = ops.ImageDecoderSlice(device = "cpu",
output_type = types.RGB,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape)
else:
self.decode = ops.ImageDecoder(device = "cpu",
output_type = types.RGB)
if axis_names:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axis_names = axis_names)
elif axes:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape,
axes = axes)
else:
self.slice = ops.Slice(device = self.device,
normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape)
def define_graph(self):
inputs, labels = self.input(name="Reader")
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
if self.is_fused_decoder:
images = self.decode(inputs, self.crop_pos, self.crop_size)
else:
images = self.decode(inputs)
if self.device == 'gpu':
images = images.gpu()
images = self.slice(images, self.crop_pos, self.crop_size)
return images
def iter_setup(self):
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class SliceArgsIterator(object):
def __init__(self,
batch_size,
num_dims=3,
image_shape=None,
image_layout=None,
normalized_anchor=True,
normalized_shape=True,
axes=None,
axis_names=None,
min_norm_anchor=0.0,
max_norm_anchor=0.2,
min_norm_shape=0.4,
max_norm_shape=0.75,
seed=54643613):
self.batch_size = batch_size
self.num_dims = num_dims
self.image_shape = image_shape
self.image_layout = image_layout
self.normalized_anchor = normalized_anchor
self.normalized_shape = normalized_shape
self.axes = axes
self.axis_names = axis_names
self.min_norm_anchor=min_norm_anchor
self.max_norm_anchor=max_norm_anchor
self.min_norm_shape=min_norm_shape
self.max_norm_shape=max_norm_shape
self.seed=seed
if not self.axis_names and not self.axes:
self.axis_names = "WH"
if self.axis_names:
self.axes = []
for axis_name in self.axis_names:
assert axis_name in self.image_layout
self.axes.append(self.image_layout.index(axis_name))
assert(len(self.axes)>0)
def __iter__(self):
self.i = 0
self.n = self.batch_size
return self
def __next__(self):
pos = []
size = []
anchor_amplitude = self.max_norm_anchor - self.min_norm_anchor
anchor_offset = self.min_norm_anchor
shape_amplitude = self.max_norm_shape - self.min_norm_shape
shape_offset = self.min_norm_shape
np.random.seed(self.seed)
for k in range(self.batch_size):
norm_anchor = anchor_amplitude * np.random.rand(len(self.axes)) + anchor_offset
norm_shape = shape_amplitude * np.random.rand(len(self.axes)) + shape_offset
if self.normalized_anchor:
anchor = norm_anchor
else:
anchor = [floor(norm_anchor[i] * self.image_shape[self.axes[i]]) for i in range(len(self.axes))]
if self.normalized_shape:
shape = norm_shape
else:
shape = [floor(norm_shape[i] * self.image_shape[self.axes[i]]) for i in range(len(self.axes))]
pos.append(np.asarray(anchor, dtype=np.float32))
size.append(np.asarray(shape, dtype=np.float32))
self.i = (self.i + 1) % self.n
return (pos, size)
next = __next__
def slice_func_helper(axes, axis_names, layout, normalized_anchor, normalized_shape, image, slice_anchor, slice_shape):
if not axes and not axis_names:
axis_names = "WH"
if axis_names:
axes = []
for axis_name in axis_names:
assert(axis_name in layout)
axis_pos = layout.find(axis_name)
axes.append(axis_pos)
shape = image.shape
full_slice_anchor = [0] * len(shape)
full_slice_shape = list(shape)
for axis in axes:
idx = axes.index(axis)
full_slice_anchor[axis] = slice_anchor[idx]
full_slice_shape[axis] = slice_shape[idx]
if normalized_anchor and normalized_shape:
start = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]) + 0.5)
for i in range(len(shape))]
end = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]+full_slice_shape[i]) + 0.5)
for i in range(len(shape))]
else:
if normalized_anchor:
start = [int(np.float32(shape[i]) * np.float32(full_slice_anchor[i]) + 0.5)
for i in range(len(shape))]
else:
start = [int(np.float32(full_slice_anchor[i]) + 0.5)
for i in range(len(shape))]
if normalized_shape:
end = [start[i] + int(np.float32(shape[i]) * np.float32(full_slice_shape[i]) + 0.5)
for i in range(len(shape))]
else:
end = [start[i] + int(np.float32(full_slice_shape[i]) + 0.5)
for i in range(len(shape))]
if len(full_slice_anchor) == 1:
return image[start[0]:end[0]]
elif len(full_slice_anchor) == 2:
return image[start[0]:end[0], start[1]:end[1]]
elif len(full_slice_anchor) == 3:
return image[start[0]:end[0], start[1]:end[1], start[2]:end[2]]
elif len(full_slice_anchor) == 4:
return image[start[0]:end[0], start[1]:end[1], start[2]:end[2], start[3]:end[3]]
else:
assert(False)
class SliceSynthDataPipelinePythonOp(Pipeline):
def __init__(self, batch_size, layout, iterator, pos_size_iter,
num_threads=1, device_id=0, num_gpus=1,
axes=None, axis_names=None,
normalized_anchor=True, normalized_shape=True):
super(SliceSynthDataPipelinePythonOp, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.device = "cpu"
self.layout = layout
self.iterator = iterator
self.pos_size_iter = pos_size_iter
self.inputs = ops.ExternalSource()
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
function = partial(
slice_func_helper, axes, axis_names, self.layout,
normalized_anchor, normalized_shape)
self.slice = ops.PythonFunction(function=function)
def define_graph(self):
self.data = self.inputs()
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
out = self.slice(self.data, self.crop_pos, self.crop_size)
return out
def iter_setup(self):
data = self.iterator.next()
self.feed_input(self.data, data, layout=self.layout)
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class SlicePythonOp(Pipeline):
def __init__(self, batch_size, pos_size_iter,
num_threads=1, device_id=0, num_gpus=1,
axes=None, axis_names=None,
normalized_anchor=True, normalized_shape=True):
super(SlicePythonOp, self).__init__(
batch_size, num_threads, device_id,
seed=12345, exec_async=False, exec_pipelined=False)
self.device = "cpu"
self.layout = "HWC"
self.pos_size_iter = pos_size_iter
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle=False)
self.decode = ops.ImageDecoder(device = 'cpu', output_type = types.RGB)
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
function = partial(
slice_func_helper, axes, axis_names, self.layout,
normalized_anchor, normalized_shape)
self.slice = ops.PythonFunction(function=function)
def define_graph(self):
imgs, _ = self.input()
imgs = self.decode(imgs)
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
out = self.slice(imgs, self.crop_pos, self.crop_size)
return out
def iter_setup(self):
(crop_pos, crop_size) = self.pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
def check_slice_synth_data_vs_numpy(device, batch_size, input_shape, layout, axes, axis_names,
normalized_anchor, normalized_shape):
eiis = [RandomDataIterator(batch_size, shape=input_shape)
for k in range(2)]
eii_args = [SliceArgsIterator(batch_size, len(input_shape), image_shape=input_shape,
image_layout=layout, axes=axes, axis_names=axis_names, normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape)
for k in range(2)]
compare_pipelines(
SliceSynthDataPipeline(device, batch_size, layout, iter(eiis[0]), iter(eii_args[0]),
axes=axes, axis_names=axis_names, normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape),
SliceSynthDataPipelinePythonOp(batch_size, layout, iter(eiis[0]), iter(eii_args[1]),
axes=axes, axis_names=axis_names, normalized_anchor=normalized_anchor,
normalized_shape=normalized_shape),
batch_size=batch_size, N_iterations=5)
def test_slice_synth_data_vs_numpy():
for device in ["cpu", "gpu"]:
for batch_size in {1, 8}:
for input_shape, layout, axes, axis_names in \
[((200,400,3), "HWC", None, "WH"),
((200,400,3), "HWC", None, "HW"),
((200,400,3), "HWC", None, "C"),
((200,400,3), "HWC", (1,0), None),
((200,400,3), "HWC", (0,1), None),
((200,400,3), "HWC", (2,), None),
((200,), "H", (0,), None),
((200,), "H", None, "H"),
((200,400), "HW", (1,), None),
((200,400), "HW", None, "W"),
((80, 30, 20, 3), "DHWC", (2,1,0), None),
((80, 30, 20, 3), "DHWC", (0,1,2), None),
((80, 30, 20, 3), "DHWC", (2,1), None),
((80, 30, 20, 3), "DHWC", None, "WHD"),
((80, 30, 20, 3), "DHWC", None, "DHW"),
((80, 30, 20, 3), "DHWC", None, "WH"),
((80, 30, 20, 3), "DHWC", None, "C")]:
for normalized_anchor in [True, False]:
for normalized_shape in [True, False]:
yield check_slice_synth_data_vs_numpy, device, batch_size, \
input_shape, layout, axes, axis_names, normalized_anchor, normalized_shape
def check_slice_vs_fused_decoder(device, batch_size, axes, axis_names):
eii_args = [SliceArgsIterator(batch_size, image_layout="HWC", axes=axes, axis_names=axis_names)
for k in range(2)]
compare_pipelines(
SlicePipeline(device, batch_size, iter(eii_args[0]), axes=axes, axis_names=axis_names, is_fused_decoder=False),
SlicePipeline(device, batch_size, iter(eii_args[1]), axes=axes, axis_names=axis_names, is_fused_decoder=True),
batch_size=batch_size, N_iterations=5)
def test_slice_vs_fused_decoder():
for device in ["cpu", "gpu"]:
for batch_size in {1}:
for axes, axis_names in \
[(None, "WH"), (None, "HW"),
((1,0), None), ((0,1), None)]:
yield check_slice_vs_fused_decoder, device, batch_size, axes, axis_names
def check_slice_vs_numpy(device, batch_size, axes, axis_names):
eii_args = [SliceArgsIterator(batch_size, image_layout="HWC", axes=axes, axis_names=axis_names)
for k in range(2)]
compare_pipelines(
SlicePipeline(device, batch_size, iter(eii_args[0]), axes=axes, axis_names=axis_names),
SlicePythonOp(batch_size, iter(eii_args[1]), axes=axes, axis_names=axis_names),
batch_size=batch_size, N_iterations=5)
def test_slice_vs_numpy():
    """Yield nose-style test cases: slice op vs. numpy reference."""
    cases = [(None, "WH"), (None, "HW"),
             ((1, 0), None), ((0, 1), None)]
    for device in ["cpu", "gpu"]:
        for batch_size in {1}:
            for axes, axis_names in cases:
                yield check_slice_vs_numpy, device, batch_size, axes, axis_names
| true | true |
1c4618feed0faaaedbc546d3b6511a52116feb26 | 318 | py | Python | Lib/site-packages/django_makemessages_xgettext/management/commands/makemessagesxgettext.py | MortazaviM/Hackim | 28bf9897d1793176711d1c91f5b7ac57bf4b8a36 | [
"bzip2-1.0.6"
] | 2 | 2016-11-16T19:16:51.000Z | 2018-02-23T02:52:35.000Z | django_makemessages_xgettext/management/commands/makemessagesxgettext.py | resulto/django-makemessages-xgettext | 6af1590ec4dc2ffd6670e026d098cb0baa415d54 | [
"BSD-3-Clause"
] | null | null | null | django_makemessages_xgettext/management/commands/makemessagesxgettext.py | resulto/django-makemessages-xgettext | 6af1590ec4dc2ffd6670e026d098cb0baa415d54 | [
"BSD-3-Clause"
] | null | null | null | import django
if django.get_version().startswith("1.7"):
from django_makemessages_xgettext import django17_makemessagesxgettext
Command = django17_makemessagesxgettext.Command
else:
from django_makemessages_xgettext import django18_makemessagesxgettext
Command = django18_makemessagesxgettext.Command
| 35.333333 | 74 | 0.839623 | import django
if django.get_version().startswith("1.7"):
from django_makemessages_xgettext import django17_makemessagesxgettext
Command = django17_makemessagesxgettext.Command
else:
from django_makemessages_xgettext import django18_makemessagesxgettext
Command = django18_makemessagesxgettext.Command
| true | true |
1c4619c76a66576b7e0d2dd8529056fbf1cb9d05 | 67,648 | py | Python | dulwich/tests/test_porcelain.py | stmcginnis/dulwich | c33607e8d76643c6ec44b3010b138d2039c9acec | [
"Apache-2.0"
] | 1 | 2020-08-08T21:55:08.000Z | 2020-08-08T21:55:08.000Z | dulwich/tests/test_porcelain.py | stmcginnis/dulwich | c33607e8d76643c6ec44b3010b138d2039c9acec | [
"Apache-2.0"
] | null | null | null | dulwich/tests/test_porcelain.py | stmcginnis/dulwich | c33607e8d76643c6ec44b3010b138d2039c9acec | [
"Apache-2.0"
] | null | null | null | # test_porcelain.py -- porcelain tests
# Copyright (C) 2013 Jelmer Vernooij <jelmer@jelmer.uk>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for dulwich.porcelain."""
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import errno
import os
import shutil
import tarfile
import tempfile
import time
from dulwich import porcelain
from dulwich.diff_tree import tree_changes
from dulwich.objects import (
Blob,
Tag,
Tree,
ZERO_SHA,
)
from dulwich.repo import (
NoIndexPresent,
Repo,
)
from dulwich.tests import (
TestCase,
)
from dulwich.tests.utils import (
build_commit_graph,
make_commit,
make_object,
)
def flat_walk_dir(dir_to_walk):
    """Walk *dir_to_walk* and yield each entry as a path relative to it.

    Each subdirectory is yielded once (before its files); files directly in
    the root are yielded as bare names, files in subdirectories as joined
    relative paths.
    """
    for dirpath, _, filenames in os.walk(dir_to_walk):
        at_root = (dirpath == dir_to_walk)
        relative = os.path.relpath(dirpath, dir_to_walk)
        if not at_root:
            yield relative
        for name in filenames:
            yield name if at_root else os.path.join(relative, name)
class PorcelainTestCase(TestCase):
    """Base test case providing a fresh on-disk repository per test.

    Creates ``self.repo`` (at ``self.repo_path`` inside a temporary
    ``self.test_dir``) and registers cleanups to close the repo and remove
    the directory afterwards.
    """

    def setUp(self):
        super(PorcelainTestCase, self).setUp()
        self.test_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.test_dir)
        self.repo_path = os.path.join(self.test_dir, 'repo')
        self.repo = Repo.init(self.repo_path, mkdir=True)
        self.addCleanup(self.repo.close)
class ArchiveTests(PorcelainTestCase):
    """Tests for the archive command."""

    def test_simple(self):
        # Build a three-commit history and point master at the tip.
        commits = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        tip = commits[2]
        self.repo.refs[b"refs/heads/master"] = tip.id
        outstream = BytesIO()
        errstream = BytesIO()
        porcelain.archive(self.repo.path, b"refs/heads/master",
                          outstream=outstream, errstream=errstream)
        self.assertEqual(b"", errstream.getvalue())
        archive = tarfile.TarFile(fileobj=outstream)
        self.addCleanup(archive.close)
        # The synthesized commits have empty trees, so the tar has no entries.
        self.assertEqual([], archive.getnames())
class UpdateServerInfoTests(PorcelainTestCase):
    """Tests for the update_server_info command."""

    def test_simple(self):
        commits = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"refs/heads/foo"] = commits[-1].id
        porcelain.update_server_info(self.repo.path)
        # The dumb-HTTP metadata file must now exist in the control dir.
        info_refs = os.path.join(self.repo.controldir(), 'info', 'refs')
        self.assertTrue(os.path.exists(info_refs))
class CommitTests(PorcelainTestCase):
    """Tests for the commit command."""

    def _check_sha(self, sha):
        # A commit id is returned as a 40-character hex digest in bytes.
        self.assertIsInstance(sha, bytes)
        self.assertEqual(40, len(sha))

    def test_custom_author(self):
        commits = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"refs/heads/foo"] = commits[-1].id
        sha = porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        self._check_sha(sha)

    def test_unicode(self):
        commits = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"refs/heads/foo"] = commits[-1].id
        # Same as above, but message/author/committer as text strings.
        sha = porcelain.commit(
            self.repo.path, message="Some message",
            author="Joe <joe@example.com>",
            committer="Bob <bob@example.com>")
        self._check_sha(sha)
class CleanTests(PorcelainTestCase):
    """Tests for the clean command."""

    def put_files(self, tracked, ignored, untracked, empty_dirs):
        """Put the described files in the wd.

        :param tracked: set of relative paths to create, stage and commit
        :param ignored: set of relative paths to create and list in .gitignore
        :param untracked: set of relative paths to create but leave untracked
        :param empty_dirs: set of relative directory paths to create empty
        """
        all_files = tracked | ignored | untracked
        for file_path in all_files:
            abs_path = os.path.join(self.repo.path, file_path)
            # File may need to be written in a dir that doesn't exist yet, so
            # create the parent dir(s) as necessary
            parent_dir = os.path.dirname(abs_path)
            try:
                os.makedirs(parent_dir)
            except OSError as err:
                if not err.errno == errno.EEXIST:
                    raise err
            with open(abs_path, 'w') as f:
                f.write('')
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            # writelines() adds no separators itself; write one pattern per
            # line so multiple ignore entries don't run together.
            f.writelines('%s\n' % ignore for ignore in ignored)
        for dir_path in empty_dirs:
            # Previously this hard-coded 'empty_dir' and ignored dir_path,
            # which broke for any other directory name (or several of them).
            os.mkdir(os.path.join(self.repo.path, dir_path))
        files_to_add = [os.path.join(self.repo.path, t) for t in tracked]
        porcelain.add(repo=self.repo.path, paths=files_to_add)
        porcelain.commit(repo=self.repo.path, message="init commit")

    def assert_wd(self, expected_paths):
        """Assert paths of files and dirs in wd are same as expected_paths.
        """
        control_dir_rel = os.path.relpath(
            self.repo._controldir, self.repo.path)
        # normalize paths to simplify comparison across platforms
        found_paths = {
            os.path.normpath(p)
            for p in flat_walk_dir(self.repo.path)
            if not p.split(os.sep)[0] == control_dir_rel}
        norm_expected_paths = {os.path.normpath(p) for p in expected_paths}
        self.assertEqual(found_paths, norm_expected_paths)

    def test_from_root(self):
        """Cleaning from the repo root removes all untracked files."""
        self.put_files(
            tracked={
                'tracked_file',
                'tracked_dir/tracked_file',
                '.gitignore'},
            ignored={
                'ignored_file'},
            untracked={
                'untracked_file',
                'tracked_dir/untracked_dir/untracked_file',
                'untracked_dir/untracked_dir/untracked_file'},
            empty_dirs={
                'empty_dir'})
        porcelain.clean(repo=self.repo.path, target_dir=self.repo.path)
        self.assert_wd({
            'tracked_file',
            'tracked_dir/tracked_file',
            '.gitignore',
            'ignored_file',
            'tracked_dir'})

    def test_from_subdir(self):
        """Cleaning from a subdir only removes untracked files under it."""
        self.put_files(
            tracked={
                'tracked_file',
                'tracked_dir/tracked_file',
                '.gitignore'},
            ignored={
                'ignored_file'},
            untracked={
                'untracked_file',
                'tracked_dir/untracked_dir/untracked_file',
                'untracked_dir/untracked_dir/untracked_file'},
            empty_dirs={
                'empty_dir'})
        porcelain.clean(
            repo=self.repo,
            target_dir=os.path.join(self.repo.path, 'untracked_dir'))
        self.assert_wd({
            'tracked_file',
            'tracked_dir/tracked_file',
            '.gitignore',
            'ignored_file',
            'untracked_file',
            'tracked_dir/untracked_dir/untracked_file',
            'empty_dir',
            'untracked_dir',
            'tracked_dir',
            'tracked_dir/untracked_dir'})
class CloneTests(PorcelainTestCase):
    """Tests for the clone command."""

    def test_simple_local(self):
        """Clone without checkout: refs/config copied, worktree left empty."""
        f1_1 = make_object(Blob, data=b'f1')
        commit_spec = [[1], [2, 1], [3, 1, 2]]
        trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
                 2: [(b'f1', f1_1), (b'f2', f1_1)],
                 3: [(b'f1', f1_1), (b'f2', f1_1)], }
        c1, c2, c3 = build_commit_graph(self.repo.object_store,
                                        commit_spec, trees)
        self.repo.refs[b"refs/heads/master"] = c3.id
        self.repo.refs[b"refs/tags/foo"] = c3.id
        target_path = tempfile.mkdtemp()
        errstream = BytesIO()
        self.addCleanup(shutil.rmtree, target_path)
        r = porcelain.clone(self.repo.path, target_path,
                            checkout=False, errstream=errstream)
        self.addCleanup(r.close)
        self.assertEqual(r.path, target_path)
        target_repo = Repo(target_path)
        # checkout=False: index stays empty, no files materialized
        self.assertEqual(0, len(target_repo.open_index()))
        self.assertEqual(c3.id, target_repo.refs[b'refs/tags/foo'])
        self.assertTrue(b'f1' not in os.listdir(target_path))
        self.assertTrue(b'f2' not in os.listdir(target_path))
        c = r.get_config()
        encoded_path = self.repo.path
        if not isinstance(encoded_path, bytes):
            encoded_path = encoded_path.encode('utf-8')
        # origin remote should record the source url and default fetch spec
        self.assertEqual(encoded_path, c.get((b'remote', b'origin'), b'url'))
        self.assertEqual(
            b'+refs/heads/*:refs/remotes/origin/*',
            c.get((b'remote', b'origin'), b'fetch'))

    def test_simple_local_with_checkout(self):
        """Clone with checkout: HEAD set and tree files materialized."""
        f1_1 = make_object(Blob, data=b'f1')
        commit_spec = [[1], [2, 1], [3, 1, 2]]
        trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
                 2: [(b'f1', f1_1), (b'f2', f1_1)],
                 3: [(b'f1', f1_1), (b'f2', f1_1)], }
        c1, c2, c3 = build_commit_graph(self.repo.object_store,
                                        commit_spec, trees)
        self.repo.refs[b"refs/heads/master"] = c3.id
        target_path = tempfile.mkdtemp()
        errstream = BytesIO()
        self.addCleanup(shutil.rmtree, target_path)
        with porcelain.clone(self.repo.path, target_path,
                             checkout=True,
                             errstream=errstream) as r:
            self.assertEqual(r.path, target_path)
        with Repo(target_path) as r:
            self.assertEqual(r.head(), c3.id)
        self.assertTrue('f1' in os.listdir(target_path))
        self.assertTrue('f2' in os.listdir(target_path))

    def test_bare_local_with_checkout(self):
        """Bare clone: no index, no worktree files."""
        f1_1 = make_object(Blob, data=b'f1')
        commit_spec = [[1], [2, 1], [3, 1, 2]]
        trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
                 2: [(b'f1', f1_1), (b'f2', f1_1)],
                 3: [(b'f1', f1_1), (b'f2', f1_1)], }
        c1, c2, c3 = build_commit_graph(self.repo.object_store,
                                        commit_spec, trees)
        self.repo.refs[b"refs/heads/master"] = c3.id
        target_path = tempfile.mkdtemp()
        errstream = BytesIO()
        self.addCleanup(shutil.rmtree, target_path)
        with porcelain.clone(
                self.repo.path, target_path, bare=True,
                errstream=errstream) as r:
            self.assertEqual(r.path, target_path)
        with Repo(target_path) as r:
            r.head()
            self.assertRaises(NoIndexPresent, r.open_index)
        self.assertFalse(b'f1' in os.listdir(target_path))
        self.assertFalse(b'f2' in os.listdir(target_path))

    def test_no_checkout_with_bare(self):
        """checkout=True combined with bare=True is rejected."""
        f1_1 = make_object(Blob, data=b'f1')
        commit_spec = [[1]]
        trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
        (c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
        self.repo.refs[b"refs/heads/master"] = c1.id
        self.repo.refs[b"HEAD"] = c1.id
        target_path = tempfile.mkdtemp()
        errstream = BytesIO()
        self.addCleanup(shutil.rmtree, target_path)
        self.assertRaises(
            ValueError, porcelain.clone, self.repo.path,
            target_path, checkout=True, bare=True, errstream=errstream)

    def test_no_head_no_checkout(self):
        """Cloning a repo without HEAD succeeds even with checkout=True."""
        f1_1 = make_object(Blob, data=b'f1')
        commit_spec = [[1]]
        trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
        (c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
        self.repo.refs[b"refs/heads/master"] = c1.id
        target_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, target_path)
        errstream = BytesIO()
        r = porcelain.clone(
            self.repo.path, target_path, checkout=True, errstream=errstream)
        r.close()

    def test_no_head_no_checkout_outstream_errstream_autofallback(self):
        """Same as above, but with a NoneStream as the error stream."""
        f1_1 = make_object(Blob, data=b'f1')
        commit_spec = [[1]]
        trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
        (c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
        self.repo.refs[b"refs/heads/master"] = c1.id
        target_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, target_path)
        errstream = porcelain.NoneStream()
        r = porcelain.clone(
            self.repo.path, target_path, checkout=True, errstream=errstream)
        r.close()
class InitTests(TestCase):
    """Tests for the init command."""

    def _fresh_dir(self):
        # Temporary directory, removed again once the test finishes.
        path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, path)
        return path

    def test_non_bare(self):
        porcelain.init(self._fresh_dir())

    def test_bare(self):
        porcelain.init(self._fresh_dir(), bare=True)
class AddTests(PorcelainTestCase):
    """Tests for the add command."""

    def test_add_default_paths(self):
        """With no paths given, add stages everything from the repo root."""
        # create a file for initial commit
        fullpath = os.path.join(self.repo.path, 'blah')
        with open(fullpath, 'w') as f:
            f.write("\n")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(repo=self.repo.path, message=b'test',
                         author=b'test <email>', committer=b'test <email>')
        # Add a second test file and a file in a directory
        with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
            f.write("\n")
        os.mkdir(os.path.join(self.repo.path, 'adir'))
        with open(os.path.join(self.repo.path, 'adir', 'afile'), 'w') as f:
            f.write("\n")
        cwd = os.getcwd()
        try:
            os.chdir(self.repo.path)
            porcelain.add(self.repo.path)
        finally:
            os.chdir(cwd)
        # Check that foo was added and nothing in .git was modified
        index = self.repo.open_index()
        self.assertEqual(sorted(index), [b'adir/afile', b'blah', b'foo'])

    def test_add_default_paths_subdir(self):
        """Default add from a subdirectory only stages files under it."""
        os.mkdir(os.path.join(self.repo.path, 'foo'))
        with open(os.path.join(self.repo.path, 'blah'), 'w') as f:
            f.write("\n")
        with open(os.path.join(self.repo.path, 'foo', 'blie'), 'w') as f:
            f.write("\n")
        cwd = os.getcwd()
        try:
            os.chdir(os.path.join(self.repo.path, 'foo'))
            porcelain.add(repo=self.repo.path)
            porcelain.commit(repo=self.repo.path, message=b'test',
                             author=b'test <email>',
                             committer=b'test <email>')
        finally:
            os.chdir(cwd)
        index = self.repo.open_index()
        # 'blah' at the repo root is NOT picked up from inside 'foo'
        self.assertEqual(sorted(index), [b'foo/blie'])

    def test_add_file(self):
        """A single explicitly named file ends up in the index."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo.path, paths=[fullpath])
        self.assertIn(b"foo", self.repo.open_index())

    def test_add_ignored(self):
        """Ignored paths are reported separately and not staged."""
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write("foo")
        with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
            f.write("BAR")
        with open(os.path.join(self.repo.path, 'bar'), 'w') as f:
            f.write("BAR")
        (added, ignored) = porcelain.add(self.repo.path, paths=[
            os.path.join(self.repo.path, "foo"),
            os.path.join(self.repo.path, "bar")])
        self.assertIn(b"bar", self.repo.open_index())
        self.assertEqual(set(['bar']), set(added))
        self.assertEqual(set(['foo']), ignored)

    def test_add_file_absolute_path(self):
        """Absolute paths inside the repository are supported."""
        with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo, paths=[os.path.join(self.repo.path, "foo")])
        self.assertIn(b"foo", self.repo.open_index())

    def test_add_not_in_repo(self):
        """Paths outside the repository are rejected with ValueError."""
        with open(os.path.join(self.test_dir, 'foo'), 'w') as f:
            f.write("BAR")
        self.assertRaises(
            ValueError,
            porcelain.add, self.repo,
            paths=[os.path.join(self.test_dir, "foo")])
        self.assertRaises(
            ValueError,
            porcelain.add, self.repo,
            paths=["../foo"])
        self.assertEqual([], list(self.repo.open_index()))

    def test_add_file_clrf_conversion(self):
        """With core.autocrlf=input, CRLF is normalized to LF on add."""
        # Set the right configuration to the repo
        c = self.repo.get_config()
        c.set("core", "autocrlf", "input")
        c.write_to_path()
        # Add a file with CRLF line-ending
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'wb') as f:
            f.write(b"line1\r\nline2")
        porcelain.add(self.repo.path, paths=[fullpath])
        # The line-endings should have been converted to LF
        index = self.repo.open_index()
        self.assertIn(b"foo", index)
        entry = index[b"foo"]
        blob = self.repo[entry.sha]
        self.assertEqual(blob.data, b"line1\nline2")
class RemoveTests(PorcelainTestCase):
    """Tests for the remove command."""

    def test_remove_file(self):
        """Removing a committed file deletes it from the working tree."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo.path, paths=[fullpath])
        porcelain.commit(repo=self.repo, message=b'test',
                         author=b'test <email>',
                         committer=b'test <email>')
        self.assertTrue(os.path.exists(os.path.join(self.repo.path, 'foo')))
        cwd = os.getcwd()
        try:
            # remove() is invoked with a path relative to the repo root
            os.chdir(self.repo.path)
            porcelain.remove(self.repo.path, paths=["foo"])
        finally:
            os.chdir(cwd)
        self.assertFalse(os.path.exists(os.path.join(self.repo.path, 'foo')))

    def test_remove_file_staged(self):
        """Removing a staged-but-uncommitted file raises an exception."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        cwd = os.getcwd()
        try:
            os.chdir(self.repo.path)
            porcelain.add(self.repo.path, paths=[fullpath])
            self.assertRaises(Exception, porcelain.rm, self.repo.path,
                              paths=["foo"])
        finally:
            os.chdir(cwd)
class LogTests(PorcelainTestCase):
    """Tests for the log command."""

    def _make_history(self):
        # Three commits (including a merge); HEAD points at the tip.
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id

    def test_simple(self):
        self._make_history()
        stream = StringIO()
        porcelain.log(self.repo.path, outstream=stream)
        # One dashed separator is printed per commit shown.
        self.assertEqual(3, stream.getvalue().count("-" * 50))

    def test_max_entries(self):
        self._make_history()
        stream = StringIO()
        porcelain.log(self.repo.path, outstream=stream, max_entries=1)
        self.assertEqual(1, stream.getvalue().count("-" * 50))
class ShowTests(PorcelainTestCase):
    """Tests for the show command."""

    def test_nolist(self):
        """A single object id (not wrapped in a list) is accepted."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=c3.id, outstream=outstream)
        self.assertTrue(outstream.getvalue().startswith("-" * 50))

    def test_simple(self):
        """A list of commit ids produces commit output."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=[c3.id], outstream=outstream)
        self.assertTrue(outstream.getvalue().startswith("-" * 50))

    def test_blob(self):
        """Showing a blob prints its raw contents."""
        b = Blob.from_string(b"The Foo\n")
        self.repo.object_store.add_object(b)
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=[b.id], outstream=outstream)
        self.assertEqual(outstream.getvalue(), "The Foo\n")

    def test_commit_no_parent(self):
        """A root commit is rendered with a diff against the empty tree."""
        a = Blob.from_string(b"The Foo\n")
        ta = Tree()
        ta.add(b"somename", 0o100644, a.id)
        ca = make_commit(tree=ta.id)
        self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=[ca.id], outstream=outstream)
        self.assertMultiLineEqual(outstream.getvalue(), """\
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""")

    def test_tag(self):
        """An annotated tag prints tag metadata followed by the commit."""
        a = Blob.from_string(b"The Foo\n")
        ta = Tree()
        ta.add(b"somename", 0o100644, a.id)
        ca = make_commit(tree=ta.id)
        self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
        porcelain.tag_create(
            self.repo.path, b"tryme", b'foo <foo@bar.com>', b'bar',
            annotated=True, objectish=ca.id, tag_time=1552854211,
            tag_timezone=0)
        outstream = StringIO()
        porcelain.show(self.repo, objects=[b'refs/tags/tryme'],
                       outstream=outstream)
        self.maxDiff = None
        self.assertMultiLineEqual(outstream.getvalue(), """\
Tagger: foo <foo@bar.com>
Date: Sun Mar 17 2019 20:23:31 +0000
bar
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""")

    def test_commit_with_change(self):
        """A commit with a parent is rendered with a diff against it."""
        a = Blob.from_string(b"The Foo\n")
        ta = Tree()
        ta.add(b"somename", 0o100644, a.id)
        ca = make_commit(tree=ta.id)
        b = Blob.from_string(b"The Bar\n")
        tb = Tree()
        tb.add(b"somename", 0o100644, b.id)
        cb = make_commit(tree=tb.id, parents=[ca.id])
        self.repo.object_store.add_objects(
            [(a, None), (b, None), (ta, None), (tb, None),
             (ca, None), (cb, None)])
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=[cb.id], outstream=outstream)
        self.assertMultiLineEqual(outstream.getvalue(), """\
--------------------------------------------------
commit: 2c6b6c9cb72c130956657e1fdae58e5b103744fa
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
index ea5c7bf..fd38bcb 100644
--- a/somename
+++ b/somename
@@ -1 +1 @@
-The Foo
+The Bar
""")
class SymbolicRefTests(PorcelainTestCase):
    """Tests for the symbolic_ref command."""

    def test_set_wrong_symbolic_ref(self):
        """Pointing HEAD at a nonexistent branch without force fails."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        self.assertRaises(ValueError, porcelain.symbolic_ref, self.repo.path,
                          b'foobar')

    def test_set_force_wrong_symbolic_ref(self):
        """force=True allows pointing HEAD at a nonexistent branch."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        porcelain.symbolic_ref(self.repo.path, b'force_foobar', force=True)
        # test if we actually changed the file
        with self.repo.get_named_file('HEAD') as f:
            new_ref = f.read()
        self.assertEqual(new_ref, b'ref: refs/heads/force_foobar\n')

    def test_set_symbolic_ref(self):
        """Pointing HEAD at an existing branch succeeds."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        porcelain.symbolic_ref(self.repo.path, b'master')

    def test_set_symbolic_ref_other_than_master(self):
        """HEAD can be pointed at a non-master branch."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]],
            attrs=dict(refs='develop'))
        self.repo.refs[b"HEAD"] = c3.id
        self.repo.refs[b"refs/heads/develop"] = c3.id
        porcelain.symbolic_ref(self.repo.path, b'develop')
        # test if we actually changed the file
        with self.repo.get_named_file('HEAD') as f:
            new_ref = f.read()
        self.assertEqual(new_ref, b'ref: refs/heads/develop\n')
class DiffTreeTests(PorcelainTestCase):
    """Tests for the diff_tree command."""

    def test_empty(self):
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        stream = BytesIO()
        porcelain.diff_tree(
            self.repo.path, c2.tree, c3.tree, outstream=stream)
        # Identical trees produce no diff output at all.
        self.assertEqual(b"", stream.getvalue())
class CommitTreeTests(PorcelainTestCase):
    """Tests for the commit_tree command."""

    def test_simple(self):
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        # Build a one-file tree by hand and store it.
        blob = Blob()
        blob.data = b"foo the bar"
        tree = Tree()
        tree.add(b"somename", 0o100644, blob.id)
        self.repo.object_store.add_object(tree)
        self.repo.object_store.add_object(blob)
        sha = porcelain.commit_tree(
            self.repo.path, tree.id, message=b"Withcommit.",
            author=b"Joe <joe@example.com>",
            committer=b"Jane <jane@example.com>")
        # The result is a 40-character hex commit id in bytes.
        self.assertIsInstance(sha, bytes)
        self.assertEqual(40, len(sha))
class RevListTests(PorcelainTestCase):
    """Tests for the rev_list command."""

    def test_simple(self):
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        stream = BytesIO()
        porcelain.rev_list(self.repo.path, [c3.id], outstream=stream)
        # Commits are listed newest first, one id per line.
        expected = b"\n".join([c3.id, c2.id, c1.id]) + b"\n"
        self.assertEqual(expected, stream.getvalue())
class TagCreateTests(PorcelainTestCase):
    """Tests for the tag_create command."""

    def _set_head(self):
        # Three-commit history with HEAD at the tip.
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id

    def test_annotated(self):
        self._set_head()
        porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
                             b'bar', annotated=True)
        tags = self.repo.refs.as_dict(b"refs/tags")
        self.assertEqual([b"tryme"], list(tags.keys()))
        tag = self.repo[b'refs/tags/tryme']
        self.assertIsInstance(tag, Tag)
        self.assertEqual(b"foo <foo@bar.com>", tag.tagger)
        self.assertEqual(b"bar", tag.message)
        # The tag timestamp should be (roughly) "now".
        self.assertLess(time.time() - tag.tag_time, 5)

    def test_unannotated(self):
        self._set_head()
        porcelain.tag_create(self.repo.path, b"tryme", annotated=False)
        tags = self.repo.refs.as_dict(b"refs/tags")
        self.assertEqual([b"tryme"], list(tags.keys()))
        self.repo[b'refs/tags/tryme']
        # A lightweight tag points straight at the HEAD commit.
        self.assertEqual([self.repo.head()], list(tags.values()))

    def test_unannotated_unicode(self):
        self._set_head()
        # Same as above, with the tag name passed as a text string.
        porcelain.tag_create(self.repo.path, "tryme", annotated=False)
        tags = self.repo.refs.as_dict(b"refs/tags")
        self.assertEqual([b"tryme"], list(tags.keys()))
        self.repo[b'refs/tags/tryme']
        self.assertEqual([self.repo.head()], list(tags.values()))
class TagListTests(PorcelainTestCase):
    """Tests for the tag_list command."""

    def test_empty(self):
        self.assertEqual([], porcelain.tag_list(self.repo.path))

    def test_simple(self):
        self.repo.refs[b"refs/tags/foo"] = b"aa" * 20
        self.repo.refs[b"refs/tags/bar/bla"] = b"bb" * 20
        # Tag names come back sorted.
        self.assertEqual([b"bar/bla", b"foo"],
                         porcelain.tag_list(self.repo.path))
class TagDeleteTests(PorcelainTestCase):
    """Tests for the tag_delete command."""

    def test_simple(self):
        [commit] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = commit.id
        porcelain.tag_create(self.repo, b'foo')
        self.assertIn(b"foo", porcelain.tag_list(self.repo))
        porcelain.tag_delete(self.repo, b'foo')
        self.assertNotIn(b"foo", porcelain.tag_list(self.repo))
class ResetTests(PorcelainTestCase):
    """Tests for the reset command."""

    def test_hard_head(self):
        """reset --hard HEAD discards working-tree modifications."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo.path, paths=[fullpath])
        porcelain.commit(self.repo.path, message=b"Some message",
                         committer=b"Jane <jane@example.com>",
                         author=b"John <john@example.com>")
        with open(os.path.join(self.repo.path, 'foo'), 'wb') as f:
            f.write(b"OOH")
        porcelain.reset(self.repo, "hard", b"HEAD")
        index = self.repo.open_index()
        # After the reset, index and HEAD tree must be identical
        changes = list(tree_changes(self.repo,
                       index.commit(self.repo.object_store),
                       self.repo[b'HEAD'].tree))
        self.assertEqual([], changes)

    def test_hard_commit(self):
        """reset --hard <sha> rewinds index and tree to that commit."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo.path, paths=[fullpath])
        sha = porcelain.commit(self.repo.path, message=b"Some message",
                               committer=b"Jane <jane@example.com>",
                               author=b"John <john@example.com>")
        with open(fullpath, 'wb') as f:
            f.write(b"BAZ")
        porcelain.add(self.repo.path, paths=[fullpath])
        porcelain.commit(self.repo.path, message=b"Some other message",
                         committer=b"Jane <jane@example.com>",
                         author=b"John <john@example.com>")
        porcelain.reset(self.repo, "hard", sha)
        index = self.repo.open_index()
        # Index must match the tree of the first commit again
        changes = list(tree_changes(self.repo,
                       index.commit(self.repo.object_store),
                       self.repo[sha].tree))
        self.assertEqual([], changes)
class PushTests(PorcelainTestCase):
    """Tests for the push command."""

    def test_simple(self):
        """
        Basic test of porcelain push where self.repo is the remote.  First
        clone the remote, commit a file to the clone, then push the changes
        back to the remote.
        """
        outstream = BytesIO()
        errstream = BytesIO()
        porcelain.commit(repo=self.repo.path, message=b'init',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Setup target repo cloned from temp test repo
        clone_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, clone_path)
        target_repo = porcelain.clone(self.repo.path, target=clone_path,
                                      errstream=errstream)
        try:
            self.assertEqual(target_repo[b'HEAD'], self.repo[b'HEAD'])
        finally:
            target_repo.close()
        # create a second file to be pushed back to origin
        handle, fullpath = tempfile.mkstemp(dir=clone_path)
        os.close(handle)
        porcelain.add(repo=clone_path, paths=[fullpath])
        porcelain.commit(repo=clone_path, message=b'push',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Setup a non-checked out branch in the remote
        refs_path = b"refs/heads/foo"
        new_id = self.repo[b'HEAD'].id
        self.assertNotEqual(new_id, ZERO_SHA)
        self.repo.refs[refs_path] = new_id
        # Push to the remote
        porcelain.push(clone_path, self.repo.path, b"HEAD:" + refs_path,
                       outstream=outstream, errstream=errstream)
        # Check that the target and source
        with Repo(clone_path) as r_clone:
            self.assertEqual({
                b'HEAD': new_id,
                b'refs/heads/foo': r_clone[b'HEAD'].id,
                b'refs/heads/master': new_id,
                }, self.repo.get_refs())
            self.assertEqual(r_clone[b'HEAD'].id, self.repo[refs_path].id)
            # Get the change in the target repo corresponding to the add
            # this will be in the foo branch.
            change = list(tree_changes(self.repo, self.repo[b'HEAD'].tree,
                                       self.repo[b'refs/heads/foo'].tree))[0]
            self.assertEqual(os.path.basename(fullpath),
                             change.new.path.decode('ascii'))

    def test_delete(self):
        """Basic test of porcelain push, removing a branch.
        """
        outstream = BytesIO()
        errstream = BytesIO()
        porcelain.commit(repo=self.repo.path, message=b'init',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Setup target repo cloned from temp test repo
        clone_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, clone_path)
        target_repo = porcelain.clone(self.repo.path, target=clone_path,
                                      errstream=errstream)
        target_repo.close()
        # Setup a non-checked out branch in the remote
        refs_path = b"refs/heads/foo"
        new_id = self.repo[b'HEAD'].id
        self.assertNotEqual(new_id, ZERO_SHA)
        self.repo.refs[refs_path] = new_id
        # Push to the remote; an empty source in the refspec deletes the
        # remote branch
        porcelain.push(clone_path, self.repo.path, b":" + refs_path,
                       outstream=outstream, errstream=errstream)
        self.assertEqual({
            b'HEAD': new_id,
            b'refs/heads/master': new_id,
            }, self.repo.get_refs())
class PullTests(PorcelainTestCase):
    """Tests for the pull command."""

    def setUp(self):
        """Create an origin with one commit, clone it, then commit again
        in the origin so the clone is one commit behind."""
        super(PullTests, self).setUp()
        # create a file for initial commit
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test',
                         author=b'test <email>',
                         committer=b'test <email>')
        # Setup target repo
        self.target_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.target_path)
        target_repo = porcelain.clone(self.repo.path, target=self.target_path,
                                      errstream=BytesIO())
        target_repo.close()
        # create a second file to be pushed
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test2',
                         author=b'test2 <email>',
                         committer=b'test2 <email>')
        self.assertTrue(b'refs/heads/master' in self.repo.refs)
        self.assertTrue(b'refs/heads/master' in target_repo.refs)

    def test_simple(self):
        """Pull with an explicit refspec updates the clone's HEAD."""
        outstream = BytesIO()
        errstream = BytesIO()
        # Pull changes into the cloned repo
        porcelain.pull(self.target_path, self.repo.path, b'refs/heads/master',
                       outstream=outstream, errstream=errstream)
        # Check the target repo for pushed changes
        with Repo(self.target_path) as r:
            self.assertEqual(r[b'HEAD'].id, self.repo[b'HEAD'].id)

    def test_no_refspec(self):
        """Pull without a refspec also updates the clone's HEAD."""
        outstream = BytesIO()
        errstream = BytesIO()
        # Pull changes into the cloned repo
        porcelain.pull(self.target_path, self.repo.path, outstream=outstream,
                       errstream=errstream)
        # Check the target repo for pushed changes
        with Repo(self.target_path) as r:
            self.assertEqual(r[b'HEAD'].id, self.repo[b'HEAD'].id)
class StatusTests(PorcelainTestCase):
    """Tests for porcelain.status, get_tree_changes and get_untracked_paths."""
    def test_empty(self):
        # A fresh repo has nothing staged, unstaged or untracked.
        results = porcelain.status(self.repo)
        self.assertEqual(
            {'add': [], 'delete': [], 'modify': []},
            results.staged)
        self.assertEqual([], results.unstaged)
    def test_status_base(self):
        """Integration test for `status` functionality."""
        # Commit a dummy file then modify it
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # modify access and modify time of path
        # (forces the index stat-cache to miss so the change is detected)
        os.utime(fullpath, (0, 0))
        with open(fullpath, 'wb') as f:
            f.write(b'stuff')
        # Make a dummy file and stage it
        filename_add = 'bar'
        fullpath = os.path.join(self.repo.path, filename_add)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        results = porcelain.status(self.repo)
        self.assertEqual(results.staged['add'][0],
                         filename_add.encode('ascii'))
        self.assertEqual(results.unstaged, [b'foo'])
    def test_status_all(self):
        """Exercise every status bucket (staged add/delete/modify, unstaged,
        untracked) in a single repository."""
        del_path = os.path.join(self.repo.path, 'foo')
        mod_path = os.path.join(self.repo.path, 'bar')
        add_path = os.path.join(self.repo.path, 'baz')
        us_path = os.path.join(self.repo.path, 'blye')
        ut_path = os.path.join(self.repo.path, 'blyat')
        with open(del_path, 'w') as f:
            f.write('origstuff')
        with open(mod_path, 'w') as f:
            f.write('origstuff')
        with open(us_path, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[del_path, mod_path, us_path])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        porcelain.remove(self.repo.path, [del_path])
        with open(add_path, 'w') as f:
            f.write('origstuff')
        with open(mod_path, 'w') as f:
            f.write('more_origstuff')
        with open(us_path, 'w') as f:
            f.write('more_origstuff')
        porcelain.add(repo=self.repo.path, paths=[add_path, mod_path])
        # 'blye' is modified again after staging -> appears unstaged
        with open(us_path, 'w') as f:
            f.write('\norigstuff')
        with open(ut_path, 'w') as f:
            f.write('origstuff')
        results = porcelain.status(self.repo.path)
        self.assertDictEqual(
            {'add': [b'baz'], 'delete': [b'foo'], 'modify': [b'bar']},
            results.staged)
        self.assertListEqual(results.unstaged, [b'blye'])
        self.assertListEqual(results.untracked, ['blyat'])
    def test_status_crlf_mismatch(self):
        """Without autocrlf, a CRLF rewrite of an LF file shows as unstaged."""
        # First make a commit as if the file has been added on a Linux system
        # or with core.autocrlf=True
        file_path = os.path.join(self.repo.path, 'crlf')
        with open(file_path, 'wb') as f:
            f.write(b'line1\nline2')
        porcelain.add(repo=self.repo.path, paths=[file_path])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Then update the file as if it was created by CGit on a Windows
        # system with core.autocrlf=true
        with open(file_path, 'wb') as f:
            f.write(b'line1\r\nline2')
        results = porcelain.status(self.repo)
        self.assertDictEqual(
            {'add': [], 'delete': [], 'modify': []},
            results.staged)
        self.assertListEqual(results.unstaged, [b'crlf'])
        self.assertListEqual(results.untracked, [])
    def test_status_crlf_convert(self):
        """With core.autocrlf=true, the CRLF rewrite is normalized away and
        the tree reads as clean."""
        # First make a commit as if the file has been added on a Linux system
        # or with core.autocrlf=True
        file_path = os.path.join(self.repo.path, 'crlf')
        with open(file_path, 'wb') as f:
            f.write(b'line1\nline2')
        porcelain.add(repo=self.repo.path, paths=[file_path])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Then update the file as if it was created by CGit on a Windows
        # system with core.autocrlf=true
        with open(file_path, 'wb') as f:
            f.write(b'line1\r\nline2')
        # TODO: It should be set automatically by looking at the configuration
        c = self.repo.get_config()
        c.set("core", "autocrlf", True)
        c.write_to_path()
        results = porcelain.status(self.repo)
        self.assertDictEqual(
            {'add': [], 'delete': [], 'modify': []},
            results.staged)
        self.assertListEqual(results.unstaged, [])
        self.assertListEqual(results.untracked, [])
    def test_get_tree_changes_add(self):
        """Unit test for get_tree_changes add."""
        # Make a dummy file, stage
        filename = 'bar'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        filename = 'foo'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        changes = porcelain.get_tree_changes(self.repo.path)
        self.assertEqual(changes['add'][0], filename.encode('ascii'))
        self.assertEqual(len(changes['add']), 1)
        self.assertEqual(len(changes['modify']), 0)
        self.assertEqual(len(changes['delete']), 0)
    def test_get_tree_changes_modify(self):
        """Unit test for get_tree_changes modify."""
        # Make a dummy file, stage, commit, modify
        filename = 'foo'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        with open(fullpath, 'w') as f:
            f.write('otherstuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        changes = porcelain.get_tree_changes(self.repo.path)
        self.assertEqual(changes['modify'][0], filename.encode('ascii'))
        self.assertEqual(len(changes['add']), 0)
        self.assertEqual(len(changes['modify']), 1)
        self.assertEqual(len(changes['delete']), 0)
    def test_get_tree_changes_delete(self):
        """Unit test for get_tree_changes delete."""
        # Make a dummy file, stage, commit, remove
        filename = 'foo'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # remove() resolves paths relative to the cwd, hence the chdir dance
        cwd = os.getcwd()
        try:
            os.chdir(self.repo.path)
            porcelain.remove(repo=self.repo.path, paths=[filename])
        finally:
            os.chdir(cwd)
        changes = porcelain.get_tree_changes(self.repo.path)
        self.assertEqual(changes['delete'][0], filename.encode('ascii'))
        self.assertEqual(len(changes['add']), 0)
        self.assertEqual(len(changes['modify']), 0)
        self.assertEqual(len(changes['delete']), 1)
    def test_get_untracked_paths(self):
        """get_untracked_paths reports ignored files too; status filters them
        unless ignored=True is passed."""
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write('ignored\n')
        with open(os.path.join(self.repo.path, 'ignored'), 'w') as f:
            f.write('blah\n')
        with open(os.path.join(self.repo.path, 'notignored'), 'w') as f:
            f.write('blah\n')
        self.assertEqual(
            set(['ignored', 'notignored', '.gitignore']),
            set(porcelain.get_untracked_paths(self.repo.path, self.repo.path,
                                              self.repo.open_index())))
        self.assertEqual(set(['.gitignore', 'notignored']),
                         set(porcelain.status(self.repo).untracked))
        self.assertEqual(set(['.gitignore', 'notignored', 'ignored']),
                         set(porcelain.status(self.repo, ignored=True)
                             .untracked))
    def test_get_untracked_paths_nested(self):
        """A nested repository's files are not reported by the outer repo."""
        with open(os.path.join(self.repo.path, 'notignored'), 'w') as f:
            f.write('blah\n')
        subrepo = Repo.init(os.path.join(self.repo.path, 'nested'), mkdir=True)
        with open(os.path.join(subrepo.path, 'another'), 'w') as f:
            f.write('foo\n')
        self.assertEqual(
            set(['notignored']),
            set(porcelain.get_untracked_paths(self.repo.path, self.repo.path,
                                              self.repo.open_index())))
        self.assertEqual(
            set(['another']),
            set(porcelain.get_untracked_paths(subrepo.path, subrepo.path,
                                              subrepo.open_index())))
# TODO(jelmer): Add test for dulwich.porcelain.daemon
class UploadPackTests(PorcelainTestCase):
    """Tests for upload_pack."""
    def test_upload_pack(self):
        # An empty want-list (flush-pkt only) should terminate cleanly
        # with a single flush-pkt reply and exit code 0.
        outf = BytesIO()
        exitcode = porcelain.upload_pack(
            self.repo.path, BytesIO(b"0000"), outf)
        outlines = outf.getvalue().splitlines()
        self.assertEqual([b"0000"], outlines)
        self.assertEqual(0, exitcode)
class ReceivePackTests(PorcelainTestCase):
    """Tests for receive_pack."""
    def test_receive_pack(self):
        filename = 'foo'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        # Fixed timestamps/timezones make the commit sha deterministic,
        # which the byte-exact expected output below depends on.
        self.repo.do_commit(message=b'test status',
                            author=b'author <email>',
                            committer=b'committer <email>',
                            author_timestamp=1402354300,
                            commit_timestamp=1402354300, author_timezone=0,
                            commit_timezone=0)
        outf = BytesIO()
        exitcode = porcelain.receive_pack(
            self.repo.path, BytesIO(b"0000"), outf)
        outlines = outf.getvalue().splitlines()
        # Expected pkt-lines: ref advertisement with capabilities, then flush.
        self.assertEqual([
            b'0091319b56ce3aee2d489f759736a79cc552c9bb86d9 HEAD\x00 report-status '  # noqa: E501
            b'delete-refs quiet ofs-delta side-band-64k '
            b'no-done symref=HEAD:refs/heads/master',
            b'003f319b56ce3aee2d489f759736a79cc552c9bb86d9 refs/heads/master',
            b'0000'], outlines)
        self.assertEqual(0, exitcode)
class BranchListTests(PorcelainTestCase):
    """Tests for porcelain.branch_list."""
    def test_standard(self):
        # No commits -> no branches.
        self.assertEqual(set([]), set(porcelain.branch_list(self.repo)))
    def test_new_branch(self):
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, b"foo")
        self.assertEqual(
            set([b"master", b"foo"]),
            set(porcelain.branch_list(self.repo)))
class BranchCreateTests(PorcelainTestCase):
    """Tests for porcelain.branch_create."""
    def test_branch_exists(self):
        # Creating an existing branch raises unless force=True.
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, b"foo")
        self.assertRaises(KeyError, porcelain.branch_create, self.repo, b"foo")
        porcelain.branch_create(self.repo, b"foo", force=True)
    def test_new_branch(self):
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, b"foo")
        self.assertEqual(
            set([b"master", b"foo"]),
            set(porcelain.branch_list(self.repo)))
class BranchDeleteTests(PorcelainTestCase):
    """Tests for porcelain.branch_delete with bytes and str branch names."""
    def test_simple(self):
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, b'foo')
        self.assertTrue(b"foo" in porcelain.branch_list(self.repo))
        porcelain.branch_delete(self.repo, b'foo')
        self.assertFalse(b"foo" in porcelain.branch_list(self.repo))
    def test_simple_unicode(self):
        # Same flow but passing the branch name as str, not bytes.
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, 'foo')
        self.assertTrue(b"foo" in porcelain.branch_list(self.repo))
        porcelain.branch_delete(self.repo, 'foo')
        self.assertFalse(b"foo" in porcelain.branch_list(self.repo))
class FetchTests(PorcelainTestCase):
    """Tests for porcelain.fetch."""
    def test_simple(self):
        outstream = BytesIO()
        errstream = BytesIO()
        # create a file for initial commit
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test',
                         author=b'test <email>',
                         committer=b'test <email>')
        # Setup target repo
        target_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, target_path)
        target_repo = porcelain.clone(self.repo.path, target=target_path,
                                      errstream=errstream)
        # create a second file to be pushed
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test2',
                         author=b'test2 <email>',
                         committer=b'test2 <email>')
        self.assertFalse(self.repo[b'HEAD'].id in target_repo)
        target_repo.close()
        # Fetch changes into the cloned repo
        porcelain.fetch(target_path, self.repo.path,
                        outstream=outstream, errstream=errstream)
        # Assert that fetch updated the local image of the remote
        self.assert_correct_remote_refs(
            target_repo.get_refs(), self.repo.get_refs())
        # Check the target repo for pushed changes
        with Repo(target_path) as r:
            self.assertTrue(self.repo[b'HEAD'].id in r)
    def test_with_remote_name(self):
        remote_name = b'origin'
        outstream = BytesIO()
        errstream = BytesIO()
        # create a file for initial commit
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test',
                         author=b'test <email>',
                         committer=b'test <email>')
        # Setup target repo
        target_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, target_path)
        target_repo = porcelain.clone(self.repo.path, target=target_path,
                                      errstream=errstream)
        # Capture current refs
        target_refs = target_repo.get_refs()
        # create a second file to be pushed
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test2',
                         author=b'test2 <email>',
                         committer=b'test2 <email>')
        self.assertFalse(self.repo[b'HEAD'].id in target_repo)
        target_repo.close()
        # Fetch changes into the cloned repo
        porcelain.fetch(target_path, self.repo.path, remote_name=remote_name,
                        outstream=outstream, errstream=errstream)
        # Assert that fetch updated the local image of the remote
        self.assert_correct_remote_refs(
            target_repo.get_refs(), self.repo.get_refs())
        # Check the target repo for pushed changes, as well as updates
        # for the refs
        with Repo(target_path) as r:
            self.assertTrue(self.repo[b'HEAD'].id in r)
            self.assertNotEqual(self.repo.get_refs(), target_refs)
    def assert_correct_remote_refs(
            self, local_refs, remote_refs, remote_name=b'origin'):
        """Assert that known remote refs corresponds to actual remote refs."""
        local_ref_prefix = b'refs/heads'
        remote_ref_prefix = b'refs/remotes/' + remote_name
        # Strip the respective prefixes so both sides are keyed by bare
        # branch name before comparing.
        locally_known_remote_refs = {
            k[len(remote_ref_prefix) + 1:]: v for k, v in local_refs.items()
            if k.startswith(remote_ref_prefix)}
        normalized_remote_refs = {
            k[len(local_ref_prefix) + 1:]: v for k, v in remote_refs.items()
            if k.startswith(local_ref_prefix)}
        self.assertEqual(locally_known_remote_refs, normalized_remote_refs)
class RepackTests(PorcelainTestCase):
    """Smoke tests for porcelain.repack (no assertions; must not raise)."""
    def test_empty(self):
        porcelain.repack(self.repo)
    def test_simple(self):
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.repack(self.repo)
class LsTreeTests(PorcelainTestCase):
    """Tests for porcelain.ls_tree output formatting."""
    def test_empty(self):
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        f = StringIO()
        porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
        self.assertEqual(f.getvalue(), "")
    def test_simple(self):
        # Commit a dummy file then modify it
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        f = StringIO()
        porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
        # Expected sha is the deterministic blob id of 'origstuff'.
        self.assertEqual(
            f.getvalue(),
            '100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tfoo\n')
    def test_recursive(self):
        # Create a directory then write a dummy file in it
        dirpath = os.path.join(self.repo.path, 'adir')
        filepath = os.path.join(dirpath, 'afile')
        os.mkdir(dirpath)
        with open(filepath, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[filepath])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        f = StringIO()
        porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
        # Non-recursive: only the tree entry is listed.
        self.assertEqual(
            f.getvalue(),
            '40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n')
        f = StringIO()
        porcelain.ls_tree(self.repo, b"HEAD", outstream=f, recursive=True)
        # Recursive: the tree entry plus its contained blob.
        self.assertEqual(
            f.getvalue(),
            '40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n'
            '100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tadir'
            '/afile\n')
class LsRemoteTests(PorcelainTestCase):
    """Tests for porcelain.ls_remote."""
    def test_empty(self):
        self.assertEqual({}, porcelain.ls_remote(self.repo.path))
    def test_some(self):
        cid = porcelain.commit(repo=self.repo.path, message=b'test status',
                               author=b'author <email>',
                               committer=b'committer <email>')
        # Both the branch ref and HEAD should point at the new commit.
        self.assertEqual({
            b'refs/heads/master': cid,
            b'HEAD': cid},
            porcelain.ls_remote(self.repo.path))
class LsFilesTests(PorcelainTestCase):
    """Tests for porcelain.ls_files (index listing)."""
    def test_empty(self):
        self.assertEqual([], list(porcelain.ls_files(self.repo)))
    def test_simple(self):
        # Commit a dummy file then modify it
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        self.assertEqual([b'foo'], list(porcelain.ls_files(self.repo)))
class RemoteAddTests(PorcelainTestCase):
    """Tests for porcelain.remote_add."""
    def test_new(self):
        porcelain.remote_add(
            self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
        c = self.repo.get_config()
        self.assertEqual(
            c.get((b'remote', b'jelmer'), b'url'),
            b'git://jelmer.uk/code/dulwich')
    def test_exists(self):
        # Adding the same remote twice raises RemoteExists.
        porcelain.remote_add(
            self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
        self.assertRaises(porcelain.RemoteExists, porcelain.remote_add,
                          self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
class CheckIgnoreTests(PorcelainTestCase):
    """Tests for porcelain.check_ignore."""
    def test_check_ignored(self):
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write('foo')
        foo_path = os.path.join(self.repo.path, 'foo')
        with open(foo_path, 'w') as f:
            f.write('BAR')
        bar_path = os.path.join(self.repo.path, 'bar')
        with open(bar_path, 'w') as f:
            f.write('BAR')
        self.assertEqual(
            ['foo'],
            list(porcelain.check_ignore(self.repo, [foo_path])))
        self.assertEqual(
            [], list(porcelain.check_ignore(self.repo, [bar_path])))
    def test_check_added_abs(self):
        # A file already in the index is not reported as ignored unless
        # no_index=True is passed (absolute path variant).
        path = os.path.join(self.repo.path, 'foo')
        with open(path, 'w') as f:
            f.write('BAR')
        self.repo.stage(['foo'])
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write('foo\n')
        self.assertEqual(
            [], list(porcelain.check_ignore(self.repo, [path])))
        self.assertEqual(
            ['foo'],
            list(porcelain.check_ignore(self.repo, [path], no_index=True)))
    def test_check_added_rel(self):
        # Same as above, but with a cwd-relative path from a subdirectory.
        with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
            f.write('BAR')
        self.repo.stage(['foo'])
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write('foo\n')
        cwd = os.getcwd()
        os.mkdir(os.path.join(self.repo.path, 'bar'))
        os.chdir(os.path.join(self.repo.path, 'bar'))
        try:
            self.assertEqual(
                list(porcelain.check_ignore(self.repo, ['../foo'])), [])
            self.assertEqual(['../foo'], list(
                porcelain.check_ignore(self.repo, ['../foo'], no_index=True)))
        finally:
            os.chdir(cwd)
class UpdateHeadTests(PorcelainTestCase):
    """Tests for porcelain.update_head (branch, detached, new-branch modes)."""
    def test_set_to_branch(self):
        # HEAD becomes a symbolic ref to the named branch.
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo.refs[b"refs/heads/blah"] = c1.id
        porcelain.update_head(self.repo, "blah")
        self.assertEqual(c1.id, self.repo.head())
        self.assertEqual(b'ref: refs/heads/blah',
                         self.repo.refs.read_ref(b'HEAD'))
    def test_set_to_branch_detached(self):
        # HEAD points directly at the branch's commit (detached).
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo.refs[b"refs/heads/blah"] = c1.id
        porcelain.update_head(self.repo, "blah", detached=True)
        self.assertEqual(c1.id, self.repo.head())
        self.assertEqual(c1.id, self.repo.refs.read_ref(b'HEAD'))
    def test_set_to_commit_detached(self):
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo.refs[b"refs/heads/blah"] = c1.id
        porcelain.update_head(self.repo, c1.id, detached=True)
        self.assertEqual(c1.id, self.repo.head())
        self.assertEqual(c1.id, self.repo.refs.read_ref(b'HEAD'))
    def test_set_new_branch(self):
        # new_branch creates 'bar' at 'blah' and points HEAD at it.
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo.refs[b"refs/heads/blah"] = c1.id
        porcelain.update_head(self.repo, "blah", new_branch="bar")
        self.assertEqual(c1.id, self.repo.head())
        self.assertEqual(b'ref: refs/heads/bar',
                         self.repo.refs.read_ref(b'HEAD'))
class MailmapTests(PorcelainTestCase):
    """Tests for porcelain.check_mailmap."""
    def test_no_mailmap(self):
        # Without a .mailmap file, the identity is returned unchanged.
        self.assertEqual(
            b'Jelmer Vernooij <jelmer@samba.org>',
            porcelain.check_mailmap(
                self.repo, b'Jelmer Vernooij <jelmer@samba.org>'))
    def test_mailmap_lookup(self):
        # NOTE(review): this fixture entry looks truncated — mapping
        # <jelmer@samba.org> to <jelmer@debian.org> would normally need the
        # old address on the same mailmap line; verify against upstream.
        with open(os.path.join(self.repo.path, '.mailmap'), 'wb') as f:
            f.write(b"""\
Jelmer Vernooij <jelmer@debian.org>
""")
        self.assertEqual(
            b'Jelmer Vernooij <jelmer@debian.org>',
            porcelain.check_mailmap(
                self.repo, b'Jelmer Vernooij <jelmer@samba.org>'))
class FsckTests(PorcelainTestCase):
    """Tests for porcelain.fsck."""
    def test_none(self):
        # A clean repository yields no fsck errors.
        self.assertEqual(
            [],
            list(porcelain.fsck(self.repo)))
    def test_git_dir(self):
        # A tree entry named '.git' is invalid and must be flagged.
        obj = Tree()
        a = Blob()
        a.data = b"foo"
        obj.add(b".git", 0o100644, a.id)
        self.repo.object_store.add_objects(
            [(a, None), (obj, None)])
        self.assertEqual(
            [(obj.id, 'invalid name .git')],
            [(sha, str(e)) for (sha, e) in porcelain.fsck(self.repo)])
class DescribeTests(PorcelainTestCase):
    """Tests for porcelain.describe."""
    def test_no_commits(self):
        self.assertRaises(KeyError, porcelain.describe, self.repo.path)
    def test_single_commit(self):
        # With no tags, describe returns 'g' + abbreviated commit sha.
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        sha = porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        self.assertEqual(
            'g{}'.format(sha[:7].decode('ascii')),
            porcelain.describe(self.repo.path))
    def test_tag(self):
        # An annotated tag on HEAD describes as the bare tag name.
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
                             b'bar', annotated=True)
        self.assertEqual(
            "tryme",
            porcelain.describe(self.repo.path))
    def test_tag_and_commit(self):
        # One commit past a tag: '<tag>-<count>-g<sha>'.
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
                             b'bar', annotated=True)
        with open(fullpath, 'w') as f:
            f.write("BAR2")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        sha = porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        self.assertEqual(
            'tryme-1-g{}'.format(sha[:7].decode('ascii')),
            porcelain.describe(self.repo.path))
class HelperTests(PorcelainTestCase):
    """Tests for porcelain.path_to_tree_path."""
    def test_path_to_tree_path_base(self):
        self.assertEqual(
            b'bar', porcelain.path_to_tree_path('/home/foo', '/home/foo/bar'))
        self.assertEqual(b'bar', porcelain.path_to_tree_path('.', './bar'))
        self.assertEqual(b'bar', porcelain.path_to_tree_path('.', 'bar'))
        cwd = os.getcwd()
        self.assertEqual(
            b'bar', porcelain.path_to_tree_path('.', os.path.join(cwd, 'bar')))
        self.assertEqual(b'bar', porcelain.path_to_tree_path(cwd, 'bar'))
    def test_path_to_tree_path_syntax(self):
        # bytes and str arguments may be mixed freely.
        self.assertEqual(b'bar', porcelain.path_to_tree_path(b'.', './bar'))
        self.assertEqual(b'bar', porcelain.path_to_tree_path('.', b'./bar'))
        self.assertEqual(b'bar', porcelain.path_to_tree_path(b'.', b'./bar'))
    def test_path_to_tree_path_error(self):
        # A path outside the repo root is rejected.
        with self.assertRaises(ValueError):
            porcelain.path_to_tree_path('/home/foo/', '/home/bar/baz')
    def test_path_to_tree_path_rel(self):
        cwd = os.getcwd()
        os.mkdir(os.path.join(self.repo.path, 'foo'))
        os.mkdir(os.path.join(self.repo.path, 'foo/bar'))
        try:
            os.chdir(os.path.join(self.repo.path, 'foo/bar'))
            # All four repo/path argument combinations (relative/absolute)
            # must resolve to the same tree path.
            self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
                '..', 'baz'))
            self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
                os.path.join(os.getcwd(), '..'),
                os.path.join(os.getcwd(), 'baz')))
            self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
                '..', os.path.join(os.getcwd(), 'baz')))
            self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
                os.path.join(os.getcwd(), '..'), 'baz'))
        finally:
            os.chdir(cwd)
class GetObjectBypathTests(PorcelainTestCase):
    """Tests for porcelain.get_object_by_path."""
    def test_simple(self):
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        self.assertEqual(
            b"BAR",
            porcelain.get_object_by_path(self.repo, 'foo').data)
    def test_missing(self):
        # Looking up a path that was never committed raises KeyError.
        self.assertRaises(
            KeyError,
            porcelain.get_object_by_path, self.repo, 'foo')
class WriteTreeTests(PorcelainTestCase):
    """Tests for porcelain.write_tree."""
    def test_simple(self):
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        # The expected sha is the deterministic tree id for this index.
        self.assertEqual(
            b'd2092c8a9f311f0311083bf8d177f2ca0ab5b241',
            porcelain.write_tree(self.repo))
| 37.624027 | 97 | 0.58457 |
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import errno
import os
import shutil
import tarfile
import tempfile
import time
from dulwich import porcelain
from dulwich.diff_tree import tree_changes
from dulwich.objects import (
Blob,
Tag,
Tree,
ZERO_SHA,
)
from dulwich.repo import (
NoIndexPresent,
Repo,
)
from dulwich.tests import (
TestCase,
)
from dulwich.tests.utils import (
build_commit_graph,
make_commit,
make_object,
)
def flat_walk_dir(dir_to_walk):
    """Yield every directory and file under *dir_to_walk*.

    Entries are paths relative to *dir_to_walk*; top-level files are bare
    names, nested files are joined with their (relative) directory, and each
    visited subdirectory is yielded once before its files.
    """
    for root, _, names in os.walk(dir_to_walk):
        at_top = root == dir_to_walk
        rel_root = os.path.relpath(root, dir_to_walk)
        if not at_top:
            yield rel_root
        for name in names:
            yield name if at_top else os.path.join(rel_root, name)
class PorcelainTestCase(TestCase):
    """Base test case that provides a fresh temporary repo in ``self.repo``."""
    def setUp(self):
        super(PorcelainTestCase, self).setUp()
        # Temp dir and repo are removed/closed automatically via addCleanup.
        self.test_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.test_dir)
        self.repo_path = os.path.join(self.test_dir, 'repo')
        self.repo = Repo.init(self.repo_path, mkdir=True)
        self.addCleanup(self.repo.close)
class ArchiveTests(PorcelainTestCase):
    """Tests for the archive command."""
    def test_simple(self):
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"refs/heads/master"] = c3.id
        out = BytesIO()
        err = BytesIO()
        porcelain.archive(self.repo.path, b"refs/heads/master", outstream=out,
                          errstream=err)
        self.assertEqual(b"", err.getvalue())
        # Commits built by build_commit_graph have empty trees, so the
        # resulting tarball contains no members.
        tf = tarfile.TarFile(fileobj=out)
        self.addCleanup(tf.close)
        self.assertEqual([], tf.getnames())
class UpdateServerInfoTests(PorcelainTestCase):
    """Tests for porcelain.update_server_info."""
    def test_simple(self):
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"refs/heads/foo"] = c3.id
        porcelain.update_server_info(self.repo.path)
        # The dumb-HTTP helper file info/refs must now exist.
        self.assertTrue(os.path.exists(
            os.path.join(self.repo.controldir(), 'info', 'refs')))
class CommitTests(PorcelainTestCase):
    """Tests for porcelain.commit with bytes and str identities."""
    def test_custom_author(self):
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"refs/heads/foo"] = c3.id
        sha = porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        # commit() returns the new commit's hex sha as bytes.
        self.assertTrue(isinstance(sha, bytes))
        self.assertEqual(len(sha), 40)
    def test_unicode(self):
        # Same flow, but identities and message passed as str.
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"refs/heads/foo"] = c3.id
        sha = porcelain.commit(
            self.repo.path, message="Some message",
            author="Joe <joe@example.com>",
            committer="Bob <bob@example.com>")
        self.assertTrue(isinstance(sha, bytes))
        self.assertEqual(len(sha), 40)
class CleanTests(PorcelainTestCase):
    """Tests for porcelain.clean."""
    def put_files(self, tracked, ignored, untracked, empty_dirs):
        """Populate the working tree and commit the tracked files.

        :param tracked: set of relative paths to create, stage and commit
        :param ignored: set of relative paths to create and list in .gitignore
        :param untracked: set of relative paths to create but leave untracked
        :param empty_dirs: set of relative directory names to create empty
        """
        all_files = tracked | ignored | untracked
        for file_path in all_files:
            abs_path = os.path.join(self.repo.path, file_path)
            # create the parent dir(s) as necessary
            parent_dir = os.path.dirname(abs_path)
            try:
                os.makedirs(parent_dir)
            except OSError as err:
                if not err.errno == errno.EEXIST:
                    raise err
            with open(abs_path, 'w') as f:
                f.write('')
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            # One pattern per line; writing the raw set would concatenate
            # multiple entries into a single bogus pattern.
            f.writelines(pattern + '\n' for pattern in ignored)
        for dir_path in empty_dirs:
            # Bug fix: previously hard-coded 'empty_dir' and ignored
            # dir_path, so only that one directory could ever be created
            # (and a second entry would raise FileExistsError).
            os.mkdir(os.path.join(self.repo.path, dir_path))
        files_to_add = [os.path.join(self.repo.path, t) for t in tracked]
        porcelain.add(repo=self.repo.path, paths=files_to_add)
        porcelain.commit(repo=self.repo.path, message="init commit")
    def assert_wd(self, expected_paths):
        """Assert that the working tree contains exactly *expected_paths*
        (relative, control dir excluded)."""
        control_dir_rel = os.path.relpath(
            self.repo._controldir, self.repo.path)
        # normalize paths to simplify comparison across platforms
        found_paths = {
            os.path.normpath(p)
            for p in flat_walk_dir(self.repo.path)
            if not p.split(os.sep)[0] == control_dir_rel}
        norm_expected_paths = {os.path.normpath(p) for p in expected_paths}
        self.assertEqual(found_paths, norm_expected_paths)
    def test_from_root(self):
        # Cleaning from the repo root removes all untracked files and
        # empty dirs, but keeps tracked and ignored files.
        self.put_files(
            tracked={
                'tracked_file',
                'tracked_dir/tracked_file',
                '.gitignore'},
            ignored={
                'ignored_file'},
            untracked={
                'untracked_file',
                'tracked_dir/untracked_dir/untracked_file',
                'untracked_dir/untracked_dir/untracked_file'},
            empty_dirs={
                'empty_dir'})
        porcelain.clean(repo=self.repo.path, target_dir=self.repo.path)
        self.assert_wd({
            'tracked_file',
            'tracked_dir/tracked_file',
            '.gitignore',
            'ignored_file',
            'tracked_dir'})
    def test_from_subdir(self):
        # Cleaning a subdirectory only removes untracked content under it.
        self.put_files(
            tracked={
                'tracked_file',
                'tracked_dir/tracked_file',
                '.gitignore'},
            ignored={
                'ignored_file'},
            untracked={
                'untracked_file',
                'tracked_dir/untracked_dir/untracked_file',
                'untracked_dir/untracked_dir/untracked_file'},
            empty_dirs={
                'empty_dir'})
        porcelain.clean(
            repo=self.repo,
            target_dir=os.path.join(self.repo.path, 'untracked_dir'))
        self.assert_wd({
            'tracked_file',
            'tracked_dir/tracked_file',
            '.gitignore',
            'ignored_file',
            'untracked_file',
            'tracked_dir/untracked_dir/untracked_file',
            'empty_dir',
            'untracked_dir',
            'tracked_dir',
            'tracked_dir/untracked_dir'})
class CloneTests(PorcelainTestCase):
def test_simple_local(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
2: [(b'f1', f1_1), (b'f2', f1_1)],
3: [(b'f1', f1_1), (b'f2', f1_1)], }
c1, c2, c3 = build_commit_graph(self.repo.object_store,
commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
self.repo.refs[b"refs/tags/foo"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
r = porcelain.clone(self.repo.path, target_path,
checkout=False, errstream=errstream)
self.addCleanup(r.close)
self.assertEqual(r.path, target_path)
target_repo = Repo(target_path)
self.assertEqual(0, len(target_repo.open_index()))
self.assertEqual(c3.id, target_repo.refs[b'refs/tags/foo'])
self.assertTrue(b'f1' not in os.listdir(target_path))
self.assertTrue(b'f2' not in os.listdir(target_path))
c = r.get_config()
encoded_path = self.repo.path
if not isinstance(encoded_path, bytes):
encoded_path = encoded_path.encode('utf-8')
self.assertEqual(encoded_path, c.get((b'remote', b'origin'), b'url'))
self.assertEqual(
b'+refs/heads/*:refs/remotes/origin/*',
c.get((b'remote', b'origin'), b'fetch'))
def test_simple_local_with_checkout(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
2: [(b'f1', f1_1), (b'f2', f1_1)],
3: [(b'f1', f1_1), (b'f2', f1_1)], }
c1, c2, c3 = build_commit_graph(self.repo.object_store,
commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
with porcelain.clone(self.repo.path, target_path,
checkout=True,
errstream=errstream) as r:
self.assertEqual(r.path, target_path)
with Repo(target_path) as r:
self.assertEqual(r.head(), c3.id)
self.assertTrue('f1' in os.listdir(target_path))
self.assertTrue('f2' in os.listdir(target_path))
def test_bare_local_with_checkout(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1], [2, 1], [3, 1, 2]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)],
2: [(b'f1', f1_1), (b'f2', f1_1)],
3: [(b'f1', f1_1), (b'f2', f1_1)], }
c1, c2, c3 = build_commit_graph(self.repo.object_store,
commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c3.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
with porcelain.clone(
self.repo.path, target_path, bare=True,
errstream=errstream) as r:
self.assertEqual(r.path, target_path)
with Repo(target_path) as r:
r.head()
self.assertRaises(NoIndexPresent, r.open_index)
self.assertFalse(b'f1' in os.listdir(target_path))
self.assertFalse(b'f2' in os.listdir(target_path))
def test_no_checkout_with_bare(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
(c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
self.repo.refs[b"HEAD"] = c1.id
target_path = tempfile.mkdtemp()
errstream = BytesIO()
self.addCleanup(shutil.rmtree, target_path)
self.assertRaises(
ValueError, porcelain.clone, self.repo.path,
target_path, checkout=True, bare=True, errstream=errstream)
def test_no_head_no_checkout(self):
f1_1 = make_object(Blob, data=b'f1')
commit_spec = [[1]]
trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
(c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
self.repo.refs[b"refs/heads/master"] = c1.id
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
errstream = BytesIO()
r = porcelain.clone(
self.repo.path, target_path, checkout=True, errstream=errstream)
r.close()
    def test_no_head_no_checkout_outstream_errstream_autofallback(self):
        """Cloning works when the error stream is porcelain.NoneStream."""
        f1_1 = make_object(Blob, data=b'f1')
        commit_spec = [[1]]
        trees = {1: [(b'f1', f1_1), (b'f2', f1_1)]}
        (c1, ) = build_commit_graph(self.repo.object_store, commit_spec, trees)
        self.repo.refs[b"refs/heads/master"] = c1.id
        target_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, target_path)
        # NoneStream discards output; clone must not require a real stream.
        errstream = porcelain.NoneStream()
        r = porcelain.clone(
            self.repo.path, target_path, checkout=True, errstream=errstream)
        r.close()
class InitTests(TestCase):
    """Tests for porcelain.init."""

    def test_non_bare(self):
        """init creates a non-bare repository in an empty directory."""
        tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpdir)
        porcelain.init(tmpdir)

    def test_bare(self):
        """init with bare=True creates a bare repository."""
        tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpdir)
        porcelain.init(tmpdir, bare=True)
class AddTests(PorcelainTestCase):
    """Tests for porcelain.add."""
    def test_add_default_paths(self):
        """With no paths given, add stages everything under the repo root."""
        # create a file for initial commit
        fullpath = os.path.join(self.repo.path, 'blah')
        with open(fullpath, 'w') as f:
            f.write("\n")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(repo=self.repo.path, message=b'test',
                         author=b'test <email>', committer=b'test <email>')
        # Add a second test file and a file in a directory
        with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
            f.write("\n")
        os.mkdir(os.path.join(self.repo.path, 'adir'))
        with open(os.path.join(self.repo.path, 'adir', 'afile'), 'w') as f:
            f.write("\n")
        cwd = os.getcwd()
        try:
            os.chdir(self.repo.path)
            porcelain.add(self.repo.path)
        finally:
            os.chdir(cwd)
        # Check that foo was added and nothing in .git was modified
        index = self.repo.open_index()
        self.assertEqual(sorted(index), [b'adir/afile', b'blah', b'foo'])
    def test_add_default_paths_subdir(self):
        """Default add from a subdirectory stages only that subdirectory."""
        os.mkdir(os.path.join(self.repo.path, 'foo'))
        with open(os.path.join(self.repo.path, 'blah'), 'w') as f:
            f.write("\n")
        with open(os.path.join(self.repo.path, 'foo', 'blie'), 'w') as f:
            f.write("\n")
        cwd = os.getcwd()
        try:
            os.chdir(os.path.join(self.repo.path, 'foo'))
            porcelain.add(repo=self.repo.path)
            porcelain.commit(repo=self.repo.path, message=b'test',
                             author=b'test <email>',
                             committer=b'test <email>')
        finally:
            os.chdir(cwd)
        index = self.repo.open_index()
        self.assertEqual(sorted(index), [b'foo/blie'])
    def test_add_file(self):
        """Adding a single explicit path stages exactly that file."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo.path, paths=[fullpath])
        self.assertIn(b"foo", self.repo.open_index())
    def test_add_ignored(self):
        """Paths matching .gitignore are skipped and reported separately."""
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write("foo")
        with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
            f.write("BAR")
        with open(os.path.join(self.repo.path, 'bar'), 'w') as f:
            f.write("BAR")
        # porcelain.add returns (added, ignored).
        (added, ignored) = porcelain.add(self.repo.path, paths=[
            os.path.join(self.repo.path, "foo"),
            os.path.join(self.repo.path, "bar")])
        self.assertIn(b"bar", self.repo.open_index())
        self.assertEqual(set(['bar']), set(added))
        self.assertEqual(set(['foo']), ignored)
    def test_add_file_absolute_path(self):
        """Absolute paths inside the repository are accepted.

        NOTE(review): an older comment here claimed absolute paths were
        unsupported; the test itself passes one and expects it to be staged.
        """
        with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo, paths=[os.path.join(self.repo.path, "foo")])
        self.assertIn(b"foo", self.repo.open_index())
    def test_add_not_in_repo(self):
        """Adding paths outside the repository raises ValueError."""
        with open(os.path.join(self.test_dir, 'foo'), 'w') as f:
            f.write("BAR")
        self.assertRaises(
            ValueError,
            porcelain.add, self.repo,
            paths=[os.path.join(self.test_dir, "foo")])
        self.assertRaises(
            ValueError,
            porcelain.add, self.repo,
            paths=["../foo"])
        # Nothing must have been staged.
        self.assertEqual([], list(self.repo.open_index()))
    def test_add_file_clrf_conversion(self):
        """With core.autocrlf=input, CRLF is converted to LF when staging."""
        # Set the right configuration to the repo
        c = self.repo.get_config()
        c.set("core", "autocrlf", "input")
        c.write_to_path()
        # Add a file with CRLF line-ending
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'wb') as f:
            f.write(b"line1\r\nline2")
        porcelain.add(self.repo.path, paths=[fullpath])
        # The line-endings should have been converted to LF
        index = self.repo.open_index()
        self.assertIn(b"foo", index)
        entry = index[b"foo"]
        blob = self.repo[entry.sha]
        self.assertEqual(blob.data, b"line1\nline2")
class RemoveTests(PorcelainTestCase):
    """Tests for porcelain.remove."""
    def test_remove_file(self):
        """Removing a committed file deletes it from the working tree."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo.path, paths=[fullpath])
        porcelain.commit(repo=self.repo, message=b'test',
                         author=b'test <email>',
                         committer=b'test <email>')
        self.assertTrue(os.path.exists(os.path.join(self.repo.path, 'foo')))
        cwd = os.getcwd()
        try:
            os.chdir(self.repo.path)
            porcelain.remove(self.repo.path, paths=["foo"])
        finally:
            os.chdir(cwd)
        self.assertFalse(os.path.exists(os.path.join(self.repo.path, 'foo')))
    def test_remove_file_staged(self):
        """Removing a staged-but-uncommitted file is refused."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        cwd = os.getcwd()
        try:
            os.chdir(self.repo.path)
            porcelain.add(self.repo.path, paths=[fullpath])
            self.assertRaises(Exception, porcelain.rm, self.repo.path,
                              paths=["foo"])
        finally:
            os.chdir(cwd)
class LogTests(PorcelainTestCase):
    """Tests for porcelain.log."""

    def test_simple(self):
        """log writes one 50-dash separator per commit shown."""
        commits = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = commits[2].id
        out = StringIO()
        porcelain.log(self.repo.path, outstream=out)
        self.assertEqual(3, out.getvalue().count("-" * 50))

    def test_max_entries(self):
        """max_entries caps the number of commits printed."""
        commits = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = commits[2].id
        out = StringIO()
        porcelain.log(self.repo.path, outstream=out, max_entries=1)
        self.assertEqual(1, out.getvalue().count("-" * 50))
class ShowTests(PorcelainTestCase):
    """Tests for porcelain.show."""
    def test_nolist(self):
        """A single object id (not wrapped in a list) is accepted."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=c3.id, outstream=outstream)
        self.assertTrue(outstream.getvalue().startswith("-" * 50))
    def test_simple(self):
        """Showing a commit id prints the commit header block."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=[c3.id], outstream=outstream)
        self.assertTrue(outstream.getvalue().startswith("-" * 50))
    def test_blob(self):
        """Showing a blob prints its raw contents."""
        b = Blob.from_string(b"The Foo\n")
        self.repo.object_store.add_object(b)
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=[b.id], outstream=outstream)
        self.assertEqual(outstream.getvalue(), "The Foo\n")
    def test_commit_no_parent(self):
        """A root commit is shown with a diff against /dev/null."""
        a = Blob.from_string(b"The Foo\n")
        ta = Tree()
        ta.add(b"somename", 0o100644, a.id)
        ca = make_commit(tree=ta.id)
        self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=[ca.id], outstream=outstream)
        self.assertMultiLineEqual(outstream.getvalue(), """\
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""")
    def test_tag(self):
        """Showing an annotated tag prints the tag, then the tagged commit."""
        a = Blob.from_string(b"The Foo\n")
        ta = Tree()
        ta.add(b"somename", 0o100644, a.id)
        ca = make_commit(tree=ta.id)
        self.repo.object_store.add_objects([(a, None), (ta, None), (ca, None)])
        # Fixed tag_time/tag_timezone keep the expected output deterministic.
        porcelain.tag_create(
            self.repo.path, b"tryme", b'foo <foo@bar.com>', b'bar',
            annotated=True, objectish=ca.id, tag_time=1552854211,
            tag_timezone=0)
        outstream = StringIO()
        porcelain.show(self.repo, objects=[b'refs/tags/tryme'],
                       outstream=outstream)
        self.maxDiff = None
        self.assertMultiLineEqual(outstream.getvalue(), """\
Tagger: foo <foo@bar.com>
Date: Sun Mar 17 2019 20:23:31 +0000
bar
--------------------------------------------------
commit: 344da06c1bb85901270b3e8875c988a027ec087d
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
new file mode 100644
index 0000000..ea5c7bf
--- /dev/null
+++ b/somename
@@ -0,0 +1 @@
+The Foo
""")
    def test_commit_with_change(self):
        """A commit with a parent is shown with the diff against it."""
        a = Blob.from_string(b"The Foo\n")
        ta = Tree()
        ta.add(b"somename", 0o100644, a.id)
        ca = make_commit(tree=ta.id)
        b = Blob.from_string(b"The Bar\n")
        tb = Tree()
        tb.add(b"somename", 0o100644, b.id)
        cb = make_commit(tree=tb.id, parents=[ca.id])
        self.repo.object_store.add_objects(
            [(a, None), (b, None), (ta, None), (tb, None),
             (ca, None), (cb, None)])
        outstream = StringIO()
        porcelain.show(self.repo.path, objects=[cb.id], outstream=outstream)
        self.assertMultiLineEqual(outstream.getvalue(), """\
--------------------------------------------------
commit: 2c6b6c9cb72c130956657e1fdae58e5b103744fa
Author: Test Author <test@nodomain.com>
Committer: Test Committer <test@nodomain.com>
Date: Fri Jan 01 2010 00:00:00 +0000
Test message.
diff --git a/somename b/somename
index ea5c7bf..fd38bcb 100644
--- a/somename
+++ b/somename
@@ -1 +1 @@
-The Foo
+The Bar
""")
class SymbolicRefTests(PorcelainTestCase):
    """Tests for porcelain.symbolic_ref."""
    def test_set_wrong_symbolic_ref(self):
        """Pointing HEAD at a nonexistent ref raises ValueError."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        self.assertRaises(ValueError, porcelain.symbolic_ref, self.repo.path,
                          b'foobar')
    def test_set_force_wrong_symbolic_ref(self):
        """force=True allows pointing HEAD at a ref that does not exist."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        porcelain.symbolic_ref(self.repo.path, b'force_foobar', force=True)
        # test if we actually changed the file
        with self.repo.get_named_file('HEAD') as f:
            new_ref = f.read()
        self.assertEqual(new_ref, b'ref: refs/heads/force_foobar\n')
    def test_set_symbolic_ref(self):
        """HEAD can be pointed at an existing branch."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        porcelain.symbolic_ref(self.repo.path, b'master')
    def test_set_symbolic_ref_other_than_master(self):
        """HEAD can be pointed at branches other than master."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]],
            attrs=dict(refs='develop'))
        self.repo.refs[b"HEAD"] = c3.id
        self.repo.refs[b"refs/heads/develop"] = c3.id
        porcelain.symbolic_ref(self.repo.path, b'develop')
        # test if we actually changed the file
        with self.repo.get_named_file('HEAD') as f:
            new_ref = f.read()
        self.assertEqual(new_ref, b'ref: refs/heads/develop\n')
class DiffTreeTests(PorcelainTestCase):
    """Tests for porcelain.diff_tree."""

    def test_empty(self):
        """Diffing two identical trees produces no output."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        stream = BytesIO()
        porcelain.diff_tree(
            self.repo.path, c2.tree, c3.tree, outstream=stream)
        self.assertEqual(b"", stream.getvalue())
class CommitTreeTests(PorcelainTestCase):
    """Tests for porcelain.commit_tree."""

    def test_simple(self):
        """commit_tree returns a 40-character hex sha for the new commit."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        blob = Blob()
        blob.data = b"foo the bar"
        tree = Tree()
        tree.add(b"somename", 0o100644, blob.id)
        self.repo.object_store.add_object(tree)
        self.repo.object_store.add_object(blob)
        sha = porcelain.commit_tree(
            self.repo.path, tree.id, message=b"Withcommit.",
            author=b"Joe <joe@example.com>",
            committer=b"Jane <jane@example.com>")
        self.assertTrue(isinstance(sha, bytes))
        self.assertEqual(40, len(sha))
class RevListTests(PorcelainTestCase):
    """Tests for porcelain.rev_list."""

    def test_simple(self):
        """rev_list emits commit ids newest-first, one per line."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        stream = BytesIO()
        porcelain.rev_list(self.repo.path, [c3.id], outstream=stream)
        expected = b"".join(c.id + b"\n" for c in (c3, c2, c1))
        self.assertEqual(expected, stream.getvalue())
class TagCreateTests(PorcelainTestCase):
    """Tests for porcelain.tag_create."""
    def test_annotated(self):
        """annotated=True creates a Tag object with tagger and message."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
                             b'bar', annotated=True)
        tags = self.repo.refs.as_dict(b"refs/tags")
        self.assertEqual(list(tags.keys()), [b"tryme"])
        tag = self.repo[b'refs/tags/tryme']
        self.assertTrue(isinstance(tag, Tag))
        self.assertEqual(b"foo <foo@bar.com>", tag.tagger)
        self.assertEqual(b"bar", tag.message)
        # tag_time defaults to "now"; allow a little slack.
        self.assertLess(time.time() - tag.tag_time, 5)
    def test_unannotated(self):
        """annotated=False creates a lightweight tag pointing at HEAD."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        porcelain.tag_create(self.repo.path, b"tryme", annotated=False)
        tags = self.repo.refs.as_dict(b"refs/tags")
        self.assertEqual(list(tags.keys()), [b"tryme"])
        self.repo[b'refs/tags/tryme']
        self.assertEqual(list(tags.values()), [self.repo.head()])
    def test_unannotated_unicode(self):
        """The tag name may also be given as str rather than bytes."""
        c1, c2, c3 = build_commit_graph(
            self.repo.object_store, [[1], [2, 1], [3, 1, 2]])
        self.repo.refs[b"HEAD"] = c3.id
        porcelain.tag_create(self.repo.path, "tryme", annotated=False)
        tags = self.repo.refs.as_dict(b"refs/tags")
        self.assertEqual(list(tags.keys()), [b"tryme"])
        self.repo[b'refs/tags/tryme']
        self.assertEqual(list(tags.values()), [self.repo.head()])
class TagListTests(PorcelainTestCase):
    """Tests for porcelain.tag_list."""

    def test_empty(self):
        """A fresh repository has no tags."""
        self.assertEqual([], porcelain.tag_list(self.repo.path))

    def test_simple(self):
        """tag_list returns sorted names without the refs/tags prefix."""
        self.repo.refs[b"refs/tags/foo"] = b"aa" * 20
        self.repo.refs[b"refs/tags/bar/bla"] = b"bb" * 20
        self.assertEqual(
            [b"bar/bla", b"foo"], porcelain.tag_list(self.repo.path))
class TagDeleteTests(PorcelainTestCase):
    """Tests for porcelain.tag_delete."""

    def test_simple(self):
        """Deleting a tag removes it from the tag list."""
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.tag_create(self.repo, b'foo')
        self.assertIn(b"foo", porcelain.tag_list(self.repo))
        porcelain.tag_delete(self.repo, b'foo')
        self.assertNotIn(b"foo", porcelain.tag_list(self.repo))
class ResetTests(PorcelainTestCase):
    """Tests for porcelain.reset."""
    def test_hard_head(self):
        """reset --hard HEAD discards working-tree modifications."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo.path, paths=[fullpath])
        porcelain.commit(self.repo.path, message=b"Some message",
                         committer=b"Jane <jane@example.com>",
                         author=b"John <john@example.com>")
        with open(os.path.join(self.repo.path, 'foo'), 'wb') as f:
            f.write(b"OOH")
        porcelain.reset(self.repo, "hard", b"HEAD")
        index = self.repo.open_index()
        # After the reset the index must exactly match the HEAD tree.
        changes = list(tree_changes(self.repo,
                       index.commit(self.repo.object_store),
                       self.repo[b'HEAD'].tree))
        self.assertEqual([], changes)
    def test_hard_commit(self):
        """reset --hard <sha> moves the index back to that commit's tree."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(self.repo.path, paths=[fullpath])
        sha = porcelain.commit(self.repo.path, message=b"Some message",
                               committer=b"Jane <jane@example.com>",
                               author=b"John <john@example.com>")
        with open(fullpath, 'wb') as f:
            f.write(b"BAZ")
        porcelain.add(self.repo.path, paths=[fullpath])
        porcelain.commit(self.repo.path, message=b"Some other message",
                         committer=b"Jane <jane@example.com>",
                         author=b"John <john@example.com>")
        porcelain.reset(self.repo, "hard", sha)
        index = self.repo.open_index()
        changes = list(tree_changes(self.repo,
                       index.commit(self.repo.object_store),
                       self.repo[sha].tree))
        self.assertEqual([], changes)
class PushTests(PorcelainTestCase):
    """Tests for porcelain.push."""
    def test_simple(self):
        """Pushing HEAD to a new remote branch updates the remote ref."""
        outstream = BytesIO()
        errstream = BytesIO()
        porcelain.commit(repo=self.repo.path, message=b'init',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Setup target repo cloned from temp test repo
        clone_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, clone_path)
        target_repo = porcelain.clone(self.repo.path, target=clone_path,
                                      errstream=errstream)
        try:
            self.assertEqual(target_repo[b'HEAD'], self.repo[b'HEAD'])
        finally:
            target_repo.close()
        # create a second file to be pushed back to origin
        handle, fullpath = tempfile.mkstemp(dir=clone_path)
        os.close(handle)
        porcelain.add(repo=clone_path, paths=[fullpath])
        porcelain.commit(repo=clone_path, message=b'push',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Setup a non-checked out branch in the remote
        refs_path = b"refs/heads/foo"
        new_id = self.repo[b'HEAD'].id
        self.assertNotEqual(new_id, ZERO_SHA)
        self.repo.refs[refs_path] = new_id
        # Push to the remote
        porcelain.push(clone_path, self.repo.path, b"HEAD:" + refs_path,
                       outstream=outstream, errstream=errstream)
        # Check that the target and source
        with Repo(clone_path) as r_clone:
            self.assertEqual({
                b'HEAD': new_id,
                b'refs/heads/foo': r_clone[b'HEAD'].id,
                b'refs/heads/master': new_id,
                }, self.repo.get_refs())
            self.assertEqual(r_clone[b'HEAD'].id, self.repo[refs_path].id)
            # Get the change in the target repo corresponding to the add
            # this will be in the foo branch.
            change = list(tree_changes(self.repo, self.repo[b'HEAD'].tree,
                                       self.repo[b'refs/heads/foo'].tree))[0]
            self.assertEqual(os.path.basename(fullpath),
                             change.new.path.decode('ascii'))
    def test_delete(self):
        """Pushing an empty source (":ref") deletes the remote ref."""
        outstream = BytesIO()
        errstream = BytesIO()
        porcelain.commit(repo=self.repo.path, message=b'init',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Setup target repo cloned from temp test repo
        clone_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, clone_path)
        target_repo = porcelain.clone(self.repo.path, target=clone_path,
                                      errstream=errstream)
        target_repo.close()
        # Setup a non-checked out branch in the remote
        refs_path = b"refs/heads/foo"
        new_id = self.repo[b'HEAD'].id
        self.assertNotEqual(new_id, ZERO_SHA)
        self.repo.refs[refs_path] = new_id
        # Push to the remote
        porcelain.push(clone_path, self.repo.path, b":" + refs_path,
                       outstream=outstream, errstream=errstream)
        # refs/heads/foo must be gone after the delete-push.
        self.assertEqual({
            b'HEAD': new_id,
            b'refs/heads/master': new_id,
            }, self.repo.get_refs())
class PullTests(PorcelainTestCase):
    """Tests for porcelain.pull."""
    def setUp(self):
        """Create an origin with two commits and a clone that has only one."""
        super(PullTests, self).setUp()
        # create a file for initial commit
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test',
                         author=b'test <email>',
                         committer=b'test <email>')
        # Setup target repo
        self.target_path = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.target_path)
        target_repo = porcelain.clone(self.repo.path, target=self.target_path,
                                      errstream=BytesIO())
        target_repo.close()
        # create a second file to be pushed
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test2',
                         author=b'test2 <email>',
                         committer=b'test2 <email>')
        self.assertTrue(b'refs/heads/master' in self.repo.refs)
        # NOTE(review): target_repo is read here after close(); this relies on
        # refs remaining accessible on a closed Repo — confirm intended.
        self.assertTrue(b'refs/heads/master' in target_repo.refs)
    def test_simple(self):
        """pull with an explicit refspec fast-forwards the clone's HEAD."""
        outstream = BytesIO()
        errstream = BytesIO()
        # Pull changes into the cloned repo
        porcelain.pull(self.target_path, self.repo.path, b'refs/heads/master',
                       outstream=outstream, errstream=errstream)
        # Check the target repo for pushed changes
        with Repo(self.target_path) as r:
            self.assertEqual(r[b'HEAD'].id, self.repo[b'HEAD'].id)
    def test_no_refspec(self):
        """pull without a refspec defaults to the remote's branches."""
        outstream = BytesIO()
        errstream = BytesIO()
        # Pull changes into the cloned repo
        porcelain.pull(self.target_path, self.repo.path, outstream=outstream,
                       errstream=errstream)
        # Check the target repo for pushed changes
        with Repo(self.target_path) as r:
            self.assertEqual(r[b'HEAD'].id, self.repo[b'HEAD'].id)
class StatusTests(PorcelainTestCase):
    """Tests for porcelain.status and its helpers."""
    def test_empty(self):
        """An empty repository reports no staged or unstaged changes."""
        results = porcelain.status(self.repo)
        self.assertEqual(
            {'add': [], 'delete': [], 'modify': []},
            results.staged)
        self.assertEqual([], results.unstaged)
    def test_status_base(self):
        """status separates staged additions from unstaged modifications."""
        # Commit a dummy file then modify it
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # modify access and modify time of path
        os.utime(fullpath, (0, 0))
        with open(fullpath, 'wb') as f:
            f.write(b'stuff')
        # Make a dummy file and stage it
        filename_add = 'bar'
        fullpath = os.path.join(self.repo.path, filename_add)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        results = porcelain.status(self.repo)
        self.assertEqual(results.staged['add'][0],
                         filename_add.encode('ascii'))
        self.assertEqual(results.unstaged, [b'foo'])
    def test_status_all(self):
        """All categories at once: add, delete, modify, unstaged, untracked."""
        del_path = os.path.join(self.repo.path, 'foo')
        mod_path = os.path.join(self.repo.path, 'bar')
        add_path = os.path.join(self.repo.path, 'baz')
        us_path = os.path.join(self.repo.path, 'blye')
        ut_path = os.path.join(self.repo.path, 'blyat')
        with open(del_path, 'w') as f:
            f.write('origstuff')
        with open(mod_path, 'w') as f:
            f.write('origstuff')
        with open(us_path, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[del_path, mod_path, us_path])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        porcelain.remove(self.repo.path, [del_path])
        with open(add_path, 'w') as f:
            f.write('origstuff')
        with open(mod_path, 'w') as f:
            f.write('more_origstuff')
        with open(us_path, 'w') as f:
            f.write('more_origstuff')
        porcelain.add(repo=self.repo.path, paths=[add_path, mod_path])
        # 'blye' is modified again after staging, so it shows as unstaged.
        with open(us_path, 'w') as f:
            f.write('\norigstuff')
        with open(ut_path, 'w') as f:
            f.write('origstuff')
        results = porcelain.status(self.repo.path)
        self.assertDictEqual(
            {'add': [b'baz'], 'delete': [b'foo'], 'modify': [b'bar']},
            results.staged)
        self.assertListEqual(results.unstaged, [b'blye'])
        self.assertListEqual(results.untracked, ['blyat'])
    def test_status_crlf_mismatch(self):
        """A CRLF rewrite is reported as unstaged when autocrlf is unset."""
        # First make a commit as if the file has been added on a Linux system
        # or with core.autocrlf=True
        file_path = os.path.join(self.repo.path, 'crlf')
        with open(file_path, 'wb') as f:
            f.write(b'line1\nline2')
        porcelain.add(repo=self.repo.path, paths=[file_path])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Then update the file as if it was created by CGit on a Windows
        # system with core.autocrlf=true
        with open(file_path, 'wb') as f:
            f.write(b'line1\r\nline2')
        results = porcelain.status(self.repo)
        self.assertDictEqual(
            {'add': [], 'delete': [], 'modify': []},
            results.staged)
        self.assertListEqual(results.unstaged, [b'crlf'])
        self.assertListEqual(results.untracked, [])
    def test_status_crlf_convert(self):
        """With core.autocrlf=true a CRLF rewrite is not seen as a change."""
        # First make a commit as if the file has been added on a Linux system
        # or with core.autocrlf=True
        file_path = os.path.join(self.repo.path, 'crlf')
        with open(file_path, 'wb') as f:
            f.write(b'line1\nline2')
        porcelain.add(repo=self.repo.path, paths=[file_path])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        # Then update the file as if it was created by CGit on a Windows
        # system with core.autocrlf=true
        with open(file_path, 'wb') as f:
            f.write(b'line1\r\nline2')
        # TODO: It should be set automatically by looking at the configuration
        c = self.repo.get_config()
        c.set("core", "autocrlf", True)
        c.write_to_path()
        results = porcelain.status(self.repo)
        self.assertDictEqual(
            {'add': [], 'delete': [], 'modify': []},
            results.staged)
        self.assertListEqual(results.unstaged, [])
        self.assertListEqual(results.untracked, [])
    def test_get_tree_changes_add(self):
        """get_tree_changes reports a newly staged file under 'add'."""
        # Make a dummy file, stage
        filename = 'bar'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        filename = 'foo'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        changes = porcelain.get_tree_changes(self.repo.path)
        self.assertEqual(changes['add'][0], filename.encode('ascii'))
        self.assertEqual(len(changes['add']), 1)
        self.assertEqual(len(changes['modify']), 0)
        self.assertEqual(len(changes['delete']), 0)
    def test_get_tree_changes_modify(self):
        """get_tree_changes reports a re-staged edit under 'modify'."""
        # Make a dummy file, stage, commit, modify
        filename = 'foo'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        with open(fullpath, 'w') as f:
            f.write('otherstuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        changes = porcelain.get_tree_changes(self.repo.path)
        self.assertEqual(changes['modify'][0], filename.encode('ascii'))
        self.assertEqual(len(changes['add']), 0)
        self.assertEqual(len(changes['modify']), 1)
        self.assertEqual(len(changes['delete']), 0)
    def test_get_tree_changes_delete(self):
        """get_tree_changes reports a removed file under 'delete'."""
        # Make a dummy file, stage, commit, remove
        filename = 'foo'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        cwd = os.getcwd()
        try:
            os.chdir(self.repo.path)
            porcelain.remove(repo=self.repo.path, paths=[filename])
        finally:
            os.chdir(cwd)
        changes = porcelain.get_tree_changes(self.repo.path)
        self.assertEqual(changes['delete'][0], filename.encode('ascii'))
        self.assertEqual(len(changes['add']), 0)
        self.assertEqual(len(changes['modify']), 0)
        self.assertEqual(len(changes['delete']), 1)
    def test_get_untracked_paths(self):
        """Ignored files appear in untracked only when ignored=True."""
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write('ignored\n')
        with open(os.path.join(self.repo.path, 'ignored'), 'w') as f:
            f.write('blah\n')
        with open(os.path.join(self.repo.path, 'notignored'), 'w') as f:
            f.write('blah\n')
        self.assertEqual(
            set(['ignored', 'notignored', '.gitignore']),
            set(porcelain.get_untracked_paths(self.repo.path, self.repo.path,
                                              self.repo.open_index())))
        self.assertEqual(set(['.gitignore', 'notignored']),
                         set(porcelain.status(self.repo).untracked))
        self.assertEqual(set(['.gitignore', 'notignored', 'ignored']),
                         set(porcelain.status(self.repo, ignored=True)
                             .untracked))
    def test_get_untracked_paths_nested(self):
        """A nested repository's files are not reported by the outer repo."""
        with open(os.path.join(self.repo.path, 'notignored'), 'w') as f:
            f.write('blah\n')
        subrepo = Repo.init(os.path.join(self.repo.path, 'nested'), mkdir=True)
        with open(os.path.join(subrepo.path, 'another'), 'w') as f:
            f.write('foo\n')
        self.assertEqual(
            set(['notignored']),
            set(porcelain.get_untracked_paths(self.repo.path, self.repo.path,
                                              self.repo.open_index())))
        self.assertEqual(
            set(['another']),
            set(porcelain.get_untracked_paths(subrepo.path, subrepo.path,
                                              subrepo.open_index())))
# TODO(jelmer): Add test for dulwich.porcelain.daemon
class UploadPackTests(PorcelainTestCase):
    """Tests for porcelain.upload_pack."""

    def test_upload_pack(self):
        """An empty request yields a single flush-pkt and exit code 0."""
        out = BytesIO()
        exitcode = porcelain.upload_pack(
            self.repo.path, BytesIO(b"0000"), out)
        self.assertEqual([b"0000"], out.getvalue().splitlines())
        self.assertEqual(0, exitcode)
class ReceivePackTests(PorcelainTestCase):
    """Tests for porcelain.receive_pack."""
    def test_receive_pack(self):
        """receive_pack advertises refs and capabilities, then flushes."""
        filename = 'foo'
        fullpath = os.path.join(self.repo.path, filename)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=fullpath)
        # Fixed timestamps/timezones make the advertised sha deterministic.
        self.repo.do_commit(message=b'test status',
                            author=b'author <email>',
                            committer=b'committer <email>',
                            author_timestamp=1402354300,
                            commit_timestamp=1402354300, author_timezone=0,
                            commit_timezone=0)
        outf = BytesIO()
        exitcode = porcelain.receive_pack(
            self.repo.path, BytesIO(b"0000"), outf)
        outlines = outf.getvalue().splitlines()
        self.assertEqual([
            b'0091319b56ce3aee2d489f759736a79cc552c9bb86d9 HEAD\x00 report-status '  # noqa: E501
            b'delete-refs quiet ofs-delta side-band-64k '
            b'no-done symref=HEAD:refs/heads/master',
            b'003f319b56ce3aee2d489f759736a79cc552c9bb86d9 refs/heads/master',
            b'0000'], outlines)
        self.assertEqual(0, exitcode)
class BranchListTests(PorcelainTestCase):
    """Tests for porcelain.branch_list."""

    def test_standard(self):
        """A repository without commits has no branches."""
        self.assertEqual(set(), set(porcelain.branch_list(self.repo)))

    def test_new_branch(self):
        """branch_create adds the branch alongside master."""
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, b"foo")
        self.assertEqual(
            {b"master", b"foo"},
            set(porcelain.branch_list(self.repo)))
class BranchCreateTests(PorcelainTestCase):
    """Tests for porcelain.branch_create."""

    def test_branch_exists(self):
        """Re-creating an existing branch fails unless force=True."""
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, b"foo")
        self.assertRaises(KeyError, porcelain.branch_create, self.repo, b"foo")
        porcelain.branch_create(self.repo, b"foo", force=True)

    def test_new_branch(self):
        """A newly created branch shows up in branch_list."""
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, b"foo")
        self.assertEqual(
            {b"master", b"foo"},
            set(porcelain.branch_list(self.repo)))
class BranchDeleteTests(PorcelainTestCase):
    """Tests for porcelain.branch_delete."""

    def test_simple(self):
        """branch_delete removes a branch created with a bytes name."""
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, b'foo')
        self.assertIn(b"foo", porcelain.branch_list(self.repo))
        porcelain.branch_delete(self.repo, b'foo')
        self.assertNotIn(b"foo", porcelain.branch_list(self.repo))

    def test_simple_unicode(self):
        """branch_delete also accepts a str branch name."""
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo[b"HEAD"] = c1.id
        porcelain.branch_create(self.repo, 'foo')
        self.assertIn(b"foo", porcelain.branch_list(self.repo))
        porcelain.branch_delete(self.repo, 'foo')
        self.assertNotIn(b"foo", porcelain.branch_list(self.repo))
class FetchTests(PorcelainTestCase):
def test_simple(self):
outstream = BytesIO()
errstream = BytesIO()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
# Setup target repo
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
target_repo = porcelain.clone(self.repo.path, target=target_path,
errstream=errstream)
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test2',
author=b'test2 <email>',
committer=b'test2 <email>')
self.assertFalse(self.repo[b'HEAD'].id in target_repo)
target_repo.close()
# Fetch changes into the cloned repo
porcelain.fetch(target_path, self.repo.path,
outstream=outstream, errstream=errstream)
# Assert that fetch updated the local image of the remote
self.assert_correct_remote_refs(
target_repo.get_refs(), self.repo.get_refs())
# Check the target repo for pushed changes
with Repo(target_path) as r:
self.assertTrue(self.repo[b'HEAD'].id in r)
def test_with_remote_name(self):
remote_name = b'origin'
outstream = BytesIO()
errstream = BytesIO()
# create a file for initial commit
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test',
author=b'test <email>',
committer=b'test <email>')
# Setup target repo
target_path = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, target_path)
target_repo = porcelain.clone(self.repo.path, target=target_path,
errstream=errstream)
# Capture current refs
target_refs = target_repo.get_refs()
# create a second file to be pushed
handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
os.close(handle)
porcelain.add(repo=self.repo.path, paths=fullpath)
porcelain.commit(repo=self.repo.path, message=b'test2',
author=b'test2 <email>',
committer=b'test2 <email>')
self.assertFalse(self.repo[b'HEAD'].id in target_repo)
target_repo.close()
# Fetch changes into the cloned repo
porcelain.fetch(target_path, self.repo.path, remote_name=remote_name,
outstream=outstream, errstream=errstream)
# Assert that fetch updated the local image of the remote
self.assert_correct_remote_refs(
target_repo.get_refs(), self.repo.get_refs())
# Check the target repo for pushed changes, as well as updates
# for the refs
with Repo(target_path) as r:
self.assertTrue(self.repo[b'HEAD'].id in r)
self.assertNotEqual(self.repo.get_refs(), target_refs)
def assert_correct_remote_refs(
        self, local_refs, remote_refs, remote_name=b'origin'):
    """Assert that the locally cached remote-tracking refs mirror the
    branch refs actually present in the remote repository."""
    def _strip(refs, prefix):
        # Keep refs under *prefix*, keyed by the name after the '/'.
        cut = len(prefix) + 1
        return {ref[cut:]: sha
                for ref, sha in refs.items() if ref.startswith(prefix)}

    self.assertEqual(
        _strip(local_refs, b'refs/remotes/' + remote_name),
        _strip(remote_refs, b'refs/heads'))
class RepackTests(PorcelainTestCase):
    """Tests for porcelain.repack()."""

    def test_empty(self):
        # Repacking an empty repository must not raise.
        porcelain.repack(self.repo)

    def test_simple(self):
        # Repack a repository containing one staged file.
        handle, fullpath = tempfile.mkstemp(dir=self.repo.path)
        os.close(handle)
        porcelain.add(repo=self.repo.path, paths=fullpath)
        porcelain.repack(self.repo)
class LsTreeTests(PorcelainTestCase):
    """Tests for porcelain.ls_tree()."""

    def test_empty(self):
        # An empty tree produces no output.
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        f = StringIO()
        porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
        self.assertEqual(f.getvalue(), "")

    def test_simple(self):
        # Commit a dummy file then modify it
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        f = StringIO()
        porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
        # Output lines are "<mode> <type> <sha>\t<name>\n".
        self.assertEqual(
            f.getvalue(),
            '100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tfoo\n')

    def test_recursive(self):
        """Without recursive=True only the tree entry appears; with it
        the blob inside the directory is listed as well."""
        # Create a directory then write a dummy file in it
        dirpath = os.path.join(self.repo.path, 'adir')
        filepath = os.path.join(dirpath, 'afile')
        os.mkdir(dirpath)
        with open(filepath, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[filepath])
        porcelain.commit(repo=self.repo.path, message=b'test status',
                         author=b'author <email>',
                         committer=b'committer <email>')
        f = StringIO()
        porcelain.ls_tree(self.repo, b"HEAD", outstream=f)
        self.assertEqual(
            f.getvalue(),
            '40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n')
        f = StringIO()
        porcelain.ls_tree(self.repo, b"HEAD", outstream=f, recursive=True)
        self.assertEqual(
            f.getvalue(),
            '40000 tree b145cc69a5e17693e24d8a7be0016ed8075de66d\tadir\n'
            '100644 blob 8b82634d7eae019850bb883f06abf428c58bc9aa\tadir'
            '/afile\n')
class LsRemoteTests(PorcelainTestCase):
    """Tests for porcelain.ls_remote()."""

    def test_empty(self):
        # A repository without refs yields an empty mapping.
        self.assertEqual({}, porcelain.ls_remote(self.repo.path))

    def test_some(self):
        cid = porcelain.commit(repo=self.repo.path, message=b'test status',
                               author=b'author <email>',
                               committer=b'committer <email>')
        # Both the branch ref and HEAD point at the new commit.
        self.assertEqual({
            b'refs/heads/master': cid,
            b'HEAD': cid},
            porcelain.ls_remote(self.repo.path))
class LsFilesTests(PorcelainTestCase):
    """Tests for porcelain.ls_files()."""

    def test_empty(self):
        self.assertEqual([], list(porcelain.ls_files(self.repo)))

    def test_simple(self):
        # Commit a dummy file then modify it
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write('origstuff')
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        # Staged files appear in the index listing, as bytes.
        self.assertEqual([b'foo'], list(porcelain.ls_files(self.repo)))
class RemoteAddTests(PorcelainTestCase):
    """Tests for porcelain.remote_add()."""

    def test_new(self):
        porcelain.remote_add(
            self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
        c = self.repo.get_config()
        # The remote URL is recorded in the repository config.
        self.assertEqual(
            c.get((b'remote', b'jelmer'), b'url'),
            b'git://jelmer.uk/code/dulwich')

    def test_exists(self):
        porcelain.remote_add(
            self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
        # Adding the same remote a second time raises RemoteExists.
        self.assertRaises(porcelain.RemoteExists, porcelain.remote_add,
                          self.repo, 'jelmer', 'git://jelmer.uk/code/dulwich')
class CheckIgnoreTests(PorcelainTestCase):
    """Tests for porcelain.check_ignore()."""

    def test_check_ignored(self):
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write('foo')
        foo_path = os.path.join(self.repo.path, 'foo')
        with open(foo_path, 'w') as f:
            f.write('BAR')
        bar_path = os.path.join(self.repo.path, 'bar')
        with open(bar_path, 'w') as f:
            f.write('BAR')
        # 'foo' matches the ignore pattern; 'bar' does not.
        self.assertEqual(
            ['foo'],
            list(porcelain.check_ignore(self.repo, [foo_path])))
        self.assertEqual(
            [], list(porcelain.check_ignore(self.repo, [bar_path])))

    def test_check_added_abs(self):
        """A file already in the index is only reported as ignored when
        no_index=True is passed (absolute path variant)."""
        path = os.path.join(self.repo.path, 'foo')
        with open(path, 'w') as f:
            f.write('BAR')
        self.repo.stage(['foo'])
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write('foo\n')
        self.assertEqual(
            [], list(porcelain.check_ignore(self.repo, [path])))
        self.assertEqual(
            ['foo'],
            list(porcelain.check_ignore(self.repo, [path], no_index=True)))

    def test_check_added_rel(self):
        """Same as test_check_added_abs, but using a path relative to a
        subdirectory of the repository."""
        with open(os.path.join(self.repo.path, 'foo'), 'w') as f:
            f.write('BAR')
        self.repo.stage(['foo'])
        with open(os.path.join(self.repo.path, '.gitignore'), 'w') as f:
            f.write('foo\n')
        cwd = os.getcwd()
        os.mkdir(os.path.join(self.repo.path, 'bar'))
        os.chdir(os.path.join(self.repo.path, 'bar'))
        try:
            self.assertEqual(
                list(porcelain.check_ignore(self.repo, ['../foo'])), [])
            self.assertEqual(['../foo'], list(
                porcelain.check_ignore(self.repo, ['../foo'], no_index=True)))
        finally:
            # Always restore the working directory for later tests.
            os.chdir(cwd)
class UpdateHeadTests(PorcelainTestCase):
    """Tests for porcelain.update_head()."""

    def test_set_to_branch(self):
        # HEAD becomes a symbolic ref to the named branch.
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo.refs[b"refs/heads/blah"] = c1.id
        porcelain.update_head(self.repo, "blah")
        self.assertEqual(c1.id, self.repo.head())
        self.assertEqual(b'ref: refs/heads/blah',
                         self.repo.refs.read_ref(b'HEAD'))

    def test_set_to_branch_detached(self):
        # With detached=True, HEAD stores the commit id directly.
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo.refs[b"refs/heads/blah"] = c1.id
        porcelain.update_head(self.repo, "blah", detached=True)
        self.assertEqual(c1.id, self.repo.head())
        self.assertEqual(c1.id, self.repo.refs.read_ref(b'HEAD'))

    def test_set_to_commit_detached(self):
        # A raw commit id can also be the detach target.
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo.refs[b"refs/heads/blah"] = c1.id
        porcelain.update_head(self.repo, c1.id, detached=True)
        self.assertEqual(c1.id, self.repo.head())
        self.assertEqual(c1.id, self.repo.refs.read_ref(b'HEAD'))

    def test_set_new_branch(self):
        # new_branch creates 'bar' at the target and points HEAD at it.
        [c1] = build_commit_graph(self.repo.object_store, [[1]])
        self.repo.refs[b"refs/heads/blah"] = c1.id
        porcelain.update_head(self.repo, "blah", new_branch="bar")
        self.assertEqual(c1.id, self.repo.head())
        self.assertEqual(b'ref: refs/heads/bar',
                         self.repo.refs.read_ref(b'HEAD'))
class MailmapTests(PorcelainTestCase):
    """Tests for porcelain.check_mailmap()."""

    def test_no_mailmap(self):
        # Without a .mailmap file the identity is returned unchanged.
        self.assertEqual(
            b'Jelmer Vernooij <jelmer@samba.org>',
            porcelain.check_mailmap(
                self.repo, b'Jelmer Vernooij <jelmer@samba.org>'))

    def test_mailmap_lookup(self):
        # A single-identity .mailmap entry maps other addresses for the
        # same author name to the canonical one.
        with open(os.path.join(self.repo.path, '.mailmap'), 'wb') as f:
            f.write(b"""\
Jelmer Vernooij <jelmer@debian.org>
""")
        self.assertEqual(
            b'Jelmer Vernooij <jelmer@debian.org>',
            porcelain.check_mailmap(
                self.repo, b'Jelmer Vernooij <jelmer@samba.org>'))
class FsckTests(PorcelainTestCase):
    """Tests for porcelain.fsck()."""

    def test_none(self):
        # A pristine repository reports no problems.
        self.assertEqual(
            [],
            list(porcelain.fsck(self.repo)))

    def test_git_dir(self):
        # A tree entry named '.git' is invalid and must be reported.
        obj = Tree()
        a = Blob()
        a.data = b"foo"
        obj.add(b".git", 0o100644, a.id)
        self.repo.object_store.add_objects(
            [(a, None), (obj, None)])
        self.assertEqual(
            [(obj.id, 'invalid name .git')],
            [(sha, str(e)) for (sha, e) in porcelain.fsck(self.repo)])
class DescribeTests(PorcelainTestCase):
    """Tests for porcelain.describe()."""

    def test_no_commits(self):
        # describe() on a repository without commits raises KeyError.
        self.assertRaises(KeyError, porcelain.describe, self.repo.path)

    def test_single_commit(self):
        """Without tags, the description is 'g' + abbreviated sha."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        sha = porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        self.assertEqual(
            'g{}'.format(sha[:7].decode('ascii')),
            porcelain.describe(self.repo.path))

    def test_tag(self):
        """A commit carrying an annotated tag is described by tag name."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
                             b'bar', annotated=True)
        self.assertEqual(
            "tryme",
            porcelain.describe(self.repo.path))

    def test_tag_and_commit(self):
        """A commit made after a tag is described as <tag>-<N>-g<sha>."""
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        porcelain.tag_create(self.repo.path, b"tryme", b'foo <foo@bar.com>',
                             b'bar', annotated=True)
        with open(fullpath, 'w') as f:
            f.write("BAR2")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        sha = porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        self.assertEqual(
            'tryme-1-g{}'.format(sha[:7].decode('ascii')),
            porcelain.describe(self.repo.path))
class HelperTests(PorcelainTestCase):
    """Tests for porcelain.path_to_tree_path()."""

    def test_path_to_tree_path_base(self):
        # Paths are resolved against the repo root; result is bytes.
        self.assertEqual(
            b'bar', porcelain.path_to_tree_path('/home/foo', '/home/foo/bar'))
        self.assertEqual(b'bar', porcelain.path_to_tree_path('.', './bar'))
        self.assertEqual(b'bar', porcelain.path_to_tree_path('.', 'bar'))
        cwd = os.getcwd()
        self.assertEqual(
            b'bar', porcelain.path_to_tree_path('.', os.path.join(cwd, 'bar')))
        self.assertEqual(b'bar', porcelain.path_to_tree_path(cwd, 'bar'))

    def test_path_to_tree_path_syntax(self):
        # bytes and str arguments may be mixed freely.
        self.assertEqual(b'bar', porcelain.path_to_tree_path(b'.', './bar'))
        self.assertEqual(b'bar', porcelain.path_to_tree_path('.', b'./bar'))
        self.assertEqual(b'bar', porcelain.path_to_tree_path(b'.', b'./bar'))

    def test_path_to_tree_path_error(self):
        # A path outside the repo root is rejected with ValueError.
        with self.assertRaises(ValueError):
            porcelain.path_to_tree_path('/home/foo/', '/home/bar/baz')

    def test_path_to_tree_path_rel(self):
        # Relative inputs are resolved against the current directory.
        cwd = os.getcwd()
        os.mkdir(os.path.join(self.repo.path, 'foo'))
        os.mkdir(os.path.join(self.repo.path, 'foo/bar'))
        try:
            os.chdir(os.path.join(self.repo.path, 'foo/bar'))
            self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
                '..', 'baz'))
            self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
                os.path.join(os.getcwd(), '..'),
                os.path.join(os.getcwd(), 'baz')))
            self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
                '..', os.path.join(os.getcwd(), 'baz')))
            self.assertEqual(b'bar/baz', porcelain.path_to_tree_path(
                os.path.join(os.getcwd(), '..'), 'baz'))
        finally:
            os.chdir(cwd)
class GetObjectBypathTests(PorcelainTestCase):
    """Tests for porcelain.get_object_by_path()."""

    def test_simple(self):
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        porcelain.commit(
            self.repo.path, message=b"Some message",
            author=b"Joe <joe@example.com>",
            committer=b"Bob <bob@example.com>")
        # The committed blob is retrievable by its tree path.
        self.assertEqual(
            b"BAR",
            porcelain.get_object_by_path(self.repo, 'foo').data)

    def test_missing(self):
        # An unknown path raises KeyError.
        self.assertRaises(
            KeyError,
            porcelain.get_object_by_path, self.repo, 'foo')
class WriteTreeTests(PorcelainTestCase):
    """Tests for porcelain.write_tree()."""

    def test_simple(self):
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write("BAR")
        porcelain.add(repo=self.repo.path, paths=[fullpath])
        # Writing the index as a tree yields a deterministic tree id.
        self.assertEqual(
            b'd2092c8a9f311f0311083bf8d177f2ca0ab5b241',
            porcelain.write_tree(self.repo))
| true | true |
1c461aa2e5f63fa27680aa6cf11215cb8e9c8883 | 1,802 | py | Python | rllib/examples/export/onnx_torch.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 22 | 2018-05-08T05:52:34.000Z | 2020-04-01T10:09:55.000Z | rllib/examples/export/onnx_torch.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 73 | 2021-09-25T07:11:39.000Z | 2022-03-26T07:10:59.000Z | rllib/examples/export/onnx_torch.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 10 | 2018-04-27T10:50:59.000Z | 2020-02-24T02:41:43.000Z | from distutils.version import LooseVersion
import numpy as np
import ray
import ray.rllib.agents.ppo as ppo
import onnxruntime
import os
import shutil
import torch
# Example script: train-free export of an RLlib PPO torch policy to
# ONNX and verification that ONNX inference matches torch inference.
# (The LooseVersion import for this script appears earlier in the file.)
# Configure our PPO trainer
config = ppo.DEFAULT_CONFIG.copy()
config["num_gpus"] = 0
config["num_workers"] = 1
config["framework"] = "torch"
outdir = "export_torch"
# Start from a clean export directory.
if os.path.exists(outdir):
    shutil.rmtree(outdir)
# Fixed seed so the test batch below is reproducible.
np.random.seed(1234)
# We will run inference with this test batch
test_data = {
    "obs": np.random.uniform(0, 1.0, size=(10, 4)).astype(np.float32),
    "state_ins": np.array([0.0], dtype=np.float32),
}
# Start Ray and initialize a PPO trainer
ray.init()
trainer = ppo.PPOTrainer(config=config, env="CartPole-v0")
# You could train the model here
# trainer.train()
# Let's run inference on the torch model
policy = trainer.get_policy()
result_pytorch, _ = policy.model(
    {
        "obs": torch.tensor(test_data["obs"]),
    }
)
# Evaluate tensor to fetch numpy array
result_pytorch = result_pytorch.detach().numpy()
# This line will export the model to ONNX
res = trainer.export_policy_model(outdir, onnx=11)
# Import ONNX model
exported_model_file = os.path.join(outdir, "model.onnx")
# Start an inference session for the ONNX model
session = onnxruntime.InferenceSession(exported_model_file, None)
# Pass the same test batch to the ONNX model
if LooseVersion(torch.__version__) < LooseVersion("1.9.0"):
    # In torch < 1.9.0 the second input/output name gets mixed up
    test_data["state_outs"] = test_data.pop("state_ins")
result_onnx = session.run(["output"], test_data)
# These results should be equal!
print("PYTORCH", result_pytorch)
print("ONNX", result_onnx)
assert np.allclose(result_pytorch, result_onnx), "Model outputs are NOT equal. FAILED"
print("Model outputs are equal. PASSED")
| 26.115942 | 86 | 0.736404 | from distutils.version import LooseVersion
import numpy as np
import ray
import ray.rllib.agents.ppo as ppo
import onnxruntime
import os
import shutil
import torch
config = ppo.DEFAULT_CONFIG.copy()
config["num_gpus"] = 0
config["num_workers"] = 1
config["framework"] = "torch"
outdir = "export_torch"
if os.path.exists(outdir):
shutil.rmtree(outdir)
np.random.seed(1234)
test_data = {
"obs": np.random.uniform(0, 1.0, size=(10, 4)).astype(np.float32),
"state_ins": np.array([0.0], dtype=np.float32),
}
ray.init()
trainer = ppo.PPOTrainer(config=config, env="CartPole-v0")
policy = trainer.get_policy()
result_pytorch, _ = policy.model(
{
"obs": torch.tensor(test_data["obs"]),
}
)
# Evaluate tensor to fetch numpy array
result_pytorch = result_pytorch.detach().numpy()
# This line will export the model to ONNX
res = trainer.export_policy_model(outdir, onnx=11)
# Import ONNX model
exported_model_file = os.path.join(outdir, "model.onnx")
# Start an inference session for the ONNX model
session = onnxruntime.InferenceSession(exported_model_file, None)
# Pass the same test batch to the ONNX model
if LooseVersion(torch.__version__) < LooseVersion("1.9.0"):
# In torch < 1.9.0 the second input/output name gets mixed up
test_data["state_outs"] = test_data.pop("state_ins")
result_onnx = session.run(["output"], test_data)
# These results should be equal!
print("PYTORCH", result_pytorch)
print("ONNX", result_onnx)
assert np.allclose(result_pytorch, result_onnx), "Model outputs are NOT equal. FAILED"
print("Model outputs are equal. PASSED")
| true | true |
1c461b183b4ab4d591ec0f8eb4bc1dd4b40c8651 | 152 | py | Python | webapp/urls.py | knschuckmann/Django_tableview | 1b874baf96fc72756e63f9c4178465c7064b9465 | [
"Apache-2.0"
] | null | null | null | webapp/urls.py | knschuckmann/Django_tableview | 1b874baf96fc72756e63f9c4178465c7064b9465 | [
"Apache-2.0"
] | null | null | null | webapp/urls.py | knschuckmann/Django_tableview | 1b874baf96fc72756e63f9c4178465c7064b9465 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from django.urls import path
from webapp import views
urlpatterns = [
path('', views.TableView.as_view(), name='webapp'),
] | 21.714286 | 55 | 0.664474 |
from django.urls import path
from webapp import views
urlpatterns = [
path('', views.TableView.as_view(), name='webapp'),
] | true | true |
1c461b501e3b828ff0c81891547cab2ecf40fbec | 4,713 | py | Python | messages.py | sushi-irc/nigiri | 9e1137a80f350ea05ae76df93061d3dc188e1ba7 | [
"BSD-2-Clause"
] | 1 | 2017-07-24T19:31:19.000Z | 2017-07-24T19:31:19.000Z | messages.py | sushi-irc/nigiri | 9e1137a80f350ea05ae76df93061d3dc188e1ba7 | [
"BSD-2-Clause"
] | null | null | null | messages.py | sushi-irc/nigiri | 9e1137a80f350ea05ae76df93061d3dc188e1ba7 | [
"BSD-2-Clause"
] | 1 | 2019-01-31T19:16:16.000Z | 2019-01-31T19:16:16.000Z | # coding: UTF-8
"""
Copyright (c) 2009 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""
provide methods for warnings, notifications, debug and errors
"""
import sys
import urwid
import urwid.util
import time
import config
from typecheck import types
import tabs
import helper
# Module-global reference to the main window; set once via setup() and
# used by all print_* helpers below.
main_window = None
def setup (mw):
    """Remember *mw* as the global main window for this module."""
    global main_window
    main_window = mw
class FormattedMessage(object):
    """A chat message rendered from a template plus substitution values.

    str() renders the template with the values; markup() returns urwid
    markup, either via the optional ``markup_cb`` callback or by
    painting the whole message in ``base_color``.
    """

    @property
    def markup_cb(self): return self._markup_cb

    @markup_cb.setter
    def markup_cb(self, cb): self._markup_cb = cb

    @types( category = basestring, template = basestring,
            values = dict, highlight = bool, own = bool )
    def __init__(self, category, template, values, base_color,
                 highlight = False, own = False):
        self.category = category
        self.template = template
        self.values = values
        self.highlight = highlight
        self.base_color = base_color
        self.own = own # we triggered that/we are meant by it
        # Optional custom renderer used by markup(); kwargs are passed
        # through to the callback.
        self.markup_cb = None
        self.markup_cb_kwargs = {}

    def __str__(self):
        # Render the template; report missing keys instead of raising.
        try:
            return self.template % (self.values)
        except KeyError,e:
            return "TEMPLATE_MISSING_KEY(%s)" % (e)

    def _markup(self):
        # Default rendering: the whole message in the base color.
        return [(self.base_color, unicode(self))]

    def markup(self):
        """Return urwid markup, preferring the custom callback if set."""
        if self.markup_cb:
            return self.markup_cb(self, **self.markup_cb_kwargs)
        return self._markup()
@types (mtype=basestring, template_id=basestring, values=dict)
def format_message(mtype, template_id, values, highlight=False, own=False):
    """ factory method for FormattedMessage

    Looks up color and template for the message category (suffixed with
    "_highlight" or "_own" where applicable), stamps the current time
    into *values* and returns the resulting FormattedMessage. If no
    template is configured, the message renders a TEMPLATE_ERROR marker.
    """
    if highlight:
        generic_type = mtype + "_highlight"
    elif own:
        generic_type = mtype + "_own"
        template_id = template_id + "_own"
    else:
        generic_type = mtype
    # The current time stamp is always available to templates.
    values["time"] = time.strftime(config.get("templates", "datestring"))
    base_color = config.get("colors", generic_type, "default")
    template = config.get("templates", template_id)
    msg = FormattedMessage(
        mtype, template, values, base_color, highlight, own)
    # PEP 8: identity comparison for None (was "template == None").
    if template is None:
        msg.template = "TEMPLATE_ERROR(%s)" % template_id
    return msg
@types(msg = (basestring, list, FormattedMessage))
def print_tab(dest_tab, msg, msgtype="informative"):
    """Append *msg* to the output of *dest_tab*.

    FormattedMessage instances are rendered via their markup();
    plain strings/lists are passed to urwid.Text unchanged. If the tab
    is not the currently visible one, its status is updated instead of
    scrolling.
    """
    if not main_window:
        raise ValueError, "No main_window found."
    tablist = tabs.tree_to_list(main_window.servers)
    try:
        i = tablist.index(dest_tab)
    except ValueError:
        print_error("print_tab to invalid destinaton '%s'." % dest_tab)
        return
    else:
        if isinstance(msg, FormattedMessage):
            markup = msg.markup()
            msgtype = msg.category
            if isinstance(markup,tuple):
                # ("...",[(color,pos),...])
                new_markup = helper.markup.tuple_to_list(markup)
                textItem = urwid.Text(new_markup)
            elif isinstance(markup,list):
                # [(color,text),...]
                textItem = urwid.Text(markup)
            else:
                textItem = urwid.Text(markup)
        else:
            textItem = urwid.Text(msg)
        tablist[i].output_walker.append(textItem)
        if main_window.current_tab != dest_tab:
            # Tab not visible: flag new activity instead of scrolling.
            dest_tab.add_status(msgtype)
            main_window.update_divider()
        else:
            main_window.body.scroll_to_bottom()
def print_tab_notification(tab, msg):
    """Write a notification line to the given tab."""
    text = "*** Notification: " + msg
    print_tab(tab, text)
def print_tab_error(tab, msg):
    """Write an error line to the given tab."""
    text = "!!! Error: " + msg
    print_tab(tab, text)
def print_normal(msg, *args, **dargs):
    """Print *msg* unchanged to the main window."""
    main_window.print_text(msg)
def print_error (msg, *args, **dargs):
    """Print *msg* to the main window, prefixed as an error."""
    main_window.print_text("!!! Error: " + msg)
def print_notification (msg):
    """Print *msg* to the main window, prefixed as a notification."""
    main_window.print_text("*** Notification: " + msg)
def print_debug (msg, *args, **dargs):
    """Print *msg* as a debug line; suppressed unless 'show_debug' is
    enabled in the configuration."""
    if config.get_bool("nigiri", "show_debug"):
        main_window.print_text("=== Debug: " + msg)
| 28.221557 | 75 | 0.737747 |
"""
Copyright (c) 2009 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
"""
provide methods for warnings, notifications, debug and errors
"""
import sys
import urwid
import urwid.util
import time
import config
from typecheck import types
import tabs
import helper
main_window = None
def setup (mw):
global main_window
main_window = mw
class FormattedMessage(object):
@property
def markup_cb(self): return self._markup_cb
@markup_cb.setter
def markup_cb(self, cb): self._markup_cb = cb
@types( category = basestring, template = basestring,
values = dict, highlight = bool, own = bool )
def __init__(self, category, template, values, base_color,
highlight = False, own = False):
self.category = category
self.template = template
self.values = values
self.highlight = highlight
self.base_color = base_color
self.own = own
self.markup_cb = None
self.markup_cb_kwargs = {}
def __str__(self):
try:
return self.template % (self.values)
except KeyError,e:
return "TEMPLATE_MISSING_KEY(%s)" % (e)
def _markup(self):
return [(self.base_color, unicode(self))]
def markup(self):
if self.markup_cb:
return self.markup_cb(self, **self.markup_cb_kwargs)
return self._markup()
@types (mtype=basestring, template_id=basestring, values=dict)
def format_message(mtype, template_id, values, highlight=False, own=False):
""" factory method for FormattedMessage """
if highlight:
generic_type = mtype + "_highlight"
elif own:
generic_type = mtype + "_own"
template_id = template_id + "_own"
else:
generic_type = mtype
values["time"] = time.strftime(config.get("templates", "datestring"))
base_color = config.get("colors", generic_type, "default")
template = config.get("templates", template_id)
msg = FormattedMessage(
mtype, template, values, base_color, highlight, own)
if template == None:
msg.template = "TEMPLATE_ERROR(%s)" % template_id
return msg
@types(msg = (basestring, list, FormattedMessage))
def print_tab(dest_tab, msg, msgtype="informative"):
if not main_window:
raise ValueError, "No main_window found."
tablist = tabs.tree_to_list(main_window.servers)
try:
i = tablist.index(dest_tab)
except ValueError:
print_error("print_tab to invalid destinaton '%s'." % dest_tab)
return
else:
if isinstance(msg, FormattedMessage):
markup = msg.markup()
msgtype = msg.category
if isinstance(markup,tuple):
new_markup = helper.markup.tuple_to_list(markup)
textItem = urwid.Text(new_markup)
elif isinstance(markup,list):
textItem = urwid.Text(markup)
else:
textItem = urwid.Text(markup)
else:
textItem = urwid.Text(msg)
tablist[i].output_walker.append(textItem)
if main_window.current_tab != dest_tab:
dest_tab.add_status(msgtype)
main_window.update_divider()
else:
main_window.body.scroll_to_bottom()
def print_tab_notification(tab, msg):
print_tab(tab, "*** Notification: " + msg)
def print_tab_error(tab, msg):
print_tab(tab, "!!! Error: " + msg)
def print_normal(msg, *args, **dargs):
main_window.print_text(msg)
def print_error (msg, *args, **dargs):
main_window.print_text("!!! Error: " + msg)
def print_notification (msg):
main_window.print_text("*** Notification: " + msg)
def print_debug (msg, *args, **dargs):
if not config.get_bool("nigiri", "show_debug"):
return
main_window.print_text("=== Debug: " + msg)
| false | true |
1c461c15867001aca948defb8fbac5a5e9fb967f | 11,442 | py | Python | tests/Demo.py | adityasingh177/trusted-compute-framework | b91410f6da21ba4d7458dd02048a447bcd4fed5a | [
"Apache-2.0"
] | null | null | null | tests/Demo.py | adityasingh177/trusted-compute-framework | b91410f6da21ba4d7458dd02048a447bcd4fed5a | [
"Apache-2.0"
] | null | null | null | tests/Demo.py | adityasingh177/trusted-compute-framework | b91410f6da21ba4d7458dd02048a447bcd4fed5a | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import argparse
import random
import json
import logging
from service_client.generic import GenericServiceClient
import crypto.crypto as crypto
import utility.signature as signature
import worker.worker_details as worker
from shared_kv.shared_kv_interface import KvStorage
import utility.utility as enclave_helper
import utility.file_utils as futils
from error_code.error_status import SignatureStatus, WorkOrderStatus
TCFHOME = os.environ.get("TCF_HOME", "../../")
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
def LocalMain(config):
    """Submit JSON work order request(s) to the TCF listener.

    Reads requests either from the single input file (input_json_str)
    or from every file in input_json_dir (sorted order). WorkOrderSubmit
    requests get fresh work order / workload ids and a requester
    signature; WorkOrderGetResult is polled until a result (or a
    non-PENDING error) arrives, then the response signature is verified
    and the result decrypted. Exits the process on fatal errors and at
    the end of a successful run.
    """
    # Validate the module globals populated by ParseCommandLine.
    if not input_json_str and not input_json_dir:
        logger.error("JSON input file is not provided")
        exit(1)
    if not output_json_file_name:
        logger.error("JSON output file is not provided")
        exit(1)
    if not server_uri:
        logger.error("Server URI is not provided")
        exit(1)
    logger.info("Execute work order")
    uri_client = GenericServiceClient(server_uri)
    response = None
    wo_id = None
    if input_json_dir:
        directory = os.fsencode(input_json_dir)
        files = os.listdir(directory)
        for file in sorted(files):
            logger.info("---------------Input file name: %s ---------------\n",
                        file.decode("utf-8"))
            input_json_str1 = futils.read_json_file(
                (directory.decode("utf-8") + file.decode("utf-8")))
            # If the client request is WorkOrderSubmit, a requester
            # payload signature with the requester private signing key
            # is generated.
            if "WorkOrderSubmit" in input_json_str1:
                # Update workOrderId, workerId and workloadId
                input_json_obj = json.loads(input_json_str1)
                wo_id = hex(random.randint(1, 2**64 - 1))
                input_json_obj["params"]["workOrderId"] = wo_id
                input_json_obj["params"]["workerId"] = worker_obj.worker_id
                # Convert workloadId to a hex string and update the request
                workload_id = input_json_obj["params"]["workloadId"]
                workload_id_hex = workload_id.encode("UTF-8").hex()
                input_json_obj["params"]["workloadId"] = workload_id_hex
                input_json_str1 = json.dumps(input_json_obj)
                # Generate session iv and an encrypted session key
                session_iv = enclave_helper.generate_iv()
                session_key = enclave_helper.generate_key()
                encrypted_session_key = enclave_helper.generate_encrypted_key(
                    session_key, worker_obj.encryption_key)
                input_json_str1, status = sig_obj.generate_client_signature(
                    input_json_str1, worker_obj, private_key, session_key,
                    session_iv, encrypted_session_key)
                if status != SignatureStatus.PASSED:
                    logger.info("Generate signature failed\n")
                    exit(1)
                if input_json_str1 is None:
                    continue
            # Update the worker ID from an earlier WorkerRetrieve response.
            if response:
                if "workerId" in input_json_str1:
                    # Retrieve the worker id from the "WorkerRetrieve"
                    # response and update the worker id information for
                    # further json requests.
                    if "result" in response and "ids" in response["result"].keys():
                        input_json_final = json.loads(input_json_str1)
                        worker_id = response["result"]["ids"][0]
                        input_json_final["params"]["workerId"] = worker_id
                        input_json_str1 = json.dumps(input_json_final)
                        logger.info("**********Worker details Updated with "
                                    "Worker ID*********\n%s\n", input_json_str1)
            # Propagate the generated work order id into follow-up requests.
            # BUGFIX: the second operand used to be the bare literal
            # "WorkOrderReceiptRetrieve" (always truthy), making the
            # condition true for every request type.
            if ("WorkOrderGetResult" in input_json_str1 or
                    "WorkOrderReceiptRetrieve" in input_json_str1):
                input_json_obj = json.loads(input_json_str1)
                input_json_obj["params"]["workOrderId"] = wo_id
                input_json_str1 = json.dumps(input_json_obj)
            logger.info("*********Request Json********* \n%s\n", input_json_str1)
            response = uri_client._postmsg(input_json_str1)
            logger.info("**********Received Response*********\n%s\n", response)
            # Worker details are loaded into worker_obj.
            if "WorkerRetrieve" in input_json_str1 and "result" in response:
                worker_obj.load_worker(response)
            # Poll "WorkOrderGetResult" until a result arrives or a
            # non-PENDING error is returned.
            while ("WorkOrderGetResult" in input_json_str1 and
                    "result" not in response):
                if response["error"]["code"] != WorkOrderStatus.PENDING:
                    break
                response = uri_client._postmsg(input_json_str1)
                logger.info("Received Response : %s, \n \n ", response)
                time.sleep(3)
            # Verify the response signature and decrypt the result.
            if ("WorkOrderGetResult" in input_json_str1):
                if "error" in response:
                    # Response has error, hence skip Signature verification
                    logger.info("Work order response has error, "
                                "skipping signature verification")
                    continue
                sig_bool = sig_obj.verify_signature(
                    response, worker_obj.verification_key)
                try:
                    if sig_bool > 0:
                        logger.info("Signature Verified")
                        enclave_helper.decrypted_response(
                            response, session_key, session_iv)
                    else:
                        logger.info("Signature verification Failed")
                        exit(1)
                except Exception:
                    # BUGFIX: was a bare "except:", which also caught the
                    # SystemExit raised by exit(1) above and masked it.
                    logger.error("ERROR: Failed to analyze Signature Verification")
                    exit(1)
    else:
        logger.info("Input Request %s", input_json_str)
        response = uri_client._postmsg(input_json_str)
        logger.info("Received Response : %s , \n \n ", response)
    exit(0)
# -----------------------------------------------------------------------------
def ParseCommandLine(config, args):
    '''
    Parse client command line options into the module-level globals that
    LocalMain() consumes (input request JSON, server URI, output file name
    and key material), and initialize the signature/worker helper objects.

    config: configuration dict; a default "Logging" section is added if absent.
    args:   argv-style list of option strings.
    '''
    logger.info('***************** TRUSTED COMPUTE FRAMEWORK (TCF)*****************')
    global input_json_str
    global input_json_dir
    global server_uri
    global output_json_file_name
    global consensus_file_name
    global sig_obj
    global worker_obj
    global private_key
    global encrypted_session_key
    global session_iv
    parser = argparse.ArgumentParser()
    parser.add_argument("--logfile", help="Name of the log file, __screen__ for standard output", type=str)
    parser.add_argument("-p", "--private_key",
        help="Private Key of the Client", type=str, default=None)
    parser.add_argument("--loglevel", help="Logging level", type=str)
    parser.add_argument("-i", "--input_file", help="JSON input file name", type=str, default="input.json")
    # Bug fix: help text was a copy/paste of --loglevel's ("Logging level").
    parser.add_argument("--input_dir", help="Directory containing JSON input files", type=str, default=[])
    parser.add_argument(
        "-c", "--connect_uri", help="URI to send requests to", type=str, default=[])
    parser.add_argument(
        "output_file",
        help="JSON output file name",
        type=str,
        default="output.json",
        nargs="?")
    options = parser.parse_args(args)
    # Make sure a Logging section exists before applying CLI overrides.
    if config.get("Logging") is None:
        config["Logging"] = {
            "LogFile": "__screen__",
            "LogLevel": "INFO"
        }
    if options.logfile:
        config["Logging"]["LogFile"] = options.logfile
    if options.loglevel:
        config["Logging"]["LogLevel"] = options.loglevel.upper()
    input_json_str = None
    input_json_dir = None
    if options.connect_uri:
        server_uri = options.connect_uri
    else:
        # Bug fix: leave the global defined so LocalMain's "not server_uri"
        # check fails cleanly instead of raising NameError.
        server_uri = None
        logger.error("ERROR: Please enter the server URI")
    if options.input_dir:
        logger.info("Load Json Directory from %s", options.input_dir)
        input_json_dir = options.input_dir
    elif options.input_file:
        try:
            logger.info("load JSON input from %s", options.input_file)
            with open(options.input_file, "r") as file:
                input_json_str = file.read()
        # Bug fix: bare "except:" also swallowed KeyboardInterrupt/SystemExit;
        # only file-system errors are expected here.
        except OSError:
            logger.error("ERROR: Failed to read from file %s", options.input_file)
    else:
        logger.info("No input found")
    if options.output_file:
        output_json_file_name = options.output_file
    else:
        output_json_file_name = None
    if options.private_key:
        private_key = options.private_key
    else:
        # Generate a fresh signing key for the client if none was supplied.
        private_key = enclave_helper.generate_signing_keys()
    # Initialize the signature helper and worker detail objects.
    sig_obj = signature.ClientSignature()
    worker_obj = worker.SGXWorkerDetails()
# -----------------------------------------------------------------------------
def Main(args=None):
    '''
    Script entry point: load the TOML configuration, set up logging
    (redirecting stdout/stderr into the logger), parse the remaining
    command line options and run LocalMain().
    '''
    import config.config as pconfig
    import utility.logger as plogger
    # parse out the configuration file first
    conffiles = ["tcs_config.toml"]
    confpaths = [".", TCFHOME + "/config", "../../etc"]
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", help="configuration file", nargs="+")
    parser.add_argument("--config-dir", help="configuration folder", nargs="+")
    # parse_known_args: unrecognized options are forwarded to ParseCommandLine()
    (options, remainder) = parser.parse_known_args(args)
    if options.config:
        conffiles = options.config
    if options.config_dir:
        confpaths = options.config_dir
    try:
        config = pconfig.parse_configuration_files(conffiles, confpaths)
        # round-trip through json.dumps to validate the config is serializable
        json.dumps(config, indent=4)
    except pconfig.ConfigurationException as e:
        logger.error(str(e))
        sys.exit(-1)
    plogger.setup_loggers(config.get("Logging", {}))
    # capture anything printed to stdout/stderr into the logging framework
    sys.stdout = plogger.stream_to_logger(logging.getLogger("STDOUT"), logging.DEBUG)
    sys.stderr = plogger.stream_to_logger(logging.getLogger("STDERR"), logging.WARN)
    ParseCommandLine(config, remainder)
    LocalMain(config)
# -----------------------------------------------------------------------------
# Run only when executed as a script, so the module stays importable
# without side effects.
if __name__ == '__main__':
    Main()
| 40.718861 | 107 | 0.569743 |
import os
import sys
import time
import argparse
import random
import json
import logging
from service_client.generic import GenericServiceClient
import crypto.crypto as crypto
import utility.signature as signature
import worker.worker_details as worker
from shared_kv.shared_kv_interface import KvStorage
import utility.utility as enclave_helper
import utility.file_utils as futils
from error_code.error_status import SignatureStatus, WorkOrderStatus
TCFHOME = os.environ.get("TCF_HOME", "../../")
logger = logging.getLogger(__name__)
def LocalMain(config):
    '''
    Drive the work order flow against the TCF listener at the module-global
    server_uri: either replay every JSON request file found in
    input_json_dir (in sorted order), or send the single request read into
    input_json_str.  Work order submissions are signed with the client key,
    results are polled for and their signatures verified.  Exits the
    process when done (or on any fatal error).
    '''
    # Validate that ParseCommandLine() supplied everything we need.
    if not input_json_str and not input_json_dir:
        logger.error("JSON input file is not provided")
        exit(1)
    if not output_json_file_name:
        logger.error("JSON output file is not provided")
        exit(1)
    if not server_uri:
        logger.error("Server URI is not provided")
        exit(1)
    logger.info("Execute work order")
    uri_client = GenericServiceClient(server_uri)
    response = None
    wo_id = None
    if input_json_dir:
        directory = os.fsencode(input_json_dir)
        files = os.listdir(directory)
        for file in sorted(files):
            logger.info("---------------Input file name: %s ---------------\n",
                        file.decode("utf-8"))
            input_json_str1 = futils.read_json_file((directory.decode("utf-8") + file.decode("utf-8")))
            # For submissions: patch in fresh ids and sign the request with
            # the requester's private signing key.
            if "WorkOrderSubmit" in input_json_str1:
                input_json_obj = json.loads(input_json_str1)
                wo_id = hex(random.randint(1, 2**64 - 1))
                input_json_obj["params"]["workOrderId"] = wo_id
                input_json_obj["params"]["workerId"] = worker_obj.worker_id
                # Convert workloadId to a hex string and update the request
                workload_id = input_json_obj["params"]["workloadId"]
                workload_id_hex = workload_id.encode("UTF-8").hex()
                input_json_obj["params"]["workloadId"] = workload_id_hex
                input_json_str1 = json.dumps(input_json_obj)
                # Generate a session iv and an encrypted session key
                session_iv = enclave_helper.generate_iv()
                session_key = enclave_helper.generate_key()
                encrypted_session_key = enclave_helper.generate_encrypted_key(session_key,
                        worker_obj.encryption_key)
                input_json_str1, status = sig_obj.generate_client_signature(input_json_str1,
                        worker_obj, private_key, session_key, session_iv,
                        encrypted_session_key)
                if status != SignatureStatus.PASSED:
                    logger.info("Generate signature failed\n")
                    exit(1)
                if input_json_str1 is None:
                    continue
            # Propagate the worker id learned from a prior "WorkerRetrieve"
            # response into subsequent requests.
            if response:
                if "workerId" in input_json_str1:
                    if "result" in response and "ids" in response["result"].keys():
                        input_json_final = json.loads(input_json_str1)
                        worker_id = response["result"]["ids"][0]
                        input_json_final["params"]["workerId"] = worker_id
                        input_json_str1 = json.dumps(input_json_final)
                        logger.info("**********Worker details Updated with "
                                    "Worker ID*********\n%s\n", input_json_str1)
            # Bug fix: the second operand was the bare string literal
            # "WorkOrderReceiptRetrieve" (always truthy), so every request
            # had workOrderId overwritten.  Only result/receipt lookups
            # should carry the submitted work order id.
            if "WorkOrderGetResult" in input_json_str1 or "WorkOrderReceiptRetrieve" in input_json_str1:
                input_json_obj = json.loads(input_json_str1)
                input_json_obj["params"]["workOrderId"] = wo_id
                input_json_str1 = json.dumps(input_json_obj)
            logger.info("*********Request Json********* \n%s\n", input_json_str1)
            response = uri_client._postmsg(input_json_str1)
            logger.info("**********Received Response*********\n%s\n", response)
            # Worker details are loaded into worker_obj
            if "WorkerRetrieve" in input_json_str1 and "result" in response:
                worker_obj.load_worker(response)
            # Poll for the "WorkOrderGetResult" until a result (or a
            # non-PENDING error) comes back.
            while("WorkOrderGetResult" in input_json_str1 and "result" not in response):
                if response["error"]["code"] != WorkOrderStatus.PENDING:
                    break
                response = uri_client._postmsg(input_json_str1)
                logger.info("Received Response : %s, \n \n ", response)
                time.sleep(3)
            # Verify the enclave's signature over the result and decrypt it.
            if ("WorkOrderGetResult" in input_json_str1):
                if "error" in response:
                    # Response has error, hence skip signature verification
                    logger.info("Work order response has error, "
                                "skipping signature verification")
                    continue
                sig_bool = sig_obj.verify_signature(response, worker_obj.verification_key)
                try:
                    if sig_bool > 0:
                        logger.info("Signature Verified")
                        enclave_helper.decrypted_response(response,
                                session_key, session_iv)
                    else:
                        logger.info("Signature verification Failed")
                        exit(1)
                # Bug fix: narrowed from a bare "except:" which also caught
                # KeyboardInterrupt/SystemExit (including the exit(1) above).
                except Exception:
                    logger.error("ERROR: Failed to analyze Signature Verification")
                    exit(1)
    else:
        # Single-request mode: send input_json_str as-is.
        logger.info("Input Request %s", input_json_str)
        response = uri_client._postmsg(input_json_str)
        logger.info("Received Response : %s , \n \n ", response)
    exit(0)
# -----------------------------------------------------------------------------
def ParseCommandLine(config, args):
logger.info('***************** TRUSTED COMPUTE FRAMEWORK (TCF)*****************')
global input_json_str
global input_json_dir
global server_uri
global output_json_file_name
global consensus_file_name
global sig_obj
global worker_obj
global private_key
global encrypted_session_key
global session_iv
parser = argparse.ArgumentParser()
parser.add_argument("--logfile", help="Name of the log file, __screen__ for standard output", type=str)
parser.add_argument("-p", "--private_key",
help="Private Key of the Client", type=str, default=None)
parser.add_argument("--loglevel", help="Logging level", type=str)
parser.add_argument("-i", "--input_file", help="JSON input file name", type=str, default="input.json")
parser.add_argument("--input_dir", help="Logging level", type=str, default=[])
parser.add_argument(
"-c", "--connect_uri", help="URI to send requests to", type=str, default=[])
parser.add_argument(
"output_file",
help="JSON output file name",
type=str,
default="output.json",
nargs="?")
options = parser.parse_args(args)
if config.get("Logging") is None:
config["Logging"] = {
"LogFile": "__screen__",
"LogLevel": "INFO"
}
if options.logfile:
config["Logging"]["LogFile"] = options.logfile
if options.loglevel:
config["Logging"]["LogLevel"] = options.loglevel.upper()
input_json_str = None
input_json_dir = None
if options.connect_uri:
server_uri = options.connect_uri
else:
logger.error("ERROR: Please enter the server URI")
if options.input_dir:
logger.info("Load Json Directory from %s", options.input_dir)
input_json_dir = options.input_dir
elif options.input_file:
try:
logger.info("load JSON input from %s", options.input_file)
with open(options.input_file, "r") as file:
input_json_str = file.read()
except:
logger.error("ERROR: Failed to read from file %s", options.input_file)
else:
logger.info("No input found")
if options.output_file:
output_json_file_name = options.output_file
else:
output_json_file_name = None
if options.private_key:
private_key = options.private_key
else:
# Generating the private Key for the client
private_key = enclave_helper.generate_signing_keys()
# Initializing Signature object, Worker Object
sig_obj = signature.ClientSignature()
worker_obj = worker.SGXWorkerDetails()
# -----------------------------------------------------------------------------
def Main(args=None):
import config.config as pconfig
import utility.logger as plogger
# parse out the configuration file first
conffiles = ["tcs_config.toml"]
confpaths = [".", TCFHOME + "/config", "../../etc"]
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="configuration file", nargs="+")
parser.add_argument("--config-dir", help="configuration folder", nargs="+")
(options, remainder) = parser.parse_known_args(args)
if options.config:
conffiles = options.config
if options.config_dir:
confpaths = options.config_dir
try:
config = pconfig.parse_configuration_files(conffiles, confpaths)
json.dumps(config, indent=4)
except pconfig.ConfigurationException as e:
logger.error(str(e))
sys.exit(-1)
plogger.setup_loggers(config.get("Logging", {}))
sys.stdout = plogger.stream_to_logger(logging.getLogger("STDOUT"), logging.DEBUG)
sys.stderr = plogger.stream_to_logger(logging.getLogger("STDERR"), logging.WARN)
ParseCommandLine(config, remainder)
LocalMain(config)
# -----------------------------------------------------------------------------
Main()
| true | true |
1c461c7ae39191873d06db62c17134524c45c945 | 16,111 | py | Python | vstruct/defs/pcap.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | 716 | 2015-01-01T14:41:11.000Z | 2022-03-28T06:51:50.000Z | vstruct/defs/pcap.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | 266 | 2015-01-01T15:07:27.000Z | 2022-03-30T15:19:26.000Z | vstruct/defs/pcap.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | 159 | 2015-01-01T16:19:44.000Z | 2022-03-21T21:55:34.000Z | import logging
import vstruct
import vstruct.defs.inet as vs_inet
from vstruct.primitives import *
logger = logging.getLogger(__name__)
PCAP_LINKTYPE_ETHER = 1
PCAP_LINKTYPE_RAW = 101
PCAP_LINKTYPE_LINUX_SLL = 113
PCAP_DLT_RAW = 12
PCAPNG_BOM = 0x1A2B3C4D
OPT_ENDOFOPT = 0
OPT_COMMENT = 1
#PCAPNG_BLOCKTYPE_SECTION_HEADER options
OPT_SHB_HARDWARE = 2
OPT_SHB_OS = 3
OPT_SHB_USERAPPL = 4
#PCAPNG_INTERFACE_DESCRIPTION_BLOCK options
OPT_IF_NAME = 2
OPT_IF_DESCRIPTION = 3
OPT_IF_IPV4ADDR = 4
OPT_IF_IPV6ADDR = 5
OPT_IF_MACADDR = 6
OPT_IF_EUIADDR = 7
OPT_IF_SPEED = 8
OPT_IF_TSRESOL = 9
OPT_IF_TZONE = 10
OPT_IF_FILTER = 11
OPT_IF_OS = 12
OPT_IF_FCSLEN = 13
OPT_IF_TSOFFSET = 14
# options for PCAPNG_ENHANCED_PACKET_BLOCK
OPT_EPB_FLAGS = 2
OPT_EPB_HASH = 3
OPT_EPB_DROPCOUNT = 4
# values used in the blocktype field
PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION = 0x00000001
PCAPNG_BLOCKTYPE_PACKET = 0x00000002
PCAPNG_BLOCKTYPE_SIMPLE_PACKET = 0x00000003
PCAPNG_BLOCKTYPE_NAME_RESOLUTION = 0x00000004
PCAPNG_BLOCKTYPE_INTERFACE_STATS = 0x00000005
PCAPNG_BLOCKTYPE_ENHANCED_PACKET = 0x00000006
PCAPNG_BLOCKTYPE_SECTION_HEADER = 0x0a0d0d0a
def pad4bytes(size):
    '''
    Round *size* up to the next multiple of 4; pcapng blocks and options
    are aligned to 32-bit boundaries.
    '''
    remainder = size % 4
    if remainder:
        return size + (4 - remainder)
    return size
class PCAP_FILE_HEADER(vstruct.VStruct):
    '''
    Global header at the start of a classic libpcap (tcpdump) capture file.
    '''
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.magic = v_uint32()     # file magic / byte-order marker
        self.vers_maj = v_uint16()  # file format major version
        self.vers_min = v_uint16()  # file format minor version
        self.thiszone = v_uint32()  # GMT-to-local time correction (per libpcap format)
        self.sigfigs = v_uint32()   # timestamp accuracy (per libpcap format)
        self.snaplen = v_uint32()   # max bytes captured per packet
        self.linktype = v_uint32()  # link layer type (PCAP_LINKTYPE_*)
class PCAP_PACKET_HEADER(vstruct.VStruct):
    '''
    Per-packet record header in a classic libpcap capture file.
    '''
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.tvsec = v_uint32()   # timestamp: seconds
        self.tvusec = v_uint32()  # timestamp: microseconds
        self.caplen = v_uint32()  # number of packet bytes saved in the file
        self.len = v_uint32()     # original packet length on the wire
class PCAPNG_GENERIC_BLOCK_HEADER(vstruct.VStruct):
    '''
    Minimal pcapng block header (type + total length).  Used to peek at the
    next block while walking the file so the right block parser can be chosen.
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.blocktype = v_uint32(bigend=bigend)  # PCAPNG_BLOCKTYPE_* value
        self.blocksize = v_uint32(bigend=bigend)  # total block length, in bytes
class PCAPNG_BLOCK_PARENT(vstruct.VStruct):
    '''
    Base class for pcapng blocks that carry a variable-length option list
    at the end, followed by a duplicate copy of the block total length.
    Subclasses declare the fixed fields plus an 'options' VArray; vsParse()
    here parses the fixed part, then the options, then the trailing length.
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        #non-vstruct field, set during checking BOM
        self.bigend = False
    def vsParse(self, bytez, offset=0):
        startoff = offset
        # parse the subclass' fixed-layout fields first
        roff = vstruct.VStruct.vsParse(self, bytez, offset=offset)
        #(blocksize-4): because we still need the trailing blocksize2
        # apparently blocks can completely omit the options list and not
        # even have the OPT_ENDOFOPT entry
        while (roff < len(bytez)) and ((roff-startoff) < (self.blocksize-4)):
            opt = PCAPNG_OPTION(bigend=self.bigend)
            roff = opt.vsParse(bytez, roff)
            if opt.code == OPT_ENDOFOPT:
                break
            self.options.vsAddElement(opt)
        # append trailing blocksize2 (duplicate of the leading block length)
        bs2 = v_uint32(bigend=self.bigend)
        self.vsAddField('blocksize2', bs2)
        roff = bs2.vsParse(bytez, roff)
        # blocks are 32-bit aligned: pad the returned offset
        return pad4bytes(roff)
class PCAPNG_SECTION_HEADER_BLOCK(PCAPNG_BLOCK_PARENT):
    '''
    pcapng Section Header Block: starts each section of a capture and
    carries the byte-order magic (bom) used to parse everything that follows.
    '''
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.bom = v_uint32(bigend=bigend)          # byte-order magic, compared to PCAPNG_BOM
        self.vers_maj = v_uint16(bigend=bigend)
        self.vers_min = v_uint16(bigend=bigend)
        self.sectionsize = v_uint64(bigend=bigend)  # total section length
        self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
        #self.blocksize2 = v_uint32(bigend=bigend)
    def pcb_bom(self):
        # vstruct callback fired once 'bom' is parsed: infer file endianness
        # from whether the magic matched under the current field byte order.
        bom = self.vsGetField('bom')
        if self.bom == PCAPNG_BOM:
            #if it matches, then the endian of bom is correct
            self.bigend = bom._vs_bigend
        else:
            self.bigend = not bom._vs_bigend
class PCAPNG_OPTION(vstruct.VStruct):
    '''
    A single pcapng option record: 16-bit code, 16-bit payload size, then
    the payload padded to a 4-byte boundary.
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.code = v_uint16(bigend=bigend)     # OPT_* option code
        self.optsize = v_uint16(bigend=bigend)  # unpadded payload size
        self.bytes = v_bytes(0)                 # payload (sized in pcb_optsize)
    def pcb_optsize(self):
        # vstruct callback: size the payload field (with padding) once
        # optsize has been parsed.
        size = pad4bytes(self.optsize)
        self.vsGetField('bytes').vsSetLength(size)
class PCAPNG_INTERFACE_DESCRIPTION_BLOCK(PCAPNG_BLOCK_PARENT):
    '''
    pcapng Interface Description Block (IDB): link type, snap length, and
    per-interface options.  The timestamp resolution (OPT_IF_TSRESOL) and
    offset (OPT_IF_TSOFFSET) options are extracted during vsParse() since
    packet blocks need them to reconstruct libpcap-style timestamps.
    '''
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.linktype = v_uint16(bigend=bigend)   # PCAP_LINKTYPE_* value
        self.reserved = v_uint16(bigend=bigend)
        self.snaplen = v_uint32(bigend=bigend)
        self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
    def vsParse(self, bytez, offset=0):
        '''
        Parse the block, then pull tsresol/tsoffset out of the option list.
        '''
        # local import: 'struct' was previously reachable only through the
        # star-import of vstruct.primitives.
        import struct
        # Bug fix: honor the caller-supplied offset (was hardwired to 0).
        ret = PCAPNG_BLOCK_PARENT.vsParse(self, bytez, offset=offset)
        self.tsresol = None
        # default timestamp offset is 0 seconds
        self.tsoffset = 0
        for i, opt in self.options:
            if opt.code == OPT_IF_TSRESOL:
                # Bug fix: on py3, indexing bytes already yields an int and
                # ord() raised TypeError; keep py2 str compatibility too.
                tsr = opt.bytes[0]
                self.tsresol = tsr if isinstance(tsr, int) else ord(tsr)
            elif opt.code == OPT_IF_TSOFFSET:
                fmt = '<Q'
                if self.bigend:
                    fmt = '>Q'
                self.tsoffset = struct.unpack_from(fmt, opt.bytes)[0]
        return ret
class PCAPNG_ENHANCED_PACKET_BLOCK(PCAPNG_BLOCK_PARENT):
    '''
    pcapng Enhanced Packet Block: one captured packet with its interface id,
    a 64-bit timestamp split into hi/lo halves, capture/original lengths,
    packet data and options.
    '''
    def __init__(self, bigend=False):
        PCAPNG_BLOCK_PARENT.__init__(self, bigend)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.interfaceid = v_uint32(bigend=bigend)  # index into the section's IDB list
        self.tstamphi = v_uint32(bigend=bigend)     # high 32 bits of the timestamp
        self.tstamplow = v_uint32(bigend=bigend)    # low 32 bits of the timestamp
        self.caplen = v_uint32(bigend=bigend)       # bytes of packet data present
        self.packetlen = v_uint32(bigend=bigend)    # original length on the wire
        self.data = v_bytes(0)
        self.options = vstruct.VArray([])
        #blocksize2: dynamically added in vsParse()
    def pcb_caplen(self):
        # vstruct callback: size the data field (padded) once caplen is parsed
        size = pad4bytes(self.caplen)
        self.vsGetField('data').vsSetLength(size)
    def setPcapTimestamp(self, idb):
        '''
        Add libpcap-compatible tvsec/tvusec fields, derived from the pcapng
        64-bit timestamp and the interface's tsresol/tsoffset options.
        NOTE(review): tvusec is the remainder in resolution ticks; it is
        true microseconds only for the default 10^-6 resolution.
        '''
        self.snaplen = idb.snaplen
        tstamp = (self.tstamphi << 32) | self.tstamplow
        scale = 1000000
        if idb.tsresol is None:
            # if not set, capture assumes 10e-6 resolution
            pass
        elif (0x80 & idb.tsresol) == 0:
            # remaining bits are resolution, to a negative power of 10
            scale = 10**(idb.tsresol & 0x7f)
        else:
            # remaining bits are resolution, to a negative power of 2
            scale = 1 << (idb.tsresol & 0x7f)
        # Bug fix: use floor division; on py3 "/" made tvsec a float.
        self.tvsec = (tstamp // scale) + idb.tsoffset
        self.tvusec = tstamp % scale
class PCAPNG_SIMPLE_PACKET_BLOCK(vstruct.VStruct):
    '''
    pcapng Simple Packet Block: packet data with no timestamp, interface id
    or option list.
    Note: no variable length options fields, so inheriting from vstruct directly
    '''
    def __init__(self, bigend=False):
        vstruct.VStruct.__init__(self)
        self.blocktype = v_uint32(bigend=bigend)
        self.blocksize = v_uint32(bigend=bigend)
        self.packetlen = v_uint32(bigend=bigend)  # original packet length on the wire
        self.data = v_bytes(0)
        self.blocksize2 = v_uint32(bigend=bigend)
    def pcb_blocksize(self):
        # vstruct callback: the fixed fields total 16 bytes, so the (padded)
        # remainder of the block is packet data.
        self.caplen = pad4bytes(self.blocksize - 16)
        self.vsGetField('data').vsSetLength(self.caplen)
    def setPcapTimestamp(self, idb):
        #no timestamp in this type of block :(
        self.tvsec = idb.tsoffset
        self.tvusec = 0
def iterPcapFileName(filename, reuse=False):
    '''
    Convenience wrapper: open *filename* in binary mode and yield packet
    tuples from iterPcapFile(), closing the file when iteration ends.
    '''
    with open(filename, 'rb') as fd:
        yield from iterPcapFile(fd, reuse=reuse)
def iterPcapFile(fd, reuse=False):
    '''
    Sniff the capture format from the first header and return the matching
    packet iterator: pcapng files start with a Section Header Block magic,
    anything else is treated as classic tcpdump/libpcap format.
    '''
    hdr = PCAP_FILE_HEADER()
    hdr.vsParse(fd.read(len(hdr)), fast=True)
    # rewind so the chosen iterator re-reads the header itself
    fd.seek(0)
    if hdr.magic == PCAPNG_BLOCKTYPE_SECTION_HEADER:
        return _iterPcapNgFile(fd, reuse)
    return _iterPcapFile(fd, reuse)
def _iterPcapFile(fd, reuse=False):
    '''
    Yield (pkthdr, ipv4, transport_hdr, pdata) tuples from a classic
    libpcap capture.  Only Ethernet (optionally VLAN tagged) and raw-IP
    link types are handled, and only IPv4 with TCP/UDP/ICMP transports.
    When reuse=True the same vstruct header objects are re-parsed and
    re-yielded on every iteration (less allocation; callers must not hold
    references across iterations).
    '''
    h = PCAP_FILE_HEADER()
    b = fd.read(len(h))
    h.vsParse(b, fast=True)
    linktype = h.linktype
    if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
        raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
    pkt = PCAP_PACKET_HEADER()
    eII = vs_inet.ETHERII()
    pktsize = len(pkt)
    eIIsize = len(eII)
    ipv4 = vs_inet.IPv4()
    ipv4size = 20
    tcp_hdr = vs_inet.TCP()
    udp_hdr = vs_inet.UDP()
    icmp_hdr = vs_inet.ICMP()
    go = True
    while go:
        # read the per-record header; a short read means end of file
        hdr = fd.read(pktsize)
        if len(hdr) != pktsize:
            break
        pkt.vsParse(hdr, fast=True)
        b = fd.read(pkt.caplen)
        offset = 0
        if linktype == PCAP_LINKTYPE_ETHER:
            if len(b) < eIIsize:
                continue
            eII.vsParse(b, 0, fast=True)
            # No support for non-ip protocol yet...
            if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
                continue
            offset += eIIsize
            # skip the 4-byte 802.1Q tag if present
            if eII.etype == vs_inet.ETH_P_VLAN:
                offset += 4
        elif linktype == PCAP_LINKTYPE_RAW:
            # raw link type: packet data starts directly with the IP header
            pass
        if not reuse:
            ipv4 = vs_inet.IPv4()
        if (len(b) - offset) < ipv4size:
            continue
        ipv4.vsParse(b, offset, fast=True)
        # Make b *only* the IP datagram bytes...
        b = b[offset:offset+ipv4.totlen]
        offset = 0
        offset += len(ipv4)
        # tsize: bytes remaining for the transport header + payload
        tsize = len(b) - offset
        if ipv4.proto == vs_inet.IPPROTO_TCP:
            if tsize < 20:
                continue
            if not reuse:
                tcp_hdr = vs_inet.TCP()
            tcp_hdr.vsParse(b, offset, fast=True)
            offset += len(tcp_hdr)
            pdata = b[offset:]
            yield pkt,ipv4,tcp_hdr,pdata
        elif ipv4.proto == vs_inet.IPPROTO_UDP:
            if tsize < 8:
                continue
            if not reuse:
                udp_hdr = vs_inet.UDP()
            udp_hdr.vsParse(b, offset, fast=True)
            offset += len(udp_hdr)
            pdata = b[offset:]
            yield pkt,ipv4,udp_hdr,pdata
        elif ipv4.proto == vs_inet.IPPROTO_ICMP:
            if tsize < 4:
                continue
            if not reuse:
                icmp_hdr = vs_inet.ICMP()
            icmp_hdr.vsParse(b, offset, fast=True)
            offset += len(icmp_hdr)
            pdata = b[offset:]
            yield pkt,ipv4,icmp_hdr,pdata
        else:
            logger.warning('UNHANDLED IP PROTOCOL: %d', ipv4.proto)
def _iterPcapNgFile(fd, reuse=False):
    '''
    Yield the same (pkthdr, ipv4, transport_hdr, pdata) tuples as
    _iterPcapFile(), but from a pcapng capture.  Walks the block stream,
    tracking the section byte order and interface description blocks so
    packet timestamps can be reconstructed.
    '''
    header = PCAPNG_GENERIC_BLOCK_HEADER()
    ifaceidx = 0
    ifacedict = {}
    roff = 0
    bigend = False
    # peek at the next block header without consuming it
    curroff = fd.tell()
    b0 = fd.read(len(header))
    fd.seek(curroff)
    while len(b0) == len(header):
        header.vsParse(b0, fast=True)
        body = fd.read(header.blocksize)
        if header.blocktype == PCAPNG_BLOCKTYPE_SECTION_HEADER:
            shb = PCAPNG_SECTION_HEADER_BLOCK()
            roff = shb.vsParse(body)
            bigend = shb.bigend
            # reset interface state: interface ids are per-section
            ifaceidx = 0
            ifacedict = {}
        elif header.blocktype == PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION:
            idb = PCAPNG_INTERFACE_DESCRIPTION_BLOCK(bigend)
            roff = idb.vsParse(body)
            # save off the interface for later reference
            ifacedict[ifaceidx] = idb
            ifaceidx += 1
        elif header.blocktype == PCAPNG_BLOCKTYPE_SIMPLE_PACKET:
            spb = PCAPNG_SIMPLE_PACKET_BLOCK(bigend)
            roff = spb.vsParse(body)
            # Bug fix: this branch referenced a possibly-unbound 'iface'
            # local (NameError before any enhanced packet was seen) and
            # never set the libpcap timestamp fields.  Simple Packet
            # Blocks implicitly belong to the section's first interface.
            iface = ifacedict.get(0)
            if iface is None:
                logger.warning('Simple packet block before any interface description, skipping')
            else:
                spb.setPcapTimestamp(iface)
                tup = _parsePcapngPacketBytes(iface.linktype, spb)
                if tup is not None:
                    # if tup is None, just fall through & read next block
                    yield tup
        elif header.blocktype == PCAPNG_BLOCKTYPE_ENHANCED_PACKET:
            epb = PCAPNG_ENHANCED_PACKET_BLOCK(bigend)
            roff = epb.vsParse(body)
            iface = ifacedict.get(epb.interfaceid)
            # Bug fix: guard against packets that name an unknown interface
            # (previously raised AttributeError on None).
            if iface is None:
                logger.warning('Packet references unknown interface %d, skipping', epb.interfaceid)
            else:
                epb.setPcapTimestamp(iface)
                tup = _parsePcapngPacketBytes(iface.linktype, epb)
                if tup is not None:
                    # if tup is None, just fall through & read next block
                    yield tup
        #TODO: other blocks needed?
        #PCAPNG_BLOCKTYPE_PACKET (obsolete)
        #PCAPNG_BLOCKTYPE_NAME_RESOLUTION:
        #PCAPNG_BLOCKTYPE_INTERFACE_STATS:
        else:
            logger.warning('Unknown block type: 0x%08x: 0x%08x 0x%08x bytes', roff, header.blocktype, header.blocksize)
        # peek at the next block header
        curroff = fd.tell()
        b0 = fd.read(len(header))
        fd.seek(curroff)
def _parsePcapngPacketBytes(linktype, pkt):
    '''
    pkt is either a parsed PCAPNG_SIMPLE_PACKET_BLOCK or PCAPNG_ENHANCED_PACKET_BLOCK.
    On success returns the tuple (pcapng_pkt, ipv4_vstruct, transport_vstruct, pdata);
    returns None if the packet can't be parsed (non-IPv4, truncated, or an
    unsupported transport protocol).
    '''
    if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
        raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
    #pkt = PCAP_PACKET_HEADER()
    eII = vs_inet.ETHERII()
    eIIsize = len(eII)
    offset = 0
    if linktype == PCAP_LINKTYPE_ETHER:
        if len(pkt.data) < eIIsize:
            return None
        eII.vsParse(pkt.data, 0, fast=True)
        # No support for non-ip protocol yet...
        if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
            return None
        offset += eIIsize
        # skip the 4-byte 802.1Q tag if present
        if eII.etype == vs_inet.ETH_P_VLAN:
            offset += 4
    elif linktype == PCAP_LINKTYPE_RAW:
        # raw link type: packet data starts directly with the IP header
        pass
    ipv4 = vs_inet.IPv4()
    if (len(pkt.data) - offset) < len(ipv4):
        return None
    ipv4.vsParse(pkt.data, offset, fast=True)
    # Make b *only* the IP datagram bytes...
    b = pkt.data[offset:offset+ipv4.totlen]
    offset = 0
    offset += len(ipv4)
    # tsize: bytes remaining for the transport header + payload
    tsize = len(b) - offset
    if ipv4.proto == vs_inet.IPPROTO_TCP:
        if tsize < 20:
            return None
        tcp_hdr = vs_inet.TCP()
        tcp_hdr.vsParse(b, offset, fast=True)
        offset += len(tcp_hdr)
        pdata = b[offset:]
        return pkt,ipv4,tcp_hdr,pdata
    elif ipv4.proto == vs_inet.IPPROTO_UDP:
        if tsize < 8:
            return None
        udp_hdr = vs_inet.UDP()
        udp_hdr.vsParse(b, offset, fast=True)
        offset += len(udp_hdr)
        pdata = b[offset:]
        return pkt,ipv4,udp_hdr,pdata
    elif ipv4.proto == vs_inet.IPPROTO_ICMP:
        if tsize < 4:
            return None
        icmp_hdr = vs_inet.ICMP()
        icmp_hdr.vsParse(b, offset, fast=True)
        offset += len(icmp_hdr)
        pdata = b[offset:]
        return pkt,ipv4,icmp_hdr,pdata
    else:
        logger.warning('UNHANDLED IP PROTOCOL: %d', ipv4.proto)
    return None
| 32.547475 | 119 | 0.597232 | import logging
import vstruct
import vstruct.defs.inet as vs_inet
from vstruct.primitives import *
logger = logging.getLogger(__name__)
PCAP_LINKTYPE_ETHER = 1
PCAP_LINKTYPE_RAW = 101
PCAP_LINKTYPE_LINUX_SLL = 113
PCAP_DLT_RAW = 12
PCAPNG_BOM = 0x1A2B3C4D
OPT_ENDOFOPT = 0
OPT_COMMENT = 1
OPT_SHB_HARDWARE = 2
OPT_SHB_OS = 3
OPT_SHB_USERAPPL = 4
OPT_IF_NAME = 2
OPT_IF_DESCRIPTION = 3
OPT_IF_IPV4ADDR = 4
OPT_IF_IPV6ADDR = 5
OPT_IF_MACADDR = 6
OPT_IF_EUIADDR = 7
OPT_IF_SPEED = 8
OPT_IF_TSRESOL = 9
OPT_IF_TZONE = 10
OPT_IF_FILTER = 11
OPT_IF_OS = 12
OPT_IF_FCSLEN = 13
OPT_IF_TSOFFSET = 14
OPT_EPB_FLAGS = 2
OPT_EPB_HASH = 3
OPT_EPB_DROPCOUNT = 4
PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION = 0x00000001
PCAPNG_BLOCKTYPE_PACKET = 0x00000002
PCAPNG_BLOCKTYPE_SIMPLE_PACKET = 0x00000003
PCAPNG_BLOCKTYPE_NAME_RESOLUTION = 0x00000004
PCAPNG_BLOCKTYPE_INTERFACE_STATS = 0x00000005
PCAPNG_BLOCKTYPE_ENHANCED_PACKET = 0x00000006
PCAPNG_BLOCKTYPE_SECTION_HEADER = 0x0a0d0d0a
def pad4bytes(size):
if (size % 4) == 0:
return size
return size + (4 -( size % 4))
class PCAP_FILE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.magic = v_uint32()
self.vers_maj = v_uint16()
self.vers_min = v_uint16()
self.thiszone = v_uint32()
self.sigfigs = v_uint32()
self.snaplen = v_uint32()
self.linktype = v_uint32()
class PCAP_PACKET_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.tvsec = v_uint32()
self.tvusec = v_uint32()
self.caplen = v_uint32()
self.len = v_uint32()
class PCAPNG_GENERIC_BLOCK_HEADER(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
class PCAPNG_BLOCK_PARENT(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.bigend = False
def vsParse(self, bytez, offset=0):
startoff = offset
roff = vstruct.VStruct.vsParse(self, bytez, offset=offset)
while (roff < len(bytez)) and ((roff-startoff) < (self.blocksize-4)):
opt = PCAPNG_OPTION(bigend=self.bigend)
roff = opt.vsParse(bytez, roff)
if opt.code == OPT_ENDOFOPT:
break
self.options.vsAddElement(opt)
bs2 = v_uint32(bigend=self.bigend)
self.vsAddField('blocksize2', bs2)
roff = bs2.vsParse(bytez, roff)
return pad4bytes(roff)
class PCAPNG_SECTION_HEADER_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.bom = v_uint32(bigend=bigend)
self.vers_maj = v_uint16(bigend=bigend)
self.vers_min = v_uint16(bigend=bigend)
self.sectionsize = v_uint64(bigend=bigend)
self.options = vstruct.VArray([])
def pcb_bom(self):
bom = self.vsGetField('bom')
if self.bom == PCAPNG_BOM:
self.bigend = bom._vs_bigend
else:
self.bigend = not bom._vs_bigend
class PCAPNG_OPTION(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.code = v_uint16(bigend=bigend)
self.optsize = v_uint16(bigend=bigend)
self.bytes = v_bytes(0)
def pcb_optsize(self):
size = pad4bytes(self.optsize)
self.vsGetField('bytes').vsSetLength(size)
class PCAPNG_INTERFACE_DESCRIPTION_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.linktype = v_uint16(bigend=bigend)
self.reserved = v_uint16(bigend=bigend)
self.snaplen = v_uint32(bigend=bigend)
self.options = vstruct.VArray([])
def vsParse(self, bytez, offset=0):
ret = PCAPNG_BLOCK_PARENT.vsParse(self, bytez, offset=0)
self.tsresol = None
self.tsoffset = 0
for i, opt in self.options:
if opt.code == OPT_IF_TSRESOL:
self.tsresol = ord(opt.bytes[0])
elif opt.code == OPT_IF_TSOFFSET:
fmt = '<Q'
if self.bigend:
fmt = '>Q'
self.tsoffset = struct.unpack_from(fmt, opt.bytes)[0]
return ret
class PCAPNG_ENHANCED_PACKET_BLOCK(PCAPNG_BLOCK_PARENT):
def __init__(self, bigend=False):
PCAPNG_BLOCK_PARENT.__init__(self, bigend)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.interfaceid = v_uint32(bigend=bigend)
self.tstamphi = v_uint32(bigend=bigend)
self.tstamplow = v_uint32(bigend=bigend)
self.caplen = v_uint32(bigend=bigend)
self.packetlen = v_uint32(bigend=bigend)
self.data = v_bytes(0)
self.options = vstruct.VArray([])
def pcb_caplen(self):
size = pad4bytes(self.caplen)
self.vsGetField('data').vsSetLength(size)
def setPcapTimestamp(self, idb):
self.snaplen = idb.snaplen
tstamp = (self.tstamphi << 32) | self.tstamplow
scale = 1000000
if idb.tsresol is None:
pass
elif (0x80 & idb.tsresol) == 0:
scale = 10**(idb.tsresol & 0x7f)
else:
scale = 1 << (idb.tsresol & 0x7f)
self.tvsec = (tstamp / scale) + idb.tsoffset
self.tvusec = tstamp % scale
class PCAPNG_SIMPLE_PACKET_BLOCK(vstruct.VStruct):
def __init__(self, bigend=False):
vstruct.VStruct.__init__(self)
self.blocktype = v_uint32(bigend=bigend)
self.blocksize = v_uint32(bigend=bigend)
self.packetlen = v_uint32(bigend=bigend)
self.data = v_bytes(0)
self.blocksize2 = v_uint32(bigend=bigend)
def pcb_blocksize(self):
self.caplen = pad4bytes(self.blocksize - 16)
self.vsGetField('data').vsSetLength(self.caplen)
def setPcapTimestamp(self, idb):
self.tvsec = idb.tsoffset
self.tvusec = 0
def iterPcapFileName(filename, reuse=False):
with open(filename, 'rb') as fd:
for x in iterPcapFile(fd, reuse=reuse):
yield x
def iterPcapFile(fd, reuse=False):
h = PCAP_FILE_HEADER()
b = fd.read(len(h))
h.vsParse(b, fast=True)
fd.seek(0)
if h.magic == PCAPNG_BLOCKTYPE_SECTION_HEADER:
return _iterPcapNgFile(fd, reuse)
return _iterPcapFile(fd, reuse)
def _iterPcapFile(fd, reuse=False):
h = PCAP_FILE_HEADER()
b = fd.read(len(h))
h.vsParse(b, fast=True)
linktype = h.linktype
if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
pkt = PCAP_PACKET_HEADER()
eII = vs_inet.ETHERII()
pktsize = len(pkt)
eIIsize = len(eII)
ipv4 = vs_inet.IPv4()
ipv4size = 20
tcp_hdr = vs_inet.TCP()
udp_hdr = vs_inet.UDP()
icmp_hdr = vs_inet.ICMP()
go = True
while go:
hdr = fd.read(pktsize)
if len(hdr) != pktsize:
break
pkt.vsParse(hdr, fast=True)
b = fd.read(pkt.caplen)
offset = 0
if linktype == PCAP_LINKTYPE_ETHER:
if len(b) < eIIsize:
continue
eII.vsParse(b, 0, fast=True)
if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
continue
offset += eIIsize
if eII.etype == vs_inet.ETH_P_VLAN:
offset += 4
elif linktype == PCAP_LINKTYPE_RAW:
pass
if not reuse:
ipv4 = vs_inet.IPv4()
if (len(b) - offset) < ipv4size:
continue
ipv4.vsParse(b, offset, fast=True)
b = b[offset:offset+ipv4.totlen]
offset = 0
offset += len(ipv4)
tsize = len(b) - offset
if ipv4.proto == vs_inet.IPPROTO_TCP:
if tsize < 20:
continue
if not reuse:
tcp_hdr = vs_inet.TCP()
tcp_hdr.vsParse(b, offset, fast=True)
offset += len(tcp_hdr)
pdata = b[offset:]
yield pkt,ipv4,tcp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_UDP:
if tsize < 8:
continue
if not reuse:
udp_hdr = vs_inet.UDP()
udp_hdr.vsParse(b, offset, fast=True)
offset += len(udp_hdr)
pdata = b[offset:]
yield pkt,ipv4,udp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_ICMP:
if tsize < 4:
continue
if not reuse:
icmp_hdr = vs_inet.ICMP()
icmp_hdr.vsParse(b, offset, fast=True)
offset += len(icmp_hdr)
pdata = b[offset:]
yield pkt,ipv4,icmp_hdr,pdata
else:
logger.warning('UNHANDLED IP PROTOCOL: %d', ipv4.proto)
def _iterPcapNgFile(fd, reuse=False):
header = PCAPNG_GENERIC_BLOCK_HEADER()
ifaceidx = 0
ifacedict = {}
roff = 0
bigend = False
curroff = fd.tell()
b0 = fd.read(len(header))
fd.seek(curroff)
while len(b0) == len(header):
header.vsParse(b0, fast=True)
body = fd.read(header.blocksize)
if header.blocktype == PCAPNG_BLOCKTYPE_SECTION_HEADER:
shb = PCAPNG_SECTION_HEADER_BLOCK()
roff = shb.vsParse(body)
bigend = shb.bigend
ifaceidx = 0
ifacedict = {}
elif header.blocktype == PCAPNG_BLOCKTYPE_INTERFACE_DESCRIPTION:
idb = PCAPNG_INTERFACE_DESCRIPTION_BLOCK(bigend)
roff = idb.vsParse(body)
#save off the interface for later reference
ifacedict[ifaceidx] = idb
ifaceidx += 1
elif header.blocktype == PCAPNG_BLOCKTYPE_SIMPLE_PACKET:
spb = PCAPNG_SIMPLE_PACKET_BLOCK(bigend)
roff = spb.vsParse(body)
tup = _parsePcapngPacketBytes(iface.linktype, spb)
if tup is not None:
#if it is None, just fall through & read next block
yield tup
elif header.blocktype == PCAPNG_BLOCKTYPE_ENHANCED_PACKET:
epb = PCAPNG_ENHANCED_PACKET_BLOCK(bigend)
roff = epb.vsParse(body)
iface = ifacedict.get(epb.interfaceid)
epb.setPcapTimestamp(iface)
tup = _parsePcapngPacketBytes(iface.linktype, epb)
if tup is not None:
#if tup is None, just fall through & read next block
yield tup
#TODO: other blocks needed?
#PCAPNG_BLOCKTYPE_PACKET (obsolete)
#PCAPNG_BLOCKTYPE_NAME_RESOLUTION:
#PCAPNG_BLOCKTYPE_INTERFACE_STATS:
else:
logger.warning('Unknown block type: 0x%08x: 0x%08x 0x%08x bytes', roff, header.blocktype, header.blocksize)
curroff = fd.tell()
b0 = fd.read(len(header))
fd.seek(curroff)
def _parsePcapngPacketBytes(linktype, pkt):
if linktype not in (PCAP_LINKTYPE_ETHER, PCAP_LINKTYPE_RAW):
raise Exception('PCAP Link Type %d Not Supported Yet!' % linktype)
#pkt = PCAP_PACKET_HEADER()
eII = vs_inet.ETHERII()
eIIsize = len(eII)
offset = 0
if linktype == PCAP_LINKTYPE_ETHER:
if len(pkt.data) < eIIsize:
return None
eII.vsParse(pkt.data, 0, fast=True)
# No support for non-ip protocol yet...
if eII.etype not in (vs_inet.ETH_P_IP,vs_inet.ETH_P_VLAN):
return None
offset += eIIsize
if eII.etype == vs_inet.ETH_P_VLAN:
offset += 4
elif linktype == PCAP_LINKTYPE_RAW:
pass
ipv4 = vs_inet.IPv4()
if (len(pkt.data) - offset) < len(ipv4):
return None
ipv4.vsParse(pkt.data, offset, fast=True)
# Make b *only* the IP datagram bytes...
b = pkt.data[offset:offset+ipv4.totlen]
offset = 0
offset += len(ipv4)
tsize = len(b) - offset
if ipv4.proto == vs_inet.IPPROTO_TCP:
if tsize < 20:
return None
tcp_hdr = vs_inet.TCP()
tcp_hdr.vsParse(b, offset, fast=True)
offset += len(tcp_hdr)
pdata = b[offset:]
return pkt,ipv4,tcp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_UDP:
if tsize < 8:
return None
udp_hdr = vs_inet.UDP()
udp_hdr.vsParse(b, offset, fast=True)
offset += len(udp_hdr)
pdata = b[offset:]
return pkt,ipv4,udp_hdr,pdata
elif ipv4.proto == vs_inet.IPPROTO_ICMP:
if tsize < 4:
return None
icmp_hdr = vs_inet.ICMP()
icmp_hdr.vsParse(b, offset, fast=True)
offset += len(icmp_hdr)
pdata = b[offset:]
return pkt,ipv4,icmp_hdr,pdata
else:
logger.warning('UNHANDLED IP PROTOCOL: %d', ipv4.proto)
return None
| true | true |
1c461db4bc60cf1e92582559dd48bd01ee94d6f7 | 456 | py | Python | src/util/__init__.py | seahrh/coding-interview | 517d19e7e88c02acec4aa6336bc20206ce3f1897 | [
"MIT"
] | null | null | null | src/util/__init__.py | seahrh/coding-interview | 517d19e7e88c02acec4aa6336bc20206ce3f1897 | [
"MIT"
] | null | null | null | src/util/__init__.py | seahrh/coding-interview | 517d19e7e88c02acec4aa6336bc20206ce3f1897 | [
"MIT"
] | null | null | null | from typing import Iterable
# skip mypy check because open issue https://github.com/python/typing/issues/760
def argmin(elements: Iterable) -> int:
"""Returns first index of smallest element."""
return min(enumerate(elements), key=lambda x: x[1])[0] # type: ignore
def argmax(elements: Iterable) -> int:
"""Returns first index of largest element."""
return max(enumerate(elements), key=lambda x: x[1])[0] # type: ignore
| 32.571429 | 81 | 0.677632 | from typing import Iterable
def argmin(elements: Iterable) -> int:
return min(enumerate(elements), key=lambda x: x[1])[0]
def argmax(elements: Iterable) -> int:
return max(enumerate(elements), key=lambda x: x[1])[0]
| true | true |
1c461e2d8f683c54e0e3cf71b790ddfb6dc91f8a | 2,131 | py | Python | opencv_disparity/test.py | salihmarangoz/StereoDepthEstimation | a068df34329ee0642b5eb4277dedcd7012d78b4d | [
"MIT"
] | null | null | null | opencv_disparity/test.py | salihmarangoz/StereoDepthEstimation | a068df34329ee0642b5eb4277dedcd7012d78b4d | [
"MIT"
] | null | null | null | opencv_disparity/test.py | salihmarangoz/StereoDepthEstimation | a068df34329ee0642b5eb4277dedcd7012d78b4d | [
"MIT"
] | null | null | null | ##################################################################################
# SOURCE: https://github.com/aliyasineser/stereoDepth/blob/master/stereo_depth.py
##################################################################################
import numpy as np
import cv2 as cv
import cv2
from matplotlib import pyplot as plt
def depth_map(imgL, imgR):
""" Depth map calculation. Works with SGBM and WLS. Need rectified images, returns depth map ( left to right disparity ) """
# SGBM Parameters -----------------
window_size = 3 # wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
left_matcher = cv2.StereoSGBM_create(
minDisparity=0,
numDisparities=12*16, # max_disp has to be dividable by 16 f. E. HH 192, 256
blockSize=window_size,
P1=8 * 5 * window_size,
# wsize default 3; 5; 7 for SGBM reduced size image; 15 for SGBM full size image (1300px and above); 5 Works nicely
P2=32 * 5 * window_size,
disp12MaxDiff=12,
uniquenessRatio=10,
speckleWindowSize=50,
speckleRange=32,
preFilterCap=63,
mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY
)
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
# FILTER Parameters
lmbda = 80000
sigma = 1.3
visual_multiplier = 6
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)
displ = left_matcher.compute(imgL, imgR) # .astype(np.float32)/16
dispr = right_matcher.compute(imgR, imgL) # .astype(np.float32)/16
displ = np.int16(displ)
dispr = np.int16(dispr)
filteredImg = wls_filter.filter(displ, imgL, None, dispr) # important to put "imgL" here!!!
filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX);
filteredImg = np.uint8(filteredImg)
return filteredImg
imgL = cv.imread('l.png',0)
imgR = cv.imread('r.png',0)
disparity = depth_map(imgL, imgR)
plt.imshow(disparity,'gray')
plt.show() | 38.745455 | 136 | 0.63679 | true | true | |
1c461f5be0efef6234d9d0aa8c49ba9cdafb8ecd | 10,102 | py | Python | tests/unit/fs.py | ach3/fibratus | 655f0e6cee88caff4f75488fd90bf1bb00693847 | [
"Apache-2.0"
] | null | null | null | tests/unit/fs.py | ach3/fibratus | 655f0e6cee88caff4f75488fd90bf1bb00693847 | [
"Apache-2.0"
] | null | null | null | tests/unit/fs.py | ach3/fibratus | 655f0e6cee88caff4f75488fd90bf1bb00693847 | [
"Apache-2.0"
] | 1 | 2022-03-07T08:05:34.000Z | 2022-03-07T08:05:34.000Z | # Copyright 2015 by Nedim Sabic (RabbitStack)
# All Rights Reserved.
# http://rabbitstack.github.io
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest.mock import Mock
import pytest
from fibratus.common import DotD as dd, NA
from fibratus.fs import FsIO, FileOps
from fibratus.handle import HandleInfo, HandleType
from fibratus.kevent import KEvent
from fibratus.kevent_types import CREATE_FILE, DELETE_FILE, WRITE_FILE, RENAME_FILE, SET_FILE_INFORMATION
from fibratus.thread import ThreadRegistry
@pytest.fixture(scope='module')
def kevent():
return KEvent(Mock(spec_set=ThreadRegistry))
@pytest.fixture(scope='module')
def fsio(kevent):
handles = [HandleInfo(3080, 18446738026482168384, HandleType.DIRECTORY,
"\\Device\\HarddiskVolume2\\Users\\Nedo\\AppData\\Local\\VirtualStore", 640),
HandleInfo(2010, 18446738023471035392, HandleType.FILE,
"\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll", 640)]
fsio = FsIO(kevent, handles)
fsio.file_pool[18446738026474426144] = '\\Device\\HarddiskVolume2\\fibratus.log'
return fsio
class TestFsIO():
def test_init_fsio(self, fsio):
assert len(fsio.file_handles) == 2
@pytest.mark.parametrize('expected_op, kfsio',
[(FileOps.SUPERSEDE, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 1223456,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 1, "file_attributes": 0})),
(FileOps.OPEN, dd({"file_object": 18446738026482168384, "ttid": 1484, "process_id": 859,
"create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 2, "file_attributes": 0})),
(FileOps.CREATE, dd({"file_object": 18446738026482168384, "ttid": 1484, "process_id": 859,
"create_options": 33554532,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 4, "file_attributes": 0})),
(FileOps.OPEN_IF, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 58651617,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 3, "file_attributes": 0})),
(FileOps.OVERWRITE, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 78874400,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 5, "file_attributes": 0})),
(FileOps.OVERWRITE_IF, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 83886112,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 6, "file_attributes": 0}))])
def test_create_file_operation(self, expected_op, kfsio, fsio, kevent):
fsio.parse_fsio(CREATE_FILE, kfsio)
kparams = kevent.params
assert kparams.file == kfsio.open_path
assert kparams.tid == kfsio.ttid
assert kparams.pid == kfsio.process_id
assert kparams.operation == expected_op.name
@pytest.mark.parametrize('expected_share_mask, kfsio',
[('r--', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 1, "file_attributes": 0})),
('-w-', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 2, "file_attributes": 0})),
('--d', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 4, "file_attributes": 0})),
('rw-', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 3, "file_attributes": 0})),
('r-d', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 5, "file_attributes": 0})),
('-wd', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 6, "file_attributes": 0})),
('rwd', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 7, "file_attributes": 0})),
('---', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": -1, "file_attributes": 0}))])
def test_create_file_share_mask(self, expected_share_mask, kfsio, fsio, kevent):
fsio.parse_fsio(CREATE_FILE, kfsio)
assert kevent.params.share_mask == expected_share_mask
def test_delete_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738026474426144, "ttid": 1956, "process_id": 859, "irp_ptr": 18446738026471032392})
fsio.parse_fsio(DELETE_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == '\\Device\\HarddiskVolume2\\fibratus.log'
def test_write_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738026474426144, "process_id": 859, "io_flags": 0, "io_size": 8296,
"offset": 75279, "ttid": 1956})
fsio.parse_fsio(WRITE_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == NA
assert kevent.params.io_size == kfsio.io_size / 1024
def test_rename_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738023471035392, "ttid": 1956, "process_id": 859, "irp_ptr": 18446738026471032392})
fsio.parse_fsio(RENAME_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == '\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll'
def test_set_file_information(self, fsio, kevent):
kfsio = dd(
{"file_object": 18446738023471035392, "ttid": 1956, "info_class": 20, "process_id": 859,
"irp_ptr": 18446738026471032392})
fsio.parse_fsio(SET_FILE_INFORMATION, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.info_class == 20
assert kevent.params.file == '\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll'
| 65.597403 | 123 | 0.544447 |
from unittest.mock import Mock
import pytest
from fibratus.common import DotD as dd, NA
from fibratus.fs import FsIO, FileOps
from fibratus.handle import HandleInfo, HandleType
from fibratus.kevent import KEvent
from fibratus.kevent_types import CREATE_FILE, DELETE_FILE, WRITE_FILE, RENAME_FILE, SET_FILE_INFORMATION
from fibratus.thread import ThreadRegistry
@pytest.fixture(scope='module')
def kevent():
return KEvent(Mock(spec_set=ThreadRegistry))
@pytest.fixture(scope='module')
def fsio(kevent):
handles = [HandleInfo(3080, 18446738026482168384, HandleType.DIRECTORY,
"\\Device\\HarddiskVolume2\\Users\\Nedo\\AppData\\Local\\VirtualStore", 640),
HandleInfo(2010, 18446738023471035392, HandleType.FILE,
"\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll", 640)]
fsio = FsIO(kevent, handles)
fsio.file_pool[18446738026474426144] = '\\Device\\HarddiskVolume2\\fibratus.log'
return fsio
class TestFsIO():
def test_init_fsio(self, fsio):
assert len(fsio.file_handles) == 2
@pytest.mark.parametrize('expected_op, kfsio',
[(FileOps.SUPERSEDE, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 1223456,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 1, "file_attributes": 0})),
(FileOps.OPEN, dd({"file_object": 18446738026482168384, "ttid": 1484, "process_id": 859,
"create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 2, "file_attributes": 0})),
(FileOps.CREATE, dd({"file_object": 18446738026482168384, "ttid": 1484, "process_id": 859,
"create_options": 33554532,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 4, "file_attributes": 0})),
(FileOps.OPEN_IF, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 58651617,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 3, "file_attributes": 0})),
(FileOps.OVERWRITE, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 78874400,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 5, "file_attributes": 0})),
(FileOps.OVERWRITE_IF, dd({"file_object": 18446738026482168384, "ttid": 1484,
"process_id": 859,
"create_options": 83886112,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"irp_ptr": 18446738026471032392, "share_access": 6, "file_attributes": 0}))])
def test_create_file_operation(self, expected_op, kfsio, fsio, kevent):
fsio.parse_fsio(CREATE_FILE, kfsio)
kparams = kevent.params
assert kparams.file == kfsio.open_path
assert kparams.tid == kfsio.ttid
assert kparams.pid == kfsio.process_id
assert kparams.operation == expected_op.name
@pytest.mark.parametrize('expected_share_mask, kfsio',
[('r--', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 1, "file_attributes": 0})),
('-w-', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 2, "file_attributes": 0})),
('--d', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 4, "file_attributes": 0})),
('rw-', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 3, "file_attributes": 0})),
('r-d', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 5, "file_attributes": 0})),
('-wd', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 6, "file_attributes": 0})),
('rwd', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": 7, "file_attributes": 0})),
('---', dd({"file_object": 18446738026482168384, "ttid": 1484, "create_options": 18874368,
"open_path": "\\Device\\HarddiskVolume2\\Windows\\system32\\kernel32.dll",
"process_id": 859,
"irp_ptr": 18446738026471032392, "share_access": -1, "file_attributes": 0}))])
def test_create_file_share_mask(self, expected_share_mask, kfsio, fsio, kevent):
fsio.parse_fsio(CREATE_FILE, kfsio)
assert kevent.params.share_mask == expected_share_mask
def test_delete_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738026474426144, "ttid": 1956, "process_id": 859, "irp_ptr": 18446738026471032392})
fsio.parse_fsio(DELETE_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == '\\Device\\HarddiskVolume2\\fibratus.log'
def test_write_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738026474426144, "process_id": 859, "io_flags": 0, "io_size": 8296,
"offset": 75279, "ttid": 1956})
fsio.parse_fsio(WRITE_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == NA
assert kevent.params.io_size == kfsio.io_size / 1024
def test_rename_file(self, fsio, kevent):
kfsio = dd({"file_object": 18446738023471035392, "ttid": 1956, "process_id": 859, "irp_ptr": 18446738026471032392})
fsio.parse_fsio(RENAME_FILE, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.file == '\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll'
def test_set_file_information(self, fsio, kevent):
kfsio = dd(
{"file_object": 18446738023471035392, "ttid": 1956, "info_class": 20, "process_id": 859,
"irp_ptr": 18446738026471032392})
fsio.parse_fsio(SET_FILE_INFORMATION, kfsio)
assert kevent.params.tid == kfsio.ttid
assert kevent.params.info_class == 20
assert kevent.params.file == '\\Device\\HarddiskVolume2\\Windows\\system32\\rpcss.dll'
| true | true |
1c461fa375b527ed770883ccd44488bbb7967dad | 1,644 | py | Python | temp_scripts/update_parameters.py | openmaker-eu/watchtower | af4d3e92b4cf0bf93c10e288a8b8ea97079da86d | [
"MIT"
] | 2 | 2017-05-16T10:57:29.000Z | 2017-12-14T11:33:18.000Z | temp_scripts/update_parameters.py | openmaker-eu/watchtower | af4d3e92b4cf0bf93c10e288a8b8ea97079da86d | [
"MIT"
] | 9 | 2018-11-29T07:44:15.000Z | 2021-12-13T19:54:18.000Z | temp_scripts/update_parameters.py | openmaker-eu/watchtower | af4d3e92b4cf0bf93c10e288a8b8ea97079da86d | [
"MIT"
] | 1 | 2019-02-28T19:00:47.000Z | 2019-02-28T19:00:47.000Z | from application.Connections import Connection
from pdb import set_trace
def updateAudienceParameters(topicID, location, signal_strength):
with Connection.Instance().get_cursor() as cur:
sql = (
"UPDATE audience_parameters "
"SET signal_strength = %s "
"WHERE topic_id = %s and location = %s "
)
cur.execute(sql, [int(signal_strength), int(topicID), location])
def updateInfluencerParameters(topicID, location, signal_strength, following_limit):
with Connection.Instance().get_cursor() as cur:
sql = (
"UPDATE influencer_parameters "
"SET signal_strength = %s, following_limit = %s "
"WHERE topic_id = %s and location = %s "
)
cur.execute(sql, [int(signal_strength), int(following_limit), int(topicID), location])
print("Influencer or Audience ?\n1) Influencer\n2) Audience")
choice = int(input())
if choice == 1:
# Influencer
s = ""
print("Enter 'topicID, location, signal_strength, following_limit' and press enter.\nType 'DONE' to finish.")
s = input()
while(s != "DONE"):
l = s.strip().split()
if(len(l) == 4):
updateInfluencerParameters(*l)
print("UPDATED!")
s = input()
if choice == 2:
# Audience
s = ""
print("Enter 'topicID, location, signal_strength' and press enter.\nType 'DONE' to finish.")
s = input()
while(s != "DONE"):
l = s.strip().split()
if(len(l) == 3):
updateAudienceParameters(*l)
print("UPDATED!")
s = input()
| 34.25 | 113 | 0.58455 | from application.Connections import Connection
from pdb import set_trace
def updateAudienceParameters(topicID, location, signal_strength):
with Connection.Instance().get_cursor() as cur:
sql = (
"UPDATE audience_parameters "
"SET signal_strength = %s "
"WHERE topic_id = %s and location = %s "
)
cur.execute(sql, [int(signal_strength), int(topicID), location])
def updateInfluencerParameters(topicID, location, signal_strength, following_limit):
with Connection.Instance().get_cursor() as cur:
sql = (
"UPDATE influencer_parameters "
"SET signal_strength = %s, following_limit = %s "
"WHERE topic_id = %s and location = %s "
)
cur.execute(sql, [int(signal_strength), int(following_limit), int(topicID), location])
print("Influencer or Audience ?\n1) Influencer\n2) Audience")
choice = int(input())
if choice == 1:
s = ""
print("Enter 'topicID, location, signal_strength, following_limit' and press enter.\nType 'DONE' to finish.")
s = input()
while(s != "DONE"):
l = s.strip().split()
if(len(l) == 4):
updateInfluencerParameters(*l)
print("UPDATED!")
s = input()
if choice == 2:
s = ""
print("Enter 'topicID, location, signal_strength' and press enter.\nType 'DONE' to finish.")
s = input()
while(s != "DONE"):
l = s.strip().split()
if(len(l) == 3):
updateAudienceParameters(*l)
print("UPDATED!")
s = input()
| true | true |
1c462039acecb8d459a5e841e0c153542b907b5f | 3,583 | py | Python | sympy/concrete/products.py | gnulinooks/sympy | 46f63841f96cd025289b91ba9db3e261138d720a | [
"BSD-3-Clause"
] | 1 | 2016-05-09T10:08:18.000Z | 2016-05-09T10:08:18.000Z | sympy/concrete/products.py | gnulinooks/sympy | 46f63841f96cd025289b91ba9db3e261138d720a | [
"BSD-3-Clause"
] | null | null | null | sympy/concrete/products.py | gnulinooks/sympy | 46f63841f96cd025289b91ba9db3e261138d720a | [
"BSD-3-Clause"
] | null | null | null |
from sympy.core import Basic, S, C, Add, Mul, Symbol, sympify
from sympy.polys import quo, roots
from sympy.simplify import powsimp
class Product(Basic):
"""Represents unevaluated product.
"""
def __new__(cls, term, *symbols, **assumptions):
term = sympify(term)
if term.is_Number:
if term is S.NaN:
return S.NaN
elif term is S.Infinity:
return S.NaN
elif term is S.NegativeInfinity:
return S.NaN
elif term is S.Zero:
return S.Zero
elif term is S.One:
return S.One
if len(symbols) == 1:
symbol = symbols[0]
if isinstance(symbol, C.Equality):
k = symbol.lhs
a = symbol.rhs.start
n = symbol.rhs.end
elif isinstance(symbol, (tuple, list)):
k, a, n = symbol
else:
raise ValueError("Invalid arguments")
k, a, n = map(sympify, (k, a, n))
if isinstance(a, C.Number) and isinstance(n, C.Number):
return Mul(*[term.subs(k, i) for i in xrange(int(a), int(n)+1)])
else:
raise NotImplementedError
obj = Basic.__new__(cls, **assumptions)
obj._args = (term, k, a, n)
return obj
@property
def term(self):
return self._args[0]
@property
def index(self):
return self._args[1]
@property
def lower(self):
return self._args[2]
@property
def upper(self):
return self._args[3]
def doit(self):
prod = self._eval_product()
if prod is not None:
return powsimp(prod)
else:
return self
def _eval_product(self, term=None):
k = self.index
a = self.lower
n = self.upper
if term is None:
term = self.term
if not term.has(k):
return term**(n-a+1)
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
C_= poly.LC
all_roots = roots(poly, multiple=True)
for r in all_roots:
A *= C.RisingFactorial(a-r, n-a+1)
Q *= n - r
if len(all_roots) < poly.degree:
B = Product(quo(poly, Q.as_poly(k)), (k, a, n))
return poly.LC**(n-a+1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p)
q = self._eval_product(q)
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t)
if p is not None:
exclude.append(p)
else:
include.append(p)
if not exclude:
return None
else:
A, B = Mul(*exclude), Mul(*include)
return A * Product(B, (k, a, n))
elif term.is_Pow:
if not term.base.has(k):
s = sum(term.exp, (k, a, n))
if not isinstance(s, Sum):
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base)
if p is not None:
return p**term.exp
def product(*args, **kwargs):
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit()
else:
return prod
| 25.055944 | 80 | 0.476137 |
from sympy.core import Basic, S, C, Add, Mul, Symbol, sympify
from sympy.polys import quo, roots
from sympy.simplify import powsimp
class Product(Basic):
def __new__(cls, term, *symbols, **assumptions):
term = sympify(term)
if term.is_Number:
if term is S.NaN:
return S.NaN
elif term is S.Infinity:
return S.NaN
elif term is S.NegativeInfinity:
return S.NaN
elif term is S.Zero:
return S.Zero
elif term is S.One:
return S.One
if len(symbols) == 1:
symbol = symbols[0]
if isinstance(symbol, C.Equality):
k = symbol.lhs
a = symbol.rhs.start
n = symbol.rhs.end
elif isinstance(symbol, (tuple, list)):
k, a, n = symbol
else:
raise ValueError("Invalid arguments")
k, a, n = map(sympify, (k, a, n))
if isinstance(a, C.Number) and isinstance(n, C.Number):
return Mul(*[term.subs(k, i) for i in xrange(int(a), int(n)+1)])
else:
raise NotImplementedError
obj = Basic.__new__(cls, **assumptions)
obj._args = (term, k, a, n)
return obj
@property
def term(self):
return self._args[0]
@property
def index(self):
return self._args[1]
@property
def lower(self):
return self._args[2]
@property
def upper(self):
return self._args[3]
def doit(self):
prod = self._eval_product()
if prod is not None:
return powsimp(prod)
else:
return self
def _eval_product(self, term=None):
k = self.index
a = self.lower
n = self.upper
if term is None:
term = self.term
if not term.has(k):
return term**(n-a+1)
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
C_= poly.LC
all_roots = roots(poly, multiple=True)
for r in all_roots:
A *= C.RisingFactorial(a-r, n-a+1)
Q *= n - r
if len(all_roots) < poly.degree:
B = Product(quo(poly, Q.as_poly(k)), (k, a, n))
return poly.LC**(n-a+1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p)
q = self._eval_product(q)
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t)
if p is not None:
exclude.append(p)
else:
include.append(p)
if not exclude:
return None
else:
A, B = Mul(*exclude), Mul(*include)
return A * Product(B, (k, a, n))
elif term.is_Pow:
if not term.base.has(k):
s = sum(term.exp, (k, a, n))
if not isinstance(s, Sum):
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base)
if p is not None:
return p**term.exp
def product(*args, **kwargs):
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit()
else:
return prod
| true | true |
1c4620bd5f4a647daadaabbb35603c6d6b7b073f | 7,172 | py | Python | fiber/middleware.py | bsimons/django-fiber | 0f4b03217a4aeba6b48908825507fbe8c5732c8d | [
"Apache-2.0"
] | null | null | null | fiber/middleware.py | bsimons/django-fiber | 0f4b03217a4aeba6b48908825507fbe8c5732c8d | [
"Apache-2.0"
] | null | null | null | fiber/middleware.py | bsimons/django-fiber | 0f4b03217a4aeba6b48908825507fbe8c5732c8d | [
"Apache-2.0"
] | null | null | null | import random
import re
import json
from urllib import unquote
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import loader, RequestContext
from django.utils.encoding import smart_text
from django.utils.html import escape
from fiber.app_settings import LOGIN_STRING, EXCLUDE_URLS, EDITOR, PERMISSION_CLASS
from fiber.models import ContentItem, Page
from fiber.utils.import_util import import_element, load_class
# Permission backend resolved from the PERMISSION_CLASS setting; consulted to
# decide which staff users may edit content through the Fiber admin interface.
perms = load_class(PERMISSION_CLASS)
def is_html(response):
    """
    Return True when the response declares an HTML content type,
    i.e. `text/html` or `application/xhtml+xml` (charset parameters
    after ';' are ignored).
    """
    content_type = response.get('Content-Type', None)
    if not content_type:
        return False
    return content_type.split(';')[0] in ('text/html', 'application/xhtml+xml')
class AdminPageMiddleware(object):
    """
    Old-style Django middleware that injects the Fiber login screen or
    admin interface into qualifying HTML responses via process_response.
    """
    # Session key flagging that the Fiber login interface should be shown
    # on the next suitable HTML response.
    LOGIN_SESSION_KEY = 'show_fiber_login'

    # Splits a rendered HTML document into head / body parts so extra markup
    # can be injected; DOTALL lets '.' span newlines in the document.
    body_re = re.compile(
        r'<head>(?P<HEAD>.*)</head>(?P<AFTER_HEAD>.*)<body(?P<BODY_ATTRS>.*?)>(?P<BODY>.*)</body>',
        re.DOTALL)
    def __init__(self):
        # Resolve the configured editor (EDITOR dotted path) once at
        # middleware construction, so its settings are available when
        # responses are modified.
        self.editor_settings = import_element(EDITOR)
def process_response(self, request, response):
# only process non-streaming html and xhtml responses
if is_html(response) and hasattr(response, 'content'):
if self.should_setup_login_session(request):
return self.setup_login_session(request)
if self.show_login(request) or self.show_admin(request, response):
return self.modify_response(request, response)
return response
def should_setup_login_session(self, request):
"""
Only set self.LOGIN_SESSION_KEY in the session when the request
- has LOGIN_STRING (defaults to @fiber) behind its request-url
"""
qs = unquote(request.META.get('QUERY_STRING', ''))
return request.path_info.endswith(LOGIN_STRING) or qs.endswith(LOGIN_STRING)
def setup_login_session(self, request):
"""
Add self.LOGIN_SESSION_KEY to the session and redirect to the the requested path without LOGIN_STRING
"""
request.session[self.LOGIN_SESSION_KEY] = True
url = request.path_info.replace(LOGIN_STRING, '')
qs = unquote(request.META.get('QUERY_STRING', ''))
if qs:
qs = '?%s' % qs.replace(LOGIN_STRING, '').rstrip('&')
return HttpResponseRedirect(''.join([url, qs]))
def show_login(self, request):
"""
Only show the Fiber login interface when the request
- is NOT performed by an admin user
- has session key self.LOGIN_SESSION_KEY = True
"""
return not request.user.is_staff and request.session.get(self.LOGIN_SESSION_KEY)
def show_admin(self, request, response):
"""
Only show the Fiber admin interface when the request
- is not an AJAX request
- has a response status code of 200
- is performed by an admin user
- has a user with sufficient permissions based on the Permission Class
- does not match EXCLUDE_URLS (empty by default)
"""
if request.is_ajax() or response.status_code != 200:
return False
if request.user.is_staff and perms.is_fiber_editor(request.user):
if EXCLUDE_URLS:
url = request.path_info.lstrip('/')
for exclude_url in EXCLUDE_URLS:
if re.search(exclude_url, url):
return False
return True
return False
def modify_response(self, request, response):
"""
Modify the response to include Fiber assets and data.
"""
fiber_data = {}
replacement = r'<head>\g<HEAD>%(header_html)s</head>\g<AFTER_HEAD><body data-fiber-data="%(fiber_data)s"\g<BODY_ATTRS>>\g<BODY></body>'
content = smart_text(response.content)
if self.show_login(request):
# Only show the login window once
request.session[self.LOGIN_SESSION_KEY] = False
fiber_data['show_login'] = True
elif self.show_admin(request, response):
if self.is_django_admin(request):
fiber_data['backend'] = True
else:
fiber_data['frontend'] = True
page = Page.objects.get_by_url(request.path_info)
if page:
fiber_data['page_id'] = page.pk
# Inject admin html in body, wrap the original body content in a div.
replacement = r'<head>\g<HEAD>%(header_html)s</head>\g<AFTER_HEAD><body data-fiber-data="%(fiber_data)s"\g<BODY_ATTRS>><div id="wpr-body">\g<BODY></body>'
content = content.replace('</body>', '</div>%s</body>' % self.get_body_html(request))
# Inject header html in head.
# Add fiber-data attribute to body tag.
replacement = replacement % {
'header_html': self.get_header_html(request),
'fiber_data': escape(json.dumps(fiber_data, sort_keys=True))
}
response.content = self.body_re.sub(replacement, content)
return response
def is_django_admin(self, request):
return request.path_info.startswith(reverse('admin:index'))
def get_header_html(self, request):
context = {
'editor_template_js': self.editor_settings.get('template_js'),
'editor_template_css': self.editor_settings.get('template_css'),
'BACKEND_BASE_URL': reverse('admin:index'),
'FIBER_LOGIN_URL': reverse('fiber_login'),
}
return loader.render_to_string('fiber/header.html', context, RequestContext(request))
def get_body_html(self, request):
context = {
'logout_url': self.get_logout_url(request)
}
return loader.render_to_string('fiber/admin.html', context, RequestContext(request))
def get_logout_url(self, request):
if request.META['QUERY_STRING']:
return '%s?next=%s?%s' % (reverse('admin:logout'), request.path_info, request.META['QUERY_STRING'])
else:
return '%s?next=%s' % (reverse('admin:logout'), request.path_info)
class ObfuscateEmailAddressMiddleware(object):
"""
Replaces plain email addresses with escaped addresses in (non streaming) HTML responses
"""
def process_response(self, request, response):
if is_html(response) and hasattr(response, 'content'): # Do not obfuscate non-html and streaming responses.
# http://www.lampdocs.com/blog/2008/10/regular-expression-to-extract-all-e-mail-addresses-from-a-file-with-php/
email_pattern = re.compile(r'(mailto:)?[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*(\+[_a-zA-Z0-9-]+)?@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))')
response.content = email_pattern.sub(self.encode_email, response.content)
return response
def encode_email(self, matches):
encoded_char_list = []
for char in matches.group(0):
encoded_char_list.append(random.choice(['&#%d;' % ord(char), '&#x%x;' % ord(char)]))
return ''.join(encoded_char_list)
| 43.204819 | 198 | 0.641104 | import random
import re
import json
from urllib import unquote
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import loader, RequestContext
from django.utils.encoding import smart_text
from django.utils.html import escape
from fiber.app_settings import LOGIN_STRING, EXCLUDE_URLS, EDITOR, PERMISSION_CLASS
from fiber.models import ContentItem, Page
from fiber.utils.import_util import import_element, load_class
perms = load_class(PERMISSION_CLASS)
def is_html(response):
content_type = response.get('Content-Type', None)
return bool(content_type and content_type.split(';')[0] in ('text/html', 'application/xhtml+xml'))
class AdminPageMiddleware(object):
LOGIN_SESSION_KEY = 'show_fiber_login'
body_re = re.compile(
r'<head>(?P<HEAD>.*)</head>(?P<AFTER_HEAD>.*)<body(?P<BODY_ATTRS>.*?)>(?P<BODY>.*)</body>',
re.DOTALL)
def __init__(self):
self.editor_settings = import_element(EDITOR)
def process_response(self, request, response):
if is_html(response) and hasattr(response, 'content'):
if self.should_setup_login_session(request):
return self.setup_login_session(request)
if self.show_login(request) or self.show_admin(request, response):
return self.modify_response(request, response)
return response
def should_setup_login_session(self, request):
qs = unquote(request.META.get('QUERY_STRING', ''))
return request.path_info.endswith(LOGIN_STRING) or qs.endswith(LOGIN_STRING)
def setup_login_session(self, request):
request.session[self.LOGIN_SESSION_KEY] = True
url = request.path_info.replace(LOGIN_STRING, '')
qs = unquote(request.META.get('QUERY_STRING', ''))
if qs:
qs = '?%s' % qs.replace(LOGIN_STRING, '').rstrip('&')
return HttpResponseRedirect(''.join([url, qs]))
def show_login(self, request):
return not request.user.is_staff and request.session.get(self.LOGIN_SESSION_KEY)
def show_admin(self, request, response):
if request.is_ajax() or response.status_code != 200:
return False
if request.user.is_staff and perms.is_fiber_editor(request.user):
if EXCLUDE_URLS:
url = request.path_info.lstrip('/')
for exclude_url in EXCLUDE_URLS:
if re.search(exclude_url, url):
return False
return True
return False
def modify_response(self, request, response):
fiber_data = {}
replacement = r'<head>\g<HEAD>%(header_html)s</head>\g<AFTER_HEAD><body data-fiber-data="%(fiber_data)s"\g<BODY_ATTRS>>\g<BODY></body>'
content = smart_text(response.content)
if self.show_login(request):
request.session[self.LOGIN_SESSION_KEY] = False
fiber_data['show_login'] = True
elif self.show_admin(request, response):
if self.is_django_admin(request):
fiber_data['backend'] = True
else:
fiber_data['frontend'] = True
page = Page.objects.get_by_url(request.path_info)
if page:
fiber_data['page_id'] = page.pk
replacement = r'<head>\g<HEAD>%(header_html)s</head>\g<AFTER_HEAD><body data-fiber-data="%(fiber_data)s"\g<BODY_ATTRS>><div id="wpr-body">\g<BODY></body>'
content = content.replace('</body>', '</div>%s</body>' % self.get_body_html(request))
replacement = replacement % {
'header_html': self.get_header_html(request),
'fiber_data': escape(json.dumps(fiber_data, sort_keys=True))
}
response.content = self.body_re.sub(replacement, content)
return response
def is_django_admin(self, request):
return request.path_info.startswith(reverse('admin:index'))
def get_header_html(self, request):
context = {
'editor_template_js': self.editor_settings.get('template_js'),
'editor_template_css': self.editor_settings.get('template_css'),
'BACKEND_BASE_URL': reverse('admin:index'),
'FIBER_LOGIN_URL': reverse('fiber_login'),
}
return loader.render_to_string('fiber/header.html', context, RequestContext(request))
def get_body_html(self, request):
context = {
'logout_url': self.get_logout_url(request)
}
return loader.render_to_string('fiber/admin.html', context, RequestContext(request))
def get_logout_url(self, request):
if request.META['QUERY_STRING']:
return '%s?next=%s?%s' % (reverse('admin:logout'), request.path_info, request.META['QUERY_STRING'])
else:
return '%s?next=%s' % (reverse('admin:logout'), request.path_info)
class ObfuscateEmailAddressMiddleware(object):
def process_response(self, request, response):
if is_html(response) and hasattr(response, 'content'):
email_pattern = re.compile(r'(mailto:)?[_a-zA-Z0-9-]+(\.[_a-zA-Z0-9-]+)*(\+[_a-zA-Z0-9-]+)?@[a-zA-Z0-9-]+(\.[a-zA-Z0-9-]+)*\.(([0-9]{1,3})|([a-zA-Z]{2,3})|(aero|coop|info|museum|name))')
response.content = email_pattern.sub(self.encode_email, response.content)
return response
def encode_email(self, matches):
encoded_char_list = []
for char in matches.group(0):
encoded_char_list.append(random.choice(['&#%d;' % ord(char), '&#x%x;' % ord(char)]))
return ''.join(encoded_char_list)
| true | true |
1c46219a94ef2b0745f859e73be317175fb547fb | 391 | py | Python | rsvp/urls.py | DXDSpirits/appsbackend | 2c69487c4e4d6dc78091ba8030889a5ddc990836 | [
"MIT"
] | null | null | null | rsvp/urls.py | DXDSpirits/appsbackend | 2c69487c4e4d6dc78091ba8030889a5ddc990836 | [
"MIT"
] | null | null | null | rsvp/urls.py | DXDSpirits/appsbackend | 2c69487c4e4d6dc78091ba8030889a5ddc990836 | [
"MIT"
] | null | null | null | from django.conf.urls import url, patterns, include
from rest_framework.routers import DefaultRouter
from rsvp import views
router = DefaultRouter()
router.register(r'rsvp', views.RsvpViewSet)
router.register(r'guest', views.GuestViewSet)
urlpatterns = patterns('',
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
)
| 30.076923 | 83 | 0.754476 | from django.conf.urls import url, patterns, include
from rest_framework.routers import DefaultRouter
from rsvp import views
router = DefaultRouter()
router.register(r'rsvp', views.RsvpViewSet)
router.register(r'guest', views.GuestViewSet)
urlpatterns = patterns('',
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
)
| true | true |
1c4624dd307575e0198507f2f32738456ad7f101 | 1,086 | py | Python | util.py | mhaberler/jumpvis | 93b3b723d27aab7f3d4319cc91d06432022ddc6d | [
"MIT"
] | null | null | null | util.py | mhaberler/jumpvis | 93b3b723d27aab7f3d4319cc91d06432022ddc6d | [
"MIT"
] | null | null | null | util.py | mhaberler/jumpvis | 93b3b723d27aab7f3d4319cc91d06432022ddc6d | [
"MIT"
] | null | null | null |
def get_bounds(points):
"""
return bounding box of a list of gpxpy points
"""
min_lat = None
max_lat = None
min_lon = None
max_lon = None
min_ele = None
max_ele = None
for point in points:
if min_lat is None or point.latitude < min_lat:
min_lat = point.latitude
if max_lat is None or point.latitude > max_lat:
max_lat = point.latitude
if min_lon is None or point.longitude < min_lon:
min_lon = point.longitude
if max_lon is None or point.longitude > max_lon:
max_lon = point.longitude
if min_ele is None or point.elevation < min_ele:
min_ele = point.elevation
if max_ele is None or point.elevation > max_ele:
max_ele = point.elevation
if min_lat and max_lat and min_lon and max_lon:
return {'min_latitude': min_lat, 'max_latitude': max_lat,
'min_longitude': min_lon, 'max_longitude': max_lon,
'min_elevation': min_ele, 'max_elevation': max_ele,
}
return None
| 32.909091 | 67 | 0.608656 |
def get_bounds(points):
min_lat = None
max_lat = None
min_lon = None
max_lon = None
min_ele = None
max_ele = None
for point in points:
if min_lat is None or point.latitude < min_lat:
min_lat = point.latitude
if max_lat is None or point.latitude > max_lat:
max_lat = point.latitude
if min_lon is None or point.longitude < min_lon:
min_lon = point.longitude
if max_lon is None or point.longitude > max_lon:
max_lon = point.longitude
if min_ele is None or point.elevation < min_ele:
min_ele = point.elevation
if max_ele is None or point.elevation > max_ele:
max_ele = point.elevation
if min_lat and max_lat and min_lon and max_lon:
return {'min_latitude': min_lat, 'max_latitude': max_lat,
'min_longitude': min_lon, 'max_longitude': max_lon,
'min_elevation': min_ele, 'max_elevation': max_ele,
}
return None
| true | true |
1c4624f33204aa75a7cdc3d84fb7b0e45eb71211 | 8,645 | py | Python | vendor-local/lib/python/taggit/managers.py | lmorchard/badg.us | aa75b9cb6858e99de16aa840add0eef9065fdb4c | [
"BSD-3-Clause"
] | 4 | 2015-09-01T01:19:45.000Z | 2018-05-16T16:03:10.000Z | vendor-local/lib/python/taggit/managers.py | lmorchard/badg.us | aa75b9cb6858e99de16aa840add0eef9065fdb4c | [
"BSD-3-Clause"
] | 7 | 2022-01-11T19:42:12.000Z | 2022-01-11T19:42:55.000Z | vendor-local/lib/python/taggit/managers.py | lmorchard/badg.us | aa75b9cb6858e99de16aa840add0eef9065fdb4c | [
"BSD-3-Clause"
] | 3 | 2015-05-21T15:36:01.000Z | 2020-11-20T23:58:12.000Z | from django.contrib.contenttypes.generic import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import ManyToManyRel, RelatedField, add_lazy_relation
from django.db.models.related import RelatedObject
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from taggit.models import TaggedItem, GenericTaggedItemBase
from taggit.utils import require_instance_manager
try:
all
except NameError:
# 2.4 compat
try:
from django.utils.itercompat import all
except ImportError:
# 1.1.X compat
def all(iterable):
for item in iterable:
if not item:
return False
return True
class TaggableRel(ManyToManyRel):
def __init__(self):
self.related_name = None
self.limit_choices_to = {}
self.symmetrical = True
self.multiple = True
self.through = None
class TaggableManager(RelatedField):
def __init__(self, verbose_name=_("Tags"),
help_text=_("A comma-separated list of tags."), through=None, blank=False):
self.through = through or TaggedItem
self.rel = TaggableRel()
self.verbose_name = verbose_name
self.help_text = help_text
self.blank = blank
self.editable = True
self.unique = False
self.creates_table = False
self.db_column = None
self.choices = None
self.serialize = False
self.null = True
self.creation_counter = models.Field.creation_counter
models.Field.creation_counter += 1
def __get__(self, instance, model):
if instance is not None and instance.pk is None:
raise ValueError("%s objects need to have a primary key value "
"before you can access their tags." % model.__name__)
manager = _TaggableManager(
through=self.through, model=model, instance=instance
)
return manager
def contribute_to_class(self, cls, name):
self.name = self.column = name
self.model = cls
cls._meta.add_field(self)
setattr(cls, name, self)
if not cls._meta.abstract:
if isinstance(self.through, basestring):
def resolve_related_class(field, model, cls):
self.through = model
self.post_through_setup(cls)
add_lazy_relation(
cls, self, self.through, resolve_related_class
)
else:
self.post_through_setup(cls)
def post_through_setup(self, cls):
self.use_gfk = (
self.through is None or issubclass(self.through, GenericTaggedItemBase)
)
self.rel.to = self.through._meta.get_field("tag").rel.to
if self.use_gfk:
tagged_items = GenericRelation(self.through)
tagged_items.contribute_to_class(cls, "tagged_items")
def save_form_data(self, instance, value):
getattr(instance, self.name).set(*value)
def formfield(self, form_class=TagField, **kwargs):
defaults = {
"label": capfirst(self.verbose_name),
"help_text": self.help_text,
"required": not self.blank
}
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, instance):
if instance.pk:
return self.through.objects.filter(**self.through.lookup_kwargs(instance))
return self.through.objects.none()
def related_query_name(self):
return self.model._meta.module_name
def m2m_reverse_name(self):
return self.through._meta.get_field_by_name("tag")[0].column
def m2m_target_field_name(self):
return self.model._meta.pk.name
def m2m_reverse_target_field_name(self):
return self.rel.to._meta.pk.name
def m2m_column_name(self):
if self.use_gfk:
return self.through._meta.virtual_fields[0].fk_field
return self.through._meta.get_field('content_object').column
def db_type(self, connection=None):
return None
def m2m_db_table(self):
return self.through._meta.db_table
def extra_filters(self, pieces, pos, negate):
if negate or not self.use_gfk:
return []
prefix = "__".join(["tagged_items"] + pieces[:pos-2])
cts = map(ContentType.objects.get_for_model, _get_subclasses(self.model))
if len(cts) == 1:
return [("%s__content_type" % prefix, cts[0])]
return [("%s__content_type__in" % prefix, cts)]
def bulk_related_objects(self, new_objs, using):
return []
class _TaggableManager(models.Manager):
def __init__(self, through, model, instance):
self.through = through
self.model = model
self.instance = instance
def get_query_set(self):
return self.through.tags_for(self.model, self.instance)
def _lookup_kwargs(self):
return self.through.lookup_kwargs(self.instance)
@require_instance_manager
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
# Checking for existing tags irrespective of the case
if str_tags:
q = models.Q()
for str_tag in str_tags:
q |= models.Q(name__iexact=str_tag)
existing = self.through.tag_model().objects.filter(q)
tag_objs.update(existing)
existing_low = [t.name.lower() for t in existing]
new_tags = [t for t in str_tags if t.lower() not in existing_low]
for new_tag in new_tags:
tag_objs.add(self.through.tag_model().objects.create(name=new_tag))
for tag in tag_objs:
self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
@require_instance_manager
def set(self, *tags):
self.clear()
self.add(*tags)
@require_instance_manager
def remove(self, *tags):
q = models.Q()
for tag in tags:
q |= models.Q(tag__name__iexact=tag)
self.through.objects.filter(**self._lookup_kwargs()).filter(
q).delete()
@require_instance_manager
def clear(self):
self.through.objects.filter(**self._lookup_kwargs()).delete()
def most_common(self):
return self.get_query_set().annotate(
num_times=models.Count(self.through.tag_relname())
).order_by('-num_times')
@require_instance_manager
def similar_objects(self):
lookup_kwargs = self._lookup_kwargs()
lookup_keys = sorted(lookup_kwargs)
qs = self.through.objects.values(*lookup_kwargs.keys())
qs = qs.annotate(n=models.Count('pk'))
qs = qs.exclude(**lookup_kwargs)
qs = qs.filter(tag__in=self.all())
qs = qs.order_by('-n')
# TODO: This all feels like a bit of a hack.
items = {}
if len(lookup_keys) == 1:
# Can we do this without a second query by using a select_related()
# somehow?
f = self.through._meta.get_field_by_name(lookup_keys[0])[0]
objs = f.rel.to._default_manager.filter(**{
"%s__in" % f.rel.field_name: [r["content_object"] for r in qs]
})
for obj in objs:
items[(getattr(obj, f.rel.field_name),)] = obj
else:
preload = {}
for result in qs:
preload.setdefault(result['content_type'], set())
preload[result["content_type"]].add(result["object_id"])
for ct, obj_ids in preload.iteritems():
ct = ContentType.objects.get_for_id(ct)
for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
items[(ct.pk, obj.pk)] = obj
results = []
for result in qs:
obj = items[
tuple(result[k] for k in lookup_keys)
]
obj.similar_tags = result["n"]
results.append(obj)
return results
def _get_subclasses(model):
subclasses = [model]
for f in model._meta.get_all_field_names():
field = model._meta.get_field_by_name(f)[0]
if (isinstance(field, RelatedObject) and
getattr(field.field.rel, "parent_link", None)):
subclasses.extend(_get_subclasses(field.model))
return subclasses
| 34.035433 | 90 | 0.616888 | from django.contrib.contenttypes.generic import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import ManyToManyRel, RelatedField, add_lazy_relation
from django.db.models.related import RelatedObject
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from taggit.models import TaggedItem, GenericTaggedItemBase
from taggit.utils import require_instance_manager
try:
all
except NameError:
try:
from django.utils.itercompat import all
except ImportError:
def all(iterable):
for item in iterable:
if not item:
return False
return True
class TaggableRel(ManyToManyRel):
def __init__(self):
self.related_name = None
self.limit_choices_to = {}
self.symmetrical = True
self.multiple = True
self.through = None
class TaggableManager(RelatedField):
def __init__(self, verbose_name=_("Tags"),
help_text=_("A comma-separated list of tags."), through=None, blank=False):
self.through = through or TaggedItem
self.rel = TaggableRel()
self.verbose_name = verbose_name
self.help_text = help_text
self.blank = blank
self.editable = True
self.unique = False
self.creates_table = False
self.db_column = None
self.choices = None
self.serialize = False
self.null = True
self.creation_counter = models.Field.creation_counter
models.Field.creation_counter += 1
def __get__(self, instance, model):
if instance is not None and instance.pk is None:
raise ValueError("%s objects need to have a primary key value "
"before you can access their tags." % model.__name__)
manager = _TaggableManager(
through=self.through, model=model, instance=instance
)
return manager
def contribute_to_class(self, cls, name):
self.name = self.column = name
self.model = cls
cls._meta.add_field(self)
setattr(cls, name, self)
if not cls._meta.abstract:
if isinstance(self.through, basestring):
def resolve_related_class(field, model, cls):
self.through = model
self.post_through_setup(cls)
add_lazy_relation(
cls, self, self.through, resolve_related_class
)
else:
self.post_through_setup(cls)
def post_through_setup(self, cls):
self.use_gfk = (
self.through is None or issubclass(self.through, GenericTaggedItemBase)
)
self.rel.to = self.through._meta.get_field("tag").rel.to
if self.use_gfk:
tagged_items = GenericRelation(self.through)
tagged_items.contribute_to_class(cls, "tagged_items")
def save_form_data(self, instance, value):
getattr(instance, self.name).set(*value)
def formfield(self, form_class=TagField, **kwargs):
defaults = {
"label": capfirst(self.verbose_name),
"help_text": self.help_text,
"required": not self.blank
}
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, instance):
if instance.pk:
return self.through.objects.filter(**self.through.lookup_kwargs(instance))
return self.through.objects.none()
def related_query_name(self):
return self.model._meta.module_name
def m2m_reverse_name(self):
return self.through._meta.get_field_by_name("tag")[0].column
def m2m_target_field_name(self):
return self.model._meta.pk.name
def m2m_reverse_target_field_name(self):
return self.rel.to._meta.pk.name
def m2m_column_name(self):
if self.use_gfk:
return self.through._meta.virtual_fields[0].fk_field
return self.through._meta.get_field('content_object').column
def db_type(self, connection=None):
return None
def m2m_db_table(self):
return self.through._meta.db_table
def extra_filters(self, pieces, pos, negate):
if negate or not self.use_gfk:
return []
prefix = "__".join(["tagged_items"] + pieces[:pos-2])
cts = map(ContentType.objects.get_for_model, _get_subclasses(self.model))
if len(cts) == 1:
return [("%s__content_type" % prefix, cts[0])]
return [("%s__content_type__in" % prefix, cts)]
def bulk_related_objects(self, new_objs, using):
return []
class _TaggableManager(models.Manager):
def __init__(self, through, model, instance):
self.through = through
self.model = model
self.instance = instance
def get_query_set(self):
return self.through.tags_for(self.model, self.instance)
def _lookup_kwargs(self):
return self.through.lookup_kwargs(self.instance)
@require_instance_manager
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
if str_tags:
q = models.Q()
for str_tag in str_tags:
q |= models.Q(name__iexact=str_tag)
existing = self.through.tag_model().objects.filter(q)
tag_objs.update(existing)
existing_low = [t.name.lower() for t in existing]
new_tags = [t for t in str_tags if t.lower() not in existing_low]
for new_tag in new_tags:
tag_objs.add(self.through.tag_model().objects.create(name=new_tag))
for tag in tag_objs:
self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
@require_instance_manager
def set(self, *tags):
self.clear()
self.add(*tags)
@require_instance_manager
def remove(self, *tags):
q = models.Q()
for tag in tags:
q |= models.Q(tag__name__iexact=tag)
self.through.objects.filter(**self._lookup_kwargs()).filter(
q).delete()
@require_instance_manager
def clear(self):
self.through.objects.filter(**self._lookup_kwargs()).delete()
def most_common(self):
return self.get_query_set().annotate(
num_times=models.Count(self.through.tag_relname())
).order_by('-num_times')
@require_instance_manager
def similar_objects(self):
lookup_kwargs = self._lookup_kwargs()
lookup_keys = sorted(lookup_kwargs)
qs = self.through.objects.values(*lookup_kwargs.keys())
qs = qs.annotate(n=models.Count('pk'))
qs = qs.exclude(**lookup_kwargs)
qs = qs.filter(tag__in=self.all())
qs = qs.order_by('-n')
items = {}
if len(lookup_keys) == 1:
f = self.through._meta.get_field_by_name(lookup_keys[0])[0]
objs = f.rel.to._default_manager.filter(**{
"%s__in" % f.rel.field_name: [r["content_object"] for r in qs]
})
for obj in objs:
items[(getattr(obj, f.rel.field_name),)] = obj
else:
preload = {}
for result in qs:
preload.setdefault(result['content_type'], set())
preload[result["content_type"]].add(result["object_id"])
for ct, obj_ids in preload.iteritems():
ct = ContentType.objects.get_for_id(ct)
for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
items[(ct.pk, obj.pk)] = obj
results = []
for result in qs:
obj = items[
tuple(result[k] for k in lookup_keys)
]
obj.similar_tags = result["n"]
results.append(obj)
return results
def _get_subclasses(model):
subclasses = [model]
for f in model._meta.get_all_field_names():
field = model._meta.get_field_by_name(f)[0]
if (isinstance(field, RelatedObject) and
getattr(field.field.rel, "parent_link", None)):
subclasses.extend(_get_subclasses(field.model))
return subclasses
| true | true |
1c46258e69edc1d51b3b465582f6145ad636ebc5 | 813 | py | Python | Controller/countryStatisticsHashedUserIdsController.py | lionick/map-ip-to-country | ccc44b511b7cf1451849038bae66e682140a68a9 | [
"Apache-2.0"
] | null | null | null | Controller/countryStatisticsHashedUserIdsController.py | lionick/map-ip-to-country | ccc44b511b7cf1451849038bae66e682140a68a9 | [
"Apache-2.0"
] | null | null | null | Controller/countryStatisticsHashedUserIdsController.py | lionick/map-ip-to-country | ccc44b511b7cf1451849038bae66e682140a68a9 | [
"Apache-2.0"
] | 1 | 2021-03-16T11:07:22.000Z | 2021-03-16T11:07:22.000Z | from datetime import date, timedelta
from Model.ipStatistics import ipStatistics
from Model.countryStatisticsHashedUserId import countryStatisticsHashedUserId
from datetime import datetime, timedelta
class countryStatisticsHashedUserIdsController:
@classmethod
def getDataNotMapped(self):
dateFrom = countryStatisticsHashedUserId.getLastDate()
# we dont have any country statistics saved
if dateFrom[0][0] == None:
result = ipStatistics.getAllIpStatistics()
else:
dayAfter = dateFrom[0][0] + timedelta(days=1)
dayFrom = dayAfter.strftime('%Y-%m-%d 00:00:00')
yesterday = date.today() - timedelta(days=1)
dateTo = yesterday.strftime('%Y-%m-%d 23:59:59')
result = ipStatistics.getIpStatisticsByDate(dayFrom, dateTo)
return result
| 33.875 | 77 | 0.723247 | from datetime import date, timedelta
from Model.ipStatistics import ipStatistics
from Model.countryStatisticsHashedUserId import countryStatisticsHashedUserId
from datetime import datetime, timedelta
class countryStatisticsHashedUserIdsController:
@classmethod
def getDataNotMapped(self):
dateFrom = countryStatisticsHashedUserId.getLastDate()
if dateFrom[0][0] == None:
result = ipStatistics.getAllIpStatistics()
else:
dayAfter = dateFrom[0][0] + timedelta(days=1)
dayFrom = dayAfter.strftime('%Y-%m-%d 00:00:00')
yesterday = date.today() - timedelta(days=1)
dateTo = yesterday.strftime('%Y-%m-%d 23:59:59')
result = ipStatistics.getIpStatisticsByDate(dayFrom, dateTo)
return result
| true | true |
1c46261cd54386528b25cc006d779402084d8229 | 484 | py | Python | PyPark/version.py | liuzhuogood/PyPark | e605502344a3bfcc7696ba56f193fd50d773f1ea | [
"Apache-2.0"
] | 1 | 2021-11-16T10:33:01.000Z | 2021-11-16T10:33:01.000Z | PyPark/version.py | liuzhuogood/PyPark | e605502344a3bfcc7696ba56f193fd50d773f1ea | [
"Apache-2.0"
] | null | null | null | PyPark/version.py | liuzhuogood/PyPark | e605502344a3bfcc7696ba56f193fd50d773f1ea | [
"Apache-2.0"
] | null | null | null | import logging
from PyPark.util.zk_util import path_join
def print_infos(pk):
for u in pk.rest.services.keys():
pk.log.info(f"Rest Service : /{path_join(pk.rest_base_url, u)}")
if len(pk.rest.services.keys()) > 0:
logging.info(f"Started By [{pk.group}] http://{pk.ip}:{pk.port}")
if pk.nat_port:
logging.info(f"Started By [NAT] http://{pk.nat_ip}:{pk.nat_port}")
if pk.debug:
logging.warning(f"Debug Enable Address:{pk.debug_host}")
| 32.266667 | 74 | 0.646694 | import logging
from PyPark.util.zk_util import path_join
def print_infos(pk):
for u in pk.rest.services.keys():
pk.log.info(f"Rest Service : /{path_join(pk.rest_base_url, u)}")
if len(pk.rest.services.keys()) > 0:
logging.info(f"Started By [{pk.group}] http://{pk.ip}:{pk.port}")
if pk.nat_port:
logging.info(f"Started By [NAT] http://{pk.nat_ip}:{pk.nat_port}")
if pk.debug:
logging.warning(f"Debug Enable Address:{pk.debug_host}")
| true | true |
1c4626a4a0981b699bd3f0e091123348bc6f9ecc | 1,097 | py | Python | python/ConvertDocx2HtmlUsingWord.py | netchira/netchira.github.io | bed7b1425fe0ec206887be9cf48a571afbded9e8 | [
"CC0-1.0"
] | 6 | 2019-09-25T06:43:01.000Z | 2022-03-11T02:54:47.000Z | python/ConvertDocx2HtmlUsingWord.py | netchira/netchira.github.io | bed7b1425fe0ec206887be9cf48a571afbded9e8 | [
"CC0-1.0"
] | 6 | 2019-01-06T07:35:10.000Z | 2022-02-26T03:46:28.000Z | python/ConvertDocx2HtmlUsingWord.py | netchira/netchira.github.io | bed7b1425fe0ec206887be9cf48a571afbded9e8 | [
"CC0-1.0"
] | 7 | 2021-05-14T07:04:36.000Z | 2022-03-20T18:23:28.000Z | # -*- coding: utf-8 -*-
"""
Created on Mon May 26 21:28:35 2019
Spyderエディタ
For Python ver 2.7
@author: netchira
"""
def ConvertDocx2HtmlUsingWord(DocxFilePath):
import win32com.client
import os
# ファイル拡張子の確認
if os.path.exists(DocxFilePath) and (DocxFilePath[-5:] == ".docx"):
# ファイルパスから拡張子(ピリオド含む5文字分)を取り除く
str_FilePathNoExt = DocxFilePath[0:-5]
# ファイルの拡張子として".htm"を付与
str_HtmlFilePath = str_FilePathNoExt + ".htm"
# ファイルパスとして生成
HtmlFilePath = os.path.abspath(str_HtmlFilePath)
else:
raise UserWarning("File Format is not .docx")
# Wordを起動する : Applicationオブジェクトを生成する
Application = win32com.client.Dispatch("Word.Application")
# Wordを画面表示する : VisibleプロパティをTrueにする
Application.Visible = True
# 既存文書を開く
doc = Application.Documents.Open(DocxFilePath)
# 名前を付けて保存 : ファイル形式を[Webページ(フィルター後)]に指定
WdFormatHTML = 8
WdFormatFilteredHTML = 10
doc.SaveAs2(HtmlFilePath, FileFormat=WdFormatFilteredHTML)
# 文書を閉じる
doc.Close()
# Wordを終了する : Quitメソッドを呼ぶ
Application.Quit()
| 26.119048 | 71 | 0.678213 |
def ConvertDocx2HtmlUsingWord(DocxFilePath):
import win32com.client
import os
if os.path.exists(DocxFilePath) and (DocxFilePath[-5:] == ".docx"):
str_FilePathNoExt = DocxFilePath[0:-5]
str_HtmlFilePath = str_FilePathNoExt + ".htm"
HtmlFilePath = os.path.abspath(str_HtmlFilePath)
else:
raise UserWarning("File Format is not .docx")
Application = win32com.client.Dispatch("Word.Application")
Application.Visible = True
doc = Application.Documents.Open(DocxFilePath)
WdFormatHTML = 8
WdFormatFilteredHTML = 10
doc.SaveAs2(HtmlFilePath, FileFormat=WdFormatFilteredHTML)
doc.Close()
Application.Quit()
| true | true |
1c4627682d3ef50f786fa60721404010b28e5f2d | 2,151 | py | Python | misp/utils/wsi_utils.py | zhoudaxia233/misp | c0d36e3f1a1eeac417d6bfff015ea5430f1d0de5 | [
"MIT"
] | 2 | 2019-12-21T10:46:57.000Z | 2019-12-22T14:01:23.000Z | misp/utils/wsi_utils.py | zhoudaxia233/misp | c0d36e3f1a1eeac417d6bfff015ea5430f1d0de5 | [
"MIT"
] | null | null | null | misp/utils/wsi_utils.py | zhoudaxia233/misp | c0d36e3f1a1eeac417d6bfff015ea5430f1d0de5 | [
"MIT"
] | null | null | null | import os
import openslide
from openslide.deepzoom import DeepZoomGenerator
from tqdm import tqdm
__all__ = ['WSI', 'validate_mpp', 'stitch_tiles']
class WSI():
def __init__(self, path: str, tile_size: int = 224):
self.path = path
self.tile_size = tile_size
self.slide = openslide.OpenSlide(self.path)
self.deepzoom_gen = DeepZoomGenerator(self.slide, tile_size=self.tile_size, overlap=0, limit_bounds=False)
self.mpp = (float(self.slide.properties[openslide.PROPERTY_NAME_MPP_X]),
float(self.slide.properties[openslide.PROPERTY_NAME_MPP_Y]))
self.level_count = self.deepzoom_gen.level_count
self.level_dimensions = self.deepzoom_gen.level_dimensions
self.level_tiles = self.deepzoom_gen.level_tiles
def get_info(self):
"""Get basic information of the wsi.
"""
print(f'Num of levels: {self.level_count}')
print('Dimensions of levels:')
for level_nr in range(self.level_count):
print(f'- level {level_nr}: {self.level_dimensions[level_nr]}')
print(f'MPP: {self.mpp}')
print()
def get_tile(self, row: int, col: int):
return self.deepzoom_gen.get_tile(level=self.level_count - 1, address=(col, row))
def make_tiles(self, tiles_folder_path: str):
if not os.path.isdir(tiles_folder_path):
os.mkdir(tiles_folder_path)
level = self.level_count - 1
cols, rows = self.level_tiles[level]
for row in tqdm(range(rows)):
for col in range(cols):
self.get_tile(row, col).save(tiles_folder_path + f'{row}_{col}.tif')
return
def validate_mpp(path: str, mpp: float = 0.5, thresh: float = 0.015) -> bool:
"""Validate if the input WSI has the same MPP as our training data.
"""
wsi = openslide.OpenSlide(path)
x_diff = float(wsi.properties[openslide.PROPERTY_NAME_MPP_X]) - mpp
y_diff = float(wsi.properties[openslide.PROPERTY_NAME_MPP_Y]) - mpp
return (abs(x_diff) < thresh) and (abs(y_diff) < thresh)
def stitch_tiles(patches_folder_path: str, target_folder_path: str):
pass
| 38.410714 | 114 | 0.666202 | import os
import openslide
from openslide.deepzoom import DeepZoomGenerator
from tqdm import tqdm
__all__ = ['WSI', 'validate_mpp', 'stitch_tiles']
class WSI():
def __init__(self, path: str, tile_size: int = 224):
self.path = path
self.tile_size = tile_size
self.slide = openslide.OpenSlide(self.path)
self.deepzoom_gen = DeepZoomGenerator(self.slide, tile_size=self.tile_size, overlap=0, limit_bounds=False)
self.mpp = (float(self.slide.properties[openslide.PROPERTY_NAME_MPP_X]),
float(self.slide.properties[openslide.PROPERTY_NAME_MPP_Y]))
self.level_count = self.deepzoom_gen.level_count
self.level_dimensions = self.deepzoom_gen.level_dimensions
self.level_tiles = self.deepzoom_gen.level_tiles
def get_info(self):
print(f'Num of levels: {self.level_count}')
print('Dimensions of levels:')
for level_nr in range(self.level_count):
print(f'- level {level_nr}: {self.level_dimensions[level_nr]}')
print(f'MPP: {self.mpp}')
print()
def get_tile(self, row: int, col: int):
return self.deepzoom_gen.get_tile(level=self.level_count - 1, address=(col, row))
def make_tiles(self, tiles_folder_path: str):
if not os.path.isdir(tiles_folder_path):
os.mkdir(tiles_folder_path)
level = self.level_count - 1
cols, rows = self.level_tiles[level]
for row in tqdm(range(rows)):
for col in range(cols):
self.get_tile(row, col).save(tiles_folder_path + f'{row}_{col}.tif')
return
def validate_mpp(path: str, mpp: float = 0.5, thresh: float = 0.015) -> bool:
wsi = openslide.OpenSlide(path)
x_diff = float(wsi.properties[openslide.PROPERTY_NAME_MPP_X]) - mpp
y_diff = float(wsi.properties[openslide.PROPERTY_NAME_MPP_Y]) - mpp
return (abs(x_diff) < thresh) and (abs(y_diff) < thresh)
def stitch_tiles(patches_folder_path: str, target_folder_path: str):
pass
| true | true |
1c4627bc2af1de74f5fa845dc646606e7fc21076 | 110,473 | py | Python | ds_discovery/sample/map_companies_fortune1000.py | project-hadron/discovery-transition-ds | 08229ca3b7617b42ce2dd8e47ff93876c0843810 | [
"BSD-3-Clause"
] | 2 | 2020-09-21T17:24:16.000Z | 2021-05-28T18:02:54.000Z | ds_discovery/sample/map_companies_fortune1000.py | project-hadron/discovery-transition-ds | 08229ca3b7617b42ce2dd8e47ff93876c0843810 | [
"BSD-3-Clause"
] | null | null | null | ds_discovery/sample/map_companies_fortune1000.py | project-hadron/discovery-transition-ds | 08229ca3b7617b42ce2dd8e47ff93876c0843810 | [
"BSD-3-Clause"
] | 1 | 2021-07-23T13:52:04.000Z | 2021-07-23T13:52:04.000Z | data={'title': ['Walmart', 'Exxon Mobil', 'Berkshire Hathaway', 'Apple', 'UnitedHealth Group', 'McKesson', 'CVS Health', 'Amazon.com', 'AT&T', 'General Motors', 'Ford Motor', 'AmerisourceBergen', 'Chevron', 'Cardinal Health', 'Costco', 'Verizon', 'Kroger', 'General Electric', 'Walgreens Boots Alliance', 'JPMorgan Chase', 'Fannie Mae', 'Alphabet', 'Home Depot', 'Bank of America Corp.', 'Express Scripts Holding', 'Wells Fargo', 'Boeing', 'Phillips 66', 'Anthem', 'Microsoft', 'Valero Energy', 'Citigroup', 'Comcast', 'IBM', 'Dell Technologies', 'State Farm Insurance Cos.', 'Johnson & Johnson', 'Freddie Mac', 'Target', 'Lowes', 'Marathon Petroleum', 'Procter & Gamble', 'MetLife', 'UPS', 'PepsiCo', 'Intel', 'DowDuPont', 'Archer Daniels Midland', 'Aetna', 'FedEx', 'United Technologies', 'Prudential Financial', 'Albertsons Cos.', 'Sysco', 'Disney', 'Humana', 'Pfizer', 'HP', 'Lockheed Martin', 'AIG', 'Centene', 'Cisco Systems', 'HCA Healthcare', 'Energy Transfer Equity', 'Caterpillar', 'Nationwide', 'Morgan Stanley', 'Liberty Mutual Insurance Group', 'New York Life Insurance', 'Goldman Sachs Group', 'American Airlines Group', 'Best Buy', 'Cigna', 'Charter Communications', 'Delta Air Lines', 'Facebook', 'Honeywell International', 'Merck', 'Allstate', 'Tyson Foods', 'United Continental Holdings', 'Oracle', 'Tech Data', 'TIAA', 'TJX', 'American Express', 'Coca-Cola', 'Publix Super Markets', 'Nike', 'Andeavor', 'World Fuel Services', 'Exelon', 'Massachusetts Mutual Life Insurance', 'Rite Aid', 'ConocoPhillips', 'CHS', '3M', 'Time Warner', 'General Dynamics', 'USAA', 'Capital One Financial', 'Deere', 'INTL FCStone', 'Northwestern Mutual', 'Enterprise Products Partners', 'Travelers Cos.', 'Hewlett Packard Enterprise', 'Philip Morris International', 'Twenty-First Century Fox', 'AbbVie', 'Abbott Laboratories', 'Progressive', 'Arrow Electronics', 'Kraft Heinz', 'Plains GP Holdings', 'Gilead Sciences', 'Mondelez 
International', 'Northrop Grumman', 'Raytheon', 'Macys', 'US Foods Holding', 'U.S. Bancorp', 'Dollar General', 'International Paper', 'Duke Energy', 'Southern', 'Marriott International', 'Avnet', 'Eli Lilly', 'Amgen', 'McDonalds', 'Starbucks', 'Qualcomm', 'Dollar Tree', 'PBF Energy', 'Icahn Enterprises', 'Aflac', 'AutoNation', 'Penske Automotive Group', 'Whirlpool', 'Union Pacific', 'Southwest Airlines', 'ManpowerGroup', 'Thermo Fisher Scientific', 'Bristol-Myers Squibb', 'Halliburton', 'Tenet Healthcare', 'Lear', 'Cummins', 'Micron Technology', 'Nucor', 'Molina Healthcare', 'Fluor', 'Altria Group', 'Paccar', 'Hartford Financial Services', 'Kohls', 'Western Digital', 'Jabil', 'Community Health Systems', 'Visa', 'Danaher', 'Kimberly-Clark', 'AECOM', 'PNC Financial Services', 'CenturyLink', 'NextEra Energy', 'PG& E Corp.', 'Synnex', 'WellCare Health Plans', 'Performance Food Group', 'Sears Holdings', 'Synchrony Financial', 'CarMax', 'Bank of New York Mellon', 'Freeport-McMoRan', 'Genuine Parts', 'Emerson Electric', 'DaVita', 'Supervalu', 'Gap', 'General Mills', 'Nordstrom', 'Colgate-Palmolive', 'American Electric Power', 'XPO Logistics', 'Goodyear Tire & Rubber', 'Omnicom Group', 'CDW', 'Sherwin-Williams', 'PPG Industries', 'Texas Instruments', 'C.H. Robinson Worldwide', 'WestRock', 'Cognizant Technology Solutions', 'Newell Brands', 'CBS', 'Envision Healthcare', 'Monsanto', 'Aramark', 'Applied Materials', 'Waste Management', 'DISH Network', 'Illinois Tool Works', 'Lincoln National', 'HollyFrontier', 'CBRE Group', 'Textron', 'Ross Stores', 'Principal Financial', 'D.R. Horton', 'Marsh & McLennan', 'Devon Energy', 'AES', 'Ecolab', "Land O'Lakes", 'Loews', 'Kinder Morgan', 'FirstEnergy', 'Occidental Petroleum', 'Viacom', 'PayPal Holdings', 'NGL Energy Partners', 'Celgene', 'Arconic', 'Kellogg', 'Las Vegas Sands', 'Stanley Black & Decker', 'Booking Holdings', 'Lennar', 'L Brands', 'DTE Energy', 'Dominion Energy', 'Reinsurance Group of America', 'J.C. 
Penney', 'Mastercard', 'BlackRock', 'Henry Schein', 'Guardian Life Ins. Co. of America', 'Stryker', 'Jefferies Financial Group', 'VF', 'ADP', 'Edison International', 'Biogen', 'United States Steel', 'Core-Mark Holding', 'Bed Bath & Beyond', 'Oneok', 'BB& T Corp.', 'Becton Dickinson', 'Ameriprise Financial', 'Farmers Insurance Exchange', 'First Data', 'Consolidated Edison', 'Parker-Hannifin', 'Anadarko Petroleum', 'Estee Lauder', 'State Street Corp.', 'Tesla', 'Netflix', 'Alcoa', 'Discover Financial Services', 'Praxair', 'CSX', 'Xcel Energy', 'Unum Group', 'Universal Health Services', 'NRG Energy', 'EOG Resources', 'Sempra Energy', "Toys 'R'ù Us", 'Group 1 Automotive', 'Entergy', 'Molson Coors Brewing', 'L3 Technologies', 'Ball', 'AutoZone', 'Murphy USA', 'MGM Resorts International', 'Office Depot', 'Huntsman', 'Baxter International', 'Norfolk Southern', 'salesforce.com', 'Laboratory Corp. of America', 'W.W. Grainger', 'Qurate Retail', 'Autoliv', 'Live Nation Entertainment', 'Xerox', 'Leidos Holdings', 'Corning', 'Lithia Motors', 'Expedia Group', 'Republic Services', 'Jacobs Engineering Group', 'Sonic Automotive', 'Ally Financial', 'LKQ', 'BorgWarner', 'Fidelity National Financial', 'SunTrust Banks', 'IQVIA Holdings', 'Reliance Steel & Aluminum', 'Nvidia', 'Voya Financial', 'CenterPoint Energy', 'eBay', 'Eastman Chemical', 'American Family Insurance Group', 'Steel Dynamics', 'Pacific Life', 'Chesapeake Energy', 'Mohawk Industries', 'Quanta Services', 'Advance Auto Parts', 'Owens & Minor', 'United Natural Foods', 'Tenneco', 'Conagra Brands', 'GameStop', 'Hormel Foods', 'Hilton Worldwide Holdings', 'Frontier Communications', 'Fidelity National Information Services', 'Public Service Enterprise Group', 'Boston Scientific', 'OReilly Automotive', 'Charles Schwab', 'Global Partners', 'PVH', 'Avis Budget Group', 'Targa Resources', 'Hertz Global Holdings', 'Calpine', 'Mutual of Omaha Insurance', 'Crown Holdings', 'Peter Kiewit Sons', 'Dicks Sporting Goods', 'PulteGroup', 
'Navistar International', 'Thrivent Financial for Lutherans', 'DCP Midstream', 'Air Products & Chemicals', 'Veritiv', 'AGCO', 'Genworth Financial', 'Univar', 'News Corp.', 'SpartanNash', 'Westlake Chemical', 'Williams', 'Lam Research', 'Alaska Air Group', 'Jones Lang LaSalle', 'Anixter International', 'Campbell Soup', 'Interpublic Group', 'Dover', 'Zimmer Biomet Holdings', 'Dean Foods', 'Foot Locker', 'Eversource Energy', 'Alliance Data Systems', 'Fifth Third Bancorp', 'Quest Diagnostics', 'EMCOR Group', 'W.R. Berkley', 'WESCO International', 'Coty', 'WEC Energy Group', 'Masco', 'DXC Technology', 'Auto-Owners Insurance', 'Jones Financial (Edward Jones)', 'Liberty Media', 'Erie Insurance Group', 'Hershey', 'PPL', 'Huntington Ingalls Industries', 'Mosaic', 'J.M. Smucker', 'Delek US Holdings', 'Newmont Mining', 'Constellation Brands', 'Ryder System', 'National Oilwell Varco', 'Adobe Systems', 'LifePoint Health', 'Tractor Supply', 'Thor Industries', 'Dana', 'Weyerhaeuser', 'J.B. Hunt Transport Services', 'Darden Restaurants', 'Yum China Holdings', 'Blackstone Group', 'Berry Global Group', 'Builders FirstSource', 'Activision Blizzard', 'JetBlue Airways', 'Amphenol', 'A-Mark Precious Metals', 'Spirit AeroSystems Holdings', 'R.R. Donnelley & Sons', 'Harris', 'Expeditors Intl. of Washington', 'Discovery', 'Owens-Illinois', 'Sanmina', 'KeyCorp', 'American Financial Group', 'Oshkosh', 'Rockwell Collins', 'Kindred Healthcare', 'Insight Enterprises', 'Dr Pepper Snapple Group', 'American Tower', 'Fortive', 'Ralph Lauren', 'HRG Group', 'Ascena Retail Group', 'United Rentals', 'Caseys General Stores', 'Graybar Electric', 'Avery Dennison', 'MasTec', 'CMS Energy', 'HD Supply Holdings', 'Raymond James Financial', 'NCR', 'Hanesbrands', 'Asbury Automotive Group', 'Citizens Financial Group', 'Packaging Corp. 
of America', 'Alleghany', 'Apache', 'Dillards', 'Assurant', 'Franklin Resources', 'Owens Corning', 'Motorola Solutions', 'NVR', 'Rockwell Automation', 'TreeHouse Foods', 'Wynn Resorts', 'Olin', 'American Axle & Manufacturing', 'Old Republic International', 'Chemours', 'iHeartMedia', 'Ameren', 'Arthur J. Gallagher', 'Celanese', 'Sealed Air', 'UGI', 'Realogy Holdings', 'Burlington Stores', 'Regions Financial', 'AK Steel Holding', 'Securian Financial Group', 'S& P Global', 'Markel', 'TravelCenters of America', 'Conduent', 'M& T Bank Corp.', 'Clorox', 'AmTrust Financial Services', 'KKR', 'Ulta Beauty', 'Yum Brands', 'Regeneron Pharmaceuticals', 'Windstream Holdings', 'Magellan Health', 'Western & Southern Financial', 'Intercontinental Exchange', 'Ingredion', 'Wyndham Destinations', 'Toll Brothers', 'Seaboard', 'Booz Allen Hamilton', 'First American Financial', 'Cincinnati Financial', 'Avon Products', 'Northern Trust', 'Fiserv', 'Harley-Davidson', 'Cheniere Energy', 'Patterson', 'Peabody Energy', 'ON Semiconductor', 'Simon Property Group', 'Western Union', 'NetApp', 'Polaris Industries', 'Pioneer Natural Resources', 'ABM Industries', 'Vistra Energy', 'Cintas', 'Hess', 'Host Hotels & Resorts', 'Kelly Services', 'Genesis Healthcare', 'Michaels Cos.', 'Advanced Micro Devices', 'Zoetis', 'Williams-Sonoma', 'Fortune Brands Home & Security', 'Big Lots', 'Robert Half International', 'Post Holdings', 'Hasbro', 'Hanover Insurance Group', 'Navient', 'Intuit', 'Domtar', 'Marathon Oil', 'Cerner', 'Analog Devices', 'Telephone & Data Systems', 'Essendant', 'Sonoco Products', 'Juniper Networks', 'Commercial Metals', 'CSRA', 'Under Armour', 'RPM International', 'Total System Services', 'Levi Strauss', 'Brunswick', 'YRC Worldwide', 'Mattel', 'FM Global', 'NiSource', 'Caesars Entertainment', 'Electronic Arts', 'Dynegy', 'McCormick', 'T. 
Rowe Price', 'Orbital ATK', 'Tutor Perini', 'Brookdale Senior Living', 'Huntington Bancshares', 'Wayfair', 'Rush Enterprises', 'Xylem', 'Neiman Marcus Group', 'Hyatt Hotels', 'Sprouts Farmers Market', 'Diebold Nixdorf', 'Roper Technologies', 'Smart & Final Stores', 'CommScope Holding', 'Tapestry', 'Diplomat Pharmacy', 'Chipotle Mexican Grill', 'Agilent Technologies', 'Science Applications International', 'MDU Resources Group', 'Select Medical Holdings', 'Boise Cascade', 'National General Holdings', 'SCANA', 'Graphic Packaging Holding', 'Fastenal', 'Schneider National', 'Laureate Education', 'Beacon Roofing Supply', 'KB Home', 'Equinix', 'Terex', 'Crown Castle International', 'CACI International', 'Watsco', 'Coca-Cola Bottling', 'Welltower', 'ADT', 'Ametek', 'CNO Financial Group', 'Camping World Holdings', 'LPL Financial Holdings', 'Noble Energy', 'Bloomin Brands', 'Moodys', 'Symantec', 'Amkor Technology', 'Skechers U.S.A.', 'KBR', 'Tiffany', 'Torchmark', 'Broadridge Financial Solutions', 'Quad/Graphics', 'CF Industries Holdings', 'Carlisle', 'Silgan Holdings', 'Bemis', 'CA', 'Hub Group', 'Worldpay', 'Ingles Markets', 'Snap-on', 'Dentsply Sirona', 'Calumet Specialty Products', 'Global Payments', 'Encompass Health', 'Martin Marietta Materials', 'Nasdaq', 'Leggett & Platt', 'Universal Forest Products', 'Sally Beauty Holdings', 'Flowers Foods', 'Barnes & Noble', 'American Equity Investment Life', 'Vulcan Materials', 'Taylor Morrison Home', 'Westinghouse Air Brake', 'Crestwood Equity Partners', 'Iron Mountain', 'Lennox International', 'General Cable', 'American Eagle Outfitters', 'Church & Dwight', 'Platform Specialty Products', 'JELD-WEN Holding', 'OneMain Holdings', 'Colfax', 'Zebra Technologies', 'Andersons', 'TD Ameritrade Holding', 'Carlyle Group', 'Hubbell', 'Trinity Industries', 'Darling Ingredients', 'Flowserve', 'Antero Resources', 'Skyworks Solutions', 'Landstar System', 'Buckeye Partners', 'MRC Global', 'CME Group', 'Greif', 'Nexeo Solutions', 
'Cooper-Standard Holdings', 'Urban Outfitters', 'LSC Communications', 'Sabre', 'Green Plains', 'Hexion', 'Stericycle', 'Warner Music Group', 'Ventas', 'ScanSource', 'Pinnacle West Capital', 'Scripps Networks Interactive', 'Alexion Pharmaceuticals', 'Pitney Bowes', 'CIT Group', 'Country Financial', 'CUNA Mutual Group', 'Triumph Group', 'TransDigm Group', 'Allegheny Technologies', 'Resolute Forest Products', 'Acuity Brands', 'Abercrombie & Fitch', 'KLA-Tencor', 'Weis Markets', 'Puget Energy', 'Mednax', 'Kar Auction Services', 'PolyOne', 'FMC', 'Edwards Lifesciences', 'Microchip Technology', 'Amerco', 'Mercury General', 'American National Insurance', 'Carters', 'International Flavors & Fragrances', 'Aarons', 'Alliant Energy', 'EQT', 'Monster Beverage', 'BMC Stock Holdings', 'Ryerson Holding', 'Equifax', 'Regal Beloit', 'Old Dominion Freight Line', 'American Water Works', 'BGC Partners', 'Brinks', 'Meritor', 'Sentry Insurance Group', 'Sanderson Farms', 'KapStone Paper & Packaging', 'Gartner', 'IAC/InterActiveCorp', 'Tailored Brands', 'WABCO Holdings', 'Insperity', 'Comerica', 'TriNet Group', 'Avaya Holdings', 'Ashland Global Holdings', 'Meritage Homes', 'SkyWest', 'USG', 'Southwestern Energy', 'Keysight Technologies', 'Regal Entertainment Group', 'Mutual of America Life Insurance', 'Paychex', 'Brinker International', 'Penn National Gaming', 'Gannett', 'Visteon', 'Pinnacle Foods', 'Intuitive Surgical', 'Continental Resources', 'Service Corp. International', 'Scientific Games', 'Albemarle', 'Atmos Energy', 'Hologic', 'H& R Block', 'Qorvo', 'Steelcase', 'Univision Communications', 'Worthington Industries', 'Timken', 'A.O. 
Smith', 'PriceSmart', 'Stifel Financial', 'Brown-Forman', 'Cinemark Holdings', 'Granite Construction', 'Dycom Industries', 'Clean Harbors', 'First Solar', 'Scotts Miracle-Gro', 'Cracker Barrel Old Country Store', 'Triple-S Management', 'First Republic Bank', 'ServiceMaster Global Holdings', 'PC Connection', 'Genesco', 'Medical Mutual of Ohio', 'MSC Industrial Direct', 'Legg Mason', 'Hyster-Yale Materials Handling', 'Apollo Global Management', 'Citrix Systems', 'Acadia Healthcare', 'Varian Medical Systems', 'Groupon', 'Aleris', 'Sprague Resources', 'Cooper Tire & Rubber', 'Hain Celestial Group', 'Penn Mutual Life Insurance', 'Colony NorthStar', 'ArcBest', 'Presidio', 'TRI Pointe Group', 'Annaly Capital Management', 'G-III Apparel Group', 'AMC Networks', 'Enable Midstream Partners', 'Ciena', 'DSW', 'Convergys', 'Park Hotels & Resorts', 'Pool', 'Fossil Group', 'Dominos Pizza', 'Crane', 'Caleres', 'Tempur Sealy International', 'Tetra Tech', 'Illumina', 'Valmont Industries', 'Hill-Rom Holdings', 'Unisys', 'Zions Bancorp.', 'Sinclair Broadcast Group', 'Louisiana-Pacific', 'Mettler-Toledo International', 'Synopsys', 'Kemper', 'Cabot', 'Great Plains Energy', 'Rent-A-Center', 'Hawaiian Holdings', 'Revlon', 'Syneos Health', 'Public Storage', 'TTM Technologies', 'Vectren', 'Trimble', 'NOW', 'Spirit Airlines', 'ASGN', 'Lincoln Electric Holdings', 'Prologis', 'Range Resources', 'Teledyne Technologies', 'Vishay Intertechnology', 'Boston Properties', 'Applied Industrial Technologies', 'Graham Holdings', 'Amica Mutual Insurance', 'Concho Resources', 'ITT', 'Kansas City Southern', 'MDC Holdings', 'Evergy', 'Pinnacle Entertainment', 'Hawaiian Electric Industries', 'TEGNA', 'Southwest Gas Holdings', 'Vista Outdoor', 'Bon-Ton Stores', 'Super Micro Computer', 'Plexus', 'TrueBlue', 'Magellan Midstream Partners', 'Toro', 'Akamai Technologies', 'Moog', 'Vertex Pharmaceuticals', 'Equity Residential', 'Selective Insurance Group', 'AptarGroup', 'Benchmark Electronics', 'Columbia Sportswear', 
'A. Schulman', 'Verso', 'Digital Realty Trust', 'GNC Holdings', 'E*Trade Financial', 'Hovnanian Enterprises', 'Maximus', 'Twitter', 'Par Pacific Holdings', 'Parexel International', 'RH', 'Nexstar Media Group', 'Knight-Swift Transportation Holdings', 'Red Hat', 'Belden', 'Boyd Gaming', 'Primoris Services', 'Gardner Denver', 'Donaldson', 'Party City Holdco', 'J.Crew Group', 'EnerSys', 'Guess', 'Patterson-UTI Energy', 'WGL Holdings', 'Wolverine World Wide', 'Xilinx', 'Vornado Realty Trust', 'Middleby', 'MPM Holdings', 'Cleveland-Cliffs', 'GGP', 'Cypress Semiconductor', 'Arch Coal', 'GMS', 'Waters', 'H.B. Fuller', 'Affiliated Managers Group', 'PerkinElmer', 'Edgewell Personal Care', 'Maxim Integrated Products', 'Knights of Columbus', 'IDEX', 'DST Systems', 'Chicos FAS', 'Nu Skin Enterprises', 'Herman Miller', 'NLV Financial', 'Curtiss-Wright', 'New Jersey Resources', 'REV Group', 'Mueller Industries', 'GEO Group', 'Allison Transmission Holdings', 'OGE Energy', 'Cheesecake Factory', 'PRA Health Sciences', 'Tupperware Brands', 'Euronet Worldwide', 'FLEETCOR Technologies', 'Nationstar Mortgage Holdings', 'GoDaddy', 'Blackhawk Network Holdings', 'Cboe Global Markets', 'Snyders-Lance', 'Murphy Oil', 'CDK Global', 'Texas Roadhouse', 'Kirby', 'Square', 'Genesee & Wyoming', 'Zayo Group Holdings', 'NewMarket', '99 Cents Only Stores', 'PCM', 'Federated Mutual Insurance', 'HNI', 'Hospitality Properties Trust', 'Greenbrier Cos.', 'Bio-Rad Laboratories', 'AvalonBay Communities', 'Renewable Energy Group', 'Atlas Air Worldwide Holdings', 'Teradata', 'LCI Industries', 'Teleflex', 'Verisk Analytics', 'Popular', 'Workday', 'Cooper Cos.', 'Express', 'Teradyne', 'Werner Enterprises', 'Oaktree Capital Group', 'Woodward', 'F5 Networks', 'Valvoline', 'Roadrunner Transportation Systems', 'SemGroup', 'Catalent', 'Quorum Health', 'Universal', 'Nordson', 'ResMed', 'Tower International', 'Freds', 'Foundation Building Materials', 'Kennametal', 'Autodesk', 'Ply Gem Holdings', 'Central Garden & 
Pet', 'Matson', 'EchoStar', 'Genesis Energy', 'SVB Financial Group', 'Itron', 'Portland General Electric', 'California Resources', 'Esterline Technologies', 'Delta Tucker Holdings', 'AMN Healthcare Services', 'Griffon', 'Valhi', 'Hexcel', 'IDEXX Laboratories', 'Deluxe', 'M/I Homes', 'Kraton', 'Stewart Information Services', 'Marriott Vacations Worldwide', 'SPX FLOW', 'ACCO Brands', 'Echo Global Logistics', 'Cadence Design Systems', 'Nuance Communications', 'Finish Line', 'TransUnion', 'ServiceNow', 'Summit Materials', 'Engility Holdings', 'Ferrellgas Partners', 'Interactive Brokers Group', 'Stepan', 'Oceaneering International', 'Cimarex Energy', 'Rexnord', 'Beazer Homes USA', 'MKS Instruments', 'Vail Resorts', 'Ohio National Mutual', 'TopBuild', 'Brown & Brown', 'Aerojet Rocketdyne Holdings', 'Barnes & Noble Education', 'Superior Energy Services', 'VeriFone Systems', 'Childrens Place', 'Tribune Media', 'Healthcare Services Group', 'SiteOne Landscape Supply', 'Charles River Laboratories Intl', 'CoreLogic', 'Ensign Group', 'HCP'], 'Sector': ['Retailing', 'Energy', 'Financials', 'Technology', 'Health Care', 'Wholesalers', 'Health Care', 'Retailing', 'Telecommunications', 'Motor Vehicles & Parts', 'Motor Vehicles & Parts', 'Wholesalers', 'Energy', 'Wholesalers', 'Retailing', 'Telecommunications', 'Food & Drug Stores', 'Industrials', 'Food & Drug Stores', 'Financials', 'Financials', 'Technology', 'Retailing', 'Financials', 'Health Care', 'Financials', 'Aerospace & Defense', 'Energy', 'Health Care', 'Technology', 'Energy', 'Financials', 'Telecommunications', 'Technology', 'Technology', 'Financials', 'Health Care', 'Financials', 'Retailing', 'Retailing', 'Energy', 'Household Products', 'Financials', 'Transportation', 'Food, Beverages & Tobacco', 'Technology', 'Chemicals', 'Food, Beverages & Tobacco', 'Health Care', 'Transportation', 'Aerospace & Defense', 'Financials', 'Food & Drug Stores', 'Wholesalers', 'Media', 'Health Care', 'Health Care', 'Technology', 'Aerospace & 
Defense', 'Financials', 'Health Care', 'Technology', 'Health Care', 'Energy', 'Industrials', 'Financials', 'Financials', 'Financials', 'Financials', 'Financials', 'Transportation', 'Retailing', 'Health Care', 'Telecommunications', 'Transportation', 'Technology', 'Industrials', 'Health Care', 'Financials', 'Food, Beverages & Tobacco', 'Transportation', 'Technology', 'Wholesalers', 'Financials', 'Retailing', 'Financials', 'Food, Beverages & Tobacco', 'Food & Drug Stores', 'Apparel', 'Energy', 'Energy', 'Energy', 'Financials', 'Food & Drug Stores', 'Energy', 'Food, Beverages & Tobacco', 'Industrials', 'Media', 'Aerospace & Defense', 'Financials', 'Financials', 'Industrials', 'Financials', 'Financials', 'Energy', 'Financials', 'Technology', 'Food, Beverages & Tobacco', 'Media', 'Health Care', 'Health Care', 'Financials', 'Wholesalers', 'Food, Beverages & Tobacco', 'Energy', 'Health Care', 'Food, Beverages & Tobacco', 'Aerospace & Defense', 'Aerospace & Defense', 'Retailing', 'Wholesalers', 'Financials', 'Retailing', 'Materials', 'Energy', 'Energy', 'Hotels, Restaurants & Leisure', 'Wholesalers', 'Health Care', 'Health Care', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Technology', 'Retailing', 'Energy', 'Financials', 'Financials', 'Retailing', 'Retailing', 'Industrials', 'Transportation', 'Transportation', 'Business Services', 'Technology', 'Health Care', 'Energy', 'Health Care', 'Motor Vehicles & Parts', 'Industrials', 'Technology', 'Materials', 'Health Care', 'Engineering & Construction', 'Food, Beverages & Tobacco', 'Industrials', 'Financials', 'Retailing', 'Technology', 'Technology', 'Health Care', 'Business Services', 'Health Care', 'Household Products', 'Engineering & Construction', 'Financials', 'Telecommunications', 'Energy', 'Energy', 'Wholesalers', 'Health Care', 'Wholesalers', 'Retailing', 'Financials', 'Retailing', 'Financials', 'Energy', 'Wholesalers', 'Industrials', 'Health Care', 'Food & Drug Stores', 'Retailing', 'Food, Beverages 
& Tobacco', 'Retailing', 'Household Products', 'Energy', 'Transportation', 'Motor Vehicles & Parts', 'Business Services', 'Technology', 'Chemicals', 'Chemicals', 'Technology', 'Transportation', 'Materials', 'Technology', 'Household Products', 'Media', 'Health Care', 'Chemicals', 'Business Services', 'Technology', 'Business Services', 'Telecommunications', 'Industrials', 'Financials', 'Energy', 'Financials', 'Aerospace & Defense', 'Retailing', 'Financials', 'Engineering & Construction', 'Financials', 'Energy', 'Energy', 'Chemicals', 'Food, Beverages & Tobacco', 'Financials', 'Energy', 'Energy', 'Energy', 'Media', 'Business Services', 'Energy', 'Health Care', 'Aerospace & Defense', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Household Products', 'Technology', 'Engineering & Construction', 'Retailing', 'Energy', 'Energy', 'Financials', 'Retailing', 'Business Services', 'Financials', 'Wholesalers', 'Financials', 'Health Care', 'Financials', 'Apparel', 'Business Services', 'Energy', 'Health Care', 'Materials', 'Wholesalers', 'Retailing', 'Energy', 'Financials', 'Health Care', 'Financials', 'Financials', 'Business Services', 'Energy', 'Industrials', 'Energy', 'Household Products', 'Financials', 'Motor Vehicles & Parts', 'Technology', 'Materials', 'Financials', 'Chemicals', 'Transportation', 'Energy', 'Financials', 'Health Care', 'Energy', 'Energy', 'Energy', 'Retailing', 'Retailing', 'Energy', 'Food, Beverages & Tobacco', 'Aerospace & Defense', 'Materials', 'Retailing', 'Retailing', 'Hotels, Restaurants & Leisure', 'Retailing', 'Chemicals', 'Health Care', 'Transportation', 'Technology', 'Health Care', 'Wholesalers', 'Retailing', 'Motor Vehicles & Parts', 'Media', 'Technology', 'Technology', 'Industrials', 'Retailing', 'Retailing', 'Business Services', 'Engineering & Construction', 'Retailing', 'Financials', 'Wholesalers', 'Motor Vehicles & Parts', 'Financials', 'Financials', 'Health Care', 'Materials', 'Technology', 'Financials', 'Energy', 
'Technology', 'Chemicals', 'Financials', 'Materials', 'Financials', 'Energy', 'Household Products', 'Engineering & Construction', 'Retailing', 'Wholesalers', 'Wholesalers', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Retailing', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Telecommunications', 'Business Services', 'Energy', 'Health Care', 'Retailing', 'Financials', 'Wholesalers', 'Apparel', 'Retailing', 'Energy', 'Retailing', 'Energy', 'Financials', 'Materials', 'Engineering & Construction', 'Retailing', 'Engineering & Construction', 'Industrials', 'Financials', 'Energy', 'Chemicals', 'Wholesalers', 'Industrials', 'Financials', 'Wholesalers', 'Media', 'Wholesalers', 'Chemicals', 'Energy', 'Technology', 'Transportation', 'Financials', 'Wholesalers', 'Food, Beverages & Tobacco', 'Business Services', 'Industrials', 'Health Care', 'Food, Beverages & Tobacco', 'Retailing', 'Energy', 'Business Services', 'Financials', 'Health Care', 'Engineering & Construction', 'Financials', 'Wholesalers', 'Household Products', 'Energy', 'Household Products', 'Technology', 'Financials', 'Financials', 'Media', 'Financials', 'Food, Beverages & Tobacco', 'Energy', 'Aerospace & Defense', 'Chemicals', 'Food, Beverages & Tobacco', 'Energy', 'Energy', 'Food, Beverages & Tobacco', 'Transportation', 'Energy', 'Technology', 'Health Care', 'Retailing', 'Motor Vehicles & Parts', 'Motor Vehicles & Parts', 'Materials', 'Transportation', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Financials', 'Materials', 'Materials', 'Technology', 'Transportation', 'Technology', 'Materials', 'Aerospace & Defense', 'Media', 'Aerospace & Defense', 'Transportation', 'Media', 'Materials', 'Technology', 'Financials', 'Financials', 'Industrials', 'Aerospace & Defense', 'Health Care', 'Technology', 'Food, Beverages & Tobacco', 'Financials', 'Industrials', 'Apparel', 'Household Products', 'Retailing', 'Business Services', 'Retailing', 'Wholesalers', 'Materials', 
'Engineering & Construction', 'Energy', 'Wholesalers', 'Financials', 'Technology', 'Apparel', 'Retailing', 'Financials', 'Materials', 'Financials', 'Energy', 'Retailing', 'Financials', 'Financials', 'Materials', 'Technology', 'Engineering & Construction', 'Industrials', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Chemicals', 'Motor Vehicles & Parts', 'Financials', 'Chemicals', 'Media', 'Energy', 'Financials', 'Chemicals', 'Materials', 'Energy', 'Financials', 'Retailing', 'Financials', 'Materials', 'Financials', 'Business Services', 'Financials', 'Retailing', 'Business Services', 'Financials', 'Household Products', 'Financials', 'Financials', 'Retailing', 'Hotels, Restaurants & Leisure', 'Health Care', 'Telecommunications', 'Health Care', 'Financials', 'Financials', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Engineering & Construction', 'Food, Beverages & Tobacco', 'Technology', 'Financials', 'Financials', 'Household Products', 'Financials', 'Business Services', 'Transportation', 'Energy', 'Wholesalers', 'Energy', 'Technology', 'Financials', 'Business Services', 'Technology', 'Transportation', 'Energy', 'Business Services', 'Energy', 'Business Services', 'Energy', 'Financials', 'Business Services', 'Health Care', 'Retailing', 'Technology', 'Health Care', 'Retailing', 'Household Products', 'Retailing', 'Business Services', 'Food, Beverages & Tobacco', 'Household Products', 'Financials', 'Financials', 'Technology', 'Materials', 'Energy', 'Health Care', 'Technology', 'Telecommunications', 'Wholesalers', 'Materials', 'Technology', 'Materials', 'Technology', 'Apparel', 'Chemicals', 'Business Services', 'Apparel', 'Transportation', 'Transportation', 'Household Products', 'Financials', 'Energy', 'Hotels, Restaurants & Leisure', 'Technology', 'Energy', 'Food, Beverages & Tobacco', 'Financials', 'Aerospace & Defense', 'Engineering & Construction', 'Health Care', 'Financials', 'Technology', 'Retailing', 'Industrials', 'Retailing', 
'Hotels, Restaurants & Leisure', 'Food & Drug Stores', 'Technology', 'Technology', 'Food & Drug Stores', 'Technology', 'Apparel', 'Health Care', 'Hotels, Restaurants & Leisure', 'Technology', 'Technology', 'Energy', 'Health Care', 'Wholesalers', 'Financials', 'Energy', 'Materials', 'Wholesalers', 'Transportation', 'Business Services', 'Wholesalers', 'Engineering & Construction', 'Financials', 'Industrials', 'Financials', 'Technology', 'Wholesalers', 'Food, Beverages & Tobacco', 'Financials', 'Business Services', 'Technology', 'Financials', 'Retailing', 'Financials', 'Energy', 'Hotels, Restaurants & Leisure', 'Business Services', 'Technology', 'Technology', 'Apparel', 'Engineering & Construction', 'Retailing', 'Financials', 'Business Services', 'Media', 'Chemicals', 'Materials', 'Materials', 'Materials', 'Technology', 'Transportation', 'Business Services', 'Food & Drug Stores', 'Industrials', 'Health Care', 'Energy', 'Business Services', 'Health Care', 'Materials', 'Financials', 'Household Products', 'Materials', 'Retailing', 'Food, Beverages & Tobacco', 'Retailing', 'Financials', 'Materials', 'Engineering & Construction', 'Industrials', 'Energy', 'Business Services', 'Industrials', 'Industrials', 'Retailing', 'Household Products', 'Chemicals', 'Materials', 'Financials', 'Industrials', 'Industrials', 'Food, Beverages & Tobacco', 'Financials', 'Financials', 'Industrials', 'Transportation', 'Food, Beverages & Tobacco', 'Industrials', 'Energy', 'Technology', 'Transportation', 'Energy', 'Energy', 'Financials', 'Materials', 'Wholesalers', 'Motor Vehicles & Parts', 'Retailing', 'Media', 'Technology', 'Energy', 'Chemicals', 'Business Services', 'Media', 'Financials', 'Wholesalers', 'Energy', 'Media', 'Health Care', 'Technology', 'Financials', 'Financials', 'Financials', 'Aerospace & Defense', 'Aerospace & Defense', 'Materials', 'Materials', 'Industrials', 'Retailing', 'Technology', 'Food & Drug Stores', 'Energy', 'Health Care', 'Wholesalers', 'Chemicals', 'Chemicals', 
'Health Care', 'Technology', 'Transportation', 'Financials', 'Financials', 'Apparel', 'Chemicals', 'Retailing', 'Energy', 'Energy', 'Food, Beverages & Tobacco', 'Wholesalers', 'Materials', 'Business Services', 'Industrials', 'Transportation', 'Energy', 'Financials', 'Business Services', 'Business Services', 'Financials', 'Food, Beverages & Tobacco', 'Materials', 'Technology', 'Technology', 'Retailing', 'Motor Vehicles & Parts', 'Business Services', 'Financials', 'Business Services', 'Technology', 'Chemicals', 'Engineering & Construction', 'Transportation', 'Materials', 'Energy', 'Technology', 'Media', 'Financials', 'Business Services', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Media', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Health Care', 'Energy', 'Business Services', 'Hotels, Restaurants & Leisure', 'Chemicals', 'Energy', 'Health Care', 'Financials', 'Technology', 'Household Products', 'Media', 'Materials', 'Industrials', 'Industrials', 'Retailing', 'Financials', 'Food, Beverages & Tobacco', 'Media', 'Engineering & Construction', 'Engineering & Construction', 'Business Services', 'Energy', 'Chemicals', 'Hotels, Restaurants & Leisure', 'Health Care', 'Financials', 'Business Services', 'Retailing', 'Retailing', 'Financials', 'Wholesalers', 'Financials', 'Industrials', 'Financials', 'Technology', 'Health Care', 'Health Care', 'Technology', 'Materials', 'Wholesalers', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Financials', 'Financials', 'Transportation', 'Technology', 'Engineering & Construction', 'Financials', 'Apparel', 'Media', 'Energy', 'Technology', 'Retailing', 'Business Services', 'Financials', 'Wholesalers', 'Apparel', 'Hotels, Restaurants & Leisure', 'Industrials', 'Retailing', 'Household Products', 'Engineering & Construction', 'Technology', 'Materials', 'Health Care', 'Technology', 'Financials', 'Media', 'Materials', 'Technology', 'Technology', 'Financials', 'Chemicals', 'Energy', 'Retailing', 
'Transportation', 'Household Products', 'Health Care', 'Financials', 'Technology', 'Energy', 'Technology', 'Wholesalers', 'Transportation', 'Business Services', 'Industrials', 'Financials', 'Energy', 'Aerospace & Defense', 'Technology', 'Financials', 'Wholesalers', 'Business Services', 'Financials', 'Energy', 'Industrials', 'Transportation', 'Engineering & Construction', 'Energy', 'Hotels, Restaurants & Leisure', 'Energy', 'Media', 'Energy', 'Household Products', 'Retailing', 'Technology', 'Technology', 'Business Services', 'Energy', 'Industrials', 'Technology', 'Aerospace & Defense', 'Health Care', 'Financials', 'Financials', 'Materials', 'Technology', 'Apparel', 'Chemicals', 'Materials', 'Financials', 'Food & Drug Stores', 'Financials', 'Engineering & Construction', 'Technology', 'Technology', 'Energy', 'Health Care', 'Retailing', 'Media', 'Transportation', 'Technology', 'Industrials', 'Hotels, Restaurants & Leisure', 'Engineering & Construction', 'Industrials', 'Industrials', 'Retailing', 'Retailing', 'Industrials', 'Retailing', 'Energy', 'Energy', 'Apparel', 'Technology', 'Financials', 'Industrials', 'Chemicals', 'Energy', 'Financials', 'Technology', 'Energy', 'Wholesalers', 'Technology', 'Chemicals', 'Financials', 'Technology', 'Household Products', 'Technology', 'Financials', 'Industrials', 'Business Services', 'Retailing', 'Household Products', 'Household Products', 'Financials', 'Aerospace & Defense', 'Energy', 'Motor Vehicles & Parts', 'Industrials', 'Business Services', 'Motor Vehicles & Parts', 'Energy', 'Hotels, Restaurants & Leisure', 'Technology', 'Household Products', 'Business Services', 'Business Services', 'Financials', 'Technology', 'Business Services', 'Financials', 'Food, Beverages & Tobacco', 'Energy', 'Technology', 'Hotels, Restaurants & Leisure', 'Transportation', 'Business Services', 'Transportation', 'Telecommunications', 'Chemicals', 'Retailing', 'Wholesalers', 'Financials', 'Household Products', 'Financials', 'Transportation', 'Health 
Care', 'Financials', 'Energy', 'Transportation', 'Technology', 'Motor Vehicles & Parts', 'Health Care', 'Business Services', 'Financials', 'Technology', 'Health Care', 'Retailing', 'Technology', 'Transportation', 'Financials', 'Aerospace & Defense', 'Technology', 'Chemicals', 'Transportation', 'Energy', 'Health Care', 'Health Care', 'Food, Beverages & Tobacco', 'Industrials', 'Health Care', 'Motor Vehicles & Parts', 'Food & Drug Stores', 'Wholesalers', 'Industrials', 'Technology', 'Materials', 'Household Products', 'Transportation', 'Technology', 'Energy', 'Financials', 'Industrials', 'Energy', 'Energy', 'Aerospace & Defense', 'Aerospace & Defense', 'Health Care', 'Materials', 'Chemicals', 'Aerospace & Defense', 'Health Care', 'Media', 'Engineering & Construction', 'Chemicals', 'Financials', 'Hotels, Restaurants & Leisure', 'Industrials', 'Household Products', 'Transportation', 'Technology', 'Technology', 'Retailing', 'Business Services', 'Technology', 'Materials', 'Aerospace & Defense', 'Energy', 'Financials', 'Chemicals', 'Energy', 'Energy', 'Industrials', 'Engineering & Construction', 'Technology', 'Hotels, Restaurants & Leisure', 'Financials', 'Engineering & Construction', 'Financials', 'Aerospace & Defense', 'Retailing', 'Energy', 'Technology', 'Retailing', 'Media', 'Health Care', 'Wholesalers', 'Health Care', 'Business Services', 'Health Care', 'Financials'], 'Industry': ['General Merchandisers', 'Petroleum Refining', 'Insurance: Property and Casualty (Stock)', 'Computers, Office Equipment', 'Health Care: Insurance and Managed Care', 'Wholesalers: Health Care', 'Health Care: Pharmacy and Other Services', 'Internet Services and Retailing', 'Telecommunications', 'Motor Vehicles and Parts', 'Motor Vehicles and Parts', 'Wholesalers: Health Care', 'Petroleum Refining', 'Wholesalers: Health Care', 'General Merchandisers', 'Telecommunications', 'Food and Drug Stores', 'Industrial Machinery', 'Food and Drug Stores', 'Commercial Banks', 'Diversified Financials', 
'Internet Services and Retailing', 'Specialty Retailers: Other', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Commercial Banks', 'Aerospace and Defense', 'Petroleum Refining', 'Health Care: Insurance and Managed Care', 'Computer Software', 'Petroleum Refining', 'Commercial Banks', 'Telecommunications', 'Information Technology Services', 'Computers, Office Equipment', 'Insurance: Property and Casualty (Mutual)', 'Pharmaceuticals', 'Diversified Financials', 'General Merchandisers', 'Specialty Retailers: Other', 'Petroleum Refining', 'Household and Personal Products', 'Insurance: Life, Health (stock)', 'Mail, Package, and Freight Delivery', 'Food Consumer Products', 'Semiconductors and Other Electronic Components', 'Chemicals', 'Food Production', 'Health Care: Insurance and Managed Care', 'Mail, Package, and Freight Delivery', 'Aerospace and Defense', 'Insurance: Life, Health (stock)', 'Food and Drug Stores', 'Wholesalers: Food and Grocery', 'Entertainment', 'Health Care: Insurance and Managed Care', 'Pharmaceuticals', 'Computers, Office Equipment', 'Aerospace and Defense', 'Insurance: Property and Casualty (Stock)', 'Health Care: Insurance and Managed Care', 'Network and Other Communications Equipment', 'Health Care: Medical Facilities', 'Pipelines', 'Construction and Farm Machinery', 'Insurance: Property and Casualty (Mutual)', 'Commercial Banks', 'Insurance: Property and Casualty (Stock)', 'Insurance: Life, Health (Mutual)', 'Commercial Banks', 'Airlines', 'Specialty Retailers: Other', 'Health Care: Insurance and Managed Care', 'Telecommunications', 'Airlines', 'Internet Services and Retailing', 'Electronics, Electrical Equip.', 'Pharmaceuticals', 'Insurance: Property and Casualty (Stock)', 'Food Production', 'Airlines', 'Computer Software', 'Wholesalers: Electronics and Office Equipment', 'Insurance: Life, Health (Mutual)', 'Specialty Retailers: Apparel', 'Diversified Financials', 'Beverages', 'Food and Drug Stores', 'Apparel', 'Petroleum 
Refining', 'Energy', 'Utilities: Gas and Electric', 'Insurance: Life, Health (Mutual)', 'Food and Drug Stores', 'Mining, Crude-Oil Production', 'Food Production', 'Miscellaneous', 'Entertainment', 'Aerospace and Defense', 'Insurance: Property and Casualty (Stock)', 'Commercial Banks', 'Construction and Farm Machinery', 'Diversified Financials', 'Insurance: Life, Health (Mutual)', 'Pipelines', 'Insurance: Property and Casualty (Stock)', 'Computers, Office Equipment', 'Tobacco', 'Entertainment', 'Pharmaceuticals', 'Medical Products and Equipment', 'Insurance: Property and Casualty (Stock)', 'Wholesalers: Electronics and Office Equipment', 'Food Consumer Products', 'Pipelines', 'Pharmaceuticals', 'Food Consumer Products', 'Aerospace and Defense', 'Aerospace and Defense', 'General Merchandisers', 'Wholesalers: Food and Grocery', 'Commercial Banks', 'Specialty Retailers: Other', 'Packaging, Containers', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Wholesalers: Electronics and Office Equipment', 'Pharmaceuticals', 'Pharmaceuticals', 'Food Services', 'Food Services', 'Semiconductors and Other Electronic Components', 'Specialty Retailers: Other', 'Petroleum Refining', 'Diversified Financials', 'Insurance: Life, Health (stock)', 'Automotive Retailing, Services', 'Automotive Retailing, Services', 'Electronics, Electrical Equip.', 'Railroads', 'Airlines', 'Temporary Help', 'Scientific,Photographic and Control Equipment', 'Pharmaceuticals', 'Oil and Gas Equipment, Services', 'Health Care: Medical Facilities', 'Motor Vehicles and Parts', 'Industrial Machinery', 'Semiconductors and Other Electronic Components', 'Metals', 'Health Care: Insurance and Managed Care', 'Engineering, Construction', 'Tobacco', 'Construction and Farm Machinery', 'Insurance: Property and Casualty (Stock)', 'General Merchandisers', 'Computers, Office Equipment', 'Semiconductors and Other Electronic Components', 'Health Care: Medical Facilities', 'Financial Data 
Services', 'Medical Products and Equipment', 'Household and Personal Products', 'Engineering, Construction', 'Commercial Banks', 'Telecommunications', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Wholesalers: Electronics and Office Equipment', 'Health Care: Insurance and Managed Care', 'Wholesalers: Food and Grocery', 'General Merchandisers', 'Diversified Financials', 'Automotive Retailing, Services', 'Commercial Banks', 'Mining, Crude-Oil Production', 'Wholesalers: Diversified', 'Industrial Machinery', 'Health Care: Medical Facilities', 'Food and Drug Stores', 'Specialty Retailers: Apparel', 'Food Consumer Products', 'General Merchandisers', 'Household and Personal Products', 'Utilities: Gas and Electric', 'Transportation and Logistics', 'Motor Vehicles and Parts', 'Advertising, marketing', 'Information Technology Services', 'Chemicals', 'Chemicals', 'Semiconductors and Other Electronic Components', 'Transportation and Logistics', 'Packaging, Containers', 'Information Technology Services', 'Home Equipment, Furnishings', 'Entertainment', 'Health Care: Pharmacy and Other Services', 'Chemicals', 'Diversified Outsourcing Services', 'Semiconductors and Other Electronic Components', 'Waste Management', 'Telecommunications', 'Industrial Machinery', 'Insurance: Life, Health (stock)', 'Petroleum Refining', 'Real estate', 'Aerospace and Defense', 'Specialty Retailers: Apparel', 'Insurance: Life, Health (stock)', 'Homebuilders', 'Diversified Financials', 'Mining, Crude-Oil Production', 'Utilities: Gas and Electric', 'Chemicals', 'Food Consumer Products', 'Insurance: Property and Casualty (Stock)', 'Pipelines', 'Utilities: Gas and Electric', 'Mining, Crude-Oil Production', 'Entertainment', 'Financial Data Services', 'Energy', 'Pharmaceuticals', 'Aerospace and Defense', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Home Equipment, Furnishings', 'Internet Services and Retailing', 'Homebuilders', 'Specialty Retailers: Apparel', 'Utilities: Gas and 
Electric', 'Utilities: Gas and Electric', 'Insurance: Life, Health (stock)', 'General Merchandisers', 'Financial Data Services', 'Securities', 'Wholesalers: Health Care', 'Insurance: Life, Health (Mutual)', 'Medical Products and Equipment', 'Diversified Financials', 'Apparel', 'Diversified Outsourcing Services', 'Utilities: Gas and Electric', 'Pharmaceuticals', 'Metals', 'Wholesalers: Food and Grocery', 'Specialty Retailers: Other', 'Pipelines', 'Commercial Banks', 'Medical Products and Equipment', 'Diversified Financials', 'Insurance: Property and Casualty (Mutual)', 'Financial Data Services', 'Utilities: Gas and Electric', 'Industrial Machinery', 'Mining, Crude-Oil Production', 'Household and Personal Products', 'Commercial Banks', 'Motor Vehicles and Parts', 'Internet Services and Retailing', 'Metals', 'Commercial Banks', 'Chemicals', 'Railroads', 'Utilities: Gas and Electric', 'Insurance: Life, Health (stock)', 'Health Care: Medical Facilities', 'Energy', 'Mining, Crude-Oil Production', 'Utilities: Gas and Electric', 'Specialty Retailers: Other', 'Automotive Retailing, Services', 'Utilities: Gas and Electric', 'Beverages', 'Aerospace and Defense', 'Packaging, Containers', 'Specialty Retailers: Other', 'Specialty Retailers: Other', 'Hotels, Casinos, Resorts', 'Specialty Retailers: Other', 'Chemicals', 'Medical Products and Equipment', 'Railroads', 'Computer Software', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Internet Services and Retailing', 'Motor Vehicles and Parts', 'Entertainment', 'Computers, Office Equipment', 'Information Technology Services', 'Electronics, Electrical Equip.', 'Automotive Retailing, Services', 'Internet Services and Retailing', 'Waste Management', 'Engineering, Construction', 'Automotive Retailing, Services', 'Diversified Financials', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Insurance: Property and Casualty (Stock)', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Metals', 
'Semiconductors and Other Electronic Components', 'Diversified Financials', 'Utilities: Gas and Electric', 'Internet Services and Retailing', 'Chemicals', 'Insurance: Property and Casualty (Stock)', 'Metals', 'Insurance: Life, Health (stock)', 'Mining, Crude-Oil Production', 'Home Equipment, Furnishings', 'Engineering, Construction', 'Specialty Retailers: Other', 'Wholesalers: Health Care', 'Wholesalers: Food and Grocery', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Specialty Retailers: Other', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Telecommunications', 'Financial Data Services', 'Utilities: Gas and Electric', 'Medical Products and Equipment', 'Specialty Retailers: Other', 'Securities', 'Wholesalers: Diversified', 'Apparel', 'Automotive Retailing, Services', 'Pipelines', 'Automotive Retailing, Services', 'Energy', 'Insurance: Life, Health (stock)', 'Packaging, Containers', 'Engineering, Construction', 'Specialty Retailers: Other', 'Homebuilders', 'Construction and Farm Machinery', 'Insurance: Life, Health (Mutual)', 'Pipelines', 'Chemicals', 'Wholesalers: Diversified', 'Construction and Farm Machinery', 'Insurance: Life, Health (stock)', 'Wholesalers: Diversified', 'Publishing, Printing', 'Wholesalers: Food and Grocery', 'Chemicals', 'Energy', 'Semiconductors and Other Electronic Components', 'Airlines', 'Real estate', 'Wholesalers: Electronics and Office Equipment', 'Food Consumer Products', 'Advertising, marketing', 'Industrial Machinery', 'Medical Products and Equipment', 'Food Consumer Products', 'Specialty Retailers: Apparel', 'Utilities: Gas and Electric', 'Financial Data Services', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Engineering, Construction', 'Insurance: Property and Casualty (Stock)', 'Wholesalers: Diversified', 'Household and Personal Products', 'Utilities: Gas and Electric', 'Home Equipment, Furnishings', 'Information Technology Services', 'Insurance: Property and Casualty (Mutual)', 'Securities', 
'Entertainment', 'Insurance: Property and Casualty (Mutual)', 'Food Consumer Products', 'Utilities: Gas and Electric', 'Aerospace and Defense', 'Chemicals', 'Food Consumer Products', 'Petroleum Refining', 'Mining, Crude-Oil Production', 'Beverages', 'Trucking, Truck Leasing', 'Oil and Gas Equipment, Services', 'Computer Software', 'Health Care: Medical Facilities', 'Specialty Retailers: Other', 'Motor Vehicles and Parts', 'Motor Vehicles and Parts', 'Forest and Paper Products', 'Trucking, Truck Leasing', 'Food Services', 'Food Services', 'Diversified Financials', 'Packaging, Containers', 'Building Materials, Glass', 'Entertainment', 'Airlines', 'Network and Other Communications Equipment', 'Miscellaneous', 'Aerospace and Defense', 'Publishing, Printing', 'Aerospace and Defense', 'Transportation and Logistics', 'Entertainment', 'Packaging, Containers', 'Semiconductors and Other Electronic Components', 'Commercial Banks', 'Insurance: Property and Casualty (Stock)', 'Construction and Farm Machinery', 'Aerospace and Defense', 'Health Care: Medical Facilities', 'Information Technology Services', 'Beverages', 'Real estate', 'Industrial Machinery', 'Apparel', 'Household and Personal Products', 'Specialty Retailers: Apparel', 'Miscellaneous', 'Specialty Retailers: Other', 'Wholesalers: Diversified', 'Packaging, Containers', 'Engineering, Construction', 'Utilities: Gas and Electric', 'Wholesalers: Diversified', 'Securities', 'Computers, Office Equipment', 'Apparel', 'Automotive Retailing, Services', 'Commercial Banks', 'Packaging, Containers', 'Insurance: Property and Casualty (Stock)', 'Mining, Crude-Oil Production', 'General Merchandisers', 'Insurance: Property and Casualty (Stock)', 'Securities', 'Building Materials, Glass', 'Network and Other Communications Equipment', 'Homebuilders', 'Electronics, Electrical Equip.', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Chemicals', 'Motor Vehicles and Parts', 'Insurance: Property and Casualty (Stock)', 'Chemicals', 
'Entertainment', 'Utilities: Gas and Electric', 'Diversified Financials', 'Chemicals', 'Packaging, Containers', 'Energy', 'Real estate', 'Specialty Retailers: Apparel', 'Commercial Banks', 'Metals', 'Insurance: Life, Health (stock)', 'Financial Data Services', 'Insurance: Property and Casualty (Stock)', 'Specialty Retailers: Other', 'Diversified Outsourcing Services', 'Commercial Banks', 'Household and Personal Products', 'Insurance: Property and Casualty (Stock)', 'Securities', 'Specialty Retailers: Other', 'Food Services', 'Pharmaceuticals', 'Telecommunications', 'Health Care: Insurance and Managed Care', 'Insurance: Life, Health (Mutual)', 'Securities', 'Food Production', 'Hotels, Casinos, Resorts', 'Homebuilders', 'Food Production', 'Information Technology Services', 'Insurance: Property and Casualty (Stock)', 'Insurance: Property and Casualty (Stock)', 'Household and Personal Products', 'Commercial Banks', 'Financial Data Services', 'Transportation Equipment', 'Energy', 'Wholesalers: Health Care', 'Mining, Crude-Oil Production', 'Semiconductors and Other Electronic Components', 'Real estate', 'Financial Data Services', 'Computers, Office Equipment', 'Transportation Equipment', 'Mining, Crude-Oil Production', 'Diversified Outsourcing Services', 'Energy', 'Diversified Outsourcing Services', 'Mining, Crude-Oil Production', 'Real estate', 'Temporary Help', 'Health Care: Medical Facilities', 'Specialty Retailers: Other', 'Semiconductors and Other Electronic Components', 'Pharmaceuticals', 'Specialty Retailers: Other', 'Home Equipment, Furnishings', 'Specialty Retailers: Other', 'Temporary Help', 'Food Consumer Products', 'Toys, Sporting Goods', 'Insurance: Property and Casualty (Stock)', 'Diversified Financials', 'Computer Software', 'Forest and Paper Products', 'Mining, Crude-Oil Production', 'Health Care: Pharmacy and Other Services', 'Semiconductors and Other Electronic Components', 'Telecommunications', 'Wholesalers: Electronics and Office Equipment', 
'Packaging, Containers', 'Network and Other Communications Equipment', 'Metals', 'Information Technology Services', 'Apparel', 'Chemicals', 'Financial Data Services', 'Apparel', 'Transportation Equipment', 'Trucking, Truck Leasing', 'Toys, Sporting Goods', 'Insurance: Property and Casualty (Stock)', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Entertainment', 'Energy', 'Food Consumer Products', 'Securities', 'Aerospace and Defense', 'Engineering, Construction', 'Health Care: Medical Facilities', 'Commercial Banks', 'Internet Services and Retailing', 'Automotive Retailing, Services', 'Industrial Machinery', 'Specialty Retailers: Apparel', 'Hotels, Casinos, Resorts', 'Food and Drug Stores', 'Computers, Office Equipment', 'Scientific,Photographic and Control Equipment', 'Food and Drug Stores', 'Network and Other Communications Equipment', 'Apparel', 'Health Care: Pharmacy and Other Services', 'Food Services', 'Scientific,Photographic and Control Equipment', 'Information Technology Services', 'Energy', 'Health Care: Medical Facilities', 'Wholesalers: Diversified', 'Insurance: Property and Casualty (Stock)', 'Utilities: Gas and Electric', 'Packaging, Containers', 'Wholesalers: Diversified', 'Trucking, Truck Leasing', 'Education', 'Wholesalers: Diversified', 'Homebuilders', 'Real estate', 'Construction and Farm Machinery', 'Real estate', 'Information Technology Services', 'Wholesalers: Diversified', 'Beverages', 'Real estate', 'Diversified Outsourcing Services', 'Scientific,Photographic and Control Equipment', 'Insurance: Life, Health (stock)', 'Automotive Retailing, Services', 'Securities', 'Mining, Crude-Oil Production', 'Food Services', 'Financial Data Services', 'Computer Software', 'Semiconductors and Other Electronic Components', 'Apparel', 'Engineering, Construction', 'Specialty Retailers: Other', 'Insurance: Life, Health (stock)', 'Financial Data Services', 'Publishing, Printing', 'Chemicals', 'Building Materials, Glass', 'Packaging, Containers', 
'Packaging, Containers', 'Computer Software', 'Transportation and Logistics', 'Financial Data Services', 'Food and Drug Stores', 'Industrial Machinery', 'Medical Products and Equipment', 'Petroleum Refining', 'Financial Data Services', 'Health Care: Medical Facilities', 'Building Materials, Glass', 'Securities', 'Home Equipment, Furnishings', 'Building Materials, Glass', 'Specialty Retailers: Other', 'Food Consumer Products', 'Specialty Retailers: Other', 'Insurance: Life, Health (stock)', 'Building Materials, Glass', 'Homebuilders', 'Industrial Machinery', 'Energy', 'Diversified Outsourcing Services', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Household and Personal Products', 'Chemicals', 'Building Materials, Glass', 'Diversified Financials', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'Food Production', 'Securities', 'Securities', 'Electronics, Electrical Equip.', 'Transportation Equipment', 'Food Production', 'Industrial Machinery', 'Mining, Crude-Oil Production', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Pipelines', 'Oil and Gas Equipment, Services', 'Securities', 'Packaging, Containers', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Specialty Retailers: Apparel', 'Publishing, Printing', 'Internet Services and Retailing', 'Energy', 'Chemicals', 'Waste Management', 'Entertainment', 'Real estate', 'Wholesalers: Electronics and Office Equipment', 'Utilities: Gas and Electric', 'Entertainment', 'Pharmaceuticals', 'Computers, Office Equipment', 'Commercial Banks', 'Insurance: Property and Casualty (Mutual)', 'Insurance: Life, Health (stock)', 'Aerospace and Defense', 'Aerospace and Defense', 'Metals', 'Forest and Paper Products', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Semiconductors and Other Electronic Components', 'Food and Drug Stores', 'Utilities: Gas and Electric', 'Health Care: Pharmacy and Other Services', 'Wholesalers: 
Diversified', 'Chemicals', 'Chemicals', 'Medical Products and Equipment', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Insurance: Property and Casualty (Stock)', 'Insurance: Life, Health (stock)', 'Apparel', 'Chemicals', 'Specialty Retailers: Other', 'Utilities: Gas and Electric', 'Energy', 'Beverages', 'Wholesalers: Diversified', 'Metals', 'Financial Data Services', 'Electronics, Electrical Equip.', 'Trucking, Truck Leasing', 'Miscellaneous', 'Securities', 'Diversified Outsourcing Services', 'Diversified Outsourcing Services', 'Insurance: Property and Casualty (Mutual)', 'Food Production', 'Packaging, Containers', 'Information Technology Services', 'Internet Services and Retailing', 'Specialty Retailers: Apparel', 'Motor Vehicles and Parts', 'Diversified Outsourcing Services', 'Commercial Banks', 'Diversified Outsourcing Services', 'Information Technology Services', 'Chemicals', 'Homebuilders', 'Airlines', 'Building Materials, Glass', 'Mining, Crude-Oil Production', 'Scientific,Photographic and Control Equipment', 'Entertainment', 'Insurance: Life, Health (Mutual)', 'Diversified Outsourcing Services', 'Food Services', 'Hotels, Casinos, Resorts', 'Publishing, Printing', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Medical Products and Equipment', 'Mining, Crude-Oil Production', 'Miscellaneous', 'Hotels, Casinos, Resorts', 'Chemicals', 'Utilities: Gas and Electric', 'Medical Products and Equipment', 'Diversified Financials', 'Semiconductors and Other Electronic Components', 'Home Equipment, Furnishings', 'Entertainment', 'Metals', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'General Merchandisers', 'Securities', 'Beverages', 'Entertainment', 'Engineering, Construction', 'Engineering, Construction', 'Waste Management', 'Energy', 'Chemicals', 'Food Services', 'Health Care: Insurance and Managed Care', 'Commercial Banks', 'Diversified Outsourcing Services', 'Specialty Retailers: Other', 'Specialty Retailers: 
Apparel', 'Insurance: Life, Health (Mutual)', 'Wholesalers: Diversified', 'Securities', 'Industrial Machinery', 'Securities', 'Computer Software', 'Health Care: Medical Facilities', 'Medical Products and Equipment', 'Internet Services and Retailing', 'Metals', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Insurance: Life, Health (stock)', 'Real estate', 'Trucking, Truck Leasing', 'Information Technology Services', 'Homebuilders', 'Diversified Financials', 'Apparel', 'Entertainment', 'Pipelines', 'Network and Other Communications Equipment', 'Specialty Retailers: Apparel', 'Diversified Outsourcing Services', 'Real estate', 'Wholesalers: Diversified', 'Apparel', 'Food Services', 'Industrial Machinery', 'Specialty Retailers: Apparel', 'Home Equipment, Furnishings', 'Engineering, Construction', 'Scientific,Photographic and Control Equipment', 'Metals', 'Medical Products and Equipment', 'Information Technology Services', 'Commercial Banks', 'Entertainment', 'Building Materials, Glass', 'Scientific,Photographic and Control Equipment', 'Computer Software', 'Insurance: Property and Casualty (Stock)', 'Chemicals', 'Utilities: Gas and Electric', 'Specialty Retailers: Other', 'Airlines', 'Household and Personal Products', 'Health Care: Pharmacy and Other Services', 'Real estate', 'Semiconductors and Other Electronic Components', 'Utilities: Gas and Electric', 'Scientific,Photographic and Control Equipment', 'Wholesalers: Diversified', 'Airlines', 'Temporary Help', 'Industrial Machinery', 'Real estate', 'Mining, Crude-Oil Production', 'Aerospace and Defense', 'Semiconductors and Other Electronic Components', 'Real estate', 'Wholesalers: Diversified', 'Education', 'Insurance: Property and Casualty (Mutual)', 'Mining, Crude-Oil Production', 'Industrial Machinery', 'Railroads', 'Homebuilders', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Utilities: Gas and Electric', 'Entertainment', 'Utilities: Gas and Electric', 
'Miscellaneous', 'General Merchandisers', 'Computers, Office Equipment', 'Semiconductors and Other Electronic Components', 'Temporary Help', 'Pipelines', 'Construction and Farm Machinery', 'Internet Services and Retailing', 'Aerospace and Defense', 'Pharmaceuticals', 'Real estate', 'Insurance: Property and Casualty (Stock)', 'Packaging, Containers', 'Semiconductors and Other Electronic Components', 'Apparel', 'Chemicals', 'Forest and Paper Products', 'Real estate', 'Food and Drug Stores', 'Securities', 'Homebuilders', 'Information Technology Services', 'Internet Services and Retailing', 'Petroleum Refining', 'Health Care: Pharmacy and Other Services', 'Specialty Retailers: Other', 'Entertainment', 'Trucking, Truck Leasing', 'Computer Software', 'Electronics, Electrical Equip.', 'Hotels, Casinos, Resorts', 'Engineering, Construction', 'Industrial Machinery', 'Industrial Machinery', 'Specialty Retailers: Other', 'Specialty Retailers: Apparel', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Oil and Gas Equipment, Services', 'Energy', 'Apparel', 'Semiconductors and Other Electronic Components', 'Real estate', 'Industrial Machinery', 'Chemicals', 'Mining, Crude-Oil Production', 'Real estate', 'Semiconductors and Other Electronic Components', 'Mining, Crude-Oil Production', 'Wholesalers: Diversified', 'Scientific,Photographic and Control Equipment', 'Chemicals', 'Securities', 'Scientific,Photographic and Control Equipment', 'Household and Personal Products', 'Semiconductors and Other Electronic Components', 'Insurance: Life, Health (Mutual)', 'Industrial Machinery', 'Financial Data Services', 'Specialty Retailers: Apparel', 'Household and Personal Products', 'Home Equipment, Furnishings', 'Insurance: Life, Health (stock)', 'Aerospace and Defense', 'Energy', 'Motor Vehicles and Parts', 'Industrial Machinery', 'Miscellaneous', 'Motor Vehicles and Parts', 'Utilities: Gas and Electric', 'Food Services', 'Scientific,Photographic and Control Equipment', 
'Household and Personal Products', 'Financial Data Services', 'Financial Data Services', 'Diversified Financials', 'Internet Services and Retailing', 'Financial Data Services', 'Securities', 'Food Consumer Products', 'Mining, Crude-Oil Production', 'Computer Software', 'Food Services', 'Shipping', 'Financial Data Services', 'Railroads', 'Telecommunications', 'Chemicals', 'Specialty Retailers: Other', 'Wholesalers: Electronics and Office Equipment', 'Insurance: Property and Casualty (Mutual)', 'Home Equipment, Furnishings', 'Real estate', 'Transportation Equipment', 'Scientific,Photographic and Control Equipment', 'Real estate', 'Energy', 'Transportation and Logistics', 'Information Technology Services', 'Motor Vehicles and Parts', 'Medical Products and Equipment', 'Financial Data Services', 'Commercial Banks', 'Computer Software', 'Medical Products and Equipment', 'Specialty Retailers: Apparel', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Securities', 'Aerospace and Defense', 'Network and Other Communications Equipment', 'Chemicals', 'Trucking, Truck Leasing', 'Pipelines', 'Pharmaceuticals', 'Health Care: Medical Facilities', 'Tobacco', 'Industrial Machinery', 'Medical Products and Equipment', 'Motor Vehicles and Parts', 'Food and Drug Stores', 'Wholesalers: Diversified', 'Industrial Machinery', 'Computer Software', 'Building Materials, Glass', 'Household and Personal Products', 'Shipping', 'Network and Other Communications Equipment', 'Pipelines', 'Commercial Banks', 'Electronics, Electrical Equip.', 'Utilities: Gas and Electric', 'Mining, Crude-Oil Production', 'Aerospace and Defense', 'Aerospace and Defense', 'Health Care: Pharmacy and Other Services', 'Building Materials, Glass', 'Chemicals', 'Aerospace and Defense', 'Medical Products and Equipment', 'Publishing, Printing', 'Homebuilders', 'Chemicals', 'Insurance: Property and Casualty (Stock)', 'Hotels, Casinos, Resorts', 'Industrial Machinery', 'Home Equipment, Furnishings', 
'Transportation and Logistics', 'Computer Software', 'Computer Software', 'Specialty Retailers: Apparel', 'Financial Data Services', 'Computer Software', 'Building Materials, Glass', 'Aerospace and Defense', 'Energy', 'Securities', 'Chemicals', 'Oil and Gas Equipment, Services', 'Mining, Crude-Oil Production', 'Industrial Machinery', 'Homebuilders', 'Semiconductors and Other Electronic Components', 'Hotels, Casinos, Resorts', 'Insurance: Life, Health (stock)', 'Engineering, Construction', 'Insurance: Property and Casualty (Stock)', 'Aerospace and Defense', 'Specialty Retailers: Other', 'Oil and Gas Equipment, Services', 'Financial Data Services', 'Specialty Retailers: Apparel', 'Entertainment', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Health Care: Pharmacy and Other Services', 'Financial Data Services', 'Health Care: Medical Facilities', 'Real estate'], 'City': ['Bentonville', 'Irving', 'Omaha', 'Cupertino', 'Minnetonka', 'SF', 'Woonsocket', 'Seattle', 'Dallas', 'Detroit', 'Dearborn', 'Chesterbrook', 'San Ramon', 'Dublin', 'Issaquah', 'New York', 'Cincinnati', 'Boston', 'Deerfield', 'New York', 'Leavenworth', 'Mountain View', 'Atlanta', 'Charlotte', 'St. Louis', 'SF', 'Chicago', 'Houston', 'Indianapolis', 'Redmond', 'San Antonio', 'New York', 'Philadelphia', 'Armonk', 'Round Rock', 'Bloomington', 'New Brunswick', 'McLean', 'Minneapolis', 'Mooresville', 'Findlay', 'Cincinnati', 'New York', 'Atlanta', 'Harrison', 'Santa Clara', 'Midland', 'Chicago', 'Hartford', 'Memphis', 'Farmington', 'Newark', 'Boise', 'Houston', 'Burbank', 'Louisville', 'New York', 'Palo Alto', 'Bethesda', 'New York', 'St. 
Louis', 'San Jose', 'Nashville', 'Dallas', 'Deerfield', 'Columbus', 'New York', 'Boston', 'New York', 'New York', 'Fort Worth', 'Richfield', 'Bloomfield', 'Stamford', 'Atlanta', 'Menlo Park', 'Morris Plains', 'Kenilworth', 'Northbrook', 'Springdale', 'Chicago', 'Redwood City', 'Clearwater', 'New York', 'Framingham', 'New York', 'Atlanta', 'Lakeland', 'Beaverton', 'San Antonio', 'Miami', 'Chicago', 'Springfield', 'Camp Hill', 'Houston', 'Inver Grove Heights', 'St Paul', 'New York', 'Falls Church', 'San Antonio', 'McLean', 'Moline', 'New York', 'Milwaukee', 'Houston', 'New York', 'Palo Alto', 'New York', 'New York', 'North Chicago', 'Lake Bluff', 'Mayfield', 'Centennial', 'Pittsburgh', 'Houston', 'San Mateo', 'Deerfield', 'Falls Church', 'Waltham', 'Cincinnati', 'Rosemont', 'Minneapolis', 'Goodlettsville', 'Memphis', 'Charlotte', 'Atlanta', 'Bethesda', 'Phoenix', 'Indianapolis', 'Thousand Oaks', 'Oak Brook', 'Seattle', 'San Diego', 'Chesapeake', 'Parsippany-Troy Hills', 'New York', 'Columbus', 'Fort Lauderdale', 'Bloomfield Hills', 'Benton Harbor', 'Omaha', 'Dallas', 'Milwaukee', 'Waltham', 'New York', 'Houston', 'Dallas', 'Southfield', 'Columbus', 'Boise', 'Charlotte', 'Long Beach', 'Irving', 'Richmond', 'Bellevue', 'Hartford', 'Menomonee Falls', 'San Jose', 'St. Petersburg', 'Franklin', 'SF', 'Leavenworth', 'Irving', 'Los Angeles', 'Pittsburgh', 'Monroe', 'North Palm Beach', 'SF', 'Fremont', 'Tampa', 'Richmond', 'Hoffman Estates', 'Stamford', 'Richmond', 'New York City', 'Phoenix', 'Atlanta', 'St. Louis', 'Denver', 'Eden Prairie', 'SF', 'Minneapolis', 'Seattle', 'New York', 'Columbus', 'Greenwich', 'Akron', 'New York', 'Lincolnshire', 'Cleveland', 'Pittsburgh', 'Dallas', 'Eden Prairie', 'Atlanta', 'Teaneck', 'Hoboken', 'New York', 'Nashville', 'St. 
Louis', 'Philadelphia', 'Santa Clara', 'Houston', 'Englewood', 'Glenview', 'Wayne', 'Dallas', 'Los Angeles', 'Providence', 'Dublin', 'Des Moines', 'Arlington', 'New York', 'Oklahoma City', 'Arlington', 'St Paul', 'Arden Hills', 'New York', 'Houston', 'Akron', 'Houston', 'New York', 'San Jose', 'Tulsa', 'Summit', 'New York', 'Battle Creek', 'Las Vegas', 'New Britain', 'Norwalk', 'Miami', 'Columbus', 'Detroit', 'Richmond', 'Chesterfield', 'Plano', 'Harrison', 'New York', 'Melville', 'New York', 'Kalamazoo', 'New York', 'Greensboro', 'Roseland', 'Rosemead', 'Cambridge', 'Pittsburgh', 'South San Francisco', 'Union', 'Tulsa', 'Winston-Salem', 'Franklin Lakes', 'Minneapolis', 'Los Angeles', 'New York', 'New York', 'Cleveland', 'The Woodlands', 'New York', 'Boston', 'Palo Alto', 'Los Gatos', 'Pittsburgh', 'Riverwoods', 'Danbury', 'Jacksonville', 'Minneapolis', 'Chattanooga', 'King of Prussia', 'Princeton', 'Houston', 'San Diego', 'Wayne', 'Houston', 'New Orleans', 'Denver', 'New York', 'Broomfield', 'Memphis', 'El Dorado', 'Las Vegas', 'Boca Raton', 'The Woodlands', 'Deerfield', 'Norfolk', 'SF', 'Burlington', 'Lake Forest', 'Englewood', 'Auburn Hills', 'Beverly Hills', 'Norwalk', 'Reston', 'Corning', 'Medford', 'Bellevue', 'Phoenix', 'Dallas', 'Charlotte', 'Detroit', 'Chicago', 'Auburn Hills', 'Jacksonville', 'Atlanta', 'Durham', 'Los Angeles', 'Santa Clara', 'New York', 'Houston', 'San Jose', 'Kingsport', 'Madison', 'Fort Wayne', 'Newport Beach', 'Oklahoma City', 'Calhoun', 'Houston', 'Roanoke', 'Mechanicsville', 'Providence', 'Lake Forest', 'Chicago', 'Grapevine', 'Austin', 'McLean', 'Norwalk', 'Jacksonville', 'Newark', 'Marlboro', 'Springfield', 'SF', 'Waltham', 'New York', 'Parsippany-Troy Hills', 'Houston', 'Estero', 'Houston', 'Omaha', 'Philadelphia', 'Omaha', 'Coraopolis', 'Atlanta', 'Lisle', 'Minneapolis', 'Denver', 'Allentown', 'Atlanta', 'Duluth', 'Richmond', 'Downers Grove', 'New York', 'Byron Center', 'Houston', 'Tulsa', 'Fremont', 'Seattle', 'Chicago', 
'Glenview', 'Camden', 'New York', 'Downers Grove', 'Warsaw', 'Dallas', 'New York', 'Springfield', 'Plano', 'Cincinnati', 'Secaucus', 'Norwalk', 'Greenwich', 'Pittsburgh', 'New York', 'Milwaukee', 'Livonia', 'Tysons', 'Lansing', 'Des Peres', 'Englewood', 'Erie', 'Hershey', 'Allentown', 'Newport News', 'Plymouth', 'Orrville', 'Brentwood', 'Greenwood Village', 'Victor', 'Miami', 'Houston', 'San Jose', 'Brentwood', 'Brentwood', 'Elkhart', 'Maumee', 'Seattle', 'Lowell', 'Orlando', 'Plano', 'New York', 'Evansville', 'Dallas', 'Santa Monica', 'Wilco inc', 'Wallingford', 'El Segundo', 'Wichita', 'Chicago', 'Melbourne', 'Seattle', 'Silver Spring', 'Perrysburg', 'San Jose', 'Cleveland', 'Cincinnati', 'Oshkosh', 'Cedar Rapids', 'Louisville', 'Tempe', 'Plano', 'Boston', 'Everett', 'New York', 'New York', 'Mahwah', 'Stamford', 'Ankeny', 'St. Louis', 'Glendale', 'Coral Gables', 'Jackson', 'Atlanta', 'St. Petersburg', 'Atlanta', 'Winston-Salem', 'Duluth', 'Providence', 'Lake Forest', 'New York', 'Houston', 'Little Rock', 'New York', 'San Mateo', 'Toledo', 'Chicago', 'Reston', 'Milwaukee', 'Oak Brook', 'Las Vegas', 'Clayton', 'Detroit', 'Chicago', 'Wilmington', 'San Antonio', 'St. Louis', 'Rolling Meadows', 'Irving', 'Charlotte', 'King of Prussia', 'Madison', 'Burlington', 'Birmingham', 'Beckett Ridge', 'St Paul', 'New York', 'Glen Allen', 'Westlake', 'Florham Park', 'Buffalo', 'Oakland', 'New York', 'New York', 'Bolingbrook', 'Louisville', 'Tarrytown', 'Little Rock', 'Scottsdale', 'Cincinnati', 'Atlanta', 'Westchester', 'Parsippany-Troy Hills', 'Horsham', 'Merriam', 'McLean', 'Santa Ana', 'Fairfield', 'Rye', 'Chicago', 'Brookfield', 'Milwaukee', 'Houston', 'St Paul', 'St. Louis', 'Phoenix', 'Indianapolis', 'Englewood', 'Sunnyvale', 'Medina', 'Irving', 'New York', 'Irving', 'Cincinnati', 'New York', 'Bethesda', 'Troy', 'Kennett Square', 'Irving', 'Santa Clara', 'Parsippany-Troy Hills', 'SF', 'Deerfield', 'Columbus', 'Menlo Park', 'St. 
Louis', 'Pawtucket', 'Worcester', 'Wilmington', 'Mountain View', 'Fort Mill', 'Houston', 'North Kansas City', 'Norwood', 'Chicago', 'Deerfield', 'Hartsville', 'Sunnyvale', 'Irving', 'Falls Church', 'Baltimore', 'Medina', 'Columbus', 'SF', 'Libertyville', 'Overland Park', 'El Segundo', 'Johnston', 'Merrillville', 'Las Vegas', 'Redwood City', 'Houston', 'Sparks Glencoe', 'Baltimore', 'Sterling', 'Los Angeles', 'Brentwood', 'Columbus', 'Boston', 'New Braunfels', 'Rye Brook', 'Dallas', 'Chicago', 'Phoenix', 'North Canton', 'Sarasota', 'Commerce', 'Newton', 'New York City', 'Flint', 'Denver', 'Santa Clara', 'Reston', 'Bismarck', 'Mechanicsburg', 'Boise', 'New York', 'Cayce', 'Atlanta', 'Winona', 'Green Bay', 'Baltimore', 'Herndon', 'Los Angeles', 'Redwood City', 'Westport', 'Houston', 'Arlington', 'Miami', 'Charlotte', 'Toledo', 'Boca Raton', 'Berwyn', 'Carmel', 'Lincolnshire', 'Boston', 'Houston', 'Tampa', 'New York', 'Mountain View', 'Tempe', 'Manhattan Beach', 'Houston', 'New York', 'McKinney', 'Lake Success', 'Sussex', 'Deerfield', 'Scottsdale', 'Stamford', 'Neenah', 'New York', 'Oak Brook', 'Cincinnati', 'Black Mountain', 'Kenosha', 'York', 'Indianapolis', 'Atlanta', 'Birmingham', 'Raleigh', 'New York', 'Carthage', 'Grand Rapids', 'Denton', 'Thomasville', 'New York', 'West Des Moines', 'Birmingham', 'Scottsdale', 'Wilmerding', 'Houston', 'Boston', 'Richardson', 'Highland Heights', 'Pittsburgh', 'Ewing Township', 'West Palm Beach', 'Charlotte', 'Evansville', 'Annapolis Junction', 'Lincolnshire', 'Maumee', 'Omaha', 'Leavenworth', 'Shelton', 'Dallas', 'Irving', 'Irving', 'Denver', 'Woburn', 'Jacksonville', 'Houston', 'Houston', 'Chicago', 'Delaware', 'The Woodlands', 'Novi', 'Philadelphia', 'Chicago', 'Southlake', 'Omaha', 'Columbus', 'Lake Forest', 'New York', 'Chicago', 'Greenville', 'Phoenix', 'Knoxville', 'New Haven', 'Stamford', 'New York', 'Bloomington', 'Madison', 'Berwyn', 'Cleveland', 'Pittsburgh', 'Catawba', 'Atlanta', 'New Albany', 'Milpitas', 'Sunbury', 
'Bellevue', 'Fort Lauderdale', 'Carmel', 'Avon Lake', 'Philadelphia', 'Irvine', 'Chandler', 'Reno', 'Los Angeles', 'Galveston', 'Atlanta', 'New York', 'Atlanta', 'Madison', 'Pittsburgh', 'Corona', 'Atlanta', 'Chicago', 'Atlanta', 'Beloit', 'Thomasville', 'Voorhees Township', 'New York', 'Richmond', 'Richmond', 'Stevens Point', 'Laurel', 'Northbrook', 'Stamford', 'New York City', 'Houston', 'Rochester Hills', 'Houston', 'Dallas', 'San Leandro', 'Santa Clara', 'Covington', 'Scottsdale', 'St. George', 'Chicago', 'Spring', 'Santa Rosa', 'Knoxville', 'New York', 'Rochester', 'Dallas', 'Wyomissing', 'McLean', 'Van Buren Charter Township', 'Parsippany-Troy Hills', 'Sunnyvale', 'Oklahoma City', 'Houston', 'Las Vegas', 'Charlotte', 'Dallas', 'Marlboro', 'KCMO', 'Greensboro', 'Grand Rapids', 'New York', 'Columbus', 'North Canton', 'Milwaukee', 'San Diego', 'St. Louis', 'Louisville', 'Plano', 'Watsonville', 'Palm Beach Gardens', 'Norwell', 'Tempe', 'Marysville', 'Lebanon', 'San Juan', 'SF', 'Memphis', 'Merrimack', 'Nashville', 'Cleveland', 'Melville', 'Baltimore', 'Cleveland', 'New York', 'Fort Lauderdale', 'Franklin', 'Palo Alto', 'Chicago', 'Cleveland', 'Portsmouth', 'Findlay', 'Lake Success', 'Horsham', 'Los Angeles', 'Fort Smith', 'New York', 'Irvine', 'New York', 'New York', 'New York', 'Oklahoma City', 'Hanover', 'Columbus', 'Cincinnati', 'McLean', 'Covington', 'Richardson', 'Ann Arbor', 'Stamford', 'St. 
Louis', 'Lexington', 'Pasadena', 'San Diego', 'Omaha', 'Chicago', 'Blue Bell', 'Salt Lake City', 'Cockeysville', 'Nashville', 'Columbus', 'Mountain View', 'Chicago', 'Boston', 'KCMO', 'Plano', 'Honolulu', 'New York', 'Raleigh', 'Glendale', 'Costa Mesa', 'Evansville', 'Sunnyvale', 'Houston', 'Miramar', 'Calabasas', 'Cleveland', 'SF', 'Fort Worth', 'Thousand Oaks', 'Malvern', 'Boston', 'Cleveland', 'Arlington', 'Lincoln', 'Midland', 'White Plains', 'KCMO', 'Denver', 'Topeka', 'Las Vegas', 'Honolulu', 'McLean', 'Las Vegas', 'Farmington', 'York', 'San Jose', 'Neenah', 'Tacoma', 'Tulsa', 'Bloomington', 'Cambridge', 'Elma Center', 'Boston', 'Chicago', 'Branchville', 'Crystal Lake', 'Scottsdale', 'Portland', 'Fairlawn', 'Miamisburg', 'SF', 'Pittsburgh', 'New York', 'Red Bank', 'Reston', 'SF', 'Houston', 'Waltham', 'Corte Madera', 'Irving', 'Phoenix', 'Raleigh', 'St. Louis', 'Las Vegas', 'Dallas', 'Milwaukee', 'Minneapolis', 'Elmsford', 'New York', 'Reading', 'Los Angeles', 'Houston', 'Leavenworth', 'Rockford', 'San Jose', 'New York', 'Elgin', 'Waterford', 'Cleveland', 'Chicago', 'San Jose', 'St. 
Louis', 'Tucker', 'Milford', 'St Paul', 'West Palm Beach', 'Waltham', 'Chesterfield', 'San Jose', 'New Haven', 'Lake Forest', 'KCMO', 'Fort Myers', 'Provo', 'Zeeland', 'Montpelier', 'Charlotte', 'Wall Township', 'Milwaukee', 'Memphis', 'Boca Raton', 'Indianapolis', 'Oklahoma City', 'Calabasas', 'Raleigh', 'Orlando', 'Leawood', 'Norcross', 'Coppell', 'Scottsdale', 'Pleasanton', 'Chicago', 'Charlotte', 'El Dorado', 'Hoffman Estates', 'Louisville', 'Houston', 'SF', 'Darien', 'Boulder', 'Richmond', 'Commerce', 'El Segundo', 'Owatonna', 'Muscatine', 'Newton', 'Lake Oswego', 'Hercules', 'Arlington', 'Ames', 'Harrison', 'Dayton', 'Elkhart', 'Wayne', 'Jersey City', 'Hato Rey', 'Pleasanton', 'Pleasanton', 'Columbus', 'North Reading', 'Omaha', 'Los Angeles', 'Fort Collins', 'Seattle', 'Lexington', 'Downers Grove', 'Tulsa', 'Franklin Township', 'Brentwood', 'Richmond', 'Westlake', 'San Diego', 'Livonia', 'Memphis', 'Tustin', 'Pittsburgh', 'San Rafael', 'Cary', 'Walnut Creek', 'Honolulu', 'Englewood', 'Houston', 'Santa Clara', 'Liberty Lake', 'Portland', 'Los Angeles', 'Bellevue', 'McLean', 'San Diego', 'New York', 'Dallas', 'Stamford', 'Westbrook', 'Shoreview', 'Columbus', 'Houston', 'Houston', 'Orlando', 'Charlotte', 'Lake Zurich', 'Chicago', 'San Jose', 'Burlington', 'Indianapolis', 'Chicago', 'Santa Clara', 'Denver', 'Chantilly', 'Overland Park', 'Greenwich', 'Northfield', 'Houston', 'Denver', 'Milwaukee', 'Atlanta', 'Andover', 'Broomfield', 'Cincinnati', 'Daytona Beach', 'Daytona Beach', 'El Segundo', 'Bernards', 'Houston', 'San Jose', 'Secaucus', 'Chicago', 'Bensalem', 'Roswell', 'Wilmington', 'Irvine', 'Mission Viejo', 'Irvine'], 'State': ['AR', 'TX', 'NE', 'CA', 'MN', 'CA', 'RI', 'WA', 'TX', 'MI', 'MI', 'PA', 'CA', 'OH', 'WA', 'NY', 'OH', 'MA', 'IL', 'NY', 'WA', 'CA', 'GA', 'NC', 'MO', 'CA', 'IL', 'TX', 'IN', 'WA', 'TX', 'NY', 'PA', 'NY', 'TX', 'IL', 'NJ', 'VA', 'MN', 'NC', 'OH', 'OH', 'NY', 'GA', 'NY', 'CA', 'Michigan', 'IL', 'CT', 'TN', 'CT', 'NJ', 'ID', 'TX', 'CA', 
'KY', 'NY', 'CA', 'MD', 'NY', 'MO', 'CA', 'TN', 'TX', 'IL', 'OH', 'NY', 'MA', 'NY', 'NY', 'TX', 'MN', 'CT', 'CT', 'GA', 'CA', 'NJ', 'NJ', 'IL', 'AR', 'IL', 'CA', 'FL', 'NY', 'MA', 'NY', 'GA', 'FL', 'OR', 'TX', 'FL', 'IL', 'MA', 'PA', 'TX', 'MN', 'MN', 'NY', 'VA', 'TX', 'VA', 'IL', 'NY', 'WI', 'TX', 'NY', 'CA', 'NY', 'NY', 'IL', 'IL', 'OH', 'CO', 'PA', 'TX', 'CA', 'IL', 'VA', 'MA', 'OH', 'IL', 'MN', 'TN', 'TN', 'NC', 'GA', 'MD', 'AZ', 'IN', 'CA', 'IL', 'WA', 'CA', 'VA', 'NJ', 'NY', 'GA', 'FL', 'MI', 'MI', 'NE', 'TX', 'WI', 'MA', 'NY', 'TX', 'TX', 'MI', 'IN', 'ID', 'NC', 'CA', 'TX', 'VA', 'WA', 'CT', 'WI', 'CA', 'FL', 'TN', 'CA', 'WA', 'TX', 'CA', 'PA', 'LA', 'FL', 'CA', 'CA', 'FL', 'VA', 'IL', 'CT', 'VA', 'NY', 'AZ', 'GA', 'MO', 'CO', 'MN', 'CA', 'MN', 'WA', 'NY', 'OH', 'CT', 'OH', 'NY', 'IL', 'OH', 'PA', 'TX', 'MN', 'GA', 'NJ', 'NJ', 'NY', 'TN', 'MO', 'PA', 'CA', 'TX', 'CO', 'IL', 'PA', 'TX', 'CA', 'RI', 'CA', 'IA', 'TX', 'NY', 'OK', 'VA', 'MN', 'MN', 'NY', 'TX', 'OH', 'TX', 'NY', 'CA', 'OK', 'NJ', 'NY', 'MI', 'NV', 'CT', 'CT', 'FL', 'OH', 'MI', 'VA', 'MO', 'TX', 'NY', 'NY', 'NY', 'NY', 'MI', 'NY', 'NC', 'NJ', 'CA', 'MA', 'PA', 'CA', 'NJ', 'OK', 'NC', 'NJ', 'MN', 'CA', 'NY', 'NY', 'OH', 'TX', 'NY', 'MA', 'CA', 'CA', 'PA', 'IL', 'CT', 'FL', 'MN', 'TN', 'PA', 'NJ', 'TX', 'CA', 'NJ', 'TX', 'LA', 'CO', 'NY', 'CO', 'TN', 'AR', 'NV', 'FL', 'TX', 'IL', 'VA', 'CA', 'NC', 'IL', 'CO', 'MI', 'CA', 'CT', 'VA', 'NY', 'OR', 'WA', 'AZ', 'TX', 'NC', 'MI', 'IL', 'MI', 'FL', 'GA', 'NC', 'CA', 'CA', 'NY', 'TX', 'CA', 'TN', 'WI', 'IN', 'CA', 'OK', 'GA', 'TX', 'VA', 'VA', 'RI', 'IL', 'IL', 'TX', 'MN', 'VA', 'CT', 'FL', 'NJ', 'MA', 'MO', 'CA', 'MA', 'NY', 'NJ', 'TX', 'FL', 'TX', 'NE', 'PA', 'NE', 'PA', 'GA', 'IL', 'MN', 'CO', 'PA', 'GA', 'GA', 'VA', 'IL', 'NY', 'MI', 'TX', 'OK', 'CA', 'WA', 'IL', 'IL', 'NJ', 'NY', 'IL', 'IN', 'TX', 'NY', 'MA', 'TX', 'OH', 'NJ', 'CT', 'CT', 'PA', 'NY', 'WI', 'MI', 'VA', 'MI', 'MO', 'CO', 'PA', 'PA', 'PA', 'VA', 'MN', 'OH', 'TN', 'CO', 'NY', 'FL', 'TX', 
'CA', 'TN', 'TN', 'IN', 'OH', 'WA', 'AR', 'FL', 'TX', 'NY', 'IN', 'TX', 'CA', 'NY', 'CT', 'CA', 'KS', 'IL', 'FL', 'WA', 'MD', 'OH', 'CA', 'OH', 'OH', 'WI', 'IA', 'KY', 'AZ', 'TX', 'MA', 'WA', 'NY', 'NY', 'NJ', 'CT', 'IA', 'MO', 'CA', 'FL', 'MI', 'GA', 'FL', 'GA', 'NC', 'GA', 'RI', 'IL', 'NY', 'TX', 'AR', 'NY', 'CA', 'OH', 'IL', 'VA', 'WI', 'IL', 'NV', 'MO', 'MI', 'IL', 'DE', 'TX', 'MO', 'IL', 'TX', 'NC', 'PA', 'NJ', 'NJ', 'AL', 'OH', 'MN', 'NY', 'VA', 'OH', 'NJ', 'NY', 'CA', 'NY', 'NY', 'IL', 'KY', 'NY', 'AR', 'AZ', 'OH', 'GA', 'IL', 'NJ', 'PA', 'KS', 'VA', 'CA', 'OH', 'NY', 'IL', 'WI', 'WI', 'TX', 'MN', 'MO', 'AZ', 'IN', 'CO', 'CA', 'MN', 'TX', 'NY', 'TX', 'OH', 'NY', 'MD', 'MI', 'PA', 'TX', 'CA', 'NJ', 'CA', 'IL', 'OH', 'CA', 'MO', 'RI', 'MA', 'DE', 'CA', 'SC', 'TX', 'MO', 'MA', 'IL', 'IL', 'SC', 'CA', 'TX', 'VA', 'MD', 'OH', 'GA', 'CA', 'IL', 'KS', 'CA', 'RI', 'IN', 'NV', 'CA', 'TX', 'MD', 'MD', 'VA', 'CA', 'TN', 'OH', 'MA', 'TX', 'NY', 'TX', 'IL', 'AZ', 'OH', 'FL', 'CA', 'NC', 'NY', 'MI', 'CO', 'CA', 'VA', 'ND', 'PA', 'ID', 'NY', 'SC', 'GA', 'MN', 'WI', 'MD', 'VA', 'CA', 'CA', 'CT', 'TX', 'VA', 'FL', 'NC', 'OH', 'FL', 'PA', 'IN', 'IL', 'MA', 'TX', 'FL', 'NY', 'CA', 'AZ', 'CA', 'TX', 'NY', 'TX', 'NY', 'WI', 'IL', 'AZ', 'CT', 'WI', 'NY', 'IL', 'OH', 'NC', 'WI', 'PA', 'IN', 'GA', 'AL', 'NC', 'NY', 'MO', 'MI', 'TX', 'GA', 'NY', 'IA', 'AL', 'AZ', 'PA', 'TX', 'MA', 'TX', 'KY', 'PA', 'NJ', 'FL', 'NC', 'IN', 'MD', 'IL', 'OH', 'NE', 'WA', 'CT', 'TX', 'TX', 'TX', 'CO', 'MA', 'FL', 'TX', 'TX', 'IL', 'OH', 'TX', 'MI', 'PA', 'IL', 'TX', 'NE', 'OH', 'IL', 'NY', 'IL', 'SC', 'AZ', 'TN', 'CT', 'CT', 'NY', 'IL', 'WI', 'PA', 'OH', 'PA', 'SC', 'GA', 'OH', 'CA', 'PA', 'WA', 'FL', 'IN', 'OH', 'PA', 'CA', 'AZ', 'NV', 'CA', 'TX', 'GA', 'NY', 'GA', 'WI', 'PA', 'CA', 'GA', 'IL', 'GA', 'WI', 'NC', 'NJ', 'NY', 'VA', 'VA', 'WI', 'MS', 'IL', 'CT', 'NY', 'TX', 'MI', 'TX', 'TX', 'CA', 'CA', 'KY', 'AZ', 'UT', 'IL', 'TX', 'CA', 'TN', 'NY', 'NY', 'TX', 'PA', 'VA', 'MI', 'NJ', 'CA', 'OK', 'TX', 
'NV', 'NC', 'TX', 'MA', 'MO', 'NC', 'MI', 'NY', 'OH', 'OH', 'WI', 'CA', 'MO', 'KY', 'TX', 'CA', 'FL', 'MA', 'AZ', 'OH', 'TN', 'Puerto Rico', 'CA', 'TN', 'NH', 'TN', 'OH', 'NY', 'MD', 'OH', 'NY', 'FL', 'TN', 'CA', 'IL', 'OH', 'NH', 'OH', 'NY', 'PA', 'CA', 'AR', 'NY', 'CA', 'NY', 'NY', 'NY', 'OK', 'MD', 'OH', 'OH', 'VA', 'LA', 'TX', 'MI', 'CT', 'MO', 'KY', 'CA', 'CA', 'NE', 'IL', 'PA', 'UT', 'MD', 'TN', 'OH', 'CA', 'IL', 'MA', 'MO', 'TX', 'HI', 'NY', 'NC', 'CA', 'CA', 'IN', 'CA', 'TX', 'FL', 'CA', 'OH', 'CA', 'TX', 'CA', 'PA', 'MA', 'OH', 'VA', 'RI', 'TX', 'NY', 'MO', 'CO', 'KS', 'NV', 'HI', 'VA', 'NV', 'UT', 'PA', 'CA', 'WI', 'WA', 'OK', 'MN', 'MA', 'NY', 'MA', 'IL', 'NJ', 'IL', 'AZ', 'OR', 'OH', 'OH', 'CA', 'PA', 'NY', 'NJ', 'VA', 'CA', 'TX', 'MA', 'CA', 'TX', 'AZ', 'NC', 'MO', 'NV', 'TX', 'WI', 'MN', 'NY', 'NY', 'PA', 'CA', 'TX', 'WA', 'MI', 'CA', 'NY', 'IL', 'NY', 'OH', 'IL', 'CA', 'MO', 'GA', 'MA', 'MN', 'FL', 'MA', 'MO', 'CA', 'CT', 'IL', 'MO', 'FL', 'UT', 'MI', 'VT', 'NC', 'NJ', 'WI', 'TN', 'FL', 'IN', 'OK', 'CA', 'NC', 'FL', 'KS', 'GA', 'TX', 'AZ', 'CA', 'IL', 'NC', 'AR', 'IL', 'KY', 'TX', 'CA', 'CT', 'CO', 'VA', 'CA', 'CA', 'MN', 'IA', 'MA', 'OR', 'CA', 'VA', 'IA', 'NY', 'OH', 'IN', 'PA', 'NJ', 'Puerto Rico', 'CA', 'CA', 'OH', 'MA', 'NE', 'CA', 'CO', 'WA', 'KY', 'IL', 'OK', 'NJ', 'TN', 'VA', 'OH', 'CA', 'MI', 'TN', 'CA', 'PA', 'CA', 'NC', 'CA', 'HI', 'CO', 'TX', 'CA', 'WA', 'OR', 'CA', 'WA', 'VA', 'CA', 'NY', 'TX', 'CT', 'ME', 'MN', 'OH', 'TX', 'TX', 'FL', 'NC', 'IL', 'IL', 'CA', 'MA', 'IN', 'IL', 'CA', 'CO', 'VA', 'KS', 'CT', 'IL', 'TX', 'CO', 'WI', 'GA', 'MA', 'CO', 'OH', 'FL', 'FL', 'CA', 'NJ', 'TX', 'CA', 'NJ', 'IL', 'PA', 'GA', 'MA', 'CA', 'CA', 'CA'], 'Latitude': [36.372853799999994, 32.814017699999994, 41.2565369, 37.322997799999996, 44.9211836, 37.7749295, 42.0028761, 47.6062095, 32.7766642, 42.331427000000005, 42.3222599, 40.0756627, 37.7799273, 40.0992294, 47.5301011, 40.7127753, 39.103118200000004, 42.360082500000004, 42.171136499999996, 
40.7127753, 47.751074100000004, 37.3860517, 33.7489954, 35.2270869, 38.6270025, 37.7749295, 41.8781136, 29.7604267, 39.768403, 47.6739881, 29.4241219, 40.7127753, 39.9525839, 41.1264849, 30.508255100000003, 40.4842027, 40.4862157, 38.933867600000006, 44.977753, 35.5848596, 41.04422, 39.103118200000004, 40.7127753, 33.7489954, 41.0400135, 37.354107899999995, 43.623574, 41.8781136, 41.76580429999999, 35.1495343, 41.7360305, 40.735657, 43.6150186, 29.7604267, 34.18083920000001, 38.252664700000004, 40.7127753, 37.441883399999995, 38.984652000000004, 40.7127753, 38.6270025, 37.338208200000004, 36.1626638, 32.7766642, 42.171136499999996, 39.9611755, 40.7127753, 42.360082500000004, 40.7127753, 40.7127753, 32.7554883, 44.8832982, 41.826488, 41.0534302, 33.7489954, 37.4529598, 40.839592200000006, 40.6764911, 42.127526700000004, 36.18674420000001, 41.8781136, 37.4852152, 27.9658533, 40.7127753, 42.279286, 40.7127753, 33.7489954, 28.039465399999997, 45.4887993, 29.4241219, 25.7616798, 41.8781136, 42.1014831, 40.2398118, 29.7604267, 44.8480218, 44.953702899999996, 40.7127753, 38.882334, 29.4241219, 38.933867600000006, 41.5067003, 40.7127753, 43.0389025, 29.7604267, 40.7127753, 37.441883399999995, 40.7127753, 40.7127753, 42.325578, 42.304505, 41.55199520000001, 39.5807452, 40.440624799999995, 29.7604267, 37.558546500000006, 42.171136499999996, 38.882334, 42.376485200000005, 39.103118200000004, 41.9867507, 44.977753, 36.3231066, 35.1495343, 35.2270869, 33.7489954, 38.984652000000004, 33.4483771, 39.768403, 34.1705609, 41.8397865, 47.6062095, 32.715738, 36.7682088, 40.865286499999996, 40.7127753, 32.4609764, 26.122438600000002, 42.583645000000004, 42.1167065, 41.2565369, 32.7766642, 43.0389025, 42.376485200000005, 40.7127753, 29.7604267, 32.7766642, 42.473368799999996, 39.201440399999996, 43.6150186, 35.2270869, 33.770050399999995, 32.814017699999994, 37.540724600000004, 47.6101497, 41.76580429999999, 43.1788967, 37.338208200000004, 27.767600800000004, 35.9250637, 37.7749295, 
47.751074100000004, 32.814017699999994, 34.0522342, 40.440624799999995, 32.5093109, 26.879781899999998, 37.7749295, 37.548269700000006, 27.950575, 37.540724600000004, 42.062991499999995, 41.0534302, 37.540724600000004, 40.7127753, 33.4483771, 33.7489954, 38.6270025, 39.739235799999996, 44.8546856, 37.7749295, 44.977753, 47.6062095, 40.7127753, 39.9611755, 41.0262417, 41.081444700000006, 40.7127753, 42.190024900000004, 41.499320000000004, 40.440624799999995, 32.7766642, 44.8546856, 33.7489954, 40.8932469, 40.7439905, 40.7127753, 36.1626638, 38.6270025, 39.9525839, 37.354107899999995, 29.7604267, 39.647765299999996, 42.069750899999995, 40.0462208, 32.7766642, 34.0522342, 41.8239891, 37.7021521, 41.600544799999994, 32.735687, 40.7127753, 35.4675602, 38.8816208, 44.953702899999996, 45.0502435, 40.7127753, 29.7604267, 41.081444700000006, 29.7604267, 40.7127753, 37.338208200000004, 36.1539816, 40.714637599999996, 40.7127753, 42.32115220000001, 36.169941200000004, 41.6612104, 41.117744, 25.7616798, 39.9611755, 42.331427000000005, 37.540724600000004, 38.6631083, 33.0198431, 41.0400135, 40.7127753, 40.793432200000005, 40.7127753, 42.291706899999994, 40.7127753, 36.072635399999996, 40.8206555, 34.0805651, 42.373615799999996, 40.440624799999995, 37.654656, 40.697589799999996, 36.1539816, 36.0998596, 41.016763899999994, 44.977753, 34.165357, 40.7127753, 40.7127753, 41.499320000000004, 30.1658207, 40.7127753, 42.360082500000004, 37.441883399999995, 37.235807799999996, 40.440624799999995, 42.167525399999995, 41.394816999999996, 30.3321838, 44.977753, 35.0456297, 40.101285600000004, 40.3572976, 29.7604267, 32.715738, 40.925372499999995, 29.7604267, 29.9510658, 39.739235799999996, 40.7127753, 39.9205411, 35.1495343, 33.20763, 36.169941200000004, 26.3683064, 30.1658207, 42.171136499999996, 36.8507689, 37.7749295, 36.0956918, 42.2586342, 39.647765299999996, 42.6875323, 34.073620399999996, 41.117744, 38.9586307, 42.1428521, 42.3265152, 47.6101497, 33.4483771, 32.7766642, 35.2270869, 
42.331427000000005, 41.8781136, 42.6875323, 30.3321838, 33.7489954, 35.980513, 34.0522342, 37.354107899999995, 40.7127753, 29.7604267, 37.338208200000004, 36.548434, 43.07305170000001, 41.079273, 33.618882899999996, 35.4675602, 34.502587, 29.7604267, 37.270970399999996, 37.6087561, 41.8239891, 42.2586342, 41.8781136, 32.9342919, 43.6666296, 38.933867600000006, 41.117744, 30.3321838, 40.735657, 42.3459271, 37.20895720000001, 37.7749295, 42.376485200000005, 40.7127753, 40.865286499999996, 29.7604267, 26.438136, 29.7604267, 41.2565369, 39.9525839, 41.2565369, 40.5184013, 33.7489954, 41.801140999999994, 44.977753, 39.739235799999996, 40.6022939, 33.7489954, 34.0028786, 37.540724600000004, 41.8089191, 40.7127753, 42.812250799999994, 29.7604267, 36.1539816, 37.548269700000006, 47.6062095, 41.8781136, 42.069750899999995, 39.9259463, 40.7127753, 41.8089191, 41.2381, 32.7766642, 40.7127753, 42.1014831, 33.0198431, 39.103118200000004, 40.7895453, 41.117744, 41.0262417, 40.440624799999995, 40.7127753, 43.0389025, 42.36837, 38.9187222, 42.732535, 38.59722, 39.647765299999996, 42.1292241, 40.2859239, 40.6022939, 37.087082099999996, 45.01051939999999, 40.843666299999995, 36.0331164, 39.6172101, 42.982563299999995, 25.7616798, 29.7604267, 37.338208200000004, 36.0331164, 36.0331164, 41.6819935, 41.5628294, 47.6062095, 36.25535429999999, 28.5383355, 33.0198431, 40.7127753, 37.9715592, 32.7766642, 34.01945429999999, 40.744679, 41.45701079999999, 33.9191799, 37.6871761, 41.8781136, 28.0836269, 47.6062095, 38.9906657, 41.556996000000005, 37.338208200000004, 41.499320000000004, 39.103118200000004, 44.024706200000004, 41.9778795, 38.252664700000004, 33.4255104, 33.0198431, 42.360082500000004, 47.9789848, 40.7127753, 40.7127753, 41.0886216, 41.0534302, 41.7317884, 38.6270025, 34.1425078, 25.72149, 42.245869, 33.7489954, 27.767600800000004, 33.7489954, 36.0998596, 34.0028786, 41.8239891, 42.2586342, 40.7127753, 29.7604267, 34.7464809, 40.7127753, 37.562991700000005, 41.6528052, 
41.8781136, 38.9586307, 43.0389025, 41.8397865, 36.169941200000004, 38.6425518, 42.331427000000005, 41.8781136, 39.7390721, 29.4241219, 38.6270025, 42.0841936, 32.814017699999994, 35.2270869, 40.101285600000004, 40.7598227, 40.071222, 33.5206608, 39.332126200000005, 44.953702899999996, 40.7127753, 37.665978, 41.4553232, 40.787878000000006, 42.8864468, 37.8043637, 40.7127753, 40.7127753, 41.6986416, 38.252664700000004, 41.076207700000005, 34.7464809, 33.494170399999994, 39.103118200000004, 33.7489954, 41.8498339, 40.865286499999996, 40.1784422, 39.023616499999996, 38.933867600000006, 33.7454725, 39.345467299999996, 40.980653499999995, 41.8781136, 43.0605671, 43.0389025, 29.7604267, 44.953702899999996, 38.6270025, 33.4483771, 39.768403, 39.647765299999996, 37.368829999999996, 45.0352411, 32.814017699999994, 40.7127753, 32.814017699999994, 39.103118200000004, 40.7127753, 38.984652000000004, 42.6064095, 39.84677670000001, 32.814017699999994, 37.354107899999995, 40.865286499999996, 37.7749295, 42.171136499999996, 39.9611755, 37.4529598, 38.6270025, 41.878710999999996, 42.262593200000005, 39.7390721, 37.3860517, 35.007369700000005, 29.7604267, 39.1429081, 42.1943909, 41.8781136, 42.171136499999996, 34.3740431, 37.368829999999996, 32.814017699999994, 38.882334, 39.2903848, 41.143245, 32.4609764, 37.7749295, 42.2333571, 38.9822282, 33.9191799, 41.820519899999994, 41.482814399999995, 36.169941200000004, 37.4852152, 29.7604267, 39.530938899999995, 39.2903848, 38.962489899999994, 34.3058279, 36.0331164, 39.9611755, 42.360082500000004, 29.7030024, 41.0192641, 32.7766642, 41.8781136, 33.4483771, 40.875890999999996, 27.3364347, 34.0005691, 35.7344538, 40.7127753, 43.012527399999996, 39.739235799999996, 37.354107899999995, 38.9586307, 46.808326799999996, 40.2142565, 43.6150186, 40.7127753, 33.9657091, 33.7489954, 44.0553908, 44.51331879999999, 39.2903848, 38.9695545, 34.0522342, 37.4852152, 41.141471700000004, 29.7604267, 38.8816208, 25.7616798, 35.2270869, 41.6528052, 
26.3683064, 40.045823999999996, 39.978371, 42.190024900000004, 42.360082500000004, 29.7604267, 27.950575, 40.7127753, 37.3860517, 33.4255104, 33.884736100000005, 29.7604267, 40.7127753, 33.197246500000006, 40.7706572, 43.13418, 42.171136499999996, 33.494170399999994, 41.0534302, 44.1858193, 40.7127753, 41.8397865, 39.2807348, 35.6178951, 42.5847425, 39.9625984, 39.768403, 33.7489954, 33.5206608, 35.7795897, 40.7127753, 37.176446999999996, 42.9633599, 33.2148412, 30.8365815, 40.7127753, 41.5772115, 33.5206608, 33.494170399999994, 40.3909023, 29.7604267, 42.360082500000004, 32.948333500000004, 39.033116899999996, 40.440624799999995, 40.2599864, 26.7153424, 35.2270869, 37.9715592, 39.1202934, 42.190024900000004, 41.5628294, 41.2565369, 47.751074100000004, 41.3164856, 32.7766642, 32.814017699999994, 32.814017699999994, 39.739235799999996, 42.479261799999996, 30.3321838, 29.7604267, 29.7604267, 41.8781136, 40.2986724, 30.1658207, 42.48059, 39.9525839, 41.8781136, 32.9412363, 41.2565369, 39.9611755, 42.2586342, 40.7127753, 41.8781136, 34.8526176, 33.4483771, 35.96063839999999, 41.308274, 41.0534302, 40.7127753, 40.4842027, 43.07305170000001, 40.045823999999996, 41.499320000000004, 40.440624799999995, 34.85292329999999, 33.7489954, 40.0811745, 37.4323341, 40.862584999999996, 47.6101497, 26.1669711, 39.978371, 41.50531779999999, 39.9525839, 33.6845673, 33.3061605, 39.529632899999996, 34.0522342, 29.3013479, 33.7489954, 40.7127753, 33.7489954, 43.07305170000001, 40.440624799999995, 33.8752935, 33.7489954, 41.8781136, 33.7489954, 42.5083482, 35.8826369, 39.851944700000004, 40.7127753, 37.540724600000004, 37.540724600000004, 44.5235792, 31.694050899999997, 42.127526700000004, 41.0534302, 40.7127753, 29.7604267, 42.6583661, 30.0575359, 32.7766642, 37.7249296, 37.354107899999995, 39.083671200000005, 33.494170399999994, 37.0965278, 41.8781136, 30.079940500000003, 38.440428999999995, 35.96063839999999, 40.7127753, 43.156577899999995, 32.7766642, 40.329537, 38.933867600000006, 
42.2203171, 40.865286499999996, 37.368829999999996, 35.4675602, 29.7604267, 36.169941200000004, 35.2270869, 32.7766642, 42.3459271, 39.0997265, 36.072635399999996, 42.9633599, 40.7127753, 39.9611755, 40.875890999999996, 43.0389025, 32.715738, 38.6270025, 38.252664700000004, 33.0198431, 36.910231, 26.8233946, 42.1615157, 33.4255104, 40.2364486, 36.2081098, 18.465539399999997, 37.7749295, 35.1495343, 42.867869299999995, 36.1626638, 41.499320000000004, 40.793432200000005, 39.2903848, 41.499320000000004, 40.7127753, 26.122438600000002, 35.9250637, 37.441883399999995, 41.8781136, 41.499320000000004, 43.071755200000005, 41.04422, 40.7706572, 40.1784422, 34.0522342, 35.385924200000005, 40.7127753, 33.6845673, 40.7127753, 40.7127753, 40.7127753, 35.4675602, 39.1955042, 39.9611755, 39.103118200000004, 38.933867600000006, 30.4754702, 32.948333500000004, 42.2808256, 41.0534302, 38.6270025, 38.040583700000006, 34.1477849, 32.715738, 41.2565369, 41.8781136, 40.1523309, 40.760779299999996, 39.490001299999996, 36.1626638, 39.9611755, 37.3860517, 41.8781136, 42.360082500000004, 39.0997265, 33.0198431, 21.3069444, 40.7127753, 35.7795897, 34.1425078, 33.6412156, 37.9715592, 37.368829999999996, 29.7604267, 25.9860762, 34.1367208, 41.499320000000004, 37.7749295, 32.7554883, 34.1705609, 40.0362184, 42.360082500000004, 41.499320000000004, 38.8816208, 41.911012299999996, 31.9973456, 41.0339862, 39.0997265, 39.739235799999996, 39.0473451, 36.169941200000004, 21.3069444, 38.933867600000006, 36.169941200000004, 40.9804999, 39.9625984, 37.338208200000004, 44.1858193, 47.252876799999996, 36.1539816, 44.840798, 42.373615799999996, 42.82122879999999, 42.360082500000004, 41.8781136, 41.1464852, 42.2411344, 33.494170399999994, 45.5122308, 41.127833, 39.642836200000005, 37.7749295, 40.440624799999995, 40.7127753, 40.347054299999996, 38.9586307, 37.7749295, 29.7604267, 42.376485200000005, 37.92548060000001, 32.814017699999994, 33.4483771, 35.7795897, 38.6270025, 36.169941200000004, 32.7766642, 
43.0389025, 44.977753, 41.055096899999995, 40.7127753, 40.335648299999995, 34.0522342, 29.7604267, 47.751074100000004, 43.1200272, 37.338208200000004, 40.7127753, 42.035408399999994, 42.7925777, 41.499320000000004, 41.8781136, 37.338208200000004, 38.6270025, 33.8545479, 42.13985770000001, 44.953702899999996, 26.7153424, 42.376485200000005, 38.6631083, 37.338208200000004, 41.308274, 42.2586342, 39.0997265, 26.640628000000003, 40.233843799999995, 42.8125246, 44.2600593, 35.2270869, 40.160666600000006, 43.0389025, 35.1495343, 26.3683064, 39.768403, 35.4675602, 34.1372953, 35.7795897, 28.5383355, 38.966673, 33.969864, 32.954568699999996, 33.494170399999994, 37.6624312, 41.8781136, 35.2270869, 33.20763, 42.062991499999995, 38.252664700000004, 29.7604267, 37.7749295, 41.0771914, 40.0149856, 37.540724600000004, 34.0005691, 33.9191799, 44.085557200000004, 41.424473, 42.337041299999996, 45.4156817, 38.017144099999996, 38.8816208, 42.03078120000001, 41.0400135, 39.758947799999994, 41.6819935, 40.041599600000005, 40.7281575, 18.4225782, 37.6624312, 37.6624312, 39.9611755, 42.5750939, 41.2565369, 34.0522342, 40.5852602, 47.6062095, 38.040583700000006, 41.8089191, 36.1539816, 40.497603999999995, 36.0331164, 37.540724600000004, 41.4553232, 32.715738, 42.36837, 35.1495343, 33.7420005, 40.440624799999995, 37.9735346, 35.791540000000005, 37.9100783, 21.3069444, 39.647765299999996, 29.7604267, 37.354107899999995, 47.6743428, 45.5122308, 34.0522342, 47.6101497, 38.933867600000006, 32.715738, 40.7127753, 32.7766642, 41.0534302, 43.6770252, 45.0791325, 39.9611755, 29.7604267, 29.7604267, 28.5383355, 35.2270869, 42.1969689, 41.8781136, 37.338208200000004, 42.5047161, 39.768403, 41.8781136, 37.354107899999995, 39.739235799999996, 38.8942786, 38.9822282, 41.0262417, 42.09975, 29.7604267, 39.739235799999996, 43.0389025, 33.7489954, 42.6583356, 39.9205411, 39.103118200000004, 29.2108147, 29.2108147, 33.9191799, 40.7066174, 29.7604267, 37.338208200000004, 40.7895453, 41.8781136, 40.0994425, 
34.0232431, 42.5481714, 33.6845673, 33.596891299999996, 33.6845673], 'Longitude': [-94.2088172, -96.9488945, -95.93450340000001, -122.03218229999999, -93.46874890000001, -122.4194155, -71.5147839, -122.33207079999998, -96.7969879, -83.0457538, -83.1763145, -75.4590816, -121.9780153, -83.11407709999999, -122.03261909999999, -74.0059728, -84.5120196, -71.0588801, -87.8445119, -74.0059728, -120.7401385, -122.08385109999999, -84.3879824, -80.8431267, -90.1994042, -122.4194155, -87.62979820000001, -95.36980279999999, -86.158068, -122.12151200000001, -98.4936282, -74.0059728, -75.1652215, -73.71401949999999, -97.678896, -88.9936873, -74.4518188, -77.17726040000001, -93.2650108, -80.81007240000001, -83.6499321, -84.5120196, -74.0059728, -84.3879824, -73.71444770000001, -121.9552356, -84.232105, -87.62979820000001, -72.6733723, -90.0489801, -72.795027, -74.1723667, -116.2023137, -95.36980279999999, -118.30896609999998, -85.7584557, -74.0059728, -122.14301950000001, -77.09470920000001, -74.0059728, -90.1994042, -121.88632859999998, -86.78160159999999, -96.7969879, -87.8445119, -82.9987942, -74.0059728, -71.0588801, -74.0059728, -74.0059728, -97.3307658, -93.28300209999999, -72.73009449999999, -73.53873409999999, -84.3879824, -122.1817252, -74.4818698, -74.2907032, -87.82895479999999, -94.1288141, -87.62979820000001, -122.2363548, -82.8001026, -74.0059728, -71.4161565, -74.0059728, -84.3879824, -81.9498042, -122.80133319999999, -98.4936282, -80.1917902, -87.62979820000001, -72.589811, -76.91997420000001, -95.36980279999999, -93.0427153, -93.0899578, -74.0059728, -77.17109140000001, -98.4936282, -77.17726040000001, -90.5151342, -74.0059728, -87.9064736, -95.36980279999999, -74.0059728, -122.14301950000001, -74.0059728, -74.0059728, -87.8411818, -87.89607120000001, -81.4392828, -104.87717260000001, -79.9958864, -95.36980279999999, -122.2710788, -87.8445119, -77.17109140000001, -71.2356113, -84.5120196, -87.87216020000001, -93.2650108, -86.7133302, -90.0489801, -80.8431267, 
-84.3879824, -77.09470920000001, -112.07403729999999, -86.158068, -118.83759369999999, -87.9535534, -122.33207079999998, -117.1610838, -76.2874927, -74.41738769999999, -74.0059728, -84.9877094, -80.13731740000001, -83.24548829999999, -86.4541894, -95.93450340000001, -96.7969879, -87.9064736, -71.2356113, -74.0059728, -95.36980279999999, -96.7969879, -83.2218731, -85.9213796, -116.2023137, -80.8431267, -118.1937395, -96.9488945, -77.4360481, -122.2015159, -72.6733723, -88.1173132, -121.88632859999998, -82.6402915, -86.86888990000001, -122.4194155, -120.7401385, -96.9488945, -118.24368490000002, -79.9958864, -92.1193012, -80.0533743, -122.4194155, -121.98857190000001, -82.4571776, -77.4360481, -88.1227199, -73.53873409999999, -77.4360481, -74.0059728, -112.07403729999999, -84.3879824, -90.1994042, -104.990251, -93.470786, -122.4194155, -93.2650108, -122.33207079999998, -74.0059728, -82.9987942, -73.62819640000001, -81.51900529999999, -74.0059728, -87.90840390000001, -81.6943605, -79.9958864, -96.7969879, -93.470786, -84.3879824, -74.0116536, -74.0323626, -74.0059728, -86.78160159999999, -90.1994042, -75.1652215, -121.9552356, -95.36980279999999, -104.98775970000001, -87.7878408, -75.3599105, -96.7969879, -118.24368490000002, -71.4128343, -121.9357918, -93.6091064, -97.10806559999999, -74.0059728, -97.5164276, -77.0909809, -93.0899578, -93.15661120000001, -74.0059728, -95.36980279999999, -81.51900529999999, -95.36980279999999, -74.0059728, -121.88632859999998, -95.992775, -74.3646122, -74.0059728, -85.17971419999999, -115.13982959999998, -72.77954190000001, -73.4081575, -80.1917902, -82.9987942, -83.0457538, -77.4360481, -90.5770675, -96.6988856, -73.71444770000001, -74.0059728, -73.4151214, -74.0059728, -85.5872286, -74.0059728, -79.79197540000001, -74.2937594, -118.072846, -71.10973349999999, -79.9958864, -122.40774979999999, -74.2631635, -95.992775, -80.24421600000001, -74.2057011, -93.2650108, -118.6089752, -74.0059728, -74.0059728, -81.6943605, -95.4612625, 
-74.0059728, -71.0588801, -122.14301950000001, -121.96237509999999, -79.9958864, -87.897014, -73.45401109999999, -81.655651, -93.2650108, -85.3096801, -75.38355250000001, -74.66722259999999, -95.36980279999999, -117.1610838, -74.2765441, -95.36980279999999, -90.0715323, -104.990251, -74.0059728, -105.0866504, -90.0489801, -92.66626740000001, -115.13982959999998, -80.1289321, -95.4612625, -87.8445119, -76.28587259999999, -122.4194155, -79.43779909999999, -87.840625, -104.98775970000001, -83.23410279999999, -118.40035630000001, -73.4081575, -77.35700279999999, -77.05469029999999, -122.8755949, -122.2015159, -112.07403729999999, -96.7969879, -80.8431267, -83.0457538, -87.62979820000001, -83.23410279999999, -81.655651, -84.3879824, -78.90511, -118.24368490000002, -121.9552356, -74.0059728, -95.36980279999999, -121.88632859999998, -82.5618186, -89.4012302, -85.13935129999999, -117.9298493, -97.5164276, -84.9510542, -95.36980279999999, -79.9414266, -77.37331390000001, -71.4128343, -87.840625, -87.62979820000001, -97.07806540000001, -92.9746367, -77.17726040000001, -73.4081575, -81.655651, -74.1723667, -71.55228740000001, -93.2922989, -122.4194155, -71.2356113, -74.0059728, -74.41738769999999, -95.36980279999999, -81.8067523, -95.36980279999999, -95.93450340000001, -75.1652215, -95.93450340000001, -80.1667247, -84.3879824, -88.0747875, -93.2650108, -104.990251, -75.4714098, -84.3879824, -84.1446376, -77.4360481, -88.01117459999999, -74.0059728, -85.7228061, -95.36980279999999, -95.992775, -121.98857190000001, -122.33207079999998, -87.62979820000001, -87.7878408, -75.1196199, -74.0059728, -88.01117459999999, -85.85304690000001, -96.7969879, -74.0059728, -72.589811, -96.6988856, -84.5120196, -74.05652979999999, -73.4081575, -73.62819640000001, -79.9958864, -74.0059728, -87.9064736, -83.3527097, -77.2310925, -84.55553470000001, -90.448126, -104.98775970000001, -80.085059, -76.6502468, -75.4714098, -76.4730122, -93.4555093, -81.7640212, -86.7827772, -104.95081409999999, 
-77.40887940000002, -80.1917902, -95.36980279999999, -121.88632859999998, -86.7827772, -86.7827772, -85.9766671, -83.6538244, -122.33207079999998, -94.1307587, -81.3792365, -96.6988856, -74.0059728, -87.5710898, -96.7969879, -118.4911912, -73.94854240000001, -72.82307359999999, -118.4164652, -97.330053, -87.62979820000001, -80.6081089, -122.33207079999998, -77.026088, -83.627157, -121.88632859999998, -81.6943605, -84.5120196, -88.5426136, -91.66562320000001, -85.7584557, -111.9400054, -96.6988856, -71.0588801, -122.2020795, -74.0059728, -74.0059728, -74.1435843, -73.53873409999999, -93.6001278, -90.1994042, -118.255075, -80.2683838, -84.4013462, -84.3879824, -82.6402915, -84.3879824, -80.24421600000001, -84.1446376, -71.4128343, -87.840625, -74.0059728, -95.36980279999999, -92.2895948, -74.0059728, -122.32552539999999, -83.53786740000001, -87.62979820000001, -77.35700279999999, -87.9064736, -87.9535534, -115.13982959999998, -90.32372629999999, -83.0457538, -87.62979820000001, -75.5397878, -98.4936282, -90.1994042, -88.0131275, -96.9488945, -80.8431267, -75.38355250000001, -74.417097, -74.8648873, -86.80249, -84.41726659999999, -93.0899578, -74.0059728, -77.5063739, -81.9179173, -74.38820720000001, -78.8783689, -122.27111370000002, -74.0059728, -74.0059728, -88.0683955, -85.7584557, -73.85874609999999, -92.2895948, -111.9260519, -84.5120196, -84.3879824, -87.8806738, -74.41738769999999, -75.1285061, -94.69357009999999, -77.17726040000001, -117.867653, -84.56031870000001, -73.6837399, -87.62979820000001, -88.1064787, -87.9064736, -95.36980279999999, -93.0899578, -90.1994042, -112.07403729999999, -86.158068, -104.98775970000001, -122.03634960000001, -93.5824586, -96.9488945, -74.0059728, -96.9488945, -84.5120196, -74.0059728, -77.09470920000001, -83.1497751, -75.7116032, -96.9488945, -121.9552356, -74.41738769999999, -122.4194155, -87.8445119, -82.9987942, -122.1817252, -90.1994042, -71.38255579999999, -71.8022934, -75.5397878, -122.08385109999999, -80.9450759, 
-95.36980279999999, -94.5729781, -71.19896949999999, -87.62979820000001, -87.8445119, -80.0734005, -122.03634960000001, -96.9488945, -77.17109140000001, -76.6121893, -81.8552196, -84.9877094, -122.4194155, -87.9259058, -94.6707917, -118.4164652, -71.512617, -87.3328139, -115.13982959999998, -122.2363548, -95.36980279999999, -76.6458043, -76.6121893, -77.4380485, -118.45719740000001, -86.7827772, -82.9987942, -71.0588801, -98.1244531, -73.68346209999999, -96.7969879, -87.62979820000001, -112.07403729999999, -81.40233559999999, -82.5306527, -118.1597929, -81.3444573, -74.0059728, -83.6874562, -104.990251, -121.9552356, -77.35700279999999, -100.7837392, -77.0085876, -116.2023137, -74.0059728, -81.0739827, -84.3879824, -91.6663523, -88.0132958, -76.6121893, -77.38609759999999, -118.24368490000002, -122.2363548, -73.35790490000001, -95.36980279999999, -77.0909809, -80.1917902, -80.8431267, -83.53786740000001, -80.1289321, -75.4395931, -86.1180435, -87.90840390000001, -71.0588801, -95.36980279999999, -82.4571776, -74.0059728, -122.08385109999999, -111.9400054, -118.41090890000001, -95.36980279999999, -74.0059728, -96.6397822, -73.7176312, -88.22294000000001, -87.8445119, -111.9260519, -73.53873409999999, -88.462609, -74.0059728, -87.9535534, -84.3173878, -82.3212302, -87.8211854, -76.727745, -86.158068, -84.3879824, -86.80249, -78.6381787, -74.0059728, -94.3102228, -85.6680863, -97.13306829999999, -83.9787808, -74.0059728, -93.711332, -86.80249, -111.9260519, -79.8100472, -95.36980279999999, -71.0588801, -96.72985190000001, -84.45188540000001, -79.9958864, -74.7909125, -80.0533746, -80.8431267, -87.5710898, -76.7769324, -87.90840390000001, -83.6538244, -95.93450340000001, -120.7401385, -73.0931641, -96.7969879, -96.9488945, -96.9488945, -104.990251, -71.1522765, -81.655651, -95.36980279999999, -95.36980279999999, -87.62979820000001, -83.067965, -95.4612625, -83.47549129999999, -75.1652215, -87.62979820000001, -97.13417829999999, -95.93450340000001, -82.9987942, 
-87.840625, -74.0059728, -87.62979820000001, -82.3940104, -112.07403729999999, -83.9207392, -72.9278835, -73.53873409999999, -74.0059728, -88.9936873, -89.4012302, -75.4395931, -81.6943605, -79.9958864, -80.9111862, -84.3879824, -82.8087864, -121.8995741, -76.7944104, -122.2015159, -80.25659499999999, -86.1180435, -82.02820009999999, -75.1652215, -117.82650490000002, -111.8412502, -119.8138027, -118.24368490000002, -94.7976958, -84.3879824, -74.0059728, -84.3879824, -89.4012302, -79.9958864, -117.56643840000001, -84.3879824, -87.62979820000001, -84.3879824, -89.0317765, -80.0819879, -74.961517, -74.0059728, -77.4360481, -77.4360481, -89.574563, -89.1306124, -87.82895479999999, -73.53873409999999, -74.0059728, -95.36980279999999, -83.14993220000001, -95.19029859999999, -96.7969879, -122.1560768, -121.9552356, -84.5085536, -111.9260519, -113.5684164, -87.62979820000001, -95.41716009999999, -122.7140548, -83.9207392, -74.0059728, -77.6088465, -96.7969879, -75.96521170000001, -77.17726040000001, -83.4838244, -74.41738769999999, -122.03634960000001, -97.5164276, -95.36980279999999, -115.13982959999998, -80.8431267, -96.7969879, -71.55228740000001, -94.5785667, -79.79197540000001, -85.6680863, -74.0059728, -82.9987942, -81.40233559999999, -87.9064736, -117.1610838, -90.1994042, -85.7584557, -96.6988856, -121.7568946, -80.1386547, -70.7927832, -111.9400054, -83.3671432, -86.29110240000001, -66.1057355, -122.4194155, -90.0489801, -71.4948322, -86.78160159999999, -81.6943605, -73.4151214, -76.6121893, -81.6943605, -74.0059728, -80.13731740000001, -86.86888990000001, -122.14301950000001, -87.62979820000001, -81.6943605, -70.7625532, -83.6499321, -73.7176312, -75.1285061, -118.24368490000002, -94.39854749999999, -74.0059728, -117.82650490000002, -74.0059728, -74.0059728, -74.0059728, -97.5164276, -76.72282270000001, -82.9987942, -84.5120196, -77.17726040000001, -90.1009108, -96.72985190000001, -83.7430378, -73.53873409999999, -90.1994042, -84.50371640000002, -118.1445155, 
-117.1610838, -95.93450340000001, -87.62979820000001, -75.266289, -111.89104740000002, -76.6585074, -86.78160159999999, -82.9987942, -122.08385109999999, -87.62979820000001, -71.0588801, -94.5785667, -96.6988856, -157.8583333, -74.0059728, -78.6381787, -118.255075, -117.91882209999999, -87.5710898, -122.03634960000001, -95.36980279999999, -80.3035602, -118.66148090000002, -81.6943605, -122.4194155, -97.3307658, -118.83759369999999, -75.5138118, -71.0588801, -81.6943605, -77.0909809, -71.4418101, -102.0779146, -73.76290970000001, -94.5785667, -104.990251, -95.67515759999999, -115.13982959999998, -157.8583333, -77.17726040000001, -115.13982959999998, -111.8874392, -76.727745, -121.88632859999998, -88.462609, -122.4442906, -95.992775, -93.29827990000001, -71.10973349999999, -78.63419959999999, -71.0588801, -87.62979820000001, -74.7523874, -88.3161965, -111.9260519, -122.6587185, -81.609844, -84.2866083, -122.4194155, -79.9958864, -74.0059728, -74.0643065, -77.35700279999999, -122.4194155, -95.36980279999999, -71.2356113, -122.5274755, -96.9488945, -112.07403729999999, -78.6381787, -90.1994042, -115.13982959999998, -96.7969879, -87.9064736, -93.2650108, -73.8201337, -74.0059728, -75.9268747, -118.24368490000002, -95.36980279999999, -120.7401385, -85.5600316, -121.88632859999998, -74.0059728, -88.2825668, -73.6812293, -81.6943605, -87.62979820000001, -121.88632859999998, -90.1994042, -84.21714240000001, -71.51630490000001, -93.0899578, -80.0533746, -71.2356113, -90.5770675, -121.88632859999998, -72.9278835, -87.840625, -94.5785667, -81.87230840000001, -111.6585337, -86.018651, -72.57538690000001, -80.8431267, -74.0679753, -87.9064736, -90.0489801, -80.1289321, -86.158068, -97.5164276, -118.6541895, -78.6381787, -81.3792365, -94.6169012, -84.2212938, -97.01500779999999, -111.9260519, -121.8746789, -87.62979820000001, -80.8431267, -92.66626740000001, -88.1227199, -85.7584557, -95.36980279999999, -122.4194155, -73.4686858, -105.2705456, -77.4360481, -118.1597929, 
-118.4164652, -93.2259349, -91.0432051, -71.2092214, -122.7159726, -122.28858079999999, -77.0909809, -93.63191309999999, -73.71444770000001, -84.1916069, -85.9766671, -75.3698895, -74.0776417, -66.0509549, -121.8746789, -121.8746789, -82.9987942, -71.0786653, -95.93450340000001, -118.24368490000002, -105.084423, -122.33207079999998, -84.50371640000002, -88.01117459999999, -95.992775, -74.4884868, -86.7827772, -77.4360481, -81.9179173, -117.1610838, -83.3527097, -90.0489801, -117.82363909999998, -79.9958864, -122.5310874, -78.78111690000001, -122.06518190000001, -157.8583333, -104.98775970000001, -95.36980279999999, -121.9552356, -117.1124241, -122.6587185, -118.24368490000002, -122.2015159, -77.17726040000001, -117.1610838, -74.0059728, -96.7969879, -73.53873409999999, -70.3711617, -93.1471667, -82.9987942, -95.36980279999999, -95.36980279999999, -81.3792365, -80.8431267, -88.0934108, -87.62979820000001, -121.88632859999998, -71.19562049999999, -86.158068, -87.62979820000001, -121.9552356, -104.990251, -77.4310992, -94.6707917, -73.62819640000001, -87.7808967, -95.36980279999999, -104.990251, -87.9064736, -84.3879824, -71.1367953, -105.0866504, -84.5120196, -81.02283309999999, -81.02283309999999, -118.4164652, -74.54932840000001, -95.36980279999999, -121.88632859999998, -74.05652979999999, -87.62979820000001, -74.9325683, -84.36155550000001, -71.17244670000001, -117.82650490000002, -117.6581562, -117.82650490000002]}
| 55,236.5 | 110,472 | 0.712165 | data={'title': ['Walmart', 'Exxon Mobil', 'Berkshire Hathaway', 'Apple', 'UnitedHealth Group', 'McKesson', 'CVS Health', 'Amazon.com', 'AT&T', 'General Motors', 'Ford Motor', 'AmerisourceBergen', 'Chevron', 'Cardinal Health', 'Costco', 'Verizon', 'Kroger', 'General Electric', 'Walgreens Boots Alliance', 'JPMorgan Chase', 'Fannie Mae', 'Alphabet', 'Home Depot', 'Bank of America Corp.', 'Express Scripts Holding', 'Wells Fargo', 'Boeing', 'Phillips 66', 'Anthem', 'Microsoft', 'Valero Energy', 'Citigroup', 'Comcast', 'IBM', 'Dell Technologies', 'State Farm Insurance Cos.', 'Johnson & Johnson', 'Freddie Mac', 'Target', 'Lowes', 'Marathon Petroleum', 'Procter & Gamble', 'MetLife', 'UPS', 'PepsiCo', 'Intel', 'DowDuPont', 'Archer Daniels Midland', 'Aetna', 'FedEx', 'United Technologies', 'Prudential Financial', 'Albertsons Cos.', 'Sysco', 'Disney', 'Humana', 'Pfizer', 'HP', 'Lockheed Martin', 'AIG', 'Centene', 'Cisco Systems', 'HCA Healthcare', 'Energy Transfer Equity', 'Caterpillar', 'Nationwide', 'Morgan Stanley', 'Liberty Mutual Insurance Group', 'New York Life Insurance', 'Goldman Sachs Group', 'American Airlines Group', 'Best Buy', 'Cigna', 'Charter Communications', 'Delta Air Lines', 'Facebook', 'Honeywell International', 'Merck', 'Allstate', 'Tyson Foods', 'United Continental Holdings', 'Oracle', 'Tech Data', 'TIAA', 'TJX', 'American Express', 'Coca-Cola', 'Publix Super Markets', 'Nike', 'Andeavor', 'World Fuel Services', 'Exelon', 'Massachusetts Mutual Life Insurance', 'Rite Aid', 'ConocoPhillips', 'CHS', '3M', 'Time Warner', 'General Dynamics', 'USAA', 'Capital One Financial', 'Deere', 'INTL FCStone', 'Northwestern Mutual', 'Enterprise Products Partners', 'Travelers Cos.', 'Hewlett Packard Enterprise', 'Philip Morris International', 'Twenty-First Century Fox', 'AbbVie', 'Abbott Laboratories', 'Progressive', 'Arrow Electronics', 'Kraft Heinz', 'Plains GP Holdings', 'Gilead Sciences', 'Mondelez International', 'Northrop Grumman', 
'Raytheon', 'Macys', 'US Foods Holding', 'U.S. Bancorp', 'Dollar General', 'International Paper', 'Duke Energy', 'Southern', 'Marriott International', 'Avnet', 'Eli Lilly', 'Amgen', 'McDonalds', 'Starbucks', 'Qualcomm', 'Dollar Tree', 'PBF Energy', 'Icahn Enterprises', 'Aflac', 'AutoNation', 'Penske Automotive Group', 'Whirlpool', 'Union Pacific', 'Southwest Airlines', 'ManpowerGroup', 'Thermo Fisher Scientific', 'Bristol-Myers Squibb', 'Halliburton', 'Tenet Healthcare', 'Lear', 'Cummins', 'Micron Technology', 'Nucor', 'Molina Healthcare', 'Fluor', 'Altria Group', 'Paccar', 'Hartford Financial Services', 'Kohls', 'Western Digital', 'Jabil', 'Community Health Systems', 'Visa', 'Danaher', 'Kimberly-Clark', 'AECOM', 'PNC Financial Services', 'CenturyLink', 'NextEra Energy', 'PG& E Corp.', 'Synnex', 'WellCare Health Plans', 'Performance Food Group', 'Sears Holdings', 'Synchrony Financial', 'CarMax', 'Bank of New York Mellon', 'Freeport-McMoRan', 'Genuine Parts', 'Emerson Electric', 'DaVita', 'Supervalu', 'Gap', 'General Mills', 'Nordstrom', 'Colgate-Palmolive', 'American Electric Power', 'XPO Logistics', 'Goodyear Tire & Rubber', 'Omnicom Group', 'CDW', 'Sherwin-Williams', 'PPG Industries', 'Texas Instruments', 'C.H. Robinson Worldwide', 'WestRock', 'Cognizant Technology Solutions', 'Newell Brands', 'CBS', 'Envision Healthcare', 'Monsanto', 'Aramark', 'Applied Materials', 'Waste Management', 'DISH Network', 'Illinois Tool Works', 'Lincoln National', 'HollyFrontier', 'CBRE Group', 'Textron', 'Ross Stores', 'Principal Financial', 'D.R. Horton', 'Marsh & McLennan', 'Devon Energy', 'AES', 'Ecolab', "Land O'Lakes", 'Loews', 'Kinder Morgan', 'FirstEnergy', 'Occidental Petroleum', 'Viacom', 'PayPal Holdings', 'NGL Energy Partners', 'Celgene', 'Arconic', 'Kellogg', 'Las Vegas Sands', 'Stanley Black & Decker', 'Booking Holdings', 'Lennar', 'L Brands', 'DTE Energy', 'Dominion Energy', 'Reinsurance Group of America', 'J.C. 
Penney', 'Mastercard', 'BlackRock', 'Henry Schein', 'Guardian Life Ins. Co. of America', 'Stryker', 'Jefferies Financial Group', 'VF', 'ADP', 'Edison International', 'Biogen', 'United States Steel', 'Core-Mark Holding', 'Bed Bath & Beyond', 'Oneok', 'BB& T Corp.', 'Becton Dickinson', 'Ameriprise Financial', 'Farmers Insurance Exchange', 'First Data', 'Consolidated Edison', 'Parker-Hannifin', 'Anadarko Petroleum', 'Estee Lauder', 'State Street Corp.', 'Tesla', 'Netflix', 'Alcoa', 'Discover Financial Services', 'Praxair', 'CSX', 'Xcel Energy', 'Unum Group', 'Universal Health Services', 'NRG Energy', 'EOG Resources', 'Sempra Energy', "Toys 'R'ù Us", 'Group 1 Automotive', 'Entergy', 'Molson Coors Brewing', 'L3 Technologies', 'Ball', 'AutoZone', 'Murphy USA', 'MGM Resorts International', 'Office Depot', 'Huntsman', 'Baxter International', 'Norfolk Southern', 'salesforce.com', 'Laboratory Corp. of America', 'W.W. Grainger', 'Qurate Retail', 'Autoliv', 'Live Nation Entertainment', 'Xerox', 'Leidos Holdings', 'Corning', 'Lithia Motors', 'Expedia Group', 'Republic Services', 'Jacobs Engineering Group', 'Sonic Automotive', 'Ally Financial', 'LKQ', 'BorgWarner', 'Fidelity National Financial', 'SunTrust Banks', 'IQVIA Holdings', 'Reliance Steel & Aluminum', 'Nvidia', 'Voya Financial', 'CenterPoint Energy', 'eBay', 'Eastman Chemical', 'American Family Insurance Group', 'Steel Dynamics', 'Pacific Life', 'Chesapeake Energy', 'Mohawk Industries', 'Quanta Services', 'Advance Auto Parts', 'Owens & Minor', 'United Natural Foods', 'Tenneco', 'Conagra Brands', 'GameStop', 'Hormel Foods', 'Hilton Worldwide Holdings', 'Frontier Communications', 'Fidelity National Information Services', 'Public Service Enterprise Group', 'Boston Scientific', 'OReilly Automotive', 'Charles Schwab', 'Global Partners', 'PVH', 'Avis Budget Group', 'Targa Resources', 'Hertz Global Holdings', 'Calpine', 'Mutual of Omaha Insurance', 'Crown Holdings', 'Peter Kiewit Sons', 'Dicks Sporting Goods', 'PulteGroup', 
'Navistar International', 'Thrivent Financial for Lutherans', 'DCP Midstream', 'Air Products & Chemicals', 'Veritiv', 'AGCO', 'Genworth Financial', 'Univar', 'News Corp.', 'SpartanNash', 'Westlake Chemical', 'Williams', 'Lam Research', 'Alaska Air Group', 'Jones Lang LaSalle', 'Anixter International', 'Campbell Soup', 'Interpublic Group', 'Dover', 'Zimmer Biomet Holdings', 'Dean Foods', 'Foot Locker', 'Eversource Energy', 'Alliance Data Systems', 'Fifth Third Bancorp', 'Quest Diagnostics', 'EMCOR Group', 'W.R. Berkley', 'WESCO International', 'Coty', 'WEC Energy Group', 'Masco', 'DXC Technology', 'Auto-Owners Insurance', 'Jones Financial (Edward Jones)', 'Liberty Media', 'Erie Insurance Group', 'Hershey', 'PPL', 'Huntington Ingalls Industries', 'Mosaic', 'J.M. Smucker', 'Delek US Holdings', 'Newmont Mining', 'Constellation Brands', 'Ryder System', 'National Oilwell Varco', 'Adobe Systems', 'LifePoint Health', 'Tractor Supply', 'Thor Industries', 'Dana', 'Weyerhaeuser', 'J.B. Hunt Transport Services', 'Darden Restaurants', 'Yum China Holdings', 'Blackstone Group', 'Berry Global Group', 'Builders FirstSource', 'Activision Blizzard', 'JetBlue Airways', 'Amphenol', 'A-Mark Precious Metals', 'Spirit AeroSystems Holdings', 'R.R. Donnelley & Sons', 'Harris', 'Expeditors Intl. of Washington', 'Discovery', 'Owens-Illinois', 'Sanmina', 'KeyCorp', 'American Financial Group', 'Oshkosh', 'Rockwell Collins', 'Kindred Healthcare', 'Insight Enterprises', 'Dr Pepper Snapple Group', 'American Tower', 'Fortive', 'Ralph Lauren', 'HRG Group', 'Ascena Retail Group', 'United Rentals', 'Caseys General Stores', 'Graybar Electric', 'Avery Dennison', 'MasTec', 'CMS Energy', 'HD Supply Holdings', 'Raymond James Financial', 'NCR', 'Hanesbrands', 'Asbury Automotive Group', 'Citizens Financial Group', 'Packaging Corp. 
of America', 'Alleghany', 'Apache', 'Dillards', 'Assurant', 'Franklin Resources', 'Owens Corning', 'Motorola Solutions', 'NVR', 'Rockwell Automation', 'TreeHouse Foods', 'Wynn Resorts', 'Olin', 'American Axle & Manufacturing', 'Old Republic International', 'Chemours', 'iHeartMedia', 'Ameren', 'Arthur J. Gallagher', 'Celanese', 'Sealed Air', 'UGI', 'Realogy Holdings', 'Burlington Stores', 'Regions Financial', 'AK Steel Holding', 'Securian Financial Group', 'S& P Global', 'Markel', 'TravelCenters of America', 'Conduent', 'M& T Bank Corp.', 'Clorox', 'AmTrust Financial Services', 'KKR', 'Ulta Beauty', 'Yum Brands', 'Regeneron Pharmaceuticals', 'Windstream Holdings', 'Magellan Health', 'Western & Southern Financial', 'Intercontinental Exchange', 'Ingredion', 'Wyndham Destinations', 'Toll Brothers', 'Seaboard', 'Booz Allen Hamilton', 'First American Financial', 'Cincinnati Financial', 'Avon Products', 'Northern Trust', 'Fiserv', 'Harley-Davidson', 'Cheniere Energy', 'Patterson', 'Peabody Energy', 'ON Semiconductor', 'Simon Property Group', 'Western Union', 'NetApp', 'Polaris Industries', 'Pioneer Natural Resources', 'ABM Industries', 'Vistra Energy', 'Cintas', 'Hess', 'Host Hotels & Resorts', 'Kelly Services', 'Genesis Healthcare', 'Michaels Cos.', 'Advanced Micro Devices', 'Zoetis', 'Williams-Sonoma', 'Fortune Brands Home & Security', 'Big Lots', 'Robert Half International', 'Post Holdings', 'Hasbro', 'Hanover Insurance Group', 'Navient', 'Intuit', 'Domtar', 'Marathon Oil', 'Cerner', 'Analog Devices', 'Telephone & Data Systems', 'Essendant', 'Sonoco Products', 'Juniper Networks', 'Commercial Metals', 'CSRA', 'Under Armour', 'RPM International', 'Total System Services', 'Levi Strauss', 'Brunswick', 'YRC Worldwide', 'Mattel', 'FM Global', 'NiSource', 'Caesars Entertainment', 'Electronic Arts', 'Dynegy', 'McCormick', 'T. 
Rowe Price', 'Orbital ATK', 'Tutor Perini', 'Brookdale Senior Living', 'Huntington Bancshares', 'Wayfair', 'Rush Enterprises', 'Xylem', 'Neiman Marcus Group', 'Hyatt Hotels', 'Sprouts Farmers Market', 'Diebold Nixdorf', 'Roper Technologies', 'Smart & Final Stores', 'CommScope Holding', 'Tapestry', 'Diplomat Pharmacy', 'Chipotle Mexican Grill', 'Agilent Technologies', 'Science Applications International', 'MDU Resources Group', 'Select Medical Holdings', 'Boise Cascade', 'National General Holdings', 'SCANA', 'Graphic Packaging Holding', 'Fastenal', 'Schneider National', 'Laureate Education', 'Beacon Roofing Supply', 'KB Home', 'Equinix', 'Terex', 'Crown Castle International', 'CACI International', 'Watsco', 'Coca-Cola Bottling', 'Welltower', 'ADT', 'Ametek', 'CNO Financial Group', 'Camping World Holdings', 'LPL Financial Holdings', 'Noble Energy', 'Bloomin Brands', 'Moodys', 'Symantec', 'Amkor Technology', 'Skechers U.S.A.', 'KBR', 'Tiffany', 'Torchmark', 'Broadridge Financial Solutions', 'Quad/Graphics', 'CF Industries Holdings', 'Carlisle', 'Silgan Holdings', 'Bemis', 'CA', 'Hub Group', 'Worldpay', 'Ingles Markets', 'Snap-on', 'Dentsply Sirona', 'Calumet Specialty Products', 'Global Payments', 'Encompass Health', 'Martin Marietta Materials', 'Nasdaq', 'Leggett & Platt', 'Universal Forest Products', 'Sally Beauty Holdings', 'Flowers Foods', 'Barnes & Noble', 'American Equity Investment Life', 'Vulcan Materials', 'Taylor Morrison Home', 'Westinghouse Air Brake', 'Crestwood Equity Partners', 'Iron Mountain', 'Lennox International', 'General Cable', 'American Eagle Outfitters', 'Church & Dwight', 'Platform Specialty Products', 'JELD-WEN Holding', 'OneMain Holdings', 'Colfax', 'Zebra Technologies', 'Andersons', 'TD Ameritrade Holding', 'Carlyle Group', 'Hubbell', 'Trinity Industries', 'Darling Ingredients', 'Flowserve', 'Antero Resources', 'Skyworks Solutions', 'Landstar System', 'Buckeye Partners', 'MRC Global', 'CME Group', 'Greif', 'Nexeo Solutions', 
'Cooper-Standard Holdings', 'Urban Outfitters', 'LSC Communications', 'Sabre', 'Green Plains', 'Hexion', 'Stericycle', 'Warner Music Group', 'Ventas', 'ScanSource', 'Pinnacle West Capital', 'Scripps Networks Interactive', 'Alexion Pharmaceuticals', 'Pitney Bowes', 'CIT Group', 'Country Financial', 'CUNA Mutual Group', 'Triumph Group', 'TransDigm Group', 'Allegheny Technologies', 'Resolute Forest Products', 'Acuity Brands', 'Abercrombie & Fitch', 'KLA-Tencor', 'Weis Markets', 'Puget Energy', 'Mednax', 'Kar Auction Services', 'PolyOne', 'FMC', 'Edwards Lifesciences', 'Microchip Technology', 'Amerco', 'Mercury General', 'American National Insurance', 'Carters', 'International Flavors & Fragrances', 'Aarons', 'Alliant Energy', 'EQT', 'Monster Beverage', 'BMC Stock Holdings', 'Ryerson Holding', 'Equifax', 'Regal Beloit', 'Old Dominion Freight Line', 'American Water Works', 'BGC Partners', 'Brinks', 'Meritor', 'Sentry Insurance Group', 'Sanderson Farms', 'KapStone Paper & Packaging', 'Gartner', 'IAC/InterActiveCorp', 'Tailored Brands', 'WABCO Holdings', 'Insperity', 'Comerica', 'TriNet Group', 'Avaya Holdings', 'Ashland Global Holdings', 'Meritage Homes', 'SkyWest', 'USG', 'Southwestern Energy', 'Keysight Technologies', 'Regal Entertainment Group', 'Mutual of America Life Insurance', 'Paychex', 'Brinker International', 'Penn National Gaming', 'Gannett', 'Visteon', 'Pinnacle Foods', 'Intuitive Surgical', 'Continental Resources', 'Service Corp. International', 'Scientific Games', 'Albemarle', 'Atmos Energy', 'Hologic', 'H& R Block', 'Qorvo', 'Steelcase', 'Univision Communications', 'Worthington Industries', 'Timken', 'A.O. 
Smith', 'PriceSmart', 'Stifel Financial', 'Brown-Forman', 'Cinemark Holdings', 'Granite Construction', 'Dycom Industries', 'Clean Harbors', 'First Solar', 'Scotts Miracle-Gro', 'Cracker Barrel Old Country Store', 'Triple-S Management', 'First Republic Bank', 'ServiceMaster Global Holdings', 'PC Connection', 'Genesco', 'Medical Mutual of Ohio', 'MSC Industrial Direct', 'Legg Mason', 'Hyster-Yale Materials Handling', 'Apollo Global Management', 'Citrix Systems', 'Acadia Healthcare', 'Varian Medical Systems', 'Groupon', 'Aleris', 'Sprague Resources', 'Cooper Tire & Rubber', 'Hain Celestial Group', 'Penn Mutual Life Insurance', 'Colony NorthStar', 'ArcBest', 'Presidio', 'TRI Pointe Group', 'Annaly Capital Management', 'G-III Apparel Group', 'AMC Networks', 'Enable Midstream Partners', 'Ciena', 'DSW', 'Convergys', 'Park Hotels & Resorts', 'Pool', 'Fossil Group', 'Dominos Pizza', 'Crane', 'Caleres', 'Tempur Sealy International', 'Tetra Tech', 'Illumina', 'Valmont Industries', 'Hill-Rom Holdings', 'Unisys', 'Zions Bancorp.', 'Sinclair Broadcast Group', 'Louisiana-Pacific', 'Mettler-Toledo International', 'Synopsys', 'Kemper', 'Cabot', 'Great Plains Energy', 'Rent-A-Center', 'Hawaiian Holdings', 'Revlon', 'Syneos Health', 'Public Storage', 'TTM Technologies', 'Vectren', 'Trimble', 'NOW', 'Spirit Airlines', 'ASGN', 'Lincoln Electric Holdings', 'Prologis', 'Range Resources', 'Teledyne Technologies', 'Vishay Intertechnology', 'Boston Properties', 'Applied Industrial Technologies', 'Graham Holdings', 'Amica Mutual Insurance', 'Concho Resources', 'ITT', 'Kansas City Southern', 'MDC Holdings', 'Evergy', 'Pinnacle Entertainment', 'Hawaiian Electric Industries', 'TEGNA', 'Southwest Gas Holdings', 'Vista Outdoor', 'Bon-Ton Stores', 'Super Micro Computer', 'Plexus', 'TrueBlue', 'Magellan Midstream Partners', 'Toro', 'Akamai Technologies', 'Moog', 'Vertex Pharmaceuticals', 'Equity Residential', 'Selective Insurance Group', 'AptarGroup', 'Benchmark Electronics', 'Columbia Sportswear', 
'A. Schulman', 'Verso', 'Digital Realty Trust', 'GNC Holdings', 'E*Trade Financial', 'Hovnanian Enterprises', 'Maximus', 'Twitter', 'Par Pacific Holdings', 'Parexel International', 'RH', 'Nexstar Media Group', 'Knight-Swift Transportation Holdings', 'Red Hat', 'Belden', 'Boyd Gaming', 'Primoris Services', 'Gardner Denver', 'Donaldson', 'Party City Holdco', 'J.Crew Group', 'EnerSys', 'Guess', 'Patterson-UTI Energy', 'WGL Holdings', 'Wolverine World Wide', 'Xilinx', 'Vornado Realty Trust', 'Middleby', 'MPM Holdings', 'Cleveland-Cliffs', 'GGP', 'Cypress Semiconductor', 'Arch Coal', 'GMS', 'Waters', 'H.B. Fuller', 'Affiliated Managers Group', 'PerkinElmer', 'Edgewell Personal Care', 'Maxim Integrated Products', 'Knights of Columbus', 'IDEX', 'DST Systems', 'Chicos FAS', 'Nu Skin Enterprises', 'Herman Miller', 'NLV Financial', 'Curtiss-Wright', 'New Jersey Resources', 'REV Group', 'Mueller Industries', 'GEO Group', 'Allison Transmission Holdings', 'OGE Energy', 'Cheesecake Factory', 'PRA Health Sciences', 'Tupperware Brands', 'Euronet Worldwide', 'FLEETCOR Technologies', 'Nationstar Mortgage Holdings', 'GoDaddy', 'Blackhawk Network Holdings', 'Cboe Global Markets', 'Snyders-Lance', 'Murphy Oil', 'CDK Global', 'Texas Roadhouse', 'Kirby', 'Square', 'Genesee & Wyoming', 'Zayo Group Holdings', 'NewMarket', '99 Cents Only Stores', 'PCM', 'Federated Mutual Insurance', 'HNI', 'Hospitality Properties Trust', 'Greenbrier Cos.', 'Bio-Rad Laboratories', 'AvalonBay Communities', 'Renewable Energy Group', 'Atlas Air Worldwide Holdings', 'Teradata', 'LCI Industries', 'Teleflex', 'Verisk Analytics', 'Popular', 'Workday', 'Cooper Cos.', 'Express', 'Teradyne', 'Werner Enterprises', 'Oaktree Capital Group', 'Woodward', 'F5 Networks', 'Valvoline', 'Roadrunner Transportation Systems', 'SemGroup', 'Catalent', 'Quorum Health', 'Universal', 'Nordson', 'ResMed', 'Tower International', 'Freds', 'Foundation Building Materials', 'Kennametal', 'Autodesk', 'Ply Gem Holdings', 'Central Garden & 
Pet', 'Matson', 'EchoStar', 'Genesis Energy', 'SVB Financial Group', 'Itron', 'Portland General Electric', 'California Resources', 'Esterline Technologies', 'Delta Tucker Holdings', 'AMN Healthcare Services', 'Griffon', 'Valhi', 'Hexcel', 'IDEXX Laboratories', 'Deluxe', 'M/I Homes', 'Kraton', 'Stewart Information Services', 'Marriott Vacations Worldwide', 'SPX FLOW', 'ACCO Brands', 'Echo Global Logistics', 'Cadence Design Systems', 'Nuance Communications', 'Finish Line', 'TransUnion', 'ServiceNow', 'Summit Materials', 'Engility Holdings', 'Ferrellgas Partners', 'Interactive Brokers Group', 'Stepan', 'Oceaneering International', 'Cimarex Energy', 'Rexnord', 'Beazer Homes USA', 'MKS Instruments', 'Vail Resorts', 'Ohio National Mutual', 'TopBuild', 'Brown & Brown', 'Aerojet Rocketdyne Holdings', 'Barnes & Noble Education', 'Superior Energy Services', 'VeriFone Systems', 'Childrens Place', 'Tribune Media', 'Healthcare Services Group', 'SiteOne Landscape Supply', 'Charles River Laboratories Intl', 'CoreLogic', 'Ensign Group', 'HCP'], 'Sector': ['Retailing', 'Energy', 'Financials', 'Technology', 'Health Care', 'Wholesalers', 'Health Care', 'Retailing', 'Telecommunications', 'Motor Vehicles & Parts', 'Motor Vehicles & Parts', 'Wholesalers', 'Energy', 'Wholesalers', 'Retailing', 'Telecommunications', 'Food & Drug Stores', 'Industrials', 'Food & Drug Stores', 'Financials', 'Financials', 'Technology', 'Retailing', 'Financials', 'Health Care', 'Financials', 'Aerospace & Defense', 'Energy', 'Health Care', 'Technology', 'Energy', 'Financials', 'Telecommunications', 'Technology', 'Technology', 'Financials', 'Health Care', 'Financials', 'Retailing', 'Retailing', 'Energy', 'Household Products', 'Financials', 'Transportation', 'Food, Beverages & Tobacco', 'Technology', 'Chemicals', 'Food, Beverages & Tobacco', 'Health Care', 'Transportation', 'Aerospace & Defense', 'Financials', 'Food & Drug Stores', 'Wholesalers', 'Media', 'Health Care', 'Health Care', 'Technology', 'Aerospace & 
Defense', 'Financials', 'Health Care', 'Technology', 'Health Care', 'Energy', 'Industrials', 'Financials', 'Financials', 'Financials', 'Financials', 'Financials', 'Transportation', 'Retailing', 'Health Care', 'Telecommunications', 'Transportation', 'Technology', 'Industrials', 'Health Care', 'Financials', 'Food, Beverages & Tobacco', 'Transportation', 'Technology', 'Wholesalers', 'Financials', 'Retailing', 'Financials', 'Food, Beverages & Tobacco', 'Food & Drug Stores', 'Apparel', 'Energy', 'Energy', 'Energy', 'Financials', 'Food & Drug Stores', 'Energy', 'Food, Beverages & Tobacco', 'Industrials', 'Media', 'Aerospace & Defense', 'Financials', 'Financials', 'Industrials', 'Financials', 'Financials', 'Energy', 'Financials', 'Technology', 'Food, Beverages & Tobacco', 'Media', 'Health Care', 'Health Care', 'Financials', 'Wholesalers', 'Food, Beverages & Tobacco', 'Energy', 'Health Care', 'Food, Beverages & Tobacco', 'Aerospace & Defense', 'Aerospace & Defense', 'Retailing', 'Wholesalers', 'Financials', 'Retailing', 'Materials', 'Energy', 'Energy', 'Hotels, Restaurants & Leisure', 'Wholesalers', 'Health Care', 'Health Care', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Technology', 'Retailing', 'Energy', 'Financials', 'Financials', 'Retailing', 'Retailing', 'Industrials', 'Transportation', 'Transportation', 'Business Services', 'Technology', 'Health Care', 'Energy', 'Health Care', 'Motor Vehicles & Parts', 'Industrials', 'Technology', 'Materials', 'Health Care', 'Engineering & Construction', 'Food, Beverages & Tobacco', 'Industrials', 'Financials', 'Retailing', 'Technology', 'Technology', 'Health Care', 'Business Services', 'Health Care', 'Household Products', 'Engineering & Construction', 'Financials', 'Telecommunications', 'Energy', 'Energy', 'Wholesalers', 'Health Care', 'Wholesalers', 'Retailing', 'Financials', 'Retailing', 'Financials', 'Energy', 'Wholesalers', 'Industrials', 'Health Care', 'Food & Drug Stores', 'Retailing', 'Food, Beverages 
& Tobacco', 'Retailing', 'Household Products', 'Energy', 'Transportation', 'Motor Vehicles & Parts', 'Business Services', 'Technology', 'Chemicals', 'Chemicals', 'Technology', 'Transportation', 'Materials', 'Technology', 'Household Products', 'Media', 'Health Care', 'Chemicals', 'Business Services', 'Technology', 'Business Services', 'Telecommunications', 'Industrials', 'Financials', 'Energy', 'Financials', 'Aerospace & Defense', 'Retailing', 'Financials', 'Engineering & Construction', 'Financials', 'Energy', 'Energy', 'Chemicals', 'Food, Beverages & Tobacco', 'Financials', 'Energy', 'Energy', 'Energy', 'Media', 'Business Services', 'Energy', 'Health Care', 'Aerospace & Defense', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Household Products', 'Technology', 'Engineering & Construction', 'Retailing', 'Energy', 'Energy', 'Financials', 'Retailing', 'Business Services', 'Financials', 'Wholesalers', 'Financials', 'Health Care', 'Financials', 'Apparel', 'Business Services', 'Energy', 'Health Care', 'Materials', 'Wholesalers', 'Retailing', 'Energy', 'Financials', 'Health Care', 'Financials', 'Financials', 'Business Services', 'Energy', 'Industrials', 'Energy', 'Household Products', 'Financials', 'Motor Vehicles & Parts', 'Technology', 'Materials', 'Financials', 'Chemicals', 'Transportation', 'Energy', 'Financials', 'Health Care', 'Energy', 'Energy', 'Energy', 'Retailing', 'Retailing', 'Energy', 'Food, Beverages & Tobacco', 'Aerospace & Defense', 'Materials', 'Retailing', 'Retailing', 'Hotels, Restaurants & Leisure', 'Retailing', 'Chemicals', 'Health Care', 'Transportation', 'Technology', 'Health Care', 'Wholesalers', 'Retailing', 'Motor Vehicles & Parts', 'Media', 'Technology', 'Technology', 'Industrials', 'Retailing', 'Retailing', 'Business Services', 'Engineering & Construction', 'Retailing', 'Financials', 'Wholesalers', 'Motor Vehicles & Parts', 'Financials', 'Financials', 'Health Care', 'Materials', 'Technology', 'Financials', 'Energy', 
'Technology', 'Chemicals', 'Financials', 'Materials', 'Financials', 'Energy', 'Household Products', 'Engineering & Construction', 'Retailing', 'Wholesalers', 'Wholesalers', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Retailing', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Telecommunications', 'Business Services', 'Energy', 'Health Care', 'Retailing', 'Financials', 'Wholesalers', 'Apparel', 'Retailing', 'Energy', 'Retailing', 'Energy', 'Financials', 'Materials', 'Engineering & Construction', 'Retailing', 'Engineering & Construction', 'Industrials', 'Financials', 'Energy', 'Chemicals', 'Wholesalers', 'Industrials', 'Financials', 'Wholesalers', 'Media', 'Wholesalers', 'Chemicals', 'Energy', 'Technology', 'Transportation', 'Financials', 'Wholesalers', 'Food, Beverages & Tobacco', 'Business Services', 'Industrials', 'Health Care', 'Food, Beverages & Tobacco', 'Retailing', 'Energy', 'Business Services', 'Financials', 'Health Care', 'Engineering & Construction', 'Financials', 'Wholesalers', 'Household Products', 'Energy', 'Household Products', 'Technology', 'Financials', 'Financials', 'Media', 'Financials', 'Food, Beverages & Tobacco', 'Energy', 'Aerospace & Defense', 'Chemicals', 'Food, Beverages & Tobacco', 'Energy', 'Energy', 'Food, Beverages & Tobacco', 'Transportation', 'Energy', 'Technology', 'Health Care', 'Retailing', 'Motor Vehicles & Parts', 'Motor Vehicles & Parts', 'Materials', 'Transportation', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Financials', 'Materials', 'Materials', 'Technology', 'Transportation', 'Technology', 'Materials', 'Aerospace & Defense', 'Media', 'Aerospace & Defense', 'Transportation', 'Media', 'Materials', 'Technology', 'Financials', 'Financials', 'Industrials', 'Aerospace & Defense', 'Health Care', 'Technology', 'Food, Beverages & Tobacco', 'Financials', 'Industrials', 'Apparel', 'Household Products', 'Retailing', 'Business Services', 'Retailing', 'Wholesalers', 'Materials', 
'Engineering & Construction', 'Energy', 'Wholesalers', 'Financials', 'Technology', 'Apparel', 'Retailing', 'Financials', 'Materials', 'Financials', 'Energy', 'Retailing', 'Financials', 'Financials', 'Materials', 'Technology', 'Engineering & Construction', 'Industrials', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Chemicals', 'Motor Vehicles & Parts', 'Financials', 'Chemicals', 'Media', 'Energy', 'Financials', 'Chemicals', 'Materials', 'Energy', 'Financials', 'Retailing', 'Financials', 'Materials', 'Financials', 'Business Services', 'Financials', 'Retailing', 'Business Services', 'Financials', 'Household Products', 'Financials', 'Financials', 'Retailing', 'Hotels, Restaurants & Leisure', 'Health Care', 'Telecommunications', 'Health Care', 'Financials', 'Financials', 'Food, Beverages & Tobacco', 'Hotels, Restaurants & Leisure', 'Engineering & Construction', 'Food, Beverages & Tobacco', 'Technology', 'Financials', 'Financials', 'Household Products', 'Financials', 'Business Services', 'Transportation', 'Energy', 'Wholesalers', 'Energy', 'Technology', 'Financials', 'Business Services', 'Technology', 'Transportation', 'Energy', 'Business Services', 'Energy', 'Business Services', 'Energy', 'Financials', 'Business Services', 'Health Care', 'Retailing', 'Technology', 'Health Care', 'Retailing', 'Household Products', 'Retailing', 'Business Services', 'Food, Beverages & Tobacco', 'Household Products', 'Financials', 'Financials', 'Technology', 'Materials', 'Energy', 'Health Care', 'Technology', 'Telecommunications', 'Wholesalers', 'Materials', 'Technology', 'Materials', 'Technology', 'Apparel', 'Chemicals', 'Business Services', 'Apparel', 'Transportation', 'Transportation', 'Household Products', 'Financials', 'Energy', 'Hotels, Restaurants & Leisure', 'Technology', 'Energy', 'Food, Beverages & Tobacco', 'Financials', 'Aerospace & Defense', 'Engineering & Construction', 'Health Care', 'Financials', 'Technology', 'Retailing', 'Industrials', 'Retailing', 
'Hotels, Restaurants & Leisure', 'Food & Drug Stores', 'Technology', 'Technology', 'Food & Drug Stores', 'Technology', 'Apparel', 'Health Care', 'Hotels, Restaurants & Leisure', 'Technology', 'Technology', 'Energy', 'Health Care', 'Wholesalers', 'Financials', 'Energy', 'Materials', 'Wholesalers', 'Transportation', 'Business Services', 'Wholesalers', 'Engineering & Construction', 'Financials', 'Industrials', 'Financials', 'Technology', 'Wholesalers', 'Food, Beverages & Tobacco', 'Financials', 'Business Services', 'Technology', 'Financials', 'Retailing', 'Financials', 'Energy', 'Hotels, Restaurants & Leisure', 'Business Services', 'Technology', 'Technology', 'Apparel', 'Engineering & Construction', 'Retailing', 'Financials', 'Business Services', 'Media', 'Chemicals', 'Materials', 'Materials', 'Materials', 'Technology', 'Transportation', 'Business Services', 'Food & Drug Stores', 'Industrials', 'Health Care', 'Energy', 'Business Services', 'Health Care', 'Materials', 'Financials', 'Household Products', 'Materials', 'Retailing', 'Food, Beverages & Tobacco', 'Retailing', 'Financials', 'Materials', 'Engineering & Construction', 'Industrials', 'Energy', 'Business Services', 'Industrials', 'Industrials', 'Retailing', 'Household Products', 'Chemicals', 'Materials', 'Financials', 'Industrials', 'Industrials', 'Food, Beverages & Tobacco', 'Financials', 'Financials', 'Industrials', 'Transportation', 'Food, Beverages & Tobacco', 'Industrials', 'Energy', 'Technology', 'Transportation', 'Energy', 'Energy', 'Financials', 'Materials', 'Wholesalers', 'Motor Vehicles & Parts', 'Retailing', 'Media', 'Technology', 'Energy', 'Chemicals', 'Business Services', 'Media', 'Financials', 'Wholesalers', 'Energy', 'Media', 'Health Care', 'Technology', 'Financials', 'Financials', 'Financials', 'Aerospace & Defense', 'Aerospace & Defense', 'Materials', 'Materials', 'Industrials', 'Retailing', 'Technology', 'Food & Drug Stores', 'Energy', 'Health Care', 'Wholesalers', 'Chemicals', 'Chemicals', 
'Health Care', 'Technology', 'Transportation', 'Financials', 'Financials', 'Apparel', 'Chemicals', 'Retailing', 'Energy', 'Energy', 'Food, Beverages & Tobacco', 'Wholesalers', 'Materials', 'Business Services', 'Industrials', 'Transportation', 'Energy', 'Financials', 'Business Services', 'Business Services', 'Financials', 'Food, Beverages & Tobacco', 'Materials', 'Technology', 'Technology', 'Retailing', 'Motor Vehicles & Parts', 'Business Services', 'Financials', 'Business Services', 'Technology', 'Chemicals', 'Engineering & Construction', 'Transportation', 'Materials', 'Energy', 'Technology', 'Media', 'Financials', 'Business Services', 'Hotels, Restaurants & Leisure', 'Hotels, Restaurants & Leisure', 'Media', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Health Care', 'Energy', 'Business Services', 'Hotels, Restaurants & Leisure', 'Chemicals', 'Energy', 'Health Care', 'Financials', 'Technology', 'Household Products', 'Media', 'Materials', 'Industrials', 'Industrials', 'Retailing', 'Financials', 'Food, Beverages & Tobacco', 'Media', 'Engineering & Construction', 'Engineering & Construction', 'Business Services', 'Energy', 'Chemicals', 'Hotels, Restaurants & Leisure', 'Health Care', 'Financials', 'Business Services', 'Retailing', 'Retailing', 'Financials', 'Wholesalers', 'Financials', 'Industrials', 'Financials', 'Technology', 'Health Care', 'Health Care', 'Technology', 'Materials', 'Wholesalers', 'Motor Vehicles & Parts', 'Food, Beverages & Tobacco', 'Financials', 'Financials', 'Transportation', 'Technology', 'Engineering & Construction', 'Financials', 'Apparel', 'Media', 'Energy', 'Technology', 'Retailing', 'Business Services', 'Financials', 'Wholesalers', 'Apparel', 'Hotels, Restaurants & Leisure', 'Industrials', 'Retailing', 'Household Products', 'Engineering & Construction', 'Technology', 'Materials', 'Health Care', 'Technology', 'Financials', 'Media', 'Materials', 'Technology', 'Technology', 'Financials', 'Chemicals', 'Energy', 'Retailing', 
'Transportation', 'Household Products', 'Health Care', 'Financials', 'Technology', 'Energy', 'Technology', 'Wholesalers', 'Transportation', 'Business Services', 'Industrials', 'Financials', 'Energy', 'Aerospace & Defense', 'Technology', 'Financials', 'Wholesalers', 'Business Services', 'Financials', 'Energy', 'Industrials', 'Transportation', 'Engineering & Construction', 'Energy', 'Hotels, Restaurants & Leisure', 'Energy', 'Media', 'Energy', 'Household Products', 'Retailing', 'Technology', 'Technology', 'Business Services', 'Energy', 'Industrials', 'Technology', 'Aerospace & Defense', 'Health Care', 'Financials', 'Financials', 'Materials', 'Technology', 'Apparel', 'Chemicals', 'Materials', 'Financials', 'Food & Drug Stores', 'Financials', 'Engineering & Construction', 'Technology', 'Technology', 'Energy', 'Health Care', 'Retailing', 'Media', 'Transportation', 'Technology', 'Industrials', 'Hotels, Restaurants & Leisure', 'Engineering & Construction', 'Industrials', 'Industrials', 'Retailing', 'Retailing', 'Industrials', 'Retailing', 'Energy', 'Energy', 'Apparel', 'Technology', 'Financials', 'Industrials', 'Chemicals', 'Energy', 'Financials', 'Technology', 'Energy', 'Wholesalers', 'Technology', 'Chemicals', 'Financials', 'Technology', 'Household Products', 'Technology', 'Financials', 'Industrials', 'Business Services', 'Retailing', 'Household Products', 'Household Products', 'Financials', 'Aerospace & Defense', 'Energy', 'Motor Vehicles & Parts', 'Industrials', 'Business Services', 'Motor Vehicles & Parts', 'Energy', 'Hotels, Restaurants & Leisure', 'Technology', 'Household Products', 'Business Services', 'Business Services', 'Financials', 'Technology', 'Business Services', 'Financials', 'Food, Beverages & Tobacco', 'Energy', 'Technology', 'Hotels, Restaurants & Leisure', 'Transportation', 'Business Services', 'Transportation', 'Telecommunications', 'Chemicals', 'Retailing', 'Wholesalers', 'Financials', 'Household Products', 'Financials', 'Transportation', 'Health 
Care', 'Financials', 'Energy', 'Transportation', 'Technology', 'Motor Vehicles & Parts', 'Health Care', 'Business Services', 'Financials', 'Technology', 'Health Care', 'Retailing', 'Technology', 'Transportation', 'Financials', 'Aerospace & Defense', 'Technology', 'Chemicals', 'Transportation', 'Energy', 'Health Care', 'Health Care', 'Food, Beverages & Tobacco', 'Industrials', 'Health Care', 'Motor Vehicles & Parts', 'Food & Drug Stores', 'Wholesalers', 'Industrials', 'Technology', 'Materials', 'Household Products', 'Transportation', 'Technology', 'Energy', 'Financials', 'Industrials', 'Energy', 'Energy', 'Aerospace & Defense', 'Aerospace & Defense', 'Health Care', 'Materials', 'Chemicals', 'Aerospace & Defense', 'Health Care', 'Media', 'Engineering & Construction', 'Chemicals', 'Financials', 'Hotels, Restaurants & Leisure', 'Industrials', 'Household Products', 'Transportation', 'Technology', 'Technology', 'Retailing', 'Business Services', 'Technology', 'Materials', 'Aerospace & Defense', 'Energy', 'Financials', 'Chemicals', 'Energy', 'Energy', 'Industrials', 'Engineering & Construction', 'Technology', 'Hotels, Restaurants & Leisure', 'Financials', 'Engineering & Construction', 'Financials', 'Aerospace & Defense', 'Retailing', 'Energy', 'Technology', 'Retailing', 'Media', 'Health Care', 'Wholesalers', 'Health Care', 'Business Services', 'Health Care', 'Financials'], 'Industry': ['General Merchandisers', 'Petroleum Refining', 'Insurance: Property and Casualty (Stock)', 'Computers, Office Equipment', 'Health Care: Insurance and Managed Care', 'Wholesalers: Health Care', 'Health Care: Pharmacy and Other Services', 'Internet Services and Retailing', 'Telecommunications', 'Motor Vehicles and Parts', 'Motor Vehicles and Parts', 'Wholesalers: Health Care', 'Petroleum Refining', 'Wholesalers: Health Care', 'General Merchandisers', 'Telecommunications', 'Food and Drug Stores', 'Industrial Machinery', 'Food and Drug Stores', 'Commercial Banks', 'Diversified Financials', 
'Internet Services and Retailing', 'Specialty Retailers: Other', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Commercial Banks', 'Aerospace and Defense', 'Petroleum Refining', 'Health Care: Insurance and Managed Care', 'Computer Software', 'Petroleum Refining', 'Commercial Banks', 'Telecommunications', 'Information Technology Services', 'Computers, Office Equipment', 'Insurance: Property and Casualty (Mutual)', 'Pharmaceuticals', 'Diversified Financials', 'General Merchandisers', 'Specialty Retailers: Other', 'Petroleum Refining', 'Household and Personal Products', 'Insurance: Life, Health (stock)', 'Mail, Package, and Freight Delivery', 'Food Consumer Products', 'Semiconductors and Other Electronic Components', 'Chemicals', 'Food Production', 'Health Care: Insurance and Managed Care', 'Mail, Package, and Freight Delivery', 'Aerospace and Defense', 'Insurance: Life, Health (stock)', 'Food and Drug Stores', 'Wholesalers: Food and Grocery', 'Entertainment', 'Health Care: Insurance and Managed Care', 'Pharmaceuticals', 'Computers, Office Equipment', 'Aerospace and Defense', 'Insurance: Property and Casualty (Stock)', 'Health Care: Insurance and Managed Care', 'Network and Other Communications Equipment', 'Health Care: Medical Facilities', 'Pipelines', 'Construction and Farm Machinery', 'Insurance: Property and Casualty (Mutual)', 'Commercial Banks', 'Insurance: Property and Casualty (Stock)', 'Insurance: Life, Health (Mutual)', 'Commercial Banks', 'Airlines', 'Specialty Retailers: Other', 'Health Care: Insurance and Managed Care', 'Telecommunications', 'Airlines', 'Internet Services and Retailing', 'Electronics, Electrical Equip.', 'Pharmaceuticals', 'Insurance: Property and Casualty (Stock)', 'Food Production', 'Airlines', 'Computer Software', 'Wholesalers: Electronics and Office Equipment', 'Insurance: Life, Health (Mutual)', 'Specialty Retailers: Apparel', 'Diversified Financials', 'Beverages', 'Food and Drug Stores', 'Apparel', 'Petroleum 
Refining', 'Energy', 'Utilities: Gas and Electric', 'Insurance: Life, Health (Mutual)', 'Food and Drug Stores', 'Mining, Crude-Oil Production', 'Food Production', 'Miscellaneous', 'Entertainment', 'Aerospace and Defense', 'Insurance: Property and Casualty (Stock)', 'Commercial Banks', 'Construction and Farm Machinery', 'Diversified Financials', 'Insurance: Life, Health (Mutual)', 'Pipelines', 'Insurance: Property and Casualty (Stock)', 'Computers, Office Equipment', 'Tobacco', 'Entertainment', 'Pharmaceuticals', 'Medical Products and Equipment', 'Insurance: Property and Casualty (Stock)', 'Wholesalers: Electronics and Office Equipment', 'Food Consumer Products', 'Pipelines', 'Pharmaceuticals', 'Food Consumer Products', 'Aerospace and Defense', 'Aerospace and Defense', 'General Merchandisers', 'Wholesalers: Food and Grocery', 'Commercial Banks', 'Specialty Retailers: Other', 'Packaging, Containers', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Wholesalers: Electronics and Office Equipment', 'Pharmaceuticals', 'Pharmaceuticals', 'Food Services', 'Food Services', 'Semiconductors and Other Electronic Components', 'Specialty Retailers: Other', 'Petroleum Refining', 'Diversified Financials', 'Insurance: Life, Health (stock)', 'Automotive Retailing, Services', 'Automotive Retailing, Services', 'Electronics, Electrical Equip.', 'Railroads', 'Airlines', 'Temporary Help', 'Scientific,Photographic and Control Equipment', 'Pharmaceuticals', 'Oil and Gas Equipment, Services', 'Health Care: Medical Facilities', 'Motor Vehicles and Parts', 'Industrial Machinery', 'Semiconductors and Other Electronic Components', 'Metals', 'Health Care: Insurance and Managed Care', 'Engineering, Construction', 'Tobacco', 'Construction and Farm Machinery', 'Insurance: Property and Casualty (Stock)', 'General Merchandisers', 'Computers, Office Equipment', 'Semiconductors and Other Electronic Components', 'Health Care: Medical Facilities', 'Financial Data 
Services', 'Medical Products and Equipment', 'Household and Personal Products', 'Engineering, Construction', 'Commercial Banks', 'Telecommunications', 'Utilities: Gas and Electric', 'Utilities: Gas and Electric', 'Wholesalers: Electronics and Office Equipment', 'Health Care: Insurance and Managed Care', 'Wholesalers: Food and Grocery', 'General Merchandisers', 'Diversified Financials', 'Automotive Retailing, Services', 'Commercial Banks', 'Mining, Crude-Oil Production', 'Wholesalers: Diversified', 'Industrial Machinery', 'Health Care: Medical Facilities', 'Food and Drug Stores', 'Specialty Retailers: Apparel', 'Food Consumer Products', 'General Merchandisers', 'Household and Personal Products', 'Utilities: Gas and Electric', 'Transportation and Logistics', 'Motor Vehicles and Parts', 'Advertising, marketing', 'Information Technology Services', 'Chemicals', 'Chemicals', 'Semiconductors and Other Electronic Components', 'Transportation and Logistics', 'Packaging, Containers', 'Information Technology Services', 'Home Equipment, Furnishings', 'Entertainment', 'Health Care: Pharmacy and Other Services', 'Chemicals', 'Diversified Outsourcing Services', 'Semiconductors and Other Electronic Components', 'Waste Management', 'Telecommunications', 'Industrial Machinery', 'Insurance: Life, Health (stock)', 'Petroleum Refining', 'Real estate', 'Aerospace and Defense', 'Specialty Retailers: Apparel', 'Insurance: Life, Health (stock)', 'Homebuilders', 'Diversified Financials', 'Mining, Crude-Oil Production', 'Utilities: Gas and Electric', 'Chemicals', 'Food Consumer Products', 'Insurance: Property and Casualty (Stock)', 'Pipelines', 'Utilities: Gas and Electric', 'Mining, Crude-Oil Production', 'Entertainment', 'Financial Data Services', 'Energy', 'Pharmaceuticals', 'Aerospace and Defense', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Home Equipment, Furnishings', 'Internet Services and Retailing', 'Homebuilders', 'Specialty Retailers: Apparel', 'Utilities: Gas and 
Electric', 'Utilities: Gas and Electric', 'Insurance: Life, Health (stock)', 'General Merchandisers', 'Financial Data Services', 'Securities', 'Wholesalers: Health Care', 'Insurance: Life, Health (Mutual)', 'Medical Products and Equipment', 'Diversified Financials', 'Apparel', 'Diversified Outsourcing Services', 'Utilities: Gas and Electric', 'Pharmaceuticals', 'Metals', 'Wholesalers: Food and Grocery', 'Specialty Retailers: Other', 'Pipelines', 'Commercial Banks', 'Medical Products and Equipment', 'Diversified Financials', 'Insurance: Property and Casualty (Mutual)', 'Financial Data Services', 'Utilities: Gas and Electric', 'Industrial Machinery', 'Mining, Crude-Oil Production', 'Household and Personal Products', 'Commercial Banks', 'Motor Vehicles and Parts', 'Internet Services and Retailing', 'Metals', 'Commercial Banks', 'Chemicals', 'Railroads', 'Utilities: Gas and Electric', 'Insurance: Life, Health (stock)', 'Health Care: Medical Facilities', 'Energy', 'Mining, Crude-Oil Production', 'Utilities: Gas and Electric', 'Specialty Retailers: Other', 'Automotive Retailing, Services', 'Utilities: Gas and Electric', 'Beverages', 'Aerospace and Defense', 'Packaging, Containers', 'Specialty Retailers: Other', 'Specialty Retailers: Other', 'Hotels, Casinos, Resorts', 'Specialty Retailers: Other', 'Chemicals', 'Medical Products and Equipment', 'Railroads', 'Computer Software', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Internet Services and Retailing', 'Motor Vehicles and Parts', 'Entertainment', 'Computers, Office Equipment', 'Information Technology Services', 'Electronics, Electrical Equip.', 'Automotive Retailing, Services', 'Internet Services and Retailing', 'Waste Management', 'Engineering, Construction', 'Automotive Retailing, Services', 'Diversified Financials', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Insurance: Property and Casualty (Stock)', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Metals', 
'Semiconductors and Other Electronic Components', 'Diversified Financials', 'Utilities: Gas and Electric', 'Internet Services and Retailing', 'Chemicals', 'Insurance: Property and Casualty (Stock)', 'Metals', 'Insurance: Life, Health (stock)', 'Mining, Crude-Oil Production', 'Home Equipment, Furnishings', 'Engineering, Construction', 'Specialty Retailers: Other', 'Wholesalers: Health Care', 'Wholesalers: Food and Grocery', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Specialty Retailers: Other', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Telecommunications', 'Financial Data Services', 'Utilities: Gas and Electric', 'Medical Products and Equipment', 'Specialty Retailers: Other', 'Securities', 'Wholesalers: Diversified', 'Apparel', 'Automotive Retailing, Services', 'Pipelines', 'Automotive Retailing, Services', 'Energy', 'Insurance: Life, Health (stock)', 'Packaging, Containers', 'Engineering, Construction', 'Specialty Retailers: Other', 'Homebuilders', 'Construction and Farm Machinery', 'Insurance: Life, Health (Mutual)', 'Pipelines', 'Chemicals', 'Wholesalers: Diversified', 'Construction and Farm Machinery', 'Insurance: Life, Health (stock)', 'Wholesalers: Diversified', 'Publishing, Printing', 'Wholesalers: Food and Grocery', 'Chemicals', 'Energy', 'Semiconductors and Other Electronic Components', 'Airlines', 'Real estate', 'Wholesalers: Electronics and Office Equipment', 'Food Consumer Products', 'Advertising, marketing', 'Industrial Machinery', 'Medical Products and Equipment', 'Food Consumer Products', 'Specialty Retailers: Apparel', 'Utilities: Gas and Electric', 'Financial Data Services', 'Commercial Banks', 'Health Care: Pharmacy and Other Services', 'Engineering, Construction', 'Insurance: Property and Casualty (Stock)', 'Wholesalers: Diversified', 'Household and Personal Products', 'Utilities: Gas and Electric', 'Home Equipment, Furnishings', 'Information Technology Services', 'Insurance: Property and Casualty (Mutual)', 'Securities', 
'Entertainment', 'Insurance: Property and Casualty (Mutual)', 'Food Consumer Products', 'Utilities: Gas and Electric', 'Aerospace and Defense', 'Chemicals', 'Food Consumer Products', 'Petroleum Refining', 'Mining, Crude-Oil Production', 'Beverages', 'Trucking, Truck Leasing', 'Oil and Gas Equipment, Services', 'Computer Software', 'Health Care: Medical Facilities', 'Specialty Retailers: Other', 'Motor Vehicles and Parts', 'Motor Vehicles and Parts', 'Forest and Paper Products', 'Trucking, Truck Leasing', 'Food Services', 'Food Services', 'Diversified Financials', 'Packaging, Containers', 'Building Materials, Glass', 'Entertainment', 'Airlines', 'Network and Other Communications Equipment', 'Miscellaneous', 'Aerospace and Defense', 'Publishing, Printing', 'Aerospace and Defense', 'Transportation and Logistics', 'Entertainment', 'Packaging, Containers', 'Semiconductors and Other Electronic Components', 'Commercial Banks', 'Insurance: Property and Casualty (Stock)', 'Construction and Farm Machinery', 'Aerospace and Defense', 'Health Care: Medical Facilities', 'Information Technology Services', 'Beverages', 'Real estate', 'Industrial Machinery', 'Apparel', 'Household and Personal Products', 'Specialty Retailers: Apparel', 'Miscellaneous', 'Specialty Retailers: Other', 'Wholesalers: Diversified', 'Packaging, Containers', 'Engineering, Construction', 'Utilities: Gas and Electric', 'Wholesalers: Diversified', 'Securities', 'Computers, Office Equipment', 'Apparel', 'Automotive Retailing, Services', 'Commercial Banks', 'Packaging, Containers', 'Insurance: Property and Casualty (Stock)', 'Mining, Crude-Oil Production', 'General Merchandisers', 'Insurance: Property and Casualty (Stock)', 'Securities', 'Building Materials, Glass', 'Network and Other Communications Equipment', 'Homebuilders', 'Electronics, Electrical Equip.', 'Food Consumer Products', 'Hotels, Casinos, Resorts', 'Chemicals', 'Motor Vehicles and Parts', 'Insurance: Property and Casualty (Stock)', 'Chemicals', 
'Entertainment', 'Utilities: Gas and Electric', 'Diversified Financials', 'Chemicals', 'Packaging, Containers', 'Energy', 'Real estate', 'Specialty Retailers: Apparel', 'Commercial Banks', 'Metals', 'Insurance: Life, Health (stock)', 'Financial Data Services', 'Insurance: Property and Casualty (Stock)', 'Specialty Retailers: Other', 'Diversified Outsourcing Services', 'Commercial Banks', 'Household and Personal Products', 'Insurance: Property and Casualty (Stock)', 'Securities', 'Specialty Retailers: Other', 'Food Services', 'Pharmaceuticals', 'Telecommunications', 'Health Care: Insurance and Managed Care', 'Insurance: Life, Health (Mutual)', 'Securities', 'Food Production', 'Hotels, Casinos, Resorts', 'Homebuilders', 'Food Production', 'Information Technology Services', 'Insurance: Property and Casualty (Stock)', 'Insurance: Property and Casualty (Stock)', 'Household and Personal Products', 'Commercial Banks', 'Financial Data Services', 'Transportation Equipment', 'Energy', 'Wholesalers: Health Care', 'Mining, Crude-Oil Production', 'Semiconductors and Other Electronic Components', 'Real estate', 'Financial Data Services', 'Computers, Office Equipment', 'Transportation Equipment', 'Mining, Crude-Oil Production', 'Diversified Outsourcing Services', 'Energy', 'Diversified Outsourcing Services', 'Mining, Crude-Oil Production', 'Real estate', 'Temporary Help', 'Health Care: Medical Facilities', 'Specialty Retailers: Other', 'Semiconductors and Other Electronic Components', 'Pharmaceuticals', 'Specialty Retailers: Other', 'Home Equipment, Furnishings', 'Specialty Retailers: Other', 'Temporary Help', 'Food Consumer Products', 'Toys, Sporting Goods', 'Insurance: Property and Casualty (Stock)', 'Diversified Financials', 'Computer Software', 'Forest and Paper Products', 'Mining, Crude-Oil Production', 'Health Care: Pharmacy and Other Services', 'Semiconductors and Other Electronic Components', 'Telecommunications', 'Wholesalers: Electronics and Office Equipment', 
'Packaging, Containers', 'Network and Other Communications Equipment', 'Metals', 'Information Technology Services', 'Apparel', 'Chemicals', 'Financial Data Services', 'Apparel', 'Transportation Equipment', 'Trucking, Truck Leasing', 'Toys, Sporting Goods', 'Insurance: Property and Casualty (Stock)', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Entertainment', 'Energy', 'Food Consumer Products', 'Securities', 'Aerospace and Defense', 'Engineering, Construction', 'Health Care: Medical Facilities', 'Commercial Banks', 'Internet Services and Retailing', 'Automotive Retailing, Services', 'Industrial Machinery', 'Specialty Retailers: Apparel', 'Hotels, Casinos, Resorts', 'Food and Drug Stores', 'Computers, Office Equipment', 'Scientific,Photographic and Control Equipment', 'Food and Drug Stores', 'Network and Other Communications Equipment', 'Apparel', 'Health Care: Pharmacy and Other Services', 'Food Services', 'Scientific,Photographic and Control Equipment', 'Information Technology Services', 'Energy', 'Health Care: Medical Facilities', 'Wholesalers: Diversified', 'Insurance: Property and Casualty (Stock)', 'Utilities: Gas and Electric', 'Packaging, Containers', 'Wholesalers: Diversified', 'Trucking, Truck Leasing', 'Education', 'Wholesalers: Diversified', 'Homebuilders', 'Real estate', 'Construction and Farm Machinery', 'Real estate', 'Information Technology Services', 'Wholesalers: Diversified', 'Beverages', 'Real estate', 'Diversified Outsourcing Services', 'Scientific,Photographic and Control Equipment', 'Insurance: Life, Health (stock)', 'Automotive Retailing, Services', 'Securities', 'Mining, Crude-Oil Production', 'Food Services', 'Financial Data Services', 'Computer Software', 'Semiconductors and Other Electronic Components', 'Apparel', 'Engineering, Construction', 'Specialty Retailers: Other', 'Insurance: Life, Health (stock)', 'Financial Data Services', 'Publishing, Printing', 'Chemicals', 'Building Materials, Glass', 'Packaging, Containers', 
'Packaging, Containers', 'Computer Software', 'Transportation and Logistics', 'Financial Data Services', 'Food and Drug Stores', 'Industrial Machinery', 'Medical Products and Equipment', 'Petroleum Refining', 'Financial Data Services', 'Health Care: Medical Facilities', 'Building Materials, Glass', 'Securities', 'Home Equipment, Furnishings', 'Building Materials, Glass', 'Specialty Retailers: Other', 'Food Consumer Products', 'Specialty Retailers: Other', 'Insurance: Life, Health (stock)', 'Building Materials, Glass', 'Homebuilders', 'Industrial Machinery', 'Energy', 'Diversified Outsourcing Services', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Household and Personal Products', 'Chemicals', 'Building Materials, Glass', 'Diversified Financials', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'Food Production', 'Securities', 'Securities', 'Electronics, Electrical Equip.', 'Transportation Equipment', 'Food Production', 'Industrial Machinery', 'Mining, Crude-Oil Production', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Pipelines', 'Oil and Gas Equipment, Services', 'Securities', 'Packaging, Containers', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Specialty Retailers: Apparel', 'Publishing, Printing', 'Internet Services and Retailing', 'Energy', 'Chemicals', 'Waste Management', 'Entertainment', 'Real estate', 'Wholesalers: Electronics and Office Equipment', 'Utilities: Gas and Electric', 'Entertainment', 'Pharmaceuticals', 'Computers, Office Equipment', 'Commercial Banks', 'Insurance: Property and Casualty (Mutual)', 'Insurance: Life, Health (stock)', 'Aerospace and Defense', 'Aerospace and Defense', 'Metals', 'Forest and Paper Products', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Semiconductors and Other Electronic Components', 'Food and Drug Stores', 'Utilities: Gas and Electric', 'Health Care: Pharmacy and Other Services', 'Wholesalers: 
Diversified', 'Chemicals', 'Chemicals', 'Medical Products and Equipment', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Insurance: Property and Casualty (Stock)', 'Insurance: Life, Health (stock)', 'Apparel', 'Chemicals', 'Specialty Retailers: Other', 'Utilities: Gas and Electric', 'Energy', 'Beverages', 'Wholesalers: Diversified', 'Metals', 'Financial Data Services', 'Electronics, Electrical Equip.', 'Trucking, Truck Leasing', 'Miscellaneous', 'Securities', 'Diversified Outsourcing Services', 'Diversified Outsourcing Services', 'Insurance: Property and Casualty (Mutual)', 'Food Production', 'Packaging, Containers', 'Information Technology Services', 'Internet Services and Retailing', 'Specialty Retailers: Apparel', 'Motor Vehicles and Parts', 'Diversified Outsourcing Services', 'Commercial Banks', 'Diversified Outsourcing Services', 'Information Technology Services', 'Chemicals', 'Homebuilders', 'Airlines', 'Building Materials, Glass', 'Mining, Crude-Oil Production', 'Scientific,Photographic and Control Equipment', 'Entertainment', 'Insurance: Life, Health (Mutual)', 'Diversified Outsourcing Services', 'Food Services', 'Hotels, Casinos, Resorts', 'Publishing, Printing', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Medical Products and Equipment', 'Mining, Crude-Oil Production', 'Miscellaneous', 'Hotels, Casinos, Resorts', 'Chemicals', 'Utilities: Gas and Electric', 'Medical Products and Equipment', 'Diversified Financials', 'Semiconductors and Other Electronic Components', 'Home Equipment, Furnishings', 'Entertainment', 'Metals', 'Industrial Machinery', 'Electronics, Electrical Equip.', 'General Merchandisers', 'Securities', 'Beverages', 'Entertainment', 'Engineering, Construction', 'Engineering, Construction', 'Waste Management', 'Energy', 'Chemicals', 'Food Services', 'Health Care: Insurance and Managed Care', 'Commercial Banks', 'Diversified Outsourcing Services', 'Specialty Retailers: Other', 'Specialty Retailers: 
Apparel', 'Insurance: Life, Health (Mutual)', 'Wholesalers: Diversified', 'Securities', 'Industrial Machinery', 'Securities', 'Computer Software', 'Health Care: Medical Facilities', 'Medical Products and Equipment', 'Internet Services and Retailing', 'Metals', 'Wholesalers: Diversified', 'Motor Vehicles and Parts', 'Food Consumer Products', 'Insurance: Life, Health (stock)', 'Real estate', 'Trucking, Truck Leasing', 'Information Technology Services', 'Homebuilders', 'Diversified Financials', 'Apparel', 'Entertainment', 'Pipelines', 'Network and Other Communications Equipment', 'Specialty Retailers: Apparel', 'Diversified Outsourcing Services', 'Real estate', 'Wholesalers: Diversified', 'Apparel', 'Food Services', 'Industrial Machinery', 'Specialty Retailers: Apparel', 'Home Equipment, Furnishings', 'Engineering, Construction', 'Scientific,Photographic and Control Equipment', 'Metals', 'Medical Products and Equipment', 'Information Technology Services', 'Commercial Banks', 'Entertainment', 'Building Materials, Glass', 'Scientific,Photographic and Control Equipment', 'Computer Software', 'Insurance: Property and Casualty (Stock)', 'Chemicals', 'Utilities: Gas and Electric', 'Specialty Retailers: Other', 'Airlines', 'Household and Personal Products', 'Health Care: Pharmacy and Other Services', 'Real estate', 'Semiconductors and Other Electronic Components', 'Utilities: Gas and Electric', 'Scientific,Photographic and Control Equipment', 'Wholesalers: Diversified', 'Airlines', 'Temporary Help', 'Industrial Machinery', 'Real estate', 'Mining, Crude-Oil Production', 'Aerospace and Defense', 'Semiconductors and Other Electronic Components', 'Real estate', 'Wholesalers: Diversified', 'Education', 'Insurance: Property and Casualty (Mutual)', 'Mining, Crude-Oil Production', 'Industrial Machinery', 'Railroads', 'Homebuilders', 'Utilities: Gas and Electric', 'Hotels, Casinos, Resorts', 'Utilities: Gas and Electric', 'Entertainment', 'Utilities: Gas and Electric', 
'Miscellaneous', 'General Merchandisers', 'Computers, Office Equipment', 'Semiconductors and Other Electronic Components', 'Temporary Help', 'Pipelines', 'Construction and Farm Machinery', 'Internet Services and Retailing', 'Aerospace and Defense', 'Pharmaceuticals', 'Real estate', 'Insurance: Property and Casualty (Stock)', 'Packaging, Containers', 'Semiconductors and Other Electronic Components', 'Apparel', 'Chemicals', 'Forest and Paper Products', 'Real estate', 'Food and Drug Stores', 'Securities', 'Homebuilders', 'Information Technology Services', 'Internet Services and Retailing', 'Petroleum Refining', 'Health Care: Pharmacy and Other Services', 'Specialty Retailers: Other', 'Entertainment', 'Trucking, Truck Leasing', 'Computer Software', 'Electronics, Electrical Equip.', 'Hotels, Casinos, Resorts', 'Engineering, Construction', 'Industrial Machinery', 'Industrial Machinery', 'Specialty Retailers: Other', 'Specialty Retailers: Apparel', 'Electronics, Electrical Equip.', 'Specialty Retailers: Apparel', 'Oil and Gas Equipment, Services', 'Energy', 'Apparel', 'Semiconductors and Other Electronic Components', 'Real estate', 'Industrial Machinery', 'Chemicals', 'Mining, Crude-Oil Production', 'Real estate', 'Semiconductors and Other Electronic Components', 'Mining, Crude-Oil Production', 'Wholesalers: Diversified', 'Scientific,Photographic and Control Equipment', 'Chemicals', 'Securities', 'Scientific,Photographic and Control Equipment', 'Household and Personal Products', 'Semiconductors and Other Electronic Components', 'Insurance: Life, Health (Mutual)', 'Industrial Machinery', 'Financial Data Services', 'Specialty Retailers: Apparel', 'Household and Personal Products', 'Home Equipment, Furnishings', 'Insurance: Life, Health (stock)', 'Aerospace and Defense', 'Energy', 'Motor Vehicles and Parts', 'Industrial Machinery', 'Miscellaneous', 'Motor Vehicles and Parts', 'Utilities: Gas and Electric', 'Food Services', 'Scientific,Photographic and Control Equipment', 
'Household and Personal Products', 'Financial Data Services', 'Financial Data Services', 'Diversified Financials', 'Internet Services and Retailing', 'Financial Data Services', 'Securities', 'Food Consumer Products', 'Mining, Crude-Oil Production', 'Computer Software', 'Food Services', 'Shipping', 'Financial Data Services', 'Railroads', 'Telecommunications', 'Chemicals', 'Specialty Retailers: Other', 'Wholesalers: Electronics and Office Equipment', 'Insurance: Property and Casualty (Mutual)', 'Home Equipment, Furnishings', 'Real estate', 'Transportation Equipment', 'Scientific,Photographic and Control Equipment', 'Real estate', 'Energy', 'Transportation and Logistics', 'Information Technology Services', 'Motor Vehicles and Parts', 'Medical Products and Equipment', 'Financial Data Services', 'Commercial Banks', 'Computer Software', 'Medical Products and Equipment', 'Specialty Retailers: Apparel', 'Semiconductors and Other Electronic Components', 'Trucking, Truck Leasing', 'Securities', 'Aerospace and Defense', 'Network and Other Communications Equipment', 'Chemicals', 'Trucking, Truck Leasing', 'Pipelines', 'Pharmaceuticals', 'Health Care: Medical Facilities', 'Tobacco', 'Industrial Machinery', 'Medical Products and Equipment', 'Motor Vehicles and Parts', 'Food and Drug Stores', 'Wholesalers: Diversified', 'Industrial Machinery', 'Computer Software', 'Building Materials, Glass', 'Household and Personal Products', 'Shipping', 'Network and Other Communications Equipment', 'Pipelines', 'Commercial Banks', 'Electronics, Electrical Equip.', 'Utilities: Gas and Electric', 'Mining, Crude-Oil Production', 'Aerospace and Defense', 'Aerospace and Defense', 'Health Care: Pharmacy and Other Services', 'Building Materials, Glass', 'Chemicals', 'Aerospace and Defense', 'Medical Products and Equipment', 'Publishing, Printing', 'Homebuilders', 'Chemicals', 'Insurance: Property and Casualty (Stock)', 'Hotels, Casinos, Resorts', 'Industrial Machinery', 'Home Equipment, Furnishings', 
'Transportation and Logistics', 'Computer Software', 'Computer Software', 'Specialty Retailers: Apparel', 'Financial Data Services', 'Computer Software', 'Building Materials, Glass', 'Aerospace and Defense', 'Energy', 'Securities', 'Chemicals', 'Oil and Gas Equipment, Services', 'Mining, Crude-Oil Production', 'Industrial Machinery', 'Homebuilders', 'Semiconductors and Other Electronic Components', 'Hotels, Casinos, Resorts', 'Insurance: Life, Health (stock)', 'Engineering, Construction', 'Insurance: Property and Casualty (Stock)', 'Aerospace and Defense', 'Specialty Retailers: Other', 'Oil and Gas Equipment, Services', 'Financial Data Services', 'Specialty Retailers: Apparel', 'Entertainment', 'Health Care: Pharmacy and Other Services', 'Wholesalers: Diversified', 'Health Care: Pharmacy and Other Services', 'Financial Data Services', 'Health Care: Medical Facilities', 'Real estate'], 'City': ['Bentonville', 'Irving', 'Omaha', 'Cupertino', 'Minnetonka', 'SF', 'Woonsocket', 'Seattle', 'Dallas', 'Detroit', 'Dearborn', 'Chesterbrook', 'San Ramon', 'Dublin', 'Issaquah', 'New York', 'Cincinnati', 'Boston', 'Deerfield', 'New York', 'Leavenworth', 'Mountain View', 'Atlanta', 'Charlotte', 'St. Louis', 'SF', 'Chicago', 'Houston', 'Indianapolis', 'Redmond', 'San Antonio', 'New York', 'Philadelphia', 'Armonk', 'Round Rock', 'Bloomington', 'New Brunswick', 'McLean', 'Minneapolis', 'Mooresville', 'Findlay', 'Cincinnati', 'New York', 'Atlanta', 'Harrison', 'Santa Clara', 'Midland', 'Chicago', 'Hartford', 'Memphis', 'Farmington', 'Newark', 'Boise', 'Houston', 'Burbank', 'Louisville', 'New York', 'Palo Alto', 'Bethesda', 'New York', 'St. 
Louis', 'San Jose', 'Nashville', 'Dallas', 'Deerfield', 'Columbus', 'New York', 'Boston', 'New York', 'New York', 'Fort Worth', 'Richfield', 'Bloomfield', 'Stamford', 'Atlanta', 'Menlo Park', 'Morris Plains', 'Kenilworth', 'Northbrook', 'Springdale', 'Chicago', 'Redwood City', 'Clearwater', 'New York', 'Framingham', 'New York', 'Atlanta', 'Lakeland', 'Beaverton', 'San Antonio', 'Miami', 'Chicago', 'Springfield', 'Camp Hill', 'Houston', 'Inver Grove Heights', 'St Paul', 'New York', 'Falls Church', 'San Antonio', 'McLean', 'Moline', 'New York', 'Milwaukee', 'Houston', 'New York', 'Palo Alto', 'New York', 'New York', 'North Chicago', 'Lake Bluff', 'Mayfield', 'Centennial', 'Pittsburgh', 'Houston', 'San Mateo', 'Deerfield', 'Falls Church', 'Waltham', 'Cincinnati', 'Rosemont', 'Minneapolis', 'Goodlettsville', 'Memphis', 'Charlotte', 'Atlanta', 'Bethesda', 'Phoenix', 'Indianapolis', 'Thousand Oaks', 'Oak Brook', 'Seattle', 'San Diego', 'Chesapeake', 'Parsippany-Troy Hills', 'New York', 'Columbus', 'Fort Lauderdale', 'Bloomfield Hills', 'Benton Harbor', 'Omaha', 'Dallas', 'Milwaukee', 'Waltham', 'New York', 'Houston', 'Dallas', 'Southfield', 'Columbus', 'Boise', 'Charlotte', 'Long Beach', 'Irving', 'Richmond', 'Bellevue', 'Hartford', 'Menomonee Falls', 'San Jose', 'St. Petersburg', 'Franklin', 'SF', 'Leavenworth', 'Irving', 'Los Angeles', 'Pittsburgh', 'Monroe', 'North Palm Beach', 'SF', 'Fremont', 'Tampa', 'Richmond', 'Hoffman Estates', 'Stamford', 'Richmond', 'New York City', 'Phoenix', 'Atlanta', 'St. Louis', 'Denver', 'Eden Prairie', 'SF', 'Minneapolis', 'Seattle', 'New York', 'Columbus', 'Greenwich', 'Akron', 'New York', 'Lincolnshire', 'Cleveland', 'Pittsburgh', 'Dallas', 'Eden Prairie', 'Atlanta', 'Teaneck', 'Hoboken', 'New York', 'Nashville', 'St. 
Louis', 'Philadelphia', 'Santa Clara', 'Houston', 'Englewood', 'Glenview', 'Wayne', 'Dallas', 'Los Angeles', 'Providence', 'Dublin', 'Des Moines', 'Arlington', 'New York', 'Oklahoma City', 'Arlington', 'St Paul', 'Arden Hills', 'New York', 'Houston', 'Akron', 'Houston', 'New York', 'San Jose', 'Tulsa', 'Summit', 'New York', 'Battle Creek', 'Las Vegas', 'New Britain', 'Norwalk', 'Miami', 'Columbus', 'Detroit', 'Richmond', 'Chesterfield', 'Plano', 'Harrison', 'New York', 'Melville', 'New York', 'Kalamazoo', 'New York', 'Greensboro', 'Roseland', 'Rosemead', 'Cambridge', 'Pittsburgh', 'South San Francisco', 'Union', 'Tulsa', 'Winston-Salem', 'Franklin Lakes', 'Minneapolis', 'Los Angeles', 'New York', 'New York', 'Cleveland', 'The Woodlands', 'New York', 'Boston', 'Palo Alto', 'Los Gatos', 'Pittsburgh', 'Riverwoods', 'Danbury', 'Jacksonville', 'Minneapolis', 'Chattanooga', 'King of Prussia', 'Princeton', 'Houston', 'San Diego', 'Wayne', 'Houston', 'New Orleans', 'Denver', 'New York', 'Broomfield', 'Memphis', 'El Dorado', 'Las Vegas', 'Boca Raton', 'The Woodlands', 'Deerfield', 'Norfolk', 'SF', 'Burlington', 'Lake Forest', 'Englewood', 'Auburn Hills', 'Beverly Hills', 'Norwalk', 'Reston', 'Corning', 'Medford', 'Bellevue', 'Phoenix', 'Dallas', 'Charlotte', 'Detroit', 'Chicago', 'Auburn Hills', 'Jacksonville', 'Atlanta', 'Durham', 'Los Angeles', 'Santa Clara', 'New York', 'Houston', 'San Jose', 'Kingsport', 'Madison', 'Fort Wayne', 'Newport Beach', 'Oklahoma City', 'Calhoun', 'Houston', 'Roanoke', 'Mechanicsville', 'Providence', 'Lake Forest', 'Chicago', 'Grapevine', 'Austin', 'McLean', 'Norwalk', 'Jacksonville', 'Newark', 'Marlboro', 'Springfield', 'SF', 'Waltham', 'New York', 'Parsippany-Troy Hills', 'Houston', 'Estero', 'Houston', 'Omaha', 'Philadelphia', 'Omaha', 'Coraopolis', 'Atlanta', 'Lisle', 'Minneapolis', 'Denver', 'Allentown', 'Atlanta', 'Duluth', 'Richmond', 'Downers Grove', 'New York', 'Byron Center', 'Houston', 'Tulsa', 'Fremont', 'Seattle', 'Chicago', 
'Glenview', 'Camden', 'New York', 'Downers Grove', 'Warsaw', 'Dallas', 'New York', 'Springfield', 'Plano', 'Cincinnati', 'Secaucus', 'Norwalk', 'Greenwich', 'Pittsburgh', 'New York', 'Milwaukee', 'Livonia', 'Tysons', 'Lansing', 'Des Peres', 'Englewood', 'Erie', 'Hershey', 'Allentown', 'Newport News', 'Plymouth', 'Orrville', 'Brentwood', 'Greenwood Village', 'Victor', 'Miami', 'Houston', 'San Jose', 'Brentwood', 'Brentwood', 'Elkhart', 'Maumee', 'Seattle', 'Lowell', 'Orlando', 'Plano', 'New York', 'Evansville', 'Dallas', 'Santa Monica', 'Wilco inc', 'Wallingford', 'El Segundo', 'Wichita', 'Chicago', 'Melbourne', 'Seattle', 'Silver Spring', 'Perrysburg', 'San Jose', 'Cleveland', 'Cincinnati', 'Oshkosh', 'Cedar Rapids', 'Louisville', 'Tempe', 'Plano', 'Boston', 'Everett', 'New York', 'New York', 'Mahwah', 'Stamford', 'Ankeny', 'St. Louis', 'Glendale', 'Coral Gables', 'Jackson', 'Atlanta', 'St. Petersburg', 'Atlanta', 'Winston-Salem', 'Duluth', 'Providence', 'Lake Forest', 'New York', 'Houston', 'Little Rock', 'New York', 'San Mateo', 'Toledo', 'Chicago', 'Reston', 'Milwaukee', 'Oak Brook', 'Las Vegas', 'Clayton', 'Detroit', 'Chicago', 'Wilmington', 'San Antonio', 'St. Louis', 'Rolling Meadows', 'Irving', 'Charlotte', 'King of Prussia', 'Madison', 'Burlington', 'Birmingham', 'Beckett Ridge', 'St Paul', 'New York', 'Glen Allen', 'Westlake', 'Florham Park', 'Buffalo', 'Oakland', 'New York', 'New York', 'Bolingbrook', 'Louisville', 'Tarrytown', 'Little Rock', 'Scottsdale', 'Cincinnati', 'Atlanta', 'Westchester', 'Parsippany-Troy Hills', 'Horsham', 'Merriam', 'McLean', 'Santa Ana', 'Fairfield', 'Rye', 'Chicago', 'Brookfield', 'Milwaukee', 'Houston', 'St Paul', 'St. Louis', 'Phoenix', 'Indianapolis', 'Englewood', 'Sunnyvale', 'Medina', 'Irving', 'New York', 'Irving', 'Cincinnati', 'New York', 'Bethesda', 'Troy', 'Kennett Square', 'Irving', 'Santa Clara', 'Parsippany-Troy Hills', 'SF', 'Deerfield', 'Columbus', 'Menlo Park', 'St. 
Louis', 'Pawtucket', 'Worcester', 'Wilmington', 'Mountain View', 'Fort Mill', 'Houston', 'North Kansas City', 'Norwood', 'Chicago', 'Deerfield', 'Hartsville', 'Sunnyvale', 'Irving', 'Falls Church', 'Baltimore', 'Medina', 'Columbus', 'SF', 'Libertyville', 'Overland Park', 'El Segundo', 'Johnston', 'Merrillville', 'Las Vegas', 'Redwood City', 'Houston', 'Sparks Glencoe', 'Baltimore', 'Sterling', 'Los Angeles', 'Brentwood', 'Columbus', 'Boston', 'New Braunfels', 'Rye Brook', 'Dallas', 'Chicago', 'Phoenix', 'North Canton', 'Sarasota', 'Commerce', 'Newton', 'New York City', 'Flint', 'Denver', 'Santa Clara', 'Reston', 'Bismarck', 'Mechanicsburg', 'Boise', 'New York', 'Cayce', 'Atlanta', 'Winona', 'Green Bay', 'Baltimore', 'Herndon', 'Los Angeles', 'Redwood City', 'Westport', 'Houston', 'Arlington', 'Miami', 'Charlotte', 'Toledo', 'Boca Raton', 'Berwyn', 'Carmel', 'Lincolnshire', 'Boston', 'Houston', 'Tampa', 'New York', 'Mountain View', 'Tempe', 'Manhattan Beach', 'Houston', 'New York', 'McKinney', 'Lake Success', 'Sussex', 'Deerfield', 'Scottsdale', 'Stamford', 'Neenah', 'New York', 'Oak Brook', 'Cincinnati', 'Black Mountain', 'Kenosha', 'York', 'Indianapolis', 'Atlanta', 'Birmingham', 'Raleigh', 'New York', 'Carthage', 'Grand Rapids', 'Denton', 'Thomasville', 'New York', 'West Des Moines', 'Birmingham', 'Scottsdale', 'Wilmerding', 'Houston', 'Boston', 'Richardson', 'Highland Heights', 'Pittsburgh', 'Ewing Township', 'West Palm Beach', 'Charlotte', 'Evansville', 'Annapolis Junction', 'Lincolnshire', 'Maumee', 'Omaha', 'Leavenworth', 'Shelton', 'Dallas', 'Irving', 'Irving', 'Denver', 'Woburn', 'Jacksonville', 'Houston', 'Houston', 'Chicago', 'Delaware', 'The Woodlands', 'Novi', 'Philadelphia', 'Chicago', 'Southlake', 'Omaha', 'Columbus', 'Lake Forest', 'New York', 'Chicago', 'Greenville', 'Phoenix', 'Knoxville', 'New Haven', 'Stamford', 'New York', 'Bloomington', 'Madison', 'Berwyn', 'Cleveland', 'Pittsburgh', 'Catawba', 'Atlanta', 'New Albany', 'Milpitas', 'Sunbury', 
'Bellevue', 'Fort Lauderdale', 'Carmel', 'Avon Lake', 'Philadelphia', 'Irvine', 'Chandler', 'Reno', 'Los Angeles', 'Galveston', 'Atlanta', 'New York', 'Atlanta', 'Madison', 'Pittsburgh', 'Corona', 'Atlanta', 'Chicago', 'Atlanta', 'Beloit', 'Thomasville', 'Voorhees Township', 'New York', 'Richmond', 'Richmond', 'Stevens Point', 'Laurel', 'Northbrook', 'Stamford', 'New York City', 'Houston', 'Rochester Hills', 'Houston', 'Dallas', 'San Leandro', 'Santa Clara', 'Covington', 'Scottsdale', 'St. George', 'Chicago', 'Spring', 'Santa Rosa', 'Knoxville', 'New York', 'Rochester', 'Dallas', 'Wyomissing', 'McLean', 'Van Buren Charter Township', 'Parsippany-Troy Hills', 'Sunnyvale', 'Oklahoma City', 'Houston', 'Las Vegas', 'Charlotte', 'Dallas', 'Marlboro', 'KCMO', 'Greensboro', 'Grand Rapids', 'New York', 'Columbus', 'North Canton', 'Milwaukee', 'San Diego', 'St. Louis', 'Louisville', 'Plano', 'Watsonville', 'Palm Beach Gardens', 'Norwell', 'Tempe', 'Marysville', 'Lebanon', 'San Juan', 'SF', 'Memphis', 'Merrimack', 'Nashville', 'Cleveland', 'Melville', 'Baltimore', 'Cleveland', 'New York', 'Fort Lauderdale', 'Franklin', 'Palo Alto', 'Chicago', 'Cleveland', 'Portsmouth', 'Findlay', 'Lake Success', 'Horsham', 'Los Angeles', 'Fort Smith', 'New York', 'Irvine', 'New York', 'New York', 'New York', 'Oklahoma City', 'Hanover', 'Columbus', 'Cincinnati', 'McLean', 'Covington', 'Richardson', 'Ann Arbor', 'Stamford', 'St. 
Louis', 'Lexington', 'Pasadena', 'San Diego', 'Omaha', 'Chicago', 'Blue Bell', 'Salt Lake City', 'Cockeysville', 'Nashville', 'Columbus', 'Mountain View', 'Chicago', 'Boston', 'KCMO', 'Plano', 'Honolulu', 'New York', 'Raleigh', 'Glendale', 'Costa Mesa', 'Evansville', 'Sunnyvale', 'Houston', 'Miramar', 'Calabasas', 'Cleveland', 'SF', 'Fort Worth', 'Thousand Oaks', 'Malvern', 'Boston', 'Cleveland', 'Arlington', 'Lincoln', 'Midland', 'White Plains', 'KCMO', 'Denver', 'Topeka', 'Las Vegas', 'Honolulu', 'McLean', 'Las Vegas', 'Farmington', 'York', 'San Jose', 'Neenah', 'Tacoma', 'Tulsa', 'Bloomington', 'Cambridge', 'Elma Center', 'Boston', 'Chicago', 'Branchville', 'Crystal Lake', 'Scottsdale', 'Portland', 'Fairlawn', 'Miamisburg', 'SF', 'Pittsburgh', 'New York', 'Red Bank', 'Reston', 'SF', 'Houston', 'Waltham', 'Corte Madera', 'Irving', 'Phoenix', 'Raleigh', 'St. Louis', 'Las Vegas', 'Dallas', 'Milwaukee', 'Minneapolis', 'Elmsford', 'New York', 'Reading', 'Los Angeles', 'Houston', 'Leavenworth', 'Rockford', 'San Jose', 'New York', 'Elgin', 'Waterford', 'Cleveland', 'Chicago', 'San Jose', 'St. 
Louis', 'Tucker', 'Milford', 'St Paul', 'West Palm Beach', 'Waltham', 'Chesterfield', 'San Jose', 'New Haven', 'Lake Forest', 'KCMO', 'Fort Myers', 'Provo', 'Zeeland', 'Montpelier', 'Charlotte', 'Wall Township', 'Milwaukee', 'Memphis', 'Boca Raton', 'Indianapolis', 'Oklahoma City', 'Calabasas', 'Raleigh', 'Orlando', 'Leawood', 'Norcross', 'Coppell', 'Scottsdale', 'Pleasanton', 'Chicago', 'Charlotte', 'El Dorado', 'Hoffman Estates', 'Louisville', 'Houston', 'SF', 'Darien', 'Boulder', 'Richmond', 'Commerce', 'El Segundo', 'Owatonna', 'Muscatine', 'Newton', 'Lake Oswego', 'Hercules', 'Arlington', 'Ames', 'Harrison', 'Dayton', 'Elkhart', 'Wayne', 'Jersey City', 'Hato Rey', 'Pleasanton', 'Pleasanton', 'Columbus', 'North Reading', 'Omaha', 'Los Angeles', 'Fort Collins', 'Seattle', 'Lexington', 'Downers Grove', 'Tulsa', 'Franklin Township', 'Brentwood', 'Richmond', 'Westlake', 'San Diego', 'Livonia', 'Memphis', 'Tustin', 'Pittsburgh', 'San Rafael', 'Cary', 'Walnut Creek', 'Honolulu', 'Englewood', 'Houston', 'Santa Clara', 'Liberty Lake', 'Portland', 'Los Angeles', 'Bellevue', 'McLean', 'San Diego', 'New York', 'Dallas', 'Stamford', 'Westbrook', 'Shoreview', 'Columbus', 'Houston', 'Houston', 'Orlando', 'Charlotte', 'Lake Zurich', 'Chicago', 'San Jose', 'Burlington', 'Indianapolis', 'Chicago', 'Santa Clara', 'Denver', 'Chantilly', 'Overland Park', 'Greenwich', 'Northfield', 'Houston', 'Denver', 'Milwaukee', 'Atlanta', 'Andover', 'Broomfield', 'Cincinnati', 'Daytona Beach', 'Daytona Beach', 'El Segundo', 'Bernards', 'Houston', 'San Jose', 'Secaucus', 'Chicago', 'Bensalem', 'Roswell', 'Wilmington', 'Irvine', 'Mission Viejo', 'Irvine'], 'State': ['AR', 'TX', 'NE', 'CA', 'MN', 'CA', 'RI', 'WA', 'TX', 'MI', 'MI', 'PA', 'CA', 'OH', 'WA', 'NY', 'OH', 'MA', 'IL', 'NY', 'WA', 'CA', 'GA', 'NC', 'MO', 'CA', 'IL', 'TX', 'IN', 'WA', 'TX', 'NY', 'PA', 'NY', 'TX', 'IL', 'NJ', 'VA', 'MN', 'NC', 'OH', 'OH', 'NY', 'GA', 'NY', 'CA', 'Michigan', 'IL', 'CT', 'TN', 'CT', 'NJ', 'ID', 'TX', 'CA', 
'KY', 'NY', 'CA', 'MD', 'NY', 'MO', 'CA', 'TN', 'TX', 'IL', 'OH', 'NY', 'MA', 'NY', 'NY', 'TX', 'MN', 'CT', 'CT', 'GA', 'CA', 'NJ', 'NJ', 'IL', 'AR', 'IL', 'CA', 'FL', 'NY', 'MA', 'NY', 'GA', 'FL', 'OR', 'TX', 'FL', 'IL', 'MA', 'PA', 'TX', 'MN', 'MN', 'NY', 'VA', 'TX', 'VA', 'IL', 'NY', 'WI', 'TX', 'NY', 'CA', 'NY', 'NY', 'IL', 'IL', 'OH', 'CO', 'PA', 'TX', 'CA', 'IL', 'VA', 'MA', 'OH', 'IL', 'MN', 'TN', 'TN', 'NC', 'GA', 'MD', 'AZ', 'IN', 'CA', 'IL', 'WA', 'CA', 'VA', 'NJ', 'NY', 'GA', 'FL', 'MI', 'MI', 'NE', 'TX', 'WI', 'MA', 'NY', 'TX', 'TX', 'MI', 'IN', 'ID', 'NC', 'CA', 'TX', 'VA', 'WA', 'CT', 'WI', 'CA', 'FL', 'TN', 'CA', 'WA', 'TX', 'CA', 'PA', 'LA', 'FL', 'CA', 'CA', 'FL', 'VA', 'IL', 'CT', 'VA', 'NY', 'AZ', 'GA', 'MO', 'CO', 'MN', 'CA', 'MN', 'WA', 'NY', 'OH', 'CT', 'OH', 'NY', 'IL', 'OH', 'PA', 'TX', 'MN', 'GA', 'NJ', 'NJ', 'NY', 'TN', 'MO', 'PA', 'CA', 'TX', 'CO', 'IL', 'PA', 'TX', 'CA', 'RI', 'CA', 'IA', 'TX', 'NY', 'OK', 'VA', 'MN', 'MN', 'NY', 'TX', 'OH', 'TX', 'NY', 'CA', 'OK', 'NJ', 'NY', 'MI', 'NV', 'CT', 'CT', 'FL', 'OH', 'MI', 'VA', 'MO', 'TX', 'NY', 'NY', 'NY', 'NY', 'MI', 'NY', 'NC', 'NJ', 'CA', 'MA', 'PA', 'CA', 'NJ', 'OK', 'NC', 'NJ', 'MN', 'CA', 'NY', 'NY', 'OH', 'TX', 'NY', 'MA', 'CA', 'CA', 'PA', 'IL', 'CT', 'FL', 'MN', 'TN', 'PA', 'NJ', 'TX', 'CA', 'NJ', 'TX', 'LA', 'CO', 'NY', 'CO', 'TN', 'AR', 'NV', 'FL', 'TX', 'IL', 'VA', 'CA', 'NC', 'IL', 'CO', 'MI', 'CA', 'CT', 'VA', 'NY', 'OR', 'WA', 'AZ', 'TX', 'NC', 'MI', 'IL', 'MI', 'FL', 'GA', 'NC', 'CA', 'CA', 'NY', 'TX', 'CA', 'TN', 'WI', 'IN', 'CA', 'OK', 'GA', 'TX', 'VA', 'VA', 'RI', 'IL', 'IL', 'TX', 'MN', 'VA', 'CT', 'FL', 'NJ', 'MA', 'MO', 'CA', 'MA', 'NY', 'NJ', 'TX', 'FL', 'TX', 'NE', 'PA', 'NE', 'PA', 'GA', 'IL', 'MN', 'CO', 'PA', 'GA', 'GA', 'VA', 'IL', 'NY', 'MI', 'TX', 'OK', 'CA', 'WA', 'IL', 'IL', 'NJ', 'NY', 'IL', 'IN', 'TX', 'NY', 'MA', 'TX', 'OH', 'NJ', 'CT', 'CT', 'PA', 'NY', 'WI', 'MI', 'VA', 'MI', 'MO', 'CO', 'PA', 'PA', 'PA', 'VA', 'MN', 'OH', 'TN', 'CO', 'NY', 'FL', 'TX', 
'CA', 'TN', 'TN', 'IN', 'OH', 'WA', 'AR', 'FL', 'TX', 'NY', 'IN', 'TX', 'CA', 'NY', 'CT', 'CA', 'KS', 'IL', 'FL', 'WA', 'MD', 'OH', 'CA', 'OH', 'OH', 'WI', 'IA', 'KY', 'AZ', 'TX', 'MA', 'WA', 'NY', 'NY', 'NJ', 'CT', 'IA', 'MO', 'CA', 'FL', 'MI', 'GA', 'FL', 'GA', 'NC', 'GA', 'RI', 'IL', 'NY', 'TX', 'AR', 'NY', 'CA', 'OH', 'IL', 'VA', 'WI', 'IL', 'NV', 'MO', 'MI', 'IL', 'DE', 'TX', 'MO', 'IL', 'TX', 'NC', 'PA', 'NJ', 'NJ', 'AL', 'OH', 'MN', 'NY', 'VA', 'OH', 'NJ', 'NY', 'CA', 'NY', 'NY', 'IL', 'KY', 'NY', 'AR', 'AZ', 'OH', 'GA', 'IL', 'NJ', 'PA', 'KS', 'VA', 'CA', 'OH', 'NY', 'IL', 'WI', 'WI', 'TX', 'MN', 'MO', 'AZ', 'IN', 'CO', 'CA', 'MN', 'TX', 'NY', 'TX', 'OH', 'NY', 'MD', 'MI', 'PA', 'TX', 'CA', 'NJ', 'CA', 'IL', 'OH', 'CA', 'MO', 'RI', 'MA', 'DE', 'CA', 'SC', 'TX', 'MO', 'MA', 'IL', 'IL', 'SC', 'CA', 'TX', 'VA', 'MD', 'OH', 'GA', 'CA', 'IL', 'KS', 'CA', 'RI', 'IN', 'NV', 'CA', 'TX', 'MD', 'MD', 'VA', 'CA', 'TN', 'OH', 'MA', 'TX', 'NY', 'TX', 'IL', 'AZ', 'OH', 'FL', 'CA', 'NC', 'NY', 'MI', 'CO', 'CA', 'VA', 'ND', 'PA', 'ID', 'NY', 'SC', 'GA', 'MN', 'WI', 'MD', 'VA', 'CA', 'CA', 'CT', 'TX', 'VA', 'FL', 'NC', 'OH', 'FL', 'PA', 'IN', 'IL', 'MA', 'TX', 'FL', 'NY', 'CA', 'AZ', 'CA', 'TX', 'NY', 'TX', 'NY', 'WI', 'IL', 'AZ', 'CT', 'WI', 'NY', 'IL', 'OH', 'NC', 'WI', 'PA', 'IN', 'GA', 'AL', 'NC', 'NY', 'MO', 'MI', 'TX', 'GA', 'NY', 'IA', 'AL', 'AZ', 'PA', 'TX', 'MA', 'TX', 'KY', 'PA', 'NJ', 'FL', 'NC', 'IN', 'MD', 'IL', 'OH', 'NE', 'WA', 'CT', 'TX', 'TX', 'TX', 'CO', 'MA', 'FL', 'TX', 'TX', 'IL', 'OH', 'TX', 'MI', 'PA', 'IL', 'TX', 'NE', 'OH', 'IL', 'NY', 'IL', 'SC', 'AZ', 'TN', 'CT', 'CT', 'NY', 'IL', 'WI', 'PA', 'OH', 'PA', 'SC', 'GA', 'OH', 'CA', 'PA', 'WA', 'FL', 'IN', 'OH', 'PA', 'CA', 'AZ', 'NV', 'CA', 'TX', 'GA', 'NY', 'GA', 'WI', 'PA', 'CA', 'GA', 'IL', 'GA', 'WI', 'NC', 'NJ', 'NY', 'VA', 'VA', 'WI', 'MS', 'IL', 'CT', 'NY', 'TX', 'MI', 'TX', 'TX', 'CA', 'CA', 'KY', 'AZ', 'UT', 'IL', 'TX', 'CA', 'TN', 'NY', 'NY', 'TX', 'PA', 'VA', 'MI', 'NJ', 'CA', 'OK', 'TX', 
'NV', 'NC', 'TX', 'MA', 'MO', 'NC', 'MI', 'NY', 'OH', 'OH', 'WI', 'CA', 'MO', 'KY', 'TX', 'CA', 'FL', 'MA', 'AZ', 'OH', 'TN', 'Puerto Rico', 'CA', 'TN', 'NH', 'TN', 'OH', 'NY', 'MD', 'OH', 'NY', 'FL', 'TN', 'CA', 'IL', 'OH', 'NH', 'OH', 'NY', 'PA', 'CA', 'AR', 'NY', 'CA', 'NY', 'NY', 'NY', 'OK', 'MD', 'OH', 'OH', 'VA', 'LA', 'TX', 'MI', 'CT', 'MO', 'KY', 'CA', 'CA', 'NE', 'IL', 'PA', 'UT', 'MD', 'TN', 'OH', 'CA', 'IL', 'MA', 'MO', 'TX', 'HI', 'NY', 'NC', 'CA', 'CA', 'IN', 'CA', 'TX', 'FL', 'CA', 'OH', 'CA', 'TX', 'CA', 'PA', 'MA', 'OH', 'VA', 'RI', 'TX', 'NY', 'MO', 'CO', 'KS', 'NV', 'HI', 'VA', 'NV', 'UT', 'PA', 'CA', 'WI', 'WA', 'OK', 'MN', 'MA', 'NY', 'MA', 'IL', 'NJ', 'IL', 'AZ', 'OR', 'OH', 'OH', 'CA', 'PA', 'NY', 'NJ', 'VA', 'CA', 'TX', 'MA', 'CA', 'TX', 'AZ', 'NC', 'MO', 'NV', 'TX', 'WI', 'MN', 'NY', 'NY', 'PA', 'CA', 'TX', 'WA', 'MI', 'CA', 'NY', 'IL', 'NY', 'OH', 'IL', 'CA', 'MO', 'GA', 'MA', 'MN', 'FL', 'MA', 'MO', 'CA', 'CT', 'IL', 'MO', 'FL', 'UT', 'MI', 'VT', 'NC', 'NJ', 'WI', 'TN', 'FL', 'IN', 'OK', 'CA', 'NC', 'FL', 'KS', 'GA', 'TX', 'AZ', 'CA', 'IL', 'NC', 'AR', 'IL', 'KY', 'TX', 'CA', 'CT', 'CO', 'VA', 'CA', 'CA', 'MN', 'IA', 'MA', 'OR', 'CA', 'VA', 'IA', 'NY', 'OH', 'IN', 'PA', 'NJ', 'Puerto Rico', 'CA', 'CA', 'OH', 'MA', 'NE', 'CA', 'CO', 'WA', 'KY', 'IL', 'OK', 'NJ', 'TN', 'VA', 'OH', 'CA', 'MI', 'TN', 'CA', 'PA', 'CA', 'NC', 'CA', 'HI', 'CO', 'TX', 'CA', 'WA', 'OR', 'CA', 'WA', 'VA', 'CA', 'NY', 'TX', 'CT', 'ME', 'MN', 'OH', 'TX', 'TX', 'FL', 'NC', 'IL', 'IL', 'CA', 'MA', 'IN', 'IL', 'CA', 'CO', 'VA', 'KS', 'CT', 'IL', 'TX', 'CO', 'WI', 'GA', 'MA', 'CO', 'OH', 'FL', 'FL', 'CA', 'NJ', 'TX', 'CA', 'NJ', 'IL', 'PA', 'GA', 'MA', 'CA', 'CA', 'CA'], 'Latitude': [36.372853799999994, 32.814017699999994, 41.2565369, 37.322997799999996, 44.9211836, 37.7749295, 42.0028761, 47.6062095, 32.7766642, 42.331427000000005, 42.3222599, 40.0756627, 37.7799273, 40.0992294, 47.5301011, 40.7127753, 39.103118200000004, 42.360082500000004, 42.171136499999996, 
40.7127753, 47.751074100000004, 37.3860517, 33.7489954, 35.2270869, 38.6270025, 37.7749295, 41.8781136, 29.7604267, 39.768403, 47.6739881, 29.4241219, 40.7127753, 39.9525839, 41.1264849, 30.508255100000003, 40.4842027, 40.4862157, 38.933867600000006, 44.977753, 35.5848596, 41.04422, 39.103118200000004, 40.7127753, 33.7489954, 41.0400135, 37.354107899999995, 43.623574, 41.8781136, 41.76580429999999, 35.1495343, 41.7360305, 40.735657, 43.6150186, 29.7604267, 34.18083920000001, 38.252664700000004, 40.7127753, 37.441883399999995, 38.984652000000004, 40.7127753, 38.6270025, 37.338208200000004, 36.1626638, 32.7766642, 42.171136499999996, 39.9611755, 40.7127753, 42.360082500000004, 40.7127753, 40.7127753, 32.7554883, 44.8832982, 41.826488, 41.0534302, 33.7489954, 37.4529598, 40.839592200000006, 40.6764911, 42.127526700000004, 36.18674420000001, 41.8781136, 37.4852152, 27.9658533, 40.7127753, 42.279286, 40.7127753, 33.7489954, 28.039465399999997, 45.4887993, 29.4241219, 25.7616798, 41.8781136, 42.1014831, 40.2398118, 29.7604267, 44.8480218, 44.953702899999996, 40.7127753, 38.882334, 29.4241219, 38.933867600000006, 41.5067003, 40.7127753, 43.0389025, 29.7604267, 40.7127753, 37.441883399999995, 40.7127753, 40.7127753, 42.325578, 42.304505, 41.55199520000001, 39.5807452, 40.440624799999995, 29.7604267, 37.558546500000006, 42.171136499999996, 38.882334, 42.376485200000005, 39.103118200000004, 41.9867507, 44.977753, 36.3231066, 35.1495343, 35.2270869, 33.7489954, 38.984652000000004, 33.4483771, 39.768403, 34.1705609, 41.8397865, 47.6062095, 32.715738, 36.7682088, 40.865286499999996, 40.7127753, 32.4609764, 26.122438600000002, 42.583645000000004, 42.1167065, 41.2565369, 32.7766642, 43.0389025, 42.376485200000005, 40.7127753, 29.7604267, 32.7766642, 42.473368799999996, 39.201440399999996, 43.6150186, 35.2270869, 33.770050399999995, 32.814017699999994, 37.540724600000004, 47.6101497, 41.76580429999999, 43.1788967, 37.338208200000004, 27.767600800000004, 35.9250637, 37.7749295, 
47.751074100000004, 32.814017699999994, 34.0522342, 40.440624799999995, 32.5093109, 26.879781899999998, 37.7749295, 37.548269700000006, 27.950575, 37.540724600000004, 42.062991499999995, 41.0534302, 37.540724600000004, 40.7127753, 33.4483771, 33.7489954, 38.6270025, 39.739235799999996, 44.8546856, 37.7749295, 44.977753, 47.6062095, 40.7127753, 39.9611755, 41.0262417, 41.081444700000006, 40.7127753, 42.190024900000004, 41.499320000000004, 40.440624799999995, 32.7766642, 44.8546856, 33.7489954, 40.8932469, 40.7439905, 40.7127753, 36.1626638, 38.6270025, 39.9525839, 37.354107899999995, 29.7604267, 39.647765299999996, 42.069750899999995, 40.0462208, 32.7766642, 34.0522342, 41.8239891, 37.7021521, 41.600544799999994, 32.735687, 40.7127753, 35.4675602, 38.8816208, 44.953702899999996, 45.0502435, 40.7127753, 29.7604267, 41.081444700000006, 29.7604267, 40.7127753, 37.338208200000004, 36.1539816, 40.714637599999996, 40.7127753, 42.32115220000001, 36.169941200000004, 41.6612104, 41.117744, 25.7616798, 39.9611755, 42.331427000000005, 37.540724600000004, 38.6631083, 33.0198431, 41.0400135, 40.7127753, 40.793432200000005, 40.7127753, 42.291706899999994, 40.7127753, 36.072635399999996, 40.8206555, 34.0805651, 42.373615799999996, 40.440624799999995, 37.654656, 40.697589799999996, 36.1539816, 36.0998596, 41.016763899999994, 44.977753, 34.165357, 40.7127753, 40.7127753, 41.499320000000004, 30.1658207, 40.7127753, 42.360082500000004, 37.441883399999995, 37.235807799999996, 40.440624799999995, 42.167525399999995, 41.394816999999996, 30.3321838, 44.977753, 35.0456297, 40.101285600000004, 40.3572976, 29.7604267, 32.715738, 40.925372499999995, 29.7604267, 29.9510658, 39.739235799999996, 40.7127753, 39.9205411, 35.1495343, 33.20763, 36.169941200000004, 26.3683064, 30.1658207, 42.171136499999996, 36.8507689, 37.7749295, 36.0956918, 42.2586342, 39.647765299999996, 42.6875323, 34.073620399999996, 41.117744, 38.9586307, 42.1428521, 42.3265152, 47.6101497, 33.4483771, 32.7766642, 35.2270869, 
42.331427000000005, 41.8781136, 42.6875323, 30.3321838, 33.7489954, 35.980513, 34.0522342, 37.354107899999995, 40.7127753, 29.7604267, 37.338208200000004, 36.548434, 43.07305170000001, 41.079273, 33.618882899999996, 35.4675602, 34.502587, 29.7604267, 37.270970399999996, 37.6087561, 41.8239891, 42.2586342, 41.8781136, 32.9342919, 43.6666296, 38.933867600000006, 41.117744, 30.3321838, 40.735657, 42.3459271, 37.20895720000001, 37.7749295, 42.376485200000005, 40.7127753, 40.865286499999996, 29.7604267, 26.438136, 29.7604267, 41.2565369, 39.9525839, 41.2565369, 40.5184013, 33.7489954, 41.801140999999994, 44.977753, 39.739235799999996, 40.6022939, 33.7489954, 34.0028786, 37.540724600000004, 41.8089191, 40.7127753, 42.812250799999994, 29.7604267, 36.1539816, 37.548269700000006, 47.6062095, 41.8781136, 42.069750899999995, 39.9259463, 40.7127753, 41.8089191, 41.2381, 32.7766642, 40.7127753, 42.1014831, 33.0198431, 39.103118200000004, 40.7895453, 41.117744, 41.0262417, 40.440624799999995, 40.7127753, 43.0389025, 42.36837, 38.9187222, 42.732535, 38.59722, 39.647765299999996, 42.1292241, 40.2859239, 40.6022939, 37.087082099999996, 45.01051939999999, 40.843666299999995, 36.0331164, 39.6172101, 42.982563299999995, 25.7616798, 29.7604267, 37.338208200000004, 36.0331164, 36.0331164, 41.6819935, 41.5628294, 47.6062095, 36.25535429999999, 28.5383355, 33.0198431, 40.7127753, 37.9715592, 32.7766642, 34.01945429999999, 40.744679, 41.45701079999999, 33.9191799, 37.6871761, 41.8781136, 28.0836269, 47.6062095, 38.9906657, 41.556996000000005, 37.338208200000004, 41.499320000000004, 39.103118200000004, 44.024706200000004, 41.9778795, 38.252664700000004, 33.4255104, 33.0198431, 42.360082500000004, 47.9789848, 40.7127753, 40.7127753, 41.0886216, 41.0534302, 41.7317884, 38.6270025, 34.1425078, 25.72149, 42.245869, 33.7489954, 27.767600800000004, 33.7489954, 36.0998596, 34.0028786, 41.8239891, 42.2586342, 40.7127753, 29.7604267, 34.7464809, 40.7127753, 37.562991700000005, 41.6528052, 
41.8781136, 38.9586307, 43.0389025, 41.8397865, 36.169941200000004, 38.6425518, 42.331427000000005, 41.8781136, 39.7390721, 29.4241219, 38.6270025, 42.0841936, 32.814017699999994, 35.2270869, 40.101285600000004, 40.7598227, 40.071222, 33.5206608, 39.332126200000005, 44.953702899999996, 40.7127753, 37.665978, 41.4553232, 40.787878000000006, 42.8864468, 37.8043637, 40.7127753, 40.7127753, 41.6986416, 38.252664700000004, 41.076207700000005, 34.7464809, 33.494170399999994, 39.103118200000004, 33.7489954, 41.8498339, 40.865286499999996, 40.1784422, 39.023616499999996, 38.933867600000006, 33.7454725, 39.345467299999996, 40.980653499999995, 41.8781136, 43.0605671, 43.0389025, 29.7604267, 44.953702899999996, 38.6270025, 33.4483771, 39.768403, 39.647765299999996, 37.368829999999996, 45.0352411, 32.814017699999994, 40.7127753, 32.814017699999994, 39.103118200000004, 40.7127753, 38.984652000000004, 42.6064095, 39.84677670000001, 32.814017699999994, 37.354107899999995, 40.865286499999996, 37.7749295, 42.171136499999996, 39.9611755, 37.4529598, 38.6270025, 41.878710999999996, 42.262593200000005, 39.7390721, 37.3860517, 35.007369700000005, 29.7604267, 39.1429081, 42.1943909, 41.8781136, 42.171136499999996, 34.3740431, 37.368829999999996, 32.814017699999994, 38.882334, 39.2903848, 41.143245, 32.4609764, 37.7749295, 42.2333571, 38.9822282, 33.9191799, 41.820519899999994, 41.482814399999995, 36.169941200000004, 37.4852152, 29.7604267, 39.530938899999995, 39.2903848, 38.962489899999994, 34.3058279, 36.0331164, 39.9611755, 42.360082500000004, 29.7030024, 41.0192641, 32.7766642, 41.8781136, 33.4483771, 40.875890999999996, 27.3364347, 34.0005691, 35.7344538, 40.7127753, 43.012527399999996, 39.739235799999996, 37.354107899999995, 38.9586307, 46.808326799999996, 40.2142565, 43.6150186, 40.7127753, 33.9657091, 33.7489954, 44.0553908, 44.51331879999999, 39.2903848, 38.9695545, 34.0522342, 37.4852152, 41.141471700000004, 29.7604267, 38.8816208, 25.7616798, 35.2270869, 41.6528052, 
26.3683064, 40.045823999999996, 39.978371, 42.190024900000004, 42.360082500000004, 29.7604267, 27.950575, 40.7127753, 37.3860517, 33.4255104, 33.884736100000005, 29.7604267, 40.7127753, 33.197246500000006, 40.7706572, 43.13418, 42.171136499999996, 33.494170399999994, 41.0534302, 44.1858193, 40.7127753, 41.8397865, 39.2807348, 35.6178951, 42.5847425, 39.9625984, 39.768403, 33.7489954, 33.5206608, 35.7795897, 40.7127753, 37.176446999999996, 42.9633599, 33.2148412, 30.8365815, 40.7127753, 41.5772115, 33.5206608, 33.494170399999994, 40.3909023, 29.7604267, 42.360082500000004, 32.948333500000004, 39.033116899999996, 40.440624799999995, 40.2599864, 26.7153424, 35.2270869, 37.9715592, 39.1202934, 42.190024900000004, 41.5628294, 41.2565369, 47.751074100000004, 41.3164856, 32.7766642, 32.814017699999994, 32.814017699999994, 39.739235799999996, 42.479261799999996, 30.3321838, 29.7604267, 29.7604267, 41.8781136, 40.2986724, 30.1658207, 42.48059, 39.9525839, 41.8781136, 32.9412363, 41.2565369, 39.9611755, 42.2586342, 40.7127753, 41.8781136, 34.8526176, 33.4483771, 35.96063839999999, 41.308274, 41.0534302, 40.7127753, 40.4842027, 43.07305170000001, 40.045823999999996, 41.499320000000004, 40.440624799999995, 34.85292329999999, 33.7489954, 40.0811745, 37.4323341, 40.862584999999996, 47.6101497, 26.1669711, 39.978371, 41.50531779999999, 39.9525839, 33.6845673, 33.3061605, 39.529632899999996, 34.0522342, 29.3013479, 33.7489954, 40.7127753, 33.7489954, 43.07305170000001, 40.440624799999995, 33.8752935, 33.7489954, 41.8781136, 33.7489954, 42.5083482, 35.8826369, 39.851944700000004, 40.7127753, 37.540724600000004, 37.540724600000004, 44.5235792, 31.694050899999997, 42.127526700000004, 41.0534302, 40.7127753, 29.7604267, 42.6583661, 30.0575359, 32.7766642, 37.7249296, 37.354107899999995, 39.083671200000005, 33.494170399999994, 37.0965278, 41.8781136, 30.079940500000003, 38.440428999999995, 35.96063839999999, 40.7127753, 43.156577899999995, 32.7766642, 40.329537, 38.933867600000006, 
42.2203171, 40.865286499999996, 37.368829999999996, 35.4675602, 29.7604267, 36.169941200000004, 35.2270869, 32.7766642, 42.3459271, 39.0997265, 36.072635399999996, 42.9633599, 40.7127753, 39.9611755, 40.875890999999996, 43.0389025, 32.715738, 38.6270025, 38.252664700000004, 33.0198431, 36.910231, 26.8233946, 42.1615157, 33.4255104, 40.2364486, 36.2081098, 18.465539399999997, 37.7749295, 35.1495343, 42.867869299999995, 36.1626638, 41.499320000000004, 40.793432200000005, 39.2903848, 41.499320000000004, 40.7127753, 26.122438600000002, 35.9250637, 37.441883399999995, 41.8781136, 41.499320000000004, 43.071755200000005, 41.04422, 40.7706572, 40.1784422, 34.0522342, 35.385924200000005, 40.7127753, 33.6845673, 40.7127753, 40.7127753, 40.7127753, 35.4675602, 39.1955042, 39.9611755, 39.103118200000004, 38.933867600000006, 30.4754702, 32.948333500000004, 42.2808256, 41.0534302, 38.6270025, 38.040583700000006, 34.1477849, 32.715738, 41.2565369, 41.8781136, 40.1523309, 40.760779299999996, 39.490001299999996, 36.1626638, 39.9611755, 37.3860517, 41.8781136, 42.360082500000004, 39.0997265, 33.0198431, 21.3069444, 40.7127753, 35.7795897, 34.1425078, 33.6412156, 37.9715592, 37.368829999999996, 29.7604267, 25.9860762, 34.1367208, 41.499320000000004, 37.7749295, 32.7554883, 34.1705609, 40.0362184, 42.360082500000004, 41.499320000000004, 38.8816208, 41.911012299999996, 31.9973456, 41.0339862, 39.0997265, 39.739235799999996, 39.0473451, 36.169941200000004, 21.3069444, 38.933867600000006, 36.169941200000004, 40.9804999, 39.9625984, 37.338208200000004, 44.1858193, 47.252876799999996, 36.1539816, 44.840798, 42.373615799999996, 42.82122879999999, 42.360082500000004, 41.8781136, 41.1464852, 42.2411344, 33.494170399999994, 45.5122308, 41.127833, 39.642836200000005, 37.7749295, 40.440624799999995, 40.7127753, 40.347054299999996, 38.9586307, 37.7749295, 29.7604267, 42.376485200000005, 37.92548060000001, 32.814017699999994, 33.4483771, 35.7795897, 38.6270025, 36.169941200000004, 32.7766642, 
43.0389025, 44.977753, 41.055096899999995, 40.7127753, 40.335648299999995, 34.0522342, 29.7604267, 47.751074100000004, 43.1200272, 37.338208200000004, 40.7127753, 42.035408399999994, 42.7925777, 41.499320000000004, 41.8781136, 37.338208200000004, 38.6270025, 33.8545479, 42.13985770000001, 44.953702899999996, 26.7153424, 42.376485200000005, 38.6631083, 37.338208200000004, 41.308274, 42.2586342, 39.0997265, 26.640628000000003, 40.233843799999995, 42.8125246, 44.2600593, 35.2270869, 40.160666600000006, 43.0389025, 35.1495343, 26.3683064, 39.768403, 35.4675602, 34.1372953, 35.7795897, 28.5383355, 38.966673, 33.969864, 32.954568699999996, 33.494170399999994, 37.6624312, 41.8781136, 35.2270869, 33.20763, 42.062991499999995, 38.252664700000004, 29.7604267, 37.7749295, 41.0771914, 40.0149856, 37.540724600000004, 34.0005691, 33.9191799, 44.085557200000004, 41.424473, 42.337041299999996, 45.4156817, 38.017144099999996, 38.8816208, 42.03078120000001, 41.0400135, 39.758947799999994, 41.6819935, 40.041599600000005, 40.7281575, 18.4225782, 37.6624312, 37.6624312, 39.9611755, 42.5750939, 41.2565369, 34.0522342, 40.5852602, 47.6062095, 38.040583700000006, 41.8089191, 36.1539816, 40.497603999999995, 36.0331164, 37.540724600000004, 41.4553232, 32.715738, 42.36837, 35.1495343, 33.7420005, 40.440624799999995, 37.9735346, 35.791540000000005, 37.9100783, 21.3069444, 39.647765299999996, 29.7604267, 37.354107899999995, 47.6743428, 45.5122308, 34.0522342, 47.6101497, 38.933867600000006, 32.715738, 40.7127753, 32.7766642, 41.0534302, 43.6770252, 45.0791325, 39.9611755, 29.7604267, 29.7604267, 28.5383355, 35.2270869, 42.1969689, 41.8781136, 37.338208200000004, 42.5047161, 39.768403, 41.8781136, 37.354107899999995, 39.739235799999996, 38.8942786, 38.9822282, 41.0262417, 42.09975, 29.7604267, 39.739235799999996, 43.0389025, 33.7489954, 42.6583356, 39.9205411, 39.103118200000004, 29.2108147, 29.2108147, 33.9191799, 40.7066174, 29.7604267, 37.338208200000004, 40.7895453, 41.8781136, 40.0994425, 
34.0232431, 42.5481714, 33.6845673, 33.596891299999996, 33.6845673], 'Longitude': [-94.2088172, -96.9488945, -95.93450340000001, -122.03218229999999, -93.46874890000001, -122.4194155, -71.5147839, -122.33207079999998, -96.7969879, -83.0457538, -83.1763145, -75.4590816, -121.9780153, -83.11407709999999, -122.03261909999999, -74.0059728, -84.5120196, -71.0588801, -87.8445119, -74.0059728, -120.7401385, -122.08385109999999, -84.3879824, -80.8431267, -90.1994042, -122.4194155, -87.62979820000001, -95.36980279999999, -86.158068, -122.12151200000001, -98.4936282, -74.0059728, -75.1652215, -73.71401949999999, -97.678896, -88.9936873, -74.4518188, -77.17726040000001, -93.2650108, -80.81007240000001, -83.6499321, -84.5120196, -74.0059728, -84.3879824, -73.71444770000001, -121.9552356, -84.232105, -87.62979820000001, -72.6733723, -90.0489801, -72.795027, -74.1723667, -116.2023137, -95.36980279999999, -118.30896609999998, -85.7584557, -74.0059728, -122.14301950000001, -77.09470920000001, -74.0059728, -90.1994042, -121.88632859999998, -86.78160159999999, -96.7969879, -87.8445119, -82.9987942, -74.0059728, -71.0588801, -74.0059728, -74.0059728, -97.3307658, -93.28300209999999, -72.73009449999999, -73.53873409999999, -84.3879824, -122.1817252, -74.4818698, -74.2907032, -87.82895479999999, -94.1288141, -87.62979820000001, -122.2363548, -82.8001026, -74.0059728, -71.4161565, -74.0059728, -84.3879824, -81.9498042, -122.80133319999999, -98.4936282, -80.1917902, -87.62979820000001, -72.589811, -76.91997420000001, -95.36980279999999, -93.0427153, -93.0899578, -74.0059728, -77.17109140000001, -98.4936282, -77.17726040000001, -90.5151342, -74.0059728, -87.9064736, -95.36980279999999, -74.0059728, -122.14301950000001, -74.0059728, -74.0059728, -87.8411818, -87.89607120000001, -81.4392828, -104.87717260000001, -79.9958864, -95.36980279999999, -122.2710788, -87.8445119, -77.17109140000001, -71.2356113, -84.5120196, -87.87216020000001, -93.2650108, -86.7133302, -90.0489801, -80.8431267, 
-84.3879824, -77.09470920000001, -112.07403729999999, -86.158068, -118.83759369999999, -87.9535534, -122.33207079999998, -117.1610838, -76.2874927, -74.41738769999999, -74.0059728, -84.9877094, -80.13731740000001, -83.24548829999999, -86.4541894, -95.93450340000001, -96.7969879, -87.9064736, -71.2356113, -74.0059728, -95.36980279999999, -96.7969879, -83.2218731, -85.9213796, -116.2023137, -80.8431267, -118.1937395, -96.9488945, -77.4360481, -122.2015159, -72.6733723, -88.1173132, -121.88632859999998, -82.6402915, -86.86888990000001, -122.4194155, -120.7401385, -96.9488945, -118.24368490000002, -79.9958864, -92.1193012, -80.0533743, -122.4194155, -121.98857190000001, -82.4571776, -77.4360481, -88.1227199, -73.53873409999999, -77.4360481, -74.0059728, -112.07403729999999, -84.3879824, -90.1994042, -104.990251, -93.470786, -122.4194155, -93.2650108, -122.33207079999998, -74.0059728, -82.9987942, -73.62819640000001, -81.51900529999999, -74.0059728, -87.90840390000001, -81.6943605, -79.9958864, -96.7969879, -93.470786, -84.3879824, -74.0116536, -74.0323626, -74.0059728, -86.78160159999999, -90.1994042, -75.1652215, -121.9552356, -95.36980279999999, -104.98775970000001, -87.7878408, -75.3599105, -96.7969879, -118.24368490000002, -71.4128343, -121.9357918, -93.6091064, -97.10806559999999, -74.0059728, -97.5164276, -77.0909809, -93.0899578, -93.15661120000001, -74.0059728, -95.36980279999999, -81.51900529999999, -95.36980279999999, -74.0059728, -121.88632859999998, -95.992775, -74.3646122, -74.0059728, -85.17971419999999, -115.13982959999998, -72.77954190000001, -73.4081575, -80.1917902, -82.9987942, -83.0457538, -77.4360481, -90.5770675, -96.6988856, -73.71444770000001, -74.0059728, -73.4151214, -74.0059728, -85.5872286, -74.0059728, -79.79197540000001, -74.2937594, -118.072846, -71.10973349999999, -79.9958864, -122.40774979999999, -74.2631635, -95.992775, -80.24421600000001, -74.2057011, -93.2650108, -118.6089752, -74.0059728, -74.0059728, -81.6943605, -95.4612625, 
-74.0059728, -71.0588801, -122.14301950000001, -121.96237509999999, -79.9958864, -87.897014, -73.45401109999999, -81.655651, -93.2650108, -85.3096801, -75.38355250000001, -74.66722259999999, -95.36980279999999, -117.1610838, -74.2765441, -95.36980279999999, -90.0715323, -104.990251, -74.0059728, -105.0866504, -90.0489801, -92.66626740000001, -115.13982959999998, -80.1289321, -95.4612625, -87.8445119, -76.28587259999999, -122.4194155, -79.43779909999999, -87.840625, -104.98775970000001, -83.23410279999999, -118.40035630000001, -73.4081575, -77.35700279999999, -77.05469029999999, -122.8755949, -122.2015159, -112.07403729999999, -96.7969879, -80.8431267, -83.0457538, -87.62979820000001, -83.23410279999999, -81.655651, -84.3879824, -78.90511, -118.24368490000002, -121.9552356, -74.0059728, -95.36980279999999, -121.88632859999998, -82.5618186, -89.4012302, -85.13935129999999, -117.9298493, -97.5164276, -84.9510542, -95.36980279999999, -79.9414266, -77.37331390000001, -71.4128343, -87.840625, -87.62979820000001, -97.07806540000001, -92.9746367, -77.17726040000001, -73.4081575, -81.655651, -74.1723667, -71.55228740000001, -93.2922989, -122.4194155, -71.2356113, -74.0059728, -74.41738769999999, -95.36980279999999, -81.8067523, -95.36980279999999, -95.93450340000001, -75.1652215, -95.93450340000001, -80.1667247, -84.3879824, -88.0747875, -93.2650108, -104.990251, -75.4714098, -84.3879824, -84.1446376, -77.4360481, -88.01117459999999, -74.0059728, -85.7228061, -95.36980279999999, -95.992775, -121.98857190000001, -122.33207079999998, -87.62979820000001, -87.7878408, -75.1196199, -74.0059728, -88.01117459999999, -85.85304690000001, -96.7969879, -74.0059728, -72.589811, -96.6988856, -84.5120196, -74.05652979999999, -73.4081575, -73.62819640000001, -79.9958864, -74.0059728, -87.9064736, -83.3527097, -77.2310925, -84.55553470000001, -90.448126, -104.98775970000001, -80.085059, -76.6502468, -75.4714098, -76.4730122, -93.4555093, -81.7640212, -86.7827772, -104.95081409999999, 
-77.40887940000002, -80.1917902, -95.36980279999999, -121.88632859999998, -86.7827772, -86.7827772, -85.9766671, -83.6538244, -122.33207079999998, -94.1307587, -81.3792365, -96.6988856, -74.0059728, -87.5710898, -96.7969879, -118.4911912, -73.94854240000001, -72.82307359999999, -118.4164652, -97.330053, -87.62979820000001, -80.6081089, -122.33207079999998, -77.026088, -83.627157, -121.88632859999998, -81.6943605, -84.5120196, -88.5426136, -91.66562320000001, -85.7584557, -111.9400054, -96.6988856, -71.0588801, -122.2020795, -74.0059728, -74.0059728, -74.1435843, -73.53873409999999, -93.6001278, -90.1994042, -118.255075, -80.2683838, -84.4013462, -84.3879824, -82.6402915, -84.3879824, -80.24421600000001, -84.1446376, -71.4128343, -87.840625, -74.0059728, -95.36980279999999, -92.2895948, -74.0059728, -122.32552539999999, -83.53786740000001, -87.62979820000001, -77.35700279999999, -87.9064736, -87.9535534, -115.13982959999998, -90.32372629999999, -83.0457538, -87.62979820000001, -75.5397878, -98.4936282, -90.1994042, -88.0131275, -96.9488945, -80.8431267, -75.38355250000001, -74.417097, -74.8648873, -86.80249, -84.41726659999999, -93.0899578, -74.0059728, -77.5063739, -81.9179173, -74.38820720000001, -78.8783689, -122.27111370000002, -74.0059728, -74.0059728, -88.0683955, -85.7584557, -73.85874609999999, -92.2895948, -111.9260519, -84.5120196, -84.3879824, -87.8806738, -74.41738769999999, -75.1285061, -94.69357009999999, -77.17726040000001, -117.867653, -84.56031870000001, -73.6837399, -87.62979820000001, -88.1064787, -87.9064736, -95.36980279999999, -93.0899578, -90.1994042, -112.07403729999999, -86.158068, -104.98775970000001, -122.03634960000001, -93.5824586, -96.9488945, -74.0059728, -96.9488945, -84.5120196, -74.0059728, -77.09470920000001, -83.1497751, -75.7116032, -96.9488945, -121.9552356, -74.41738769999999, -122.4194155, -87.8445119, -82.9987942, -122.1817252, -90.1994042, -71.38255579999999, -71.8022934, -75.5397878, -122.08385109999999, -80.9450759, 
-95.36980279999999, -94.5729781, -71.19896949999999, -87.62979820000001, -87.8445119, -80.0734005, -122.03634960000001, -96.9488945, -77.17109140000001, -76.6121893, -81.8552196, -84.9877094, -122.4194155, -87.9259058, -94.6707917, -118.4164652, -71.512617, -87.3328139, -115.13982959999998, -122.2363548, -95.36980279999999, -76.6458043, -76.6121893, -77.4380485, -118.45719740000001, -86.7827772, -82.9987942, -71.0588801, -98.1244531, -73.68346209999999, -96.7969879, -87.62979820000001, -112.07403729999999, -81.40233559999999, -82.5306527, -118.1597929, -81.3444573, -74.0059728, -83.6874562, -104.990251, -121.9552356, -77.35700279999999, -100.7837392, -77.0085876, -116.2023137, -74.0059728, -81.0739827, -84.3879824, -91.6663523, -88.0132958, -76.6121893, -77.38609759999999, -118.24368490000002, -122.2363548, -73.35790490000001, -95.36980279999999, -77.0909809, -80.1917902, -80.8431267, -83.53786740000001, -80.1289321, -75.4395931, -86.1180435, -87.90840390000001, -71.0588801, -95.36980279999999, -82.4571776, -74.0059728, -122.08385109999999, -111.9400054, -118.41090890000001, -95.36980279999999, -74.0059728, -96.6397822, -73.7176312, -88.22294000000001, -87.8445119, -111.9260519, -73.53873409999999, -88.462609, -74.0059728, -87.9535534, -84.3173878, -82.3212302, -87.8211854, -76.727745, -86.158068, -84.3879824, -86.80249, -78.6381787, -74.0059728, -94.3102228, -85.6680863, -97.13306829999999, -83.9787808, -74.0059728, -93.711332, -86.80249, -111.9260519, -79.8100472, -95.36980279999999, -71.0588801, -96.72985190000001, -84.45188540000001, -79.9958864, -74.7909125, -80.0533746, -80.8431267, -87.5710898, -76.7769324, -87.90840390000001, -83.6538244, -95.93450340000001, -120.7401385, -73.0931641, -96.7969879, -96.9488945, -96.9488945, -104.990251, -71.1522765, -81.655651, -95.36980279999999, -95.36980279999999, -87.62979820000001, -83.067965, -95.4612625, -83.47549129999999, -75.1652215, -87.62979820000001, -97.13417829999999, -95.93450340000001, -82.9987942, 
-87.840625, -74.0059728, -87.62979820000001, -82.3940104, -112.07403729999999, -83.9207392, -72.9278835, -73.53873409999999, -74.0059728, -88.9936873, -89.4012302, -75.4395931, -81.6943605, -79.9958864, -80.9111862, -84.3879824, -82.8087864, -121.8995741, -76.7944104, -122.2015159, -80.25659499999999, -86.1180435, -82.02820009999999, -75.1652215, -117.82650490000002, -111.8412502, -119.8138027, -118.24368490000002, -94.7976958, -84.3879824, -74.0059728, -84.3879824, -89.4012302, -79.9958864, -117.56643840000001, -84.3879824, -87.62979820000001, -84.3879824, -89.0317765, -80.0819879, -74.961517, -74.0059728, -77.4360481, -77.4360481, -89.574563, -89.1306124, -87.82895479999999, -73.53873409999999, -74.0059728, -95.36980279999999, -83.14993220000001, -95.19029859999999, -96.7969879, -122.1560768, -121.9552356, -84.5085536, -111.9260519, -113.5684164, -87.62979820000001, -95.41716009999999, -122.7140548, -83.9207392, -74.0059728, -77.6088465, -96.7969879, -75.96521170000001, -77.17726040000001, -83.4838244, -74.41738769999999, -122.03634960000001, -97.5164276, -95.36980279999999, -115.13982959999998, -80.8431267, -96.7969879, -71.55228740000001, -94.5785667, -79.79197540000001, -85.6680863, -74.0059728, -82.9987942, -81.40233559999999, -87.9064736, -117.1610838, -90.1994042, -85.7584557, -96.6988856, -121.7568946, -80.1386547, -70.7927832, -111.9400054, -83.3671432, -86.29110240000001, -66.1057355, -122.4194155, -90.0489801, -71.4948322, -86.78160159999999, -81.6943605, -73.4151214, -76.6121893, -81.6943605, -74.0059728, -80.13731740000001, -86.86888990000001, -122.14301950000001, -87.62979820000001, -81.6943605, -70.7625532, -83.6499321, -73.7176312, -75.1285061, -118.24368490000002, -94.39854749999999, -74.0059728, -117.82650490000002, -74.0059728, -74.0059728, -74.0059728, -97.5164276, -76.72282270000001, -82.9987942, -84.5120196, -77.17726040000001, -90.1009108, -96.72985190000001, -83.7430378, -73.53873409999999, -90.1994042, -84.50371640000002, -118.1445155, 
-117.1610838, -95.93450340000001, -87.62979820000001, -75.266289, -111.89104740000002, -76.6585074, -86.78160159999999, -82.9987942, -122.08385109999999, -87.62979820000001, -71.0588801, -94.5785667, -96.6988856, -157.8583333, -74.0059728, -78.6381787, -118.255075, -117.91882209999999, -87.5710898, -122.03634960000001, -95.36980279999999, -80.3035602, -118.66148090000002, -81.6943605, -122.4194155, -97.3307658, -118.83759369999999, -75.5138118, -71.0588801, -81.6943605, -77.0909809, -71.4418101, -102.0779146, -73.76290970000001, -94.5785667, -104.990251, -95.67515759999999, -115.13982959999998, -157.8583333, -77.17726040000001, -115.13982959999998, -111.8874392, -76.727745, -121.88632859999998, -88.462609, -122.4442906, -95.992775, -93.29827990000001, -71.10973349999999, -78.63419959999999, -71.0588801, -87.62979820000001, -74.7523874, -88.3161965, -111.9260519, -122.6587185, -81.609844, -84.2866083, -122.4194155, -79.9958864, -74.0059728, -74.0643065, -77.35700279999999, -122.4194155, -95.36980279999999, -71.2356113, -122.5274755, -96.9488945, -112.07403729999999, -78.6381787, -90.1994042, -115.13982959999998, -96.7969879, -87.9064736, -93.2650108, -73.8201337, -74.0059728, -75.9268747, -118.24368490000002, -95.36980279999999, -120.7401385, -85.5600316, -121.88632859999998, -74.0059728, -88.2825668, -73.6812293, -81.6943605, -87.62979820000001, -121.88632859999998, -90.1994042, -84.21714240000001, -71.51630490000001, -93.0899578, -80.0533746, -71.2356113, -90.5770675, -121.88632859999998, -72.9278835, -87.840625, -94.5785667, -81.87230840000001, -111.6585337, -86.018651, -72.57538690000001, -80.8431267, -74.0679753, -87.9064736, -90.0489801, -80.1289321, -86.158068, -97.5164276, -118.6541895, -78.6381787, -81.3792365, -94.6169012, -84.2212938, -97.01500779999999, -111.9260519, -121.8746789, -87.62979820000001, -80.8431267, -92.66626740000001, -88.1227199, -85.7584557, -95.36980279999999, -122.4194155, -73.4686858, -105.2705456, -77.4360481, -118.1597929, 
-118.4164652, -93.2259349, -91.0432051, -71.2092214, -122.7159726, -122.28858079999999, -77.0909809, -93.63191309999999, -73.71444770000001, -84.1916069, -85.9766671, -75.3698895, -74.0776417, -66.0509549, -121.8746789, -121.8746789, -82.9987942, -71.0786653, -95.93450340000001, -118.24368490000002, -105.084423, -122.33207079999998, -84.50371640000002, -88.01117459999999, -95.992775, -74.4884868, -86.7827772, -77.4360481, -81.9179173, -117.1610838, -83.3527097, -90.0489801, -117.82363909999998, -79.9958864, -122.5310874, -78.78111690000001, -122.06518190000001, -157.8583333, -104.98775970000001, -95.36980279999999, -121.9552356, -117.1124241, -122.6587185, -118.24368490000002, -122.2015159, -77.17726040000001, -117.1610838, -74.0059728, -96.7969879, -73.53873409999999, -70.3711617, -93.1471667, -82.9987942, -95.36980279999999, -95.36980279999999, -81.3792365, -80.8431267, -88.0934108, -87.62979820000001, -121.88632859999998, -71.19562049999999, -86.158068, -87.62979820000001, -121.9552356, -104.990251, -77.4310992, -94.6707917, -73.62819640000001, -87.7808967, -95.36980279999999, -104.990251, -87.9064736, -84.3879824, -71.1367953, -105.0866504, -84.5120196, -81.02283309999999, -81.02283309999999, -118.4164652, -74.54932840000001, -95.36980279999999, -121.88632859999998, -74.05652979999999, -87.62979820000001, -74.9325683, -84.36155550000001, -71.17244670000001, -117.82650490000002, -117.6581562, -117.82650490000002]}
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.