| text (string, lengths 4 – 1.02M) | meta (dict) |
|---|---|
import random
import trigger
from animation import animate_point
import colors
from compositecore import Leaf
import gametime
from graphic import GraphicChar
import icon
import messenger
from statusflags import StatusFlags
# TODO: Idea: replace effect types with spoof children and add a time to live to spoof children.
# Effect types in execution order
class EffectTypes(object):
EFFECT_REMOVER = 0
STATUS_REMOVER = 1
ADD_SPOOF_CHILD = 2
REMOVE_CHILD = 3
BLOCKER = 4
STATUS_ADDER = 5
TELEPORT = 6
HEAL = 7
UI = 8
DAMAGE = 9
EQUIPMENT = 10
ALLTYPES = [EFFECT_REMOVER, STATUS_REMOVER, ADD_SPOOF_CHILD, REMOVE_CHILD, BLOCKER, STATUS_ADDER,
TELEPORT, HEAL, UI, DAMAGE, EQUIPMENT]
class EffectStackID(object):
SLIME_DISSOLVE = "slime_dissolve"
class EffectQueue(Leaf):
def __init__(self):
super(EffectQueue, self).__init__()
self.component_type = "effect_queue"
self._effect_queue = [None for _ in range(len(EffectTypes.ALLTYPES))]
for effect_type in EffectTypes.ALLTYPES:
self._effect_queue[effect_type] = []
@property
def effects(self):
return [effect for effect_group in self._effect_queue for effect in effect_group]
    def add(self, effect):
        if effect.meld_id:
            melding_effects = [e for e in self.effects if e.meld_id == effect.meld_id]
            if melding_effects:
                melding_effects[0].meld(effect)
            else:
                self._add(effect)
        else:
            self._add(effect)
def _add(self, effect):
self._effect_queue[effect.effect_type].append(effect)
effect.queue = self
def remove(self, effect):
self._effect_queue[effect.effect_type].remove(effect)
effect.queue = None
def remove_effects_with_id(self, effect_id):
for index in range(len(self._effect_queue)):
self._effect_queue[index] = [effect for effect in self._effect_queue[index]
if not effect.effect_id == effect_id]
    # TODO: this is the old lookup; check whether it can be removed.
    def remove_status_adder_of_status(self, status_to_remove):
        self._effect_queue[EffectTypes.STATUS_ADDER] = \
            [effect for effect in self._effect_queue[EffectTypes.STATUS_ADDER]
             if not effect.status_flag == status_to_remove]
def update(self, time):
for effect_type_queue in EffectTypes.ALLTYPES:
for effect in self._effect_queue[effect_type_queue]:
effect.update(time)
class EntityEffect(object):
def __init__(self, source_entity, time_to_live, effect_type, meld_id=None, effect_id=None):
self.source_entity = source_entity
self.time_to_live = time_to_live
self.effect_type = effect_type
self.is_blocked = False
self.meld_id = meld_id
self.effect_id = effect_id
self.queue = None
self.time_alive = 0
def update(self, time_spent):
pass
def meld(self, other_effect):
pass
@property
def target_entity(self):
return self.queue.parent
def is_new_round(self, time_spent):
turns_alive = int(self.time_alive / gametime.single_turn)
new_turns_alive = int((self.time_alive + time_spent) / gametime.single_turn)
return new_turns_alive > turns_alive
def tick(self, time_spent):
self.time_alive += time_spent
self.time_to_live = self.time_to_live - time_spent
if self.time_to_live < 1:
self._on_remove_effect()
self.queue.remove(self)
def _on_remove_effect(self):
pass
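# A minimal usage sketch (added example, not part of the original module). It assumes the
# entity exposes its EffectQueue component as `entity.effect_queue`; that attribute name is
# an assumption for illustration only. Effects are grouped by their EffectTypes index and
# executed in that order each time the queue is updated.
def _example_queue_usage(entity, source_entity):
    queue = entity.effect_queue                 # assumed accessor for the EffectQueue component
    queue.add(Heal(source_entity, health=5))    # queued under EffectTypes.HEAL
    queue.update(gametime.single_turn)          # every queued effect updates once, then ticks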
class StatusRemover(EntityEffect):
def __init__(self, source_entity, status_type_to_remove, time_to_live=1):
super(StatusRemover, self).__init__(source_entity,
time_to_live,
EffectTypes.STATUS_REMOVER)
self.status_type_to_remove = status_type_to_remove
def update(self, time_spent):
self.queue.remove_status_adder_of_status(self.status_type_to_remove)
self.tick(time_spent)
class EffectRemover(EntityEffect):
def __init__(self, source_entity, effect_id_to_remove, time_to_live=1, message=None):
super(EffectRemover, self).__init__(source_entity, time_to_live,
EffectTypes.EFFECT_REMOVER)
self.the_message = message
self.effect_id_to_remove = effect_id_to_remove
def update(self, time_spent):
old_size = len(self.queue.effects)
self.queue.remove_effects_with_id(self.effect_id_to_remove)
if not old_size == len(self.queue.effects):
self.message()
self.tick(time_spent)
def message(self):
messenger.msg.send_visual_message(self.the_message % {"source_entity": self.source_entity.description.long_name,
"target_entity": self.target_entity.description.long_name},
self.target_entity.position.value)
class HeartStop(EntityEffect):
def __init__(self, source_entity, time_to_live=1, message=messenger.HEART_STOP_MESSAGE):
super(HeartStop, self).__init__(source_entity, time_to_live, EffectTypes.STATUS_REMOVER)
self.message = message
def send_message(self):
messenger.msg.send_visual_message(self.message % {"source_entity": self.source_entity.description.long_name,
"target_entity": self.target_entity.description.long_name},
self.target_entity.position.value)
def update(self, time_spent):
if self.is_new_round(time_spent):
gray_heart = GraphicChar(None, colors.GRAY, icon.HEART)
self.target_entity.char_printer.append_graphic_char_temporary_frames([gray_heart])
self.send_message()
self.tick(time_spent)
def _on_remove_effect(self):
self.target_entity.health_modifier.kill()
class StatusAdder(EntityEffect):
def __init__(self, source_entity, status_flag, time_to_live=1):
super(StatusAdder, self).__init__(source_entity, time_to_live,
EffectTypes.STATUS_ADDER)
self.status_flag = status_flag
def update(self, time_spent):
status_flags = StatusFlags([self.status_flag])
status_flags.to_be_removed = True
self.target_entity.set_child(status_flags)
self.tick(time_spent)
class Teleport(EntityEffect):
def __init__(self, source_entity, time_to_live=1):
super(Teleport, self).__init__(source_entity, time_to_live, EffectTypes.TELEPORT)
def update(self, time_spent):
positions = (self.target_entity.dungeon_level.value.
get_walkable_positions(self.target_entity,
self.target_entity.position.value))
random_positions = random.sample(positions, len(positions))
for position in random_positions:
teleport_successful = self.target_entity.mover.try_move(position)
if teleport_successful:
self.target_entity.game_state.value.dungeon_needs_redraw = True
break
self.tick(time_spent)
class AttackEntityEffect(EntityEffect):
def __init__(self, source_entity, damage, damage_types, accuracy, crit_chance=0, crit_multiplier=2,
hit_message=messenger.HIT_MESSAGE, miss_message=messenger.MISS_MESSAGE,
crit_message=messenger.CRIT_MESSAGE, hit_trigger_effect=[], meld_id=None, time_to_live=1,
attack_effects=[]):
super(AttackEntityEffect, self).__init__(source_entity=source_entity,
effect_type=EffectTypes.DAMAGE,
meld_id=meld_id,
time_to_live=time_to_live)
self.accuracy = accuracy
self.damage = damage
self.damage_types = damage_types
self.miss_message = miss_message
self.hit_message = hit_message
self.attack_effects = attack_effects
self.hit_trigger_effect = hit_trigger_effect
self.crit_chance = crit_chance
self.crit_multiplier = crit_multiplier
self.crit_message = crit_message
def send_miss_message(self):
messenger.msg.send_visual_message(self.miss_message %
{"source_entity": self.source_entity.description.long_name,
"target_entity": self.target_entity.description.long_name},
self.target_entity.position.value)
def send_hit_message(self, message_template, damage_caused):
source_entity_name = self.source_entity.description.long_name if self.source_entity else None
target_entity_name = self.target_entity.description.long_name if self.target_entity else None
m = message_template % {"source_entity": source_entity_name,
"target_entity": target_entity_name,
"damage": str(int(damage_caused))}
messenger.msg.send_visual_message(m, self.target_entity.position.value)
def is_a_hit(self):
return self.target_entity.dodger.is_a_hit(self.accuracy) or self.target_entity.has("sleeping")
def is_a_crit(self):
return self.crit_chance > random.random() or self.target_entity.has("sleeping")
def hit_target(self):
is_crit = self.is_a_crit()
damage = self.damage
if is_crit:
damage = self.damage * self.crit_multiplier
animate_point(self.target_entity.game_state.value, self.target_entity.position.value,
[GraphicChar(None, colors.RED, "X")])
damage_after_armor = self.target_entity.armor_checker.get_damage_after_armor(damage, self.damage_types)
damage_after_resist = self.target_entity.resistance_checker.get_damage_after_resistance(damage_after_armor,
self.damage_types)
damage_caused = self.target_entity.health_modifier.hurt(damage_after_resist, entity=self.source_entity)
self.execute_effects()
if is_crit:
self.send_hit_message(self.crit_message, damage_caused)
else:
self.send_hit_message(self.hit_message, damage_caused)
    def update(self, time_spent):
        if self.target_entity.resistance_checker.is_immune(self.damage_types):
            # An immune target is not attacked at all; the effect still expires as normal.
            self.tick(time_spent)
            return
        self.on_attacked_effects()
        if self.is_a_hit():
            self.hit_target()
        else:
            self.send_miss_message()
        self.tick(time_spent)
def execute_effects(self):
for effect in self.attack_effects:
if effect.roll_to_hit():
effect.attack_effect(self.source_entity, self.target_entity)
def on_attacked_effects(self):
for t in self.target_entity.get_children_with_tag(trigger.ON_ATTACKED_TRIGGER_TAG):
t.trigger(target_entity=self.source_entity, source_entity=self.target_entity)
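# Hedged sketch (added example): queuing a basic swing on a defender. The "physical" damage
# type and the `effect_queue` accessor are illustrative assumptions, not names taken from the
# rest of the code base. The attack is resolved (dodge, armor, resistance, crit) on the
# defender's next effect-queue update.
def _example_attack(attacker, defender):
    swing = AttackEntityEffect(attacker, damage=4, damage_types=["physical"], accuracy=10)
    defender.effect_queue.add(swing)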
class UndodgeableAttackEntityEffect(AttackEntityEffect):
def __init__(self, source_entity, damage, damage_types, hit_message=messenger.HIT_MESSAGE,
meld_id=None, time_to_live=1):
super(UndodgeableAttackEntityEffect, self).__init__(source_entity, damage, damage_types, -1,
hit_message=hit_message,
meld_id=meld_id,
time_to_live=time_to_live)
def is_a_hit(self):
return True
class HealthRegain(EntityEffect):
def __init__(self, source_entity, health, turn_interval, time_to_live, meld_id=None):
super(HealthRegain, self).__init__(source_entity=source_entity, effect_type=EffectTypes.HEAL,
meld_id=meld_id, time_to_live=time_to_live)
self.health = health
self.time_interval = turn_interval * gametime.single_turn
self.time_until_next_heal = self.time_interval
def update(self, time_spent):
if self.time_until_next_heal <= 0:
self.target_entity.health_modifier.heal(self.health)
self.time_until_next_heal = self.time_interval
self.time_until_next_heal -= time_spent
self.tick(time_spent)
class StatusIconEntityEffect(EntityEffect):
def __init__(self, source_entity, status_description, time_to_live, meld_id=None):
super(StatusIconEntityEffect, self).__init__(source_entity=source_entity, effect_type=EffectTypes.UI,
meld_id=meld_id, time_to_live=time_to_live)
self.status_description = status_description
def update(self, time_spent):
if self.status_description and self.target_entity.has("status_bar"):
self.target_entity.status_bar.add(self.status_description)
self.tick(time_spent)
class DamageOverTimeEffect(EntityEffect):
def __init__(self, source_entity, damage, damage_types, turn_interval, turns_to_live,
damage_message, status_description=None, meld_id=None):
super(DamageOverTimeEffect, self).__init__(source_entity=source_entity, effect_type=EffectTypes.DAMAGE,
meld_id=meld_id, time_to_live=turns_to_live * gametime.single_turn)
self.damage = damage
self.damage_types = damage_types
self.time_interval = turn_interval * gametime.single_turn
self.damage_message = damage_message
self.time_until_next_damage = self.time_interval
self.status_description = status_description
def send_damage_message(self, damage_caused):
message_arguments = {}
if self.source_entity and self.source_entity.has("description"):
message_arguments["source_entity"] = self.source_entity.description.long_name
if self.target_entity and self.target_entity.has("description"):
message_arguments["target_entity"] = self.target_entity.description.long_name
message_arguments["damage"] = str(damage_caused)
m = self.damage_message % message_arguments
messenger.msg.send_visual_message(m, self.target_entity.position.value)
def damage_target(self):
damage_after_armor = self.target_entity.armor_checker.get_damage_after_armor(self.damage, self.damage_types)
damage_after_resist = self.target_entity.resistance_checker.get_damage_after_resistance(damage_after_armor,
self.damage_types)
damage_caused = self.target_entity.health_modifier.hurt(damage_after_resist, entity=self.source_entity,
damage_types=self.damage_types)
return damage_caused
def update_status_icon(self):
if self.status_description and self.target_entity.has("status_bar"):
self.target_entity.status_bar.add(self.status_description)
def update(self, time_spent):
if not self.target_entity.resistance_checker.is_immune(self.damage_types):
if self.time_until_next_damage <= 0:
damage_caused = self.damage_target()
self.send_damage_message(damage_caused)
self.time_until_next_damage = self.time_interval
self.update_status_icon()
self.time_until_next_damage -= time_spent
self.tick(time_spent)
class BleedEffect(DamageOverTimeEffect):
MAX_DAMAGE_PER_TURN = 3
def __init__(self, source_entity, damage, damage_types, turn_interval, turns_to_live,
damage_message, status_description):
super(BleedEffect, self).__init__(source_entity, damage, damage_types, turn_interval, turns_to_live,
damage_message, status_description=status_description, meld_id="bleed_effect")
def meld(self, other_effect):
self.time_to_live = max(other_effect.time_to_live, self.time_to_live)
self.damage = min(self.damage + 1, BleedEffect.MAX_DAMAGE_PER_TURN)
self.status_description.graphic_char.icon = str(self.damage)
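# Hedged sketch (added example): because BleedEffect uses the shared meld_id "bleed_effect",
# EffectQueue.add() folds a second bleed into the one already queued instead of stacking it.
# Melding keeps the longer remaining duration and raises the damage, capped at
# MAX_DAMAGE_PER_TURN. Both arguments are assumed to be already-constructed BleedEffect
# instances.
def _example_bleed_meld(queued_bleed, incoming_bleed):
    queued_bleed.meld(incoming_bleed)
    return queued_bleed.damage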
class UndodgeableDamagAndBlockSameEffect(EntityEffect):
def __init__(self, source_entity, damage, damage_types,
damage_message, meld_id, status_description=None, time_to_live=1):
super(UndodgeableDamagAndBlockSameEffect, self).__init__(source_entity=source_entity,
effect_type=EffectTypes.DAMAGE,
time_to_live=time_to_live,
meld_id=meld_id)
self.damage = damage
self.damage_types = damage_types
self.damage_message = damage_message
self.status_description = status_description
def send_damage_message(self, damage_caused):
messenger.msg.send_visual_message(
self.damage_message % {"source_entity": self.source_entity.description.long_name,
"target_entity": self.target_entity.description.long_name,
"damage": str(damage_caused)},
self.target_entity.position.value)
def update(self, time_spent):
if not self.target_entity.resistance_checker.is_immune(self.damage_types):
if self.time_alive == 0:
damage_after_armor = self.target_entity.armor_checker.get_damage_after_armor(self.damage,
self.damage_types)
damage_after_resist = self.target_entity.resistance_checker.get_damage_after_resistance(
damage_after_armor, self.damage_types)
damage_caused = self.target_entity.health_modifier.hurt(damage_after_resist, entity=self.source_entity)
self.send_damage_message(damage_caused)
self.update_status_icon()
self.tick(time_spent)
def update_status_icon(self):
if self.status_description and self.target_entity.has("status_bar"):
self.target_entity.status_bar.add(self.status_description)
class Heal(EntityEffect):
def __init__(self, source_entity, health, heal_message=messenger.HEAL_MESSAGE, time_to_live=1):
super(Heal, self).__init__(source_entity=source_entity,
effect_type=EffectTypes.HEAL,
time_to_live=time_to_live)
self.health = health
self.heal_message = heal_message
def message(self):
messenger.msg.send_visual_message(
self.heal_message % {"source_entity": self.source_entity.description.long_name,
"target_entity": self.target_entity.description.long_name,
"health": str(self.health)},
self.target_entity.position.value)
def update(self, time_spent):
self.target_entity.health_modifier.heal(self.health)
self.message()
self.tick(time_spent)
class AddSpoofChild(EntityEffect):
def __init__(self, source_entity, spoof_child, time_to_live, message_effect=None, meld_id=None, effect_id=None,
status_description=None):
super(AddSpoofChild, self).__init__(source_entity=source_entity,
effect_type=EffectTypes.ADD_SPOOF_CHILD,
time_to_live=time_to_live,
meld_id=meld_id,
effect_id=effect_id)
self.status_description = status_description
self.message_effect = message_effect
self.spoof_child = spoof_child
def update(self, time_spent):
self.target_entity.add_spoof_child(self.spoof_child)
if self.message_effect:
self.message()
self.update_status_icon()
self.tick(time_spent)
def update_status_icon(self):
if self.status_description and self.target_entity.has("status_bar"):
self.target_entity.status_bar.add(self.status_description)
def message(self):
messenger.msg.send_visual_message(
self.message_effect % {"source_entity": self.source_entity.description.long_name,
"target_entity": self.target_entity.description.long_name},
self.target_entity.position.value)
class RemoveChildEffect(EntityEffect):
def __init__(self, source_entity, component_type, time_to_live, meld_id=None):
super(RemoveChildEffect, self).__init__(source_entity=source_entity,
effect_type=EffectTypes.REMOVE_CHILD,
time_to_live=time_to_live,
meld_id=meld_id)
self.component_type = component_type
def update(self, time_spent):
self.target_entity.remove_component_of_type(self.component_type)
self.tick(time_spent)
class Equip(EntityEffect):
def __init__(self, source_entity, item, equip_message=messenger.EQUIP_MESSAGE):
super(Equip, self).__init__(source_entity=source_entity,
effect_type=EffectTypes.EQUIPMENT,
time_to_live=1)
self.item = item
self.equip_message = equip_message
def message(self):
messenger.msg.send_visual_message(
self.equip_message % {"source_entity": self.source_entity.description.long_name,
"target_entity": self.target_entity.description.long_name,
"item": self.item.description.long_name},
self.target_entity.position.value)
    def update(self, time_spent):
        equipment = self.target_entity.equipment
        equip_succeeded = equipment.try_equip(self.item)
        if equip_succeeded:
            self.message()
            if self.target_entity.inventory.has_item(self.item):
                self.target_entity.inventory.remove_item(self.item)
        self.tick(time_spent)
class StepEffect(EntityEffect):
def __init__(self, source_entity, item):
super(StepEffect, self).__init__(source_entity=source_entity,
effect_type=EffectTypes.EQUIPMENT,
time_to_live=1)
self.item = item
def update(self, time_spent):
pass
class Unequip(EntityEffect):
def __init__(self, source_entity, equipment_slot):
super(Unequip, self).__init__(source_entity=source_entity,
effect_type=EffectTypes.EQUIPMENT,
time_to_live=1)
self.item = source_entity.equipment.get(equipment_slot)
self.equipment_slot = equipment_slot
self.unequip_message = messenger.UNEQUIP_MESSAGE
def message(self):
messenger.msg.send_visual_message(
self.unequip_message % {"source_entity": self.source_entity.description.long_name,
"target_entity": self.target_entity.description.long_name,
"item": self.item.description.long_name},
self.target_entity.position.value)
def update(self, time_spent):
equipment = self.target_entity.equipment
if equipment.can_unequip_to_inventory(self.equipment_slot):
unequip_succeeded = equipment.unequip_to_inventory(self.equipment_slot)
if unequip_succeeded:
self.message()
self.tick(time_spent)
class ReEquip(EntityEffect):
def __init__(self, source_entity, equipment_slot, item):
super(ReEquip, self).__init__(source_entity=source_entity,
effect_type=EffectTypes.EQUIPMENT,
time_to_live=1)
self.equipment_slot = equipment_slot
self.item = item
def message(self):
message = "%s equips %s." % (self.source_entity.description.long_name,
self.item.description.long_name)
messenger.msg.send_visual_message(message,
self.target_entity.position.value)
def update(self, time_spent):
old_item = None
if self.source_entity.equipment.slot_is_equiped(self.equipment_slot):
old_item = self.source_entity.equipment.unequip(self.equipment_slot)
equipment = self.target_entity.equipment
equip_succeeded = equipment.try_equip(self.item)
if equip_succeeded:
self.message()
self._item_flash_animation(self.target_entity, self.item)
if self.source_entity.inventory.has_item(self.item):
self.source_entity.inventory.remove_item(self.item)
if old_item is not None:
self.source_entity.inventory.try_add(old_item)
self.tick(time_spent)
def _item_flash_animation(self, entity, item):
entity.char_printer.append_graphic_char_temporary_frames([item.graphic_char])
|
{
"content_hash": "3c787c566d803d1b87ff0fa6d6ea3c2b",
"timestamp": "",
"source": "github",
"line_count": 575,
"max_line_length": 121,
"avg_line_length": 44.697391304347825,
"alnum_prop": 0.5963581183611533,
"repo_name": "co/TheLastRogue",
"id": "06568ef5454d36e098c9d8f9a7175c503ddb6941",
"size": "25701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entityeffect.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "696695"
}
],
"symlink_target": ""
}
|
from tournament_of_lulz.database.database import fetchall
from tournament_of_lulz.modules.image.model_image import ModelImage
class ModelTopImages():
def __init__(self, db_connection):
self.db_connection = db_connection
self.top_images = []
def load_top_images(self, start, limit):
self.top_images = []
sql = (
"SELECT image_id, image_url_hash, image_url, page_url, thumbnail_url, title, rating, rd, volatility "
"FROM images "
"ORDER BY rating DESC "
"LIMIT %(start)s, %(limit)s"
)
params = {
'start': start,
'limit': limit
}
data = fetchall(self.db_connection, sql, params)
for row in data:
image = ModelImage()
image.init_with_db_row(row)
self.top_images.append(image)
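# Hedged usage sketch (added example): `db_connection` is assumed to be whatever connection
# object fetchall() accepts. Pagination is offset-based, so this returns the ten
# highest-rated images.
def _example_first_page(db_connection):
    model = ModelTopImages(db_connection)
    model.load_top_images(start=0, limit=10)
    return model.top_images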
|
{
"content_hash": "a98c2c91828719bad5ebb75cfc3decf8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 113,
"avg_line_length": 30.892857142857142,
"alnum_prop": 0.5676300578034682,
"repo_name": "xaroth8088/tournament-of-lulz",
"id": "bc9e6097cf017e6d6c621209552a27854fff229d",
"size": "865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servers/python/tournament_of_lulz/modules/top_images/model_top_images.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22770"
},
{
"name": "Dockerfile",
"bytes": "315"
},
{
"name": "HTML",
"bytes": "6616"
},
{
"name": "JavaScript",
"bytes": "204304"
},
{
"name": "Python",
"bytes": "233991"
},
{
"name": "Shell",
"bytes": "2216"
}
],
"symlink_target": ""
}
|
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from django.http import HttpResponse
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.static import serve
logger = logging.getLogger(__name__)
@login_required
def serve_protected_files(request, fpath=None):
if settings.SERVE_PROTECTED_FILES:
return serve(request, fpath, settings.FILES_REPOSITORY, True)
response = HttpResponse()
response['Content-Type'] = ''
response['X-Accel-Redirect'] = "{protected_url}/{fpath}".format(
protected_url=settings.FILES_REPOSITORY_URL_PATH,
fpath=fpath)
return response
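# Hedged sketch (added example): the settings this view relies on. The setting names come
# from the view above; the values below are illustrative assumptions only.
#
#     SERVE_PROTECTED_FILES = DEBUG                   # serve directly through Django in development
#     FILES_REPOSITORY = "/srv/protected_files"       # filesystem root handed to django.views.static.serve
#     FILES_REPOSITORY_URL_PATH = "/protected_files"  # internal location the web server maps for X-Accel-Redirect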
|
{
"content_hash": "d6a1243e3be7c5e30dae2a09e9372a9d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 69,
"avg_line_length": 32.04347826086956,
"alnum_prop": 0.7150610583446404,
"repo_name": "yeleman/snisi",
"id": "29c99a5a075b30b106126159f7316014074389e6",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snisi_web/views/downloads.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "410022"
},
{
"name": "HTML",
"bytes": "1007275"
},
{
"name": "Java",
"bytes": "7211"
},
{
"name": "JavaScript",
"bytes": "292583"
},
{
"name": "Python",
"bytes": "2237855"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
"""
This file helps to test graphql queries and verify that the "big picture" works
"""
import string
import random
import pytest
from graphene.test import Client
from BuyBitcoin.graphene_schema import SCHEMA
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from trading.models import TradingAccount, Trade
from stocks.models import DailyStockQuote, InvestmentBucket, \
InvestmentBucketDescription, InvestmentStockConfiguration, Stock
from stocks.historical import create_stock
from graphql_relay.node.node import to_global_id
def request_create(request):
"""
Creates a fully functional environment that we can test on
"""
post_save.disconnect(receiver=create_stock, sender=Stock)
stock = Stock(name="Google", ticker="GOOGL")
stock.save()
pw2 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
user2 = User.objects.create(username='testuser2', password=pw2)
account2 = TradingAccount(profile=user2.profile, account_name="testAccount2")
account2.save()
trade2 = Trade(quantity=2, account=account2, stock=stock)
trade2.save()
pw1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
request.user = User.objects.create(username='testuser1', password=pw1)
account1 = TradingAccount(profile=request.user.profile, account_name="testAccount1")
account1.save()
trade1 = Trade(quantity=1, account=account1, stock=stock)
trade1.save()
bucket = InvestmentBucket(name="i1", public=False, available=100, owner=request.user.profile)
bucket.save()
InvestmentBucketDescription(text="Blabla", is_good=True, bucket=bucket).save()
DailyStockQuote(value=9, date="2017-05-08", stock=stock).save()
DailyStockQuote(value=10, date="2017-05-10", stock=stock).save()
InvestmentStockConfiguration(stock=stock, quantity=1, bucket=bucket, start="2017-05-09").save()
return request
@pytest.mark.django_db(transaction=True)
# pylint: disable=invalid-name
def test_mutation_add_trading_account(rf, snapshot):
"""
Tests the mutation to add a trading account
"""
# pylint: enable=invalid-name
request = rf.post('/graphql')
pw1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
request.user = User.objects.create(username='testuser1', password=pw1)
client = Client(SCHEMA)
acc_name = "Test 1"
executed = client.execute("""
mutation {{
addTradingAccount(name: "{}") {{
account {{
accountName
}}
}}
}}
""".format(acc_name), context_value=request)
snapshot.assert_match(executed)
acc = TradingAccount.objects.get(profile__user__id=request.user.id)
ex_acc = executed['data']['addTradingAccount']['account']['accountName']
assert ex_acc == acc.account_name
@pytest.mark.django_db(transaction=True)
# pylint: disable=invalid-name
def test_mutation_add_bucket(rf, snapshot):
"""
This submits a massive graphql query to verify all fields work
"""
# pylint: enable=invalid-name
request = rf.post('/graphql')
pw1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
request.user = User.objects.create(username='testuser1', password=pw1)
client = Client(SCHEMA)
acc_name = "Test 1"
investment = 333.33
executed = client.execute("""
mutation {{
addBucket(name: "{}", investment: {}, public: true) {{
bucket {{
available
isOwner
public
name
}}
}}
}}
""".format(acc_name, investment), context_value=request)
snapshot.assert_match(executed)
acc = InvestmentBucket.objects.all()[0]
ex_acc = executed['data']['addBucket']['bucket']['available']
assert (ex_acc == acc.available) and (investment == ex_acc)
@pytest.mark.django_db(transaction=True)
# pylint: disable=invalid-name
def test_mutation_add_stock_to_bucket(rf, snapshot):
"""
This submits a massive graphql query to verify all fields work
"""
# pylint: enable=invalid-name
request = rf.post('/graphql')
pw1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
request.user = User.objects.create(username='testuser1', password=pw1)
bucket = InvestmentBucket(name="i1", public=False, available=100, owner=request.user.profile)
bucket.save()
post_save.disconnect(receiver=create_stock, sender=Stock)
stock = Stock(name="Google", ticker="GOOGL")
stock.save()
DailyStockQuote(value=9, date="2017-05-08", stock=stock).save()
DailyStockQuote(value=10, date="2017-05-10", stock=stock).save()
DailyStockQuote(value=9, date="2017-05-09", stock=stock).save()
client = Client(SCHEMA)
executed = client.execute(
"""
mutation {{
addStockToBucket(stockId: "{}", bucketId: "{}", quantity: {}) {{
bucket {{
available
isOwner
public
name
stocks {{
edges {{
node {{
quantity
stock {{
ticker
}}
}}
}}
}}
}}
}}
}}
""".format(
to_global_id("GStock", stock.id),
to_global_id("GInvestmentBucket", bucket.id), 3.5
),
context_value=request
)
snapshot.assert_match(executed)
assert InvestmentStockConfiguration.objects.count() == 1
@pytest.mark.django_db(transaction=True)
# pylint: disable=invalid-name
def test_mutation_add_attribute_to_investment(rf, snapshot):
"""
This submits a massive graphql query to verify all fields work
"""
# pylint: enable=invalid-name
request = rf.post('/graphql')
pw1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
request.user = User.objects.create(username='testuser1', password=pw1)
bucket = InvestmentBucket(name="i1", public=False, available=100, owner=request.user.profile)
bucket.save()
client = Client(SCHEMA)
executed = client.execute("""
mutation {{
addAttributeToBucket(desc: "{}", bucketId: "{}", isGood: true) {{
bucketAttr {{
text
isGood
}}
}}
}}
""".format("Test Desc", to_global_id("GInvestmentBucket", bucket.id)), context_value=request)
snapshot.assert_match(executed)
assert InvestmentBucketDescription.objects.count() == 1
@pytest.mark.django_db(transaction=True)
# pylint: disable=invalid-name
def test_mutation_attribute_permission(rf, snapshot):
"""
This submits a massive graphql query to verify all fields work
"""
# pylint: enable=invalid-name
request = rf.post('/graphql')
pw2 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
user2 = User.objects.create(username='testuser2', password=pw2)
pw1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
request.user = User.objects.create(username='testuser1', password=pw1)
bucket = InvestmentBucket(name="i1", public=False, available=100, owner=user2.profile)
bucket.save()
attr = InvestmentBucketDescription(text="desc1", bucket=bucket, is_good=True)
attr.save()
client = Client(SCHEMA)
executed = client.execute("""
mutation {{
addAttributeToBucket(desc: "{}", bucketId: "{}", isGood: true) {{
bucketAttr {{
isGood
}}
}}
}}
""".format("Test Desc", to_global_id("GInvestmentBucket", bucket.id)), context_value=request)
snapshot.assert_match(executed)
executed = client.execute(
"""
mutation {{
editAttribute(desc: "{}", idValue: "{}") {{
bucketAttr {{
isGood
}}
}}
}}
""".format(
"Test Desc",
to_global_id("GInvestmentBucketDescription", attr.id)
),
context_value=request
)
snapshot.assert_match(executed)
executed = client.execute("""
mutation {{
deleteAttribute(idValue: "{}") {{
isOk
}}
}}
""".format(to_global_id("GInvestmentBucketDescription", attr.id)), context_value=request)
snapshot.assert_match(executed)
executed = client.execute(
"""
mutation {{
addStockToBucket(stockId: "{}", bucketId: "{}", quantity: {}) {{
bucket {{
id
}}
}}
}}
""".format(
to_global_id("GStock", 1),
to_global_id("GInvestmentBucket", bucket.id), 3.5
),
context_value=request
)
snapshot.assert_match(executed)
assert InvestmentBucketDescription.objects.count() == 1
@pytest.mark.django_db(transaction=True)
# pylint: disable=invalid-name
def test_mutation_edit_attribute(rf, snapshot):
"""
This submits a massive graphql query to verify all fields work
"""
# pylint: enable=invalid-name
request = rf.post('/graphql')
pw1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
request.user = User.objects.create(username='testuser1', password=pw1)
bucket = InvestmentBucket(name="i1", public=False, available=100, owner=request.user.profile)
bucket.save()
attr = InvestmentBucketDescription(text="Blabla", is_good=True, bucket=bucket)
attr.save()
client = Client(SCHEMA)
executed = client.execute(
"""
mutation {{
editAttribute(desc: "{}", idValue: "{}") {{
bucketAttr {{
text
isGood
}}
}}
}}
""".format(
"Test Desc",
to_global_id("GInvestmentBucketDescription", attr.id)
),
context_value=request
)
snapshot.assert_match(executed)
@pytest.mark.django_db(transaction=True)
# pylint: disable=invalid-name
def test_mutation_delete_attribute(rf, snapshot):
"""
This submits a massive graphql query to verify all fields work
"""
# pylint: enable=invalid-name
request = rf.post('/graphql')
pw1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
request.user = User.objects.create(username='testuser1', password=pw1)
bucket = InvestmentBucket(name="i1", public=False, available=100, owner=request.user.profile)
bucket.save()
attr = InvestmentBucketDescription(text="Blabla", is_good=True, bucket=bucket)
attr.save()
client = Client(SCHEMA)
executed = client.execute("""
mutation {{
deleteAttribute(idValue: "{}") {{
isOk
}}
}}
""".format(to_global_id("GInvestmentBucketDescription", attr.id)), context_value=request)
snapshot.assert_match(executed)
assert InvestmentBucketDescription.objects.all().count() == 0
@pytest.mark.django_db(transaction=True)
# pylint: disable=invalid-name
def test_mutation_edit_configuration(rf, snapshot):
"""
This submits a massive graphql query to verify all fields work
"""
# pylint: enable=invalid-name
request = rf.post('/graphql')
pw1 = ''.join(random.choices(string.ascii_uppercase + string.digits, k=9))
request.user = User.objects.create(username='testuser1', password=pw1)
bucket = InvestmentBucket(name="i1", public=False, available=100, owner=request.user.profile)
bucket.save()
post_save.disconnect(receiver=create_stock, sender=Stock)
stock = Stock(name="Google", ticker="GOOGL")
stock.save()
DailyStockQuote(value=9, date="2017-05-08", stock=stock).save()
DailyStockQuote(value=10, date="2017-05-10", stock=stock).save()
InvestmentStockConfiguration(stock=stock, quantity=1, bucket=bucket, start="2017-05-09").save()
client = Client(SCHEMA)
executed = client.execute(
"""
mutation {{
editConfiguration(idValue: "{}", config: [
{{idValue: "{}", quantity: {}}}
]) {{
bucket {{
name
available
}}
}}
}}
""".format(
to_global_id("GInvestmentBucket", bucket.id),
to_global_id("GStock", stock.id),
2
),
context_value=request
)
snapshot.assert_match(executed)
assert InvestmentStockConfiguration.objects.all().count() == 2
@pytest.mark.django_db(transaction=True)
# pylint: disable=invalid-name
def test_big_gql(rf, snapshot):
"""
This submits a massive graphql query to verify all fields work
"""
# pylint: enable=invalid-name
request = request_create(rf.post('/graphql'))
client = Client(SCHEMA)
executed = client.execute("""
{
viewer {
id
username
profile {
investSuggestions {
edges {
node {
id
name
public
available
isOwner
description {
edges {
node {
id
text
isGood
}
}
}
stocks {
edges {
node {
id
quantity
stock {
name
latestQuote {
value
}
}
start
end
}
}
}
}
}
}
stockFind(text: "GO") {
id
quoteInRange(start: "2017-05-07", end: "2017-05-11") {
value
date
}
}
tradingAccounts {
edges {
node {
accountName
trades {
edges {
node {
quantity
stock {
ticker
name
trades {
edges {
node {
account {
accountName
}
}
}
}
}
}
}
}
}
}
}
}
}
}
""", context_value=request)
snapshot.assert_match(executed)
|
{
"content_hash": "81820371ebbbad41b9882e70d30ca8ee",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 99,
"avg_line_length": 33.09843400447427,
"alnum_prop": 0.5660696181142277,
"repo_name": "jomivega/ASE4156",
"id": "0b9c40424c394ed6369ed9a479df63ef785afe12",
"size": "14795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_viewer_graphql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "52"
},
{
"name": "HTML",
"bytes": "4334"
},
{
"name": "JavaScript",
"bytes": "81067"
},
{
"name": "PHP",
"bytes": "219"
},
{
"name": "Python",
"bytes": "109856"
}
],
"symlink_target": ""
}
|
"""Trains the MNIST network using preloaded data in a constant.
Run using bazel:
bazel run -c opt \
<...>/tensorflow/examples/how_tos/reading_data:fully_connected_preloaded
or, if installed via pip:
cd tensorflow/examples/how_tos/reading_data
python fully_connected_preloaded.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import numpy
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.examples.tutorials.mnist import mnist
# Basic model parameters as external flags.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('num_epochs', 2, 'Number of epochs to run trainer.')
flags.DEFINE_integer('hidden1', 128, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 32, 'Number of units in hidden layer 2.')
flags.DEFINE_integer('batch_size', 100, 'Batch size. '
'Must divide evenly into the dataset sizes.')
flags.DEFINE_string('train_dir', '/tmp/data',
'Directory to put the training data.')
flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data '
'for unit testing.')
def run_training():
"""Train MNIST for a number of epochs."""
# Get the sets of images and labels for training, validation, and
# test on MNIST.
data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
# Tell TensorFlow that the model will be built into the default Graph.
with tf.Graph().as_default():
with tf.name_scope('input'):
# Input data, pin to CPU because rest of pipeline is CPU-only
with tf.device('/cpu:0'):
input_images = tf.constant(data_sets.train.images)
input_labels = tf.constant(data_sets.train.labels)
image, label = tf.train.slice_input_producer(
[input_images, input_labels], num_epochs=FLAGS.num_epochs)
label = tf.cast(label, tf.int32)
images, labels = tf.train.batch(
[image, label], batch_size=FLAGS.batch_size)
# Build a Graph that computes predictions from the inference model.
logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
# Add to the Graph the Ops for loss calculation.
loss = mnist.loss(logits, labels)
# Add to the Graph the Ops that calculate and apply gradients.
train_op = mnist.training(loss, FLAGS.learning_rate)
# Add the Op to compare the logits to the labels during evaluation.
eval_correct = mnist.evaluation(logits, labels)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Create a saver for writing training checkpoints.
saver = tf.train.Saver()
# Create the op for initializing variables.
init_op = tf.initialize_all_variables()
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
sess.run(init_op)
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
# Start input enqueue threads.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# And then after everything is built, start the training loop.
try:
step = 0
while not coord.should_stop():
start_time = time.time()
# Run one step of the model.
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
# Write the summaries and print an overview fairly often.
if step % 100 == 0:
# Print status to stdout.
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
duration))
# Update the events file.
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
step += 1
# Save a checkpoint periodically.
if (step + 1) % 1000 == 0:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
step += 1
except tf.errors.OutOfRangeError:
print('Saving')
saver.save(sess, FLAGS.train_dir, global_step=step)
print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
finally:
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
sess.close()
def main(_):
run_training()
if __name__ == '__main__':
tf.app.run()
|
{
"content_hash": "38431cc6224b1358400d47d7e2f223a3",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 80,
"avg_line_length": 32.96478873239437,
"alnum_prop": 0.6569109164708395,
"repo_name": "petewarden/tensorflow_makefile",
"id": "d08a15433a9c3a99d1cc301d55920170cc1f941f",
"size": "5359",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/examples/how_tos/reading_data/fully_connected_preloaded.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156010"
},
{
"name": "C++",
"bytes": "9202688"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "773228"
},
{
"name": "Java",
"bytes": "39181"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1773330"
},
{
"name": "Makefile",
"bytes": "7908"
},
{
"name": "Protocol Buffer",
"bytes": "111893"
},
{
"name": "Python",
"bytes": "6457132"
},
{
"name": "Shell",
"bytes": "167245"
},
{
"name": "TypeScript",
"bytes": "409165"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
import common.models.base
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('common', '0005_ward_sub_county'),
]
operations = [
migrations.CreateModel(
name='UserSubCounty',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('sub_county', models.ForeignKey(to='common.SubCounty', on_delete=django.db.models.deletion.PROTECT)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(related_name='user_sub_counties', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'default_permissions': ('add', 'change', 'delete', 'view'),
'abstract': False,
},
),
]
|
{
"content_hash": "1c3b7974bbbc789f5f9715461ab13ffd",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 197,
"avg_line_length": 49.58974358974359,
"alnum_prop": 0.6339193381592554,
"repo_name": "MasterFacilityList/mfl_api",
"id": "3db16a983e02c7a006a4d46c809b3e25b5a59a30",
"size": "1958",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "common/migrations/0006_usersubcounty.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34"
},
{
"name": "HTML",
"bytes": "54029"
},
{
"name": "JavaScript",
"bytes": "1285"
},
{
"name": "PLpgSQL",
"bytes": "8030"
},
{
"name": "Python",
"bytes": "1111072"
},
{
"name": "Ruby",
"bytes": "1251"
},
{
"name": "Shell",
"bytes": "1455"
}
],
"symlink_target": ""
}
|
import RPi.GPIO as gpio
import time
import sys
import Tkinter as tk
def init():
gpio.setmode(gpio.BOARD)
gpio.setup(7, gpio.OUT)
gpio.setup(11, gpio.OUT)
gpio.setup(13, gpio.OUT)
gpio.setup(15, gpio.OUT)
def forward(tf):
gpio.output(7, False)
gpio.output(11, True)
gpio.output(13, True)
gpio.output(15, False)
time.sleep(tf)
gpio.cleanup()
def reverse(tf):
gpio.output(7, True)
gpio.output(11, False)
gpio.output(13, False)
gpio.output(15, True)
time.sleep(tf)
gpio.cleanup()
def turn_left(tf):
gpio.output(7, True)
gpio.output(11, True)
gpio.output(13, True)
gpio.output(15, False)
time.sleep(tf)
gpio.cleanup()
def turn_right(tf):
gpio.output(7, False)
gpio.output(11, True)
gpio.output(13, False)
gpio.output(15, False)
time.sleep(tf)
gpio.cleanup()
def pivot_left(tf):
gpio.output(7, True)
gpio.output(11, False)
gpio.output(13, True)
gpio.output(15, False)
time.sleep(tf)
gpio.cleanup()
def pivot_right(tf):
gpio.output(7, False)
gpio.output(11, True)
gpio.output(13, False)
gpio.output(15, True)
time.sleep(tf)
gpio.cleanup()
def key_input(event):
init()
print 'Key:', event.char
key_press = event.char
sleep_time = 0.030
if key_press.lower() == 'w':
forward(sleep_time)
elif key_press.lower() == 's':
reverse(sleep_time)
elif key_press.lower() == 'a':
turn_left(sleep_time)
elif key_press.lower() == 'd':
turn_right(sleep_time)
elif key_press.lower() == 'q':
pivot_left(sleep_time)
elif key_press.lower() == 'e':
pivot_right(sleep_time)
else:
pass
command = tk.Tk()
command.bind('<KeyPress>', key_input)
command.mainloop()
|
{
"content_hash": "dfa1b51dddb628cd4a15d4db43392873",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 37,
"avg_line_length": 20.770114942528735,
"alnum_prop": 0.6048699501936912,
"repo_name": "PythonProgramming/Robotics-with-Raspberry-Pi",
"id": "0e64def44a33c5802efb03c3972e8dba64ca390b",
"size": "1807",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "robot4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10875"
}
],
"symlink_target": ""
}
|
from horizon import views
class IndexView(views.HorizonTemplateView):
template_name = 'admin/plugin_panel/index.html'
page_title = 'Plugin-based Panel'
class TestBannerView(views.HorizonTemplateView):
template_name = 'admin/plugin_panel/header.html'
def get_context_data(self, **kwargs):
context = super(TestBannerView, self).get_context_data(**kwargs)
context['message'] = "sample context"
return context
|
{
"content_hash": "5bb67cc5be1352cd7a7757f897c292ea",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 28.25,
"alnum_prop": 0.7079646017699115,
"repo_name": "noironetworks/horizon",
"id": "a5c18fda40ca53826e82d03b5c5b53cbb36617eb",
"size": "1025",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/test_panels/plugin_panel/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "129247"
},
{
"name": "HTML",
"bytes": "581169"
},
{
"name": "JavaScript",
"bytes": "2455930"
},
{
"name": "Python",
"bytes": "5190295"
},
{
"name": "Shell",
"bytes": "7108"
}
],
"symlink_target": ""
}
|
"""
:synopsis: Parent crawler module, which supervises all crawlers.
Contains functions for initializing all subsidiary, threaded crawlers.
"""
import logging
import logging.handlers
import os
import Queue
import sys
import time
from threading import Event
from .crawler import GitHubCrawler, BitbucketCrawler
from .indexer import GitIndexer, GitRepository
__all__ = ["crawl"]
MAX_URL_QUEUE_SIZE = 5e3
def crawl():
"""
Initialize all crawlers (and indexers).
Start the:
1. GitHub crawler, :class:`crawler.GitHubCrawler`.
2. Bitbucket crawler, :class:`crawler.BitbucketCrawler`.
3. Git indexer, :class:`bitshift.crawler.indexer.GitIndexer`.
"""
_configure_logging()
time.sleep(5)
repo_clone_queue = Queue.Queue(maxsize=MAX_URL_QUEUE_SIZE)
run_event = Event()
run_event.set()
threads = [GitIndexer(repo_clone_queue, run_event)]
if sys.argv[1:]:
names = sys.argv[1:]
ranks = GitHubCrawler.get_ranks(names)
for name in names:
repo = GitRepository("https://github.com/" + name, name, "GitHub",
ranks[name])
repo_clone_queue.put(repo)
else:
threads += [GitHubCrawler(repo_clone_queue, run_event),
BitbucketCrawler(repo_clone_queue, run_event)]
for thread in threads:
thread.start()
try:
while 1:
time.sleep(0.1)
except KeyboardInterrupt:
run_event.clear()
with repo_clone_queue.mutex:
repo_clone_queue.queue.clear()
for thread in threads:
thread.join()
def _configure_logging():
# This isn't ideal, since it means the bitshift python package must be kept
# inside the app, but it works for now:
root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
log_dir = os.path.join(root, "logs")
if not os.path.exists(log_dir):
os.mkdir(log_dir)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
formatter = logging.Formatter(
fmt=("%(asctime)s %(levelname)s %(name)s %(message)s"),
datefmt="%y-%m-%d %H:%M:%S")
file_handler = logging.handlers.TimedRotatingFileHandler(
"%s/%s" % (log_dir, "app.log"), when="H", interval=1,
backupCount=20)
stream_handler = logging.StreamHandler()
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.handlers = []
root_logger.addHandler(file_handler)
root_logger.addHandler(stream_handler)
root_logger.setLevel(logging.NOTSET)
if __name__ == "__main__":
crawl()
|
{
"content_hash": "bf42976f8a968e9891f4ae907079ed20",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 79,
"avg_line_length": 29.106382978723403,
"alnum_prop": 0.6363304093567251,
"repo_name": "earwig/bitshift",
"id": "fafd017bbc8d8995193fd046b9aabd72e2a85439",
"size": "2736",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bitshift/crawler/crawl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21963"
},
{
"name": "HTML",
"bytes": "22638"
},
{
"name": "Java",
"bytes": "23638"
},
{
"name": "JavaScript",
"bytes": "20474"
},
{
"name": "Python",
"bytes": "89912"
},
{
"name": "Ruby",
"bytes": "6988"
},
{
"name": "Shell",
"bytes": "1252"
},
{
"name": "TSQL",
"bytes": "4696"
}
],
"symlink_target": ""
}
|
"""OpenGL version 1.3 imaging-handling routines"""
from OpenGL import wrapper, constants, arrays
from OpenGL.raw.GL.VERSION import GL_1_3 as simple
from OpenGL.GL import images, glget
for dimensions in (1,2,3):
for function in ('glCompressedTexImage%sD','glCompressedTexSubImage%sD'):
name = function%(dimensions,)
globals()[ name ] = images.compressedImageFunction(
getattr( simple, name )
)
try:
del name, function
except NameError, err:
pass
try:
del dimensions
except NameError, err:
pass
if simple.glGetCompressedTexImage:
def glGetCompressedTexImage( target, level, img=None ):
"""Retrieve a compressed texture image"""
if img is None:
length = glget.glGetTexLevelParameteriv(
target, 0,
simple.GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB,
)
img = arrays.ArrayDataType.zeros( (length,), constants.GL_UNSIGNED_BYTE )
return simple.glGetCompressedTexImage(target, 0, img);
|
{
"content_hash": "af043ecb9705bd9f0bfff864f65b5544",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 85,
"avg_line_length": 35.733333333333334,
"alnum_prop": 0.6315298507462687,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "dd1cb79c9061d1ff3b310d9910ffb9fb307ef8fb",
"size": "1072",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/GL/VERSION/GL_1_3_images.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
#=============================================================================#
# Class definitions for the AST nodes which comprise the type language for
# which types will be inferred
class Lam(object):
"""Lambda abstraction"""
def __init__(self, v, body):
self.v = v
self.body = body
def __str__(self):
return "(\{v} -> {body})".format(v=self.v, body=self.body)
class Var(object):
"""Variable/Identifier"""
def __init__(self, name):
self.name = name
def __str__(self):
return str(self.name)
class App(object):
"""Function application"""
def __init__(self, fn, arg):
self.fn = fn
self.arg = arg
def __str__(self):
return "({fn} {arg})".format(fn=self.fn, arg=self.arg)
class Let(object):
"""Let binding (always recursive)"""
def __init__(self, v, defn, body):
self.v = v
self.defn = defn
self.body = body
def __str__(self):
exp = "(let {v} = {defn} in {body})"
return exp.format(v=self.v, defn=self.defn, body=self.body)
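# A small illustration (added example, using only the AST classes above): the expression
# `let id = \x -> x in (id y)` is built from nested nodes, and str() of the result is
# "(let id = (\x -> x) in (id y))".
def _example_ast():
    return Let("id", Lam("x", Var("x")), App(Var("id"), Var("y")))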
#=============================================================================#
# Types and type constructors
def show_type(type_name):
"""
Pretty-print a Python type or internal type name.
Args:
type_name: a Python type, or a string representing a type name
Returns: a string representation of the type
"""
if isinstance(type_name, str):
return type_name
elif isinstance(type_name, type):
return type_name.__name__
return str(type_name)
class TypeVariable(object):
"""
A type variable standing for an arbitrary type. All type variables have
a unique id, but names are only assigned lazily, when required.
Note that this approach is *not* thread-safe.
"""
next_variable_id = 0
next_var_name = 'a'
def __init__(self, constraints=()):
self.id = TypeVariable.next_variable_id
TypeVariable.next_variable_id += 1
self.instance = None
self.__name = None
self.constraints = constraints
def __getName(self):
"""
Names are allocated to TypeVariables lazily, so that only TypeVariables
converted to strings are given names.
"""
if self.__name is None:
self.__name = TypeVariable.next_var_name
TypeVariable.next_var_name = chr(ord(TypeVariable.next_var_name)
+ 1)
return self.__name
name = property(__getName)
def __str__(self):
if self.instance is not None:
return str(self.instance)
return self.name
def __repr__(self):
return "TypeVariable(id = {0})".format(self.id)
class TypeOperator(object):
"""An n-ary type constructor which builds a new type from old"""
def __init__(self, name, types):
self.name = name
self.types = types
def __str__(self):
num_types = len(self.types)
if num_types == 0:
return show_type(self.name)
return "({0} {1})".format(show_type(self.name),
' '.join(map(show_type, self.types)))
class Function(TypeOperator):
"""A binary type constructor which builds function types"""
def __init__(self, from_type, to_type):
super(self.__class__, self).__init__("->", [from_type, to_type])
def __str__(self):
return "({1} {0} {2})".format(show_type(self.name),
*map(show_type, self.types))
class Tuple(TypeOperator):
"""N-ary constructor which builds tuple types"""
def __init__(self, types):
super(self.__class__, self).__init__(tuple, types)
def __str__(self):
return "({0})".format(", ".join(map(show_type, self.types)))
class ListType(TypeOperator):
"""Unary constructor which builds list types"""
def __init__(self, list_type):
super(self.__class__, self).__init__("[]", [list_type])
def __str__(self):
return "[{0}]".format(show_type(self.types[0]))
#=============================================================================#
# Type inference machinery
def analyze(node, env, non_generic=None):
"""
Computes the type of the expression given by node.
The type of the node is computed in the context of the supplied type
environment, env. Data types can be introduced into the language simply by
having a predefined set of identifiers in the initial environment. This way
there is no need to change the syntax or, more importantly, the
type-checking program when extending the language.
Args:
node: The root of the abstract syntax tree.
env: The type environment is a mapping of expression identifier names
to type assignments.
non_generic: A set of non-generic variables, or None
Returns:
The computed type of the expression.
Raises:
TypeError: The type of the expression could not be inferred, for
example if it is not possible to unify two types such as
Integer and Bool or if the abstract syntax tree rooted at
node could not be parsed
"""
if non_generic is None:
non_generic = set()
if isinstance(node, Var):
return getType(node.name, env, non_generic)
elif isinstance(node, App):
fun_type = analyze(node.fn, env, non_generic)
arg_type = analyze(node.arg, env, non_generic)
result_type = TypeVariable()
unify(Function(arg_type, result_type), fun_type)
return result_type
elif isinstance(node, Lam):
arg_type = TypeVariable()
new_env = env.copy()
new_env[node.v] = arg_type
new_non_generic = non_generic.copy()
new_non_generic.add(arg_type)
result_type = analyze(node.body, new_env, new_non_generic)
return Function(arg_type, result_type)
elif isinstance(node, Let):
new_type = TypeVariable()
new_env = env.copy()
new_env[node.v] = new_type
new_non_generic = non_generic.copy()
new_non_generic.add(new_type)
defn_type = analyze(node.defn, new_env, new_non_generic)
unify(new_type, defn_type)
return analyze(node.body, new_env, non_generic)
assert 0, "Unhandled syntax node {0}".format(node)
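# Hedged usage sketch (added example): inferring the type of the identity function applied
# to itself under a let-binding. An empty environment suffices because the expression
# introduces all of its own identifiers; this only illustrates how analyze() is called.
def _example_inference():
    expr = Let("id", Lam("x", Var("x")), App(Var("id"), Var("id")))
    return analyze(expr, {})   # yields a function type such as (b -> b)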
def getType(name, env, non_generic):
"""Get the type of identifier name from the type environment env.
Args:
name: The identifier name
env: The type environment mapping from identifier names to types
non_generic: A set of non-generic TypeVariables
Raises:
ParseError: Raised if name is an undefined symbol in the type
environment.
"""
if name in env:
return fresh(env[name], non_generic)
raise TypeError("Undefined symbol {0}".format(name))
def fresh(t, non_generic):
"""Makes a copy of a type expression.
    The type t is copied. The generic variables are duplicated and the
non_generic variables are shared.
Args:
t: A type to be copied.
non_generic: A set of non-generic TypeVariables
"""
mappings = {} # A mapping of TypeVariables to TypeVariables
def freshrec(tp):
p = prune(tp)
if isinstance(p, TypeVariable):
if isGeneric(p, non_generic):
if p not in mappings:
mappings[p] = TypeVariable()
return mappings[p]
else:
return p
elif isinstance(p, TypeOperator):
return TypeOperator(p.name, [freshrec(x) for x in p.types])
return freshrec(t)
def unify_var(v1, t2):
"""
Unify the type variable v1 and the type t2, i.e. makes their types the same
and unifies typeclass constraints.
Note: Must be called with v1 and t2 pre-pruned
Args:
v1: The type variable to be made equivalent
        t2: The second type to be made equivalent
Returns:
None
Raises:
TypeError: Raised if the types cannot be unified.
"""
if v1 != t2:
if isinstance(t2, TypeVariable):
# unify typeclass constraints
union = tuple(set(v1.constraints + t2.constraints))
v1.constraints = union
t2.constraints = union
if occursInType(v1, t2):
raise TypeError("recursive unification")
v1.instance = t2
return
def unify(t1, t2):
"""
Unify the two types t1 and t2. Makes the types t1 and t2 the same.
Note that the current method of unifying higher-kinded types does not
properly handle kind, i.e. it will happily unify `f a` and `g b c`.
This is due to the way that typeclasses are implemented, and will be fixed
in future versions.
Args:
t1: The first type to be made equivalent
        t2: The second type to be made equivalent
Returns:
None
Raises:
TypeError: Raised if the types cannot be unified.
"""
a = prune(t1)
b = prune(t2)
if isinstance(a, TypeVariable):
unify_var(a, b)
elif isinstance(a, TypeOperator) and isinstance(b, TypeVariable):
unify_var(b, a)
elif isinstance(a, TypeOperator) and isinstance(b, TypeOperator):
# Unify polymorphic higher-kinded type
if isinstance(a.name, TypeVariable) and len(a.types) > 0:
a.name = b.name
a.types = b.types
unify(a, b)
elif isinstance(b.name, TypeVariable):
unify(b, a)
# Unify concrete higher-kinded type
elif (a.name != b.name or len(a.types) != len(b.types)):
raise TypeError("Type mismatch: {0} != {1}".format(str(a), str(b)))
for p, q in zip(a.types, b.types):
unify(p, q)
else:
raise TypeError("Not unified")
return
def prune(t):
"""
Returns the currently defining instance of t.
As a side effect, collapses the list of type instances. The function prune
is used whenever a type expression has to be inspected: it will always
return a type expression which is either an uninstantiated type variable or
a type operator; i.e. it will skip instantiated variables, and will
actually prune them from expressions to remove long chains of instantiated
variables.
Args:
t: The type to be pruned
Returns:
An uninstantiated TypeVariable or a TypeOperator
"""
if isinstance(t, TypeVariable):
if t.instance is not None:
t.instance = prune(t.instance)
return t.instance
return t
def isGeneric(v, non_generic):
"""
Checks whether a given variable occurs in a list of non-generic variables
    Note that a variable in such a list may be instantiated to a type term,
in which case the variables contained in the type term are considered
non-generic.
Note: Must be called with v pre-pruned
Args:
v: The TypeVariable to be tested for genericity
non_generic: A set of non-generic TypeVariables
Returns:
"true" if v is a generic variable, otherwise False
"""
return not occursIn(v, non_generic)
def occursInType(v, type2):
"""Checks whether a type variable occurs in a type expression.
Note: Must be called with v pre-pruned
Args:
v: The TypeVariable to be tested for
type2: The type in which to search
Returns:
True if v occurs in type2, otherwise False
"""
pruned_type2 = prune(type2)
if pruned_type2 == v:
return True
elif isinstance(pruned_type2, TypeOperator):
return occursIn(v, pruned_type2.types)
return False
def occursIn(t, types):
"""
    Checks whether a type variable occurs in any of the given types.
Args:
        t: The TypeVariable to be tested for
types: The sequence of types in which to search
Returns:
True if t occurs in any of types, otherwise False
"""
return any(occursInType(t, t2) for t2 in types)
|
{
"content_hash": "08b2cbd279daf8d97d1444ca83a7251a",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 79,
"avg_line_length": 30.268115942028984,
"alnum_prop": 0.5776075333173729,
"repo_name": "silky/hask",
"id": "9dffd3995fba74ba2239b510ef6164ac5068999b",
"size": "13402",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hask/lang/hindley_milner.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "297981"
}
],
"symlink_target": ""
}
|
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file() or not get_service_name():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name()):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from ClusterServicePyImpl import ClusterServicePy
impl_ClusterServicePy = ClusterServicePy(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.__str__()
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
        This method is the same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
        # we already deserialized the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'ClusterServicePy'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.rpc_service.add(impl_ClusterServicePy.estimate_k,
name='ClusterServicePy.estimate_k',
types=[dict, int, int, int, int, int])
self.rpc_service.add(impl_ClusterServicePy.estimate_k_new,
name='ClusterServicePy.estimate_k_new',
types=[dict, int, int, basestring, int, float, int, int])
self.rpc_service.add(impl_ClusterServicePy.cluster_k_means,
name='ClusterServicePy.cluster_k_means',
types=[dict, int, int, int, int, basestring])
self.rpc_service.add(impl_ClusterServicePy.cluster_hierarchical,
name='ClusterServicePy.cluster_hierarchical',
types=[dict, basestring, basestring, float, int])
self.rpc_service.add(impl_ClusterServicePy.clusters_from_dendrogram,
name='ClusterServicePy.clusters_from_dendrogram',
types=[dict, basestring, float])
self.rpc_service.add(impl_ClusterServicePy.calc_cluster_qualities,
name='ClusterServicePy.calc_cluster_qualities',
types=[list])
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
try:
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded python BaseHTTP server
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
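    # Usage sketch (newprocess=True so the server can later be stopped with
    # stop_server(); port 0 asks the OS for a free port):
    #     port = start_server(newprocess=True)
    #     ...issue JSON-RPC requests against http://localhost:<port>...
    #     stop_server()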
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
ctx['rpc_context'] = req['context']
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if len(sys.argv) >= 3 and len(sys.argv) <= 4 and os.path.isfile(sys.argv[1]):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
{
"content_hash": "88af33eb7c4d9e9e262a9534d2348cd7",
"timestamp": "",
"source": "github",
"line_count": 518,
"max_line_length": 134,
"avg_line_length": 36.9034749034749,
"alnum_prop": 0.5551370579619167,
"repo_name": "kbase/feature_values",
"id": "156d02a099a3ee94be10e06ddab8a34e691a390f",
"size": "19138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clusterservice/ClusterServicePyServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "93"
},
{
"name": "Java",
"bytes": "514071"
},
{
"name": "Makefile",
"bytes": "6196"
},
{
"name": "Python",
"bytes": "34816"
},
{
"name": "R",
"bytes": "8577"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class CnbetaapisConfig(AppConfig):
name = 'CnbetaApis'
|
{
"content_hash": "04531452afa7aa4c60be0157b73abdf7",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 19,
"alnum_prop": 0.7684210526315789,
"repo_name": "kagenZhao/cnBeta",
"id": "fd1a26dd0899443c9ab00a9da5f04a29e14344f9",
"size": "119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CnbetaApi/CnbetaApis/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33522"
},
{
"name": "Ruby",
"bytes": "1453"
},
{
"name": "Swift",
"bytes": "53314"
}
],
"symlink_target": ""
}
|
import cv2
import sys
def edge_detect(infile, nogui=False):
# The first argument is the image
image = cv2.imread(infile)
    # convert to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # blur it
blurred_image = cv2.GaussianBlur(gray_image, (7,7), 0)
# Use low thresholds
canny = cv2.Canny(blurred_image, 10, 30)
# Use high thresholds
canny2 = cv2.Canny(blurred_image, 50, 150)
if nogui:
cv2.imwrite('test_edge.png', canny2)
else:
cv2.imshow("Orignal Image", image)
cv2.imshow("Canny with low thresholds", canny)
cv2.imshow("Canny with high thresholds", canny2)
cv2.waitKey(0)
if __name__ == "__main__":
edge_detect(sys.argv[1])
|
{
"content_hash": "debb07bd3c23176850f0da32bca71575",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 58,
"avg_line_length": 27,
"alnum_prop": 0.6392318244170097,
"repo_name": "shantnu/PyEng",
"id": "a7d9f3a592a2baa08cc885ad3ff7944d36ffe0a6",
"size": "729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Image_Video/edge_detect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2140"
},
{
"name": "Jupyter Notebook",
"bytes": "29665"
},
{
"name": "Perl",
"bytes": "716"
},
{
"name": "Python",
"bytes": "27927"
},
{
"name": "Shell",
"bytes": "643"
}
],
"symlink_target": ""
}
|
"""Admins for the models of the ``paypal_express_checkout`` app."""
import django
from django.contrib import admin
from . import models
try:
user_model = django.contrib.auth.get_user_model()
except AttributeError:
user_model = django.contrib.auth.models.User
username_field = getattr(user_model, 'USERNAME_FIELD', 'username')
class ItemAdmin(admin.ModelAdmin):
"""Custom admin for the ``Item`` model."""
list_display = ['name', 'description_short', 'value']
search_fields = ['name', 'description']
def description_short(self, obj):
return '{0}...'.format(obj.description[:50])
class PaymentTransactionAdmin(admin.ModelAdmin):
"""Custom admin for the ``PaymentTransaction`` model."""
list_display = [
'creation_date', 'date', 'user', 'user_email', 'transaction_id',
'value', 'status',
]
search_fields = [
'transaction_id', 'status', 'user__email', 'user__' + username_field]
date_hierarchy = 'creation_date'
list_filter = ['status']
raw_id_fields = ['user', ]
def user_email(self, obj):
return obj.user.email
class PaymentTransactionErrorAdmin(admin.ModelAdmin):
"""Custom admin for the ``PaymentTransactionError`` model."""
list_display = [
# FIXME 'transaction_id'
'date', 'user', 'user_email', 'response_short',
]
def user_email(self, obj):
return obj.user.email
def response_short(self, obj):
return '{0}...'.format(obj.response[:50])
def transaction_id(self, obj):
return obj.transaction_id
class PurchasedItemAdmin(admin.ModelAdmin):
"""Custom admin for the ``PurchasedItem`` model."""
list_display = [
'identifier', 'date', 'user', 'user_email', 'transaction', 'item',
'price', 'quantity', 'subtotal', 'total', 'status',
]
list_filter = [
'identifier', 'transaction__status', 'item', ]
search_fields = [
'transaction__transaction_id', 'user__email', ]
raw_id_fields = ['user', 'transaction', ]
def date(self, obj):
return obj.transaction.date
def status(self, obj):
return obj.transaction.status
def subtotal(self, obj):
price = 0
if obj.item is not None:
price = obj.item.value
if obj.price:
price = obj.price
return price * obj.quantity
def total(self, obj):
return obj.transaction.value
def user_email(self, obj):
return obj.user.email
admin.site.register(models.Item, ItemAdmin)
admin.site.register(models.PaymentTransaction, PaymentTransactionAdmin)
admin.site.register(
models.PaymentTransactionError, PaymentTransactionErrorAdmin)
admin.site.register(models.PurchasedItem, PurchasedItemAdmin)
|
{
"content_hash": "f1933594de239a8a59de50aab2cf6fae",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 77,
"avg_line_length": 29.03157894736842,
"alnum_prop": 0.6388687454677302,
"repo_name": "bitmazk/django-paypal-express-checkout",
"id": "b1ba6dffc2d17d9068b75151cb8af5f047c8b634",
"size": "2758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paypal_express_checkout/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "141517"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1NodeSelectorTerm(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'match_expressions': 'list[V1NodeSelectorRequirement]',
'match_fields': 'list[V1NodeSelectorRequirement]'
}
attribute_map = {
'match_expressions': 'matchExpressions',
'match_fields': 'matchFields'
}
def __init__(self, match_expressions=None, match_fields=None, local_vars_configuration=None): # noqa: E501
"""V1NodeSelectorTerm - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._match_expressions = None
self._match_fields = None
self.discriminator = None
if match_expressions is not None:
self.match_expressions = match_expressions
if match_fields is not None:
self.match_fields = match_fields
@property
def match_expressions(self):
"""Gets the match_expressions of this V1NodeSelectorTerm. # noqa: E501
A list of node selector requirements by node's labels. # noqa: E501
:return: The match_expressions of this V1NodeSelectorTerm. # noqa: E501
:rtype: list[V1NodeSelectorRequirement]
"""
return self._match_expressions
@match_expressions.setter
def match_expressions(self, match_expressions):
"""Sets the match_expressions of this V1NodeSelectorTerm.
A list of node selector requirements by node's labels. # noqa: E501
:param match_expressions: The match_expressions of this V1NodeSelectorTerm. # noqa: E501
:type: list[V1NodeSelectorRequirement]
"""
self._match_expressions = match_expressions
@property
def match_fields(self):
"""Gets the match_fields of this V1NodeSelectorTerm. # noqa: E501
A list of node selector requirements by node's fields. # noqa: E501
:return: The match_fields of this V1NodeSelectorTerm. # noqa: E501
:rtype: list[V1NodeSelectorRequirement]
"""
return self._match_fields
@match_fields.setter
def match_fields(self, match_fields):
"""Sets the match_fields of this V1NodeSelectorTerm.
A list of node selector requirements by node's fields. # noqa: E501
:param match_fields: The match_fields of this V1NodeSelectorTerm. # noqa: E501
:type: list[V1NodeSelectorRequirement]
"""
self._match_fields = match_fields
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeSelectorTerm):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NodeSelectorTerm):
return True
return self.to_dict() != other.to_dict()
|
{
"content_hash": "13ce1e80dca2707df37cce94db2b7371",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 124,
"avg_line_length": 32.391891891891895,
"alnum_prop": 0.6065915727993325,
"repo_name": "kubernetes-client/python",
"id": "6364b331146f0810cfdbc2b26f2537714fd2c5f3",
"size": "4811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_node_selector_term.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
}
|
"""Module for testing the del metacluster command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelMetaCluster(TestBrokerCommand):
def testdelutmc1(self):
command = ["del_metacluster", "--metacluster=utmc1"]
err = self.statustest(command)
self.matchoutput(err, "sent 0 server notifications", command)
def testverifydelutmc1(self):
command = ["show_metacluster", "--metacluster=utmc1"]
self.notfoundtest(command)
def testdelutmc2(self):
command = ["del_metacluster", "--metacluster=utmc2"]
err = self.statustest(command)
self.matchoutput(err, "sent 0 server notifications", command)
def testverifydelutmc2(self):
command = ["show_metacluster", "--metacluster=utmc2"]
self.notfoundtest(command)
def testdelutmc3(self):
command = ["del_metacluster", "--metacluster=utmc3"]
err = self.statustest(command)
self.matchoutput(err, "sent 0 server notifications", command)
def testverifydelutmc3(self):
command = ["show_metacluster", "--metacluster=utmc3"]
self.notfoundtest(command)
def testdelutmc4(self):
command = ["del_metacluster", "--metacluster=utmc4"]
err = self.statustest(command)
self.matchoutput(err, "sent 0 server notifications", command)
def testdelutmc5(self):
command = ["del_metacluster", "--metacluster=utmc5"]
err = self.statustest(command)
self.matchoutput(err, "sent 0 server notifications", command)
def testdelutmc6(self):
command = ["del_metacluster", "--metacluster=utmc6"]
err = self.statustest(command)
self.matchoutput(err, "sent 0 server notifications", command)
def testdelutmc7(self):
command = ["del_metacluster", "--metacluster=utmc7"]
err = self.statustest(command)
self.matchoutput(err, "sent 0 server notifications", command)
def testdelutsandbox(self):
# Test moving machines between metaclusters
command = ["del_metacluster", "--metacluster=sandboxmc"]
err = self.statustest(command)
self.matchoutput(err, "sent 0 server notifications", command)
def testdelvulcan1(self):
command = ["del_metacluster", "--metacluster=vulcan1"]
err = self.statustest(command)
self.matchoutput(err, "sent 0 server notifications", command)
def testverifyall(self):
command = ["show_metacluster", "--all"]
out = self.commandtest(command)
self.matchclean(out, "Metacluster: utmc", command)
def testdelnotfound(self):
command = ["del_metacluster",
"--metacluster=metacluster-does-not-exist"]
self.notfoundtest(command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelMetaCluster)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "6c72bfc7cba792087de0ca67528a160e",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 75,
"avg_line_length": 34.72093023255814,
"alnum_prop": 0.6560616208975217,
"repo_name": "jrha/aquilon",
"id": "d5316e701ffa767fb98727f62a4fbb2c910c57e6",
"size": "3714",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/broker/test_del_metacluster.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import json
import os
import unittest
from conans.client import tools
from conans.client.runner import ConanRunner
from conans.model.ref import ConanFileReference
from conans.paths import RUN_LOG_NAME
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient, TestServer,\
TestBufferConanOutput
from conans.util.files import load
class ConanTraceTest(unittest.TestCase):
def setUp(self):
test_server = TestServer()
self.servers = {"default": test_server}
def test_run_log_file_package_test(self):
"""Check if the log file is generated and packaged"""
base = '''
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello0"
version = "0.1"
def build(self):
self.run('echo "Simulating cmake..."')
def package(self):
self.copy(pattern="%s", dst="", keep_path=False)
''' % RUN_LOG_NAME
def _install_a_package(print_commands_to_output, generate_run_log_file):
output = TestBufferConanOutput()
runner = ConanRunner(print_commands_to_output, generate_run_log_file,
log_run_to_output=True, output=output)
client = TestClient(servers=self.servers,
users={"default": [("lasote", "mypass")]},
runner=runner)
ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
client.save({"conanfile.py": base})
client.run("create . lasote/stable")
package_dir = client.cache.package_layout(ref).packages()
package_dir = os.path.join(package_dir, os.listdir(package_dir)[0])
log_file_packaged_ = os.path.join(package_dir, RUN_LOG_NAME)
output = "\n".join([str(output), str(client.out)])
return log_file_packaged_, output
log_file_packaged, output = _install_a_package(False, True)
self.assertIn("Packaged 1 '.log' file: conan_run.log", output)
self.assertTrue(os.path.exists(log_file_packaged))
contents = load(log_file_packaged)
self.assertIn("Simulating cmake...", contents)
self.assertNotIn("----Running------%s> echo" % os.linesep, contents)
log_file_packaged, output = _install_a_package(True, True)
self.assertIn("Packaged 1 '.log' file: conan_run.log", output)
self.assertTrue(os.path.exists(log_file_packaged))
contents = load(log_file_packaged)
self.assertIn("Simulating cmake...", contents)
self.assertIn("----Running------%s> echo" % os.linesep, contents)
log_file_packaged, output = _install_a_package(False, False)
self.assertNotIn("Packaged 1 '.log' file: conan_run.log", output)
self.assertFalse(os.path.exists(log_file_packaged))
def test_trace_actions(self):
client = TestClient(servers=self.servers,
users={"default": [("lasote", "mypass")]})
trace_file = os.path.join(temp_folder(), "conan_trace.log")
with tools.environment_append({"CONAN_TRACE_FILE": trace_file}):
# UPLOAD A PACKAGE
ref = ConanFileReference.loads("Hello0/0.1@lasote/stable")
files = cpp_hello_conan_files("Hello0", "0.1", need_patch=True, build=False)
client.save(files)
client.run("user lasote -p mypass -r default")
client.run("export . lasote/stable")
client.run("install %s --build missing" % str(ref))
client.run("upload %s --all" % str(ref))
traces = load(trace_file)
self.assertNotIn("mypass", traces)
self.assertIn('"password": "**********"', traces)
self.assertIn('"Authorization": "**********"', traces)
self.assertIn('"X-Client-Anonymous-Id": "**********"', traces)
actions = traces.splitlines()
without_rest_api = [it for it in actions if "REST_API_CALL" not in it]
self.assertTrue(len(without_rest_api) == 11)
for trace in actions:
doc = json.loads(trace)
self.assertIn("_action", doc) # Valid jsons
self.assertEqual(json.loads(without_rest_api[0])["_action"], "COMMAND")
self.assertEqual(json.loads(without_rest_api[0])["name"], "authenticate")
self.assertEqual(json.loads(without_rest_api[2])["_action"], "COMMAND")
self.assertEqual(json.loads(without_rest_api[2])["name"], "export")
self.assertEqual(json.loads(without_rest_api[3])["_action"], "COMMAND")
self.assertEqual(json.loads(without_rest_api[3])["name"], "install_reference")
self.assertEqual(json.loads(without_rest_api[4])["_action"], "GOT_RECIPE_FROM_LOCAL_CACHE")
self.assertEqual(json.loads(without_rest_api[4])["_id"], "Hello0/0.1@lasote/stable")
self.assertEqual(json.loads(without_rest_api[5])["_action"], "PACKAGE_BUILT_FROM_SOURCES")
self.assertEqual(json.loads(without_rest_api[6])["_action"], "COMMAND")
self.assertEqual(json.loads(without_rest_api[6])["name"], "upload")
self.assertEqual(json.loads(without_rest_api[7])["_action"], "ZIP")
self.assertEqual(json.loads(without_rest_api[8])["_action"], "UPLOADED_RECIPE")
self.assertEqual(json.loads(without_rest_api[9])["_action"], "ZIP")
self.assertEqual(json.loads(without_rest_api[10])["_action"], "UPLOADED_PACKAGE")
num_put = len([it for it in actions if "REST_API_CALL" in it and "PUT" in it])
            self.assertEqual(num_put, 6)  # 3 files for the recipe, 3 for the package
num_post = len([it for it in actions if "REST_API_CALL" in it and "POST" in it])
if "/v2/" in traces:
self.assertEqual(num_post, 0)
else:
self.assertEqual(num_post, 2) # 2 get urls
num_get = len([it for it in actions if "REST_API_CALL" in it and "GET" in it])
self.assertEqual(num_get, 10)
# Check masked signature
for action in actions:
doc = json.loads(action)
if doc.get("url") and "signature" in doc.get("url"):
self.assertIn("signature=*****", doc.get("url"))
|
{
"content_hash": "8cc8691bfafbbbfa3a4799857323ddb3",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 99,
"avg_line_length": 47.17424242424242,
"alnum_prop": 0.6157057973341898,
"repo_name": "memsharded/conan",
"id": "7430c6fb1b1d2cf040bb3626edc991356acd8e13",
"size": "6227",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/functional/old/conan_trace_file_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Groovy",
"bytes": "12586"
},
{
"name": "Python",
"bytes": "4334185"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
}
|
"""Test for the ee.geometry module."""
import unittest
import ee
from ee import apitestcase
class GeometryTest(apitestcase.ApiTestCase):
def testValid_Point(self):
"""Verifies Point constructor behavior with valid arguments."""
self.assertValid(1, ee.Geometry.Point, [1, 2])
self.assertValid(1, ee.Geometry.Point, 1, 2)
def testValid_MultiPoint(self):
"""Verifies MultiPoint constructor behavior with valid arguments."""
self.assertValid(2, ee.Geometry.MultiPoint, 1, 2, 3, 4, 5, 6)
self.assertValid(1, ee.Geometry.MultiPoint)
def testValid_LineString(self):
"""Verifies LineString constructor behavior with valid arguments."""
self.assertValid(2, ee.Geometry.LineString, 1, 2, 3, 4, 5, 6)
def testValid_LinearRing(self):
"""Verifies LinearRing constructor behavior with valid arguments."""
self.assertValid(2, ee.Geometry.LinearRing, 1, 2, 3, 4, 5, 6)
def testValid_MultiLineString(self):
"""Verifies MultiLineString constructor behavior with valid arguments."""
self.assertValid(3, ee.Geometry.MultiLineString, 1, 2, 3, 4, 5, 6)
self.assertValid(1, ee.Geometry.MultiLineString)
def testValid_Polygon(self):
"""Verifies Polygon constructor behavior with valid arguments."""
self.assertValid(3, ee.Geometry.Polygon, 1, 2, 3, 4, 5, 6)
def testValid_Rectangle(self):
"""Verifies Rectangle constructor behavior with valid arguments."""
self.assertValid(3, ee.Geometry.Rectangle, 1, 2, 5, 6)
def testValid_MultiPolygon(self):
"""Verifies MultiPolygon constructor behavior with valid arguments."""
self.assertValid(4, ee.Geometry.MultiPolygon, 1, 2, 3, 4, 5, 6)
self.assertValid(1, ee.Geometry.MultiPolygon)
def testValid_GeometryCollection(self):
"""Verifies GeometryCollection constructor behavior with valid arguments."""
geometry = ee.Geometry({
'type':
'GeometryCollection',
'geometries': [{
'type': 'Polygon',
'coordinates': [[[-1, -1], [0, 1], [1, -1]]],
'geodesic': True,
'evenOdd': True
}, {
'type': 'Point',
'coordinates': [0, 0]
}, {
'type':
'GeometryCollection',
'geometries': [{
'type': 'Point',
'coordinates': [1, 2]
}, {
'type': 'Point',
'coordinates': [2, 1]
}]
}],
'coordinates': []
})
self.assertIsInstance(geometry, ee.Geometry)
def testInvalid_Point(self):
"""Verifies Point constructor behavior with invalid arguments."""
f = ee.Geometry.Point
self.assertInvalid(f, 'Invalid geometry', ['-78.204948', '40.966539'])
def testInvalid_MultiPoint(self):
"""Verifies MultiPoint constructor behavior with invalid arguments."""
f = ee.Geometry.MultiPoint
self.assertInvalid(
f, 'Invalid number of coordinates: 5', 1, 2, 3, 4, 5)
self.assertInvalid(f, 'Invalid number of coordinates: 5', [1, 2, 3, 4, 5])
self.assertInvalid(f, 'Invalid geometry', [[1, 2], [3, 4], 5])
# Too many nesting levels.
self.assertInvalid(f, 'Invalid geometry', [[[1, 2], [3, 4]]])
def testInvalid_LineString(self):
"""Verifies LineString constructor behavior with invalid arguments."""
f = ee.Geometry.LineString
self.assertInvalid(
f, 'Invalid number of coordinates: 5', 1, 2, 3, 4, 5)
self.assertInvalid(f, 'Invalid number of coordinates: 5', [1, 2, 3, 4, 5])
self.assertInvalid(f, 'Invalid geometry', [[1, 2], [3, 4], 5])
# Too many nesting levels.
self.assertInvalid(f, 'Invalid geometry', [[[1, 2], [3, 4]]])
def testInvalid_LinearRing(self):
"""Verifies LinearRing constructor behavior with invalid arguments."""
f = ee.Geometry.LinearRing
self.assertInvalid(
f, 'Invalid number of coordinates: 5', 1, 2, 3, 4, 5)
self.assertInvalid(f, 'Invalid number of coordinates: 5', [1, 2, 3, 4, 5])
self.assertInvalid(f, 'Invalid geometry', [[1, 2], [3, 4], 5])
# Too many nesting levels.
self.assertInvalid(f, 'Invalid geometry', [[[1, 2], [3, 4]]])
def testInvalid_MultiLineString(self):
"""Verifies MultiLineString constructor behavior with invalid arguments."""
f = ee.Geometry.MultiLineString
self.assertInvalid(
f, 'Invalid number of coordinates: 5', 1, 2, 3, 4, 5)
self.assertInvalid(f, 'Invalid number of coordinates: 5', [1, 2, 3, 4, 5])
self.assertInvalid(f, 'Invalid geometry', [[1, 2], [3, 4], 5])
# Too many nesting levels.
self.assertInvalid(f, 'Invalid geometry', [[[[1, 2], [3, 4]]]])
# Bad nesting
self.assertInvalid(f, 'Invalid geometry', [[[1, 2], [3, 4]], [1, 2]])
def testInvalid_Polygon(self):
"""Verifies Polygon constructor behavior with invalid arguments."""
f = ee.Geometry.Polygon
self.assertInvalid(
f, 'Invalid number of coordinates: 5', 1, 2, 3, 4, 5)
self.assertInvalid(f, 'Invalid number of coordinates: 5', [1, 2, 3, 4, 5])
self.assertInvalid(f, 'Invalid geometry', [[1, 2], [3, 4], 5])
# Too many nesting levels.
self.assertInvalid(f, 'Invalid geometry', [[[[1, 2], [3, 4], [5, 6]]]])
# Bad nesting
self.assertInvalid(f, 'Invalid geometry', [[[1, 2], [3, 4]], [1, 2]])
def testInvalid_MultiPolygon(self):
"""Verifies MultiPolygon constructor behavior with invalid arguments."""
f = ee.Geometry.MultiPolygon
self.assertInvalid(f, 'Invalid number of coordinates: 5', 1, 2, 3, 4, 5)
self.assertInvalid(f, 'Invalid number of coordinates: 5', [1, 2, 3, 4, 5])
self.assertInvalid(f, 'Invalid geometry', [[1, 2], [3, 4], 5])
# Too many nesting levels.
self.assertInvalid(f, 'Invalid geometry', [[[[[1, 2], [3, 4], [5, 6]]]]])
# Bad nesting
self.assertInvalid(f, 'Invalid geometry', [[[[1, 2], [3, 4]], [1, 2]]])
def testEvenOddPolygon(self):
poly1 = ee.Geometry.Polygon([0, 0, 0, 5, 5, 0])
self.assertTrue(poly1.toGeoJSON()['evenOdd'])
poly2 = ee.Geometry.Polygon([0, 0, 0, 5, 5, 0], None, None, None, False)
self.assertFalse(poly2.toGeoJSON()['evenOdd'])
def testArrayConstructors(self):
"""Verifies that constructors that take arrays fix nesting."""
get_coordinates_count = lambda g: len(g.toGeoJSON()['coordinates'])
point = ee.Geometry.Point([1, 2])
self.assertEqual(2, get_coordinates_count(point))
multipoint = ee.Geometry.MultiPoint([[1, 2], [3, 4], [5, 6]])
self.assertEqual(3, get_coordinates_count(multipoint))
line = ee.Geometry.LineString([[1, 2], [3, 4], [5, 6]])
self.assertEqual(3, get_coordinates_count(line))
ring = ee.Geometry.LinearRing([[1, 2], [3, 4], [5, 6]])
self.assertEqual(3, get_coordinates_count(ring))
multiline = ee.Geometry.MultiLineString(
[[[1, 2], [3, 4]],
[[5, 6], [7, 8]]])
self.assertEqual(2, get_coordinates_count(multiline))
polygon = ee.Geometry.Polygon([[[1, 2], [3, 4], [5, 6]]])
self.assertEqual(1, get_coordinates_count(polygon))
mpolygon = ee.Geometry.MultiPolygon(
[[[[1, 2], [3, 4], [5, 6]]],
[[[1, 2], [3, 4], [5, 6]]]])
self.assertEqual(2, get_coordinates_count(mpolygon))
def testGeodesicFlag(self):
"""Verifies that JSON parsing and generation preserves the geodesic flag."""
geodesic = ee.Geometry({
'type': 'LineString',
'coordinates': [[1, 2], [3, 4]],
'geodesic': True
})
projected = ee.Geometry({
'type': 'LineString',
'coordinates': [[1, 2], [3, 4]],
'geodesic': False
})
self.assertTrue(geodesic.toGeoJSON()['geodesic'])
self.assertFalse(projected.toGeoJSON()['geodesic'])
def testConstructor(self):
"""Check the behavior of the Geometry constructor.
There are 5 options:
1) A geoJSON object.
2) A not-computed geometry.
3) A not-computed geometry with overrides.
4) A computed geometry.
5) something to cast to geometry.
"""
line = ee.Geometry.LineString(1, 2, 3, 4)
# GeoJSON.
from_json = ee.Geometry(line.toGeoJSON())
self.assertEqual(from_json.func, None)
self.assertEqual(from_json._type, 'LineString')
self.assertEqual(from_json._coordinates, [[1, 2], [3, 4]])
# GeoJSON with a CRS specified.
json_with_crs = line.toGeoJSON()
json_with_crs['crs'] = {
'type': 'name',
'properties': {
'name': 'SR-ORG:6974'
}
}
from_json_with_crs = ee.Geometry(json_with_crs)
self.assertEqual(from_json_with_crs.func, None)
self.assertEqual(from_json_with_crs._type, 'LineString')
self.assertEqual(from_json_with_crs._proj, 'SR-ORG:6974')
# A not-computed geometry.
self.assertEqual(ee.Geometry(line), line)
# A not-computed geometry with an override.
with_override = ee.Geometry(line, 'SR-ORG:6974')
self.assertEqual(with_override._proj, 'SR-ORG:6974')
# A computed geometry.
self.assertEqual(ee.Geometry(line.bounds()), line.bounds())
# Something to cast to a geometry.
computed = ee.ComputedObject(ee.Function(), {'a': 1})
geom = ee.Geometry(computed)
self.assertEqual(computed.func, geom.func)
self.assertEqual(computed.args, geom.args)
def testComputedGeometries(self):
"""Verifies the computed object behavior of the Geometry constructor."""
line = ee.Geometry.LineString(1, 2, 3, 4)
bounds = line.bounds()
self.assertIsInstance(bounds, ee.Geometry)
self.assertEqual(ee.ApiFunction.lookup('Geometry.bounds'), bounds.func)
self.assertEqual(line, bounds.args['geometry'])
self.assertTrue(hasattr(bounds, 'bounds'))
def testComputedCoordinate(self):
"""Verifies that a computed coordinate produces a computed geometry."""
coords = [1, ee.Number(1).add(1)]
p = ee.Geometry.Point(coords)
self.assertIsInstance(p, ee.Geometry)
self.assertEqual(
ee.ApiFunction.lookup('GeometryConstructors.Point'), p.func)
self.assertEqual({'coordinates': ee.List(coords)}, p.args)
def testComputedList(self):
"""Verifies that a computed coordinate produces a computed geometry."""
lst = ee.List([1, 2, 3, 4]).slice(0, 2)
p = ee.Geometry.Point(lst)
self.assertIsInstance(p, ee.Geometry)
self.assertEqual(
ee.ApiFunction.lookup('GeometryConstructors.Point'), p.func)
self.assertEqual({'coordinates': lst}, p.args)
def testComputedProjection(self):
"""Verifies that a geometry with a projection can be constructed."""
p = ee.Geometry.Point([1, 2], 'epsg:4326')
self.assertIsInstance(p, ee.Geometry)
self.assertEqual(
ee.ApiFunction.lookup('GeometryConstructors.Point'), p.func)
expected_args = {
'coordinates': ee.List([1, 2]),
'crs': ee.ApiFunction.lookup('Projection').call('epsg:4326')
}
self.assertEqual(expected_args, p.args)
def testGeometryInputs(self):
"""Verifies that a geometry with geometry inputs can be constructed."""
p1 = ee.Geometry.Point([1, 2])
p2 = ee.Geometry.Point([3, 4])
line = ee.Geometry.LineString([p1, p2])
self.assertIsInstance(line, ee.Geometry)
self.assertEqual(
ee.ApiFunction.lookup('GeometryConstructors.LineString'), line.func)
self.assertEqual({'coordinates': ee.List([p1, p2])}, line.args)
def testOldPointKeywordArgs(self):
"""Verifies that Points still allow keyword lon/lat args."""
self.assertEqual(ee.Geometry.Point(1, 2), ee.Geometry.Point(lon=1, lat=2))
self.assertEqual(ee.Geometry.Point(1, 2), ee.Geometry.Point(1, lat=2))
def testOldRectangleKeywordArgs(self):
"""Verifies that Rectangles still allow keyword xlo/ylo/xhi/yhi args."""
self.assertEqual(
ee.Geometry.Rectangle(1, 2, 3, 4),
ee.Geometry.Rectangle(xlo=1, ylo=2, xhi=3, yhi=4))
self.assertEqual(
ee.Geometry.Rectangle(1, 2, 3, 4),
ee.Geometry.Rectangle(1, 2, xhi=3, yhi=4))
def wgs84_rectangle(self, west, south, east, north):
# If we call ee.Geometry.Rectangle with geodesic=False we would get a
# computed call.
return ee.Geometry({
'coordinates': [[[west, north],
[west, south],
[east, south],
[east, north]]],
'type': 'Polygon',
'geodesic': False,
})
def testBBox_simple(self):
self.assertEqual(self.wgs84_rectangle(-10, -20, 10, 20),
ee.Geometry.BBox(-10, -20, 10, 20))
def testBBox_computed(self):
ten = ee.Number(5).add(5)
box = ee.Geometry.BBox(-10, -20, ten, 20)
self.assertIsInstance(box, ee.Geometry)
self.assertEqual(
ee.ApiFunction.lookup('GeometryConstructors.BBox'), box.func)
expected_args = {
'west': ee.Number(-10),
'south': ee.Number(-20),
'east': ten,
'north': ee.Number(20),
}
self.assertEqual(expected_args, box.args)
def testBBox_latitude_widerThanPolesIsClamped(self):
self.assertEqual(
self.wgs84_rectangle(-10, -90, 10, 73),
ee.Geometry.BBox(-10, -1000, 10, 73))
self.assertEqual(
self.wgs84_rectangle(-10, -34, 10, 90),
ee.Geometry.BBox(-10, -34, 10, 10000))
def testBBox_latitude_notBeyondPoles(self):
# Reject cases which, if we clamped them instead, would move a box whose
# bounds lie past a pole to being a point at the pole.
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry\.BBox: north must be at least -90°, but was -95°',
-10, -100, 10, -95)
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry\.BBox: south must be at most \+90°, but was 95°',
-10, 95, 10, 100)
def testBBox_latitude_zeroSpan(self):
self.assertEqual(
self.wgs84_rectangle(-10, 20, 10, 20),
ee.Geometry.BBox(-10, 20, 10, 20))
def testBBox_longitude_crossingMeridianWithOppositeSigns(self):
self.assertEqual(
self.wgs84_rectangle(170, -20, 190, 20),
ee.Geometry.BBox(170, -20, -170, 20))
def testBBox_longitude_crossingMeridianWithNegativeSigns(self):
self.assertEqual(
self.wgs84_rectangle(170, -20, 190, 20),
ee.Geometry.BBox(-190, -20, -170, 20))
def testBBox_longitude_crossingMeridianWithPositiveSigns(self):
self.assertEqual(
self.wgs84_rectangle(170, -20, 190, 20),
ee.Geometry.BBox(170, -20, 190, 20))
def testBBox_longitude_exactlyGlobal(self):
self.assertEqual(
self.wgs84_rectangle(-180, -20, 180, 20),
ee.Geometry.BBox(-180, -20, 180, 20))
def testBBox_longitude_excessOfGlobalIsClamped(self):
epsilon = 1e-5
self.assertEqual(
self.wgs84_rectangle(-180, -20, 180, 20),
ee.Geometry.BBox(-180 - epsilon, -20, 180 + epsilon, 20))
def testBBox_longitude_zeroSpan(self):
self.assertEqual(
self.wgs84_rectangle(10, -20, 10, 20),
ee.Geometry.BBox(10, -20, 10, 20))
def testBBox_NaN_isRejected(self):
nan = float('nan')
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry\.BBox: west must not be nan',
nan, -20, 10, 20)
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry\.BBox: south must be at most \+90°, but was nan°',
-10, nan, 10, 20)
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry\.BBox: east must not be nan',
-10, -20, nan, 20)
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry.BBox: north must be at least -90°, but was nan°',
-10, -20, 10, nan)
def testBBox_infinities_invalidDirection_isRejected(self):
inf = float('inf')
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry\.BBox: west must not be inf',
inf, -20, 10, 20)
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry\.BBox: east must not be -inf',
-10, -20, -inf, 20)
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry\.BBox: south must be at most \+90°, but was inf°',
-10, inf, 10, 20)
self.assertInvalid(
ee.Geometry.BBox,
r'Geometry\.BBox: north must be at least -90°, but was -inf°',
-10, -20, 10, -inf)
def testBBox_infinities_validDirection_isClamped(self):
inf = float('inf')
self.assertEqual(
ee.Geometry.BBox(-180, -20, 180, 20),
ee.Geometry.BBox(-10, -20, inf, 20))
self.assertEqual(
ee.Geometry.BBox(-180, -20, 180, 20),
ee.Geometry.BBox(-inf, -20, 10, 20))
self.assertEqual(
ee.Geometry.BBox(-10, -20, 10, 90),
ee.Geometry.BBox(-10, -20, 10, inf))
self.assertEqual(
ee.Geometry.BBox(-10, -90, 10, 20),
ee.Geometry.BBox(-10, -inf, 10, 20))
def assertValid(self, nesting, ctor, *coords):
"""Checks that geometry is valid and has the expected nesting level.
Args:
nesting: The expected coordinate nesting level.
ctor: The geometry constructor function, e.g. ee.Geometry.MultiPoint.
*coords: The coordinates of the geometry.
"""
# The constructor already does a validity check.
geometry = ctor(*coords)
self.assertIsInstance(geometry, ee.Geometry)
self.assertIsInstance(geometry.toGeoJSON(), dict)
final_coords = geometry.toGeoJSON()['coordinates']
self.assertEqual(nesting, ee.Geometry._isValidCoordinates(final_coords))
def assertInvalid(self, ctor, msg, *coords):
"""Verifies that geometry is invalid.
Calls the given constructor with whatever arguments have been passed,
and verifies that the given error message is thrown.
Args:
ctor: The geometry constructor function, e.g. ee.Geometry.MultiPoint.
msg: The expected error message in the thrown exception.
*coords: The coordinates of the geometry.
"""
with self.assertRaisesRegex(ee.EEException, msg):
ctor(*coords)
def testInternals(self):
"""Test eq(), ne() and hash()."""
a = ee.Geometry.Point(1, 2)
b = ee.Geometry.Point(2, 1)
c = ee.Geometry.Point(1, 2)
self.assertEqual(a, a)
self.assertNotEqual(a, b)
self.assertEqual(a, c)
self.assertNotEqual(b, c)
self.assertNotEqual(hash(a), hash(b))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "20c19e2b08d4668e72336ba68696e4ba",
"timestamp": "",
"source": "github",
"line_count": 495,
"max_line_length": 80,
"avg_line_length": 36.73939393939394,
"alnum_prop": 0.6293852413944793,
"repo_name": "google/earthengine-api",
"id": "95e103e2f4ccb7ed13d1f086ba73f2bb67cf1f81",
"size": "18235",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/ee/tests/geometry_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1352"
},
{
"name": "JavaScript",
"bytes": "3544345"
},
{
"name": "Jupyter Notebook",
"bytes": "216509"
},
{
"name": "Python",
"bytes": "698991"
},
{
"name": "Shell",
"bytes": "1447"
},
{
"name": "TypeScript",
"bytes": "42297"
}
],
"symlink_target": ""
}
|
"""
An example for wrapping ec based modules.
"""
from ec import interface
import simple # the ec-ed script
interface.setBase(simple)
interface.call('task1 arg2=1', True)
# #Pending: Add example for other interface calls.
|
{
"content_hash": "91efe105ce8f43048546cf00cc8fdc3c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 50,
"avg_line_length": 17.46153846153846,
"alnum_prop": 0.7400881057268722,
"repo_name": "Laufire/ec",
"id": "b8a42656723904222c2c95778c615cfdf4e74c29",
"size": "227",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/examples/advanced/wrapping.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "76634"
}
],
"symlink_target": ""
}
|
"""Use operations learned with learn_bpe.py to encode a new text.
The text will not be smaller, but use only a fixed vocabulary, with rare words
encoded as variable-length sequences of subword units.
Reference:
Rico Sennrich, Barry Haddow and Alexandra Birch (2015). Neural Machine Translation of Rare Words with Subword Units.
Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
"""
from __future__ import unicode_literals, division
import sys
import codecs
import argparse
from collections import defaultdict
# hack for python2/3 compatibility
from io import open
argparse.open = open
# python 2/3 compatibility
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
else:
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
class BPE(object):
def __init__(self, codes, separator='@@'):
self.bpe_codes = [tuple(item.split()) for item in codes]
# some hacking to deal with duplicates (only consider first instance)
self.bpe_codes = dict([(code,i) for (i,code) in reversed(list(enumerate(self.bpe_codes)))])
self.separator = separator
def segment(self, sentence):
"""segment single sentence (whitespace-tokenized string) with BPE encoding"""
output = []
for word in sentence.split():
new_word = encode(word, self.bpe_codes)
for item in new_word[:-1]:
output.append(item + self.separator)
output.append(new_word[-1])
return ' '.join(output)
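# Illustrative example (the actual output depends on the learned merge
# operations, which are assumptions here): if the codes split "lower" into the
# units "lo", "w" and "er", then segment("lower") returns "lo@@ w@@ er" with
# the default '@@' separator.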
def create_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="learn BPE-based word segmentation")
parser.add_argument(
'--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
metavar='PATH',
help="Input file (default: standard input).")
parser.add_argument(
'--codes', '-c', type=argparse.FileType('r'), metavar='PATH',
required=True,
help="File with BPE codes (created by learn_bpe.py).")
parser.add_argument(
'--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
metavar='PATH',
help="Output file (default: standard output)")
parser.add_argument(
'--separator', '-s', type=str, default='@@', metavar='STR',
help="Separator between non-final subword units (default: '%(default)s'))")
return parser
def get_pairs(word):
"""Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def encode(orig, bpe_codes, cache={}):
"""Encode word based on list of BPE merge operations, which are applied consecutively
"""
if orig in cache:
return cache[orig]
word = tuple(orig) + ('</w>',)
pairs = get_pairs(word)
while True:
bigram = min(pairs, key = lambda pair: bpe_codes.get(pair, float('inf')))
if bigram not in bpe_codes:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
# don't print end-of-word symbols
if word[-1] == '</w>':
word = word[:-1]
elif word[-1].endswith('</w>'):
word = word[:-1] + (word[-1].replace('</w>',''),)
cache[orig] = word
return word
if __name__ == '__main__':
parser = create_parser()
args = parser.parse_args()
# read/write files as UTF-8
args.codes = codecs.open(args.codes.name, encoding='utf-8')
if args.input.name != '<stdin>':
args.input = codecs.open(args.input.name, encoding='utf-8')
if args.output.name != '<stdout>':
args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
bpe = BPE(args.codes, args.separator)
for line in args.input:
args.output.write(bpe.segment(line).strip())
args.output.write('\n')
|
{
"content_hash": "59f33e9a97c3c22a401c10adea1ed8f6",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 116,
"avg_line_length": 31.57051282051282,
"alnum_prop": 0.6008121827411167,
"repo_name": "yang1fan2/nematus",
"id": "c2a535f3325a3027d5119d96d53532add2593d77",
"size": "4992",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "subword-nmt-master/apply_bpe.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10914"
},
{
"name": "Batchfile",
"bytes": "18581"
},
{
"name": "C",
"bytes": "2157828"
},
{
"name": "C++",
"bytes": "9300642"
},
{
"name": "CMake",
"bytes": "13717"
},
{
"name": "CSS",
"bytes": "22945"
},
{
"name": "E",
"bytes": "66"
},
{
"name": "Emacs Lisp",
"bytes": "34068"
},
{
"name": "Forth",
"bytes": "58"
},
{
"name": "HTML",
"bytes": "439533"
},
{
"name": "Java",
"bytes": "9070"
},
{
"name": "JavaScript",
"bytes": "176069"
},
{
"name": "Logos",
"bytes": "3118"
},
{
"name": "M4",
"bytes": "47488"
},
{
"name": "Makefile",
"bytes": "293964"
},
{
"name": "NewLisp",
"bytes": "3164"
},
{
"name": "Objective-C",
"bytes": "8255"
},
{
"name": "PHP",
"bytes": "145237"
},
{
"name": "Perl",
"bytes": "1617898"
},
{
"name": "Protocol Buffer",
"bytes": "947"
},
{
"name": "Python",
"bytes": "974116"
},
{
"name": "Roff",
"bytes": "14619243"
},
{
"name": "Ruby",
"bytes": "3298"
},
{
"name": "Shell",
"bytes": "478589"
},
{
"name": "Slash",
"bytes": "634"
},
{
"name": "Smalltalk",
"bytes": "208330"
},
{
"name": "SystemVerilog",
"bytes": "368"
},
{
"name": "Yacc",
"bytes": "18910"
},
{
"name": "nesC",
"bytes": "366"
}
],
"symlink_target": ""
}
|
"""
Represents an EC2 Object
"""
from boto.ec2.tag import TagSet
class EC2Object(object):
def __init__(self, connection=None):
self.connection = connection
if self.connection and hasattr(self.connection, 'region'):
self.region = connection.region
else:
self.region = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
class TaggedEC2Object(EC2Object):
"""
Any EC2 resource that can be tagged should be represented
by a Python object that subclasses this class. This class
has the mechanism in place to handle the tagSet element in
the Describe* responses. If tags are found, it will create
a TagSet object and allow it to parse and collect the tags
into a dict that is stored in the "tags" attribute of the
object.
"""
def __init__(self, connection=None):
EC2Object.__init__(self, connection)
self.tags = TagSet()
def startElement(self, name, attrs, connection):
if name == 'tagSet':
return self.tags
else:
return None
def add_tag(self, key, value=None):
"""
Add a tag to this object. Tags are stored by AWS and can be used
to organize and filter resources. Adding a tag involves a round-trip
to the EC2 service.
:type key: str
:param key: The key or name of the tag being stored.
:type value: str
:param value: An optional value that can be stored with the tag.
"""
status = self.connection.create_tags([self.id], {key : value})
if self.tags is None:
self.tags = TagSet()
self.tags[key] = value
def remove_tag(self, key, value=None):
"""
Remove a tag from this object. Removing a tag involves a round-trip
to the EC2 service.
:type key: str
:param key: The key or name of the tag being stored.
:type value: str
:param value: An optional value that can be stored with the tag.
If a value is provided, it must match the value
currently stored in EC2. If not, the tag will not
be removed.
"""
if value:
tags = {key : value}
else:
tags = [key]
status = self.connection.delete_tags([self.id], tags)
if key in self.tags:
del self.tags[key]
|
{
"content_hash": "c791e09f691dcf7f7a8ca313f3065c90",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 77,
"avg_line_length": 31.7375,
"alnum_prop": 0.5931469082315872,
"repo_name": "drawquest/drawquest-web",
"id": "6e375961ee56d7e5941f23687b84cbeca68e342d",
"size": "3691",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "common/boto/ec2/ec2object.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "634659"
},
{
"name": "CoffeeScript",
"bytes": "8968"
},
{
"name": "HTML",
"bytes": "898627"
},
{
"name": "JavaScript",
"bytes": "1507053"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "7220727"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "3700"
}
],
"symlink_target": ""
}
|
from mi.dataset.driver.metbk_a.dcl.metbk_dcl_a_driver import process, \
TELEMETERED_PARTICLE_CLASS
from mi.core.versioning import version
@version("15.6.1")
def parse(basePythonCodePath, sourceFilePath, particleDataHdlrObj):
process(sourceFilePath, particleDataHdlrObj, TELEMETERED_PARTICLE_CLASS)
return particleDataHdlrObj
|
{
"content_hash": "840b56aa162b5e7755d11f2742b77713",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 76,
"avg_line_length": 34,
"alnum_prop": 0.8,
"repo_name": "JeffRoy/mi-dataset",
"id": "829934c027f2ce9c9e32e4cb05f54607d0e05b7e",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/metbk_a/dcl/metbk_a_dcl_telemetered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3610231"
}
],
"symlink_target": ""
}
|
from .viewport_helpers import compute_view # noqa
from .type_checking import has_geo_interface, is_pandas_df, records_from_geo_interface # noqa
from .color_scales import assign_random_colors # noqa
|
{
"content_hash": "e83e8d3b84f0ede6569afc251f730f07",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 94,
"avg_line_length": 67,
"alnum_prop": 0.7810945273631841,
"repo_name": "uber-common/deck.gl",
"id": "e40bbfbd3e35b862bb37ab8bf782e72ea1fee334",
"size": "201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bindings/pydeck/pydeck/data_utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135"
},
{
"name": "GLSL",
"bytes": "20360"
},
{
"name": "HTML",
"bytes": "316"
},
{
"name": "JavaScript",
"bytes": "65732"
}
],
"symlink_target": ""
}
|
"""Tests for options manager for :class:`Poly` and public API functions. """
from sympy.polys.polyoptions import (
Options, Expand, Gens, Wrt, Sort, Order, Field, Greedy, Domain,
Split, Gaussian, Extension, Modulus, Symmetric, Strict, Auto,
Frac, Formal, Polys, Include, All, Gen, Symbols, Method)
from sympy.polys.monomialtools import lex
from sympy.polys.domains import FF, GF, ZZ, QQ, RR, EX
from sympy.polys.polyerrors import OptionError, GeneratorsError
from sympy import Integer, Symbol, I, sqrt
from sympy.utilities.pytest import raises
from sympy.abc import x, y, z
def test_Options_clone():
opt = Options((x, y, z), {'domain': 'ZZ'})
assert opt.gens == (x, y, z)
assert opt.domain == ZZ
assert ('order' in opt) is False
new_opt = opt.clone({'gens': (x, y), 'order': 'lex'})
assert opt.gens == (x, y, z)
assert opt.domain == ZZ
assert ('order' in opt) is False
assert new_opt.gens == (x, y)
assert new_opt.domain == ZZ
assert ('order' in new_opt) is True
def test_Expand_preprocess():
assert Expand.preprocess(False) is False
assert Expand.preprocess(True) is True
assert Expand.preprocess(0) is False
assert Expand.preprocess(1) is True
raises(OptionError, lambda: Expand.preprocess(x))
def test_Expand_postprocess():
opt = {'expand': True}
Expand.postprocess(opt)
assert opt == {'expand': True}
def test_Gens_preprocess():
assert Gens.preprocess((None,)) == ()
assert Gens.preprocess((x, y, z)) == (x, y, z)
assert Gens.preprocess(((x, y, z),)) == (x, y, z)
a = Symbol('a', commutative=False)
raises(GeneratorsError, lambda: Gens.preprocess((x, x, y)))
raises(GeneratorsError, lambda: Gens.preprocess((x, y, a)))
def test_Gens_postprocess():
opt = {'gens': (x, y)}
Gens.postprocess(opt)
assert opt == {'gens': (x, y)}
def test_Wrt_preprocess():
assert Wrt.preprocess(x) == ['x']
assert Wrt.preprocess('') == []
assert Wrt.preprocess(' ') == []
assert Wrt.preprocess('x,y') == ['x', 'y']
assert Wrt.preprocess('x y') == ['x', 'y']
assert Wrt.preprocess('x, y') == ['x', 'y']
assert Wrt.preprocess('x , y') == ['x', 'y']
assert Wrt.preprocess(' x, y') == ['x', 'y']
assert Wrt.preprocess(' x, y') == ['x', 'y']
assert Wrt.preprocess([x, y]) == ['x', 'y']
raises(OptionError, lambda: Wrt.preprocess(','))
raises(OptionError, lambda: Wrt.preprocess(0))
def test_Wrt_postprocess():
opt = {'wrt': ['x']}
Wrt.postprocess(opt)
assert opt == {'wrt': ['x']}
def test_Sort_preprocess():
assert Sort.preprocess([x, y, z]) == ['x', 'y', 'z']
assert Sort.preprocess((x, y, z)) == ['x', 'y', 'z']
assert Sort.preprocess('x > y > z') == ['x', 'y', 'z']
assert Sort.preprocess('x>y>z') == ['x', 'y', 'z']
raises(OptionError, lambda: Sort.preprocess(0))
raises(OptionError, lambda: Sort.preprocess(set([x, y, z])))
def test_Sort_postprocess():
opt = {'sort': 'x > y'}
Sort.postprocess(opt)
assert opt == {'sort': 'x > y'}
def test_Order_preprocess():
assert Order.preprocess('lex') == lex
def test_Order_postprocess():
opt = {'order': True}
Order.postprocess(opt)
assert opt == {'order': True}
def test_Field_preprocess():
assert Field.preprocess(False) is False
assert Field.preprocess(True) is True
assert Field.preprocess(0) is False
assert Field.preprocess(1) is True
raises(OptionError, lambda: Field.preprocess(x))
def test_Field_postprocess():
opt = {'field': True}
Field.postprocess(opt)
assert opt == {'field': True}
def test_Greedy_preprocess():
assert Greedy.preprocess(False) is False
assert Greedy.preprocess(True) is True
assert Greedy.preprocess(0) is False
assert Greedy.preprocess(1) is True
raises(OptionError, lambda: Greedy.preprocess(x))
def test_Greedy_postprocess():
opt = {'greedy': True}
Greedy.postprocess(opt)
assert opt == {'greedy': True}
def test_Domain_preprocess():
assert Domain.preprocess(ZZ) == ZZ
assert Domain.preprocess(QQ) == QQ
assert Domain.preprocess(EX) == EX
assert Domain.preprocess(FF(2)) == FF(2)
assert Domain.preprocess(ZZ[x, y]) == ZZ[x, y]
assert Domain.preprocess('Z') == ZZ
assert Domain.preprocess('Q') == QQ
assert Domain.preprocess('ZZ') == ZZ
assert Domain.preprocess('QQ') == QQ
assert Domain.preprocess('EX') == EX
assert Domain.preprocess('FF(23)') == FF(23)
assert Domain.preprocess('GF(23)') == GF(23)
raises(OptionError, lambda: Domain.preprocess('Z[]'))
assert Domain.preprocess('Z[x]') == ZZ[x]
assert Domain.preprocess('Q[x]') == QQ[x]
assert Domain.preprocess('ZZ[x]') == ZZ[x]
assert Domain.preprocess('QQ[x]') == QQ[x]
assert Domain.preprocess('Z[x,y]') == ZZ[x, y]
assert Domain.preprocess('Q[x,y]') == QQ[x, y]
assert Domain.preprocess('ZZ[x,y]') == ZZ[x, y]
assert Domain.preprocess('QQ[x,y]') == QQ[x, y]
raises(OptionError, lambda: Domain.preprocess('Z()'))
assert Domain.preprocess('Z(x)') == ZZ.frac_field(x)
assert Domain.preprocess('Q(x)') == QQ.frac_field(x)
assert Domain.preprocess('ZZ(x)') == ZZ.frac_field(x)
assert Domain.preprocess('QQ(x)') == QQ.frac_field(x)
assert Domain.preprocess('Z(x,y)') == ZZ.frac_field(x, y)
assert Domain.preprocess('Q(x,y)') == QQ.frac_field(x, y)
assert Domain.preprocess('ZZ(x,y)') == ZZ.frac_field(x, y)
assert Domain.preprocess('QQ(x,y)') == QQ.frac_field(x, y)
assert Domain.preprocess('Q<I>') == QQ.algebraic_field(I)
assert Domain.preprocess('QQ<I>') == QQ.algebraic_field(I)
assert Domain.preprocess('Q<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
assert Domain.preprocess(
'QQ<sqrt(2), I>') == QQ.algebraic_field(sqrt(2), I)
raises(OptionError, lambda: Domain.preprocess('abc'))
def test_Domain_postprocess():
raises(GeneratorsError, lambda: Domain.postprocess({'gens': (x, y),
'domain': ZZ[y, z]}))
raises(GeneratorsError, lambda: Domain.postprocess({'gens': (),
'domain': EX}))
raises(GeneratorsError, lambda: Domain.postprocess({'domain': EX}))
def test_Split_preprocess():
assert Split.preprocess(False) is False
assert Split.preprocess(True) is True
assert Split.preprocess(0) is False
assert Split.preprocess(1) is True
raises(OptionError, lambda: Split.preprocess(x))
def test_Split_postprocess():
raises(NotImplementedError, lambda: Split.postprocess({'split': True}))
def test_Gaussian_preprocess():
assert Gaussian.preprocess(False) is False
assert Gaussian.preprocess(True) is True
assert Gaussian.preprocess(0) is False
assert Gaussian.preprocess(1) is True
raises(OptionError, lambda: Gaussian.preprocess(x))
def test_Gaussian_postprocess():
opt = {'gaussian': True}
Gaussian.postprocess(opt)
assert opt == {
'gaussian': True,
'extension': set([I]),
'domain': QQ.algebraic_field(I),
}
def test_Extension_preprocess():
assert Extension.preprocess(True) is True
assert Extension.preprocess(1) is True
assert Extension.preprocess([]) is None
assert Extension.preprocess(sqrt(2)) == set([sqrt(2)])
assert Extension.preprocess([sqrt(2)]) == set([sqrt(2)])
assert Extension.preprocess([sqrt(2), I]) == set([sqrt(2), I])
raises(OptionError, lambda: Extension.preprocess(False))
raises(OptionError, lambda: Extension.preprocess(0))
def test_Extension_postprocess():
opt = {'extension': set([sqrt(2)])}
Extension.postprocess(opt)
assert opt == {
'extension': set([sqrt(2)]),
'domain': QQ.algebraic_field(sqrt(2)),
}
opt = {'extension': True}
Extension.postprocess(opt)
assert opt == {'extension': True}
def test_Modulus_preprocess():
assert Modulus.preprocess(23) == 23
assert Modulus.preprocess(Integer(23)) == 23
raises(OptionError, lambda: Modulus.preprocess(0))
raises(OptionError, lambda: Modulus.preprocess(x))
def test_Modulus_postprocess():
opt = {'modulus': 5}
Modulus.postprocess(opt)
assert opt == {
'modulus': 5,
'domain': FF(5),
}
opt = {'modulus': 5, 'symmetric': False}
Modulus.postprocess(opt)
assert opt == {
'modulus': 5,
'domain': FF(5, False),
'symmetric': False,
}
def test_Symmetric_preprocess():
assert Symmetric.preprocess(False) is False
assert Symmetric.preprocess(True) is True
assert Symmetric.preprocess(0) is False
assert Symmetric.preprocess(1) is True
raises(OptionError, lambda: Symmetric.preprocess(x))
def test_Symmetric_postprocess():
opt = {'symmetric': True}
Symmetric.postprocess(opt)
assert opt == {'symmetric': True}
def test_Strict_preprocess():
assert Strict.preprocess(False) is False
assert Strict.preprocess(True) is True
assert Strict.preprocess(0) is False
assert Strict.preprocess(1) is True
raises(OptionError, lambda: Strict.preprocess(x))
def test_Strict_postprocess():
opt = {'strict': True}
Strict.postprocess(opt)
assert opt == {'strict': True}
def test_Auto_preprocess():
assert Auto.preprocess(False) is False
assert Auto.preprocess(True) is True
assert Auto.preprocess(0) is False
assert Auto.preprocess(1) is True
raises(OptionError, lambda: Auto.preprocess(x))
def test_Auto_postprocess():
opt = {'auto': True}
Auto.postprocess(opt)
assert opt == {'auto': True}
def test_Frac_preprocess():
assert Frac.preprocess(False) is False
assert Frac.preprocess(True) is True
assert Frac.preprocess(0) is False
assert Frac.preprocess(1) is True
raises(OptionError, lambda: Frac.preprocess(x))
def test_Frac_postprocess():
opt = {'frac': True}
Frac.postprocess(opt)
assert opt == {'frac': True}
def test_Formal_preprocess():
assert Formal.preprocess(False) is False
assert Formal.preprocess(True) is True
assert Formal.preprocess(0) is False
assert Formal.preprocess(1) is True
raises(OptionError, lambda: Formal.preprocess(x))
def test_Formal_postprocess():
opt = {'formal': True}
Formal.postprocess(opt)
assert opt == {'formal': True}
def test_Polys_preprocess():
assert Polys.preprocess(False) is False
assert Polys.preprocess(True) is True
assert Polys.preprocess(0) is False
assert Polys.preprocess(1) is True
raises(OptionError, lambda: Polys.preprocess(x))
def test_Polys_postprocess():
opt = {'polys': True}
Polys.postprocess(opt)
assert opt == {'polys': True}
def test_Include_preprocess():
assert Include.preprocess(False) is False
assert Include.preprocess(True) is True
assert Include.preprocess(0) is False
assert Include.preprocess(1) is True
raises(OptionError, lambda: Include.preprocess(x))
def test_Include_postprocess():
opt = {'include': True}
Include.postprocess(opt)
assert opt == {'include': True}
def test_All_preprocess():
assert All.preprocess(False) is False
assert All.preprocess(True) is True
assert All.preprocess(0) is False
assert All.preprocess(1) is True
raises(OptionError, lambda: All.preprocess(x))
def test_All_postprocess():
opt = {'all': True}
All.postprocess(opt)
assert opt == {'all': True}
def test_Gen_postprocess():
opt = {'gen': x}
Gen.postprocess(opt)
assert opt == {'gen': x}
def test_Symbols_preprocess():
raises(OptionError, lambda: Symbols.preprocess(x))
def test_Symbols_postprocess():
opt = {'symbols': [x, y, z]}
Symbols.postprocess(opt)
assert opt == {'symbols': [x, y, z]}
def test_Method_preprocess():
raises(OptionError, lambda: Method.preprocess(10))
def test_Method_postprocess():
opt = {'method': 'f5b'}
Method.postprocess(opt)
assert opt == {'method': 'f5b'}
|
{
"content_hash": "da6f3bec9442107a7caae72e473ac524",
"timestamp": "",
"source": "github",
"line_count": 477,
"max_line_length": 79,
"avg_line_length": 25.163522012578618,
"alnum_prop": 0.6435057902191119,
"repo_name": "amitjamadagni/sympy",
"id": "06d7c4adcdd6d783592286927418055af570908c",
"size": "12003",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/polys/tests/test_polyoptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12199014"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "287"
},
{
"name": "TeX",
"bytes": "8789"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
import httparchive
import httplib
import httpproxy
import threading
import unittest
import util
class MockCustomResponseHandler(object):
def __init__(self, response):
"""
Args:
response: An instance of ArchivedHttpResponse that is returned for each
request.
"""
self._response = response
def handle(self, request):
del request
return self._response
class MockHttpArchiveFetch(object):
def __init__(self, response):
"""
Args:
response: An instance of ArchivedHttpResponse that is returned for each
request.
"""
self.is_record_mode = False
self._response = response
def __call__(self, request):
del request # unused
return self._response
class MockHttpArchiveHandler(httpproxy.HttpArchiveHandler):
def handle_one_request(self):
httpproxy.HttpArchiveHandler.handle_one_request(self)
HttpProxyTest.HANDLED_REQUEST_COUNT += 1
class MockRules(object):
def Find(self, unused_rule_type_name): # pylint: disable=unused-argument
return lambda unused_request, unused_response: None
class HttpProxyTest(unittest.TestCase):
def setUp(self):
self.has_proxy_server_bound_port = False
self.has_proxy_server_started = False
self.allow_generate_304 = False
self.serve_response_by_http_archive = False
def set_up_proxy_server(self, response):
"""
Args:
response: An instance of ArchivedHttpResponse that is returned for each
request.
"""
HttpProxyTest.HANDLED_REQUEST_COUNT = 0
self.host = 'localhost'
self.port = 8889
custom_handlers = MockCustomResponseHandler(
response if not self.serve_response_by_http_archive else None)
rules = MockRules()
http_archive_fetch = MockHttpArchiveFetch(
response if self.serve_response_by_http_archive else None)
self.proxy_server = httpproxy.HttpProxyServer(
http_archive_fetch, custom_handlers, rules,
host=self.host, port=self.port,
allow_generate_304=self.allow_generate_304)
self.proxy_server.RequestHandlerClass = MockHttpArchiveHandler
self.has_proxy_server_bound_port = True
def tear_down_proxy_server(self):
if self.has_proxy_server_started:
self.proxy_server.shutdown()
if self.has_proxy_server_bound_port:
self.proxy_server.server_close()
def tearDown(self):
self.tear_down_proxy_server()
def serve_requests_forever(self):
self.has_proxy_server_started = True
self.proxy_server.serve_forever(poll_interval=0.01)
# Tests that handle_one_request does not leak threads, and does not try to
# re-handle connections that are finished.
def test_handle_one_request_closes_connection(self):
# By default, BaseHTTPServer.py treats all HTTP 1.1 requests as keep-alive.
# Intentionally use HTTP 1.0 to prevent this behavior.
response = httparchive.ArchivedHttpResponse(
version=10, status=200, reason="OK",
headers=[], response_data=["bat1"])
self.set_up_proxy_server(response)
t = threading.Thread(
target=HttpProxyTest.serve_requests_forever, args=(self,))
t.start()
initial_thread_count = threading.activeCount()
# Make a bunch of requests.
request_count = 10
for _ in range(request_count):
conn = httplib.HTTPConnection('localhost', 8889, timeout=10)
conn.request("GET", "/index.html")
res = conn.getresponse().read()
self.assertEqual(res, "bat1")
conn.close()
# Check to make sure that there is no leaked thread.
util.WaitFor(lambda: threading.activeCount() == initial_thread_count, 2)
self.assertEqual(request_count, HttpProxyTest.HANDLED_REQUEST_COUNT)
# Tests that keep-alive header works.
def test_keep_alive_header(self):
response = httparchive.ArchivedHttpResponse(
version=11, status=200, reason="OK",
headers=[("Connection", "keep-alive")], response_data=["bat1"])
self.set_up_proxy_server(response)
t = threading.Thread(
target=HttpProxyTest.serve_requests_forever, args=(self,))
t.start()
initial_thread_count = threading.activeCount()
# Make a bunch of requests.
request_count = 10
connections = []
for _ in range(request_count):
conn = httplib.HTTPConnection('localhost', 8889, timeout=10)
conn.request("GET", "/index.html", headers={"Connection": "keep-alive"})
res = conn.getresponse().read()
self.assertEqual(res, "bat1")
connections.append(conn)
# Repeat the same requests.
for conn in connections:
conn.request("GET", "/index.html", headers={"Connection": "keep-alive"})
res = conn.getresponse().read()
self.assertEqual(res, "bat1")
# Check that the right number of requests have been handled.
self.assertEqual(2 * request_count, HttpProxyTest.HANDLED_REQUEST_COUNT)
# Check to make sure that exactly "request_count" new threads are active.
self.assertEqual(
threading.activeCount(), initial_thread_count + request_count)
for conn in connections:
conn.close()
util.WaitFor(lambda: threading.activeCount() == initial_thread_count, 1)
# Test that opening 400 simultaneous connections does not cause httpproxy to
# hit a process fd limit. The default limit is 256 fds.
def test_max_fd(self):
response = httparchive.ArchivedHttpResponse(
version=11, status=200, reason="OK",
headers=[("Connection", "keep-alive")], response_data=["bat1"])
self.set_up_proxy_server(response)
t = threading.Thread(
target=HttpProxyTest.serve_requests_forever, args=(self,))
t.start()
# Make a bunch of requests.
request_count = 400
connections = []
for _ in range(request_count):
conn = httplib.HTTPConnection('localhost', 8889, timeout=10)
conn.request("GET", "/index.html", headers={"Connection": "keep-alive"})
res = conn.getresponse().read()
self.assertEqual(res, "bat1")
connections.append(conn)
# Check that the right number of requests have been handled.
self.assertEqual(request_count, HttpProxyTest.HANDLED_REQUEST_COUNT)
for conn in connections:
conn.close()
# Tests that conditional requests return 304.
def test_generate_304(self):
REQUEST_HEADERS = [
{},
{'If-Modified-Since': 'whatever'},
{'If-None-Match': 'whatever yet again'}]
RESPONSE_STATUSES = [200, 204, 304, 404]
for allow_generate_304 in [False, True]:
self.allow_generate_304 = allow_generate_304
for serve_response_by_http_archive in [False, True]:
self.serve_response_by_http_archive = serve_response_by_http_archive
for response_status in RESPONSE_STATUSES:
response = None
if response_status != 404:
response = httparchive.ArchivedHttpResponse(
version=11, status=response_status, reason="OK", headers=[],
response_data=["some content"])
self.set_up_proxy_server(response)
t = threading.Thread(
target=HttpProxyTest.serve_requests_forever, args=(self,))
t.start()
for method in ['GET', 'HEAD', 'POST']:
for headers in REQUEST_HEADERS:
connection = httplib.HTTPConnection('localhost', 8889, timeout=10)
connection.request(method, "/index.html", headers=headers)
response = connection.getresponse()
connection.close()
if (allow_generate_304 and
serve_response_by_http_archive and
method in ['GET', 'HEAD'] and
headers and
response_status == 200):
self.assertEqual(304, response.status)
self.assertEqual('', response.read())
else:
self.assertEqual(response_status, response.status)
self.tear_down_proxy_server()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "038bfead32d5664988af7446698af73d",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 80,
"avg_line_length": 34.96035242290749,
"alnum_prop": 0.6622983870967742,
"repo_name": "catapult-project/catapult-csm",
"id": "ff341809d8eb260dee32a96aec7cf0ac11e4e9dc",
"size": "8557",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "telemetry/third_party/web-page-replay/httpproxy_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
}
|
import vim, re
import heapq
_escape = dict((c , "\\" + c) for c in ['^','$','.','{','}','(',')','[',']','\\','/','+'])
def CtrlPPyMatch():
items = vim.eval('a:items')
astr = vim.eval('a:str')
lowAstr = astr.lower()
limit = int(vim.eval('a:limit'))
mmode = vim.eval('a:mmode')
aregex = int(vim.eval('a:regex'))
crfile = vim.eval('a:crfile')
if crfile in items and int(vim.eval("pymatcher#ShouldHideCurrentFile(a:ispath, a:crfile)")):
items.remove(crfile)
rez = vim.eval('s:rez')
regex = ''
if aregex == 1:
regex = astr
else:
# Escape all of the characters as necessary
escaped = [_escape.get(c, c) for c in lowAstr]
# If the string is longer than one character, append a mismatch
# expression to each character (except the last).
if len(lowAstr) > 1:
regex = ''.join([c + "[^" + c + "]*" for c in escaped[:-1]])
# Append the last character in the string to the regex
regex += escaped[-1]
# Because the IGNORECASE flag is extremely expensive, we convert everything to lower case
# see https://github.com/FelikZ/ctrlp-py-matcher/issues/29
regex = regex.lower()
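# Illustrative example (not part of the original source): a query like "abc"
# yields the pattern "a[^a]*b[^b]*c", i.e. each query character may be followed
# by any run of characters other than itself before the next one must match.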
res = []
prog = re.compile(regex)
def filename_score(line):
# get filename via reverse find to improve performance
slashPos = line.rfind('/')
if slashPos != -1:
line = line[slashPos + 1:]
lineLower = line.lower()
result = prog.search(lineLower)
if result:
score = result.end() - result.start() + 1
score = score + ( len(lineLower) + 1 ) / 100.0
score = score + ( len(line) + 1 ) / 1000.0
return 1000.0 / score
return 0
def path_score(line):
lineLower = line.lower()
result = prog.search(lineLower)
if result:
score = result.end() - result.start() + 1
score = score + ( len(lineLower) + 1 ) / 100.0
return 1000.0 / score
return 0
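# Scoring sketch (descriptive comment, values are illustrative): shorter match
# spans and shorter candidate strings score higher, because 1000.0 is divided
# by the span length plus small penalties proportional to the string lengths.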
if mmode == 'filename-only':
res = [(filename_score(line), line) for line in items]
elif mmode == 'first-non-tab':
res = [(path_score(line.split('\t')[0]), line) for line in items]
elif mmode == 'until-last-tab':
res = [(path_score(line.rsplit('\t')[0]), line) for line in items]
else:
res = [(path_score(line), line) for line in items]
rez.extend([line for score, line in heapq.nlargest(limit, res) if score != 0])
# Use double quoted vim strings and escape \
vimrez = ['"' + line.replace('\\', '\\\\').replace('"', '\\"') + '"' for line in rez]
vim.command("let s:regex = '%s'" % regex)
vim.command('let s:rez = [%s]' % ','.join(vimrez))
|
{
"content_hash": "486d864555e4c3327a9509b01bd4cb42",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 100,
"avg_line_length": 32.174418604651166,
"alnum_prop": 0.5514998192988797,
"repo_name": "FelikZ/ctrlp-py-matcher",
"id": "a819deae40ec4949823bf3ef798b8efbfb82fecf",
"size": "2767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoload/pymatcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2767"
},
{
"name": "Vim script",
"bytes": "1163"
}
],
"symlink_target": ""
}
|
import inspect
import os
import sys
import unittest
import numpy as np
import numpy.testing as nptest
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import statistics
import stubs
import util
class StatisticsTest(unittest.TestCase):
def setUp(self):
self.video_data = util.VideoData(stubs.raw)
self.spread_span = statistics.SpreadSpan(self.video_data)
def test_span(self):
self.assertEqual(self.spread_span.span_by_vid, stubs.span_by_vid)
def test_spread(self):
self.assertEqual(self.spread_span.spread_by_vid, stubs.spread_by_vid)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "ae9feda43d4e83420b7954dfa9fae5af",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 86,
"avg_line_length": 25.3,
"alnum_prop": 0.7061923583662714,
"repo_name": "c4fcm/WhatWeWatch-Analysis",
"id": "a34c3440109210c279d13e5d7aa17a063019754c",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/teststatistics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "2228"
},
{
"name": "Python",
"bytes": "133478"
},
{
"name": "R",
"bytes": "2048"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
try:
import cPickle as pickle
except:
import pickle
import numpy as np
import scipy.misc
IMAGE_SIZE = 32
def to_one_hot(y, num_label):
"""Converts a one-dimensional label array to a two-dimensional one-hot array"""
return np.eye(num_label)[y]
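# Example (illustrative): to_one_hot(np.array([0, 2]), 3)
# -> array([[1., 0., 0.],
#           [0., 0., 1.]])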
def load_character_image(path, preprocess=False, normalize=None):
"""Loads an image from the disk
After being loaded from disk, the 3-channel image is flattened to a grayscale
image. If preprocess is set to True, the image is resized to 32x32 using
bilinear interpolation.
Args:
path: path to the image
preprocess: if set to True, the image is preprocessed. Default is False
normalize: a tuple of the form (mean, std). Each of these two values is a
one-dimensional array of the size that matches the size of the
image. Each pixel of the image is subtracted and divided by the
corresponding value in each of these arrays. Default is None
Returns:
A two-dimensional floating-point array containing grayscale value of each
pixel of the loaded image.
"""
img = scipy.misc.imread(path, flatten=True)
if preprocess:
img = scipy.misc.imresize(img, (IMAGE_SIZE, IMAGE_SIZE), interp='bilinear')
if normalize is not None:
mean, std = normalize
img = ((img.ravel() - mean)/std).reshape(img.shape)
return img
class ImageDataset:
"""A class that contains datasets of images
On construction, this class load images and labels from the specified path.
As a convention for this project, this path can either be
'dataset/detectorData' or 'dataset/recognizerData'. Each of these paths
contains a full training set and a test set.
The full training set is later split into a training set and a validation set. On
splitting, the mean and standard deviation of each feature in the training set
are recorded as mean_train and std_train. These two arrays are used to
normalize the training, validation and test sets.
Attributes:
X_train: input of training set
y_train: label of training set
X_val: input of validation set
y_val: label of validation set
X_test: input of test set
y_test: label of test set
mean_train: mean of the training set
std_train: standard deviation of the training set
"""
def __init__(self, location_prefix, **kwargs):
self.mean_train = kwargs.get('mean_train', 0)
self.std_train = kwargs.get('std_train', 1)
self.test_only = kwargs.get('test_only', False)
self.train_val_ratio = kwargs.get('train_val_ratio', 0.85)
self._load_character_images(location_prefix)
def _load_character_images(self, prefix):
if not self.test_only:
train_label_path = os.path.join(prefix, 'trainLabels.csv')
self.all_y_train = np.genfromtxt(train_label_path, delimiter=',', usecols=1,
dtype=np.int32)
N_all_train = self.all_y_train.shape[0]
print('Loading train set...')
all_X_train = []
for i in xrange(1, N_all_train + 1):
img_path = os.path.join(prefix, 'train', str(i) + '.png')
I = load_character_image(img_path)
all_X_train.append(I.ravel())
print('Finished loading train set')
self.all_X_train = np.array(all_X_train)
self._split_train_val()
self.X_train = self.normalize(self.X_train)
self.X_val = self.normalize(self.X_val)
test_label_path = os.path.join(prefix, 'testLabels.csv')
self.y_test = np.genfromtxt(test_label_path, delimiter=',', usecols=1,
dtype=np.int32)
N_test = self.y_test.shape[0]
print('Loading test set...')
X_test = []
for i in xrange(1, N_test + 1):
img_path = os.path.join(prefix, 'test', str(i) + '.png')
I = load_character_image(img_path)
X_test.append(I.ravel())
print('Finished loading test set')
self.X_test = np.array(X_test)
self.X_test = self.normalize(self.X_test)
def normalize(self, sample):
if self.mean_train is not None:
return (sample - self.mean_train) / self.std_train
return sample
def save_normalize(self, path):
"""Saves normalization values to a file
Normalization values are the mean and standard deviation of the training
set. This is saved as a tuple (mean, std) to a pickle file.
Args:
path: path to the pickle file that the normalization is saved to.
"""
if self.mean_train is not None:
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'wb') as f:
pickle.dump((self.mean_train, self.std_train), f)
def train_batches(self, batch_size):
"""Returns a generator for fetching a batch of training data
Args:
batch_size: size of each batch
"""
if self.test_only:
yield
self._shuffle_train()
for start in xrange(0, self.X_train.shape[0], batch_size):
yield ( self.X_train[start: start + batch_size],
self.y_train[start: start + batch_size])
def val_batches(self, batch_size):
if self.test_only:
yield
for start in xrange(0, self.X_val.shape[0], batch_size):
yield ( self.X_val[start: start + batch_size],
self.y_val[start: start + batch_size])
def _split_train_val(self):
perm = np.arange(self.all_X_train.shape[0])
np.random.shuffle(perm)
self.all_X_train = self.all_X_train[perm]
self.all_y_train = self.all_y_train[perm]
train_val_split = int(self.all_y_train.shape[0] * self.train_val_ratio)
self.X_train = self.all_X_train[:train_val_split]
self.y_train = self.all_y_train[:train_val_split]
self.X_val = self.all_X_train[train_val_split:]
self.y_val = self.all_y_train[train_val_split:]
self.mean_train = np.mean(self.X_train, axis=0)
self.std_train = np.std(self.X_train, axis=0)
def _shuffle_train(self):
perm = np.arange(self.X_train.shape[0])
np.random.shuffle(perm)
self.X_train = self.X_train[perm]
self.y_train = self.y_train[perm]
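# Minimal usage sketch (paths and batch size are illustrative assumptions):
# data = ImageDataset('dataset/recognizerData')
# data.save_normalize('models/normalize.pkl')
# for X_batch, y_batch in data.train_batches(batch_size=64):
#     ...  # train on the normalized, flattened grayscale vectors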
|
{
"content_hash": "5e1d82b169055961932a666c30d465ee",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 82,
"avg_line_length": 34.10497237569061,
"alnum_prop": 0.6588368702413737,
"repo_name": "cuongdtnguyen/poster-reader",
"id": "3f18295f50e86d1f09b33ca3c15800a55b15f84e",
"size": "6173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42205"
}
],
"symlink_target": ""
}
|
import time
from twitter.common import app, log
from twitter.common.app.modules.http import RootServer
from twitter.common.log.options import LogOptions
from twitter.common.metrics import RootMetrics
from twitter.common.examples.pingpong import PingPongServer
app.add_option('--target_host', default='localhost',
help='The target host to send pingpong requests.')
app.add_option('--target_port', default=12345, type='int',
help='The target port to send pingpong requests.')
app.configure('twitter.common.app.modules.http', enable=True)
def main(args, options):
pingpong = PingPongServer(options.target_host, options.target_port)
RootServer().mount_routes(pingpong)
RootMetrics().register_observable('pingpong', pingpong)
try:
time.sleep(2**20)
except KeyboardInterrupt:
log.info('Shutting down.')
LogOptions.set_disk_log_level('NONE')
LogOptions.set_stderr_log_level('google:DEBUG')
app.main()
|
{
"content_hash": "7d7adfcec85d10af0824dc1a42d3213b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 69,
"avg_line_length": 28.87878787878788,
"alnum_prop": 0.7429171038824763,
"repo_name": "Yasumoto/commons",
"id": "1f8bb5e17de53bcf3eecd829ca3159550dd9d6be",
"size": "953",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "src/python/twitter/common/examples/pingpong_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "26960"
},
{
"name": "HTML",
"bytes": "14899"
},
{
"name": "Java",
"bytes": "2607127"
},
{
"name": "JavaScript",
"bytes": "29955"
},
{
"name": "Python",
"bytes": "1202158"
},
{
"name": "Scala",
"bytes": "8271"
},
{
"name": "Shell",
"bytes": "27935"
},
{
"name": "Smalltalk",
"bytes": "79"
},
{
"name": "Thrift",
"bytes": "51878"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0016_user_background'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='short_description',
),
]
|
{
"content_hash": "41c640dd4a5446c44524a86909a2e9a6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 42,
"avg_line_length": 19.058823529411764,
"alnum_prop": 0.5895061728395061,
"repo_name": "jamesaud/se1-group4",
"id": "05bcf15f43cbefb249f7935e67155bdc1c0b30e8",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jmatcher/users/migrations/0017_remove_user_short_description.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246623"
},
{
"name": "HTML",
"bytes": "119706"
},
{
"name": "JavaScript",
"bytes": "108620"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Python",
"bytes": "224980"
},
{
"name": "Shell",
"bytes": "8041"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# Register your models here.
|
{
"content_hash": "5e69319c9e399be1c9cc9beede9c1c5a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 21,
"alnum_prop": 0.7936507936507936,
"repo_name": "wevote/WebAppPublic",
"id": "01c606da7e9775803aa1bd0442c5be2857ee4322",
"size": "147",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "candidate/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8022"
},
{
"name": "HTML",
"bytes": "131153"
},
{
"name": "JavaScript",
"bytes": "296860"
},
{
"name": "Python",
"bytes": "1700558"
},
{
"name": "Shell",
"bytes": "252"
}
],
"symlink_target": ""
}
|
import os
import sys
from os.path import abspath, dirname, join as path_join
from robot import run_cli
CURDIR = abspath(dirname(__file__))
LIBROOT = path_join(CURDIR, 'libs')
ESCAPED_CURDIR = CURDIR.replace(' ', '!')
DEFAULT_ARGS = '--escape space:! --variable PROJECTROOT:{root} {source}'.format(
root=ESCAPED_CURDIR, source=path_join(ESCAPED_CURDIR, 'tests-ios'))
def extend_python_path():
sys.path.append(LIBROOT)
def main(cli_args):
extend_python_path()
cli_args.extend(DEFAULT_ARGS.split())
return run_cli(cli_args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
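# Example invocation (illustrative): "python run_ios.py --include smoke" passes
# any extra Robot Framework options through and appends the escape/PROJECTROOT
# defaults before running the tests-ios suite.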
|
{
"content_hash": "a3657bec2b1264818f48671eb3a39293",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 80,
"avg_line_length": 26.347826086956523,
"alnum_prop": 0.6798679867986799,
"repo_name": "aknackiron/testdroid-samples",
"id": "0bba5febe6d335002026488503c021e86fb00405",
"size": "652",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "robot-framework/run_ios.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "305"
},
{
"name": "C#",
"bytes": "3106"
},
{
"name": "CSS",
"bytes": "6444"
},
{
"name": "Dockerfile",
"bytes": "209"
},
{
"name": "Gherkin",
"bytes": "5922"
},
{
"name": "HTML",
"bytes": "2399"
},
{
"name": "Java",
"bytes": "181375"
},
{
"name": "JavaScript",
"bytes": "117841"
},
{
"name": "Objective-C",
"bytes": "4422"
},
{
"name": "Python",
"bytes": "66629"
},
{
"name": "RobotFramework",
"bytes": "7550"
},
{
"name": "Ruby",
"bytes": "37967"
},
{
"name": "Shell",
"bytes": "36337"
},
{
"name": "Swift",
"bytes": "9381"
}
],
"symlink_target": ""
}
|
import os
import unittest
import numpy
from cclib.bridge import cclib2openbabel
class OpenbabelTest(unittest.TestCase):
"""Tests for the cclib2openbabel bridge in cclib."""
def setUp(self):
self.path = os.path.abspath(os.path.dirname(__file__))
def test_makeopenbabel(self):
try:
from openbabel import openbabel
except ImportError:
import openbabel
atomnos = numpy.array([1, 8, 1], "i")
atomcoords = numpy.array([[[-1., 1., 0.], [0., 0., 0.], [1., 1., 0.]]])
obmol = cclib2openbabel.makeopenbabel(atomcoords, atomnos)
obconversion = openbabel.OBConversion()
formatok = obconversion.SetOutFormat("inchi")
assert obconversion.WriteString(obmol).strip() == "InChI=1S/H2O/h1H2"
def test_makeopenbabel_and_makecclib(self):
"""Ensure that makeopenbabel and makecclib are inverse of each other"""
atomnos = numpy.array([1, 8, 1], "i")
atomcoords = numpy.array([[[-1., 1., 0.], [0., 0., 0.], [1., 1., 0.]]])
# makecclib(makeopenbabel(...))
obmol = cclib2openbabel.makeopenbabel(atomcoords, atomnos)
data = cclib2openbabel.makecclib(obmol)
numpy.testing.assert_allclose(data.atomcoords, atomcoords)
numpy.testing.assert_allclose(data.atomnos, atomnos)
# makeopenbabel(makecclib(...))
obmol = cclib2openbabel.makeopenbabel(data.atomcoords, data.atomnos)
data = cclib2openbabel.makecclib(obmol) # this line is just to make the test easier
numpy.testing.assert_allclose(data.atomcoords, atomcoords)
numpy.testing.assert_allclose(data.atomnos, atomnos)
def test_readfile(self):
"""Try to load an XYZ file with uracyl through Openbabel"""
data = cclib2openbabel.readfile(f"{self.path}/uracil.xyz", "XYZ")
assert data.natom == 12
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "16c2af2b8a33bbdf0883feab5ed32ce6",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 92,
"avg_line_length": 37.3921568627451,
"alnum_prop": 0.6407970634504457,
"repo_name": "langner/cclib",
"id": "d82a30c5994079d1da24ec39e0d7b444782dcd41",
"size": "2105",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/bridge/testopenbabel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arc",
"bytes": "18395"
},
{
"name": "C++",
"bytes": "21085"
},
{
"name": "DIGITAL Command Language",
"bytes": "31999"
},
{
"name": "Python",
"bytes": "1612580"
},
{
"name": "Roff",
"bytes": "375502"
},
{
"name": "Shell",
"bytes": "1484"
},
{
"name": "TeX",
"bytes": "29388"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import functools
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.datastore_admin_v1.services.datastore_admin import pagers
from google.cloud.datastore_admin_v1.types import datastore_admin
from google.cloud.datastore_admin_v1.types import index
from google.protobuf import empty_pb2 # type: ignore
from .transports.base import DatastoreAdminTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import DatastoreAdminGrpcAsyncIOTransport
from .client import DatastoreAdminClient
class DatastoreAdminAsyncClient:
"""Google Cloud Datastore Admin API
The Datastore Admin API provides several admin services for
Cloud Datastore.
-----------------------------------------------------------------------------
## Concepts
Project, namespace, kind, and entity as defined in the Google
Cloud Datastore API.
Operation: An Operation represents work being performed in the
background.
EntityFilter: Allows specifying a subset of entities in a
project. This is specified as a combination of kinds and
namespaces (either or both of which may be all).
-----------------------------------------------------------------------------
## Services
# Export/Import
The Export/Import service provides the ability to copy all or a
subset of entities to/from Google Cloud Storage.
Exported data may be imported into Cloud Datastore for any
Google Cloud Platform project. It is not restricted to the
export source project. It is possible to export from one project
and then import into another.
Exported data can also be loaded into Google BigQuery for
analysis.
Exports and imports are performed asynchronously. An Operation
resource is created for each export/import. The state (including
any errors encountered) of the export/import may be queried via
the Operation resource.
# Index
The index service manages Cloud Datastore composite indexes.
Index creation and deletion are performed asynchronously. An
Operation resource is created for each such asynchronous
operation. The state of the operation (including any errors
encountered) may be queried via the Operation resource.
# Operation
The Operations collection provides a record of actions performed
for the specified project (including any operations in
progress). Operations are not created directly but through calls
on other collections or resources.
An operation that is not yet done may be cancelled. The request
to cancel is asynchronous and the operation may continue to run
for some time after the request to cancel is made.
An operation that is done may be deleted so that it is no longer
listed as part of the Operation collection.
ListOperations returns all pending operations, but not completed
operations.
Operations are created by service DatastoreAdmin,
but are accessed via service google.longrunning.Operations.
"""
_client: DatastoreAdminClient
DEFAULT_ENDPOINT = DatastoreAdminClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = DatastoreAdminClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(
DatastoreAdminClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
DatastoreAdminClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(DatastoreAdminClient.common_folder_path)
parse_common_folder_path = staticmethod(
DatastoreAdminClient.parse_common_folder_path
)
common_organization_path = staticmethod(
DatastoreAdminClient.common_organization_path
)
parse_common_organization_path = staticmethod(
DatastoreAdminClient.parse_common_organization_path
)
common_project_path = staticmethod(DatastoreAdminClient.common_project_path)
parse_common_project_path = staticmethod(
DatastoreAdminClient.parse_common_project_path
)
common_location_path = staticmethod(DatastoreAdminClient.common_location_path)
parse_common_location_path = staticmethod(
DatastoreAdminClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DatastoreAdminAsyncClient: The constructed client.
"""
return DatastoreAdminClient.from_service_account_info.__func__(DatastoreAdminAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DatastoreAdminAsyncClient: The constructed client.
"""
return DatastoreAdminClient.from_service_account_file.__func__(DatastoreAdminAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return DatastoreAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> DatastoreAdminTransport:
"""Returns the transport used by the client instance.
Returns:
DatastoreAdminTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(DatastoreAdminClient).get_transport_class, type(DatastoreAdminClient)
)
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, DatastoreAdminTransport] = "grpc_asyncio",
client_options: Optional[ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the datastore admin client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DatastoreAdminTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = DatastoreAdminClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def export_entities(
self,
request: Optional[Union[datastore_admin.ExportEntitiesRequest, dict]] = None,
*,
project_id: Optional[str] = None,
labels: Optional[MutableMapping[str, str]] = None,
entity_filter: Optional[datastore_admin.EntityFilter] = None,
output_url_prefix: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Exports a copy of all or a subset of entities from
Google Cloud Datastore to another storage system, such
as Google Cloud Storage. Recent updates to entities may
not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed
via the Operation resource that is created. The output
of an export may only be used once the associated
operation is done. If an export operation is cancelled
before completion it may leave partial data behind in
Google Cloud Storage.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datastore_admin_v1
async def sample_export_entities():
# Create a client
client = datastore_admin_v1.DatastoreAdminAsyncClient()
# Initialize request argument(s)
request = datastore_admin_v1.ExportEntitiesRequest(
project_id="project_id_value",
output_url_prefix="output_url_prefix_value",
)
# Make the request
operation = client.export_entities(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datastore_admin_v1.types.ExportEntitiesRequest, dict]]):
The request object. The request for
[google.datastore.admin.v1.DatastoreAdmin.ExportEntities][google.datastore.admin.v1.DatastoreAdmin.ExportEntities].
project_id (:class:`str`):
Required. Project ID against which to
make the request.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
labels (:class:`MutableMapping[str, str]`):
Client-assigned labels.
This corresponds to the ``labels`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entity_filter (:class:`google.cloud.datastore_admin_v1.types.EntityFilter`):
Description of what data from the
project is included in the export.
This corresponds to the ``entity_filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
output_url_prefix (:class:`str`):
Required. Location for the export metadata and data
files.
The full resource URL of the external storage location.
Currently, only Google Cloud Storage is supported. So
output_url_prefix should be of the form:
``gs://BUCKET_NAME[/NAMESPACE_PATH]``, where
``BUCKET_NAME`` is the name of the Cloud Storage bucket
and ``NAMESPACE_PATH`` is an optional Cloud Storage
namespace path (this is not a Cloud Datastore
namespace). For more information about Cloud Storage
namespace paths, see `Object name
considerations <https://cloud.google.com/storage/docs/naming#object-considerations>`__.
The resulting files will be nested deeper than the
specified URL prefix. The final output URL will be
provided in the
[google.datastore.admin.v1.ExportEntitiesResponse.output_url][google.datastore.admin.v1.ExportEntitiesResponse.output_url]
field. That value should be used for subsequent
ImportEntities operations.
By nesting the data files deeper, the same Cloud Storage
bucket can be used in multiple ExportEntities operations
without conflict.
This corresponds to the ``output_url_prefix`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.datastore_admin_v1.types.ExportEntitiesResponse` The response for
[google.datastore.admin.v1.DatastoreAdmin.ExportEntities][google.datastore.admin.v1.DatastoreAdmin.ExportEntities].
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[project_id, labels, entity_filter, output_url_prefix]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datastore_admin.ExportEntitiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if entity_filter is not None:
request.entity_filter = entity_filter
if output_url_prefix is not None:
request.output_url_prefix = output_url_prefix
if labels:
request.labels.update(labels)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.export_entities,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("project_id", request.project_id),)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
datastore_admin.ExportEntitiesResponse,
metadata_type=datastore_admin.ExportEntitiesMetadata,
)
# Done; return the response.
return response
async def import_entities(
self,
request: Optional[Union[datastore_admin.ImportEntitiesRequest, dict]] = None,
*,
project_id: Optional[str] = None,
labels: Optional[MutableMapping[str, str]] = None,
input_url: Optional[str] = None,
entity_filter: Optional[datastore_admin.EntityFilter] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Imports entities into Google Cloud Datastore.
Existing entities with the same key are overwritten. The
import occurs in the background and its progress can be
monitored and managed via the Operation resource that is
created. If an ImportEntities operation is cancelled, it
is possible that a subset of the data has already been
imported to Cloud Datastore.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datastore_admin_v1
async def sample_import_entities():
# Create a client
client = datastore_admin_v1.DatastoreAdminAsyncClient()
# Initialize request argument(s)
request = datastore_admin_v1.ImportEntitiesRequest(
project_id="project_id_value",
input_url="input_url_value",
)
# Make the request
operation = client.import_entities(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datastore_admin_v1.types.ImportEntitiesRequest, dict]]):
The request object. The request for
[google.datastore.admin.v1.DatastoreAdmin.ImportEntities][google.datastore.admin.v1.DatastoreAdmin.ImportEntities].
project_id (:class:`str`):
Required. Project ID against which to
make the request.
This corresponds to the ``project_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
labels (:class:`MutableMapping[str, str]`):
Client-assigned labels.
This corresponds to the ``labels`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
input_url (:class:`str`):
Required. The full resource URL of the external storage
location. Currently, only Google Cloud Storage is
supported. So input_url should be of the form:
``gs://BUCKET_NAME[/NAMESPACE_PATH]/OVERALL_EXPORT_METADATA_FILE``,
where ``BUCKET_NAME`` is the name of the Cloud Storage
bucket, ``NAMESPACE_PATH`` is an optional Cloud Storage
namespace path (this is not a Cloud Datastore
namespace), and ``OVERALL_EXPORT_METADATA_FILE`` is the
metadata file written by the ExportEntities operation.
For more information about Cloud Storage namespace
paths, see `Object name
considerations <https://cloud.google.com/storage/docs/naming#object-considerations>`__.
For more information, see
[google.datastore.admin.v1.ExportEntitiesResponse.output_url][google.datastore.admin.v1.ExportEntitiesResponse.output_url].
This corresponds to the ``input_url`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
entity_filter (:class:`google.cloud.datastore_admin_v1.types.EntityFilter`):
Optionally specify which kinds/namespaces are to be
imported. If provided, the list must be a subset of the
EntityFilter used in creating the export, otherwise a
FAILED_PRECONDITION error will be returned. If no filter
is specified then all entities from the export are
imported.
This corresponds to the ``entity_filter`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_id, labels, input_url, entity_filter])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = datastore_admin.ImportEntitiesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_id is not None:
request.project_id = project_id
if input_url is not None:
request.input_url = input_url
if entity_filter is not None:
request.entity_filter = entity_filter
if labels:
request.labels.update(labels)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.import_entities,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("project_id", request.project_id),)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=datastore_admin.ImportEntitiesMetadata,
)
# Done; return the response.
return response
async def create_index(
self,
request: Optional[Union[datastore_admin.CreateIndexRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates the specified index. A newly created index's initial
state is ``CREATING``. On completion of the returned
[google.longrunning.Operation][google.longrunning.Operation],
the state will be ``READY``. If the index already exists, the
call will return an ``ALREADY_EXISTS`` status.
During index creation, the process could result in an error, in
which case the index will move to the ``ERROR`` state. The
process can be recovered by fixing the data that caused the
error, removing the index with
[delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex],
then re-creating the index with [create]
[google.datastore.admin.v1.DatastoreAdmin.CreateIndex].
Indexes with a single property cannot be created.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datastore_admin_v1
async def sample_create_index():
# Create a client
client = datastore_admin_v1.DatastoreAdminAsyncClient()
# Initialize request argument(s)
request = datastore_admin_v1.CreateIndexRequest(
)
# Make the request
operation = client.create_index(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datastore_admin_v1.types.CreateIndexRequest, dict]]):
The request object. The request for
[google.datastore.admin.v1.DatastoreAdmin.CreateIndex][google.datastore.admin.v1.DatastoreAdmin.CreateIndex].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.datastore_admin_v1.types.Index`
Datastore composite index definition.
"""
# Create or coerce a protobuf request object.
request = datastore_admin.CreateIndexRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_index,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("project_id", request.project_id),)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
index.Index,
metadata_type=datastore_admin.IndexOperationMetadata,
)
# Done; return the response.
return response
async def delete_index(
self,
request: Optional[Union[datastore_admin.DeleteIndexRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes an existing index. An index can only be deleted if it is
in a ``READY`` or ``ERROR`` state. On successful execution of
the request, the index will be in a ``DELETING``
[state][google.datastore.admin.v1.Index.State]. And on
completion of the returned
[google.longrunning.Operation][google.longrunning.Operation],
the index will be removed.
During index deletion, the process could result in an error, in
which case the index will move to the ``ERROR`` state. The
process can be recovered by fixing the data that caused the
error, followed by calling
[delete][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex]
again.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datastore_admin_v1
async def sample_delete_index():
# Create a client
client = datastore_admin_v1.DatastoreAdminAsyncClient()
# Initialize request argument(s)
request = datastore_admin_v1.DeleteIndexRequest(
)
# Make the request
operation = client.delete_index(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datastore_admin_v1.types.DeleteIndexRequest, dict]]):
The request object. The request for
[google.datastore.admin.v1.DatastoreAdmin.DeleteIndex][google.datastore.admin.v1.DatastoreAdmin.DeleteIndex].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.datastore_admin_v1.types.Index`
Datastore composite index definition.
"""
# Create or coerce a protobuf request object.
request = datastore_admin.DeleteIndexRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_index,
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("index_id", request.index_id),
)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
index.Index,
metadata_type=datastore_admin.IndexOperationMetadata,
)
# Done; return the response.
return response
async def get_index(
self,
request: Optional[Union[datastore_admin.GetIndexRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> index.Index:
r"""Gets an index.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datastore_admin_v1
async def sample_get_index():
# Create a client
client = datastore_admin_v1.DatastoreAdminAsyncClient()
# Initialize request argument(s)
request = datastore_admin_v1.GetIndexRequest(
)
# Make the request
response = await client.get_index(request=request)
# Handle the response
print(response)
Args:
request (Optional[Union[google.cloud.datastore_admin_v1.types.GetIndexRequest, dict]]):
The request object. The request for
[google.datastore.admin.v1.DatastoreAdmin.GetIndex][google.datastore.admin.v1.DatastoreAdmin.GetIndex].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datastore_admin_v1.types.Index:
Datastore composite index definition.
"""
# Create or coerce a protobuf request object.
request = datastore_admin.GetIndexRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_index,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(
("project_id", request.project_id),
("index_id", request.index_id),
)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def list_indexes(
self,
request: Optional[Union[datastore_admin.ListIndexesRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListIndexesAsyncPager:
r"""Lists the indexes that match the specified filters.
Datastore uses an eventually consistent query to fetch
the list of indexes and may occasionally return stale
results.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datastore_admin_v1
async def sample_list_indexes():
# Create a client
client = datastore_admin_v1.DatastoreAdminAsyncClient()
# Initialize request argument(s)
request = datastore_admin_v1.ListIndexesRequest(
)
# Make the request
page_result = client.list_indexes(request=request)
# Handle the response
async for response in page_result:
print(response)
Args:
request (Optional[Union[google.cloud.datastore_admin_v1.types.ListIndexesRequest, dict]]):
The request object. The request for
[google.datastore.admin.v1.DatastoreAdmin.ListIndexes][google.datastore.admin.v1.DatastoreAdmin.ListIndexes].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.datastore_admin_v1.services.datastore_admin.pagers.ListIndexesAsyncPager:
The response for
[google.datastore.admin.v1.DatastoreAdmin.ListIndexes][google.datastore.admin.v1.DatastoreAdmin.ListIndexes].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
request = datastore_admin.ListIndexesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_indexes,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("project_id", request.project_id),)
),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListIndexesAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-datastore-admin",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("DatastoreAdminAsyncClient",)
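# --- Usage sketch (not part of the generated module) ---
# The async client also works as an async context manager, which closes the
# transport on exit. Shown as a comment because this module is not meant to be
# run directly; "my-project" is a placeholder and application default
# credentials are assumed to be configured.
#
#     import asyncio
#     from google.cloud import datastore_admin_v1
#
#     async def main():
#         async with datastore_admin_v1.DatastoreAdminAsyncClient() as client:
#             request = datastore_admin_v1.ListIndexesRequest(project_id="my-project")
#             async for index in await client.list_indexes(request=request):
#                 print(index)
#
#     asyncio.run(main())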
|
{
"content_hash": "e6dd2491e3685a95c8ef599b60fad76a",
"timestamp": "",
"source": "github",
"line_count": 1058,
"max_line_length": 171,
"avg_line_length": 41.7069943289225,
"alnum_prop": 0.6156687667135022,
"repo_name": "googleapis/python-datastore",
"id": "bd96febe7643c3ab1c2563d2a055863fdd28f9f0",
"size": "44726",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/datastore_admin_v1/services/datastore_admin/async_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1177784"
},
{
"name": "Shell",
"bytes": "31942"
}
],
"symlink_target": ""
}
|
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from storm import Nimbus
from storm.ttypes import *
from storm.constants import *
class StormStatus:
def __init__(self,args):
self.nimbus_connected = False
self.nimbus_serv=args.nimbus_serv
self.nimbus_port=args.nimbus_port
self.topology=args.topology
self.topologies=dict()
self.get_topologies()
if self.topology is not None:
self.topologies_to_check=[self.topology]
else:
self.topologies_to_check=self.topologies.keys()
for topology in self.topologies_to_check:
self.get_topology_status(topology)
def get_topologies(self):
try:
socket = TSocket.TSocket(self.nimbus_serv,self.nimbus_port)
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Nimbus.Client(protocol)
transport.open()
summary = client.getClusterInfo()
transport.close()
for topology in summary.topologies:
self.topologies[str(topology.name)]={'id':topology.id,'components':{},'connected':False}
self.nimbus_connected = True
except Thrift.TException, tx:
print "%s" % (tx.message)
def get_topology_status(self,topology):
try:
socket = TSocket.TSocket(self.nimbus_serv,self.nimbus_port)
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Nimbus.Client(protocol)
transport.open()
executors=client.getTopologyInfo(self.topologies[topology]['id']).executors
for executor in executors:
component=executor.component_id
self.topologies[topology]['components'].setdefault(component, {'bolts':[], 'spouts':[]})
task_start=executor.executor_info.task_start
task_end=executor.executor_info.task_end
spout_stats=executor.stats.specific.spout
bolt_stats=executor.stats.specific.bolt
if bolt_stats:
self.topologies[topology]['components'][component]['bolts'].append({
'id':str(task_start) + '-' + str(task_end),
'stats' : self.boltToDict(bolt_stats)})
self.topologies[topology]['connected'] = True
except Thrift.TException, tx:
print "%s" % (tx.message)
def boltToDict(self,bolt_stats):
out={'process_ms_avg':{},'executed':{},'execute_ms_avg':{},'acked':{},'failed':{},'load':{}}
for period in [':all-time','86400','10800','600']:
out['process_ms_avg'][period]=bolt_stats.process_ms_avg[period].values()[0] if bolt_stats.process_ms_avg[period] else 0
out['executed'][period]=bolt_stats.executed[period].values()[0] if bolt_stats.executed[period] else 0
out['execute_ms_avg'][period]=bolt_stats.execute_ms_avg[period].values()[0] if bolt_stats.execute_ms_avg[period] else 0
out['failed'][period]=bolt_stats.failed[period].values()[0] if bolt_stats.failed[period] else 0
out['acked'][period]=bolt_stats.acked[period].values()[0] if bolt_stats.acked[period] else 0
out['load'][period]=out['process_ms_avg'][period]*out['executed'][period]/(int(period)*1000) if period != ":all-time" else 0
return out
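# --- Usage sketch (not part of the original module) ---
# StormStatus expects an object exposing nimbus_serv, nimbus_port and topology
# attributes, typically produced by argparse. The defaults below (localhost and
# the standard Nimbus thrift port 6627) are assumptions for illustration only.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Report Storm topology status via Nimbus')
    parser.add_argument('--nimbus_serv', default='localhost')
    parser.add_argument('--nimbus_port', type=int, default=6627)
    parser.add_argument('--topology', default=None)
    status = StormStatus(parser.parse_args())
    for name, info in status.topologies.items():
        print "%s connected=%s" % (name, info['connected'])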
|
{
"content_hash": "7bcbc7c7e824b5378773a40fef19726d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 136,
"avg_line_length": 46.44871794871795,
"alnum_prop": 0.6160640353298371,
"repo_name": "keedio/nagios-hadoop",
"id": "288e6868872b57ab52f0064b260d51cac3aaa9e3",
"size": "4521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stormStatus.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "78099"
},
{
"name": "Shell",
"bytes": "8589"
}
],
"symlink_target": ""
}
|
import json, requests, hmac
class Binance:
"""
    Developed by Mark Horner to show how to use and interact with the Binance API from Python,
    carrying out tasks such as placing orders, viewing orders, viewing balances and looking at symbol depth.
    As of 2017/10/18 the apiurl and signedurl values are correct - these may need updating in future if
    the Binance API changes.
"""
apiurl = 'https://www.binance.com/api/v1/'
signedurl = 'https://www.binance.com/api/v3/'
secret = ''
APIkey = ''
def __init__(self,APIkey,Secret):
"""
        When instantiating, set your API key and secret in the constructor.
        Note: persistent errors in requests may be due to incorrect keys (check for a trailing space)
        or incorrect letter case.
See https://www.binance.com/restapipub.html#grip-content for more info
"""
self.secret = Secret
self.APIkey = APIkey
def publicrequest(self, url):
"""
To be used for any public request not requiring a signed or api-key request.
Will return an object in JSON format, or raise an exception indicating there's an error to look at.
"""
r = requests.get(url)
if r.status_code == 200:
return r.json()
else:
raise Exception('Error number : ' + str(r.status_code) +'. Please investigate.')
def servertime(self):
"""
A simple helper function to poll the server and give us the server time in string format
"""
return str(self.publicrequest(self.apiurl + 'time')['serverTime'])
    def signedrequest(self, url, data=None, type='get'):
        """
        A function to generate signed requests. All parameters (including the
        signature) are sent in the query string rather than the request body,
        so GET, POST and DELETE requests can share the same signing logic.
        """
        # Avoid a mutable default argument: the dict is updated with a
        # timestamp below, which would otherwise persist across calls.
        if data is None:
            data = {}
        hdrs = ({'Content-type': 'application/x-www-form-urlencoded',
                 'X-MBX-APIKEY': self.APIkey})
data.update({'timestamp': str(self.servertime())})
query = ''.join([('&' + k + '=' + v) for (k, v) in data.items()])[1:]
signature = hmac.new(key=bytes(self.secret, 'utf-8'),
msg=bytes(query, 'utf-8'),
digestmod='sha256').hexdigest()
if type == 'get':
r = requests.get(url + '?' + query + '&signature=' + signature, headers=hdrs)
return r.json()
if type == 'post':
r = requests.post(url + '?' + query + '&signature=' + signature, headers=hdrs)
return r.json()
if type =='delete':
r = requests.delete(url + '?' + query + '&signature=' + signature, headers=hdrs)
return r.json()
def allsymbolprices(self):
"""
Returns json of all symbols and current prices
"""
vals = self.publicrequest(self.apiurl + 'ticker/allBookTickers')
print(json.dumps(vals, indent=1))
return vals
def currentsymbolprice(self, symbol):
"""
Returns the current ask and bid price for a symbol.
"""
vals = self.publicrequest(self.apiurl + 'ticker/24hr?symbol=' + symbol)
val = json.dumps({'pair' : symbol,
'askPrice' : vals['askPrice'],
'bidPrice' : vals['bidPrice']})
print(json.dumps(vals, indent=1))
return val
def currentdepth(self, symbol, limit = 10):
"""
Retrieve the depth of a symbol - a default limit of 10. Acceptable values for the Limit value are
5, 10, 20, 50, 100, 200 and 500. If any other value is entered it will default to 10.
"""
if not (limit in {50, 20, 100, 500, 5, 200, 10}):
print('Unacceptable value for limit entered ' + str(limit) + ', defaulting to 10')
limit = 10
vals = self.publicrequest(self.apiurl + '/depth?symbol=' + symbol + '&limit=' + str(limit))
asks = []
bids = []
for ask in vals['asks']:
asks.append({'Price' : ask[0],
'Quantity' : ask[1]})
for bid in vals['bids']:
bids.append({'Price' : bid[0],
'Quantity' : bid[1]})
val = json.dumps({'Symbol' : symbol,
'Asks' : asks,
'Bids' : bids})
print(json.dumps(vals, indent=1))
return val
def allorders(self, symbol, orderid=1):
vals = self.signedrequest(url=self.signedurl + 'allOrders',
data={'symbol': symbol,
'timestamp': self.servertime(),
'orderId': str(orderid)})
print(json.dumps(vals, indent=1))
return vals
def alltrades(self, symbol):
vals = self.signedrequest(url=self.signedurl + 'myTrades',
data={'symbol': symbol,
'timestamp': self.servertime()})
print(json.dumps(vals, indent=1))
return vals
def openorders(self, symbol):
vals = self.signedrequest(url=self.signedurl + 'openOrders',
data={'symbol': symbol,
'timestamp': self.servertime()})
print(json.dumps(vals,indent=1))
return vals
def neworder(self, symbol, side, type, timeInForce, quantity, price, stopPrice = '0', icebergQty = '0',
recvWindow = '5000', newClientOrderId='' ):
"""
Function to generate a new order based on entered parameters.
Note ENUMS for;
Order type = 'LIMIT', 'MARKET'
Order Side = 'BUY', 'SELL'
Time in force = 'GTC', 'IOC'
"""
args = {'symbol': symbol,
'side' : side,
'type' : type,
'timeInForce' : timeInForce,
'quantity' : quantity,
'price': price,
'stopPrice': stopPrice,
                'icebergQty': icebergQty,
'recvWindow': recvWindow,
'newClientOrderId': newClientOrderId}
if newClientOrderId == '':
args.pop('newClientOrderId')
if icebergQty == '0':
            args.pop('icebergQty')
if stopPrice == '0':
args.pop('stopPrice')
vals = self.signedrequest(url = self.signedurl + 'order', data=args, type='post')
print(json.dumps(vals, indent=1))
return vals
def queryorder(self, symbol, orderId = '', origClientOrderId = ''):
"""
Queries one specific order by either ID or Orig Client Order Id (specified by user when generated).
If there is no value entered for either, no search is made and user is notified via print.
"""
if orderId != '':
vals = self.signedrequest(url=self.signedurl + 'order',
data={'symbol': symbol,
'orderId': orderId,
'timestamp': self.servertime()})
print(json.dumps(vals, indent=1))
return vals
else:
if origClientOrderId != '':
vals = self.signedrequest(url=self.signedurl + 'order',
data={'symbol': symbol,
'origClientOrderId': origClientOrderId,
'timestamp': self.servertime()})
print(json.dumps(vals, indent=1))
return vals
print('No order to search for, please enter an orderId or an origClientOrderId')
def cancelorder(self, symbol, orderId):
"""
Cancel a specific order by order ID. If no order is found with that ID, response is printed to that effect.
"""
vals = self.signedrequest(url=self.signedurl + 'order', type='delete',
data={'symbol': symbol,
'orderId': orderId,
'timestamp': self.servertime()})
print(json.dumps(vals, indent=1))
return vals
def showbalances(self):
"""
This will return the balances for any assets that the user holds a balance in, either locked or free.
"""
vals = self.signedrequest(url=self.signedurl + 'account',
data={'timestamp': self.servertime()})
balances = []
for asset in vals['balances']:
if float(asset['free']) > 0 or float(asset['locked']) > 0:
balances.append({'Asset': asset['asset'],
'Free' : asset['free'],
'Locked': asset['locked']})
print(json.dumps(balances, indent=1))
return balances
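# --- Usage sketch (not part of the original class) ---
# The key and secret below are placeholders; signed endpoints such as
# showbalances() need real Binance credentials, while public endpoints such as
# currentsymbolprice() and currentdepth() work without them.
if __name__ == '__main__':
    client = Binance(APIkey='YOUR_API_KEY', Secret='YOUR_SECRET')
    client.currentsymbolprice('ETHBTC')     # public: prints the 24hr ticker
    client.currentdepth('ETHBTC', limit=5)  # public: prints order book depth
    # client.showbalances()                 # signed: uncomment with real keys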
|
{
"content_hash": "d136b7cade5ca0a5cd3f199f9990ce3c",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 115,
"avg_line_length": 44.59708737864078,
"alnum_prop": 0.5154021987591162,
"repo_name": "markhorner123/BinancePY",
"id": "081792b668d2f01ac4fe5acfa6b262c4a25c9b77",
"size": "9187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BinancePY.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10229"
}
],
"symlink_target": ""
}
|
"""
The Win32 clipboard uses a special format for handling HTML. The basic
problem that the special format is trying to solve is that the user can
select an arbitrary chunk of formatted text that might not be valid HTML.
For instance selecting half-way through a bolded word would contain no </b>
tag. The solution is to encase the fragment in a valid HTML document.
You can read more about this at:
http://msdn.microsoft.com/workshop/networking/clipboard/htmlclipboard.asp
This module deals with converting between the clipboard HTML format and
standard HTML format.
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import logging
import re
# ----------------------------------------------------------------------------
# Private Functions
# ----------------------------------------------------------------------------
def _findFirst( pattern, src ):
"""
A helper function that simplifies the logic of using regex to find
the first match in a string.
"""
results = re.findall( pattern, src )
if len(results) > 0:
return results[0]
return None
# ----------------------------------------------------------------------------
# HtmlClipboardFormat Object
# ----------------------------------------------------------------------------
class HtmlClipboardFormat:
"""
    Encapsulates the conversion between the clipboard HTML
    format and standard HTML format.
"""
# The 1.0 HTML clipboard header format.
HEADER_FORMAT = \
"Version:1.0\r\n" \
"StartHTML:%(htmlStart)09d\r\n" \
"EndHTML:%(htmlEnd)09d\r\n" \
"StartFragment:%(fragmentStart)09d\r\n" \
"EndFragment:%(fragmentEnd)09d\r\n" \
"StartSelection:%(fragmentStart)09d\r\n" \
"EndSelection:%(fragmentEnd)09d\r\n" \
"SourceURL:Enso\r\n"
# A generic HTML page.
HTML_PAGE = \
"<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2//EN\">\n" \
"<html>\n<head><title></title></head>\n" \
"<body>%s</body>\n" \
"</html>"
# These regexps find the character offsets of the fragment strings (see
# below) from the HTML clipboard format header.
START_RE = "StartFragment:(\d+)"
END_RE = "EndFragment:(\d+)"
# The Clipboard HTML format uses the following comment strings to mark
# the beginning and end of the text fragment which represents the user's
# actual selection; everything else is envelope.
START_FRAG = "<!-- StartFragment -->"
END_FRAG = "<!-- EndFragment -->"
def __init__( self, html ):
"""
Initializes the class to represent html.
"""
# Preconditions:
assert( type( html ) == unicode )
# The internal storage format is platonic unicode.
self.html = html
@classmethod
def fromClipboardHtml( cls, clipboardHtml ):
"""
Instantiates the class given a string containing the Win32 Html
Clipboard format. The given clipboardHtml is expected to be in
utf-8 and is expected to contain the special start-fragment and
end-fragment markers as defined in the class constants. If it's
not utf-8 or if it doesn't have the right delimiters, this function
logs a warning message and creates an instance empty of text.
"""
# Preconditions:
assert( type( clipboardHtml ) == str )
try:
html = clipboardHtml.decode( "utf-8" )
except UnicodeDecodeError:
# input can't be decoded from utf-8:
logging.warn( "Non-Utf-8 string in fromClipboardHtml." )
return cls( u"" )
start = _findFirst( cls.START_RE, clipboardHtml )
end = _findFirst( cls.END_RE, clipboardHtml )
if start and end:
html = clipboardHtml[ int(start): int(end) ]
html = html.decode( "utf-8" )
return cls( html )
else:
# Start and end not found in input:
logging.warn( "Missing delimiters in fromClipboardHtml." )
return cls( u"" )
@classmethod
def fromHtml( cls, html ):
"""
Instantiates the class given a string containing plain Html.
"""
# Preconditions:
assert( isinstance( html, unicode ) )
return cls( html )
def toClipboardHtml( self ):
"""
Returns the contents in the Win32 Html format.
"""
return self._encodeHtmlFragment( self.html )
def toHtml( self ):
"""
Returns the contents in the plain Html format.
"""
return self.html
def _createHtmlPage( self, fragment ):
"""
Takes an Html fragment and encloses it in a full Html page.
"""
return self.HTML_PAGE % fragment
def _encodeHtmlFragment(self, sourceHtml):
"""
Join all our bits of information into a string formatted as per the
clipboard HTML format spec.
The return value of this function is a Python string
encoded in UTF-8.
"""
# Preconditions:
assert( type( sourceHtml ) == unicode )
# LONGTERM TODO: The above contract statement involving
# .encode().decode() could have damaging performance
# repercussions.
# NOTE: Every time we construct a string, we must encode it to
# UTF-8 *before* we do any position-sensitive operations on
# it, such as taking its length or finding a substring
# position.
if "<body>" in sourceHtml:
htmlheader, fragment = sourceHtml.split( "<body>" )
fragment, footer = fragment.split( "</body>" )
htmlheader = htmlheader + "<body>"
footer = "</body>" + footer
fragment = "".join( [self.START_FRAG,
fragment,
self.END_FRAG] )
html = "".join([ htmlheader, fragment, footer ])
else:
fragment = sourceHtml
html = self._createHtmlPage( fragment )
fragment = fragment.encode( "utf-8" )
html = html.encode( "utf-8" )
assert html == html.decode( "utf-8" ).encode( "utf-8" ), \
"Encoding got out of whack in HtmlClipboardFormat."
# How long is the header going to be?
dummyHeader = self.HEADER_FORMAT % dict( htmlStart = 0,
htmlEnd = 0,
fragmentStart = 0,
fragmentEnd = 0 )
dummyHeader = dummyHeader.encode( "utf-8" )
headerLen = len(dummyHeader)
fragmentStart = html.find( fragment )
fragmentEnd = fragmentStart + len( fragment )
positions = dict( htmlStart = headerLen,
htmlEnd = headerLen + len(html),
fragmentStart = headerLen + fragmentStart,
fragmentEnd = headerLen + fragmentEnd )
header = self.HEADER_FORMAT % positions
header = header.encode( "utf-8" )
result = header + html
# Postconditions:
assert( type( result ) == str )
assert( result == result.decode( "utf-8" ).encode( "utf-8" ) )
return result
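# --- Usage sketch (not part of the original module) ---
# Round-trips a small HTML fragment through the Win32 clipboard HTML format:
# toClipboardHtml() wraps the fragment in a header plus a full HTML page, and
# fromClipboardHtml() recovers the fragment from the offsets in that header.
if __name__ == "__main__":
    original = HtmlClipboardFormat.fromHtml( u"<b>hello</b> world" )
    clipboard_str = original.toClipboardHtml()            # utf-8 str with header
    recovered = HtmlClipboardFormat.fromClipboardHtml( clipboard_str )
    print recovered.toHtml()                              # -> <b>hello</b> world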
|
{
"content_hash": "883ccf10e4020b27ac53b0b00bee29e8",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 36.23474178403756,
"alnum_prop": 0.5234516714174656,
"repo_name": "tectronics/enso",
"id": "a9adc0ec241ac9550e15409308868d625168c211",
"size": "9263",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "enso/platform/win32/selection/HtmlClipboardFormat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4635533"
},
{
"name": "C++",
"bytes": "401064"
},
{
"name": "Objective-C",
"bytes": "11847"
},
{
"name": "Python",
"bytes": "593013"
}
],
"symlink_target": ""
}
|
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AppPlatformManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for AppPlatformManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2022-01-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(AppPlatformManagementClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2022-01-01-preview") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-appplatform/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
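# --- Usage sketch (not part of the generated module) ---
# This configuration is normally built for you by AppPlatformManagementClient,
# but it can be constructed directly. Shown as a comment because this module
# uses a relative import and is not meant to be run as a script; the
# subscription id is a placeholder and DefaultAzureCredential comes from the
# separate azure-identity package.
#
#     from azure.identity.aio import DefaultAzureCredential
#
#     config = AppPlatformManagementClientConfiguration(
#         credential=DefaultAzureCredential(),
#         subscription_id="00000000-0000-0000-0000-000000000000",
#     )
#     assert "https://management.azure.com/.default" in config.credential_scopes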
|
{
"content_hash": "8520b91d5f95a88955a196edccaf7caa",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 130,
"avg_line_length": 49.753846153846155,
"alnum_prop": 0.717687074829932,
"repo_name": "Azure/azure-sdk-for-python",
"id": "7b9398e33727dc5e69de61b8828116622e9adf94",
"size": "3702",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2022_01_01_preview/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import argparse
def print_file(input_file):
"""
Function opens a file and prints the
results to stdout.
@param input_file
"""
    with open(input_file) as f:
        for line in f:
            # The trailing comma suppresses print's extra newline (each line
            # already ends with one), keeping the output cat-like.
            print line,
def main():
"""
First function to be run in a command-line program
"""
    # Create a basic command-line program that reads a file and prints it out.
parser = argparse.ArgumentParser(description="Essentially cat, but for only one file")
# Adds a input file argument to the parser.
parser.add_argument("input_file")
# parses the arguments and stores the arguments in a dictionary.
args = parser.parse_args()
# Call the print_file function.
print_file(args.input_file)
if __name__ == "__main__":
main()
|
{
"content_hash": "4f420890b3faab8a5c0743f3073209fa",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 90,
"avg_line_length": 27.20689655172414,
"alnum_prop": 0.6311787072243346,
"repo_name": "mikblack/python_introduction",
"id": "9e89b520e2e9be6c43454c4589391c9dc42efaa4",
"size": "861",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lesson/boiler_plate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "861"
}
],
"symlink_target": ""
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "isosurface.colorbar.title"
_path_str = "isosurface.colorbar.title.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.isosurface.col
orbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.colorbar.title.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
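# --- Usage sketch (not part of the generated module) ---
# The Font object is normally reached through a figure, e.g.
# fig.data[0].colorbar.title.font on an isosurface trace, but it can also be
# built standalone; the values below are arbitrary examples.
if __name__ == "__main__":
    font = Font(color="black", family="Arial", size=14)
    print(font.to_plotly_json())  # e.g. {'color': 'black', 'family': 'Arial', 'size': 14}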
|
{
"content_hash": "eaa6159bda4bc3444e34297665b54c9f",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 82,
"avg_line_length": 37.54385964912281,
"alnum_prop": 0.5614485981308411,
"repo_name": "plotly/plotly.py",
"id": "7c93d93e3ce30a026c3a4b6c92256e6240b21432",
"size": "8560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/isosurface/colorbar/title/_font.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import sys
import unittest
import json
from libcloud.utils.py3 import httplib
from libcloud.dns.base import Record, Zone
from libcloud.dns.drivers.rcodezero import RcodeZeroDNSDriver
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordType
from libcloud.test import LibcloudTestCase, MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
class RcodeZeroDNSTestCase(LibcloudTestCase):
def setUp(self):
RcodeZeroDNSDriver.connectionCls.conn_class = RcodeZeroDNSMockHttp
RcodeZeroDNSMockHttp.type = None
self.driver = RcodeZeroDNSDriver('mysecret')
self.test_zone = Zone(id='example.at', domain='example.at',
driver=self.driver, type='master', ttl=None,
extra={})
self.test_record = Record(id=None, name='', data='192.0.2.1',
type=RecordType.A, zone=self.test_zone,
driver=self.driver, extra={})
def test_create_record(self):
record = self.test_zone.create_record(name='newrecord',
type=RecordType.A,
data='192.0.5.4',
extra={'ttl': 86400})
self.assertEqual(record.id, None)
self.assertEqual(record.name, 'newrecord')
self.assertEqual(record.data, '192.0.5.4')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.ttl, 86400)
def test_create_zone(self):
extra = {"masters": ["193.0.2.2"]}
zone = self.driver.create_zone(
"example.at", type='slave', extra=extra)
self.assertEqual(zone.id, 'example.at.')
self.assertEqual(zone.domain, 'example.at')
self.assertEqual(zone.type, 'slave')
self.assertEqual(zone.ttl, None)
def test_delete_record(self):
self.assertTrue(self.test_record.delete())
def test_delete_zone(self):
self.assertTrue(self.test_zone.delete())
def test_get_record(self):
record = self.driver.get_record('example.at.', '12345')
self.assertEqual(record, None)
def test_get_zone(self):
zone = self.driver.get_zone('example.at')
self.assertEqual(zone.id, 'example.at')
self.assertEqual(zone.domain, 'example.at')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.ttl, None)
def test_list_record_types(self):
result = self.driver.list_record_types()
self.assertEqual(len(result), 25)
def test_list_records(self):
records = self.driver.list_records(self.test_zone)
self.assertEqual(len(records), 3)
def test_list_zones(self):
zones = self.driver.list_zones()
self.assertEqual(zones[0].id, 'example1.at')
self.assertEqual(zones[0].domain, 'example1.at')
self.assertEqual(zones[0].type, 'slave')
self.assertEqual(zones[0].ttl, None)
self.assertEqual(zones[0].extra['masters'][0], '193.0.2.2')
self.assertEqual(zones[0].extra['serial'], 20180411)
self.assertEqual(zones[1].id, 'example2.at')
self.assertEqual(zones[1].domain, 'example2.at')
self.assertEqual(zones[1].type, 'master')
self.assertEqual(zones[1].ttl, None)
def test_update_record(self):
record = self.driver.update_record(self.test_record,
name='newrecord',
type=RecordType.A,
data='127.0.0.1',
extra={'ttl': 300})
self.assertEqual(record.id, '430059068a39501b8df5a577b1e46ad7')
self.assertEqual(record.name, 'newrecord')
self.assertEqual(record.data, '127.0.0.1')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.ttl, 300)
def test_update_zone(self):
with self.assertRaises(NotImplementedError):
self.driver.update_zone(self.test_zone, 'example.at')
# # Test some error conditions
def test_create_existing_zone(self):
RcodeZeroDNSMockHttp.type = 'EXISTS'
extra = {'masters': ['193.0.2.2']}
with self.assertRaises(ZoneAlreadyExistsError):
self.driver.create_zone(
"example1.at", type='slave', extra=extra)
def test_get_missing_zone(self):
RcodeZeroDNSMockHttp.type = 'MISSING'
with self.assertRaises(ZoneDoesNotExistError):
self.driver.get_zone('example.com')
def test_delete_missing_zone(self):
RcodeZeroDNSMockHttp.type = 'MISSING'
self.assertFalse(self.test_zone.delete())
class RcodeZeroDNSMockHttp(MockHttp):
fixtures = DNSFileFixtures('rcodezero')
base_headers = {'content-type': 'application/json'}
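    # Roughly how these handlers get picked (background note, not from the
    # original file): MockHttp dispatches each request to a method named after
    # the URL path with non-alphanumeric characters replaced by underscores,
    # appending the class-level `type` as a suffix (e.g. `..._MISSING`), which
    # is why the handler names below mirror the API paths.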
def _api_v1_zones(self, method, url, body, headers):
if method == 'GET':
# list_zones()
body = self.fixtures.load('list_zones.json')
elif method == 'POST':
# create_zone()
# Don't bother with a fixture for this operation, because we do
# nothing with the parsed body anyway.
body = ''
else:
raise NotImplementedError('Unexpected method: %s' % method)
return (httplib.OK, body, self.base_headers,
httplib.responses[httplib.OK])
def _api_v1_zones_example_at(self, method, *args, **kwargs):
if method == 'GET':
# list_records()
body = self.fixtures.load('get_zone_details.json')
elif method == 'DELETE':
# delete_zone()
return (httplib.NO_CONTENT, '', self.base_headers,
httplib.responses[httplib.NO_CONTENT])
else:
raise NotImplementedError('Unexpected method: %s' % method)
return (httplib.OK, body, self.base_headers,
httplib.responses[httplib.OK])
def _api_v1_zones_example_at__rrsets(self, method, *args, **kwargs):
        return self._api_v1_zones_example_at_rrsets(method, *args, **kwargs)
def _api_v1_zones_example_at_rrsets(self, method, *args, **kwargs):
if method == 'GET':
# list_records()
body = self.fixtures.load('list_records.json')
elif method == 'PATCH':
# create/update/delete_record()
# Don't bother with a fixture for these operations, because we do
# nothing with the parsed body anyway.
body = ''
else:
raise NotImplementedError('Unexpected method: %s' % method)
return (httplib.OK, body, self.base_headers,
httplib.responses[httplib.OK])
def _api_v1_zones_EXISTS(self, method, url, body, headers):
# create_zone() is a POST. Raise on all other operations to be safe.
if method != 'POST':
raise NotImplementedError('Unexpected method: %s' % method)
payload = json.loads(body)
domain = payload['domain']
body = json.dumps({'error': "Domain '%s' already exists" % domain})
return (httplib.UNPROCESSABLE_ENTITY, body, self.base_headers,
'Unprocessable Entity')
def _api_v1_zones_example_com_MISSING(self, *args, **kwargs):
return (httplib.NOT_FOUND,
'{"status": "failed","message": "Zone not found"}',
self.base_headers, 'Unprocessable Entity')
def _api_v1_zones_example_at_MISSING(self, *args, **kwargs):
return (httplib.NOT_FOUND,
'{"status": "failed","message": "Zone not found"}',
self.base_headers, 'Unprocessable Entity')
if __name__ == '__main__':
sys.exit(unittest.main())
|
{
"content_hash": "54e3a74e6e66a9c3f90cd8792dd36315",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 78,
"avg_line_length": 40.49740932642487,
"alnum_prop": 0.593654042988741,
"repo_name": "Scalr/libcloud",
"id": "a05140bde1e1e32415255e88074b7c3739b0a61d",
"size": "8565",
"binary": false,
"copies": "4",
"ref": "refs/heads/trunk",
"path": "libcloud/test/dns/test_rcodezero.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "859"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "7402898"
},
{
"name": "Shell",
"bytes": "5933"
}
],
"symlink_target": ""
}
|
from django.db import models
class Model(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True)
def _do_insert(self, manager, *a, **kw):
'''
This is required as part of the `insert...returning` hack.
        All it does is replace the base manager in the call
with the specified manager, which does the rest of the work.
'''
if getattr(self.__class__.objects, 'insert_returning', False):
manager = self.__class__.objects
return super(Model, self)._do_insert(manager, *a, **kw)
class Meta:
abstract = True
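# Illustrative sketch (not part of the original module): a concrete model would
# inherit the timestamp fields and the `_do_insert` override from the abstract
# base above, e.g.
#
#     class Article(Model):
#         title = models.CharField(max_length=255)
#
#     article = Article.objects.create(title="hello")
#     # article.created_at and article.last_modified are populated automatically.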
|
{
"content_hash": "568e62cd756b2fd60d6ef013e1650145",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 34.68421052631579,
"alnum_prop": 0.629742033383915,
"repo_name": "joealcorn/berth.cc",
"id": "00333214a9666a1f2aa43cb71330792a8f3afb36",
"size": "659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "berth/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1007"
},
{
"name": "Python",
"bytes": "42875"
}
],
"symlink_target": ""
}
|
"""Test classes for errors.py"""
import pytest
import re
import sys
from tests import paths
from li.errors import LicenseError
def test_error_message_contains_wanted():
try:
raise LicenseError('foo')
except LicenseError:
_,e,_ = sys.exc_info()
assert hasattr(e, 'message')
assert re.search(r'foo', e.message) is not None
def test_error_message_contains_error_preamble():
try:
raise LicenseError('foo')
except LicenseError:
_,e,_ = sys.exc_info()
assert hasattr(e, 'message')
assert re.search(r'Error.*:', e.message) is not None
def test_error_message_preamble_is_formatted_well():
try:
raise LicenseError('foo')
except LicenseError:
_,e,_ = sys.exc_info()
assert hasattr(e, 'message')
assert re.search(r'\033\[91mError\033\[0;m.*:', e.message) is not None
def test_error_message_is_fine_all_together():
try:
raise LicenseError('foo')
except LicenseError:
_,e,_ = sys.exc_info()
assert hasattr(e, 'message')
assert e.message == "\033[91mError\033[0;m: foo"
|
{
"content_hash": "aa48538fd2f3922e5b2e5d416555072d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 78,
"avg_line_length": 27.390243902439025,
"alnum_prop": 0.6206589492430988,
"repo_name": "goldsborough/li",
"id": "f0407c8e07fd9e5a67ee5a33fa7ae1cf2792d00b",
"size": "1170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1695"
},
{
"name": "Python",
"bytes": "16045"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('halfwayapp', '0006_auto_20160224_1904'),
]
operations = [
migrations.AlterField(
model_name='participant',
name='starting_location',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='halfwayapp.Address'),
),
]
|
{
"content_hash": "ddb560ce4ae42dacc90c883db907bc81",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 120,
"avg_line_length": 26.31578947368421,
"alnum_prop": 0.652,
"repo_name": "cszc/meethalfway",
"id": "1cec5f793f021d778b37d34f52ca3c12db061d5e",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangohalfway/halfwayapp/migrations/0007_auto_20160224_1908.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7663"
},
{
"name": "Python",
"bytes": "34804"
}
],
"symlink_target": ""
}
|
from .views import ProjectViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'projects', ProjectViewSet)
urlpatterns = router.urls
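# Note: with DefaultRouter, `router.urls` includes an API root view plus the
# standard list and detail routes for the registered viewset, roughly
# 'projects/' and 'projects/<pk>/'; the route names depend on the viewset's
# basename, which is derived from its queryset unless given explicitly.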
|
{
"content_hash": "44980b6bdb2f8189bd37555397d50600",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 48,
"avg_line_length": 30,
"alnum_prop": 0.8277777777777777,
"repo_name": "paulsheridan/django-portfolio",
"id": "11616d87a74bba242c8f8f273649e464f7154a2f",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "portfolio/projects/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2662"
},
{
"name": "HTML",
"bytes": "6840"
},
{
"name": "JavaScript",
"bytes": "1376"
},
{
"name": "Python",
"bytes": "14566"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(self, plotly_name="title", parent_name="surface.colorbar", **kwargs):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
""",
),
**kwargs
)
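# Rough usage sketch (an assumption, not part of this module): this validator
# is exercised when user code sets the nested colorbar title, e.g.
#
#     import plotly.graph_objects as go
#     fig = go.Figure(
#         go.Surface(colorbar=dict(title=dict(text="Value", side="right")))
#     )
#
# and the `title` dict is coerced into the corresponding Title data class.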
|
{
"content_hash": "745f00cde8fe399aa868644920b8ffb9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 86,
"avg_line_length": 39.74193548387097,
"alnum_prop": 0.5430194805194806,
"repo_name": "plotly/python-api",
"id": "24d70939442383facd39d6184686680f765a9992",
"size": "1232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/surface/colorbar/_title.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import collections
import json
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
CURSOR,
GET_ITERATOR_CHUNK_SIZE,
MULTI,
NO_RESULTS,
ORDER_DIR,
SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r"^(.*)\s(?:ASC|DESC).*",
re.MULTILINE | re.DOTALL,
)
def __init__(self, query, connection, using, elide_empty=True):
self.query = query
self.connection = connection
self.using = using
# Some queries, e.g. coalesced aggregation, need to be executed even if
# they would return an empty result set.
self.elide_empty = elide_empty
self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by QuerySet.iterator();
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"model={self.query.model.__qualname__} "
f"connection={self.connection!r} using={self.using!r}>"
)
def setup_query(self, with_col_aliases=False):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select(
with_col_aliases=with_col_aliases,
)
self.col_count = len(self.select)
def pre_sql_setup(self, with_col_aliases=False):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query(with_col_aliases=with_col_aliases)
order_by = self.get_order_by()
self.where, self.having, self.qualify = self.query.where.split_having_qualify(
must_group_by=self.query.group_by is not None
)
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk')
# .annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
allows_group_by_refs = self.connection.features.allows_group_by_refs
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expr = self.query.resolve_ref(expr)
if not allows_group_by_refs and isinstance(expr, Ref):
expr = expr.source
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
aliased_exprs = {}
for expr, _, alias in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
if alias:
aliased_exprs[expr] = alias
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
if not self._meta_ordering:
for expr, (sql, params, is_ref) in order_by:
# Skip references to the SELECT clause, as all expressions in
# the SELECT clause are already part of the GROUP BY.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
if allows_group_by_refs and (alias := aliased_exprs.get(expr)):
expr = Ref(alias, expr)
try:
sql, params = self.compile(expr)
except EmptyResultSet:
continue
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
# Is this a reference to query's base table primary key? If the
# expression isn't a Col-like, then skip the expression.
if (
getattr(expr, "target", None) == self.query.model._meta.pk
and getattr(expr, "alias", None) == self.query.base_table
):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias
for expr in expressions
if hasattr(expr, "target") and expr.target.primary_key
}
expressions = [pk] + [
expr
for expr in expressions
if expr in having
or (
getattr(expr, "alias", None) is not None
and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr
for expr in expressions
if (
hasattr(expr, "target")
and expr.target.primary_key
and self.connection.features.allows_group_by_selected_pks_on_model(
expr.target.model
)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr
for expr in expressions
if expr in pks or getattr(expr, "alias", None) not in aliases
]
return expressions
def get_select(self, with_col_aliases=False):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
select_mask = self.query.get_select_mask()
if self.query.default_cols:
cols = self.get_default_columns(select_mask)
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
"model": self.query.model,
"select_fields": select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select, select_mask)
klass_info["related_klass_infos"] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info["related_klass_infos"]:
if ki["from_parent"]:
ki["select_fields"] = (
klass_info["select_fields"] + ki["select_fields"]
)
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
col_idx = 1
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
empty_result_set_value = getattr(
col, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
# Select a predicate that's always False.
sql, params = "0", ()
else:
sql, params = self.compile(Value(empty_result_set_value))
else:
sql, params = col.select_format(self, sql, params)
if alias is None and with_col_aliases:
alias = f"col{col_idx}"
col_idx += 1
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def _order_by_pairs(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif (meta := self.query.get_meta()) and meta.ordering:
ordering = meta.ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
default_order, _ = ORDER_DIR["ASC"]
else:
default_order, _ = ORDER_DIR["DESC"]
for field in ordering:
if hasattr(field, "resolve_expression"):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
if isinstance(field.expression, F) and (
annotation := self.query.annotation_select.get(
field.expression.name
)
):
field.expression = Ref(field.expression.name, annotation)
yield field, isinstance(field.expression, Ref)
continue
if field == "?": # random
yield OrderBy(Random()), False
continue
col, order = get_order_dir(field, default_order)
descending = order == "DESC"
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
yield (
OrderBy(
Ref(col, self.query.annotation_select[col]),
descending=descending,
),
True,
)
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
if self.query.combinator and self.select:
# Don't use the resolved annotation because other
# combinated queries might define it differently.
expr = F(col)
else:
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
yield OrderBy(expr, descending=descending), False
continue
if "." in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split(".", 1)
yield (
OrderBy(
RawSQL(
"%s.%s" % (self.quote_name_unless_alias(table), col), []
),
descending=descending,
),
False,
)
continue
if self.query.extra and col in self.query.extra:
if col in self.query.extra_select:
yield (
OrderBy(
Ref(col, RawSQL(*self.query.extra[col])),
descending=descending,
),
True,
)
else:
yield (
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False,
)
else:
if self.query.combinator and self.select:
# Don't use the first model's field because other
# combinated queries might define it differently.
yield OrderBy(F(col), descending=descending), False
else:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
yield from self.find_ordering_name(
field,
self.query.get_meta(),
default_order=default_order,
)
def get_order_by(self):
"""
Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
the ORDER BY clause.
The order_by clause can alter the select clause (for example it can add
aliases to clauses that do not yet have one, or it can add totally new
select clauses).
"""
result = []
seen = set()
replacements = {
expr: Ref(alias, expr)
for alias, expr in self.query.annotation_select.items()
}
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if not is_ref and self.query.combinator and self.select:
src = resolved.expression
expr_src = expr.expression
for sel_expr, _, col_alias in self.select:
if col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions(
[Ref(col_alias if col_alias else src.target.column, src)]
)
break
else:
if col_alias:
raise DatabaseError(
"ORDER BY term does not match any column in the result set."
)
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_name = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
q.add_annotation(expr_src, col_name)
self.query.add_select_col(resolved, col_name)
resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())])
sql, params = self.compile(resolved.replace_expressions(replacements))
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if (
(name in self.query.alias_map and name not in self.query.table_map)
or name in self.query.extra_select
or (
self.query.external_aliases.get(name)
and name not in self.query.table_map
)
):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
vendor_impl = getattr(node, "as_" + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
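    # The `as_<vendor>` hook above lets an expression provide backend-specific
    # SQL: for example, a node defining `as_postgresql(self, compiler,
    # connection)` is preferred over its generic `as_sql()` when compiling
    # against the postgresql backend.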
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for compiler in compilers:
if compiler.query.is_sliced:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
if compiler.get_order_by():
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
elif self.query.is_sliced and combinator == "union":
limit = (self.query.low_mark, self.query.high_mark)
for compiler in compilers:
if not compiler.query.is_sliced:
compiler.query.set_limits(*limit)
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values(
(
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
)
)
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = "SELECT * FROM ({})".format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif (
self.query.subquery
or not features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
elif (
self.query.subquery
and features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == "union" or (combinator == "difference" and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == "union":
combinator_sql += " ALL"
braces = "{}"
if not self.query.subquery and features.supports_slicing_ordering_in_compound:
braces = "({})"
sql_parts, args_parts = zip(
*((braces.format(sql), args) for sql, args in parts)
)
result = [" {} ".format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def get_qualify_sql(self):
where_parts = []
if self.where:
where_parts.append(self.where)
if self.having:
where_parts.append(self.having)
inner_query = self.query.clone()
inner_query.subquery = True
inner_query.where = inner_query.where.__class__(where_parts)
# Augment the inner query with any window function references that
# might have been masked via values() and alias(). If any masked
# aliases are added they'll be masked again to avoid fetching
# the data in the `if qual_aliases` branch below.
select = {
expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0]
}
select_aliases = set(select.values())
qual_aliases = set()
replacements = {}
def collect_replacements(expressions):
while expressions:
expr = expressions.pop()
if expr in replacements:
continue
elif select_alias := select.get(expr):
replacements[expr] = select_alias
elif isinstance(expr, Lookup):
expressions.extend(expr.get_source_expressions())
elif isinstance(expr, Ref):
if expr.refs not in select_aliases:
expressions.extend(expr.get_source_expressions())
else:
num_qual_alias = len(qual_aliases)
select_alias = f"qual{num_qual_alias}"
qual_aliases.add(select_alias)
inner_query.add_annotation(expr, select_alias)
replacements[expr] = select_alias
collect_replacements(list(self.qualify.leaves()))
self.qualify = self.qualify.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
order_by = []
for order_by_expr, *_ in self.get_order_by():
collect_replacements(order_by_expr.get_source_expressions())
order_by.append(
order_by_expr.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
)
inner_query_compiler = inner_query.get_compiler(
self.using, elide_empty=self.elide_empty
)
inner_sql, inner_params = inner_query_compiler.as_sql(
# The limits must be applied to the outer query to avoid pruning
# results too eagerly.
with_limits=False,
# Force unique aliasing of selected columns to avoid collisions
# and make rhs predicates referencing easier.
with_col_aliases=True,
)
qualify_sql, qualify_params = self.compile(self.qualify)
result = [
"SELECT * FROM (",
inner_sql,
")",
self.connection.ops.quote_name("qualify"),
"WHERE",
qualify_sql,
]
if qual_aliases:
# If some select aliases were unmasked for filtering purposes they
# must be masked back.
cols = [self.connection.ops.quote_name(alias) for alias in select.values()]
result = [
"SELECT",
", ".join(cols),
"FROM (",
*result,
")",
self.connection.ops.quote_name("qualify_mask"),
]
params = list(inner_params) + qualify_params
        # As the SQL spec is unclear on whether or not derived table ordering
        # must propagate, it has to be explicitly repeated on the outermost
        # query to ensure it's preserved.
if order_by:
ordering_sqls = []
for ordering in order_by:
ordering_sql, ordering_params = self.compile(ordering)
ordering_sqls.append(ordering_sql)
params.extend(ordering_params)
result.extend(["ORDER BY", ", ".join(ordering_sqls)])
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup(
with_col_aliases=with_col_aliases,
)
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (
self.query.high_mark is not None or self.query.low_mark
)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, "supports_select_{}".format(combinator)):
raise NotSupportedError(
"{} is not supported on this database backend.".format(
combinator
)
)
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
elif self.qualify:
result, params = self.get_qualify_sql()
order_by = None
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
)
except EmptyResultSet:
if self.elide_empty:
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
having, h_params = (
self.compile(self.having) if self.having is not None else ("", [])
)
result = ["SELECT"]
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name(alias),
)
params.extend(s_params)
out_cols.append(s_sql)
result += [", ".join(out_cols)]
if from_:
result += ["FROM", *from_]
elif self.connection.features.bare_select_suffix:
result += [self.connection.features.bare_select_suffix]
params.extend(f_params)
if self.query.select_for_update and features.has_select_for_update:
if (
self.connection.get_autocommit()
# Don't raise an exception when database doesn't
# support transactions, as it's a noop.
and features.supports_transactions
):
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
if (
with_limit_offset
and not features.supports_select_for_update_with_limit
):
raise NotSupportedError(
"LIMIT/OFFSET is not supported with "
"select_for_update on this database backend."
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not features.has_select_for_update_nowait:
raise NotSupportedError(
"NOWAIT is not supported on this database backend."
)
elif skip_locked and not features.has_select_for_update_skip_locked:
raise NotSupportedError(
"SKIP LOCKED is not supported on this database backend."
)
elif of and not features.has_select_for_update_of:
raise NotSupportedError(
"FOR UPDATE OF is not supported on this database backend."
)
elif no_key and not features.has_select_for_no_key_update:
raise NotSupportedError(
"FOR NO KEY UPDATE is not supported on this "
"database backend."
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and features.for_update_after_from:
result.append(for_update_part)
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented."
)
order_by = order_by or self.connection.ops.force_no_ordering()
result.append("GROUP BY %s" % ", ".join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append("HAVING %s" % having)
params.extend(h_params)
if self.query.explain_info:
result.insert(
0,
self.connection.ops.explain_query_prefix(
self.query.explain_info.format,
**self.query.explain_info.options,
),
)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
order_by_sql = "ORDER BY %s" % ", ".join(ordering)
if combinator and features.requires_compound_order_by_subquery:
result = ["SELECT * FROM (", *result, ")", order_by_sql]
else:
result.append(order_by_sql)
if with_limit_offset:
result.append(
self.connection.ops.limit_offset_sql(
self.query.low_mark, self.query.high_mark
)
)
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if alias:
sub_selects.append(
"%s.%s"
% (
self.connection.ops.quote_name("subquery"),
self.connection.ops.quote_name(alias),
)
)
else:
select_clone = select.relabeled_clone(
{select.alias: "subquery"}
)
subselect, subparams = select_clone.as_sql(
self, self.connection
)
sub_selects.append(subselect)
sub_params.extend(subparams)
return "SELECT %s FROM (%s) subquery" % (
", ".join(sub_selects),
" ".join(result),
), tuple(sub_params + params)
return " ".join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(
self, select_mask, start_alias=None, opts=None, from_parent=None
):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions, one per concrete field of the base
        model (filtered by 'select_mask'), for use in the SELECT clause.
"""
result = []
if opts is None:
if (opts := self.query.get_meta()) is None:
return result
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if (
from_parent
and model is not None
and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model
)
):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if select_mask and field not in select_mask:
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(
parts, opts, None
)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(self.connection.ops.quote_name(name))
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(
self, name, opts, alias=None, default_order="ASC", already_seen=None
):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == "DESC"
pieces = name.split(LOOKUP_SEP)
(
field,
targets,
alias,
joins,
path,
opts,
transform_function,
) = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified or
# there are transforms to process.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
and not getattr(transform_function, "has_transforms", False)
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(
getattr(self.query.alias_map[j], "join_cols", None) for j in joins
)
if join_tuple in already_seen:
raise FieldError("Infinite loop caused by ordering.")
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, "resolve_expression") and not isinstance(
item, OrderBy
):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append(
(item.prefix_references(f"{name}{LOOKUP_SEP}"), False)
)
continue
results.extend(
(expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref)
for expr, is_ref in self.find_ordering_name(
item, opts, alias, order, already_seen
)
)
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [
(OrderBy(transform_function(t, alias), descending=descending), False)
for t in targets
]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_ordering() and get_distinct() must produce the same target columns
        on the same input, as the prefixes of get_ordering() and get_distinct() must
match. Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(
pieces, opts, alias
)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if (
alias not in self.query.alias_map
or self.query.alias_refcount[alias] == 1
):
result.append(", %s" % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(
self,
select,
select_mask,
opts=None,
root_alias=None,
cur_depth=1,
requested=None,
restricted=None,
):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects
if f.field.unique
)
return chain(
direct_choices, reverse_choices, self.query._filtered_relations
)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info["related_klass_infos"] = related_klass_infos
for f in opts.fields:
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s"
% (
f.name,
", ".join(_get_field_choices()) or "(none)",
)
)
else:
next = False
if not select_related_descend(f, restricted, requested, select_mask):
continue
related_select_mask = select_mask.get(f) or {}
klass_info = {
"model": f.remote_field.model,
"field": f,
"reverse": False,
"local_setter": f.set_cached_value,
"remote_setter": f.remote_field.set_cached_value
if f.unique
else lambda x, y: None,
"from_parent": False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(
related_select_mask, start_alias=alias, opts=f.remote_field.model._meta
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
f.remote_field.model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
related_select_mask = select_mask.get(f) or {}
if not select_related_descend(
f, restricted, requested, related_select_mask, reverse=True
):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins(
[related_field_name], opts, root_alias
)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": f.remote_field.set_cached_value,
"remote_setter": f.set_cached_value,
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
related_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins(
[name], opts, root_alias
)
model = join_opts.model
alias = joins[-1]
from_parent = (
issubclass(model, opts.model) and model is not opts.model
)
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": local_setter,
"remote_setter": partial(remote_setter, name),
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
field_select_mask = select_mask.get((name, f)) or {}
columns = self.get_default_columns(
field_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select,
field_select_mask,
opts=model._meta,
root_alias=alias,
cur_depth=cur_depth + 1,
requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
"Invalid field name(s) given in select_related: %s. "
"Choices are: %s"
% (
", ".join(invalid_fields),
", ".join(_get_field_choices()) or "(none)",
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info["model"]._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
"model": parent_model,
"field": parent_link,
"reverse": False,
"select_fields": [
select_index
for select_index in klass_info["select_fields"]
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model
or self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
Find the first selected column from a model. If it doesn't exist,
don't lock a model.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info["model"]._meta.concrete_model
for select_index in klass_info["select_fields"]:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield "self"
else:
field = klass_info["field"]
if klass_info["reverse"]:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get("related_klass_infos", [])
)
if not self.klass_info:
return []
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == "self":
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get("related_klass_infos", []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info["field"]
if related_klass_info["reverse"]:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: %s."
% (
", ".join(invalid_names),
", ".join(_get_field_choices()),
)
)
return result
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(
self,
results=None,
tuple_expected=False,
chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE,
):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(
MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size
)
fields = [s[0] for s in self.select[0 : self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(
self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0 : self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor,
self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)
sql, params = self.as_sql()
return "EXISTS (%s)" % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
format_ = self.query.explain_info.format
output_formatter = json.dumps if format_ and format_.lower() == "json" else str
for row in result[0]:
if not isinstance(row, str):
yield " ".join(output_formatter(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = ()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self.query, allow_joins=False, for_save=True
)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
"can only be used to update, not to insert." % (value, field)
)
if value.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query (%s=%r)."
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
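# Small worked example of the shapes described in the docstring (hedged; the
# field names and values are made up). For two objects with plain fields and
# value_rows == [[1, "a"], [2, "b"]], assemble_as_sql() returns roughly:
#
#     placeholder_rows = (("%s", "%s"), ("%s", "%s"))   # [n_objs][n_fields]
#     param_rows       = [[1, "a"], [2, "b"]]           # flattened per row
#
# i.e. one placeholder string per field per row, and one flat param list per row.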
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(
on_conflict=self.query.on_conflict,
)
result = ["%s %s" % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append("(%s)" % ", ".join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[
self.prepare_value(field, self.pre_save_val(field, obj))
for field in fields
]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [
[self.connection.ops.pk_default_value()] for _ in self.query.objs
]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (
not self.returning_fields and self.connection.features.has_bulk_insert
)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql(
fields,
self.query.on_conflict,
self.query.update_fields,
self.query.unique_fields,
)
if (
self.returning_fields
and self.connection.features.can_return_columns_from_insert
):
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(
self.connection.ops.bulk_insert_sql(fields, placeholder_rows)
)
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(
self.returning_fields
)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields
and len(self.query.objs) != 1
and not self.connection.features.can_return_rows_from_bulk_insert
)
opts = self.query.get_meta()
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if (
self.connection.features.can_return_rows_from_bulk_insert
and len(self.query.objs) > 1
):
rows = self.connection.ops.fetch_returned_insert_rows(cursor)
elif self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
rows = [
self.connection.ops.fetch_returned_insert_columns(
cursor,
self.returning_params,
)
]
else:
rows = [
(
self.connection.ops.last_insert_id(
cursor,
opts.db_table,
opts.pk.column,
),
)
]
cols = [field.get_col(opts.db_table) for field in self.returning_fields]
converters = self.get_converters(cols)
if converters:
rows = list(self.apply_converters(rows, converters))
return rows
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
@classmethod
def _expr_refs_base_model(cls, expr, base_model):
if isinstance(expr, Query):
return expr.model == base_model
if not hasattr(expr, "get_source_expressions"):
return False
return any(
cls._expr_refs_base_model(source_expr, base_model)
for source_expr in expr.get_source_expressions()
)
@cached_property
def contains_self_reference_subquery(self):
return any(
self._expr_refs_base_model(expr, self.query.model)
for expr in chain(
self.query.annotations.values(), self.query.where.children
)
)
def _as_sql(self, query):
result = ["DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)]
where, params = self.compile(query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias and not self.contains_self_reference_subquery:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [pk.get_col(self.query.get_initial_alias())]
outerq = Query(self.query.model)
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params)
outerq.add_filter("pk__in", innerq)
return self._as_sql(outerq)
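# Hedged sketch of the fallback above (not an exact SQL rendering): when the
# delete spans more than one alias or contains a self-referencing subquery,
# the emitted statement takes the shape
#     DELETE FROM "app_model" WHERE "app_model"."id" IN (SELECT "id" FROM ... )
# and on backends where update_can_self_select is False (e.g. MySQL) the
# inner query is pre-compiled and wrapped as SELECT * FROM (...) subquery to
# force its materialization.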
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return "", ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, "resolve_expression"):
val = val.resolve_expression(
self.query, allow_joins=False, for_save=True
)
if val.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
elif hasattr(val, "prepare_database_save"):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, "get_placeholder"):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = "%s"
name = field.column
if hasattr(val, "as_sql"):
sql, params = self.compile(val)
values.append("%s = %s" % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append("%s = %s" % (qn(name), placeholder))
update_params.append(val)
else:
values.append("%s = NULL" % qn(name))
table = self.query.base_table
result = [
"UPDATE %s SET" % qn(table),
", ".join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append("WHERE %s" % where)
return " ".join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(force=True)
query.extra = {}
query.select = []
meta = query.get_meta()
fields = [meta.pk.name]
related_ids_index = []
for related in self.query.related_updates:
if all(
path.join_field.primary_key for path in meta.get_path_to_parent(related)
):
# If a primary key chain exists to the targeted related update,
# then the meta.pk value can be used for it.
related_ids_index.append((related, 0))
else:
# This branch will only be reached when updating a field of an
# ancestor that is not part of the primary key chain of a MTI
# tree.
related_ids_index.append((related, len(fields)))
fields.append(related._meta.pk.name)
query.add_fields(fields)
super().pre_sql_setup()
must_pre_select = (
count > 1 and not self.connection.features.update_can_self_select
)
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.clear_where()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
related_ids = collections.defaultdict(list)
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
for parent, index in related_ids_index:
related_ids[parent].extend(r[index] for r in rows)
self.query.add_filter("pk__in", idents)
self.query.related_ids = related_ids
else:
# The fast path. Filters and updates in one query.
self.query.add_filter("pk__in", query)
self.query.reset_refcounts(refcounts_before)
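# Hedged sketch of the two paths above (not exact SQL): on backends that can
# select from the table being updated, the update stays a single statement,
#     UPDATE t SET ... WHERE id IN (SELECT id FROM t WHERE <filters>)
# otherwise (or when related updates need stable ids) the inner query runs
# first and the collected primary keys are filtered in directly,
#     UPDATE t SET ... WHERE id IN (1, 2, 3, ...)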
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ", ".join(sql)
params = tuple(params)
inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(
self.using,
elide_empty=self.elide_empty,
).as_sql(with_col_aliases=True)
sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql)
params = params + inner_query_params
return sql, params
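# Hedged sketch of the SQL emitted above: the aggregate annotations are
# computed over the original query wrapped as a subquery, e.g.
#     SELECT COUNT(*) FROM (SELECT "app_model"."id" AS "col1" FROM ...) subquery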
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
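# Hedged sketch of how cursor_iter is used internally: the sentinel is
# whatever fetchmany() returns once the result set is exhausted, so iteration
# stops without an explicit check and the cursor is always closed.
#
#     for chunk in cursor_iter(cursor,
#                              connection.features.empty_fetchmany_value,
#                              None, GET_ITERATOR_CHUNK_SIZE):
#         for row in chunk:
#             ...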
|
{
"content_hash": "fecefd004380f722f7357c5abafd42c8",
"timestamp": "",
"source": "github",
"line_count": 2065,
"max_line_length": 88,
"avg_line_length": 42.42324455205811,
"alnum_prop": 0.5245879183598923,
"repo_name": "manhhomienbienthuy/django",
"id": "32b88c89604221d0acd49a2cbe112aec71952cfa",
"size": "87604",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "django/db/models/sql/compiler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "91648"
},
{
"name": "HTML",
"bytes": "238916"
},
{
"name": "JavaScript",
"bytes": "158214"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16134531"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
}
|
from thumbor.handlers import BaseHandler
class HealthcheckHandler(BaseHandler):
async def get(self):
self.set_header("Cache-Control", "no-cache")
self.write("WORKING")
async def head(self):
self.set_header("Cache-Control", "no-cache")
self.set_status(200)
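# Hedged usage sketch: with thumbor serving this handler on its healthcheck
# route (commonly /healthcheck; route and port are configuration-dependent,
# so these values are assumptions), the responses look like
#     GET  /healthcheck  -> 200, body "WORKING", Cache-Control: no-cache
#     HEAD /healthcheck  -> 200, empty body,     Cache-Control: no-cache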
|
{
"content_hash": "1794391adf37a9b4a54cb6462c8156da",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 27.181818181818183,
"alnum_prop": 0.6588628762541806,
"repo_name": "gi11es/thumbor",
"id": "3b5b053c0bc38077310ba4a82189034da0b8367c",
"size": "551",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "thumbor/handlers/healthcheck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "58039"
},
{
"name": "JavaScript",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "9946"
},
{
"name": "Python",
"bytes": "557993"
}
],
"symlink_target": ""
}
|
"""
This StaticWeb WSGI middleware will serve container data as a static web site
with index file and error file resolution and optional file listings. This mode
is normally only active for anonymous requests. When using keystone for
authentication set ``delay_auth_decision = true`` in the authtoken middleware
configuration in your ``/etc/swift/proxy-server.conf`` file. If you want to
use it with authenticated requests, set the ``X-Web-Mode: true`` header on the
request.
The ``staticweb`` filter should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware. Also, the
configuration section for the ``staticweb`` middleware itself needs to be
added. For example::
[DEFAULT]
...
[pipeline:main]
pipeline = catch_errors healthcheck proxy-logging cache ratelimit tempauth
staticweb proxy-logging proxy-server
...
[filter:staticweb]
use = egg:swift#staticweb
Any publicly readable containers (for example, ``X-Container-Read: .r:*``, see
:ref:`acls` for more information on this) will be checked for
X-Container-Meta-Web-Index and X-Container-Meta-Web-Error header values::
X-Container-Meta-Web-Index <index.name>
X-Container-Meta-Web-Error <error.name.suffix>
If X-Container-Meta-Web-Index is set, any <index.name> files will be served
without having to specify the <index.name> part. For instance, setting
``X-Container-Meta-Web-Index: index.html`` will be able to serve the object
.../pseudo/path/index.html with just .../pseudo/path or .../pseudo/path/
If X-Container-Meta-Web-Error is set, any errors (currently just 401
Unauthorized and 404 Not Found) will instead serve the
.../<status.code><error.name.suffix> object. For instance, setting
``X-Container-Meta-Web-Error: error.html`` will serve .../404error.html for
requests for paths not found.
For pseudo paths that have no <index.name>, this middleware can serve HTML file
listings if you set the ``X-Container-Meta-Web-Listings: true`` metadata item
on the container.
If listings are enabled, the listings can have a custom style sheet by setting
the X-Container-Meta-Web-Listings-CSS header. For instance, setting
``X-Container-Meta-Web-Listings-CSS: listing.css`` will make listings link to
the .../listing.css style sheet. If you "view source" in your browser on a
listing page, you will see the well defined document structure that can be
styled.
The content-type of directory marker objects can be modified by setting
the ``X-Container-Meta-Web-Directory-Type`` header. If the header is not set,
application/directory is used by default. Directory marker objects are
0-byte objects that represent directories to create a simulated hierarchical
structure.
Example usage of this middleware via ``swift``:
Make the container publicly readable::
swift post -r '.r:*' container
You should be able to get objects directly, but no index.html resolution or
listings.
Set an index file directive::
swift post -m 'web-index:index.html' container
You should be able to hit paths that have an index.html without needing to
type the index.html part.
Turn on listings::
swift post -r '.r:*,.rlistings' container
swift post -m 'web-listings: true' container
Now you should see object listings for paths and pseudo paths that have no
index.html.
Enable a custom listings style sheet::
swift post -m 'web-listings-css:listings.css' container
Set an error file::
swift post -m 'web-error:error.html' container
Now 401's should load 401error.html, 404's should load 404error.html, etc.
Set Content-Type of directory marker object::
swift post -m 'web-directory-type:text/directory' container
Now 0-byte objects with a content-type of text/directory will be treated
as directories rather than objects.
"""
import cgi
import json
import time
from swift.common.utils import human_readable, split_path, config_true_value, \
quote, register_swift_info, get_logger
from swift.common.wsgi import make_env, WSGIContext
from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND
from swift.common.swob import Response, HTTPMovedPermanently, HTTPNotFound
from swift.proxy.controllers.base import get_container_info
class _StaticWebContext(WSGIContext):
"""
The Static Web WSGI middleware filter; serves container data as a
static web site. See `staticweb`_ for an overview.
This _StaticWebContext is used by StaticWeb with each request
that might need to be handled to make keeping contextual
information about the request a bit simpler than storing it in
the WSGI env.
"""
def __init__(self, staticweb, version, account, container, obj):
WSGIContext.__init__(self, staticweb.app)
self.version = version
self.account = account
self.container = container
self.obj = obj
self.app = staticweb.app
self.agent = '%(orig)s StaticWeb'
# Results from the last call to self._get_container_info.
self._index = self._error = self._listings = self._listings_css = \
self._dir_type = None
def _error_response(self, response, env, start_response):
"""
Sends the error response to the remote client, possibly resolving a
custom error response body based on x-container-meta-web-error.
:param response: The error response we should default to sending.
:param env: The original request WSGI environment.
:param start_response: The WSGI start_response hook.
"""
if not self._error:
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return response
save_response_status = self._response_status
save_response_headers = self._response_headers
save_response_exc_info = self._response_exc_info
resp = self._app_call(make_env(
env, 'GET', '/%s/%s/%s/%s%s' % (
self.version, self.account, self.container,
self._get_status_int(), self._error),
self.agent, swift_source='SW'))
if is_success(self._get_status_int()):
start_response(save_response_status, self._response_headers,
self._response_exc_info)
return resp
start_response(save_response_status, save_response_headers,
save_response_exc_info)
return response
def _get_container_info(self, env):
"""
Retrieves x-container-meta-web-index, x-container-meta-web-error,
x-container-meta-web-listings, x-container-meta-web-listings-css,
and x-container-meta-web-directory-type from memcache or from the
cluster and stores the result in memcache and in self._index,
self._error, self._listings, self._listings_css and self._dir_type.
:param env: The WSGI environment dict.
"""
self._index = self._error = self._listings = self._listings_css = \
self._dir_type = None
container_info = get_container_info(env, self.app, swift_source='SW')
if is_success(container_info['status']):
meta = container_info.get('meta', {})
self._index = meta.get('web-index', '').strip()
self._error = meta.get('web-error', '').strip()
self._listings = meta.get('web-listings', '').strip()
self._listings_css = meta.get('web-listings-css', '').strip()
self._dir_type = meta.get('web-directory-type', '').strip()
def _listing(self, env, start_response, prefix=None):
"""
Sends an HTML object listing to the remote client.
:param env: The original WSGI environment dict.
:param start_response: The original WSGI start_response hook.
:param prefix: Any prefix desired for the container listing.
"""
if not config_true_value(self._listings):
body = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 ' \
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
'<html>\n' \
'<head>\n' \
'<title>Listing of %s</title>\n' % cgi.escape(env['PATH_INFO'])
if self._listings_css:
body += ' <link rel="stylesheet" type="text/css" ' \
'href="%s" />\n' % self._build_css_path(prefix or '')
else:
body += ' <style type="text/css">\n' \
' h1 {font-size: 1em; font-weight: bold;}\n' \
' p {font-size: 2}\n' \
' </style>\n'
body += '</head>\n<body>' \
' <h1>Web Listing Disabled</h1>' \
' <p>The owner of this web site has disabled web listing.' \
' <p>If you are the owner of this web site, you can enable' \
' web listing by setting X-Container-Meta-Web-Listings.</p>'
if self._index:
body += '<h1>Index File Not Found</h1>' \
' <p>The owner of this web site has set ' \
' <b>X-Container-Meta-Web-Index: %s</b>. ' \
' However, this file is not found.</p>' % self._index
body += ' </body>\n</html>\n'
resp = HTTPNotFound(body=body)(env, self._start_response)
return self._error_response(resp, env, start_response)
tmp_env = make_env(
env, 'GET', '/%s/%s/%s' % (
self.version, self.account, self.container),
self.agent, swift_source='SW')
tmp_env['QUERY_STRING'] = 'delimiter=/&format=json'
if prefix:
tmp_env['QUERY_STRING'] += '&prefix=%s' % quote(prefix)
else:
prefix = ''
resp = self._app_call(tmp_env)
if not is_success(self._get_status_int()):
return self._error_response(resp, env, start_response)
listing = None
body = ''.join(resp)
if body:
listing = json.loads(body)
if not listing:
resp = HTTPNotFound()(env, self._start_response)
return self._error_response(resp, env, start_response)
headers = {'Content-Type': 'text/html; charset=UTF-8'}
body = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 ' \
'Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n' \
'<html>\n' \
' <head>\n' \
' <title>Listing of %s</title>\n' % \
cgi.escape(env['PATH_INFO'])
if self._listings_css:
body += ' <link rel="stylesheet" type="text/css" ' \
'href="%s" />\n' % (self._build_css_path(prefix))
else:
body += ' <style type="text/css">\n' \
' h1 {font-size: 1em; font-weight: bold;}\n' \
' th {text-align: left; padding: 0px 1em 0px 1em;}\n' \
' td {padding: 0px 1em 0px 1em;}\n' \
' a {text-decoration: none;}\n' \
' </style>\n'
body += ' </head>\n' \
' <body>\n' \
' <h1 id="title">Listing of %s</h1>\n' \
' <table id="listing">\n' \
' <tr id="heading">\n' \
' <th class="colname">Name</th>\n' \
' <th class="colsize">Size</th>\n' \
' <th class="coldate">Date</th>\n' \
' </tr>\n' % \
cgi.escape(env['PATH_INFO'])
if prefix:
body += ' <tr id="parent" class="item">\n' \
' <td class="colname"><a href="../">../</a></td>\n' \
' <td class="colsize"> </td>\n' \
' <td class="coldate"> </td>\n' \
' </tr>\n'
for item in listing:
if 'subdir' in item:
subdir = item['subdir'].encode("utf-8")
if prefix:
subdir = subdir[len(prefix):]
body += ' <tr class="item subdir">\n' \
' <td class="colname"><a href="%s">%s</a></td>\n' \
' <td class="colsize"> </td>\n' \
' <td class="coldate"> </td>\n' \
' </tr>\n' % \
(quote(subdir), cgi.escape(subdir))
for item in listing:
if 'name' in item:
name = item['name'].encode("utf-8")
if prefix:
name = name[len(prefix):]
content_type = item['content_type'].encode("utf-8")
bytes = human_readable(item['bytes'])
last_modified = (
cgi.escape(item['last_modified'].encode("utf-8")).
split('.')[0].replace('T', ' '))
body += ' <tr class="item %s">\n' \
' <td class="colname"><a href="%s">%s</a></td>\n' \
' <td class="colsize">%s</td>\n' \
' <td class="coldate">%s</td>\n' \
' </tr>\n' % \
(' '.join('type-' + cgi.escape(t.lower(), quote=True)
for t in content_type.split('/')),
quote(name), cgi.escape(name),
bytes, last_modified)
body += ' </table>\n' \
' </body>\n' \
'</html>\n'
resp = Response(headers=headers, body=body)
return resp(env, start_response)
def _build_css_path(self, prefix=''):
"""
Constructs a relative path from a given prefix within the container.
URLs and paths starting with '/' are not modified.
:param prefix: The prefix for the container listing.
"""
if self._listings_css.startswith(('/', 'http://', 'https://')):
css_path = quote(self._listings_css, ':/')
else:
css_path = '../' * prefix.count('/') + quote(self._listings_css)
return css_path
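        # Worked example (assumed values): with prefix 'pseudo/path/' and
        # X-Container-Meta-Web-Listings-CSS set to 'listing.css', the listing
        # page links to '../../listing.css' (one '../' per '/' in the prefix).
        # An absolute value such as '/styles/listing.css' or
        # 'https://cdn.example.com/listing.css' is URL-quoted but otherwise
        # left untouched.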
def handle_container(self, env, start_response):
"""
Handles a possible static web request for a container.
:param env: The original WSGI environment dict.
:param start_response: The original WSGI start_response hook.
"""
self._get_container_info(env)
if not self._listings and not self._index:
if config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
return HTTPNotFound()(env, start_response)
return self.app(env, start_response)
if env['PATH_INFO'][-1] != '/':
resp = HTTPMovedPermanently(
location=(env['PATH_INFO'] + '/'))
return resp(env, start_response)
if not self._index:
return self._listing(env, start_response)
tmp_env = dict(env)
tmp_env['HTTP_USER_AGENT'] = \
'%s StaticWeb' % env.get('HTTP_USER_AGENT')
tmp_env['swift.source'] = 'SW'
tmp_env['PATH_INFO'] += self._index
resp = self._app_call(tmp_env)
status_int = self._get_status_int()
if status_int == HTTP_NOT_FOUND:
return self._listing(env, start_response)
elif not is_success(self._get_status_int()) and \
not is_redirection(self._get_status_int()):
return self._error_response(resp, env, start_response)
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
def handle_object(self, env, start_response):
"""
Handles a possible static web request for an object. This object could
resolve into an index or listing request.
:param env: The original WSGI environment dict.
:param start_response: The original WSGI start_response hook.
"""
tmp_env = dict(env)
tmp_env['HTTP_USER_AGENT'] = \
'%s StaticWeb' % env.get('HTTP_USER_AGENT')
tmp_env['swift.source'] = 'SW'
resp = self._app_call(tmp_env)
status_int = self._get_status_int()
self._get_container_info(env)
if is_success(status_int) or is_redirection(status_int):
# Treat directory marker objects as not found
if not self._dir_type:
self._dir_type = 'application/directory'
content_length = self._response_header_value('content-length')
content_length = int(content_length) if content_length else 0
if self._response_header_value('content-type') == self._dir_type \
and content_length <= 1:
status_int = HTTP_NOT_FOUND
else:
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
if status_int != HTTP_NOT_FOUND:
# Retaining the previous code's behavior of not using custom error
# pages for non-404 errors.
self._error = None
return self._error_response(resp, env, start_response)
if not self._listings and not self._index:
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
status_int = HTTP_NOT_FOUND
if self._index:
tmp_env = dict(env)
tmp_env['HTTP_USER_AGENT'] = \
'%s StaticWeb' % env.get('HTTP_USER_AGENT')
tmp_env['swift.source'] = 'SW'
if tmp_env['PATH_INFO'][-1] != '/':
tmp_env['PATH_INFO'] += '/'
tmp_env['PATH_INFO'] += self._index
resp = self._app_call(tmp_env)
status_int = self._get_status_int()
if is_success(status_int) or is_redirection(status_int):
if env['PATH_INFO'][-1] != '/':
resp = HTTPMovedPermanently(
location=env['PATH_INFO'] + '/')
return resp(env, start_response)
start_response(self._response_status, self._response_headers,
self._response_exc_info)
return resp
if status_int == HTTP_NOT_FOUND:
if env['PATH_INFO'][-1] != '/':
tmp_env = make_env(
env, 'GET', '/%s/%s/%s' % (
self.version, self.account, self.container),
self.agent, swift_source='SW')
tmp_env['QUERY_STRING'] = 'limit=1&format=json&delimiter' \
'=/&limit=1&prefix=%s' % quote(self.obj + '/')
resp = self._app_call(tmp_env)
body = ''.join(resp)
if not is_success(self._get_status_int()) or not body or \
not json.loads(body):
resp = HTTPNotFound()(env, self._start_response)
return self._error_response(resp, env, start_response)
resp = HTTPMovedPermanently(location=env['PATH_INFO'] + '/')
return resp(env, start_response)
return self._listing(env, start_response, self.obj)
class StaticWeb(object):
"""
The Static Web WSGI middleware filter; serves container data as a static
web site. See `staticweb`_ for an overview.
The proxy logs created for any subrequests made will have swift.source set
to "SW".
:param app: The next WSGI application/filter in the paste.deploy pipeline.
:param conf: The filter configuration dict.
"""
def __init__(self, app, conf):
#: The next WSGI application/filter in the paste.deploy pipeline.
self.app = app
#: The filter configuration dict.
self.conf = conf
self.logger = get_logger(conf, log_route='staticweb')
def __call__(self, env, start_response):
"""
Main hook into the WSGI paste.deploy filter/app pipeline.
:param env: The WSGI environment dict.
:param start_response: The WSGI start_response hook.
"""
env['staticweb.start_time'] = time.time()
if 'swift.authorize' not in env:
self.logger.warning(
'No authentication middleware authorized request yet. '
'Skipping staticweb')
return self.app(env, start_response)
try:
(version, account, container, obj) = \
split_path(env['PATH_INFO'], 2, 4, True)
except ValueError:
return self.app(env, start_response)
if env['REQUEST_METHOD'] not in ('HEAD', 'GET'):
return self.app(env, start_response)
if env.get('REMOTE_USER') and \
not config_true_value(env.get('HTTP_X_WEB_MODE', 'f')):
return self.app(env, start_response)
if not container:
return self.app(env, start_response)
context = _StaticWebContext(self, version, account, container, obj)
if obj:
return context.handle_object(env, start_response)
return context.handle_container(env, start_response)
def filter_factory(global_conf, **local_conf):
"""Returns a Static Web WSGI filter for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
register_swift_info('staticweb')
def staticweb_filter(app):
return StaticWeb(app, conf)
return staticweb_filter
|
{
"content_hash": "f05a90a1cefc1328f26119a60ca1368a",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 79,
"avg_line_length": 43.90669371196755,
"alnum_prop": 0.5664787951584589,
"repo_name": "mjwtom/swift",
"id": "1bf16405ca4bfdda32102b3a61aff2150018dc43",
"size": "22241",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "swift/common/middleware/staticweb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6824999"
},
{
"name": "Shell",
"bytes": "2278"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import Profile
admin.site.register(Profile)
|
{
"content_hash": "8f2fbf561065bf5f27c46e725809db37",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 32,
"avg_line_length": 15.5,
"alnum_prop": 0.8064516129032258,
"repo_name": "akoskaaa/dotprofile",
"id": "f045416366b537e869e70fc66a8d4bd40e1317dc",
"size": "93",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "dotprofile/profiles/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6875"
}
],
"symlink_target": ""
}
|
"""
.. module:: mpl_get_data_hst
:synopsis: Returns HST spectral data as a JSON string through Randy's
mast_plot.pl service.
.. moduleauthor:: Scott W. Fleming <fleming@stsci.edu>
"""
import collections
from operator import itemgetter
from data_series import DataSeries
import requests
#--------------------
def mpl_get_data_hst(obsid):
"""
Given an HST observation ID, returns the spectral data.
:param obsid: The HST observation ID to retrieve the data from.
:type obsid: str
:returns: JSON -- The spectral data for this observation ID.
Error codes:
0 = No error.
1 = HTTP Error 500 code returned.
2 = "File not found error" returned by mast_plot.pl.
3 = Wavelength and/or flux arrays are zero length.
4 = Wavelength and flux arrays are not of equal length.
"""
# This defines a data point for a DataSeries object as a namedtuple.
data_point = collections.namedtuple('DataPoint', ['x', 'y'])
# For HST, this defines the x-axis and y-axis units as a string.
hst_xunit = "Angstroms"
hst_yunit = "ergs/cm^2/s/Angstrom"
    # Initiate a request from Randy's perl script service. Note the return is
    # a 3-element list, each element itself a list containing another list.
return_request = requests.get("https://archive.stsci.edu/cgi-bin/mast_plot"
".pl?HST=" + obsid.upper())
if return_request.status_code == 500:
# If an HTTP 500 error is returned, catch it here, since it can't
# be converted to a JSON string using the built-in json().
errcode = 1
return_dataseries = DataSeries('hst', obsid, [], [], [], [], errcode)
else:
return_request = return_request.json()
if not return_request[0]:
# File not found by service.
errcode = 2
return_dataseries = DataSeries('hst', obsid, [], [], [], [],
errcode)
else:
# Wavelengths are the first list in the returned 3-element list.
wls = [float("{0:.8f}".format(x)) for x in return_request[0][0]]
# Fluxes are the second list in the returned 3-element list.
fls = [float("{0:.8e}".format(x)) for x in return_request[1][0]]
# This error code will be used unless there's a problem reading any
# of the FITS files in the list.
errcode = 0
# Make sure wavelengths and fluxes are not empty and are same size.
if wls and fls and len(wls) == len(fls):
# Make sure wavelengths and fluxes are sorted
# from smallest wavelength to largest.
sort_indexes = [x[0] for x in sorted(enumerate(wls),
key=itemgetter(1))]
wls = [wls[x] for x in sort_indexes]
fls = [fls[x] for x in sort_indexes]
# Zip the wavelengths and fluxes into tuples to create the plot
# series.
plot_series = [[data_point(x=x, y=y) for x, y in zip(wls, fls)]]
# Create the return DataSeries object.
return_dataseries = DataSeries('hst', obsid, plot_series,
['HST_' + obsid],
[hst_xunit], [hst_yunit],
errcode)
elif not wls or not fls:
errcode = 3
return_dataseries = DataSeries('hst', obsid, [], [], [], [],
errcode)
else:
errcode = 4
return_dataseries = DataSeries('hst', obsid, [], [], [], [],
errcode)
# Return the DataSeries object back to the calling module.
return return_dataseries
#--------------------
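# Hedged usage sketch (the observation ID below is made up, and the DataSeries
# attribute names are not shown in this module, so only the call is illustrated):
#
#     series = mpl_get_data_hst("OCIO01010")
#     # `series` is a DataSeries built with ('hst', obsid, plot_series,
#     # series names, x/y units, errcode); errcode 0 means success, while
#     # 1-4 correspond to the failure modes listed in the docstring above.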
|
{
"content_hash": "1e4ece735090016311828aa2ccf992a9",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 80,
"avg_line_length": 39.39,
"alnum_prop": 0.5384615384615384,
"repo_name": "openSAIL/MASTDataDelivery",
"id": "1bf8559141e7c72f1a3f6eb70bb23e41b9711bdd",
"size": "3939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpl_get_data_hst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "189906"
}
],
"symlink_target": ""
}
|
"""
Handles the queries.
"""
from attributes import IntegerField, DateTimeField
import redisco
from redisco.containers import SortedSet, Set, List, NonPersistentList
from exceptions import AttributeNotIndexed
from utils import _encode_key
from attributes import ZINDEXABLE
# Model Set
class ModelSet(Set):
def __init__(self, model_class):
self.model_class = model_class
self.key = model_class._key['all']
self._db = redisco.get_client()
self._filters = {}
self._exclusions = {}
self._zfilters = []
self._ordering = []
self._limit = None
self._offset = None
#################
# MAGIC METHODS #
#################
def __getitem__(self, index):
if isinstance(index, slice):
return map(lambda id: self._get_item_with_id(id), self._set[index])
else:
id = self._set[index]
if id:
return self._get_item_with_id(id)
else:
raise IndexError
def __repr__(self):
if len(self._set) > 30:
m = self._set[:30]
else:
m = self._set
s = map(lambda id: self._get_item_with_id(id), m)
return "%s" % s
def __iter__(self):
for id in self._set:
yield self._get_item_with_id(id)
def __len__(self):
return len(self._set)
def __contains__(self, val):
return val.id in self._set
##########################################
# METHODS THAT RETURN A SET OF INSTANCES #
##########################################
def get_by_id(self, id):
if self.model_class.exists(id):
return self._get_item_with_id(id)
def first(self):
try:
return self.limit(1).__getitem__(0)
except IndexError:
return None
#####################################
# METHODS THAT MODIFY THE MODEL SET #
#####################################
def filter(self, **kwargs):
clone = self._clone()
if not clone._filters:
clone._filters = {}
clone._filters.update(kwargs)
return clone
def exclude(self, **kwargs):
clone = self._clone()
if not clone._exclusions:
clone._exclusions = {}
clone._exclusions.update(kwargs)
return clone
def zfilter(self, **kwargs):
clone = self._clone()
if not clone._zfilters:
clone._zfilters = []
clone._zfilters.append(kwargs)
return clone
# this should only be called once
def order(self, field):
fname = field.lstrip('-')
if fname not in self.model_class._indices:
raise ValueError("Order parameter should be an indexed attribute.")
alpha = True
if fname in self.model_class._attributes:
v = self.model_class._attributes[fname]
alpha = not isinstance(v, ZINDEXABLE)
clone = self._clone()
if not clone._ordering:
clone._ordering = []
clone._ordering.append((field, alpha,))
return clone
def limit(self, n, offset=0):
clone = self._clone()
clone._limit = n
clone._offset = offset
return clone
def create(self, **kwargs):
instance = self.model_class(**kwargs)
if instance.save():
return instance
else:
return None
def all(self):
return self._clone()
def get_or_create(self, **kwargs):
opts = {}
for k, v in kwargs.iteritems():
if k in self.model_class._indices:
opts[k] = v
o = self.filter(**opts).first()
if o:
return o
else:
return self.create(**kwargs)
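    # Hedged usage sketch (the model and attribute names are assumptions):
    # given a redisco model `Person` with indexed attributes, the query API
    # above chains much like Django querysets:
    #
    #     Person.objects.filter(last_name='Smith').exclude(first_name='Bob')
    #     Person.objects.zfilter(age__gt=30).order('-age').limit(10, offset=20)
    #     Person.objects.get_or_create(email='a@example.com', first_name='A')
    #
    # Every filter()/exclude()/zfilter()/order()/limit() call returns a clone,
    # so the original ModelSet is never mutated.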
#
@property
def db(self):
return self._db
###################
# PRIVATE METHODS #
###################
@property
def _set(self):
# For performance reasons, only one zfilter is allowed.
if hasattr(self, '_cached_set'):
return self._cached_set
if self._zfilters:
self._cached_set = self._add_zfilters()
return self._cached_set
s = Set(self.key)
self._expire_or_delete = []
if self._filters:
s = self._add_set_filter(s)
if self._exclusions:
s = self._add_set_exclusions(s)
n = self._order(s.key)
self._cached_set = list(self._order(s.key))
for key in filter(lambda key: key != self.key, self._expire_or_delete):
del self.db[key]
return self._cached_set
def _add_set_filter(self, s):
indices = []
for k, v in self._filters.iteritems():
index = self._build_key_from_filter_item(k, v)
if k not in self.model_class._indices:
raise AttributeNotIndexed(
"Attribute %s is not indexed in %s class." %
(k, self.model_class.__name__))
indices.append(index)
new_set_key = "~%s.%s" % ("+".join([self.key] + indices), id(self))
s.intersection(new_set_key, *[Set(n) for n in indices])
self._expire_or_delete.append(new_set_key)
return Set(new_set_key)
def _add_set_exclusions(self, s):
indices = []
for k, v in self._exclusions.iteritems():
index = self._build_key_from_filter_item(k, v)
if k not in self.model_class._indices:
raise AttributeNotIndexed(
"Attribute %s is not indexed in %s class." %
(k, self.model_class.__name__))
indices.append(index)
new_set_key = "~%s.%s" % ("-".join([self.key] + indices), id(self))
s.difference(new_set_key, *[Set(n) for n in indices])
self._expire_or_delete.append(new_set_key)
return Set(new_set_key)
def _add_zfilters(self):
k, v = self._zfilters[0].items()[0]
try:
att, op = k.split('__')
except ValueError:
raise ValueError("zfilter should have an operator.")
index = self.model_class._key[att]
desc = self.model_class._attributes[att]
zset = SortedSet(index)
limit, offset = self._get_limit_and_offset()
if isinstance(v, (tuple, list,)):
min, max = v
min = float(desc.typecast_for_storage(min))
max = float(desc.typecast_for_storage(max))
else:
v = float(desc.typecast_for_storage(v))
if op == 'lt':
return zset.lt(v, limit, offset)
elif op == 'gt':
return zset.gt(v, limit, offset)
elif op == 'gte':
return zset.ge(v, limit, offset)
elif op == 'lte':
return zset.le(v, limit, offset)
elif op == 'in':
return zset.between(min, max, limit, offset)
def _order(self, skey):
if self._ordering:
return self._set_with_ordering(skey)
else:
return self._set_without_ordering(skey)
def _set_with_ordering(self, skey):
num, start = self._get_limit_and_offset()
old_set_key = skey
for ordering, alpha in self._ordering:
if ordering.startswith('-'):
desc = True
ordering = ordering.lstrip('-')
else:
desc = False
new_set_key = "%s#%s.%s" % (old_set_key, ordering, id(self))
by = "%s->%s" % (self.model_class._key['*'], ordering)
self.db.sort(old_set_key,
by=by,
store=new_set_key,
alpha=alpha,
start=start,
num=num,
desc=desc)
self._expire_or_delete.append(old_set_key)
self._expire_or_delete.append(new_set_key)
return List(new_set_key)
def _set_without_ordering(self, skey):
# sort by id
num, start = self._get_limit_and_offset()
old_set_key = skey
new_set_key = "%s#.%s" % (old_set_key, id(self))
self.db.sort(old_set_key,
store=new_set_key,
start=start,
num=num)
self._expire_or_delete.append(old_set_key)
self._expire_or_delete.append(new_set_key)
return List(new_set_key)
def _get_limit_and_offset(self):
if (self._limit is not None and self._offset is None) or \
(self._limit is None and self._offset is not None):
raise "Limit and offset must be specified"
if self._limit is None:
return (None, None)
else:
return (self._limit, self._offset)
def _get_item_with_id(self, id):
instance = self.model_class()
instance._id = str(id)
return instance
def _build_key_from_filter_item(self, index, value):
desc = self.model_class._attributes.get(index)
if desc:
value = desc.typecast_for_storage(value)
return self.model_class._key[index][_encode_key(value)]
def _clone(self):
klass = self.__class__
c = klass(self.model_class)
if self._filters:
c._filters = self._filters
if self._exclusions:
c._exclusions = self._exclusions
if self._zfilters:
c._zfilters = self._zfilters
if self._ordering:
c._ordering = self._ordering
c._limit = self._limit
c._offset = self._offset
return c
|
{
"content_hash": "7a9c024d6e5267375c95566e12748f2f",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 79,
"avg_line_length": 32.06,
"alnum_prop": 0.511540860885839,
"repo_name": "iamteem/redisco",
"id": "a4acd1bd933a62a2381620b537886447a40c167c",
"size": "9618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "redisco/models/modelset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120678"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Product.has_image'
db.add_column('inventory_product', 'has_image', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Product.has_image'
db.delete_column('inventory_product', 'has_image')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'inventory.coin': {
'Meta': {'object_name': 'Coin', '_ormbases': ['inventory.ProductType']},
'actual_year': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"}),
'additional_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']", 'null': 'True', 'blank': 'True'}),
'country_code': ('django.db.models.fields.CharField', [], {'default': "'us'", 'max_length': '2'}),
'denomination': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': "''"}),
'die_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'heading': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'holder_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'holder_variety_2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'major_variety': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'pcgs_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'producttype_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['inventory.ProductType']", 'unique': 'True', 'primary_key': 'True'}),
'sort_order': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']", 'null': 'True', 'blank': 'True'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': "''"}),
'year_issued': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': "''"})
},
'inventory.product': {
'Meta': {'object_name': 'Product'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketCategory']"}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'has_image': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_item': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shops.Shop']"}),
'subcategory': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketSubCategory']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['inventory.ProductType']", 'null': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '11', 'decimal_places': '2'})
},
'inventory.producttype': {
'Meta': {'object_name': 'ProductType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'market.marketcategory': {
'Meta': {'object_name': 'MarketCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'market.marketsubcategory': {
'Meta': {'unique_together': "(('parent', 'slug'),)", 'object_name': 'MarketSubCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subcategories'", 'null': 'True', 'to': "orm['market.MarketCategory']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60', 'db_index': 'True'})
},
'shops.shop': {
'Meta': {'object_name': 'Shop'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'bids': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'date_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'39.29038,-76.61219'", 'max_length': '255'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
}
}
complete_apps = ['inventory']
|
{
"content_hash": "fe0d98af2fa385a2b2222921bccc36a5",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 181,
"avg_line_length": 77.96323529411765,
"alnum_prop": 0.5434311044044139,
"repo_name": "codepython/CollectorCity-Market-Place",
"id": "7d8313f14fb68f1871fad32f75ddafe91afce6e8",
"size": "10621",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stores/apps/inventory/migrations/0004_auto__add_field_product_has_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "863646"
},
{
"name": "HTML",
"bytes": "475154"
},
{
"name": "JavaScript",
"bytes": "693720"
},
{
"name": "Python",
"bytes": "1860719"
},
{
"name": "Shell",
"bytes": "1174"
}
],
"symlink_target": ""
}
|
"""Package to provide compatibility with compat scripts."""
|
{
"content_hash": "190c7dddb0ebdcbd92676365dc8dc4f1",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 59,
"avg_line_length": 60,
"alnum_prop": 0.7666666666666667,
"repo_name": "jayvdb/pywikibot-core",
"id": "1f9845bd7eebdc191b241f5849aa12ca7bae4a8b",
"size": "84",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pywikibot/compat/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4435849"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import errno
import filecmp
import glob
import os
import random
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import tempfile
import time
import traceback
from color import Coloring
from git_command import GitCommand, git_require
from git_config import GitConfig, IsId, GetSchemeFromUrl, GetUrlCookieFile, ID_RE
from error import GitError, HookError, UploadError, DownloadError
from error import ManifestInvalidRevisionError
from error import NoManifestException
from trace import IsTrace, Trace
from git_refs import GitRefs, HEAD, R_HEADS, R_TAGS, R_PUB, R_M
from pyversion import is_python3
if not is_python3():
# pylint:disable=W0622
input = raw_input
# pylint:enable=W0622
def _lwrite(path, content):
lock = '%s.lock' % path
fd = open(lock, 'w')
try:
fd.write(content)
finally:
fd.close()
try:
os.rename(lock, path)
except OSError:
os.remove(lock)
raise
def _error(fmt, *args):
msg = fmt % args
print('error: %s' % msg, file=sys.stderr)
def _warn(fmt, *args):
msg = fmt % args
print('warn: %s' % msg, file=sys.stderr)
def not_rev(r):
return '^' + r
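# sq() below shell-quotes a single value for embedding in a remote command
# line, e.g. sq("alice@example.com") -> "'alice@example.com'"; it is used
# later when assembling the gerrit receive-pack --reviewer/--cc options.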
def sq(r):
return "'" + r.replace("'", "'\''") + "'"
_project_hook_list = None
def _ProjectHooks():
"""List the hooks present in the 'hooks' directory.
These hooks are project hooks and are copied to the '.git/hooks' directory
of all subprojects.
This function caches the list of hooks (based on the contents of the
'repo/hooks' directory) on the first call.
Returns:
A list of absolute paths to all of the files in the hooks directory.
"""
global _project_hook_list
if _project_hook_list is None:
d = os.path.realpath(os.path.abspath(os.path.dirname(__file__)))
d = os.path.join(d, 'hooks')
_project_hook_list = [os.path.join(d, x) for x in os.listdir(d)]
return _project_hook_list
class DownloadedChange(object):
_commit_cache = None
def __init__(self, project, base, change_id, ps_id, commit):
self.project = project
self.base = base
self.change_id = change_id
self.ps_id = ps_id
self.commit = commit
@property
def commits(self):
if self._commit_cache is None:
self._commit_cache = self.project.bare_git.rev_list(
'--abbrev=8',
'--abbrev-commit',
'--pretty=oneline',
'--reverse',
'--date-order',
not_rev(self.base),
self.commit,
'--')
return self._commit_cache
class ReviewableBranch(object):
_commit_cache = None
def __init__(self, project, branch, base):
self.project = project
self.branch = branch
self.base = base
@property
def name(self):
return self.branch.name
@property
def commits(self):
if self._commit_cache is None:
self._commit_cache = self.project.bare_git.rev_list(
'--abbrev=8',
'--abbrev-commit',
'--pretty=oneline',
'--reverse',
'--date-order',
not_rev(self.base),
R_HEADS + self.name,
'--')
return self._commit_cache
@property
def unabbrev_commits(self):
r = dict()
for commit in self.project.bare_git.rev_list(
not_rev(self.base),
R_HEADS + self.name,
'--'):
r[commit[0:8]] = commit
return r
@property
def date(self):
return self.project.bare_git.log(
'--pretty=format:%cd',
'-n', '1',
R_HEADS + self.name,
'--')
def UploadForReview(self, people, auto_topic=False, draft=False, dest_branch=None):
self.project.UploadForReview(self.name,
people,
auto_topic=auto_topic,
draft=draft,
dest_branch=dest_branch)
def GetPublishedRefs(self):
refs = {}
output = self.project.bare_git.ls_remote(
self.branch.remote.SshReviewUrl(self.project.UserEmail),
'refs/changes/*')
for line in output.split('\n'):
try:
(sha, ref) = line.split()
refs[sha] = ref
except ValueError:
pass
return refs
class StatusColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'status')
self.project = self.printer('header', attr='bold')
self.branch = self.printer('header', attr='bold')
self.nobranch = self.printer('nobranch', fg='red')
self.important = self.printer('important', fg='red')
self.added = self.printer('added', fg='green')
self.changed = self.printer('changed', fg='red')
self.untracked = self.printer('untracked', fg='red')
class DiffColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'diff')
self.project = self.printer('header', attr='bold')
class _Annotation(object):
def __init__(self, name, value, keep):
self.name = name
self.value = value
self.keep = keep
class _CopyFile(object):
def __init__(self, src, dest, abssrc, absdest):
self.src = src
self.dest = dest
self.abs_src = abssrc
self.abs_dest = absdest
def _Copy(self):
src = self.abs_src
dest = self.abs_dest
# copy file if it does not exist or is out of date
if not os.path.exists(dest) or not filecmp.cmp(src, dest):
try:
# remove existing file first, since it might be read-only
if os.path.exists(dest):
os.remove(dest)
else:
dest_dir = os.path.dirname(dest)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
shutil.copy(src, dest)
# make the file read-only
mode = os.stat(dest)[stat.ST_MODE]
mode = mode & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
os.chmod(dest, mode)
except IOError:
_error('Cannot copy file %s to %s', src, dest)
class _LinkFile(object):
def __init__(self, git_worktree, src, dest, relsrc, absdest):
self.git_worktree = git_worktree
self.src = src
self.dest = dest
self.src_rel_to_dest = relsrc
self.abs_dest = absdest
def __linkIt(self, relSrc, absDest):
# link file if it does not exist or is out of date
if not os.path.islink(absDest) or (os.readlink(absDest) != relSrc):
try:
# remove existing file first, since it might be read-only
if os.path.lexists(absDest):
os.remove(absDest)
else:
dest_dir = os.path.dirname(absDest)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
os.symlink(relSrc, absDest)
except IOError:
_error('Cannot link file %s to %s', relSrc, absDest)
def _Link(self):
"""Link the self.rel_src_to_dest and self.abs_dest. Handles wild cards
on the src linking all of the files in the source in to the destination
directory.
"""
# We use the absSrc to handle the situation where the current directory
# is not the root of the repo
absSrc = os.path.join(self.git_worktree, self.src)
if os.path.exists(absSrc):
# Entity exists so just a simple one to one link operation
self.__linkIt(self.src_rel_to_dest, self.abs_dest)
else:
      # Entity doesn't exist; assume there is a wildcard
absDestDir = self.abs_dest
if os.path.exists(absDestDir) and not os.path.isdir(absDestDir):
_error('Link error: src with wildcard, %s must be a directory',
absDestDir)
else:
absSrcFiles = glob.glob(absSrc)
for absSrcFile in absSrcFiles:
          # Create a relative path from source dir to destination dir
absSrcDir = os.path.dirname(absSrcFile)
relSrcDir = os.path.relpath(absSrcDir, absDestDir)
# Get the source file name
srcFile = os.path.basename(absSrcFile)
          # Now form the final full paths to srcFile. They will be
          # absolute for the destination and relative for the source.
absDest = os.path.join(absDestDir, srcFile)
relSrc = os.path.join(relSrcDir, srcFile)
self.__linkIt(relSrc, absDest)
class RemoteSpec(object):
def __init__(self,
name,
url=None,
review=None,
revision=None):
self.name = name
self.url = url
self.review = review
self.revision = revision
class RepoHook(object):
"""A RepoHook contains information about a script to run as a hook.
Hooks are used to run a python script before running an upload (for instance,
to run presubmit checks). Eventually, we may have hooks for other actions.
This shouldn't be confused with files in the 'repo/hooks' directory. Those
files are copied into each '.git/hooks' folder for each project. Repo-level
hooks are associated instead with repo actions.
Hooks are always python. When a hook is run, we will load the hook into the
interpreter and execute its main() function.
"""
def __init__(self,
hook_type,
hooks_project,
topdir,
abort_if_user_denies=False):
"""RepoHook constructor.
Params:
hook_type: A string representing the type of hook. This is also used
to figure out the name of the file containing the hook. For
example: 'pre-upload'.
hooks_project: The project containing the repo hooks. If you have a
manifest, this is manifest.repo_hooks_project. OK if this is None,
which will make the hook a no-op.
topdir: Repo's top directory (the one containing the .repo directory).
Scripts will run with CWD as this directory. If you have a manifest,
this is manifest.topdir
abort_if_user_denies: If True, we'll throw a HookError() if the user
doesn't allow us to run the hook.
"""
self._hook_type = hook_type
self._hooks_project = hooks_project
self._topdir = topdir
self._abort_if_user_denies = abort_if_user_denies
# Store the full path to the script for convenience.
if self._hooks_project:
self._script_fullpath = os.path.join(self._hooks_project.worktree,
self._hook_type + '.py')
else:
self._script_fullpath = None
def _GetHash(self):
"""Return a hash of the contents of the hooks directory.
We'll just use git to do this. This hash has the property that if anything
    changes in the directory we will return a different hash.
SECURITY CONSIDERATION:
This hash only represents the contents of files in the hook directory, not
any other files imported or called by hooks. Changes to imported files
can change the script behavior without affecting the hash.
Returns:
A string representing the hash. This will always be ASCII so that it can
be printed to the user easily.
"""
assert self._hooks_project, "Must have hooks to calculate their hash."
# We will use the work_git object rather than just calling GetRevisionId().
# That gives us a hash of the latest checked in version of the files that
# the user will actually be executing. Specifically, GetRevisionId()
# doesn't appear to change even if a user checks out a different version
# of the hooks repo (via git checkout) nor if a user commits their own revs.
#
# NOTE: Local (non-committed) changes will not be factored into this hash.
# I think this is OK, since we're really only worried about warning the user
# about upstream changes.
return self._hooks_project.work_git.rev_parse('HEAD')
def _GetMustVerb(self):
"""Return 'must' if the hook is required; 'should' if not."""
if self._abort_if_user_denies:
return 'must'
else:
return 'should'
def _CheckForHookApproval(self):
"""Check to see whether this hook has been approved.
We'll look at the hash of all of the hooks. If this matches the hash that
the user last approved, we're done. If it doesn't, we'll ask the user
about approval.
Note that we ask permission for each individual hook even though we use
the hash of all hooks when detecting changes. We'd like the user to be
able to approve / deny each hook individually. We only use the hash of all
hooks because there is no other easy way to detect changes to local imports.
Returns:
True if this hook is approved to run; False otherwise.
Raises:
HookError: Raised if the user doesn't approve and abort_if_user_denies
        was passed to the constructor.
"""
hooks_config = self._hooks_project.config
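    # The user's approval is remembered in this project's git config; for the
    # pre-upload hook, for example, the key would be
    # 'repo.hooks.pre-upload.approvedhash'.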
git_approval_key = 'repo.hooks.%s.approvedhash' % self._hook_type
# Get the last hash that the user approved for this hook; may be None.
old_hash = hooks_config.GetString(git_approval_key)
# Get the current hash so we can tell if scripts changed since approval.
new_hash = self._GetHash()
if old_hash is not None:
# User previously approved hook and asked not to be prompted again.
if new_hash == old_hash:
# Approval matched. We're done.
return True
else:
# Give the user a reason why we're prompting, since they last told
# us to "never ask again".
prompt = 'WARNING: Scripts have changed since %s was allowed.\n\n' % (
self._hook_type)
else:
prompt = ''
    # Prompt the user only if we're on a tty; otherwise assume "no".
if sys.stdout.isatty():
prompt += ('Repo %s run the script:\n'
' %s\n'
'\n'
'Do you want to allow this script to run '
'(yes/yes-never-ask-again/NO)? ') % (
self._GetMustVerb(), self._script_fullpath)
response = input(prompt).lower()
print()
# User is doing a one-time approval.
if response in ('y', 'yes'):
return True
elif response == 'yes-never-ask-again':
hooks_config.SetString(git_approval_key, new_hash)
return True
# For anything else, we'll assume no approval.
if self._abort_if_user_denies:
raise HookError('You must allow the %s hook or use --no-verify.' %
self._hook_type)
return False
def _ExecuteHook(self, **kwargs):
"""Actually execute the given hook.
This will run the hook's 'main' function in our python interpreter.
Args:
kwargs: Keyword arguments to pass to the hook. These are often specific
to the hook type. For instance, pre-upload hooks will contain
a project_list.
"""
# Keep sys.path and CWD stashed away so that we can always restore them
# upon function exit.
orig_path = os.getcwd()
orig_syspath = sys.path
try:
# Always run hooks with CWD as topdir.
os.chdir(self._topdir)
# Put the hook dir as the first item of sys.path so hooks can do
# relative imports. We want to replace the repo dir as [0] so
# hooks can't import repo files.
sys.path = [os.path.dirname(self._script_fullpath)] + sys.path[1:]
# Exec, storing global context in the context dict. We catch exceptions
# and convert to a HookError w/ just the failing traceback.
context = {}
try:
exec(compile(open(self._script_fullpath).read(),
self._script_fullpath, 'exec'), context)
except Exception:
raise HookError('%s\nFailed to import %s hook; see traceback above.' % (
traceback.format_exc(), self._hook_type))
# Running the script should have defined a main() function.
if 'main' not in context:
raise HookError('Missing main() in: "%s"' % self._script_fullpath)
# Add 'hook_should_take_kwargs' to the arguments to be passed to main.
# We don't actually want hooks to define their main with this argument--
# it's there to remind them that their hook should always take **kwargs.
# For instance, a pre-upload hook should be defined like:
# def main(project_list, **kwargs):
#
# This allows us to later expand the API without breaking old hooks.
kwargs = kwargs.copy()
kwargs['hook_should_take_kwargs'] = True
# Call the main function in the hook. If the hook should cause the
      # build to fail, it will raise an Exception. We'll catch that and convert
# to a HookError w/ just the failing traceback.
try:
context['main'](**kwargs)
except Exception:
raise HookError('%s\nFailed to run main() for %s hook; see traceback '
'above.' % (
traceback.format_exc(), self._hook_type))
finally:
# Restore sys.path and CWD.
sys.path = orig_syspath
os.chdir(orig_path)
def Run(self, user_allows_all_hooks, **kwargs):
"""Run the hook.
If the hook doesn't exist (because there is no hooks project or because
this particular hook is not enabled), this is a no-op.
Args:
user_allows_all_hooks: If True, we will never prompt about running the
hook--we'll just assume it's OK to run it.
kwargs: Keyword arguments to pass to the hook. These are often specific
to the hook type. For instance, pre-upload hooks will contain
a project_list.
Raises:
HookError: If there was a problem finding the hook or the user declined
to run a required hook (from _CheckForHookApproval).
"""
# No-op if there is no hooks project or if hook is disabled.
if ((not self._hooks_project) or
(self._hook_type not in self._hooks_project.enabled_repo_hooks)):
return
# Bail with a nice error if we can't find the hook.
if not os.path.isfile(self._script_fullpath):
raise HookError('Couldn\'t find repo hook: "%s"' % self._script_fullpath)
# Make sure the user is OK with running the hook.
if (not user_allows_all_hooks) and (not self._CheckForHookApproval()):
return
# Run the hook with the same version of python we're using.
self._ExecuteHook(**kwargs)
class Project(object):
# These objects can be shared between several working trees.
shareable_files = ['description', 'info']
shareable_dirs = ['hooks', 'objects', 'rr-cache', 'svn']
# These objects can only be used by a single working tree.
working_tree_files = ['config', 'packed-refs', 'shallow']
working_tree_dirs = ['logs', 'refs']
def __init__(self,
manifest,
name,
remote,
gitdir,
objdir,
worktree,
relpath,
revisionExpr,
revisionId,
rebase=True,
groups=None,
sync_c=False,
sync_s=False,
clone_depth=None,
upstream=None,
parent=None,
is_derived=False,
dest_branch=None,
optimized_fetch=False,
old_revision=None):
"""Init a Project object.
Args:
manifest: The XmlManifest object.
name: The `name` attribute of manifest.xml's project element.
remote: RemoteSpec object specifying its remote's properties.
gitdir: Absolute path of git directory.
objdir: Absolute path of directory to store git objects.
worktree: Absolute path of git working tree.
relpath: Relative path of git working tree to repo's top directory.
revisionExpr: The `revision` attribute of manifest.xml's project element.
revisionId: git commit id for checking out.
rebase: The `rebase` attribute of manifest.xml's project element.
groups: The `groups` attribute of manifest.xml's project element.
sync_c: The `sync-c` attribute of manifest.xml's project element.
sync_s: The `sync-s` attribute of manifest.xml's project element.
upstream: The `upstream` attribute of manifest.xml's project element.
parent: The parent Project object.
is_derived: False if the project was explicitly defined in the manifest;
True if the project is a discovered submodule.
dest_branch: The branch to which to push changes for review by default.
optimized_fetch: If True, when a project is set to a sha1 revision, only
fetch from the remote if the sha1 is not present locally.
old_revision: saved git commit id for open GITC projects.
"""
self.manifest = manifest
self.name = name
self.remote = remote
self.gitdir = gitdir.replace('\\', '/')
self.objdir = objdir.replace('\\', '/')
if worktree:
self.worktree = worktree.replace('\\', '/')
else:
self.worktree = None
self.relpath = relpath
self.revisionExpr = revisionExpr
if revisionId is None \
and revisionExpr \
and IsId(revisionExpr):
self.revisionId = revisionExpr
else:
self.revisionId = revisionId
self.rebase = rebase
self.groups = groups
self.sync_c = sync_c
self.sync_s = sync_s
self.clone_depth = clone_depth
self.upstream = upstream
self.parent = parent
self.is_derived = is_derived
self.optimized_fetch = optimized_fetch
self.subprojects = []
self.snapshots = {}
self.copyfiles = []
self.linkfiles = []
self.annotations = []
self.config = GitConfig.ForRepository(
gitdir=self.gitdir,
defaults=self.manifest.globalConfig)
if self.worktree:
self.work_git = self._GitGetByExec(self, bare=False, gitdir=gitdir)
else:
self.work_git = None
self.bare_git = self._GitGetByExec(self, bare=True, gitdir=gitdir)
self.bare_ref = GitRefs(gitdir)
self.bare_objdir = self._GitGetByExec(self, bare=True, gitdir=objdir)
self.dest_branch = dest_branch
self.old_revision = old_revision
# This will be filled in if a project is later identified to be the
# project containing repo hooks.
self.enabled_repo_hooks = []
@property
def Derived(self):
return self.is_derived
@property
def Exists(self):
return os.path.isdir(self.gitdir) and os.path.isdir(self.objdir)
@property
def CurrentBranch(self):
"""Obtain the name of the currently checked out branch.
The branch name omits the 'refs/heads/' prefix.
None is returned if the project is on a detached HEAD.
"""
b = self.work_git.GetHead()
if b.startswith(R_HEADS):
return b[len(R_HEADS):]
return None
def IsRebaseInProgress(self):
w = self.worktree
g = os.path.join(w, '.git')
return os.path.exists(os.path.join(g, 'rebase-apply')) \
or os.path.exists(os.path.join(g, 'rebase-merge')) \
or os.path.exists(os.path.join(w, '.dotest'))
def IsDirty(self, consider_untracked=True):
"""Is the working directory modified in some way?
"""
self.work_git.update_index('-q',
'--unmerged',
'--ignore-missing',
'--refresh')
if self.work_git.DiffZ('diff-index', '-M', '--cached', HEAD):
return True
if self.work_git.DiffZ('diff-files'):
return True
if consider_untracked and self.work_git.LsOthers():
return True
return False
_userident_name = None
_userident_email = None
@property
def UserName(self):
"""Obtain the user's personal name.
"""
if self._userident_name is None:
self._LoadUserIdentity()
return self._userident_name
@property
def UserEmail(self):
"""Obtain the user's email address. This is very likely
to be their Gerrit login.
"""
if self._userident_email is None:
self._LoadUserIdentity()
return self._userident_email
def _LoadUserIdentity(self):
u = self.bare_git.var('GIT_COMMITTER_IDENT')
m = re.compile("^(.*) <([^>]*)> ").match(u)
if m:
self._userident_name = m.group(1)
self._userident_email = m.group(2)
else:
self._userident_name = ''
self._userident_email = ''
def GetRemote(self, name):
"""Get the configuration for a single remote.
"""
return self.config.GetRemote(name)
def GetBranch(self, name):
"""Get the configuration for a single branch.
"""
return self.config.GetBranch(name)
def GetBranches(self):
"""Get all existing local branches.
"""
current = self.CurrentBranch
all_refs = self._allrefs
heads = {}
for name, ref_id in all_refs.items():
if name.startswith(R_HEADS):
name = name[len(R_HEADS):]
b = self.GetBranch(name)
b.current = name == current
b.published = None
b.revision = ref_id
heads[name] = b
for name, ref_id in all_refs.items():
if name.startswith(R_PUB):
name = name[len(R_PUB):]
b = heads.get(name)
if b:
b.published = ref_id
return heads
def MatchesGroups(self, manifest_groups):
"""Returns true if the manifest groups specified at init should cause
this project to be synced.
Prefixing a manifest group with "-" inverts the meaning of a group.
All projects are implicitly labelled with "all".
    Labels are resolved in order. In the example case of
project_groups: "all,group1,group2"
manifest_groups: "-group1,group2"
the project will be matched.
The special manifest group "default" will match any project that
    does not have the special project group "notdefault".
"""
expanded_manifest_groups = manifest_groups or ['default']
expanded_project_groups = ['all'] + (self.groups or [])
if not 'notdefault' in expanded_project_groups:
expanded_project_groups += ['default']
matched = False
for group in expanded_manifest_groups:
if group.startswith('-') and group[1:] in expanded_project_groups:
matched = False
elif group in expanded_project_groups:
matched = True
return matched
## Status Display ##
def UncommitedFiles(self, get_all=True):
"""Returns a list of strings, uncommitted files in the git tree.
Args:
get_all: a boolean, if True - get information about all different
uncommitted files. If False - return as soon as any kind of
          uncommitted file is detected.
"""
details = []
self.work_git.update_index('-q',
'--unmerged',
'--ignore-missing',
'--refresh')
if self.IsRebaseInProgress():
details.append("rebase in progress")
if not get_all:
return details
changes = self.work_git.DiffZ('diff-index', '--cached', HEAD).keys()
if changes:
details.extend(changes)
if not get_all:
return details
changes = self.work_git.DiffZ('diff-files').keys()
if changes:
details.extend(changes)
if not get_all:
return details
changes = self.work_git.LsOthers()
if changes:
details.extend(changes)
return details
def HasChanges(self):
"""Returns true if there are uncommitted changes.
"""
if self.UncommitedFiles(get_all=False):
return True
else:
return False
def PrintWorkTreeStatus(self, output_redir=None):
"""Prints the status of the repository to stdout.
Args:
      output_redir: If specified, redirect the output to this object.
"""
if not os.path.isdir(self.worktree):
if output_redir == None:
output_redir = sys.stdout
print(file=output_redir)
print('project %s/' % self.relpath, file=output_redir)
print(' missing (run "repo sync")', file=output_redir)
return
self.work_git.update_index('-q',
'--unmerged',
'--ignore-missing',
'--refresh')
rb = self.IsRebaseInProgress()
di = self.work_git.DiffZ('diff-index', '-M', '--cached', HEAD)
df = self.work_git.DiffZ('diff-files')
do = self.work_git.LsOthers()
if not rb and not di and not df and not do and not self.CurrentBranch:
return 'CLEAN'
out = StatusColoring(self.config)
if not output_redir == None:
out.redirect(output_redir)
out.project('project %-40s', self.relpath + '/ ')
branch = self.CurrentBranch
if branch is None:
out.nobranch('(*** NO BRANCH ***)')
else:
out.branch('branch %s', branch)
out.nl()
if rb:
out.important('prior sync failed; rebase still in progress')
out.nl()
paths = list()
paths.extend(di.keys())
paths.extend(df.keys())
paths.extend(do)
for p in sorted(set(paths)):
try:
i = di[p]
except KeyError:
i = None
try:
f = df[p]
except KeyError:
f = None
if i:
i_status = i.status.upper()
else:
i_status = '-'
if f:
f_status = f.status.lower()
else:
f_status = '-'
if i and i.src_path:
line = ' %s%s\t%s => %s (%s%%)' % (i_status, f_status,
i.src_path, p, i.level)
else:
line = ' %s%s\t%s' % (i_status, f_status, p)
if i and not f:
out.added('%s', line)
elif (i and f) or (not i and f):
out.changed('%s', line)
elif not i and not f:
out.untracked('%s', line)
else:
out.write('%s', line)
out.nl()
return 'DIRTY'
def PrintWorkTreeDiff(self, absolute_paths=False):
"""Prints the status of the repository to stdout.
"""
out = DiffColoring(self.config)
cmd = ['diff']
if out.is_on:
cmd.append('--color')
cmd.append(HEAD)
if absolute_paths:
cmd.append('--src-prefix=a/%s/' % self.relpath)
cmd.append('--dst-prefix=b/%s/' % self.relpath)
cmd.append('--')
p = GitCommand(self,
cmd,
capture_stdout=True,
capture_stderr=True)
has_diff = False
for line in p.process.stdout:
if not has_diff:
out.nl()
out.project('project %s/' % self.relpath)
out.nl()
has_diff = True
print(line[:-1])
p.Wait()
## Publish / Upload ##
def WasPublished(self, branch, all_refs=None):
"""Was the branch published (uploaded) for code review?
If so, returns the SHA-1 hash of the last published
state for the branch.
"""
key = R_PUB + branch
if all_refs is None:
try:
return self.bare_git.rev_parse(key)
except GitError:
return None
else:
try:
return all_refs[key]
except KeyError:
return None
def CleanPublishedCache(self, all_refs=None):
"""Prunes any stale published refs.
"""
if all_refs is None:
all_refs = self._allrefs
heads = set()
canrm = {}
for name, ref_id in all_refs.items():
if name.startswith(R_HEADS):
heads.add(name)
elif name.startswith(R_PUB):
canrm[name] = ref_id
for name, ref_id in canrm.items():
n = name[len(R_PUB):]
if R_HEADS + n not in heads:
self.bare_git.DeleteRef(name, ref_id)
def GetUploadableBranches(self, selected_branch=None):
"""List any branches which can be uploaded for review.
"""
heads = {}
pubed = {}
for name, ref_id in self._allrefs.items():
if name.startswith(R_HEADS):
heads[name[len(R_HEADS):]] = ref_id
elif name.startswith(R_PUB):
pubed[name[len(R_PUB):]] = ref_id
ready = []
for branch, ref_id in heads.items():
if branch in pubed and pubed[branch] == ref_id:
continue
if selected_branch and branch != selected_branch:
continue
rb = self.GetUploadableBranch(branch)
if rb:
ready.append(rb)
return ready
def GetUploadableBranch(self, branch_name):
"""Get a single uploadable branch, or None.
"""
branch = self.GetBranch(branch_name)
base = branch.LocalMerge
if branch.LocalMerge:
rb = ReviewableBranch(self, branch, base)
if rb.commits:
return rb
return None
def UploadForReview(self, branch=None,
people=([], []),
auto_topic=False,
draft=False,
dest_branch=None):
"""Uploads the named branch for code review.
"""
if branch is None:
branch = self.CurrentBranch
if branch is None:
raise GitError('not currently on a branch')
branch = self.GetBranch(branch)
if not branch.LocalMerge:
raise GitError('branch %s does not track a remote' % branch.name)
if not branch.remote.review:
raise GitError('remote %s has no review url' % branch.remote.name)
if dest_branch is None:
dest_branch = self.dest_branch
if dest_branch is None:
dest_branch = branch.merge
if not dest_branch.startswith(R_HEADS):
dest_branch = R_HEADS + dest_branch
if not branch.remote.projectname:
branch.remote.projectname = self.name
branch.remote.Save()
url = branch.remote.ReviewUrl(self.UserEmail)
if url is None:
raise UploadError('review not configured')
cmd = ['push']
if url.startswith('ssh://'):
rp = ['gerrit receive-pack']
for e in people[0]:
rp.append('--reviewer=%s' % sq(e))
for e in people[1]:
rp.append('--cc=%s' % sq(e))
cmd.append('--receive-pack=%s' % " ".join(rp))
cmd.append(url)
if dest_branch.startswith(R_HEADS):
dest_branch = dest_branch[len(R_HEADS):]
upload_type = 'for'
if draft:
upload_type = 'drafts'
ref_spec = '%s:refs/%s/%s' % (R_HEADS + branch.name, upload_type,
dest_branch)
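    # For example (illustrative branch names), uploading local branch
    # 'mytopic' to dest_branch 'master' pushes
    # 'refs/heads/mytopic:refs/for/master', or '...:refs/drafts/master'
    # when draft=True.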
if auto_topic:
ref_spec = ref_spec + '/' + branch.name
if not url.startswith('ssh://'):
rp = ['r=%s' % p for p in people[0]] + \
['cc=%s' % p for p in people[1]]
if rp:
ref_spec = ref_spec + '%' + ','.join(rp)
cmd.append(ref_spec)
if GitCommand(self, cmd, bare=True).Wait() != 0:
raise UploadError('Upload failed')
msg = "posted to %s for %s" % (branch.remote.review, dest_branch)
self.bare_git.UpdateRef(R_PUB + branch.name,
R_HEADS + branch.name,
message=msg)
## Sync ##
def _ExtractArchive(self, tarpath, path=None):
"""Extract the given tar on its current location
Args:
- tarpath: The path to the actual tar file
"""
try:
with tarfile.open(tarpath, 'r') as tar:
tar.extractall(path=path)
return True
except (IOError, tarfile.TarError) as e:
_error("Cannot extract archive %s: %s", tarpath, str(e))
return False
def Sync_NetworkHalf(self,
quiet=False,
is_new=None,
current_branch_only=False,
force_sync=False,
clone_bundle=True,
no_tags=False,
archive=False,
optimized_fetch=False,
prune=False):
"""Perform only the network IO portion of the sync process.
Local working directory/branch state is not affected.
"""
if archive and not isinstance(self, MetaProject):
if self.remote.url.startswith(('http://', 'https://')):
_error("%s: Cannot fetch archives from http/https remotes.", self.name)
return False
name = self.relpath.replace('\\', '/')
name = name.replace('/', '_')
tarpath = '%s.tar' % name
topdir = self.manifest.topdir
try:
self._FetchArchive(tarpath, cwd=topdir)
except GitError as e:
_error('%s', e)
return False
# From now on, we only need absolute tarpath
tarpath = os.path.join(topdir, tarpath)
if not self._ExtractArchive(tarpath, path=topdir):
return False
try:
os.remove(tarpath)
except OSError as e:
_warn("Cannot remove archive %s: %s", tarpath, str(e))
self._CopyAndLinkFiles()
return True
if is_new is None:
is_new = not self.Exists
if is_new:
self._InitGitDir(force_sync=force_sync)
else:
self._UpdateHooks()
self._InitRemote()
if is_new:
alt = os.path.join(self.gitdir, 'objects/info/alternates')
try:
fd = open(alt, 'rb')
try:
alt_dir = fd.readline().rstrip()
finally:
fd.close()
except IOError:
alt_dir = None
else:
alt_dir = None
if clone_bundle \
and alt_dir is None \
and self._ApplyCloneBundle(initial=is_new, quiet=quiet):
is_new = False
if not current_branch_only:
if self.sync_c:
current_branch_only = True
elif not self.manifest._loaded:
# Manifest cannot check defaults until it syncs.
current_branch_only = False
elif self.manifest.default.sync_c:
current_branch_only = True
need_to_fetch = not (optimized_fetch and \
(ID_RE.match(self.revisionExpr) and self._CheckForSha1()))
if (need_to_fetch
and not self._RemoteFetch(initial=is_new, quiet=quiet, alt_dir=alt_dir,
current_branch_only=current_branch_only,
no_tags=no_tags, prune=prune)):
return False
if self.worktree:
self._InitMRef()
else:
self._InitMirrorHead()
try:
os.remove(os.path.join(self.gitdir, 'FETCH_HEAD'))
except OSError:
pass
return True
def PostRepoUpgrade(self):
self._InitHooks()
def _CopyAndLinkFiles(self):
if self.manifest.isGitcClient:
return
for copyfile in self.copyfiles:
copyfile._Copy()
for linkfile in self.linkfiles:
linkfile._Link()
def GetCommitRevisionId(self):
"""Get revisionId of a commit.
Use this method instead of GetRevisionId to get the id of the commit rather
than the id of the current git object (for example, a tag)
"""
if not self.revisionExpr.startswith(R_TAGS):
return self.GetRevisionId(self._allrefs)
try:
return self.bare_git.rev_list(self.revisionExpr, '-1')[0]
except GitError:
raise ManifestInvalidRevisionError(
'revision %s in %s not found' % (self.revisionExpr,
self.name))
def GetRevisionId(self, all_refs=None):
if self.revisionId:
return self.revisionId
rem = self.GetRemote(self.remote.name)
rev = rem.ToLocal(self.revisionExpr)
if all_refs is not None and rev in all_refs:
return all_refs[rev]
try:
return self.bare_git.rev_parse('--verify', '%s^0' % rev)
except GitError:
raise ManifestInvalidRevisionError(
'revision %s in %s not found' % (self.revisionExpr,
self.name))
def Sync_LocalHalf(self, syncbuf, force_sync=False):
"""Perform only the local IO portion of the sync process.
Network access is not required.
"""
self._InitWorkTree(force_sync=force_sync)
all_refs = self.bare_ref.all
self.CleanPublishedCache(all_refs)
revid = self.GetRevisionId(all_refs)
def _doff():
self._FastForward(revid)
self._CopyAndLinkFiles()
head = self.work_git.GetHead()
if head.startswith(R_HEADS):
branch = head[len(R_HEADS):]
try:
head = all_refs[head]
except KeyError:
head = None
else:
branch = None
if branch is None or syncbuf.detach_head:
# Currently on a detached HEAD. The user is assumed to
# not have any local modifications worth worrying about.
#
if self.IsRebaseInProgress():
syncbuf.fail(self, _PriorSyncFailedError())
return
if head == revid:
# No changes; don't do anything further.
# Except if the head needs to be detached
#
if not syncbuf.detach_head:
# The copy/linkfile config may have changed.
self._CopyAndLinkFiles()
return
else:
lost = self._revlist(not_rev(revid), HEAD)
if lost:
syncbuf.info(self, "discarding %d commits", len(lost))
try:
self._Checkout(revid, quiet=True)
except GitError as e:
syncbuf.fail(self, e)
return
self._CopyAndLinkFiles()
return
if head == revid:
# No changes; don't do anything further.
#
# The copy/linkfile config may have changed.
self._CopyAndLinkFiles()
return
branch = self.GetBranch(branch)
if not branch.LocalMerge:
# The current branch has no tracking configuration.
# Jump off it to a detached HEAD.
#
syncbuf.info(self,
"leaving %s; does not track upstream",
branch.name)
try:
self._Checkout(revid, quiet=True)
except GitError as e:
syncbuf.fail(self, e)
return
self._CopyAndLinkFiles()
return
upstream_gain = self._revlist(not_rev(HEAD), revid)
pub = self.WasPublished(branch.name, all_refs)
if pub:
not_merged = self._revlist(not_rev(revid), pub)
if not_merged:
if upstream_gain:
# The user has published this branch and some of those
# commits are not yet merged upstream. We do not want
# to rewrite the published commits so we punt.
#
syncbuf.fail(self,
"branch %s is published (but not merged) and is now %d commits behind"
% (branch.name, len(upstream_gain)))
return
elif pub == head:
# All published commits are merged, and thus we are a
# strict subset. We can fast-forward safely.
#
syncbuf.later1(self, _doff)
return
# Examine the local commits not in the remote. Find the
# last one attributed to this user, if any.
#
local_changes = self._revlist(not_rev(revid), HEAD, format='%H %ce')
last_mine = None
cnt_mine = 0
for commit in local_changes:
commit_id, committer_email = commit.decode('utf-8').split(' ', 1)
if committer_email == self.UserEmail:
last_mine = commit_id
cnt_mine += 1
if not upstream_gain and cnt_mine == len(local_changes):
return
if self.IsDirty(consider_untracked=False):
syncbuf.fail(self, _DirtyError())
return
# If the upstream switched on us, warn the user.
#
if branch.merge != self.revisionExpr:
if branch.merge and self.revisionExpr:
syncbuf.info(self,
'manifest switched %s...%s',
branch.merge,
self.revisionExpr)
elif branch.merge:
syncbuf.info(self,
'manifest no longer tracks %s',
branch.merge)
if cnt_mine < len(local_changes):
# Upstream rebased. Not everything in HEAD
# was created by this user.
#
syncbuf.info(self,
"discarding %d commits removed from upstream",
len(local_changes) - cnt_mine)
branch.remote = self.GetRemote(self.remote.name)
if not ID_RE.match(self.revisionExpr):
# in case of manifest sync the revisionExpr might be a SHA1
branch.merge = self.revisionExpr
if not branch.merge.startswith('refs/'):
branch.merge = R_HEADS + branch.merge
branch.Save()
if cnt_mine > 0 and self.rebase:
def _dorebase():
self._Rebase(upstream='%s^1' % last_mine, onto=revid)
self._CopyAndLinkFiles()
syncbuf.later2(self, _dorebase)
elif local_changes:
try:
self._ResetHard(revid)
self._CopyAndLinkFiles()
except GitError as e:
syncbuf.fail(self, e)
return
else:
syncbuf.later1(self, _doff)
def AddCopyFile(self, src, dest, absdest):
# dest should already be an absolute path, but src is project relative
# make src an absolute path
abssrc = os.path.join(self.worktree, src)
self.copyfiles.append(_CopyFile(src, dest, abssrc, absdest))
def AddLinkFile(self, src, dest, absdest):
# dest should already be an absolute path, but src is project relative
# make src relative path to dest
absdestdir = os.path.dirname(absdest)
relsrc = os.path.relpath(os.path.join(self.worktree, src), absdestdir)
self.linkfiles.append(_LinkFile(self.worktree, src, dest, relsrc, absdest))
def AddAnnotation(self, name, value, keep):
self.annotations.append(_Annotation(name, value, keep))
def DownloadPatchSet(self, change_id, patch_id):
"""Download a single patch set of a single change to FETCH_HEAD.
"""
remote = self.GetRemote(self.remote.name)
cmd = ['fetch', remote.name]
cmd.append('refs/changes/%2.2d/%d/%d' \
% (change_id % 100, change_id, patch_id))
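    # For example, change 12345 patch set 6 maps to 'refs/changes/45/12345/6'.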
if GitCommand(self, cmd, bare=True).Wait() != 0:
return None
return DownloadedChange(self,
self.GetRevisionId(),
change_id,
patch_id,
self.bare_git.rev_parse('FETCH_HEAD'))
## Branch Management ##
def StartBranch(self, name, branch_merge=''):
"""Create a new branch off the manifest's revision.
"""
if not branch_merge:
branch_merge = self.revisionExpr
head = self.work_git.GetHead()
if head == (R_HEADS + name):
return True
all_refs = self.bare_ref.all
if R_HEADS + name in all_refs:
return GitCommand(self,
['checkout', name, '--'],
capture_stdout=True,
capture_stderr=True).Wait() == 0
branch = self.GetBranch(name)
branch.remote = self.GetRemote(self.remote.name)
branch.merge = branch_merge
if not branch.merge.startswith('refs/') and not ID_RE.match(branch_merge):
branch.merge = R_HEADS + branch_merge
revid = self.GetRevisionId(all_refs)
if head.startswith(R_HEADS):
try:
head = all_refs[head]
except KeyError:
head = None
if revid and head and revid == head:
ref = os.path.join(self.gitdir, R_HEADS + name)
try:
os.makedirs(os.path.dirname(ref))
except OSError:
pass
_lwrite(ref, '%s\n' % revid)
_lwrite(os.path.join(self.worktree, '.git', HEAD),
'ref: %s%s\n' % (R_HEADS, name))
branch.Save()
return True
if GitCommand(self,
['checkout', '-b', branch.name, revid],
capture_stdout=True,
capture_stderr=True).Wait() == 0:
branch.Save()
return True
return False
def CheckoutBranch(self, name):
"""Checkout a local topic branch.
Args:
name: The name of the branch to checkout.
Returns:
True if the checkout succeeded; False if it didn't; None if the branch
didn't exist.
"""
rev = R_HEADS + name
head = self.work_git.GetHead()
if head == rev:
# Already on the branch
#
return True
all_refs = self.bare_ref.all
try:
revid = all_refs[rev]
except KeyError:
# Branch does not exist in this project
#
return None
if head.startswith(R_HEADS):
try:
head = all_refs[head]
except KeyError:
head = None
if head == revid:
# Same revision; just update HEAD to point to the new
# target branch, but otherwise take no other action.
#
_lwrite(os.path.join(self.worktree, '.git', HEAD),
'ref: %s%s\n' % (R_HEADS, name))
return True
return GitCommand(self,
['checkout', name, '--'],
capture_stdout=True,
capture_stderr=True).Wait() == 0
def AbandonBranch(self, name):
"""Destroy a local topic branch.
Args:
name: The name of the branch to abandon.
Returns:
True if the abandon succeeded; False if it didn't; None if the branch
didn't exist.
"""
rev = R_HEADS + name
all_refs = self.bare_ref.all
if rev not in all_refs:
# Doesn't exist
return None
head = self.work_git.GetHead()
if head == rev:
# We can't destroy the branch while we are sitting
# on it. Switch to a detached HEAD.
#
head = all_refs[head]
revid = self.GetRevisionId(all_refs)
if head == revid:
_lwrite(os.path.join(self.worktree, '.git', HEAD),
'%s\n' % revid)
else:
self._Checkout(revid, quiet=True)
return GitCommand(self,
['branch', '-D', name],
capture_stdout=True,
capture_stderr=True).Wait() == 0
def PruneHeads(self):
"""Prune any topic branches already merged into upstream.
"""
cb = self.CurrentBranch
kill = []
left = self._allrefs
for name in left.keys():
if name.startswith(R_HEADS):
name = name[len(R_HEADS):]
if cb is None or name != cb:
kill.append(name)
rev = self.GetRevisionId(left)
if cb is not None \
and not self._revlist(HEAD + '...' + rev) \
and not self.IsDirty(consider_untracked=False):
self.work_git.DetachHead(HEAD)
kill.append(cb)
if kill:
old = self.bare_git.GetHead()
if old is None:
old = 'refs/heads/please_never_use_this_as_a_branch_name'
try:
self.bare_git.DetachHead(rev)
b = ['branch', '-d']
b.extend(kill)
b = GitCommand(self, b, bare=True,
capture_stdout=True,
capture_stderr=True)
b.Wait()
finally:
self.bare_git.SetHead(old)
left = self._allrefs
for branch in kill:
if (R_HEADS + branch) not in left:
self.CleanPublishedCache()
break
if cb and cb not in kill:
kill.append(cb)
kill.sort()
kept = []
for branch in kill:
if R_HEADS + branch in left:
branch = self.GetBranch(branch)
base = branch.LocalMerge
if not base:
base = rev
kept.append(ReviewableBranch(self, branch, base))
return kept
## Submodule Management ##
def GetRegisteredSubprojects(self):
result = []
def rec(subprojects):
if not subprojects:
return
result.extend(subprojects)
for p in subprojects:
rec(p.subprojects)
rec(self.subprojects)
return result
def _GetSubmodules(self):
# Unfortunately we cannot call `git submodule status --recursive` here
# because the working tree might not exist yet, and it cannot be used
# without a working tree in its current implementation.
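    # Instead, the helpers below read .gitmodules straight from the bare
    # object store and use `git ls-tree` to recover each submodule's pinned
    # revision, returning (rev, path, url) tuples.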
def get_submodules(gitdir, rev):
# Parse .gitmodules for submodule sub_paths and sub_urls
sub_paths, sub_urls = parse_gitmodules(gitdir, rev)
if not sub_paths:
return []
      # Run `git ls-tree` to read the SHAs of the submodule objects, which
      # happen to be the pinned revisions of the submodule repositories.
sub_revs = git_ls_tree(gitdir, rev, sub_paths)
submodules = []
for sub_path, sub_url in zip(sub_paths, sub_urls):
try:
sub_rev = sub_revs[sub_path]
except KeyError:
          # Ignore non-existent submodules
continue
submodules.append((sub_rev, sub_path, sub_url))
return submodules
re_path = re.compile(r'^submodule\.([^.]+)\.path=(.*)$')
re_url = re.compile(r'^submodule\.([^.]+)\.url=(.*)$')
def parse_gitmodules(gitdir, rev):
cmd = ['cat-file', 'blob', '%s:.gitmodules' % rev]
try:
p = GitCommand(None, cmd, capture_stdout=True, capture_stderr=True,
bare=True, gitdir=gitdir)
except GitError:
return [], []
if p.Wait() != 0:
return [], []
gitmodules_lines = []
fd, temp_gitmodules_path = tempfile.mkstemp()
try:
os.write(fd, p.stdout)
os.close(fd)
cmd = ['config', '--file', temp_gitmodules_path, '--list']
p = GitCommand(None, cmd, capture_stdout=True, capture_stderr=True,
bare=True, gitdir=gitdir)
if p.Wait() != 0:
return [], []
gitmodules_lines = p.stdout.split('\n')
except GitError:
return [], []
finally:
os.remove(temp_gitmodules_path)
names = set()
paths = {}
urls = {}
for line in gitmodules_lines:
if not line:
continue
m = re_path.match(line)
if m:
names.add(m.group(1))
paths[m.group(1)] = m.group(2)
continue
m = re_url.match(line)
if m:
names.add(m.group(1))
urls[m.group(1)] = m.group(2)
continue
names = sorted(names)
return ([paths.get(name, '') for name in names],
[urls.get(name, '') for name in names])
def git_ls_tree(gitdir, rev, paths):
cmd = ['ls-tree', rev, '--']
cmd.extend(paths)
try:
p = GitCommand(None, cmd, capture_stdout=True, capture_stderr=True,
bare=True, gitdir=gitdir)
except GitError:
return []
if p.Wait() != 0:
return []
objects = {}
for line in p.stdout.split('\n'):
if not line.strip():
continue
object_rev, object_path = line.split()[2:4]
objects[object_path] = object_rev
return objects
try:
rev = self.GetRevisionId()
except GitError:
return []
return get_submodules(self.gitdir, rev)
def GetDerivedSubprojects(self):
result = []
if not self.Exists:
      # If the git repo does not exist yet, querying its submodules will
      # mess up its state, so return here.
return result
for rev, path, url in self._GetSubmodules():
name = self.manifest.GetSubprojectName(self, path)
relpath, worktree, gitdir, objdir = \
self.manifest.GetSubprojectPaths(self, name, path)
project = self.manifest.paths.get(relpath)
if project:
result.extend(project.GetDerivedSubprojects())
continue
remote = RemoteSpec(self.remote.name,
url=url,
review=self.remote.review,
revision=self.remote.revision)
subproject = Project(manifest=self.manifest,
name=name,
remote=remote,
gitdir=gitdir,
objdir=objdir,
worktree=worktree,
relpath=relpath,
revisionExpr=self.revisionExpr,
revisionId=rev,
rebase=self.rebase,
groups=self.groups,
sync_c=self.sync_c,
sync_s=self.sync_s,
parent=self,
is_derived=True)
result.append(subproject)
result.extend(subproject.GetDerivedSubprojects())
return result
## Direct Git Commands ##
def _CheckForSha1(self):
try:
      # if the revision (sha or tag) is not present then the following
      # call throws an error.
self.bare_git.rev_parse('--verify', '%s^0' % self.revisionExpr)
return True
except GitError:
# There is no such persistent revision. We have to fetch it.
return False
def _FetchArchive(self, tarpath, cwd=None):
cmd = ['archive', '-v', '-o', tarpath]
cmd.append('--remote=%s' % self.remote.url)
cmd.append('--prefix=%s/' % self.relpath)
cmd.append(self.revisionExpr)
command = GitCommand(self, cmd, cwd=cwd,
capture_stdout=True,
capture_stderr=True)
if command.Wait() != 0:
raise GitError('git archive %s: %s' % (self.name, command.stderr))
def _RemoteFetch(self, name=None,
current_branch_only=False,
initial=False,
quiet=False,
alt_dir=None,
no_tags=False,
prune=False):
is_sha1 = False
tag_name = None
depth = None
# The depth should not be used when fetching to a mirror because
# it will result in a shallow repository that cannot be cloned or
# fetched from.
if not self.manifest.IsMirror:
if self.clone_depth:
depth = self.clone_depth
else:
depth = self.manifest.manifestProject.config.GetString('repo.depth')
# The repo project should never be synced with partial depth
if self.relpath == '.repo/repo':
depth = None
if depth:
current_branch_only = True
if ID_RE.match(self.revisionExpr) is not None:
is_sha1 = True
if current_branch_only:
if self.revisionExpr.startswith(R_TAGS):
# this is a tag and its sha1 value should never change
tag_name = self.revisionExpr[len(R_TAGS):]
if is_sha1 or tag_name is not None:
if self._CheckForSha1():
return True
if is_sha1 and not depth:
# When syncing a specific commit and --depth is not set:
# * if upstream is explicitly specified and is not a sha1, fetch only
        #   upstream as users expect only upstream to be fetched.
# Note: The commit might not be in upstream in which case the sync
# will fail.
# * otherwise, fetch all branches to make sure we end up with the
# specific commit.
current_branch_only = self.upstream and not ID_RE.match(self.upstream)
if not name:
name = self.remote.name
ssh_proxy = False
remote = self.GetRemote(name)
if remote.PreConnectFetch():
ssh_proxy = True
if initial:
if alt_dir and 'objects' == os.path.basename(alt_dir):
ref_dir = os.path.dirname(alt_dir)
packed_refs = os.path.join(self.gitdir, 'packed-refs')
remote = self.GetRemote(name)
all_refs = self.bare_ref.all
ids = set(all_refs.values())
tmp = set()
for r, ref_id in GitRefs(ref_dir).all.items():
if r not in all_refs:
if r.startswith(R_TAGS) or remote.WritesTo(r):
all_refs[r] = ref_id
ids.add(ref_id)
continue
if ref_id in ids:
continue
r = 'refs/_alt/%s' % ref_id
all_refs[r] = ref_id
ids.add(ref_id)
tmp.add(r)
tmp_packed = ''
old_packed = ''
for r in sorted(all_refs):
line = '%s %s\n' % (all_refs[r], r)
tmp_packed += line
if r not in tmp:
old_packed += line
_lwrite(packed_refs, tmp_packed)
else:
alt_dir = None
cmd = ['fetch']
if depth:
cmd.append('--depth=%s' % depth)
else:
# If this repo has shallow objects, then we don't know which refs have
# shallow objects or not. Tell git to unshallow all fetched refs. Don't
# do this with projects that don't have shallow objects, since it is less
# efficient.
if os.path.exists(os.path.join(self.gitdir, 'shallow')):
cmd.append('--depth=2147483647')
if quiet:
cmd.append('--quiet')
if not self.worktree:
cmd.append('--update-head-ok')
cmd.append(name)
# If using depth then we should not get all the tags since they may
# be outside of the depth.
if no_tags or depth:
cmd.append('--no-tags')
else:
cmd.append('--tags')
if prune:
cmd.append('--prune')
spec = []
if not current_branch_only:
# Fetch whole repo
spec.append(str((u'+refs/heads/*:') + remote.ToLocal('refs/heads/*')))
elif tag_name is not None:
spec.append('tag')
spec.append(tag_name)
if not self.manifest.IsMirror:
branch = self.revisionExpr
if is_sha1 and depth and git_require((1, 8, 3)):
# Shallow checkout of a specific commit, fetch from that commit and not
# the heads only as the commit might be deeper in the history.
spec.append(branch)
else:
if is_sha1:
branch = self.upstream
if branch is not None and branch.strip():
if not branch.startswith('refs/'):
branch = R_HEADS + branch
spec.append(str((u'+%s:' % branch) + remote.ToLocal(branch)))
cmd.extend(spec)
ok = False
for _i in range(2):
gitcmd = GitCommand(self, cmd, bare=True, ssh_proxy=ssh_proxy)
ret = gitcmd.Wait()
if ret == 0:
ok = True
break
# If needed, run the 'git remote prune' the first time through the loop
elif (not _i and
"error:" in gitcmd.stderr and
"git remote prune" in gitcmd.stderr):
prunecmd = GitCommand(self, ['remote', 'prune', name], bare=True,
ssh_proxy=ssh_proxy)
ret = prunecmd.Wait()
if ret:
break
continue
elif current_branch_only and is_sha1 and ret == 128:
# Exit code 128 means "couldn't find the ref you asked for"; if we're in sha1
# mode, we just tried sync'ing from the upstream field; it doesn't exist, thus
# abort the optimization attempt and do a full sync.
break
elif ret < 0:
# Git died with a signal, exit immediately
break
time.sleep(random.randint(30, 45))
if initial:
if alt_dir:
if old_packed != '':
_lwrite(packed_refs, old_packed)
else:
os.remove(packed_refs)
self.bare_git.pack_refs('--all', '--prune')
if is_sha1 and current_branch_only and self.upstream:
# We just synced the upstream given branch; verify we
# got what we wanted, else trigger a second run of all
# refs.
if not self._CheckForSha1():
if not depth:
# Avoid infinite recursion when depth is True (since depth implies
# current_branch_only)
return self._RemoteFetch(name=name, current_branch_only=False,
initial=False, quiet=quiet, alt_dir=alt_dir)
if self.clone_depth:
self.clone_depth = None
return self._RemoteFetch(name=name, current_branch_only=current_branch_only,
initial=False, quiet=quiet, alt_dir=alt_dir)
return ok
def _ApplyCloneBundle(self, initial=False, quiet=False):
if initial and (self.manifest.manifestProject.config.GetString('repo.depth') or self.clone_depth):
return False
remote = self.GetRemote(self.remote.name)
bundle_url = remote.url + '/clone.bundle'
bundle_url = GitConfig.ForUser().UrlInsteadOf(bundle_url)
if GetSchemeFromUrl(bundle_url) not in (
'http', 'https', 'persistent-http', 'persistent-https'):
return False
bundle_dst = os.path.join(self.gitdir, 'clone.bundle')
bundle_tmp = os.path.join(self.gitdir, 'clone.bundle.tmp')
exist_dst = os.path.exists(bundle_dst)
exist_tmp = os.path.exists(bundle_tmp)
if not initial and not exist_dst and not exist_tmp:
return False
if not exist_dst:
exist_dst = self._FetchBundle(bundle_url, bundle_tmp, bundle_dst, quiet)
if not exist_dst:
return False
cmd = ['fetch']
if quiet:
cmd.append('--quiet')
if not self.worktree:
cmd.append('--update-head-ok')
cmd.append(bundle_dst)
for f in remote.fetch:
cmd.append(str(f))
cmd.append('refs/tags/*:refs/tags/*')
ok = GitCommand(self, cmd, bare=True).Wait() == 0
if os.path.exists(bundle_dst):
os.remove(bundle_dst)
if os.path.exists(bundle_tmp):
os.remove(bundle_tmp)
return ok
def _FetchBundle(self, srcUrl, tmpPath, dstPath, quiet):
if os.path.exists(dstPath):
os.remove(dstPath)
cmd = ['curl', '--fail', '--output', tmpPath, '--netrc', '--location']
if quiet:
cmd += ['--silent']
if os.path.exists(tmpPath):
size = os.stat(tmpPath).st_size
if size >= 1024:
cmd += ['--continue-at', '%d' % (size,)]
else:
os.remove(tmpPath)
if 'http_proxy' in os.environ and 'darwin' == sys.platform:
cmd += ['--proxy', os.environ['http_proxy']]
with GetUrlCookieFile(srcUrl, quiet) as (cookiefile, proxy):
if cookiefile:
cmd += ['--cookie', cookiefile, '--cookie-jar', cookiefile]
if srcUrl.startswith('persistent-'):
srcUrl = srcUrl[len('persistent-'):]
cmd += [srcUrl]
if IsTrace():
Trace('%s', ' '.join(cmd))
try:
proc = subprocess.Popen(cmd)
except OSError:
return False
curlret = proc.wait()
if curlret == 22:
# From curl man page:
# 22: HTTP page not retrieved. The requested url was not found or
# returned another error with the HTTP error code being 400 or above.
# This return code only appears if -f, --fail is used.
if not quiet:
print("Server does not provide clone.bundle; ignoring.",
file=sys.stderr)
return False
if os.path.exists(tmpPath):
if curlret == 0 and self._IsValidBundle(tmpPath, quiet):
os.rename(tmpPath, dstPath)
return True
else:
os.remove(tmpPath)
return False
else:
return False
def _IsValidBundle(self, path, quiet):
try:
with open(path) as f:
if f.read(16) == '# v2 git bundle\n':
return True
else:
if not quiet:
print("Invalid clone.bundle file; ignoring.", file=sys.stderr)
return False
except OSError:
return False
def _Checkout(self, rev, quiet=False):
cmd = ['checkout']
if quiet:
cmd.append('-q')
cmd.append(rev)
cmd.append('--')
if GitCommand(self, cmd).Wait() != 0:
if self._allrefs:
raise GitError('%s checkout %s ' % (self.name, rev))
def _CherryPick(self, rev):
cmd = ['cherry-pick']
cmd.append(rev)
cmd.append('--')
if GitCommand(self, cmd).Wait() != 0:
if self._allrefs:
raise GitError('%s cherry-pick %s ' % (self.name, rev))
def _Revert(self, rev):
cmd = ['revert']
cmd.append('--no-edit')
cmd.append(rev)
cmd.append('--')
if GitCommand(self, cmd).Wait() != 0:
if self._allrefs:
raise GitError('%s revert %s ' % (self.name, rev))
def _ResetHard(self, rev, quiet=True):
cmd = ['reset', '--hard']
if quiet:
cmd.append('-q')
cmd.append(rev)
if GitCommand(self, cmd).Wait() != 0:
raise GitError('%s reset --hard %s ' % (self.name, rev))
def _Rebase(self, upstream, onto=None):
cmd = ['rebase']
if onto is not None:
cmd.extend(['--onto', onto])
cmd.append(upstream)
if GitCommand(self, cmd).Wait() != 0:
raise GitError('%s rebase %s ' % (self.name, upstream))
def _FastForward(self, head, ffonly=False):
cmd = ['merge', head]
if ffonly:
cmd.append("--ff-only")
if GitCommand(self, cmd).Wait() != 0:
raise GitError('%s merge %s ' % (self.name, head))
def _InitGitDir(self, mirror_git=None, force_sync=False):
init_git_dir = not os.path.exists(self.gitdir)
init_obj_dir = not os.path.exists(self.objdir)
try:
# Initialize the bare repository, which contains all of the objects.
if init_obj_dir:
os.makedirs(self.objdir)
self.bare_objdir.init()
# If we have a separate directory to hold refs, initialize it as well.
if self.objdir != self.gitdir:
if init_git_dir:
os.makedirs(self.gitdir)
if init_obj_dir or init_git_dir:
self._ReferenceGitDir(self.objdir, self.gitdir, share_refs=False,
copy_all=True)
try:
self._CheckDirReference(self.objdir, self.gitdir, share_refs=False)
except GitError as e:
if force_sync:
print("Retrying clone after deleting %s" % self.gitdir, file=sys.stderr)
try:
shutil.rmtree(os.path.realpath(self.gitdir))
if self.worktree and os.path.exists(
os.path.realpath(self.worktree)):
shutil.rmtree(os.path.realpath(self.worktree))
return self._InitGitDir(mirror_git=mirror_git, force_sync=False)
except:
raise e
raise e
if init_git_dir:
mp = self.manifest.manifestProject
ref_dir = mp.config.GetString('repo.reference') or ''
if ref_dir or mirror_git:
if not mirror_git:
mirror_git = os.path.join(ref_dir, self.name + '.git')
repo_git = os.path.join(ref_dir, '.repo', 'projects',
self.relpath + '.git')
if os.path.exists(mirror_git):
ref_dir = mirror_git
elif os.path.exists(repo_git):
ref_dir = repo_git
else:
ref_dir = None
if ref_dir:
_lwrite(os.path.join(self.gitdir, 'objects/info/alternates'),
os.path.join(ref_dir, 'objects') + '\n')
self._UpdateHooks()
m = self.manifest.manifestProject.config
for key in ['user.name', 'user.email']:
if m.Has(key, include_defaults=False):
self.config.SetString(key, m.GetString(key))
if self.manifest.IsMirror:
self.config.SetString('core.bare', 'true')
else:
self.config.SetString('core.bare', None)
except Exception:
if init_obj_dir and os.path.exists(self.objdir):
shutil.rmtree(self.objdir)
if init_git_dir and os.path.exists(self.gitdir):
shutil.rmtree(self.gitdir)
raise
def _UpdateHooks(self):
if os.path.exists(self.gitdir):
self._InitHooks()
def _InitHooks(self):
hooks = os.path.realpath(self._gitdir_path('hooks'))
if not os.path.exists(hooks):
os.makedirs(hooks)
for stock_hook in _ProjectHooks():
name = os.path.basename(stock_hook)
if name in ('commit-msg',) and not self.remote.review \
and not self is self.manifest.manifestProject:
# Don't install a Gerrit Code Review hook if this
# project does not appear to use it for reviews.
#
# Since the manifest project is one of those, but also
# managed through gerrit, it's excluded
continue
dst = os.path.join(hooks, name)
if os.path.islink(dst):
continue
if os.path.exists(dst):
if filecmp.cmp(stock_hook, dst, shallow=False):
os.remove(dst)
else:
_warn("%s: Not replacing locally modified %s hook", self.relpath, name)
continue
try:
os.symlink(os.path.relpath(stock_hook, os.path.dirname(dst)), dst)
except OSError as e:
if e.errno == errno.EPERM:
raise GitError('filesystem must support symlinks')
else:
raise
def _InitRemote(self):
if self.remote.url:
remote = self.GetRemote(self.remote.name)
remote.url = self.remote.url
remote.review = self.remote.review
remote.projectname = self.name
if self.worktree:
remote.ResetFetch(mirror=False)
else:
remote.ResetFetch(mirror=True)
remote.Save()
def _InitMRef(self):
if self.manifest.branch:
self._InitAnyMRef(R_M + self.manifest.branch)
def _InitMirrorHead(self):
self._InitAnyMRef(HEAD)
def _InitAnyMRef(self, ref):
cur = self.bare_ref.symref(ref)
if self.revisionId:
if cur != '' or self.bare_ref.get(ref) != self.revisionId:
msg = 'manifest set to %s' % self.revisionId
dst = self.revisionId + '^0'
self.bare_git.UpdateRef(ref, dst, message=msg, detach=True)
else:
remote = self.GetRemote(self.remote.name)
dst = remote.ToLocal(self.revisionExpr)
if cur != dst:
msg = 'manifest set to %s' % self.revisionExpr
self.bare_git.symbolic_ref('-m', msg, ref, dst)
def _CheckDirReference(self, srcdir, destdir, share_refs):
symlink_files = self.shareable_files
symlink_dirs = self.shareable_dirs
if share_refs:
symlink_files += self.working_tree_files
symlink_dirs += self.working_tree_dirs
to_symlink = symlink_files + symlink_dirs
for name in set(to_symlink):
dst = os.path.realpath(os.path.join(destdir, name))
if os.path.lexists(dst):
src = os.path.realpath(os.path.join(srcdir, name))
# Fail if the links are pointing to the wrong place
if src != dst:
raise GitError('--force-sync not enabled; cannot overwrite a local '
'work tree. If you\'re comfortable with the '
'possibility of losing the work tree\'s git metadata,'
' use `repo sync --force-sync {0}` to '
'proceed.'.format(self.relpath))
def _ReferenceGitDir(self, gitdir, dotgit, share_refs, copy_all):
"""Update |dotgit| to reference |gitdir|, using symlinks where possible.
Args:
gitdir: The bare git repository. Must already be initialized.
dotgit: The repository you would like to initialize.
share_refs: If true, |dotgit| will store its refs under |gitdir|.
Only one work tree can store refs under a given |gitdir|.
copy_all: If true, copy all remaining files from |gitdir| -> |dotgit|.
This saves you the effort of initializing |dotgit| yourself.
"""
symlink_files = self.shareable_files
symlink_dirs = self.shareable_dirs
if share_refs:
symlink_files += self.working_tree_files
symlink_dirs += self.working_tree_dirs
to_symlink = symlink_files + symlink_dirs
to_copy = []
if copy_all:
to_copy = os.listdir(gitdir)
dotgit = os.path.realpath(dotgit)
for name in set(to_copy).union(to_symlink):
try:
src = os.path.realpath(os.path.join(gitdir, name))
dst = os.path.join(dotgit, name)
if os.path.lexists(dst):
continue
# If the source dir doesn't exist, create an empty dir.
if name in symlink_dirs and not os.path.lexists(src):
os.makedirs(src)
# If the source file doesn't exist, ensure the destination
# file doesn't either.
if name in symlink_files and not os.path.lexists(src):
try:
os.remove(dst)
except OSError:
pass
if name in to_symlink:
os.symlink(os.path.relpath(src, os.path.dirname(dst)), dst)
elif copy_all and not os.path.islink(dst):
if os.path.isdir(src):
shutil.copytree(src, dst)
elif os.path.isfile(src):
shutil.copy(src, dst)
except OSError as e:
if e.errno == errno.EPERM:
raise DownloadError('filesystem must support symlinks')
else:
raise
def _InitWorkTree(self, force_sync=False):
dotgit = os.path.join(self.worktree, '.git')
init_dotgit = not os.path.exists(dotgit)
try:
if init_dotgit:
os.makedirs(dotgit)
self._ReferenceGitDir(self.gitdir, dotgit, share_refs=True,
copy_all=False)
try:
self._CheckDirReference(self.gitdir, dotgit, share_refs=True)
except GitError as e:
if force_sync:
try:
shutil.rmtree(dotgit)
return self._InitWorkTree(force_sync=False)
except:
raise e
raise e
if init_dotgit:
_lwrite(os.path.join(dotgit, HEAD), '%s\n' % self.GetRevisionId())
cmd = ['read-tree', '--reset', '-u']
cmd.append('-v')
cmd.append(HEAD)
if GitCommand(self, cmd).Wait() != 0:
raise GitError("cannot initialize work tree")
self._CopyAndLinkFiles()
except Exception:
if init_dotgit:
shutil.rmtree(dotgit)
raise
def _gitdir_path(self, path):
return os.path.realpath(os.path.join(self.gitdir, path))
def _revlist(self, *args, **kw):
a = []
a.extend(args)
a.append('--')
return self.work_git.rev_list(*a, **kw)
@property
def _allrefs(self):
return self.bare_ref.all
def _getLogs(self, rev1, rev2, oneline=False, color=True):
"""Get logs between two revisions of this project."""
comp = '..'
if rev1:
revs = [rev1]
if rev2:
revs.extend([comp, rev2])
cmd = ['log', ''.join(revs)]
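# For example (hypothetical SHAs): rev1='abc123', rev2='def456' yields
# cmd == ['log', 'abc123..def456'], i.e. commits reachable from rev2 but
# not from rev1. Callers are expected to pass non-empty revisions; if rev1
# is empty, 'revs' is never bound.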
out = DiffColoring(self.config)
if out.is_on and color:
cmd.append('--color')
if oneline:
cmd.append('--oneline')
try:
log = GitCommand(self, cmd, capture_stdout=True, capture_stderr=True)
if log.Wait() == 0:
return log.stdout
except GitError:
# worktree may not exist if groups changed for example. In that case,
# try in gitdir instead.
if not os.path.exists(self.worktree):
return self.bare_git.log(*cmd[1:])
else:
raise
return None
def getAddedAndRemovedLogs(self, toProject, oneline=False, color=True):
"""Get the list of logs from this revision to given revisionId"""
logs = {}
selfId = self.GetRevisionId(self._allrefs)
toId = toProject.GetRevisionId(toProject._allrefs)
logs['added'] = self._getLogs(selfId, toId, oneline=oneline, color=color)
logs['removed'] = self._getLogs(toId, selfId, oneline=oneline, color=color)
return logs
class _GitGetByExec(object):
def __init__(self, project, bare, gitdir):
self._project = project
self._bare = bare
self._gitdir = gitdir
def LsOthers(self):
p = GitCommand(self._project,
['ls-files',
'-z',
'--others',
'--exclude-standard'],
bare=False,
gitdir=self._gitdir,
capture_stdout=True,
capture_stderr=True)
if p.Wait() == 0:
out = p.stdout
if out:
return out[:-1].split('\0') # pylint: disable=W1401 (backslash is not anomalous)
return []
def DiffZ(self, name, *args):
cmd = [name]
cmd.append('-z')
cmd.extend(args)
p = GitCommand(self._project,
cmd,
gitdir=self._gitdir,
bare=False,
capture_stdout=True,
capture_stderr=True)
try:
out = p.process.stdout.read()
r = {}
if out:
out = iter(out[:-1].split('\0')) # pylint: disable=W1401
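# Each -z record (e.g. from diff-index/diff-files) has the raw form
# ":<old_mode> <new_mode> <old_sha> <new_sha> <status>" followed by NUL and
# the path; renames/copies (status 'R'/'C') append a second path. The loop
# below consumes info/path pairs until the iterator is exhausted.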
while out:
try:
info = next(out)
path = next(out)
except StopIteration:
break
class _Info(object):
def __init__(self, path, omode, nmode, oid, nid, state):
self.path = path
self.src_path = None
self.old_mode = omode
self.new_mode = nmode
self.old_id = oid
self.new_id = nid
if len(state) == 1:
self.status = state
self.level = None
else:
self.status = state[:1]
self.level = state[1:]
while self.level.startswith('0'):
self.level = self.level[1:]
info = info[1:].split(' ')
info = _Info(path, *info)
if info.status in ('R', 'C'):
info.src_path = info.path
info.path = next(out)
r[info.path] = info
return r
finally:
p.Wait()
def GetHead(self):
if self._bare:
path = os.path.join(self._project.gitdir, HEAD)
else:
path = os.path.join(self._project.worktree, '.git', HEAD)
try:
fd = open(path, 'rb')
except IOError as e:
raise NoManifestException(path, str(e))
try:
line = fd.read()
finally:
fd.close()
try:
line = line.decode()
except AttributeError:
pass
if line.startswith('ref: '):
return line[5:-1]
return line[:-1]
def SetHead(self, ref, message=None):
cmdv = []
if message is not None:
cmdv.extend(['-m', message])
cmdv.append(HEAD)
cmdv.append(ref)
self.symbolic_ref(*cmdv)
def DetachHead(self, new, message=None):
cmdv = ['--no-deref']
if message is not None:
cmdv.extend(['-m', message])
cmdv.append(HEAD)
cmdv.append(new)
self.update_ref(*cmdv)
def UpdateRef(self, name, new, old=None,
message=None,
detach=False):
cmdv = []
if message is not None:
cmdv.extend(['-m', message])
if detach:
cmdv.append('--no-deref')
cmdv.append(name)
cmdv.append(new)
if old is not None:
cmdv.append(old)
self.update_ref(*cmdv)
def DeleteRef(self, name, old=None):
if not old:
old = self.rev_parse(name)
self.update_ref('-d', name, old)
self._project.bare_ref.deleted(name)
def rev_list(self, *args, **kw):
if 'format' in kw:
cmdv = ['log', '--pretty=format:%s' % kw['format']]
else:
cmdv = ['rev-list']
cmdv.extend(args)
p = GitCommand(self._project,
cmdv,
bare=self._bare,
gitdir=self._gitdir,
capture_stdout=True,
capture_stderr=True)
r = []
for line in p.process.stdout:
if line[-1] == '\n':
line = line[:-1]
r.append(line)
if p.Wait() != 0:
raise GitError('%s rev-list %s: %s' % (
self._project.name,
str(args),
p.stderr))
return r
def __getattr__(self, name):
"""Allow arbitrary git commands using pythonic syntax.
This allows you to do things like:
git_obj.rev_parse('HEAD')
Since we don't have a 'rev_parse' method defined, the __getattr__ will
run. We'll replace the '_' with a '-' and try to run a git command.
Any other positional arguments will be passed to the git command, and the
following keyword arguments are supported:
config: An optional dict of git config options to be passed with '-c'.
Args:
name: The name of the git command to call. Any '_' characters will
be replaced with '-'.
Returns:
A callable object that will try to call git with the named command.
"""
name = name.replace('_', '-')
def runner(*args, **kwargs):
cmdv = []
config = kwargs.pop('config', None)
for k in kwargs:
raise TypeError('%s() got an unexpected keyword argument %r'
% (name, k))
if config is not None:
if not git_require((1, 7, 2)):
raise ValueError('cannot set config on command line for %s()'
% name)
for k, v in config.items():
cmdv.append('-c')
cmdv.append('%s=%s' % (k, v))
cmdv.append(name)
cmdv.extend(args)
p = GitCommand(self._project,
cmdv,
bare=self._bare,
gitdir=self._gitdir,
capture_stdout=True,
capture_stderr=True)
if p.Wait() != 0:
raise GitError('%s %s: %s' % (
self._project.name,
name,
p.stderr))
r = p.stdout
try:
r = r.decode('utf-8')
except AttributeError:
pass
if r.endswith('\n') and r.index('\n') == len(r) - 1:
return r[:-1]
return r
return runner
class _PriorSyncFailedError(Exception):
def __str__(self):
return 'prior sync failed; rebase still in progress'
class _DirtyError(Exception):
def __str__(self):
return 'contains uncommitted changes'
class _InfoMessage(object):
def __init__(self, project, text):
self.project = project
self.text = text
def Print(self, syncbuf):
syncbuf.out.info('%s/: %s', self.project.relpath, self.text)
syncbuf.out.nl()
class _Failure(object):
def __init__(self, project, why):
self.project = project
self.why = why
def Print(self, syncbuf):
syncbuf.out.fail('error: %s/: %s',
self.project.relpath,
str(self.why))
syncbuf.out.nl()
class _Later(object):
def __init__(self, project, action):
self.project = project
self.action = action
def Run(self, syncbuf):
out = syncbuf.out
out.project('project %s/', self.project.relpath)
out.nl()
try:
self.action()
out.nl()
return True
except GitError:
out.nl()
return False
class _SyncColoring(Coloring):
def __init__(self, config):
Coloring.__init__(self, config, 'reposync')
self.project = self.printer('header', attr='bold')
self.info = self.printer('info')
self.fail = self.printer('fail', fg='red')
class SyncBuffer(object):
def __init__(self, config, detach_head=False):
self._messages = []
self._failures = []
self._later_queue1 = []
self._later_queue2 = []
self.out = _SyncColoring(config)
self.out.redirect(sys.stderr)
self.detach_head = detach_head
self.clean = True
def info(self, project, fmt, *args):
self._messages.append(_InfoMessage(project, fmt % args))
def fail(self, project, err=None):
self._failures.append(_Failure(project, err))
self.clean = False
def later1(self, project, what):
self._later_queue1.append(_Later(project, what))
def later2(self, project, what):
self._later_queue2.append(_Later(project, what))
def Finish(self):
self._PrintMessages()
self._RunLater()
self._PrintMessages()
return self.clean
def _RunLater(self):
for q in ['_later_queue1', '_later_queue2']:
if not self._RunQueue(q):
return
def _RunQueue(self, queue):
for m in getattr(self, queue):
if not m.Run(self):
self.clean = False
return False
setattr(self, queue, [])
return True
def _PrintMessages(self):
for m in self._messages:
m.Print(self)
for m in self._failures:
m.Print(self)
self._messages = []
self._failures = []
class MetaProject(Project):
"""A special project housed under .repo.
"""
def __init__(self, manifest, name, gitdir, worktree):
Project.__init__(self,
manifest=manifest,
name=name,
gitdir=gitdir,
objdir=gitdir,
worktree=worktree,
remote=RemoteSpec('origin'),
relpath='.repo/%s' % name,
revisionExpr='refs/heads/master',
revisionId=None,
groups=None)
def PreSync(self):
if self.Exists:
cb = self.CurrentBranch
if cb:
base = self.GetBranch(cb).merge
if base:
self.revisionExpr = base
self.revisionId = None
def MetaBranchSwitch(self):
""" Prepare MetaProject for manifest branch switch
"""
# detach and delete manifest branch, allowing a new
# branch to take over
syncbuf = SyncBuffer(self.config, detach_head=True)
self.Sync_LocalHalf(syncbuf)
syncbuf.Finish()
return GitCommand(self,
['update-ref', '-d', 'refs/heads/default'],
capture_stdout=True,
capture_stderr=True).Wait() == 0
@property
def LastFetch(self):
try:
fh = os.path.join(self.gitdir, 'FETCH_HEAD')
return os.path.getmtime(fh)
except OSError:
return 0
@property
def HasChanges(self):
"""Has the remote received new commits not yet checked out?
"""
if not self.remote or not self.revisionExpr:
return False
all_refs = self.bare_ref.all
revid = self.GetRevisionId(all_refs)
head = self.work_git.GetHead()
if head.startswith(R_HEADS):
try:
head = all_refs[head]
except KeyError:
head = None
if revid == head:
return False
elif self._revlist(not_rev(HEAD), revid):
return True
return False
|
{
"content_hash": "74d67db47712130257535f4b5d7d155b",
"timestamp": "",
"source": "github",
"line_count": 2823,
"max_line_length": 102,
"avg_line_length": 31.40701381509033,
"alnum_prop": 0.5839480273397848,
"repo_name": "slfyusufu/repo",
"id": "50580880aaa19f1dd18d88d1cb649148e8b15621",
"size": "89263",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "389654"
},
{
"name": "Shell",
"bytes": "6043"
}
],
"symlink_target": ""
}
|
"""
Class for PXE bare-metal nodes.
"""
import datetime
import os
from oslo.config import cfg
from nova.compute import flavors
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import timeutils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
pxe_opts = [
cfg.StrOpt('deploy_kernel',
help='Default kernel image ID used in deployment phase'),
cfg.StrOpt('deploy_ramdisk',
help='Default ramdisk image ID used in deployment phase'),
cfg.StrOpt('net_config_template',
default='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
help='Template file for injected network config'),
cfg.StrOpt('pxe_append_params',
help='additional append parameters for baremetal PXE boot'),
cfg.StrOpt('pxe_config_template',
default='$pybasedir/nova/virt/baremetal/pxe_config.template',
help='Template file for PXE configuration'),
cfg.IntOpt('pxe_deploy_timeout',
help='Timeout for PXE deployments. Default: 0 (unlimited)',
default=0),
cfg.BoolOpt('pxe_network_config',
help='If set, pass the network configuration details to the '
'initramfs via cmdline.',
default=False),
]
LOG = logging.getLogger(__name__)
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(pxe_opts, baremetal_group)
CONF.import_opt('use_ipv6', 'nova.netconf')
CHEETAH = None
def _get_cheetah():
global CHEETAH
if CHEETAH is None:
from Cheetah import Template
CHEETAH = Template.Template
return CHEETAH
def build_pxe_network_config(network_info):
interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
template = None
if not CONF.use_ipv6:
template = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off"
else:
template = ("ip=[%(address_v6)s]::[%(gateway_v6)s]:"
"[%(netmask_v6)s]::%(name)s:off")
net_config = [template % iface for iface in interfaces]
return ' '.join(net_config)
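# Illustrative IPv4 result for a single interface (hypothetical values):
#   "ip=192.0.2.10::192.0.2.1:255.255.255.0::eth0:off"
# Entries for multiple interfaces are joined with spaces into one cmdline fragment.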
def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
deployment_aki_path, deployment_ari_path,
aki_path, ari_path, network_info):
"""Build the PXE config file for a node
This method builds the PXE boot configuration file for a node,
given all the required parameters.
The resulting file has both a "deploy" and "boot" label, which correspond
to the two phases of booting. This may be extended later.
"""
LOG.debug(_("Building PXE config for deployment %s.") % deployment_id)
network_config = None
if network_info and CONF.baremetal.pxe_network_config:
network_config = build_pxe_network_config(network_info)
pxe_options = {
'deployment_id': deployment_id,
'deployment_key': deployment_key,
'deployment_iscsi_iqn': deployment_iscsi_iqn,
'deployment_aki_path': deployment_aki_path,
'deployment_ari_path': deployment_ari_path,
'aki_path': aki_path,
'ari_path': ari_path,
'pxe_append_params': CONF.baremetal.pxe_append_params,
'pxe_network_config': network_config,
}
cheetah = _get_cheetah()
pxe_config = str(cheetah(
open(CONF.baremetal.pxe_config_template).read(),
searchList=[{'pxe_options': pxe_options,
'ROOT': '${ROOT}',
}]))
return pxe_config
def build_network_config(network_info):
interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
cheetah = _get_cheetah()
network_config = str(cheetah(
open(CONF.baremetal.net_config_template).read(),
searchList=[
{'interfaces': interfaces,
'use_ipv6': CONF.use_ipv6,
}
]))
return network_config
def get_deploy_aki_id(instance_type):
return instance_type.get('extra_specs', {}).\
get('baremetal:deploy_kernel_id', CONF.baremetal.deploy_kernel)
def get_deploy_ari_id(instance_type):
return instance_type.get('extra_specs', {}).\
get('baremetal:deploy_ramdisk_id', CONF.baremetal.deploy_ramdisk)
def get_image_dir_path(instance):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'])
def get_image_file_path(instance):
"""Generate the full path for an instances disk."""
return os.path.join(CONF.instances_path, instance['name'], 'disk')
def get_pxe_config_file_path(instance):
"""Generate the path for an instances PXE config file."""
return os.path.join(CONF.baremetal.tftp_root, instance['uuid'], 'config')
def get_partition_sizes(instance):
instance_type = flavors.extract_flavor(instance)
root_mb = instance_type['root_gb'] * 1024
swap_mb = instance_type['swap']
# NOTE(deva): For simpler code paths on the deployment side,
# we always create a swap partition. If the flavor
# does not specify any swap, we default to 1MB
if swap_mb < 1:
swap_mb = 1
return (root_mb, swap_mb)
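# Example (hypothetical flavor): root_gb=10, swap=0 -> returns (10240, 1),
# since sizes are expressed in MB and a 1 MB swap partition is always created.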
def get_pxe_mac_path(mac):
"""Convert a MAC address into a PXE config file name."""
return os.path.join(
CONF.baremetal.tftp_root,
'pxelinux.cfg',
"01-" + mac.replace(":", "-").lower()
)
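# Example: get_pxe_mac_path('AA:BB:CC:DD:EE:FF') ->
#   <tftp_root>/pxelinux.cfg/01-aa-bb-cc-dd-ee-ff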
def get_tftp_image_info(instance, instance_type):
"""Generate the paths for tftp files for this instance
Raises NovaException if
- instance does not contain kernel_id or ramdisk_id
- deploy_kernel_id or deploy_ramdisk_id can not be read from
instance_type['extra_specs'] and defaults are not set
"""
image_info = {
'kernel': [None, None],
'ramdisk': [None, None],
'deploy_kernel': [None, None],
'deploy_ramdisk': [None, None],
}
try:
image_info['kernel'][0] = str(instance['kernel_id'])
image_info['ramdisk'][0] = str(instance['ramdisk_id'])
image_info['deploy_kernel'][0] = get_deploy_aki_id(instance_type)
image_info['deploy_ramdisk'][0] = get_deploy_ari_id(instance_type)
except KeyError:
pass
missing_labels = []
for label in image_info.keys():
(uuid, path) = image_info[label]
if not uuid:
missing_labels.append(label)
else:
image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
instance['uuid'], label)
if missing_labels:
raise exception.NovaException(_(
"Can not activate PXE bootloader. The following boot parameters "
"were not passed to baremetal driver: %s") % missing_labels)
return image_info
class PXE(base.NodeDriver):
"""PXE bare metal driver."""
def __init__(self, virtapi):
super(PXE, self).__init__(virtapi)
def _collect_mac_addresses(self, context, node):
macs = set()
for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
if nic['address']:
macs.add(nic['address'])
return sorted(macs)
def _cache_tftp_images(self, context, instance, image_info):
"""Fetch the necessary kernels and ramdisks for the instance."""
fileutils.ensure_tree(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
LOG.debug(_("Fetching kernel and ramdisk for instance %s") %
instance['name'])
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.cache_image(
context=context,
target=path,
image_id=uuid,
user_id=instance['user_id'],
project_id=instance['project_id'],
)
def _cache_image(self, context, instance, image_meta):
"""Fetch the instance's image from Glance
This method pulls the relevant AMI and associated kernel and ramdisk,
and the deploy kernel and ramdisk from Glance, and writes them
to the appropriate places on local disk.
Both sets of kernel and ramdisk are needed for PXE booting, so these
are stored under CONF.baremetal.tftp_root.
At present, the AMI is cached and certain files are injected.
Debian/ubuntu-specific assumptions are made regarding the injected
files. In a future revision, this functionality will be replaced by a
more scalable and os-agnostic approach: the deployment ramdisk will
fetch from Glance directly, and write its own last-mile configuration.
"""
fileutils.ensure_tree(get_image_dir_path(instance))
image_path = get_image_file_path(instance)
LOG.debug(_("Fetching image %(ami)s for instance %(name)s") %
{'ami': image_meta['id'], 'name': instance['name']})
bm_utils.cache_image(context=context,
target=image_path,
image_id=image_meta['id'],
user_id=instance['user_id'],
project_id=instance['project_id']
)
return [image_meta['id'], image_path]
def _inject_into_image(self, context, node, instance, network_info,
injected_files=None, admin_password=None):
"""Inject last-mile configuration into instances image
Much of this method is a hack around DHCP and cloud-init
not working together with baremetal provisioning yet.
"""
# NOTE(deva): We assume that if we're not using a kernel,
# then the target partition is the first partition
partition = None
if not instance['kernel_id']:
partition = "1"
ssh_key = None
if 'key_data' in instance and instance['key_data']:
ssh_key = str(instance['key_data'])
if injected_files is None:
injected_files = []
else:
# NOTE(deva): copy so we don't modify the original
injected_files = list(injected_files)
net_config = build_network_config(network_info)
if instance['hostname']:
injected_files.append(('/etc/hostname', instance['hostname']))
LOG.debug(_("Injecting files into image for instance %(name)s") %
{'name': instance['name']})
bm_utils.inject_into_image(
image=get_image_file_path(instance),
key=ssh_key,
net=net_config,
metadata=instance['metadata'],
admin_password=admin_password,
files=injected_files,
partition=partition,
)
def cache_images(self, context, node, instance,
admin_password, image_meta, injected_files, network_info):
"""Prepare all the images for this instance."""
instance_type = self.virtapi.instance_type_get(
context, instance['instance_type_id'])
tftp_image_info = get_tftp_image_info(instance, instance_type)
self._cache_tftp_images(context, instance, tftp_image_info)
self._cache_image(context, instance, image_meta)
self._inject_into_image(context, node, instance, network_info,
injected_files, admin_password)
def destroy_images(self, context, node, instance):
"""Delete instance's image file."""
bm_utils.unlink_without_raise(get_image_file_path(instance))
bm_utils.rmtree_without_raise(get_image_dir_path(instance))
def activate_bootloader(self, context, node, instance, network_info):
"""Configure PXE boot loader for an instance
Kernel and ramdisk images are downloaded by cache_tftp_images,
and stored in /tftpboot/{uuid}/
This method writes the instances config file, and then creates
symlinks for each MAC address in the instance.
By default, the complete layout looks like this:
/tftpboot/
./{uuid}/
kernel
ramdisk
deploy_kernel
deploy_ramdisk
config
./pxelinux.cfg/
{mac} -> ../{uuid}/config
"""
instance_type = self.virtapi.instance_type_get(
context, instance['instance_type_id'])
image_info = get_tftp_image_info(instance, instance_type)
(root_mb, swap_mb) = get_partition_sizes(instance)
pxe_config_file_path = get_pxe_config_file_path(instance)
image_file_path = get_image_file_path(instance)
deployment_key = bm_utils.random_alnum(32)
deployment_iscsi_iqn = "iqn-%s" % instance['uuid']
db.bm_node_update(context, node['id'],
{'deploy_key': deployment_key,
'image_path': image_file_path,
'pxe_config_path': pxe_config_file_path,
'root_mb': root_mb,
'swap_mb': swap_mb})
pxe_config = build_pxe_config(
node['id'],
deployment_key,
deployment_iscsi_iqn,
image_info['deploy_kernel'][1],
image_info['deploy_ramdisk'][1],
image_info['kernel'][1],
image_info['ramdisk'][1],
network_info,
)
bm_utils.write_to_file(pxe_config_file_path, pxe_config)
macs = self._collect_mac_addresses(context, node)
for mac in macs:
mac_path = get_pxe_mac_path(mac)
bm_utils.unlink_without_raise(mac_path)
bm_utils.create_link_without_raise(pxe_config_file_path, mac_path)
def deactivate_bootloader(self, context, node, instance):
"""Delete PXE bootloader images and config."""
try:
db.bm_node_update(context, node['id'],
{'deploy_key': None,
'image_path': None,
'pxe_config_path': None,
'root_mb': 0,
'swap_mb': 0})
except exception.NodeNotFound:
pass
# NOTE(danms): the instance_type extra_specs do not need to be
# present/correct at deactivate time, so pass something empty
# to avoid an extra lookup
instance_type = dict(extra_specs={
'baremetal:deploy_ramdisk_id': 'ignore',
'baremetal:deploy_kernel_id': 'ignore'})
try:
image_info = get_tftp_image_info(instance, instance_type)
except exception.NovaException:
pass
else:
for label in image_info.keys():
(uuid, path) = image_info[label]
bm_utils.unlink_without_raise(path)
bm_utils.unlink_without_raise(get_pxe_config_file_path(instance))
try:
macs = self._collect_mac_addresses(context, node)
except db_exc.DBError:
pass
else:
for mac in macs:
bm_utils.unlink_without_raise(get_pxe_mac_path(mac))
bm_utils.rmtree_without_raise(
os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
def activate_node(self, context, node, instance):
"""Wait for PXE deployment to complete."""
locals = {'error': '', 'started': False}
def _wait_for_deploy():
"""Called at an interval until the deployment completes."""
try:
row = db.bm_node_get(context, node['id'])
if instance['uuid'] != row.get('instance_uuid'):
locals['error'] = _("Node associated with another instance"
" while waiting for deploy of %s")
raise loopingcall.LoopingCallDone()
status = row.get('task_state')
if (status == baremetal_states.DEPLOYING
and locals['started'] == False):
LOG.info(_("PXE deploy started for instance %s")
% instance['uuid'])
locals['started'] = True
elif status in (baremetal_states.DEPLOYDONE,
baremetal_states.ACTIVE):
LOG.info(_("PXE deploy completed for instance %s")
% instance['uuid'])
raise loopingcall.LoopingCallDone()
elif status == baremetal_states.DEPLOYFAIL:
locals['error'] = _("PXE deploy failed for instance %s")
except exception.NodeNotFound:
locals['error'] = _("Baremetal node deleted while waiting "
"for deployment of instance %s")
if (CONF.baremetal.pxe_deploy_timeout and
timeutils.utcnow() > expiration):
locals['error'] = _("Timeout reached while waiting for "
"PXE deploy of instance %s")
if locals['error']:
raise loopingcall.LoopingCallDone()
expiration = timeutils.utcnow() + datetime.timedelta(
seconds=CONF.baremetal.pxe_deploy_timeout)
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
timer.start(interval=1).wait()
if locals['error']:
raise exception.InstanceDeployFailure(
locals['error'] % instance['uuid'])
def deactivate_node(self, context, node, instance):
pass
|
{
"content_hash": "719552a1dc58e0fe0b3f9578641b3c03",
"timestamp": "",
"source": "github",
"line_count": 477,
"max_line_length": 79,
"avg_line_length": 38.18867924528302,
"alnum_prop": 0.5789964866051822,
"repo_name": "qwefi/nova",
"id": "21fc2ce47246055a8360cb4bc8e35ac4e12752a7",
"size": "18956",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/virt/baremetal/pxe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11596912"
},
{
"name": "Shell",
"bytes": "17148"
}
],
"symlink_target": ""
}
|
from yubikit.core import TRANSPORT, YUBIKEY
from yubikit.management import (
CAPABILITY,
FORM_FACTOR,
DeviceInfo,
DeviceConfig,
Version,
)
from yubikit.support import get_name
from typing import cast
def info(form_factor):
return DeviceInfo(
config=cast(DeviceConfig, None),
serial=None,
version=Version(5, 3, 0),
form_factor=form_factor,
supported_capabilities={TRANSPORT.USB: 0xFF}, # type: ignore
is_locked=False,
is_fips=False,
)
def info_nfc(form_factor):
with_nfc = info(form_factor)
with_nfc.supported_capabilities[TRANSPORT.NFC] = 0xFF
return with_nfc
def test_yk5_formfactors():
kt = YUBIKEY.YK4
assert get_name(info(FORM_FACTOR.USB_A_KEYCHAIN), kt) == "YubiKey 5A"
assert get_name(info_nfc(FORM_FACTOR.USB_A_KEYCHAIN), kt) == "YubiKey 5 NFC"
assert get_name(info(FORM_FACTOR.USB_A_NANO), kt) == "YubiKey 5 Nano"
assert get_name(info(FORM_FACTOR.USB_C_KEYCHAIN), kt) == "YubiKey 5C"
assert get_name(info_nfc(FORM_FACTOR.USB_C_KEYCHAIN), kt) == "YubiKey 5C NFC"
assert get_name(info(FORM_FACTOR.USB_C_NANO), kt) == "YubiKey 5C Nano"
assert get_name(info(FORM_FACTOR.USB_C_LIGHTNING), kt) == "YubiKey 5Ci"
assert get_name(info(FORM_FACTOR.USB_A_BIO), kt) == "YubiKey Bio"
assert get_name(info(FORM_FACTOR.USB_C_BIO), kt) == "YubiKey C Bio"
assert get_name(info(FORM_FACTOR.UNKNOWN), kt) == "YubiKey 5"
assert get_name(info_nfc(FORM_FACTOR.UNKNOWN), kt) == "YubiKey 5 NFC"
def fido(device_info):
device_info.supported_capabilities[TRANSPORT.USB] = (
CAPABILITY.U2F | CAPABILITY.FIDO2
)
if TRANSPORT.NFC in device_info.supported_capabilities:
device_info.supported_capabilities[TRANSPORT.NFC] = (
CAPABILITY.U2F | CAPABILITY.FIDO2
)
return device_info
def test_yk5_fido():
kt = YUBIKEY.YK4
assert (
get_name(fido(info(FORM_FACTOR.USB_A_BIO)), kt) == "YubiKey Bio - FIDO Edition"
)
assert (
get_name(fido(info(FORM_FACTOR.USB_C_BIO)), kt)
== "YubiKey C Bio - FIDO Edition"
)
def fips(device_info):
device_info.is_fips = True
return device_info
def test_yk5_fips_formfactors():
kt = YUBIKEY.YK4
assert get_name(fips(info(FORM_FACTOR.USB_A_KEYCHAIN)), kt) == "YubiKey 5A FIPS"
assert (
get_name(fips(info_nfc(FORM_FACTOR.USB_A_KEYCHAIN)), kt) == "YubiKey 5 NFC FIPS"
)
assert get_name(fips(info(FORM_FACTOR.USB_A_NANO)), kt) == "YubiKey 5 Nano FIPS"
assert get_name(fips(info(FORM_FACTOR.USB_C_KEYCHAIN)), kt) == "YubiKey 5C FIPS"
assert (
get_name(fips(info_nfc(FORM_FACTOR.USB_C_KEYCHAIN)), kt)
== "YubiKey 5C NFC FIPS"
)
assert get_name(fips(info(FORM_FACTOR.USB_C_NANO)), kt) == "YubiKey 5C Nano FIPS"
assert get_name(fips(info(FORM_FACTOR.USB_C_LIGHTNING)), kt) == "YubiKey 5Ci FIPS"
assert get_name(fips(info(FORM_FACTOR.USB_A_BIO)), kt) == "YubiKey Bio FIPS"
assert get_name(fips(info(FORM_FACTOR.USB_C_BIO)), kt) == "YubiKey C Bio FIPS"
assert get_name(fips(info(FORM_FACTOR.UNKNOWN)), kt) == "YubiKey 5 FIPS"
assert get_name(fips(info_nfc(FORM_FACTOR.UNKNOWN)), kt) == "YubiKey 5 NFC FIPS"
|
{
"content_hash": "7dfe466fbf1928946dd1e647cccdf83c",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 88,
"avg_line_length": 36.044444444444444,
"alnum_prop": 0.6532059186189889,
"repo_name": "Yubico/yubikey-manager",
"id": "a0e3160173c6e6c2892049d16a05e691e4cdad6e",
"size": "3244",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_device.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "1028"
},
{
"name": "Python",
"bytes": "684639"
},
{
"name": "Shell",
"bytes": "929"
}
],
"symlink_target": ""
}
|
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import re
import urlparse
import xbmcaddon
import time
from salts_lib.trans_utils import i18n
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
BASE_URL = 'http://watchseries-online.ch'
class WSO_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))
self.max_pages = int(xbmcaddon.Addon().getSetting('%s-max_pages' % (self.get_name())))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE])
@classmethod
def get_name(cls):
return 'wso.ch'
def resolve_link(self, link):
url = urlparse.urljoin(self.base_url, link)
html = self._http_get(url, cache_limit=.5)
match = re.search('href=(?:\'|")([^"\']+)(?:"|\')>Click Here to Play', html)
if match:
return match.group(1)
else:
return link
def format_source_label(self, item):
label = '[%s] %s' % (item['quality'], item['host'])
return label
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
pattern = 'class="[^"]*tdhost".*?href="([^"]+)">([^<]+)'
for match in re.finditer(pattern, html, re.DOTALL):
stream_url, host = match.groups()
hoster = {'multi-part': False, 'host': host.lower(), 'class': self, 'url': stream_url, 'quality': self._get_quality(video, host, QUALITIES.HIGH), 'views': None, 'rating': None, 'direct': False}
hosters.append(hoster)
return hosters
def get_url(self, video):
return super(WSO_Scraper, self)._default_get_url(video)
@classmethod
def get_settings(cls):
settings = super(WSO_Scraper, cls).get_settings()
name = cls.get_name()
settings.append(' <setting id="%s-max_pages" type="slider" range="1,50" option="int" label=" %s" default="1" visible="eq(-6,true)"/>' % (name, i18n('max_pages')))
return settings
def search(self, video_type, title, year):
url = urlparse.urljoin(self.base_url, '/index')
html = self._http_get(url, cache_limit=24)
results = []
for list_match in re.finditer('class="ddmcc"(.*?)</div>', html, re.DOTALL):
list_frag = list_match.group(1)
norm_title = self._normalize_title(title)
pattern = 'href="([^"]+)">([^<]+)'
for match in re.finditer(pattern, list_frag):
url, match_title = match.groups('')
if norm_title in self._normalize_title(match_title):
result = {'url': url.replace(self.base_url, ''), 'title': match_title, 'year': ''}
results.append(result)
return results
def _get_episode_url(self, show_url, video):
episode_pattern = '<h2>\s*<a\s+href="([^"]+)[^>]+title="[^"]+[Ss]%02d[Ee]%02d[ "]' % (int(video.season), int(video.episode))
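# For example, season=1, episode=2 renders the pattern fragment
# '[Ss]01[Ee]02', matching titles such as "Show S01E02" (illustrative).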
title_pattern = ''
airdate_pattern = '<h2>\s*<a\s+href="([^"]+)[^>]+title="[^"]+{year} {p_month} {p_day}[ \)"]'
for page in xrange(1, self.max_pages + 1):
url = show_url
if page > 1: url = '%s/page/%s' % (show_url, page)
# if page is blank, don't continue getting pages
url = urlparse.urljoin(self.base_url, url)
html = self._http_get(url, cache_limit=2)
if not html:
return
ep_url = super(WSO_Scraper, self)._default_get_episode_url(url, video, episode_pattern, title_pattern, airdate_pattern)
if ep_url is not None:
return ep_url
def _http_get(self, url, data=None, cache_limit=8):
return super(WSO_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, data=data, cache_limit=cache_limit)
|
{
"content_hash": "27aef91c3a259352c7fa6cb8d4feb236",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 209,
"avg_line_length": 41.11864406779661,
"alnum_prop": 0.5946001648804616,
"repo_name": "aplicatii-romanesti/allinclusive-kodi-pi",
"id": "a744202d662bc5ed27e1561a19a80dbea7277c1b",
"size": "4852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".kodi/addons/plugin.video.salts/scrapers/wso_scraper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6178"
},
{
"name": "Python",
"bytes": "8657978"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
}
|
from typing import MutableMapping, MutableSequence
from google.protobuf import timestamp_pb2 # type: ignore
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.osconfig.v1",
manifest={
"VulnerabilityReport",
"GetVulnerabilityReportRequest",
"ListVulnerabilityReportsRequest",
"ListVulnerabilityReportsResponse",
"CVSSv3",
},
)
class VulnerabilityReport(proto.Message):
r"""This API resource represents the vulnerability report for a
specified Compute Engine virtual machine (VM) instance at a given
point in time.
For more information, see `Vulnerability
reports <https://cloud.google.com/compute/docs/instances/os-inventory-management#vulnerability-reports>`__.
Attributes:
name (str):
Output only. The ``vulnerabilityReport`` API resource name.
Format:
``projects/{project_number}/locations/{location}/instances/{instance_id}/vulnerabilityReport``
vulnerabilities (MutableSequence[google.cloud.osconfig_v1.types.VulnerabilityReport.Vulnerability]):
Output only. List of vulnerabilities
affecting the VM.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. The timestamp for when the last
vulnerability report was generated for the VM.
"""
class Vulnerability(proto.Message):
r"""A vulnerability affecting the VM instance.
Attributes:
details (google.cloud.osconfig_v1.types.VulnerabilityReport.Vulnerability.Details):
Contains metadata as per the upstream feed of
the operating system and NVD.
installed_inventory_item_ids (MutableSequence[str]):
Corresponds to the ``INSTALLED_PACKAGE`` inventory item on
the VM. This field displays the inventory items affected by
this vulnerability. If the vulnerability report was not
updated after the VM inventory update, these values might
not display in VM inventory. For some distros, this field
may be empty.
available_inventory_item_ids (MutableSequence[str]):
Corresponds to the ``AVAILABLE_PACKAGE`` inventory item on
the VM. If the vulnerability report was not updated after
the VM inventory update, these values might not display in
VM inventory. If there is no available fix, the field is
empty. The ``inventory_item`` value specifies the latest
``SoftwarePackage`` available to the VM that fixes the
vulnerability.
create_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp for when the vulnerability was
first detected.
update_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp for when the vulnerability was
last modified.
items (MutableSequence[google.cloud.osconfig_v1.types.VulnerabilityReport.Vulnerability.Item]):
List of items affected by the vulnerability.
"""
class Details(proto.Message):
r"""Contains metadata information for the vulnerability. This
information is collected from the upstream feed of the operating
system.
Attributes:
cve (str):
The CVE of the vulnerability. CVE cannot be
empty and the combination of <cve,
classification> should be unique across
vulnerabilities for a VM.
cvss_v2_score (float):
The CVSS V2 score of this vulnerability. CVSS
V2 score is on a scale of 0 - 10 where 0
indicates low severity and 10 indicates high
severity.
cvss_v3 (google.cloud.osconfig_v1.types.CVSSv3):
The full description of the CVSSv3 for this
vulnerability from NVD.
severity (str):
Assigned severity/impact ranking from the
distro.
description (str):
The note or description describing the
vulnerability from the distro.
references (MutableSequence[google.cloud.osconfig_v1.types.VulnerabilityReport.Vulnerability.Details.Reference]):
Corresponds to the references attached to the
``VulnerabilityDetails``.
"""
class Reference(proto.Message):
r"""A reference for this vulnerability.
Attributes:
url (str):
The url of the reference.
source (str):
The source of the reference e.g. NVD.
"""
url: str = proto.Field(
proto.STRING,
number=1,
)
source: str = proto.Field(
proto.STRING,
number=2,
)
cve: str = proto.Field(
proto.STRING,
number=1,
)
cvss_v2_score: float = proto.Field(
proto.FLOAT,
number=2,
)
cvss_v3: "CVSSv3" = proto.Field(
proto.MESSAGE,
number=3,
message="CVSSv3",
)
severity: str = proto.Field(
proto.STRING,
number=4,
)
description: str = proto.Field(
proto.STRING,
number=5,
)
references: MutableSequence[
"VulnerabilityReport.Vulnerability.Details.Reference"
] = proto.RepeatedField(
proto.MESSAGE,
number=6,
message="VulnerabilityReport.Vulnerability.Details.Reference",
)
class Item(proto.Message):
r"""OS inventory item that is affected by a vulnerability or
fixed as a result of a vulnerability.
Attributes:
installed_inventory_item_id (str):
Corresponds to the ``INSTALLED_PACKAGE`` inventory item on
the VM. This field displays the inventory items affected by
this vulnerability. If the vulnerability report was not
updated after the VM inventory update, these values might
not display in VM inventory. For some operating systems,
this field might be empty.
available_inventory_item_id (str):
Corresponds to the ``AVAILABLE_PACKAGE`` inventory item on
the VM. If the vulnerability report was not updated after
the VM inventory update, these values might not display in
VM inventory. If there is no available fix, the field is
empty. The ``inventory_item`` value specifies the latest
``SoftwarePackage`` available to the VM that fixes the
vulnerability.
fixed_cpe_uri (str):
The recommended `CPE
URI <https://cpe.mitre.org/specification/>`__ update that
contains a fix for this vulnerability.
upstream_fix (str):
The upstream OS patch, packages or KB that
fixes the vulnerability.
"""
installed_inventory_item_id: str = proto.Field(
proto.STRING,
number=1,
)
available_inventory_item_id: str = proto.Field(
proto.STRING,
number=2,
)
fixed_cpe_uri: str = proto.Field(
proto.STRING,
number=3,
)
upstream_fix: str = proto.Field(
proto.STRING,
number=4,
)
details: "VulnerabilityReport.Vulnerability.Details" = proto.Field(
proto.MESSAGE,
number=1,
message="VulnerabilityReport.Vulnerability.Details",
)
installed_inventory_item_ids: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=2,
)
available_inventory_item_ids: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=3,
)
create_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
items: MutableSequence[
"VulnerabilityReport.Vulnerability.Item"
] = proto.RepeatedField(
proto.MESSAGE,
number=6,
message="VulnerabilityReport.Vulnerability.Item",
)
name: str = proto.Field(
proto.STRING,
number=1,
)
vulnerabilities: MutableSequence[Vulnerability] = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=Vulnerability,
)
update_time: timestamp_pb2.Timestamp = proto.Field(
proto.MESSAGE,
number=3,
message=timestamp_pb2.Timestamp,
)
class GetVulnerabilityReportRequest(proto.Message):
r"""A request message for getting the vulnerability report for
the specified VM.
Attributes:
name (str):
Required. API resource name for vulnerability resource.
Format:
``projects/{project}/locations/{location}/instances/{instance}/vulnerabilityReport``
For ``{project}``, either ``project-number`` or
``project-id`` can be provided. For ``{instance}``, either
Compute Engine ``instance-id`` or ``instance-name`` can be
provided.
"""
name: str = proto.Field(
proto.STRING,
number=1,
)
class ListVulnerabilityReportsRequest(proto.Message):
r"""A request message for listing vulnerability reports for all
VM instances in the specified location.
Attributes:
parent (str):
Required. The parent resource name.
Format:
``projects/{project}/locations/{location}/instances/-``
For ``{project}``, either ``project-number`` or
``project-id`` can be provided.
page_size (int):
The maximum number of results to return.
page_token (str):
A pagination token returned from a previous call to
``ListVulnerabilityReports`` that indicates where this
listing should continue from.
filter (str):
If provided, this field specifies the criteria that must be
met by a ``vulnerabilityReport`` API resource to be included
in the response.
"""
parent: str = proto.Field(
proto.STRING,
number=1,
)
page_size: int = proto.Field(
proto.INT32,
number=2,
)
page_token: str = proto.Field(
proto.STRING,
number=3,
)
filter: str = proto.Field(
proto.STRING,
number=4,
)
class ListVulnerabilityReportsResponse(proto.Message):
r"""A response message for listing vulnerability reports for all
VM instances in the specified location.
Attributes:
vulnerability_reports (MutableSequence[google.cloud.osconfig_v1.types.VulnerabilityReport]):
List of vulnerabilityReport objects.
next_page_token (str):
The pagination token to retrieve the next
page of vulnerabilityReports object.
"""
@property
def raw_page(self):
return self
vulnerability_reports: MutableSequence["VulnerabilityReport"] = proto.RepeatedField(
proto.MESSAGE,
number=1,
message="VulnerabilityReport",
)
next_page_token: str = proto.Field(
proto.STRING,
number=2,
)
class CVSSv3(proto.Message):
r"""Common Vulnerability Scoring System version 3.
For details, see
https://www.first.org/cvss/specification-document
Attributes:
base_score (float):
The base score is a function of the base
metric scores.
https://www.first.org/cvss/specification-document#Base-Metrics
exploitability_score (float):
The Exploitability sub-score equation is
derived from the Base Exploitability metrics.
https://www.first.org/cvss/specification-document#2-1-Exploitability-Metrics
impact_score (float):
The Impact sub-score equation is derived from
the Base Impact metrics.
attack_vector (google.cloud.osconfig_v1.types.CVSSv3.AttackVector):
This metric reflects the context by which
vulnerability exploitation is possible.
attack_complexity (google.cloud.osconfig_v1.types.CVSSv3.AttackComplexity):
This metric describes the conditions beyond
the attacker's control that must exist in order
to exploit the vulnerability.
privileges_required (google.cloud.osconfig_v1.types.CVSSv3.PrivilegesRequired):
This metric describes the level of privileges
an attacker must possess before successfully
exploiting the vulnerability.
user_interaction (google.cloud.osconfig_v1.types.CVSSv3.UserInteraction):
This metric captures the requirement for a
human user, other than the attacker, to
participate in the successful compromise of the
vulnerable component.
scope (google.cloud.osconfig_v1.types.CVSSv3.Scope):
The Scope metric captures whether a
vulnerability in one vulnerable component
impacts resources in components beyond its
security scope.
confidentiality_impact (google.cloud.osconfig_v1.types.CVSSv3.Impact):
This metric measures the impact to the
confidentiality of the information resources
managed by a software component due to a
successfully exploited vulnerability.
integrity_impact (google.cloud.osconfig_v1.types.CVSSv3.Impact):
This metric measures the impact to integrity
of a successfully exploited vulnerability.
availability_impact (google.cloud.osconfig_v1.types.CVSSv3.Impact):
This metric measures the impact to the
availability of the impacted component resulting
from a successfully exploited vulnerability.
"""
class AttackVector(proto.Enum):
r"""This metric reflects the context by which vulnerability
exploitation is possible.
"""
ATTACK_VECTOR_UNSPECIFIED = 0
ATTACK_VECTOR_NETWORK = 1
ATTACK_VECTOR_ADJACENT = 2
ATTACK_VECTOR_LOCAL = 3
ATTACK_VECTOR_PHYSICAL = 4
class AttackComplexity(proto.Enum):
r"""This metric describes the conditions beyond the attacker's
control that must exist in order to exploit the vulnerability.
"""
ATTACK_COMPLEXITY_UNSPECIFIED = 0
ATTACK_COMPLEXITY_LOW = 1
ATTACK_COMPLEXITY_HIGH = 2
class PrivilegesRequired(proto.Enum):
r"""This metric describes the level of privileges an attacker
must possess before successfully exploiting the vulnerability.
"""
PRIVILEGES_REQUIRED_UNSPECIFIED = 0
PRIVILEGES_REQUIRED_NONE = 1
PRIVILEGES_REQUIRED_LOW = 2
PRIVILEGES_REQUIRED_HIGH = 3
class UserInteraction(proto.Enum):
r"""This metric captures the requirement for a human user, other
than the attacker, to participate in the successful compromise
of the vulnerable component.
"""
USER_INTERACTION_UNSPECIFIED = 0
USER_INTERACTION_NONE = 1
USER_INTERACTION_REQUIRED = 2
class Scope(proto.Enum):
r"""The Scope metric captures whether a vulnerability in one
vulnerable component impacts resources in components beyond its
security scope.
"""
SCOPE_UNSPECIFIED = 0
SCOPE_UNCHANGED = 1
SCOPE_CHANGED = 2
class Impact(proto.Enum):
r"""The Impact metrics capture the effects of a successfully
exploited vulnerability on the component that suffers the worst
outcome that is most directly and predictably associated with
the attack.
"""
IMPACT_UNSPECIFIED = 0
IMPACT_HIGH = 1
IMPACT_LOW = 2
IMPACT_NONE = 3
base_score: float = proto.Field(
proto.FLOAT,
number=1,
)
exploitability_score: float = proto.Field(
proto.FLOAT,
number=2,
)
impact_score: float = proto.Field(
proto.FLOAT,
number=3,
)
attack_vector: AttackVector = proto.Field(
proto.ENUM,
number=5,
enum=AttackVector,
)
attack_complexity: AttackComplexity = proto.Field(
proto.ENUM,
number=6,
enum=AttackComplexity,
)
privileges_required: PrivilegesRequired = proto.Field(
proto.ENUM,
number=7,
enum=PrivilegesRequired,
)
user_interaction: UserInteraction = proto.Field(
proto.ENUM,
number=8,
enum=UserInteraction,
)
scope: Scope = proto.Field(
proto.ENUM,
number=9,
enum=Scope,
)
confidentiality_impact: Impact = proto.Field(
proto.ENUM,
number=10,
enum=Impact,
)
integrity_impact: Impact = proto.Field(
proto.ENUM,
number=11,
enum=Impact,
)
availability_impact: Impact = proto.Field(
proto.ENUM,
number=12,
enum=Impact,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
{
"content_hash": "16eaacccfd432cc7dcf283f39c684b76",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 129,
"avg_line_length": 36.65060240963855,
"alnum_prop": 0.5894696471619548,
"repo_name": "googleapis/python-os-config",
"id": "f0d16b206e2ce96722d558e18778be23fd4d564e",
"size": "18852",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/osconfig_v1/types/vulnerability.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1810720"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
}
|
import serial;
import sys;
AR_1 = 9600
AR_2 = 9601
class lightcontroller:
# Map out the number of the light to
# the channel on one of the two Arduino units.
# 0 -> 11 are the frst Arduino channels
# 20 -> 31 are the second Arduino channels
# The index is the 'light number', the value is the arduino channel
lightArray = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31];
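# Example: light 0 maps to channel 0 on the first Arduino; light 12 maps to
# value 20, which lightOn() converts to channel 0 on the second Arduino.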
# Class constructor
def __init__(self):
# Open a serial connection to each Arduino
self.ar1 = serial.Serial()
self.ar1.port = AR_1
self.ar1.baudrate = 9600
self.ar1.parity = serial.PARITY_NONE
self.ar1.bytesize = serial.EIGHTBITS
self.ar1.stopbits = serial.STOPBITS_ONE
self.ar1.timeout = 1
self.ar1.xonxoff = False
self.ar1.rtscts = False
self.ar1.dsrdtr = False
self.ar2 = serial.Serial()
self.ar2.port = AR_2
self.ar2.baudrate = 9600  # assumed typo in the original (9601); both Arduinos use the standard 9600 baud
self.ar2.parity = serial.PARITY_NONE
self.ar2.bytesize = serial.EIGHTBITS
self.ar2.stopbits = serial.STOPBITS_ONE
self.ar2.timeout = 1
self.ar2.xonxoff = False
self.ar2.rtscts = False
self.ar2.dsrdtr = False
self.ar1.open()
self.ar1.readline()
self.ar2.open()
self.ar2.readline()
self.lightOn(0, 1) # first call doesn't work for some reason. Get it out of the way.
self.lightOff(0) # Just in case it does work
self.lightOn(20, 1)
self.lightOff(20)
#self.ar2 = serial.Serial(AR_2, 9600);
# Method to turn on a light
# light -> light number
# timeout -> a timeout value in seconds
def lightOn(self, light, timeout):
# Figure out which Arduino and which channel to use
chan = self.lightArray[light];
ar = self.ar1;
if chan > 11:
chan = chan - 20;
ar = self.ar2;
# Maximum timeout value is 25.4 seconds
#if timeout > 25.4:
#timeout = 25.4;
# Granulate the timeout to 0.1 seconds
#timeout = int(round(timeout * 10));
# Write data to the serial port
ar.write(chr(255));
ar.write(chr(chan));
ar.write(chr(int(timeout)));
# Method to turn off a light,
# really just sets lightOn
def lightOff(self, light):
self.lightOn(light, 0);
|
{
"content_hash": "a6780ff239eb810bba62af692ad23d11",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 102,
"avg_line_length": 26.285714285714285,
"alnum_prop": 0.6385869565217391,
"repo_name": "KappaEtaKappa/Sound-2-Disco",
"id": "b6580dfe868970ce94fdae30999e803641855199",
"size": "2437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LightController.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1888"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "F#",
"bytes": "3065"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Processing",
"bytes": "6116"
},
{
"name": "Python",
"bytes": "680223"
},
{
"name": "Shell",
"bytes": "8302"
},
{
"name": "Tcl",
"bytes": "2065501"
}
],
"symlink_target": ""
}
|
import glob
from st2tests.api import FunctionalTest
from st2tests.fixturesloader import get_fixtures_packs_base_path
__all__ = ["PackConfigSchemasControllerTestCase"]
PACKS_PATH = get_fixtures_packs_base_path()
CONFIG_SCHEMA_COUNT = len(glob.glob("%s/*/config.schema.yaml" % (PACKS_PATH)))
assert CONFIG_SCHEMA_COUNT > 1
class PackConfigSchemasControllerTestCase(FunctionalTest):
register_packs = True
def test_get_all(self):
resp = self.app.get("/v1/config_schemas")
self.assertEqual(resp.status_int, 200)
self.assertEqual(
len(resp.json),
CONFIG_SCHEMA_COUNT,
"/v1/config_schemas did not return all schemas.",
)
def test_get_one_success(self):
resp = self.app.get("/v1/config_schemas/dummy_pack_1")
self.assertEqual(resp.status_int, 200)
self.assertEqual(resp.json["pack"], "dummy_pack_1")
self.assertIn("api_key", resp.json["attributes"])
def test_get_one_doesnt_exist(self):
# Pack exists, schema doesn't
resp = self.app.get("/v1/config_schemas/dummy_pack_2", expect_errors=True)
self.assertEqual(resp.status_int, 404)
self.assertIn(
"Unable to identify resource with pack_ref ", resp.json["faultstring"]
)
# Pack doesn't exist
ref_or_id = "pack_doesnt_exist"
resp = self.app.get("/v1/config_schemas/%s" % ref_or_id, expect_errors=True)
self.assertEqual(resp.status_int, 404)
# Changed from: 'Unable to find the PackDB instance'
self.assertTrue(
'Resource with a ref or id "%s" not found' % ref_or_id
in resp.json["faultstring"]
)
|
{
"content_hash": "d7e09104e02e11caa5a13d8783aceb61",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 84,
"avg_line_length": 35.25,
"alnum_prop": 0.6377068557919622,
"repo_name": "Plexxi/st2",
"id": "a38c278f077d7f73a74e01c45e35384b37870328",
"size": "2320",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2api/tests/unit/controllers/v1/test_pack_config_schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
}
|
import eventlet.queue
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from osprofiler import profiler
from heat.common import context
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import messaging as rpc_messaging
from heat.engine import check_resource
from heat.engine import sync_point
from heat.rpc import worker_client as rpc_client
LOG = logging.getLogger(__name__)
@profiler.trace_cls("rpc")
class WorkerService(service.Service):
"""Service that has 'worker' actor in convergence.
This service is dedicated to handle internal messages to the 'worker'
(a.k.a. 'converger') actor in convergence. Messages on this bus will
use the 'cast' rather than 'call' method to anycast the message to
an engine that will handle it asynchronously. It won't wait for
or expect replies from these messages.
"""
RPC_API_VERSION = '1.3'
def __init__(self,
host,
topic,
engine_id,
thread_group_mgr):
super(WorkerService, self).__init__()
self.host = host
self.topic = topic
self.engine_id = engine_id
self.thread_group_mgr = thread_group_mgr
self._rpc_client = rpc_client.WorkerClient()
self._rpc_server = None
self.target = None
def start(self):
target = oslo_messaging.Target(
version=self.RPC_API_VERSION,
server=self.engine_id,
topic=self.topic)
self.target = target
LOG.info(_LI("Starting %(topic)s (%(version)s) in engine %(engine)s."),
{'topic': self.topic,
'version': self.RPC_API_VERSION,
'engine': self.engine_id})
self._rpc_server = rpc_messaging.get_rpc_server(target, self)
self._rpc_server.start()
super(WorkerService, self).start()
def stop(self):
if self._rpc_server is None:
return
# Stop rpc connection at first for preventing new requests
LOG.info(_LI("Stopping %(topic)s in engine %(engine)s."),
{'topic': self.topic, 'engine': self.engine_id})
try:
self._rpc_server.stop()
self._rpc_server.wait()
except Exception as e:
LOG.error(_LE("%(topic)s is failed to stop, %(exc)s"),
{'topic': self.topic, 'exc': e})
super(WorkerService, self).stop()
def stop_traversal(self, stack):
"""Update current traversal to stop workers from propagating.
        Marks the stack as FAILED due to cancellation, but allows all
in_progress resources to complete normally; no worker is stopped
abruptly.
"""
reason = 'User cancelled stack %s ' % stack.action
# state_set will update the current traversal to '' for FAILED state
old_trvsl = stack.current_traversal
updated = stack.state_set(stack.action, stack.FAILED, reason)
if not updated:
LOG.warning(_LW("Failed to stop traversal %(trvsl)s of stack "
"%(name)s while cancelling the operation."),
{'name': stack.name, 'trvsl': old_trvsl})
@context.request_context
def check_resource(self, cnxt, resource_id, current_traversal, data,
is_update, adopt_stack_data):
"""Process a node in the dependency graph.
The node may be associated with either an update or a cleanup of its
associated resource.
"""
resource_data = dict(sync_point.deserialize_input_data(data))
rsrc, rsrc_owning_stack, stack = check_resource.load_resource(
cnxt, resource_id, resource_data, is_update)
if rsrc is None:
return
if current_traversal != stack.current_traversal:
LOG.debug('[%s] Traversal cancelled; stopping.', current_traversal)
return
msg_queue = eventlet.queue.LightQueue()
try:
self.thread_group_mgr.add_msg_queue(stack.id, msg_queue)
cr = check_resource.CheckResource(self.engine_id, self._rpc_client,
self.thread_group_mgr, msg_queue)
cr.check(cnxt, resource_id, current_traversal, resource_data,
is_update, adopt_stack_data, rsrc, stack)
finally:
self.thread_group_mgr.remove_msg_queue(None,
stack.id, msg_queue)
@context.request_context
def cancel_check_resource(self, cnxt, stack_id):
"""Cancel check_resource for given stack.
All the workers running for the given stack will be
cancelled.
"""
# TODO(ananta): Implement cancel check-resource
LOG.debug('Cancelling workers for stack [%s]', stack_id)
|
{
"content_hash": "3ab32128cd5161b70cb5333c8965617b",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 36.88059701492537,
"alnum_prop": 0.6023876972885471,
"repo_name": "cwolferh/heat-scratch",
"id": "f434b9ff9924a332701f5345ab61e7597fd721e1",
"size": "5553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
}
|
from datadog_checks.base import ConfigurationError
REQUIRED_QUERY_FIELDS = ['stat', 'metric_prefix']
def validate_query(query):
for field in REQUIRED_QUERY_FIELDS:
if field not in query:
raise ConfigurationError("Custom Query: {} missing required field: {}. Skipping".format(query, field))
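# Illustrative usage (not part of the original module; the query dicts are
# hypothetical examples):
#
#   validate_query({'stat': 'threadPoolModule', 'metric_prefix': 'thread_pools'})
#   # -> passes silently
#   validate_query({'stat': 'threadPoolModule'})
#   # -> raises ConfigurationError: missing required field 'metric_prefix'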
|
{
"content_hash": "205299ed3f25125118107f9cad596007",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 114,
"avg_line_length": 35.22222222222222,
"alnum_prop": 0.7097791798107256,
"repo_name": "DataDog/integrations-core",
"id": "ccbb13b51923703d5307a40ce4e21b36d205a2ed",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibm_was/datadog_checks/ibm_was/validation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
}
|
"""Test pushbullet integration."""
from collections.abc import Awaitable
from typing import Callable
from unittest.mock import MagicMock, patch
import aiohttp
from pushover_complete import BadAPIRequestError
import pytest
from homeassistant.components.notify import DOMAIN as NOTIFY_DOMAIN
from homeassistant.components.pushover.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from . import MOCK_CONFIG
from tests.common import MockConfigEntry
from tests.components.repairs import get_repairs
@pytest.fixture(autouse=True)
def mock_pushover():
"""Mock pushover."""
with patch(
"pushover_complete.PushoverAPI._generic_post", return_value={}
) as mock_generic_post:
yield mock_generic_post
async def test_setup(
hass: HomeAssistant,
hass_ws_client: Callable[
[HomeAssistant], Awaitable[aiohttp.ClientWebSocketResponse]
],
) -> None:
"""Test integration failed due to an error."""
assert await async_setup_component(
hass,
NOTIFY_DOMAIN,
{
NOTIFY_DOMAIN: [
{
"name": "Pushover",
"platform": "pushover",
"api_key": "MYAPIKEY",
"user_key": "MYUSERKEY",
}
]
},
)
await hass.async_block_till_done()
assert hass.config_entries.async_entries(DOMAIN)
issues = await get_repairs(hass, hass_ws_client)
assert len(issues) == 1
assert issues[0]["issue_id"] == "deprecated_yaml"
async def test_async_setup_entry_success(hass: HomeAssistant) -> None:
"""Test pushover successful setup."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_CONFIG,
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
async def test_unique_id_updated(hass: HomeAssistant) -> None:
"""Test updating unique_id to new format."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_CONFIG, unique_id="MYUSERKEY")
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.LOADED
assert entry.unique_id is None
async def test_async_setup_entry_failed_invalid_api_key(
hass: HomeAssistant, mock_pushover: MagicMock
) -> None:
"""Test pushover failed setup due to invalid api key."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_CONFIG,
)
entry.add_to_hass(hass)
mock_pushover.side_effect = BadAPIRequestError("400: application token is invalid")
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.SETUP_ERROR
async def test_async_setup_entry_failed_conn_error(
hass: HomeAssistant, mock_pushover: MagicMock
) -> None:
"""Test pushover failed setup due to conn error."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_CONFIG,
)
entry.add_to_hass(hass)
mock_pushover.side_effect = BadAPIRequestError
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ConfigEntryState.SETUP_RETRY
|
{
"content_hash": "194e42df1659084d4acaf5e841d0f515",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 87,
"avg_line_length": 31.944444444444443,
"alnum_prop": 0.6828985507246377,
"repo_name": "nkgilley/home-assistant",
"id": "7a8b02c93a0c6ce9225d06e383d48b449b209b09",
"size": "3450",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/pushover/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import TouchSQL
#theDatabase = TouchSQL.CSqliteDatabase.alloc().initInMemory()
theDatabase = TouchSQL.CSqliteDatabase.alloc().initWithPath_('/Users/schwa/Desktop/test.db')
theDatabase.open_(None)
# theStatement = TouchSQL.CSqliteStatement.alloc().initWithDatabase_string_(theDatabase, 'create table foo (name varchar(100), value integer)')
# print theStatement
#
# print theStatement.step_(None)
theStatement = TouchSQL.CSqliteStatement.alloc().initWithDatabase_string_(theDatabase, 'SELECT * FROM messages')
print theStatement.rows_(None)
|
{
"content_hash": "ba235c109c8161de0c05ce79ce8382d5",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 143,
"avg_line_length": 36.4,
"alnum_prop": 0.7893772893772893,
"repo_name": "favormm/TouchSQL",
"id": "3b2230893f5ff82355db52719afa39316965d08b",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonTests/test.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import mock
from rally.benchmark.scenarios.glance import utils
from rally.benchmark import utils as butils
from rally import exceptions as rally_exceptions
from rally.openstack.common.fixture import mockpatch
from tests.benchmark.scenarios import test_utils
from tests import fakes
from tests import test
BM_UTILS = 'rally.benchmark.utils'
GLANCE_UTILS = 'rally.benchmark.scenarios.glance.utils'
class GlanceScenarioTestCase(test.TestCase):
def setUp(self):
super(GlanceScenarioTestCase, self).setUp()
self.image = mock.Mock()
self.image1 = mock.Mock()
self.res_is = mockpatch.Patch(BM_UTILS + ".resource_is")
self.get_fm = mockpatch.Patch(BM_UTILS + '.get_from_manager')
self.wait_for = mockpatch.Patch(GLANCE_UTILS + ".bench_utils.wait_for")
self.wait_for_delete = mockpatch.Patch(
GLANCE_UTILS + ".bench_utils.wait_for_delete")
self.useFixture(self.wait_for)
self.useFixture(self.wait_for_delete)
self.useFixture(self.res_is)
self.useFixture(self.get_fm)
self.gfm = self.get_fm.mock
self.useFixture(mockpatch.Patch('time.sleep'))
self.scenario = utils.GlanceScenario()
def test_generate_random_name(self):
for length in [8, 16, 32, 64]:
name = self.scenario._generate_random_name(length)
self.assertEqual(len(name), 16 + length)
def test_failed_image_status(self):
self.get_fm.cleanUp()
image_manager = fakes.FakeFailedImageManager()
self.assertRaises(rally_exceptions.GetResourceFailure,
butils.get_from_manager(),
image_manager.create('fails', 'url', 'cf', 'df'))
def _test_atomic_action_timer(self, atomic_actions_time, name):
action_duration = test_utils.get_atomic_action_timer_value_by_name(
atomic_actions_time, name)
self.assertIsNotNone(action_duration)
self.assertIsInstance(action_duration, float)
@mock.patch(GLANCE_UTILS + '.GlanceScenario.clients')
def test_list_images(self, mock_clients):
images_list = []
mock_clients("glance").images.list.return_value = images_list
scenario = utils.GlanceScenario()
return_images_list = scenario._list_images()
self.assertEqual(images_list, return_images_list)
self._test_atomic_action_timer(scenario.atomic_actions_time(),
'glance.list_images')
@mock.patch(GLANCE_UTILS + '.GlanceScenario.clients')
def test_create_image(self, mock_clients):
mock_clients("glance").images.create.return_value = self.image
scenario = utils.GlanceScenario()
return_image = scenario._create_image('image_name',
'image_location',
'container_format',
'disk_format')
self.wait_for.mock.assert_called_once_with(self.image,
update_resource=self.gfm(),
is_ready=self.res_is.mock(),
check_interval=1,
timeout=120)
self.res_is.mock.assert_has_calls(mock.call('active'))
self.assertEqual(self.wait_for.mock(), return_image)
self._test_atomic_action_timer(scenario.atomic_actions_time(),
'glance.create_image')
def test_delete_image(self):
scenario = utils.GlanceScenario()
scenario._delete_image(self.image)
self.image.delete.assert_called_once_with()
self.wait_for_delete.\
mock.assert_called_once_with(self.image,
update_resource=self.gfm(),
check_interval=1,
timeout=120)
self._test_atomic_action_timer(scenario.atomic_actions_time(),
'glance.delete_image')
|
{
"content_hash": "89b0ee77309af5a831b8bf450761a448",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 46.01111111111111,
"alnum_prop": 0.5790871770103839,
"repo_name": "ytsarev/rally",
"id": "a4c2edbe64abd056707de835489588e3995e7134",
"size": "4739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/benchmark/scenarios/glance/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "984256"
},
{
"name": "Shell",
"bytes": "14201"
}
],
"symlink_target": ""
}
|
from django.utils import timezone
from django.views.decorators.cache import cache_control
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.response import Response
from rest_framework.viewsets import ViewSet
from .scrapers import PharmacyGuardScraper
class PharmacyViewSet(ViewSet):
permission_classes = (IsAuthenticatedOrReadOnly,)
@cache_control(max_age=14400)
def list(self, request):
"""
Get information about pharmacies on duty today and tomorrow.
The corresponding Places (pharmacies) are included in the response.
"""
scraper = PharmacyGuardScraper(version=request.version)
return Response({
'meta': {
'lastUpdated': timezone.now(),
'source': scraper.get_source(),
},
'data': scraper.get_data(),
})
|
{
"content_hash": "791ae02dab327e728b8f5b0ba4cfc1de",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 32.7037037037037,
"alnum_prop": 0.6749716874292185,
"repo_name": "zarautz/pagoeta",
"id": "a3b978edaf9c5dc99c6a356f4996f7f701555474",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pagoeta/apps/health/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "51755"
},
{
"name": "Python",
"bytes": "121815"
},
{
"name": "Shell",
"bytes": "801"
}
],
"symlink_target": ""
}
|
import time
import cv2
import numpy as np
import shm
from vision import options
from vision.vision_common import (
draw_angled_arrow,
get_angle_from_rotated_rect,
Hierarchy,
is_clipping,
)
from vision.modules.base import ModuleBase
from mission.constants.config import bins as constants
CONTOUR_HEURISTIC_LIMIT = 5
CONTOUR_SCALED_HEURISTIC_LIMIT = 2
options = [
options.BoolOption('clipping_guard', constants.clipping_guard),
options.BoolOption('debug', False),
options.IntOption('max_fps', 30, 0, 30),
options.IntOption('border_bgr_compon', constants.border_bgr_compon, 0, 2),
options.IntOption('block_size', 401, 0, 4000, lambda x: x % 2 == 1),
# options.IntOption('border_thresh', constants.border_thresh, 0, 255),
options.IntOption('morph_size', 25, 1, 30, lambda x: x % 2 == 1),
    options.DoubleOption('min_size', 0.1, 0, 2),  # Minimum length of the shorter side (as a fraction of frame width)
options.DoubleOption('min_rectangularity', 0.7, 0, 1),
options.DoubleOption('min_inner_outer_ratio', 0.3, 0, 1),
options.DoubleOption('min_cover_diff', 25, 0, 255),
options.IntOption('blur', 27, 0, 100, lambda x: x % 2 == 1),
]
class Bins(ModuleBase):
def post(self, *args, **kwargs):
if self.options['debug']:
super().post(*args, **kwargs)
def draw_contours(self, mat, *contours):
cv2.drawContours(mat, contours, -1, (0, 127, 255), thickness=3)
def process(self, mat):
start_time = time.time()
self.process_bins(mat)
shm.bins_vision.clock.set(not shm.bins_vision.clock.get())
runtime = time.time() - start_time
min_runtime = 1 / self.options['max_fps']
if min_runtime > runtime:
time.sleep(min_runtime - runtime)
runtime = min_runtime
print('FPS: {}'.format(1 / (runtime)))
def process_bins(self, mat):
results = [shm.bins_bin0.get(), shm.bins_bin1.get()]
for result in results:
result.visible = False
self.post('orig', mat)
self.bgr_sp = cv2.split(mat)
_, threshed = cv2.threshold(
self.bgr_sp[self.options['border_bgr_compon']],
0,
255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU,
)
self.post('threshed', threshed)
morph_kernel = cv2.getStructuringElement(
cv2.MORPH_ELLIPSE,
(self.options['morph_size'],) * 2,
)
# Get rid of small things
morphed = cv2.erode(threshed, morph_kernel)
self.post('morphed', morphed)
_, contours, hierarchy = cv2.findContours(
morphed.copy(),
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE,
)
if hierarchy is None:
hierarchy = [[]]
hier = Hierarchy(hierarchy)
all_contours = [{'i': i, 'contour': contours[i]} for i in range(len(contours))]
big_rects = []
for info in all_contours:
info['rect'] = cv2.minAreaRect(info['contour'])
if hier.first_child(info['i']) == -1:
continue
if min(info['rect'][1]) / len(mat[1]) < self.options['min_size']:
continue
rectangularity = cv2.contourArea(info['contour']) / np.prod(info['rect'][1])
if rectangularity < self.options['min_rectangularity']:
continue
if is_clipping(mat, info['contour']) and self.options['clipping_guard']:
continue
big_rects.append(info)
concentric_rects = []
for info in big_rects:
max_child_i = max(
hier.siblings(hier.first_child(info['i'])),
key=lambda x: cv2.contourArea(contours[x])
)
info['inner_contour'] = contours[max_child_i]
info['inner_rect'] = cv2.minAreaRect(info['inner_contour'])
info['inner_width'], info['inner_length'] = sorted(info['inner_rect'][1])
if info['inner_width'] < self.options['min_size'] * self.options['min_inner_outer_ratio']:
continue
inner_area = cv2.contourArea(info['inner_contour'])
rectangularity = inner_area / (info['inner_rect'][1][0] * info['inner_rect'][1][1])
if rectangularity < self.options['min_rectangularity']:
continue
inner_mask = np.zeros(mat.shape[:2], dtype=np.uint8)
cv2.drawContours(inner_mask, [info['inner_contour']], -1, 255, -1)
info['average_cover'] = cv2.mean(self.bgr_sp[2], inner_mask)[0]
info['angle'] = get_angle_from_rotated_rect(info['rect'])
concentric_rects.append(info)
concentric_rects.sort(key=lambda x: -x['rect'][1][0] * x['rect'][1][1])
for info in concentric_rects:
info['covered'] = False
if len(concentric_rects) >= 2:
r0, r1 = tuple(concentric_rects[:2])
diff = r0['average_cover'] - r1['average_cover']
if abs(diff) >= self.options['min_cover_diff']:
r0_covered = diff > 0
r0['covered'], r1['covered'] = r0_covered, not r0_covered
for info, result in zip(concentric_rects, results):
result.visible = True
result.clipping = False
result.x, result.y = self.normalized(info['inner_rect'][0])
result.width, result.length = self.normalized_size(sorted(info['rect'][1]))
result.angle = info['angle']
result.covered = info['covered']
shm.bins_bin0.set(results[0])
shm.bins_bin1.set(results[1])
if self.options['debug']:
contours_mat = mat.copy()
self.draw_contours(contours_mat, *[info['contour'] for info in concentric_rects])
self.draw_contours(contours_mat, *[info['inner_contour'] for info in concentric_rects])
for info in concentric_rects:
draw_angled_arrow(contours_mat, info['rect'][0], info['angle'])
if info['covered']:
cv2.drawContours(
contours_mat,
[info['inner_contour']],
-1,
(20, 255, 57),
thickness=10,
)
self.post('contours', contours_mat)
all_con = mat.copy()
self.draw_contours(all_con, *[info['contour'] for info in big_rects])
self.post('all outer', all_con)
if __name__ == '__main__':
Bins('downward', options)()
|
{
"content_hash": "47d34489bc4ad867b144df11900694b0",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 102,
"avg_line_length": 36.02197802197802,
"alnum_prop": 0.5578096400244051,
"repo_name": "cuauv/software",
"id": "8a8087e84f2656cf44860aa30c2a8e7741491ff9",
"size": "6580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vision/modules/old/2018/bins.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "271780"
},
{
"name": "C++",
"bytes": "2831785"
},
{
"name": "CMake",
"bytes": "5365"
},
{
"name": "CSS",
"bytes": "5082"
},
{
"name": "Dockerfile",
"bytes": "2758"
},
{
"name": "Emacs Lisp",
"bytes": "19028"
},
{
"name": "GLSL",
"bytes": "6783"
},
{
"name": "HTML",
"bytes": "3642"
},
{
"name": "Haskell",
"bytes": "4770"
},
{
"name": "JavaScript",
"bytes": "113413"
},
{
"name": "Makefile",
"bytes": "12887"
},
{
"name": "Nix",
"bytes": "16335"
},
{
"name": "OCaml",
"bytes": "3804"
},
{
"name": "PureBasic",
"bytes": "58"
},
{
"name": "Python",
"bytes": "2141765"
},
{
"name": "Scheme",
"bytes": "129544"
},
{
"name": "Shell",
"bytes": "68820"
},
{
"name": "TeX",
"bytes": "25243"
},
{
"name": "Vim script",
"bytes": "125505"
}
],
"symlink_target": ""
}
|
"""
Sets the chain ID for a PDB file.
usage: python pdb_chain.py -<chain> <pdb file>
example: python pdb_chain.py -A 1CTF.pdb
Author: {0} ({1})
This program is part of the PDB tools distributed with HADDOCK
or with the HADDOCK tutorial. The utilities in this package
can be used to quickly manipulate PDB files, with the benefit
of 'piping' several different commands. This is a rewrite of old
FORTRAN77 code that was taking too much effort to compile. RIP.
"""
import os
import re
import sys
__author__ = "Joao Rodrigues"
__email__ = "j.p.g.l.m.rodrigues@gmail.com"
USAGE = __doc__.format(__author__, __email__)
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options."""
if not len(args):
# No chain, from pipe
if not sys.stdin.isatty():
pdbfh = sys.stdin
chain = ' '
else:
sys.stderr.write(USAGE)
sys.exit(1)
elif len(args) == 1:
# Chain & Pipe _or_ file & no chain
if re.match('\-[A-Za-z0-9]', args[0]):
chain = args[0][1:]
if not sys.stdin.isatty():
pdbfh = sys.stdin
else:
sys.stderr.write(USAGE)
sys.exit(1)
else:
if not os.path.isfile(args[0]):
sys.stderr.write('File not found: ' + args[0] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
pdbfh = open(args[0], 'r')
chain = ' '
elif len(args) == 2:
# Chain & File
if not re.match('\-[A-Za-z0-9]', args[0]):
sys.stderr.write('Invalid chain ID: ' + args[0] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
if not os.path.isfile(args[1]):
sys.stderr.write('File not found: ' + args[1] + '\n')
sys.stderr.write(USAGE)
sys.exit(1)
chain = args[0][1:]
pdbfh = open(args[1], 'r')
else:
sys.stderr.write(USAGE)
sys.exit(1)
return (chain, pdbfh)
def _alter_chain(fhandle, chain_id):
"""Enclosing logic in a function to speed up a bit"""
coord_re = re.compile('^(ATOM|HETATM)')
fhandle = fhandle
chain_id = chain_id
for line in fhandle:
line = line.strip()
if coord_re.match(line):
yield line[:21] + chain_id[0] + line[22:] + '\n'
else:
yield line + '\n'
if __name__ == '__main__':
# Check Input
chain, pdbfh = check_input(sys.argv[1:])
# Do the job
new_pdb = _alter_chain(pdbfh, chain)
try:
sys.stdout.write(''.join(new_pdb))
sys.stdout.flush()
except IOError:
# This is here to catch Broken Pipes
# for example to use 'head' or 'tail' without
# the error message showing up
pass
# last line of the script
# We can close it even if it is sys.stdin
pdbfh.close()
sys.exit(0)
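# Illustrative usage sketch (not part of the original script; 1CTF.pdb is the
# example file named in the docstring, the chain ID is arbitrary):
#
#   python pdb_chain.py -A 1CTF.pdb > 1CTF_chainA.pdb        # read from a file
#   cat 1CTF.pdb | python pdb_chain.py -A > 1CTF_chainA.pdb  # read from stdin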
|
{
"content_hash": "e8a830bd4941c97133135feeb3ca2f9b",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 82,
"avg_line_length": 28.317307692307693,
"alnum_prop": 0.5456706281833617,
"repo_name": "Adrimel/pdb-tools",
"id": "20c6b37db3e6f59db89e90b7576af22c365dd656",
"size": "2968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdb_chain.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70473"
}
],
"symlink_target": ""
}
|
import uuid
from kubernetes_py import K8sNamespace, K8sConfig
from kubernetes_py.K8sExceptions import *
from kubernetes_py.models.v1.Namespace import Namespace
from kubernetes_py.models.v1.NamespaceSpec import NamespaceSpec
from kubernetes_py.models.v1.NamespaceStatus import NamespaceStatus
from kubernetes_py.models.v1.ObjectMeta import ObjectMeta
from tests import _utils
from tests.BaseTest import BaseTest
class K8sNamespaceTest(BaseTest):
def setUp(self):
_utils.cleanup_namespaces()
def tearDown(self):
_utils.cleanup_namespaces()
# --------------------------------------------------------------------------------- init
def test_init_no_args(self):
try:
K8sNamespace()
self.fail("Should not fail.")
except SyntaxError:
pass
except IOError:
pass
except Exception as err:
self.fail("Unhandled exception: [ {0} ]".format(err.__class__.__name__))
def test_init_with_invalid_config(self):
config = object()
with self.assertRaises(SyntaxError):
K8sNamespace(config=config)
def test_init_with_invalid_name(self):
name = object()
with self.assertRaises(SyntaxError):
_utils.create_namespace(name=name)
def test_init_with_name(self):
name = "yoname"
ns = _utils.create_namespace(name=name)
self.assertIsNotNone(ns)
self.assertIsInstance(ns, K8sNamespace)
self.assertEqual("Namespace", ns.obj_type)
self.assertEqual(ns.name, name)
self.assertIsInstance(ns.config, K8sConfig)
def test_init_with_name_and_config(self):
nspace = "default"
config = K8sConfig(kubeconfig=_utils.kubeconfig_fallback, namespace=nspace)
name = "yoname"
ns = _utils.create_namespace(config=config, name=name)
self.assertIsNotNone(ns)
self.assertIsInstance(ns, K8sNamespace)
self.assertEqual(ns.name, name)
self.assertEqual("Namespace", ns.obj_type)
self.assertIsInstance(ns.config, K8sConfig)
self.assertEqual(nspace, ns.config.namespace)
# --------------------------------------------------------------------------------- struct
def test_struct_k8s_namespace(self):
name = "yoname"
ns = _utils.create_namespace(name=name)
self.assertIsInstance(ns, K8sNamespace)
self.assertIsInstance(ns.base_url, str)
self.assertIsInstance(ns.config, K8sConfig)
self.assertIsInstance(ns.model, Namespace)
self.assertIsInstance(ns.name, str)
self.assertIsInstance(ns.obj_type, str)
def test_struct_namespace(self):
name = "yoname"
ns = _utils.create_namespace(name=name)
self.assertIsInstance(ns, K8sNamespace)
self.assertIsInstance(ns.model, Namespace)
self.assertIsInstance(ns.model.metadata, ObjectMeta)
self.assertIsInstance(ns.model.spec, NamespaceSpec)
self.assertIsInstance(ns.model.status, NamespaceStatus)
# --------------------------------------------------------------------------------- add annotation
def test_add_annotation_none_args(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
try:
ns.add_annotation()
self.fail("Should not fail.")
except Exception as err:
self.assertIsInstance(err, SyntaxError)
def test_add_annotation_invalid_args(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
k = object()
v = object()
try:
ns.add_annotation(k, v)
self.fail("Should not fail.")
except Exception as err:
self.assertIsInstance(err, SyntaxError)
def test_add_annotation(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
k = "yokey"
v = "yovalue"
ns.add_annotation(k, v)
self.assertIn(k, ns.annotations)
self.assertEqual(v, ns.annotations[k])
# --------------------------------------------------------------------------------- add label
def test_add_label_none_args(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
with self.assertRaises(SyntaxError):
ns.add_label()
def test_add_label_invalid_args(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
k = object()
v = object()
with self.assertRaises(SyntaxError):
ns.add_label(k, v)
def test_add_label(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
k = "yokey"
v = "yovalue"
ns.add_label(k, v)
self.assertIn(k, ns.labels)
self.assertEqual(v, ns.labels[k])
# --------------------------------------------------------------------------------- get
def test_get_nonexistent(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
if _utils.is_reachable(ns.config):
with self.assertRaises(NotFoundException):
ns.get()
def test_get(self):
name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
ns = _utils.create_namespace(name=name)
if _utils.is_reachable(ns.config):
ns.create()
from_get = ns.get()
self.assertIsInstance(from_get, K8sNamespace)
self.assertEqual(ns, from_get)
# --------------------------------------------------------------------------------- get annotation
def test_get_annotation_none_arg(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
ann = ns.get_annotation()
self.assertIsNone(ann)
def test_get_annotation_invalid_arg(self):
name = "yonamespace"
svc = _utils.create_namespace(name=name)
k = object()
ann = svc.get_annotation(k)
self.assertIsNone(ann)
def test_get_annotation_doesnt_exist(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
k = "yokey"
v = ns.get_annotation(k)
self.assertIsNone(v)
def test_get_annotation(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
k = "yokey"
v_in = "yovalue"
ns.add_annotation(k, v_in)
v_out = ns.get_annotation(k)
self.assertEqual(v_in, v_out)
# --------------------------------------------------------------------------------- get annotations
def test_get_annotations_doesnt_exist(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
self.assertEqual({}, ns.annotations)
def test_get_annotations(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
count = 4
for i in range(0, count):
k = "yokey_{0}".format(i)
v = "yovalue_{0}".format(i)
ns.add_annotation(k, v)
self.assertEqual(count, len(ns.annotations))
for i in range(0, count):
k = "yokey_{0}".format(i)
v = "yovalue_{0}".format(i)
self.assertIn(k, ns.annotations)
self.assertEqual(v, ns.annotations[k])
# --------------------------------------------------------------------------------- get label
def test_get_label_none_arg(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
self.assertIsNone(ns.get_label())
def test_get_label_invalid_arg(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
k = object()
self.assertIsNone(ns.get_label(k))
def test_get_label_doesnt_exist(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
k = "yokey"
self.assertIsNone(ns.get_label(k))
def test_get_label(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
k = "yokey"
v = "yovalue"
ns.add_label(k, v)
self.assertEqual(v, ns.get_label(k))
# --------------------------------------------------------------------------------- get labels
def test_get_labels(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
labels = {"yokey": "yovalue"}
ns.labels = labels
self.assertEqual(labels, ns.labels)
# --------------------------------------------------------------------------------- set annotations
def test_set_annotations_none_arg(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
with self.assertRaises(SyntaxError):
ns.annotations = None
def test_set_annotations_invalid_arg(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
labels = object()
with self.assertRaises(SyntaxError):
ns.annotations = labels
def test_set_annotations_str_int(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
labels = {"yokey": 1234}
ns.annotations = labels
self.assertEqual(ns.annotations, labels)
def test_set_annotations(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
anns = {"yokey": "yovalue"}
ns.annotations = anns
self.assertEqual(anns, ns.annotations)
# --------------------------------------------------------------------------------- set labels
def test_set_labels_none_arg(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
with self.assertRaises(SyntaxError):
ns.labels = None
def test_set_labels_invalid_arg(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
labels = object()
with self.assertRaises(SyntaxError):
ns.labels = labels
def test_set_labels_invalid_dict(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
labels = {"yokey": 1234}
with self.assertRaises(SyntaxError):
ns.labels = labels
def test_set_labels(self):
name = "yonamespace"
ns = _utils.create_namespace(name=name)
labels = {"yokey": "yovalue"}
ns.labels = labels
self.assertEqual(labels, ns.labels)
# --------------------------------------------------------------------------------- api - get by name
# def test_get_by_name_nonexistent(self):
# name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
# ns = utils.create_namespace(name=name)
# if utils.is_reachable(ns.config):
# _list = K8sNamespace.get_by_name(config=ns.config, name=name)
# self.assertIsInstance(_list, list)
# self.assertEqual(0, len(_list))
#
# def test_get_by_name(self):
# name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
# ns = utils.create_namespace(name=name)
# if utils.is_reachable(ns.config):
# ns.create()
# _list = K8sNamespace.get_by_name(config=ns.config, name=name)
# self.assertIsInstance(_list, list)
# self.assertEqual(1, len(_list))
# from_get = _list[0]
# self.assertIsInstance(from_get, K8sNamespace)
# self.assertEqual(from_get, ns)
# --------------------------------------------------------------------------------- api - list
def test_list_without_create(self):
name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
ns = _utils.create_namespace(name=name)
if _utils.is_reachable(ns.config):
_list = ns.list()
for x in _list:
self.assertIsInstance(x, K8sNamespace)
self.assertIsInstance(_list, list)
def test_list(self):
name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
ns = _utils.create_namespace(name=name)
if _utils.is_reachable(ns.config):
ns.create()
_list = ns.list()
for x in _list:
self.assertIsInstance(x, K8sNamespace)
self.assertIsInstance(_list, list)
# --------------------------------------------------------------------------------- api - create
def test_create(self):
name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
ns = _utils.create_namespace(name=name)
if _utils.is_reachable(ns.config):
ns.create()
from_get = ns.get()
self.assertEqual(ns, from_get)
def test_create_already_exists(self):
name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
ns = _utils.create_namespace(name=name)
if _utils.is_reachable(ns.config):
ns.create()
with self.assertRaises(AlreadyExistsException):
ns.create()
# --------------------------------------------------------------------------------- api - update
def test_update_nonexistent(self):
name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
ns = _utils.create_namespace(name=name)
if _utils.is_reachable(ns.config):
with self.assertRaises(NotFoundException):
ns.update()
# --------------------------------------------------------------------------------- api - delete
def test_delete_nonexistent(self):
name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
ns = _utils.create_namespace(name=name)
if _utils.is_reachable(ns.config):
with self.assertRaises(NotFoundException):
ns.delete()
def test_delete(self):
name = "yo-{0}".format(str(uuid.uuid4().hex[:16]))
ns = _utils.create_namespace(name=name)
if _utils.is_reachable(ns.config):
ns.create()
from_get = K8sNamespace.get_by_name(ns.config, ns.name)
self.assertIsInstance(from_get, K8sNamespace)
self.assertEqual(name, from_get.name)
ns.delete()
from_get = K8sNamespace.get_by_name(ns.config, ns.name)
self.assertIsNone(from_get)
|
{
"content_hash": "19747e22889e40d20668934472cb3b1a",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 105,
"avg_line_length": 35.96455696202532,
"alnum_prop": 0.535196395889061,
"repo_name": "froch/kubernetes-py",
"id": "8120f69d583c366e54d2ed4aedb11862a40b8168",
"size": "14384",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_k8s_namespace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1042823"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
}
|
from optparse import OptionParser
from arc_hybrid import ArcHybridLSTM
import pickle, utils, os, time, sys
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("--train", dest="conll_train", help="Annotated CONLL train file", metavar="FILE", default="../data/PTB_SD_3_3_0/train.conll")
parser.add_option("--dev", dest="conll_dev", help="Annotated CONLL dev file", metavar="FILE", default="../data/PTB_SD_3_3_0/dev.conll")
parser.add_option("--test", dest="conll_test", help="Annotated CONLL test file", metavar="FILE", default="../data/PTB_SD_3_3_0/test.conll")
parser.add_option("--params", dest="params", help="Parameters file", metavar="FILE", default="params.pickle")
parser.add_option("--extrn", dest="external_embedding", help="External embeddings", metavar="FILE")
parser.add_option("--model", dest="model", help="Load/Save model file", metavar="FILE", default="barchybrid.model")
parser.add_option("--wembedding", type="int", dest="wembedding_dims", default=100)
parser.add_option("--pembedding", type="int", dest="pembedding_dims", default=25)
parser.add_option("--rembedding", type="int", dest="rembedding_dims", default=25)
parser.add_option("--epochs", type="int", dest="epochs", default=30)
parser.add_option("--hidden", type="int", dest="hidden_units", default=100)
parser.add_option("--hidden2", type="int", dest="hidden2_units", default=0)
parser.add_option("--k", type="int", dest="window", default=3)
parser.add_option("--lr", type="float", dest="learning_rate", default=0.1)
parser.add_option("--outdir", type="string", dest="output", default="results")
parser.add_option("--activation", type="string", dest="activation", default="tanh")
parser.add_option("--lstmlayers", type="int", dest="lstm_layers", default=2)
parser.add_option("--lstmdims", type="int", dest="lstm_dims", default=200)
parser.add_option("--dynet-seed", type="int", dest="seed", default=7)
parser.add_option("--disableoracle", action="store_false", dest="oracle", default=True)
parser.add_option("--disableblstm", action="store_false", dest="blstmFlag", default=True)
parser.add_option("--bibi-lstm", action="store_true", dest="bibiFlag", default=False)
parser.add_option("--usehead", action="store_true", dest="headFlag", default=False)
parser.add_option("--userlmost", action="store_true", dest="rlFlag", default=False)
parser.add_option("--userl", action="store_true", dest="rlMostFlag", default=False)
parser.add_option("--predict", action="store_true", dest="predictFlag", default=False)
parser.add_option("--dynet-mem", type="int", dest="cnn_mem", default=512)
(options, args) = parser.parse_args()
print 'Using external embedding:', options.external_embedding
if not options.predictFlag:
if not (options.rlFlag or options.rlMostFlag or options.headFlag):
print 'You must use either --userlmost or --userl or --usehead (you can use multiple)'
sys.exit()
print 'Preparing vocab'
words, w2i, pos, rels = utils.vocab(options.conll_train)
with open(os.path.join(options.output, options.params), 'w') as paramsfp:
pickle.dump((words, w2i, pos, rels, options), paramsfp)
print 'Finished collecting vocab'
print 'Initializing blstm arc hybrid:'
parser = ArcHybridLSTM(words, pos, rels, w2i, options)
for epoch in xrange(options.epochs):
print 'Starting epoch', epoch
parser.Train(options.conll_train)
conllu = (os.path.splitext(options.conll_dev.lower())[1] == '.conllu')
devpath = os.path.join(options.output, 'dev_epoch_' + str(epoch+1) + ('.conll' if not conllu else '.conllu'))
utils.write_conll(devpath, parser.Predict(options.conll_dev))
if not conllu:
os.system('perl src/utils/eval.pl -g ' + options.conll_dev + ' -s ' + devpath + ' > ' + devpath + '.txt')
else:
os.system('python src/utils/evaluation_script/conll17_ud_eval.py -v -w src/utils/evaluation_script/weights.clas ' + options.conll_dev + ' ' + devpath + ' > ' + devpath + '.txt')
print 'Finished predicting dev'
parser.Save(os.path.join(options.output, options.model + str(epoch+1)))
else:
with open(options.params, 'r') as paramsfp:
words, w2i, pos, rels, stored_opt = pickle.load(paramsfp)
stored_opt.external_embedding = options.external_embedding
parser = ArcHybridLSTM(words, pos, rels, w2i, stored_opt)
parser.Load(options.model)
conllu = (os.path.splitext(options.conll_test.lower())[1] == '.conllu')
tespath = os.path.join(options.output, 'test_pred.conll' if not conllu else 'test_pred.conllu')
ts = time.time()
pred = list(parser.Predict(options.conll_test))
te = time.time()
utils.write_conll(tespath, pred)
if not conllu:
os.system('perl src/utils/eval.pl -g ' + options.conll_test + ' -s ' + tespath + ' > ' + tespath + '.txt')
else:
            os.system('python src/utils/evaluation_script/conll17_ud_eval.py -v -w src/utils/evaluation_script/weights.clas ' + options.conll_test + ' ' + tespath + ' > ' + tespath + '.txt')
print 'Finished predicting test',te-ts
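# Illustrative invocations (not part of the original script; the paths are the
# defaults declared above and assume running from the barchybrid directory):
#
#   training:   python src/parser.py --outdir results --usehead --userlmost
#   prediction: python src/parser.py --predict --model results/barchybrid.model30 \
#                      --params results/params.pickle --outdir results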
|
{
"content_hash": "e9848877cdfd54fb3e4bc33ac0175ae2",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 193,
"avg_line_length": 61.18181818181818,
"alnum_prop": 0.6424591381872214,
"repo_name": "elikip/bist-parser",
"id": "5d0f75f4550c734fbc433ead29da82231e249e6b",
"size": "5384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barchybrid/src/parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "99308"
},
{
"name": "Python",
"bytes": "106226"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from guardian.shortcuts import assign_perm
from .forms import UserRegistrationForm
def home_page(request):
return render_to_response('home.html', RequestContext(request))
# return HttpResponseRedirect('/script_builder/munger_builder_index/')
@login_required
def app_index(request):
template = loader.get_template('app_index.html')
context = RequestContext(request)
return HttpResponse(template.render(context))
def register(request):
context = RequestContext(request)
if request.method == 'POST':
user_form = UserRegistrationForm(data=request.POST)
        # If the form is valid...
if user_form.is_valid():
new_user = user_form.save()
assign_perm('script_builder.add_mungerbuilder', new_user)
assign_perm('script_builder.add_fieldtype', new_user)
assign_perm('script_builder.add_datafield', new_user)
assign_perm('script_builder.add_pivotfield', new_user)
messages.success(request, "Thanks for registering. You are now logged in.")
new_user = authenticate(username=request.POST['username'],
password=request.POST['password1'])
login(request, new_user)
return HttpResponseRedirect('/script_builder/munger_builder_index/')
else:
input_dict = request.POST.dict()
for key in input_dict:
if not input_dict[key]:
messages.error(request, 'Please enter: {0}'.format(key))
return HttpResponseRedirect('/register/')
    # Not an HTTP POST, so render a blank registration form ready for user input.
else:
user_form = UserRegistrationForm()
# Render the template depending on the context.
return render_to_response(
'registration/register.html',
{'user_form': user_form},
context)
|
{
"content_hash": "21913ed2db01573b385fc2f3f2dd05c0",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 87,
"avg_line_length": 37.016129032258064,
"alnum_prop": 0.6671023965141613,
"repo_name": "cscanlin/munger-builder",
"id": "fa4c531ebafa4155e37baef34ee206262cf2c1ed",
"size": "2295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "munger_builder/views.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51193"
},
{
"name": "HTML",
"bytes": "28701"
},
{
"name": "JavaScript",
"bytes": "129371"
},
{
"name": "Python",
"bytes": "52431"
},
{
"name": "Shell",
"bytes": "615"
}
],
"symlink_target": ""
}
|
"""
Interfaces with Alarm.com alarm control panels.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel.alarmdotcom/
"""
import logging
import re
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_CODE, CONF_NAME, CONF_PASSWORD, CONF_USERNAME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyalarmdotcom==0.3.2']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Alarm.com'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_CODE): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
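# Example configuration.yaml entry matching the schema above (illustrative;
# the credentials are placeholders):
#
#   alarm_control_panel:
#     - platform: alarmdotcom
#       username: YOUR_USERNAME
#       password: YOUR_PASSWORD
#       code: 1234          # optional positive integer
#       name: Alarm.com     # optional, defaults to 'Alarm.com'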
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up a Alarm.com control panel."""
name = config.get(CONF_NAME)
code = config.get(CONF_CODE)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
alarmdotcom = AlarmDotCom(hass, name, code, username, password)
await alarmdotcom.async_login()
async_add_entities([alarmdotcom])
class AlarmDotCom(alarm.AlarmControlPanel):
"""Representation of an Alarm.com status."""
def __init__(self, hass, name, code, username, password):
"""Initialize the Alarm.com status."""
from pyalarmdotcom import Alarmdotcom
_LOGGER.debug('Setting up Alarm.com...')
self._hass = hass
self._name = name
self._code = str(code) if code else None
self._username = username
self._password = password
self._websession = async_get_clientsession(self._hass)
self._state = None
self._alarm = Alarmdotcom(
username, password, self._websession, hass.loop)
async def async_login(self):
"""Login to Alarm.com."""
await self._alarm.async_login()
async def async_update(self):
"""Fetch the latest state."""
await self._alarm.async_update()
return self._alarm.state
@property
def name(self):
"""Return the name of the alarm."""
return self._name
@property
def code_format(self):
"""Return one or more digits/characters."""
if self._code is None:
return None
if isinstance(self._code, str) and re.search('^\\d+$', self._code):
return alarm.FORMAT_NUMBER
return alarm.FORMAT_TEXT
@property
def state(self):
"""Return the state of the device."""
if self._alarm.state.lower() == 'disarmed':
return STATE_ALARM_DISARMED
if self._alarm.state.lower() == 'armed stay':
return STATE_ALARM_ARMED_HOME
if self._alarm.state.lower() == 'armed away':
return STATE_ALARM_ARMED_AWAY
return None
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'sensor_status': self._alarm.sensor_status
}
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
if self._validate_code(code):
await self._alarm.async_alarm_disarm()
async def async_alarm_arm_home(self, code=None):
"""Send arm hom command."""
if self._validate_code(code):
await self._alarm.async_alarm_arm_home()
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
if self._validate_code(code):
await self._alarm.async_alarm_arm_away()
def _validate_code(self, code):
"""Validate given code."""
check = self._code is None or code == self._code
if not check:
_LOGGER.warning("Wrong code entered")
return check
|
{
"content_hash": "40f815a38cb63bdadc912be8ad52ab06",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 32.976,
"alnum_prop": 0.6392527899078118,
"repo_name": "nugget/home-assistant",
"id": "4f2913771b1ee09fab9a3751888da09f6ac3876a",
"size": "4122",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/alarm_control_panel/alarmdotcom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14492390"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17526"
}
],
"symlink_target": ""
}
|
from . import dbapi20
import pymysql
from pymysql.tests import base
import unittest
class test_MySQLdb(dbapi20.DatabaseAPI20Test):
driver = pymysql
connect_args = ()
connect_kw_args = base.PyMySQLTestCase.databases[0].copy()
connect_kw_args.update(
dict(
read_default_file="~/.my.cnf",
charset="utf8",
sql_mode="ANSI,STRICT_TRANS_TABLES,TRADITIONAL",
)
)
def test_setoutputsize(self):
pass
def test_setoutputsize_basic(self):
pass
"""The tests on fetchone and fetchall and rowcount bogusly
test for an exception if the statement cannot return a
result set. MySQL always returns a result set; it's just that
some things return empty result sets."""
def test_fetchall(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchall should raise an Error if called
# without executing a query that may return rows (such
# as a select)
self.assertRaises(self.driver.Error, cur.fetchall)
self.executeDDL1(cur)
for sql in self._populate():
cur.execute(sql)
# cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
## self.assertRaises(self.driver.Error,cur.fetchall)
cur.execute("select name from %sbooze" % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1, len(self.samples)))
self.assertEqual(
len(rows),
len(self.samples),
"cursor.fetchall did not retrieve all rows",
)
rows = [r[0] for r in rows]
rows.sort()
for i in range(0, len(self.samples)):
self.assertEqual(
rows[i], self.samples[i], "cursor.fetchall retrieved incorrect rows"
)
rows = cur.fetchall()
self.assertEqual(
len(rows),
0,
"cursor.fetchall should return an empty list if called "
"after the whole result set has been fetched",
)
self.assertTrue(cur.rowcount in (-1, len(self.samples)))
self.executeDDL2(cur)
cur.execute("select name from %sbarflys" % self.table_prefix)
rows = cur.fetchall()
self.assertTrue(cur.rowcount in (-1, 0))
self.assertEqual(
len(rows),
0,
"cursor.fetchall should return an empty list if "
"a select query returns no rows",
)
finally:
con.close()
def test_fetchone(self):
con = self._connect()
try:
cur = con.cursor()
# cursor.fetchone should raise an Error if called before
# executing a select-type query
self.assertRaises(self.driver.Error, cur.fetchone)
# cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
self.executeDDL1(cur)
## self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute("select name from %sbooze" % self.table_prefix)
self.assertEqual(
cur.fetchone(),
None,
"cursor.fetchone should return None if a query retrieves " "no rows",
)
self.assertTrue(cur.rowcount in (-1, 0))
# cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
cur.execute(
"insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix)
)
## self.assertRaises(self.driver.Error,cur.fetchone)
cur.execute("select name from %sbooze" % self.table_prefix)
r = cur.fetchone()
self.assertEqual(
len(r), 1, "cursor.fetchone should have retrieved a single row"
)
self.assertEqual(
r[0], "Victoria Bitter", "cursor.fetchone retrieved incorrect data"
)
## self.assertEqual(cur.fetchone(),None,
## 'cursor.fetchone should return None if no more rows available'
## )
self.assertTrue(cur.rowcount in (-1, 1))
finally:
con.close()
# Same complaint as for fetchall and fetchone
def test_rowcount(self):
con = self._connect()
try:
cur = con.cursor()
self.executeDDL1(cur)
## self.assertEqual(cur.rowcount,-1,
## 'cursor.rowcount should be -1 after executing no-result '
## 'statements'
## )
cur.execute(
"insert into %sbooze values ('Victoria Bitter')" % (self.table_prefix)
)
## self.assertTrue(cur.rowcount in (-1,1),
## 'cursor.rowcount should == number or rows inserted, or '
## 'set to -1 after executing an insert statement'
## )
cur.execute("select name from %sbooze" % self.table_prefix)
self.assertTrue(
cur.rowcount in (-1, 1),
"cursor.rowcount should == number of rows returned, or "
"set to -1 after executing a select statement",
)
self.executeDDL2(cur)
## self.assertEqual(cur.rowcount,-1,
## 'cursor.rowcount not being reset to -1 after executing '
## 'no-result statements'
## )
finally:
con.close()
def test_callproc(self):
pass # performed in test_MySQL_capabilities
def help_nextset_setUp(self, cur):
"""Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"
"""
sql = """
create procedure deleteme()
begin
select count(*) from %(tp)sbooze;
select name from %(tp)sbooze;
end
""" % dict(
tp=self.table_prefix
)
cur.execute(sql)
def help_nextset_tearDown(self, cur):
"If cleaning up is needed after nextSetTest"
cur.execute("drop procedure deleteme")
def test_nextset(self):
from warnings import warn
con = self._connect()
try:
cur = con.cursor()
if not hasattr(cur, "nextset"):
return
try:
self.executeDDL1(cur)
sql = self._populate()
for sql in self._populate():
cur.execute(sql)
self.help_nextset_setUp(cur)
cur.callproc("deleteme")
numberofrows = cur.fetchone()
assert numberofrows[0] == len(self.samples)
assert cur.nextset()
names = cur.fetchall()
assert len(names) == len(self.samples)
s = cur.nextset()
if s:
empty = cur.fetchall()
self.assertEqual(
len(empty), 0, "non-empty result set after other result sets"
)
# warn("Incompatibility: MySQL returns an empty result set for the CALL itself",
# Warning)
# assert s == None,'No more return sets, should return None'
finally:
self.help_nextset_tearDown(cur)
finally:
con.close()
|
{
"content_hash": "472af7558554827797afb0e8d71e6d1b",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 100,
"avg_line_length": 36.31506849315068,
"alnum_prop": 0.5131396957123098,
"repo_name": "PyMySQL/PyMySQL",
"id": "9ac190f2714ecc79a4d81a8ea2db624726476437",
"size": "7953",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pymysql/tests/thirdparty/test_MySQLdb/test_MySQLdb_dbapi20.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "225744"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import auto_control_deps as acd
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import momentum
class AutomaticControlDependenciesTest(test.TestCase):
def testBasic(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
with acd.AutomaticControlDependencies() as c:
v.assign(v + 1)
v.assign(2 * v)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(), 4.0)
@test_util.run_v1_only("b/120545219")
def testCondMustRun(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 6.0)
@test_util.run_v1_only("b/120545219")
def testCondMustRunSeparateRead(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1)
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
one = constant_op.constant(1.0)
one = c.mark_as_return(one)
one.eval(feed_dict={p: False})
self.assertAllEqual(v.read_value().eval(), 5.0)
one.eval(feed_dict={p: True})
self.assertAllEqual(v.read_value().eval(), 6.0)
@test_util.run_v1_only("b/120545219")
def testCondNested(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
q = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
v.assign(v + 1, name='true')
return 1.0
def false_fn():
def inner_true_fn():
v.assign(v * 2, name='false_true')
return 2.0
def inner_false_fn():
v.assign(v * 3, name='false_false')
return 3.0
control_flow_ops.cond(q, inner_true_fn, inner_false_fn)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
with ops.name_scope('final'):
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False, q: False}), 3.0)
self.assertAllEqual(val.eval(feed_dict={p: False, q: True}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: True}), 7.0)
self.assertAllEqual(val.eval(feed_dict={p: True, q: False}), 8.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranch(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 5.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 5.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranchUpdateBefore(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
v.assign(v * 2)
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 6.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 12.0)
@test_util.run_v1_only("b/120545219")
def testCondOneBranchUpdateAfter(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
p = array_ops.placeholder(dtype=dtypes.bool)
with acd.AutomaticControlDependencies() as c:
def true_fn():
return 0.0
def false_fn():
v.assign(v + 4)
return 1.0
control_flow_ops.cond(p, true_fn, false_fn)
v.assign(v * 2)
val = v.read_value()
val = c.mark_as_return(val)
self.assertAllEqual(val.eval(feed_dict={p: False}), 10.0)
self.assertAllEqual(val.eval(feed_dict={p: True}), 20.0)
def testDefunWhileLoopWithCapturedLoopVars(self):
n = 3
x = constant_op.constant(list(range(n)))
@function.defun
def loop():
c = lambda i, x: i < n
b = lambda i, x: (i + 1, x + 1)
i, out = control_flow_ops.while_loop(c, b, (0, x))
return i, out
i, out = loop()
self.assertEqual(int(i), 3)
self.assertAllEqual(out, [3, 4, 5])
def testDecorator(self):
with context.graph_mode(), self.cached_session():
v = resource_variable_ops.ResourceVariable(1.0)
self.evaluate(variables.global_variables_initializer())
@acd.automatic_control_dependencies
def f():
v.assign(v + 1)
v.assign(2 * v)
return v.read_value()
self.assertAllEqual(f().eval(), 4.0)
def testOptimizerInDefun(self):
def loss(v):
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
value = train()
self.assertEqual(value.numpy(), -1.0)
def testReturningNonTensorRaisesError(self):
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
optimizer.apply_gradients = function.defun(optimizer.apply_gradients)
v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(lambda v: v**2)(v)
with self.assertRaisesRegexp(TypeError,
'.*must return zero or more Tensors.*'):
# TODO(akshayka): We might want to allow defun-ing Python functions
# that return operations (and just execute the op instead of running it).
optimizer.apply_gradients(grad)
# TODO(b/111663004): This should work when the outer context is graph
# building.
def testOptimizerNonSlotVarsInDefunNoError(self):
def loss(v):
return v**2
optimizer = adam.AdamOptimizer(learning_rate=1.0)
@function.defun
def train():
self.v = resource_variable_ops.ResourceVariable(1.0)
grad = backprop.implicit_grad(loss)(self.v)
optimizer.apply_gradients(grad)
return self.v.read_value()
train()
def testOptimizerInDefunWithCapturedVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
def loss():
return v**2
optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
@function.defun
def train():
grad = backprop.implicit_grad(loss)()
optimizer.apply_gradients(grad)
train()
self.assertEqual(v.numpy(), -1.0)
def testRepeatedResourceInput(self):
var = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def inner(var1, var2):
return (resource_variable_ops.read_variable_op(var1, dtypes.float32) +
resource_variable_ops.read_variable_op(var2, dtypes.float32))
@def_function.function
def outer():
return inner(var.handle, var.handle)
self.assertEqual(self.evaluate(outer()), 2.0)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
{
"content_hash": "16123df24901e25bb85e71028533fb27",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 79,
"avg_line_length": 33.01393728222997,
"alnum_prop": 0.6478100263852242,
"repo_name": "jendap/tensorflow",
"id": "2c25ab133b914ddd5209bfcda3a8a80d7c44f505",
"size": "10165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/auto_control_deps_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "606044"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "55619540"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "78675"
},
{
"name": "Go",
"bytes": "1383418"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "900190"
},
{
"name": "Jupyter Notebook",
"bytes": "2510235"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "77367"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14644"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "45358371"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "530065"
},
{
"name": "Smarty",
"bytes": "25609"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ewsghana', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EWSExtension',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('user_id', models.CharField(max_length=128, db_index=True)),
('domain', models.CharField(max_length=128)),
('location_id', models.CharField(max_length=128, null=True, db_index=True)),
('sms_notifications', models.BooleanField(default=False)),
],
options={
},
bases=(models.Model,),
),
]
|
{
"content_hash": "808a711e15839315c0823435fc3377a9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 114,
"avg_line_length": 31.615384615384617,
"alnum_prop": 0.5559610705596107,
"repo_name": "qedsoftware/commcare-hq",
"id": "b72d76b1a42980111b437be94a1d3d3dbf4ac11a",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/ewsghana/migrations/0002_ewsextension.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
import arcpy, os, datetime
import SharedArcNumericalLib as san
# Function Definitions
# Main Function Definition
def temporal_split(in_fc, out_ws, start_time, end_time, time_interval, bin_start_time=None, compactBool=True):
""" This tool will split a feature class into multiple feature classes based on a datetime field and
a set time interval.
Parameters
-----------------
in_fc - input feature class with datetime field
out_ws - output workspace
start_time - start date time field
end_time - end date time field
time_interval - temporal spacing
bin_start_time - optional datetime used as the start of the first bin
compactBool - compact db after run."""
try:
if arcpy.Exists(out_ws):
arcpy.env.workspace = out_ws
arcpy.env.overwriteOutput = True
san.arc_print("The current work space is: {0}.".format(out_ws), True)
workSpaceTail = os.path.split(out_ws)[1]
san.arc_print("Constructing Time Delta from input time period string.", True)
time_magnitude, time_unit = san.alphanumeric_split(time_interval)
time_delta = san.parse_time_units_to_dt(time_magnitude, time_unit)
inFeatureClassTail = os.path.split(in_fc)[1]
san.arc_print(
"Using datetime fields to generate new feature classes in {0}.".format(
str(workSpaceTail)))
san.arc_print("Getting start and final times in start time field {0}.".format(start_time))
start_time_min, start_time_max = san.get_min_max_from_field(in_fc, start_time)
if san.field_exist(in_fc, end_time) and end_time:
san.arc_print("Using start and end time to grab feature classes whose bins occur within an events "
"start or end time.")
end_time_min, end_time_max = san.get_min_max_from_field(in_fc, end_time)
start_time_field = start_time
end_time_field = end_time
start_time_range = start_time_min
end_time_range = end_time_max
else:
san.arc_print("Using only first datetime start field to construct time bin ranges.")
start_time_field = start_time
end_time_field = start_time
start_time_range = start_time_min
end_time_range = start_time_max
if isinstance(bin_start_time, datetime.datetime) or isinstance(bin_start_time, datetime.date):
start_time_range = bin_start_time
san.arc_print("Bin Start Time was selected, using {0} as bin starting time period."
.format(str(bin_start_time)))
time_bins = san.construct_time_bin_ranges(start_time_range, end_time_range, time_delta)
san.arc_print("Constructing queries based on datetime ranges.")
temporal_queries = san.construct_sql_queries_from_time_bin(time_bins, in_fc, start_time_field,
end_time_field)
time_counter = 0
san.arc_print("Splitting feature classes based on {0} queries.".format(len(temporal_queries)), True)
for query in temporal_queries:
try:
time_counter += 1
san.arc_print("Determining name and constructing query for new feature class.", True)
newFCName = "Bin_{0}_{1}".format(time_counter,
arcpy.ValidateTableName(inFeatureClassTail, out_ws))
expression = str(query)
arcpy.Select_analysis(in_fc, newFCName, expression)
san.arc_print(
"Selected out unique ID: {0} with query [{1}] and created a new feature class in {2}".format(
newFCName, expression, workSpaceTail), True)
except:
san.arc_print(
"The unique value ID {0}, could not be extracted. Check arguments of tool.".format(
str(newFCName)))
pass
if compactBool:
try:
san.arc_print("Compacting workspace.", True)
arcpy.Compact_management(out_ws)
except:
san.arc_print("Not a Compact capable workspace.")
pass
san.arc_print("Tool execution complete.", True)
pass
else:
san.arc_print("The desired workspace does not exist. Tool execution terminated.", True)
arcpy.AddWarning("The desired workspace does not exist.")
except arcpy.ExecuteError:
san.arc_print(arcpy.GetMessages(2))
except Exception as e:
san.arc_print(e.args[0])
# Main Script
if __name__ == "__main__":
# Define Inputs
inFeatureClass = arcpy.GetParameterAsText(0)
outWorkSpace = arcpy.GetParameterAsText(1)
start_time_field = arcpy.GetParameterAsText(2)
end_time_field = arcpy.GetParameterAsText(3)
time_interval = arcpy.GetParameter(4)
bin_start_time = arcpy.GetParameter(5)
compactWorkspace = arcpy.GetParameter(6)
temporal_split(inFeatureClass, outWorkSpace, start_time_field, end_time_field, time_interval, bin_start_time,
compactWorkspace)
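# Illustrative standalone call (paths, field names, and the interval string are hypothetical):
# temporal_split(r"C:\Data\Events.gdb\Crashes", r"C:\Data\Bins.gdb",
#                "START_DT", "END_DT", "1 Weeks", bin_start_time=None, compactBool=True)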
|
{
"content_hash": "24651f36d814f8e4e0f2b77dda2436c9",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 117,
"avg_line_length": 50.43396226415094,
"alnum_prop": 0.5785634118967452,
"repo_name": "Holisticnature/ArcNumerical-Tools",
"id": "d5ef933b548a9429dcfd5127ffa220eed8ce2732",
"size": "6322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/TemporalSplit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "175639"
}
],
"symlink_target": ""
}
|
"""
read_length_distribution: returns the length of sequences from a fasta or fastq file
"""
# import libraries
from Bio import SeqIO
import argparse
####################
## Arguments
####################
parser = argparse.ArgumentParser()
parser.add_argument("-f","--inputfile",type=str,help="the fasta or fastq file from where sequence lenght will be assessed")
parser.add_argument("-o","--outfile",type=str,help="the tabulated text file returned ",default="nr_seq_counts.txt")
parser.add_argument("-n","--name",type=str,help="name of the sample (will be used as the column name",default="sample")
args = parser.parse_args()
if args.inputfile.endswith((".fasta",".fa")):
with open(args.inputfile,"r") as filin:
seqs = [str(rec.seq) for rec in SeqIO.parse(filin,"fasta")]
if args.inputfile.endswith((".fastq",".fq")):
with open(args.inputfile,"r") as filin:
seqs = [str(rec.seq) for rec in SeqIO.parse(filin,"fastq")]
# measure sequence length
with open(args.outfile,"w") as fileout:
fileout.write(args.name + "\n")
for seq in seqs:
fileout.write(str(len(seq)) + "\n")
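# Example invocation from a shell (file and sample names are hypothetical):
#   python read_length_distribution.py -f reads.fastq -o reads_lengths.txt -n sample1
# The output is a single-column text file: the sample name on the first line,
# then one sequence length per line.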
|
{
"content_hash": "719630c138d3a172e390a9dc5e1056e3",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 123,
"avg_line_length": 30.305555555555557,
"alnum_prop": 0.6810265811182401,
"repo_name": "BleekerLab/Solanum_sRNAs",
"id": "590d9a9acf8dbaf165ae49a988c9d4f19e8d690d",
"size": "1091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/read_length_distribution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "12924"
},
{
"name": "Python",
"bytes": "228157"
},
{
"name": "R",
"bytes": "23819"
}
],
"symlink_target": ""
}
|
"""Utility methods for scheduling."""
import sys
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import db
from nova import notifications
from nova.objects import base as obj_base
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import rpc
LOG = logging.getLogger(__name__)
def build_request_spec(ctxt, image, instances, instance_type=None):
"""Build a request_spec for the scheduler.
The request_spec assumes that all instances to be scheduled are the same
type.
"""
instance = instances[0]
if isinstance(instance, instance_obj.Instance):
instance = obj_base.obj_to_primitive(instance)
if instance_type is None:
instance_type = flavors.extract_flavor(instance)
# NOTE(comstud): This is a bit ugly, but will get cleaned up when
# we're passing an InstanceType internal object.
extra_specs = db.flavor_extra_specs_get(ctxt, instance_type['flavorid'])
instance_type['extra_specs'] = extra_specs
request_spec = {
'image': image or {},
'instance_properties': instance,
'instance_type': instance_type,
'num_instances': len(instances),
# NOTE(alaski): This should be removed as logic moves from the
# scheduler to conductor. Provides backwards compatibility now.
'instance_uuids': [inst['uuid'] for inst in instances]}
return jsonutils.to_primitive(request_spec)
def set_vm_state_and_notify(context, service, method, updates, ex,
request_spec, db):
"""changes VM state and notifies."""
LOG.warning(_("Failed to %(service)s_%(method)s: %(ex)s"),
{'service': service, 'method': method, 'ex': ex})
vm_state = updates['vm_state']
properties = request_spec.get('instance_properties', {})
# NOTE(vish): We shouldn't get here unless we have a catastrophic
# failure, so just set all instances to error. if uuid
# is not set, instance_uuids will be set to [None], this
# is solely to preserve existing behavior and can
# be removed along with the 'if instance_uuid:' if we can
# verify that uuid is always set.
uuids = [properties.get('uuid')]
notifier = rpc.get_notifier(service)
for instance_uuid in request_spec.get('instance_uuids') or uuids:
if instance_uuid:
state = vm_state.upper()
LOG.warning(_('Setting instance to %s state.'), state,
instance_uuid=instance_uuid)
# update instance state and notify on the transition
(old_ref, new_ref) = db.instance_update_and_get_original(
context, instance_uuid, updates)
notifications.send_update(context, old_ref, new_ref,
service=service)
compute_utils.add_instance_fault_from_exc(context,
new_ref, ex, sys.exc_info())
payload = dict(request_spec=request_spec,
instance_properties=properties,
instance_id=instance_uuid,
state=vm_state,
method=method,
reason=ex)
event_type = '%s.%s' % (service, method)
notifier.error(context, event_type, payload)
def populate_filter_properties(filter_properties, host_state):
"""Add additional information to the filter properties after a node has
been selected by the scheduling process.
"""
if isinstance(host_state, dict):
host = host_state['host']
nodename = host_state['nodename']
limits = host_state['limits']
else:
host = host_state.host
nodename = host_state.nodename
limits = host_state.limits
# Adds a retry entry for the selected compute host and node:
_add_retry_host(filter_properties, host, nodename)
# Adds oversubscription policy
if not filter_properties.get('force_hosts'):
filter_properties['limits'] = limits
def _add_retry_host(filter_properties, host, node):
"""Add a retry entry for the selected compute node. In the event that
the request gets re-scheduled, this entry will signal that the given
node has already been tried.
"""
retry = filter_properties.get('retry', None)
force_hosts = filter_properties.get('force_hosts', [])
force_nodes = filter_properties.get('force_nodes', [])
if not retry or force_hosts or force_nodes:
return
hosts = retry['hosts']
hosts.append([host, node])
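# Illustrative effect (values hypothetical): if filter_properties contains
# {'retry': {'hosts': []}}, a call with host='compute-1', node='node-1' leaves
# {'retry': {'hosts': [['compute-1', 'node-1']]}} so a reschedule skips that node.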
def parse_options(opts, sep='=', converter=str, name=""):
"""Parse a list of options, each in the format of <key><sep><value>. Also
use the converter to convert the value into desired type.
:params opts: list of options, e.g. from oslo.config.cfg.ListOpt
:params sep: the separator
:params converter: callable object to convert the value, should raise
ValueError for conversion failure
:params name: name of the option
:returns: a lists of tuple of values (key, converted_value)
"""
good = []
bad = []
for opt in opts:
try:
key, seen_sep, value = opt.partition(sep)
value = converter(value)
except ValueError:
key = None
value = None
if key and seen_sep and value is not None:
good.append((key, value))
else:
bad.append(opt)
if bad:
LOG.warn(_("Ignoring the invalid elements of the option "
"%(name)s: %(options)s"),
{'name': name,
'options': ", ".join(bad)})
return good
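# Illustrative use (hypothetical option list): parse_options(['cpu=2', 'bogus'],
# converter=int, name='ratios') returns [('cpu', 2)] and logs a warning that
# 'bogus' was ignored as an invalid element.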
|
{
"content_hash": "44ebbe685e19ef8fe9c92d4c4a0ed82f",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 77,
"avg_line_length": 38.58552631578947,
"alnum_prop": 0.6211423699914749,
"repo_name": "CiscoSystems/nova",
"id": "e3250f12c8e80cff6b339035580bdf2b16fbb644",
"size": "6463",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/scheduler/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13926229"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
}
|
"""
Compare the various algorithms on a synthetic dataset.
"""
import time
import cPickle
import os
import gzip
import pprint
import numpy as np
from scipy.misc import logsumexp
from scipy.special import gammaln
# Use the Agg backend in running on a server without the DISPLAY variable
if "DISPLAY" not in os.environ:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pyhawkes.models import DiscreteTimeStandardHawkesModel, \
DiscreteTimeNetworkHawkesModelGammaMixture, DiscreteTimeNetworkHawkesModelSpikeAndSlab
from pyhawkes.plotting.plotting import plot_network
from baselines.xcorr import infer_net_from_xcorr
from sklearn.metrics import roc_auc_score, average_precision_score
def run_comparison(data_path, test_path, output_path, T_train=None, seed=None):
"""
Run the comparison on the given data file
:param data_path:
:return:
"""
if seed is None:
seed = np.random.randint(2**32)
print "Setting seed to ", seed
np.random.seed(seed)
assert os.path.exists(os.path.dirname(output_path)), "Output directory does not exist!"
if data_path.endswith(".gz"):
with gzip.open(data_path, 'r') as f:
S, true_model = cPickle.load(f)
else:
with open(data_path, 'r') as f:
S, true_model = cPickle.load(f)
# If T_train is given, only use a fraction of the dataset
if T_train is not None:
S = S[:T_train,:]
if test_path.endswith(".gz"):
with gzip.open(test_path, 'r') as f:
S_test, test_model = cPickle.load(f)
else:
with open(test_path, 'r') as f:
S_test, test_model = cPickle.load(f)
K = true_model.K
C = true_model.C
B = true_model.B
dt = true_model.dt
dt_max = true_model.dt_max
use_parse_results = True
if use_parse_results and os.path.exists(output_path + ".parsed_results.pkl"):
with open(output_path + ".parsed_results.pkl") as f:
auc_rocs, auc_prcs, plls, timestamps = cPickle.load(f)
timestamps['svi'] = np.array(timestamps['svi'])
else:
# Compute the cross correlation to estimate the connectivity
W_xcorr = infer_net_from_xcorr(S, dtmax=true_model.dt_max // true_model.dt)
# Fit a standard Hawkes model on subset of data with BFGS
bfgs_model, bfgs_time = fit_standard_hawkes_model_bfgs(S, K, B, dt, dt_max,
output_path=output_path)
# Fit a standard Hawkes model with SGD
# standard_models, timestamps = fit_standard_hawkes_model_sgd(S, K, B, dt, dt_max,
# init_model=init_model)
#
# # Save the models
# with open(output_path + ".sgd.pkl", 'w') as f:
# print "Saving SGD results to ", (output_path + ".sgd.pkl")
# cPickle.dump((standard_models, timestamps), f, protocol=-1)
# Fit a network Hawkes model with Gibbs
gibbs_samples, gibbs_timestamps = fit_network_hawkes_gibbs(S, K, C, B, dt, dt_max,
output_path=output_path,
standard_model=bfgs_model)
# Fit a spike and slab network Hawkes model with Gibbs
gibbs_ss_samples = gibbs_ss_timestamps = None
# gibbs_ss_samples, gibbs_ss_timestamps = fit_network_hawkes_gibbs_ss(S, K, C, B, dt, dt_max,
# output_path=output_path,
# standard_model=bfgs_model)
# Fit a network Hawkes model with Batch VB
vb_models, vb_timestamps = fit_network_hawkes_vb(S, K, C, B, dt, dt_max,
output_path=output_path,
standard_model=bfgs_model)
# Fit a network Hawkes model with SVI
# svi_models = svi_timestamps = None
svi_models, svi_timestamps = fit_network_hawkes_svi(S, K, C, B, dt, dt_max,
output_path,
standard_model=bfgs_model)
# Combine timestamps into a dict
timestamps = {}
timestamps['bfgs'] = bfgs_time
timestamps['gibbs'] = gibbs_timestamps
timestamps['gibbs_ss'] = gibbs_ss_timestamps
timestamps['svi'] = svi_timestamps
timestamps['vb'] = vb_timestamps
amis = compute_clustering_score(true_model,
bfgs_model=bfgs_model,
gibbs_samples=gibbs_samples,
gibbs_ss_samples=gibbs_ss_samples,
svi_models=svi_models,
vb_models=vb_models)
print "AMIS"
pprint.pprint(amis)
auc_rocs = compute_auc(true_model,
W_xcorr=W_xcorr,
bfgs_model=bfgs_model,
gibbs_samples=gibbs_samples,
gibbs_ss_samples=gibbs_ss_samples,
svi_models=svi_models,
vb_models=vb_models)
print "AUC-ROC"
pprint.pprint(auc_rocs)
# Compute area under the precision-recall curve of the inferred network
auc_prcs = compute_auc_prc(true_model,
W_xcorr=W_xcorr,
bfgs_model=bfgs_model,
gibbs_samples=gibbs_samples,
gibbs_ss_samples=gibbs_ss_samples,
svi_models=svi_models,
vb_models=vb_models)
print "AUC-PRC"
pprint.pprint(auc_prcs)
plls = compute_predictive_ll(S_test, S,
true_model=true_model,
bfgs_model=bfgs_model,
gibbs_samples=gibbs_samples,
gibbs_ss_samples=gibbs_ss_samples,
svi_models=svi_models,
vb_models=vb_models)
with open(output_path + ".parsed_results.pkl", 'w') as f:
print "Saving parsed results to ", output_path + ".parsed_results.pkl"
cPickle.dump((auc_rocs, auc_prcs, plls, timestamps), f, protocol=-1)
plot_pred_ll_vs_time(plls, timestamps, Z=float(S.size), T_train=T_train)
def fit_standard_hawkes_model_bfgs(S, K, B, dt, dt_max, output_path,
init_len=10000, xv_len=1000):
"""
Fit
:param S:
:return:
"""
# Check for existing results
if os.path.exists(output_path + ".bfgs.pkl"):
print "Existing BFGS results found. Loading from file."
with open(output_path + ".bfgs.pkl", 'r') as f:
init_model, init_time = cPickle.load(f)
else:
print "Fitting the data with a standard Hawkes model"
# betas = np.logspace(-3,-0.8,num=10)
betas = np.array([0.01, 0.1, 1.0, 10.0, 20.0])
# betas = np.concatenate(([0], betas))
init_models = []
S_init = S[:init_len,:]
xv_ll = np.zeros(len(betas))
S_xv = S[init_len:init_len+xv_len, :]
# Make a model to initialize the parameters
init_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, B=B, dt_max=dt_max, beta=0.0)
init_model.add_data(S_init)
# Initialize the background rates to their mean
init_model.initialize_to_background_rate()
start = time.clock()
for i,beta in enumerate(betas):
print "Fitting with BFGS on first ", init_len, " time bins, beta = ", beta
init_model.beta = beta
init_model.fit_with_bfgs()
init_models.append(init_model.copy_sample())
# Compute the heldout likelihood on the xv data
xv_ll[i] = init_model.heldout_log_likelihood(S_xv)
if not np.isfinite(xv_ll[i]):
xv_ll[i] = -np.inf
init_time = time.clock() - start
# Take the best model
print "XV predictive log likelihoods: "
for beta, ll in zip(betas, xv_ll):
print "Beta: %.2f\tLL: %.2f" % (beta, ll)
best_ind = np.argmax(xv_ll)
print "Best beta: ", betas[best_ind]
init_model = init_models[best_ind]
if best_ind == 0 or best_ind == len(betas) - 1:
print "WARNING: Best BFGS model was for extreme value of beta. " \
"Consider expanding the beta range."
# Save the model (sans data)
with open(output_path + ".bfgs.pkl", 'w') as f:
print "Saving BFGS results to ", (output_path + ".bfgs.pkl")
cPickle.dump((init_model, init_time), f, protocol=-1)
return init_model, init_time
def fit_standard_hawkes_model_sgd(S, K, B, dt, dt_max, init_model=None):
"""
Fit
:param S:
:return:
"""
print "Fitting the data with a standard Hawkes model using SGD"
# Make a new model for inference
test_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, dt_max=dt_max, B=B)
test_model.add_data(S, minibatchsize=256)
# Initialize the test model with the init model weights
if init_model is not None:
test_model.weights = init_model.weights
plt.ion()
im = plot_network(np.ones((K,K)), test_model.W, vmax=0.5)
plt.pause(0.001)
# Gradient descent
N_steps = 1000
samples = []
lls = []
timestamps = []
learning_rate = 0.01 * np.ones(N_steps)
momentum = 0.8 * np.ones(N_steps)
prev_velocity = None
for itr in xrange(N_steps):
# W,ll,grad = test_model.gradient_descent_step(stepsz=0.001)
W,ll,prev_velocity = test_model.sgd_step(prev_velocity, learning_rate[itr], momentum[itr])
samples.append(test_model.copy_sample())
lls.append(ll)
timestamps.append(time.clock())
if itr % 1 == 0:
print "Iteration ", itr, "\t LL: ", ll
im.set_data(np.ones((K,K)) * test_model.W)
plt.pause(0.001)
plt.ioff()
plt.figure()
plt.plot(np.arange(N_steps), lls)
plt.xlabel("Iteration")
plt.ylabel("Log likelihood")
plot_network(np.ones((K,K)), test_model.W)
plt.show()
return samples, timestamps
def load_partial_results(output_path, typ="gibbs"):
import glob
# Check for existing Gibbs results
if os.path.exists(output_path + ".%s.pkl" % typ):
with open(output_path + ".%s.pkl" % typ, 'r') as f:
print "Loading %s results from " % typ, (output_path + ".%s.pkl" % typ)
(samples, timestamps) = cPickle.load(f)
return samples, timestamps
else:
if os.path.exists(os.path.join(os.path.dirname(output_path),
"%s_timestamps.pkl" % typ)):
with open(os.path.join(os.path.dirname(output_path),
"%s_timestamps.pkl" % typ), 'r') as f:
names_and_timestamps = dict(cPickle.load(f))
# Look for individual iteration files instead
files = glob.glob(output_path + ".%s.itr*.pkl" % typ)
if len(files) > 0:
full_samples = []
for file in files:
with open(file, 'r') as f:
print "Loading sample from ", file
try:
res = cPickle.load(f)
if isinstance(res, tuple):
sample, timestamp = res
else:
sample = res
timestamp = names_and_timestamps[os.path.basename(file)]
full_samples.append((file, sample, timestamp))
except:
print "Failed to load file ", file
# Sort the samples by iteration name
full_samples = sorted(full_samples, key=lambda x: x[0])
names = [n for (n,s,t) in full_samples]
itrs = np.array([int(n[-8:-4]) for n in names]) # Hack out the iteration number
samples = [s for (n,s,t) in full_samples]
timestamps = np.array([t for (n,s,t) in full_samples])
if np.all(timestamps > 1e8):
import pdb; pdb.set_trace()
timestamps = timestamps[1:] - timestamps[0]
samples = samples[1:]
assert np.all(np.diff(itrs) == 1), "Iterations are not sequential!"
return samples, timestamps
def fit_network_hawkes_gibbs(S, K, C, B, dt, dt_max,
output_path,
standard_model=None):
samples_and_timestamps = load_partial_results(output_path, typ="gibbs")
if samples_and_timestamps is not None:
samples, timestamps = samples_and_timestamps
# # Check for existing Gibbs results
# if os.path.exists(output_path + ".gibbs.pkl"):
# with open(output_path + ".gibbs.pkl", 'r') as f:
# print "Loading Gibbs results from ", (output_path + ".gibbs.pkl")
# (samples, timestamps) = cPickle.load(f)
else:
print "Fitting the data with a network Hawkes model using Gibbs sampling"
# Make a new model for inference
network_hypers = {'C': C, 'alpha': 1.0, 'beta': 1.0/20.0}
test_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, dt=dt, dt_max=dt_max, B=B,
network_hypers=network_hypers)
test_model.add_data(S)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
plt.ion()
im = plot_network(test_model.weight_model.A, test_model.weight_model.W, vmax=0.5)
plt.pause(0.001)
# Gibbs sample
N_samples = 1000
samples = []
lps = []
timestamps = [time.clock()]
for itr in xrange(N_samples):
lps.append(test_model.log_probability())
# lps.append(test_model.log_likelihood())
samples.append(test_model.resample_and_copy())
timestamps.append(time.clock())
if itr % 1 == 0:
print "Iteration ", itr, "\t LL: ", lps[-1]
# im.set_data(test_model.weight_model.A * \
# test_model.weight_model.W)
# plt.pause(0.001)
# Save this sample
with open(output_path + ".gibbs.itr%04d.pkl" % itr, 'w') as f:
cPickle.dump((samples[-1], timestamps[-1]-timestamps[0]), f, protocol=-1)
# Save the Gibbs timestamps
timestamps = np.array(timestamps)
with open(output_path + ".gibbs.timestamps.pkl", 'w') as f:
print "Saving Gibbs samples to ", (output_path + ".gibbs.timestamps.pkl")
cPickle.dump(timestamps, f, protocol=-1)
# Save the Gibbs samples
with open(output_path + ".gibbs.pkl", 'w') as f:
print "Saving Gibbs samples to ", (output_path + ".gibbs.pkl")
cPickle.dump((samples, timestamps[1:] - timestamps[0]), f, protocol=-1)
return samples, timestamps
def fit_network_hawkes_gibbs_ss(S, K, C, B, dt, dt_max,
output_path,
standard_model=None):
samples_and_timestamps = load_partial_results(output_path, typ="gibbs_ss")
if samples_and_timestamps is not None:
samples, timestamps = samples_and_timestamps
else:
print "Fitting the data with a spike adn slab network Hawkes model using Gibbs sampling"
# Make a new model for inference
network_hypers = {'C': C, 'alpha': 1.0, 'beta': 1.0/20.0}
test_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, B=B,
network_hypers=network_hypers)
test_model.add_data(S)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
# Gibbs sample
N_samples = 1000
samples = []
lps = []
timestamps = [time.clock()]
for itr in xrange(N_samples):
lps.append(test_model.log_probability())
samples.append(test_model.resample_and_copy())
timestamps.append(time.clock())
if itr % 1 == 0:
print "Iteration ", itr, "\t LL: ", lps[-1]
# Save this sample
with open(output_path + ".gibbs_ss.itr%04d.pkl" % itr, 'w') as f:
cPickle.dump((samples[-1], timestamps[-1]-timestamps[0]), f, protocol=-1)
# Save the Gibbs timestamps
timestamps = np.array(timestamps)
with open(output_path + ".gibbs_ss.timestamps.pkl", 'w') as f:
print "Saving spike and slab Gibbs samples to ", (output_path + ".gibbs_ss.timestamps.pkl")
cPickle.dump(timestamps, f, protocol=-1)
# Save the Gibbs samples
with open(output_path + ".gibbs_ss.pkl", 'w') as f:
print "Saving Gibbs samples to ", (output_path + ".gibbs_ss.pkl")
cPickle.dump((samples, timestamps[1:] - timestamps[0]), f, protocol=-1)
return samples, timestamps
def fit_network_hawkes_vb(S, K, C, B, dt, dt_max,
output_path,
standard_model=None):
samples_and_timestamps = load_partial_results(output_path, typ="vb")
if samples_and_timestamps is not None:
samples, timestamps = samples_and_timestamps
# # Check for existing Gibbs results
# if os.path.exists(output_path + ".vb.pkl.gz"):
# with gzip.open(output_path + ".vb.pkl.gz", 'r') as f:
# print "Loading vb results from ", (output_path + ".vb.pkl.gz")
# (samples, timestamps) = cPickle.load(f)
#
# if isinstance(timestamps, list):
# timestamps = np.array(timestamps)
else:
print "Fitting the data with a network Hawkes model using Batch VB"
# Make a new model for inference
network_hypers = {'C': C, 'alpha': 1.0, 'beta': 1.0/20.0}
test_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, dt=dt, dt_max=dt_max, B=B,
network_hypers=network_hypers)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
plt.ion()
im = plot_network(test_model.weight_model.A, test_model.weight_model.W, vmax=0.5)
plt.pause(0.001)
# TODO: Add the data in minibatches
minibatchsize = 500
test_model.add_data(S)
# Stochastic variational inference
N_iters = 1000
vlbs = []
samples = []
start = time.clock()
timestamps = []
for itr in xrange(N_iters):
vlbs.append(test_model.meanfield_coordinate_descent_step())
print "Batch VB Iter: ", itr, "\tVLB: ", vlbs[-1]
samples.append(test_model.copy_sample())
timestamps.append(time.clock())
if itr % 1 == 0:
im.set_data(test_model.weight_model.expected_W())
plt.pause(0.001)
# Save this sample
with open(output_path + ".vb.itr%04d.pkl" % itr, 'w') as f:
cPickle.dump((samples[-1], timestamps[-1] - start), f, protocol=-1)
# Save the VB samples
timestamps = np.array(timestamps)
with gzip.open(output_path + ".vb.pkl.gz", 'w') as f:
print "Saving VB samples to ", (output_path + ".vb.pkl.gz")
cPickle.dump((samples, timestamps - start), f, protocol=-1)
return samples, timestamps
def fit_network_hawkes_svi(S, K, C, B, dt, dt_max,
output_path,
standard_model=None):
samples_and_timestamps = load_partial_results(output_path, typ="svi2")
if samples_and_timestamps is not None:
samples, timestamps = samples_and_timestamps
else:
print "Fitting the data with a network Hawkes model using SVI"
# Make a new model for inference
network_hypers = {'C': C, 'alpha': 1.0, 'beta': 1.0/20.0}
test_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, dt=dt, dt_max=dt_max, B=B,
network_hypers=network_hypers)
# Initialize with the standard model parameters
if standard_model is not None:
test_model.initialize_with_standard_model(standard_model)
plt.ion()
im = plot_network(test_model.weight_model.A, test_model.weight_model.W, vmax=0.5)
plt.pause(0.001)
# TODO: Add the data in minibatches
minibatchsize = 500
import pdb; pdb.set_trace()
test_model.add_data(S)
# Stochastic variational inference
N_iters = 10000
samples = []
delay = 1.0
forgetting_rate = 0.5
stepsize = (np.arange(N_iters) + delay)**(-forgetting_rate)
start = time.clock()
timestamps = []
for itr in xrange(N_iters):
print "SVI Iter: ", itr, "\tStepsize: ", stepsize[itr]
test_model.sgd_step(minibatchsize=minibatchsize, stepsize=stepsize[itr])
test_model.resample_from_mf()
samples.append(test_model.copy_sample())
timestamps.append(time.clock())
if itr % 1 == 0:
im.set_data(test_model.weight_model.expected_W())
plt.pause(0.001)
# Save this sample
with open(output_path + ".svi.itr%04d.pkl" % itr, 'w') as f:
cPickle.dump((samples[-1], timestamps[-1] -start), f, protocol=-1)
# Save the SVI samples
timestamps = np.array(timestamps)
with gzip.open(output_path + ".svi.pkl.gz", 'w') as f:
print "Saving SVI samples to ", (output_path + ".svi.pkl.gz")
cPickle.dump((samples, timestamps - start), f, protocol=-1)
return samples, timestamps
def compute_auc(true_model,
W_xcorr=None,
bfgs_model=None,
sgd_model=None,
gibbs_samples=None,
gibbs_ss_samples=None,
vb_models=None,
svi_models=None):
"""
Compute the AUC score for each of competing models
:return:
"""
aucs = {}
# Get the true adjacency matrix
A_true = true_model.weight_model.A.ravel()
if W_xcorr is not None:
aucs['xcorr'] = roc_auc_score(A_true,
W_xcorr.ravel())
if bfgs_model is not None:
assert isinstance(bfgs_model, DiscreteTimeStandardHawkesModel)
aucs['bfgs'] = roc_auc_score(A_true,
bfgs_model.W.ravel())
if sgd_model is not None:
assert isinstance(sgd_model, DiscreteTimeStandardHawkesModel)
aucs['sgd'] = roc_auc_score(A_true,
sgd_model.W.ravel())
if gibbs_samples is not None:
# Compute ROC based on mean value of W_effective in second half of samples
Weff_samples = np.array([s.weight_model.W_effective for s in gibbs_samples])
N_samples = Weff_samples.shape[0]
offset = N_samples // 2
Weff_mean = Weff_samples[offset:,:,:].mean(axis=0)
aucs['gibbs'] = roc_auc_score(A_true, Weff_mean.ravel())
if gibbs_ss_samples is not None:
# Compute ROC based on mean value of W_effective in second half of samples
Weff_samples = np.array([s.weight_model.W_effective for s in gibbs_ss_samples])
N_samples = Weff_samples.shape[0]
offset = N_samples // 2
Weff_mean = Weff_samples[offset:,:,:].mean(axis=0)
aucs['gibbs_ss'] = roc_auc_score(A_true, Weff_mean.ravel())
if vb_models is not None:
# Compute ROC based on E[A] under variational posterior
aucs['vb'] = roc_auc_score(A_true,
vb_models[-1].weight_model.expected_A().ravel())
if svi_models is not None:
# Compute ROC based on E[A] under variational posterior
aucs['svi'] = roc_auc_score(A_true,
svi_models[-1].weight_model.expected_A().ravel())
return aucs
def compute_auc_prc(true_model,
W_xcorr=None,
bfgs_model=None,
sgd_model=None,
gibbs_samples=None,
gibbs_ss_samples=None,
vb_models=None,
svi_models=None,
average="macro"):
"""
Compute the AUC of the precision recall curve
:return:
"""
A_flat = true_model.weight_model.A.ravel()
aucs = {}
if W_xcorr is not None:
aucs['xcorr'] = average_precision_score(A_flat,
W_xcorr.ravel(),
average=average)
if bfgs_model is not None:
assert isinstance(bfgs_model, DiscreteTimeStandardHawkesModel)
W_bfgs = bfgs_model.W.copy()
W_bfgs -= np.diag(np.diag(W_bfgs))
aucs['bfgs'] = average_precision_score(A_flat,
W_bfgs.ravel(),
average=average)
if sgd_model is not None:
assert isinstance(sgd_model, DiscreteTimeStandardHawkesModel)
aucs['sgd'] = average_precision_score(A_flat,
sgd_model.W.ravel(),
average=average)
if gibbs_samples is not None:
# Compute ROC based on mean value of W_effective in second half of samples
Weff_samples = np.array([s.weight_model.W_effective for s in gibbs_samples])
N_samples = Weff_samples.shape[0]
offset = N_samples // 2
Weff_mean = Weff_samples[offset:,:,:].mean(axis=0)
aucs['gibbs'] = average_precision_score(A_flat, Weff_mean.ravel(), average=average)
if gibbs_ss_samples is not None:
# Compute ROC based on mean value of W_effective in second half of samples
Weff_samples = np.array([s.weight_model.W_effective for s in gibbs_ss_samples])
N_samples = Weff_samples.shape[0]
offset = N_samples // 2
Weff_mean = Weff_samples[offset:,:,:].mean(axis=0)
aucs['gibbs_ss'] = average_precision_score(A_flat, Weff_mean.ravel(), average=average)
if vb_models is not None:
# Compute ROC based on E[A] under variational posterior
aucs['vb'] = average_precision_score(A_flat,
vb_models[-1].weight_model.expected_A().ravel(),
average=average)
if svi_models is not None:
# Compute ROC based on E[A] under variational posterior
aucs['svi'] = average_precision_score(A_flat,
svi_models[-1].weight_model.expected_A().ravel(),
average=average)
return aucs
def compute_predictive_ll(S_test, S_train,
true_model=None,
bfgs_model=None,
sgd_models=None,
gibbs_samples=None,
gibbs_ss_samples=None,
vb_models=None,
svi_models=None):
"""
Compute the predictive log likelihood
:return:
"""
plls = {}
# Compute homogeneous pred ll
T = S_train.shape[0]
T_test = S_test.shape[0]
lam_homog = S_train.sum(axis=0) / float(T)
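# The homogeneous baseline below is a per-bin Poisson log likelihood:
# sum over processes k and test bins t of S[t,k]*log(lam_k) - lam_k - log(S[t,k]!),
# where lam_k is the mean training count per bin computed above.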
plls['homog'] = 0
plls['homog'] += -gammaln(S_test+1).sum()
plls['homog'] += (-lam_homog * T_test).sum()
plls['homog'] += (S_test.sum(axis=0) * np.log(lam_homog)).sum()
if true_model is not None:
plls['true'] = true_model.heldout_log_likelihood(S_test)
if bfgs_model is not None:
assert isinstance(bfgs_model, DiscreteTimeStandardHawkesModel)
plls['bfgs'] = bfgs_model.heldout_log_likelihood(S_test)
if sgd_models is not None:
assert isinstance(sgd_models, list)
plls['sgd'] = np.zeros(len(sgd_models))
for i,sgd_model in enumerate(sgd_models):
plls['sgd'] = sgd_model.heldout_log_likelihood(S_test)
if gibbs_samples is not None:
print "Computing predictive log likelihood for Gibbs samples"
# Compute log(E[pred likelihood]) on second half of samples
offset = 0
# Preconvolve with the Gibbs model's basis
F_test = gibbs_samples[0].basis.convolve_with_basis(S_test)
plls['gibbs'] = []
for s in gibbs_samples[offset:]:
plls['gibbs'].append(s.heldout_log_likelihood(S_test, F=F_test))
# Convert to numpy array
plls['gibbs'] = np.array(plls['gibbs'])
if gibbs_ss_samples is not None:
print "Computing predictive log likelihood for spike and slab Gibbs samples"
# Compute log(E[pred likelihood]) on second half of samples
offset = 0
# Preconvolve with the Gibbs model's basis
F_test = gibbs_ss_samples[0].basis.convolve_with_basis(S_test)
plls['gibbs_ss'] = []
for s in gibbs_ss_samples[offset:]:
plls['gibbs_ss'].append(s.heldout_log_likelihood(S_test, F=F_test))
# Convert to numpy array
plls['gibbs_ss'] = np.array(plls['gibbs_ss'])
if vb_models is not None:
print "Computing predictive log likelihood for VB iterations"
# Compute predictive likelihood over samples from VB model
N_models = len(vb_models)
N_samples = 10
# Preconvolve with the VB model's basis
F_test = vb_models[0].basis.convolve_with_basis(S_test)
vb_plls = np.zeros((N_models, N_samples))
for i, vb_model in enumerate(vb_models):
for j in xrange(N_samples):
vb_model.resample_from_mf()
vb_plls[i,j] = vb_model.heldout_log_likelihood(S_test, F=F_test)
# Compute the log of the average predicted likelihood
plls['vb'] = -np.log(N_samples) + logsumexp(vb_plls, axis=1)
if svi_models is not None:
print "Computing predictive log likelihood for SVI iterations"
# Compute predictive likelihood over samples from VB model
N_models = len(svi_models)
N_samples = 10
# Preconvolve with the VB model's basis
F_test = svi_models[0].basis.convolve_with_basis(S_test)
svi_plls = np.zeros((N_models, N_samples))
for i, svi_model in enumerate(svi_models):
# print "Computing pred ll for SVI iteration ", i
if i % 10 != 0:
svi_plls[i,:] = np.nan
continue
for j in xrange(N_samples):
svi_model.resample_from_mf()
svi_plls[i,j] = svi_model.heldout_log_likelihood(S_test, F=F_test)
plls['svi'] = -np.log(N_samples) + logsumexp(svi_plls, axis=1)
return plls
def compute_clustering_score(true_model=None,
bfgs_model=None,
sgd_models=None,
gibbs_samples=None,
gibbs_ss_samples=None,
vb_models=None,
svi_models=None):
"""
Compute a few clustering scores.
:return:
"""
# Compute the adjusted mutual info score of the clusterings
amis = {}
arss = {}
true_c = true_model.network.c
N_samples = 100
from sklearn.metrics import adjusted_mutual_info_score
from sklearn.metrics import adjusted_rand_score
if bfgs_model is not None:
from sklearn.cluster import KMeans
assert isinstance(bfgs_model, DiscreteTimeStandardHawkesModel)
# Extract features
features = []
for k in xrange(true_model.K):
features.append(np.concatenate((bfgs_model.W[:,k], bfgs_model.W[k,:])))
bfgs_amis = []
for s in xrange(N_samples):
c = KMeans(n_clusters=true_model.C).fit(np.array(features)).labels_
bfgs_amis.append(adjusted_mutual_info_score(true_c, c))
bfgs_amis = np.array(bfgs_amis)
amis['bfgs'] = (bfgs_amis.mean(), bfgs_amis.std())
if gibbs_samples is not None:
print "Computing predictive log likelihood for Gibbs samples"
# Compute log(E[pred likelihood]) on second half of samplese
offset = len(gibbs_samples) // 2
gibbs_amis = []
for s in gibbs_samples[offset:]:
gibbs_amis.append(adjusted_mutual_info_score(true_c, s.network.c))
# Convert to numpy array
gibbs_amis = np.array(gibbs_amis)
amis['gibbs'] = (gibbs_amis.mean(), gibbs_amis.std())
if vb_models is not None:
print "Computing predictive log likelihood for VB samples"
# Compute log(E[pred likelihood]) on second half of samplese
vb_model = vb_models[-1]
vb_amis = []
for s in xrange(N_samples):
vb_model.resample_from_mf()
vb_amis.append(adjusted_mutual_info_score(true_c, vb_model.network.c))
# Convert to numpy array
vb_amis = np.array(vb_amis)
amis['vb'] = (vb_amis.mean(), vb_amis.std())
if svi_models is not None:
print "Computing predictive log likelihood for SVI samples"
svi_model = svi_models[-1]
svi_amis = []
for s in xrange(N_samples):
svi_model.resample_from_mf()
svi_amis.append(adjusted_mutual_info_score(true_c, svi_model.network.c))
# Convert to numpy array
svi_amis = np.array(svi_amis)
amis['svi'] = (svi_amis.mean(), svi_amis.std())
return amis
def plot_pred_ll_vs_time(plls, timestamps, Z=1.0, T_train=None, nbins=4):
# import seaborn as sns
# sns.set(style="whitegrid")
from hips.plotting.layout import create_figure
from hips.plotting.colormaps import harvard_colors
# Make the ICML figure
fig = create_figure((4,3))
ax = fig.add_subplot(111)
col = harvard_colors()
plt.grid()
# Compute the max and min time in seconds
print "Homog PLL: ", plls['homog']
# DEBUG
plls['homog'] = 0.0
Z = 1.0
assert "bfgs" in plls and "bfgs" in timestamps
# t_bfgs = timestamps["bfgs"]
t_bfgs = 1.0
t_start = 1.0
t_stop = 0.0
if 'svi' in plls and 'svi' in timestamps:
isreal = ~np.isnan(plls['svi'])
svis = plls['svi'][isreal]
t_svi = timestamps['svi'][isreal]
t_svi = t_bfgs + t_svi - t_svi[0]
t_stop = max(t_stop, t_svi[-1])
ax.semilogx(t_svi, (svis - plls['homog'])/Z, color=col[0], label="SVI", lw=1.5)
if 'vb' in plls and 'vb' in timestamps:
t_vb = timestamps['vb']
t_vb = t_bfgs + t_vb
t_stop = max(t_stop, t_vb[-1])
ax.semilogx(t_vb, (plls['vb'] - plls['homog'])/Z, color=col[1], label="VB", lw=1.5)
if 'gibbs' in plls and 'gibbs' in timestamps:
t_gibbs = timestamps['gibbs']
t_gibbs = t_bfgs + t_gibbs
t_stop = max(t_stop, t_gibbs[-1])
ax.semilogx(t_gibbs, (plls['gibbs'] - plls['homog'])/Z, color=col[2], label="Gibbs", lw=1.5)
# if 'gibbs_ss' in plls and 'gibbs_ss' in timestamps:
# t_gibbs = timestamps['gibbs_ss']
# t_gibbs = t_bfgs + t_gibbs
# t_stop = max(t_stop, t_gibbs[-1])
# ax.semilogx(t_gibbs, (plls['gibbs_ss'] - plls['homog'])/Z, color=col[8], label="Gibbs-SS", lw=1.5)
# Extend lines to t_stop
if 'svi' in plls and 'svi' in timestamps:
final_svi_pll = -np.log(4) + logsumexp(svis[-4:])
ax.semilogx([t_svi[-1], t_stop],
[(final_svi_pll - plls['homog'])/Z,
(final_svi_pll - plls['homog'])/Z],
'--',
color=col[0], lw=1.5)
if 'vb' in plls and 'vb' in timestamps:
ax.semilogx([t_vb[-1], t_stop],
[(plls['vb'][-1] - plls['homog'])/Z,
(plls['vb'][-1] - plls['homog'])/Z],
'--',
color=col[1], lw=1.5)
ax.semilogx([t_start, t_stop],
[(plls['bfgs'] - plls['homog'])/Z, (plls['bfgs'] - plls['homog'])/Z],
color=col[3], lw=1.5, label="MAP" )
# Put a legend above
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=5, mode="expand", borderaxespad=0.,
prop={'size':9})
ax.set_xlim(t_start, t_stop)
# Format the ticks
# plt.locator_params(nbins=nbins)
import matplotlib.ticker as ticker
logxscale = 3
xticks = ticker.FuncFormatter(lambda x, pos: '{0:.2f}'.format(x/10.**logxscale))
ax.xaxis.set_major_formatter(xticks)
ax.set_xlabel('Time ($10^{%d}$ s)' % logxscale)
logyscale = 4
yticks = ticker.FuncFormatter(lambda y, pos: '{0:.3f}'.format(y/10.**logyscale))
ax.yaxis.set_major_formatter(yticks)
ax.set_ylabel('Pred. LL ($ \\times 10^{%d}$)' % logyscale)
# ylim = ax.get_ylim()
# ax.plot([t_bfgs, t_bfgs], ylim, '--k')
# ax.set_ylim(ylim)
ylim = (-129980, -129840)
ax.set_ylim(ylim)
# plt.tight_layout()
plt.subplots_adjust(bottom=0.2, left=0.2)
# plt.title("Predictive Log Likelihood ($T=%d$)" % T_train)
plt.show()
fig.savefig('figure2b.pdf')
# seed = 2650533028
seed = None
run = 4
K = 50
C = 5
T = 100000
T_train = 99000
T_test = 1000
data_path = os.path.join("data", "synthetic", "synthetic_K%d_C%d_T%d.pkl.gz" % (K,C,T))
test_path = os.path.join("data", "synthetic", "synthetic_test_K%d_C%d_T%d.pkl" % (K,C,T_test))
out_path = os.path.join("data", "synthetic", "results_K%d_C%d_T%d" % (K,C,T), "run%03d" %run, "results" )
run_comparison(data_path, test_path, out_path, T_train=T_train, seed=seed)
|
{
"content_hash": "64169fddc018e552816a1276930657b8",
"timestamp": "",
"source": "github",
"line_count": 1009,
"max_line_length": 108,
"avg_line_length": 38.194251734390484,
"alnum_prop": 0.5487570709429653,
"repo_name": "michaelpacer/pyhawkes",
"id": "7170701dfbbd1028c72a7b891eb838555cd3cea4",
"size": "38538",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "experiments/synthetic_comparison_long.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "1782"
},
{
"name": "Python",
"bytes": "467808"
}
],
"symlink_target": ""
}
|
"""
This module provide the function :py:func:`summary` that is used for printing
an execution summary at the end of luigi invocations.
See `https://github.com/spotify/luigi/blob/master/examples/execution_summary_example.py` for an example.
"""
import textwrap
import datetime
def _partition_tasks(worker):
"""
Takes a worker and sorts out tasks based on their status.
Still_pending_not_ext is only used to get upstream_failure, upstream_missing_dependency and run_by_other_worker
"""
task_history = worker._add_task_history
pending_tasks = {task for (task, status, ext) in task_history if status == 'PENDING'}
set_tasks = {}
set_tasks["completed"] = {task for (task, status, ext) in task_history if status == 'DONE' and task in pending_tasks}
set_tasks["already_done"] = {task for (task, status, ext) in task_history if status == 'DONE' and task not in pending_tasks and task not in set_tasks["completed"]}
set_tasks["failed"] = {task for (task, status, ext) in task_history if status == 'FAILED'}
set_tasks["still_pending_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["failed"] and task not in set_tasks["completed"] and not ext}
set_tasks["still_pending_not_ext"] = {task for (task, status, ext) in task_history if status == 'PENDING' and task not in set_tasks["failed"] and task not in set_tasks["completed"] and ext}
set_tasks["run_by_other_worker"] = set()
set_tasks["upstream_failure"] = set()
set_tasks["upstream_missing_dependency"] = set()
set_tasks["upstream_run_by_other_worker"] = set()
set_tasks["unknown_reason"] = set()
return set_tasks
def _populate_unknown_statuses(set_tasks):
"""
Add the "upstream_*" and "unknown_reason" statuses my mutating set_tasks.
"""
visited = set()
for task in set_tasks["still_pending_not_ext"]:
_depth_first_search(set_tasks, task, visited)
def _depth_first_search(set_tasks, current_task, visited):
"""
This dfs checks why tasks are still pending.
"""
visited.add(current_task)
if current_task in set_tasks["still_pending_not_ext"]:
upstream_failure = False
upstream_missing_dependency = False
upstream_run_by_other_worker = False
for task in current_task._requires():
if task not in visited:
_depth_first_search(set_tasks, task, visited)
if task in set_tasks["failed"] or task in set_tasks["upstream_failure"]:
set_tasks["upstream_failure"].add(current_task)
upstream_failure = True
if task in set_tasks["still_pending_ext"] or task in set_tasks["upstream_missing_dependency"]:
set_tasks["upstream_missing_dependency"].add(current_task)
upstream_missing_dependency = True
if task in set_tasks["run_by_other_worker"] or task in set_tasks["upstream_run_by_other_worker"]:
set_tasks["upstream_run_by_other_worker"].add(current_task)
upstream_run_by_other_worker = True
if not upstream_failure and not upstream_missing_dependency and not upstream_run_by_other_worker and current_task not in set_tasks["run_by_other_worker"]:
set_tasks["unknown_reason"].add(current_task)
def _get_str(task_dict, extra_indent):
"""
This returns a string for each status
"""
lines = []
for task_family, tasks in task_dict.items():
row = ' '
if extra_indent:
row += ' '
if len(lines) >= 5:
"""
This is how many rows will be printed for each status. If you want fewer rows you can lower the limit.
"""
row += '...'
lines.append(row)
break
if len(tasks[0].get_params()) == 0:
row += '- {0} {1}()'.format(len(tasks), str(task_family))
elif _get_len_of_params(tasks[0]) > 60 or (len(tasks) == 2 and len(tasks[0].get_params()) > 1 and (_get_len_of_params(tasks[0]) > 40 or len(str(tasks[0])) > 100)) or len(str(tasks[0])) > 200:
"""
This is to make sure that there is no really long task in the output
"""
row += '- {0} {1}(...)'.format(len(tasks), task_family)
elif len((tasks[0].get_params())) == 1:
attributes = sorted({getattr(task, tasks[0].get_params()[0][0]) for task in tasks})
row += '- {0} {1}({2}='.format(len(tasks), task_family, tasks[0].get_params()[0][0])
if _ranging_attributes(attributes, tasks[0].get_params()[0]) and len(attributes) > 3:
row += '{0}...{1}'.format(tasks[0].get_params()[0][1].serialize(attributes[0]), tasks[0].get_params()[0][1].serialize(attributes[-1]))
else:
row += '{0}'.format(_get_str_one_parameter(tasks))
row += ")"
else:
ranging = False
params = _get_set_of_params(tasks)
unique_param_keys = list(_get_unique_param_keys(params))
if len(unique_param_keys) == 1:
unique_param, = unique_param_keys
attributes = sorted(params[unique_param])
if _ranging_attributes(attributes, unique_param) and len(attributes) > 2:
ranging = True
row += '- {0} {1}({2}'.format(len(tasks), task_family, _get_str_ranging_multiple_parameters(attributes, tasks, unique_param))
if not ranging:
if len(tasks) == 1:
row += '- {0} {1}'.format(len(tasks), tasks[0])
if len(tasks) == 2:
row += '- {0} and {1}'.format(tasks[0], tasks[1])
if len(tasks) > 2:
row += '- {0} and {1} other {2}'.format(tasks[0], len(tasks) - 1, task_family)
lines.append(row)
return '\n'.join(lines)
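# Illustrative rows produced for one status (task names and parameter values hypothetical):
#   - 1 LoadDay(date=2016-01-01)
#   - 12 LoadDay(date=2016-02-01...2016-02-12)
#   - 3 WideTask(...)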
def _get_len_of_params(task):
return sum(len(param[0]) for param in task.get_params())
def _get_str_ranging_multiple_parameters(attributes, tasks, unique_param):
row = ''
str_unique_param = '{0}...{1}'.format(unique_param[1].serialize(attributes[0]), unique_param[1].serialize(attributes[-1]))
for param in tasks[0].get_params():
row += '{0}='.format(param[0])
if param[0] == unique_param[0]:
row += '{0}'.format(str_unique_param)
else:
row += '{0}'.format(param[1].serialize(getattr(tasks[0], param[0])))
if param != tasks[0].get_params()[-1]:
row += ", "
row += ')'
return row
def _get_set_of_params(tasks):
params = {}
for param in tasks[0].get_params():
params[param] = {getattr(task, param[0]) for task in tasks}
return params
def _get_unique_param_keys(params):
for param_key, param_values in params.items():
if len(param_values) > 1:
yield param_key
def _ranging_attributes(attributes, unique_param):
"""
Checks if there is a continuous range
"""
if len(attributes) > 2:
if unique_param[1].next_in_enumeration(attributes[0]) is None:
return False
for i in range(1, len(attributes)):
if unique_param[1].next_in_enumeration(attributes[i - 1]) != attributes[i]:
return False
return True
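# Editor's illustration (hedged, not part of luigi): a minimal sketch of how
# _ranging_attributes decides whether sorted parameter values form one continuous
# run. _FakeIntParam stands in for a luigi parameter object that implements
# next_in_enumeration(); the values are made up.
class _FakeIntParam(object):
    def next_in_enumeration(self, value):
        return value + 1
def _ranging_attributes_example():
    param = ('n', _FakeIntParam())
    print(_ranging_attributes([1, 2, 3, 4], param))  # True: 1,2,3,4 is continuous
    print(_ranging_attributes([1, 2, 4], param))     # False: 3 is missing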
def _get_str_one_parameter(tasks):
row = ''
count = 0
for task in tasks:
if (len(row) >= 30 and count > 2 and count != len(tasks) - 1) or len(row) > 200:
row += '...'
break
row += '{0}'.format(getattr(task, task.get_params()[0][0]))
if count < len(tasks) - 1:
row += ','
count += 1
return row
def _serialize_first_param(task):
return task.get_params()[0][1].serialize(getattr(task, task.get_params()[0][0]))
def _get_number_of_tasks_for(status, group_tasks):
if status == "still_pending":
return (_get_number_of_tasks(group_tasks["still_pending_ext"]) +
_get_number_of_tasks(group_tasks["still_pending_not_ext"]))
return _get_number_of_tasks(group_tasks[status])
def _get_number_of_tasks(task_dict):
return sum(len(tasks) for tasks in task_dict.values())
def _get_comments(group_tasks):
"""
Get the human readable comments and quantities for the task types.
"""
comments = {}
for status, human in _COMMENTS:
num_tasks = _get_number_of_tasks_for(status, group_tasks)
if num_tasks:
space = " " if status in _PENDING_SUB_STATUSES else ""
comments[status] = '{space}* {num_tasks} {human}:\n'.format(
space=space,
num_tasks=num_tasks,
human=human)
return comments
# Ordered in the sense that they'll be printed in this order
_ORDERED_STATUSES = (
"already_done",
"completed",
"failed",
"still_pending",
"still_pending_ext",
"run_by_other_worker",
"upstream_failure",
"upstream_missing_dependency",
"upstream_run_by_other_worker",
"unknown_reason",
)
_PENDING_SUB_STATUSES = set(_ORDERED_STATUSES[_ORDERED_STATUSES.index("still_pending_ext"):])
_COMMENTS = set((
("already_done", 'present dependencies were encountered'),
("completed", 'ran successfully'),
("failed", 'failed'),
("still_pending", 'were left pending, among these'),
("still_pending_ext", 'were missing external dependencies'),
("run_by_other_worker", 'were being run by another worker'),
("upstream_failure", 'had failed dependencies'),
("upstream_missing_dependency", 'had missing external dependencies'),
("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'),
("unknown_reason", 'were left pending because of unknown reason'),
))
def _get_run_by_other_worker(worker):
"""
    This returns a set of the tasks that are being run by other workers
"""
worker_that_blocked_task = dict()
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_task:
worker_that_blocked_task[other_task] = other_worker_id
return set(worker_that_blocked_task.keys())
def _get_external_workers(worker):
"""
This returns a dict with a set of tasks for all of the other workers
"""
worker_that_blocked_task = dict()
get_work_response_history = worker._get_work_response_history
for get_work_response in get_work_response_history:
if get_work_response['task_id'] is None:
for running_task in get_work_response['running_tasks']:
other_worker_id = running_task['worker']
other_task_id = running_task['task_id']
other_task = worker._scheduled_tasks.get(other_task_id)
if other_task:
if other_worker_id not in worker_that_blocked_task.keys():
worker_that_blocked_task[other_worker_id] = set()
worker_that_blocked_task[other_worker_id].add(other_task)
return worker_that_blocked_task
def _group_tasks_by_name_and_status(task_dict):
"""
    Takes an iterable of tasks and returns a dictionary mapping each task family name to the list of tasks in that family
"""
group_status = {}
for task in task_dict:
if task.task_family not in group_status:
group_status[task.task_family] = []
group_status[task.task_family].append(task)
return group_status
def _summary_dict(worker):
set_tasks = _partition_tasks(worker)
set_tasks["run_by_other_worker"] = _get_run_by_other_worker(worker)
_populate_unknown_statuses(set_tasks)
return set_tasks
def _summary_format(set_tasks, worker):
group_tasks = {}
for status, task_dict in set_tasks.items():
group_tasks[status] = _group_tasks_by_name_and_status(task_dict)
str_tasks = {}
comments = _get_comments(group_tasks)
num_all_tasks = len(set_tasks["already_done"]) + len(set_tasks["completed"]) + len(set_tasks["failed"]) + len(set_tasks["still_pending_ext"]) + len(set_tasks["still_pending_not_ext"])
str_output = ''
str_output += 'Scheduled {0} tasks of which:\n'.format(num_all_tasks)
for status in _ORDERED_STATUSES:
if status not in comments:
continue
str_output += '{0}'.format(comments[status])
if status != 'still_pending':
str_output += '{0}\n'.format(_get_str(group_tasks[status], status in _PENDING_SUB_STATUSES))
ext_workers = _get_external_workers(worker)
group_tasks_ext_workers = {}
for ext_worker, task_dict in ext_workers.items():
group_tasks_ext_workers[ext_worker] = _group_tasks_by_name_and_status(task_dict)
if len(ext_workers) > 0:
str_output += "\nThe other workers were:\n"
count = 0
for ext_worker, task_dict in ext_workers.items():
if count > 3 and count < len(ext_workers) - 1:
str_output += " and {0} other workers".format(len(ext_workers) - count)
break
str_output += " - {0} ran {1} tasks\n".format(ext_worker, len(task_dict))
count += 1
str_output += '\n'
if num_all_tasks == len(set_tasks["already_done"]) + len(set_tasks["still_pending_ext"]) + len(set_tasks["still_pending_not_ext"]):
if len(ext_workers) == 0:
str_output += '\n'
str_output += 'Did not run any tasks'
smiley = ""
reason = ""
if len(set_tasks["failed"]):
smiley = ":("
reason = "there were failed tasks"
elif len(set_tasks["still_pending_ext"]):
smiley = ":|"
reason = "there were missing external dependencies"
else:
smiley = ":)"
reason = "there were no failed tasks or missing external dependencies"
str_output += "\nThis progress looks {0} because {1}".format(smiley, reason)
if num_all_tasks == 0:
str_output = 'Did not schedule any tasks'
return str_output
def _summary_wrap(str_output):
return textwrap.dedent("""
===== Luigi Execution Summary =====
{str_output}
===== Luigi Execution Summary =====
""").format(str_output=str_output)
def summary(worker):
"""
Given a worker, return a human readable string describing roughly what the
    worker has done.
"""
return _summary_wrap(_summary_format(_summary_dict(worker), worker))
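# Editor's usage sketch (hedged, not part of luigi): summary() is normally called
# by luigi's interface code on the worker that just ran a build. The scheduler and
# Worker constructor names below are assumptions for this luigi version, and
# MyTask is a made-up task used purely for illustration; the sketch is never run.
def _summary_usage_sketch():
    import luigi
    import luigi.scheduler
    import luigi.worker
    class MyTask(luigi.Task):
        def complete(self):
            return True  # pretend the task is already done
    sch = luigi.scheduler.CentralPlannerScheduler()
    w = luigi.worker.Worker(scheduler=sch)
    w.add(MyTask())
    w.run()
    print(summary(w))  # prints the "Luigi Execution Summary" block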
|
{
"content_hash": "9f70b5beecad58087e5873d2791480ad",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 199,
"avg_line_length": 41.29329608938548,
"alnum_prop": 0.6024487587093282,
"repo_name": "penelopy/luigi",
"id": "d188bb1819ca200784895d85b9b241c6db8f9427",
"size": "15386",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "luigi/execution_summary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2162"
},
{
"name": "HTML",
"bytes": "31520"
},
{
"name": "JavaScript",
"bytes": "441840"
},
{
"name": "Python",
"bytes": "1162084"
},
{
"name": "Shell",
"bytes": "2464"
}
],
"symlink_target": ""
}
|
"""
tools for expression and count based tasks
"""
import os,sys,csv,gc,re
import numpy as np
def read_RSEM_counts_files(geneFilePath,isoformFilePath):
"""
read the RSEM counts files into a matrix
"""
if not os.path.exists(geneFilePath):
raise Exception("Cannot find gene file\n%s"%(geneFilePath))
if not os.path.exists(isoformFilePath):
raise Exception("Cannot find isoform file\n%s"%(isoformFilePath))
## load the gene counts
fid1 = open(geneFilePath,'rU')
reader1 = csv.reader(fid1,delimiter="\t")
header1 = next(reader1)
results1 = {}
check = 0
gc.disable()
for linja in reader1:
check += 1
results1[linja[0]] = {'transcript':linja[1],'length':float(linja[2]),'eff_length':float(linja[3]),\
'exp_count':int(round(float(linja[4]))),'TPM':float(linja[5]),'FPKM':float(linja[6])}
fid1.close()
if check != len(results1.keys()):
raise Exception("Rows in gene count file are not first columns unique")
## load the isoform results
fid2 = open(isoformFilePath,'rU')
reader2 = csv.reader(fid2,delimiter="\t")
header2 = next(reader2)
results2 = {}
check = 0
for linja in reader2:
check += 1
results2[linja[0]] = {'gene':linja[1],'length':float(linja[2]),'eff_length':float(linja[3]),\
'exp_count':float(linja[4]),'TPM':float(linja[5]),'FPKM':float(linja[6])}
    fid2.close()
    if check != len(results2.keys()):
        raise Exception("Isoform count file has duplicate ids in the first column")
gc.enable()
return results1, results2
def read_matrix(matFilePath,delimiter=",",mtype='float'):
"""
assumes that row one are the samples and col one are the transcripts
matrix can only be of mtype 'int' or 'float'
"""
print('reading', matFilePath)
if mtype not in ['int','float']:
raise Exception("mtype must be 'int' or 'float'")
if not os.path.exists(matFilePath):
raise Exception("Cannot find matFilePath\n%s"%matFilePath)
fid = open(matFilePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
header = next(reader)
## get the gene and sample ids
transcriptIds = []
sampleIds = np.array(header[1:])
gc.disable()
for linja in reader:
transcriptIds.append(linja[0])
gc.enable()
transcriptIds = np.array(transcriptIds)
fid.close()
## fill in the matrix
mat = np.zeros((transcriptIds.shape[0],sampleIds.shape[0]),dtype=mtype)
fid = open(matFilePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
header = next(reader)
row = 0
for linja in reader:
if mtype == 'int':
mat[row,:] = [int(float(i)) for i in linja[1:]]
else:
mat[row,:] = [float(i) for i in linja[1:]]
row +=1
fid.close()
return transcriptIds,sampleIds,mat
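# Editor's illustration (hedged, not part of the original module): read_matrix
# expects a header row of sample ids and a first column of transcript ids. The
# sketch below writes a toy file with that layout and reads it back; the file
# name and values are made up.
def _read_matrix_example():
    with open("toy-counts.csv", "w") as f:
        f.write("transcript,sampleA,sampleB\n")
        f.write("tx1,10,12\n")
        f.write("tx2,0,3\n")
    transcriptIds, sampleIds, mat = read_matrix("toy-counts.csv", delimiter=",", mtype='int')
    print(mat.shape)  # (2, 2): two transcripts by two samples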
def read_de_results(filePath,delimiter=",",tool="edgeR"):
"""
read the differential expression output from DESeq or edgeR
"""
print('reading', filePath)
if not os.path.exists(filePath):
raise Exception("Cannot find matFilePath\n%s"%filePath)
if tool not in ["edgeR","DESeq"]:
raise Exception("invalid tool specified use 'edgeR' or 'DESeq'")
fid = open(filePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
## get columnIds
header = next(reader)
columnIds = np.array(header[1:])
## get the gene and sample ids
transcriptIds = []
gc.disable()
for linja in reader:
transcriptIds.append(linja[0])
gc.enable()
transcriptIds = np.array(transcriptIds)
fid.close()
## fill in the matrix
mat = np.zeros((transcriptIds.shape[0],columnIds.shape[0]))
fid = open(filePath,'r')
reader = csv.reader(fid,delimiter=delimiter)
header = next(reader)
row = 0
for linja in reader:
_row = [re.sub("NA","NaN",i) for i in linja[1:]]
mat[row,:] = [float(i) for i in _row]
row +=1
fid.close()
return transcriptIds,columnIds,mat
def create_count_matrix(results,label,sampleList):
"""
this function is untested
"""
## use first sample to get rows
mat = np.zeros((len(results[0].keys()),len(sampleList)))
keys = sorted(np.array(results[0].keys()))
for j,sample in enumerate(sampleList):
for i,key in enumerate(keys):
mat[i,j] = results[j][key]['exp_count']
## write to file
fid = open("%s-counts.csv"%label,'w')
writer = csv.writer(fid)
if re.search("gene",label):
writer.writerow(["gene"]+sampleList)
else:
writer.writerow(["isoform"]+sampleList)
for r in range(mat.shape[0]):
row = [keys[r]] + [int(i) for i in mat[r,:].tolist()]
writer.writerow(row)
fid.close()
|
{
"content_hash": "b51ba957e91ee1f7609fb0c65e875c48",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 115,
"avg_line_length": 29.08823529411765,
"alnum_prop": 0.6048533872598585,
"repo_name": "ajrichards/htsint",
"id": "391448ef16677991707376e6c40a8102d65853ce",
"size": "4968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "htsint/tools/ExpressionLib.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "273743"
}
],
"symlink_target": ""
}
|
"""
Copyright 2017 Andris Zbitkovskis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import time
import threading
import requests
import json
import pika
import random
from pika.exceptions import *
from individuals import Individual
from population import Population
from weigherstate import WeigherState
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from cephapi.cephapi import put_osd_reweight,get_fitness_df,get_pg_stat
class Weigher (threading.Thread):
#class Weigher ():
DELAY=5
def get_rnd(self):
if self.use_random:
return (random.random())
else:
self.current_random_idx+=1
if self.current_random_idx >=len(self.random_values):
self.current_random_idx=0
return (self.random_values[self.current_random_idx])
def read_random_values(self):
with open(self.random_file_name,'r') as f:
res=[]
for row in f:
res+=row.rstrip('\n').split(' ')
f.close()
r=[]
for s in res:
r.append(float(s))
return r
def get_universe_index(self,a_rnd):
res=math.floor(float(a_rnd*(self.universe_size+1)))
return res
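    # Editor's note (hedged): get_universe_index maps a uniform draw in [0, 1)
    # onto an index into self.universe. For example, with universe_size == 9,
    # a_rnd == 0.25 gives floor(0.25 * 10) == 2. Because the factor is
    # universe_size + 1, draws very close to 1.0 yield universe_size itself,
    # which is one past the last valid index of self.universe.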
def get_new_memebers(self):
t_selecion=[]
while len(t_selecion) <(self.pop_size*self.osd_count) :
i=self.get_universe_index(self.get_rnd())
t_selecion.append(i)
#Get initial population
src_individuals=[]
for pi in range(self.pop_size):
" get touples for population"
weights=[]
for oi in range (self.osd_count):
xi=t_selecion[pi+oi*self.pop_size]
                # the universe holds only one weight tuple per entry
w=self.universe[xi].weights[0]
weights.append((w['weight'],w['idx']))
ind=Individual(weights,self.byte_size)
src_individuals.append(ind)
return src_individuals
def init_population(self):
src_individuals=self.get_new_memebers()
self.current_pop.init_population(src_individuals)
self.logger.debug (" state {}".format(self.state))
def set_weights(self,inds, inspect_ind =False):
for ind in inds:
if not inspect_ind or ind.must_recalc:
self.logger.debug("setting weights for {} ".format(ind.str_binary_rep()))
if self.is_physical_model:
for i,val in enumerate(ind.weights):
put_osd_reweight(i,val['weight'])
cidx=0
pgstat=get_pg_stat()
while pgstat['active+clean'] != pgstat['pgs'] and cidx < self.max_cidx :
pgstat=get_pg_stat()
cidx+=1
self.logger.debug("going to sleep for {}.time {} ".format(cidx,ind.str_binary_rep()))
time.sleep(10)
ind_stats=get_fitness_df()
self.obtain_chart_data()
fitness=0
for ist in ind_stats:
fitness+=ist['var']*ist['optim']
ind.fitness=fitness
else:
ind.recalc_fitness()
def select_individuals(self,s_individuals=None):
ng_individuals=[]
curr_pop=self.pops[self.current_pop_idx]
pop_size=self.pop_size
if s_individuals is None:
s_individuals=curr_pop.individuals
curr_pop.init_roulette(verbose=False,a_inds=s_individuals)
max_fit=-9999
max_ind=None
max_id=""
for i in range(len(s_individuals)):
ind=s_individuals[i]
if ind.fitness>max_fit:
max_fit=ind.fitness
max_ind=ind
max_id=i
        # elitism: also carry the best individual over unchanged
t_ind= self.get_ind_copy(max_ind)
t_ind.fitness=max_ind.fitness
t_ind.p_id=max_id
ng_individuals.append(t_ind)
for i in range(pop_size-1):
rnd=self.get_rnd()
idx=curr_pop.get_chosen_idx(rnd)
t_ind=self.get_ind_copy(s_individuals[idx])
ng_individuals.append(t_ind)
return ng_individuals
def select_for_crossover(self):
if self.current_pop_idx >0 and self.continuous_add_new_individuals :
src_individuals=self.get_new_memebers()
self.current_pop.init_population(src_individuals,self.current_pop.selection_source)
self.set_weights(self.current_pop.selection_source)
self.current_pop.print_ind(self.current_pop.selection_source,prefix="POP {} newin".format(self.current_pop_idx),filename=self.log_file)
self.current_pop.selection_source=self.current_pop.selection_source+self.current_pop.individuals
newinds=self.select_individuals(self.current_pop.selection_source)
else:
newinds=self.select_individuals()
self.current_pop.init_population(newinds,self.current_pop.selection)
    def refresh_osd(self):
pass
def get_ind_copy(self,s_ind):
weights=[]
for w in s_ind.weights:
weights.append((w['weight'],w['idx']))
ind=Individual(weights,self.byte_size)
ind.fitness=s_ind.fitness
return ind
def get_empty_pop(self, pop_idx = 0 ):
pop=Population(self.lower_limit,self.upper_limit,self.interval,self.fitness_func,pop_idx =pop_idx, log_file = self.log_file )
return pop
def make_new_pop(self):
self.set_weights(self.current_pop.final,True)
newgen=self.select_individuals(self.current_pop.final)
self.current_pop_idx+=1
pop=self.get_empty_pop(self.current_pop_idx)
self.pops.append(pop)
self.current_pop=self.pops[self.current_pop_idx]
self.current_pop.pop_idx=self.current_pop_idx
self.current_pop.init_population(newgen)
def exec_genetic_selection(self):
self.exec_cross_over()
self.current_pop.final=self.current_pop.individuals+self.current_pop.childs
self.exec_mutation()
def exec_cross_over(self):
curr_pop=self.current_pop
it = range(len(curr_pop.selection)//2)
for i in it:
ind_a=curr_pop.selection[i*2]
ind_b=curr_pop.selection[i*2+1]
rnd_a=self.get_rnd()
rnd_b=self.get_rnd()
c_split_point=self.byte_size*self.osd_count//2
res_a,res_b=curr_pop.cross_over(ind_a,ind_b,rnd_a,rnd_b,c_split_point)
res_a.must_recalc=True
res_b.must_recalc=True
curr_pop.append(res_a,curr_pop.childs)
curr_pop.append(res_b,curr_pop.childs)
def exec_mutation(self):
curr_pop=self.current_pop
for i in curr_pop.final:
rnd=self.get_rnd()
if rnd < self.mutate_prob:
i.mutate=True
for i in range(len(curr_pop.final)):
if curr_pop.final[i].mutate:
rnd_a=self.get_rnd()
rnd_b=self.get_rnd()
m_res=curr_pop.exec_mutation(curr_pop.final[i],(rnd_a,rnd_b),2)
m_res.must_recalc=True
curr_pop.set_individual(m_res,i,curr_pop.final)
def obtain_chart_data(self):
resp=requests.get(self.chart_url)
if resp.status_code != 200:
self.logger.error ("resp.status_code {}".format(resp.status_code))
def run(self):
self.logger.debug ("Starting {} ".format(self.name))
self.state = WeigherState.RUNNING
while True:
if self.state == WeigherState.RUNNING:
self.set_weights(self.current_pop.individuals)
self.send_stats()
self.logger.debug ("Going to print ind ")
self.current_pop.print_ind(prefix="POP {} indiv".format(self.current_pop_idx),log_file=self.log_file)
self.select_for_crossover()
self.current_pop.print_ind(self.current_pop.selection,prefix="POP {} selec".format(self.current_pop_idx),log_file=self.log_file)
self.exec_cross_over()
self.set_weights(self.current_pop.childs,True)
self.current_pop.print_ind(self.current_pop.childs,prefix="POP {} child".format(self.current_pop_idx),log_file=self.log_file)
self.current_pop.final=self.current_pop.individuals+self.current_pop.childs
self.current_pop.print_ind(self.current_pop.final,prefix="POP {} final".format(self.current_pop_idx),log_file=self.log_file)
self.exec_mutation()
self.make_new_pop()
self.logger.debug ("Going to sleep for {} ".format(self.delay))
time.sleep(self.delay)
def get_json_stats(self, shift=0):
pop_idx=self.current_pop_idx
if shift <=0 and (self.current_pop_idx +shift) >0:
pop_idx= self.current_pop_idx +shift
sumf,maxf,avgf = self.pops[pop_idx].get_stats()
stats={'attempt':self.attempt, 'generation':pop_idx, 'avg':avgf, 'max':maxf, 'sum':sumf}
self.logger.debug ("Sending stats {} ".format(stats))
jstats= json.dumps(stats)
return jstats
else:
return None
def send_stats(self, shift=0):
jstats=self.get_json_stats(shift)
if not jstats is None:
try:
connection = pika.BlockingConnection(pika.URLParameters(self.amqp_url))
channel = connection.channel()
channel.queue_declare(queue=self.amqp_queue)
channel.basic_publish(exchange='',
routing_key=self.amqp_queue,
body=jstats)
self.logger.debug ("Sending amqp msg {} ".format(jstats))
connection.close()
except AMQPError as ae:
self.logger.error(" There was en error while connecting to amq server: {}".format(type(ae).__name__ ))
def init_clean_pop(self, pop =None):
if pop is None:
pop=Population(self.lower_limit,self.upper_limit,self.interval,self.fitness_func ,pop_idx=0 , log_file = self.log_file)
self.current_random_idx=0
self.pops=[]
self.pops.append(pop)
self.current_pop_idx=0
self.current_pop=self.pops[self.current_pop_idx]
self.init_population()
self.state = WeigherState.RESET
self.attempt=self.attempt+1
def reset_weights(self, inds = None , weight =1.0):
if inds is None:
inds=self.current_pop.individuals
if len(inds) >0:
ind =inds[0]
for i,val in enumerate(ind.weights):
put_osd_reweight(i,weight)
def __init__(self,logger,name,l_limit,u_limit,interval, pop_size, osd_count, r_file_name, fitness_func, amqp_url, amqp_queue='bdrq', is_physical_model = True ,mutate_prob =0.3 , max_cidx =10 , log_file =None):
threading.Thread.__init__(self)
self.attempt=0
self.use_random = self.attempt >0
self.name=name
self.logger=logger
self.chart_url='http://172.28.57.129:8000/obtain_new_chart_data/'
self.amqp_url=amqp_url
self.amqp_queue=amqp_queue
self.lower_limit=l_limit
self.upper_limit=u_limit
self.interval=interval
self.osd_count = osd_count
self.pop_size = pop_size
self.random_file_name= r_file_name
self.universe=[]
self.fitness_func=fitness_func
self.log_file = log_file
pop=Population(l_limit,u_limit,interval,self.fitness_func ,pop_idx=0 , log_file = self.log_file)
self.universe=pop.get_universe(3) #universe precision
self.universe_size=len(self.universe)
self.byte_size=pop.b_size
self.current_random_idx=0
self.random_values=self.read_random_values()
self.mutate_prob = mutate_prob
self.pops=[]
self.pops.append(pop)
self.current_pop_idx=0
self.current_pop=self.pops[self.current_pop_idx]
self.is_physical_model=is_physical_model
self.max_cidx = max_cidx
self.continuous_add_new_individuals=False
self.state = WeigherState.INIT
self.delay=self.DELAY
self.init_population()
|
{
"content_hash": "3398253eeba1a400a28b7f96abdefeca",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 214,
"avg_line_length": 40.78313253012048,
"alnum_prop": 0.5646233382570163,
"repo_name": "zbitmanis/cmanager",
"id": "c311c3a68a5f1e234fd75f6fffba875609b9a883",
"size": "13566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scalerd/weigher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "24825"
},
{
"name": "Python",
"bytes": "87481"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SearchByCoordinates(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SearchByCoordinates Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SearchByCoordinates, self).__init__(temboo_session, '/Library/Yelp/SearchByCoordinates')
def new_input_set(self):
return SearchByCoordinatesInputSet()
def _make_result_set(self, result, path):
return SearchByCoordinatesResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SearchByCoordinatesChoreographyExecution(session, exec_id, path)
class SearchByCoordinatesInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SearchByCoordinates
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Accuracy(self, value):
"""
Set the value of the Accuracy input for this Choreo. ((optional, decimal) Narrow or widen the search range in relation to the coordinates, such as "2" for state or "8" for street address.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Accuracy', value)
def set_BusinessType(self, value):
"""
Set the value of the BusinessType input for this Choreo. ((optional, string) A term to narrow the search, such as "wine" or "restaurants". Leave blank to search for all business types.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('BusinessType', value)
def set_Category(self, value):
"""
Set the value of the Category input for this Choreo. ((optional, string) The category to filter search results with. This can be a list of comma delimited categories. For example, "bars,french". See Choreo description for a list of categories.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Category', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The Consumer Key provided by Yelp.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The Consumer Secret provided by Yelp.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('ConsumerSecret', value)
def set_Count(self, value):
"""
        Set the value of the Count input for this Choreo. ((optional, integer) The number of business results to return. The maximum is 20.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Count', value)
def set_CountryCode(self, value):
"""
Set the value of the CountryCode input for this Choreo. ((optional, string) The ISO 3166-1 2-digit country code to use when parsing the location field. United States = US, Canada = CA, United Kingdom = GB.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('CountryCode', value)
def set_Deals(self, value):
"""
Set the value of the Deals input for this Choreo. ((optional, boolean) Set to "true" to exclusively search for businesses with deals.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Deals', value)
def set_LanguageCode(self, value):
"""
Set the value of the LanguageCode input for this Choreo. ((optional, string) The ISO 639 language code. Default to "en". Reviews and snippets written in the specified language will be returned.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('LanguageCode', value)
def set_Latitude(self, value):
"""
Set the value of the Latitude input for this Choreo. ((required, decimal) The latitude to search near, such as "37.788022".)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Latitude', value)
def set_Longitude(self, value):
"""
Set the value of the Longitude input for this Choreo. ((required, decimal) The longitude to search near, such as "-122.399797".)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Longitude', value)
def set_Offset(self, value):
"""
Set the value of the Offset input for this Choreo. ((optional, integer) Offsets the list of returned business results by this amount.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Offset', value)
def set_Range(self, value):
"""
Set the value of the Range input for this Choreo. ((optional, integer) Narrow or expand a search by specifying a range in either feet, meters, miles, or kilometers, depending on the value of the Units input. Maximum is 25 miles (40000 meters).)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Range', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format of the response from Yelp, either XML or JSON (the default).)
"""
super(SearchByCoordinatesInputSet, self)._set_input('ResponseFormat', value)
def set_Sort(self, value):
"""
Set the value of the Sort input for this Choreo. ((optional, integer) The sort mode: 0 = Best matched, 1 = Distance (default), 2 = Highest Rated.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Sort', value)
def set_TokenSecret(self, value):
"""
Set the value of the TokenSecret input for this Choreo. ((required, string) The Token Secret provided by Yelp.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('TokenSecret', value)
def set_Token(self, value):
"""
Set the value of the Token input for this Choreo. ((required, string) The Token provided by Yelp.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Token', value)
def set_Units(self, value):
"""
Set the value of the Units input for this Choreo. ((optional, string) Specify "feet" (the default), "meters", "miles", or "kilometers". Units apply to the Range input value.)
"""
super(SearchByCoordinatesInputSet, self)._set_input('Units', value)
class SearchByCoordinatesResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SearchByCoordinates Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Yelp. Corresponds to the input value for ResponseFormat (defaults to JSON).)
"""
return self._output.get('Response', None)
class SearchByCoordinatesChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SearchByCoordinatesResultSet(response, path)
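# Editor's usage sketch (hedged, not part of the generated bindings): credentials
# and coordinates are placeholders, and execute_with_results() is the standard
# Temboo SDK call pattern assumed to be available on this Choreo.
def _search_by_coordinates_example(session):
    choreo = SearchByCoordinates(session)
    inputs = choreo.new_input_set()
    inputs.set_ConsumerKey('YOUR_CONSUMER_KEY')
    inputs.set_ConsumerSecret('YOUR_CONSUMER_SECRET')
    inputs.set_Token('YOUR_TOKEN')
    inputs.set_TokenSecret('YOUR_TOKEN_SECRET')
    inputs.set_Latitude('37.788022')
    inputs.set_Longitude('-122.399797')
    results = choreo.execute_with_results(inputs)
    print(results.get_Response())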
|
{
"content_hash": "e2fc1c633f2c4783a6f6ca569d0a8364",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 252,
"avg_line_length": 52.56737588652482,
"alnum_prop": 0.6807879114948732,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "cd0cfdad0391af6cc54055008a5c571be3f001fa",
"size": "8297",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/Yelp/SearchByCoordinates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
import glob
import os
from DistUtilsExtra.command import *
setup(name='clearcutter',
version='0.2',
description='ClearCutter Log Event Processor',
author='Conrad Constantine',
author_email='conrad@alienvault.com',
url = 'http://code.google.com/p/alienvault-labs-garage/',
packages = ['clearcutter'],
scripts=['clear-cutter'],
cmdclass = { "build" : build_extra.build_extra},
requires = ['argparse'],
)
#TODO: easy_install argparse
|
{
"content_hash": "7448aa4ee8f3232066459d599d892275",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 29.22222222222222,
"alnum_prop": 0.6615969581749049,
"repo_name": "MercenaryLogic/bullwood",
"id": "7976bb5889f94bb0643678d6b117bfb84a95a35d",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34806"
}
],
"symlink_target": ""
}
|
import json
import nltk
from nltk.corpus import stopwords
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def Preprocess(t):
'''
Everything reduced to lower case for ease of processing
'''
t = t.lower()
# Specific case to remove <'> so as to tag properly
t = t.replace('\'','')
return t
def Postag(t):
'''
Text Segmentation and Tagging
'''
sentences = nltk.sent_tokenize(t)
tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]
# Attaching POS tags
tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]
# Flattening list of lists
pos_tagged = [item for sublist in tagged_sentences for item in sublist]
return pos_tagged
def Chunking(t):
'''
Chunking is grouping tagged words as phrases
'''
# Tag pattern to identify dishes
pattern = '''FOOD : {<NN.*>+}'''
chunk_rule = nltk.RegexpParser(pattern)
tree = chunk_rule.parse(t)
return tree
def Treeparse(tree):
'''
To parse chunk tree
'''
foods = []
for subtree in tree.subtrees():
if subtree.label() == 'FOOD':
foods.append(' '.join([str(child[0]) for child in subtree]))
return foods
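# Editor's illustration (hedged, not part of the original script): the pipeline
# above in miniature on a made-up review sentence. Requires the usual NLTK data
# (punkt tokenizer and POS tagger) to be installed.
def _tagging_example():
    text = Preprocess("The butter chicken and garlic naan were amazing!")
    tree = Chunking(Postag(text))
    print Treeparse(tree)  # noun-phrase chunks, e.g. ['butter chicken', 'garlic naan']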
if __name__ == '__main__':
unprocess_data = {}
process_data = {}
named_entity = {}
# Reading data back
with open('../data/Reviews.json', 'r') as f:
unprocess_data = json.load(f)
for place, reviews in unprocess_data.items():
for review, rating in reviews:
processed = Preprocess(str(review))
tagged = Postag(processed)
chunked = Chunking(tagged)
parsed = set(Treeparse(chunked))
# parsed has all menu items mentioned in one single review
for item in parsed:
if len(item) >= 4:
if item in process_data.keys():
stats = process_data[item]
popular = stats[0]
senti = stats[1]
# Update new values of item aggr. senti and popularity
popular += 1
senti += rating
process_data[item] = [popular, senti]
else:
process_data[item] = [1, rating]
# Putting menu generated via reviews against place
place = place.encode('utf-8')
named_entity[place] = process_data
process_data = {} # Done to process other place reviews
# Writing JSON data
with open('../data/tagged_mentions.json', 'w') as f:
json.dump(named_entity, f)
|
{
"content_hash": "cb50645c553c3f8c34de325b81d7e719",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 83,
"avg_line_length": 27.65979381443299,
"alnum_prop": 0.5620573984345881,
"repo_name": "anirudhagar13/Zomato-Food-Review",
"id": "b6072f07268978977b2525852c41b8f27f9eacfd",
"size": "2683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/POStagger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31012"
},
{
"name": "HTML",
"bytes": "14313"
},
{
"name": "JavaScript",
"bytes": "8859"
},
{
"name": "PHP",
"bytes": "1136"
},
{
"name": "Python",
"bytes": "24840"
}
],
"symlink_target": ""
}
|
import asyncio
import json
import logging
from urllib.parse import urlparse
import websockets
from google.datacatalog_connectors.qlik.scrape import \
authenticator, constants, engine_api_dimensions_helper, \
engine_api_measures_helper, engine_api_sheets_helper, \
engine_api_visualizations_helper
class EngineAPIScraper:
"""Wraps requests to the Qlik Engine JSON API.
The Qlik Engine JSON API is a WebSocket protocol that uses JSON to pass
information between the Qlik associative engine and the clients. This API
consists of a set of objects representing apps, lists, and so on. These
objects are organized in a hierarchical structure.
Websockets use an asynchronous communication channel, but the public
methods from this class are intended to be called synchronously to keep
consistency with the overall scrape > prepare > ingest workflow. The
public methods take care of handling the async API calls for their clients.
Most private coroutines (async def) rely on 'async with' statements.
They work with an asynchronous context manager and the connection is closed
when exiting the context.
Attributes:
__auth_cookie: An HTTP cookie used to authorize the requests.
"""
def __init__(self, server_address, ad_domain, username, password):
self.__server_address = server_address
self.__ad_domain = ad_domain
self.__username = username
self.__password = password
# The server address starts with an http/https scheme. The below
# statement replaces the original scheme with 'wss', which is used for
# secure websockets communication.
self.__base_api_endpoint = f'wss://{urlparse(server_address).hostname}'
self.__common_headers = {
constants.XRFKEY_HEADER_NAME: constants.XRFKEY,
}
self.__auth_cookie = None
def get_dimensions(self, app_id):
"""Gets the Dimensions (Master Items) set up to a given App.
Returns:
A list of [GenericDimensionProperties](https://help.qlik.com/en-US/sense-developer/September2020/APIs/EngineAPI/definitions-GenericDimensionProperties.html). # noqa E501
"""
self.__set_up_auth_cookie()
return engine_api_dimensions_helper.EngineAPIDimensionsHelper(
self.__server_address, self.__auth_cookie).get_dimensions(app_id)
def get_measures(self, app_id):
"""Gets the Measures (Master Items) set up to a given App.
Returns:
A list of [GenericMeasureProperties](https://help.qlik.com/en-US/sense-developer/September2020/APIs/EngineAPI/definitions-GenericMeasureProperties.html). # noqa E501
"""
self.__set_up_auth_cookie()
return engine_api_measures_helper.EngineAPIMeasuresHelper(
self.__server_address, self.__auth_cookie).get_measures(app_id)
def get_sheets(self, app_id):
"""Gets the Sheets that belong to the given App.
Returns:
A list of [NxContainerEntry](https://help.qlik.com/en-US/sense-developer/September2020/APIs/EngineAPI/definitions-NxContainerEntry.html). # noqa E501
"""
self.__set_up_auth_cookie()
return engine_api_sheets_helper.EngineAPISheetsHelper(
self.__server_address, self.__auth_cookie).get_sheets(app_id)
def get_visualizations(self, app_id):
"""Gets the Visualizations (Master Items) set up to a given App.
Returns:
A list of [GenericObjectProperties](https://help.qlik.com/en-US/sense-developer/September2020/APIs/EngineAPI/definitions-GenericObjectProperties.html). # noqa E501
"""
self.__set_up_auth_cookie()
return engine_api_visualizations_helper.EngineAPIVisualizationsHelper(
self.__server_address,
self.__auth_cookie).get_visualizations(app_id)
def __set_up_auth_cookie(self):
if self.__auth_cookie:
return
windows_auth_url = asyncio.get_event_loop().run_until_complete(
self.__get_windows_authentication_url())
self.__auth_cookie = authenticator.Authenticator\
.get_qps_session_cookie_windows_auth(
ad_domain=self.__ad_domain,
username=self.__username,
password=self.__password,
auth_url=windows_auth_url)
logging.debug('QPS session cookie issued for the Engine API: %s',
self.__auth_cookie)
async def __get_windows_authentication_url(self):
"""Gets a Windows Authentication url.
This method sends an unauthenticated request to a well known endpoint
of the Qlik Engine JSON API. The expected response has a `loginUri`
param, which is the Windows Authentication url.
P.S. The endpoint was manually captured from the Engine API Explorer's
Execution Logs (https://<qlik-site>/dev-hub/engine-api-explorer).
Returns:
A string.
"""
uri = f'{self.__base_api_endpoint}/app/?transient=' \
f'?Xrfkey={constants.XRFKEY}' \
f'&reloadUri={self.__server_address}/dev-hub/engine-api-explorer'
# Sets the User-Agent to Windows temporarily to get a Windows
# Authentication URL that is required by the NTLM authentication flow.
headers = self.__common_headers.copy()
headers['User-Agent'] = constants.WINDOWS_USER_AGENT
async with websockets.connect(uri=uri,
extra_headers=headers) as websocket:
async for message in websocket:
json_message = json.loads(message)
params = json_message.get('params')
if params:
return params.get('loginUri')
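# Editor's usage sketch (hedged, not part of the connector): the constructor
# arguments and the app id below are placeholders. From the caller's point of
# view the calls are synchronous; the websocket handling happens inside the
# helper classes.
def _engine_api_scraper_example():
    scraper = EngineAPIScraper(
        server_address='https://qlik.example.com',
        ad_domain='EXAMPLE',
        username='svc-connector',
        password='not-a-real-password')
    for sheet in scraper.get_sheets(app_id='00000000-0000-0000-0000-000000000000'):
        print(sheet)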
|
{
"content_hash": "e01d58319529331a339d32cdbc0b81f5",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 182,
"avg_line_length": 43,
"alnum_prop": 0.6568475452196383,
"repo_name": "GoogleCloudPlatform/datacatalog-connectors-bi",
"id": "2ffc88f5a5ed34a02ede32015cd82b9f3108f29e",
"size": "6401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-datacatalog-qlik-connector/src/google/datacatalog_connectors/qlik/scrape/engine_api_scraper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3191"
},
{
"name": "Python",
"bytes": "980579"
},
{
"name": "Shell",
"bytes": "9469"
}
],
"symlink_target": ""
}
|
import binascii
import random
from collections import defaultdict, namedtuple
import serial
from terminaltables import AsciiTable
from tools_shared.cmd_line_parser import get_args
from tqdm import tqdm
from vrc_t70.communicator import VrcT70Communicator
from vrc_t70.limitations import MAX_TRUNKS_COUNT
from .shared import init_logger
SensorTemperatureData = namedtuple(
typename="SensorData",
field_names=["sensor_address", "temperature", "trunk_number", "sensor_index"]
)
def print_sensors_per_trunk_count(sensors_count_per_trunk, logger, skip_empty_trunks = True):
table_data = [["Trunk Name", "Sensors Count"]]
for trunk_number, sensors_count in enumerate(sensors_count_per_trunk, 1):
if skip_empty_trunks and (not sensors_count):
continue
table_data.append(
[
"Trunk-{}".format(trunk_number),
sensors_count
]
)
table = AsciiTable(table_data)
logger.info("sensors per trunks:\n{}\n".format(table.table))
def print_sensors_per_trunk_count_protocol_data(sensors_count_per_trunk, logger, skip_empty_trunks = True):
table_data = [["Trunk Name", "Sensors Count"]]
for sensors_data in sensors_count_per_trunk:
if skip_empty_trunks and (not sensors_data.sensors_count()):
continue
table_data.append(
[
"Trunk-{}".format(sensors_data.trunk_number()),
sensors_data.sensors_count()
]
)
table = AsciiTable(table_data)
logger.info("sensors per trunks:\n{}\n".format(table.table))
def print_sensors_data(sensors_data, logger):
sensors_per_trunk = defaultdict(list)
for item in sensors_data:
sensors_per_trunk[item.trunk_number].append(item)
for trunk_number in sorted(sensors_per_trunk.keys()):
sensors_on_trunk = sensors_per_trunk[trunk_number]
sensors_on_trunk = sorted(sensors_on_trunk, key=(lambda x: x.sensor_index))
table_data = [["Index", "Temperature", "Address"]]
for sensor_data in sensors_on_trunk:
table_data.append(
[
sensor_data.sensor_index,
round(sensor_data.temperature, 2),
my_hexlify(sensor_data.sensor_address)
]
)
table = AsciiTable(table_data)
logger.info("data for Trunk-{}:\n{}\n".format(trunk_number, table.table))
def main():
args = get_args()
logger = init_logger("temp reader")
logger.debug("app started")
uart = init_serial(args.uart_name, args.uart_speed)
communicator = VrcT70Communicator(uart, controller_address=args.device_address)
logger.info("initializing communication with device {0} [0x{0:02x}]...".format(args.device_address))
logger.info("\tping")
communicator.ping()
new_session_id = random_byte_array(4)
logger.debug("\tinitializing session id with {}".format(my_hexlify(new_session_id)))
r = communicator.set_session_id(new_session_id)
assert r.session_id() == new_session_id
r = communicator.get_session_id()
logger.debug("\tsession_id = {}".format(my_hexlify(r.session_id())))
assert r.session_id() == new_session_id
logger.debug("scanning for sensors on trunks...")
sensors_count_per_trunk = rescan_devices_on_all_trunks(communicator, logger)
print_sensors_per_trunk_count(sensors_count_per_trunk, logger)
logger.info("bulk data processing commands")
sensors_data = list()
for trunk_number, sensors_count in enumerate(tqdm(sensors_count_per_trunk, unit="trunks"), 1):
temperatures = communicator.get_temperature_on_trunk(trunk_number)
assert temperatures.temperatures_count() == sensors_count
addresses = communicator.get_sensors_unique_addresses_on_trunk(trunk_number)
assert sensors_count == addresses.sensors_count()
for sensor_index in range(sensors_count):
is_connected = temperatures.is_connected(sensor_index)
assert is_connected
temperature = temperatures.temperature(sensor_index)
assert not addresses.is_error_detected(sensor_index)
unique_address = addresses.sensor_unique_address(sensor_index)
sensor_data = SensorTemperatureData(
sensor_address=unique_address,
temperature=temperature,
trunk_number=trunk_number,
sensor_index=sensor_index
)
sensors_data.append(sensor_data)
print_sensors_data(sensors_data, logger)
logger.info("simple data processing commands")
sensors_data = list()
for trunk_number, sensors_count in enumerate(tqdm(sensors_count_per_trunk, unit="trunk"), 1):
for sensor_index in range(sensors_count):
r = communicator.get_temperature_on_sensor_on_trunk(trunk_number, sensor_index)
temperature = r.temperature()
r = communicator.get_sensor_unique_address_on_trunk(trunk_number, sensor_index)
unique_address = r.unique_address()
sensor_data = SensorTemperatureData(
sensor_address=unique_address,
temperature=temperature,
trunk_number=trunk_number,
sensor_index=sensor_index
)
sensors_data.append(sensor_data)
print_sensors_data(sensors_data, logger)
logger.info("retrieving sensors count")
sensors_count_per_trunk = list()
for trunk_number in tqdm(range(1, MAX_TRUNKS_COUNT + 1), unit="trunks"):
r = communicator.get_sensors_count_on_trunk(trunk_number)
sensors_count_per_trunk.append(r)
print_sensors_per_trunk_count_protocol_data(sensors_count_per_trunk, logger)
uart.close()
logger.info("application finished")
return 0
def rescan_devices_on_all_trunks(communicator, logger):
res = []
logger.info("Rescanning devices on trunks")
for trunk_number in tqdm(range(1, MAX_TRUNKS_COUNT + 1), unit="trunsk"):
r = communicator.rescan_sensors_on_trunk(trunk_number)
res.append(r.sensors_count())
return res
def init_serial(uart_name, uart_speed):
return serial.Serial(
uart_name,
baudrate=uart_speed,
bytesize=serial.EIGHTBITS,
timeout=1,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE
)
def my_hexlify(data):
return binascii.hexlify(data).decode("ascii")
def random_byte_array(length):
return bytearray((random.getrandbits(8) for _ in range(length)))
if __name__ == "__main__":
res = main()
exit(res)
|
{
"content_hash": "4367d2b53a36c63cc974ac5dbc896566",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 107,
"avg_line_length": 32.74384236453202,
"alnum_prop": 0.6464570482924628,
"repo_name": "JFF-Bohdan/vrc_t70",
"id": "ab72c9205197ad24182f9bd3fab4203104016f87",
"size": "6647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vrc_t70/command_line/get_temperatures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1988"
},
{
"name": "Python",
"bytes": "33441"
}
],
"symlink_target": ""
}
|
"""Wichman-Hill random number generator.
Wichmann, B. A. & Hill, I. D. (1982)
Algorithm AS 183:
An efficient and portable pseudo-random number generator
Applied Statistics 31 (1982) 188-190
see also:
Correction to Algorithm AS 183
Applied Statistics 33 (1984) 123
McLeod, A. I. (1985)
A remark on Algorithm AS 183
Applied Statistics 34 (1985),198-200
USE:
whrandom.random() yields double precision random numbers
uniformly distributed between 0 and 1.
whrandom.seed(x, y, z) must be called before whrandom.random()
to seed the generator
There is also an interface to create multiple independent
random generators, and to choose from other ranges.
Multi-threading note: the random number generator used here is not
thread-safe; it is possible that nearly simultaneous calls in
different threads return the same random value. To avoid this, you
have to use a lock around all calls. (I didn't want to slow this
down in the serial case by using a lock here.)
"""
import warnings
warnings.warn("the whrandom module is deprecated; please use the random module",
DeprecationWarning)
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley.
class whrandom:
def __init__(self, x = 0, y = 0, z = 0):
"""Initialize an instance.
Without arguments, initialize from current time.
With arguments (x, y, z), initialize from them."""
self.seed(x, y, z)
def seed(self, x = 0, y = 0, z = 0):
"""Set the seed from (x, y, z).
These must be integers in the range [0, 256)."""
if not type(x) == type(y) == type(z) == type(0):
raise TypeError, 'seeds must be integers'
if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
raise ValueError, 'seeds must be in range(0, 256)'
if 0 == x == y == z:
# Initialize from current time
import time
t = long(time.time() * 256)
t = int((t&0xffffff) ^ (t>>24))
t, x = divmod(t, 256)
t, y = divmod(t, 256)
t, z = divmod(t, 256)
# Zero is a poor seed, so substitute 1
self._seed = (x or 1, y or 1, z or 1)
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
# This part is thread-unsafe:
# BEGIN CRITICAL SECTION
x, y, z = self._seed
#
x = (171 * x) % 30269
y = (172 * y) % 30307
z = (170 * z) % 30323
#
self._seed = x, y, z
# END CRITICAL SECTION
#
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
def uniform(self, a, b):
"""Get a random number in the range [a, b)."""
return a + (b-a) * self.random()
def randint(self, a, b):
"""Get a random integer in the range [a, b] including
both end points.
(Deprecated; use randrange below.)"""
return self.randrange(a, b+1)
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
return seq[int(self.random() * len(seq))]
def randrange(self, start, stop=None, step=1, int=int, default=None):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
Do not supply the 'int' and 'default' arguments."""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking
istart = int(start)
if istart != start:
raise ValueError, "non-integer arg 1 for randrange()"
if stop is default:
if istart > 0:
return int(self.random() * istart)
raise ValueError, "empty range for randrange()"
istop = int(stop)
if istop != stop:
raise ValueError, "non-integer stop for randrange()"
if step == 1:
if istart < istop:
return istart + int(self.random() *
(istop - istart))
raise ValueError, "empty range for randrange()"
istep = int(step)
if istep != step:
raise ValueError, "non-integer step for randrange()"
if istep > 0:
n = (istop - istart + istep - 1) / istep
elif istep < 0:
n = (istop - istart + istep + 1) / istep
else:
raise ValueError, "zero step for randrange()"
if n <= 0:
raise ValueError, "empty range for randrange()"
return istart + istep*int(self.random() * n)
# Initialize from the current time
_inst = whrandom()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
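# Editor's usage sketch (hedged, not part of the original module): seeding the
# module-level generator makes the sequence reproducible; the seed values below
# are arbitrary but must each lie in range(0, 256).
def _whrandom_example():
    seed(3, 42, 7)
    print random()              # deterministic given the seed above
    print uniform(10.0, 20.0)   # double in [10.0, 20.0)
    print randrange(0, 100, 5)  # one of 0, 5, ..., 95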
|
{
"content_hash": "dd46d52b7b0e3dad187b643ef682a3f8",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 80,
"avg_line_length": 34.09722222222222,
"alnum_prop": 0.5765784114052953,
"repo_name": "MalloyPower/parsing-python",
"id": "bc0d1a4520eac209807d29e79eac9d329b82c3cb",
"size": "4910",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.4.3/Lib/whrandom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
import unittest
import uuid
import cloudinitd
from cloudinitd.pollables import *
class PollableTests(unittest.TestCase):
def tearDown(self):
cloudinitd.close_log_handlers()
def test_popen_fail(self):
cmd = "/bin/false"
pexe = PopenExecutablePollable(cmd, allowed_errors=0)
pexe.start()
try:
failed = True
rc = pexe.poll()
while not rc:
rc = pexe.poll()
except ProcessException, pex:
failed = False
self.assertFalse(failed)
def test_popen_fail_retry(self):
cmd = "/bin/false"
pexe = PopenExecutablePollable(cmd, allowed_errors=3)
pexe.start()
try:
failed = True
rc = pexe.poll()
while not rc:
rc = pexe.poll()
except ProcessException, pex:
failed = False
ex = pexe.get_exception()
self.assertEqual(ex, pex)
self.assertFalse(failed)
def test_null_poll(self):
np = NullPollable()
np.start()
rc = np.poll()
self.assertTrue(rc)
def test_popen_true(self):
cmd = cloudinitd.find_true()
pexe = PopenExecutablePollable(cmd, allowed_errors=0)
pexe.start()
rc = pexe.poll()
while not rc:
rc = pexe.poll()
def test_popen_output(self):
test_out = str(uuid.uuid1())
cmd = "/bin/echo %s" % (test_out)
pexe = PopenExecutablePollable(cmd, allowed_errors=0)
pexe.start()
rc = pexe.poll()
while not rc:
rc = pexe.poll()
out = pexe.get_output().strip()
print test_out
print out
self.assertEqual(test_out, out)
def test_popen_badcmd(self):
cmd = str(uuid.uuid1())
pexe = PopenExecutablePollable(cmd, allowed_errors=0)
pexe.start()
try:
failed = True
rc = pexe.poll()
while not rc:
rc = pexe.poll()
except ProcessException, pex:
failed = False
self.assertFalse(failed)
def test_popen_timeoutex(self):
cmd = "/bin/sleep 30"
pexe = PopenExecutablePollable(cmd, allowed_errors=0, timeout=5)
pexe.start()
try:
failed = True
rc = pexe.poll()
while not rc:
rc = pexe.poll()
except TimeoutException, pex:
failed = False
self.assertFalse(failed)
def test_multilevel_simple(self):
cmd = cloudinitd.find_true()
pexe1_1 = PopenExecutablePollable(cmd, allowed_errors=0)
pexe1_2 = PopenExecutablePollable(cmd, allowed_errors=0)
pexe2_1 = PopenExecutablePollable(cmd, allowed_errors=0)
pexe2_2 = PopenExecutablePollable(cmd, allowed_errors=0)
mcp = MultiLevelPollable()
mcp.add_level([pexe1_1, pexe1_2])
mcp.add_level([pexe2_1, pexe2_2])
mcp.start()
rc = False
while not rc:
rc = mcp.poll()
def test_multilevel_error(self):
cmd = cloudinitd.find_true()
pexe1_1 = PopenExecutablePollable(cmd, allowed_errors=0, timeout=60)
pexe1_2 = PopenExecutablePollable(cmd, allowed_errors=0, timeout=60)
pexe2_1 = PopenExecutablePollable(cmd, allowed_errors=0, timeout=60)
pexe2_2 = PopenExecutablePollable("NotACommand", allowed_errors=0, timeout=5)
mcp = MultiLevelPollable()
mcp.add_level([pexe1_1, pexe1_2])
mcp.add_level([pexe2_1, pexe2_2])
mcp.start()
rc = False
try:
failed = True
while not rc:
rc = mcp.poll()
except:
failed = False
self.assertFalse(failed)
def test_popen_cancel(self):
cmd = "/bin/sleep 100000"
pexe1_1 = PopenExecutablePollable(cmd, allowed_errors=0)
pexe1_1.start()
pexe1_1.cancel()
rc = False
try:
while not rc:
rc = pexe1_1.poll()
self.fail("Should have raised an exception")
except ProcessException, pex:
pass
|
{
"content_hash": "a4d6901f265301faaf6e25145b81dd56",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 85,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.5510887772194305,
"repo_name": "nimbusproject/cloudinit.d",
"id": "8ea4bc97092d96dd3ccb354bcc2e318eb79f384c",
"size": "4179",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudinitd/nosetests/pollable_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "262162"
},
{
"name": "Shell",
"bytes": "1414"
}
],
"symlink_target": ""
}
|
import os
import re
import cStringIO as StringIO
from random import choice
from PIL import Image
SURVEY_ID = 'sdss'
DATA_DIR = 'data'
FILE_DIR = os.path.dirname(__file__)
DATA_PATH = os.path.join(FILE_DIR, DATA_DIR)
print DATA_PATH
data_index = {} # map from original file to a list of its RGB splits
data_index_reverse = {}
data_index_size = 0
data_index_keys = []
all_files = []
for root, subFolders, files in os.walk(DATA_PATH):
for file in files:
f = os.path.relpath(os.path.join(root, file), FILE_DIR)
if not f.endswith('.jpg'): continue
if f.find('split') > -1:
# r, g, b for each split should be grouped under the same key
key = re.sub('_[rgb]_', '_', f)
data_index.setdefault(key, [])
data_index[key].append(f)
data_index_reverse[f] = key
else:
data_index.setdefault(f, [])
# Prune bad entries
for key in data_index.keys():
if len(data_index[key]) < 2:
del data_index[key]
data_index_keys = data_index.keys()
data_index_size = len(data_index_keys)
print 'SDSS data index size: %d images' % data_index_size
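# Editor's note (hedged): the grouping above collapses the three RGB split files
# for one frame onto a single key, e.g. hypothetical names 'img_split_r_1.jpg',
# 'img_split_g_1.jpg' and 'img_split_b_1.jpg' all map to 'img_split_1.jpg' via
# re.sub('_[rgb]_', '_', f), so data_index[key] ends up holding the three paths.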
def get_unknown_group():
# Return random rgb group
keys = data_index[choice(data_index_keys)]
keys.sort()
ret_keys = [{'key': key, 'offset_x': 0, 'offset_y': 0} for key in keys]
return {'survey': SURVEY_ID, 'images': ret_keys}
def get_control_group():
# placeholder for when I set up a real API for image sources
pass
def image_from_key(key):
# TODO this should really be static. This is awful.
if key in data_index_reverse:
im = Image.open(os.path.join(FILE_DIR, key))
# Scale for now
# im.thumbnail((661, 454), Image.ANTIALIAS) # 1/3 size
output = StringIO.StringIO()
im.save(output, format='PNG')
return output.getvalue()
return None
|
{
"content_hash": "34e39b46e5802240e4ab716a32877adf",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 73,
"avg_line_length": 27.276923076923076,
"alnum_prop": 0.6559503666102651,
"repo_name": "ahoym/asterank",
"id": "1bb5b36104d83bbc45301712a6d79a9252fcaacf",
"size": "1866",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sdss/sdss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6912"
},
{
"name": "HTML",
"bytes": "2832585"
},
{
"name": "JavaScript",
"bytes": "229741"
},
{
"name": "Python",
"bytes": "76771"
},
{
"name": "Shell",
"bytes": "20604"
}
],
"symlink_target": ""
}
|
__author__ = 'waziz'
from itertools import izip
import numpy as np
def scaled_fmap(fmap, scaling=1.0):
"""Returns a feature map scaled by a constant"""
if type(fmap) is dict:
return {k: v*scaling for k, v in fmap.iteritems()}
else:
return {k: v*scaling for k, v in fmap}
def fmap_dot(fmap, wmap):
return np.sum([fmap.get(fname, 0) * fweight for fname, fweight in wmap.iteritems()])
#return sum(fmap.get(fname, 0) * fweight for fname, fweight in wmap.iteritems())
def str2fmap(line):
"""converts a string of the type 'f1=v1 f2=v2' into a feature map {f1: v1, f2: v2}"""
return {k: float(v) for k, v in (pair.split('=') for pair in line.split())}
def fpairs2str(iterable):
"""converts an iterable of feature-value pairs into string"""
return ' '.join('%s=%s' % (k, str(v)) for k, v in iterable)
def dict2str(d, separator='=', sort=False, reverse=False):
"""converts an iterable of feature-value pairs into string"""
if sort:
return ' '.join('{0}{1}{2}'.format(k, separator, v) for k, v in sorted(d.iteritems(), reverse=reverse))
else:
return ' '.join('{0}{1}{2}'.format(k, separator, v) for k, v in d.iteritems())
def npvec2str(nparray, fnames=None):
"""converts an array of feature values into a string (fnames can be provided)"""
if fnames is None:
return ' '.join(str(fvalue) for fvalue in nparray)
else:
return ' '.join('{0}={1}'.format(fname, fvalue) for fname, fvalue in izip(fnames, nparray))
def kv2str(key, value, named=True):
return '{0}={1}'.format(key, value) if named else str(value)
def resample(p, size):
"""Resample elements according to a distribution p and returns an empirical distribution"""
support = p.size
hist, edges = np.histogram(np.random.choice(np.arange(support), size, p=p), bins=np.arange(support + 1), density=True)
return hist
def obj2id(element, vocab):
v = vocab.get(element, None)
if v is None:
v = len(vocab)
vocab[element] = v
return v
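# Illustrative usage of the helpers above (the feature names and weights are
# made up for the example, not part of this module):
#
#   fmap = str2fmap('lm=0.5 tm=-1.25')           # {'lm': 0.5, 'tm': -1.25}
#   wmap = {'lm': 2.0, 'tm': 1.0}
#   score = fmap_dot(fmap, wmap)                 # 0.5*2.0 + (-1.25)*1.0 = -0.25
#   fpairs2str(sorted(fmap.iteritems()))         # 'lm=0.5 tm=-1.25'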
|
{
"content_hash": "6d49dc8dfda2283ecaa6d38e8d97e7d2",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 122,
"avg_line_length": 33.016129032258064,
"alnum_prop": 0.6394723986321446,
"repo_name": "wilkeraziz/chisel",
"id": "6dffa54327bbf8de0bf2ad45f416684a69b2482a",
"size": "2047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/chisel/util/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "276021"
}
],
"symlink_target": ""
}
|
class YamlException(Exception):
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(YamlException, self).__init__(message)
class YaqlException(Exception):
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(YaqlException, self).__init__(message)
|
{
"content_hash": "c48896266bd3576bf530a0af5e0679a6",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 70,
"avg_line_length": 31.833333333333332,
"alnum_prop": 0.675392670157068,
"repo_name": "ALU-CloudBand/yaqluator",
"id": "f4894cb51b60203cb365bfee9486d28ae27a0fd7",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/utils/exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4999"
},
{
"name": "HTML",
"bytes": "10551"
},
{
"name": "JavaScript",
"bytes": "15037"
},
{
"name": "Python",
"bytes": "8558"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, render_template
from collections import defaultdict
app = Flask(__name__)
clients_so_far = defaultdict(dict)
@app.route('/update', methods=['POST'])
def update():
if 'progress' in request.form:
clients_so_far[request.form['uuid']].update({'progress': int(float(request.form['progress']))})
if 'time' in request.form:
clients_so_far[request.form['uuid']].update({'time': int(float(request.form['time']))})
return 'ok'
@app.route('/')
def index():
return render_template('index.html', clients=clients_so_far)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
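# Illustrative client call (not part of this app): the /update endpoint reads
# the form fields 'uuid', 'progress' and 'time'; numeric values may be
# fractional strings because they are coerced with int(float(...)) above.
#
#   import requests
#   requests.post('http://localhost:5000/update',
#                 data={'uuid': 'client-1', 'progress': '42.7', 'time': '3.2'})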
|
{
"content_hash": "1e776439e427d91422eb9542e48eca2d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 99,
"avg_line_length": 29.857142857142858,
"alnum_prop": 0.671451355661882,
"repo_name": "jzelinskie/chihaya-demo",
"id": "bda3cf1469706fbee3cffe9e0150d8480d718000",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_progress.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "949"
},
{
"name": "Python",
"bytes": "2843"
},
{
"name": "Shell",
"bytes": "2030"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import helper
import sys
import unittest
import ops
class PermTestCase(unittest.TestCase):
def test_default(self):
p = ops.perm()
self.assertEqual(p.read, None)
self.assertEqual(p.write, None)
self.assertEqual(p.execute, None)
def test_seven(self):
p = ops.perm(7)
self.assertTrue(p.read)
self.assertTrue(p.write)
self.assertTrue(p.execute)
def test_six(self):
p = ops.perm(6)
self.assertTrue(p.read)
self.assertTrue(p.write)
self.assertFalse(p.execute)
def test_five(self):
p = ops.perm(5)
self.assertTrue(p.read)
self.assertFalse(p.write)
self.assertTrue(p.execute)
def test_four(self):
p = ops.perm(4)
self.assertTrue(p.read)
self.assertFalse(p.write)
self.assertFalse(p.execute)
def test_three(self):
p = ops.perm(3)
self.assertFalse(p.read)
self.assertTrue(p.write)
self.assertTrue(p.execute)
def test_two(self):
p = ops.perm(2)
self.assertFalse(p.read)
self.assertTrue(p.write)
self.assertFalse(p.execute)
def test_one(self):
p = ops.perm(1)
self.assertFalse(p.read)
self.assertFalse(p.write)
self.assertTrue(p.execute)
def test_zero(self):
p = ops.perm(0)
self.assertFalse(p.read)
self.assertFalse(p.write)
self.assertFalse(p.execute)
class ModeTestCase(unittest.TestCase):
def test_default(self):
m = ops.mode()
self.assertEqual(m.user.read, None)
self.assertEqual(m.group.write, None)
self.assertEqual(m.other.execute, None)
def test_get(self):
m = ops.mode(0o740)
self.assertTrue(m.user.read)
self.assertTrue(m.user.write)
self.assertTrue(m.user.execute)
self.assertTrue(m.group.read)
self.assertFalse(m.group.write)
self.assertFalse(m.group.execute)
self.assertFalse(m.other.read)
self.assertFalse(m.other.write)
self.assertFalse(m.other.execute)
def test_set(self):
m = ops.mode(0o640)
m.group.write = True
m.other.read = True
self.assertEqual(m.numeric, 0o664)
def test_set_type(self):
m = ops.mode()
m.user = 7
self.assertTrue(m.user.read)
self.assertTrue(m.user.write)
self.assertTrue(m.user.execute)
m.group = 5
self.assertTrue(m.group.read)
self.assertFalse(m.group.write)
self.assertTrue(m.group.execute)
m.other = 2
self.assertFalse(m.other.read)
self.assertTrue(m.other.write)
self.assertFalse(m.other.execute)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "600055e3119c3a7ab5947ff7a53753da",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 47,
"avg_line_length": 24.02542372881356,
"alnum_prop": 0.5950617283950618,
"repo_name": "silas/ops",
"id": "769b8dcee9350df0e6ea3b9964c311334bc2f877",
"size": "2835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_mode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "97220"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django import forms
from django.test import TestCase
from django_filters.filterset import FilterSet
from django_filters.filters import CharFilter
from django_filters.filters import ChoiceFilter
from .models import User
from .models import Book
from .models import STATUS_CHOICES
class FilterSetFormTests(TestCase):
def test_form_from_empty_filterset(self):
class F(FilterSet):
pass
f = F(queryset=Book.objects.all()).form
self.assertIsInstance(f, forms.Form)
def test_form(self):
class F(FilterSet):
class Meta:
model = Book
fields = ('title',)
f = F().form
self.assertIsInstance(f, forms.Form)
self.assertEqual(list(f.fields), ['title'])
def test_custom_form(self):
class MyForm(forms.Form):
pass
class F(FilterSet):
class Meta:
model = Book
form = MyForm
f = F().form
self.assertIsInstance(f, MyForm)
def test_form_prefix(self):
class F(FilterSet):
class Meta:
model = Book
fields = ('title',)
f = F().form
self.assertIsNone(f.prefix)
f = F(prefix='prefix').form
self.assertEqual(f.prefix, 'prefix')
def test_form_fields(self):
class F(FilterSet):
class Meta:
model = User
fields = ['status']
f = F().form
self.assertEqual(len(f.fields), 1)
self.assertIn('status', f.fields)
self.assertEqual(sorted(f.fields['status'].choices),
sorted(STATUS_CHOICES))
def test_form_fields_exclusion(self):
class F(FilterSet):
title = CharFilter(exclude=True)
class Meta:
model = Book
fields = ('title',)
f = F().form
self.assertEqual(f.fields['title'].help_text, "This is an exclusion filter")
def test_form_fields_using_widget(self):
class F(FilterSet):
status = ChoiceFilter(widget=forms.RadioSelect,
choices=STATUS_CHOICES)
class Meta:
model = User
fields = ['status', 'username']
f = F().form
self.assertEqual(len(f.fields), 2)
self.assertIn('status', f.fields)
self.assertIn('username', f.fields)
self.assertEqual(sorted(f.fields['status'].choices),
sorted(STATUS_CHOICES))
self.assertIsInstance(f.fields['status'].widget, forms.RadioSelect)
def test_form_field_with_custom_label(self):
class F(FilterSet):
title = CharFilter(label="Book title")
class Meta:
model = Book
fields = ('title',)
f = F().form
self.assertEqual(f.fields['title'].label, "Book title")
self.assertEqual(f['title'].label, 'Book title')
def test_form_field_with_manual_name(self):
class F(FilterSet):
book_title = CharFilter(name='title')
class Meta:
model = Book
fields = ('book_title',)
f = F().form
self.assertEqual(f.fields['book_title'].label, None)
self.assertEqual(f['book_title'].label, 'Book title')
def test_form_field_with_manual_name_and_label(self):
class F(FilterSet):
f1 = CharFilter(name='title', label="Book title")
class Meta:
model = Book
fields = ('f1',)
f = F().form
self.assertEqual(f.fields['f1'].label, "Book title")
self.assertEqual(f['f1'].label, 'Book title')
def test_filter_with_initial(self):
class F(FilterSet):
status = ChoiceFilter(choices=STATUS_CHOICES, initial=1)
class Meta:
model = User
fields = ['status']
f = F().form
self.assertEqual(f.fields['status'].initial, 1)
def test_form_is_not_bound(self):
class F(FilterSet):
class Meta:
model = Book
fields = ('title',)
f = F().form
self.assertFalse(f.is_bound)
self.assertEqual(f.data, {})
def test_form_is_bound(self):
class F(FilterSet):
class Meta:
model = Book
fields = ('title',)
f = F({'title': 'Some book'}).form
self.assertTrue(f.is_bound)
self.assertEqual(f.data, {'title': 'Some book'})
def test_ordering(self):
class F(FilterSet):
class Meta:
model = User
fields = ['username', 'status']
order_by = ['status']
f = F().form
self.assertEqual(len(f.fields), 3)
self.assertIn('o', f.fields)
self.assertEqual(f.fields['o'].choices, [('status', 'Status')])
def test_ordering_uses_all_fields(self):
class F(FilterSet):
class Meta:
model = User
fields = ['username', 'status']
order_by = True
f = F().form
self.assertEqual(f.fields['o'].choices,
[('username', 'Username'), ('-username', 'Username (descending)'), ('status', 'Status'), ('-status', 'Status (descending)')])
def test_ordering_uses_filter_label(self):
class F(FilterSet):
username = CharFilter(label='Account')
class Meta:
model = User
fields = ['username', 'status']
order_by = True
f = F().form
self.assertEqual(f.fields['o'].choices,
[('username', 'Account'), ('-username', 'Account (descending)'), ('status', 'Status'), ('-status', 'Status (descending)')])
def test_ordering_uses_explicit_filter_name(self):
class F(FilterSet):
account = CharFilter(name='username')
class Meta:
model = User
fields = ['account', 'status']
order_by = True
f = F().form
self.assertEqual(f.fields['o'].choices,
[('account', 'Account'), ('-account', 'Account (descending)'), ('status', 'Status'), ('-status', 'Status (descending)')])
def test_ordering_with_overridden_field_name(self):
"""
        Set the `order_by_field` on the filterset and ensure that the
field name is respected.
"""
class F(FilterSet):
order_by_field = 'order'
class Meta:
model = User
fields = ['username', 'status']
order_by = ['status']
f = F().form
self.assertNotIn('o', f.fields)
self.assertIn('order', f.fields)
self.assertEqual(f.fields['order'].choices, [('status', 'Status')])
def test_ordering_with_overridden_field_name_and_descending(self):
"""
        Set the `order_by_field` on the filterset and ensure that the
field name is respected.
"""
class F(FilterSet):
order_by_field = 'order'
class Meta:
model = User
fields = ['username', 'status']
order_by = ['status', '-status']
f = F().form
self.assertNotIn('o', f.fields)
self.assertIn('order', f.fields)
self.assertEqual(f.fields['order'].choices, [('status', 'Status'), ('-status', 'Status (descending)')])
def test_ordering_with_overridden_field_name_and_using_all_fields(self):
class F(FilterSet):
order_by_field = 'order'
class Meta:
model = User
fields = ['username', 'status']
order_by = True
f = F().form
self.assertIn('order', f.fields)
self.assertEqual(f.fields['order'].choices,
[('username', 'Username'), ('-username', 'Username (descending)'), ('status', 'Status'), ('-status', 'Status (descending)')])
def test_ordering_with_custom_display_names(self):
class F(FilterSet):
class Meta:
model = User
fields = ['username', 'status']
order_by = [('status', 'Current status')]
f = F().form
self.assertEqual(
f.fields['o'].choices, [('status', 'Current status')])
|
{
"content_hash": "3e5eb919e84a4af76bf01480f5b0c115",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 137,
"avg_line_length": 30.923357664233578,
"alnum_prop": 0.5267319721468193,
"repo_name": "jpadilla/django-filter",
"id": "684b74dedf737fa4ac44269e88525c0cd8a7bbcb",
"size": "8473",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "tests/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "73"
},
{
"name": "Python",
"bytes": "167320"
}
],
"symlink_target": ""
}
|
import json
from pymongo import MongoClient
import os
from launchpad_reporting.db import db
from launchpad_reporting.launchpad.lpdata import LaunchpadAnonymousData
lpdata = LaunchpadAnonymousData(bugs_db=db)
connection = MongoClient()
db = connection["assignees"]
assignees = db.assignees
path_to_data = "/".join(os.path.abspath(__file__).split('/')[:-1])
with open('{0}/fuel_teams.json'.format(path_to_data)) as data_file:
data = json.load(data_file)
teams = ["Fuel", "Partners", "mos-linux", "mos-openstack"]
db.drop_collection(assignees)
global_team_list = {}
for team in teams:
people = []
people.extend(data[team]["teams"])
team_list = {}
for t in data[team]["teams"]:
team_list[t] = []
tt = lpdata.launchpad.people[t]
members = tt.members_details
for member in members:
people.append(member.member.name)
team_list[t].append(member.member.name)
global_team_list[team] = team_list
assignees.insert({"Team": "{0}".format(team),
"Members": people})
with open("file.json", "w") as f:
f.write(json.dumps(global_team_list))
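# Illustrative shape of the output (team and member names are hypothetical):
#   assignees collection: {"Team": "Fuel", "Members": ["fuel-core", "alice", "bob", ...]}
#   file.json:            {"Fuel": {"fuel-core": ["alice", "bob"]}, ...}
# i.e. "Members" mixes sub-team names with the people resolved from Launchpad,
# while file.json keeps the per-sub-team breakdown.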
|
{
"content_hash": "e4920a3d43932acffc10a3e670534efb",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 71,
"avg_line_length": 25.488888888888887,
"alnum_prop": 0.6495204882301656,
"repo_name": "Mirantis/launchpad-reports-summary",
"id": "054a3ce609ed9495d9f1926901c39449b8fe8993",
"size": "1194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collect_assignees.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40598"
},
{
"name": "HTML",
"bytes": "69003"
},
{
"name": "JavaScript",
"bytes": "476106"
},
{
"name": "Python",
"bytes": "88669"
}
],
"symlink_target": ""
}
|
import gepify
from . import GepifyTestCase
from flask import url_for, session, g
import spotipy
from urllib import parse
from unittest import mock
from gepify.services import spotify
from gepify.providers import songs
from werkzeug.contrib.cache import SimpleCache
import json
import os
import time
class MockResponse:
def __init__(self, data, status_code):
self.text = json.dumps(data)
self.status_code = status_code
def mocked_spotify_api_post(url, **kwargs):
data = kwargs.get('data', None)
headers = kwargs.get('headers', None)
if url == 'https://accounts.spotify.com/api/token':
assert 'Authorization' in headers
assert headers['Authorization'].startswith('Basic')
assert 'grant_type' in data
assert data['grant_type'] in ['authorization_code', 'refresh_token']
if data['grant_type'] == 'authorization_code':
assert 'code' in data
assert 'redirect_uri' in data
return MockResponse({
'access_token': 'dummy code',
'refresh_token': 'refresh me',
'expires_in': 60
}, 200)
else:
assert 'refresh_token' in data
assert data['refresh_token'] == 'refresh me'
return MockResponse({
'access_token': 'new dummy code',
'refresh_token': 'refresh me again',
'expires_in': 60
}, 200)
def mocked_spotify_api_404(*args, **kwargs):
return MockResponse({}, 404)
def mocked_spotify_api_401(*args, **kwargs):
return MockResponse({}, 401)
class MockSpotipy:
def __init__(self, auth=None):
self.auth = auth
def me(self):
return {
'id': 'test_user'
}
def user_playlists(self, username):
with open('tests/spotify_dump/spotify_user_playlists.json') as f:
return json.loads(f.read())
def user_playlist(self, username, playlist_id, fields=None):
if username == 'test_user' and playlist_id == '1':
with open('tests/spotify_dump/spotify_user_playlist.json') as f:
return json.loads(f.read())
def current_user_saved_albums(self):
with open('tests/spotify_dump/'
'spotify_current_user_saved_albums.json') as f:
return json.loads(f.read())
def album(self, album_id):
if album_id == '0AYlrY39QmCNwR4r1uzlv3':
with open('tests/spotify_dump/spotify_album.json') as f:
return json.loads(f.read())
def next(self, result):
if result['next'] == 'https://api.spotify.com/v1/users/aplusk' + \
'/playlists/5ExcrV72XoJ6aQT8plfau3/' + \
'tracks?offset=100&limit=100':
with open('tests/spotify_dump/spotify_next.json') as f:
return json.loads(f.read())
class ProfileMixin():
@mock.patch('requests.post', side_effect=mocked_spotify_api_post)
def login(self, *args):
login_response = self.client.get(url_for('spotify.login'))
spotify_redirect = login_response.location
self.assertTrue(spotify_redirect.startswith(
'https://accounts.spotify.com/authorize'))
params = parse.parse_qs(parse.urlparse(spotify_redirect).query)
response = self.client.get(url_for(
'spotify.callback', code='dummy code', state=params['state'][0]
))
self.assertRedirects(response, url_for('spotify.index'))
return response
@mock.patch('requests.post', side_effect=mocked_spotify_api_post)
def logout(self, *args):
logout_response = self.client.get(url_for('spotify.logout'))
self.assertRedirects(logout_response, url_for('views.index'))
return logout_response
@mock.patch('requests.post', side_effect=mocked_spotify_api_post)
class SpotifyDecoratorsTestCase(GepifyTestCase, ProfileMixin):
def test_login_required_decorator(self, *args):
@self.app.route('/test')
@spotify.view_decorators.login_required
def test():
return 'You should be logged in to read this'
response = self.client.get('/test')
self.assertRedirects(response, url_for('spotify.login'))
login_response = self.login()
response = self.client.get('/test')
self.assert200(response)
self.assertIn(b'You should be logged in to read this', response.data)
with self.client.session_transaction() as sess:
sess['spotify_refresh_token'] = 'false token'
sess['spotify_expires_at'] = -1
response = self.client.get('/test')
self.assertEqual(response.status_code, 503)
self.assertIn(b'There was an error with authenticating', response.data)
self.logout()
response = self.client.get('/test')
self.assertRedirects(response, url_for('spotify.login'))
def test_login_required_if_spotify_session_has_expired(self, post):
@self.app.route('/test')
@spotify.view_decorators.login_required
def test():
return 'You should be logged in to read this'
with self.client as client:
self.login()
old_token = session['spotify_access_token']
old_refresh_token = session['spotify_refresh_token']
with client.session_transaction() as sess:
sess['spotify_expires_at'] = -1
response = self.client.get('/test')
new_token = session['spotify_access_token']
new_refresh_token = session['spotify_refresh_token']
self.assertEqual(old_token, 'dummy code')
self.assertEqual(old_refresh_token, 'refresh me')
self.assertEqual(new_token, 'new dummy code')
self.assertEqual(new_refresh_token, 'refresh me again')
def test_logout_required_decorator(self, post):
@self.app.route('/test')
@spotify.view_decorators.logout_required
def test():
return 'You should be logged out to read this'
response = self.client.get('/test')
self.assert200(response)
self.assertIn(b'You should be logged out to read this', response.data)
login_response = self.login()
response = self.client.get('/test')
self.assert403(response)
self.assertIn(
b'You need to be logged out to see this page', response.data)
self.logout()
response = self.client.get('/test')
self.assert200(response)
self.assertIn(b'You should be logged out to read this', response.data)
class SpotifyModelsTestCase(GepifyTestCase):
def setUp(self):
g.spotipy = MockSpotipy()
songs.cache = SimpleCache()
def tearDown(self):
g.spotipy = None
@mock.patch('requests.post', side_effect=mocked_spotify_api_post)
def test_get_access_token_from_code(self, post):
        token_data = spotify.models.get_access_token_from_code('some code')
self.assertEqual(post.call_count, 1)
self.assertEqual(token_data['access_token'], 'dummy code')
self.assertEqual(token_data['refresh_token'], 'refresh me')
self.assertEqual(token_data['expires_in'], 60)
@mock.patch('requests.post', side_effect=mocked_spotify_api_post)
def test_get_access_token_from_refresh_token(self, post):
token_data = spotify.models.get_access_token_from_refresh_token(
'refresh me')
self.assertEqual(post.call_count, 1)
self.assertEqual(token_data['access_token'], 'new dummy code')
self.assertEqual(token_data['refresh_token'], 'refresh me again')
self.assertEqual(token_data['expires_in'], 60)
def test_save_token_data_in_session(self):
token_data = {
'access_token': 'access token',
'refresh_token': 'refresh token',
'expires_in': 60
}
spotify.models.save_token_data_in_session(token_data)
self.assertEqual(session['spotify_access_token'], 'access token')
self.assertEqual(session['spotify_refresh_token'], 'refresh token')
self.assertEqual(session['spotify_expires_at'], int(time.time()) + 60)
# @mock.patch('requests.post', side_effect=mocked_spotify_api_post)
# def test_request_access_token_with_refresh_token_request(self, post):
# payload = {
# 'grant_type': 'refresh_token',
# 'refresh_token': 'refresh me'
# }
# with self.client.session_transaction() as sess:
# sess['spotify_access_token'] = 'some random test token'
# sess['spotify_refresh_token'] = 'some random refresh token'
# self.assertNotIn('spotify_expires_at', session)
# spotify.models.request_access_token(payload)
# self.assertEqual(post.call_count, 1)
# self.assertEqual(session['spotify_access_token'], 'new dummy code')
# self.assertEqual(session['spotify_refresh_token'], 'refresh me again')
# self.assertIn('spotify_expires_at', session)
@mock.patch('requests.post', side_effect=mocked_spotify_api_404)
def test_get_access_token_from_code_with_error(self, post):
with self.assertRaisesRegex(
RuntimeError, 'Could not get authentication token'):
spotify.models.get_access_token_from_code('code')
@mock.patch('requests.post', side_effect=mocked_spotify_api_404)
def test_get_access_token_from_refresh_token_with_error(self, post):
with self.assertRaisesRegex(
RuntimeError, 'Could not get authentication token'):
spotify.models.get_access_token_from_refresh_token('token')
def test_get_username(self):
self.assertEqual(spotify.models.get_username(), 'test_user')
self.assertEqual(spotify.models.get_username(), 'test_user')
def test_get_song_name(self):
track = {
'artists': [{'name': 'Artist 1'}, {'name': 'Artist 2'}],
'name': 'Track name'
}
self.assertEqual(spotify.models.get_song_name(track),
'Artist 1 & Artist 2 - Track name')
def test_get_playlists(self):
playlists = spotify.models.get_playlists()
self.assertEqual(len(playlists), 25)
self.assertEqual(playlists[0]['name'], 'Starred')
self.assertEqual(playlists[-1]['name'], 'Bozdugan')
def test__get_playlist(self):
playlist = spotify.models._get_playlist('test_user', '1')
self.assertEqual(playlist['name'], 'Starred')
self.assertIsNone(playlist['description'])
self.assertIn('image', playlist)
self.assertEqual(playlist['id'], 'test_user:1')
self.assertEqual(len(playlist['tracks']), 200)
def test__get_album(self):
album = spotify.models._get_album('0AYlrY39QmCNwR4r1uzlv3')
self.assertEqual(album['name'], 'Bozdugan')
self.assertEqual(album['id'], 'album:0AYlrY39QmCNwR4r1uzlv3')
self.assertIn('image', album)
self.assertEqual(len(album['tracks']), 13)
@mock.patch('gepify.providers.songs.get_song',
side_effect=lambda song_name: {'name': song_name})
def test_get_playlist_without_keeping_song_names(self, get_song):
playlist = spotify.models.get_playlist('test_user:1')
self.assertEqual(playlist['id'], 'test_user:1')
self.assertIsNone(playlist['description'])
self.assertEqual(playlist['name'], 'Starred')
self.assertEqual(len(playlist['tracks']), 200)
self.assertEqual(get_song.call_count, len(playlist['tracks']))
self.assertEqual(playlist['tracks'][25]['name'],
'Leona Lewis - Bleeding Love')
self.assertEqual(playlist['tracks'][42]['name'],
'The National - Anyone’s Ghost')
get_song.reset_mock()
playlist = spotify.models.get_playlist('album:0AYlrY39QmCNwR4r1uzlv3')
self.assertEqual(playlist['name'], 'Bozdugan')
self.assertEqual(playlist['id'], 'album:0AYlrY39QmCNwR4r1uzlv3')
self.assertEqual(len(playlist['tracks']), 13)
self.assertEqual(get_song.call_count, len(playlist['tracks']))
class SpotifyViewsTestCase(GepifyTestCase, ProfileMixin):
def setUp(self):
songs.cache = SimpleCache()
@classmethod
def tearDownClass(cls):
if os.path.isfile('test song.mp3'):
os.remove('test song.mp3')
if os.path.isfile('playlist.zip'):
os.remove('playlist.zip')
def test_index_if_not_logged_in(self):
response = self.client.get(url_for('spotify.index'))
self.assertRedirects(response, url_for('spotify.login'))
@mock.patch('spotipy.Spotify', side_effect=MockSpotipy)
def test_index_if_logged_in(self, *args):
self.login()
response = self.client.get(url_for('spotify.index'))
self.assert200(response)
self.assertIn(b'Bozdugan', response.data)
@mock.patch('requests.post', side_effect=mocked_spotify_api_post)
def test_login(self, *args):
response = self.client.get(url_for('spotify.login'))
self.assertTrue(response.location.startswith(
'https://accounts.spotify.com/authorize/'))
self.login()
response = self.client.get(url_for('spotify.login'))
self.assertIn(b'You need to be logged out to see this page',
response.data)
@mock.patch('requests.post', side_effect=mocked_spotify_api_post)
def test_login_callback(self, post, *args):
response = self.client.get(
url_for('spotify.callback', error='access_denied'))
self.assertEqual(response.status_code, 503)
self.assertIn(b'There was an error while trying to authenticate you.'
b'Please, try again.', response.data)
self.assertEqual(post.call_count, 0)
with self.client.session_transaction() as sess:
sess['spotify_auth_state'] = 'some state'
response = self.client.get(
url_for('spotify.callback', state='other state', code='123'))
self.assertEqual(response.status_code, 503)
self.assertIn(b'There was an error while trying to authenticate you.'
b'Please, try again.', response.data)
self.assertEqual(post.call_count, 0)
response = self.client.get(
url_for('spotify.callback', state='some state', code='123'))
self.assertRedirects(response, url_for('spotify.index'))
self.assertEqual(post.call_count, 1)
@mock.patch('requests.post', side_effect=mocked_spotify_api_404)
def test_login_callback_with_spotify_error(self, post, *args):
with self.client.session_transaction() as sess:
sess['spotify_auth_state'] = 'some state'
response = self.client.get(
url_for('spotify.callback', state='some state', code='123'))
self.assertEqual(response.status_code, 503)
self.assertIn(b'There was an error while trying to authenticate you.'
b'Please, try again.', response.data)
self.assertEqual(post.call_count, 1)
@mock.patch('spotipy.Spotify', side_effect=MockSpotipy)
def test_logout(self, Spotify):
response = self.client.get(url_for('spotify.logout'))
self.assertRedirects(response, url_for('views.index'))
response = self.client.get(url_for('spotify.index'))
self.assertRedirects(response, url_for('spotify.login'))
self.login()
response = self.client.get(url_for('spotify.index'))
self.assert200(response)
response = self.client.get(url_for('spotify.logout'))
self.assertRedirects(response, url_for('views.index'))
response = self.client.get(url_for('spotify.index'))
self.assertRedirects(response, url_for('spotify.login'))
@mock.patch('spotipy.Spotify', side_effect=MockSpotipy)
@mock.patch('gepify.providers.songs.get_song',
side_effect=lambda song_name: {'name': song_name, 'files': {}})
def test_get_playlist(self, get_song, Spotify):
self.login()
response = self.client.get(
url_for('spotify.playlist', id='test_user:1'))
self.assert200(response)
self.assertIn(b'Use Somebody', response.data)
def test_download_song_in_unsupported_format(self, *args):
self.login()
response = self.client.get(
url_for('spotify.download_song',
song_name='test song', format='wav'))
self.assertEqual(response.status_code, 400)
self.assertIn(b'Unsupported format', response.data)
def test_download_song_with_unsupported_provider(self, *args):
self.login()
response = self.client.get(
url_for('spotify.download_song',
song_name='song with / / slashes /',
format='mp3', provider='zamunda'))
self.assertEqual(response.status_code, 400)
self.assertIn(b'Unsupported provider', response.data)
@mock.patch('gepify.providers.songs.has_song_format',
side_effect=lambda song, format: False)
@mock.patch('gepify.providers.songs.download_song.delay',
side_effect=lambda *args, **kwargs: None)
def test_download_song_if_song_is_missing(self, *args):
self.login()
response = self.client.get(
url_for('spotify.download_song',
song_name='test song', format='mp3'))
self.assert200(response)
self.assertIn(b'Your song has started downloading.', response.data)
@mock.patch('gepify.providers.songs.has_song_format',
side_effect=lambda song, format: True)
@mock.patch('gepify.providers.songs.get_song',
side_effect=lambda song: {
'name': song, 'files': {'mp3': os.getcwd() + '/' + song + '.mp3'}})
def test_download_song_if_song_is_not_missing(self, *args):
with open('test song.mp3', 'w+') as f:
f.write('some data')
self.login()
response = self.client.get(
url_for('spotify.download_song',
song_name='test song', format='mp3'))
self.assert200(response)
self.assertEqual(b'some data', response.data)
self.assertTrue(response.content_type.startswith('audio'))
response.close()
def test_download_playlist_with_wrong_post_data(self, *args):
self.login()
response = self.client.post(url_for('spotify.download_playlist'))
self.assertEqual(response.status_code, 400)
response = self.client.post(url_for('spotify.download_playlist'),
data={'format': 'mp3'})
self.assertEqual(response.status_code, 400)
response = self.client.post(
url_for('spotify.download_playlist'),
data={'playlist_id': 'test_user:some id', 'format': 'wav'})
self.assertEqual(response.status_code, 400)
self.assertIn(b'Unsupported format', response.data)
response = self.client.post(
url_for('spotify.download_playlist'),
data={'playlist_id': 'test_user:some id', 'provider': 'zamunda'})
self.assertEqual(response.status_code, 400)
self.assertIn(b'Unsupported provider', response.data)
@mock.patch('gepify.providers.playlists.has_playlist',
side_effect=lambda *args: False)
@mock.patch('gepify.providers.playlists.download_playlist.delay')
@mock.patch('spotipy.Spotify', side_effect=MockSpotipy)
def test_download_playlist_if_playlist_is_missing(self, *args):
self.login()
response = self.client.post(
url_for('spotify.download_playlist'),
data={'playlist_id': 'test_user:1', 'format': 'mp3'})
self.assert200(response)
self.assertIn(b'Your playlist is getting downloaded', response.data)
@mock.patch('gepify.providers.playlists.has_playlist',
side_effect=lambda *args: True)
@mock.patch('gepify.providers.playlists.get_playlist',
side_effect=lambda *args: {
'path': os.getcwd() + '/playlist.zip',
'checksum': '89c2226a90943679844cdc71693bc543'})
@mock.patch('spotipy.Spotify', side_effect=MockSpotipy)
def test_download_playlist_if_playlist_is_not_missing(self, *args):
with open('playlist.zip', 'w+') as f:
f.write('some data')
self.login()
response = self.client.post(
url_for('spotify.download_playlist'),
data={'playlist_id': 'test_user:1', 'format': 'mp3'})
self.assert200(response)
self.assertEqual(b'some data', response.data)
self.assertEqual(response.content_type, 'application/zip')
response.close()
@mock.patch('gepify.providers.playlists.has_playlist',
side_effect=lambda *args: True)
@mock.patch('gepify.providers.playlists.get_playlist',
side_effect=lambda *args: {
'path': 'playlist.zip',
                    'checksum': 'old checksum'})
@mock.patch('gepify.providers.playlists.download_playlist.delay')
@mock.patch('spotipy.Spotify', side_effect=MockSpotipy)
def test_download_playlist_if_playlist_has_changed(self, *args):
self.login()
response = self.client.post(
url_for('spotify.download_playlist'),
data={'playlist_id': 'test_user:1', 'format': 'mp3'})
self.assert200(response)
self.assertIn(b'Your playlist is getting downloaded', response.data)
@mock.patch('requests.post', side_effect=mocked_spotify_api_post)
def test_get_access_token_success(self, post):
response = self.client.get(
'/spotify/get_access_token/code'
)
self.assertEqual(post.call_count, 1)
self.assert200(response)
self.assertIn(b'dummy code', response.data)
self.assertIn(b'refresh me', response.data)
self.assertIn(b'60', response.data)
@mock.patch('requests.post', side_effect=mocked_spotify_api_404)
def test_get_access_token_error(self, post):
response = self.client.get(
'/spotify/get_access_token/code'
)
self.assertEqual(post.call_count, 1)
self.assertEqual(response.status_code, 401)
self.assertIn(b'There was an error while trying to authenticate you.'
b'Please, try again.', response.data)
@mock.patch('requests.post', side_effect=mocked_spotify_api_post)
def test_refresh_access_token_success(self, post):
response = self.client.get(
'/spotify/refresh_access_token/refresh me'
)
self.assertEqual(post.call_count, 1)
self.assert200(response)
self.assertIn(b'new dummy code', response.data)
self.assertIn(b'refresh me again', response.data)
self.assertIn(b'60', response.data)
@mock.patch('requests.post', side_effect=mocked_spotify_api_401)
def test_refresh_access_token_error(self, post):
response = self.client.get(
'/spotify/refresh_access_token/refresh me'
)
self.assertEqual(post.call_count, 1)
self.assertEqual(response.status_code, 401)
self.assertIn(b'Unable to refresh token.', response.data)
|
{
"content_hash": "73936829aef1cc1ed843e07b28480407",
"timestamp": "",
"source": "github",
"line_count": 562,
"max_line_length": 87,
"avg_line_length": 41.612099644128115,
"alnum_prop": 0.6199863165996751,
"repo_name": "nvlbg/gepify",
"id": "c2a9ca3a6eca70988379779f9838948618ef660a",
"size": "23388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_spotify.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1135"
},
{
"name": "Dockerfile",
"bytes": "225"
},
{
"name": "HTML",
"bytes": "6522"
},
{
"name": "Python",
"bytes": "126835"
}
],
"symlink_target": ""
}
|
from IPython import get_ipython
save_get_ipython_system = get_ipython().system
get_ipython().system = lambda x: get_ipython_system(x)
# interactiveshell.py's system_piped() function comment says:
# "we store the exit_code in user_ns."
# This function checks the exit_code and raises an exception if it is not 0.
def get_ipython_system(_cmd):
save_get_ipython_system(_cmd)
if get_ipython().user_ns['_exit_code'] != 0:
raise RuntimeError('Unexpected exit code: %d' % get_ipython().user_ns['_exit_code'])
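# Illustrative effect (hypothetical example, not in the original file): once the
# wrapper is installed, a failing shell command run from a notebook cell, e.g.
#   get_ipython().system('false')
# leaves a non-zero _exit_code in user_ns, so the wrapper raises
# RuntimeError('Unexpected exit code: 1') instead of failing silently.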
|
{
"content_hash": "871df1a1ccecd0081124721557da1438",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 92,
"avg_line_length": 43,
"alnum_prop": 0.7093023255813954,
"repo_name": "NII-cloud-operation/Jupyter-LC_docker",
"id": "5c964c03a45d48b8cac922d01bac898c831b338f",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf/10-custom-get_ipython_system.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1959"
},
{
"name": "Dockerfile",
"bytes": "7223"
},
{
"name": "Jupyter Notebook",
"bytes": "1253093"
},
{
"name": "Lua",
"bytes": "417"
},
{
"name": "Python",
"bytes": "3736"
},
{
"name": "Shell",
"bytes": "618"
}
],
"symlink_target": ""
}
|