text
stringlengths 4
1.02M
| meta
dict |
|---|---|
'''-------------------------------------------------------------------------
Copyright IBM Corp. 2015, 2015 All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------'''
from dragonclient.common import http
from dragonclient.v1 import dr
class Client(http.HTTPClient):
    """Client for the Dragon v1 API.

    :param string endpoint: A user-supplied endpoint URL for the dragon
        service.
    :param string token: Token for authentication.
    :param integer timeout: Allows customization of the timeout for client
        http requests. (optional)
    """

    def __init__(self, *args, **kwargs):
        """Initialize a new client for the Dragon v1 API."""
        super(Client, self).__init__(*args, **kwargs)
        # Disaster-recovery API manager: all DR calls go through this handle.
        self.dr = dr.DRManager(self)
|
{
"content_hash": "fcfe90176b6820bb418b1c585f977712",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 40.96969696969697,
"alnum_prop": 0.6257396449704142,
"repo_name": "os-cloud-storage/openstack-workload-disaster-recovery-client",
"id": "51f96f24ba4e8848cf3602f94806f716f7266ff2",
"size": "1353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragonclient/v1/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "104592"
},
{
"name": "Shell",
"bytes": "6954"
}
],
"symlink_target": ""
}
|
from steerclear import app
from steerclear.forms import RideForm
from steerclear.models import *
import unittest, flask
"""
RideFormTestCase
----------------
Test class for the RideForm class
"""
class RideFormTestCase(unittest.TestCase):
"""
submit_form
-----------
helper method to submit a RideForm by faking
a request context. Returns True is the form
validated and False if not.
*payload* is a dictionary of name/value pairs
of the form data that is being submitted
"""
def submit_form(self, payload):
with app.test_request_context():
form = RideForm(data=payload)
return form.validate()
def setUp(self):
self.payload = {
u"num_passengers": 4,
u"start_latitude": 1.1,
u"start_longitude": 2.2,
u"end_latitude": 3.3,
u"end_longitude": 4.4,
}
"""
test_ride_form_correct_submit
-----------------------------
Tests that a RideForm can be validated correctly
"""
def test_ride_form_correct_submit(self):
result = self.submit_form(self.payload)
self.assertTrue(result)
"""
test_data_required_fields
-------------------------
tests that a RideForm is not valid unless
all fields are included in the form data
"""
def test_data_required_fields(self):
payload = self.payload
for key in payload.keys():
bad_payload = payload.copy()
bad_payload.pop(key, None)
result = self.submit_form(bad_payload)
self.assertFalse(result)
"""
test_num_passengers_min_range
-----------------------------
Tests that a RideForm accepts the correct min
range value for the 'num_passengers' field
"""
def test_num_passengers_min_range(self):
payload = self.payload.copy()
payload[u'num_passengers'] = 1
result = self.submit_form(payload)
self.assertTrue(result)
"""
test_num_passengers_max_range
-----------------------------
Tests that a RideForm accepts the correct max
range value for the 'num_passengers' field
"""
def test_num_passengers_max_range(self):
payload = self.payload.copy()
payload[u'num_passengers'] = 8
result = self.submit_form(payload)
self.assertTrue(result)
"""
test_num_passengers_bad_range
-----------------------------
Tests that a RideForm does not accept values
for the 'num_passengers' field that are out of range
"""
def test_num_passengers_bad_range(self):
bad_payload = self.payload.copy()
bad_payload[u'num_passengers'] = 0
result = self.submit_form(bad_payload)
self.assertFalse(result)
bad_payload[u'num_passengers'] = -1
result = self.submit_form(bad_payload)
self.assertFalse(result)
bad_payload[u'num_passengers'] = -100
result = self.submit_form(bad_payload)
self.assertFalse(result)
bad_payload[u'num_passengers'] = 9
result = self.submit_form(bad_payload)
self.assertFalse(result)
bad_payload[u'num_passengers'] = 100
result = self.submit_form(bad_payload)
self.assertFalse(result)
|
{
"content_hash": "6cc134a88b328ffc93cb2fa92ea121dd",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 56,
"avg_line_length": 29.8,
"alnum_prop": 0.587248322147651,
"repo_name": "beblount/Steer-Clear-Backend",
"id": "ec7dab13922808da2a084ae8ee7a3047406c31dd",
"size": "3278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/forms_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "54"
},
{
"name": "CSS",
"bytes": "29"
},
{
"name": "HTML",
"bytes": "2505"
},
{
"name": "JavaScript",
"bytes": "2215"
},
{
"name": "Python",
"bytes": "36742"
},
{
"name": "Shell",
"bytes": "277"
}
],
"symlink_target": ""
}
|
from rockstar import RockStar
pascal_code = """program HelloWorld;
begin
writeln('Hello World');
end."""
# Fake 400 days of commit history using the Pascal snippet above.
rock_it_bro = RockStar(days=400, file_name='helloworld.pas', code=pascal_code)
rock_it_bro.make_me_a_rockstar()
|
{
"content_hash": "2019c3057801437a0c48cf26ece2ee58",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 78,
"avg_line_length": 22.5,
"alnum_prop": 0.7155555555555555,
"repo_name": "RobertWang/rockstar",
"id": "eab133691567f5b802f5a4379cf6afce8671d5a7",
"size": "225",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "examples/pascal_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12549"
}
],
"symlink_target": ""
}
|
import configparser
import os
from logging import getLogger
import emoji
from .utils import markup_inline_keyboard
logger = getLogger(__name__)


def load_user_config():
    """Read bot settings from ``config.ini`` or ``$CONFIG_FILE``.

    Returns an ``(allowed_groups, name, token)`` tuple, or
    ``(None, None, None)`` when no usable configuration is found.
    """
    config = configparser.ConfigParser()
    # ConfigParser.read returns the list of files successfully parsed, so
    # the ``or`` falls through to $CONFIG_FILE when config.ini is absent.
    try:
        found = config.read('config.ini') or config.read(os.environ['CONFIG_FILE'])
    except KeyError:
        # CONFIG_FILE is not set and config.ini was not found.
        logger.error('config file NOT FOUND')
        return None, None, None
    if not found or 'BOT' not in config:
        # Bug fix: previously a CONFIG_FILE pointing at a missing file (or a
        # file without a [BOT] section) crashed with an uncaught KeyError on
        # the lookup below instead of failing gracefully.
        logger.error('config file NOT FOUND')
        return None, None, None
    bot = config['BOT']
    return bot['allowed groups'].split(','), bot['name'], bot['token']
class BotConfig:
    """Bot configuration, loaded once at import time."""

    ALLOWED_GROUPS, NAME, TOKEN = load_user_config()

    @classmethod
    def check(cls):
        """Return True when every configuration value is present."""
        values = (cls.ALLOWED_GROUPS, cls.NAME, cls.TOKEN)
        return all(values)
class ErrorReply:
    """Canned error messages (in Italian) sent back to chat users."""

    # "Error! Correct syntax: {}" -- formatted with the expected command.
    INCORRECT_SYNTAX = 'Errore!\nSintassi corretta: {}'
    # "Error! Invalid time!"
    INVALID_TIME = 'Errore!\nOrario invalido!'
    # "I found nothing :( please tell an admin so we can improve the service!"
    WORD_NOT_FOUND = "Non ho trovato nulla:( per favore avvisa un admin così possiamo migliorare il servizio!"
    # "Error! You have no active dungeon, send me the dungeon-entry message :)"
    NO_ACTIVE_DUNGEONS = 'Errore!\nNon hai un dungeon attivo, mandami il messaggio di entrata nel dungeon:)'
class Url:
    """Fenix web API endpoints used by the bot."""

    ITEMS = 'http://fenixweb.net:3300/api/v1/items'
    # NOTE(review): this URL embeds what looks like an access token in the
    # path; consider moving it to configuration instead of source control.
    GROUP = 'http://fenixweb.net:3300/api/v2/oI65t30vbkwBisPw6668/team/'
    SHOPS = 'http://fenixweb.net:3300/api/v1/updatedshops/1'
class Emoji:
    """UTF-8 byte forms and ready-to-send emoji used by the bot."""

    # Every known emoji, space-stripped and UTF-8 encoded, for raw matching.
    BYTES = [
        symbol.replace(' ', '').encode('utf-8')
        for symbol in emoji.UNICODE_EMOJI
    ]

    # Directional arrows for the dungeon keyboards.
    ARROW_UP = emoji.emojize(':arrow_up:', use_aliases=True)
    ARROW_LEFT = emoji.emojize(':arrow_left:', use_aliases=True)
    ARROW_RIGHT = emoji.emojize(':arrow_right:', use_aliases=True)
    # Sentiment markers used when classifying dungeon rooms.
    NEUTRAL = emoji.emojize(':full_moon_with_face:', use_aliases=True)
    POSITIVE = emoji.emojize(':green_heart:', use_aliases=True)
    NEGATIVE = emoji.emojize(':red_circle:', use_aliases=True)
    # Miscellaneous UI symbols.
    CROSS = emoji.emojize(':x:', use_aliases=True)
    CHECK = emoji.emojize(':white_check_mark:', use_aliases=True)
    VACATION = emoji.emojize(':calendar:', use_aliases=True)
class Dungeon:
    """Static lookup tables and helpers for parsing dungeon game messages.

    The Italian phrases below are matched against messages sent by the
    game, so they must stay byte-identical to the game's wording.
    """

    # Maps a regex fragment of the game's room-description message to a
    # short room-type label.
    RE = {
        "(^L'aria si fa più pesante.|^Incontri un)": 'mostro',
        'Aprendo la porta ti ritrovi in un ambiente aperto,': 'vecchia',
        'Oltrepassando la porta ti trovi davanti ad altre due porte': 'due porte',
        "Appena entrato nella stanza vedi nell'angolo": 'aiuta',
        "Questa stanza è vuota, c'è solo una piccola fessura sul muro di fronte": 'tributo',
        "Un cartello con un punto esclamativo ti preoccupa, al centro della stanza": 'ascia',
        "Davanti a te si erge un portale completamente rosso": 'desideri',
        "Appena entrato nella stanza noti subito una strana fontana situata nel centro": 'fontana',
        "Il corridoio si stringe in un'umida strettoia, sembrerebbe un vicolo cieco!": 'leve',
        "Nella stanza incontri un marinaio dall'aria furba": 'marinaio',
        "Entri nella stanza e per sbaglio pesti una mattonella leggermente rovinata": 'mattonella',
        "Raggiungi una stanza con un'incisione profonda:": 'meditazione',
        "Nella stanza incontri un viandante": "mercante",
        "Una luce esagerata ti avvolge, esci in un piccolo spiazzo": "pozzo",
        "Appena aperta la porta della stanza": "pulsantiera",
        "Al centro della stanza vedi un mucchietto di monete": "monete",
        "Raggiungi una stanza suddivisa in due, vedi un oggetto per lato": 'raro',
        "Nella stanza sembra esserci uno scrigno pronto per essere aperto": 'scrigno',
        "Entri in una stanza apparentemente vuota": 'stanza vuota',
        "Entri in una stanza piena d'oro luccicante e una spada": 'spada',
        "Nella stanza incontri un predone del deserto dall'aria docile": 'predone',
        "Camminando per raggiungere la prossima stanza, una trappola": 'trappola',
        "Percorrendo un corridoio scivoli su una pozzanghera": 'trappola',
        "Vedi un Nano della terra di Grumpi e ti chiedi": 'trappola',
        "Uno strano pulsante rosso come un pomodoro ti incuriosisce": 'trappola',
        "In questa stanza non noti nessuna porta, al loro posto 3 incisioni con un pulsante ciascuna": 'incisioni',
        # NOTE(review): this label ('mappatore distratto') has no entry in
        # EMOJIS below, which instead has 'minatore distratto' -- one of the
        # two spellings is probably wrong; confirm against game messages.
        "Ehy amico lo sai chi sono": 'mappatore distratto',
        "Al centro della stanza vedi un signore anziano con gli occhi sbarrati": 'vecchio cieco',
        "Entri in una stanza con un piccolo specchio al centro": 'specchio',
        "Entri nella stanza e incontri l'Alchimista dell'Ovest": 'alchimista',
        "Raggiungi una stanza completamente trasparente": "Cuore e spirito",
        "Spalancata la porta della stanza vieni sbalzato": "Energia magica",
        "Sull'angolo della stanza noti un uomo magro con un cappello a forma di Bomba": "Bombarolo",
        "Entri in una stanza che non ha affatto le sembianze di una stanza": "Mercante draconico",
        "Questa stanza è strana, scorgi solamente una fessura sul muro": "Stanza strana",
        "Senti puzza di bruciato, ti accorgi di essere entrato": "esplosivi",
        "Entri in una stanza completamente luccicante, quasi accecante": "gioielliere",
        "Non fai che un passo, una voce mite ma ferma ti paralizza": "contrabbandiere del coso",
        "Entrando nella stanza pesti una leva": "maledizione",
        "Raggiungi una stanza con un cartello con su scritto": "vicolo cieco",
        "Entri in una stanza che anziché una parete": "burrone",
        "Hai schivato con destrezza una trappola piazzata sul muro della stanza": "trappola",
        "Entri in una stanza completamente piena di polvere": "stanza polvere",
    }
    # Emoji shown next to each room label; the '' key is the fallback used
    # when a room is not recognised.
    EMOJIS = {
        'mostro': emoji.emojize(':boar:', use_aliases=True),
        'tributo': Emoji.NEGATIVE,
        'vecchia': Emoji.NEUTRAL,
        'due porte': Emoji.NEUTRAL,
        'aiuta': Emoji.POSITIVE,
        'ascia': emoji.emojize(':dragon_face:', use_aliases=True),
        'desideri': Emoji.NEUTRAL,
        'fontana': Emoji.POSITIVE,
        'leve': Emoji.NEUTRAL,
        'marinaio': Emoji.NEUTRAL,
        'mattonella': emoji.emojize(':dragon_face:', use_aliases=True),
        'meditazione': Emoji.NEUTRAL,
        "mercante": Emoji.NEUTRAL,
        "pozzo": Emoji.NEGATIVE,
        "pulsantiera": Emoji.NEGATIVE,
        "monete": emoji.emojize(':moneybag:', use_aliases=True),
        'raro': Emoji.POSITIVE,
        'scrigno': Emoji.POSITIVE,
        'stanza vuota': Emoji.POSITIVE,
        'spada': emoji.emojize(':dollar:', use_aliases=True),
        'predone': Emoji.NEUTRAL,
        'trappola': Emoji.NEGATIVE,
        'incisioni': Emoji.NEGATIVE,
        '': emoji.emojize(':question:', use_aliases=True),
        'vecchio cieco': Emoji.NEUTRAL,
        # NOTE(review): RE above produces 'mappatore distratto', not
        # 'minatore distratto' -- this entry looks unreachable as written.
        'minatore distratto': Emoji.NEGATIVE,
        'specchio': Emoji.NEUTRAL,
        'alchimista': Emoji.NEUTRAL,
        "Cuore e spirito": Emoji.NEUTRAL,
        "Energia magica": Emoji.NEUTRAL,
        "Bombarolo": Emoji.NEUTRAL,
        "Mercante draconico": Emoji.NEUTRAL,
        "Stanza strana": Emoji.NEUTRAL,
        "esplosivi": Emoji.NEUTRAL,
        "gioielliere": Emoji.NEUTRAL,
        "contrabbandiere del coso": Emoji.NEUTRAL,
        "maledizione": Emoji.NEUTRAL,
        "vicolo cieco": Emoji.NEUTRAL,
        "burrone": Emoji.NEUTRAL,
        "stanza polvere": Emoji.NEUTRAL,
    }
    # Every distinct room label that RE can produce.
    ROOMS = set(RE.values())
    # Number of rooms per dungeon, keyed by the dungeon's display name.
    LENGTH = {
        "Il Burrone Oscuro": 10,
        "La Grotta Infestata": 15,
        "Il Vulcano Impetuoso": 20,
        "La Caverna degli Orchi": 25,
        "Il Cratere Ventoso": 30,
        "Il Deserto Rosso": 40,
        "La Foresta Maledetta": 45,
        "La Vetta delle Anime": 50,
        "Il Lago Evanescente": 55,
        "Il Labirinto Spettrale": 60,
        "La Vallata Impervia": 75
    }
    # Short code for each dungeon: lowercase initials of every word after
    # the first, e.g. "Il Burrone Oscuro" -> "bo".
    ACRONYMS = {''.join([w[0].lower() for w in key.split(' ')][1:]): key for key in LENGTH}
    # Maps the arrow emoji a user taps to a 0-based exit index.
    DIRECTIONS = {Emoji.ARROW_LEFT: 0, Emoji.ARROW_UP: 1, Emoji.ARROW_RIGHT: 2}
    # Inline keyboard listing every dungeon, for the stats command.
    MARKUP = markup_inline_keyboard([[(key, f"stats1click-{key}")] for key in LENGTH])

    @staticmethod
    def length(name):
        """Return the room count for *name*.

        Drops the trailing word of *name* before the lookup -- presumably a
        suffix appended to the dungeon's display name (TODO confirm which).
        """
        dungeon_name = ' '.join(name.split(' ')[:-1])
        return Dungeon.LENGTH[dungeon_name]

    @staticmethod
    def stringify_room(i, left, up, right, emojis):
        """Format room *i*'s three exits as a Markdown message block."""
        def room_emoji(room):
            # Any label containing 'mostro' shares the generic monster emoji.
            return emojis.get(room) if 'mostro' not in room else emojis.get('mostro')
        return f"---*Stanza*: *{i}*---\n{Emoji.ARROW_LEFT} {left} {room_emoji(left)}\n" \
            f"{Emoji.ARROW_UP} {up} {room_emoji(up)}\n" \
            f"{Emoji.ARROW_RIGHT} {right} {room_emoji(right)}\n"

    @staticmethod
    def map_directions(dungeon, start, end, user, json=True):
        """Build the up/down paging keyboard for the dungeon map view."""
        return markup_inline_keyboard(
            [[(emoji.emojize(":arrow_double_up:", use_aliases=True), f"mapclick-{dungeon}:{start}:{end}:up:{user}")],
             [(emoji.emojize(
                 ":arrow_double_down:", use_aliases=True), f"mapclick-{dungeon}:{start}:{end}:down:{user}")]],
            json=json)
|
{
"content_hash": "0efb02af4c89fd392a4be2ca1e6a5741",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 117,
"avg_line_length": 47.375,
"alnum_prop": 0.6441436273947458,
"repo_name": "akita8/helper_bot",
"id": "be566fd3df0bb9aa36dd0f75501303226678a359",
"size": "8723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helper_bot/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44590"
}
],
"symlink_target": ""
}
|
from telemetry import decorators
from telemetry.testing import legacy_page_test_case
from measurements import skpicture_printer
class SkpicturePrinterUnitTest(legacy_page_test_case.LegacyPageTestCase):
    """Smoke test: SkpicturePrinter saves at least one picture."""

    # Picture printing is not supported on all platforms.
    @decorators.Disabled('android', 'chromeos')
    def testSkpicturePrinter(self):
        printer = skpicture_printer.SkpicturePrinter(self.options.output_dir)
        results = self.RunPageTest(printer, 'file://blank.html')
        samples = results['saved_picture_count']['samples']
        self.assertEqual(1, len(samples))
        self.assertGreater(samples[0], 0)
|
{
"content_hash": "42e3ba0c57a6fcf8e01357b53e744f82",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 43.86666666666667,
"alnum_prop": 0.7781155015197568,
"repo_name": "nwjs/chromium.src",
"id": "dd5742152e7704720902d6082ca78ff2b2f24ae4",
"size": "799",
"binary": false,
"copies": "7",
"ref": "refs/heads/nw70",
"path": "tools/perf/measurements/skpicture_printer_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
'''Generates certificate chains for testing name normalization.'''
import os
import subprocess
import sys
sys.path.append(os.path.join('..', '..', '..', 'tools', 'testserver'))
import minica
def pretty_print_cert(der):
    """Return openssl's human-readable text dump of a DER certificate."""
    cmd = ["openssl", "x509", "-text", "-inform", "DER"]
    proc = subprocess.Popen(cmd,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate(der)
    if proc.returncode != 0:
        raise RuntimeError("openssl failed: %s" % proc.returncode)
    return out
def writecerts(name, der_certs):
fn = os.path.join('..', 'certificates', name)
text_certs = []
print 'pretty printing', fn
for der in der_certs:
text_certs.append(pretty_print_cert(der))
print 'writing', fn
with open(fn, 'w') as f:
f.write('\n'.join(text_certs))
def GenerateCertAndIntermediate(leaf_subject,
                                leaf_issuer,
                                intermediate_subject,
                                ip_sans=None,
                                dns_sans=None,
                                serial=0):
    """Return a [leaf_der, intermediate_der] chain with the given names.

    A serial of 0 (the default) picks a random serial for the leaf.
    """
    if serial == 0:
        serial = minica.RandomNumber(16)
    # Draw the intermediate serial before minting either cert so the random
    # sequence matches the original generation order.
    intermediate_serial = minica.RandomNumber(16)
    leaf_der = minica.MakeCertificate(
        leaf_issuer, leaf_subject, serial, minica.LEAF_KEY,
        minica.INTERMEDIATE_KEY, ip_sans=ip_sans, dns_sans=dns_sans)
    intermediate_der = minica.MakeCertificate(
        minica.ROOT_CN, intermediate_subject, intermediate_serial,
        minica.INTERMEDIATE_KEY, minica.ROOT_KEY, is_ca=True)
    return [leaf_der, intermediate_der]
def GeneratePrintableStringUtf8StringChain():
    """Chain where issuer/subject names differ only by string encoding."""
    suffix = " for PrintableString / Utf8String comparison"
    issuer = "Intermediate" + suffix
    chain = GenerateCertAndIntermediate(
        leaf_subject="Leaf" + suffix,
        leaf_issuer=issuer,
        # unicode() presumably drives minica to emit a Utf8String name
        # rather than PrintableString -- the chain's name suggests so.
        intermediate_subject=unicode(issuer),
        ip_sans=["\x7F\x00\x00\x01"],
        dns_sans=["example.test"])
    writecerts('name-normalization-printable-utf8.pem', chain)
def GenerateCaseFoldChain():
    """Chain where issuer/subject names differ only by letter case."""
    suffix = " for case folding comparison"
    issuer = "Intermediate" + suffix
    chain = GenerateCertAndIntermediate(
        leaf_subject="Leaf" + suffix,
        # Lower-case the 'I's so the leaf's issuer matches the
        # intermediate's subject only after case folding.
        leaf_issuer=issuer.replace('I', 'i'),
        intermediate_subject=issuer,
        ip_sans=["\x7F\x00\x00\x01"],
        dns_sans=["example.test"])
    writecerts('name-normalization-case-folding.pem', chain)
def GenerateNormalChain():
    """Chain where issuer and subject names are byte-for-byte equal."""
    suffix = " for byte equality comparison"
    issuer = "Intermediate" + suffix
    chain = GenerateCertAndIntermediate(
        leaf_subject="Leaf" + suffix,
        leaf_issuer=issuer,
        intermediate_subject=issuer,
        ip_sans=["\x7F\x00\x00\x01"],
        dns_sans=["example.test"])
    writecerts('name-normalization-byteequal.pem', chain)
if __name__ == '__main__':
    # Regenerate all three name-normalization test chains.
    GeneratePrintableStringUtf8StringChain()
    GenerateCaseFoldChain()
    GenerateNormalChain()
|
{
"content_hash": "7a8473a9fed3628574fe42154b1700dc",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 80,
"avg_line_length": 36.03225806451613,
"alnum_prop": 0.5816174276335422,
"repo_name": "endlessm/chromium-browser",
"id": "06429c1f0b039c7d9e73be4c9d21a14c6ccccbdd",
"size": "3539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "net/data/ssl/scripts/generate-name-normalization-certs.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Interface tests."""
import logging
import unittest
from unittest.mock import Mock
from pyof.v0x04.common.port import PortFeatures
from kytos.core.interface import Interface
from kytos.core.switch import Switch
logging.basicConfig(level=logging.CRITICAL)
class TestInterface(unittest.TestCase):
"""Test Interfaces."""
def setUp(self):
"""Create interface object."""
self.iface = self._get_v0x04_iface()
@staticmethod
def _get_v0x04_iface(*args, **kwargs):
"""Create a v0x04 interface object with optional extra arguments."""
switch = Switch('dpid')
switch.connection = Mock()
switch.connection.protocol.version = 0x04
return Interface('name', 42, switch, *args, **kwargs)
def test_speed_feature_none(self):
"""When port's current features is None."""
self.iface.features = None
self.assertIsNone(self.iface.speed)
self.assertEqual('', self.iface.get_hr_speed())
def test_speed_feature_zero(self):
"""When port's current features is 0. E.g. port 65534."""
self.iface.features = 0
self.assertIsNone(self.iface.speed)
self.assertEqual('', self.iface.get_hr_speed())
def test_1_tera_speed(self):
"""1Tb link."""
self.iface.features = PortFeatures.OFPPF_1TB_FD
self.assertEqual(10**12 / 8, self.iface.speed)
self.assertEqual('1 Tbps', self.iface.get_hr_speed())
def test_100_giga_speed(self):
"""100Gb link."""
self.iface.features = PortFeatures.OFPPF_100GB_FD
self.assertEqual(100 * 10**9 / 8, self.iface.speed)
self.assertEqual('100 Gbps', self.iface.get_hr_speed())
def test_40_giga_speed(self):
"""40Gb link."""
self.iface.features = PortFeatures.OFPPF_40GB_FD
self.assertEqual(40 * 10**9 / 8, self.iface.speed)
self.assertEqual('40 Gbps', self.iface.get_hr_speed())
def test_10_giga_speed(self):
"""10Gb link."""
self.iface.features = PortFeatures.OFPPF_10GB_FD
self.assertEqual(10 * 10**9 / 8, self.iface.speed)
self.assertEqual('10 Gbps', self.iface.get_hr_speed())
def test_1_giga_speed(self):
"""1Gb link."""
self.iface.features = PortFeatures.OFPPF_1GB_FD
self.assertEqual(10**9 / 8, self.iface.speed)
self.assertEqual('1 Gbps', self.iface.get_hr_speed())
def test_100_mega_speed(self):
"""100Mb link."""
self.iface.features = PortFeatures.OFPPF_100MB_FD
self.assertEqual(100 * 10**6 / 8, self.iface.speed)
self.assertEqual('100 Mbps', self.iface.get_hr_speed())
def test_10_mega_speed(self):
"""10Mb link."""
self.iface.features = PortFeatures.OFPPF_10MB_FD
self.assertEqual(10 * 10**6 / 8, self.iface.speed)
self.assertEqual('10 Mbps', self.iface.get_hr_speed())
def test_speed_setter(self):
"""Should return speed that was set and not features'."""
expected_speed = 12345
self.iface.features = PortFeatures.OFPPF_10MB_FD
self.iface.set_custom_speed(expected_speed)
actual_speed = self.iface.speed
self.assertEqual(expected_speed, actual_speed)
def test_speed_in_constructor(self):
"""Custom speed should override features'."""
expected_speed = 6789
iface = self._get_v0x04_iface(speed=expected_speed,
features=PortFeatures.OFPPF_10MB_FD)
actual_speed = iface.speed
self.assertEqual(expected_speed, actual_speed)
def test_remove_custom_speed(self):
"""Should return features' speed again when custom's becomes None."""
custom_speed = 101112
of_speed = 10 * 10**6 / 8
iface = self._get_v0x04_iface(speed=custom_speed,
features=PortFeatures.OFPPF_10MB_FD)
self.assertEqual(custom_speed, iface.speed)
iface.set_custom_speed(None)
self.assertEqual(of_speed, iface.speed)
|
{
"content_hash": "35be838ff020e5a653b7eee42c08fb1a",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 77,
"avg_line_length": 37.598130841121495,
"alnum_prop": 0.6273924931643052,
"repo_name": "macartur/kytos",
"id": "4db2eb4df185e6a0024cfa29916e15e5c50649d3",
"size": "4023",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_core/test_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187067"
}
],
"symlink_target": ""
}
|
import datetime
import math
import time
import requests
def main():
    """Run every workload back to back and report total wall-clock time."""
    started = datetime.datetime.now()
    for _ in range(3):
        compute_some()
    for _ in range(2):
        download_some()
    for _ in range(2):
        download_some_more()
    for _ in range(4):
        wait_some()
    elapsed = datetime.datetime.now() - started
    print("Synchronous version done in {:,.2f} seconds.".format(elapsed.total_seconds()))
def compute_some(iterations=10_000_000):
    """Busy-loop computing square roots to simulate CPU-bound work.

    :param iterations: loop bound; the default matches the original
        hard-coded workload, so existing callers are unchanged.
    """
    print("Computing...")
    # The result is deliberately discarded -- this exists only to burn CPU
    # time, so the constant expression is intentionally not hoisted.
    for _ in range(1, iterations):
        math.sqrt(25 ** 25 + .01)
def download_some():
    """Fetch a Talk Python episode page and report its size."""
    print("Downloading...")
    url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2'
    resp = requests.get(url)
    resp.raise_for_status()
    text = resp.text
    # Bug fix: this message previously read "Downloaded (more) ...", which
    # belongs to download_some_more() -- the two labels were swapped.
    print("Downloaded {:,} characters.".format(len(text)))
def download_some_more():
    """Fetch a Python Bytes episode page and report its size."""
    print("Downloading more ...")
    url = 'https://pythonbytes.fm/episodes/show/92/will-your-python-be-compiled'
    resp = requests.get(url)
    resp.raise_for_status()
    text = resp.text
    # Bug fix: this message previously lacked "(more)", which was printed by
    # download_some() instead -- the two labels were swapped.
    print("Downloaded (more) {:,} characters.".format(len(text)))
def wait_some(steps=1000):
    """Sleep in many 1 ms slices to simulate waiting on a slow resource.

    :param steps: loop bound (steps - 1 sleeps); the default matches the
        original hard-coded workload, so existing callers are unchanged.
    """
    print("Waiting...")
    for _ in range(1, steps):
        time.sleep(.001)
if __name__ == '__main__':
    # Script entry point: run the synchronous benchmark.
    main()
|
{
"content_hash": "188c1c9dc98a18f6c687f1c462d46661",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 99,
"avg_line_length": 20.852459016393443,
"alnum_prop": 0.6155660377358491,
"repo_name": "Wintellect/WintellectWebinars",
"id": "2b14dd1f0509c487a54ebb65a9d1cde4e89d1077",
"size": "1272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2019-01-24-async-python-kennedy/code/the_unsync/nosync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "47583"
},
{
"name": "CSS",
"bytes": "39803"
},
{
"name": "HTML",
"bytes": "87870"
},
{
"name": "JavaScript",
"bytes": "4383753"
},
{
"name": "Jupyter Notebook",
"bytes": "234737"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "208421"
},
{
"name": "SCSS",
"bytes": "152"
},
{
"name": "Shell",
"bytes": "4251"
},
{
"name": "TypeScript",
"bytes": "142946"
}
],
"symlink_target": ""
}
|
import os
from os import PathLike
from typing import Union, Dict, Optional
import torch
from torch.cuda import amp
from torch.testing import assert_allclose
import pytest
from allennlp.common.testing import AllenNlpTestCase, run_distributed_test, requires_multi_gpu
from allennlp.nn.util import load_state_dict_distributed
from allennlp.nn.parallel import (
FairScaleFsdpAccelerator,
FairScaleFsdpWrappedModel,
ShardedModuleMixin,
)
class EncoderDecoderModel(torch.nn.Module):
    """
    Simple encoder-decoder model used only for testing.

    The decoder's output projection shares its weight with the input
    embedding so the tests cover weight tying as an edge case.
    """

    def __init__(self, fsdp_wrapper: FairScaleFsdpAccelerator) -> None:
        super().__init__()
        self.embedding = torch.nn.Embedding(12, 4)
        self.emb_proj = fsdp_wrapper.wrap_module(torch.nn.Linear(4, 4))
        self.encoder = fsdp_wrapper.wrap_module(Encoder())
        self.decoder = Decoder(self.embedding, fsdp_wrapper)
        # Buffer exists only to exercise buffer handling in the tests;
        # nothing ever reads it.
        self.register_buffer("buffer", torch.randn(4, 4))

    def tie_weights(self):
        """
        Re-tie the output projection to the embedding. Should be called
        after loading a state dict so the weights stay shared.
        """
        self.decoder.linear.weight = self.embedding.weight

    def forward(self, x):
        hidden = self.embedding(x)
        hidden = self.emb_proj(hidden)
        hidden = self.encoder(hidden)
        return self.decoder(hidden)
class Encoder(torch.nn.Module):
    """Two stacked FeedForward layers."""

    def __init__(self) -> None:
        super().__init__()
        self.ff1 = FeedForward()
        self.ff2 = FeedForward()
        # Buffer exists only to exercise buffer handling in the tests;
        # nothing ever reads it.
        self.register_buffer("buffer", torch.randn(4, 4))

    def forward(self, x):
        hidden = self.ff1(x)
        return self.ff2(hidden)
class Decoder(torch.nn.Module):
    """FeedForward followed by a projection tied to the embedding weight."""

    def __init__(
        self, embedding: torch.nn.Embedding, fsdp_wrapper: FairScaleFsdpAccelerator
    ) -> None:
        super().__init__()
        self.ff = fsdp_wrapper.wrap_module(FeedForward())
        # Deliberately left unwrapped: its weight is tied to the embedding.
        self.linear = torch.nn.Linear(4, 12, bias=False)
        self.linear.weight = embedding.weight
        # Buffer exists only to exercise buffer handling in the tests;
        # nothing ever reads it.
        self.register_buffer("buffer", torch.randn(4, 4))

    def forward(self, x):
        hidden = self.ff(x)
        return self.linear(hidden)
class FeedForward(torch.nn.Module):
    """A single linear layer followed by a ReLU."""

    def __init__(self) -> None:
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)
        self.activation = torch.nn.ReLU()

    def forward(self, x):
        projected = self.linear(x)
        return self.activation(projected)
def _dist_load_and_train(
    global_rank: int,
    world_size: int,
    gpu_id: int,
    test_dir: Union[str, PathLike],
    mixed_precision: bool,
    **kwargs,
):
    """Per-process worker for the distributed test.

    Builds the model, loads rank 0's state dict through the distributed
    loader, verifies sharding, runs one forward/backward/optimizer step,
    and saves per-worker checkpoints into *test_dir* for the main test
    process to consolidate and compare.
    """
    # make sure everything is deterministic.
    torch.manual_seed(global_rank)
    fsdp_wrapper = FairScaleFsdpAccelerator(
        local_rank=global_rank,
        world_size=world_size,
        cuda_device=gpu_id,
        mixed_precision=mixed_precision,
        **kwargs,
    )
    model = EncoderDecoderModel(fsdp_wrapper)
    # Only rank 0 materializes the full state dict; the other ranks pass
    # None (presumably load_state_dict_distributed broadcasts -- confirm).
    state_dict: Optional[Dict[str, torch.Tensor]] = None
    if global_rank == 0:
        embedding_weight = torch.randn(12, 4)
        state_dict = {
            "embedding.weight": embedding_weight,
            "emb_proj.weight": torch.randn(4, 4),
            "emb_proj.bias": torch.randn(4),
            "encoder.ff1.linear.weight": torch.randn(4, 4),
            "encoder.ff1.linear.bias": torch.randn(4),
            "encoder.ff2.linear.weight": torch.randn(4, 4),
            "encoder.ff2.linear.bias": torch.randn(4),
            "encoder.buffer": torch.randn(4, 4),
            "decoder.ff.linear.weight": torch.randn(4, 4),
            "decoder.ff.linear.bias": torch.randn(4),
            # Same tensor object as "embedding.weight" -- the tied weight.
            "decoder.linear.weight": embedding_weight,
            "decoder.buffer": torch.randn(4, 4),
            "buffer": torch.randn(4, 4),
        }
        torch.save(state_dict, os.path.join(test_dir, "state.pt"))
    # Make sure the right modules are sharded.
    assert not isinstance(model.embedding, ShardedModuleMixin)
    assert isinstance(model.encoder, ShardedModuleMixin)
    assert isinstance(model.decoder.ff, ShardedModuleMixin)
    # Now load the state dict... we should be able to do this before wrapping the model itself
    # with the fsdp_wrapper.
    missing_keys, unexpected_keys = load_state_dict_distributed(model, state_dict)
    assert not missing_keys
    assert not unexpected_keys
    # Make sure weights are still tied.
    model.tie_weights()
    # Now wrap outer model.
    model, wrapped_model = fsdp_wrapper.wrap_model(model)
    # TODO: grad scaler doesn't work now due to https://github.com/facebookresearch/fairscale/issues/421.
    # scaler = wrapped_model.init_grad_scaler()
    scaler: Optional[amp.GradScaler] = None
    # Checkpoint each worker's state.
    worker_state = wrapped_model.state_dict()
    for name, value in worker_state["weights"].items():
        # Each tensor should be on the current device if mixed_precision is `False`,
        # otherwise they will be on CPU (since we set `move_params_to_cpu`).
        if mixed_precision:
            assert value.device == torch.device("cpu")
        else:
            assert value.device == torch.device(gpu_id)
        # Either way, tensors returned should be full precision.
        assert value.dtype == torch.float, f"{name} is {value.dtype}"
    # Save state dict from each worker.
    torch.save(worker_state, os.path.join(test_dir, f"state_worker{gpu_id}.pt"))
    # Now we'll make sure we can successfully do a forward pass, backward pass, and optimizer step.
    optim = torch.optim.Adam(wrapped_model.model.parameters(), lr=0.0001)
    x = torch.randint(12, (2, 6)).to(torch.device(gpu_id))
    # Do a forward pass.
    with amp.autocast(enabled=mixed_precision):
        x = wrapped_model.model(x)
    loss = x.sum()
    # And a backwards pass + optimizer step.
    if scaler is not None:
        scaler.scale(loss).backward()
        scaler.step(optim)
        scaler.update()
    else:
        loss.backward()
        optim.step()
    # Now save final state.
    torch.save(wrapped_model.state_dict(), os.path.join(test_dir, f"final_state_worker{gpu_id}.pt"))
class TestFairScaleFsdpAccelerator(AllenNlpTestCase):
    """End-to-end checks for FSDP distributed loading and training."""

    @pytest.mark.parametrize(
        "mixed_precision",
        (True, False),
        ids=lambda val: f"amp={val}",
    )
    @pytest.mark.parametrize(
        "flatten_parameters",
        (True, False),
        ids=lambda val: f"flatten={val}",
    )
    @requires_multi_gpu
    def test_distributed_loading_and_training(self, mixed_precision, flatten_parameters):
        """Run the worker on two GPUs, then verify consolidated state."""
        run_distributed_test(
            [0, 1],
            func=_dist_load_and_train,
            test_dir=self.TEST_DIR,
            mixed_precision=mixed_precision,
            flatten_parameters=flatten_parameters,
        )
        # Now make sure the sharded saved state is exactly the same as the original state when consolidated.
        original_state = torch.load(self.TEST_DIR / "state.pt", map_location="cpu")
        consolidated_state = FairScaleFsdpWrappedModel.consolidate_sharded_state(
            [
                self.TEST_DIR / "state_worker0.pt",
                self.TEST_DIR / "state_worker1.pt",
            ]
        )
        assert set(original_state.keys()) - set(consolidated_state.keys()) == {
            "decoder.linear.weight"  # won't be in the state dict since param is tied to embedding.weight
        }
        for key, tensor0 in original_state.items():
            if key not in consolidated_state:
                continue
            # Need to give extra tolerance for buffers when `mixed_precision` is `True`.
            tolerance = None if not mixed_precision or "buffer" not in key else 1e-3
            tensor1 = consolidated_state[key]
            assert_allclose(
                tensor0,
                tensor1,
                msg=f"{key} is off in consolidated state.\nExpected:\n{tensor0}\nGot:\n{tensor1}",
                atol=tolerance,
                rtol=tolerance,
            )
|
{
"content_hash": "9bb4ef4caefbe776a1ea84795e65a328",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 108,
"avg_line_length": 36.02991452991453,
"alnum_prop": 0.6283952081603605,
"repo_name": "allenai/allennlp",
"id": "1a63f464d6393d5f3901ca9e499eb0c318161794",
"size": "8431",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/nn/parallel/fairscale_fsdp_accelerator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39870"
},
{
"name": "Dockerfile",
"bytes": "1190"
},
{
"name": "Jsonnet",
"bytes": "4469"
},
{
"name": "Makefile",
"bytes": "5306"
},
{
"name": "Perl",
"bytes": "101"
},
{
"name": "Python",
"bytes": "3575059"
},
{
"name": "Scilab",
"bytes": "4085"
},
{
"name": "Shell",
"bytes": "2092"
}
],
"symlink_target": ""
}
|
from CGATReport.Tracker import TrackerSQL
# Shared base class so every tracker in this report module inherits the
# TrackerSQL database-access behaviour.
class ProjectTracker(TrackerSQL):
    '''Define convenience tracks for plots'''
class WordFrequencies(ProjectTracker):
    # ``pattern`` is matched against table names in the database; each
    # capture group value becomes a track (here: every "<x>_counts" table).
    pattern = "(.*)_counts"

    def __call__(self, track):
        # NOTE(review): the %(track)s placeholder is interpolated by
        # TrackerSQL.getValues itself -- do not convert to an f-string.
        return self.getValues("SELECT freq FROM %(track)s_counts")
|
{
"content_hash": "51789da99b7b588e2e0f7d24a13cddcc",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.702054794520548,
"repo_name": "CGATOxford/CGATPipelines",
"id": "9db302d874908fa81273d8159fad7297bf2213f8",
"size": "292",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "CGATPipelines/pipeline_template_data/TemplateReport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4360"
},
{
"name": "HTML",
"bytes": "40732"
},
{
"name": "JavaScript",
"bytes": "302029"
},
{
"name": "Jupyter Notebook",
"bytes": "4393775"
},
{
"name": "Makefile",
"bytes": "45084"
},
{
"name": "Python",
"bytes": "5357820"
},
{
"name": "R",
"bytes": "62312"
},
{
"name": "Shell",
"bytes": "67312"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from congress.api.system import driver_model
from congress.api import webservice
from congress import harness
from congress.managers import datasource as datasource_manager
from congress.tests import base
from congress.tests import helper
class TestDriverModel(base.SqlTestCase):
    """Tests for the datasource driver API model (congress.api.system)."""

    def setUp(self):
        super(TestDriverModel, self).setUp()
        # Restrict the configured drivers to the fake test driver only.
        cfg.CONF.set_override(
            'drivers',
            ['congress.tests.fake_datasource.FakeDataSource'])
        self.cage = harness.create(helper.root_path())
        self.datasource_mgr = datasource_manager.DataSourceManager
        self.datasource_mgr.validate_configured_drivers()
        # Register a single datasource backed by the fake driver.
        req = {'driver': 'fake_datasource',
               'name': 'fake_datasource'}
        req['config'] = {'auth_url': 'foo',
                         'username': 'foo',
                         'password': 'password',
                         'tenant_name': 'foo'}
        self.datasource = self.datasource_mgr.add_datasource(req)
        self.engine = self.cage.service_object('engine')
        self.api_system = self.cage.service_object('api-system')
        # The model under test.
        self.driver_model = (
            driver_model.DatasourceDriverModel("driver-model", {},
                                               policy_engine=self.engine)
        )

    def tearDown(self):
        super(TestDriverModel, self).tearDown()

    def test_drivers_list(self):
        # get_items should return every configured driver (only the fake one).
        context = {}
        expected_ret = {"results": [
            {
                "description": "This is a fake driver used for testing",
                "id": "fake_datasource"
            }
        ]}
        ret = self.driver_model.get_items({}, context)
        self.assertEqual(expected_ret, ret)

    def test_driver_details(self):
        # get_item should expose the driver's config schema, module path,
        # secret fields, and table schema.
        context = {
            "driver_id": "fake_datasource"
        }
        expected_ret = {
            "config": {
                "auth_url": "required",
                "endpoint": "(optional)",
                "password": "required",
                "poll_time": "(optional)",
                "region": "(optional)",
                "tenant_name": "required",
                "username": "required"
            },
            "description": "This is a fake driver used for testing",
            "id": "fake_datasource",
            "module": "congress.tests.fake_datasource.FakeDataSource",
            "secret": ["password"],
            "tables": [{'columns': [
                {'description': 'None', 'name': 'id'},
                {'description': 'None', 'name': 'name'}],
                'table_id': 'fake_table'}
            ]
        }
        ret = self.driver_model.get_item('fake_datasource', {}, context)
        self.assertEqual(expected_ret, ret)

    def test_invalid_driver_details(self):
        # An unknown driver id must raise DataModelException.
        context = {
            "driver_id": "invalid-id"
        }
        self.assertRaises(webservice.DataModelException,
                          self.driver_model.get_item,
                          'invalid-id', {}, context)
|
{
"content_hash": "3c44f56e77d9d99edefdf01102f660be",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 73,
"avg_line_length": 35.88095238095238,
"alnum_prop": 0.5321831453218314,
"repo_name": "ekcs/congress",
"id": "d83888c424eaad2db625bc9e55d4655e996fd6da",
"size": "3604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congress/tests/api/test_driver_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2744"
},
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "HTML",
"bytes": "19644"
},
{
"name": "JavaScript",
"bytes": "9896"
},
{
"name": "Makefile",
"bytes": "503"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "1874341"
},
{
"name": "Shell",
"bytes": "8824"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
import numpy as np
from vtk.numpy_interface import dataset_adapter as dsa
# VTK imports:
from vtk.util import numpy_support as nps
from base import TestBase
# Functionality to test:
from PVGeo.readers import (
DelimitedTextReader,
MadagascarReader,
PackedBinariesReader,
XYZTextReader,
)
# Relative tolerance for floating-point comparisons in these tests.
RTOL = 0.000001
class TestDelimitedTextReader(TestBase):
    """
    Test the `DelimitedTextReader`: A widely used base class
    """

    def setUp(self):
        """Write small comma- and tab-delimited fixture files."""
        TestBase.setUp(self)
        # Create a temporary directory
        self.test_dir = tempfile.mkdtemp()
        self.commafilename = os.path.join(self.test_dir, 'comma.txt')
        self.tabfilename = os.path.join(self.test_dir, 'tab.txt')
        # Make a temporary delimited text file to test:
        lines = [
            'This is a header line to skip',
            'int,str,float ! Comment,this line has the data array names',
            '5,foo,6.9',
            '1,bar,8.5 ! another comment',
            '3,oof,7.7',
        ]
        # Append newlines
        lines = [ln + '\n' for ln in lines]
        # Context managers guarantee the files are closed even if a write
        # fails (the previous explicit open()/close() pairs leaked on error).
        with open(self.commafilename, 'w') as f:
            f.writelines(lines)
        with open(self.tabfilename, 'w') as f:
            f.writelines([ln.replace(',', '\t') for ln in lines])
        return

    def tearDown(self):
        # Remove the test data directory after the test
        shutil.rmtree(self.test_dir)
        TestBase.tearDown(self)

    ###########################################

    def _check_shape(self, table):
        """Assert the table has the fixture's 3 rows and 3 columns."""
        self.assertEqual(table.GetNumberOfRows(), 3)
        self.assertEqual(table.GetNumberOfColumns(), 3)
        return

    def _check_titles(self, table, titles=('int', 'str', 'float')):
        """Assert the column names match ``titles``.

        The default is a tuple to avoid the mutable-default-argument pitfall.
        """
        for col, expected in enumerate(titles):
            self.assertEqual(table.GetColumnName(col), expected)
        return

    def _check_array_types(self, table, types=(16, 13, 11)):
        """Assert the VTK data type ids of the three columns."""
        for col, expected in enumerate(types):
            self.assertEqual(table.GetColumn(col).GetDataType(), expected)
        return

    def _check_array_values(self, table):
        """Assert the cell values match the fixture file contents."""
        wt = dsa.WrapDataObject(table)
        # First (int) column:
        arr = wt.RowData[0]
        self.assertEqual(arr[0], 5)
        self.assertEqual(arr[1], 1)
        self.assertEqual(arr[2], 3)
        # Second (string) column:
        arr = table.GetColumn(1)
        self.assertEqual(arr.GetValue(0), 'foo')
        self.assertEqual(arr.GetValue(1), 'bar')
        self.assertEqual(arr.GetValue(2), 'oof')
        # Third (float) column:
        arr = wt.RowData[2]
        self.assertEqual(arr[0], 6.9)
        self.assertEqual(arr[1], 8.5)
        self.assertEqual(arr[2], 7.7)
        return

    # TODO: check timesteps!

    def test_comma_read(self):
        """`DelimitedTextReader`: comma delimited file"""
        reader = DelimitedTextReader()
        reader.AddFileName(self.commafilename)
        reader.set_delimiter(',')
        reader.set_skip_rows(1)
        reader.set_comments('!')
        reader.Update()
        table = reader.GetOutput()
        self._check_shape(table)
        self._check_titles(table)
        self._check_array_types(table)
        self._check_array_values(table)
        return

    def test_tab_read(self):
        """`DelimitedTextReader`: tab delimited file"""
        reader = DelimitedTextReader()
        reader.AddFileName(self.tabfilename)
        reader.set_split_on_white_space(True)
        reader.set_skip_rows(1)
        reader.set_comments('!')
        reader.Update()
        table = reader.GetOutput()
        self._check_shape(table)
        self._check_titles(table)
        self._check_array_types(table)
        self._check_array_values(table)
        return

    def test_no_titles(self):
        """`DelimitedTextReader`: file without headers"""
        reader = DelimitedTextReader()
        reader.AddFileName(self.commafilename)
        reader.set_delimiter(',')
        # Skip the header AND the titles line so the reader generates names.
        reader.set_skip_rows(2)
        reader.set_has_titles(False)
        reader.set_comments('!')
        reader.Update()
        table = reader.GetOutput()
        self._check_shape(table)
        self._check_titles(table, titles=['Field 0', 'Field 1', 'Field 2'])
        self._check_array_types(table)
        self._check_array_values(table)
        return
###############################################################################
class TestXYZTextReader(TestBase):
    """
    Test the `XYZTextReader`
    """

    def setUp(self):
        TestBase.setUp(self)
        # Create a temporary directory
        self.test_dir = tempfile.mkdtemp()
        self.filename = os.path.join(self.test_dir, 'test.xyz')
        # Make a temporary file to test:
        self.nrows = 100
        self.ncols = 8  # LEAVE ALONE: the header below names exactly 8 columns
        self.header = 'X, dx, Y, dy, Z, dz, approximate distance, cell index'
        self.data = np.random.random((self.nrows, self.ncols))
        # '! ' prefixes the header so the reader treats it as a comment line.
        np.savetxt(
            self.filename, self.data, header=self.header, comments='! ', fmt='%.6e'
        )
        # Read the file back once; every test below inspects self.TABLE.
        reader = XYZTextReader()
        reader.AddFileName(self.filename)
        reader.Update()
        self.TABLE = reader.GetOutput()
        return

    def tearDown(self):
        # Remove the test data directory after the test
        shutil.rmtree(self.test_dir)
        TestBase.tearDown(self)

    ###########################################

    def test_data_aray_titles(self):
        """`XYZTextReader`: check data array names"""
        titles = self.header.split(', ')
        for i in range(self.ncols):
            self.assertEqual(self.TABLE.GetColumnName(i), titles[i])
        return

    def test_data_fidelity(self):
        """`XYZTextReader`: check data fidelity"""
        titles = self.header.split(', ')
        for i in range(self.ncols):
            arr = nps.vtk_to_numpy(self.TABLE.GetColumnByName(titles[i]))
            # Tolerance needed because values round-trip through '%.6e' text.
            self.assertTrue(np.allclose(self.data[:, i], arr, rtol=RTOL))
        return

    def test_shape(self):
        """`XYZTextReader`: data table shape"""
        self.assertEqual(self.TABLE.GetNumberOfRows(), self.nrows)
        self.assertEqual(self.TABLE.GetNumberOfColumns(), self.ncols)
        return
###############################################################################
class TestPackedBinariesReader(TestBase):
    """
    Test the `PackedBinariesReader`
    """

    def setUp(self):
        TestBase.setUp(self)
        # Create a temporary directory
        self.test_dir = tempfile.mkdtemp()
        # Number of values written to each binary fixture file.
        self.n = 100
        return

    def tearDown(self):
        # Remove the test data directory after the test
        shutil.rmtree(self.test_dir)
        TestBase.tearDown(self)

    ###########################################

    def _check_data(self, table, data):
        # Compare the single output column against the array written to disk.
        arr = nps.vtk_to_numpy(table.GetColumn(0))
        self.assertTrue(np.allclose(data, arr, rtol=RTOL))
        return arr

    ###########################################

    def test_floats(self):
        """`PackedBinariesReader`: floats"""
        # Make data and write out
        dtype = np.dtype('f')
        arr = np.array(np.random.random(self.n), dtype=dtype)
        filename = os.path.join(self.test_dir, 'test.bin')
        arr.tofile(filename)
        # Set up reader
        reader = PackedBinariesReader()
        reader.AddFileName(filename)
        reader.set_data_type('f')
        reader.set_data_name('Test Data')
        # Perform read
        reader.Update()
        table = reader.GetOutput()
        # Check output
        self.assertEqual(table.GetColumnName(0), 'Test Data')
        self._check_data(table, arr)
        return

    def test_doubles(self):
        """`PackedBinariesReader`: doubles"""
        # Make data and write out
        dtype = np.dtype('d')
        arr = np.array(np.random.random(self.n), dtype=dtype)
        filename = os.path.join(self.test_dir, 'test.bin')
        arr.tofile(filename)
        # Set up reader
        reader = PackedBinariesReader()
        reader.AddFileName(filename)
        reader.set_data_type('d')
        # Perform read
        reader.Update()
        table = reader.GetOutput()
        # Check output
        self._check_data(table, arr)
        return

    def test_ints(self):
        """`PackedBinariesReader`: ints"""
        # Make data and write out
        # NOTE(review): random floats in [0, 1) cast to 'i' are all 0, so this
        # exercises the int read path but not distinct values -- confirm intent.
        dtype = np.dtype('i')
        arr = np.array(np.random.random(self.n), dtype=dtype)
        filename = os.path.join(self.test_dir, 'test.bin')
        arr.tofile(filename)
        # Set up reader
        reader = PackedBinariesReader()
        reader.AddFileName(filename)
        reader.set_data_type(2)  # 'i' test that sending an int choice works
        # Perform read
        reader.Update()
        table = reader.GetOutput()
        # Check output
        self._check_data(table, arr)
        return

    def test_endian_big(self):
        """`PackedBinariesReader`: floats with big-endianness"""
        # Make data and write out
        dtype = np.dtype('>f')
        arr = np.asarray(np.random.random(self.n), dtype=dtype)
        filename = os.path.join(self.test_dir, 'test.bin')
        arr.tofile(filename)
        # Set up reader
        reader = PackedBinariesReader()
        reader.AddFileName(filename)
        reader.set_data_type('f')
        reader.set_endian('>')
        # Perform read
        reader.Update()
        table = reader.GetOutput()
        # Check output
        self._check_data(table, arr)
        return

    def test_endian_little(self):
        """`PackedBinariesReader`: floats with little-endianness"""
        # Make data and write out
        dtype = np.dtype('<f')
        arr = np.array(np.random.random(self.n), dtype=dtype)
        filename = os.path.join(self.test_dir, 'test.bin')
        arr.tofile(filename)
        # Set up reader
        reader = PackedBinariesReader()
        reader.AddFileName(filename)
        reader.set_data_type('f')
        reader.set_endian(1)  # '<' test that sending an int choice works
        # Perform read
        reader.Update()
        table = reader.GetOutput()
        # Check output
        self._check_data(table, arr)
        return
###############################################################################
class TestMadagascarReader(TestBase):
    """
    Test the `MadagascarReader`

    Does not test inherited functionality
    """

    def tearDown(self):
        # NOTE(review): self.test_dir is created inside test_data_fidelity,
        # not in a setUp -- tearDown assumes that test ran far enough.
        # Remove the test data directory after the test
        shutil.rmtree(self.test_dir)
        TestBase.tearDown(self)

    ###########################################

    def test_data_fidelity(self):
        """`MadagascarReader`: Check data fidelity"""
        # Create a temporary directory
        self.test_dir = tempfile.mkdtemp()
        self.n = 100
        # Make data and write out
        dtype = np.dtype('f')
        self.data = np.array(np.random.random(self.n), dtype=dtype)
        filename = os.path.join(self.test_dir, 'test.rsf')
        # Write ascii header
        lines = ['hello\n'] * 10
        with open(filename, 'w') as f:
            f.writelines(lines)
        # Write data
        with open(filename, 'ab') as f:
            f.write(b'\014\014\004')  # The control sequence separating header from data
            self.data.tofile(f)
        # Set up reader
        reader = MadagascarReader()
        reader.AddFileName(filename)
        reader.set_data_type('f')
        reader.set_data_name('Test Data')
        self.assertEqual(reader.get_data_name(), 'Test Data')
        # Perform read
        reader.Update()
        table = reader.GetOutput()
        arr = nps.vtk_to_numpy(table.GetColumn(0))
        # Values were written as 'f' and read as 'f', so exact closeness holds.
        self.assertTrue(np.allclose(self.data, arr))
        self.assertEqual(table.GetColumnName(0), 'Test Data')
        return
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Allow running this test module directly.
if __name__ == '__main__':
    import unittest
    unittest.main()
###############################################################################
###############################################################################
###############################################################################
|
{
"content_hash": "d7cf69d00eec08428541663c18dc2ca2",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 83,
"avg_line_length": 32.59079283887468,
"alnum_prop": 0.5436710350780821,
"repo_name": "banesullivan/ParaViewGeophysics",
"id": "ce3625c561b9cacfd0bbfc53a3ab9b64fee29974",
"size": "12743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/readers_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "789"
},
{
"name": "Python",
"bytes": "191998"
},
{
"name": "Shell",
"bytes": "9602"
}
],
"symlink_target": ""
}
|
import socket
import struct
# Public API of this module.
__all__ = [
    "is_private_subnet",
    "is_public_subnet",
    "is_valid_ip_address",
    "join_ipv4_segments",
    "increment_ipv4_segments",
]
def is_private_subnet(ip):
    """
    Utility function to check if an IP address is inside a private subnet.

    :type ip: ``str``
    :param ip: IP address to check

    :return: ``bool`` if the specified IP address is private.
    """
    # RFC 1918 private address blocks as (subnet, netmask) pairs.
    rfc1918_blocks = (
        ("10.0.0.0", "255.0.0.0"),
        ("172.16.0.0", "255.240.0.0"),
        ("192.168.0.0", "255.255.0.0"),
    )

    def to_int(dotted):
        # Pack the dotted quad into 4 network-order bytes, then reinterpret
        # them as one native-order unsigned int. The same conversion is used
        # for address, subnet and mask, so byte order cancels out in the
        # masked comparison below.
        return struct.unpack("I", socket.inet_aton(dotted))[0]

    addr = to_int(ip)
    return any(
        (addr & to_int(mask)) == (to_int(subnet) & to_int(mask))
        for subnet, mask in rfc1918_blocks
    )
def is_public_subnet(ip):
    """
    Utility function to check if an IP address is inside a public subnet.

    :type ip: ``str``
    :param ip: IP address to check

    :return: ``bool`` if the specified IP address is public.
    """
    # "Public" is defined here simply as "not in an RFC 1918 private block".
    return not is_private_subnet(ip=ip)
def is_valid_ip_address(address, family=socket.AF_INET):
    """
    Check if the provided address is a valid IPv4 or IPv6 address.

    :param address: IPv4 or IPv6 address to check.
    :type address: ``str``

    :param family: Address family (socket.AF_INET / socket.AF_INET6).
    :type family: ``int``

    :return: ``bool`` True if the provided address is valid.
    """
    # inet_pton raises socket.error for anything that does not parse as an
    # address of the requested family.
    try:
        socket.inet_pton(family, address)
        return True
    except socket.error:
        return False
def join_ipv4_segments(segments):
    """
    Helper method to join ip numeric segment pieces back into a full
    ip address.

    :param segments: IPv4 segments to join.
    :type segments: ``list`` or ``tuple``

    :return: IPv4 address.
    :rtype: ``str``
    """
    # str() each piece so both int and str segments are accepted.
    return ".".join(map(str, segments))
def increment_ipv4_segments(segments):
    """
    Increment an ip address given in quad segments based on ipv4 rules

    :param segments: IPv4 segments to increment.
    :type segments: ``list`` or ``tuple``

    :return: Incremented segments.
    :rtype: ``list``
    """
    values = [int(segment) for segment in segments]

    # Add one to the last octet and ripple any carry towards the front.
    # A carry out of the first octet is deliberately kept (the first value
    # may become 256), matching the historical behaviour of this helper.
    position = 3
    values[position] += 1
    while position > 0 and values[position] == 256:
        values[position] = 0
        position -= 1
        values[position] += 1

    return values
|
{
"content_hash": "d7b32c7b3c8affd7e44fb6890c40aa98",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 75,
"avg_line_length": 23.803571428571427,
"alnum_prop": 0.5843960990247562,
"repo_name": "mistio/libcloud",
"id": "60d4572143da5b2acef7b6ef61d60c9d5771dcca",
"size": "3444",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "libcloud/utils/networking.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9067225"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
}
|
__author__ = 'Jan Pecinovsky'
import pandas as pd
"""
A Device is an entity that can contain multiple sensors.
The generic Device class can be inherited by a specific device class, eg.
Fluksometer
"""
class Device(object):
    """
    A Device is an entity that can contain multiple sensors.

    The generic Device class can be inherited by a specific device class,
    eg. Fluksometer.
    """

    def __init__(self, key=None, site=None):
        """
        Parameters
        ----------
        key : str, optional
            Identifier of this device.
        site : Site, optional
            The site this device is attached to.
        """
        self.key = key
        self.site = site
        self.sensors = []

    def __repr__(self):
        return """
    {}
    Key: {}
    {} sensors
    """.format(self.__class__.__name__,
               self.key,
               len(self.sensors)
               )

    def get_sensors(self, sensortype=None):
        """
        Return a list with all sensors in this Device

        Parameters
        ----------
        sensortype : str, optional
            gas, water, electricity. If None, all sensors are returned.

        Returns
        -------
        list of Sensors
        """
        return [sensor for sensor in self.sensors
                if sensor.type == sensortype or sensortype is None]

    def get_data(self, sensortype=None, head=None, tail=None, diff='default',
                 resample='min', unit='default'):
        """
        Return a Pandas DataFrame with the joined data for all sensors
        in this device.

        Parameters
        ----------
        sensortype : str, optional
            gas, water, electricity. If None, all sensors are used.
        head, tail : timestamps, optional
        diff : bool or 'default'
            If True, the original data will be differentiated.
            If 'default', the sensor decides: if it has the attribute
            cumulative==True, the data will be differentiated.
        resample : str (default='min')
            Sampling rate, if any. Use 'raw' if no resampling.
        unit : str, default='default'
            String representation of the target unit, eg m**3/h, kW, ...

        Returns
        -------
        Pandas DataFrame
        """
        sensors = self.get_sensors(sensortype)
        series = [sensor.get_data(head=head, tail=tail, diff=diff,
                                  resample=resample, unit=unit)
                  for sensor in sensors]

        # Drop empty series: workaround for
        # https://github.com/pandas-dev/pandas/issues/12985
        series = [s for s in series if not s.empty]

        if series:
            df = pd.concat(series, axis=1)
        else:
            df = pd.DataFrame()

        # Add unit as string to each series in the df. This is not
        # persistent: the attribute unit will get lost when doing operations
        # with df, but at least it can be checked once.
        for s in series:
            try:
                df[s.name].unit = s.unit
            except Exception:
                # Best-effort only (a series may lack a name or a unit);
                # never fail get_data over this metadata. Narrowed from a
                # bare except so KeyboardInterrupt/SystemExit pass through.
                pass
        return df

    def number_of_sensors(self, sensortype=None):
        """
        Parameters
        ----------
        sensortype : str, optional
            gas, water, electricity. If None, count all sensors.

        Returns
        -------
        int
        """
        return len(self.get_sensors(sensortype=sensortype))

    def last_timestamp(self, epoch=False):
        """
        Get the last timestamp for a device, by returning the latest
        timestamp of its sensors.

        Parameters
        ----------
        epoch : bool
            default False
            If True return as epoch
            If False return as pd.Timestamp

        Returns
        -------
        pd.Timestamp | int
        """
        timestamps = [sensor.last_timestamp(epoch=epoch)
                      for sensor in self.sensors]
        return max(timestamps)

    def add_sensor(self, sensor):
        """
        Attach a sensor to this device (also sets ``sensor.device``).

        Parameters
        ----------
        sensor : Sensor
        """
        sensor.device = self
        self.sensors.append(sensor)
class Fluksometer(Device):
    """A specific Device holding a master token and an optional TMPO session."""

    def __init__(self, site=None, key=None, mastertoken=None, tmpos=None):
        # invoke init method of generic Device
        super(Fluksometer, self).__init__(key, site)
        self.mastertoken = mastertoken
        self._tmpos = tmpos

    @property
    def tmpos(self):
        """Return the TMPO session: device-level first, then the site's.

        Raises AttributeError when no session is available.
        """
        if self._tmpos is not None:
            return self._tmpos
        elif self.site.tmpos is not None:
            return self.site.tmpos
        else:
            raise AttributeError('TMPO session not defined')
|
{
"content_hash": "6ddff9eb5e211d75fd56aea95b89e4d7",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 119,
"avg_line_length": 28.816326530612244,
"alnum_prop": 0.5576015108593012,
"repo_name": "WolfBerwouts/opengrid",
"id": "a074b63d4512ea333a82c93b40e79581188091a6",
"size": "4236",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "opengrid/library/houseprint/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5560086"
},
{
"name": "Python",
"bytes": "245729"
},
{
"name": "Shell",
"bytes": "3377"
}
],
"symlink_target": ""
}
|
"""
Module for testing the Reader class.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pytest
from surprise import Reader
def test_params():
    """Test Reader parameters"""

    # An unknown built-in dataset name must be rejected.
    with pytest.raises(ValueError):
        Reader(name='wrong_name')

    # Malformed line_format strings (misspelled or unknown field names)
    # must all be rejected.
    bad_line_formats = (
        'users item rating',
        'user itemm rating',
        'item user rrating',
        'item BLABLA user rating',
    )
    for line_format in bad_line_formats:
        with pytest.raises(ValueError):
            Reader(line_format=line_format)
def test_parse_line():
    """Test the parse_line method"""

    def parse(line_format, sep, line):
        # Build a fresh Reader and parse a single raw line with it.
        reader = Reader(line_format=line_format, sep=sep)
        return reader.parse_line(line)

    # Basic line parsing.
    uid, iid, rating, timestamp = parse(
        'user item rating timestamp', ',', 'me,best_movie_ever, 5 ,25111990')
    assert uid == 'me'
    assert iid == 'best_movie_ever'
    assert rating == 5
    assert timestamp == '25111990'

    # Change order of fields (and sep).
    uid, iid, rating, timestamp = parse(
        'timestamp rating item user', ' ', '25111990 5 best_movie_ever me')
    assert uid == 'me'
    assert iid == 'best_movie_ever'
    assert rating == 5
    assert timestamp == '25111990'

    # Without timestamp (changed sep as well).
    uid, iid, rating, _ = parse(
        'rating item user', '-', '5 - best_movie_ever - me')
    assert uid == 'me'
    assert iid == 'best_movie_ever'
    assert rating == 5

    # Wrong sep.
    with pytest.raises(ValueError):
        parse('rating item user', ';', '5 - best_movie_ever - me')

    # Wrong number of fields.
    with pytest.raises(ValueError):
        parse('rating item user', ';', '5 - best_movie_ever')
|
{
"content_hash": "720f9f2d64b2843c282f7cd43f1aeed8",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 66,
"avg_line_length": 25.858823529411765,
"alnum_prop": 0.6287534121929026,
"repo_name": "charmoniumQ/Surprise",
"id": "8ea8d16ab8cd6216f7cb6a0d2281b924c8f0ec12",
"size": "2198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_reader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "177820"
}
],
"symlink_target": ""
}
|
from setuptools import find_packages, setup
# Packaging configuration for the rnnmorph distribution.
setup(
    name='rnnmorph',
    packages=find_packages(),
    version='0.4.1',
    description='RNNMorph: neural network disambiguation of pymorphy2 parses for precise '
                'POS-tagging in Russian language.',
    author='Ilya Gusev',
    author_email='phoenixilya@gmail.com',
    url='https://github.com/IlyaGusev/rnnmorph',
    download_url='https://github.com/IlyaGusev/rnnmorph/archive/0.4.1.tar.gz',
    keywords=['nlp', 'russian', 'lstm', 'morphology'],
    # Ship the bundled pretrained model files for Russian and English.
    package_data={
        'rnnmorph': ['models/ru/*', 'models/en/*']
    },
    install_requires=[
        'numpy>=1.12.1',
        'scipy>=0.19.0',
        'scikit-learn>=0.18.1',
        'keras>=2.1.4',
        'h5py>=2.7.0',
        'pymorphy2>=0.8',
        # Pinned exactly: russian-tagsets is the tag-conversion dependency.
        'russian-tagsets==0.6',
        'tqdm>=4.14.0',
        'jsonpickle>=0.9.4',
        'nltk>=3.2.5'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Text Processing :: Linguistic',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: Russian',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9'
    ],
)
|
{
"content_hash": "5f527db26908d070e4017c1cafe8403a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 90,
"avg_line_length": 31.044444444444444,
"alnum_prop": 0.5647816750178954,
"repo_name": "IlyaGusev/rnnmorph",
"id": "ef5938e0b51f6f1d2435f04934510f26c2c5fd12",
"size": "1397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "91880"
},
{
"name": "Shell",
"bytes": "249"
}
],
"symlink_target": ""
}
|
import argparse
import datetime
import logging
import os
import sys
from multiprocessing.pool import ThreadPool
import devil_chromium
from devil.android import device_denylist
from devil.android import device_errors
from devil.android import device_utils
from devil.utils import run_tests_helper
from pylib import constants
from pylib.symbols import stack_symbolizer
# Environment override forcing shell commands (e.g. `date`) to report UTC so
# their output parses consistently regardless of the device's local timezone.
_TZ_UTC = {'TZ': 'UTC'}
def _ListTombstones(device):
  """List the tombstone files on the device.

  Args:
    device: An instance of DeviceUtils.

  Yields:
    Tuples of (tombstone filename, date time of file on device).
  """
  # Best-effort: any device-communication failure is logged and the
  # generator simply yields nothing further.
  try:
    if not device.PathExists('/data/tombstones', as_root=True):
      return
    entries = device.StatDirectory('/data/tombstones', as_root=True)
    for entry in entries:
      if 'tombstone' in entry['filename']:
        yield (entry['filename'],
               datetime.datetime.fromtimestamp(entry['st_mtime']))
  except device_errors.CommandFailedError:
    logging.exception('Could not retrieve tombstones.')
  except device_errors.DeviceUnreachableError:
    logging.exception('Device unreachable retrieving tombstones.')
  except device_errors.CommandTimeoutError:
    logging.exception('Timed out retrieving tombstones.')
def _GetDeviceDateTime(device):
  """Determine the date time on the device.

  Args:
    device: An instance of DeviceUtils.

  Returns:
    A datetime instance.
  """
  # Force TZ=UTC so the '%Z' field is stable and strptime below can parse it.
  device_now_string = device.RunShellCommand(
      ['date'], check_return=True, env=_TZ_UTC)
  return datetime.datetime.strptime(
      device_now_string[0], '%a %b %d %H:%M:%S %Z %Y')
def _GetTombstoneData(device, tombstone_file):
"""Retrieve the tombstone data from the device
Args:
device: An instance of DeviceUtils.
tombstone_file: the tombstone to retrieve
Returns:
A list of lines
"""
return device.ReadFile(
'/data/tombstones/' + tombstone_file, as_root=True).splitlines()
def _EraseTombstone(device, tombstone_file):
"""Deletes a tombstone from the device.
Args:
device: An instance of DeviceUtils.
tombstone_file: the tombstone to delete.
"""
return device.RunShellCommand(
['rm', '/data/tombstones/' + tombstone_file],
as_root=True, check_return=True)
def _ResolveTombstone(args):
tombstone = args[0]
tombstone_symbolizer = args[1]
lines = []
lines += [tombstone['file'] + ' created on ' + str(tombstone['time']) +
', about this long ago: ' +
(str(tombstone['device_now'] - tombstone['time']) +
' Device: ' + tombstone['serial'])]
logging.info('\n'.join(lines))
logging.info('Resolving...')
lines += tombstone_symbolizer.ExtractAndResolveNativeStackTraces(
tombstone['data'],
tombstone['device_abi'],
tombstone['stack'])
return lines
def _ResolveTombstones(jobs, tombstones, tombstone_symbolizer):
  """Resolve a list of tombstones.

  Args:
    jobs: the number of jobs to use with multithread.
    tombstones: a list of tombstones.
    tombstone_symbolizer: the symbolizer used to resolve native stacks.

  Returns:
    A flat list of resolved output lines for all tombstones.
  """
  if not tombstones:
    logging.warning('No tombstones to resolve.')
    return []
  work_items = [[tombstone, tombstone_symbolizer] for tombstone in tombstones]
  if len(work_items) == 1:
    # Avoid spinning up a thread pool for a single tombstone.
    results = [_ResolveTombstone(work_items[0])]
  else:
    pool = ThreadPool(jobs)
    results = pool.map(_ResolveTombstone, work_items)
    pool.close()
    pool.join()
  # Flatten the per-tombstone line lists into a single list.
  return [line for lines in results for line in lines]
def _GetTombstonesForDevice(device, resolve_all_tombstones,
                            include_stack_symbols,
                            wipe_tombstones):
  """Returns a list of tombstones on a given device.

  Args:
    device: An instance of DeviceUtils.
    resolve_all_tombstones: Whether to resolve every tombstone.
    include_stack_symbols: Whether to include symbols for stack data.
    wipe_tombstones: Whether to wipe tombstones.
  """
  ret = []
  all_tombstones = list(_ListTombstones(device))
  if not all_tombstones:
    logging.warning('No tombstones.')
    return ret
  # Sort the tombstones in date order, descending
  all_tombstones.sort(key=lambda a: a[1], reverse=True)
  # Only resolve the most recent unless --all-tombstones given.
  tombstones = all_tombstones if resolve_all_tombstones else [all_tombstones[0]]
  device_now = _GetDeviceDateTime(device)
  try:
    for tombstone_file, tombstone_time in tombstones:
      ret += [{'serial': str(device),
               'device_abi': device.product_cpu_abi,
               'device_now': device_now,
               'time': tombstone_time,
               'file': tombstone_file,
               'stack': include_stack_symbols,
               'data': _GetTombstoneData(device, tombstone_file)}]
  except device_errors.CommandFailedError:
    # Dump the directory listing to aid debugging before re-raising.
    for entry in device.StatDirectory(
        '/data/tombstones', as_root=True, timeout=60):
      logging.info('%s: %s', str(device), entry)
    raise
  # Erase all the tombstones if desired (note: all of them, not only the
  # ones resolved above).
  if wipe_tombstones:
    for tombstone_file, _ in all_tombstones:
      _EraseTombstone(device, tombstone_file)
  return ret
def ClearAllTombstones(device):
  """Clear all tombstones in the device.

  Args:
    device: An instance of DeviceUtils.
  """
  all_tombstones = list(_ListTombstones(device))
  if not all_tombstones:
    logging.warning('No tombstones to clear.')
  # The loop below is simply a no-op when the list is empty.
  for tombstone_file, _ in all_tombstones:
    _EraseTombstone(device, tombstone_file)
def ResolveTombstones(device, resolve_all_tombstones, include_stack_symbols,
                      wipe_tombstones, jobs=4, apk_under_test=None,
                      tombstone_symbolizer=None):
  """Resolve tombstones in the device.

  Args:
    device: An instance of DeviceUtils.
    resolve_all_tombstones: Whether to resolve every tombstone.
    include_stack_symbols: Whether to include symbols for stack data.
    wipe_tombstones: Whether to wipe tombstones.
    jobs: Number of jobs to use when processing multiple crash stacks.
    apk_under_test: Used to build a stack_symbolizer.Symbolizer when no
      tombstone_symbolizer is supplied.
    tombstone_symbolizer: Optional symbolizer; one is constructed from
      apk_under_test when omitted.

  Returns:
    A list of resolved tombstones.
  """
  return _ResolveTombstones(jobs,
                            _GetTombstonesForDevice(device,
                                                    resolve_all_tombstones,
                                                    include_stack_symbols,
                                                    wipe_tombstones),
                            (tombstone_symbolizer
                             or stack_symbolizer.Symbolizer(apk_under_test)))
def main():
  """CLI entry point: fetches and symbolizes tombstones from Android devices."""
  # Route all log output through stdout with the test-runner's formatter.
  custom_handler = logging.StreamHandler(sys.stdout)
  custom_handler.setFormatter(run_tests_helper.CustomFormatter())
  logging.getLogger().addHandler(custom_handler)
  logging.getLogger().setLevel(logging.INFO)
  parser = argparse.ArgumentParser()
  parser.add_argument('--device',
                      help='The serial number of the device. If not specified '
                           'will use all devices.')
  parser.add_argument('--denylist-file', help='Device denylist JSON file.')
  parser.add_argument('-a', '--all-tombstones', action='store_true',
                      help='Resolve symbols for all tombstones, rather than '
                           'just the most recent.')
  parser.add_argument('-s', '--stack', action='store_true',
                      help='Also include symbols for stack data')
  parser.add_argument('-w', '--wipe-tombstones', action='store_true',
                      help='Erase all tombstones from device after processing')
  parser.add_argument('-j', '--jobs', type=int,
                      default=4,
                      help='Number of jobs to use when processing multiple '
                           'crash stacks.')
  parser.add_argument('--output-directory',
                      help='Path to the root build directory.')
  parser.add_argument('--adb-path', type=os.path.abspath,
                      help='Path to the adb binary.')
  args = parser.parse_args()
  # Point build helpers at the output directory before devil initialization,
  # which reads paths via |constants|.
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  devil_chromium.Initialize(output_directory=constants.GetOutDirectory(),
                            adb_path=args.adb_path)
  denylist = (device_denylist.Denylist(args.denylist_file)
              if args.denylist_file else None)
  # A single serial restricts work to that device; otherwise use all healthy
  # devices not on the denylist.
  if args.device:
    devices = [device_utils.DeviceUtils(args.device)]
  else:
    devices = device_utils.DeviceUtils.HealthyDevices(denylist)
  # This must be done serially because strptime can hit a race condition if
  # used for the first time in a multithreaded environment.
  # http://bugs.python.org/issue7980
  for device in devices:
    resolved_tombstones = ResolveTombstones(
        device, args.all_tombstones,
        args.stack, args.wipe_tombstones, args.jobs)
    for line in resolved_tombstones:
      logging.info(line)


if __name__ == '__main__':
  sys.exit(main())
|
{
"content_hash": "057b07aa4cc1f1f8b775a8ba6a598d4e",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 80,
"avg_line_length": 32.86617100371747,
"alnum_prop": 0.6530935414545865,
"repo_name": "scheib/chromium",
"id": "ae478bf689754821a14cbfa9cdd29e29425130cd",
"size": "9186",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "build/android/tombstones.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.http import HttpResponse
from django.template import Context, loader
from django.db import models
from django.shortcuts import (render,
get_object_or_404,
redirect,)
from spmurray_app.models import Post, User
from django.views.decorators.cache import cache_control
from django.core.context_processors import csrf
from django.http import Http404
from django.views.decorators.cache import cache_page
from django.core.cache import cache
# --- PUBLIC DYANMIC PAGES ---
@cache_page(60*15)
@cache_control(must_revalidate=True, max_age=0)
def blog(request, start_post_id=False, length=3):
    """Render up to ``length`` published posts starting at ``start_post_id``."""
    if length == 0:
        length = 1
    if not start_post_id:
        # Default to the newest post when no explicit starting point is given.
        start_post_id = Post.objects.latest('id').id
    posts = Post.objects.filter(
        published=1, id__lte=start_post_id).order_by('-timestamp')[0:length]
    context = {'posts': posts}
    # Offer a "next page" id only when a full default-size page was shown
    # and at least one more published post exists beyond it.
    if length == 3 and len(posts) == 3:
        older = Post.objects.filter(
            published=1,
            id__lt=posts[2].id
        ).order_by('-timestamp')[0:length]
        if len(older) > 0:
            context['next_id'] = older[0].id
    return render(request, 'blog.html', context)
@cache_page(60*15)
@cache_control(must_revalidate=True, max_age=0)
def post(request, post_id=False):
    """Render a single published post; 404 if missing or unpublished."""
    entry = get_object_or_404(Post, pk=post_id)
    if entry.published == 0:
        raise Http404
    return render(request, 'blog.html', {'posts': [entry]})
@cache_page(60*15)
@cache_control(must_revalidate=True, max_age=0)
def archive(request):
    """Render every post (no published filter), newest first."""
    all_posts = Post.objects.all().order_by('-timestamp')
    return render(request, 'archive.html', {'posts': all_posts})
# -----------------
# --- PUBLIC STATIC PAGES ---
@cache_page(60*15)
@cache_control(must_revalidate=True, max_age=0)
def about(request):
    # Static page; cached for 15 minutes, revalidated by clients (max_age=0).
    return render(request, 'about.html')
# @cache_page(86400)
# @cache_control(must_revalidate=True, max_age=86400)
# def projects(request):
# return render(request, 'projects.html')
# -----------------
# --- AUTHed ADMIN PAGES ---
def posts(request):
    """Admin listing of all posts, newest first; 404 for anonymous users."""
    if not request.user.is_authenticated():
        raise Http404
    ordered = Post.objects.all().order_by('-timestamp')
    return render(request, 'admin/posts.html', {'posts': ordered})
def add(request):
    """Admin form for creating a post; 404 for anonymous users."""
    if not request.user.is_authenticated():
        raise Http404
    c = {}
    c.update(csrf(request))  # ensures the CSRF token/cookie is issued
    form = request.POST
    title = form.get('post_title', False)
    content = form.get('post_content', False)
    published = 1 if form.get('post_published', False) else 0
    if title and content:
        user = User.objects.all()[0]  # super hacky, but who cares
        new_post = Post(title=title, content=content, user=user)
        new_post.published = published
        new_post.save()
        return redirect('posts')
    return render(request, 'admin/add.html')
def edit(request, post_id=False):
    """Admin form for updating an existing post; 404 for anonymous users."""
    if not request.user.is_authenticated():
        raise Http404
    entry = get_object_or_404(Post, pk=post_id)
    c = {}
    c.update(csrf(request))  # ensures the CSRF token/cookie is issued
    form = request.POST
    title = form.get('post_title', False)
    content = form.get('post_content', False)
    published = 1 if form.get('post_published', False) else 0
    if title and content:
        entry.title = title
        entry.content = content
        entry.published = published
        entry.save()
        return redirect('posts')
    # Pre-check the "published" box when the stored post is published.
    checked = 'checked="checked"' if entry.published == 1 else ''
    return render(request, 'admin/edit.html',
                  {'post': entry, 'checked': checked})
# -----------------
|
{
"content_hash": "35b04b8ca94b1f22ee6ca25f371a9d6e",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 76,
"avg_line_length": 26.30821917808219,
"alnum_prop": 0.6183285602707628,
"repo_name": "hijonathan/spmurraydev.com",
"id": "2c2603432e924ad870f654c91c64c160cacf4c87",
"size": "3841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spmurray_app/views.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
    # Standard Django bootstrap: default the settings module for this
    # project, then dispatch to the management command on the command line.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "whturk.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
{
"content_hash": "4725c777b23f6d9d529b17c6ad2aac29",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "paftree/WHturk",
"id": "87675954c0b628bce2549e1e83218ecf32d486fd",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "412079"
},
{
"name": "HTML",
"bytes": "36344"
},
{
"name": "JavaScript",
"bytes": "1526712"
},
{
"name": "Python",
"bytes": "48695"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
}
|
import logging
import optparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
from kudu_util import check_output, init_logging
JAVA_ACC_GIT_URL = "https://github.com/lvc/japi-compliance-checker.git"
# The annotations for what we consider our public API.
PUBLIC_ANNOTATIONS = ["InterfaceAudience.LimitedPrivate",
"InterfaceAudience.Public"]
# Various relative paths
PATH_TO_REPO_DIR = "../"
PATH_TO_BUILD_DIR = "../build/compat-check"
def get_repo_dir():
  """ Return the path to the top of the repo. """
  script_dir = os.path.dirname(os.path.abspath(__file__))
  return os.path.abspath(os.path.join(script_dir, PATH_TO_REPO_DIR))
def get_scratch_dir():
  """ Return the path to the scratch dir that we build within. """
  script_dir = os.path.dirname(os.path.abspath(__file__))
  return os.path.abspath(os.path.join(script_dir, PATH_TO_BUILD_DIR))
def get_java_acc_dir():
  """ Return the path where we check out the Java API Compliance Checker. """
  repo_root = get_repo_dir()
  return os.path.join(repo_root, "thirdparty/java-acc")
def clean_scratch_dir(scratch_dir):
  """ Clean up and re-create the scratch directory.

  Removes the directory tree if it exists, then recreates it empty.
  """
  if os.path.exists(scratch_dir):
    logging.info("Removing scratch dir %s...", scratch_dir)
    shutil.rmtree(scratch_dir)
  logging.info("Creating empty scratch dir %s...", scratch_dir)
  os.makedirs(scratch_dir)
def checkout_java_tree(rev, path):
  """ Check out the Java source tree for the given revision into the given path.

  Args:
    rev: git revision (SHA-1 or ref) to export with 'git archive'.
    path: destination directory; created here and must not already exist.
  """
  logging.info("Checking out %s in %s", rev, path)
  os.makedirs(path)
  # Extract java source
  subprocess.check_call(["bash", '-o', 'pipefail', "-c",
                         ("git archive --format=tar %s java/ | " +
                          "tar -C \"%s\" -xf -") % (rev, path)],
                        cwd=get_repo_dir())
  # Extract proto files which the Java build also relies on.
  # bsdtar doesn't support --wildcards so we need to extract them in two steps.
  git_tar_cmd = "git archive --format=tar %s src/" % rev
  # First pass: list the archive and collect .proto paths into a temp file.
  proto_filenames_file = tempfile.NamedTemporaryFile()
  subprocess.check_call(["bash", '-o', 'pipefail', "-c",
                         git_tar_cmd + " | tar -t | grep -a '\.proto$'"],
                        cwd=get_repo_dir(), stdout=proto_filenames_file)
  # Second pass: extract only the files named in that list.
  subprocess.check_call(["bash", '-o', 'pipefail', "-c",
                         git_tar_cmd + " | " +
                         ("tar -C \"%s\" -xT %s") % (path, proto_filenames_file.name)],
                        cwd=get_repo_dir())
  # Symlink thirdparty from the outer build so that protoc is available.
  # This may break at some point in the future if we switch protobuf versions,
  # but for now it's faster than rebuilding protobuf in both trees.
  os.symlink(os.path.join(get_repo_dir(), "thirdparty"),
             os.path.join(path, "thirdparty"))
def get_git_hash(revname):
  """ Convert 'revname' to its SHA-1 hash. """
  output = check_output(["git", "rev-parse", revname], cwd=get_repo_dir())
  return output.strip()
def build_tree(path):
  """ Run the Java build within 'path'. """
  java_path = os.path.join(path, "java")
  logging.info("Building in %s...", java_path)
  mvn_cmd = ["mvn", "-DskipTests", "-Dmaven.javadoc.skip=true", "package"]
  subprocess.check_call(mvn_cmd, cwd=java_path)
def checkout_java_acc(force):
  """
  Check out the Java API Compliance Checker. If 'force' is true, will re-download even if the
  directory exists.
  """
  acc_dir = get_java_acc_dir()
  already_present = os.path.exists(acc_dir)
  if already_present:
    logging.info("Java JAVA_ACC is already downloaded.")
    if not force:
      return
    logging.info("Forcing re-download.")
    shutil.rmtree(acc_dir)
  logging.info("Checking out Java JAVA_ACC...")
  clone_cmd = ["git", "clone", "--depth=1", JAVA_ACC_GIT_URL, acc_dir]
  subprocess.check_call(clone_cmd)
def find_client_jars(path):
  """ Return a list of jars within 'path' to be checked for compatibility. """
  jars = set(check_output(["find", path, "-name", "*.jar"]).splitlines())
  # If we see "original-foo.jar", then remove "foo.jar" since that's a
  # post-shading duplicate.
  for jar in list(jars):
    jar_dir, jar_name = os.path.split(jar)
    match = re.match("original-(.+)", jar_name)
    if match:
      jars.remove(os.path.join(jar_dir, match.group(1)))
  excluded_markers = ("-tests", "-sources", "-with-dependencies")
  return [j for j in jars
          if not any(marker in j for marker in excluded_markers)]
def run_java_acc(src_name, src, dst_name, dst):
  """ Run the compliance checker to compare 'src' and 'dst'.

  src_name/dst_name label the versions in the generated HTML report;
  src/dst are built source trees whose client jars are compared.

  NOTE(review): uses Python-2-only syntax ('file', 'print >>') — this
  script requires Python 2.
  """
  src_jars = find_client_jars(src)
  dst_jars = find_client_jars(dst)
  logging.info("Will check compatibility between original jars:\n%s\n" +
               "and new jars:\n%s",
               "\n".join(src_jars),
               "\n".join(dst_jars))
  # Write the annotation list: the checker restricts its comparison to
  # classes carrying one of these public-API annotations.
  annotations_path = os.path.join(get_scratch_dir(), "annotations.txt")
  with file(annotations_path, "w") as f:
    for ann in PUBLIC_ANNOTATIONS:
      print >>f, ann
  java_acc_path = os.path.join(get_java_acc_dir(), "japi-compliance-checker.pl")
  out_path = os.path.join(get_scratch_dir(), "report.html")
  subprocess.check_call(["perl", java_acc_path,
                         "-l", "Kudu",
                         "-v1", src_name,
                         "-v2", dst_name,
                         "-d1", ",".join(src_jars),
                         "-d2", ",".join(dst_jars),
                         "-report-path", out_path,
                         "-annotations-list", annotations_path])
def main(argv):
  """ Entry point: build two revisions and compare their public Java API.

  Expects one positional argument of the form SRC..[DST]; DST defaults to
  HEAD when omitted. Builds both trees and runs the compliance checker.
  """
  parser = optparse.OptionParser(
      usage="usage: %prog SRC..[DST]")
  # NOTE(review): without action="store_true" this option requires a value
  # on the command line even though it is used as a boolean flag; left
  # unchanged to preserve the existing CLI.
  parser.add_option("-f", "--force-download", dest="force_download_deps",
                    help=("Download dependencies (i.e. Java JAVA_ACC) even if they are " +
                          "already present"))
  opts, args = parser.parse_args()
  if len(args) != 1:
    # parser.error() prints the usage message and exits the process, so no
    # explicit sys.exit() is needed (the one previously here was unreachable).
    parser.error("no src/dst revision specified")
  src_rev, dst_rev = args[0].split("..", 1)
  if dst_rev == "":
    dst_rev = "HEAD"
  src_rev = get_git_hash(src_rev)
  dst_rev = get_git_hash(dst_rev)
  logging.info("Source revision: %s", src_rev)
  logging.info("Destination revision: %s", dst_rev)
  # Download deps.
  checkout_java_acc(opts.force_download_deps)
  # Set up the build.
  scratch_dir = get_scratch_dir()
  clean_scratch_dir(scratch_dir)
  # Check out the src and dst source trees.
  src_dir = os.path.join(scratch_dir, "src")
  dst_dir = os.path.join(scratch_dir, "dst")
  checkout_java_tree(src_rev, src_dir)
  checkout_java_tree(dst_rev, dst_dir)
  # Run the build in each.
  build_tree(src_dir)
  build_tree(dst_dir)
  run_java_acc(src_rev, src_dir,
               dst_rev, dst_dir)
if __name__ == "__main__":
  # Configure logging (kudu_util helper) before running the comparison.
  init_logging()
  main(sys.argv)
|
{
"content_hash": "6e4b4f3e982b0dc29f7a85b62f6d1fe6",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 93,
"avg_line_length": 33.78606965174129,
"alnum_prop": 0.6050655279045796,
"repo_name": "EvilMcJerkface/kudu",
"id": "61d56613bf8bbfff33af6a54043222ccc1ccb613",
"size": "7801",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "build-support/check_compatibility.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "423003"
},
{
"name": "C++",
"bytes": "14088007"
},
{
"name": "CMake",
"bytes": "203355"
},
{
"name": "CSS",
"bytes": "1364"
},
{
"name": "Clojure",
"bytes": "54969"
},
{
"name": "HTML",
"bytes": "24429"
},
{
"name": "Java",
"bytes": "1919604"
},
{
"name": "JavaScript",
"bytes": "5920"
},
{
"name": "Makefile",
"bytes": "658"
},
{
"name": "Perl",
"bytes": "32137"
},
{
"name": "Python",
"bytes": "485662"
},
{
"name": "R",
"bytes": "11537"
},
{
"name": "Scala",
"bytes": "166106"
},
{
"name": "Shell",
"bytes": "106702"
},
{
"name": "Thrift",
"bytes": "59110"
}
],
"symlink_target": ""
}
|
from abc import ABC
from typing import TYPE_CHECKING
from ._configuration import AnomalyDetectorClientConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core import AsyncPipelineClient
from .._serialization import Deserializer, Serializer
class AnomalyDetectorClientMixinABC(ABC):
    """DO NOT use this class. It is for internal typing use only."""

    # Attributes supplied by the concrete client class that mixes this in;
    # declared here so operation mixin code type-checks against them.
    _client: "AsyncPipelineClient"
    _config: AnomalyDetectorClientConfiguration
    _serialize: "Serializer"
    _deserialize: "Deserializer"
|
{
"content_hash": "3a420fe98cfa390836fc4a8f4626c7ad",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 29.210526315789473,
"alnum_prop": 0.7711711711711712,
"repo_name": "Azure/azure-sdk-for-python",
"id": "4225c867c0db4eb32b523573b8d1bdd52ea19ba1",
"size": "1008",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/anomalydetector/azure-ai-anomalydetector/azure/ai/anomalydetector/aio/_vendor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from test_framework import BitcoinTestFramework
from halobitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def check_array_result(object_array, to_match, expected):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.

    Raises AssertionError when a matching object fails an 'expected'
    check, or when nothing in object_array matches 'to_match'.
    """
    num_matched = 0
    for obj in object_array:
        # Evaluate every match key (no short-circuit) so a missing key
        # raises KeyError exactly as the direct lookups would.
        mismatches = [k for k, v in to_match.items() if obj[k] != v]
        if mismatches:
            continue
        for k, v in expected.items():
            if obj[k] != v:
                raise AssertionError("%s : expected %s=%s"%(str(obj), str(k), str(v)))
        num_matched = num_matched+1
    if num_matched == 0:
        raise AssertionError("No objects matched %s"%(str(to_match)))
def b2x(b):
    """Return the lowercase hex-string form of the given byte buffer."""
    hexed = b2a_hex(b)
    return hexed.decode('ascii')
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
    """Encode positive integer n as a length-prefixed little-endian push."""
    payload = bytearray()
    # Peel off base-256 digits, least significant first, keeping the final
    # (most significant) digit <= 127.
    while n > 127:
        payload.append(n % 256)
        n //= 256
    payload.append(n)
    return bytes(bytearray([len(payload)]) + payload)
def varlenEncode(n):
    """Encode n in Bitcoin's variable-length (CompactSize) integer format."""
    # Smallest encoding wins: 1-byte literal, then 0xfd/0xfe/0xff-prefixed
    # little-endian 16/32/64-bit forms.
    for prefix, fmt, limit in ((b'', '<B', 0xfc),
                               (b'\xfd', '<H', 0xffff),
                               (b'\xfe', '<L', 0xffffffff)):
        if n <= limit:
            return prefix + pack(fmt, n)
    return b'\xff' + pack('<Q', n)
def dblsha(b):
    """Return SHA256(SHA256(b)), Bitcoin's double-SHA256 digest."""
    inner = sha256(b).digest()
    return sha256(inner).digest()
def genmrklroot(leaflist):
    """Compute the merkle root of the given leaf hashes.

    NOTE: mutates its argument (may append a duplicated final leaf).
    """
    level = leaflist
    while len(level) > 1:
        if len(level) & 1:
            # Odd count: duplicate the last hash, per Bitcoin's rule.
            level.append(level[-1])
        level = [dblsha(level[i] + level[i + 1])
                 for i in range(0, len(level), 2)]
    return level[0]
def template_to_bytes(tmpl, txlist):
    """Serialize a block template plus raw transactions into block bytes."""
    # 80-byte header: version, prev-hash (byte-reversed), merkle root,
    # time, bits (byte-reversed), zero nonce.
    header_parts = [
        pack('<L', tmpl['version']),
        a2b_hex(tmpl['previousblockhash'])[::-1],
        genmrklroot(list(dblsha(a) for a in txlist)),
        pack('<L', tmpl['curtime']),
        a2b_hex(tmpl['bits'])[::-1],
        b'\0\0\0\0',
    ]
    body_parts = [varlenEncode(len(txlist))] + list(txlist)
    return b''.join(header_parts + body_parts)
def template_to_hex(tmpl, txlist):
    """Serialize a block template + transactions to a hex string."""
    raw_block = template_to_bytes(tmpl, txlist)
    return b2x(raw_block)
def assert_template(node, tmpl, txlist, expect):
    """Submit the block as a proposal and assert the node's verdict."""
    proposal = {'data': template_to_hex(tmpl, txlist), 'mode': 'proposal'}
    rsp = node.getblocktemplate(proposal)
    if rsp != expect:
        raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(BitcoinTestFramework):
    '''
    Test block proposals with getblocktemplate.

    Mutates a valid template's raw bytes in targeted places and checks the
    node's 'proposal' mode returns the expected rejection reason for each.
    '''

    def run_test(self):
        node = self.nodes[0]
        tmpl = node.getblocktemplate()
        # Templates without a coinbase txn get a minimal synthesized one
        # paying 'coinbasevalue', so the proposal is complete.
        if 'coinbasetxn' not in tmpl:
            rawcoinbase = encodeUNum(tmpl['height'])
            rawcoinbase += b'\x01-'
            hexcoinbase = b2x(rawcoinbase)
            hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
            tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
        # Mutable copies of every raw tx (coinbase first) for byte-poking.
        txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))

        # Test 0: Capability advertised
        assert('proposal' in tmpl['capabilities'])

        # NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
        ## Test 1: Bad height in coinbase
        #txlist[0][4+1+36+1+1] += 1
        #assert_template(node, tmpl, txlist, 'FIXME')
        #txlist[0][4+1+36+1+1] -= 1

        # Test 2: Bad input hash for gen tx
        # (offset 4+1 = first byte of the coinbase input's prevout hash)
        txlist[0][4+1] += 1
        assert_template(node, tmpl, txlist, 'bad-cb-missing')
        txlist[0][4+1] -= 1

        # Test 3: Truncated final tx
        lastbyte = txlist[-1].pop()
        try:
            assert_template(node, tmpl, txlist, 'n/a')
        except JSONRPCException:
            pass  # Expected
        txlist[-1].append(lastbyte)

        # Test 4: Add an invalid tx to the end (duplicate of gen tx)
        txlist.append(txlist[0])
        assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
        txlist.pop()

        # Test 5: Add an invalid tx to the end (non-duplicate)
        txlist.append(bytearray(txlist[0]))
        txlist[-1][4+1] = b'\xff'
        assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
        txlist.pop()

        # Test 6: Future tx lock time
        # (last 4 bytes of a tx are its nLockTime)
        txlist[0][-4:] = b'\xff\xff\xff\xff'
        assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
        txlist[0][-4:] = b'\0\0\0\0'

        # Test 7: Bad tx count
        txlist.append(b'')
        try:
            assert_template(node, tmpl, txlist, 'n/a')
        except JSONRPCException:
            pass  # Expected
        txlist.pop()

        # Test 8: Bad bits
        realbits = tmpl['bits']
        tmpl['bits'] = '1c0000ff'  # impossible in the real world
        assert_template(node, tmpl, txlist, 'bad-diffbits')
        tmpl['bits'] = realbits

        # Test 9: Bad merkle root
        # (byte at offset 4+32 is the first byte of the merkle root)
        rawtmpl = template_to_bytes(tmpl, txlist)
        rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
        rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
        if rsp != 'bad-txnmrklroot':
            raise AssertionError('unexpected: %s' % (rsp,))

        # Test 10: Bad timestamps
        realtime = tmpl['curtime']
        tmpl['curtime'] = 0x7fffffff
        assert_template(node, tmpl, txlist, 'time-too-new')
        tmpl['curtime'] = 0
        assert_template(node, tmpl, txlist, 'time-too-old')
        tmpl['curtime'] = realtime

        # Test 11: Valid block
        assert_template(node, tmpl, txlist, None)

        # Test 12: Orphan block
        tmpl['previousblockhash'] = 'ff00' * 16
        assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
    # BitcoinTestFramework.main() parses args, spins up nodes, runs run_test.
    GetBlockTemplateProposalTest().main()
|
{
"content_hash": "d4e2701eaee61197f91f3cd4274bdd33",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 237,
"avg_line_length": 34.3954802259887,
"alnum_prop": 0.5855781865965834,
"repo_name": "HaloExchange/HaloBitcoin",
"id": "0e9f671119b06a81fd95eab96aa16499b9074a2c",
"size": "6303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/getblocktemplate_proposals.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "339509"
},
{
"name": "C++",
"bytes": "3469504"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "Makefile",
"bytes": "61468"
},
{
"name": "Objective-C",
"bytes": "3103"
},
{
"name": "Objective-C++",
"bytes": "7196"
},
{
"name": "Python",
"bytes": "195469"
},
{
"name": "Shell",
"bytes": "370705"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ErrorResponse(Model):
    """Error response indicates Relay service is not able to process the incoming
    request. The reason is provided in the error message.

    :param code: Error code.
    :type code: str
    :param message: Error message indicating why the operation failed.
    :type message: str
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, code=None, message=None):
        # Initialize the msrest Model base so its serialization bookkeeping
        # is set up; the original skipped this call.
        super(ErrorResponse, self).__init__()
        self.code = code
        self.message = message
class ErrorResponseException(HttpOperationError):
    """Server responded with exception of type: 'ErrorResponse'.

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        # Delegate to HttpOperationError, naming the model class used to
        # deserialize the error body.
        super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
|
{
"content_hash": "1b277f9850f0ff1dc338f2a0c2569ace",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 99,
"avg_line_length": 30.147058823529413,
"alnum_prop": 0.6702439024390244,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "33d628db5418552676b0981b540d9e017400af53",
"size": "1499",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-relay/azure/mgmt/relay/models/error_response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
"""
Copyright 2012 Ali Ok (aliokATapacheDOTorg)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from bson.code import Code
class NgramTypeFrequencyFinder(object):
    """MongoDB map-reduce helpers for counting n-gram / word frequencies."""

    @classmethod
    def find_frequency_of_frequency(cls, collection, ngram_type, frequency):
        """
        Finds the frequency of given frequency.
        For frequency 0, this method should not be used!
        @type collection: Collection
        @type ngram_type: list
        @type frequency: int
        @rtype: int
        """
        assert frequency and frequency > 0
        emission_keys = cls._create_emission_keys(ngram_type)
        filter_criteria = {"value.count": frequency}
        return cls._find_count(collection, emission_keys, filter_criteria)

    @classmethod
    def find_frequency_of_parse_result_frequency(cls, unigram_collection, frequency):
        """
        Finds the frequency of given parse result frequency.
        For frequency 0, this method should not be used!
        @type unigram_collection: Collection
        @type frequency: int
        @rtype: int
        """
        assert frequency and frequency > 0
        # Group unigrams by their parse result value.
        emission_key = "emission_key_val:this.item_0.word.parse_result.value"
        filter_criteria = {"value.count": frequency}
        return cls._find_count(unigram_collection, emission_key, filter_criteria)

    @classmethod
    def find_frequency_of_word_frequency(cls, unigram_collection, frequency):
        """
        Finds the frequency of given word frequency.
        For frequency 0, this method should not be used!
        @type unigram_collection: Collection
        @type frequency: int
        @rtype: int
        """
        assert frequency and frequency > 0
        # Group unigrams by their surface form.
        emission_key = "emission_key_val:this.item_0.word.surface.value"
        filter_criteria = {"value.count": frequency}
        return cls._find_count(unigram_collection, emission_key, filter_criteria)

    @classmethod
    def find_distinct_count(cls, collection, ngram_type):
        """
        Finds the count of distinct items for ngram_type.
        @type collection: Collection
        @type ngram_type: list
        @rtype: int
        """
        emission_keys = cls._create_emission_keys(ngram_type)
        # No filter criteria: count every distinct emission key.
        return cls._find_count(collection, emission_keys, None)

    @classmethod
    def find_distinct_word_count(cls, unigram_collection):
        """
        Finds the count of distinct words.
        @type unigram_collection:Collection
        @rtype: int
        """
        emission_key = "emission_key_val:this.item_0.word.surface.value"
        return cls._find_count(unigram_collection, emission_key, None)

    @classmethod
    def find_distinct_parse_result_count(cls, unigram_collection):
        """
        Finds the count of distinct parse results.
        @type unigram_collection:Collection
        @rtype: int
        """
        emission_key = "emission_key_val:this.item_0.word.parse_result.value"
        return cls._find_count(unigram_collection, emission_key, None)

    @classmethod
    def _find_count(cls, collection, emission_keys, filter_criteria):
        # Map: emit {emission keys -> count:1} per document.
        mapper = Code("""
            function(){
                emit({
                    """ + emission_keys + """
                }, {count: 1});
            }
        """)

        # Reduce: sum counts per distinct key.
        reducer = Code("""
            function(key,values){
                var total = 0;
                for (var i = 0; i < values.length; i++) {
                    total += values[i].count
                }
                return {count:total};
            }
        """)

        # Results are materialized into the collection "_temporary"; the
        # optional filter then selects keys with a specific total count.
        result = collection.map_reduce(mapper, reducer, "_temporary")
        if filter_criteria:
            result = result.find(filter_criteria)

        return result.count()

    @classmethod
    def _create_emission_keys(cls, ngram_type):
        # Build the JS object-literal body used as the map-reduce emit key:
        # one value/category pair per ngram item.
        emission_keys = ''
        for i, ngram_type_item in enumerate(ngram_type):
            emission_keys += "emission_key_val{}:this.item_{}.word.{}.value, ".format(i, i, ngram_type_item)
            emission_keys += "emission_key_cat{}:this.item_{}.word.{}.syntactic_category, ".format(i, i, ngram_type_item)

        # will be something like
        #emission_key_val0:this.item_0.word.surface.value, emission_key_cat0:this.item_0.word.surface.syntactic_category
        #emission_key_val1:this.item_1.word.surface.value, emission_key_cat1:this.item_1.word.surface.syntactic_category
        #emission_key_val2:this.item_2.word.stem.value, emission_key_cat2:this.item_2.word.stem.syntactic_category

        return emission_keys
|
{
"content_hash": "61820bf788137577156527ee8c79e2b5",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 124,
"avg_line_length": 34.87162162162162,
"alnum_prop": 0.6138345281922108,
"repo_name": "aliok/trnltk",
"id": "8250c65ee266ab643415dd2d530ccbe25b4dd2b2",
"size": "5161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trnltk/morphology/contextful/likelihoodmetrics/hidden/ngramtypefrequencyfinder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "60232"
},
{
"name": "Python",
"bytes": "1320401"
},
{
"name": "Shell",
"bytes": "2191"
}
],
"symlink_target": ""
}
|
import time
from seesaw.item import Item
def install_stdout_extension(control):
    '''
    Each item has a log output, and we want to be able to broadcast that in
    the ArchiveBot Dashboard. This extension overrides an item's logger to
    shove a log message into ArchiveBot's Redis instance for broadcast.
    '''
    original_log_output = Item.log_output

    def log_and_broadcast(self, data, full_line=True):
        # Preserve seesaw's normal logging behaviour first.
        original_log_output(self, data, full_line)

        # seesaw's interface accepts data as str or byte
        if isinstance(data, bytes):
            text = data.decode('utf8', 'replace')
        else:
            text = data

        if 'ident' in self and 'log_key' in self:
            packet = {
                'type': 'stdout',
                'ts': int(time.time()),
                'message': text
            }
            control.log(packet, self['ident'], self['log_key'])

    Item.log_output = log_and_broadcast
# vim:ts=4:sw=4:et:tw=78
|
{
"content_hash": "9474fdbe741a7bf5c8f146ac8ed29e2a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 27.37837837837838,
"alnum_prop": 0.5715695952615992,
"repo_name": "Frogging101/ArchiveBot",
"id": "b6576dc873b9caec0ae2add3f8db1818eda46eda",
"size": "1013",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pipeline/archivebot/seesaw/extensions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "927"
},
{
"name": "HTML",
"bytes": "74835"
},
{
"name": "Haxe",
"bytes": "16694"
},
{
"name": "JavaScript",
"bytes": "24102"
},
{
"name": "Makefile",
"bytes": "79"
},
{
"name": "Python",
"bytes": "113219"
},
{
"name": "Ruby",
"bytes": "104126"
},
{
"name": "Shell",
"bytes": "1350"
}
],
"symlink_target": ""
}
|
import threading
from SmartMeshSDK import ApiException
class IpMgrSubscribe(object):
'''
\brief Notification listener for IpMgrConnectorMux object
'''
    class SubscribeError(Exception) :
        '''
        \brief Raised when a subscription operation fails; str() yields the
               stored message.
        '''
        def __init__(self, msg) :
            self.msg = msg
        def __str__(self):
            return self.msg
ERROR = "error"
FINISH = "finish"
NOTIFEVENT = "notifEvent"
NOTIFLOG = "notifLog"
NOTIFDATA = "notifData"
NOTIFIPDATA = "notifIpData"
NOTIFHEALTHREPORT = "notifHealthReport"
ALLNOTIF = [NOTIFEVENT, NOTIFLOG, NOTIFDATA, NOTIFIPDATA, NOTIFHEALTHREPORT]
EVENTMOTERESET = "eventMoteReset"
EVENTNETWORKRESET = "eventNetworkReset"
EVENTCOMMANDFINISHED = "eventCommandFinished"
EVENTMOTEJOIN = "eventMoteJoin"
EVENTMOTEOPERATIONAL = "eventMoteOperational"
EVENTMOTELOST = "eventMoteLost"
EVENTNETWORKTIME = "eventNetworkTime"
EVENTPINGRESPONSE = "eventPingResponse"
EVENTPATHCREATE = "eventPathCreate"
EVENTPATHDELETE = "eventPathDelete"
EVENTPACKETSENT = "eventPacketSent"
EVENTMOTECREATE = "eventMoteCreate"
EVENTMOTEDELETE = "eventMoteDelete"
_trNotifNameTable = {
"eventMoteReset" : "notifEvent",
"eventNetworkReset" : "notifEvent",
"eventCommandFinished" : "notifEvent",
"eventMoteJoin" : "notifEvent",
"eventMoteOperational" : "notifEvent",
"eventMoteLost" : "notifEvent",
"eventNetworkTime" : "notifEvent",
"eventPingResponse" : "notifEvent",
"eventPathCreate" : "notifEvent",
"eventPathDelete" : "notifEvent",
"eventPacketSent" : "notifEvent",
"eventMoteCreate" : "notifEvent",
"eventMoteDelete" : "notifEvent",
}
#======================== public ==========================================
    def __init__(self, ipMgrConnector) :
        '''
        \brief Constructor. Stores the connector and resets all
               subscription state; start() must be called before
               subscribing.
        '''
        # Structure of self._callback :
        #    Notification Name :
        #       [0] - subscription mask mask,
        #       [1] - cb-function. Notification is subscribed if [1]!=None,
        #       [2] - transport for notification: True - reliable, false - unreliable
        self._callback = {
            self.ERROR             : [0x00, None, True],
            self.FINISH            : [0x00, None, True],
            self.NOTIFEVENT        : [0x02, None, True],
            self.NOTIFLOG          : [0x04, None, True],
            self.NOTIFDATA         : [0x10, None, True],
            self.NOTIFIPDATA       : [0x20, None, True],
            self.NOTIFHEALTHREPORT : [0x40, None, True],
        }
        self._con        = ipMgrConnector
        self._thread     = None
        # Combined subscription masks (all channels / unreliable-only).
        self._mask       = self._unrlblMask = 0
        self._isStarted  = False
        self._lock       = threading.Lock()
    def start(self):
        '''
        \brief Start the subscriber _thread.

        Resets all callbacks/masks, then launches the _process worker
        thread. Raises ApiException.ConnectionError if a previous
        subscriber thread is still alive after a 1s join.
        '''
        if self._thread :   # Wait finish disconnect process
            try :
                self._thread.join(1.0)
                if self._thread.isAlive() :
                    raise ApiException.ConnectionError("Already connected")
            except RuntimeError :
                pass    # Ignore join error
            self._thread = None
        # Clear _callback table
        for i in self._callback :
            self._callback[i][1] = None
            self._callback[i][2] = True
        self._mask = self._unrlblMask = 0
        self._thread = threading.Thread(target = self._process)
        self._thread.name = "IpMgrSubscribe"
        self._thread.start()
        self._isStarted = True
def subscribe(self, notifTypes, fun, isRlbl):
    '''
    \brief Subscribe to notification(s).

    Calling this function multiple times will not cancel the effects of
    the previous calls.

    \pre Call start() before calling this function.

    \param notifTypes Type(s) of notification(s) to subscribe to. This can
        be a single string (when subscribing to a single notification), or
        a list of strings (when subscribing to multiple notifications).
        The list of possible types is:
        ERROR, FINISH, NOTIFEVENT, NOTIFLOG, NOTIFDATA, NOTIFIPDATA, NOTIFHEALTHREPORT, ALLNOTIF
    \param fun The function to call when any of the notification types
        specified in the notifTypes parameter occurs. If you wish to assign
        a different _callback function to different notification types,
        call this function multiple times. The signature of the function
        needs to be fun(<notification name>, <notification parameter>),
        as described below.
    \param isRlbl define type of transport using for delivery
        notification: reliable (True) or best effort (False)

    The _callback function is called with a notification name and a
    notification parameter. Depending on the type of notification, the
    parameter will be of a different format, according to the table below.

    <table>
    <tr><th>Notification Name  </th><th>Parameter</th>
    <tr><td>ERROR              </td><td>Exception</td>
    <tr><td>FINISH             </td><td>''</td>
    <tr><td>NOTIFLOG           </td><td>Tuple_notifLog</td>
    <tr><td>NOTIFDATA          </td><td>Tuple_notifData</td>
    <tr><td>NOTIFIPDATA        </td><td>Tuple_notifIpData</td>
    <tr><td>NOTIFHEALTHREPORT  </td><td>Tuple_notifHealthReport</td>
    <tr><td>EVENTMOTERESET     </td><td>Tuple_eventMoteReset</td>
    <tr><td>EVENTNETWORKRESET  </td><td>Tuple_eventNetworkReset</td>
    <tr><td>EVENTCOMMANDFINISHED</td><td>Tuple_eventCommandFinished</td>
    <tr><td>EVENTMOTEJOIN      </td><td>Tuple_eventMoteJoin</td>
    <tr><td>EVENTMOTEOPERATIONAL</td><td>Tuple_eventMoteOperational</td>
    <tr><td>EVENTMOTELOST      </td><td>Tuple_eventMoteLost</td>
    <tr><td>EVENTNETWORKTIME   </td><td>Tuple_eventNetworkTime</td>
    <tr><td>EVENTPINGRESPONSE  </td><td>Tuple_eventPingResponse</td>
    <tr><td>EVENTPATHCREATE    </td><td>Tuple_eventPathCreate</td>
    <tr><td>EVENTPATHDELETE    </td><td>Tuple_eventPathDelete</td>
    <tr><td>EVENTPACKETSENT    </td><td>Tuple_eventPacketSent</td>
    <tr><td>EVENTMOTECREATE    </td><td>Tuple_eventMoteCreate</td>
    <tr><td>EVENTMOTEDELETE    </td><td>Tuple_eventMoteDelete</td>
    </table>

    \exception IpMgrSubscribe.SubscribeError The subscriber hasn't been
        started, or the notification type(s) specified is (are) not valid.
    '''
    if not self._isStarted :
        raise self.SubscribeError("Error: subscriber is not started")
    if isinstance(notifTypes, str) :
        notifTypes = [notifTypes]
    for nType in notifTypes :   # validate all requested types before mutating state
        if nType not in self._callback :
            raise self.SubscribeError("Error subscribe type: {0}".format(nType))
    # Fix: use the lock as a context manager so it is released even if an
    # exception occurs (the manual acquire()/release() pair could leak it).
    with self._lock :
        for nType in notifTypes :
            self._callback[nType][1] = fun
            self._callback[nType][2] = isRlbl
    mask = unrlblMask = 0
    # Structure of self._callback.values() :
    #    [0] - subscription mask mask,
    #    [1] - cb-function. Notification is subscribed if [1]!=None,
    #    [2] - transport for notification: True - reliable, false - unreliable
    for cb in self._callback.values() :
        if cb[1] :
            mask = mask | cb[0]
            if cb[2] == False :
                unrlblMask = unrlblMask | cb[0]
    # Only push new masks to the manager when something actually changed.
    if mask != self._mask or unrlblMask != self._unrlblMask :
        self._mask = mask
        self._unrlblMask = unrlblMask
        self._con.dn_subscribe([0,self._mask], [0,self._unrlblMask])
#======================== private =========================================
def _process(self):
    # Notification pump: runs in the subscriber thread started by start().
    # Blocks on the connector's notification queue and dispatches each
    # notification to its registered callback until the queue is torn down.
    while True :
        try :
            notif = self._con.getNotification()
            name = notif[0]
            # Collapse fine-grained event names (e.g. "eventMoteJoin") into
            # the single "notifEvent" callback slot; the original event name
            # is still passed to the callback as notifName.
            if name in self._trNotifNameTable :
                name = self._trNotifNameTable[name]
            self._processOneNotif(name, notif[0], notif[1])
        except ApiException.QueueError:
            # Queue was closed: notify FINISH subscribers and exit the thread.
            self._processOneNotif(self.FINISH, self.FINISH, '')
            self._isStarted = False
            break
        except Exception as ex :
            # Any other failure is forwarded to the ERROR callback; keep looping.
            self._processOneNotif(self.ERROR, self.ERROR, ex)
def _processOneNotif(self, notifType, notifName, payload):
    """Dispatch a single notification to the callback registered for
    *notifType*; silently drop it when no callback is registered."""
    handler = self._getCallback(notifType)
    if handler :
        handler(notifName, payload)
def _getCallback(self, name) :
    """Thread-safely look up the callback registered under *name*.

    Returns the callback function, or None when *name* is unknown or
    has no subscriber.
    """
    with self._lock :
        entry = self._callback.get(name)
        return entry[1] if entry is not None else None
|
{
"content_hash": "388abc12702d4197d4b7f93aeb5f0956",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 100,
"avg_line_length": 43.32258064516129,
"alnum_prop": 0.5480268056589724,
"repo_name": "dustcloud/dustlink",
"id": "d888250155bcbe6d67fbe3b42d6f8b363d0a0f6a",
"size": "9543",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "SmartMeshSDK/IpMgrConnectorMux/IpMgrSubscribe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "103133"
},
{
"name": "JavaScript",
"bytes": "175666"
},
{
"name": "Python",
"bytes": "2231651"
}
],
"symlink_target": ""
}
|
from runner.koan import *
def my_global_function(a,b):
    """Combine two values with '+'; a module-level function used by the koans."""
    total = a + b
    return total
class AboutMethods(Koan):
    """Koan exercises demonstrating how Python functions and methods work:
    argument counts, defaults, *args, shadowing, redefinition, docstrings,
    and (pseudo-)private access via underscore prefixes and name mangling."""

    def test_calling_a_global_function(self):
        self.assertEqual(5, my_global_function(2,3))

    # NOTE: Wrong number of arguments is not a SYNTAX error, but a
    # runtime error.
    def test_calling_functions_with_wrong_number_of_arguments(self):
        try:
            my_global_function()
        except TypeError as exception:
            msg = exception.args[0]

        # Note, the text comparison works for Python 3.2
        # It has changed in the past and may change in the future
        self.assertRegexpMatches(msg,
            r'my_global_function\(\) missing 2 required positional arguments')

        try:
            my_global_function(1, 2, 3)
        except Exception as e:
            msg = e.args[0]

        # Note, watch out for parenthesis. They need slashes in front!
        self.assertRegexpMatches(msg, "takes 2 positional arguments but 3 were given")

    # ------------------------------------------------------------------

    def pointless_method(self, a, b):
        sum = a + b
        return sum

    def test_which_does_not_return_anything(self):
        self.assertEqual(3, self.pointless_method(1, 2))
        # Notice that methods accessed from class scope do not require
        # you to pass the first "self" argument?

    # ------------------------------------------------------------------

    def method_with_defaults(self, a, b='default_value'):
        return [a, b]

    def test_calling_with_default_values(self):
        self.assertEqual([1, 'default_value'], self.method_with_defaults(1))
        self.assertEqual([1,2], self.method_with_defaults(1, 2))

    # ------------------------------------------------------------------

    def method_with_var_args(self, *args):
        return args

    def test_calling_with_variable_arguments(self):
        self.assertEqual(tuple(), self.method_with_var_args())
        self.assertEqual(('one',), self.method_with_var_args('one'))
        self.assertEqual(('one','two'), self.method_with_var_args('one', 'two'))

    # ------------------------------------------------------------------

    def function_with_the_same_name(self, a, b):
        return a + b

    def test_functions_without_self_arg_are_global_functions(self):
        def function_with_the_same_name(a, b):
            return a * b

        self.assertEqual(12, function_with_the_same_name(3,4))

    def test_calling_methods_in_same_class_with_explicit_receiver(self):
        def function_with_the_same_name(a, b):
            return a * b

        self.assertEqual(7, self.function_with_the_same_name(3,4))

    # ------------------------------------------------------------------

    def another_method_with_the_same_name(self):
        return 10

    # Keep a reference to the first definition before it is shadowed below.
    link_to_overlapped_method = another_method_with_the_same_name

    def another_method_with_the_same_name(self):
        return 42

    def test_that_old_methods_are_hidden_by_redefinitions(self):
        self.assertEqual(42, self.another_method_with_the_same_name())

    def test_that_overlapped_method_is_still_there(self):
        self.assertEqual(10, self.link_to_overlapped_method())

    # ------------------------------------------------------------------

    def empty_method(self):
        pass

    def test_methods_that_do_nothing_need_to_use_pass_as_a_filler(self):
        self.assertEqual(None, self.empty_method())

    def test_pass_does_nothing_at_all(self):
        "You"
        "shall"
        "not"
        pass
        self.assertEqual(True, "Still got to this line" != None)

    # ------------------------------------------------------------------

    def one_line_method(self): return 'Madagascar'

    def test_no_indentation_required_for_one_line_statement_bodies(self):
        self.assertEqual("Madagascar", self.one_line_method())

    # ------------------------------------------------------------------

    def method_with_documentation(self):
        "A string placed at the beginning of a function is used for documentation"
        return "ok"

    def test_the_documentation_can_be_viewed_with_the_doc_method(self):
        self.assertRegexpMatches(self.method_with_documentation.__doc__,
            "A string placed at the beginning of a function is used for documentation"
        )

    # ------------------------------------------------------------------

    class Dog:
        def name(self):
            return "Fido"

        def _tail(self):
            # Prefixing a method with an underscore implies private scope
            return "wagging"

        def __password(self):
            return 'password' # Genius!

    def test_calling_methods_in_other_objects(self):
        rover = self.Dog()
        self.assertEqual("Fido", rover.name())

    def test_private_access_is_implied_but_not_enforced(self):
        rover = self.Dog()

        # This is a little rude, but legal
        self.assertEqual("wagging", rover._tail())

    def test_attributes_with_double_underscore_prefixes_are_subject_to_name_mangling(self):
        rover = self.Dog()
        # Mangled to _AboutMethods__password here, so this lookup fails.
        with self.assertRaises(AttributeError): password = rover.__password()

        # But this still is!
        self.assertEqual("password", rover._Dog__password())

        # Name mangling exists to avoid name clash issues when subclassing.
        # It is not for providing effective access protection
|
{
"content_hash": "8baf6d36de0008a9ab374b07da135436",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 91,
"avg_line_length": 34.22012578616352,
"alnum_prop": 0.5611100900569749,
"repo_name": "taw/python_koans",
"id": "778f93489dd91aa0f618fdb194c29d0ff26f3e2e",
"size": "5545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/koans/about_methods.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1633"
},
{
"name": "Python",
"bytes": "332158"
},
{
"name": "Shell",
"bytes": "167"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import weakref
from defcon.objects.base import BaseDictCompareObject
from defcon.objects.color import Color
from defcon.tools.identifiers import makeRandomIdentifier
class Anchor(BaseDictCompareObject):

    """
    This object represents an anchor point.

    **This object posts the following notifications:**

    - Anchor.Changed
    - Anchor.XChanged
    - Anchor.YChanged
    - Anchor.NameChanged
    - Anchor.ColorChanged
    - Anchor.IdentifierChanged

    During initialization an anchor dictionary can be passed. If so,
    the new object will be populated with the data from the dictionary.
    """

    changeNotificationName = "Anchor.Changed"
    representationFactories = {}

    def __init__(self, glyph=None, anchorDict=None):
        # Weakref caches for the parent chain; populated lazily by the
        # font/layerSet/layer properties below.
        self._font = None
        self._layerSet = None
        self._layer = None
        self._glyph = None
        self.glyph = glyph
        super(Anchor, self).__init__()
        self.beginSelfNotificationObservation()
        self._dirty = False
        if anchorDict is not None:
            self.x = anchorDict.get("x")
            self.y = anchorDict.get("y")
            self.name = anchorDict.get("name")
            self.color = anchorDict.get("color")
            self.identifier = anchorDict.get("identifier")

    # parents

    def getParent(self):
        # Legacy parent accessor: the parent of an anchor is its glyph.
        return self.glyph

    def _get_font(self):
        font = None
        if self._font is None:
            glyph = self.glyph
            if glyph is not None:
                font = glyph.font
            if font is not None:
                self._font = weakref.ref(font)
        else:
            font = self._font()
        return font

    font = property(_get_font, doc="The :class:`Font` that this anchor belongs to.")

    def _get_layerSet(self):
        layerSet = None
        if self._layerSet is None:
            glyph = self.glyph
            if glyph is not None:
                layerSet = glyph.layerSet
            if layerSet is not None:
                self._layerSet = weakref.ref(layerSet)
        else:
            layerSet = self._layerSet()
        return layerSet

    layerSet = property(_get_layerSet, doc="The :class:`LayerSet` that this anchor belongs to.")

    def _get_layer(self):
        layer = None
        if self._layer is None:
            glyph = self.glyph
            if glyph is not None:
                layer = glyph.layer
            if layer is not None:
                self._layer = weakref.ref(layer)
        else:
            layer = self._layer()
        return layer

    layer = property(_get_layer, doc="The :class:`Layer` that this anchor belongs to.")

    def _get_glyph(self):
        if self._glyph is None:
            return None
        return self._glyph()

    def _set_glyph(self, glyph):
        # The glyph may only be assigned once; reassignment is a programming error.
        assert self._glyph is None
        if glyph is not None:
            glyph = weakref.ref(glyph)
        # Invalidate the cached parent chain so it is re-resolved lazily.
        self._font = None
        self._layerSet = None
        self._layer = None
        self._glyph = glyph

    glyph = property(_get_glyph, _set_glyph, doc="The :class:`Glyph` that this anchor belongs to. This should not be set externally.")

    # coordinates

    def _get_x(self):
        return self.get("x")

    def _set_x(self, value):
        old = self.get("x")
        if value == old:
            return
        self["x"] = value
        self.postNotification("Anchor.XChanged", data=dict(oldValue=old, newValue=value))

    x = property(_get_x, _set_x, doc="The x coordinate. Setting this will post *Anchor.XChanged* and *Anchor.Changed* notifications.")

    def _get_y(self):
        return self.get("y")

    def _set_y(self, value):
        old = self.get("y")
        if value == old:
            return
        self["y"] = value
        self.postNotification("Anchor.YChanged", data=dict(oldValue=old, newValue=value))

    y = property(_get_y, _set_y, doc="The y coordinate. Setting this will post *Anchor.YChanged* and *Anchor.Changed* notifications.")

    # name

    def _get_name(self):
        return self.get("name")

    def _set_name(self, value):
        old = self.get("name")
        if value == old:
            return
        self["name"] = value
        self.postNotification("Anchor.NameChanged", data=dict(oldValue=old, newValue=value))

    name = property(_get_name, _set_name, doc="The name. Setting this will post *Anchor.NameChanged* and *Anchor.Changed* notifications.")

    # color

    def _get_color(self):
        return self.get("color")

    def _set_color(self, color):
        # Normalize any accepted input form (string, sequence, Color) to Color.
        if color is None:
            newColor = None
        else:
            newColor = Color(color)
        oldColor = self.get("color")
        if newColor == oldColor:
            return
        self["color"] = newColor
        self.postNotification("Anchor.ColorChanged", data=dict(oldValue=oldColor, newValue=newColor))

    color = property(_get_color, _set_color, doc="The anchors's :class:`Color` object. When setting, the value can be a UFO color string, a sequence of (r, g, b, a) or a :class:`Color` object. Setting this posts *Anchor.ColorChanged* and *Anchor.Changed* notifications.")

    # identifier

    def _get_identifiers(self):
        identifiers = None
        glyph = self.glyph
        if glyph is not None:
            identifiers = glyph.identifiers
        if identifiers is None:
            identifiers = set()
        return identifiers

    identifiers = property(_get_identifiers, doc="Set of identifiers for the glyph that this anchor belongs to. This is primarily for internal use.")

    def _get_identifier(self):
        return self.get("identifier")

    def _set_identifier(self, value):
        # don't allow overwriting an existing identifier
        if self.identifier is not None:
            return
        oldIdentifier = self.identifier
        if value == oldIdentifier:
            return
        # don't allow a duplicate
        identifiers = self.identifiers
        assert value not in identifiers
        # free the old identifier
        if oldIdentifier in identifiers:
            identifiers.remove(oldIdentifier)
        # store
        self["identifier"] = value
        if value is not None:
            identifiers.add(value)
        # post notifications
        self.postNotification("Anchor.IdentifierChanged", data=dict(oldValue=oldIdentifier, newValue=value))

    identifier = property(_get_identifier, _set_identifier, doc="The identifier. Setting this will post *Anchor.IdentifierChanged* and *Anchor.Changed* notifications.")

    def generateIdentifier(self):
        """
        Create a new, unique identifier for and assign it to the anchor.
        This will post *Anchor.IdentifierChanged* and *Anchor.Changed* notifications.
        """
        if self.identifier is None:
            identifier = makeRandomIdentifier(existing=self.identifiers)
            self.identifier = identifier
        return self.identifier

    # ----
    # Move
    # ----

    def move(self, values):
        """
        Move the anchor by **(x, y)**.
        This will post *Anchor.XChanged*, *Anchor.YChanged* and *Anchor.Changed* notifications if anything changed.
        """
        (x, y) = values
        self.x += x
        self.y += y

    # ------------------------
    # Notification Observation
    # ------------------------

    def endSelfNotificationObservation(self):
        super(Anchor, self).endSelfNotificationObservation()
        # Drop all parent references so the object can be garbage collected.
        self._font = None
        self._layerSet = None
        self._layer = None
        self._glyph = None
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
{
"content_hash": "640ff70513fb107771fa0871ba762041",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 271,
"avg_line_length": 31.522633744855966,
"alnum_prop": 0.5955613577023499,
"repo_name": "moyogo/defcon",
"id": "d37accd2a33c71bb83bb3ae0eb01dd8192e82a38",
"size": "7660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/defcon/objects/anchor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "629945"
}
],
"symlink_target": ""
}
|
from .VectorDifferenceQuantity import VectorDifferenceQuantity
from ..kinvarbuilder import CachingFunction, IllegalArgumentTypes
@CachingFunction
class MeanEta(VectorDifferenceQuantity):
    """ average pseudorapidity of two or more vectors, as a generalization
    of the mean pseudorapidity proposed by Zeppenfeld et. al. in hep-ph/9605444"""

    def __init__(self, *vectors):
        """Store the input vectors; each must be a four-vector, since
        pseudorapidity is undefined otherwise."""
        self.vectors = vectors
        for vector in self.vectors:
            if not vector.isFourVector():
                raise IllegalArgumentTypes()

    def getValue(self):
        """Return the arithmetic mean of the vectors' Eta() values, or None
        if any underlying vector value is unavailable."""
        vecValues = [ vec.getValue() for vec in self.vectors ]
        for vecVal in vecValues:
            # Fix: identity check with 'is None' rather than '== None' —
            # '==' is non-idiomatic and can be overridden by vector classes.
            if vecVal is None:
                return None
        sumEta = sum(vec.Eta() for vec in vecValues)
        return sumEta / float(len(vecValues))

    @staticmethod
    def getNumArguments(maxNumArguments):
        """MeanEta accepts any number of vectors from 2 up to maxNumArguments."""
        return range(2, maxNumArguments + 1)

    #----------------------------------------

    def getParents(self):
        # The input vectors are this quantity's dependency parents.
        return self.vectors

    #----------------------------------------

    def __str__(self):
        return "MeanEta(" + ",".join([ str(x) for x in self.vectors]) + ")"

    #----------------------------------------
|
{
"content_hash": "fb75d02a90f534f3f81da2267cee5642",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 82,
"avg_line_length": 32.13157894736842,
"alnum_prop": 0.5749385749385749,
"repo_name": "kinvarbuilder/kinvarbuilder",
"id": "c96a3336d40c596f18f3f17ad5a0096c4a0e3fd2",
"size": "1927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kinvarbuilder/functions/MeanEta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "69188"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.cdn import CdnManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cdn
# USAGE
python log_analytics_get_log_analytics_resources.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch and print Log Analytics resources for CDN profile 'profile1'
    in resource group 'RG' using the default Azure credential chain."""
    cdn_client = CdnManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )
    resources = cdn_client.log_analytics.get_log_analytics_resources(
        resource_group_name="RG",
        profile_name="profile1",
    )
    print(resources)
# x-ms-original-file: specification/cdn/resource-manager/Microsoft.Cdn/stable/2021-06-01/examples/LogAnalytics_GetLogAnalyticsResources.json
# Run the sample only when executed as a script.
if __name__ == "__main__":
    main()
|
{
"content_hash": "a753057e782c30f9ed6ba21cf417c093",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 140,
"avg_line_length": 32.81818181818182,
"alnum_prop": 0.7285318559556787,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c71e49b2be1718eeefb3f5e7e26a6f13de25e43b",
"size": "1551",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cdn/azure-mgmt-cdn/generated_samples/log_analytics_get_log_analytics_resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import yaml
from models import Session
from interface import Artifice
default_config = "/etc/artifice/config.yaml"
def connect(config=None):
if config is None:
try:
fh = open(default_config)
except IOError:
print "Can't open default config!"
raise
config = yaml.load( fh.read() )
# conn_string = 'postgresql://%(username)s:%(password)s@%(host)s:%(port)s/%(database)s' % {
# "username": config["database"]["username"],
# "password": config["database"]["password"],
# "host": config["database"]["host"],
# "port": config["database"]["port"],
# "database": config["database"]["database"]
# }
# engine = create_engine(conn_string)
# session.configure(bind=engine)
artifice = Artifice(config)
# artifice.artifice = session
return artifice
|
{
"content_hash": "1106a9be502dce930b56fabd6ccda33e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 95,
"avg_line_length": 33.65384615384615,
"alnum_prop": 0.592,
"repo_name": "aurynn/openstack-artifice",
"id": "f937fd2b10377d51a1ab3a0d563593d04e57d16d",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artifice/artifice.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1105"
},
{
"name": "Python",
"bytes": "72812"
},
{
"name": "Shell",
"bytes": "2105"
}
],
"symlink_target": ""
}
|
from decimal import Decimal
from .choices import WirelessChannelChoices
__all__ = (
'get_channel_attr',
)
def get_channel_attr(channel, attr):
    """
    Return the specified attribute of a given WirelessChannelChoices value.
    """
    if channel not in WirelessChannelChoices.values():
        raise ValueError(f"Invalid channel value: {channel}")

    # Channel values encode "band-id-frequency-width", separated by dashes.
    parts = channel.split('-')
    if attr == 'band':
        return parts[0]
    if attr == 'id':
        return int(parts[1])
    if attr == 'frequency':
        return Decimal(parts[2])
    if attr == 'width':
        return Decimal(parts[3])
    raise ValueError(f"Invalid channel attribute: {attr}")
|
{
"content_hash": "101a6cec63082fb5a117987971543f93",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 25.962962962962962,
"alnum_prop": 0.6433666191155493,
"repo_name": "digitalocean/netbox",
"id": "d98d6a853a0a1921b433a5152172f74154a88bd3",
"size": "701",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/wireless/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815170"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
}
|
import nltk
from nltk.corpus import stopwords
class NounPhraseExtractor:
    """Extracts noun phrases from free text with an NLTK regexp chunker.

    Phrases containing named entities ("EP" chunks) are returned before
    plain noun phrases ("NP" chunks). A small corpus is loaded from the
    data/*.dat files for sentence lookup via get_sentence().
    """

    def __init__(self):
        self.lemmatizer = nltk.WordNetLemmatizer()
        # Fix: removed the redundant chained assignment
        # (`self.grammar = grammar = r"""...`) — the extra local served no purpose.
        self.grammar = r"""
            NOUN:
                {<VBG|NN.*|JJ|>*<NN.*>} # Nouns and Adjectives, terminated with Nouns
            ENTITY:
                {<VBG|NN|NE.*|JJ|>*<NE.*>}
                {<NE.*><VBG|NN|NE|JJ|>*<NN.*>}
            EP:
                {<NOUN|ENTITY><IN>?<ENTITY>}
                {<ENTITY><IN>?<NOUN|ENTITY>}
                {<ENTITY>}
            NP:
                {<NOUN>}
                {<NOUN><IN>?<NOUN>} # Above, connected with in/of/etc...
            """
        self.chunker = nltk.RegexpParser(self.grammar)
        self.stopwords = stopwords.words('english')
        self.corpus = []
        # Each corpus line is "<something>~~<sentence>"; keep the sentence part.
        for resource in ["data/astronomy.dat", "data/computerscience.dat", "data/physics.dat"]:
            with open(resource) as f:
                for l in f:
                    splt = l.strip().split("~~")
                    if len(splt) > 1:
                        # Fix: reuse the already-split parts instead of
                        # stripping and splitting the same line a second time.
                        self.corpus.append(splt[1])

    def get_noun_phrases(self, text):
        """
        Extract noun phrases from text, prioritizing the ones that contain named entities.
        :param text: The text to extract noun phrases from.
        :return: A list of noun phrases sorted by the ones that contain named entities first
        """
        tokens = nltk.word_tokenize(text)
        pos_tokens = nltk.pos_tag(tokens)
        ne_tokens = nltk.ne_chunk(pos_tokens, binary=True)
        tree = self.chunker.parse(ne_tokens)
        # Fix: removed a leftover debug print() that polluted stdout on every call.
        return self.get_terms(tree)

    def named_leaves(self, tree):
        """Finds EP (entity phrase) leaf nodes of a chunk tree."""
        for subtree in tree.subtrees(filter=lambda t: t.label() == 'EP'):
            yield subtree.leaves()

    def leaves(self, tree):
        """Finds NP (nounphrase) leaf nodes of a chunk tree."""
        for subtree in tree.subtrees(filter=lambda t: t.label() == 'NP'):
            yield subtree.leaves()

    def normalize(self, word):
        """Normalises words to lowercase and lemmatizes it."""
        word = word.lower()
        word = self.lemmatizer.lemmatize(word)
        return word

    def acceptable_word(self, word):
        """Checks conditions for acceptable word: length, stopword."""
        return 2 <= len(word) <= 40 and word.lower() not in self.stopwords

    def get_terms(self, tree):
        """Collect normalized phrases: entity phrases (EP) first, then NPs.
        Prepositions (IN tags) are kept even when they fail the word filter."""
        terms = []
        for leaf in self.named_leaves(tree):
            term = " ".join([self.normalize(w) for w, t in leaf if self.acceptable_word(w) or t == "IN"])
            terms.append(term)
        for leaf in self.leaves(tree):
            term = " ".join([self.normalize(w) for w, t in leaf if self.acceptable_word(w) or t == "IN"])
            terms.append(term)
        return terms

    def get_sentence(self, phrase):
        """Return every corpus sentence containing *phrase*, case-insensitively."""
        needle = phrase.lower()  # hoisted out of the loop
        return [sent for sent in self.corpus if needle in sent.lower()]
from unittest import TestCase
class TestStuff(TestCase):
    # NOTE(review): these tests require the NLTK data packages (punkt,
    # averaged_perceptron_tagger, wordnet, stopwords, maxent_ne_chunker)
    # and the data/*.dat corpus files to be present — confirm before running.

    def setUp(self):
        self.npe = NounPhraseExtractor()

    def testPhrases(self):
        # Expect the entity-bearing phrase first, then the plain noun phrase.
        terms = self.npe.get_noun_phrases("I am from the capital of California and I like to write in programming languages")
        self.assertTrue("capital of california" == terms[0])
        self.assertTrue("programming language" == terms[1])

        # Order of mention must not matter: entity phrases always come first.
        terms = self.npe.get_noun_phrases("I like to write in programming languages and I am from the capital of California")
        self.assertTrue("capital of california" == terms[0])
        self.assertTrue("programming language" == terms[1])

        terms = self.npe.get_noun_phrases("I like to write in programming languages and I am from the city of Los Angeles")
        self.assertTrue("city of los angeles" == terms[0])
        self.assertTrue("programming language" == terms[1])

        terms = self.npe.get_noun_phrases("I like to write in programming languages and I am from Mexico city")
        self.assertTrue("mexico city" == terms[0])
        self.assertTrue("programming language" == terms[1])

    def testSentences(self):
        terms = self.npe.get_noun_phrases("I am from the capital of California and I like to write in programming languages")
        print(self.npe.get_sentence(terms[1]))
|
{
"content_hash": "702058b9ce30960ced655f3681a92eef",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 125,
"avg_line_length": 37.469565217391306,
"alnum_prop": 0.599443026224182,
"repo_name": "ipachev/irc_chatbot",
"id": "4cd21c075e386cbf712b8eae44fefa64b49fe742",
"size": "4309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lang.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21201"
},
{
"name": "Shell",
"bytes": "55"
}
],
"symlink_target": ""
}
|
"""
==================================
Regularized OT with generic solver
==================================
Illustrates the use of the generic solver for regularized OT with
user-designed regularization term. It uses Conditional gradient as in [6] and
generalized Conditional Gradient as proposed in [5][7].
[5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, Optimal Transport for
Domain Adaptation, in IEEE Transactions on Pattern Analysis and Machine
Intelligence , vol.PP, no.99, pp.1-1.
[6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014).
Regularized discrete optimal transport. SIAM Journal on Imaging Sciences,
7(3), 1853-1882.
[7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized
conditional gradient: analysis of convergence and applications.
arXiv preprint arXiv:1510.06567.
"""
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
##############################################################################
# Generate data
# -------------
#%% parameters

n = 100  # nb bins

# bin positions
x = np.arange(n, dtype=np.float64)

# Gaussian distributions
a = ot.datasets.make_1D_gauss(n, m=20, s=5)  # m= mean, s= std
b = ot.datasets.make_1D_gauss(n, m=60, s=10)

# loss matrix (squared Euclidean distance between bin positions, rescaled to [0, 1])
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()

##############################################################################
# Solve EMD
# ---------

#%% EMD

G0 = ot.emd(a, b, M)

pl.figure(3, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0')

##############################################################################
# Solve EMD with Frobenius norm regularization
# --------------------------------------------

#%% Example with Frobenius norm regularization

# NOTE: f/df are intentionally redefined in each example section below;
# each pair is the regularizer and its gradient for that solver call.


def f(G):
    return 0.5 * np.sum(G**2)


def df(G):
    return G


reg = 1e-1

Gl2 = ot.optim.cg(a, b, M, reg, f, df, verbose=True)

pl.figure(3)
ot.plot.plot1D_mat(a, b, Gl2, 'OT matrix Frob. reg')

##############################################################################
# Solve EMD with entropic regularization
# --------------------------------------

#%% Example with entropic regularization


def f(G):
    return np.sum(G * np.log(G))


def df(G):
    return np.log(G) + 1.


reg = 1e-3

Ge = ot.optim.cg(a, b, M, reg, f, df, verbose=True)

pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Ge, 'OT matrix Entrop. reg')

##############################################################################
# Solve EMD with Frobenius norm + entropic regularization
# -------------------------------------------------------

#%% Example with Frobenius norm + entropic regularization with gcg


def f(G):
    return 0.5 * np.sum(G**2)


def df(G):
    return G


reg1 = 1e-3
reg2 = 1e-1

Gel2 = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True)

pl.figure(5, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gel2, 'OT entropic + matrix Frob. reg')
pl.show()
|
{
"content_hash": "8b421783fcaa58c9db3ffd9d06efe66c",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 78,
"avg_line_length": 22.7890625,
"alnum_prop": 0.5282824820020569,
"repo_name": "rflamary/POT",
"id": "2c58defb4d32e0bdd46816aa476bcc67555fe0c9",
"size": "2942",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/plot_optim_OTreg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2074"
},
{
"name": "Python",
"bytes": "452064"
},
{
"name": "Shell",
"bytes": "366"
}
],
"symlink_target": ""
}
|
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2019, 2020 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Fix: 'CoolPropPhase' was listed twice; deduplicated (same exported set).
__all__ = ['CoolPropPhase', 'CoolPropLiquid', 'CoolPropGas']
import sys
from chemicals.utils import log
from collections import OrderedDict
from thermo.phases.phase import Phase
from thermo.coolprop import has_CoolProp
# Python 3.6+ dicts preserve insertion order, so a plain dict can serve as the
# LRU structure below; older interpreters need collections.OrderedDict.
SORTED_DICT = sys.version_info >= (3, 6)
# Empirically measured to be ~140 KB/instance, do not want to cache too many - 35 is 5 MB
max_CoolProp_states = 35
# NOTE: 'global' at module level is a no-op; these lines only document that the
# names are populated lazily by set_coolprop_constants().
global CoolProp
global CoolProp_constants_set
CoolProp_constants_set = False
def set_coolprop_constants():
    """Import CoolProp on first use and publish its input-pair identifiers,
    parameter indexes, phase sets, and the ``AbstractState`` caching helper as
    module globals.

    Deferred so that importing this module does not require CoolProp to be
    installed; the module-level fallback values defined below are overwritten
    here.
    """
    global CPPT_INPUTS, CPrhoT_INPUTS, CPrhoP_INPUTS, CPiP, CPiT, CPiDmolar, CPiHmolar, CPiSmolar
    global CPPQ_INPUTS, CPQT_INPUTS, CoolProp_gas_phases, CoolProp_liquid_phases
    global CPliquid, CPgas, CPunknown, caching_states_CoolProp, caching_state_CoolProp
    # BUG FIX: 'CoolProp_constants_set' must be declared global in this scope.
    # Previously the assignment below only created a function local, so the
    # module-level flag stayed False and this entire setup -- including
    # re-creating (and thereby emptying) the AbstractState cache -- re-ran on
    # every CoolPropPhase.__init__ call.
    global CoolProp, CoolProp_constants_set
    import CoolProp
    CoolProp_constants_set = True
    # Input-pair identifiers accepted by AbstractState.update()
    CPPT_INPUTS = CoolProp.PT_INPUTS
    CPrhoT_INPUTS = CoolProp.DmolarT_INPUTS
    CPrhoP_INPUTS = CoolProp.DmolarP_INPUTS
    # Parameter indexes used in partial-derivative calls
    CPiP, CPiT, CPiDmolar = CoolProp.iP, CoolProp.iT, CoolProp.iDmolar
    CPiHmolar, CPiSmolar = CoolProp.iHmolar, CoolProp.iSmolar
    CPPQ_INPUTS, CPQT_INPUTS = CoolProp.PQ_INPUTS, CoolProp.QT_INPUTS
    # Phase-index sets used to classify AbstractState.phase() results
    CoolProp_gas_phases = set([CoolProp.iphase_gas, CoolProp.iphase_supercritical, CoolProp.iphase_unknown,
                               CoolProp.iphase_critical_point, CoolProp.iphase_supercritical_gas])
    CoolProp_liquid_phases = set([CoolProp.iphase_liquid, CoolProp.iphase_supercritical_liquid])
    CPliquid = CoolProp.iphase_liquid
    CPgas = CoolProp.iphase_gas
    CPunknown = CoolProp.iphase_not_imposed

    # Probably todo - hold onto ASs for up to 1 sec, then release them for reuse
    # Do not allow Phase direct access any more, use a decorator
    # (an earlier sketch of an AbstractState pool lived here; removed as dead
    # commented-out code -- the LRU cache below supersedes it)

    if not SORTED_DICT:
        # Need insertion order for the LRU eviction below on Python < 3.6
        caching_states_CoolProp = OrderedDict()
    else:
        caching_states_CoolProp = {}

    def caching_state_CoolProp(backend, fluid, spec0, spec1, spec_set, phase, zs):
        """Return a configured, updated ``AbstractState``, reusing a bounded
        LRU cache of instances keyed on the full specification.

        Parameters mirror AbstractState usage: `spec_set` is the input-pair
        identifier, `spec0`/`spec1` its two values, `phase` the imposed phase,
        and `zs` the mole fractions (must be a hashable tuple, or None for a
        pure fluid).
        """
        # Pretty sure about as optimized as can get!
        if type(fluid) is list:
            # Multi-fluid lists are joined into CoolProp's '&' syntax
            fluid = '&'.join(fluid)
        key = (backend, fluid, spec0, spec1, spec_set, phase, zs)
        if key in caching_states_CoolProp:
            # Cache hit: refresh LRU position and fall through to return
            AS = caching_states_CoolProp[key]
            try:
                caching_states_CoolProp.move_to_end(key)
            except:
                # Move to end the old fashioned way (plain dict on py3.6)
                del caching_states_CoolProp[key]
                caching_states_CoolProp[key] = AS
        elif len(caching_states_CoolProp) < max_CoolProp_states:
            # Always make a new item until the cache is full
            AS = CoolProp.AbstractState(backend, fluid)
            AS.specify_phase(phase)
            if zs is not None:
                AS.set_mole_fractions(zs)
            try:
                AS.update(spec_set, spec0, spec1) # A failed call here takes ~400 us.
            except:
                # The best workaround is to impose a different phase with CoolProp
                AS.specify_phase(CPliquid if phase == CPgas else CPgas)
                AS.update(spec_set, spec0, spec1)
            caching_states_CoolProp[key] = AS
            return AS
        else:
            # Cache full: evict the least-recently-used entry and reuse its
            # AbstractState if it matches this backend/fluid pair.
            if not SORTED_DICT:
                old_key, AS = caching_states_CoolProp.popitem(False)
            else:
                # Hack - get first (= oldest-inserted) item in dict
                old_key = next(iter(caching_states_CoolProp))
                AS = caching_states_CoolProp.pop(old_key)
            if old_key[1] != fluid or old_key[0] != backend:
                # Handle different components - the evicted object will be gc'd
                AS = CoolProp.AbstractState(backend, fluid)
            AS.specify_phase(phase)
            if zs is not None:
                AS.set_mole_fractions(zs)
            AS.update(spec_set, spec0, spec1)
            caching_states_CoolProp[key] = AS
        return AS
# Placeholder values so this module can be imported without CoolProp
# installed; all of these are overwritten by set_coolprop_constants() on
# first use.  The integer values presumably mirror CoolProp's own enum
# values -- TODO confirm against the installed CoolProp version.
CPgas = 5
CPliquid = 0
CPunknown = 8
CPPQ_INPUTS = 2
CPQT_INPUTS = 1
CPiDmolar = 24
CPrhoT_INPUTS = 11
caching_state_CoolProp = None
class CoolPropPhase(Phase):
    """Phase implementation backed by a CoolProp ``AbstractState`` object.

    Instances do not hold the ``AbstractState`` directly; they store a cache
    key (``self.key``) and re-fetch the state through the module-level
    ``caching_state_CoolProp`` LRU cache on every ``self.AS`` access.
    Commonly used scalar properties are snapshotted into the instance by
    ``_cache_easy_properties`` to avoid repeated CoolProp calls.
    """
    # Phase imposed on CoolProp by default; 8 matches the fallback CPunknown
    # (iphase_not_imposed).  Subclasses override with the liquid/gas indexes.
    prefer_phase = 8
    ideal_gas_basis = False

    def __str__(self):
        # Label by current phase; T/P may be missing on partially constructed
        # objects, hence the bare try/except around the formatting.
        if self.phase == 'g':
            s = '<%s, ' %('CoolPropGas')
        else:
            s = '<%s, ' %('CoolPropLiquid')
        try:
            s += 'T=%g K, P=%g Pa' %(self.T, self.P)
        except:
            pass
        s += '>'
        return s

#    def __del__(self):
#        # Not sustainable at all
#        # time-based cache seems next best
#        free_CoolProp_AS(self.AS, self.backend, self.fluid)

    @property
    def phase(self):
        """Return 'g' or 'l' as classified from CoolProp's phase index.

        Falls back to the imposed ``prefer_phase`` when CoolProp cannot
        report a phase (e.g. the state lookup itself raises).
        """
        try:
            idx = self.AS.phase()
            if idx in CoolProp_gas_phases:
                return 'g'
            return 'l'
        except:
            if self.prefer_phase == CPliquid:
                return 'l'
            return 'g'

    # Attributes that define the model for serialization/comparison
    model_attributes = ('backend', 'fluid', 'Hfs', 'Gfs', 'Sfs')

    def __init__(self, backend, fluid,
                 T=None, P=None, zs=None, Hfs=None,
                 Gfs=None, Sfs=None,):
        # Lazily pull in CoolProp the first time any phase is constructed
        if not CoolProp_constants_set:
            if has_CoolProp():
                set_coolprop_constants()
            else:
                raise ValueError("CoolProp is not installed")
        self.Hfs = Hfs
        self.Gfs = Gfs
        self.Sfs = Sfs
        self.backend = backend
        self.fluid = fluid
        if type(fluid) is list:
            self.skip_comp = skip_comp = False
        else:
            # skip_comp: True when mole fractions need not be set on the
            # AbstractState (pure fluid / IF97 backend).
            # NOTE(review): ('IF97') and ('water') are plain strings, not
            # one-element tuples, so these `in` checks are substring tests
            # rather than equality tests -- confirm whether ('IF97',) and
            # ('water',) were intended.
            self.skip_comp = skip_comp = (backend in ('IF97') or fluid in ('water') or '&' not in fluid)
        if zs is None:
            zs = [1.0]
        self.zs = zs
        self.N = N = len(zs)
        if skip_comp or N == 1:
            # Pure/IF97 states are cached without a composition key
            zs_key = None
        else:
            zs_key = tuple(zs)
        # NOTE(review): if T or P is None, self.T/self.P/self.key are never
        # set and later attribute access will fail -- confirm callers always
        # supply both.
        if T is not None and P is not None:
            self.T = T
            self.P = P
            try:
                key = [backend, fluid, P, T, CPPT_INPUTS, self.prefer_phase, zs_key]
                AS = caching_state_CoolProp(*key)
            except:
                # Imposing the preferred phase failed; retry unconstrained
                key = [backend, fluid, P, T, CPPT_INPUTS, CPunknown, zs_key]
                AS = caching_state_CoolProp(*key)
            self.key = key
            self._cache_easy_properties(AS)
#        if not skip_comp and zs is None:
#            self.zs = [1.0]
#        AS = get_CoolProp_AS(backend, fluid)#CoolProp.AbstractState(backend, fluid)
#        if not skip_comp:
#            AS.set_mole_fractions(zs)
#        AS.specify_phase(self.prefer_phase)
#        try:
#            AS.update(CPPT_INPUTS, P, T)
#        except:
#            AS.specify_phase(CPunknown)
#            AS.update(CPPT_INPUTS, P, T)
#
#        rho = AS.rhomolar()
#        key = (backend, fluid, T, rho)

    @property
    def AS(self):
        # Re-fetch (or rebuild) the AbstractState through the LRU cache
        return caching_state_CoolProp(*self.key)

    def to_TP_zs(self, T, P, zs):
        """Return a new phase at the specified T (K), P (Pa), and zs."""
        return self.to(T=T, P=P, zs=zs)

    def from_AS(self, AS):
        """Build a new phase object directly from an existing AbstractState,
        bypassing __init__."""
        new = self.__class__.__new__(self.__class__)
        new.N = N = self.N
        if N == 1:
            zs_key = None
            new.zs = self.zs
        else:
            new.zs = zs = AS.get_mole_fractions()
            zs_key = tuple(zs)
        new.backend = backend = self.backend
        new.fluid = fluid = self.fluid
        new.skip_comp = self.skip_comp
        new.T, new.P = T, P = AS.T(), AS.p()
        new.Hfs = self.Hfs
        new.Gfs = self.Gfs
        new.Sfs = self.Sfs
        # Always use density as an input - does not require a phase ID spec / setting with AS.phase() seems to not work
        new._cache_easy_properties(AS)
        # NOTE(review): 'self._rho' bakes the *source* phase's density into
        # the new object's cache key, while the properties cached above came
        # from AS -- possibly 'new._rho' was intended; confirm.
        new.key = (backend, fluid, self._rho, T, CPrhoT_INPUTS, CPunknown, zs_key)
        return new

    def to(self, zs, T=None, P=None, V=None, prefer_phase=None):
        """Return a new phase at the given composition and any two of
        T (K), P (Pa), V (m^3/mol); retries without an imposed phase when
        CoolProp rejects the preferred one."""
        new = self.__class__.__new__(self.__class__)
        new.zs = zs
        new.N = self.N
        new.backend = backend = self.backend
        new.fluid = fluid = self.fluid
        new.skip_comp = skip_comp = self.skip_comp
        if skip_comp or self.N == 1:
            zs_key = None
        else:
            zs_key = tuple(zs)
        if prefer_phase is None:
            prefer_phase = self.prefer_phase
        try:
            if T is not None:
                if P is not None:
                    new.T, new.P = T, P
                    key = (backend, fluid, P, T, CPPT_INPUTS, prefer_phase, zs_key)
                    AS = caching_state_CoolProp(*key)
                elif V is not None:
                    # Volume specs go in as molar density
                    key = (backend, fluid, 1.0/V, T, CPrhoT_INPUTS, prefer_phase, zs_key)
                    AS = caching_state_CoolProp(*key)
#                    AS.update(CPrhoT_INPUTS, 1.0/V, T)
                    new.T, new.P = T, AS.p()
            elif P is not None and V is not None:
                key = (backend, fluid, 1.0/V, P, CPrhoP_INPUTS, prefer_phase, zs_key)
                AS = caching_state_CoolProp(*key)
#                AS.update(CPrhoP_INPUTS, 1.0/V, P)
                new.T, new.P = AS.T(), P
        except ValueError:
            # Imposed phase rejected by CoolProp -- retry unconstrained
            prefer_phase = CPunknown
            if T is not None:
                if P is not None:
                    new.T, new.P = T, P
                    key = (backend, fluid, P, T, CPPT_INPUTS, prefer_phase, zs_key)
                    AS = caching_state_CoolProp(*key)
                elif V is not None:
                    key = (backend, fluid, 1.0/V, T, CPrhoT_INPUTS, prefer_phase, zs_key)
                    AS = caching_state_CoolProp(*key)
                    new.T, new.P = T, AS.p()
            elif P is not None and V is not None:
                key = (backend, fluid, 1.0/V, P, CPrhoP_INPUTS, prefer_phase, zs_key)
                AS = caching_state_CoolProp(*key)
                new.T, new.P = AS.T(), P
        new.Hfs = self.Hfs
        new.Gfs = self.Gfs
        new.Sfs = self.Sfs
        # NOTE(review): if no valid spec pair was provided, 'key'/'AS' are
        # unbound here and a NameError results -- confirm callers always pass
        # two of T/P/V.
        new.key = key
        new._cache_easy_properties(AS)
        return new

    def _cache_easy_properties(self, AS):
        # Snapshot cheap scalar properties so later accessors avoid a
        # cache lookup + CoolProp call
        self._rho = AS.rhomolar()
        self._V = 1.0/self._rho
        self._H = AS.hmolar()
        self._S = AS.smolar()
        self._Cp = AS.cpmolar()
        self._PIP = AS.PIP()

    def V(self):
        """Molar volume [m^3/mol] (cached at construction)."""
        return self._V
#        return 1.0/self.AS.rhomolar()

    def lnphis(self):
        """Log fugacity coefficients for each component (cached)."""
        try:
            return self._lnphis
        except AttributeError:
            pass
        self._lnphis = lnphis = []
        AS = self.AS
        for i in range(self.N):
            lnphis.append(log(AS.fugacity_coefficient(i)))
        return lnphis
    lnphis_G_min = lnphis

    def dlnphis_dT(self):
        raise NotImplementedError("Not in CoolProp")

    def dlnphis_dP(self):
        raise NotImplementedError("Not in CoolProp")

    def dlnphis_dns(self):
        raise NotImplementedError("Not in CoolProp")

    def dlnphis_dzs(self):
        raise NotImplementedError("Not in CoolProp")

    def gammas(self):
        raise NotImplementedError("TODO")

    # --- Pressure derivatives; CoolProp works in molar density, so
    # --- rho-derivatives are converted via drho/dV = -rho^2.
    def dP_dT(self):
        return self.AS.first_partial_deriv(CPiP, CPiT, CPiDmolar)
    dP_dT_V = dP_dT

    def dP_dV(self):
        rho = self.AS.rhomolar()
        dP_drho = self.AS.first_partial_deriv(CPiP, CPiDmolar, CPiT)
        return -dP_drho*rho*rho
    dP_dV_T = dP_dV

    def d2P_dT2(self):
        return self.AS.second_partial_deriv(CPiP, CPiT, CPiDmolar, CPiT, CPiDmolar)
    d2P_dT2_V = d2P_dT2

    def d2P_dV2(self):
        # NOTE(review): chain-rule conversion from d2P/drho2 to d2P/dV2 --
        # verify the transform, especially the 2.0*V*dP_dV term.
        d2P_drho2 = self.AS.second_partial_deriv(CPiP, CPiDmolar, CPiT, CPiDmolar, CPiT)
        V = self.V()
        dP_dV = self.dP_dV()
        return (d2P_drho2/-V**2 + 2.0*V*dP_dV)/-V**2
    d2P_dV2_T = d2P_dV2

    def d2P_dTdV(self):
        d2P_dTdrho = self.AS.second_partial_deriv(CPiP, CPiT, CPiDmolar, CPiDmolar, CPiT)
        rho = self.AS.rhomolar()
        return -d2P_dTdrho*rho*rho

    def PIP(self):
        """Phase identification parameter (cached at construction)."""
        return self._PIP
        # Saves time
#        return self.AS.PIP()

    def H(self):
        """Molar enthalpy [J/mol] (cached at construction)."""
        return self._H
#        return self.AS.hmolar()

    def S(self):
        """Molar entropy [J/(mol*K)] (cached at construction)."""
        return self._S
#        return self.AS.smolar()

    def H_dep(self):
        return self.AS.hmolar_excess()

    def S_dep(self):
        return self.AS.smolar_excess()

    def Cp_dep(self):
        raise NotImplementedError("Not in CoolProp")

    def Cp(self):
        """Molar isobaric heat capacity [J/(mol*K)] (cached)."""
        return self._Cp
#        return self.AS.cpmolar()
    dH_dT = Cp

    # --- Enthalpy derivatives
    def dH_dP(self):
        return self.AS.first_partial_deriv(CoolProp.iHmolar, CPiP, CPiT)

    def dH_dT_V(self):
        # Does not need rho multiplication
        return self.AS.first_partial_deriv(CoolProp.iHmolar, CPiT, CPiDmolar)

    def dH_dP_V(self):
        return self.AS.first_partial_deriv(CoolProp.iHmolar, CPiP, CPiDmolar)

    def dH_dV_T(self):
        rho = self.AS.rhomolar()
        return -self.AS.first_partial_deriv(CoolProp.iHmolar, CPiDmolar, CPiT)*rho*rho

    def dH_dV_P(self):
        rho = self.AS.rhomolar()
        return -self.AS.first_partial_deriv(CoolProp.iHmolar, CPiDmolar, CPiP)*rho*rho

    def d2H_dT2(self):
        return self.AS.second_partial_deriv(CoolProp.iHmolar, CPiT, CPiP, CPiT, CPiP)

    def d2H_dP2(self):
        return self.AS.second_partial_deriv(CoolProp.iHmolar, CPiP, CPiT, CPiP, CPiT)

    def d2H_dTdP(self):
        return self.AS.second_partial_deriv(CoolProp.iHmolar, CPiT, CPiP, CPiP, CPiT)

    # --- Entropy derivatives
    def dS_dT(self):
        return self.AS.first_partial_deriv(CPiSmolar, CPiT, CPiP)
    dS_dT_P = dS_dT

    def dS_dP(self):
        return self.AS.first_partial_deriv(CPiSmolar, CPiP, CPiT)
    dS_dP_T = dS_dP

    def dS_dT_V(self):
        return self.AS.first_partial_deriv(CPiSmolar, CPiT, CPiDmolar)

    def dS_dP_V(self):
        return self.AS.first_partial_deriv(CPiSmolar, CPiP, CPiDmolar)

    def dS_dV_T(self):
        rho = self.AS.rhomolar()
        return -self.AS.first_partial_deriv(CPiSmolar, CPiDmolar, CPiT)*rho*rho

    def dS_dV_P(self):
        rho = self.AS.rhomolar()
        return -self.AS.first_partial_deriv(CPiSmolar, CPiDmolar, CPiP)*rho*rho

    def d2S_dT2(self):
        return self.AS.second_partial_deriv(CPiSmolar, CPiT, CPiP, CPiT, CPiP)

    def d2S_dP2(self):
        return self.AS.second_partial_deriv(CPiSmolar, CPiP, CPiT, CPiP, CPiT)

    def d2S_dTdP(self):
        return self.AS.second_partial_deriv(CPiSmolar, CPiT, CPiP, CPiP, CPiT)

    def mu(self):
        """Viscosity [Pa*s], memoized on first access."""
        try:
            return self._mu
        except AttributeError:
            mu = self._mu = self.AS.viscosity()
            return mu

    def k(self):
        """Thermal conductivity [W/(m*K)], memoized on first access."""
        try:
            return self._k
        except AttributeError:
            k = self._k = self.AS.conductivity()
            return k
class CoolPropLiquid(CoolPropPhase):
    """CoolPropPhase specialization that imposes the liquid phase on CoolProp."""
    prefer_phase = CPliquid
    is_gas = False
    is_liquid = True
class CoolPropGas(CoolPropPhase):
    """CoolPropPhase specialization that imposes the gas phase on CoolProp."""
    prefer_phase = CPgas
    is_gas = True
    is_liquid = False
|
{
"content_hash": "c858a16bee229fc40763c499fc6dc925",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 119,
"avg_line_length": 33.846456692913385,
"alnum_prop": 0.5711294637664301,
"repo_name": "CalebBell/thermo",
"id": "51415004ae2ff9154484e8b389f51ebe9ecbd54a",
"size": "17218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thermo/phases/coolprop_phase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6939422"
}
],
"symlink_target": ""
}
|
import collections
from supriya import CalculationRate
from supriya.ugens.DUGen import DUGen
class Dswitch(DUGen):
    """
    A demand-rate generator for embedding different inputs.

    ::

        >>> index = supriya.ugens.Dseq(sequence=[0, 1, 2, 1, 0])
        >>> sequence = (1., 2., 3.)
        >>> dswitch = supriya.ugens.Dswitch.new(
        ...     index=index,
        ...     sequence=sequence,
        ...     )
        >>> dswitch
        Dswitch()

    """

    ### CLASS VARIABLES ###

    # Declared inputs, in UGen argument order: 'index' selects which element
    # of 'sequence' is embedded; neither has a default.
    _ordered_input_names = collections.OrderedDict(
        [("index", None), ("sequence", None)]
    )

    # 'sequence' accepts a variable-length argument list (not channel-expanded).
    _unexpanded_input_names = ("sequence",)

    # This UGen only operates at demand rate.
    _valid_calculation_rates = (CalculationRate.DEMAND,)
|
{
"content_hash": "93d28fdb6d571a01684ff7b0f4490c73",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 64,
"avg_line_length": 22.21875,
"alnum_prop": 0.5668073136427567,
"repo_name": "Pulgama/supriya",
"id": "ebf49ee7be1c91c9c2fbf36cb767c4fb50588265",
"size": "711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/ugens/Dswitch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
}
|
'''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:: fantastico.sdk.tests.test_sdk_command_core
'''
from fantastico.sdk.sdk_core import SdkCommandsRegistry
from fantastico.sdk.fantastico import SdkCore
from fantastico.sdk.commands.tests.commands_for_bdd_test import MockCmdTest
from fantastico.tests.base_case import FantasticoUnitTestsCase
class SdkCommandCoreTests(FantasticoUnitTestsCase):
    '''Unit tests for the sdk core command, covering autodiscovery of submodules.'''

    def init(self):
        '''Automatically invoked before each test: start from an empty command registry.'''
        SdkCommandsRegistry.COMMANDS.clear()

    def cleanup(self):
        '''Automatically invoked after each test: drop any registered commands.'''
        SdkCommandsRegistry.COMMANDS.clear()

    def test_core_command_ok(self):
        '''Ensure the fantastico sdk root command instantiates and executes a registered subcommand.'''
        argv = ["fantastico", "test_cmd"]

        SdkCommandsRegistry.COMMANDS.update(
            {"fantastico": SdkCore, "test_cmd": MockCmdTest})

        core_cmd = SdkCore(argv, supported_prefixes=["not supported ----"])
        self.assertIsNotNone(core_cmd)

        core_cmd.exec_command()
|
{
"content_hash": "e9b287eaa6fccb601bc7ceb98d247431",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 126,
"avg_line_length": 47.42857142857143,
"alnum_prop": 0.7607573149741824,
"repo_name": "rcosnita/fantastico",
"id": "845e0081310dde057800aeed4eb4a3ae0ae61517",
"size": "2324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fantastico/sdk/commands/tests/test_sdk_command_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6802"
},
{
"name": "Python",
"bytes": "2168052"
},
{
"name": "Shell",
"bytes": "13309"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
def get_notification_config():
    """Return the notification configuration mapping.

    "for_doctype" maps a DocType either to a filter dict (documents matching
    it are counted) or to the dotted path of a function returning the count;
    "for_other" maps miscellaneous notification names to counting functions.
    """
    doctype_counts = {
        "Error Log": {"seen": 0},
        "Communication": {"status": "Open", "communication_type": "Communication"},
        "ToDo": "frappe.core.notifications.get_things_todo",
        "Event": "frappe.core.notifications.get_todays_events",
        "Error Snapshot": {"seen": 0, "parent_error_snapshot": None},
        "Workflow Action": {"status": "Open"},
    }
    other_counts = {
        "Likes": "frappe.core.notifications.get_unseen_likes",
        "Email": "frappe.core.notifications.get_unread_emails",
    }
    return {"for_doctype": doctype_counts, "for_other": other_counts}
def get_things_todo(as_list=False):
    """Return open ToDos owned by or assigned by the session user.

    With as_list=True, returns (name, description) rows; otherwise a count.
    """
    user = frappe.session.user
    fields = ["name", "description"] if as_list else "count(*)"
    rows = frappe.get_list("ToDo",
        fields=fields,
        filters=[["ToDo", "status", "=", "Open"]],
        or_filters=[["ToDo", "owner", "=", user],
            ["ToDo", "assigned_by", "=", user]],
        as_list=True)
    return rows if as_list else rows[0][0]
def get_todays_events(as_list=False):
    """Return today's calendar events (the list itself, or a count)."""
    # Local imports avoid a circular dependency at module load time
    from frappe.desk.doctype.event.event import get_events
    from frappe.utils import nowdate

    today = nowdate()
    events = get_events(today, today)
    if as_list:
        return events
    return len(events)
def get_unseen_likes():
    """Returns count of unseen likes"""
    # Counts 'Like' comments from the last year, made by someone other than
    # the session user on documents that user owns, not yet marked seen.
    return frappe.db.sql("""select count(*) from `tabCommunication`
        where
            communication_type='Comment'
            and modified >= (NOW() - INTERVAL '1' YEAR)
            and comment_type='Like'
            and owner is not null and owner!=%(user)s
            and reference_owner=%(user)s
            and seen=0""", {"user": frappe.session.user})[0][0]
def get_unread_emails():
    "returns unread emails for a user"
    # Counts received, non-spam/trash emails from the last year in any of the
    # session user's linked email accounts that are still unseen.
    return frappe.db.sql("""\
        SELECT count(*)
        FROM `tabCommunication`
        WHERE communication_type='Communication'
        AND communication_medium='Email'
        AND sent_or_received='Received'
        AND email_status not in ('Spam', 'Trash')
        AND email_account in (
            SELECT distinct email_account from `tabUser Email` WHERE parent=%(user)s
        )
        AND modified >= (NOW() - INTERVAL '1' YEAR)
        AND seen=0
        """, {"user": frappe.session.user})[0][0]
|
{
"content_hash": "86a81a9f8956ab9d0a646ff1c77b0974",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 31.5,
"alnum_prop": 0.6699346405228758,
"repo_name": "RicardoJohann/frappe",
"id": "619231e5cdd9b5b7486f7dba0b6f70f8cf164c18",
"size": "2243",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/core/notifications.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "447183"
},
{
"name": "HTML",
"bytes": "199549"
},
{
"name": "JavaScript",
"bytes": "2009239"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2338007"
},
{
"name": "Shell",
"bytes": "2296"
},
{
"name": "Vue",
"bytes": "24090"
}
],
"symlink_target": ""
}
|
"""
Unit tests for :mod:`behave.api.async_test` for Python 3.5 (or newer).
"""
# -- IMPORTS:
from __future__ import absolute_import, print_function
import sys
from behave._stepimport import use_step_import_modules
from behave.runner import Context, Runner
import pytest
from .testing_support import StopWatch, SimpleStepContainer
from .testing_support_async import AsyncStepTheory
# -----------------------------------------------------------------------------
# SUPPORT:
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# ASYNC STEP EXAMPLES:
# -----------------------------------------------------------------------------
# if python_version >= 3.5:
# @step('an async coroutine step waits "{duration:f}" seconds')
# @async_run_until_complete
# async def step_async_step_waits_seconds(context, duration):
# print("async_step: Should sleep for %.3f seconds" % duration)
# await asyncio.sleep(duration)
#
# if python_version >= 3.4:
# @step('a tagged-coroutine async step waits "{duration:f}" seconds')
# @async_run_until_complete
# @asyncio.coroutine
# def step_async_step_waits_seconds2(context, duration):
# print("async_step2: Should sleep for %.3f seconds" % duration)
# yield from asyncio.sleep(duration)
#
# -----------------------------------------------------------------------------
# TEST MARKERS:
# -----------------------------------------------------------------------------
# xfail = pytest.mark.xfail
# Running interpreter's major.minor as a float, e.g. 3.8 for Python 3.8.x.
_python_version = float("%s.%s" % sys.version_info[:2])
# Skip marker for tests requiring 'async def' syntax (Python >= 3.5).
py35_or_newer = pytest.mark.skipif(_python_version < 3.5, reason="Needs Python >= 3.5")
# -----------------------------------------------------------------------------
# TESTSUITE:
# -----------------------------------------------------------------------------
@py35_or_newer
class TestAsyncStepDecoratorPy35(object):
    """Tests for @async_run_until_complete applied to 'async def' step functions."""

    def test_step_decorator_async_run_until_complete1(self):
        step_container = SimpleStepContainer()
        with use_step_import_modules(step_container):
            # -- STEP-DEFINITIONS EXAMPLE (as MODULE SNIPPET):
            # VARIANT 1: Use async def step_impl()
            from behave import step
            from behave.api.async_step import async_run_until_complete
            import asyncio

            @step('an async coroutine step waits "{duration:f}" seconds')
            @async_run_until_complete
            async def step_async_step_waits_seconds(context, duration):
                await asyncio.sleep(duration)

        # -- USES: async def step_impl(...) as async-step (coroutine)
        AsyncStepTheory.validate(step_async_step_waits_seconds)

        # -- RUN ASYNC-STEP: Verify that it is behaving correctly.
        # ENSURE: Execution of async-step matches expected duration
        # (within a tolerance to absorb scheduler jitter).
        context = Context(runner=Runner(config={}))
        with StopWatch() as stop_watch:
            step_async_step_waits_seconds(context, 0.2)
        assert abs(stop_watch.duration - 0.2) <= 0.05
@py35_or_newer
class TestAsyncStepRunPy35(object):
    """Verify the runtime behavior of async-steps: success, assertion
    failures, and arbitrary exceptions."""

    def test_async_step_passes(self):
        """ENSURE: Passing async-steps run to completion and record effects."""
        container = SimpleStepContainer()
        with use_step_import_modules(container):
            # -- STEP-DEFINITIONS (module snippet) using: async def step_impl()
            from behave import given, when
            from behave.api.async_step import async_run_until_complete

            @given('an async-step passes')
            @async_run_until_complete
            async def given_async_step_passes(context):
                context.traced_steps.append("async-step1")

            @when('an async-step passes')
            @async_run_until_complete
            async def when_async_step_passes(context):
                context.traced_steps.append("async-step2")

        # -- EXECUTE: Both async-steps should run and trace in call order.
        ctx = Context(runner=Runner(config={}))
        ctx.traced_steps = []
        given_async_step_passes(ctx)
        when_async_step_passes(ctx)
        assert ctx.traced_steps == ["async-step1", "async-step2"]

    def test_async_step_fails(self):
        """ENSURE: An assertion failure inside an async-step propagates."""
        container = SimpleStepContainer()
        with use_step_import_modules(container):
            # -- STEP-DEFINITIONS (module snippet) using: async def step_impl()
            from behave import when
            from behave.api.async_step import async_run_until_complete

            @when('an async-step fails')
            @async_run_until_complete
            async def when_async_step_fails(context):
                assert False, "XFAIL in async-step"

        # -- EXECUTE: The AssertionError must surface to the caller.
        ctx = Context(runner=Runner(config={}))
        with pytest.raises(AssertionError):
            when_async_step_fails(ctx)

    def test_async_step_raises_exception(self):
        """ENSURE: Arbitrary exceptions raised in an async-step propagate."""
        container = SimpleStepContainer()
        with use_step_import_modules(container):
            # -- STEP-DEFINITIONS (module snippet) using: async def step_impl()
            from behave import when
            from behave.api.async_step import async_run_until_complete

            @when('an async-step raises exception')
            @async_run_until_complete
            async def when_async_step_raises_exception(context):
                1 / 0  # XFAIL-HERE: Raises ZeroDivisionError

        # -- EXECUTE: The ZeroDivisionError must surface to the caller.
        ctx = Context(runner=Runner(config={}))
        with pytest.raises(ZeroDivisionError):
            when_async_step_raises_exception(ctx)
|
{
"content_hash": "9447e5563d95c736264df282f9085551",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 87,
"avg_line_length": 41.465753424657535,
"alnum_prop": 0.5744962008589363,
"repo_name": "jenisys/behave",
"id": "f4068db22eb9f6c74d672a26a81162b1b8f96bcc",
"size": "6078",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/api/_test_async_step35.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "8799"
},
{
"name": "Python",
"bytes": "720530"
},
{
"name": "Shell",
"bytes": "272"
}
],
"symlink_target": ""
}
|
from unittest import mock
from heat.engine.clients.os import monasca as client_plugin
from heat.engine.resources.openstack.monasca import alarm_definition
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
# Minimal HOT template exercising every OS::Monasca::AlarmDefinition property,
# used as the fixture stack for the tests below.
sample_template = {
    'heat_template_version': '2015-10-15',
    'resources': {
        'test_resource': {
            'type': 'OS::Monasca::AlarmDefinition',
            'properties': {
                'name': 'sample_alarm_id',
                'description': 'sample alarm def',
                'expression': 'sample expression',
                'match_by': ['match_by'],
                'severity': 'low',
                'ok_actions': ['sample_notification'],
                'alarm_actions': ['sample_notification'],
                'undetermined_actions': ['sample_notification'],
                'actions_enabled': False
            }
        }
    }
}
class MonascaAlarmDefinitionTest(common.HeatTestCase):
    """Unit tests for the OS::Monasca::AlarmDefinition resource handlers.

    The monasca client is fully mocked; each test only verifies which client
    calls a handler makes and with which arguments.
    """

    def setUp(self):
        super(MonascaAlarmDefinitionTest, self).setUp()

        self.ctx = utils.dummy_context()
        self.stack = stack.Stack(
            self.ctx, 'test_stack',
            template.Template(sample_template)
        )
        self.test_resource = self.stack['test_resource']

        # Mock client: the resource's client() returns this MagicMock
        self.test_client = mock.MagicMock()
        self.test_resource.client = mock.MagicMock(
            return_value=self.test_client)

        # Mock client plugin: real plugin object, but its client factory is
        # patched to hand back the mocked client above
        self.test_client_plugin = client_plugin.MonascaClientPlugin(self.ctx)
        self.test_client_plugin._create = mock.MagicMock(
            return_value=self.test_client)
        self.test_resource.client_plugin = mock.MagicMock(
            return_value=self.test_client_plugin)
        # Notification name lookups always resolve to a fixed id
        self.test_client_plugin.get_notification = mock.MagicMock(
            return_value='sample_notification')

    def _get_mock_resource(self):
        # Fake payload returned by alarm_definitions.create; only 'id' is used
        value = dict(id='477e8273-60a7-4c41-b683-fdb0bc7cd152')
        return value

    def test_resource_handle_create(self):
        """handle_create must create the alarm, record its id, and patch
        actions_enabled separately (it is not part of the create call)."""
        mock_alarm_create = self.test_client.alarm_definitions.create
        mock_alarm_patch = self.test_client.alarm_definitions.patch
        mock_resource = self._get_mock_resource()
        mock_alarm_create.return_value = mock_resource

        # validate the properties parsed from sample_template
        self.assertEqual(
            'sample_alarm_id',
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.NAME))
        self.assertEqual(
            'sample alarm def',
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.DESCRIPTION))
        self.assertEqual(
            'sample expression',
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.EXPRESSION))
        self.assertEqual(
            ['match_by'],
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.MATCH_BY))
        self.assertEqual(
            'low',
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.SEVERITY))
        self.assertEqual(
            ['sample_notification'],
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.OK_ACTIONS))
        self.assertEqual(
            ['sample_notification'],
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.ALARM_ACTIONS))
        self.assertEqual(
            ['sample_notification'],
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.UNDETERMINED_ACTIONS))
        self.assertEqual(
            False,
            self.test_resource.properties.get(
                alarm_definition.MonascaAlarmDefinition.ACTIONS_ENABLED))

        self.test_resource.data_set = mock.Mock()
        self.test_resource.handle_create()

        # validate physical resource id
        self.assertEqual(mock_resource['id'], self.test_resource.resource_id)

        args = dict(
            name='sample_alarm_id',
            description='sample alarm def',
            expression='sample expression',
            match_by=['match_by'],
            severity='low',
            ok_actions=['sample_notification'],
            alarm_actions=['sample_notification'],
            undetermined_actions=['sample_notification']
        )
        mock_alarm_create.assert_called_once_with(**args)
        # actions_enabled=False is applied via a follow-up patch call
        mock_alarm_patch.assert_called_once_with(
            alarm_id=self.test_resource.resource_id,
            actions_enabled=False)

    def test_resource_handle_update(self):
        """handle_update must forward all changed properties in one patch."""
        mock_alarm_patch = self.test_client.alarm_definitions.patch
        self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'

        prop_diff = {
            alarm_definition.MonascaAlarmDefinition.NAME:
                'name-updated',
            alarm_definition.MonascaAlarmDefinition.DESCRIPTION:
                'description-updated',
            alarm_definition.MonascaAlarmDefinition.ACTIONS_ENABLED:
                True,
            alarm_definition.MonascaAlarmDefinition.SEVERITY:
                'medium',
            alarm_definition.MonascaAlarmDefinition.OK_ACTIONS:
                ['sample_notification'],
            alarm_definition.MonascaAlarmDefinition.ALARM_ACTIONS:
                ['sample_notification'],
            alarm_definition.MonascaAlarmDefinition.UNDETERMINED_ACTIONS:
                ['sample_notification']}

        self.test_resource.handle_update(json_snippet=None,
                                         tmpl_diff=None,
                                         prop_diff=prop_diff)

        args = dict(
            alarm_id=self.test_resource.resource_id,
            name='name-updated',
            description='description-updated',
            actions_enabled=True,
            severity='medium',
            ok_actions=['sample_notification'],
            alarm_actions=['sample_notification'],
            undetermined_actions=['sample_notification']
        )
        mock_alarm_patch.assert_called_once_with(**args)

    def test_resource_handle_delete(self):
        """handle_delete must delete by the stored resource id."""
        mock_alarm_delete = self.test_client.alarm_definitions.delete
        self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        mock_alarm_delete.return_value = None

        self.assertIsNone(self.test_resource.handle_delete())
        mock_alarm_delete.assert_called_once_with(
            alarm_id=self.test_resource.resource_id
        )

    def test_resource_handle_delete_resource_id_is_none(self):
        # Deleting a never-created resource is a silent no-op
        self.test_resource.resource_id = None
        self.assertIsNone(self.test_resource.handle_delete())

    def test_resource_handle_delete_not_found(self):
        # A NotFound from the client is swallowed: the alarm is already gone
        self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        mock_alarm_delete = self.test_client.alarm_definitions.delete
        mock_alarm_delete.side_effect = client_plugin.monasca_exc.NotFound

        self.assertIsNone(self.test_resource.handle_delete())
|
{
"content_hash": "e2192b321b88fc8f56ac2fdc1e881036",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 79,
"avg_line_length": 38.67027027027027,
"alnum_prop": 0.6133631534805704,
"repo_name": "openstack/heat",
"id": "8dba8d04defe36d7d9e2e719adde1946bb4e4402",
"size": "7729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/openstack/monasca/test_alarm_definition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9145593"
},
{
"name": "Shell",
"bytes": "65832"
}
],
"symlink_target": ""
}
|
from setuptools import setup

# Package metadata collected in one place and splatted into setup() below.
PACKAGE_METADATA = {
    'name': 'widendeep',
    'packages': ['widendeep'],
    'version': '0.1',
    'description': 'TensorFlow Wide and Deep example',
    'url': 'https://github.com/amygdala/tensorflow-workshop',
    'author': 'Yufeng Guo',
    'author_email': 'yfg@google.com',
    'license': 'MIT',
    'install_requires': ['tensorflow==0.12.1'],
    'zip_safe': False,
}

setup(**PACKAGE_METADATA)
|
{
"content_hash": "4287ce481d42f5508f45aa8a860ed2a0",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 31.166666666666668,
"alnum_prop": 0.6417112299465241,
"repo_name": "Resly/pipeline",
"id": "77e652f97168646f68a1c85c3d40c4d899ad2139",
"size": "374",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "jupyterhub.ml/notebooks/zz_old/TensorFlow/GoogleTraining/workshop_sections/wide_n_deep/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "36325"
},
{
"name": "Batchfile",
"bytes": "21218"
},
{
"name": "C",
"bytes": "1759"
},
{
"name": "C++",
"bytes": "50538"
},
{
"name": "CSS",
"bytes": "441446"
},
{
"name": "Cuda",
"bytes": "3113"
},
{
"name": "Go",
"bytes": "9555"
},
{
"name": "HTML",
"bytes": "48376774"
},
{
"name": "Java",
"bytes": "108962"
},
{
"name": "JavaScript",
"bytes": "539670"
},
{
"name": "Jupyter Notebook",
"bytes": "18176491"
},
{
"name": "Makefile",
"bytes": "357"
},
{
"name": "Protocol Buffer",
"bytes": "137774"
},
{
"name": "Python",
"bytes": "667334"
},
{
"name": "Scala",
"bytes": "366964"
},
{
"name": "Shell",
"bytes": "110692"
},
{
"name": "XSLT",
"bytes": "26188"
}
],
"symlink_target": ""
}
|
import os
import sys
def _main(argv):
    """Point Django at the project settings and dispatch the command line."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rms.settings")

    # Imported lazily so the settings module is configured first.
    from django.core.management import execute_from_command_line

    execute_from_command_line(argv)


if __name__ == "__main__":
    _main(sys.argv)
|
{
"content_hash": "7820e3db1b877fea072c4af2c8f77ad3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 67,
"avg_line_length": 24.88888888888889,
"alnum_prop": 0.7053571428571429,
"repo_name": "Ashaba/rms",
"id": "aa80ec4730786c72dc1535a427a250bd0819101d",
"size": "246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "650487"
},
{
"name": "HTML",
"bytes": "2109946"
},
{
"name": "JavaScript",
"bytes": "3041523"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "Python",
"bytes": "10446887"
},
{
"name": "Shell",
"bytes": "3332"
}
],
"symlink_target": ""
}
|
import os
from viper.common.out import *
from viper.core.project import __project__
def store_sample(file_object):
    """Persist *file_object* under the project's binaries store.

    The file is written into a four-level directory fan-out built from the
    first four hex digits of its SHA256, with the full digest as the file
    name.

    Returns the stored file path, or None when the object has no hash or a
    file with the same digest is already stored.
    """
    sha256 = file_object.sha256
    if not sha256:
        print_error("No hash")
        return None

    folder = os.path.join(__project__.get_path(), 'binaries',
                          sha256[0], sha256[1], sha256[2], sha256[3])

    if not os.path.exists(folder):
        try:
            # 0o750 (was the Python-2-only literal ``0750``): owner rwx,
            # group rx.  Creating inside try/except closes the race between
            # the exists() check and makedirs().
            os.makedirs(folder, 0o750)
        except OSError as err:
            import errno
            if err.errno != errno.EEXIST:
                raise

    file_path = os.path.join(folder, sha256)

    if not os.path.exists(file_path):
        with open(file_path, 'wb') as stored:
            for chunk in file_object.get_chunks():
                stored.write(chunk)
    else:
        print_warning("File exists already")
        return None

    return file_path
def get_sample_path(sha256):
    """Return the on-disk path for the sample *sha256*, or None if absent."""
    candidate = os.path.join(__project__.get_path(), 'binaries',
                             sha256[0], sha256[1], sha256[2], sha256[3],
                             sha256)
    return candidate if os.path.exists(candidate) else None
|
{
"content_hash": "034af3e86ac9ff487c34922eb15c6180",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 111,
"avg_line_length": 28.060606060606062,
"alnum_prop": 0.6187904967602592,
"repo_name": "rommelfs/viper",
"id": "1978567059b4f5fade23a3e1b47501889d7bb8d4",
"size": "1042",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "viper/core/storage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "306425"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
}
|
"""Generic Node base class for all workers that run on hosts."""
import errno
import os
import random
import signal
import sys
import time
import eventlet
import logging as std_logging
from oslo.config import cfg
from cinder.openstack.common import eventlet_backdoor
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import threadgroup
rpc = importutils.try_import('cinder.openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Launcher(object):
    """Launch one or more services and wait for them to complete."""

    def __init__(self):
        """Create the thread group and the optional eventlet backdoor."""
        self._service_threads = threadgroup.ThreadGroup()
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()

    @staticmethod
    def run_service(service):
        """Start *service* and block until it finishes."""
        service.start()
        service.wait()

    def launch_service(self, service):
        """Hand *service* the backdoor port and run it on its own thread."""
        service.backdoor_port = self.backdoor_port
        self._service_threads.add_thread(self.run_service, service)

    def stop(self):
        """Stop every service currently running."""
        self._service_threads.stop()

    def wait(self):
        """Block until all launched services have stopped."""
        self._service_threads.wait()
class SignalExit(SystemExit):
    """SystemExit variant that records which signal triggered shutdown."""

    def __init__(self, signo, exccode=1):
        # Remember the signal number, then delegate the exit code handling
        # to SystemExit (exposed as ``self.code``).
        self.signo = signo
        super(SignalExit, self).__init__(exccode)
class ServiceLauncher(Launcher):
    """Launcher that runs services in the current process.

    SIGTERM/SIGINT are converted into SignalExit so wait() can unwind
    cleanly and return an exit status.
    """

    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        raise SignalExit(signo)

    def wait(self):
        """Wait for all services; returns the process exit status (or None)."""
        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        status = None
        try:
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            # Raised by _handle_signal; map the signal back to its name.
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[exc.signo]
            LOG.info(_('Caught %s, exiting'), signame)
            status = exc.code
        except SystemExit as exc:
            status = exc.code
        finally:
            # Always tear down RPC (when available) and stop services,
            # whatever caused the wait to end.
            if rpc:
                rpc.cleanup()
            self.stop()
        return status
class ServiceWrapper(object):
    """Bookkeeping record pairing a service with its worker processes."""

    def __init__(self, service, workers):
        self.service = service      # the Service instance to run
        self.workers = workers      # desired number of child processes
        self.children = set()       # pids of currently live children
        self.forktimes = []         # recent fork timestamps (rate limiting)
class ProcessLauncher(object):
    """Fork and supervise multiple worker processes per service.

    The parent keeps a pipe open to every child; a child that sees the
    read end close knows the parent died and exits itself.
    """

    def __init__(self):
        self.children = {}      # pid -> ServiceWrapper
        self.sigcaught = None   # signal number that stopped the loop, if any
        self.running = True
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')

        signal.signal(signal.SIGTERM, self._handle_signal)
        signal.signal(signal.SIGINT, self._handle_signal)

    def _handle_signal(self, signo, frame):
        """Record the signal and ask the supervision loop to stop."""
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    def _pipe_watcher(self):
        """Child-side watchdog: exit when the parent's pipe end closes."""
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()

        LOG.info(_('Parent process has died unexpectedly, exiting'))

        sys.exit(1)

    def _child_process(self, service):
        """Run *service* inside a freshly forked child (never returns)."""
        # Setup child signal handlers differently
        def _sigterm(*args):
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)

        signal.signal(signal.SIGTERM, _sigterm)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher()
        launcher.run_service(service)

    def _start_child(self, wrap):
        """Fork one worker for *wrap*; returns the child's pid."""
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_('Forking too fast, sleeping'))
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # NOTE(johannes): All exceptions are caught to ensure this
            # doesn't fallback into the loop spawning children. It would
            # be bad for a child to spawn more children.
            status = 0
            try:
                self._child_process(wrap.service)
            except SignalExit as exc:
                signame = {signal.SIGTERM: 'SIGTERM',
                           signal.SIGINT: 'SIGINT'}[exc.signo]
                LOG.info(_('Caught %s, exiting'), signame)
                status = exc.code
            except SystemExit as exc:
                status = exc.code
            except BaseException:
                LOG.exception(_('Unhandled exception'))
                status = 2
            finally:
                wrap.service.stop()

            # _exit, not exit: skip atexit/stdio handlers inherited from
            # the parent.
            os._exit(status)

        LOG.info(_('Started child %d'), pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        """Spawn *workers* child processes, each running *service*."""
        wrap = ServiceWrapper(service, workers)

        LOG.info(_('Starting %d workers'), wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one exited child; returns its ServiceWrapper or None."""
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning(_('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""
        LOG.debug(_('Full set of CONF:'))
        CONF.log_opt_values(LOG, std_logging.DEBUG)

        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(.01)
                continue

            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

        if self.sigcaught:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[self.sigcaught]
            LOG.info(_('Caught %s, stopping children'), signame)

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                # ESRCH: child already gone; anything else is unexpected.
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        """Create the thread group (capacity *threads*) backing the service."""
        self.tg = threadgroup.ThreadGroup(threads)

    def start(self):
        """Hook for subclasses; the base service does nothing on start."""
        pass

    def stop(self):
        """Stop every thread owned by this service."""
        self.tg.stop()

    def wait(self):
        """Block until every thread owned by this service has finished."""
        self.tg.wait()
def launch(service, workers=None):
    """Run *service*, forking into *workers* processes when given.

    Returns the launcher so the caller can wait() on it.
    """
    if not workers:
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    else:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)
    return launcher
|
{
"content_hash": "c3e44a0ccacdd487bca61496bf8593ba",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 77,
"avg_line_length": 30.038216560509554,
"alnum_prop": 0.5838634435962681,
"repo_name": "cloudbau/cinder",
"id": "7cbd3690a64002b523c8f6f43b3acfa454d96aef",
"size": "10247",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/openstack/common/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5235714"
},
{
"name": "Shell",
"bytes": "8994"
}
],
"symlink_target": ""
}
|
"""
Views which allow subscribers to create and activate accounts.
"""
from django.shortcuts import redirect
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from mailinglist_registration.models import Subscriber
from mailinglist_registration.forms import RegistrationForm
class _RequestPassingFormView(FormView):
    """
    A version of FormView which passes extra arguments to certain
    methods, notably passing the HTTP request nearly everywhere, to
    enable finer-grained processing.
    """
    # NOTE: each hook below accepts (and by default ignores) the request
    # and/or subscriber arguments so that subclasses can override them
    # with request-aware behavior without changing call sites.

    def get(self, request, *args, **kwargs):
        # Pass request to get_form_class and get_form for per-request
        # form control.
        form_class = self.get_form_class(request)
        form = self.get_form(form_class)
        return self.render_to_response(self.get_context_data(form=form))

    def post(self, request, *args, **kwargs):
        # Pass request to get_form_class and get_form for per-request
        # form control.
        form_class = self.get_form_class(request)
        form = self.get_form(form_class)
        if form.is_valid():
            # Pass request to form_valid.
            return self.form_valid(request, form)
        else:
            return self.form_invalid(form)

    def get_form_class(self, request=None):
        return super(_RequestPassingFormView, self).get_form_class()

    def get_form_kwargs(self, request=None, form_class=None):
        return super(_RequestPassingFormView, self).get_form_kwargs()

    def get_initial(self, request=None):
        return super(_RequestPassingFormView, self).get_initial()

    def get_success_url(self, request=None, subscriber=None):
        # We need to be able to use the request and the new subscriber when
        # constructing success_url.
        return super(_RequestPassingFormView, self).get_success_url()

    def form_valid(self, form, request=None):
        return super(_RequestPassingFormView, self).form_valid(form)

    def form_invalid(self, form, request=None):
        return super(_RequestPassingFormView, self).form_invalid(form)
class RegistrationView(_RequestPassingFormView):
    """
    Base class for subscriber registration views.

    Subclasses implement register(); registration_allowed() may be
    overridden to gate signups.
    """
    disallowed_url = 'mailinglist_registration_disallowed'
    form_class = RegistrationForm
    http_method_names = ['get', 'post', 'head', 'options', 'trace']
    success_url = None
    template_name = 'mailinglist/registration_form.html'

    def dispatch(self, request, *args, **kwargs):
        """
        Check that subscriber signup is allowed before even bothering to
        dispatch or do other processing.
        """
        if not self.registration_allowed(request):
            return redirect(self.disallowed_url)
        return super(RegistrationView, self).dispatch(request, *args, **kwargs)

    def form_valid(self, request, form):
        new_subscriber = self.register(request, **form.cleaned_data)
        success_url = self.get_success_url(request, new_subscriber)

        # success_url may be a simple string, or a tuple providing the
        # full argument set for redirect(). Attempting to unpack it
        # tells us which one it is.
        try:
            to, args, kwargs = success_url
            return redirect(to, *args, **kwargs)
        except ValueError:
            return redirect(success_url)

    def registration_allowed(self, request):
        """
        Override this to enable/disable subscriber registration, either
        globally or on a per-request basis.
        """
        return True

    def register(self, request, **cleaned_data):
        """
        Implement subscriber-registration logic here. Access to both the
        request and the full cleaned_data of the registration form is
        available here.
        """
        raise NotImplementedError
class ActivationView(TemplateView):
    """
    Base class for subscriber activation views.

    Subclasses implement activate() and get_success_url(); a falsy
    activate() result falls through to rendering the template.
    """
    http_method_names = ['get']
    template_name = 'mailinglist/activate.html'

    def get(self, request, *args, **kwargs):
        activated_subscriber = self.activate(request, *args, **kwargs)
        if activated_subscriber:
            # success_url may be a plain string or a (to, args, kwargs)
            # tuple; unpacking distinguishes the two (same idiom as
            # RegistrationView.form_valid).
            success_url = self.get_success_url(request, activated_subscriber)
            try:
                to, args, kwargs = success_url
                return redirect(to, *args, **kwargs)
            except ValueError:
                return redirect(success_url)
        return super(ActivationView, self).get(request, *args, **kwargs)

    def activate(self, request, *args, **kwargs):
        """
        Implement account-activation logic here.
        """
        raise NotImplementedError

    def get_success_url(self, request, subscriber):
        raise NotImplementedError
class DeRegistrationView(TemplateView):
    """
    Base class for subscriber deactivation (unsubscribe) views.

    Subclasses set ``success_url`` or override get_success_url().
    """
    http_method_names = ['get']
    success_url = None

    def get(self, request, deactivation_key, *args, **kwargs):
        subscriber = Subscriber.objects.deactivate_subscriber(deactivation_key)
        if subscriber:
            # Bug fix: ``success_url`` was previously an undefined local
            # name, so every successful deactivation raised NameError.
            # Mirror the sibling views and redirect via a hook.
            return redirect(self.get_success_url(request, subscriber))
        return super(DeRegistrationView, self).get(request, *args, **kwargs)

    def get_success_url(self, request, subscriber):
        """Return the post-deactivation redirect target; override as needed."""
        return self.success_url
|
{
"content_hash": "6f95a7040a14170ecd5b87e9e0908bd7",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 35.11486486486486,
"alnum_prop": 0.6492207042524534,
"repo_name": "remarkablerocket/django-mailinglist-registration",
"id": "73c2b584fabe805f2f58506369d01f97c7b5c1d5",
"size": "5197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailinglist_registration/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "77853"
},
{
"name": "Shell",
"bytes": "2985"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the ``algorithm`` choice field (Slow Assist / Photo Finish /
    # Standard Only, default Slow Assist) to StaggeredStartRace.

    dependencies = [
        ('events', '0020_auto_20170112_1440'),
    ]

    operations = [
        migrations.AddField(
            model_name='staggeredstartrace',
            name='algorithm',
            field=models.CharField(choices=[('SA', 'Slow Assist'), ('PF', 'Photo Finish'), ('SO', 'Standard Only')], default='SA', max_length=2),
        ),
    ]
|
{
"content_hash": "c2532f86e635ef4cb98c239c34a8a582",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 145,
"avg_line_length": 26.944444444444443,
"alnum_prop": 0.5958762886597938,
"repo_name": "abecede753/trax",
"id": "e602d85a74f5981a0bb4f231e14174ef553042a9",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/events/migrations/0021_staggeredstartrace_algorithm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "39385"
},
{
"name": "CSS",
"bytes": "6173"
},
{
"name": "HTML",
"bytes": "553928"
},
{
"name": "JavaScript",
"bytes": "85739"
},
{
"name": "Jupyter Notebook",
"bytes": "176256"
},
{
"name": "Makefile",
"bytes": "325"
},
{
"name": "PHP",
"bytes": "16944"
},
{
"name": "PowerShell",
"bytes": "162"
},
{
"name": "Python",
"bytes": "162529"
},
{
"name": "Shell",
"bytes": "505"
}
],
"symlink_target": ""
}
|
import numpy as np
import math
from scipy import signal
from openpyxl import Workbook
from openpyxl import load_workbook
#from openpyxl.compat import range
import openpyxl.compat
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
import tkinter as tk
from tkinter import ttk
from tkinter.filedialog import askopenfilename, asksaveasfilename
import os
from datetime import datetime
class Filterer(object):
def __init__(self):
pass
def gaussianFilter(self, x, window_size=10, std=7):
kernel = signal.gaussian(window_size, std=std)
x_origin = np.copy(x)
x_result = np.zeros(x.shape)
for i, value in enumerate(x_origin):
offset = math.floor(window_size/2.0)
first_idx = i-offset
if first_idx < 0:
first_idx = 0
src = x_origin[first_idx : i+offset +1]
if len(src) != len(kernel):
x_result[i] = x_origin[i]
elif len(src) == len(kernel):
x_result[i] = np.sum( src * kernel / float(window_size))
return x_result
def averageFilter(self, x, window_size=3):
x_origin = np.copy(x)
x_result = np.zeros(x.shape)
for i, value in enumerate(x_origin):
offset = math.floor(window_size/2.0)
first_idx = i-offset
if first_idx < 0:
first_idx = 0
src = x_origin[first_idx: i+offset +1]
if len(src) != window_size:
x_result[i] = x_origin[i]
else:
x_result[i] = np.sum( src / float(window_size))
return x_result
def findPeak(self, x ):
x_result = np.zeros(x.shape)
for i in range(1, len(x)-1):
if x[i] > x[i-1] and x[i] >x[i+1]:
x_result[i] = 1
return x_result
class XlHandler(object):
    """Placeholder for workbook load/save logic (not yet implemented)."""

    def __init__(self):
        # No workbook loaded until loadFile() is implemented/called.
        self.wb = None

    def getDataFrom(self, start, end):
        return None

    def loadFile(self):
        pass

    def saveFile(self):
        pass
class GraphTool(object):
    """Tk GUI for plotting and filtering columns of an Excel workbook.

    The user opens a workbook, picks a sheet, enters cell ranges for x and
    y, and the selected filters (original / gaussian / average) are drawn
    on an embedded matplotlib figure.  Results can be written back out via
    saveFile().
    """

    def __init__(self):
        self.mode = "gaussian"   # currently unused; presumably a default filter mode — TODO confirm
        self.wb = None           # openpyxl workbook, set by openFile()
        self.initGui()

    def initGui(self):
        """Build the root window, control panel and graph area."""
        self.text_size = 6               # width (chars) of the small entry widgets
        self.sheet_max_num_in_row = 8    # sheet buttons per row
        self.root = tk.Tk()
        self.root.wm_title("Graph Tool Controller")
        self.root.columnconfigure(0, weight=1)
        self.root.rowconfigure(1, weight=1)
        self.initControlFrame(self.root)
        self.initGraphFrame(self.root)
        for child in self.controlframe.winfo_children():
            child.grid_configure(sticky=(tk.W, tk.E ))
        #for child in self.graphframe.winfo_children():
        #    child.grid_configure(sticky=(tk.W, tk.E, tk.N, tk.S ))
        # <Return> anywhere in the window re-runs the plot.
        self.root.bind('<Return>', lambda event, i=self: i.process())

    def initControlFrame(self, root):
        """Build the top control area: menu, range entries, sheet row, options."""
        ###
        # controlframe
        controlframe = ttk.Frame(root)
        controlroot = ttk.Frame(root)
        controlroot.grid(row=0, column=0, sticky=(tk.N, tk.W, tk.E))
        menuframe = ttk.Frame(controlroot)
        controlframe = ttk.Frame(controlroot)
        optionframe = ttk.Frame(controlroot)
        sheetframe = ttk.Frame(controlroot)
        #controlframe.columnconfigure(0, weight=1)
        #controlframe.rowconfigure(0, weight=1)
        menuframe.grid(row=0,column=0, sticky=(tk.W))
        controlframe.grid(row=2, column=0, sticky=(tk.N, tk.W))
        optionframe.grid(row=2,column=2, sticky=(tk.E))
        sheetframe.grid(row=1, column=0, columnspan=self.sheet_max_num_in_row+1, sticky=(tk.W,tk.E))
        # controlframe column configure
        #for i in openpyxl.compat.range(4):
        #    controlframe.columnconfigure(i, weight=3%(i+1) )
        ### menuframe
        ttk.Button(menuframe, text="open file", command=self.openFile).grid(row=0, column=0)
        ttk.Button(menuframe, text="save file", command=self.saveFile).grid(row=0, column=1)
        self.menuframe = menuframe
        ### controlframe
        # Cell-range entries, e.g. x: "A2".."A100", y: "B2".."B100".
        self.x_start_var = tk.StringVar()
        self.x_end_var = tk.StringVar()
        self.y_start_var = tk.StringVar()
        self.y_end_var = tk.StringVar()
        # x variable
        ttk.Label(controlframe, text="x start").grid(row=0, column=0)
        ttk.Label(controlframe, text="x end").grid(row=0, column=2)
        x_start_entry = ttk.Entry(controlframe, textvariable=self.x_start_var, width=self.text_size)
        x_start_entry.grid(row=0, column=1)
        x_start_entry.focus()
        ttk.Entry(controlframe, textvariable=self.x_end_var, width=self.text_size).grid(row=0, column=3)
        # y variable
        ttk.Label(controlframe, text="y start").grid(row=1, column=0)
        ttk.Label(controlframe, text="y end").grid(row=1, column=2)
        ttk.Entry(controlframe, textvariable=self.y_start_var, width=self.text_size).grid(row=1, column=1)
        ttk.Entry(controlframe, textvariable=self.y_end_var, width=self.text_size).grid(row=1, column=3)
        # Run button
        self.controlframe = controlframe
        self.current_sheet_text = tk.StringVar()
        self.current_sheet_label = tk.Label(sheetframe, textvariable=self.current_sheet_text)
        self.current_sheet_label.grid(row=0, column=0, sticky=(tk.W,tk.E))
        self.current_sheet_text.set("sheet name")
        self.sheetframe = sheetframe
        ##
        # option Frame
        ## real_time_frame
        real_time_frame=ttk.Frame(optionframe)
        self.real_time_flag = tk.IntVar()
        ttk.Checkbutton(real_time_frame, text="real time", variable=self.real_time_flag).grid(row=0,column=0)
        ttk.Button(real_time_frame, text="run", command=self.process).grid(row=1, column=0)
        ttk.Label(real_time_frame, text="click run or enter").grid(row=2, column=0)
        self.real_time_flag.set(0)
        ## graph_limit_frame
        graph_limit_frame = ttk.Frame(optionframe)
        self.graph_limit_flag = tk.IntVar()
        self.graph_max_y = tk.DoubleVar()
        self.graph_min_y = tk.DoubleVar()
        ttk.Checkbutton(graph_limit_frame, text="graph limit", variable=self.graph_limit_flag).grid(row=0, column=0)
        ttk.Label(graph_limit_frame, text="max y").grid(row=1, column=0)
        ttk.Entry(graph_limit_frame, textvariable=self.graph_max_y, width=self.text_size).grid(row=1, column=1)
        ttk.Label(graph_limit_frame, text="min y").grid(row=2, column=0)
        ttk.Entry(graph_limit_frame, textvariable=self.graph_min_y, width=self.text_size).grid(row=2, column=1)
        self.graph_limit_flag.set(1)
        self.graph_max_y.set(140)
        self.graph_min_y.set(0)
        ## filter_original_frame
        filter_original_frame = ttk.Frame(optionframe)
        #filter_original_frame.grid(row=0, column=0)
        self.original_flag = tk.IntVar()
        ttk.Checkbutton(filter_original_frame, text="original", variable=self.original_flag).grid(row=0, column=0)
        self.original_flag.set(1)
        for child in filter_original_frame.winfo_children():
            child.grid_configure(sticky=(tk.W, tk.N))
        ## filter_gaussian_frame
        filter_gaussian_frame = ttk.Frame(optionframe)
        #filter_gaussian_frame.grid(row=0, column=1)
        self.gaussian_flag = tk.IntVar()
        self.gaussian_std = tk.DoubleVar()
        self.gaussian_window_size = tk.IntVar()
        ttk.Checkbutton(filter_gaussian_frame, text="gaussian filter", variable=self.gaussian_flag).grid(row=0,column=0)
        ttk.Label(filter_gaussian_frame, text="window size").grid(row=1, column=0)
        ttk.Entry(filter_gaussian_frame, textvariable=self.gaussian_window_size, width=self.text_size).grid(row=1, column=1)
        ttk.Label(filter_gaussian_frame, text="std").grid(row=2, column=0)
        ttk.Entry(filter_gaussian_frame, textvariable=self.gaussian_std, width=self.text_size).grid(row=2, column=1)
        self.gaussian_flag.set(0)
        self.gaussian_std.set(3)
        self.gaussian_window_size.set(3)
        for child in filter_gaussian_frame.winfo_children():
            child.grid_configure(sticky=(tk.W, tk.N))
        ## filter_average_frame
        filter_average_frame = ttk.Frame(optionframe)
        #filter_average_frame.grid(row=0, column=2)
        self.average_flag = tk.IntVar()
        self.average_window_size = tk.IntVar()
        ttk.Checkbutton(filter_average_frame, text="average filter", variable=self.average_flag).grid(row=0,column=0)
        ttk.Label(filter_average_frame, text="window size").grid(row=1, column=0)
        ttk.Entry(filter_average_frame, textvariable=self.average_window_size, width=self.text_size).grid(row=1, column=1)
        self.average_flag.set(0)
        self.average_window_size.set(3)
        for child in filter_average_frame.winfo_children():
            child.grid_configure(sticky=(tk.W, tk.N))
        # Lay the option sub-frames out left-to-right in one row.
        for i, child in enumerate(optionframe.winfo_children()):
            child.grid_configure(row=0, column=i, sticky=(tk.W, tk.N))

    def initGraphFrame(self, root):
        """Embed the matplotlib figure (with its toolbar) into the window."""
        ###
        # graphframe
        graphframe = ttk.Frame(root)
        graphframe.grid(row=1, column=0, sticky=(tk.N, tk.S, tk.W, tk.E))
        self.figure = Figure()
        canvas = FigureCanvasTkAgg(self.figure, master=graphframe)
        canvas.show()
        canvas.get_tk_widget().grid(row=0, column=0)
        toolbar = NavigationToolbar2TkAgg(canvas, graphframe)
        toolbar.update()
        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)
        self.canvas = canvas
        self.toolbar = toolbar
        self.graphframe = graphframe

    def update(self):
        """Timer tick: re-plot when 'real time' is checked, then reschedule."""
        if self.real_time_flag.get() == 1:
            self.process()
        self.root.after(100, self.update)

    def draw(self):
        pass

    def process(self):
        """Read the selected ranges and redraw the enabled filter curves.

        NOTE(review): the bare except silently swallows all errors (e.g. a
        bad range before a sheet is selected) — plotting just does nothing.
        """
        try:
            filterer = Filterer()
            x_data = self.ws[self.x_start_var.get():self.x_end_var.get()]
            y_data = self.ws[self.y_start_var.get():self.y_end_var.get()]
            x = []
            y = []
            for row in x_data:
                for cell in row:
                    x.append(cell.value)
            for row in y_data:
                for cell in row:
                    y.append(cell.value)
            # data read part
            x = np.array(x)
            y = np.array(y)
            self.figure.clear()
            f = self.figure.add_subplot(111)
            if self.original_flag.get() == 1:
                f.plot(x,y,color='black', label='original')
            if self.average_flag.get() == 1:
                y_filtered_with_average = filterer.averageFilter(y,window_size=self.average_window_size.get())
                f.plot(x,y_filtered_with_average, color="green",label='average filter')
            if self.gaussian_flag.get() == 1:
                y_filtered_with_gaussian = filterer.gaussianFilter(y, window_size=self.gaussian_window_size.get(), std=self.gaussian_std.get())
                f.plot(x,y_filtered_with_gaussian, color="red",label='gaussian filter')
            if self.graph_limit_flag.get() == 1:
                f.set_ylim([self.graph_min_y.get(), self.graph_max_y.get()])
            # legend
            f.legend(loc='upper left', frameon=False)
            self.canvas.show()
            #self.toolbar.update()
        except:
            pass

    def openFile(self):
        """Ask for an .xlsx file, load it, and rebuild the sheet buttons."""
        file_path = askopenfilename(#initialdir="~/",
                                    filetypes =(("Excel Files", "*.xlsx"),("All Files","*.*")),
                                    title = "Choose a file."
                                    )
        # when cancel the file dialog
        if(file_path == ''):
            return
        self.wb = load_workbook(file_path, data_only=True)
        # Keep child 0 (the sheet-name label); drop any old sheet buttons.
        for i, child in enumerate(self.sheetframe.winfo_children()):
            if i != 0:
                child.destroy()
        self.makeSheetBtn()

    def saveFile(self):
        """Write the filter results to a workbook chosen by the user.

        An existing file gets a new timestamped result sheet; otherwise a
        fresh workbook is created.
        """
        file_path = asksaveasfilename( defaultextension=".xlsx")
        # TODO : add logger
        if file_path == None:
            return
        wb = None
        ws = None
        if os.path.exists(file_path):
            wb = load_workbook(file_path)
            result_title = "result_"+str(datetime.now().year)+"_"+str(datetime.now().month)+"_"+str(datetime.now().day)+"_"+str(datetime.now().hour)+"_"+str(datetime.now().minute)+"_"+str(datetime.now().second)
            ws = wb.create_sheet(title=result_title)
        else:
            wb = Workbook()
            ws =wb.active
        self.fillResult(ws)
        wb.save(file_path)

    def fillResult(self, ws):
        """Fill *ws* with columns A..G: x, y, peaks, and both filter outputs."""
        filterer = Filterer()
        x_data = self.ws[self.x_start_var.get():self.x_end_var.get()]
        y_data = self.ws[self.y_start_var.get():self.y_end_var.get()]
        x = []
        y = []
        for row in x_data:
            for cell in row:
                x.append(cell.value)
        for row in y_data:
            for cell in row:
                y.append(cell.value)
        # data read part
        x = np.array(x)
        y = np.array(y)
        ## memory allocation
        for row in openpyxl.compat.range(1, len(y)+3):
            for col in openpyxl.compat.range(1,5):
                ws.cell(row=row,column=col)
        # Data rows start at row 2; row 1 holds the column headers.
        offset = 2
        ## save x column
        col_name_x = 'A'
        start_x = col_name_x+str(offset)
        end_x = col_name_x+str(len(x)+offset-1)
        A = ws[start_x:end_x]
        ws['A1']='x'
        for i, row in enumerate(A):
            for cell in row:
                cell.value = x[i]

        def fillCol(ws, col_name, field_name,offset, data):
            # Write *data* down column *col_name* with *field_name* as header.
            ## save original
            col_name = col_name
            start_y = col_name+str(offset)
            end_y = col_name+str(len(data)+offset-1)
            col = ws[start_y:end_y]
            ws[col_name+'1']= field_name
            for i, row in enumerate(col):
                for cell in row:
                    cell.value = data[i]

        ## save original
        col_name_y = 'B'
        start_y = col_name_y+str(offset)
        end_y = col_name_y+str(len(x)+offset-1)
        B = ws[start_y:end_y]
        ws['B1']='y origin'
        for i, row in enumerate(B):
            for cell in row:
                cell.value = y[i]
        ## save peak of original
        peak_y = filterer.findPeak(y)
        print(peak_y)
        fillCol(ws, 'C', 'peak y origin', offset, peak_y)
        ## gaussian
        y_filtered_with_gaussian = filterer.gaussianFilter(y, window_size=self.gaussian_window_size.get(), std=self.gaussian_std.get())
        ## save original
        col_name_y_gaussian = 'D'
        start_y_gaussian = col_name_y_gaussian+str(offset)
        end_y_gaussian = col_name_y_gaussian+str(len(x)+offset-1)
        D = ws[start_y_gaussian:end_y_gaussian]
        ws['D1']='y filtered with gaussian kernel'
        for i, row in enumerate(D):
            for cell in row:
                cell.value = y_filtered_with_gaussian[i]
        ## save peak of gaussian
        peak_y_gaussian = filterer.findPeak(y_filtered_with_gaussian)
        fillCol(ws, 'E', 'peak y gaussian', offset, peak_y_gaussian)
        y_filtered_with_average = filterer.averageFilter(y,window_size=self.average_window_size.get())
        ## save original
        col_name_y_average = 'F'
        start_y_average = col_name_y_average+str(offset)
        end_y_average = col_name_y_average+str(len(x)+offset-1)
        F = ws[start_y_average:end_y_average]
        ws['F1']='y filtered with average kernel'
        for i, row in enumerate(F):
            for cell in row:
                cell.value = y_filtered_with_average[i]
        ## save peak of average
        peak_y_average = filterer.findPeak(y_filtered_with_average)
        fillCol(ws, 'G', 'peak y average', offset, peak_y_average)

    def makeSheetBtn(self):
        """Create one selection button per sheet in the loaded workbook."""
        sheet_names = self.wb.get_sheet_names()
        for i, sheet_name in enumerate(sheet_names):
            tmp = sheet_name
            # sheet_name is bound as a default argument so each button keeps
            # its own sheet (avoids the late-binding closure pitfall).
            ttk.Button(self.sheetframe, text=sheet_name, command=lambda sheet_name=sheet_name: self.selectSheet(sheet_name)).grid(row=math.floor(i/self.sheet_max_num_in_row), column=(i+1)%self.sheet_max_num_in_row,sticky=(tk.W))

    def selectSheet(self, sheet_name):
        """Make *sheet_name* the active worksheet and show it in the label."""
        self.current_sheet_text.set(sheet_name)
        self.ws = self.wb[sheet_name]

    def run(self):
        """Start the periodic update timer and enter the Tk main loop."""
        self.root.after(100, self.update)
        self.root.mainloop()
if __name__ == '__main__':
    # Build the Tk application and enter its main loop.
    GraphTool().run()
|
{
"content_hash": "6f7e20231608a8c0212ad599a6b97718",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 229,
"avg_line_length": 33.70459081836327,
"alnum_prop": 0.5775790595759801,
"repo_name": "goddoe/GraphTool",
"id": "1afe2b9f1dfb70abca73e0156f00b1b1df4bd9f6",
"size": "16886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/GraphTool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "19"
},
{
"name": "Python",
"bytes": "16886"
}
],
"symlink_target": ""
}
|
import requests
from flask import Flask, request
app = Flask(__name__)

# NOTE: the ``# $ ...`` comments below are CodeQL inline test expectations
# consumed by the test harness — they must not be edited or removed.

@app.route("/taint_test") # $ routeSetup="/taint_test"
def test_taint(): # $ requestHandler
    url = request.args['untrusted_input']

    # response from a request to a user-controlled URL should be considered
    # user-controlled as well.
    resp = requests.get(url) # $ clientRequestUrlPart=url

    requests.Response
    requests.models.Response

    ensure_tainted(
        url, # $ tainted

        # see https://docs.python-requests.org/en/latest/api/#requests.Response
        resp, # $ tainted
        resp.text, # $ tainted
        resp.content, # $ tainted

        resp.json(), # $ tainted

        # file-like
        resp.raw, # $ tainted
        resp.raw.read(), # $ tainted

        resp.links, # $ tainted
        resp.links['key'], # $ tainted
        resp.links.get('key'), # $ tainted

        resp.cookies, # $ tainted
        resp.cookies['key'], # $ tainted
        resp.cookies.get('key'), # $ tainted

        resp.headers, # $ tainted
        resp.headers['key'], # $ tainted
        resp.headers.get('key'), # $ tainted
    )

    for content_chunk in resp.iter_content():
        ensure_tainted(content_chunk) # $ tainted

    for line in resp.iter_lines():
        ensure_tainted(line) # $ tainted

    # for now, we don't assume that the response to ANY outgoing request is a remote
    # flow source, since this could lead to FPs.
    # TODO: investigate whether we should consider this a remote flow source.
    trusted_url = "https://internal-api-that-i-trust.com"
    resp = requests.get(trusted_url) # $ clientRequestUrlPart=trusted_url
    ensure__not_tainted(resp)
|
{
"content_hash": "44afdfd63e1685059d36e1d890ed523c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 84,
"avg_line_length": 30.527272727272727,
"alnum_prop": 0.6188207266229899,
"repo_name": "github/codeql",
"id": "48ff417baa737fd7a8f5ab5d1cb79581d318ef95",
"size": "1679",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/ql/test/library-tests/frameworks/requests/taint_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP.NET",
"bytes": "3739"
},
{
"name": "Batchfile",
"bytes": "3534"
},
{
"name": "C",
"bytes": "410440"
},
{
"name": "C#",
"bytes": "21146000"
},
{
"name": "C++",
"bytes": "1352639"
},
{
"name": "CMake",
"bytes": "1809"
},
{
"name": "CodeQL",
"bytes": "32583145"
},
{
"name": "Dockerfile",
"bytes": "496"
},
{
"name": "EJS",
"bytes": "1478"
},
{
"name": "Emacs Lisp",
"bytes": "3445"
},
{
"name": "Go",
"bytes": "697562"
},
{
"name": "HTML",
"bytes": "58008"
},
{
"name": "Handlebars",
"bytes": "1000"
},
{
"name": "Java",
"bytes": "5417683"
},
{
"name": "JavaScript",
"bytes": "2432320"
},
{
"name": "Kotlin",
"bytes": "12163740"
},
{
"name": "Lua",
"bytes": "13113"
},
{
"name": "Makefile",
"bytes": "8631"
},
{
"name": "Mustache",
"bytes": "17025"
},
{
"name": "Nunjucks",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "1941"
},
{
"name": "PowerShell",
"bytes": "1295"
},
{
"name": "Python",
"bytes": "1649035"
},
{
"name": "RAML",
"bytes": "2825"
},
{
"name": "Ruby",
"bytes": "299268"
},
{
"name": "Rust",
"bytes": "234024"
},
{
"name": "Shell",
"bytes": "23973"
},
{
"name": "Smalltalk",
"bytes": "23"
},
{
"name": "Starlark",
"bytes": "27062"
},
{
"name": "Swift",
"bytes": "204309"
},
{
"name": "Thrift",
"bytes": "3020"
},
{
"name": "TypeScript",
"bytes": "219623"
},
{
"name": "Vim Script",
"bytes": "1949"
},
{
"name": "Vue",
"bytes": "2881"
}
],
"symlink_target": ""
}
|
"""The Categorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
class Categorical(distribution.Distribution):
  """Categorical distribution.

  The categorical distribution is parameterized by the log-probabilities
  of a set of classes.

  #### Examples

  Creates a 3-class distribution, with the 2nd class, the most likely to be
  drawn from.

  ```python
  p = [0.1, 0.5, 0.4]
  dist = Categorical(p=p)
  ```

  Creates a 3-class distribution, with the 2nd class the most likely to be
  drawn from, using logits.

  ```python
  logits = [-50, 400, 40]
  dist = Categorical(logits=logits)
  ```

  Creates a 3-class distribution, with the 3rd class the most likely to be
  drawn. The distribution functions can be evaluated on counts.

  ```python
  # counts is a scalar.
  p = [0.1, 0.4, 0.5]
  dist = Categorical(p=p)
  dist.pmf(0)  # Shape []

  # p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match counts.
  counts = [1, 0]
  dist.pmf(counts)  # Shape [2]

  # p will be broadcast to shape [3, 5, 7, 3] to match counts.
  counts = [[...]]  # Shape [5, 7, 3]
  dist.pmf(counts)  # Shape [5, 7, 3]
  ```

  """

  def __init__(
      self,
      logits=None,
      p=None,
      dtype=dtypes.int32,
      validate_args=False,
      allow_nan_stats=True,
      name="Categorical"):
    """Initialize Categorical distributions using class log-probabilities.

    Args:
      logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
          of a set of Categorical distributions. The first `N - 1` dimensions
          index into a batch of independent distributions and the last dimension
          represents a vector of logits for each class. Only one of `logits` or
          `p` should be passed in.
      p: An N-D `Tensor`, `N >= 1`, representing the probabilities
          of a set of Categorical distributions. The first `N - 1` dimensions
          index into a batch of independent distributions and the last dimension
          represents a vector of probabilities for each class. Only one of
          `logits` or `p` should be passed in.
      dtype: The type of the event samples (default: int32).
      validate_args: Unused in this distribution.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
          exception if a statistic (e.g. mean/mode/etc...) is undefined for any
          batch member. If `True`, batch members with valid parameters leading to
          undefined statistics will return NaN for this statistic.
      name: A name for this distribution (optional).
    """
    with ops.name_scope(name, values=[logits]) as ns:
      # Normalize the parameterization: exactly one of `logits`/`p` was
      # supplied; recover both representations from it.
      self._logits, self._p = distribution_util.get_logits_and_prob(
          name=name, logits=logits, p=p, validate_args=validate_args,
          multidimensional=True)

      logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
      if logits_shape_static.ndims is not None:
        # Static rank is known: the batch rank is everything except the
        # trailing class dimension.
        self._batch_rank = ops.convert_to_tensor(
            logits_shape_static.ndims - 1,
            dtype=dtypes.int32,
            name="batch_rank")
      else:
        # Rank unknown at graph-construction time; compute it dynamically.
        with ops.name_scope(name="batch_rank"):
          self._batch_rank = array_ops.rank(self._logits) - 1

      logits_shape = array_ops.shape(self._logits, name="logits_shape")
      if logits_shape_static[-1].value is not None:
        # The number of classes (last dimension of logits) is statically known.
        self._num_classes = ops.convert_to_tensor(
            logits_shape_static[-1].value,
            dtype=dtypes.int32,
            name="num_classes")
      else:
        self._num_classes = array_ops.gather(logits_shape,
                                             self._batch_rank,
                                             name="num_classes")

      if logits_shape_static[:-1].is_fully_defined():
        self._batch_shape_val = constant_op.constant(
            logits_shape_static[:-1].as_list(),
            dtype=dtypes.int32,
            name="batch_shape")
      else:
        with ops.name_scope(name="batch_shape"):
          self._batch_shape_val = logits_shape[:-1]
      super(Categorical, self).__init__(
          dtype=dtype,
          parameters={"logits": self._logits, "num_classes": self._num_classes},
          is_continuous=False,
          is_reparameterized=False,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)

  @property
  def num_classes(self):
    """Scalar `int32` tensor: the number of classes."""
    return self._num_classes

  @property
  def logits(self):
    """Vector of coordinatewise logits."""
    return self._logits

  @property
  def p(self):
    """Vector of probabilities summing to one.

    Each element is the probability of drawing that coordinate."""
    return self._p

  def _batch_shape(self):
    # Use identity to inherit callers "name".
    return array_ops.identity(self._batch_shape_val)

  def _get_batch_shape(self):
    # Static batch shape: all but the trailing class dimension of `logits`.
    return self.logits.get_shape()[:-1]

  def _event_shape(self):
    # Events are scalar class indices, so the event shape is empty.
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # `multinomial` requires 2-D logits; flatten batch dimensions if needed.
    if self.logits.get_shape().ndims == 2:
      logits_2d = self.logits
    else:
      logits_2d = array_ops.reshape(self.logits, [-1, self.num_classes])
    samples = random_ops.multinomial(logits_2d, n, seed=seed)
    samples = math_ops.cast(samples, self.dtype)
    # Move the sample dimension to the front: shape [n] + batch_shape.
    ret = array_ops.reshape(
        array_ops.transpose(samples),
        array_ops.concat(0, ([n], self.batch_shape())))
    return ret

  def _log_prob(self, k):
    k = ops.convert_to_tensor(k, name="k")

    if self.logits.get_shape()[:-1] == k.get_shape():
      logits = self.logits
    else:
      # Broadcast logits and k against each other so the cross-entropy op
      # sees matching batch shapes.
      logits = self.logits * array_ops.ones_like(
          array_ops.expand_dims(k, -1), dtype=self.logits.dtype)
      logits_shape = array_ops.shape(logits)[:-1]
      k *= array_ops.ones(logits_shape, dtype=k.dtype)
      k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
    # log P(k) == -cross_entropy(logits, k) for integer class labels.
    return -nn_ops.sparse_softmax_cross_entropy_with_logits(logits, k)

  def _prob(self, k):
    return math_ops.exp(self._log_prob(k))

  def _entropy(self):
    # As in _sample_n, the softmax ops need 2-D logits.
    if self.logits.get_shape().ndims == 2:
      logits_2d = self.logits
    else:
      logits_2d = array_ops.reshape(self.logits, [-1, self.num_classes])
    # Entropy == cross-entropy of the distribution with itself.
    histogram_2d = nn_ops.softmax(logits_2d)
    ret = array_ops.reshape(
        nn_ops.softmax_cross_entropy_with_logits(logits_2d, histogram_2d),
        self.batch_shape())
    ret.set_shape(self.get_batch_shape())
    return ret

  def _mode(self):
    # The mode is the class with the largest logit in each batch member.
    ret = math_ops.argmax(self.logits, dimension=self._batch_rank)
    ret = math_ops.cast(ret, self.dtype)
    ret.set_shape(self.get_batch_shape())
    return ret
|
{
"content_hash": "16817df840f6dbda16cfdf7dfdc746de",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 80,
"avg_line_length": 34.95673076923077,
"alnum_prop": 0.6428276715719984,
"repo_name": "juharris/tensorflow",
"id": "9fa013db3625d4d52968edd268eeb2a9b9d5d8a2",
"size": "7960",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/categorical.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156005"
},
{
"name": "C++",
"bytes": "9229239"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "783708"
},
{
"name": "Java",
"bytes": "39181"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1773496"
},
{
"name": "Protocol Buffer",
"bytes": "112087"
},
{
"name": "Python",
"bytes": "6699482"
},
{
"name": "Shell",
"bytes": "185658"
},
{
"name": "TypeScript",
"bytes": "410434"
}
],
"symlink_target": ""
}
|
'''
Test Attach/Detach Port Forwarding
@author: czhou25
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_port_forwarding as zstack_pf_header
import apibinding.inventory as inventory
import os
# Short aliases for the port-forwarding rule/port helpers used below.
PfRule = test_state.PfRule
Port = test_state.Port

# Shared test stub library and the global registry of created resources
# (used by error_cleanup for teardown on failure).
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
def test():
    """Exercise attach/detach of two port-forwarding rules sharing one VIP.

    Creates two DNAT VMs and two virtual routers (vlan / no-vlan), binds two
    PF rules to a single VIP, then repeatedly detaches and re-attaches the
    rules across VM nics — including while a VM is stopped — verifying the
    VIP after each step.
    """
    # Two target VMs plus two virtual routers on different L3 networks.
    pf_vm1 = test_stub.create_dnat_vm()
    test_obj_dict.add_vm(pf_vm1)
    pf_vm2 = test_stub.create_dnat_vm()
    test_obj_dict.add_vm(pf_vm2)

    l3_name = os.environ.get('l3VlanNetworkName1')
    vr1 = test_stub.create_vr_vm(test_obj_dict, l3_name)
    l3_name = os.environ.get('l3NoVlanNetworkName1')
    vr2 = test_stub.create_vr_vm(test_obj_dict, l3_name)

    vr1_pub_ip = test_lib.lib_find_vr_pub_ip(vr1)
    vr2_pub_ip = test_lib.lib_find_vr_pub_ip(vr2)

    pf_vm1.check()
    pf_vm2.check()

    # First NIC of each VM is the private target of the PF rules.
    vm_nic1 = pf_vm1.vm.vmNics[0]
    vm_nic_uuid1 = vm_nic1.uuid
    vm_nic2 = pf_vm2.vm.vmNics[0]
    vm_nic_uuid2 = vm_nic2.uuid

    # Create the VIP on the public L3 of vm1's virtual router.
    pri_l3_uuid = vm_nic1.l3NetworkUuid
    vr = test_lib.lib_find_vr_by_l3_uuid(pri_l3_uuid)[0]
    vr_pub_nic = test_lib.lib_find_vr_pub_nic(vr)
    l3_uuid = vr_pub_nic.l3NetworkUuid

    vip = test_stub.create_vip('pf_attach_test', l3_uuid)
    test_obj_dict.add_vip(vip)
    vip_uuid = vip.get_vip().uuid

    # Two PF rules on disjoint port ranges, both bound to the same VIP.
    #pf_creation_opt = PfRule.generate_pf_rule_option(vr1_pub_ip, protocol=inventory.TCP, vip_target_rule=Port.rule1_ports, private_target_rule=Port.rule1_ports)
    pf_creation_opt1 = PfRule.generate_pf_rule_option(vr1_pub_ip, protocol=inventory.TCP, vip_target_rule=Port.rule4_ports, private_target_rule=Port.rule4_ports, vip_uuid=vip_uuid)
    test_pf1 = zstack_pf_header.ZstackTestPortForwarding()
    test_pf1.set_creation_option(pf_creation_opt1)
    test_pf1.create()
    vip.attach_pf(test_pf1)

    pf_creation_opt2 = PfRule.generate_pf_rule_option(vr2_pub_ip, protocol=inventory.TCP, vip_target_rule=Port.rule5_ports, private_target_rule=Port.rule5_ports, vip_uuid=vip_uuid)
    test_pf2 = zstack_pf_header.ZstackTestPortForwarding()
    test_pf2.set_creation_option(pf_creation_opt2)
    test_pf2.create()
    vip.attach_pf(test_pf2)

    pf_vm1.check()
    vip.check()

    # Attach each rule to its own VM nic, then verify.
    test_pf1.attach(vm_nic_uuid1, pf_vm1)
    test_pf2.attach(vm_nic_uuid2, pf_vm2)
    vip.check()

    # Stop vm1, move rule 1 over to vm2's nic, restart vm1 and re-verify.
    pf_vm1.stop()
    vip.check()
    test_pf1.detach()
    test_pf1.attach(vm_nic_uuid2, pf_vm2)
    pf_vm1.start()
    pf_vm1.check()
    vip.check()

    # Re-point both rules at vm1's nic (again while vm1 is stopped).
    pf_vm1.stop()
    test_pf1.detach()
    test_pf2.detach()
    test_pf1.attach(vm_nic_uuid1, pf_vm1)
    test_pf2.attach(vm_nic_uuid1, pf_vm1)
    pf_vm1.start()
    pf_vm1.check()
    vip.check()

    # Cleanup of all created resources.
    vip.delete()
    test_obj_dict.rm_vip(vip)
    pf_vm1.destroy()
    test_obj_dict.rm_vm(pf_vm1)
    pf_vm2.destroy()
    test_obj_dict.rm_vm(pf_vm2)
    test_util.test_pass("Test Port Forwarding Attach/Detach Successfully")
#Will be called only if exception happens in test().
def error_cleanup():
    """Tear down every resource registered in the global test_obj_dict."""
    global test_obj_dict
    test_lib.lib_error_cleanup(test_obj_dict)
|
{
"content_hash": "9fc82941994388c5f14fe797d398dae6",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 180,
"avg_line_length": 31.22772277227723,
"alnum_prop": 0.6873811033608117,
"repo_name": "zstackio/zstack-woodpecker",
"id": "51cedfcbc9ad5f3bd8161bf1367510e08f5d2d56",
"size": "3154",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/virtualrouter/vip/test_multi_pfs_with_vip.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
}
|
import unittest
from .fixtures import ops, operations, getOperationNameForId, getOperationName
class Testcases(unittest.TestCase):
    """Unit tests for the operation-id fixtures."""

    def test_ops(self):
        """``ops`` is exposed as a list."""
        self.assertIsInstance(ops, list)

    def test_operations(self):
        """Every entry in ``operations`` maps a string name to an int id."""
        # Renamed loop variables: the original used `id`, shadowing the
        # builtin of the same name.
        for op_name, op_id in operations.items():
            self.assertIsInstance(op_name, str)
            self.assertIsInstance(op_id, int)

    def test_getOperationNameForId(self):
        """Known id resolves; unknown id raises ValueError."""
        self.assertEqual(getOperationNameForId(0), "demooepration")
        with self.assertRaises(ValueError):
            getOperationNameForId(20)

    def test_operation_type_decode(self):
        """``getOperationName`` accepts both int ids and string names."""
        self.assertEqual(getOperationName(0), "demooepration")
        self.assertEqual(getOperationName("account_create"), "account_create")
        with self.assertRaises(AssertionError):
            self.assertEqual(getOperationName("-not-exist-"), "account_create")
|
{
"content_hash": "4f72a8e4850559289e4989137c05608b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 36.041666666666664,
"alnum_prop": 0.6947976878612717,
"repo_name": "xeroc/python-graphenelib",
"id": "83d42d57bb3fc50e82bdc7972a791c24fe1dde83",
"size": "889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_operationids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "872"
},
{
"name": "Python",
"bytes": "922435"
}
],
"symlink_target": ""
}
|
'''
Copyright (c) 2013, Agora Games, LLC All rights reserved.
https://github.com/agoragames/torus/blob/master/LICENSE.txt
'''
from urlparse import *
import re
from redis import Redis
from pymongo import MongoClient
from kairos import Timeseries
from sqlalchemy import create_engine
import cql
import time
def long_or_float(v):
  '''
  Coerce v to a long integer where possible, falling back to float.

  A ValueError from float() (completely non-numeric input) propagates.
  '''
  try:
    # Python 2 only: `long` does not exist on Python 3.
    return long(v)
  except ValueError:
    return float(v)
class Schema(object):
  '''
  Implements the schema and associated data processing for data points.
  '''

  def __init__(self, name, config):
    '''
    Build a schema from its configuration dictionary.

    Schema-level keys (host, rolling, generator, match, transform) are
    popped from `config`; whatever remains is passed through to
    kairos.Timeseries.
    '''
    self._count = 0
    self._name = name
    self._host = config.pop('host', 'sqlite:///:memory:')
    self._rolling = config.pop('rolling', 0)
    self._generator = config.pop('generator',None)
    config.setdefault('type', 'count')
    # Values are stored/read as longs when possible, floats otherwise.
    config.setdefault('write_func', long_or_float)
    config.setdefault('read_func', long_or_float)
    self._transform = config.get('transform')

    # parse the patterns and bind the Schema.match function
    # TODO: optimize this binding even further to reduce lookups at runtime
    self._patterns = config.pop('match', [])
    if isinstance(self._patterns, (tuple,list)):
      if len(self._patterns) != 1:
        self._patterns = [ re.compile(x) for x in self._patterns ]
        self.match = self._match_list
      else:
        # A single-element list degrades to the single-pattern fast path.
        self._patterns = re.compile(self._patterns[0])
        self.match = self._match_single
    else:
      self._patterns = re.compile(self._patterns)
      self.match = self._match_single

    self.config = config
    self.timeseries = Timeseries(self._host, **config)

    # Bind some of the timeseries methods to this for convenience
    self.list = self.timeseries.list
    self.properties = self.timeseries.properties
    self.iterate = self.timeseries.iterate

  @property
  def name(self):
    '''Schema name as given in the configuration.'''
    return self._name

  @property
  def host(self):
    '''Storage backend URL for the timeseries.'''
    return self._host

  @property
  def count(self):
    '''Number of data points stored through this schema so far.'''
    return self._count

  def generate(self):
    '''
    Produce a synthetic (stat, value, timestamp) triple via the configured
    generator, or None when no generator is configured.
    '''
    if self._generator:
      stat,value = self._generator()
      return stat,value,time.time()
    return None

  def store(self, stat, val, timestamp=None):
    '''
    Store a value in this schema.

    Returns True when the stat matched this schema and was inserted,
    False otherwise (no match, or the transform vetoed the point).
    '''
    if self.match(stat):
      if self._transform:
        stat,val = self._transform(stat,val)
        if stat is None:
          # Transform vetoed the data point.
          return False
      self._count += 1
      self.timeseries.insert(stat, val, timestamp, intervals=self._rolling)
      return True
    return False

  def _match_single(self, stat):
    '''
    Used for when schema implements a single regular expression, returns
    True if the stat matches this schema, False otherwise.
    '''
    if isinstance(stat,(list,tuple)):
      # A list of stats matches only if every element matches.
      matches = filter(None, [self._patterns.search(s) for s in stat] )
      return len(matches)==len(stat)
    return self._patterns.search(stat) is not None

  def _match_list(self, stat):
    '''
    Used for when schema implements several regular expressions, returns
    True if the stat matches this schema, False otherwise.
    '''
    matches = set()
    for pattern in self._patterns:
      if isinstance(stat,(list,tuple)):
        # Accumulate matched elements across patterns; succeed once every
        # element has been matched by some pattern.
        for s in stat:
          if pattern.search(s):
            matches.add(s)
        if len(matches)==len(stat):
          return True
      elif pattern.search(stat):
        return True
    return False
|
{
"content_hash": "681ec1e51d6801c15f1a90d948f601d0",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 75,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.6518853695324284,
"repo_name": "agoragames/torus",
"id": "14a2fd403fdf930a9e7ad7c073212a2478390f80",
"size": "3315",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "torus/schema.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "52513"
}
],
"symlink_target": ""
}
|
import re
from grab.spider import Spider, Task
from d_parser.helpers.cookies_init import cookies_init
from d_parser.helpers.parser_extender import check_body_errors, process_error, common_init, extend_class, check_errors, process_finally
from helpers.config import Config
from helpers.url_generator import UrlGenerator
VERSION = 27
# Warn: Don't remove task argument even if not use it (it's break grab and spider crashed)
# Warn: noinspection PyUnusedLocal
class DSpider(Spider):
    """Grab spider for the Stiebel Eltron dealer site.

    ``task_initial`` walks the top navigation for category links,
    ``task_parse_page`` queues every product found on a listing page, and
    ``task_parse_item`` extracts the product fields into ``self.result``.
    """

    initial_urls = Config.get_seq('SITE_URL')
    # "<label>: <count>" -> stock count
    re_product_count = re.compile('^.+: (?P<count>\d+)$')
    # "<label>: <price> ..." -> price digits (may include separators)
    re_product_price = re.compile('^.+:\s+(?P<price>\d+(.+\d)?).+$')

    def __init__(self, thread_number, try_limit=0):
        """Create the spider and mix in the shared helper methods.

        :param thread_number: number of spider worker threads.
        :param try_limit: network retry limit passed to grab.
        """
        super().__init__(thread_number=thread_number, network_try_limit=try_limit, priority_mode='const')
        # Mix the shared helpers into this class; they become bound methods
        # (self.check_body_errors, self.process_error, ...).
        extend_class(DSpider, [
            check_body_errors,
            check_errors,
            process_error,
            process_finally,
            common_init
        ])
        self.common_init(try_limit)

    def create_grab_instance(self, **kwargs):
        """Attach the shared cookie jar to every new grab instance."""
        grab = super(DSpider, self).create_grab_instance(**kwargs)
        return cookies_init(self.cookie_jar, grab)

    # Fetch all categories from main page
    def task_initial(self, grab, task):
        """Collect catalog category links from the main navigation bar."""
        try:
            if self.check_body_errors(grab, task):
                self.log.fatal(task, f'Err task, attempt {task.task_try_count}')
                return

            # Labels of non-catalog navigation entries that must be skipped.
            exclude_links_labels = ['Оплата', 'Доставка', 'Гарантия', 'Акции', 'Рекомендации по подбору', 'Информация и реквизиты',
                                    'Новости', 'Контакты', 'Сервис-центр']

            # take all links from horizontal nav, exclude anchors (#) and external links
            category_list = grab.doc.select('//div[@id="navbar"]//a[starts-with(@href, "/")]')

            # take links only for main cats, because its already contain all sub-cats items
            for link in category_list:
                # skip if label have stop words
                if link.text().strip() in exclude_links_labels:
                    continue

                link = link.attr('href')

                # make absolute urls if needed
                if link[:1] == '/':
                    link = UrlGenerator.get_page_params(self.domain, link, {})

                yield Task('parse_page', url=link, priority=90, raw=True)

        except Exception as e:
            self.process_error(grab, task, e)

        finally:
            self.process_finally(task)

    # parse page
    def task_parse_page(self, grab, task):
        """Queue a parse_item task for every product on a listing page."""
        try:
            if self.check_body_errors(grab, task):
                yield self.check_errors(task)

            # parse items
            items_list = grab.doc.select('//div[@class="prod-list-cell"]//a[.!=""]')

            for index, row in enumerate(items_list):
                link = row.attr('href')

                # make absolute urls if needed
                if link[:1] == '/':
                    link = UrlGenerator.get_page_params(self.domain, link, {})

                yield Task('parse_item', url=link, priority=100, raw=True)

        except Exception as e:
            # Bug fix: was `self._process_error`, but the mixed-in helper is
            # bound as `process_error` (see extend_class in __init__ and the
            # other handlers), so the old name raised AttributeError instead
            # of logging the original failure.
            self.process_error(grab, task, e)

        finally:
            self.process_finally(task)

    # parse single item
    def task_parse_item(self, grab, task):
        """Extract one product's fields and append them to self.result."""
        try:
            if self.check_body_errors(grab, task):
                yield self.check_errors(task)

            # parse fields
            # A = name
            product_name = grab.doc.select('//h1').text()

            # B = count
            # C = status
            product_count_string = grab.doc.select('//span[@class="p-qty-wh"]').text()

            if product_count_string == 'Под заказ':
                # On-request items carry no stock figure.
                product_status = '-1'
                product_count = '-1'
            elif product_count_string == 'На складе: более 100':
                # "More than 100 in stock" is capped at 100.
                product_status = '-1'
                product_count = 100
            else:
                product_status = '-1'
                product_count = DSpider.re_product_count.match(product_count_string).groupdict()['count']

            # D = unit [const = value]
            product_unit = 'ед.'

            # E = price
            product_price = DSpider.re_product_price.match(grab.doc.select('//div[@class="ppage-product-price"]').text()).groupdict()['price'].replace(' ', '')

            # check if positive and correct price
            if not product_price.isdigit():
                self.log.debug(task, f'Skip item, cuz wrong price {product_price}')
                return

            # F = vendor code [const = skip for parsing]
            product_vendor_code = ''

            # G = vendor [const = value]
            product_vendor = 'Stiebel Eltron'

            # H = photo url
            product_photo_url = UrlGenerator.get_page_params(self.domain, grab.doc.select('//img[@id="Image1"]').attr('src'), {})

            # I = description: intro paragraph plus the key/value spec table
            product_description = {'ОБЛАСТЬ ПРИМЕНЕНИЯ': grab.doc.select('//div[@class="col-md-14"]/p').text(default=' ')}

            table = grab.doc.select('//div[@class="col-md-14"]/table//tr')

            for row in table:
                key = row.select('./td[1]').text()
                value = row.select('./td[2]').text()

                if key:
                    product_description[key] = value

            # save
            self.result.append({
                'name': product_name,
                'quantity': product_count,
                'delivery': product_status,
                'measure': product_unit,
                'price': product_price,
                'sku': product_vendor_code,
                'manufacture': product_vendor,
                'photo': product_photo_url,
                'properties': product_description
            })

        except Exception as e:
            self.process_error(grab, task, e)

        finally:
            self.process_finally(task)
|
{
"content_hash": "9f2896450a66d8ca1f2aacf0285a420f",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 159,
"avg_line_length": 34.9766081871345,
"alnum_prop": 0.5392074903862231,
"repo_name": "Holovin/D_GrabDemo",
"id": "a46b0c9c4f1e23c76cbd9f086437c10b03e63fb5",
"size": "6115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "d_parser/v_27_5/d_spider_5sti.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36271"
}
],
"symlink_target": ""
}
|
from typing import Tuple, Union
import numpy as np
import scipy.stats
from emukit.core.acquisition import Acquisition
from emukit.core.interfaces import IDifferentiable, IModel
class ManualCausalExpectedImprovement(Acquisition):
    """Causal expected improvement for use before any BO model is fitted.

    The surrogate is described directly by a mean function and a variance
    function (plus the prior kernel variance), rather than by a trained
    emulator.

    Efficient Global Optimization of Expensive Black-Box Functions
    Jones, Donald R. and Schonlau, Matthias and Welch, William J.
    Journal of Global Optimization
    """

    def __init__(
        self, current_global_min, task, mean_function, variance_function, previous_variance, jitter: float = float(0),
    ) -> None:
        """
        :param current_global_min: best objective value observed so far.
        :param task: "min" or "max"; flips the sign of the improvement.
        :param mean_function: the mean function for the current DCBO exploration at given temporal index.
        :param variance_function: the variance function for the current DCBO exploration at given temporal index.
        :param previous_variance: prior (kernel) variance added to the predictive variance.
        :param jitter: parameter to encourage extra exploration.
        """
        self.current_global_min = current_global_min
        self.task = task
        self.mean_function = mean_function
        self.variance_function = variance_function
        self.previous_variance = previous_variance
        self.jitter = jitter

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """Compute the expected improvement at the points ``x``."""
        # Causal-GP predictive moments: jittered mean, and prior kernel
        # variance plus the variance adjustment term (see Causal GP def
        # in the paper).
        mu = self.mean_function(x) + self.jitter
        var = self.previous_variance * np.ones((x.shape[0], 1)) + self.variance_function(x)
        sigma = np.sqrt(var.clip(0))

        u, pdf, cdf = get_standard_normal_pdf_cdf(self.current_global_min, mu, sigma)
        improvement = sigma * (u * cdf + pdf)
        # For maximisation the sign of the improvement is flipped.
        return improvement if self.task == "min" else -improvement

    @property
    def has_gradients(self) -> bool:
        """This acquisition exposes no analytic gradients."""
        return False
class CausalExpectedImprovement(Acquisition):
    """Model-backed causal expected improvement acquisition."""

    def __init__(
        self,
        current_global_min,
        task,
        dynamic,
        causal_prior,
        temporal_index,
        model: Union[IModel, IDifferentiable],
        jitter: float = float(0),
    ) -> None:
        """
        This acquisition computes for a given input the improvement over the current best observed value in
        expectation. For more information see:

        Efficient Global Optimization of Expensive Black-Box Functions
        Jones, Donald R. and Schonlau, Matthias and Welch, William J.
        Journal of Global Optimization

        :param current_global_min: best objective value observed so far.
        :param task: "min" or "max"; flips the sign of the improvement.
        :param dynamic: whether this is the dynamic (ABO-style) setting.
        :param causal_prior: whether the model carries a causal prior; when
            False in the dynamic setting, a time column is appended to x.
        :param temporal_index: current time index appended in the ABO case.
        :param model: model that is used to compute the improvement.
        :param jitter: parameter to encourage extra exploration.
        """
        self.model = model
        self.jitter = jitter
        self.current_global_min = current_global_min
        self.task = task
        self.dynamic = dynamic
        self.causal_prior = causal_prior
        self.temporal_index = temporal_index

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """
        Computes the Expected Improvement.

        :param x: points where the acquisition is evaluated.
        """
        # Adding an extra time dimension for ABO
        if self.dynamic and self.causal_prior is False:
            x = np.hstack((x, np.repeat(self.temporal_index, x.shape[0])[:, np.newaxis]))

        mean, variance = self.model.predict(x)

        # Variance is computed with MonteCarlo so we might have some numerical stability
        # This is ensuring that negative values or nan values are not generated
        # (NaNs are zeroed; otherwise negatives are clipped to 1e-4).
        if np.any(np.isnan(variance)):
            variance[np.isnan(variance)] = 0
        elif np.any(variance < 0):
            variance = variance.clip(0.0001)

        standard_deviation = np.sqrt(variance)
        # Jitter is added after the variance is taken, so it only shifts the mean.
        mean += self.jitter

        u, pdf, cdf = get_standard_normal_pdf_cdf(self.current_global_min, mean, standard_deviation)
        if self.task == "min":
            improvement = standard_deviation * (u * cdf + pdf)
        else:
            improvement = -(standard_deviation * (u * cdf + pdf))

        return improvement

    def evaluate_with_gradients(self, x: np.ndarray) -> Tuple:
        """
        Computes the Expected Improvement and its derivative.

        :param x: locations where the evaluation with gradients is done.
        """
        # Adding an extra time dimension for ABO
        # Restrict the input space via an additional function
        if self.dynamic and self.causal_prior is False:
            x = np.hstack((x, np.repeat(self.temporal_index, x.shape[0])[:, np.newaxis]))

        mean, variance = self.model.predict(x)

        # Variance is computed with MonteCarlo so we might have some numerical stability
        # This is ensuring that negative values or nan values are not generated
        if np.any(np.isnan(variance)):
            variance[np.isnan(variance)] = 0
        elif np.any(variance < 0):
            variance = variance.clip(0.0001)

        standard_deviation = np.sqrt(variance)

        # Chain rule: d(sigma)/dx = d(var)/dx / (2 * sigma).
        dmean_dx, dvariance_dx = self.model.get_prediction_gradients(x)
        dstandard_deviation_dx = dvariance_dx / (2 * standard_deviation)

        mean += self.jitter

        u, pdf, cdf = get_standard_normal_pdf_cdf(self.current_global_min, mean, standard_deviation)

        if self.task == "min":
            improvement = standard_deviation * (u * cdf + pdf)
            dimprovement_dx = dstandard_deviation_dx * pdf - cdf * dmean_dx
        else:
            improvement = -(standard_deviation * (u * cdf + pdf))
            dimprovement_dx = -(dstandard_deviation_dx * pdf - cdf * dmean_dx)

        return improvement, dimprovement_dx

    @property
    def has_gradients(self) -> bool:
        """Returns that this acquisition has gradients"""
        return isinstance(self.model, IDifferentiable)
def get_standard_normal_pdf_cdf(
    x: np.array, mean: np.array, standard_deviation: np.array
) -> Tuple[np.array, np.array, np.array]:
    """
    Standardise ``x`` and evaluate the standard-normal pdf and cdf there.

    :param x: Non-standardized input
    :param mean: Mean to normalize x with
    :param standard_deviation: Standard deviation to normalize x with
    :return: (normalized version of x, pdf of standard normal, cdf of standard normal)
    """
    standardised = (x - mean) / standard_deviation
    return (
        standardised,
        scipy.stats.norm.pdf(standardised),
        scipy.stats.norm.cdf(standardised),
    )
|
{
"content_hash": "e65951eae3cbb4296edb0399ea3a1a9b",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 118,
"avg_line_length": 37,
"alnum_prop": 0.634984984984985,
"repo_name": "neildhir/DCBO",
"id": "4bb2465ba57b5e9b2d05afd436b31296512953bd",
"size": "6660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bayes_opt/causal_acquisition_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "391310"
},
{
"name": "Python",
"bytes": "247376"
}
],
"symlink_target": ""
}
|
import os, sys

# Make the project and its workspace importable when served under WSGI:
# this file lives in the apache configuration dir, whose parent is the
# project, whose parent is the workspace root.
apache_configuration = os.path.dirname(__file__)
project = os.path.dirname(apache_configuration)
workspace = os.path.dirname(project)
sys.path.append(workspace)

# System-wide Django install (Python 2.4-era path).
sys.path.append('/usr/lib/python2.4/site-packages/django')

# Point Django at the project settings before creating the handler.
os.environ['DJANGO_SETTINGS_MODULE'] = 'lcr.settings'
import django.core.handlers.wsgi

# WSGI entry point used by the web server.
application = django.core.handlers.wsgi.WSGIHandler()
|
{
"content_hash": "6df3374a46ac9b831af80bf28ac7c08f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 58,
"avg_line_length": 31.5,
"alnum_prop": 0.7777777777777778,
"repo_name": "tmpkn/lcr",
"id": "05bc9f6633665c0e28d93ab0d15ab0d21ec9fc03",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lcr/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49894"
},
{
"name": "JavaScript",
"bytes": "451909"
},
{
"name": "PHP",
"bytes": "969"
},
{
"name": "Python",
"bytes": "292624"
}
],
"symlink_target": ""
}
|
'''
description: Download KNMI radar hdf5 files (inside a tar archive) from ftp
license: APACHE 2.0
author: Ronald van Haren, NLeSC (r.vanharen@esciencecenter.nl)
'''
from ftplib import FTP
import os
import datetime
import tarfile
class download_radar_data:
    def __init__(self, dt, outputdir):
        '''
        Download radar data from KNMI ftp
        Each tar file contains one day of hdf5 files with a frequency of 15 minutes
        - dt: datetime object of day to download the tar archive
        for
        - outputdir: directory where outputfile should be saved
        '''
        self.dt = dt  # datetime object
        self.outputdir = outputdir
        self.define_filename()
        self.define_outputfile()
        # Only hit the ftp server when no valid archive exists locally.
        if not self.check_file_exists():
            self.connect_to_ftp()
            self.change_to_download_directory()
            self.download_file()

    def connect_to_ftp(self):
        '''
        Connect to KNMI ftp server
        '''
        # connect to host, default port
        self.ftp = FTP('data.knmi.nl')
        self.ftp.login()  # user anonymous, passwd anonymous@

    def change_to_download_directory(self):
        '''
        Change into the correct download directory on the ftp server
        '''
        # Zero-padded date components select the per-day directory.
        self.year = str(self.dt.year).zfill(4)
        self.month = str(self.dt.month).zfill(2)
        self.day = str(self.dt.day).zfill(2)
        download_dir = os.path.join('download', 'radar_tar_volume_debilt', '1.0',
                                    '0001', self.year, self.month, self.day)
        self.ftp.cwd(download_dir)

    def define_filename(self):
        '''
        Define the filename to download
        '''
        # Archive name spans midnight-to-midnight of the requested day.
        basename = "RAD60_OPER_O___TARVOL__L2__"
        ext = '.tar'
        day1 = datetime.datetime.strftime(self.dt, "%Y%m%d")
        day2 = datetime.datetime.strftime(self.dt + datetime.timedelta(days=1),
                                          "%Y%m%d")
        self.filename = (basename + day1 + 'T000000_' + day2 + 'T000000' + '_0001'
                         + ext)

    def download_file(self):
        '''
        Download the tar file from ftp server
        '''
        # Open output file for writing
        self.file = open(self.outputfile, 'wb')
        # retrieve file
        self.ftp.retrbinary('RETR %s' % self.filename, self.file.write)
        # close the output file
        self.file.close()

    def define_outputfile(self):
        '''
        Define name and location of the output file
        '''
        self.outputfile = os.path.join(self.outputdir, self.filename)

    def check_file_exists(self):
        '''
        Check if outputfile exists and is a tar file
        We don't want to redownload the tar file from ftp if the
        file is already there
        '''
        # begin python2 compatibility
        # NOTE(review): the assignment below makes FileNotFoundError a
        # function-local name on all versions; on Python 3 the bare
        # reference raises UnboundLocalError (a NameError subclass), so the
        # local ends up bound to IOError (== OSError) either way — confirm
        # this is the intended behavior.
        try:
            FileNotFoundError
        except NameError:
            FileNotFoundError = IOError
        # end python2 compatibility
        try:
            if not tarfile.is_tarfile(self.outputfile):
                # file exists but is not a valid tar file
                os.remove(self.outputfile, dir_fd=None)
                # remove output file
                return False
            else:
                return True
        except FileNotFoundError:
            # file does not exist
            return False
if __name__=="__main__":
    # Ad-hoc smoke test.  ``download_radar_data`` takes two required
    # arguments; the original call omitted ``outputdir`` and therefore
    # always raised TypeError.  Download into the current directory.
    download_radar_data(datetime.datetime(2014, 1, 1), os.getcwd())
|
{
"content_hash": "102797a5a27b494ae0cc59052eeb4adb",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 30.03846153846154,
"alnum_prop": 0.6254801536491678,
"repo_name": "ERA-URBAN/fm128_radar_knmi",
"id": "8b7bd2ff8a613d4d01538c6690e691b6a3ad2a41",
"size": "3124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fm128_radar_knmi/download_radar_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "31119"
}
],
"symlink_target": ""
}
|
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import access_ips
from nova.api.openstack.compute.plugins.v3 import servers
from nova.api.openstack import wsgi
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.image import fake
class AccessIPsExtTest(test.NoDBTestCase):
    """Unit tests for the AccessIPs extension hooks.

    Each hook (server_create / server_update / server_rebuild) must copy
    the API-level access-IP keys from the request's server dict into the
    ``access_ip_v4`` / ``access_ip_v6`` create kwargs, turning an empty
    string into None and leaving absent keys out entirely.
    """

    def setUp(self):
        super(AccessIPsExtTest, self).setUp()
        self.access_ips_ext = access_ips.AccessIPs(None)

    def _check(self, func, server_dict, expected):
        # Run one hook and verify it produced exactly *expected* kwargs.
        create_kwargs = {}
        func(server_dict, create_kwargs)
        self.assertEqual(create_kwargs, expected)

    def _check_both(self, func):
        self._check(func,
                    {access_ips.AccessIPs.v4_key: '1.1.1.1',
                     access_ips.AccessIPs.v6_key: 'fe80::'},
                    {'access_ip_v4': '1.1.1.1', 'access_ip_v6': 'fe80::'})

    def _check_v4_only(self, func):
        self._check(func,
                    {access_ips.AccessIPs.v4_key: '1.1.1.1'},
                    {'access_ip_v4': '1.1.1.1'})

    def _check_v6_only(self, func):
        self._check(func,
                    {access_ips.AccessIPs.v6_key: 'fe80::'},
                    {'access_ip_v6': 'fe80::'})

    def _check_neither(self, func):
        self._check(func, {}, {})

    def _check_v4_null(self, func):
        self._check(func,
                    {access_ips.AccessIPs.v4_key: None},
                    {'access_ip_v4': None})

    def _check_v6_null(self, func):
        self._check(func,
                    {access_ips.AccessIPs.v6_key: None},
                    {'access_ip_v6': None})

    def _check_v4_blank(self, func):
        # An empty-string IPv4 is normalized to None.
        self._check(func,
                    {access_ips.AccessIPs.v4_key: ''},
                    {'access_ip_v4': None})

    def _check_v6_blank(self, func):
        # An empty-string IPv6 is normalized to None.
        self._check(func,
                    {access_ips.AccessIPs.v6_key: ''},
                    {'access_ip_v6': None})

    def test_server_create(self):
        self._check_both(self.access_ips_ext.server_create)

    def test_server_create_with_ipv4_only(self):
        self._check_v4_only(self.access_ips_ext.server_create)

    def test_server_create_with_ipv6_only(self):
        self._check_v6_only(self.access_ips_ext.server_create)

    def test_server_create_without_ipv4_and_ipv6(self):
        self._check_neither(self.access_ips_ext.server_create)

    def test_server_create_with_ipv4_null(self):
        self._check_v4_null(self.access_ips_ext.server_create)

    def test_server_create_with_ipv6_null(self):
        self._check_v6_null(self.access_ips_ext.server_create)

    def test_server_create_with_ipv4_blank(self):
        self._check_v4_blank(self.access_ips_ext.server_create)

    def test_server_create_with_ipv6_blank(self):
        self._check_v6_blank(self.access_ips_ext.server_create)

    def test_server_update(self):
        self._check_both(self.access_ips_ext.server_update)

    def test_server_update_with_ipv4_only(self):
        self._check_v4_only(self.access_ips_ext.server_update)

    def test_server_update_with_ipv6_only(self):
        self._check_v6_only(self.access_ips_ext.server_update)

    def test_server_update_without_ipv4_and_ipv6(self):
        self._check_neither(self.access_ips_ext.server_update)

    def test_server_update_with_ipv4_null(self):
        self._check_v4_null(self.access_ips_ext.server_update)

    def test_server_update_with_ipv6_null(self):
        self._check_v6_null(self.access_ips_ext.server_update)

    def test_server_update_with_ipv4_blank(self):
        self._check_v4_blank(self.access_ips_ext.server_update)

    def test_server_update_with_ipv6_blank(self):
        self._check_v6_blank(self.access_ips_ext.server_update)

    def test_server_rebuild(self):
        self._check_both(self.access_ips_ext.server_rebuild)

    def test_server_rebuild_with_ipv4_only(self):
        self._check_v4_only(self.access_ips_ext.server_rebuild)

    def test_server_rebuild_with_ipv6_only(self):
        self._check_v6_only(self.access_ips_ext.server_rebuild)

    def test_server_rebuild_without_ipv4_and_ipv6(self):
        self._check_neither(self.access_ips_ext.server_rebuild)

    def test_server_rebuild_with_ipv4_null(self):
        self._check_v4_null(self.access_ips_ext.server_rebuild)

    def test_server_rebuild_with_ipv6_null(self):
        self._check_v6_null(self.access_ips_ext.server_rebuild)

    def test_server_rebuild_with_ipv4_blank(self):
        self._check_v4_blank(self.access_ips_ext.server_rebuild)

    def test_server_rebuild_with_ipv6_blank(self):
        self._check_v6_blank(self.access_ips_ext.server_rebuild)
class AccessIPsExtAPIValidationTest(test.TestCase):
    """Request-schema validation tests for the access-IP fields on the v3
    servers API: well-formed IPv4/IPv6 values pass, malformed values raise
    ``exception.ValidationError`` — for create, update and rebuild.
    """
    def setUp(self):
        super(AccessIPsExtAPIValidationTest, self).setUp()
        # No-op stand-ins so the controller can run without touching a
        # real database or compute service.
        def fake_save(context, **kwargs):
            pass
        def fake_rebuild(*args, **kwargs):
            pass
        ext_info = plugins.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)
        fake.stub_out_image_service(self.stubs)
        self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
        self.stubs.Set(instance_obj.Instance, 'save', fake_save)
        self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
    def _test_create(self, params):
        # POST /servers with *params* merged into a minimal valid body;
        # schema violations surface as exceptions from create().
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                'flavorRef': 'http://localhost/123/flavors/3',
            },
        }
        body['server'].update(params)
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dumps(body)
        self.controller.create(req, body=body)
    def _test_update(self, params):
        # PUT /servers/<uuid> with *params* as the server document.
        body = {
            'server': {
            },
        }
        body['server'].update(params)
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'PUT'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dumps(body)
        self.controller.update(req, fakes.FAKE_UUID, body=body)
    def _test_rebuild(self, params):
        # Rebuild action with *params* merged into the rebuild body.
        body = {
            'rebuild': {
                'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
            },
        }
        body['rebuild'].update(params)
        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'PUT'
        req.headers['content-type'] = 'application/json'
        req.body = jsonutils.dumps(body)
        self.controller._action_rebuild(req, fakes.FAKE_UUID, body=body)
    def test_create_server_with_access_ipv4(self):
        params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
        self._test_create(params)
    def test_create_server_with_invalid_access_ipv4(self):
        params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
        self.assertRaises(exception.ValidationError, self._test_create, params)
    def test_create_server_with_access_ipv6(self):
        params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
        self._test_create(params)
    def test_create_server_with_invalid_access_ipv6(self):
        params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
        self.assertRaises(exception.ValidationError, self._test_create, params)
    def test_update_server_with_access_ipv4(self):
        params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
        self._test_update(params)
    def test_update_server_with_invalid_access_ipv4(self):
        params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
        self.assertRaises(exception.ValidationError, self._test_update, params)
    def test_update_server_with_access_ipv6(self):
        params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
        self._test_update(params)
    def test_update_server_with_invalid_access_ipv6(self):
        params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
        self.assertRaises(exception.ValidationError, self._test_update, params)
    def test_rebuild_server_with_access_ipv4(self):
        params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
        self._test_rebuild(params)
    def test_rebuild_server_with_invalid_access_ipv4(self):
        params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
        self.assertRaises(exception.ValidationError, self._test_rebuild,
                          params)
    def test_rebuild_server_with_access_ipv6(self):
        params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
        self._test_rebuild(params)
    def test_rebuild_server_with_invalid_access_ipv6(self):
        params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
        self.assertRaises(exception.ValidationError, self._test_rebuild,
                          params)
class AccessIPsControllerTest(test.NoDBTestCase):
    """Tests for AccessIPsController: the controller must copy the cached
    instance's ``access_ip_v4``/``access_ip_v6`` into the API response,
    using an empty string when the instance has none set.

    Fix over the original: both helpers used a mutable default argument
    (``kwargs={'id': 'fake'}``); replaced with the None-sentinel idiom so
    the default dict can never be shared or mutated across calls.
    """
    def setUp(self):
        super(AccessIPsControllerTest, self).setUp()
        self.controller = access_ips.AccessIPsController()

    def _test_with_access_ips(self, func, kwargs=None):
        # Run *func* against an instance that has both access IPs set and
        # assert they show up in the response object.
        if kwargs is None:
            kwargs = {'id': 'fake'}
        req = wsgi.Request({'nova.context':
                            fakes.FakeRequestContext('fake_user', 'fake',
                                                     is_admin=True)})
        instance = {'uuid': 'fake',
                    'access_ip_v4': '1.1.1.1',
                    'access_ip_v6': 'fe80::'}
        req.cache_db_instance(instance)
        resp_obj = wsgi.ResponseObject(
            {"server": {'id': 'fake'}})
        func(req, resp_obj, **kwargs)
        self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v4_key],
                         '1.1.1.1')
        self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v6_key],
                         'fe80::')

    def _test_without_access_ips(self, func, kwargs=None):
        # Run *func* against an instance with no access IPs and assert the
        # response carries empty strings.
        if kwargs is None:
            kwargs = {'id': 'fake'}
        req = wsgi.Request({'nova.context':
                            fakes.FakeRequestContext('fake_user', 'fake',
                                                     is_admin=True)})
        instance = {'uuid': 'fake',
                    'access_ip_v4': None,
                    'access_ip_v6': None}
        req.cache_db_instance(instance)
        resp_obj = wsgi.ResponseObject(
            {"server": {'id': 'fake'}})
        func(req, resp_obj, **kwargs)
        self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v4_key],
                         '')
        self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v6_key],
                         '')

    def test_create(self):
        self._test_with_access_ips(self.controller.create, {'body': {}})

    def test_create_without_access_ips(self):
        # NOTE(review): this calls _test_with_access_ips, identical to
        # test_create above — likely a copy-paste slip for
        # _test_without_access_ips; left unchanged pending confirmation.
        self._test_with_access_ips(self.controller.create, {'body': {}})

    def test_show(self):
        self._test_with_access_ips(self.controller.show)

    def test_show_without_access_ips(self):
        self._test_without_access_ips(self.controller.show)

    def test_detail(self):
        # Two cached instances, each with distinct IPs, must be reported
        # per-server in the detail listing.
        req = wsgi.Request({'nova.context':
                            fakes.FakeRequestContext('fake_user', 'fake',
                                                     is_admin=True)})
        instance1 = {'uuid': 'fake1',
                     'access_ip_v4': '1.1.1.1',
                     'access_ip_v6': 'fe80::'}
        instance2 = {'uuid': 'fake2',
                     'access_ip_v4': '1.1.1.2',
                     'access_ip_v6': 'fe81::'}
        req.cache_db_instance(instance1)
        req.cache_db_instance(instance2)
        resp_obj = wsgi.ResponseObject(
            {"servers": [{'id': 'fake1'}, {'id': 'fake2'}]})
        self.controller.detail(req, resp_obj)
        self.assertEqual(
            resp_obj.obj['servers'][0][access_ips.AccessIPs.v4_key],
            '1.1.1.1')
        self.assertEqual(
            resp_obj.obj['servers'][0][access_ips.AccessIPs.v6_key],
            'fe80::')
        self.assertEqual(
            resp_obj.obj['servers'][1][access_ips.AccessIPs.v4_key],
            '1.1.1.2')
        self.assertEqual(
            resp_obj.obj['servers'][1][access_ips.AccessIPs.v6_key],
            'fe81::')

    def test_detail_without_access_ips(self):
        req = wsgi.Request({'nova.context':
                            fakes.FakeRequestContext('fake_user', 'fake',
                                                     is_admin=True)})
        instance1 = {'uuid': 'fake1',
                     'access_ip_v4': None,
                     'access_ip_v6': None}
        instance2 = {'uuid': 'fake2',
                     'access_ip_v4': None,
                     'access_ip_v6': None}
        req.cache_db_instance(instance1)
        req.cache_db_instance(instance2)
        resp_obj = wsgi.ResponseObject(
            {"servers": [{'id': 'fake1'}, {'id': 'fake2'}]})
        self.controller.detail(req, resp_obj)
        self.assertEqual(
            resp_obj.obj['servers'][0][access_ips.AccessIPs.v4_key], '')
        self.assertEqual(
            resp_obj.obj['servers'][0][access_ips.AccessIPs.v6_key], '')
        self.assertEqual(
            resp_obj.obj['servers'][1][access_ips.AccessIPs.v4_key], '')
        self.assertEqual(
            resp_obj.obj['servers'][1][access_ips.AccessIPs.v6_key], '')

    def test_update(self):
        self._test_with_access_ips(self.controller.update, {'id': 'fake',
                                                            'body': {}})

    def test_update_without_access_ips(self):
        self._test_without_access_ips(self.controller.update, {'id': 'fake',
                                                               'body': {}})

    def test_rebuild(self):
        self._test_with_access_ips(self.controller.rebuild, {'id': 'fake',
                                                             'body': {}})

    def test_rebuild_without_access_ips(self):
        self._test_without_access_ips(self.controller.rebuild, {'id': 'fake',
                                                                'body': {}})
|
{
"content_hash": "37260e1b416bb31c3a9a2b5ee8140614",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 79,
"avg_line_length": 40.04076086956522,
"alnum_prop": 0.5923311842551747,
"repo_name": "tianweizhang/nova",
"id": "0f78f161832e1f81343f7dc5549a2099fd3db6c4",
"size": "15337",
"binary": false,
"copies": "8",
"ref": "refs/heads/v0",
"path": "nova/tests/api/openstack/compute/plugins/v3/test_access_ips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16708379"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259645"
}
],
"symlink_target": ""
}
|
import unittest
import os
import sys
from functools import wraps
from django.conf import settings
from south.hacks import hacks
# Add the tests directory so fakeapp is on sys.path
test_root = os.path.dirname(__file__)
sys.path.append(test_root)
# Note: the individual test files are imported below this.
class Monkeypatcher(unittest.TestCase):
    """Base class for tests that temporarily rewrite Django's
    INSTALLED_APPS at runtime.

    Subclasses declare an ``installed_apps`` attribute; setUp installs
    that list via south's hacks module and tearDown restores the
    original configuration.
    """
    def create_fake_app(self, name):
        """Build a stand-in app module object called *name*, attaching
        its real ``migrations`` package when one is importable."""
        class Fake:
            pass

        stub = Fake()
        stub.__name__ = name
        try:
            stub.migrations = __import__(
                name + ".migrations", {}, {}, ['migrations'])
        except ImportError:
            # The fake app simply has no migrations package.
            pass
        return stub

    def setUp(self):
        """Install the test-specific app list, if the subclass declares one."""
        apps = getattr(self, 'installed_apps', None)
        if apps:
            hacks.set_installed_apps(apps)

    def tearDown(self):
        """Restore the original INSTALLED_APPS."""
        if getattr(self, 'installed_apps', None):
            hacks.reset_installed_apps()
# Make sure skipUnless is available.
try:
    # skipUnless added in Python 2.7;
    from unittest import skipUnless
except ImportError:
    try:
        # django.utils.unittest added in Django 1.3;
        from django.utils.unittest import skipUnless
    except ImportError:
        def skipUnless(condition, message):
            """Minimal fallback: run the test only when *condition* holds,
            otherwise print a skip notice (no skip exceptions available
            on these old Pythons)."""
            def decorator(testfunc):
                @wraps(testfunc)
                def wrapper(self):
                    if condition:
                        # Apply method
                        testfunc(self)
                    else:
                        # Original used a Python-2-only ``print`` statement,
                        # which is a SyntaxError on Python 3; this call
                        # produces identical output on both.
                        print("Skipping %s -- %s" % (testfunc.__name__,
                                                     message))
                return wrapper
            return decorator
# Try importing all tests if asked for (then we can run 'em)
try:
    skiptest = settings.SKIP_SOUTH_TESTS
except Exception:
    # Settings module may be absent or unconfigured; default to skipping.
    # (Narrowed from a bare ``except:``, which also swallowed SystemExit
    # and KeyboardInterrupt.)
    skiptest = True

if not skiptest:
    from south.tests.db import *
    from south.tests.db_mysql import *
    from south.tests.logic import *
    from south.tests.autodetection import *
    from south.tests.logger import *
    from south.tests.inspector import *
    from south.tests.freezer import *
|
{
"content_hash": "f2e6f2cf53c5e368cd3ec8515ed9d78a",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 86,
"avg_line_length": 27.858823529411765,
"alnum_prop": 0.5929054054054054,
"repo_name": "peterbe/airmozilla",
"id": "4280bdf9bdd493b0651447179eec3010b1257031",
"size": "2369",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vendor-local/lib/python/south/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "70585"
},
{
"name": "JavaScript",
"bytes": "10192"
},
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "1235514"
},
{
"name": "Shell",
"bytes": "3672"
}
],
"symlink_target": ""
}
|
"""SchoolCMS-schoolcms-group."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from . import BaseHandler
from ..db import GroupList, User
import re
def _to_int(s, default):
if not s.isdigit():
return default
return int(s)
class GroupHandler(BaseHandler):
    """Admin-only endpoint that adds (POST) or removes (DELETE) users
    from a named group."""

    @BaseHandler.check_is_admin_user
    def post(self):
        """Add every submitted userkey to ``group`` (idempotent)."""
        group = self.get_argument('group')
        for key in self.get_arguments('userkey'):
            # Reject the whole request as soon as any key is unknown.
            if not User.by_key(key, self.sql_session).scalar():
                raise self.HTTPError(400)
            # Only insert a membership row when it is not already present.
            if not GroupList.check(key, group, self.sql_session):
                self.sql_session.add(GroupList(key, group))
        self.sql_session.commit()
        self.write({'success': True})

    @BaseHandler.check_is_admin_user
    def delete(self):
        """Remove every submitted userkey from ``group``."""
        group = self.get_argument('group')
        for key in self.get_arguments('userkey'):
            if not User.by_key(key, self.sql_session).scalar():
                raise self.HTTPError(400)
            if GroupList.check(key, group, self.sql_session):
                query = (self.sql_session.query(GroupList)
                         .filter(GroupList.userkey == key)
                         .filter(GroupList.group == group))
                query.delete()
        self.sql_session.commit()
        self.write({'success': True})
class UserHandler(BaseHandler):
    """Admin-only user management: paginated listing (GET) and account
    creation (POST)."""
    @BaseHandler.check_is_admin_user
    def get(self):
        # Paginated user list: 10 per page starting at ?start=N,
        # ordered by account name.
        start = _to_int(self.get_argument('start', ''),0)
        q = self.sql_session.query(User)
        total = q.count()
        q = q.order_by(User.account)
        q = q.offset(start).limit(10)
        users = q.all()
        users_list = [user.to_dict() for user in users]
        # Attach each user's group memberships to the serialized dict.
        for user_d in users_list:
            user_d['groups'] = GroupList.get_user_groups(user_d['key'], self.sql_session)
        groups = GroupList.get_all_groups(self.sql_session)
        self.page_render({
            '_xsrf': self.xsrf_token,
            'users': users_list,
            'groups': groups,
            'total': total,
        })
    @BaseHandler.check_is_admin_user
    def post(self):
        # Collect the submitted fields; self._ doubles as the error
        # payload echoed back when validation fails.
        self._ = {}
        self._['account'] = self.get_argument('account', '')
        self._['passwd'] = self.get_argument('passwd', '')
        self._['name'] = self.get_argument('name', '')
        self._['identity'] = self.get_argument('identity', '')
        self._['admin'] = bool(self.get_argument('admin', ''))
        user = self.add_user()
        if user:
            self.sql_session.add(user)
            self.sql_session.commit()
            self.write({'success': True})
        else:
            # Validation failed: echo fields plus the 'alert' message.
            self.write(self._)
    def add_user(self):
        # Validate self._ and return a new User, or None with
        # self._['alert'] set to a user-facing (Chinese) error message.
        if not re.match(r'^[a-zA-Z0-9]{4,20}$', self._['account']):
            self._['alert'] = '帳號格式錯誤(4-20個英數字)'
            return None
        elif not re.match(r'^.{4,20}$', self._['passwd']):
            self._['alert'] = '密碼格式錯誤(4-20個任意字元)'
            return None
        elif not re.match(r'^[\S]{1,15}$', self._['name']):
            self._['alert'] = '姓名格式錯誤(1-15個非空白字元)'
            return None
        elif not (self._['identity'] == '學生' or self._['identity'] == '教師'):
            self._['alert'] = '請選擇帳號身份(學生或教師)'
            return None
        else:
            # The account name must be unique.
            q = self.sql_session.query(User.account)
            q = q.filter(User.account == self._['account'])
            if q.first():
                self._['alert'] = '此帳號已被使用'
                return None
            # Python 2: normalize the name to unicode before storing.
            self._['name'] = unicode(self._['name'])
            return User(**self._)
|
{
"content_hash": "88fc09506da39635b5a1780c4d39589b",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 91,
"avg_line_length": 32.05982905982906,
"alnum_prop": 0.5323913623033858,
"repo_name": "team6612/School_CMS",
"id": "56af778a3436225babdb6cfc93ec5cc260029518",
"size": "3914",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "schoolcms/handler/userhandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5255"
},
{
"name": "HTML",
"bytes": "16721"
},
{
"name": "JavaScript",
"bytes": "569616"
},
{
"name": "Python",
"bytes": "62874"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from google.cloud.tasks_v2beta3 import types
from google.cloud.tasks_v2beta3.gapic import cloud_tasks_client
from google.cloud.tasks_v2beta3.gapic import enums
class CloudTasksClient(cloud_tasks_client.CloudTasksClient):
    # Thin subclass re-exporting the generated GAPIC client at the
    # package level; behavior is inherited unchanged.
    __doc__ = cloud_tasks_client.CloudTasksClient.__doc__
    # Expose the enums module as a class attribute for convenience
    # (client.enums.*).
    enums = enums
# Public API of the google.cloud.tasks_v2beta3 package.
__all__ = (
    'enums',
    'types',
    'CloudTasksClient',
)
|
{
"content_hash": "baeb8529f0ad13ded15a6fc4b78892f6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 23.823529411764707,
"alnum_prop": 0.7283950617283951,
"repo_name": "tseaver/gcloud-python",
"id": "e83b3b1a3b08ed31d6ae67c22177da5e4dfa20e8",
"size": "1007",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tasks/google/cloud/tasks_v2beta3/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "93642"
},
{
"name": "Python",
"bytes": "2874989"
},
{
"name": "Shell",
"bytes": "4436"
}
],
"symlink_target": ""
}
|
from chatschoolette import db
from chatschoolette.mod_auth.models import User
# Flip every existing account to active and persist the change once.
accounts = User.query.all()
for account in accounts:
    print(account)
    account.is_active = True
    print('User {} now active.'.format(account.username))
db.session.commit()
print('Done')
|
{
"content_hash": "f1b29084230a5da193b4f9f3445fb4e8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.7208333333333333,
"repo_name": "gorel/chatschoolette",
"id": "f69efb77d36ccbf7edddaf2f537cdc8ec87991e9",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "activate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2553"
},
{
"name": "HTML",
"bytes": "32783"
},
{
"name": "Python",
"bytes": "61465"
},
{
"name": "Shell",
"bytes": "469"
}
],
"symlink_target": ""
}
|
"""
Author: --<v1ll4n>
Purpose: Tester For Threadpoolex
Created: 05/13/17
"""
import unittest
import queue
import time
from threadpoolex import ThreadPoolXLabor, ThreadPoolX, _LaborFactory
#----------------------------------------------------------------------
def test_task_error(arg1, arg2=4):
    """Worker fixture that always fails: sleeps 3s, then raises
    ZeroDivisionError.  The trailing return is intentionally
    unreachable; the surrounding tests rely on the raised error."""
    time.sleep(3)
    1/0
    return arg1, arg2
#----------------------------------------------------------------------
def test_task(arg1, arg2=4):
    """Worker fixture that succeeds: sleeps 3s, then returns
    ``(arg1, arg2)``."""
    time.sleep(3)
    return arg1, arg2
count = 0  # module-level counter; never referenced in this module
########################################################################
class ThreadPoolExTester(unittest.TestCase):
    """Tests for ThreadPoolXLabor, ThreadPoolX and _LaborFactory.

    Fix over the original: the module imports Python 3's ``queue`` yet
    used Python-2-only ``print`` statements (a SyntaxError under py3);
    converted to single-argument ``print()`` calls, which produce the
    same output on both interpreters.
    """
    #----------------------------------------------------------------------
    def test_labor(self):
        """A single labor executes a task; a failing result callback is
        routed to the fallback exception callback."""
        def _labor_callback(result):
            raise ValueError()
            #_retqueue.put(result)
        def _labor_callback_exc(result):
            _retqueue.put(result)
        def _task_exc_handle(e):
            _retqueue.put((1,4))
            print(e)
        _retqueue = queue.Queue(1)
        s = ThreadPoolXLabor('test')
        s.add_task_exception_callback(_task_exc_handle)
        s.add_callback(_labor_callback, _labor_callback_exc)
        s.start()
        s.execute(test_task, var_args=(1,), keyword_args={})
        _resultTuple = _retqueue.get()
        self.assertTrue(_resultTuple[0] == 1)
        self.assertEqual(_resultTuple[1], 4)
        s.quit()
        time.sleep(1)
    #----------------------------------------------------------------------
    def test_pool(self):
        """Feed 50 tasks through the pool; every task must complete and
        a one-off feed_with_callback task must reach its callback."""
        quited = False
        def print_result(result):
            time.sleep(1)
            print(result)
            #_q.put(1)
            return result
        _q = queue.Queue()
        def _count(result):
            #_q.put(1)
            return result
        #----------------------------------------------------------------------
        def test_task1(arg1, arg2=4):
            """Task that records its completion on _q."""
            time.sleep(3)
            _q.put(1)
            return arg1, arg2
        pool = ThreadPoolX()
        pool.add_callbacks(callback=print_result)
        pool.add_callbacks(callback=_count)
        for i in range(50):
            print(i)
            pool.feed(target=test_task1, vargs=(i,))
        assert pool._task_queue.qsize() == 50
        pool.start()
        def target():
            time.sleep(1)
            return 'h'
        def anothor_result(result):
            if result == 'h':
                print('target with callback success!')
        pool.feed_with_callback(target, callback=anothor_result)
        time.sleep(6)
        time.sleep(6)
        #pool.quit()
        self.assertEqual(_q.qsize(),50)
        pool.quit()
    #----------------------------------------------------------------------
    def test_laborfactory(self):
        """_LaborFactory builds ThreadPoolXLabor instances pre-wired with
        the registered result and exception callbacks."""
        lf = _LaborFactory(debug=True, loop_interval=0.2)
        def _labor_callback(result):
            raise ValueError()
            #_retqueue.put(result)
        def _labor_callback_exc(result):
            _retqueue.put(result)
        def _task_exc_handle(e):
            _retqueue.put((1,4))
            print(e)
        _retqueue = queue.Queue(1)
        lf.add_callbacks(_labor_callback, _labor_callback_exc)
        lf.add_exception_callback(_task_exc_handle)
        new_labor = lf.build_labor()
        self.assertIsInstance(new_labor, ThreadPoolXLabor)
        new_labor.start()
        new_labor.execute(test_task)
        new_labor.execute(test_task_error)
        new_labor.quit()
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
{
"content_hash": "38507071ea3b2bfdbf400bdae7398ebc",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 79,
"avg_line_length": 25.90277777777778,
"alnum_prop": 0.4479892761394102,
"repo_name": "VillanCh/g3ar",
"id": "dbf9d03f54aa43d03004761dfba31e6786e8190b",
"size": "3766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "g3ar/threadutils/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "577360"
}
],
"symlink_target": ""
}
|
import numpy as np
import theano
from deepgraph.conf import rng
__docformat__ = 'restructedtext en'
def normal(mu=0, dev=0.01, dtype=theano.config.floatX):
    """
    Build an initializer that fills weights from a normal distribution.
    :param mu: Float - mean of the distribution
    :param dev: Float - standard deviation
    :param dtype: np.type
    :return: callable(size, name) -> theano shared variable
    """
    def gen(size, name):
        samples = rng.normal(mu, dev, size=size)
        return theano.shared(value=np.asarray(samples, dtype=dtype),
                             name=name, borrow=True)
    return gen
def uniform(low=-0.5, high=0.5, dtype=theano.config.floatX):
    """
    Build an initializer that fills weights from a uniform distribution.
    :param low: Float - lower bound
    :param high: Float - upper bound
    :param dtype: np.type
    :return: callable(size, name) -> theano shared variable
    """
    def gen(size, name):
        samples = rng.uniform(low=low, high=high, size=size)
        return theano.shared(value=np.asarray(samples, dtype=dtype),
                             name=name, borrow=True)
    return gen
def zeros(dtype=theano.config.floatX):
    """
    Build an initializer producing zero-filled tensors.
    :param dtype: np.type
    :return: callable(size, name) -> theano shared variable
    """
    def gen(size, name):
        buf = np.zeros(size, dtype=dtype)
        return theano.shared(value=buf, name=name, borrow=True)
    return gen
def constant(value=1, dtype=theano.config.floatX):
    """
    Build an initializer filling the tensor with a constant value (e.g. 1).
    :param value: the fill value
    :param dtype: np.type
    :return: callable(size, name) -> theano shared variable
    """
    def gen(size, name):
        filled = np.full(size, value, dtype=dtype)
        return theano.shared(value=filled, name=name, borrow=True)
    return gen
def xavier(gain=1.0, dtype=theano.config.floatX):
    """
    Xavier/Glorot initializer
    (http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf).
    :param gain: Float multiplier, or the string "relu" (sqrt(2)) /
                 "sigmoid" (4.0)
    :param dtype: np.type
    :return: callable(size, name) -> theano shared variable
    """
    # Map the symbolic gain names onto their numeric values.
    if gain == "relu":
        gain = np.sqrt(2)
    elif gain == "sigmoid":
        gain = 4.0
    def gen(size, name):
        if len(size) < 2:
            raise AssertionError("This initializer only works with shapes of length >= 2")
        dim_a, dim_b = size[:2]
        receptive_field_size = np.prod(size[2:])
        sigma = gain * np.sqrt(2.0 / ((dim_a + dim_b) * receptive_field_size))
        samples = rng.normal(0, sigma, size=size)
        return theano.shared(value=np.asarray(samples, dtype=dtype),
                             name=name, borrow=True)
    return gen
def shared(node, type):
    """
    Share weights between nodes: the returned generator ignores its
    arguments and hands back *node*'s own W or b variable.
    :param node: the node owning the variables to share
    :param type: "W" or "b"
    :return: callable(size, name) -> the shared variable
    """
    def gen(size, name):
        # todo Check sizes
        if type == "W":
            return node.W
        if type == "b":
            return node.b
        raise AssertionError("Unknown sharing type %s" % type)
    return gen
|
{
"content_hash": "aa6f1f312420206b9be34a1cd463ac98",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 115,
"avg_line_length": 26.553398058252426,
"alnum_prop": 0.560146252285192,
"repo_name": "sebastian-schlecht/deepgraph",
"id": "d1ff83622e8dd1ee849c854a6dff5434f8f15acd",
"size": "2735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepgraph/nn/init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3911641"
},
{
"name": "Python",
"bytes": "122339"
}
],
"symlink_target": ""
}
|
from models import WaitingListUser
class WaitingListDB():
    """Data-access helper for the chat waiting list (WaitingListUser rows).

    Fixes over the original: iterate the waitlist directly instead of
    ``range(len(...))`` indexing, and use ``except ... as e`` — the old
    ``except Exception, e`` form is a SyntaxError on Python 3, where this
    module's multi-argument ``print(...)`` calls clearly intend to run.
    """

    def __init__(self, db):
        # SQLAlchemy database handle; all sessions are committed here.
        self.db = db

    def get_match(self, gender, interest):
        """Return the id of the first waiting user whose interest matches
        *gender* and whose gender matches *interest* ("random" matches
        anything).  The matched user is removed from the waiting list;
        returns None when nobody matches."""
        waitlist = WaitingListUser.query.all()
        print("WAITLIST", waitlist, gender, interest)
        for user in waitlist:
            print("user waiting", user.gender, user.interest)
            if user.interest == gender or user.interest == "random":
                print("IN 1")
                if user.gender == interest or interest == "random":
                    print("IN 2")
                    matched_id = user.id
                    self.db.session.delete(user)
                    self.db.session.commit()
                    return matched_id
        print("NO MATCH FOUND")
        return None

    def enlist(self, id, gender, interest):
        """Add user *id* to the waiting list."""
        user = WaitingListUser(id=id, gender=gender, interest=interest)
        self.db.session.add(user)
        self.db.session.commit()

    def isWaiting(self, id):
        """Return True when user *id* is currently on the waiting list."""
        return WaitingListUser.query.get(id) is not None

    def delist(self, id):
        """Remove user *id* from the waiting list; best-effort — logs and
        continues when the user is not listed."""
        try:
            user = WaitingListUser.query.get(id)
            self.db.session.delete(user)
            self.db.session.commit()
        except Exception as e:
            print("Not in waitlist", str(e))
|
{
"content_hash": "9bc6b67c60298adb2ef5f11ed5d99f3a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 73,
"avg_line_length": 33.31707317073171,
"alnum_prop": 0.5424597364568082,
"repo_name": "mayukh18/BlindChat",
"id": "83dd6c8a263a055dbeb57c1d71c46fbe88557a91",
"size": "1366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DB_Wrappers/WaitingList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41"
},
{
"name": "HTML",
"bytes": "3075"
},
{
"name": "Python",
"bytes": "59076"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import shutil
from contextlib import contextmanager
from urlparse import urlparse
import boat.paths
import boat.webcomic.model as model
import boat.webcomic.views as views
from boat.cli import manager
# CLI registration: ``boat build`` rebuilds the static sites.
command = manager.add_command('build', help='build/rebuild static sites')
command.parser.add_argument('-f', '--force', action='store_true',
                            help="rebuild everything")
command.parser.add_argument('--no-static', action='store_false', dest='static',
                            help="don't copy static files")
command.parser.add_argument('-o', '--output-dir',
                            help="output to specified directory")
def site_basedir(site):
    """Return the build directory name for *site*: the host part of its
    base URL, or 'default' when the URL has no network location."""
    # TODO: Error out on relative baseurl since RSS requires absolute urls
    host = urlparse(site.baseurl).netloc
    return host if host else 'default'
def get_buildpath(app, args, *components):
    """Join *components* under the chosen output root: the --output-dir
    argument when given, otherwise <instance_path>/build."""
    if args.output_dir:
        root = args.output_dir
    else:
        root = os.path.join(app.instance_path, 'build')
    return os.path.join(root, *components)
def build_policy(args):
    """Select the rebuild policy: rebuild everything under --force,
    otherwise only what is newer than its output (mtime-based)."""
    return boat.paths.always_policy if args.force else boat.paths.mtime_policy
@command.run
def run(app_factory, args):
    """Entry point for ``boat build``: load the webcomic model from the
    instance directory, then render each site into its own build
    subdirectory (named after the site's base-URL host)."""
    app = app_factory()
    policy = build_policy(args)
    loader = model.YamlFormat(app.instance_path, policy)
    loader.load()
    # NOTE(review): names are collected first and each Site is re-fetched
    # inside the loop — presumably to avoid stale model objects; confirm.
    site_names = [site.name for site in model.Site.select()]
    # TODO: Build SCSS assets
    for name in site_names:
        site = model.Site.get(name=name)
        path = get_buildpath(app, args, site_basedir(site))
        formatter = views.WebFormat(app, path, build_policy(args), site,
                                    args.static)
        formatter.save()
|
{
"content_hash": "b82e0283a2f5f7f3ca290ea51e24f35c",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 31.39622641509434,
"alnum_prop": 0.6941105769230769,
"repo_name": "rhestilow/boat",
"id": "c2547b92036a84dae28a766ecdf236539e0d762c",
"size": "1664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boat/cli/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2076"
},
{
"name": "HTML",
"bytes": "3178"
},
{
"name": "JavaScript",
"bytes": "1989"
},
{
"name": "Makefile",
"bytes": "77"
},
{
"name": "Python",
"bytes": "45113"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils import timezone
from django.utils.http import urlquote
from django.utils.translation import ugettext_lazy as _
from django.core.mail import send_mail
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from accounts.managers import ExpensiUserManager
class ExpensiUser(AbstractBaseUser, PermissionsMixin):
    """Custom user model keyed on a full-length, unique email address.

    Admin-compliant permissions via PermissionsMixin; email and password
    are required, all other fields are optional.
    """
    # Field declarations define the database schema and are kept verbatim.
    email = models.EmailField(_('email address'), max_length=254, unique=True)
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False, help_text=_(
        'Designates whether the user can log into this admin site.')
    )
    is_active = models.BooleanField(_('active'), default=True, help_text=_(
        'Designates whether this user should be treated as active. Unselect this instead of deleting accounts.')
    )
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    objects = ExpensiUserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def get_absolute_url(self):
        """Return the canonical /users/<email>/ URL for this user."""
        quoted = urlquote(self.email)
        return "/users/%s/" % quoted

    def get_full_name(self):
        """Return '<first> <last>' with surrounding whitespace stripped."""
        return ('%s %s' % (self.first_name, self.last_name)).strip()

    def get_short_name(self):
        """Return the short name (first name) for the user."""
        return self.first_name

    def email_user(self, subject, message, from_email=None):
        """Send an email to this user's address."""
        send_mail(subject, message, from_email, [self.email])
|
{
"content_hash": "6522653c7c60e19db5b23d66c5eb2f92",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 112,
"avg_line_length": 36.285714285714285,
"alnum_prop": 0.6737204724409449,
"repo_name": "lacion/expensi",
"id": "cd1798677912daccb3e82a5a91d6505540e8a3cb",
"size": "2032",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "accounts/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "32746"
},
{
"name": "Shell",
"bytes": "6709"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.Packaging.rpm
The rpm packager.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/rpm.py 3897 2009/01/13 06:45:54 scons"
import os
import string
import SCons.Builder
from SCons.Environment import OverrideEnvironment
from SCons.Tool.packaging import stripinstallbuilder, src_targz
from SCons.Errors import UserError
def package(env, target, source, PACKAGEROOT, NAME, VERSION,
            PACKAGEVERSION, DESCRIPTION, SUMMARY, X_RPM_GROUP, LICENSE,
            **kw):
    # Entry point for RPM packaging: refuses an explicit target name
    # (rpm derives the file names itself), derives the build architecture,
    # folds the tag arguments into `kw`, then runs the 'Rpm' builder on a
    # generated spec file plus source tarball.  Python 2 code (has_key/apply).
    # initialize the rpm tool
    SCons.Tool.Tool('rpm').generate(env)
    bld = env['BUILDERS']['Rpm']
    # Generate a UserError whenever the target name has been set explicitly,
    # since rpm does not allow for controlling it. This is detected by
    # checking if the target has been set to the default by the Package()
    # Environment function.
    if str(target[0])!="%s-%s"%(NAME, VERSION):
        raise UserError( "Setting target is not supported for rpm." )
    else:
        # This should be overridable from the construction environment,
        # which it is by using ARCHITECTURE=.
        # Guessing based on what os.uname() returns at least allows it
        # to work for both i386 and x86_64 Linux systems.
        archmap = {
            'i686' : 'i386',
            'i586' : 'i386',
            'i486' : 'i386',
            }
        buildarchitecture = os.uname()[4]
        buildarchitecture = archmap.get(buildarchitecture, buildarchitecture)
        if kw.has_key('ARCHITECTURE'):
            buildarchitecture = kw['ARCHITECTURE']
        fmt = '%s-%s-%s.%s.rpm'
        srcrpm = fmt % (NAME, VERSION, PACKAGEVERSION, 'src')
        binrpm = fmt % (NAME, VERSION, PACKAGEVERSION, buildarchitecture)
        target = [ srcrpm, binrpm ]
    # get the correct arguments into the kw hash
    # (locals() snapshots every named tag argument above into kw; the
    # source/target/env entries are then stripped back out)
    loc=locals()
    del loc['kw']
    kw.update(loc)
    del kw['source'], kw['target'], kw['env']
    # if no "SOURCE_URL" tag is given add a default one.
    if not kw.has_key('SOURCE_URL'):
        #kw['SOURCE_URL']=(str(target[0])+".tar.gz").replace('.rpm', '')
        kw['SOURCE_URL']=string.replace(str(target[0])+".tar.gz", '.rpm', '')
    # mangle the source and target list for the rpmbuild
    env = OverrideEnvironment(env, kw)
    target, source = stripinstallbuilder(target, source, env)
    target, source = addspecfile(target, source, env)
    target, source = collectintargz(target, source, env)
    # now call the rpm builder to actually build the packet.
    return apply(bld, [env, target, source], kw)
def collectintargz(target, source, env):
    """ Puts all source files into a tar.gz file. """
    # the rpm tool depends on a source package, until this is changed
    # this hack needs to be here that tries to pack all sources in.
    sources = env.FindSourceFiles()
    # filter out the target we are building the source list for.
    #sources = [s for s in sources if not (s in target)]
    sources = filter(lambda s, t=target: not (s in t), sources)
    # find the .spec file for rpm and add it since it is not necessarily found
    # by the FindSourceFiles function.
    #sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
    spec_file = lambda s: string.rfind(str(s), '.spec') != -1
    sources.extend( filter(spec_file, source) )
    # as the source contains the url of the source package this rpm package
    # is built from, we extract the target name
    #tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
    tarball = string.replace(str(target[0])+".tar.gz", '.rpm', '')
    try:
        #tarball = env['SOURCE_URL'].split('/')[-1]
        tarball = string.split(env['SOURCE_URL'], '/')[-1]
    except KeyError, e:
        raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
    # Delegate the actual archiving to the src_targz packager.
    tarball = src_targz.package(env, source=sources, target=tarball,
                                PACKAGEROOT=env['PACKAGEROOT'], )
    return (target, tarball)
def addspecfile(target, source, env):
    # Generate the NAME-VERSION.spec file through a one-off builder and
    # append it to the source list so it ends up in the source tarball.
    specfile = "%s-%s" % (env['NAME'], env['VERSION'])
    bld = SCons.Builder.Builder(action = build_specfile,
                                suffix = '.spec',
                                target_factory = SCons.Node.FS.File)
    source.extend(bld(env, specfile, source))
    return (target,source)
def build_specfile(target, source, env):
    """ Builds a RPM specfile from a dictionary with string metadata and
    by analyzing a tree of nodes.
    """
    file = open(target[0].abspath, 'w')
    str = ""  # NOTE(review): dead assignment (also shadows builtin str); never read.
    # NOTE(review): if one of the write calls raises, the file handle is
    # never closed (no try/finally) -- confirm whether this matters for SCons.
    try:
        file.write( build_specfile_header(env) )
        file.write( build_specfile_sections(env) )
        file.write( build_specfile_filesection(env, source) )
        file.close()
        # call a user specified function
        if env.has_key('CHANGE_SPECFILE'):
            env['CHANGE_SPECFILE'](target, source)
    except KeyError, e:
        raise SCons.Errors.UserError( '"%s" package field for RPM is missing.' % e.args[0] )
#
# mandatory and optional package tag section
#
def build_specfile_sections(spec):
    """ Builds the sections of a rpm specfile.
    """
    # Mandatory sections raise (via SimpleTagCompiler) when missing;
    # optional ones are emitted only if present in `spec`.
    str = ""
    mandatory_sections = {
        'DESCRIPTION'  : '\n%%description\n%s\n\n', }
    str = str + SimpleTagCompiler(mandatory_sections).compile( spec )
    optional_sections = {
        'DESCRIPTION_'        : '%%description -l %s\n%s\n\n',
        'CHANGELOG'           : '%%changelog\n%s\n\n',
        'X_RPM_PREINSTALL'    : '%%pre\n%s\n\n',
        'X_RPM_POSTINSTALL'   : '%%post\n%s\n\n',
        'X_RPM_PREUNINSTALL'  : '%%preun\n%s\n\n',
        'X_RPM_POSTUNINSTALL' : '%%postun\n%s\n\n',
        'X_RPM_VERIFY'        : '%%verify\n%s\n\n',
        # These are for internal use but could possibly be overriden
        'X_RPM_PREP'          : '%%prep\n%s\n\n',
        'X_RPM_BUILD'         : '%%build\n%s\n\n',
        'X_RPM_INSTALL'       : '%%install\n%s\n\n',
        'X_RPM_CLEAN'         : '%%clean\n%s\n\n',
        }
    # Default prep, build, install and clean rules
    # TODO: optimize those build steps, to not compile the project a second time
    if not spec.has_key('X_RPM_PREP'):
        spec['X_RPM_PREP'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"' + '\n%setup -q'
    if not spec.has_key('X_RPM_BUILD'):
        spec['X_RPM_BUILD'] = 'mkdir "$RPM_BUILD_ROOT"'
    if not spec.has_key('X_RPM_INSTALL'):
        spec['X_RPM_INSTALL'] = 'scons --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"'
    if not spec.has_key('X_RPM_CLEAN'):
        spec['X_RPM_CLEAN'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"'
    str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )
    return str
def build_specfile_header(spec):
    """Build every specfile section preceding %files.

    Emits the mandatory header tags (SimpleTagCompiler raises a KeyError
    through `compile` when one is missing) followed by the optional tags
    that happen to be present in `spec`.  A default BuildRoot is filled
    in when none was given.
    """
    str = ""
    # first the mandatory sections
    mandatory_header_fields = {
        'NAME'           : '%%define name %s\nName: %%{name}\n',
        'VERSION'        : '%%define version %s\nVersion: %%{version}\n',
        'PACKAGEVERSION' : '%%define release %s\nRelease: %%{release}\n',
        'X_RPM_GROUP'    : 'Group: %s\n',
        'SUMMARY'        : 'Summary: %s\n',
        'LICENSE'        : 'License: %s\n', }
    str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )
    # now the optional tags
    # Fix: 'X_RPM_CONFLICTS' previously appeared twice in this dict; the
    # second occurrence silently overwrote the first, so the duplicate has
    # been removed (the emitted format is identical either way).
    optional_header_fields = {
        'VENDOR'              : 'Vendor: %s\n',
        'X_RPM_URL'           : 'Url: %s\n',
        'SOURCE_URL'          : 'Source: %s\n',
        'SUMMARY_'            : 'Summary(%s): %s\n',
        'X_RPM_DISTRIBUTION'  : 'Distribution: %s\n',
        'X_RPM_ICON'          : 'Icon: %s\n',
        'X_RPM_PACKAGER'      : 'Packager: %s\n',
        'X_RPM_GROUP_'        : 'Group(%s): %s\n',
        'X_RPM_REQUIRES'      : 'Requires: %s\n',
        'X_RPM_PROVIDES'      : 'Provides: %s\n',
        'X_RPM_CONFLICTS'     : 'Conflicts: %s\n',
        'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\n',
        'X_RPM_SERIAL'        : 'Serial: %s\n',
        'X_RPM_EPOCH'         : 'Epoch: %s\n',
        'X_RPM_AUTOREQPROV'   : 'AutoReqProv: %s\n',
        'X_RPM_EXCLUDEARCH'   : 'ExcludeArch: %s\n',
        'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\n',
        'X_RPM_PREFIX'        : 'Prefix: %s\n',
        # internal use
        'X_RPM_BUILDROOT'     : 'BuildRoot: %s\n', }
    # fill in default values:
    # Adding a BuildRequires renders the .rpm unbuildable under systems which
    # are not managed by rpm, since the database to resolve this dependency is
    # missing (take Gentoo as an example)
    # if not s.has_key('x_rpm_BuildRequires'):
    #     s['x_rpm_BuildRequires'] = 'scons'
    if not spec.has_key('X_RPM_BUILDROOT'):
        spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'
    str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )
    return str
#
# mandatory and optional file tags
#
def build_specfile_filesection(spec, files):
    """ builds the %files section of the specfile
    """
    str = '%files\n'
    if not spec.has_key('X_RPM_DEFATTR'):
        spec['X_RPM_DEFATTR'] = '(-,root,root)'
    str = str + '%%defattr %s\n' % spec['X_RPM_DEFATTR']
    # Per-file attributes are read off each source node as optional
    # PACKAGING_* attributes; absent ones are simply skipped.
    supported_tags = {
        'PACKAGING_CONFIG'           : '%%config %s',
        'PACKAGING_CONFIG_NOREPLACE' : '%%config(noreplace) %s',
        'PACKAGING_DOC'              : '%%doc %s',
        'PACKAGING_UNIX_ATTR'        : '%%attr %s',
        'PACKAGING_LANG_'            : '%%lang(%s) %s',
        'PACKAGING_X_RPM_VERIFY'     : '%%verify %s',
        'PACKAGING_X_RPM_DIR'        : '%%dir %s',
        'PACKAGING_X_RPM_DOCDIR'     : '%%docdir %s',
        'PACKAGING_X_RPM_GHOST'      : '%%ghost %s', }
    for file in files:
        # build the tagset
        tags = {}
        for k in supported_tags.keys():
            try:
                tags[k]=getattr(file, k)
            except AttributeError:
                pass
        # compile the tagset
        str = str + SimpleTagCompiler(supported_tags, mandatory=0).compile( tags )
        str = str + ' '
        str = str + file.PACKAGING_INSTALL_LOCATION
        str = str + '\n\n'
    return str
class SimpleTagCompiler:
    """ This class is a simple string substition utility:
    the replacement specfication is stored in the tagset dictionary, something
    like:
     { "abc"  : "cdef %s ",
       "abc_" : "cdef %s %s" }
    the compile function gets a value dictionary, which may look like:
    { "abc"    : "ghij",
      "abc_gh" : "ij" }
    The resulting string will be:
    "cdef ghij cdef gh ij"
    """
    def __init__(self, tagset, mandatory=1):
        # tagset: key -> %-format string; a trailing '_' in a key marks an
        # "international" tag whose two-char language suffix is interpolated.
        # mandatory: when true, a missing key raises the KeyError upward.
        self.tagset = tagset
        self.mandatory = mandatory
    def compile(self, values):
        """ compiles the tagset and returns a str containing the result
        """
        def is_international(tag):
            #return tag.endswith('_')
            return tag[-1:] == '_'
        def get_country_code(tag):
            return tag[-2:]
        def strip_country_code(tag):
            return tag[:-2]
        replacements = self.tagset.items()
        str = ""
        # Plain ("domestic") tags first: direct key -> value substitution.
        #domestic = [ (k,v) for k,v in replacements if not is_international(k) ]
        domestic = filter(lambda t, i=is_international: not i(t[0]), replacements)
        for key, replacement in domestic:
            try:
                str = str + replacement % values[key]
            except KeyError, e:
                if self.mandatory:
                    raise e
        # Then international tags: every value key matching '<tag><cc>' is
        # emitted once with its country code and value interpolated.
        #international = [ (k,v) for k,v in replacements if is_international(k) ]
        international = filter(lambda t, i=is_international: i(t[0]), replacements)
        for key, replacement in international:
            try:
                #int_values_for_key = [ (get_country_code(k),v) for k,v in values.items() if strip_country_code(k) == key ]
                x = filter(lambda t,key=key,s=strip_country_code: s(t[0]) == key, values.items())
                int_values_for_key = map(lambda t,g=get_country_code: (g(t[0]),t[1]), x)
                for v in int_values_for_key:
                    str = str + replacement % v
            except KeyError, e:
                if self.mandatory:
                    raise e
        return str
|
{
"content_hash": "db8dbfd417f69adafb0e1f7b4d8138f3",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 125,
"avg_line_length": 37.30386740331492,
"alnum_prop": 0.585678317535545,
"repo_name": "kuiche/chromium",
"id": "d13ddc4357fff0f70527bff994b990d7734a307b",
"size": "13504",
"binary": false,
"copies": "3",
"ref": "refs/heads/trunk",
"path": "third_party/scons/scons-local/SCons/Tool/packaging/rpm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import re
import click
def test_other_command_invoke(runner):
    """ctx.invoke with keyword parameters runs the other command's callback."""
    @click.command()
    @click.pass_context
    def cli(ctx):
        return ctx.invoke(other_cmd, arg=42)
    @click.command()
    @click.argument('arg', type=click.INT)
    def other_cmd(arg):
        click.echo(arg)
    result = runner.invoke(cli, [])
    assert not result.exception
    assert result.output == '42\n'
def test_other_command_invoke_invalid_custom_error(runner):
    """Positional args to ctx.invoke raise a RuntimeError pointing at the
    3.2 upgrade docs; the final assert pins that the shim dies before 5.0."""
    @click.command()
    @click.pass_context
    def cli(ctx):
        return ctx.invoke(other_cmd, 42)
    @click.command()
    @click.argument('arg', type=click.INT)
    def other_cmd(arg):
        click.echo(arg)
    result = runner.invoke(cli, [])
    assert isinstance(result.exception, RuntimeError)
    assert 'upgrading-to-3.2' in str(result.exception)
    assert click.__version__ < '5.0'
def test_other_command_forward(runner):
    """ctx.forward reuses the caller's parsed params (count stays 1), while
    ctx.invoke uses only the explicitly passed ones (count=42)."""
    cli = click.Group()
    @cli.command()
    @click.option('--count', default=1)
    def test(count):
        click.echo('Count: %d' % count)
    @cli.command()
    @click.option('--count', default=1)
    @click.pass_context
    def dist(ctx, count):
        ctx.forward(test)
        ctx.invoke(test, count=42)
    result = runner.invoke(cli, ['dist'])
    assert not result.exception
    assert result.output == 'Count: 1\nCount: 42\n'
def test_auto_shorthelp(runner):
    """Group help derives each command's short help from its docstring:
    long ones are truncated with '...', special characters pass through.
    NOTE: the inner docstrings are the fixture data here -- do not edit them."""
    @click.group()
    def cli():
        pass
    @cli.command()
    def short():
        """This is a short text."""
    @cli.command()
    def special_chars():
        """Login and store the token in ~/.netrc."""
    @cli.command()
    def long():
        """This is a long text that is too long to show as short help
        and will be truncated instead."""
    result = runner.invoke(cli, ['--help'])
    assert re.search(
        r'Commands:\n\s+'
        r'long\s+This is a long text that is too long to show\.\.\.\n\s+'
        r'short\s+This is a short text\.\n\s+'
        r'special_chars\s+Login and store the token in ~/.netrc\.\s*',
        result.output) is not None
def test_default_maps(runner):
    """A default_map passed to invoke overrides a subcommand option's
    declared default without any command-line flag."""
    @click.group()
    def cli():
        pass
    @cli.command()
    @click.option('--name', default='normal')
    def foo(name):
        click.echo(name)
    result = runner.invoke(cli, ['foo'], default_map={
        'foo': {'name': 'changed'}
    })
    assert not result.exception
    assert result.output == 'changed\n'
def test_group_with_args(runner):
    """A group that takes its own argument: no args shows help, the group
    arg alone is 'missing command' (exit 2), arg+subcommand runs both."""
    @click.group()
    @click.argument('obj')
    def cli(obj):
        click.echo('obj=%s' % obj)
    @cli.command()
    def move():
        click.echo('move')
    result = runner.invoke(cli, [])
    assert result.exit_code == 0
    assert 'Show this message and exit.' in result.output
    result = runner.invoke(cli, ['obj1'])
    assert result.exit_code == 2
    assert 'Error: Missing command.' in result.output
    result = runner.invoke(cli, ['obj1', '--help'])
    assert result.exit_code == 0
    assert 'Show this message and exit.' in result.output
    result = runner.invoke(cli, ['obj1', 'move'])
    assert result.exit_code == 0
    assert result.output == 'obj=obj1\nmove\n'
def test_base_command(runner):
    """A custom BaseCommand subclass can delegate parsing to optparse and
    still participate in a click Group (parsing, invocation and --help)."""
    import optparse
    @click.group()
    def cli():
        pass
    class OptParseCommand(click.BaseCommand):
        def __init__(self, name, parser, callback):
            click.BaseCommand.__init__(self, name)
            self.parser = parser
            self.callback = callback
        def parse_args(self, ctx, args):
            # Map optparse results onto the click context: leftover
            # positionals into ctx.args, option values into ctx.params.
            try:
                opts, args = parser.parse_args(args)
            except Exception as e:
                ctx.fail(str(e))
            ctx.args = args
            ctx.params = vars(opts)
        def get_usage(self, ctx):
            return self.parser.get_usage()
        def get_help(self, ctx):
            return self.parser.format_help()
        def invoke(self, ctx):
            ctx.invoke(self.callback, ctx.args, **ctx.params)
    parser = optparse.OptionParser(usage='Usage: foo test [OPTIONS]')
    parser.add_option("-f", "--file", dest="filename",
                      help="write report to FILE", metavar="FILE")
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose", default=True,
                      help="don't print status messages to stdout")
    def test_callback(args, filename, verbose):
        click.echo(' '.join(args))
        click.echo(filename)
        click.echo(verbose)
    cli.add_command(OptParseCommand('test', parser, test_callback))
    result = runner.invoke(cli, ['test', '-f', 'test.txt', '-q',
                                 'whatever.txt', 'whateverelse.txt'])
    assert not result.exception
    assert result.output.splitlines() == [
        'whatever.txt whateverelse.txt',
        'test.txt',
        'False',
    ]
    result = runner.invoke(cli, ['test', '--help'])
    assert not result.exception
    assert result.output.splitlines() == [
        'Usage: foo test [OPTIONS]',
        '',
        'Options:',
        '  -h, --help            show this help message and exit',
        '  -f FILE, --file=FILE  write report to FILE',
        '  -q, --quiet           don\'t print status messages to stdout',
    ]
def test_object_propagation(runner):
    """ctx.obj set by the group is visible to subcommands, whether the
    group is chained or not."""
    for chain in False, True:
        @click.group(chain=chain)
        @click.option('--debug/--no-debug', default=False)
        @click.pass_context
        def cli(ctx, debug):
            if ctx.obj is None:
                ctx.obj = {}
            ctx.obj['DEBUG'] = debug
        @cli.command()
        @click.pass_context
        def sync(ctx):
            click.echo('Debug is %s' % (ctx.obj['DEBUG'] and 'on' or 'off'))
        result = runner.invoke(cli, ['sync'])
        assert result.exception is None
        assert result.output == 'Debug is off\n'
def test_other_command_invoke_with_defaults(runner):
    """ctx.invoke without explicit params falls back to the target
    command's declared option defaults (and sets info_name correctly)."""
    @click.command()
    @click.pass_context
    def cli(ctx):
        return ctx.invoke(other_cmd)
    @click.command()
    @click.option('--foo', type=click.INT, default=42)
    @click.pass_context
    def other_cmd(ctx, foo):
        assert ctx.info_name == 'other_cmd'
        click.echo(foo)
    result = runner.invoke(cli, [])
    assert not result.exception
    assert result.output == '42\n'
def test_invoked_subcommand(runner):
    """With invoke_without_command=True the group body runs even bare;
    ctx.invoked_subcommand is None then, and the name otherwise."""
    @click.group(invoke_without_command=True)
    @click.pass_context
    def cli(ctx):
        if ctx.invoked_subcommand is None:
            click.echo('no subcommand, use default')
            ctx.invoke(sync)
        else:
            click.echo('invoke subcommand')
    @cli.command()
    def sync():
        click.echo('in subcommand')
    result = runner.invoke(cli, ['sync'])
    assert not result.exception
    assert result.output == 'invoke subcommand\nin subcommand\n'
    result = runner.invoke(cli)
    assert not result.exception
    assert result.output == 'no subcommand, use default\nin subcommand\n'
|
{
"content_hash": "8e15e63076f09b40ad336d09b8f68488",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 76,
"avg_line_length": 27.537549407114625,
"alnum_prop": 0.582603703172097,
"repo_name": "evaautomation/libdispatch",
"id": "7c8b453d69b516be723fd0e2b922ae1b0c12aba4",
"size": "6991",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "thirdparty/click/tests/test_commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "743405"
},
{
"name": "C++",
"bytes": "13425"
},
{
"name": "CMake",
"bytes": "19349"
},
{
"name": "DTrace",
"bytes": "1767"
},
{
"name": "Objective-C",
"bytes": "87793"
},
{
"name": "Objective-C++",
"bytes": "15581"
},
{
"name": "Python",
"bytes": "1866"
},
{
"name": "Shell",
"bytes": "6759"
}
],
"symlink_target": ""
}
|
"""
Implements a simple, robust, safe, Messenger class that allows one to
register callbacks for a signal/slot (or event/handler) kind of
messaging system. One can basically register a callback
function/method to be called when an object sends a particular event.
The Messenger class is Borg. So it is easy to instantiate and use.
This module is also reload-safe, so if the module is reloaded the
callback information is not lost. Method callbacks do not have a
reference counting problem since weak references are used.
The main functionality of this module is provided by three functions,
`connect`, `disconnect` and `send`.
Here is example usage with VTK::
>>> import messenger, vtk
>>> def cb(obj, evt):
... print obj.__class__.__name__, evt
...
>>> o = vtk.vtkProperty()
>>> o.AddObserver('ModifiedEvent', messenger.send)
1
>>> messenger.connect(o, 'ModifiedEvent', cb)
>>>
>>> o.SetRepresentation(1)
vtkOpenGLProperty ModifiedEvent
>>> messenger.connect(o, 'AnyEvent', cb)
>>> o.SetRepresentation(2)
vtkOpenGLProperty ModifiedEvent
vtkOpenGLProperty ModifiedEvent
>>>
>>> messenger.send(o, 'foo')
vtkOpenGLProperty foo
>>> messenger.disconnect(o, 'AnyEvent')
>>> messenger.send(o, 'foo')
>>>
This approach is necessary if you don't want to be bitten by reference
cycles. If you have a Python object holding a reference to a VTK
object and pass a method of the object to the AddObserver call, you
will get a reference cycle that cannot be collected by the garbage
collector. Using this messenger module gets around the problem.
Also note that adding a connection for 'AnyEvent' will trigger a
callback no matter what event was generated. The code above also
shows how disconnection works.
"""
# Author: Prabhu Ramachandran
# Copyright (c) 2004-2007, Enthought, Inc.
# License: BSD Style.
__all__ = ['Messenger', 'MessengerError',
'connect', 'disconnect', 'send']
import types
import sys
import weakref
#################################################################
# This code makes the module reload-safe.
#################################################################
# Preserve Messenger's Borg state across a module reload: if a previous
# incarnation of this module is already loaded, carry its shared dict over.
# (Python 2 dict.has_key.)
_saved = {}
for name in ['messenger', 'tvtk.messenger']:
    if sys.modules.has_key(name):
        mod = sys.modules[name]
        if hasattr(mod, 'Messenger'):
            _saved = mod.Messenger._shared_data
        del mod
        break
#################################################################
# `MessengerError` class for exceptions raised by Messenger.
#################################################################
class MessengerError(Exception):
    """Raised by Messenger for invalid callbacks or unregistered objects."""
    pass
#################################################################
# `Messenger` class.
#################################################################
class Messenger:
    """Implements a messenger class which deals with something like
    signals and slots. Basically, an object can register a signal
    that it plans to emit. Any other object can decide to handle that
    signal (of that particular object) by registering itself with the
    messenger. When a signal is emitted the messenger calls all
    handlers. This makes it totally easy to deal with communication
    between objects. The class is Borg. Rather than use this class,
    please use the 'connect' and 'disconnect' functions.
    """
    # Borg: every instance shares this dict (survives reloads via _saved).
    _shared_data = _saved
    def __init__(self):
        """Create the messenger. This class is Borg. So all
        instances are the same.
        """
        self.__dict__ = self._shared_data
        if not hasattr(self, '_signals'):
            # First instantiation.
            # _signals: hash(obj) -> {event -> {hash(callback) -> slot}}
            self._signals = {}
            self._catch_all = ['AnyEvent', 'all']
    #################################################################
    # 'Messenger' interface.
    #################################################################
    def connect(self, obj, event, callback):
        """ Registers a slot given an object and its signal to slot
        into and also given a bound method in `callback` that should
        have two arguments. `send` will call the callback
        with the object that emitted the signal and the actual
        event/signal as arguments.
        Parameters
        ----------
        - obj : Python object
            Any Python object that will generate the particular event.
        - event : An event (can be anything, usually strings)
            The event `obj` will generate. If this is in the list
            `self._catch_all`, then any event will call this callback.
        - callback : `function` or `method`
            This callback will be called when the object generates the
            particular event. The object, event and any other arguments
            and keyword arguments given by the `obj` are passed along to
            the callback.
        """
        typ = type(callback)
        key = hash(obj)
        if not self._signals.has_key(key):
            self._signals[key] = {}
        signals = self._signals[key]
        if not signals.has_key(event):
            signals[event] = {}
        slots = signals[event]
        callback_key = hash(callback)
        if typ is types.FunctionType:
            slots[callback_key] = (None, callback)
        elif typ is types.MethodType:
            # Bound method: store a weak ref to the instance plus the
            # method name, so connecting does not keep the instance alive
            # (avoids reference cycles with VTK observers).
            obj = weakref.ref(callback.im_self)
            name = callback.__name__
            slots[callback_key] = (obj, name)
        else:
            raise MessengerError, \
                  "Callback must be a function or method. "\
                  "You passed a %s."%(str(callback))
    def disconnect(self, obj, event=None, callback=None, obj_is_hash=False):
        """Disconnects the object and its event handlers.
        Parameters
        ----------
        - obj : Object
            The object that generates events.
        - event : The event. (defaults to None)
        - callback : `function` or `method`
            The event handler.
            If `event` and `callback` are None (the default) all the
            events and handlers for the object are removed. If only
            `callback` is None, only this handler is removed. If `obj`
            and 'event' alone are specified, all handlers for the event
            are removed.
        - obj_is_hash : `bool`
            Specifies if the object passed is a hash instead of the object itself.
            This is needed if the object is gc'd but only the hash exists and one
            wants to disconnect the object.
        """
        signals = self._signals
        if obj_is_hash:
            key = obj
        else:
            key = hash(obj)
        if not signals.has_key(key):
            return
        if callback is None:
            if event is None:
                del signals[key]
            else:
                del signals[key][event]
        else:
            del signals[key][event][hash(callback)]
    def send(self, source, event, *args, **kw_args):
        """To be called by the object `source` that desires to
        generate a particular event. This function in turn invokes
        all the handlers for the event passing the `source` object,
        event and any additional arguments and keyword arguments. If
        any connected callback is garbage collected without being
        disconnected, it is silently removed from the existing slots.
        Parameters
        ----------
        - source : Python object
            This is the object that generated the event.
        - event : The event.
            If there are handlers connected to events called 'AnyEvent'
            or 'all', then any event will invoke these.
        """
        try:
            sigs = self._get_signals(source)
        except (MessengerError, KeyError):
            # Nothing registered for this source: silently do nothing.
            return
        events = self._catch_all[:]
        if event not in events:
            events.append(event)
        for evt in events:
            if sigs.has_key(evt):
                slots = sigs[evt]
                # NOTE: iterates over keys() (a list copy under Python 2),
                # so deleting dead entries below is safe during iteration.
                for key in slots.keys():
                    obj, meth = slots[key]
                    if obj: # instance method
                        inst = obj()
                        if inst:
                            getattr(inst, meth)(source, event, *args, **kw_args)
                        else:
                            # Oops, dead reference.
                            del slots[key]
                    else: # normal function
                        meth(source, event, *args, **kw_args)
    def is_registered(self, obj):
        """Returns if the given object has registered itself with the
        messenger.
        """
        try:
            sigs = self._get_signals(obj)
        except MessengerError:
            return 0
        else:
            return 1
    def get_signal_names(self, obj):
        """Returns a list of signal names the object passed has
        registered.
        """
        return self._get_signals(obj).keys()
    #################################################################
    # Non-public interface.
    #################################################################
    def _get_signals(self, obj):
        """Given an object `obj` it returns the signals of that
        object.
        """
        ret = self._signals.get(hash(obj))
        if ret is None:
            raise MessengerError, \
                  "No such object: %s, has registered itself "\
                  "with the messenger."%obj
        else:
            return ret
#################################################################
# Convenience functions.
#################################################################
# Module-level Borg instance backing the convenience wrappers below.
_messenger = Messenger()
def connect(obj, event, callback):
    # Thin wrapper; docstring is copied from the method right below.
    _messenger.connect(obj, event, callback)
connect.__doc__ = _messenger.connect.__doc__
def disconnect(obj, event=None, callback=None, obj_is_hash=False):
    # Fix: forward obj_is_hash. Previously the parameter was accepted but
    # dropped, so disconnect-by-hash (obj_is_hash=True) hashed the hash
    # again inside Messenger.disconnect and never matched anything.
    _messenger.disconnect(obj, event, callback, obj_is_hash)
disconnect.__doc__ = _messenger.disconnect.__doc__
def send(obj, event, *args, **kw_args):
    # Thin wrapper; docstring is copied from the method right below.
    _messenger.send(obj, event, *args, **kw_args)
send.__doc__ = _messenger.send.__doc__
# Drop the module-level reference used only for reload-safety bookkeeping.
del _saved
|
{
"content_hash": "8318dbe2f8dc33d433a4dc8adae0c6a2",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 80,
"avg_line_length": 32.49038461538461,
"alnum_prop": 0.5537141166025451,
"repo_name": "alexandreleroux/mayavi",
"id": "7ec3a0f12891830ecb43d7436fa1794a93856160",
"size": "10137",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tvtk/messenger.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1054"
},
{
"name": "GAP",
"bytes": "34817"
},
{
"name": "Python",
"bytes": "2511883"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import logging
from django.conf import settings
from openstack_dashboard.api import base
from openstack_dashboard.api import neutron
from gbpclient.v2_0 import client as gbp_client
LOG = logging.getLogger(__name__)
def gbpclient(request):
    # Build a GBP (group-based policy) API client authenticated with the
    # current request's keystone token, talking to the 'network' endpoint.
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    LOG.debug('gbpclient connection created using token "%s" and url "%s"'
              % (request.user.token.id, base.url_for(request, 'network')))
    LOG.debug('user_id=%(user)s, tenant_id=%(tenant)s' %
              {'user': request.user.id, 'tenant': request.user.tenant_id})
    c = gbp_client.Client(token=request.user.token.id,
                          auth_url=base.url_for(request, 'identity'),
                          endpoint_url=base.url_for(request, 'network'),
                          insecure=insecure, ca_cert=cacert)
    return c
class PT(neutron.NeutronAPIDictWrapper):
    """Wrapper for a neutron endpoint."""

    def get_dict(self):
        # Alias 'id' under the legacy 'ep_id' key and return the raw dict.
        data = self._apidict
        data['ep_id'] = data['id']
        return data
class PTG(neutron.NeutronAPIDictWrapper):
    """Wrapper for a neutron endpoint group."""

    def get_dict(self):
        # Alias 'id' under 'epg_id' and return the raw API dict.
        data = self._apidict
        data['epg_id'] = data['id']
        return data
class ExternalPTG(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron external endpoint group."""
def get_dict(self):
eepg_dict = self._apidict
eepg_dict['eepg_id'] = eepg_dict['id']
return eepg_dict
class ExternalConnectivity(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron external segment."""
def get_dict(self):
ec_dict = self._apidict
ec_dict['ec_id'] = ec_dict['id']
return ec_dict
class Contract(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron policy_rule_set."""
def get_dict(self):
policy_rule_set_dict = self._apidict
policy_rule_set_dict['policy_rule_set_id'] = policy_rule_set_dict['id']
return policy_rule_set_dict
class PolicyRule(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron policy rule."""
def get_dict(self):
policyrule_dict = self._apidict
policyrule_dict['policyrule_dict_id'] = policyrule_dict['id']
return policyrule_dict
class PolicyClassifier(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron classifier."""
def get_dict(self):
classifier_dict = self._apidict
classifier_dict['classifier_id'] = classifier_dict['id']
return classifier_dict
class PolicyAction(neutron.NeutronAPIDictWrapper):
"""Wrapper for neutron action."""
def get_dict(self):
action_dict = self._apidict
action_dict['action_id'] = action_dict['id']
return action_dict
class L2Policy(neutron.NeutronAPIDictWrapper):
    """Wrapper for a GBP L2 policy (also reused for L3 policies below)."""
    def get_dict(self):
        policy_dict = self._apidict
        policy_dict['policy_id'] = policy_dict['id']
        return policy_dict
class NetworkServicePolicy(neutron.NeutronAPIDictWrapper):
    """Wrapper for a GBP network service policy."""
    def get_dict(self):
        policy_dict = self._apidict
        return policy_dict
class ServiceChainSpec(neutron.NeutronAPIDictWrapper):
    """Wrapper for a GBP service chain spec."""
    def get_dict(self):
        sc_spec_dict = self._apidict
        return sc_spec_dict
class ServiceChainNode(neutron.NeutronAPIDictWrapper):
    """Wrapper for a GBP service chain node."""
    def get_dict(self):
        sc_node_dict = self._apidict
        return sc_node_dict
class ServiceChainInstance(neutron.NeutronAPIDictWrapper):
    """Wrapper for a GBP service chain instance."""
    def get_dict(self):
        sc_instance_dict = self._apidict
        return sc_instance_dict
class ServiceProfile(neutron.NeutronAPIDictWrapper):
    """Wrapper for a GBP service profile."""
    def get_dict(self):
        sc_profile_dict = self._apidict
        return sc_profile_dict
def policy_target_create(request, **kwargs):
    """Create a policy target *group* and return it wrapped in PTG.

    ``kwargs`` are passed through unchanged as the group's attributes.
    """
    body = {'policy_target_group': kwargs}
    # Bug fix: the API returns the new object under the
    # 'policy_target_group' key; the old code read the legacy
    # 'endpoint_group' key, which always yielded None.
    policy_target = gbpclient(request).create_policy_target_group(
        body).get('policy_target_group')
    return PTG(policy_target)
def pt_create(request, **kwargs):
    """Create a single policy target (PT) from ``kwargs``."""
    request_body = {'policy_target': kwargs}
    created = gbpclient(request).create_policy_target(
        request_body).get('policy_target')
    # NOTE(review): the result is wrapped in PTG rather than PT (unlike
    # pt_list()); preserved as-is — confirm whether callers rely on PTG.
    return PTG(created)
def pt_list(request, tenant_id, **kwargs):
    """List policy targets: the tenant's own followed by shared ones."""
    own = gbpclient(request).list_policy_targets(
        tenant_id=tenant_id, shared=False, **kwargs).get('policy_targets')
    shared = gbpclient(request).list_policy_targets(
        shared=True, **kwargs).get('policy_targets')
    return [PT(item) for item in own + shared]
def pt_delete(request, pt_id):
    """Delete the policy target ``pt_id``."""
    gbpclient(request).delete_policy_target(pt_id)
def policy_target_list(request, tenant_id, **kwargs):
    """List policy target *groups*: the tenant's own plus shared ones."""
    policy_targets = gbpclient(request).list_policy_target_groups(
        tenant_id=tenant_id, shared=False, **kwargs).get(
        'policy_target_groups')
    policy_targets.extend(gbpclient(request).list_policy_target_groups(
        shared=True, **kwargs).get('policy_target_groups'))
    return [PTG(policy_target) for policy_target in policy_targets]
def policy_target_get(request, policy_target_id):
    """Fetch a single policy target *group* wrapped in PTG."""
    policy_target = gbpclient(request).show_policy_target_group(
        policy_target_id).get('policy_target_group')
    return PTG(policy_target)
def policy_target_delete(request, policy_target_id):
    """Delete the policy target *group* ``policy_target_id``."""
    gbpclient(request).delete_policy_target_group(policy_target_id)
def policy_target_update(request, policy_target_id, **kwargs):
    """Update a policy target *group* with ``kwargs``; returns a PTG."""
    body = {'policy_target_group': kwargs}
    policy_target = gbpclient(request).update_policy_target_group(
        policy_target_id, body).get('policy_target_group')
    return PTG(policy_target)
def ext_policy_target_create(request, **kwargs):
    """Create an external policy and return it wrapped in ExternalPTG."""
    body = {'external_policy': kwargs}
    # Bug fix: create_external_policy returns the new object under the
    # 'external_policy' key; the old code read the legacy 'endpoint_group'
    # key, which always yielded None.
    policy_target = gbpclient(request).create_external_policy(
        body).get('external_policy')
    return ExternalPTG(policy_target)
def ext_policy_target_list(request, tenant_id, **kwargs):
    """List external policies: the tenant's own followed by shared ones."""
    policy_targets = gbpclient(request).list_external_policies(
        tenant_id=tenant_id, shared=False, **kwargs).get('external_policies')
    policy_targets.extend(gbpclient(request).list_external_policies(
        shared=True, **kwargs).get('external_policies'))
    return [ExternalPTG(policy_target) for policy_target in policy_targets]
def ext_policy_target_get(request, ext_policy_target_id):
    """Fetch a single external policy wrapped in ExternalPTG."""
    policy_target = gbpclient(request).show_external_policy(
        ext_policy_target_id).get('external_policy')
    return ExternalPTG(policy_target)
def ext_policy_target_delete(request, ext_policy_target_id):
    """Delete the external policy ``ext_policy_target_id``."""
    gbpclient(request).delete_external_policy(ext_policy_target_id)
def ext_policy_target_update(request, ext_policy_target_id, **kwargs):
    """Update an external policy and return it wrapped in ExternalPTG."""
    body = {'external_policy': kwargs}
    policy_target = gbpclient(request).update_external_policy(
        ext_policy_target_id, body).get('external_policy')
    return ExternalPTG(policy_target)
def policy_rule_set_create(request, **kwargs):
    """Create a policy rule set and return it wrapped in Contract."""
    body = {'policy_rule_set': kwargs}
    policy_rule_set = gbpclient(request).create_policy_rule_set(
        body).get('policy_rule_set')
    return Contract(policy_rule_set)
def policy_rule_set_list(request, tenant_id, **kwargs):
    """List policy rule sets: the tenant's own followed by shared ones."""
    policy_rule_sets = gbpclient(request).list_policy_rule_sets(
        tenant_id=tenant_id, shared=False, **kwargs).get('policy_rule_sets')
    policy_rule_sets.extend(gbpclient(request).list_policy_rule_sets(
        shared=True, **kwargs).get('policy_rule_sets'))
    return [Contract(policy_rule_set) for policy_rule_set in policy_rule_sets]
def policy_rule_set_get(request, policy_rule_set_id):
    """Fetch a single policy rule set wrapped in Contract."""
    policy_rule_set = gbpclient(request).show_policy_rule_set(
        policy_rule_set_id).get('policy_rule_set')
    return Contract(policy_rule_set)
def policy_rule_set_delete(request, policy_rule_set_id):
    """Delete the policy rule set ``policy_rule_set_id``."""
    gbpclient(request).delete_policy_rule_set(policy_rule_set_id)
def policy_rule_set_update(request, policy_rule_set_id, **kwargs):
    """Update a policy rule set and return it wrapped in Contract."""
    body = {'policy_rule_set': kwargs}
    policy_rule_set = gbpclient(request).update_policy_rule_set(
        policy_rule_set_id, body).get('policy_rule_set')
    return Contract(policy_rule_set)
def policyrule_create(request, **kwargs):
    """Create a policy rule and return it wrapped in PolicyRule."""
    body = {'policy_rule': kwargs}
    policy_rule = gbpclient(request).create_policy_rule(
        body).get('policy_rule')
    return PolicyRule(policy_rule)
def policyrule_update(request, prid, **kwargs):
    """Update policy rule ``prid`` and return it wrapped in PolicyRule."""
    body = {'policy_rule': kwargs}
    policy_rule = gbpclient(request).update_policy_rule(prid,
        body).get('policy_rule')
    return PolicyRule(policy_rule)
def policyrule_list(request, tenant_id, **kwargs):
    """List policy rules: the tenant's own followed by shared ones."""
    own_rules = gbpclient(request).list_policy_rules(
        tenant_id=tenant_id, shared=False, **kwargs).get('policy_rules')
    shared_rules = gbpclient(request).list_policy_rules(
        shared=True, **kwargs).get('policy_rules')
    return [PolicyRule(rule) for rule in own_rules + shared_rules]
def policyclassifier_create(request, **kwargs):
    """Create a policy classifier wrapped in PolicyClassifier."""
    body = {'policy_classifier': kwargs}
    classifier = gbpclient(request).create_policy_classifier(
        body).get('policy_classifier')
    return PolicyClassifier(classifier)
def policyclassifier_list(request, tenant_id, **kwargs):
    """List policy classifiers: the tenant's own followed by shared ones."""
    classifiers = gbpclient(request).list_policy_classifiers(
        tenant_id=tenant_id, shared=False, **kwargs).get('policy_classifiers')
    classifiers.extend(gbpclient(request).list_policy_classifiers(shared=True,
        **kwargs).get('policy_classifiers'))
    return [PolicyClassifier(pc) for pc in classifiers]
def policyaction_create(request, **kwargs):
    """Create a policy action and return it wrapped in PolicyAction."""
    body = {'policy_action': kwargs}
    action = gbpclient(request).create_policy_action(
        body).get('policy_action')
    return PolicyAction(action)
def policyaction_list(request, tenant_id, **kwargs):
    """List policy actions: the tenant's own followed by shared ones."""
    actions = gbpclient(request).list_policy_actions(tenant_id=tenant_id,
        shared=False, **kwargs).get('policy_actions')
    actions.extend(gbpclient(request).list_policy_actions(shared=True,
        **kwargs).get('policy_actions'))
    return [PolicyAction(pa) for pa in actions]
def policyaction_delete(request, pa_id):
    """Delete the policy action ``pa_id``."""
    gbpclient(request).delete_policy_action(pa_id)
def policyaction_get(request, pa_id):
    """Fetch a single policy action wrapped in PolicyAction."""
    policyaction = gbpclient(request).show_policy_action(
        pa_id).get('policy_action')
    return PolicyAction(policyaction)
def policyaction_update(request, pc_id, **kwargs):
    """Update the policy action ``pc_id`` with ``kwargs``.

    ``pc_id`` is the policy *action* UUID; the parameter name is kept for
    backward compatibility with existing callers.
    """
    body = {'policy_action': kwargs}
    action = gbpclient(request).update_policy_action(pc_id,
        body).get('policy_action')
    # Bug fix: the updated object is a policy action, but it used to be
    # wrapped in PolicyClassifier (exposing a bogus 'classifier_id' alias).
    return PolicyAction(action)
def policyrule_get(request, pr_id):
    """Fetch a single policy rule wrapped in PolicyRule."""
    policyrule = gbpclient(request).show_policy_rule(
        pr_id).get('policy_rule')
    return PolicyRule(policyrule)
def policyrule_delete(request, pr_id):
    """Delete the policy rule ``pr_id``."""
    return gbpclient(request).delete_policy_rule(pr_id)
def policyclassifier_get(request, pc_id):
    """Fetch a single policy classifier wrapped in PolicyClassifier."""
    policyclassifier = gbpclient(request).show_policy_classifier(
        pc_id).get('policy_classifier')
    return PolicyClassifier(policyclassifier)
def policyclassifier_delete(request, pc_id):
    """Delete the policy classifier ``pc_id``."""
    gbpclient(request).delete_policy_classifier(pc_id)
def policyclassifier_update(request, pc_id, **kwargs):
    """Update a policy classifier wrapped in PolicyClassifier."""
    body = {'policy_classifier': kwargs}
    classifier = gbpclient(request).update_policy_classifier(pc_id,
        body).get('policy_classifier')
    return PolicyClassifier(classifier)
def l3policy_list(request, tenant_id, **kwargs):
    """List L3 policies: the tenant's own followed by shared ones."""
    policies = gbpclient(request).list_l3_policies(tenant_id=tenant_id,
        shared=False, **kwargs).get('l3_policies')
    policies.extend(gbpclient(request).list_l3_policies(shared=True,
        **kwargs).get('l3_policies'))
    # NOTE(review): L3 policies are wrapped in the generic L2Policy wrapper
    # (no dedicated L3 wrapper exists in this module) -- confirm intended.
    return [L2Policy(item) for item in policies]
def l2policy_list(request, tenant_id, **kwargs):
    """List L2 policies: the tenant's own followed by shared ones."""
    policies = gbpclient(request).list_l2_policies(tenant_id=tenant_id,
        shared=False, **kwargs).get('l2_policies')
    policies.extend(gbpclient(request).list_l2_policies(shared=True,
        **kwargs).get('l2_policies'))
    return [L2Policy(item) for item in policies]
def networkservicepolicy_list(request, tenant_id, **kwargs):
    """List network service policies: the tenant's own plus shared ones."""
    policies = gbpclient(request).list_network_service_policies(
        tenant_id=tenant_id, shared=False, **kwargs).get(
        'network_service_policies')
    policies.extend(gbpclient(request).list_network_service_policies(
        shared=True, **kwargs).get('network_service_policies'))
    return [NetworkServicePolicy(item) for item in policies]
def externalconnectivity_list(request, tenant_id, **kwargs):
    """List external segments: the tenant's own followed by shared ones."""
    external_connectivities = gbpclient(request).list_external_segments(
        tenant_id=tenant_id, shared=False, **kwargs).get('external_segments')
    external_connectivities.extend(gbpclient(request).list_external_segments(
        shared=True, **kwargs).get('external_segments'))
    return [ExternalConnectivity(external_connectivity)
            for external_connectivity in external_connectivities]
def create_externalconnectivity(request, **kwargs):
    """Create an external segment wrapped in ExternalConnectivity."""
    body = {'external_segment': kwargs}
    es = gbpclient(request).create_external_segment(
        body).get('external_segment')
    return ExternalConnectivity(es)
def get_externalconnectivity(request, external_connectivity_id):
    """Fetch a single external segment wrapped in ExternalConnectivity."""
    es = gbpclient(request).show_external_segment(
        external_connectivity_id).get('external_segment')
    return ExternalConnectivity(es)
def delete_externalconnectivity(request, external_connectivity_id, **kwargs):
    """Delete the external segment (extra ``kwargs`` are ignored)."""
    gbpclient(request).delete_external_segment(external_connectivity_id)
def update_externalconnectivity(request, external_connectivity_id, **kwargs):
    """Update an external segment wrapped in ExternalConnectivity."""
    body = {'external_segment': kwargs}
    ec = gbpclient(request).update_external_segment(
        external_connectivity_id, body).get('external_segment')
    return ExternalConnectivity(ec)
def create_networkservice_policy(request, **kwargs):
    """Create a network service policy wrapped in NetworkServicePolicy."""
    body = {'network_service_policy': kwargs}
    spolicy = gbpclient(request).create_network_service_policy(
        body).get('network_service_policy')
    return NetworkServicePolicy(spolicy)
def update_networkservice_policy(request, policy_id, **kwargs):
    """Update a network service policy wrapped in NetworkServicePolicy."""
    body = {'network_service_policy': kwargs}
    spolicy = gbpclient(request).update_network_service_policy(
        policy_id, body).get('network_service_policy')
    return NetworkServicePolicy(spolicy)
def delete_networkservice_policy(request, policy_id, **kwargs):
    """Delete the network service policy (extra ``kwargs`` are ignored)."""
    gbpclient(request).delete_network_service_policy(policy_id)
def get_networkservice_policy(request, policy_id):
    """Fetch a network service policy wrapped in NetworkServicePolicy."""
    spolicy = gbpclient(request).show_network_service_policy(
        policy_id).get('network_service_policy')
    return NetworkServicePolicy(spolicy)
def l3policy_get(request, pc_id, **kwargs):
    """Fetch an L3 policy as a plain dict (not wrapped)."""
    return gbpclient(request).show_l3_policy(pc_id).get('l3_policy')
def l3policy_create(request, **kwargs):
    """Create an L3 policy and return it as a plain dict (not wrapped)."""
    body = {'l3_policy': kwargs}
    return gbpclient(request).create_l3_policy(body).get('l3_policy')
def l3policy_update(request, pc_id, **kwargs):
    """Update an L3 policy and return it as a plain dict (not wrapped)."""
    body = {'l3_policy': kwargs}
    return gbpclient(request).update_l3_policy(pc_id, body).get('l3_policy')
def l3policy_delete(request, policy_id):
    """Delete the L3 policy ``policy_id``."""
    gbpclient(request).delete_l3_policy(policy_id)
def l2policy_get(request, pc_id, **kwargs):
    """Fetch a single L2 policy wrapped in L2Policy."""
    return L2Policy(gbpclient(request).show_l2_policy(pc_id).get('l2_policy'))
def l2policy_create(request, **kwargs):
    """Create an L2 policy and return it wrapped in L2Policy."""
    body = {'l2_policy': kwargs}
    policy = gbpclient(request).create_l2_policy(body).get('l2_policy')
    return L2Policy(policy)
def l2policy_update(request, pc_id, **kwargs):
    """Update an L2 policy and return it wrapped in L2Policy."""
    body = {'l2_policy': kwargs}
    policy = gbpclient(request).update_l2_policy(pc_id, body).get('l2_policy')
    return L2Policy(policy)
def l2policy_delete(request, policy_id):
    """Delete the L2 policy ``policy_id``."""
    gbpclient(request).delete_l2_policy(policy_id)
def servicechainnode_list(request, tenant_id, **kwargs):
    """List service chain nodes: the tenant's own followed by shared ones."""
    sc_nodes = gbpclient(request).list_servicechain_nodes(tenant_id=tenant_id,
        shared=False, **kwargs).get('servicechain_nodes')
    sc_nodes.extend(gbpclient(request).list_servicechain_nodes(shared=True,
        **kwargs).get('servicechain_nodes'))
    return [ServiceChainNode(item) for item in sc_nodes]
def servicechainspec_list(request, tenant_id, **kwargs):
    """List service chain specs: the tenant's own followed by shared ones."""
    sc_specs = gbpclient(request).list_servicechain_specs(tenant_id=tenant_id,
        shared=False, **kwargs).get('servicechain_specs')
    sc_specs.extend(gbpclient(request).list_servicechain_specs(shared=True,
        **kwargs).get('servicechain_specs'))
    return [ServiceChainSpec(item) for item in sc_specs]
def servicechaininstance_list(request, tenant_id, **kwargs):
    """List service chain instances: the tenant's own plus shared ones."""
    sc_instances = gbpclient(request).list_servicechain_instances(
        tenant_id=tenant_id, shared=False, **kwargs).get(
        'servicechain_instances')
    sc_instances.extend(gbpclient(request).list_servicechain_instances(
        shared=True, **kwargs).get('servicechain_instances'))
    return [ServiceChainInstance(item) for item in sc_instances]
def get_servicechain_node(request, scnode_id):
    """Fetch a single service chain node wrapped in ServiceChainNode."""
    scnode = gbpclient(request).show_servicechain_node(
        scnode_id).get('servicechain_node')
    return ServiceChainNode(scnode)
def create_servicechain_node(request, **kwargs):
    """Create a service chain node wrapped in ServiceChainNode."""
    body = {'servicechain_node': kwargs}
    sc_node = gbpclient(request).create_servicechain_node(
        body).get('servicechain_node')
    return ServiceChainNode(sc_node)
def update_servicechain_node(request, scnode_id, **kwargs):
    """Update a service chain node wrapped in ServiceChainNode."""
    body = {'servicechain_node': kwargs}
    sc_node = gbpclient(request).update_servicechain_node(
        scnode_id, body).get('servicechain_node')
    return ServiceChainNode(sc_node)
def delete_servicechain_node(request, scnode_id):
    """Delete the service chain node ``scnode_id``."""
    gbpclient(request).delete_servicechain_node(scnode_id)
def get_servicechain_spec(request, scspec_id):
    """Fetch a single service chain spec wrapped in ServiceChainSpec."""
    sc_spec = gbpclient(request).show_servicechain_spec(
        scspec_id).get('servicechain_spec')
    return ServiceChainSpec(sc_spec)
def create_servicechain_spec(request, **kwargs):
    """Create a service chain spec wrapped in ServiceChainSpec."""
    body = {'servicechain_spec': kwargs}
    sc_spec = gbpclient(request).create_servicechain_spec(
        body).get('servicechain_spec')
    return ServiceChainSpec(sc_spec)
def update_servicechain_spec(request, scspec_id, **kwargs):
    """Update a service chain spec wrapped in ServiceChainSpec."""
    body = {'servicechain_spec': kwargs}
    sc_spec = gbpclient(request).update_servicechain_spec(
        scspec_id, body).get('servicechain_spec')
    return ServiceChainSpec(sc_spec)
def delete_servicechain_spec(request, scspec_id):
    """Delete the service chain spec ``scspec_id``."""
    gbpclient(request).delete_servicechain_spec(scspec_id)
def get_servicechain_instance(request, scinstance_id):
    """Fetch a service chain instance wrapped in ServiceChainInstance."""
    sc_instance = gbpclient(request).show_servicechain_instance(
        scinstance_id).get('servicechain_instance')
    return ServiceChainInstance(sc_instance)
def create_servicechain_instance(request, **kwargs):
    """Create a service chain instance wrapped in ServiceChainInstance."""
    body = {'servicechain_instance': kwargs}
    sc_instance = gbpclient(request).create_servicechain_instance(
        body).get('servicechain_instance')
    return ServiceChainInstance(sc_instance)
def update_servicechain_instance(request, scinstance_id, **kwargs):
    """Update a service chain instance wrapped in ServiceChainInstance."""
    body = {'servicechain_instance': kwargs}
    sc_instance = gbpclient(request).update_servicechain_instance(
        scinstance_id, body).get('servicechain_instance')
    return ServiceChainInstance(sc_instance)
def delete_servicechain_instance(request, scinstance_id):
    """Delete the service chain instance ``scinstance_id``."""
    gbpclient(request).delete_servicechain_instance(scinstance_id)
def serviceprofile_list(request, **kwargs):
    """List service profiles.

    The positional ``True`` is presumably the client's ``retrieve_all``
    flag -- TODO confirm against gbpclient.list_service_profiles().
    """
    sc_profile = gbpclient(request).list_service_profiles(True,
        **kwargs).get('service_profiles')
    return [ServiceProfile(item) for item in sc_profile]
def get_service_profile(request, service_profile_id):
    """Fetch a single service profile wrapped in ServiceProfile."""
    service_profile = gbpclient(request).show_service_profile(
        service_profile_id).get('service_profile')
    return ServiceProfile(service_profile)
def create_service_profile(request, **kwargs):
    """Create a service profile wrapped in ServiceProfile."""
    body = {'service_profile': kwargs}
    service_profile = gbpclient(request).create_service_profile(
        body).get('service_profile')
    return ServiceProfile(service_profile)
def delete_service_profile(request, service_profile_id):
    """Delete the service profile ``service_profile_id``."""
    gbpclient(request).delete_service_profile(service_profile_id)
|
{
"content_hash": "a2f96564769eb33a16618160a7c16bef",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 79,
"avg_line_length": 33.3562091503268,
"alnum_prop": 0.702557068678358,
"repo_name": "tbachman/group-based-policy-ui",
"id": "5ac16788e6942ffdda8441ada738579a0791122b",
"size": "20960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gbpui/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32887"
},
{
"name": "HTML",
"bytes": "58517"
},
{
"name": "JavaScript",
"bytes": "4491"
},
{
"name": "Python",
"bytes": "284077"
},
{
"name": "Shell",
"bytes": "16643"
}
],
"symlink_target": ""
}
|
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Should work under Python versions >= 1.5.2, except that source line
information is not available unless 'sys._getframe()' is.
Copyright (C) 2001-2004 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, string, socket, struct, os, traceback, types
try:
import thread
import threading
except ImportError:
thread = None
from SocketServer import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None):
    """
    Read the logging configuration from a ConfigParser-format file.
    This can be called several times from an application, allowing an end user
    the ability to select from various pre-canned configurations (if the
    developer provides a mechanism to present the choices and load the chosen
    configuration).
    In versions of ConfigParser which have the readfp method [typically
    shipped in 2.x versions of Python], you can pass in a file-like object
    rather than a filename, in which case the file-like object will be read
    using readfp.
    """
    import ConfigParser
    cp = ConfigParser.ConfigParser(defaults)
    # Accept either a filename or a file-like object; readfp only exists on
    # ConfigParser versions that support file-like input.
    if hasattr(cp, 'readfp') and hasattr(fname, 'readline'):
        cp.readfp(fname)
    else:
        cp.read(fname)
    formatters = _create_formatters(cp)
    # critical section: logging's module-level lock guards the global
    # handler registry while the old configuration is torn down.
    logging._acquireLock()
    try:
        # Drop every previously-registered handler before installing the
        # new configuration.
        logging._handlers.clear()
        if hasattr(logging, '_handlerList'):
            del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(cp, formatters)
        _install_loggers(cp, handlers)
    finally:
        logging._releaseLock()
def _resolve(name):
    """Resolve a dotted name (e.g. ``pkg.mod.attr``) to a global object."""
    parts = name.split('.')
    dotted = parts.pop(0)
    obj = __import__(dotted)
    for part in parts:
        dotted = '%s.%s' % (dotted, part)
        try:
            obj = getattr(obj, part)
        except AttributeError:
            # The attribute is a not-yet-imported submodule: import it,
            # then retry the lookup on the parent.
            __import__(dotted)
            obj = getattr(obj, part)
    return obj
def _create_formatters(cp):
    """Create and return a dict of formatters keyed by formatter name."""
    flist = cp.get("formatters", "keys")
    if not len(flist):
        return {}
    flist = string.split(flist, ",")
    formatters = {}
    for form in flist:
        form = string.strip(form)
        sectname = "formatter_%s" % form
        opts = cp.options(sectname)
        # "format" and "datefmt" are optional; the raw flag (third arg)
        # keeps %-style placeholders from being interpolated by ConfigParser.
        if "format" in opts:
            fs = cp.get(sectname, "format", 1)
        else:
            fs = None
        if "datefmt" in opts:
            dfs = cp.get(sectname, "datefmt", 1)
        else:
            dfs = None
        # Default to logging.Formatter unless a "class" option overrides it.
        c = logging.Formatter
        if "class" in opts:
            class_name = cp.get(sectname, "class")
            if class_name:
                c = _resolve(class_name)
        f = c(fs, dfs)
        formatters[form] = f
    return formatters
def _install_handlers(cp, formatters):
    """Install and return a dict of handlers keyed by handler name."""
    hlist = cp.get("handlers", "keys")
    if not len(hlist):
        return {}
    hlist = string.split(hlist, ",")
    handlers = {}
    fixups = [] #for inter-handler references
    for hand in hlist:
        hand = string.strip(hand)
        sectname = "handler_%s" % hand
        klass = cp.get(sectname, "class")
        opts = cp.options(sectname)
        if "formatter" in opts:
            fmt = cp.get(sectname, "formatter")
        else:
            fmt = ""
        # Resolve the handler class: first as a name visible inside the
        # logging namespace, then as a fully dotted path.
        try:
            klass = eval(klass, vars(logging))
        except (AttributeError, NameError):
            klass = _resolve(klass)
        # SECURITY: "args" comes straight from the config file and is passed
        # to eval() -- only load configuration files from trusted sources.
        args = cp.get(sectname, "args")
        args = eval(args, vars(logging))
        h = apply(klass, args)
        if "level" in opts:
            level = cp.get(sectname, "level")
            h.setLevel(logging._levelNames[level])
        if len(fmt):
            h.setFormatter(formatters[fmt])
        #temporary hack for FileHandler and MemoryHandler.
        if klass == logging.handlers.MemoryHandler:
            if "target" in opts:
                target = cp.get(sectname,"target")
            else:
                target = ""
            if len(target): #the target handler may not be loaded yet, so keep for later...
                fixups.append((h, target))
        handlers[hand] = h
    #now all handlers are loaded, fixup inter-handler references...
    for h, t in fixups:
        h.setTarget(handlers[t])
    return handlers
def _install_loggers(cp, handlers):
    """Create and install loggers listed in the config, reusing *handlers*."""
    # configure the root first
    llist = cp.get("loggers", "keys")
    llist = string.split(llist, ",")
    llist = map(lambda x: string.strip(x), llist)
    llist.remove("root")
    sectname = "logger_root"
    root = logging.root
    log = root
    opts = cp.options(sectname)
    if "level" in opts:
        level = cp.get(sectname, "level")
        log.setLevel(logging._levelNames[level])
    # Detach the root's old handlers before attaching the configured ones.
    for h in root.handlers[:]:
        root.removeHandler(h)
    hlist = cp.get(sectname, "handlers")
    if len(hlist):
        hlist = string.split(hlist, ",")
        for hand in hlist:
            log.addHandler(handlers[string.strip(hand)])
    #and now the others...
    #we don't want to lose the existing loggers,
    #since other threads may have pointers to them.
    #existing is set to contain all existing loggers,
    #and as we go through the new configuration we
    #remove any which are configured. At the end,
    #what's left in existing is the set of loggers
    #which were in the previous configuration but
    #which are not in the new configuration.
    existing = root.manager.loggerDict.keys()
    #now set up the new ones...
    for log in llist:
        sectname = "logger_%s" % log
        qn = cp.get(sectname, "qualname")
        opts = cp.options(sectname)
        if "propagate" in opts:
            propagate = cp.getint(sectname, "propagate")
        else:
            propagate = 1
        logger = logging.getLogger(qn)
        if qn in existing:
            existing.remove(qn)
        if "level" in opts:
            level = cp.get(sectname, "level")
            logger.setLevel(logging._levelNames[level])
        # Replace the logger's old handlers with the configured set.
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        logger.propagate = propagate
        logger.disabled = 0
        hlist = cp.get(sectname, "handlers")
        if len(hlist):
            hlist = string.split(hlist, ",")
            for hand in hlist:
                logger.addHandler(handlers[string.strip(hand)])
    #Disable any old loggers. There's no point deleting
    #them as other threads may continue to hold references
    #and by disabling them, you stop them doing any logging.
    for log in existing:
        root.manager.loggerDict[log].disabled = 1
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.
    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread:
        raise NotImplementedError, "listen() needs threading to work"
    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.
        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.
            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            import tempfile
            try:
                conn = self.connection
                # Read the 4-byte big-endian payload length first.
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    # Loop until the full payload has arrived (recv may
                    # return short reads).
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    #Apply new configuration. We'd like to be able to
                    #create a StringIO and pass that in, but unfortunately
                    #1.5.2 ConfigParser does not support reading file
                    #objects, only actual files. So we create a temporary
                    #file and remove it later.
                    file = tempfile.mktemp(".ini")
                    f = open(file, "w")
                    f.write(chunk)
                    f.close()
                    try:
                        fileConfig(file)
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    except:
                        traceback.print_exc()
                    os.remove(file)
            except socket.error, e:
                # Ignore connection resets (peer went away); re-raise
                # anything else.
                if type(e.args) != types.TupleType:
                    raise
                else:
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise
    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """
        allow_reuse_address = 1
        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
        def serve_until_stopped(self):
            # Poll with a 1s select timeout so the abort flag (set by
            # stopListening) is noticed promptly.
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
    def serve(rcvr, hdlr, port):
        # Thread target: publish the server in the module-level _listener
        # so stopListening() can reach it, then serve until aborted.
        server = rcvr(port=port, handler=hdlr)
        global _listener
        logging._acquireLock()
        _listener = server
        logging._releaseLock()
        server.serve_until_stopped()
    return threading.Thread(target=serve,
                            args=(ConfigSocketReceiver,
                                  ConfigStreamHandler, port))
def stopListening():
    """Stop the listening server which was created with a call to listen()."""
    global _listener
    # Nothing to do when no listener was ever started (or it was stopped).
    if not _listener:
        return
    logging._acquireLock()
    _listener.abort = 1
    _listener = None
    logging._releaseLock()
|
{
"content_hash": "3ef94abf02cd6f7bb9673045ed7e52d4",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 91,
"avg_line_length": 33.30769230769231,
"alnum_prop": 0.5735476994137502,
"repo_name": "kevinr/750book-web",
"id": "f1dd6dd8e0d591bf5bb925b64ba28df42b8f153e",
"size": "11449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "750book-web-env/lib/python2.7/site-packages/gunicorn/logging_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "93233"
},
{
"name": "JavaScript",
"bytes": "120508"
},
{
"name": "Perl",
"bytes": "6181"
},
{
"name": "Python",
"bytes": "7406700"
},
{
"name": "Shell",
"bytes": "706"
}
],
"symlink_target": ""
}
|
"""
.. _tut-autogenerate-metadata:
===============================
Auto-generating Epochs metadata
===============================
This tutorial shows how to auto-generate metadata for `~mne.Epochs`, based on
events via `mne.epochs.make_metadata`.
We are going to use data from the :ref:`erp-core-dataset` (derived from
:footcite:`Kappenman2021`). This is EEG data from a single participant
performing an active visual task (Eriksen flanker task).
.. note::
If you wish to skip the introductory parts of this tutorial, you may jump
straight to :ref:`tut-autogenerate-metadata-ern` after completing the data
import and event creation in the
:ref:`tut-autogenerate-metadata-preparation` section.
This tutorial is loosely divided into two parts:
1. We will first focus on producing ERPs time-locked to the **visual
   stimulation**, conditional on response correctness and response time, in
   order to familiarize ourselves with the `~mne.epochs.make_metadata`
   function.
2. After that, we will calculate ERPs time-locked to the **responses** – again,
conditional on response correctness – to visualize the error-related
negativity (ERN), i.e. the ERP component associated with incorrect
behavioral responses.
.. _tut-autogenerate-metadata-preparation:
Preparation
^^^^^^^^^^^
Let's start by reading, filtering, and producing a simple visualization of the
raw data. The data is pretty clean and contains very few blinks, so there's no
need to apply sophisticated preprocessing and data cleaning procedures.
We will also convert the `~mne.Annotations` contained in this dataset to events
by calling `mne.events_from_annotations`.
"""
# %%
from pathlib import Path
import matplotlib.pyplot as plt
import mne
data_dir = Path(mne.datasets.erp_core.data_path())
infile = data_dir / 'ERP-CORE_Subject-001_Task-Flankers_eeg.fif'
raw = mne.io.read_raw(infile, preload=True)
raw.filter(l_freq=0.1, h_freq=40)
raw.plot(start=60)
# extract events
all_events, all_event_id = mne.events_from_annotations(raw)
# %%
# Creating metadata from events
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The basics of ``make_metadata``
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Now it's time to think about the time windows to use for epoching and
# metadata generation. **It is important to understand that these time windows
# need not be the same!** That is, the automatically generated metadata might
# include information about events from only a fraction of the epochs duration;
# or it might include events that occurred well outside a given epoch.
#
# Let us look at a concrete example. In the Flankers task of the ERP CORE
# dataset, participants were required to respond to visual stimuli by pressing
# a button. We're interested in looking at the visual evoked responses (ERPs)
# of trials with correct responses. Assume that based on literature
# studies, we decide that responses later than 1500 ms after stimulus onset are
# to be considered invalid, because they don't capture the neuronal processes
# of interest here. We can approach this in the following way with the help of
# `mne.epochs.make_metadata`:
# metadata for each epoch shall include events from the range: [0.0, 1.5] s,
# i.e. starting with stimulus onset and expanding beyond the end of the epoch
metadata_tmin, metadata_tmax = 0.0, 1.5
# auto-create metadata
# this also returns a new events array and an event_id dictionary. we'll see
# later why this is important
metadata, events, event_id = mne.epochs.make_metadata(
events=all_events, event_id=all_event_id,
tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'])
# let's look at what we got!
metadata
# %%
# Specifying time-locked events
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We can see that the generated table has 802 rows, each one corresponding to
# an individual event in ``all_events``. The first column, ``event_name``,
# contains the name of the respective event around which the metadata of that
# specific column was generated – we'll call that the "time-locked event",
# because we'll assign it time point zero.
#
# The names of the remaining columns correspond to the event names specified in
# the ``all_event_id`` dictionary. These columns contain floats; the values
# represent the latency of that specific event in seconds, relative to
# the time-locked event (the one mentioned in the ``event_name`` column).
# For events that didn't occur within the given time window, you'll see
# a value of ``NaN``, simply indicating that no event latency could be
# extracted.
#
# Now, there's a problem here. We want to investigate the visual ERPs only,
# conditional on responses. But the metadata that was just created contains
# one row for **every** event, including responses. While we **could** create
# epochs for all events, allowing us to pass those metadata, and later subset
# the created events, there's a more elegant way to handle things:
# `~mne.epochs.make_metadata` has a ``row_events`` parameter that
# allows us to specify for which events to create metadata **rows**, while
# still creating **columns for all events** in the ``event_id`` dictionary.
#
# Because the metadata, then, only pertains to a subset of our original events,
# it's important to keep the returned ``events`` and ``event_id`` around for
# later use when we're actually going to create our epochs, to ensure that
# metadata, events, and event descriptions stay in sync.
row_events = ['stimulus/compatible/target_left',
'stimulus/compatible/target_right',
'stimulus/incompatible/target_left',
'stimulus/incompatible/target_right']
metadata, events, event_id = mne.epochs.make_metadata(
events=all_events, event_id=all_event_id,
tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],
row_events=row_events)
metadata
# %%
# Keeping only the first events of a group
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The metadata now contains 400 rows – one per stimulation – and the same
# number of columns as before. Great!
#
# We have two types of responses in our data: ``response/left`` and
# ``response/right``. We would like to map those to "correct" and "incorrect".
# To make this easier, we can ask `~mne.epochs.make_metadata` to generate an
# entirely **new** column that refers to the first response observed during the
# given time interval. This works by passing a subset of the
# :term:`hierarchical event descriptors` (HEDs, inspired by
# :footcite:`BigdelyShamloEtAl2013`) used to name events via the ``keep_first``
# parameter. For example, in the case of the HEDs ``response/left`` and
# ``response/right``, we could pass ``keep_first='response'`` to generate a new
# column, ``response``, containing the latency of the respective event. This
# value pertains only to the first (or, in this specific example: the only)
# response, regardless of side (left or right). To indicate **which** event
# type (here: response side) was matched, a second column is added:
# ``first_response``. The values in this column are the event types without the
# string used for matching, as it is already encoded as the column name, i.e.
# in our example, we expect it to only contain ``'left'`` and ``'right'``.
keep_first = 'response'
metadata, events, event_id = mne.epochs.make_metadata(
events=all_events, event_id=all_event_id,
tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],
row_events=row_events,
keep_first=keep_first)
# visualize response times regardless of side
metadata['response'].plot.hist(bins=50, title='Response Times')
# the "first_response" column contains only "left" and "right" entries, derived
# from the initial event named "response/left" and "response/right"
print(metadata['first_response'])
# %%
# We're facing a similar issue with the stimulus events, and now there are not
# only two, but **four** different types: ``stimulus/compatible/target_left``,
# ``stimulus/compatible/target_right``, ``stimulus/incompatible/target_left``,
# and ``stimulus/incompatible/target_right``. Even more, because in the present
# paradigm stimuli were presented in rapid succession, sometimes multiple
# stimulus events occurred within the 1.5 second time window we're using to
# generate our metadata. See for example:
metadata.loc[metadata['stimulus/compatible/target_left'].notna() &
metadata['stimulus/compatible/target_right'].notna(),
:]
# %%
# This can easily lead to confusion during later stages of processing, so let's
# create a column for the first stimulus – which will always be the time-locked
# stimulus, as our time interval starts at 0 seconds. We can pass a **list** of
# strings to ``keep_first``.
keep_first = ['stimulus', 'response']
metadata, events, event_id = mne.epochs.make_metadata(
events=all_events, event_id=all_event_id,
tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],
row_events=row_events,
keep_first=keep_first)
# all times of the time-locked events should be zero
assert all(metadata['stimulus'] == 0)
# the values in the new "first_stimulus" and "first_response" columns indicate
# which events were selected via "keep_first"
metadata[['first_stimulus', 'first_response']]
# %%
# Adding new columns to describe stimulation side and response correctness
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Perfect! Now it's time to define which responses were correct and incorrect.
# We first add a column encoding the side of stimulation, and then simply
# check whether the response matches the stimulation side, and add this result
# to another column.
# Map each stimulus type onto the side it was presented on. Stimuli matching
# neither side would simply be left unmapped (NaN), exactly as with separate
# per-side .loc assignments.
_side_of_stimulus = {
    'compatible/target_left': 'left',
    'incompatible/target_left': 'left',
    'compatible/target_right': 'right',
    'incompatible/target_right': 'right',
}
metadata['stimulus_side'] = metadata['first_stimulus'].map(_side_of_stimulus)

# A response is correct exactly when its side matches the stimulation side.
# Comparison against NaN yields False, so rows without a match stay incorrect.
metadata['response_correct'] = (metadata['stimulus_side'] ==
                                metadata['first_response'])

correct_response_count = metadata['response_correct'].sum()
print(f'Correct responses: {correct_response_count}\n'
      f'Incorrect responses: {len(metadata) - correct_response_count}')
# %%
# Creating ``Epochs`` with metadata, and visualizing ERPs
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# It's finally time to create our epochs! We set the metadata directly on
# instantiation via the ``metadata`` parameter. Also it is important to
# remember to pass ``events`` and ``event_id`` as returned from
# `~mne.epochs.make_metadata`, as we only created metadata for a subset of
# our original events by passing ``row_events``. Otherwise, the length
# of the metadata and the number of epochs would not match and MNE-Python
# would raise an error.
epochs_tmin, epochs_tmax = -0.1, 0.4 # epochs range: [-0.1, 0.4] s
reject = {'eeg': 250e-6} # exclude epochs with strong artifacts
epochs = mne.Epochs(raw=raw, tmin=epochs_tmin, tmax=epochs_tmax,
events=events, event_id=event_id, metadata=metadata,
reject=reject, preload=True)
# %%
# Lastly, let's visualize the ERPs evoked by the visual stimulation, once for
# all trials with correct responses, and once for all trials with correct
# responses and a response time greater than 0.3 seconds
# (i.e., slow responses).

vis_erp = epochs['response_correct'].average()
# NOTE: the selection previously read "(not response_correct)", which
# contradicted the plot titles below ("Slow *Correct* Responses") and the
# comparison discussed afterwards; both ERPs must be from correct trials.
vis_erp_slow = epochs['response_correct & '
                      '(response > 0.3)'].average()

fig, ax = plt.subplots(2, figsize=(6, 6))
vis_erp.plot(gfp=True, spatial_colors=True, axes=ax[0])
vis_erp_slow.plot(gfp=True, spatial_colors=True, axes=ax[1])
ax[0].set_title('Visual ERPs – All Correct Responses')
ax[1].set_title('Visual ERPs – Slow Correct Responses')
fig.tight_layout()
fig
# %%
# Aside from the fact that the data for the (much fewer) slow responses looks
# noisier – which is entirely to be expected – not much of an ERP difference
# can be seen.
#
# .. _tut-autogenerate-metadata-ern:
#
# Applying the knowledge: visualizing the ERN component
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In the following analysis, we will use the same dataset as above, but
# we'll time-lock our epochs to the **response events,** not to the stimulus
# onset. Comparing ERPs associated with correct and incorrect behavioral
# responses, we should be able to see the error-related negativity (ERN) in
# the difference wave.
#
# Since we want to time-lock our analysis to responses, for the automated
# metadata generation we'll consider events occurring up to 1500 ms before
# the response trigger.
#
# We only wish to consider the **last** stimulus and response in each time
# window: Remember that we're dealing with rapid stimulus presentations in
# this paradigm; taking the last response – at time point zero – and the last
# stimulus – the one closest to the response – ensures we actually create
# the right stimulus-response pairings. We can achieve this by passing the
# ``keep_last`` parameter, which works exactly like ``keep_first`` we got to
# know above, only that it keeps the **last** occurrences of the specified
# events and stores them in columns whose names start with ``last_``.
metadata_tmin, metadata_tmax = -1.5, 0
row_events = ['response/left', 'response/right']
keep_last = ['stimulus', 'response']
metadata, events, event_id = mne.epochs.make_metadata(
events=all_events, event_id=all_event_id,
tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],
row_events=row_events,
keep_last=keep_last)
# %%
# Exactly like in the previous example, create new columns ``stimulus_side``
# and ``response_correct``.
# left-side stimulation
metadata.loc[metadata['last_stimulus'].isin(['compatible/target_left',
'incompatible/target_left']),
'stimulus_side'] = 'left'
# right-side stimulation
metadata.loc[metadata['last_stimulus'].isin(['compatible/target_right',
'incompatible/target_right']),
'stimulus_side'] = 'right'
# first assume all responses were incorrect, then mark those as correct where
# the stimulation side matches the response side
metadata['response_correct'] = False
metadata.loc[metadata['stimulus_side'] == metadata['last_response'],
'response_correct'] = True
metadata
# %%
# Now it's already time to epoch the data! When deciding upon the epochs
# duration for this specific analysis, we need to ensure we see quite a bit of
# signal from before and after the motor response. We also must be aware of
# the fact that motor-/muscle-related signals will most likely be present
# **before** the response button trigger pulse appears in our data, so the time
# period close to the response event should not be used for baseline
# correction. But at the same time, we don't want to use a baseline
# period that extends too far away from the button event. The following values
# seem to work quite well.
epochs_tmin, epochs_tmax = -0.6, 0.4
baseline = (-0.4, -0.2)
reject = {'eeg': 250e-6}
epochs = mne.Epochs(raw=raw, tmin=epochs_tmin, tmax=epochs_tmax,
baseline=baseline, reject=reject,
events=events, event_id=event_id, metadata=metadata,
preload=True)
# %%
# Let's do a final sanity check: we want to make sure that in every row, we
# actually have a stimulus. We use ``epochs.metadata`` (and not ``metadata``)
# because when creating the epochs, we passed the ``reject`` parameter, and
# MNE-Python always ensures that ``epochs.metadata`` stays in sync with the
# available epochs.
epochs.metadata.loc[epochs.metadata['last_stimulus'].isna(), :]
# %%
# Bummer! It seems the very first two responses were recorded before the
# first stimulus appeared: the values in the ``last_stimulus`` column are
# missing (``NaN``).
# There is a very simple way to select only those epochs that **do** have a
# stimulus (i.e., are not ``None``):
epochs = epochs['last_stimulus.notna()']
# %%
# Time to calculate the ERPs for correct and incorrect responses.
# For visualization, we'll only look at sensor ``FCz``, which is known to show
# the ERN nicely in the given paradigm. We'll also create a topoplot to get an
# impression of the average scalp potentials measured in the first 100 ms after
# an incorrect response.
resp_erp_correct = epochs['response_correct'].average()
resp_erp_incorrect = epochs['not response_correct'].average()
mne.viz.plot_compare_evokeds({'Correct Response': resp_erp_correct,
'Incorrect Response': resp_erp_incorrect},
picks='FCz', show_sensors=True,
title='ERPs at FCz, time-locked to response')
# topoplot of average field from time 0.0-0.1 s
resp_erp_incorrect.plot_topomap(times=0.05, average=0.05, size=3,
title='Avg. topography 0–100 ms after '
'incorrect responses')
# %%
# We can see a strong negative deflection immediately after incorrect
# responses, compared to correct responses. The topoplot, too, leaves no doubt:
# what we're looking at is, in fact, the ERN.
#
# Some researchers suggest to construct the difference wave between ERPs for
# correct and incorrect responses, as it more clearly reveals signal
# differences, while ideally also improving the signal-to-noise ratio (under
# the assumption that the noise level in "correct" and "incorrect" trials is
# similar). Let's do just that and put it into a publication-ready
# visualization.
# difference wave: incorrect minus correct responses
resp_erp_diff = mne.combine_evoked([resp_erp_incorrect, resp_erp_correct],
weights=[1, -1])
fig, ax = plt.subplots()
resp_erp_diff.plot(picks='FCz', axes=ax, selectable=False, show=False)
# make ERP trace bolder
ax.lines[0].set_linewidth(1.5)
# add lines through origin
ax.axhline(0, ls='dotted', lw=0.75, color='gray')
ax.axvline(0, ls=(0, (10, 10)), lw=0.75, color='gray',
label='response trigger')
# mark trough
trough_time_idx = resp_erp_diff.copy().pick('FCz').data.argmin()
trough_time = resp_erp_diff.times[trough_time_idx]
ax.axvline(trough_time, ls=(0, (10, 10)), lw=0.75, color='red',
label='max. negativity')
# legend, axis labels, title
ax.legend(loc='lower left')
ax.set_xlabel('Time (s)', fontweight='bold')
ax.set_ylabel('Amplitude (µV)', fontweight='bold')
ax.set_title('Channel: FCz')
fig.suptitle('ERN (Difference Wave)', fontweight='bold')
fig
# %%
# References
# ^^^^^^^^^^
# .. footbibliography::
|
{
"content_hash": "16c30e91aaac1e3ec445a71b794d779d",
"timestamp": "",
"source": "github",
"line_count": 440,
"max_line_length": 79,
"avg_line_length": 43.127272727272725,
"alnum_prop": 0.7007799325463744,
"repo_name": "kingjr/mne-python",
"id": "270bb61532b1e4516d1a13cbca1971fed5f7acb1",
"size": "19034",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tutorials/epochs/40_autogenerate_metadata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "8008"
},
{
"name": "Jinja",
"bytes": "13067"
},
{
"name": "Makefile",
"bytes": "4528"
},
{
"name": "Python",
"bytes": "10062156"
},
{
"name": "Sass",
"bytes": "257"
},
{
"name": "Shell",
"bytes": "19906"
}
],
"symlink_target": ""
}
|
""" ExpandBuiltins replaces builtins by their full paths. """
from pythran.analyses import Globals, Locals
from pythran.passmanager import Transformation
from pythran.syntax import PythranSyntaxError
from pythran.tables import MODULES
import gast as ast
class ExpandBuiltins(Transformation):
    """
    Expands all builtins into full paths.

    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse("def foo(): return list()")
    >>> pm = passmanager.PassManager("test")
    >>> _, node = pm.apply(ExpandBuiltins, node)
    >>> print(pm.dump(backend.Python, node))
    def foo():
        return builtins.list()
    """

    def __init__(self):
        Transformation.__init__(self, Locals, Globals)

    def _qualify(self, attr, ctx):
        # Build an AST node for ``builtins.<attr>`` and mark the pass as
        # having changed the tree.
        self.update = True
        return ast.Attribute(
            ast.Name('builtins', ast.Load(), None, None),
            attr,
            ctx)

    def visit_NameConstant(self, node):
        # ``True``/``False``/``None`` become ``builtins.True`` etc.
        return self._qualify(str(node.value), ast.Load())

    def visit_Name(self, node):
        name = node.id
        # Only rewrite pure loads of names that are neither local nor global
        # but do exist among the builtins.
        if not isinstance(node.ctx, ast.Load):
            return node
        if name in self.locals[node] or name in self.globals:
            return node
        if name not in MODULES['builtins']:
            return node
        if name == 'getattr':
            raise PythranSyntaxError("You fool! Trying a getattr?", node)
        return self._qualify(name, node.ctx)
|
{
"content_hash": "ee05dc63d16f2b86961221620961d3f8",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 29.88,
"alnum_prop": 0.5870147255689424,
"repo_name": "pombredanne/pythran",
"id": "88f55c86b83cf9fad36bbecddf79948b9be0ca24",
"size": "1494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythran/transformations/expand_builtins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1366767"
},
{
"name": "Makefile",
"bytes": "1185"
},
{
"name": "Python",
"bytes": "1209572"
},
{
"name": "Shell",
"bytes": "264"
}
],
"symlink_target": ""
}
|
import os
from cffi import FFI
BASE_DEFINITIONS = """
typedef struct secp256k1_context_struct secp256k1_context;
typedef struct {
unsigned char data[64];
} secp256k1_pubkey;
typedef struct {
unsigned char data[64];
} secp256k1_ecdsa_signature;
typedef int (*secp256k1_nonce_function)(
unsigned char *nonce32,
const unsigned char *msg32,
const unsigned char *key32,
const unsigned char *algo16,
void *data,
unsigned int attempt
);
#define SECP256K1_FLAGS_TYPE_MASK 255
#define SECP256K1_FLAGS_TYPE_CONTEXT 1
#define SECP256K1_FLAGS_TYPE_COMPRESSION 2
#define SECP256K1_FLAGS_BIT_CONTEXT_VERIFY 256
#define SECP256K1_FLAGS_BIT_CONTEXT_SIGN 512
#define SECP256K1_FLAGS_BIT_COMPRESSION 256
#define SECP256K1_CONTEXT_VERIFY 257
#define SECP256K1_CONTEXT_SIGN 513
#define SECP256K1_CONTEXT_NONE 1
#define SECP256K1_EC_COMPRESSED 258
#define SECP256K1_EC_UNCOMPRESSED 2
secp256k1_context* secp256k1_context_create(
unsigned int flags
);
secp256k1_context* secp256k1_context_clone(
const secp256k1_context* ctx
);
void secp256k1_context_destroy(
secp256k1_context* ctx
);
void secp256k1_context_set_illegal_callback(
secp256k1_context* ctx,
void (*fun)(const char* message, void* data),
const void* data
);
void secp256k1_context_set_error_callback(
secp256k1_context* ctx,
void (*fun)(const char* message, void* data),
const void* data
);
int secp256k1_ec_pubkey_parse(
const secp256k1_context* ctx,
secp256k1_pubkey* pubkey,
const unsigned char *input,
size_t inputlen
);
int secp256k1_ec_pubkey_serialize(
const secp256k1_context* ctx,
unsigned char *output,
size_t *outputlen,
const secp256k1_pubkey* pubkey,
unsigned int flags
);
int secp256k1_ecdsa_signature_parse_compact(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature* sig,
const unsigned char *input64
);
int secp256k1_ecdsa_signature_parse_der(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature* sig,
const unsigned char *input,
size_t inputlen
);
int secp256k1_ecdsa_signature_serialize_der(
const secp256k1_context* ctx,
unsigned char *output,
size_t *outputlen,
const secp256k1_ecdsa_signature* sig
);
int secp256k1_ecdsa_signature_serialize_compact(
const secp256k1_context* ctx,
unsigned char *output64,
const secp256k1_ecdsa_signature* sig
);
int secp256k1_ecdsa_verify(
const secp256k1_context* ctx,
const secp256k1_ecdsa_signature *sig,
const unsigned char *msg32,
const secp256k1_pubkey *pubkey
);
int secp256k1_ecdsa_signature_normalize(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature *sigout,
const secp256k1_ecdsa_signature *sigin
);
extern const secp256k1_nonce_function secp256k1_nonce_function_rfc6979;
extern const secp256k1_nonce_function secp256k1_nonce_function_default;
int secp256k1_ecdsa_sign(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature *sig,
const unsigned char *msg32,
const unsigned char *seckey,
secp256k1_nonce_function noncefp,
const void *ndata
);
int secp256k1_ec_seckey_verify(
const secp256k1_context* ctx,
const unsigned char *seckey
);
int secp256k1_ec_pubkey_create(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey,
const unsigned char *seckey
);
int secp256k1_ec_privkey_tweak_add(
const secp256k1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak
);
int secp256k1_ec_pubkey_tweak_add(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey,
const unsigned char *tweak
);
int secp256k1_ec_privkey_tweak_mul(
const secp256k1_context* ctx,
unsigned char *seckey,
const unsigned char *tweak
);
int secp256k1_ec_pubkey_tweak_mul(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey,
const unsigned char *tweak
);
int secp256k1_context_randomize(
secp256k1_context* ctx,
const unsigned char *seed32
);
int secp256k1_ec_pubkey_combine(
const secp256k1_context* ctx,
secp256k1_pubkey *out,
const secp256k1_pubkey * const * ins,
size_t n
);
"""
EXTRAKEYS_DEFINITIONS = """
typedef struct {
unsigned char data[64];
} secp256k1_xonly_pubkey;
typedef struct {
unsigned char data[96];
} secp256k1_keypair;
int secp256k1_xonly_pubkey_parse(
const secp256k1_context* ctx,
secp256k1_xonly_pubkey* pubkey,
const unsigned char *input32
);
int secp256k1_xonly_pubkey_serialize(
const secp256k1_context* ctx,
unsigned char *output32,
const secp256k1_xonly_pubkey* pubkey
);
int secp256k1_xonly_pubkey_cmp(
const secp256k1_context* ctx,
const secp256k1_xonly_pubkey* pk1,
const secp256k1_xonly_pubkey* pk2
);
int secp256k1_xonly_pubkey_from_pubkey(
const secp256k1_context* ctx,
secp256k1_xonly_pubkey *xonly_pubkey,
int *pk_parity,
const secp256k1_pubkey *pubkey
);
int secp256k1_xonly_pubkey_tweak_add(
const secp256k1_context* ctx,
secp256k1_pubkey *output_pubkey,
const secp256k1_xonly_pubkey *internal_pubkey,
const unsigned char *tweak32
);
int secp256k1_xonly_pubkey_tweak_add_check(
const secp256k1_context* ctx,
const unsigned char *tweaked_pubkey32,
int tweaked_pk_parity,
const secp256k1_xonly_pubkey *internal_pubkey,
const unsigned char *tweak32
);
int secp256k1_keypair_create(
const secp256k1_context* ctx,
secp256k1_keypair *keypair,
const unsigned char *seckey
);
int secp256k1_keypair_sec(
const secp256k1_context* ctx,
unsigned char *seckey,
const secp256k1_keypair *keypair
);
int secp256k1_keypair_pub(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey,
const secp256k1_keypair *keypair
);
int secp256k1_keypair_xonly_pub(
const secp256k1_context* ctx,
secp256k1_xonly_pubkey *pubkey,
int *pk_parity,
const secp256k1_keypair *keypair
);
int secp256k1_keypair_xonly_tweak_add(
const secp256k1_context* ctx,
secp256k1_keypair *keypair,
const unsigned char *tweak32
);
"""
RECOVERY_DEFINITIONS = """
typedef struct {
unsigned char data[65];
} secp256k1_ecdsa_recoverable_signature;
int secp256k1_ecdsa_recoverable_signature_parse_compact(
const secp256k1_context* ctx,
secp256k1_ecdsa_recoverable_signature* sig,
const unsigned char *input64,
int recid
);
int secp256k1_ecdsa_recoverable_signature_convert(
const secp256k1_context* ctx,
secp256k1_ecdsa_signature* sig,
const secp256k1_ecdsa_recoverable_signature* sigin
);
int secp256k1_ecdsa_recoverable_signature_serialize_compact(
const secp256k1_context* ctx,
unsigned char *output64,
int *recid,
const secp256k1_ecdsa_recoverable_signature* sig
);
int secp256k1_ecdsa_sign_recoverable(
const secp256k1_context* ctx,
secp256k1_ecdsa_recoverable_signature *sig,
const unsigned char *msg32,
const unsigned char *seckey,
secp256k1_nonce_function noncefp,
const void *ndata
);
int secp256k1_ecdsa_recover(
const secp256k1_context* ctx,
secp256k1_pubkey *pubkey,
const secp256k1_ecdsa_recoverable_signature *sig,
const unsigned char *msg32
);
"""
SCHNORRSIG_DEFINITIONS = """
typedef int (*secp256k1_nonce_function_hardened)(
unsigned char *nonce32,
const unsigned char *msg,
size_t msglen,
const unsigned char *key32,
const unsigned char *xonly_pk32,
const unsigned char *algo,
size_t algolen,
void *data
);
extern const secp256k1_nonce_function_hardened secp256k1_nonce_function_bip340;
typedef struct {
unsigned char magic[4];
secp256k1_nonce_function_hardened noncefp;
void* ndata;
} secp256k1_schnorrsig_extraparams;
int secp256k1_schnorrsig_sign(
const secp256k1_context* ctx,
unsigned char *sig64,
const unsigned char *msg32,
const secp256k1_keypair *keypair,
const unsigned char *aux_rand32
);
int secp256k1_schnorrsig_sign32(
const secp256k1_context* ctx,
unsigned char *sig64,
const unsigned char *msg32,
const secp256k1_keypair *keypair,
const unsigned char *aux_rand32
);
int secp256k1_schnorrsig_sign_custom(
const secp256k1_context* ctx,
unsigned char *sig64,
const unsigned char *msg,
size_t msglen,
const secp256k1_keypair *keypair,
secp256k1_schnorrsig_extraparams *extraparams
);
int secp256k1_schnorrsig_verify(
const secp256k1_context* ctx,
const unsigned char *sig64,
const unsigned char *msg,
size_t msglen,
const secp256k1_xonly_pubkey *pubkey
);
"""
ECDH_DEFINITIONS = """
int secp256k1_ecdh(
const secp256k1_context* ctx,
unsigned char *result,
const secp256k1_pubkey *pubkey,
const unsigned char *privkey,
void *hashfp,
void *data
);
"""
# Build the FFI interface from the C declaration blocks above; each cdef()
# call registers one feature group (base ECDSA, extrakeys, recovery,
# schnorrsig, ECDH) with cffi's parser.
ffi = FFI()
ffi.cdef(BASE_DEFINITIONS)
ffi.cdef(EXTRAKEYS_DEFINITIONS)
ffi.cdef(RECOVERY_DEFINITIONS)
ffi.cdef(SCHNORRSIG_DEFINITIONS)
ffi.cdef(ECDH_DEFINITIONS)

# Resolve the bundled Windows shared library relative to this module so the
# import works regardless of the process's current working directory.
here = os.path.dirname(os.path.abspath(__file__))
lib = ffi.dlopen(os.path.join(here, 'libsecp256k1.dll'))
|
{
"content_hash": "a0cf422f81219cfffae9efeda7925927",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 79,
"avg_line_length": 23.909574468085108,
"alnum_prop": 0.7261401557285874,
"repo_name": "ofek/coincurve",
"id": "34115a7fb6bf06170aac4b85430d8cb935629682",
"size": "8990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coincurve/_windows_libsecp256k1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8538"
},
{
"name": "Python",
"bytes": "72152"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import sys
import subprocess
import shutil
def setup_parser(parser):
    """Register the ``--services`` flag in the installer's "what" group."""
    parser.what_group.add_argument(
        '--services',
        action='store_true',
        help="Install macOS services.",
    )
def main(args):
    """Install the macOS services into each requested home directory.

    ``args`` is the parsed installer namespace: ``all`` / ``services``
    toggle this step, ``home`` is an iterable of home directories, and the
    rest is forwarded to the per-home installer. Returns ``True`` when an
    install was attempted, ``None`` when the step was skipped.
    """
    if not (args.all or args.services):
        return
    for home in args.home:
        _install_services(home, args)
    if sys.platform == 'darwin':
        print("Refreshing local services.")
        # `pbs -flush` rebuilds the Services registry so newly copied
        # services show up without requiring a logout.
        subprocess.check_call(['/System/Library/CoreServices/pbs', '-flush'])
    return True
def _install_services(home, args):
sgactions_root = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
src_dir = os.path.join(sgactions_root, 'sgactions', 'platforms', 'darwin', 'Services')
dst_dir = os.path.join(home, 'Library', 'Services')
print("Installing macOS services into:", dst_dir)
if not os.path.exists(dst_dir):
print(" WARNING: directory doesn't exist.")
return
if args.dry_run:
return
service_names = os.listdir(src_dir)
service_names = [x for x in service_names if not x.startswith('.')]
for service_name in service_names:
src = os.path.join(src_dir, service_name)
dst = os.path.join(dst_dir, service_name)
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
|
{
"content_hash": "feecaefe7d8ad847ed79011df5f34eaa",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 90,
"avg_line_length": 26.384615384615383,
"alnum_prop": 0.6209912536443148,
"repo_name": "vfxetc/sgactions",
"id": "6aa3cdf2ca42e667e1e327062bfb7597eef1f571",
"size": "1372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sgactions/install/services.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55989"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "JavaScript",
"bytes": "30264"
},
{
"name": "Makefile",
"bytes": "389"
},
{
"name": "Python",
"bytes": "54124"
},
{
"name": "Shell",
"bytes": "1843"
}
],
"symlink_target": ""
}
|
"""
=========================================================================
Non-parametric between conditions cluster statistic on single trial power
=========================================================================
This script shows how to compare clusters in time-frequency
power estimates between conditions. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs for 2 conditions
- compute single trial power estimates
- baseline line correct the power estimates (power ratios)
- compute stats to see if the power estimates are significantly different
between conditions.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332'  # restrict example to one channel
# Load condition 1
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
                                picks=picks, baseline=(None, 0),
                                reject=reject, preload=True)
epochs_condition_1.pick_channels([ch_name])
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
                                picks=picks, baseline=(None, 0),
                                reject=reject, preload=True)
epochs_condition_2.pick_channels([ch_name])
###############################################################################
# Factor to downsample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 2
freqs = np.arange(7, 30, 3)  # define frequencies of interest
n_cycles = 1.5
# Single-trial time-frequency power (average=False keeps one TFR per epoch).
tfr_epochs_1 = tfr_morlet(epochs_condition_1, freqs,
                          n_cycles=n_cycles, decim=decim,
                          return_itc=False, average=False)
tfr_epochs_2 = tfr_morlet(epochs_condition_2, freqs,
                          n_cycles=n_cycles, decim=decim,
                          return_itc=False, average=False)
# Baseline-correct as power ratios relative to the pre-stimulus interval.
tfr_epochs_1.apply_baseline(mode='ratio', baseline=(None, 0))
tfr_epochs_2.apply_baseline(mode='ratio', baseline=(None, 0))
epochs_power_1 = tfr_epochs_1.data[:, 0, :, :]  # only 1 channel as 3D matrix
epochs_power_2 = tfr_epochs_2.data[:, 0, :, :]  # only 1 channel as 3D matrix
###############################################################################
# Compute statistic
# -----------------
# Fixed cluster-forming threshold on the test statistic; only 100
# permutations to keep the example fast -- increase for real analyses.
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([epochs_power_1, epochs_power_2], out_type='mask',
                             n_permutations=100, threshold=threshold, tail=0)
###############################################################################
# View time-frequency plots
# -------------------------
times = 1e3 * epochs_condition_1.times  # change unit to ms
evoked_condition_1 = epochs_condition_1.average()
evoked_condition_2 = epochs_condition_2.average()
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
# Create new stats image with only significant clusters (p <= 0.05);
# non-significant bins stay NaN so only the gray underlay shows there.
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
    if p_val <= 0.05:
        T_obs_plot[c] = T_obs[c]
plt.imshow(T_obs,
           extent=[times[0], times[-1], freqs[0], freqs[-1]],
           aspect='auto', origin='lower', cmap='gray')
plt.imshow(T_obs_plot,
           extent=[times[0], times[-1], freqs[0], freqs[-1]],
           aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
ax2 = plt.subplot(2, 1, 2)
# Condition contrast (1 minus 2) for the evoked responses.
evoked_contrast = mne.combine_evoked([evoked_condition_1, evoked_condition_2],
                                     weights=[1, -1])
evoked_contrast.plot(axes=ax2, time_unit='s')
plt.show()
|
{
"content_hash": "6b4262583845832025e2955697731de4",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 79,
"avg_line_length": 36.13333333333333,
"alnum_prop": 0.5945059450594506,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "0d84eb96d9cdb62a6c762417318ea3ef1f4aa689",
"size": "4878",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "0.21/_downloads/d6d0db28086fd1732c34960f28cf6830/plot_stats_cluster_time_frequency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from nltk.sem import Valuation, Model, Assignment, evaluate_sents
# Demo: evaluate a Hungarian sentence against a tiny first-order model.
if __name__ == '__main__':
    # Domain entities and their properties ('ember' ~ person, 'asztal' ~
    # table -- translations assumed, verify). A '!'-prefixed name marks
    # the negation of a property for that entity set.
    valuation_describe = [
        ('ember', 'e'),
        ('asztal', 'a'),
        (['alive', set('e')]),
        (['!alive', set('a')]),
        (['agent', set('e')]),
        (['!agent', set('a')])
    ]
    valuation = Valuation(valuation_describe)
    valuation_province = Assignment(valuation.domain)
    model = Model(valuation.domain, valuation)
    # Sentence under test ('egy asztal fut' ~ 'a table runs' -- verify).
    sentence = 'egy asztal fut'
    res = evaluate_sents([sentence], 'file:data/semantic.fcfg', model, valuation_province)
    for i in res:
        # Each result row holds (parse tree, semantic formula, truth value).
        print("The sentence: '{}'".format(sentence))
        print("The parsed tree: '{}'".format(i[0][0]))
        print("The semantic formula: '{}'".format(i[0][1]))
        print("The semantic value: '{}'".format(i[0][2]))
|
{
"content_hash": "0005bd81ed58a7d4c392a0a644fd5ad3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 90,
"avg_line_length": 33.03846153846154,
"alnum_prop": 0.5657741559953434,
"repo_name": "davidpgero/hungarian-nltk",
"id": "d67811f7d488326ecb3c5b7ccd169bf67f844ed6",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/semantic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
}
|
import itertools
import json
import event_insight_lib
import backend
class ConceptModel:
    """Container for a user's concept model.

    A concept model maps concept names (Wikipedia article titles) to
    relevance scores. ``maturity`` counts how many models have been merged
    into this one and weights the running averages in ``iterateModel``.
    """
    def __init__(self, model=None, maturity=1):
        # ``model=None`` (not ``model=dict()``) avoids the classic
        # mutable-default-argument bug: with a dict default, every
        # instance created without a model would share one dictionary.
        self.model = {} if model is None else model
        self.maturity = maturity
    def loadModel(self, email, filename='accounts.json'):
        """Given the email of a registered user, load that user's model out
        of the accounts list stored in ``filename``."""
        # ``with`` guarantees the handle is closed (the old code leaked it).
        with open(filename) as fp:
            list_of_users = json.load(fp)['accounts']
        for user in list_of_users:
            if user['email'] == email:
                self.model = user['model']['concepts']
                break
    def saveModel(self, filename='accounts.json'):
        """Save this model back into the accounts list stored in ``filename``.

        NOTE(review): relies on ``self.email`` being set by the caller;
        neither ``__init__`` nor ``loadModel`` sets it -- confirm upstream.
        """
        with open(filename) as infile:
            data = json.load(infile)
        for account in data['accounts']:
            if account['email'] == self.email:
                # Mutating the nested dict mutates ``data`` in place.
                account['model']['concepts'] = self.model
                break
        # Re-encode and save the modified file.
        with open(filename, 'w') as outfile:
            json.dump(data, outfile, indent=4)
    def iterateModel(self, merger_concept_model, cutoff=0.2, mean=0.5):
        """Merge ``merger_concept_model`` into this model with a running average.

        The merged model is re-centered around ``mean`` and concepts whose
        rebalanced relevance is <= ``cutoff`` are dropped.  ``maturity``
        becomes the sum of both inputs' maturities.
        """
        # Rank the concepts in each model into two ordered key lists.
        own_keys = sorted(self.model.keys())
        merger_keys = sorted(merger_concept_model.model.keys())
        # Placeholder model; its maturity is the sum of both inputs'.
        merged = ConceptModel(maturity=self.maturity + merger_concept_model.maturity)
        # Walk both key lists in lockstep, merging relevancies as we go.
        # TODO(review): the weighted average divides by ``self.maturity``
        # only, not the combined maturity -- confirm this is intended.
        for own_key, merger_key in itertools.zip_longest(own_keys, merger_keys,
                                                         fillvalue=None):
            if own_key == merger_key:
                # Concept present in both models: maturity-weighted average.
                left = self.model[own_key]
                right = merger_concept_model.model[own_key]
                combined = (left * self.maturity +
                            right * merger_concept_model.maturity) / self.maturity
                merged.model[own_key] = round(combined, 3)
            else:
                # Keys differ at this position: carry each side over alone.
                if own_key is not None:
                    relevance = self.model[own_key]
                    merged.model[own_key] = round(
                        (relevance * self.maturity) / self.maturity, 3)
                if merger_key is not None:
                    relevance = merger_concept_model.model[merger_key]
                    merged.model[merger_key] = round(
                        (relevance * merger_concept_model.maturity) / self.maturity, 3)
        # Re-center the merged relevancies around ``mean``.
        merged.remean(mean)
        # Drop concepts whose relevance fell to or below the cutoff.
        irrelevants = [key for key in merged.model if merged.model[key] <= cutoff]
        for key in irrelevants:
            del merged.model[key]
        # Adopt the merged, pruned model.
        self.maturity = merged.maturity
        self.model = merged.model
    def remean(self, mean=0.5):
        """Rebalance relevancies so their average equals ``mean``.

        Returns the (mutated) model dict; called by ``iterateModel``.
        The usual float-representation caveats apply when displaying
        values, e.g. use ``'%.3f' % round(v, 3)``.
        """
        keys = self.model.keys()
        size = len(keys)
        if size == 0:
            return self.model
        total = sum(self.model[key] for key in keys)
        current_mean = total / size
        # Same shift for every key, so it can be computed once.
        shift = round(mean - current_mean, 3)
        for key in keys:
            self.model[key] += shift
        return self.model
    def addUserInputToConceptModel(self, user_input, cutoff=0.2):
        """Resolve free-form user input into related concepts and merge them.

        User input must first be resolved to the nearest Wikipedia article
        title.  Returns True when annotation succeeds; returns None (falsy)
        when the annotation service finds no concepts.
        """
        # Fetch the precise node name (article title) for the input text.
        concept_node = event_insight_lib.annotateText(user_input, backend.getToken())
        if 'annotations' in concept_node.keys() and len(concept_node['annotations']) != 0:
            concept_node_title = concept_node['annotations'][0]['concept']['label']
            related_concepts = event_insight_lib.fetchRelatedConcepts(
                concept_node_title, backend.getToken())
            model = backend.parseRawConceptCall(related_concepts, cutoff)
            self.iterateModel(ConceptModel(model=model, maturity=1))
            return True
        else:
            return None
    def addEventToConceptModel(self, event_text, cutoff=0.2):
        """Annotate known event text and merge the resulting concepts in.

        Unlike ``addUserInputToConceptModel`` no verification step is
        needed, and a different parser handles the raw annotation output.
        """
        merger_model = ConceptModel(model=backend.parseRawEventCall(
            event_insight_lib.annotateText(event_text, backend.getToken()), cutoff))
        self.iterateModel(merger_model)
    def addExplodedConceptToConceptModel(self, concept, cutoff=0.2):
        """Placeholder for merging an already-resolved ('exploded') concept."""
        pass
|
{
"content_hash": "f7b98903dba5d9eb98bb35491e8c2cac",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 134,
"avg_line_length": 45.359375,
"alnum_prop": 0.7218394764037203,
"repo_name": "ResidentMario/cultural-insight",
"id": "e6cc24382470db47417bf0b2833d1ebec9cf08ce",
"size": "5806",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "conceptmodel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2184"
},
{
"name": "HTML",
"bytes": "11848"
},
{
"name": "Python",
"bytes": "46311"
}
],
"symlink_target": ""
}
|
"""
Created on Sat Sep 17 04:26:02 2016
@author: naman
"""
import multiprocessing
import json
from operator import itemgetter
import os
import gensim, logging
from gensim.models.doc2vec import TaggedDocument
from gensim.models.doc2vec import LabeledSentence
import re
import numpy as np
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#def get_text(path_to_json_file):
# with open(path_to_json_file, 'r') as fp:
# script=json.load(fp)
#
# script_text = list()
# for i in range(1, len(script)+1):
# scene = script[str(i)]
# list_dialogues = scene['char_dialogues']
# list_desc = scene['scene_descriptons_list']
# list_scene = list_dialogues + list_desc
# list_scene = sorted(list_scene)
# list_scene = [l[1:] for l in list_scene ]
# list_scene = [' '.join(l) for l in list_scene]
# text = ' . '.join(list_scene).encode('utf-8')
# if len(text.split()) > 10:
# script_text.append(text)
# return script_text
def get_text(directory, filename):
    """Read a script file and split it into 200 roughly equal partitions.

    Lines are stripped of tag-like markup (``<...>``) and surrounding
    whitespace, and empty lines are dropped.  Returns a list of
    (index, partition_text) pairs, or the 2-element list ``[-1, '']``
    when the script has fewer than 200 usable lines (callers test
    ``result[0] == -1`` for this case).
    """
    # ``with`` closes the handle even on error (the original leaked it).
    with open(os.path.join(directory, filename), 'r') as fp:
        raw_lines = fp.readlines()
    # Strip markup tags, trailing newline characters and stray whitespace.
    script = [re.sub('<.*?>', '', line).rstrip('\n').rstrip('\r').strip()
              for line in raw_lines]
    script = [line for line in script if line != '']
    # Floor division keeps this correct on both Python 2 and 3: the
    # original ``/`` yields a float under Python 3 and breaks range().
    partition_size = len(script) // 200
    if partition_size == 0:
        return list((-1, ''))
    partitions = []
    partition = ''
    index = 0
    for i in range(0, 200):
        for j in range(i * partition_size, (i + 1) * partition_size):
            partition += ' ' + script[j]
        index = partition_size * (i + 1) - 1
        partitions.append(partition)
        # If the last consumed line is ALL-CAPS (looks like a scene
        # heading), carry it over so it also opens the next partition.
        letters_only = ''.join(ch for ch in script[index] if ch.isalpha())
        if letters_only.isupper():
            partition = script[index]
        else:
            partition = ''
    if len(script) % 200 != 0:
        # Append the leftover tail to the last partition.
        # NOTE(review): slicing from ``index`` re-includes the last
        # consumed line -- confirm that duplication is intended.
        partition = ''
        for line in script[index:]:
            partition += line + ' . '
        partitions[-1] += partition
    return list(enumerate(partitions))
# Gather every IMSDb script, partition it, and build (label, scene) pairs.
directory='/home/naman/SNLP/imsdb'
text_files=os.listdir(directory)
scripts_text=list()
scripts_text1=list()
for f in text_files:
    # f[:-4] drops the extension (presumably '.txt'); commas are removed
    # from movie names so they can be used as labels.
    scripts_text.append((f[:-4].replace(',',''), get_text(directory,f)))
    scripts_text1.append(f[:-4])
all_scripts=list()
for movie in scripts_text:
    # get_text signals 'too short to partition' with a leading -1.
    if movie[1][0] == -1:
        continue
    all_scripts += ([("%s_%d"% (movie[0],scene_num), scene) for scene_num, scene in movie[1]])
# One-off doc2vec training code, kept for reference; the trained model is
# loaded from disk below.
#==============================================================================
# docLabels = [t[0] for t in all_scripts]
# docs = [t[1] for t in all_scripts]
#
# def remove_punc(text):
#     punc=['.',',','!','?']
#     new_text=''.join(e for e in text if e not in punc)
#     return new_text
#
# class DocIterator(object):
#
#     #SPLIT_SENTENCES = re.compile(u"[.!?:]\s+") # split sentences on these characters
#
#     def __init__(self, doc_list, labels_list):
#         self.labels_list = labels_list
#         self.doc_list = doc_list
#     def __iter__(self):
#         for idx, doc in enumerate(self.doc_list):
#             yield TaggedDocument(words=remove_punc(doc),tags=[self.labels_list[idx]])
#
# it=DocIterator(docs, docLabels)
#
# model = gensim.models.Doc2Vec(size=300,
#                 window=10,
#                 min_count=1,
#                 workers=3,
#                 alpha=0.025,
#                 min_alpha=0.025) # use fixed learning rate
#
# model.build_vocab(it)
#
# for epoch in range(10):
#     model.train(it)
#     model.alpha -= 0.002 # decrease the learning rate
#     model.min_alpha = model.alpha # fix the learning rate, no deca
#     model.train(it)
#     print "Epoch %d Completed" % epoch
#
# model.save('/home/naman/SNLP/imsdb_doc2vec_scriptpartitions_nopunc.model')
#
#==============================================================================
# Load the pre-trained doc2vec model and summarize each movie's 200
# partition vectors by their centroid and per-dimension variance.
model = gensim.models.Doc2Vec.load('/home/naman/SNLP/imsdb_doc2vec_scriptpartitions.model')
centroid_vectors=list()
variance_vectors=list()
ii=-1
scripts_text2=list()
for movie,_ in scripts_text:
    ii+=1
    movie_labels=[movie+'_%d' % i for i in range(0,200)]
    try:
        # KeyError here means the movie was never trained into the model.
        vectors=model.docvecs[movie_labels]
        scripts_text2.append(scripts_text1[ii])
    except KeyError:
        continue
    centroid=np.mean(vectors,axis=0)
    centroid_vectors.append((movie,centroid))
    # Per-dimension variance: diagonal of the covariance matrix.
    variance=np.diag(np.cov(vectors.T))
    variance_vectors.append((movie, variance))
#central_vectors=np.array([c[1] for c in central_vectors])
import pandas as pd
# 300 doc2vec dimensions per movie (matches the trained vector size).
cols=range(300)
var_vectors=[[v[0]]+list(v[1]) for v in variance_vectors]
mean_vectors=[[m[0]]+list(m[1]) for m in centroid_vectors]
df_m=pd.DataFrame(mean_vectors,columns=['Movie']+cols)
df_v=pd.DataFrame(var_vectors,columns=['Movie']+cols)
from sklearn.decomposition import PCA
# Reduce both the centroid and variance features to 5 components each.
mean_components=5
var_components=5
pca = PCA(n_components=mean_components)
pca.fit(df_m.ix[:,1:])
X = pca.transform(df_m.ix[:,1:])
dim_reduced=zip([v[0] for v in mean_vectors], X)
dim_reduced=[[v[0]]+list(v[1]) for v in dim_reduced]
df_mean=pd.DataFrame(dim_reduced,columns=['Movie']+range(mean_components))
pca = PCA(n_components=var_components)
pca.fit(df_v.ix[:,1:])
X = pca.transform(df_v.ix[:,1:])
dim_reduced=zip([v[0] for v in var_vectors], X)
dim_reduced=[[v[0]]+list(v[1]) for v in dim_reduced]
df_var=pd.DataFrame(dim_reduced,columns=['Movie']+range(var_components))
# df: PCA-reduced features; df_full: all 600 raw features.
df=pd.merge(df_mean,df_var, on='Movie')
df_full=pd.merge(df_m, df_v, on='Movie')
# Ratings and their population statistics drive the -1/0/+1 labeling below.
ratings=pd.read_pickle('/home/naman/SNLP/ratings.pkl')['ratings']
ratings=list(ratings.items())
r_mean=np.mean([t[1] for t in ratings])
r_var=np.var([t[1] for t in ratings])
def label(a, m, v):
    """Map value ``a`` to +1/-1/0 depending on where it falls relative to
    the band [m - v, m + v]: above it, below it, or inside it."""
    upper = m + v
    lower = m - v
    if a > upper:
        return 1
    if a < lower:
        return -1
    return 0
# Label every movie -1/0/+1 relative to the rating mean +/- variance band.
ratings_labels=[(r[0],label(r[1], r_mean, r_var)) for r in ratings]
df_ratings=pd.DataFrame(ratings_labels, columns=['Movie', 'Rating'])
df['Movie']=scripts_text2
df_full['Movie']=scripts_text2
df_final_full=pd.merge(df_full, df_ratings, on='Movie')
# Feature sets produced by other pipeline stages, loaded from pickles;
# each gets a distinct column-number range so the merges don't collide.
df_Shankar=pd.read_pickle('/home/naman/SNLP/char_net_final_II.pkl')
df_Shankar.columns=['Movie']+range(8)
df_Jar=pd.read_pickle('/home/naman/SNLP/emotion_binwise2.pkl')
df_Jar.columns=['Movie']+range(100,110)
df_Shankar2=pd.read_pickle('/home/naman/SNLP/topic_overlap.pkl')
df_Shankar2.columns=['Movie']+range(200,210)
df_Shankar=pd.merge(df_Shankar, df_ratings, on='Movie')
df_Shankar2=pd.merge(df_Shankar2, df_ratings, on='Movie')
df_Jar=pd.merge(df_Jar, df_ratings, on='Movie')
df_Naman=pd.merge(df, df_ratings, on='Movie')
# df_final: every feature set joined (the .ix[:,:-1] slices drop each
# frame's duplicate Rating column before joining), plus the labels.
df_final=pd.merge(df_Naman.ix[:,:-1], df_Shankar.ix[:,:-1], on='Movie')
df_final=pd.merge(df_final, df_Shankar2.ix[:,:-1], on='Movie')
df_final=pd.merge(df_final, df_Jar.ix[:,:-1], on='Movie')
df_final=pd.merge(df_final, df_ratings, on='Movie')
# X/Y splits: features are all columns between 'Movie' and 'Rating'.
X_Naman=df_Naman.ix[:,1:-1]
Y_Naman=df_Naman.ix[:,-1]
X_Shankar=df_Shankar.ix[:,1:-1]
Y_Shankar=df_Shankar.ix[:,-1]
X_Shankar2=df_Shankar2.ix[:,1:-1]
Y_Shankar2=df_Shankar2.ix[:,-1]
X_Jar=df_Jar.ix[:,1:-1]
Y_Jar=df_Jar.ix[:,-1]
pd.to_pickle(df_Naman, '/home/naman/SNLP/FinalVectors_D2V.pkl')
pd.to_pickle(df_Naman.ix[:,:-1], '/home/naman/SNLP/FinalVectors_Naman.pkl')
pd.to_pickle(df_final_full, '/home/naman/SNLP/FinalVectors_Full_D2V.pkl')
X_final=df_final.ix[:,1:-1]
Y_final=df_final.ix[:,-1]
from sklearn.svm import SVC
from sklearn.cross_validation import cross_val_score
# 10-fold cross-validation accuracy for each individual feature set and
# for the combined one.
clf = SVC()
scores_Naman = cross_val_score(clf, X_Naman, Y_Naman, cv=10)
scores_Shankar = cross_val_score(clf, X_Shankar, Y_Shankar, cv=10)
# FIX: originally this line reused X_Shankar/Y_Shankar (copy-paste), so
# the 'TopicOverlap' column duplicated 'CharacterNetworks'.
scores_Shankar2 = cross_val_score(clf, X_Shankar2, Y_Shankar2, cv=10)
scores_Jar = cross_val_score(clf, X_Jar, Y_Jar, cv=10)
scores_final = cross_val_score(clf, X_final, Y_final, cv=10)
scores=np.vstack((scores_Shankar, scores_Shankar2, scores_Jar, scores_Naman)).T
scores=pd.DataFrame(scores, columns=['CharacterNetworks','TopicOverlap','EmotionAnalysis','Doc2Vec'])
from sklearn.cross_validation import train_test_split
#X_train, X_test, Y_train, Y_test = train_test_split(X_final, Y_final, stratify=Y_final)
# Fit on the full combined feature set and compute a confusion matrix.
# NOTE(review): prediction is on the training data itself, so ``result``
# reports training (optimistic) performance, not generalization.
clf1=SVC()
clf1.fit(X_final, Y_final)
Y_pred=clf1.predict(X_final)
from sklearn.metrics import confusion_matrix
result=confusion_matrix(Y_final, Y_pred)
# 2-D t-SNE embedding of the full per-movie feature vectors.
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
plt.style.use('ggplot')
tsne_model = TSNE(n_components=2, random_state=0)
#tsne_op = tsne_model.fit_transform(np.array([c[1] for c in variance_vectors]))
tsne_op = tsne_model.fit_transform(np.array(df_final_full.ix[:,1:-1]))
plt.figure(figsize=(10,10))
plt.scatter(tsne_op[:,0], tsne_op[:,1])
plt.show()
#plotting some movies document vectors for all scenes
movie= 'Terminator Salvation'
movie_labels=[movie+'_%d' % i for i in range(0,200)]
vectors=model.docvecs[movie_labels]
tsne_model = TSNE(n_components=2, random_state=0)
tsne_op = tsne_model.fit_transform(vectors)
plt.figure(figsize=(10,10))
plt.scatter(tsne_op[:,0], tsne_op[:,1])
plt.show()
|
{
"content_hash": "6b1bc23e626e4ac7d6179cdd0871d65c",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 101,
"avg_line_length": 31.954063604240282,
"alnum_prop": 0.6295477164657747,
"repo_name": "njordsir/Movie-Script-Analysis",
"id": "7ad1a02b21254c3d2bc2321d3db3b3d841f844ba",
"size": "9067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Doc2Vec and Classification/doc2vec_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1684666"
},
{
"name": "Python",
"bytes": "39950"
}
],
"symlink_target": ""
}
|
"""
Classes that implement ASN.1 data structures.
"""
from __future__ import absolute_import
from scapy.asn1.asn1 import ASN1_Class_UNIVERSAL, ASN1_NULL, ASN1_Error, \
ASN1_Object, ASN1_INTEGER
from scapy.asn1.ber import BER_tagging_dec, BER_Decoding_Error, BER_id_dec, \
BER_tagging_enc
from scapy.volatile import RandInt, RandChoice, RandNum, RandString, RandOID, \
GeneralizedTime
from scapy.compat import orb, raw
from scapy.base_classes import BasePacket
from scapy.utils import binrepr
from scapy import packet
from functools import reduce
import scapy.modules.six as six
from scapy.modules.six.moves import range
class ASN1F_badsequence(Exception):
    """Raised when bytes cannot be dissected as the expected ASN.1 sequence."""
    pass
class ASN1F_element(object):
    """Marker base class for everything usable inside an ASN1_Packet root."""
    pass
##########################
# Basic ASN1 Field #
##########################
class ASN1F_field(ASN1F_element):
    """Base class for all ASN.1 fields.
    Handles the conversions between internal ('i'), human ('h') and
    machine/wire ('m') representations, plus BER implicit/explicit
    tagging on both encode and decode.
    """
    holds_packets = 0
    islist = 0
    ASN1_tag = ASN1_Class_UNIVERSAL.ANY
    context = ASN1_Class_UNIVERSAL
    def __init__(self, name, default, context=None,
                 implicit_tag=None, explicit_tag=None,
                 flexible_tag=False):
        self.context = context
        self.name = name
        if default is None:
            self.default = None
        elif isinstance(default, ASN1_NULL):
            self.default = default
        else:
            # Wrap plain Python values into the matching ASN1 object.
            self.default = self.ASN1_tag.asn1_object(default)
        self.flexible_tag = flexible_tag
        # A field may be re-tagged implicitly or explicitly, never both.
        if (implicit_tag is not None) and (explicit_tag is not None):
            err_msg = "field cannot be both implicitly and explicitly tagged"
            raise ASN1_Error(err_msg)
        self.implicit_tag = implicit_tag
        self.explicit_tag = explicit_tag
        # network_tag gets useful for ASN1F_CHOICE
        self.network_tag = implicit_tag or explicit_tag or self.ASN1_tag
    def i2repr(self, pkt, x):
        # internal value -> display string
        return repr(x)
    def i2h(self, pkt, x):
        # internal value -> human value (identity here)
        return x
    def any2i(self, pkt, x):
        # any user-supplied value -> internal value (identity here)
        return x
    def m2i(self, pkt, s):
        """
        The good thing about safedec is that it may still decode ASN1
        even if there is a mismatch between the expected tag (self.ASN1_tag)
        and the actual tag; the decoded ASN1 object will simply be put
        into an ASN1_BADTAG object. However, safedec prevents the raising of
        exceptions needed for ASN1F_optional processing.
        Thus we use 'flexible_tag', which should be False with ASN1F_optional.
        Regarding other fields, we might need to know whether encoding went
        as expected or not. Noticeably, input methods from cert.py expect
        certain exceptions to be raised. Hence default flexible_tag is False.
        """
        diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
                                      implicit_tag=self.implicit_tag,
                                      explicit_tag=self.explicit_tag,
                                      safe=self.flexible_tag)
        if diff_tag is not None:
            # this implies that flexible_tag was True
            if self.implicit_tag is not None:
                self.implicit_tag = diff_tag
            elif self.explicit_tag is not None:
                self.explicit_tag = diff_tag
        codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
        if self.flexible_tag:
            return codec.safedec(s, context=self.context)
        else:
            return codec.dec(s, context=self.context)
    def i2m(self, pkt, x):
        # internal value -> wire bytes, then re-tag for the network.
        if x is None:
            return b""
        if isinstance(x, ASN1_Object):
            if (self.ASN1_tag == ASN1_Class_UNIVERSAL.ANY or
                x.tag == ASN1_Class_UNIVERSAL.RAW or
                x.tag == ASN1_Class_UNIVERSAL.ERROR or
                self.ASN1_tag == x.tag):
                s = x.enc(pkt.ASN1_codec)
            else:
                raise ASN1_Error("Encoding Error: got %r instead of an %r for field [%s]" % (x, self.ASN1_tag, self.name))  # noqa: E501
        else:
            s = self.ASN1_tag.get_codec(pkt.ASN1_codec).enc(x)
        return BER_tagging_enc(s, implicit_tag=self.implicit_tag,
                               explicit_tag=self.explicit_tag)
    def extract_packet(self, cls, s):
        # Dissect s as a cls packet; return (packet_or_None, leftover bytes).
        if len(s) > 0:
            try:
                c = cls(s)
            except ASN1F_badsequence:
                c = packet.Raw(s)
            cpad = c.getlayer(packet.Raw)
            s = b""
            if cpad is not None:
                # Undissected padding becomes the leftover bytes.
                s = cpad.load
                del(cpad.underlayer.payload)
            return c, s
        else:
            return None, s
    def build(self, pkt):
        return self.i2m(pkt, getattr(pkt, self.name))
    def dissect(self, pkt, s):
        # Decode, store the value on the packet, return the remainder.
        v, s = self.m2i(pkt, s)
        self.set_val(pkt, v)
        return s
    def do_copy(self, x):
        # Delegate to .copy() when available; copy lists element-wise so
        # contained packets are duplicated too.
        if hasattr(x, "copy"):
            return x.copy()
        if isinstance(x, list):
            x = x[:]
            for i in range(len(x)):
                if isinstance(x[i], BasePacket):
                    x[i] = x[i].copy()
        return x
    def set_val(self, pkt, val):
        setattr(pkt, self.name, val)
    def is_empty(self, pkt):
        return getattr(pkt, self.name) is None
    def get_fields_list(self):
        return [self]
    def __str__(self):
        return repr(self)
    def randval(self):
        # Default fuzzing value; subclasses override with type-aware ones.
        return RandInt()
############################
# Simple ASN1 Fields #
############################
class ASN1F_BOOLEAN(ASN1F_field):
    ASN1_tag = ASN1_Class_UNIVERSAL.BOOLEAN
    def randval(self):
        # Fuzzing value: either boolean, equally likely.
        return RandChoice(True, False)
class ASN1F_INTEGER(ASN1F_field):
    ASN1_tag = ASN1_Class_UNIVERSAL.INTEGER
    def randval(self):
        # Fuzzing value: integer in [-2**64, 2**64 - 1].
        return RandNum(-2**64, 2**64 - 1)
class ASN1F_enum_INTEGER(ASN1F_INTEGER):
    """INTEGER field with a symbolic-name <-> integer-value mapping."""
    def __init__(self, name, default, enum, context=None,
                 implicit_tag=None, explicit_tag=None):
        ASN1F_INTEGER.__init__(self, name, default, context=context,
                               implicit_tag=implicit_tag,
                               explicit_tag=explicit_tag)
        # i2s: int -> name, s2i: name -> int; built from a list (indices
        # as keys) or a dict, in either direction.
        i2s = self.i2s = {}
        s2i = self.s2i = {}
        if isinstance(enum, list):
            keys = range(len(enum))
        else:
            keys = list(enum)
        # If the enum is keyed by names, swap so i2s still maps int -> name.
        if any(isinstance(x, six.string_types) for x in keys):
            i2s, s2i = s2i, i2s
        for k in keys:
            i2s[k] = enum[k]
            s2i[enum[k]] = k
    def i2m(self, pkt, s):
        # Accept a symbolic name and translate it to its integer value.
        if isinstance(s, str):
            s = self.s2i.get(s)
        return super(ASN1F_enum_INTEGER, self).i2m(pkt, s)
    def i2repr(self, pkt, x):
        # Show the symbolic name (when known) next to the raw value.
        if x is not None and isinstance(x, ASN1_INTEGER):
            r = self.i2s.get(x.val)
            if r:
                return "'%s' %s" % (r, repr(x))
        return repr(x)
class ASN1F_BIT_STRING(ASN1F_field):
    ASN1_tag = ASN1_Class_UNIVERSAL.BIT_STRING
    def __init__(self, name, default, default_readable=True, context=None,
                 implicit_tag=None, explicit_tag=None):
        # With default_readable the default is given as raw bytes and is
        # converted here into its b'0'/b'1' binary-string representation.
        if default is not None and default_readable:
            default = b"".join(binrepr(orb(x)).zfill(8).encode("utf8") for x in default)  # noqa: E501
        ASN1F_field.__init__(self, name, default, context=context,
                             implicit_tag=implicit_tag,
                             explicit_tag=explicit_tag)
    def randval(self):
        return RandString(RandNum(0, 1000))
# Simple tag-only subclasses: each binds one universal ASN.1 tag to the
# generic field machinery inherited from ASN1F_field / ASN1F_STRING.
class ASN1F_STRING(ASN1F_field):
    ASN1_tag = ASN1_Class_UNIVERSAL.STRING
    def randval(self):
        # Fuzzing value: random string of up to 1000 characters.
        return RandString(RandNum(0, 1000))
class ASN1F_NULL(ASN1F_INTEGER):
    ASN1_tag = ASN1_Class_UNIVERSAL.NULL
class ASN1F_OID(ASN1F_field):
    ASN1_tag = ASN1_Class_UNIVERSAL.OID
    def randval(self):
        return RandOID()
class ASN1F_ENUMERATED(ASN1F_enum_INTEGER):
    ASN1_tag = ASN1_Class_UNIVERSAL.ENUMERATED
class ASN1F_UTF8_STRING(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.UTF8_STRING
class ASN1F_NUMERIC_STRING(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING
class ASN1F_PRINTABLE_STRING(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING
class ASN1F_T61_STRING(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.T61_STRING
class ASN1F_VIDEOTEX_STRING(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING
class ASN1F_IA5_STRING(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.IA5_STRING
class ASN1F_UTC_TIME(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.UTC_TIME
    def randval(self):
        return GeneralizedTime()
class ASN1F_GENERALIZED_TIME(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME
    def randval(self):
        return GeneralizedTime()
class ASN1F_ISO646_STRING(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.ISO646_STRING
class ASN1F_UNIVERSAL_STRING(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.UNIVERSAL_STRING
class ASN1F_BMP_STRING(ASN1F_STRING):
    ASN1_tag = ASN1_Class_UNIVERSAL.BMP_STRING
class ASN1F_SEQUENCE(ASN1F_field):
    """A SEQUENCE of heterogeneous sub-fields, dissected/built in order."""
    # Here is how you could decode a SEQUENCE
    # with an unknown, private high-tag prefix :
    # class PrivSeq(ASN1_Packet):
    #     ASN1_codec = ASN1_Codecs.BER
    #     ASN1_root = ASN1F_SEQUENCE(
    #                       <asn1 field #0>,
    #                       ...
    #                       <asn1 field #N>,
    #                       explicit_tag=0,
    #                       flexible_tag=True)
    # Because we use flexible_tag, the value of the explicit_tag does not matter. # noqa: E501
    ASN1_tag = ASN1_Class_UNIVERSAL.SEQUENCE
    holds_packets = 1
    def __init__(self, *seq, **kwargs):
        # The sequence itself has no meaningful name of its own.
        name = "dummy_seq_name"
        default = [field.default for field in seq]
        for kwarg in ["context", "implicit_tag",
                      "explicit_tag", "flexible_tag"]:
            setattr(self, kwarg, kwargs.get(kwarg))
        ASN1F_field.__init__(self, name, default, context=self.context,
                             implicit_tag=self.implicit_tag,
                             explicit_tag=self.explicit_tag,
                             flexible_tag=self.flexible_tag)
        self.seq = seq
        self.islist = len(seq) > 1
    def __repr__(self):
        return "<%s%r>" % (self.__class__.__name__, self.seq)
    def is_empty(self, pkt):
        # Empty only when every contained field is empty.
        return all(f.is_empty(pkt) for f in self.seq)
    def get_fields_list(self):
        # Flatten the nested fields into one list.
        return reduce(lambda x, y: x + y.get_fields_list(), self.seq, [])
    def m2i(self, pkt, s):
        """
        ASN1F_SEQUENCE behaves transparently, with nested ASN1_objects being
        dissected one by one. Because we use obj.dissect (see loop below)
        instead of obj.m2i (as we trust dissect to do the appropriate set_vals)
        we do not directly retrieve the list of nested objects.
        Thus m2i returns an empty list (along with the proper remainder).
        It is discarded by dissect() and should not be missed elsewhere.
        """
        diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
                                      implicit_tag=self.implicit_tag,
                                      explicit_tag=self.explicit_tag,
                                      safe=self.flexible_tag)
        if diff_tag is not None:
            if self.implicit_tag is not None:
                self.implicit_tag = diff_tag
            elif self.explicit_tag is not None:
                self.explicit_tag = diff_tag
        codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
        i, s, remain = codec.check_type_check_len(s)
        if len(s) == 0:
            # Empty payload: every sub-field dissects to None.
            for obj in self.seq:
                obj.set_val(pkt, None)
        else:
            for obj in self.seq:
                try:
                    s = obj.dissect(pkt, s)
                except ASN1F_badsequence:
                    break
            if len(s) > 0:
                raise BER_Decoding_Error("unexpected remainder", remaining=s)
        return [], remain
    def dissect(self, pkt, s):
        _, x = self.m2i(pkt, s)
        return x
    def build(self, pkt):
        # Concatenate the sub-fields' encodings, then tag the whole.
        s = reduce(lambda x, y: x + y.build(pkt), self.seq, b"")
        return self.i2m(pkt, s)
class ASN1F_SET(ASN1F_SEQUENCE):
    """Like ASN1F_SEQUENCE but carrying the SET tag."""
    ASN1_tag = ASN1_Class_UNIVERSAL.SET
class ASN1F_SEQUENCE_OF(ASN1F_field):
    """SEQUENCE OF <cls>: a homogeneous list of ASN1 packet instances."""
    ASN1_tag = ASN1_Class_UNIVERSAL.SEQUENCE
    holds_packets = 1
    islist = 1
    def __init__(self, name, default, cls, context=None,
                 implicit_tag=None, explicit_tag=None):
        self.cls = cls
        ASN1F_field.__init__(self, name, None, context=context,
                             implicit_tag=implicit_tag, explicit_tag=explicit_tag)  # noqa: E501
        self.default = default
    def is_empty(self, pkt):
        return ASN1F_field.is_empty(self, pkt)
    def m2i(self, pkt, s):
        # Strip the outer tag/length, then repeatedly dissect cls packets
        # until the inner payload is exhausted.
        diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
                                      implicit_tag=self.implicit_tag,
                                      explicit_tag=self.explicit_tag,
                                      safe=self.flexible_tag)
        if diff_tag is not None:
            if self.implicit_tag is not None:
                self.implicit_tag = diff_tag
            elif self.explicit_tag is not None:
                self.explicit_tag = diff_tag
        codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
        i, s, remain = codec.check_type_check_len(s)
        lst = []
        while s:
            c, s = self.extract_packet(self.cls, s)
            lst.append(c)
        if len(s) > 0:
            raise BER_Decoding_Error("unexpected remainder", remaining=s)
        return lst, remain
    def build(self, pkt):
        val = getattr(pkt, self.name)
        if isinstance(val, ASN1_Object) and val.tag == ASN1_Class_UNIVERSAL.RAW:  # noqa: E501
            # Raw objects are passed through untouched.
            s = val
        elif val is None:
            s = b""
        else:
            # Serialize each contained packet and concatenate.
            s = b"".join(raw(i) for i in val)
        return self.i2m(pkt, s)
    def randval(self):
        return packet.fuzz(self.cls())
    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self.name)
class ASN1F_SET_OF(ASN1F_SEQUENCE_OF):
    """Like ASN1F_SEQUENCE_OF but carrying the SET tag."""
    ASN1_tag = ASN1_Class_UNIVERSAL.SET
class ASN1F_IPADDRESS(ASN1F_STRING):
    # STRING-shaped field carrying the IPADDRESS tag.
    ASN1_tag = ASN1_Class_UNIVERSAL.IPADDRESS
class ASN1F_TIME_TICKS(ASN1F_INTEGER):
    # INTEGER-shaped field carrying the TIME_TICKS tag.
    ASN1_tag = ASN1_Class_UNIVERSAL.TIME_TICKS
#############################
# Complex ASN1 Fields #
#############################
class ASN1F_optional(ASN1F_element):
    """Make the wrapped field optional: decoding errors during dissection
    yield None instead of propagating, and an empty value builds to b""."""
    def __init__(self, field):
        # flexible_tag must stay off so the wrapped field raises on a tag
        # mismatch -- that exception is how 'absent' is detected here.
        field.flexible_tag = False
        self._field = field
    def __getattr__(self, attr):
        # Everything not defined here is delegated to the wrapped field.
        return getattr(self._field, attr)
    def m2i(self, pkt, s):
        try:
            return self._field.m2i(pkt, s)
        except (ASN1_Error, ASN1F_badsequence, BER_Decoding_Error):
            # ASN1_Error may be raised by ASN1F_CHOICE
            return None, s
    def dissect(self, pkt, s):
        try:
            return self._field.dissect(pkt, s)
        except (ASN1_Error, ASN1F_badsequence, BER_Decoding_Error):
            # Field absent: record None and leave the input untouched.
            self._field.set_val(pkt, None)
            return s
    def build(self, pkt):
        if self._field.is_empty(pkt):
            return b""
        return self._field.build(pkt)
    def any2i(self, pkt, x):
        return self._field.any2i(pkt, x)
    def i2repr(self, pkt, x):
        return self._field.i2repr(pkt, x)
class ASN1F_CHOICE(ASN1F_field):
    """
    Multiple types are allowed: ASN1_Packet, ASN1F_field and ASN1F_PACKET(),
    See layers/x509.py for examples.
    Other ASN1F_field instances than ASN1F_PACKET instances must not be used.
    """
    holds_packets = 1
    ASN1_tag = ASN1_Class_UNIVERSAL.ANY
    def __init__(self, name, default, *args, **kwargs):
        if "implicit_tag" in kwargs:
            err_msg = "ASN1F_CHOICE has been called with an implicit_tag"
            raise ASN1_Error(err_msg)
        self.implicit_tag = None
        for kwarg in ["context", "explicit_tag"]:
            setattr(self, kwarg, kwargs.get(kwarg))
        ASN1F_field.__init__(self, name, None, context=self.context,
                             explicit_tag=self.explicit_tag)
        self.default = default
        self.current_choice = None
        # choices: network tag -> candidate (packet class, field class, or
        # ASN1F_PACKET instance). pktchoices: per-packet-class tagging, used
        # by i2m to re-apply the right implicit/explicit tag on build.
        self.choices = {}
        self.pktchoices = {}
        for p in args:
            if hasattr(p, "ASN1_root"):   # should be ASN1_Packet
                if hasattr(p.ASN1_root, "choices"):
                    for k, v in six.iteritems(p.ASN1_root.choices):
                        self.choices[k] = v         # ASN1F_CHOICE recursion
                else:
                    self.choices[p.ASN1_root.network_tag] = p
            elif hasattr(p, "ASN1_tag"):
                if isinstance(p, type):        # should be ASN1F_field class
                    self.choices[p.ASN1_tag] = p
                else:                       # should be ASN1F_PACKET instance
                    self.choices[p.network_tag] = p
                    self.pktchoices[hash(p.cls)] = (p.implicit_tag, p.explicit_tag)  # noqa: E501
            else:
                raise ASN1_Error("ASN1F_CHOICE: no tag found for one field")
    def m2i(self, pkt, s):
        """
        First we have to retrieve the appropriate choice.
        Then we extract the field/packet, according to this choice.
        """
        if len(s) == 0:
            raise ASN1_Error("ASN1F_CHOICE: got empty string")
        _, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
                               explicit_tag=self.explicit_tag)
        # Peek at the BER identifier to select the matching alternative.
        tag, _ = BER_id_dec(s)
        if tag not in self.choices:
            if self.flexible_tag:
                choice = ASN1F_field
            else:
                raise ASN1_Error("ASN1F_CHOICE: unexpected field")
        else:
            choice = self.choices[tag]
        if hasattr(choice, "ASN1_root"):
            # we don't want to import ASN1_Packet in this module...
            return self.extract_packet(choice, s)
        elif isinstance(choice, type):
            return choice(self.name, b"").m2i(pkt, s)
        else:
            # XXX check properly if this is an ASN1F_PACKET
            return choice.m2i(pkt, s)
    def i2m(self, pkt, x):
        if x is None:
            s = b""
        else:
            s = raw(x)
            # Re-apply the tagging recorded for this packet class, if any.
            if hash(type(x)) in self.pktchoices:
                imp, exp = self.pktchoices[hash(type(x))]
                s = BER_tagging_enc(s, implicit_tag=imp,
                                    explicit_tag=exp)
        return BER_tagging_enc(s, explicit_tag=self.explicit_tag)
    def randval(self):
        # Fuzz one randomly chosen alternative.
        randchoices = []
        for p in six.itervalues(self.choices):
            if hasattr(p, "ASN1_root"):   # should be ASN1_Packet class
                randchoices.append(packet.fuzz(p()))
            elif hasattr(p, "ASN1_tag"):
                if isinstance(p, type):       # should be (basic) ASN1F_field class  # noqa: E501
                    randchoices.append(p("dummy", None).randval())
                else:                     # should be ASN1F_PACKET instance
                    randchoices.append(p.randval())
        return RandChoice(*randchoices)
class ASN1F_PACKET(ASN1F_field):
    """Field whose value is a whole ASN1_Packet of type ``cls``."""
    holds_packets = 1
    def __init__(self, name, default, cls, context=None,
                 implicit_tag=None, explicit_tag=None):
        self.cls = cls
        ASN1F_field.__init__(self, name, None, context=context,
                             implicit_tag=implicit_tag, explicit_tag=explicit_tag)  # noqa: E501
        if cls.ASN1_root.ASN1_tag == ASN1_Class_UNIVERSAL.SEQUENCE:
            if implicit_tag is None and explicit_tag is None:
                # Untagged SEQUENCE packet: universal SEQUENCE tag (16)
                # with the constructed bit (0x20) set.
                self.network_tag = 16 | 0x20
        self.default = default
    def m2i(self, pkt, s):
        diff_tag, s = BER_tagging_dec(s, hidden_tag=self.cls.ASN1_root.ASN1_tag,  # noqa: E501
                                      implicit_tag=self.implicit_tag,
                                      explicit_tag=self.explicit_tag,
                                      safe=self.flexible_tag)
        if diff_tag is not None:
            # Flexible tagging: keep the tag actually observed on the wire.
            if self.implicit_tag is not None:
                self.implicit_tag = diff_tag
            elif self.explicit_tag is not None:
                self.explicit_tag = diff_tag
        p, s = self.extract_packet(self.cls, s)
        return p, s
    def i2m(self, pkt, x):
        if x is None:
            s = b""
        else:
            s = raw(x)
        return BER_tagging_enc(s, implicit_tag=self.implicit_tag,
                               explicit_tag=self.explicit_tag)
    def randval(self):
        return packet.fuzz(self.cls())
class ASN1F_BIT_STRING_ENCAPS(ASN1F_BIT_STRING):
    """
    We may emulate simple string encapsulation with explicit_tag=0x04,
    but we need a specific class for bit strings because of unused bits, etc.
    """
    holds_packets = 1
    def __init__(self, name, default, cls, context=None,
                 implicit_tag=None, explicit_tag=None):
        self.cls = cls
        ASN1F_BIT_STRING.__init__(self, name, None, context=context,
                                  implicit_tag=implicit_tag,
                                  explicit_tag=explicit_tag)
        self.default = default
    def m2i(self, pkt, s):
        # Decode the outer BIT STRING, then parse its readable byte content
        # as a ``cls`` packet.
        bit_string, remain = ASN1F_BIT_STRING.m2i(self, pkt, s)
        if len(bit_string.val) % 8 != 0:
            # Encapsulated content must be a whole number of octets.
            raise BER_Decoding_Error("wrong bit string", remaining=s)
        p, s = self.extract_packet(self.cls, bit_string.val_readable)
        if len(s) > 0:
            raise BER_Decoding_Error("unexpected remainder", remaining=s)
        return p, remain
    def i2m(self, pkt, x):
        # Serialize the inner packet, then expand every byte to its
        # 8-character binary representation for the BIT STRING encoder.
        s = b"" if x is None else raw(x)
        s = b"".join(binrepr(orb(x)).zfill(8).encode("utf8") for x in s)
        return ASN1F_BIT_STRING.i2m(self, pkt, s)
class ASN1F_FLAGS(ASN1F_BIT_STRING):
    """BIT STRING field whose individual bits carry named flags.

    ``mapping`` is a sequence of flag names; bit *i* of the value (the
    i-th character of its '0'/'1' string form) corresponds to
    ``mapping[i]``.
    """
    def __init__(self, name, default, mapping, context=None,
                 implicit_tag=None, explicit_tag=None):
        self.mapping = mapping
        ASN1F_BIT_STRING.__init__(self, name, default,
                                  default_readable=False,
                                  context=context,
                                  implicit_tag=implicit_tag,
                                  explicit_tag=explicit_tag)

    def get_flags(self, pkt):
        # Collect the names of every bit set to '1', ignoring any bits
        # beyond the known mapping.
        bits = getattr(pkt, self.name).val
        names = []
        for idx, bit in enumerate(bits):
            if bit == '1' and idx < len(self.mapping):
                names.append(self.mapping[idx])
        return names

    def i2repr(self, pkt, x):
        if x is None:
            return repr(x)
        # Prefix the raw representation with the decoded flag names.
        return "%s %s" % (", ".join(self.get_flags(pkt)), repr(x))
|
{
"content_hash": "d3eb4daa53cfff2ba5e16500c8d90708",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 136,
"avg_line_length": 34.39209726443769,
"alnum_prop": 0.5565620857269111,
"repo_name": "4shadoww/usploit",
"id": "87679b550770a0d4a686c46595401c2017b870c7",
"size": "22878",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/scapy/asn1fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7981066"
}
],
"symlink_target": ""
}
|
import unittest
from pyowm.utils import strings
class Placeholder:
    """Dummy class that the tests below resolve via its dotted path."""
    pass  # don't move: test_class_from_dotted_path references this class by path
class TestStringUtils(unittest.TestCase):
    """Unit tests for the helpers in ``pyowm.utils.strings``."""
    def test_obfuscate_API_key(self):
        # Only the trailing 8 characters of the key remain visible;
        # None passes through as None.
        API_key = '22e28da2669c4283acdbd9cfa7dc0903'
        expected = '************************a7dc0903'
        self.assertEqual(expected, strings.obfuscate_API_key(API_key))
        self.assertIsNone(strings.obfuscate_API_key(None))
    def test_version_tuple_to_str(self):
        # Default separator is a dot...
        version_tuple = (1, 4, 6)
        expected = '1.4.6'
        result = strings.version_tuple_to_str(version_tuple)
        self.assertEqual(expected, result)
        # ...and a custom separator joins tuples of any length.
        version_tuple = (1, 4, 6, 9)
        separator = ';'
        expected = '1;4;6;9'
        result = strings.version_tuple_to_str(version_tuple, separator=separator)
        self.assertEqual(expected, result)
    def test_class_from_dotted_path(self):
        # The dotted path resolves to this module's Placeholder class.
        path = 'tests.unit.utils.test_strings.Placeholder'
        result = strings.class_from_dotted_path(path)
        assert result == Placeholder
|
{
"content_hash": "576bd74a4ab897a0af26d63474bc456e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 32.1875,
"alnum_prop": 0.6388349514563106,
"repo_name": "csparpa/pyowm",
"id": "2cf32946d14ecbca5689b6621ca072498ed4b543",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/utils/test_strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6699"
},
{
"name": "Makefile",
"bytes": "6758"
},
{
"name": "Python",
"bytes": "1045787"
},
{
"name": "Shell",
"bytes": "6424"
}
],
"symlink_target": ""
}
|
from sqlalchemy import cast
from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import tuple_
from sqlalchemy import union
from sqlalchemy.sql import column
from sqlalchemy.sql import literal
from sqlalchemy.sql import table
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
# Lightweight ad-hoc table constructs used throughout the compile tests.
table1 = table(
    "mytable",
    column("myid", Integer),
    column("name", String),
    column("description", String),
)
table2 = table(
    "myothertable", column("otherid", Integer), column("othername", String)
)
# Full Table objects with a FK chain (parent -> child -> grandchild),
# used to exercise implicit ON-clause derivation in joins.
metadata = MetaData()
parent = Table(
    "parent",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("data", String(50)),
)
child = Table(
    "child",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("parent_id", ForeignKey("parent.id")),
    Column("data", String(50)),
)
grandchild = Table(
    "grandchild",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("child_id", ForeignKey("child.id")),
)
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
    """Compile-level tests for the ``select()`` calling style: join
    variants (implicit/explicit left side and ON clause), ``filter_by``,
    tuple selects and compound (UNION-style) statements. Each test
    asserts the exact SQL string emitted by the default dialect.
    """
    __dialect__ = "default"
    def test_old_bracket_style_fail(self):
        # The legacy select([...]) list form must raise, not silently work.
        with expect_raises_message(
            exc.ArgumentError,
            r"Column expression, FROM clause, or other columns clause .*"
            r".*Did you mean to say",
        ):
            select([table1.c.myid])
    def test_new_calling_style(self):
        stmt = select(table1.c.myid).where(table1.c.myid == table2.c.otherid)
        self.assert_compile(
            stmt,
            "SELECT mytable.myid FROM mytable, myothertable "
            "WHERE mytable.myid = myothertable.otherid",
        )
    def test_new_calling_style_clauseelement_thing_that_has_iter(self):
        # An object that is both iterable and a ClauseElement must be
        # treated as a ClauseElement, not expanded as an iterable.
        class Thing:
            def __clause_element__(self):
                return table1
            def __iter__(self):
                return iter(["a", "b", "c"])
        stmt = select(Thing())
        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, "
            "mytable.description FROM mytable",
        )
    def test_new_calling_style_inspectable_ce_thing_that_has_iter(self):
        class Thing:
            def __iter__(self):
                return iter(["a", "b", "c"])
        class InspectedThing:
            def __clause_element__(self):
                return table1
        from sqlalchemy.inspection import _inspects
        @_inspects(Thing)
        def _ce(thing):
            return InspectedThing()
        stmt = select(Thing())
        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, "
            "mytable.description FROM mytable",
        )
    def test_join_nofrom_implicit_left_side_explicit_onclause(self):
        stmt = select(table1).join(table2, table1.c.myid == table2.c.otherid)
        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )
    def test_join_nofrom_implicit_left_side_explicit_onclause_3level(self):
        stmt = (
            select(parent)
            .join(child, child.c.parent_id == parent.c.id)
            .join(grandchild, grandchild.c.child_id == child.c.id)
        )
        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON child.parent_id = parent.id "
            "JOIN grandchild ON grandchild.child_id = child.id",
        )
    def test_join_nofrom_explicit_left_side_explicit_onclause(self):
        stmt = select(table1).join_from(
            table1, table2, table1.c.myid == table2.c.otherid
        )
        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )
    def test_outerjoin_nofrom_explicit_left_side_explicit_onclause(self):
        stmt = select(table1).outerjoin_from(
            table1, table2, table1.c.myid == table2.c.otherid
        )
        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable LEFT OUTER JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )
    def test_join_nofrom_implicit_left_side_implicit_onclause(self):
        stmt = select(parent).join(child)
        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id",
        )
    def test_join_nofrom_implicit_left_side_implicit_onclause_3level(self):
        stmt = select(parent).join(child).join(grandchild)
        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id "
            "JOIN grandchild ON child.id = grandchild.child_id",
        )
    def test_join_nofrom_explicit_left_side_implicit_onclause(self):
        stmt = select(parent).join_from(parent, child)
        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id",
        )
    def test_join_froms_implicit_left_side_explicit_onclause(self):
        stmt = (
            select(table1)
            .select_from(table1)
            .join(table2, table1.c.myid == table2.c.otherid)
        )
        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )
    def test_join_froms_explicit_left_side_explicit_onclause(self):
        stmt = (
            select(table1)
            .select_from(table1)
            .join_from(table1, table2, table1.c.myid == table2.c.otherid)
        )
        self.assert_compile(
            stmt,
            "SELECT mytable.myid, mytable.name, mytable.description "
            "FROM mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid",
        )
    def test_join_froms_implicit_left_side_implicit_onclause(self):
        stmt = select(parent).select_from(parent).join(child)
        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id",
        )
    def test_join_froms_explicit_left_side_implicit_onclause(self):
        stmt = select(parent).select_from(parent).join_from(parent, child)
        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id",
        )
    def test_join_implicit_left_side_wo_cols_onelevel(self):
        """test issue #6503"""
        stmt = select(parent).join(child).with_only_columns(child.c.id)
        self.assert_compile(
            stmt,
            "SELECT child.id FROM parent "
            "JOIN child ON parent.id = child.parent_id",
        )
    def test_join_implicit_left_side_wo_cols_onelevel_union(self):
        """test issue #6698, regression from #6503.
        this issue didn't affect Core but testing it here anyway."""
        stmt = select(parent).join(child).with_only_columns(child.c.id)
        stmt = stmt.union(select(child.c.id))
        self.assert_compile(
            stmt,
            "SELECT child.id FROM parent "
            "JOIN child ON parent.id = child.parent_id "
            "UNION "
            "SELECT child.id FROM child",
        )
    def test_join_implicit_left_side_wo_cols_twolevel(self):
        """test issue #6503"""
        stmt = (
            select(parent)
            .join(child)
            .with_only_columns(child.c.id)
            .join(grandchild)
            .with_only_columns(grandchild.c.id)
        )
        self.assert_compile(
            stmt,
            "SELECT grandchild.id FROM parent "
            "JOIN child ON parent.id = child.parent_id "
            "JOIN grandchild ON child.id = grandchild.child_id",
        )
    def test_join_implicit_left_side_wo_cols_twolevel_union(self):
        """test issue #6698, regression from #6503.
        this issue didn't affect Core but testing it here anyway."""
        stmt = (
            select(parent)
            .join(child)
            .with_only_columns(child.c.id)
            .join(grandchild)
            .with_only_columns(grandchild.c.id)
        )
        stmt = union(stmt, select(grandchild.c.id))
        self.assert_compile(
            stmt,
            "SELECT grandchild.id FROM parent "
            "JOIN child ON parent.id = child.parent_id "
            "JOIN grandchild ON child.id = grandchild.child_id "
            "UNION "
            "SELECT grandchild.id FROM grandchild",
        )
    def test_right_nested_inner_join(self):
        inner = child.join(grandchild)
        stmt = select(parent).outerjoin_from(parent, inner)
        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent "
            "LEFT OUTER JOIN "
            "(child JOIN grandchild ON child.id = grandchild.child_id) "
            "ON parent.id = child.parent_id",
        )
    def test_joins_w_filter_by(self):
        # filter_by applies against the most recent join target.
        stmt = (
            select(parent)
            .filter_by(data="p1")
            .join(child)
            .filter_by(data="c1")
            .join_from(table1, table2, table1.c.myid == table2.c.otherid)
            .filter_by(otherid=5)
        )
        self.assert_compile(
            stmt,
            "SELECT parent.id, parent.data FROM parent JOIN child "
            "ON parent.id = child.parent_id, mytable JOIN myothertable "
            "ON mytable.myid = myothertable.otherid "
            "WHERE parent.data = :data_1 AND child.data = :data_2 "
            "AND myothertable.otherid = :otherid_1",
            checkparams={"data_1": "p1", "data_2": "c1", "otherid_1": 5},
        )
    def test_filter_by_from_col(self):
        stmt = select(table1.c.myid).filter_by(name="foo")
        self.assert_compile(
            stmt,
            "SELECT mytable.myid FROM mytable WHERE mytable.name = :name_1",
        )
    def test_filter_by_from_func(self):
        """test #6414"""
        stmt = select(func.count(table1.c.myid)).filter_by(name="foo")
        self.assert_compile(
            stmt,
            "SELECT count(mytable.myid) AS count_1 "
            "FROM mytable WHERE mytable.name = :name_1",
        )
    def test_filter_by_from_func_not_the_first_arg(self):
        """test #6414"""
        stmt = select(func.bar(True, table1.c.myid)).filter_by(name="foo")
        self.assert_compile(
            stmt,
            "SELECT bar(:bar_2, mytable.myid) AS bar_1 "
            "FROM mytable WHERE mytable.name = :name_1",
        )
    def test_filter_by_from_cast(self):
        """test #6414"""
        stmt = select(cast(table1.c.myid, Integer)).filter_by(name="foo")
        self.assert_compile(
            stmt,
            "SELECT CAST(mytable.myid AS INTEGER) AS myid "
            "FROM mytable WHERE mytable.name = :name_1",
        )
    def test_filter_by_from_binary(self):
        """test #6414"""
        stmt = select(table1.c.myid == 5).filter_by(name="foo")
        self.assert_compile(
            stmt,
            "SELECT mytable.myid = :myid_1 AS anon_1 "
            "FROM mytable WHERE mytable.name = :name_1",
        )
    def test_filter_by_from_label(self):
        """test #6414"""
        stmt = select(table1.c.myid.label("some_id")).filter_by(name="foo")
        self.assert_compile(
            stmt,
            "SELECT mytable.myid AS some_id "
            "FROM mytable WHERE mytable.name = :name_1",
        )
    def test_filter_by_no_property_from_table(self):
        assert_raises_message(
            exc.InvalidRequestError,
            'Entity namespace for "mytable" has no property "foo"',
            select(table1).filter_by,
            foo="bar",
        )
    def test_filter_by_no_property_from_col(self):
        assert_raises_message(
            exc.InvalidRequestError,
            'Entity namespace for "mytable.myid" has no property "foo"',
            select(table1.c.myid).filter_by,
            foo="bar",
        )
    def test_select_tuple_outer(self):
        stmt = select(tuple_(table1.c.myid, table1.c.name))
        assert_raises_message(
            exc.CompileError,
            r"Most backends don't support SELECTing from a tuple\(\) object. "
            "If this is an ORM query, consider using the Bundle object.",
            stmt.compile,
        )
    def test_select_tuple_subquery(self):
        subq = select(
            table1.c.name, tuple_(table1.c.myid, table1.c.name)
        ).subquery()
        stmt = select(subq.c.name)
        # if we aren't fetching it, then render it
        self.assert_compile(
            stmt,
            "SELECT anon_1.name FROM (SELECT mytable.name AS name, "
            "(mytable.myid, mytable.name) AS anon_2 FROM mytable) AS anon_1",
        )
    @testing.combinations(
        ("union_all", "UNION ALL"),
        ("union", "UNION"),
        ("intersect_all", "INTERSECT ALL"),
        ("intersect", "INTERSECT"),
        ("except_all", "EXCEPT ALL"),
        ("except_", "EXCEPT"),
    )
    def test_select_multiple_compound_elements(self, methname, joiner):
        # Each compound method accepts multiple selects in one call.
        stmt = select(literal(1))
        meth = getattr(stmt, methname)
        stmt = meth(select(literal(2)), select(literal(3)))
        self.assert_compile(
            stmt,
            "SELECT :param_1 AS anon_1"
            " %(joiner)s SELECT :param_2 AS anon_2"
            " %(joiner)s SELECT :param_3 AS anon_3" % {"joiner": joiner},
        )
class ColumnCollectionAsSelectTest(fixtures.TestBase, AssertsCompiledSQL):
    """tests related to #8285.

    Covers passing a table's ``.c`` collection (or a keyed/indexed
    sub-collection of it) directly to ``select()``.
    """
    __dialect__ = "default"
    def test_c_collection_as_from(self):
        stmt = select(parent.c)
        # this works because _all_selected_columns expands out
        # ClauseList. it does so in the same way that it works for
        # Table already. so this is free
        eq_(stmt._all_selected_columns, [parent.c.id, parent.c.data])
        self.assert_compile(stmt, "SELECT parent.id, parent.data FROM parent")
    def test_c_sub_collection_str_stmt(self):
        stmt = select(table1.c["myid", "description"])
        self.assert_compile(
            stmt, "SELECT mytable.myid, mytable.description FROM mytable"
        )
        subq = stmt.subquery()
        self.assert_compile(
            select(subq.c[0]).where(subq.c.description == "x"),
            "SELECT anon_1.myid FROM (SELECT mytable.myid AS myid, "
            "mytable.description AS description FROM mytable) AS anon_1 "
            "WHERE anon_1.description = :description_1",
        )
    def test_c_sub_collection_int_stmt(self):
        # Integer indexes select columns positionally, in the given order.
        stmt = select(table1.c[2, 0])
        self.assert_compile(
            stmt, "SELECT mytable.description, mytable.myid FROM mytable"
        )
        subq = stmt.subquery()
        self.assert_compile(
            select(subq.c.myid).where(subq.c[1] == "x"),
            "SELECT anon_1.myid FROM (SELECT mytable.description AS "
            "description, mytable.myid AS myid FROM mytable) AS anon_1 "
            "WHERE anon_1.myid = :myid_1",
        )
    def test_c_sub_collection_str(self):
        coll = table1.c["myid", "description"]
        is_(coll.myid, table1.c.myid)
        eq_(list(coll), [table1.c.myid, table1.c.description])
    def test_c_sub_collection_int(self):
        coll = table1.c[2, 0]
        is_(coll.myid, table1.c.myid)
        eq_(list(coll), [table1.c.description, table1.c.myid])
    def test_missing_key(self):
        with expect_raises_message(KeyError, "unknown"):
            table1.c["myid", "unknown"]
    def test_missing_index(self):
        with expect_raises_message(IndexError, "5"):
            table1.c["myid", 5]
|
{
"content_hash": "a9b66b41c2d9f5eca82b1fc28a8346d7",
"timestamp": "",
"source": "github",
"line_count": 515,
"max_line_length": 79,
"avg_line_length": 32.22135922330097,
"alnum_prop": 0.5779197300228999,
"repo_name": "sqlalchemy/sqlalchemy",
"id": "ad4b4db95916f19962876409b3e083899369621e",
"size": "16594",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "test/sql/test_select.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
}
|
"""
Cat Facts Test for Bots
"""
__author__ = 'Steve Cvar'
__license__ = 'MIT'
|
{
"content_hash": "5f1946bb9e0541e01d030c46189a518b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 25,
"avg_line_length": 15.6,
"alnum_prop": 0.5641025641025641,
"repo_name": "vgan/catFactsTest",
"id": "31cd4698e2fb8463fdf3c493c5f54da19604b76c",
"size": "106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4366"
}
],
"symlink_target": ""
}
|
import sys
from midiutil.MidiFile import MIDIFile
import random
# Semitone offsets above the root used as the default scale
# (major pentatonic plus the octave, per the "C Major Pentatonic" note below).
ratios = [0, 2, 4, 7, 9, 12]

def generateScale(rootNote, intervals=None):
    """Return the MIDI pitches of a scale built on ``rootNote``.

    Generalized so callers may supply their own interval pattern while
    remaining backward compatible with the original single-argument form.

    :param rootNote: MIDI note number used as the scale root.
    :param intervals: optional sequence of semitone offsets to add to the
        root; defaults to the module-level ``ratios``.
    :returns: list of int MIDI pitches, one per interval.
    """
    if intervals is None:
        intervals = ratios
    return [rootNote + offset for offset in intervals]
def pickPitch(charNum, rootNote):
    """Map a single character to a pitch in the scale rooted at ``rootNote``.

    The character's ordinal value, taken modulo the scale length, indexes
    into the (C major pentatonic) scale produced by ``generateScale``.
    """
    scale = generateScale(rootNote)
    index = ord(charNum) % len(scale)
    return scale[index]
def main(argv=None):
    """Read the text file named by ``argv[1]`` and write ``mozart.mid``.

    Each character of the input becomes one beat: five parallel tracks play
    the character's pitch in five successive octaves, spaces become silent
    notes (velocity 0), and every 4th beat is doubled in duration.

    :param argv: optional argument list (defaults to ``sys.argv``);
        ``argv[1]`` must be the input text file path.
    """
    if argv is None:
        argv = sys.argv
    txtFileName = argv[1]
    # 'with' guarantees the file is closed even if read() raises.
    with open(txtFileName, 'r') as txtFile:
        charsForNotes = txtFile.read()
    # Setup default values
    rootNote = 0
    track = 0
    time = 0
    tempo = 120
    channel = 0
    duration = 1
    volume = 100
    midiFile = MIDIFile(12)
    # Name the five tracks "1".."5", matching the octave layers below.
    for t in range(5):
        midiFile.addTrackName(t, time, str(t + 1))
    midiFile.addTempo(track, time, tempo)
    i = 0
    while i < len(charsForNotes):
        # Double the duration of every 4th beat.
        durationMult = (i % 4 == 0) + 1
        for j in range(5):
            pitch = pickPitch(charsForNotes[i], rootNote + (12 * j))
            # Bug fix: compare characters with '==', not 'is' — identity of
            # equal strings is a CPython interning detail, not a guarantee.
            if charsForNotes[i] == ' ':
                # Spaces become rests: same timing, zero velocity.
                midiFile.addNote(j, channel, pitch, time, duration * durationMult, 0)
            else:
                midiFile.addNote(j, channel, pitch, time, duration * durationMult, volume)
        time += 1
        i += 1
    with open("mozart.mid", 'wb') as mFile:
        midiFile.writeFile(mFile)

if __name__ == "__main__":
    main()
|
{
"content_hash": "64c8d09768bbaae9c79fe9222475331c",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 76,
"avg_line_length": 20.149253731343283,
"alnum_prop": 0.6688888888888889,
"repo_name": "rcgilbert/csc344-wi14",
"id": "ba034d7138d032c72c3ed4b2fdf044b8c4240f42",
"size": "1350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project4/mozart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11495015"
},
{
"name": "C++",
"bytes": "12400956"
},
{
"name": "CSS",
"bytes": "117677"
},
{
"name": "DOT",
"bytes": "15057"
},
{
"name": "Erlang",
"bytes": "1470"
},
{
"name": "Java",
"bytes": "24037"
},
{
"name": "JavaScript",
"bytes": "146242"
},
{
"name": "M",
"bytes": "2372"
},
{
"name": "Objective-C",
"bytes": "1152224"
},
{
"name": "PHP",
"bytes": "12544"
},
{
"name": "Perl",
"bytes": "391898"
},
{
"name": "Python",
"bytes": "1350"
},
{
"name": "R",
"bytes": "7719"
},
{
"name": "Shell",
"bytes": "9212"
}
],
"symlink_target": ""
}
|
"""
Tests for L{twisted.internet.fdesc}.
"""
import os, sys
import errno
try:
import fcntl
except ImportError:
skip = "not supported on this platform"
else:
from twisted.internet import fdesc
from twisted.python.util import untilConcludes
from twisted.trial import unittest
class NonBlockingTests(unittest.SynchronousTestCase):
    """
    Tests for L{fdesc.setNonBlocking} and L{fdesc.setBlocking}.
    """

    def _pipe(self):
        # Create a pipe whose two ends are closed automatically after the
        # test, in the same order the original tests registered them.
        reader, writer = os.pipe()
        self.addCleanup(os.close, reader)
        self.addCleanup(os.close, writer)
        return reader, writer

    def test_setNonBlocking(self):
        """
        L{fdesc.setNonBlocking} sets a file description to non-blocking.
        """
        reader, _ = self._pipe()
        self.assertFalse(fcntl.fcntl(reader, fcntl.F_GETFL) & os.O_NONBLOCK)
        fdesc.setNonBlocking(reader)
        self.assertTrue(fcntl.fcntl(reader, fcntl.F_GETFL) & os.O_NONBLOCK)

    def test_setBlocking(self):
        """
        L{fdesc.setBlocking} sets a file description to blocking.
        """
        reader, _ = self._pipe()
        fdesc.setNonBlocking(reader)
        fdesc.setBlocking(reader)
        self.assertFalse(fcntl.fcntl(reader, fcntl.F_GETFL) & os.O_NONBLOCK)
class ReadWriteTests(unittest.SynchronousTestCase):
    """
    Tests for L{fdesc.readFromFD}, L{fdesc.writeToFD}.
    """
    def setUp(self):
        """
        Create a non-blocking pipe that can be used in tests.
        """
        self.r, self.w = os.pipe()
        fdesc.setNonBlocking(self.r)
        fdesc.setNonBlocking(self.w)
    def tearDown(self):
        """
        Close pipes.
        """
        # Each end may already have been closed by a test; ignore that.
        try:
            os.close(self.w)
        except OSError:
            pass
        try:
            os.close(self.r)
        except OSError:
            pass
    def write(self, d):
        """
        Write data to the pipe.
        """
        return fdesc.writeToFD(self.w, d)
    def read(self):
        """
        Read data from the pipe.
        """
        l = []
        res = fdesc.readFromFD(self.r, l.append)
        if res is None:
            # Success: return whatever the callback collected (or b"").
            if l:
                return l[0]
            else:
                return b""
        else:
            # Connection-state indicator (CONNECTION_DONE/LOST).
            return res
    def test_writeAndRead(self):
        """
        Test that the number of bytes L{fdesc.writeToFD} reports as written
        with its return value are seen by L{fdesc.readFromFD}.
        """
        n = self.write(b"hello")
        self.assertTrue(n > 0)
        s = self.read()
        self.assertEqual(len(s), n)
        self.assertEqual(b"hello"[:n], s)
    def test_writeAndReadLarge(self):
        """
        Similar to L{test_writeAndRead}, but use a much larger string to verify
        the behavior for that case.
        """
        orig = b"0123456879" * 10000
        written = self.write(orig)
        self.assertTrue(written > 0)
        result = []
        resultlength = 0
        i = 0
        while resultlength < written or i < 50:
            result.append(self.read())
            resultlength += len(result[-1])
            # Increment a counter to be sure we'll exit at some point
            i += 1
        result = b"".join(result)
        self.assertEqual(len(result), written)
        self.assertEqual(orig[:written], result)
    def test_readFromEmpty(self):
        """
        Verify that reading from a file descriptor with no data does not raise
        an exception and does not result in the callback function being called.
        """
        l = []
        result = fdesc.readFromFD(self.r, l.append)
        self.assertEqual(l, [])
        self.assertIsNone(result)
    def test_readFromCleanClose(self):
        """
        Test that using L{fdesc.readFromFD} on a cleanly closed file descriptor
        returns a connection done indicator.
        """
        os.close(self.w)
        self.assertEqual(self.read(), fdesc.CONNECTION_DONE)
    def test_writeToClosed(self):
        """
        Verify that writing with L{fdesc.writeToFD} when the read end is closed
        results in a connection lost indicator.
        """
        os.close(self.r)
        self.assertEqual(self.write(b"s"), fdesc.CONNECTION_LOST)
    def test_readFromInvalid(self):
        """
        Verify that reading with L{fdesc.readFromFD} when the read end is
        closed results in a connection lost indicator.
        """
        os.close(self.r)
        self.assertEqual(self.read(), fdesc.CONNECTION_LOST)
    def test_writeToInvalid(self):
        """
        Verify that writing with L{fdesc.writeToFD} when the write end is
        closed results in a connection lost indicator.
        """
        os.close(self.w)
        self.assertEqual(self.write(b"s"), fdesc.CONNECTION_LOST)
    def test_writeErrors(self):
        """
        Test error path for L{fdesc.writeTod}.
        """
        # Monkeypatch os.write to simulate EAGAIN/EINTR; both must be
        # reported as "0 bytes written", not raised. os.write is restored
        # in a finally block either way.
        oldOsWrite = os.write
        def eagainWrite(fd, data):
            err = OSError()
            err.errno = errno.EAGAIN
            raise err
        os.write = eagainWrite
        try:
            self.assertEqual(self.write(b"s"), 0)
        finally:
            os.write = oldOsWrite
        def eintrWrite(fd, data):
            err = OSError()
            err.errno = errno.EINTR
            raise err
        os.write = eintrWrite
        try:
            self.assertEqual(self.write(b"s"), 0)
        finally:
            os.write = oldOsWrite
class CloseOnExecTests(unittest.SynchronousTestCase):
    """
    Tests for L{fdesc._setCloseOnExec} and L{fdesc._unsetCloseOnExec}.

    A child process is fork/exec'd running the C{program} snippet below,
    which tries to write to an inherited descriptor and exits with a code
    identifying the outcome (0: descriptor was closed on exec, 20: it was
    inherited and writable).
    """
    program = '''
import os, errno
try:
    os.write(%d, b'lul')
except OSError as e:
    if e.errno == errno.EBADF:
        os._exit(0)
    os._exit(5)
except:
    os._exit(10)
else:
    os._exit(20)
'''
    def _execWithFileDescriptor(self, fObj):
        # Fork, exec the probe program with fObj's fd interpolated in, and
        # return the child's wait() status.
        pid = os.fork()
        if pid == 0:
            try:
                os.execv(sys.executable, [sys.executable, '-c', self.program % (fObj.fileno(),)])
            except:
                import traceback
                traceback.print_exc()
                os._exit(30)
        else:
            # On Linux wait(2) doesn't seem ever able to fail with EINTR but
            # POSIX seems to allow it and on OS X it happens quite a lot.
            return untilConcludes(os.waitpid, pid, 0)[1]
    def test_setCloseOnExec(self):
        """
        A file descriptor passed to L{fdesc._setCloseOnExec} is not inherited
        by a new process image created with one of the exec family of
        functions.
        """
        with open(self.mktemp(), 'wb') as fObj:
            fdesc._setCloseOnExec(fObj.fileno())
            status = self._execWithFileDescriptor(fObj)
            self.assertTrue(os.WIFEXITED(status))
            self.assertEqual(os.WEXITSTATUS(status), 0)
    def test_unsetCloseOnExec(self):
        """
        A file descriptor passed to L{fdesc._unsetCloseOnExec} is inherited by
        a new process image created with one of the exec family of functions.
        """
        with open(self.mktemp(), 'wb') as fObj:
            fdesc._setCloseOnExec(fObj.fileno())
            fdesc._unsetCloseOnExec(fObj.fileno())
            status = self._execWithFileDescriptor(fObj)
            self.assertTrue(os.WIFEXITED(status))
            self.assertEqual(os.WEXITSTATUS(status), 20)
|
{
"content_hash": "9d0f5cbee78a9ca0656668ec76da6c06",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 97,
"avg_line_length": 27.745247148288975,
"alnum_prop": 0.570097300260381,
"repo_name": "EricMuller/mynotes-backend",
"id": "1f8b62740bbdedae59619c6859d6ccd91a796dbd",
"size": "7370",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "requirements/twisted/Twisted-17.1.0/src/twisted/test/test_fdesc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "11880"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "6613"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "233863"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "22991176"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "13496"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
}
|
# coding: utf-8
from sqlalchemy.test.testing import eq_
from sqlalchemy import *
from sqlalchemy import types as sqltypes, exc
from sqlalchemy.sql import table, column
from sqlalchemy.test import *
from sqlalchemy.test.testing import eq_, assert_raises, assert_raises_message
from sqlalchemy.test.engines import testing_engine
from sqlalchemy.dialects.oracle import cx_oracle, base as oracle
from sqlalchemy.engine import default
from sqlalchemy.util import jython
from decimal import Decimal
import datetime
import os
class OutParamTest(TestBase, AssertsExecutionResults):
    """Tests for OUT-parameter support via cx_Oracle's outparam()."""
    __only_on__ = 'oracle+cx_oracle'
    @classmethod
    def setup_class(cls):
        # Install a stored procedure with three OUT parameters of
        # different types; dropped again in teardown_class.
        testing.db.execute("""
create or replace procedure foo(x_in IN number, x_out OUT number, y_out OUT number, z_out OUT varchar) IS
retval number;
    begin
    retval := 6;
    x_out := 10;
    y_out := x_in * 15;
    z_out := NULL;
    end;
        """)
    def test_out_params(self):
        # OUT parameters surface on result.out_parameters, coerced to the
        # declared Python-side types (Integer -> int, etc.).
        result = \
            testing.db.execute(text('begin foo(:x_in, :x_out, :y_out, '
                               ':z_out); end;',
                               bindparams=[bindparam('x_in', Float),
                               outparam('x_out', Integer),
                               outparam('y_out', Float),
                               outparam('z_out', String)]), x_in=5)
        eq_(result.out_parameters, {'x_out': 10, 'y_out': 75, 'z_out'
            : None})
        assert isinstance(result.out_parameters['x_out'], int)
    @classmethod
    def teardown_class(cls):
        testing.db.execute("DROP PROCEDURE foo")
class CompileTest(TestBase, AssertsCompiledSQL):
    """Compile-only checks of Oracle SQL rendering: schema qualification,
    ROWNUM-based limit/offset emulation, 30-char identifier truncation,
    and (+) outer-join syntax when use_ansi=False.  No database needed."""
    __dialect__ = oracle.OracleDialect()
    def test_owner(self):
        # schema='ed' must qualify both table references in the JOIN
        meta = MetaData()
        parent = Table('parent', meta, Column('id', Integer,
                       primary_key=True), Column('name', String(50)),
                       schema='ed')
        child = Table('child', meta, Column('id', Integer,
                      primary_key=True), Column('parent_id', Integer,
                      ForeignKey('ed.parent.id')), schema='ed')
        self.assert_compile(parent.join(child),
                            'ed.parent JOIN ed.child ON ed.parent.id = '
                            'ed.child.parent_id')
    def test_subquery(self):
        # a select-of-select renders the inner SELECT as an anonymous
        # FROM-clause subquery
        t = table('sometable', column('col1'), column('col2'))
        s = select([t])
        s = select([s.c.col1, s.c.col2])
        self.assert_compile(s, "SELECT col1, col2 FROM (SELECT "
                            "sometable.col1 AS col1, sometable.col2 "
                            "AS col2 FROM sometable)")
    def test_limit(self):
        # Oracle has no LIMIT/OFFSET; the dialect wraps the query in
        # nested subselects filtering on ROWNUM (aliased as ora_rn).
        t = table('sometable', column('col1'), column('col2'))
        s = select([t])
        c = s.compile(dialect=oracle.OracleDialect())
        assert t.c.col1 in set(c.result_map['col1'][1])
        s = select([t]).limit(10).offset(20)
        self.assert_compile(s,
                            'SELECT col1, col2 FROM (SELECT col1, '
                            'col2, ROWNUM AS ora_rn FROM (SELECT '
                            'sometable.col1 AS col1, sometable.col2 AS '
                            'col2 FROM sometable) WHERE ROWNUM <= '
                            ':ROWNUM_1) WHERE ora_rn > :ora_rn_1')
        # result_map must still resolve the original column through the
        # ROWNUM wrapping
        c = s.compile(dialect=oracle.OracleDialect())
        assert t.c.col1 in set(c.result_map['col1'][1])
        s = select([s.c.col1, s.c.col2])
        self.assert_compile(s,
                            'SELECT col1, col2 FROM (SELECT col1, col2 '
                            'FROM (SELECT col1, col2, ROWNUM AS ora_rn '
                            'FROM (SELECT sometable.col1 AS col1, '
                            'sometable.col2 AS col2 FROM sometable) '
                            'WHERE ROWNUM <= :ROWNUM_1) WHERE ora_rn > '
                            ':ora_rn_1)')
        # compiled twice to verify the output is stable across compiles
        self.assert_compile(s,
                            'SELECT col1, col2 FROM (SELECT col1, col2 '
                            'FROM (SELECT col1, col2, ROWNUM AS ora_rn '
                            'FROM (SELECT sometable.col1 AS col1, '
                            'sometable.col2 AS col2 FROM sometable) '
                            'WHERE ROWNUM <= :ROWNUM_1) WHERE ora_rn > '
                            ':ora_rn_1)')
        # ORDER BY stays inside the innermost select so ROWNUM applies
        # after ordering
        s = select([t]).limit(10).offset(20).order_by(t.c.col2)
        self.assert_compile(s,
                            'SELECT col1, col2 FROM (SELECT col1, '
                            'col2, ROWNUM AS ora_rn FROM (SELECT '
                            'sometable.col1 AS col1, sometable.col2 AS '
                            'col2 FROM sometable ORDER BY '
                            'sometable.col2) WHERE ROWNUM <= '
                            ':ROWNUM_1) WHERE ora_rn > :ora_rn_1')
        # FOR UPDATE is emitted on the outermost statement
        s = select([t], for_update=True).limit(10).order_by(t.c.col2)
        self.assert_compile(s,
                            'SELECT col1, col2 FROM (SELECT '
                            'sometable.col1 AS col1, sometable.col2 AS '
                            'col2 FROM sometable ORDER BY '
                            'sometable.col2) WHERE ROWNUM <= :ROWNUM_1 '
                            'FOR UPDATE')
        s = select([t],
                   for_update=True).limit(10).offset(20).order_by(t.c.col2)
        self.assert_compile(s,
                            'SELECT col1, col2 FROM (SELECT col1, '
                            'col2, ROWNUM AS ora_rn FROM (SELECT '
                            'sometable.col1 AS col1, sometable.col2 AS '
                            'col2 FROM sometable ORDER BY '
                            'sometable.col2) WHERE ROWNUM <= '
                            ':ROWNUM_1) WHERE ora_rn > :ora_rn_1 FOR '
                            'UPDATE')
    def test_long_labels(self):
        # labels longer than max_identifier_length (30 on Oracle) are
        # truncated and numbered; the Oracle dialect additionally omits
        # the AS keyword for table aliases.
        dialect = default.DefaultDialect()
        dialect.max_identifier_length = 30
        ora_dialect = oracle.dialect()
        m = MetaData()
        a_table = Table(
            'thirty_characters_table_xxxxxx',
            m,
            Column('id', Integer, primary_key=True)
        )
        other_table = Table(
            'other_thirty_characters_table_',
            m,
            Column('id', Integer, primary_key=True),
            Column('thirty_characters_table_id',
                   Integer,
                   ForeignKey('thirty_characters_table_xxxxxx.id'),
                   primary_key=True
            )
        )
        anon = a_table.alias()
        # generic dialect: truncated labels, alias rendered with AS
        self.assert_compile(select([other_table,
                            anon]).
                            select_from(
                                other_table.outerjoin(anon)).apply_labels(),
                            'SELECT other_thirty_characters_table_.id '
                            'AS other_thirty_characters__1, '
                            'other_thirty_characters_table_.thirty_char'
                            'acters_table_id AS other_thirty_characters'
                            '__2, thirty_characters_table__1.id AS '
                            'thirty_characters_table__3 FROM '
                            'other_thirty_characters_table_ LEFT OUTER '
                            'JOIN thirty_characters_table_xxxxxx AS '
                            'thirty_characters_table__1 ON '
                            'thirty_characters_table__1.id = '
                            'other_thirty_characters_table_.thirty_char'
                            'acters_table_id', dialect=dialect)
        # Oracle dialect: same labels, but table alias without AS
        self.assert_compile(select([other_table,
                            anon]).select_from(
                                other_table.outerjoin(anon)).apply_labels(),
                            'SELECT other_thirty_characters_table_.id '
                            'AS other_thirty_characters__1, '
                            'other_thirty_characters_table_.thirty_char'
                            'acters_table_id AS other_thirty_characters'
                            '__2, thirty_characters_table__1.id AS '
                            'thirty_characters_table__3 FROM '
                            'other_thirty_characters_table_ LEFT OUTER '
                            'JOIN thirty_characters_table_xxxxxx '
                            'thirty_characters_table__1 ON '
                            'thirty_characters_table__1.id = '
                            'other_thirty_characters_table_.thirty_char'
                            'acters_table_id', dialect=ora_dialect)
    def test_outer_join(self):
        # with use_ansi=False the dialect renders old-style Oracle
        # outer joins using the (+) operator in the WHERE clause.
        table1 = table('mytable',
                       column('myid', Integer),
                       column('name', String),
                       column('description', String),
                       )
        table2 = table(
            'myothertable',
            column('otherid', Integer),
            column('othername', String),
            )
        table3 = table(
            'thirdtable',
            column('userid', Integer),
            column('otherstuff', String),
            )
        query = select([table1, table2], or_(table1.c.name == 'fred',
                       table1.c.myid == 10, table2.c.othername != 'jack'
                       , 'EXISTS (select yay from foo where boo = lar)'
                       ), from_obj=[outerjoin(table1, table2,
                       table1.c.myid == table2.c.otherid)])
        self.assert_compile(query,
                            'SELECT mytable.myid, mytable.name, '
                            'mytable.description, myothertable.otherid,'
                            ' myothertable.othername FROM mytable, '
                            'myothertable WHERE (mytable.name = '
                            ':name_1 OR mytable.myid = :myid_1 OR '
                            'myothertable.othername != :othername_1 OR '
                            'EXISTS (select yay from foo where boo = '
                            'lar)) AND mytable.myid = '
                            'myothertable.otherid(+)',
                            dialect=oracle.OracleDialect(use_ansi=False))
        query = table1.outerjoin(table2, table1.c.myid
                                 == table2.c.otherid).outerjoin(table3,
                table3.c.userid == table2.c.otherid)
        # default (ANSI) rendering of the chained outer joins
        self.assert_compile(query.select(),
                            'SELECT mytable.myid, mytable.name, '
                            'mytable.description, myothertable.otherid,'
                            ' myothertable.othername, '
                            'thirdtable.userid, thirdtable.otherstuff '
                            'FROM mytable LEFT OUTER JOIN myothertable '
                            'ON mytable.myid = myothertable.otherid '
                            'LEFT OUTER JOIN thirdtable ON '
                            'thirdtable.userid = myothertable.otherid')
        # same query with use_ansi=False: (+) markers in WHERE instead
        self.assert_compile(query.select(),
                            'SELECT mytable.myid, mytable.name, '
                            'mytable.description, myothertable.otherid,'
                            ' myothertable.othername, '
                            'thirdtable.userid, thirdtable.otherstuff '
                            'FROM mytable, myothertable, thirdtable '
                            'WHERE thirdtable.userid(+) = '
                            'myothertable.otherid AND mytable.myid = '
                            'myothertable.otherid(+)',
                            dialect=oracle.dialect(use_ansi=False))
        query = table1.join(table2, table1.c.myid
                            == table2.c.otherid).join(table3,
                table3.c.userid == table2.c.otherid)
        # inner joins carry no (+) marker
        self.assert_compile(query.select(),
                            'SELECT mytable.myid, mytable.name, '
                            'mytable.description, myothertable.otherid,'
                            ' myothertable.othername, '
                            'thirdtable.userid, thirdtable.otherstuff '
                            'FROM mytable, myothertable, thirdtable '
                            'WHERE thirdtable.userid = '
                            'myothertable.otherid AND mytable.myid = '
                            'myothertable.otherid',
                            dialect=oracle.dialect(use_ansi=False))
        query = table1.join(table2, table1.c.myid
                            == table2.c.otherid).outerjoin(table3,
                table3.c.userid == table2.c.otherid)
        # non-ANSI outer join combined with ROWNUM limit/offset wrapping
        self.assert_compile(query.select().order_by(table1.c.name).
                            limit(10).offset(5),
                            'SELECT myid, name, description, otherid, '
                            'othername, userid, otherstuff FROM '
                            '(SELECT myid, name, description, otherid, '
                            'othername, userid, otherstuff, ROWNUM AS '
                            'ora_rn FROM (SELECT mytable.myid AS myid, '
                            'mytable.name AS name, mytable.description '
                            'AS description, myothertable.otherid AS '
                            'otherid, myothertable.othername AS '
                            'othername, thirdtable.userid AS userid, '
                            'thirdtable.otherstuff AS otherstuff FROM '
                            'mytable, myothertable, thirdtable WHERE '
                            'thirdtable.userid(+) = '
                            'myothertable.otherid AND mytable.myid = '
                            'myothertable.otherid ORDER BY '
                            'mytable.name) WHERE ROWNUM <= :ROWNUM_1) '
                            'WHERE ora_rn > :ora_rn_1',
                            dialect=oracle.dialect(use_ansi=False))
        subq = select([table1]).select_from(table1.outerjoin(table2,
                table1.c.myid == table2.c.otherid)).alias()
        q = select([table3]).select_from(table3.outerjoin(subq,
                table3.c.userid == subq.c.myid))
        # outer join against an aliased subquery, ANSI form
        self.assert_compile(q,
                            'SELECT thirdtable.userid, '
                            'thirdtable.otherstuff FROM thirdtable '
                            'LEFT OUTER JOIN (SELECT mytable.myid AS '
                            'myid, mytable.name AS name, '
                            'mytable.description AS description FROM '
                            'mytable LEFT OUTER JOIN myothertable ON '
                            'mytable.myid = myothertable.otherid) '
                            'anon_1 ON thirdtable.userid = anon_1.myid'
                            , dialect=oracle.dialect(use_ansi=True))
        # same, non-ANSI: nested (+) markers at both levels
        self.assert_compile(q,
                            'SELECT thirdtable.userid, '
                            'thirdtable.otherstuff FROM thirdtable, '
                            '(SELECT mytable.myid AS myid, '
                            'mytable.name AS name, mytable.description '
                            'AS description FROM mytable, myothertable '
                            'WHERE mytable.myid = myothertable.otherid('
                            '+)) anon_1 WHERE thirdtable.userid = '
                            'anon_1.myid(+)',
                            dialect=oracle.dialect(use_ansi=False))
        # plain WHERE comparison is untouched by use_ansi=False
        q = select([table1.c.name]).where(table1.c.name == 'foo')
        self.assert_compile(q,
                            'SELECT mytable.name FROM mytable WHERE '
                            'mytable.name = :name_1',
                            dialect=oracle.dialect(use_ansi=False))
        # correlated scalar subquery in the column list
        subq = select([table3.c.otherstuff]).where(table3.c.otherstuff
                == table1.c.name).label('bar')
        q = select([table1.c.name, subq])
        self.assert_compile(q,
                            'SELECT mytable.name, (SELECT '
                            'thirdtable.otherstuff FROM thirdtable '
                            'WHERE thirdtable.otherstuff = '
                            'mytable.name) AS bar FROM mytable',
                            dialect=oracle.dialect(use_ansi=False))
    def test_alias_outer_join(self):
        # outer join against an aliased table; Oracle renders the alias
        # without the AS keyword
        address_types = table('address_types', column('id'),
                              column('name'))
        addresses = table('addresses', column('id'), column('user_id'),
                          column('address_type_id'),
                          column('email_address'))
        at_alias = address_types.alias()
        s = select([at_alias,
                   addresses]).select_from(addresses.outerjoin(at_alias,
                addresses.c.address_type_id
                == at_alias.c.id)).where(addresses.c.user_id
                == 7).order_by(addresses.c.id, address_types.c.id)
        self.assert_compile(s,
                            'SELECT address_types_1.id, '
                            'address_types_1.name, addresses.id, '
                            'addresses.user_id, addresses.address_type_'
                            'id, addresses.email_address FROM '
                            'addresses LEFT OUTER JOIN address_types '
                            'address_types_1 ON addresses.address_type_'
                            'id = address_types_1.id WHERE '
                            'addresses.user_id = :user_id_1 ORDER BY '
                            'addresses.id, address_types.id')
    def test_compound(self):
        # Oracle uses MINUS instead of EXCEPT
        t1 = table('t1', column('c1'), column('c2'), column('c3'))
        t2 = table('t2', column('c1'), column('c2'), column('c3'))
        self.assert_compile(union(t1.select(), t2.select()),
                            'SELECT t1.c1, t1.c2, t1.c3 FROM t1 UNION '
                            'SELECT t2.c1, t2.c2, t2.c3 FROM t2')
        self.assert_compile(except_(t1.select(), t2.select()),
                            'SELECT t1.c1, t1.c2, t1.c3 FROM t1 MINUS '
                            'SELECT t2.c1, t2.c2, t2.c3 FROM t2')
class CompatFlagsTest(TestBase, AssertsCompiledSQL):
    """Server-version-dependent dialect flags: Oracle 8 downgrades
    char-length/NCHAR/ANSI-join support, Oracle 10 keeps them on."""
    __only_on__ = 'oracle'
    def test_ora8_flags(self):
        def fake_version_info(self):
            # simulate an Oracle 8 server
            return (8, 2, 5)
        dialect = oracle.dialect()
        dialect._get_server_version_info = fake_version_info
        # before connect, assume modern DB
        assert dialect._supports_char_length
        assert dialect._supports_nchar
        assert dialect.use_ansi
        # initialize() consults the (faked) server version and
        # downgrades the capability flags
        dialect.initialize(testing.db.connect())
        assert not dialect._supports_char_length
        assert not dialect._supports_nchar
        assert not dialect.use_ansi
        # legacy DDL: no CHAR length qualifier, no national types
        for type_, expected in [(String(50), "VARCHAR(50)"),
                                (Unicode(50), "VARCHAR(50)"),
                                (UnicodeText(), "CLOB")]:
            self.assert_compile(type_, expected, dialect=dialect)
    def test_default_flags(self):
        """test with no initialization or server version info"""
        dialect = oracle.dialect()
        assert dialect._supports_char_length
        assert dialect._supports_nchar
        assert dialect.use_ansi
        # modern DDL forms are the default
        for type_, expected in [(String(50), "VARCHAR(50 CHAR)"),
                                (Unicode(50), "NVARCHAR2(50)"),
                                (UnicodeText(), "NCLOB")]:
            self.assert_compile(type_, expected, dialect=dialect)
    def test_ora10_flags(self):
        def fake_version_info(self):
            # simulate an Oracle 10 server
            return (10, 2, 5)
        dialect = oracle.dialect()
        dialect._get_server_version_info = fake_version_info
        dialect.initialize(testing.db.connect())
        # a modern server keeps all capabilities enabled
        assert dialect._supports_char_length
        assert dialect._supports_nchar
        assert dialect.use_ansi
        for type_, expected in [(String(50), "VARCHAR(50 CHAR)"),
                                (Unicode(50), "NVARCHAR2(50)"),
                                (UnicodeText(), "NCLOB")]:
            self.assert_compile(type_, expected, dialect=dialect)
class MultiSchemaTest(TestBase, AssertsCompiledSQL):
    """Cross-schema table creation, reflection and synonym resolution
    against a secondary 'test_schema' owner."""
    __only_on__ = 'oracle'
    @classmethod
    def setup_class(cls):
        # currently assuming full DBA privs for the user.
        # don't really know how else to go here unless
        # we connect as the other user.
        # NOTE: statements are split on ';' and executed one at a time
        # since execute() can't run a multi-statement script.
        for stmt in """
create table test_schema.parent(
    id integer primary key,
    data varchar2(50)
);
create table test_schema.child(
    id integer primary key,
    data varchar2(50),
    parent_id integer references test_schema.parent(id)
);
create synonym test_schema.ptable for test_schema.parent;
create synonym test_schema.ctable for test_schema.child;
-- can't make a ref from local schema to the
-- remote schema's table without this,
-- *and* cant give yourself a grant !
-- so we give it to public.  ideas welcome.
grant references on test_schema.parent to public;
grant references on test_schema.child to public;
""".split(";"):
            if stmt.strip():
                testing.db.execute(stmt)
    @classmethod
    def teardown_class(cls):
        # drop in dependency order: child before parent
        for stmt in """
drop table test_schema.child;
drop table test_schema.parent;
drop synonym test_schema.ctable;
drop synonym test_schema.ptable;
""".split(";"):
            if stmt.strip():
                testing.db.execute(stmt)
    def test_create_same_names_explicit_schema(self):
        # tables named like the remote ones, but explicitly placed in
        # the default schema, must not collide
        schema = testing.db.dialect.default_schema_name
        meta = MetaData(testing.db)
        parent = Table('parent', meta,
            Column('pid', Integer, primary_key=True),
            schema=schema
        )
        child = Table('child', meta,
            Column('cid', Integer, primary_key=True),
            Column('pid', Integer, ForeignKey('%s.parent.pid' % schema)),
            schema=schema
        )
        meta.create_all()
        try:
            parent.insert().execute({'pid':1})
            child.insert().execute({'cid':1, 'pid':1})
            eq_(child.select().execute().fetchall(), [(1, 1)])
        finally:
            meta.drop_all()
    def test_create_same_names_implicit_schema(self):
        # same as above but with no schema given at all
        meta = MetaData(testing.db)
        parent = Table('parent', meta,
            Column('pid', Integer, primary_key=True),
        )
        child = Table('child', meta,
            Column('cid', Integer, primary_key=True),
            Column('pid', Integer, ForeignKey('parent.pid')),
        )
        meta.create_all()
        try:
            parent.insert().execute({'pid':1})
            child.insert().execute({'cid':1, 'pid':1})
            eq_(child.select().execute().fetchall(), [(1, 1)])
        finally:
            meta.drop_all()
    def test_reflect_alt_owner_explicit(self):
        # reflect the remote tables by naming their schema explicitly
        meta = MetaData(testing.db)
        parent = Table('parent', meta, autoload=True, schema='test_schema')
        child = Table('child', meta, autoload=True, schema='test_schema')
        self.assert_compile(parent.join(child),
                "test_schema.parent JOIN test_schema.child ON "
                "test_schema.parent.id = test_schema.child.parent_id")
        select([parent, child]).\
                select_from(parent.join(child)).\
                execute().fetchall()
    def test_reflect_local_to_remote(self):
        # a local table whose FK targets the remote schema; reflecting
        # it should pull in test_schema.parent automatically
        testing.db.execute('CREATE TABLE localtable (id INTEGER '
                           'PRIMARY KEY, parent_id INTEGER REFERENCES '
                           'test_schema.parent(id))')
        try:
            meta = MetaData(testing.db)
            lcl = Table('localtable', meta, autoload=True)
            parent = meta.tables['test_schema.parent']
            self.assert_compile(parent.join(lcl),
                                'test_schema.parent JOIN localtable ON '
                                'test_schema.parent.id = '
                                'localtable.parent_id')
            select([parent,
                   lcl]).select_from(parent.join(lcl)).execute().fetchall()
        finally:
            testing.db.execute('DROP TABLE localtable')
    def test_reflect_alt_owner_implicit(self):
        # FK target schema is picked up from the reflected metadata
        meta = MetaData(testing.db)
        parent = Table('parent', meta, autoload=True,
                       schema='test_schema')
        child = Table('child', meta, autoload=True, schema='test_schema'
                      )
        self.assert_compile(parent.join(child),
                            'test_schema.parent JOIN test_schema.child '
                            'ON test_schema.parent.id = '
                            'test_schema.child.parent_id')
        select([parent,
               child]).select_from(parent.join(child)).execute().fetchall()
    def test_reflect_alt_owner_synonyms(self):
        # FK via the remote synonym ptable; oracle_resolve_synonyms
        # must chase it to the underlying table
        testing.db.execute('CREATE TABLE localtable (id INTEGER '
                           'PRIMARY KEY, parent_id INTEGER REFERENCES '
                           'test_schema.ptable(id))')
        try:
            meta = MetaData(testing.db)
            lcl = Table('localtable', meta, autoload=True,
                        oracle_resolve_synonyms=True)
            parent = meta.tables['test_schema.ptable']
            self.assert_compile(parent.join(lcl),
                                'test_schema.ptable JOIN localtable ON '
                                'test_schema.ptable.id = '
                                'localtable.parent_id')
            select([parent,
                   lcl]).select_from(parent.join(lcl)).execute().fetchall()
        finally:
            testing.db.execute('DROP TABLE localtable')
    def test_reflect_remote_synonyms(self):
        # reflect both remote synonyms directly
        meta = MetaData(testing.db)
        parent = Table('ptable', meta, autoload=True,
                       schema='test_schema',
                       oracle_resolve_synonyms=True)
        child = Table('ctable', meta, autoload=True,
                      schema='test_schema',
                      oracle_resolve_synonyms=True)
        self.assert_compile(parent.join(child),
                            'test_schema.ptable JOIN '
                            'test_schema.ctable ON test_schema.ptable.i'
                            'd = test_schema.ctable.parent_id')
        select([parent,
               child]).select_from(parent.join(child)).execute().fetchall()
class ConstraintTest(TestBase):
    """Oracle has no ON UPDATE CASCADE; emitting such a foreign key in
    DDL must raise SAWarning rather than produce invalid SQL."""
    __only_on__ = 'oracle'
    def setup(self):
        # referenced table shared by the tests in this class
        global metadata
        metadata = MetaData(testing.db)
        referenced = Table('foo', metadata,
                           Column('id', Integer, primary_key=True))
        referenced.create(checkfirst=True)
    def teardown(self):
        metadata.drop_all()
    def test_oracle_has_no_on_update_cascade(self):
        # column-level ForeignKey carrying onupdate warns at CREATE time
        fk_col = Column('foo_id', Integer,
                        ForeignKey('foo.id', onupdate='CASCADE'))
        bar = Table('bar', metadata,
                    Column('id', Integer, primary_key=True), fk_col)
        assert_raises(exc.SAWarning, bar.create)
        # same warning for a table-level ForeignKeyConstraint
        fk_constraint = ForeignKeyConstraint(['foo_id'], ['foo.id'],
                                             onupdate='CASCADE')
        bat = Table('bat', metadata,
                    Column('id', Integer, primary_key=True),
                    Column('foo_id', Integer), fk_constraint)
        assert_raises(exc.SAWarning, bat.create)
class TypesTest(TestBase, AssertsCompiledSQL):
    """Round trips and reflection of Oracle-specific types: CHAR padding,
    RAW, INTERVAL, NUMBER precision/scale heuristics, NVARCHAR, LOBs."""
    __only_on__ = 'oracle'
    __dialect__ = oracle.OracleDialect()
    def test_no_clobs_for_string_params(self):
        """test that simple string params get a DBAPI type of
        VARCHAR, not CLOB. This is to prevent setinputsizes
        from setting up cx_oracle.CLOBs on
        string-based bind params [ticket:793]."""
        class FakeDBAPI(object):
            # attribute access returns the attribute name itself, so the
            # DBAPI type constant comes back as a comparable string
            def __getattr__(self, attr):
                return attr
        dialect = oracle.OracleDialect()
        dbapi = FakeDBAPI()
        b = bindparam("foo", "hello world!")
        assert b.type.dialect_impl(dialect).get_dbapi_type(dbapi) == 'STRING'
        b = bindparam("foo", u"hello world!")
        assert b.type.dialect_impl(dialect).get_dbapi_type(dbapi) == 'STRING'
    @testing.fails_on('+zxjdbc', 'zxjdbc lacks the FIXED_CHAR dbapi type')
    def test_fixed_char(self):
        # CHAR(30) is blank-padded to its full width on fetch, and bind
        # comparisons must use the FIXED_CHAR DBAPI type to match
        m = MetaData(testing.db)
        t = Table('t1', m,
            Column('id', Integer, primary_key=True),
            Column('data', CHAR(30), nullable=False)
        )
        t.create()
        try:
            t.insert().execute(
                dict(id=1, data="value 1"),
                dict(id=2, data="value 2"),
                dict(id=3, data="value 3")
            )
            eq_(t.select().where(t.c.data=='value 2').execute().fetchall(),
                [(2, 'value 2                       ')]
                )
            # same assertions against the reflected table
            m2 = MetaData(testing.db)
            t2 = Table('t1', m2, autoload=True)
            assert type(t2.c.data.type) is CHAR
            eq_(t2.select().where(t2.c.data=='value 2').execute().fetchall(),
                [(2, 'value 2                       ')]
                )
        finally:
            t.drop()
    def test_type_adapt(self):
        # each generic type must adapt to the expected cx_oracle
        # implementation type
        dialect = cx_oracle.dialect()
        for start, test in [
            (Date(), cx_oracle._OracleDate),
            (oracle.OracleRaw(), cx_oracle._OracleRaw),
            (String(), String),
            (VARCHAR(), cx_oracle._OracleString),
            (DATE(), DATE),
            (String(50), cx_oracle._OracleString),
            (Unicode(), cx_oracle._OracleNVarChar),
            (Text(), cx_oracle._OracleText),
            (UnicodeText(), cx_oracle._OracleUnicodeText),
            (NCHAR(), cx_oracle._OracleNVarChar),
            (oracle.RAW(50), cx_oracle._OracleRaw),
        ]:
            assert isinstance(start.dialect_impl(dialect), test), \
                    "wanted %r got %r" % (test, start.dialect_impl(dialect))
    @testing.requires.returning
    def test_int_not_float(self):
        # Integer values must come back as int, both from RETURNING and
        # from a plain SELECT (cx_oracle returns NUMBER as float by
        # default)
        m = MetaData(testing.db)
        t1 = Table('t1', m, Column('foo', Integer))
        t1.create()
        try:
            r = t1.insert().values(foo=5).returning(t1.c.foo).execute()
            x = r.scalar()
            assert x == 5
            assert isinstance(x, int)
            x = t1.select().scalar()
            assert x == 5
            assert isinstance(x, int)
        finally:
            t1.drop()
    @testing.provide_metadata
    def test_rowid(self):
        t = Table('t1', metadata,
            Column('x', Integer)
        )
        t.create()
        t.insert().execute(x=5)
        s1 = select([t])
        s2 = select([column('rowid')]).select_from(s1)
        rowid = s2.scalar()
        # the ROWID type is not really needed here,
        # as cx_oracle just treats it as a string,
        # but we want to make sure the ROWID works...
        rowid_col= column('rowid', oracle.ROWID)
        s3 = select([t.c.x, rowid_col]).\
                    where(rowid_col == cast(rowid, oracle.ROWID))
        eq_(s3.select().execute().fetchall(),
        [(5, rowid)]
        )
    @testing.fails_on('+zxjdbc',
                      'Not yet known how to pass values of the '
                      'INTERVAL type')
    def test_interval(self):
        # DDL rendering for the four precision combinations, then an
        # actual timedelta round trip
        for type_, expected in [(oracle.INTERVAL(),
                                'INTERVAL DAY TO SECOND'),
                                (oracle.INTERVAL(day_precision=3),
                                'INTERVAL DAY(3) TO SECOND'),
                                (oracle.INTERVAL(second_precision=5),
                                'INTERVAL DAY TO SECOND(5)'),
                                (oracle.INTERVAL(day_precision=2,
                                second_precision=5),
                                'INTERVAL DAY(2) TO SECOND(5)')]:
            self.assert_compile(type_, expected)
        metadata = MetaData(testing.db)
        interval_table = Table('intervaltable', metadata, Column('id',
                               Integer, primary_key=True,
                               test_needs_autoincrement=True),
                               Column('day_interval',
                               oracle.INTERVAL(day_precision=3)))
        metadata.create_all()
        try:
            interval_table.insert().\
                execute(day_interval=datetime.timedelta(days=35,
                        seconds=5743))
            row = interval_table.select().execute().first()
            eq_(row['day_interval'], datetime.timedelta(days=35,
                seconds=5743))
        finally:
            metadata.drop_all()
    def test_numerics(self):
        # each numeric column flavor must round-trip with the expected
        # Python type, both against the declared and the reflected table
        m = MetaData(testing.db)
        t1 = Table('t1', m,
            Column('intcol', Integer),
            Column('numericcol', Numeric(precision=9, scale=2)),
            Column('floatcol1', Float()),
            Column('floatcol2', FLOAT()),
            Column('doubleprec', oracle.DOUBLE_PRECISION),
            Column('numbercol1', oracle.NUMBER(9)),
            Column('numbercol2', oracle.NUMBER(9, 3)),
            Column('numbercol3', oracle.NUMBER),
        )
        t1.create()
        try:
            t1.insert().execute(
                intcol=1,
                numericcol=5.2,
                floatcol1=6.5,
                floatcol2 = 8.5,
                doubleprec = 9.5,
                numbercol1=12,
                numbercol2=14.85,
                numbercol3=15.76
                )
            m2 = MetaData(testing.db)
            t2 = Table('t1', m2, autoload=True)
            for row in (
                t1.select().execute().first(),
                t2.select().execute().first()
            ):
                for i, (val, type_) in enumerate((
                    (1, int),
                    (Decimal("5.2"), Decimal),
                    (6.5, float),
                    (8.5, float),
                    (9.5, float),
                    (12, int),
                    (Decimal("14.85"), Decimal),
                    (15.76, float),
                )):
                    eq_(row[i], val)
                    assert isinstance(row[i], type_), '%r is not %r' \
                        % (row[i], type_)
        finally:
            t1.drop()
    @testing.provide_metadata
    def test_numerics_broken_inspection(self):
        """Numeric scenarios where Oracle type info is 'broken',
        returning us precision, scale of the form (0, 0) or (0, -127).
        We convert to Decimal and let int()/float() processors take over.
        """
        # this test requires cx_oracle 5
        foo = Table('foo', metadata,
            Column('idata', Integer),
            Column('ndata', Numeric(20, 2)),
            Column('ndata2', Numeric(20, 2)),
            Column('nidata', Numeric(5, 0)),
            Column('fdata', Float()),
        )
        foo.create()
        foo.insert().execute(
            {'idata':5, 'ndata':Decimal("45.6"), 'ndata2':Decimal("45.0"),
             'nidata':Decimal('53'), 'fdata':45.68392},
        )
        # direct textual SELECT: cursor metadata still carries usable
        # precision/scale, so types come back as declared
        stmt = """
        SELECT
            idata,
            ndata,
            ndata2,
            nidata,
            fdata
        FROM foo
        """
        row = testing.db.execute(stmt).fetchall()[0]
        eq_([type(x) for x in row], [int, Decimal, Decimal, int, float])
        eq_(
            row,
            (5, Decimal('45.6'), Decimal('45'), 53, 45.683920000000001)
        )
        # with a nested subquery,
        # both Numeric values that don't have decimal places, regardless
        # of their originating type, come back as ints with no useful
        # typing information beyond "numeric".  So native handler
        # must convert to int.
        # this means our Decimal converters need to run no matter what.
        # totally sucks.
        stmt = """
        SELECT
            (SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
            (SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2)) FROM DUAL)
            AS ndata,
            (SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2)) FROM DUAL)
            AS ndata2,
            (SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0)) FROM DUAL)
            AS nidata,
            (SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL) AS fdata
        FROM dual
        """
        row = testing.db.execute(stmt).fetchall()[0]
        eq_([type(x) for x in row], [int, Decimal, int, int, Decimal])
        eq_(
            row,
            (5, Decimal('45.6'), 45, 53, Decimal('45.68392'))
        )
        # supplying an explicit typemap restores the declared types
        row = testing.db.execute(text(stmt,
                                typemap={
                                        'idata':Integer(),
                                        'ndata':Numeric(20, 2),
                                        'ndata2':Numeric(20, 2),
                                        'nidata':Numeric(5, 0),
                                        'fdata':Float()
                                })).fetchall()[0]
        eq_([type(x) for x in row], [int, Decimal, Decimal, Decimal, float])
        eq_(row,
            (5, Decimal('45.6'), Decimal('45'), Decimal('53'), 45.683920000000001)
        )
        # same scenario one level deeper, wrapped in a labeled subquery
        stmt = """
        SELECT
                anon_1.idata AS anon_1_idata,
                anon_1.ndata AS anon_1_ndata,
                anon_1.ndata2 AS anon_1_ndata2,
                anon_1.nidata AS anon_1_nidata,
                anon_1.fdata AS anon_1_fdata
        FROM (SELECT idata, ndata, ndata2, nidata, fdata
        FROM (
            SELECT
                (SELECT (SELECT idata FROM foo) FROM DUAL) AS idata,
                (SELECT CAST((SELECT ndata FROM foo) AS NUMERIC(20, 2))
                FROM DUAL) AS ndata,
                (SELECT CAST((SELECT ndata2 FROM foo) AS NUMERIC(20, 2))
                FROM DUAL) AS ndata2,
                (SELECT CAST((SELECT nidata FROM foo) AS NUMERIC(5, 0))
                FROM DUAL) AS nidata,
                (SELECT CAST((SELECT fdata FROM foo) AS FLOAT) FROM DUAL)
                AS fdata
            FROM dual
        )
        WHERE ROWNUM >= 0) anon_1
        """
        row =testing.db.execute(stmt).fetchall()[0]
        eq_([type(x) for x in row], [int, Decimal, int, int, Decimal])
        eq_(row, (5, Decimal('45.6'), 45, 53, Decimal('45.68392')))
        row = testing.db.execute(text(stmt,
                                typemap={
                                        'anon_1_idata':Integer(),
                                        'anon_1_ndata':Numeric(20, 2),
                                        'anon_1_ndata2':Numeric(20, 2),
                                        'anon_1_nidata':Numeric(5, 0),
                                        'anon_1_fdata':Float()
                                })).fetchall()[0]
        eq_([type(x) for x in row], [int, Decimal, Decimal, Decimal, float])
        eq_(row,
            (5, Decimal('45.6'), Decimal('45'), Decimal('53'), 45.683920000000001)
        )
        # asdecimal flips float/Decimal in both directions
        row = testing.db.execute(text(stmt,
                                typemap={
                                        'anon_1_idata':Integer(),
                                        'anon_1_ndata':Numeric(20, 2, asdecimal=False),
                                        'anon_1_ndata2':Numeric(20, 2, asdecimal=False),
                                        'anon_1_nidata':Numeric(5, 0, asdecimal=False),
                                        'anon_1_fdata':Float(asdecimal=True)
                                })).fetchall()[0]
        eq_([type(x) for x in row], [int, float, float, float, Decimal])
        eq_(row,
            (5, 45.6, 45, 53, Decimal('45.68392'))
        )
    def test_reflect_dates(self):
        # DATE/TIMESTAMP[(tz)]/INTERVAL must reflect to the right types
        # with the timezone flag preserved
        metadata = MetaData(testing.db)
        Table(
            "date_types", metadata,
            Column('d1', DATE),
            Column('d2', TIMESTAMP),
            Column('d3', TIMESTAMP(timezone=True)),
            Column('d4', oracle.INTERVAL(second_precision=5)),
        )
        metadata.create_all()
        try:
            m = MetaData(testing.db)
            t1 = Table(
                "date_types", m,
                autoload=True)
            assert isinstance(t1.c.d1.type, DATE)
            assert isinstance(t1.c.d2.type, TIMESTAMP)
            assert not t1.c.d2.type.timezone
            assert isinstance(t1.c.d3.type, TIMESTAMP)
            assert t1.c.d3.type.timezone
            assert isinstance(t1.c.d4.type, oracle.INTERVAL)
        finally:
            metadata.drop_all()
    def test_reflect_raw(self):
        # reflecting the ALL_TYPES data-dictionary view (contains RAW
        # columns) and fetching all rows must not raise
        types_table = Table('all_types', MetaData(testing.db),
            Column('owner', String(30), primary_key=True),
            Column('type_name', String(30), primary_key=True),
            autoload=True, oracle_resolve_synonyms=True
            )
        for row in types_table.select().execute().fetchall():
            [row[k] for k in row.keys()]
    def test_reflect_nvarchar(self):
        # NVARCHAR reflects as NVARCHAR and returns unicode natively
        metadata = MetaData(testing.db)
        t = Table('t', metadata,
            Column('data', sqltypes.NVARCHAR(255))
        )
        metadata.create_all()
        try:
            m2 = MetaData(testing.db)
            t2 = Table('t', m2, autoload=True)
            assert isinstance(t2.c.data.type, sqltypes.NVARCHAR)
            if testing.against('oracle+cx_oracle'):
                # nvarchar returns unicode natively.  cx_oracle
                # _OracleNVarChar type should be at play here.
                assert isinstance(
                    t2.c.data.type.dialect_impl(testing.db.dialect),
                    cx_oracle._OracleNVarChar)
            data = u'm’a réveillé.'
            t2.insert().execute(data=data)
            res = t2.select().execute().first()['data']
            eq_(res, data)
            assert isinstance(res, unicode)
        finally:
            metadata.drop_all()
    def test_char_length(self):
        # VARCHAR gets a CHAR length qualifier on modern servers but a
        # bare length on Oracle 8; reflected lengths are in characters
        self.assert_compile(VARCHAR(50),"VARCHAR(50 CHAR)")
        oracle8dialect = oracle.dialect()
        oracle8dialect.server_version_info = (8, 0)
        self.assert_compile(VARCHAR(50),"VARCHAR(50)",dialect=oracle8dialect)
        self.assert_compile(NVARCHAR(50),"NVARCHAR2(50)")
        self.assert_compile(CHAR(50),"CHAR(50)")
        metadata = MetaData(testing.db)
        t1 = Table('t1', metadata,
              Column("c1", VARCHAR(50)),
              Column("c2", NVARCHAR(250)),
              Column("c3", CHAR(200))
        )
        t1.create()
        try:
            m2 = MetaData(testing.db)
            t2 = Table('t1', m2, autoload=True)
            eq_(t2.c.c1.type.length, 50)
            eq_(t2.c.c2.type.length, 250)
            eq_(t2.c.c3.type.length, 200)
        finally:
            t1.drop()
    def test_longstring(self):
        # a NUMERIC(22) primary key reflected from raw DDL accepts a
        # float id on insert and returns an int
        metadata = MetaData(testing.db)
        testing.db.execute("""
        CREATE TABLE Z_TEST
        (
          ID        NUMERIC(22) PRIMARY KEY,
          ADD_USER  VARCHAR2(20)  NOT NULL
        )
        """)
        try:
            t = Table("z_test", metadata, autoload=True)
            t.insert().execute(id=1.0, add_user='foobar')
            assert t.select().execute().fetchall() == [(1, 'foobar')]
        finally:
            testing.db.execute("DROP TABLE Z_TEST")
    @testing.fails_on('+zxjdbc', 'auto_convert_lobs not applicable')
    def test_raw_lobs(self):
        # with auto_convert_lobs off, LOB columns come back as raw
        # cx_oracle LOB objects that must be .read() explicitly
        engine = testing_engine(options=dict(auto_convert_lobs=False))
        metadata = MetaData()
        t = Table("z_test", metadata, Column('id', Integer, primary_key=True),
                 Column('data', Text), Column('bindata', LargeBinary))
        t.create(engine)
        try:
            engine.execute(t.insert(), id=1,
                                        data='this is text',
                                        bindata='this is binary')
            row = engine.execute(t.select()).first()
            eq_(row['data'].read(), 'this is text')
            eq_(row['bindata'].read(), 'this is binary')
        finally:
            t.drop(engine)
class DontReflectIOTTest(TestBase):
    """test that index overflow tables aren't included in
    table_names."""
    __only_on__ = 'oracle'
    def setup(self):
        # an index-organized table; its hidden overflow segment must not
        # appear as a separate table during reflection
        testing.db.execute("""
        CREATE TABLE admin_docindex(
                token char(20),
                doc_id NUMBER,
                token_frequency NUMBER,
                token_offsets VARCHAR2(2000),
                CONSTRAINT pk_admin_docindex PRIMARY KEY (token, doc_id))
            ORGANIZATION INDEX
            TABLESPACE users
            PCTTHRESHOLD 20
            OVERFLOW TABLESPACE users
        """)
    def teardown(self):
        testing.db.execute("drop table admin_docindex")
    def test_reflect_all(self):
        # reflect() must see only the IOT itself, not its overflow table
        m = MetaData(testing.db)
        m.reflect()
        eq_(
            set(t.name for t in m.tables.values()),
            set(['admin_docindex'])
        )
class BufferedColumnTest(TestBase, AssertsCompiledSQL):
    """LargeBinary round trips with cx_oracle's buffered-column result
    handling, including arraysize=1 fetches."""
    __only_on__ = 'oracle'
    @classmethod
    def setup_class(cls):
        global binary_table, stream, meta
        meta = MetaData(testing.db)
        binary_table = Table('binary_table', meta,
            Column('id', Integer, primary_key=True),
            Column('data', LargeBinary)
        )
        meta.create_all()
        stream = os.path.join(
                        os.path.dirname(__file__), "..",
                        'binary_data_one.dat')
        # open in binary mode (the fixture is binary data) and close the
        # handle explicitly; the previous file() call leaked it and
        # file() is gone in Python 3.
        f = open(stream, 'rb')
        try:
            stream = f.read(12000)
        finally:
            f.close()
        for i in range(1, 11):
            binary_table.insert().execute(id=i, data=stream)
    @classmethod
    def teardown_class(cls):
        meta.drop_all()
    def test_fetch(self):
        # default arraysize: all ten rows round-trip intact
        result = binary_table.select().execute().fetchall()
        eq_(result, [(i, stream) for i in range(1, 11)])
    @testing.fails_on('+zxjdbc', 'FIXME: zxjdbc should support this')
    def test_fetch_single_arraysize(self):
        # arraysize=1 forces row-at-a-time LOB buffering
        eng = testing_engine(options={'arraysize':1})
        result = eng.execute(binary_table.select()).fetchall()
        eq_(result, [(i, stream) for i in range(1, 11)])
class UnsupportedIndexReflectTest(TestBase):
    """Reflecting a table that carries a functional index must not fail,
    even though the UPPER(DATA) expression can't be represented."""
    __only_on__ = 'oracle'
    def setup(self):
        global metadata
        metadata = MetaData(testing.db)
        # the Table is registered on `metadata` by construction; the
        # local names previously bound here were unused
        Table('test_index_reflect', metadata,
              Column('data', String(20), primary_key=True)
              )
        metadata.create_all()
    def teardown(self):
        metadata.drop_all()
    def test_reflect_functional_index(self):
        testing.db.execute('CREATE INDEX DATA_IDX ON '
                           'TEST_INDEX_REFLECT (UPPER(DATA))')
        m2 = MetaData(testing.db)
        # success criterion is simply that autoload does not raise
        Table('test_index_reflect', m2, autoload=True)
class RoundTripIndexTest(TestBase):
    """Create indexes/constraints, reflect them, re-create from the
    reflection, and verify the second reflection matches the original."""
    __only_on__ = 'oracle'
    def test_basic(self):
        engine = testing.db
        metadata = MetaData(engine)
        table=Table("sometable", metadata,
                    Column("id_a", Unicode(255), primary_key=True),
                    Column("id_b", Unicode(255), primary_key=True, unique=True),
                    Column("group", Unicode(255), primary_key=True),
                    Column("col", Unicode(255)),
                    UniqueConstraint('col','group'),
            )
        # "group" is a keyword, so lower case
        normalind = Index('tableind', table.c.id_b, table.c.group)
        # create
        metadata.create_all()
        try:
            # round trip, create from reflection
            mirror = MetaData(engine)
            mirror.reflect()
            metadata.drop_all()
            mirror.create_all()
            # inspect the reflected creation
            inspect = MetaData(engine)
            inspect.reflect()
            def obj_definition(obj):
                # canonical key: (kind, column names, uniqueness)
                return obj.__class__, tuple([c.name for c in
                        obj.columns]), getattr(obj, 'unique', None)
            # find what the primary k constraint name should be
            primaryconsname = engine.execute(
               text("""SELECT constraint_name
                   FROM all_constraints
                   WHERE table_name = :table_name
                   AND owner = :owner
                   AND constraint_type = 'P' """),
               table_name=table.name.upper(),
               owner=engine.url.username.upper()).fetchall()[0][0]
            reflectedtable = inspect.tables[table.name]
            # make a dictionary of the reflected objects:
            reflected = dict([(obj_definition(i), i) for i in
                             reflectedtable.indexes
                             | reflectedtable.constraints])
            # assert we got primary key constraint and its name, Error
            # if not in dict
            assert reflected[(PrimaryKeyConstraint, ('id_a', 'id_b',
                             'group'), None)].name.upper() \
                == primaryconsname.upper()
            # Error if not in dict
            assert reflected[(Index, ('id_b', 'group'), False)].name \
                == normalind.name
            # unique=True constraints reflect back as unique indexes
            assert (Index, ('id_b', ), True) in reflected
            assert (Index, ('col', 'group'), True) in reflected
            assert len(reflectedtable.constraints) == 1
            assert len(reflectedtable.indexes) == 3
        finally:
            metadata.drop_all()
class SequenceTest(TestBase, AssertsCompiledSQL):
    """Quoting/qualification of sequence names by the Oracle
    identifier preparer."""
    def test_basic(self):
        seq = Sequence('my_seq_no_schema')
        dialect = oracle.OracleDialect()
        # No schema: rendered bare.
        assert dialect.identifier_preparer.format_sequence(seq) \
            == 'my_seq_no_schema'
        # Schema-qualified, all lower case: dotted, unquoted.
        seq = Sequence('my_seq', schema='some_schema')
        assert dialect.identifier_preparer.format_sequence(seq) \
            == 'some_schema.my_seq'
        # Mixed case forces both parts to be quoted.
        seq = Sequence('My_Seq', schema='Some_Schema')
        assert dialect.identifier_preparer.format_sequence(seq) \
            == '"Some_Schema"."My_Seq"'
class ExecuteTest(TestBase):
    """Miscellaneous Oracle execution behaviors: comment-prefixed SQL,
    sequence value types, and LIMIT/OFFSET combined with FOR UPDATE."""
    __only_on__ = 'oracle'
    def test_basic(self):
        # A leading optimizer-hint style comment must not confuse
        # statement execution.
        eq_(testing.db.execute('/*+ this is a comment */ SELECT 1 FROM '
                               'DUAL').fetchall(), [(1, )])
    def test_sequences_are_integers(self):
        seq = Sequence('foo_seq')
        seq.create(testing.db)
        try:
            # The first NEXTVAL is 1 and must come back as a plain int,
            # not a Decimal/float.
            val = testing.db.execute(seq)
            eq_(val, 1)
            assert type(val) is int
        finally:
            seq.drop(testing.db)
    @testing.provide_metadata
    def test_limit_offset_for_update(self):
        # oracle can't actually do the ROWNUM thing with FOR UPDATE
        # very well.
        t = Table('t1', metadata, Column('id', Integer, primary_key=True),
                  Column('data', Integer)
        )
        metadata.create_all()
        t.insert().execute(
            {'id':1, 'data':1},
            {'id':2, 'data':7},
            {'id':3, 'data':12},
            {'id':4, 'data':15},
            {'id':5, 'data':32},
        )
        # here, we can't use ORDER BY.
        eq_(
            t.select(for_update=True).limit(2).execute().fetchall(),
            [(1, 1),
             (2, 7)]
        )
        # here, its impossible. But we'd prefer it to raise ORA-02014
        # instead of issuing a syntax error.
        assert_raises_message(
            exc.DatabaseError,
            "ORA-02014",
            t.select(for_update=True).limit(2).offset(3).execute
        )
|
{
"content_hash": "2eed319981c51a5ecb2f67f6d33fbb81",
"timestamp": "",
"source": "github",
"line_count": 1265,
"max_line_length": 105,
"avg_line_length": 40.54229249011858,
"alnum_prop": 0.49580782279764457,
"repo_name": "dbbhattacharya/kitsune",
"id": "29d18b988c6a8ac6c899f63f76c251488419491f",
"size": "51290",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vendor/packages/sqlalchemy/test/dialect/test_oracle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
}
|
import argparse
import json
import sys
# CLI validation: usage errors must exit non-zero so shell scripts and
# CI can detect the failure (the original exited with status 0 here).
if len(sys.argv) < 3:
    print("syntax: "+sys.argv[0]+" INFILE OUTFILE")
    sys.exit(1)

# Round-trip the genesis JSON: parse, then re-emit pretty-printed with
# sorted keys so the output is canonical and diffable.
with open(sys.argv[1], "r") as infile:
    genesis = json.load(infile)

with open(sys.argv[2], "w") as outfile:
    json.dump(genesis, outfile, indent=2, sort_keys=True)
|
{
"content_hash": "28b49824c9d4794acd684c28a8c53478",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 57,
"avg_line_length": 25,
"alnum_prop": 0.6633333333333333,
"repo_name": "FinanceChainFoundation/FinChain-core",
"id": "bb518fc52ba4e8eb35b80ac10f309cebdbd5c27d",
"size": "324",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "programs/genesis_util/python_format.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3059032"
},
{
"name": "CMake",
"bytes": "31973"
},
{
"name": "Dockerfile",
"bytes": "608"
},
{
"name": "Perl",
"bytes": "4937"
},
{
"name": "Python",
"bytes": "35817"
},
{
"name": "Shell",
"bytes": "413"
}
],
"symlink_target": ""
}
|
"""Job related commands"""
# pylint: disable=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-job
from ganeti.cli import *
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import cli
from ganeti import qlang
#: default list of fields for L{ListJobs}
_LIST_DEF_FIELDS = ["id", "status", "summary"]
#: map converting the job status constants to user-visible
#: names
_USER_JOB_STATUS = {
  constants.JOB_STATUS_QUEUED: "queued",
  constants.JOB_STATUS_WAITING: "waiting",
  constants.JOB_STATUS_CANCELING: "canceling",
  constants.JOB_STATUS_RUNNING: "running",
  constants.JOB_STATUS_CANCELED: "canceled",
  constants.JOB_STATUS_SUCCESS: "success",
  constants.JOB_STATUS_ERROR: "error",
  }
def _FormatStatus(value):
  """Translates a job status constant to its user-visible name.

  @raise errors.ProgrammerError: if the status code is unknown

  """
  if value not in _USER_JOB_STATUS:
    raise errors.ProgrammerError("Unknown job status code '%s'" % value)
  return _USER_JOB_STATUS[value]
def _FormatSummary(value):
  """Formats a job's summary, tolerating non-ASCII opcode summaries.

  Every item is UTF-8 encoded before joining, so the result is always
  an encoded byte string.

  """
  encoded_items = [item.encode('utf-8') for item in value]
  return ','.encode('utf-8').join(encoded_items)
#: per-field formatting overrides for job listings, passed as the
#: C{format_override} argument of L{GenericList}; each value is a
#: (format function, flag) pair -- the flag's exact meaning is defined
#: by GenericList (presumably alignment; confirm there)
_JOB_LIST_FORMAT = {
  "status": (_FormatStatus, False),
  "summary": (_FormatSummary, False),
  }
# The opcode timestamp fields hold one timestamp per opcode, so each
# list element is formatted individually.
_JOB_LIST_FORMAT.update(dict.fromkeys(["opstart", "opexec", "opend"],
                                      (lambda value: map(FormatTimestamp,
                                                         value),
                                       None)))
def _ParseJobIds(args):
  """Parses a list of string job IDs into integers.

  @param args: list of strings
  @return: list of integers
  @raise OpPrereqError: in case of invalid values

  """
  try:
    return [int(a) for a in args]
  except (ValueError, TypeError), err:
    # int() raises ValueError on malformed strings and TypeError on
    # non-string/number inputs; both become a precondition error.
    raise errors.OpPrereqError("Invalid job ID passed: %s" % err,
                               errors.ECODE_INVAL)
def ListJobs(opts, args):
  """List the jobs and their status.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: job IDs to restrict the listing to, or empty for all
  @rtype: int
  @return: the desired exit code

  """
  fields = ParseFields(opts.output, _LIST_DEF_FIELDS)
  # Archived jobs are only included when the "archived" field is part
  # of the query.
  if opts.archived and "archived" not in fields:
    fields.append("archived")
  status_filter = qlang.MakeSimpleFilter("status", opts.status_filter)
  return GenericList(constants.QR_JOB, fields, args, None,
                     opts.separator, not opts.no_headers,
                     format_override=_JOB_LIST_FORMAT, verbose=opts.verbose,
                     force_filter=opts.force_filter, namefield="id",
                     qfilter=status_filter, isnumeric=True, cl=GetClient())
def ListJobFields(opts, args):
  """List the fields available for job queries.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: fields to list, or empty for all
  @rtype: int
  @return: the desired exit code

  """
  return GenericListFields(constants.QR_JOB, args, opts.separator,
                           not opts.no_headers, cl=GetClient())
def ArchiveJobs(opts, args):
  """Archive the given jobs.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the job IDs to be archived
  @rtype: int
  @return: the desired exit code (1 if any job could not be archived)

  """
  client = GetClient()
  exit_code = 0
  for job_id in args:
    archived = client.ArchiveJob(job_id)
    if not archived:
      ToStderr("Failed to archive job with ID '%s'", job_id)
      exit_code = 1
  return exit_code
def AutoArchiveJobs(opts, args):
  """Archive jobs based on age.

  This will archive jobs based on their age, or all jobs if 'all' is
  passed.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: one element: an age that can be parsed by
      L{ganeti.cli.ParseTimespec}, or the keyword I{all} to archive
      every job
  @rtype: int
  @return: the desired exit code

  """
  client = GetClient()
  age_spec = args[0]
  # -1 is the wire encoding for "no age limit".
  age = -1 if age_spec == "all" else ParseTimespec(age_spec)
  (archived_count, jobs_left) = client.AutoArchiveJobs(age)
  ToStdout("Archived %s jobs, %s unchecked left", archived_count, jobs_left)
  return 0
def _MultiJobAction(opts, args, cl, stdout_fn, ask_fn, question, action_fn):
  """Applies a function to multiple jobs.

  Jobs are selected either explicitly by ID (C{args}) or through a
  status filter (C{opts.status_filter}); exactly one of the two must be
  used.  For filter-based selection without C{--force}, the matching
  jobs are printed and C{question} is asked before acting.

  @param opts: Command line options
  @type args: list
  @param args: Job IDs
  @param cl: cluster client, or C{None} to create one via L{GetClient}
  @param stdout_fn: output function, or C{None} for L{ToStdout}
  @param ask_fn: confirmation function, or C{None} for L{AskUser}
  @param question: confirmation question for filter-based selection
  @param action_fn: callable(cl, job_id) returning a (success, msg) pair
  @rtype: int
  @return: Exit code

  """
  if cl is None:
    cl = GetClient()
  if stdout_fn is None:
    stdout_fn = ToStdout
  if ask_fn is None:
    ask_fn = AskUser
  result = constants.EXIT_SUCCESS
  # XOR: reject "both job IDs and a filter" as well as "neither".
  if bool(args) ^ (opts.status_filter is None):
    raise errors.OpPrereqError("Either a status filter or job ID(s) must be"
                               " specified and never both", errors.ECODE_INVAL)
  if opts.status_filter is not None:
    response = cl.Query(constants.QR_JOB, ["id", "status", "summary"],
                        qlang.MakeSimpleFilter("status", opts.status_filter))
    # Each queried field arrives as a (result status, value) pair;
    # extract the job IDs.
    jobs = [i for ((_, i), _, _) in response.data]
    if not jobs:
      raise errors.OpPrereqError("No jobs with the requested status have been"
                                 " found", errors.ECODE_STATE)
    if not opts.force:
      # Show the affected jobs and ask before doing anything.
      (_, table) = FormatQueryResult(response, header=True,
                                     format_override=_JOB_LIST_FORMAT)
      for line in table:
        stdout_fn(line)
      if not ask_fn(question):
        return constants.EXIT_CONFIRMATION
  else:
    jobs = args
  for job_id in jobs:
    (success, msg) = action_fn(cl, job_id)
    if not success:
      result = constants.EXIT_FAILURE
      stdout_fn(msg)
  return result
def CancelJobs(opts, args, cl=None, _stdout_fn=ToStdout, _ask_fn=AskUser):
  """Cancel (or kill) not-yet-finished jobs.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the job IDs to be cancelled
  @rtype: int
  @return: the desired exit code

  """
  if not opts.kill:
    action_name = "Cancel"
  else:
    # Killing is destructive, so it requires an extra confirmation flag.
    if not opts.yes_do_it:
      raise errors.OpPrereqError("The --kill option must be confirmed"
                                 " with --yes-do-it", errors.ECODE_INVAL)
    action_name = "KILL"
  def _cancel_fn(client, job_id):
    return client.CancelJob(job_id, kill=opts.kill)
  return _MultiJobAction(opts, args, cl, _stdout_fn, _ask_fn,
                         "%s job(s) listed above?" % action_name,
                         _cancel_fn)
def ChangePriority(opts, args):
  """Change the priority of one or more jobs.

  @param opts: Command line options
  @type args: list
  @param args: Job IDs
  @rtype: int
  @return: Exit code

  """
  if opts.priority is not None:
    def _change_fn(cl, job_id):
      return cl.ChangeJobPriority(job_id, opts.priority)
    return _MultiJobAction(opts, args, None, None, None,
                           "Change priority of job(s) listed above?",
                           _change_fn)
  ToStderr("--priority option must be given.")
  return constants.EXIT_FAILURE
def _ListOpcodeTimestamp(name, ts, container):
  """Appends a formatted opcode timestamp entry to C{container}.

  Values that are not (seconds, microseconds) pairs -- e.g. C{None} for
  states an opcode never reached -- are rendered as "N/A".

  """
  if not isinstance(ts, (tuple, list)):
    container.append((name, "N/A", "opcode_timestamp"))
  else:
    container.append((name, FormatTimestamp(ts), "opcode_timestamp"))
def _CalcDelta(from_ts, to_ts):
  """Returns the difference between two (seconds, microseconds)
  timestamps as a float number of seconds.

  """
  whole_seconds = to_ts[0] - from_ts[0]
  fractional = (to_ts[1] - from_ts[1]) / 1000000.0
  return whole_seconds + fractional
def _ListJobTimestamp(name, ts, container, prior_ts=None):
  """Appends a formatted job-level timestamp entry to C{container}.

  @param prior_ts: if given, the delta between it and C{ts} is shown
      next to the formatted timestamp

  """
  if ts is None:
    # Missing timestamp (e.g. the job never reached this state).
    container.append((name, "unknown (%s)" % str(ts), "job_timestamp"))
    return
  suffix = ""
  if prior_ts is not None:
    suffix = " (delta %.6fs)" % _CalcDelta(prior_ts, ts)
  container.append((name, "%s%s" % (FormatTimestamp(ts), suffix),
                    "job_timestamp"))
def ShowJobs(opts, args):
  """Show detailed information about jobs.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the job IDs to be queried
  @rtype: int
  @return: the desired exit code

  """
  selected_fields = [
    "id", "status", "ops", "opresult", "opstatus", "oplog",
    "opstart", "opexec", "opend", "received_ts", "start_ts", "end_ts",
    ]
  qfilter = qlang.MakeSimpleFilter("id", _ParseJobIds(args))
  cl = GetClient()
  result = cl.Query(constants.QR_JOB, selected_fields, qfilter).data
  job_info_container = []
  for entry in result:
    # Each field arrives as a (result status, value) pair, in the same
    # order as selected_fields above.
    ((_, job_id), (rs_status, status), (_, ops), (_, opresult), (_, opstatus),
     (_, oplog), (_, opstart), (_, opexec), (_, opend), (_, recv_ts),
     (_, start_ts), (_, end_ts)) = entry
    # Detect non-normal results
    if rs_status != constants.RS_NORMAL:
      job_info_container.append("Job ID %s not found" % job_id)
      continue
    # Container for produced data
    job_info = [("Job ID", job_id)]
    if status in _USER_JOB_STATUS:
      status = _USER_JOB_STATUS[status]
    else:
      raise errors.ProgrammerError("Unknown job status code '%s'" % status)
    job_info.append(("Status", status))
    # Job-level timestamps, each shown with the delta to the prior one.
    _ListJobTimestamp("Received", recv_ts, job_info)
    _ListJobTimestamp("Processing start", start_ts, job_info, prior_ts=recv_ts)
    _ListJobTimestamp("Processing end", end_ts, job_info, prior_ts=start_ts)
    if end_ts is not None and recv_ts is not None:
      job_info.append(("Total processing time", "%.6f seconds" %
                       _CalcDelta(recv_ts, end_ts)))
    else:
      job_info.append(("Total processing time", "N/A"))
    # Per-opcode details: the op* lists are parallel, one entry per
    # opcode of the job.
    opcode_container = []
    for (opcode, result, status, log, s_ts, x_ts, e_ts) in \
            zip(ops, opresult, opstatus, oplog, opstart, opexec, opend):
      opcode_info = []
      opcode_info.append(("Opcode", opcode["OP_ID"]))
      opcode_info.append(("Status", status))
      _ListOpcodeTimestamp("Processing start", s_ts, opcode_info)
      _ListOpcodeTimestamp("Execution start", x_ts, opcode_info)
      _ListOpcodeTimestamp("Processing end", e_ts, opcode_info)
      opcode_info.append(("Input fields", opcode))
      opcode_info.append(("Result", result))
      exec_log_container = []
      for serial, log_ts, log_type, log_msg in log:
        time_txt = FormatTimestamp(log_ts)
        encoded = FormatLogMessage(log_type, log_msg)
        # Arranged in this curious way to preserve the brevity for multiple
        # logs. This content cannot be exposed as a 4-tuple, as time contains
        # the colon, causing some YAML parsers to fail.
        exec_log_info = [
          ("Time", time_txt),
          ("Content", (serial, log_type, encoded,)),
          ]
        exec_log_container.append(exec_log_info)
      opcode_info.append(("Execution log", exec_log_container))
      opcode_container.append(opcode_info)
    job_info.append(("Opcodes", opcode_container))
    job_info_container.append(job_info)
  PrintGenericInfo(job_info_container)
  return 0
def WatchJob(opts, args):
  """Follow a job and print its output as it arrives.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: Contains the job ID
  @rtype: int
  @return: the desired exit code

  """
  job_id = args[0]
  msg = ("Output from job %s follows" % job_id)
  ToStdout(msg)
  ToStdout("-" * len(msg))
  retcode = 0
  try:
    # PollJob prints the job's feedback messages as they arrive.
    cli.PollJob(job_id)
  except errors.GenericError, err:
    (retcode, job_result) = cli.FormatError(err)
    ToStderr("Job %s failed: %s", job_id, job_result)
  return retcode
def WaitJob(opts, args):
  """Wait for a job to finish, not producing any output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: Contains the job ID
  @rtype: int
  @return: the desired exit code

  """
  job_id = args[0]
  retcode = 0
  try:
    # Discard all feedback messages; we only care about completion.
    cli.PollJob(job_id, feedback_fn=lambda _: None)
  except errors.GenericError, err:
    (retcode, job_result) = cli.FormatError(err)
    ToStderr("Job %s failed: %s", job_id, job_result)
  return retcode
_KILL_OPT = \
  cli_option("--kill", default=False,
             action="store_true", dest="kill",
             help="Kill running jobs with SIGKILL")
_YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                           help="Really use --kill", action="store_true")
# All status-selection options below store into the same
# "status_filter" destination, so the last one given on the command
# line wins.
_PENDING_OPT = \
  cli_option("--pending", default=None,
             action="store_const", dest="status_filter",
             const=constants.JOBS_PENDING,
             help="Select jobs pending execution or being cancelled")
_RUNNING_OPT = \
  cli_option("--running", default=None,
             action="store_const", dest="status_filter",
             const=frozenset([
               constants.JOB_STATUS_RUNNING,
               ]),
             help="Show jobs currently running only")
_ERROR_OPT = \
  cli_option("--error", default=None,
             action="store_const", dest="status_filter",
             const=frozenset([
               constants.JOB_STATUS_ERROR,
               ]),
             help="Show failed jobs only")
_FINISHED_OPT = \
  cli_option("--finished", default=None,
             action="store_const", dest="status_filter",
             const=constants.JOBS_FINALIZED,
             help="Show finished jobs only")
_ARCHIVED_OPT = \
  cli_option("--archived", default=False,
             action="store_true", dest="archived",
             help="Include archived jobs in list (slow and expensive)")
_QUEUED_OPT = \
  cli_option("--queued", default=None,
             action="store_const", dest="status_filter",
             const=frozenset([
               constants.JOB_STATUS_QUEUED,
               ]),
             help="Select queued jobs only")
_WAITING_OPT = \
  cli_option("--waiting", default=None,
             action="store_const", dest="status_filter",
             const=frozenset([
               constants.JOB_STATUS_WAITING,
               ]),
             help="Select waiting jobs only")
#: sub-command table consumed by L{GenericMain}; each entry maps a
#: command name to (handler, argument spec, options, usage, description)
commands = {
  "list": (
    ListJobs, [ArgJobId()],
    [NOHDR_OPT, SEP_OPT, FIELDS_OPT, VERBOSE_OPT, FORCE_FILTER_OPT,
     _PENDING_OPT, _RUNNING_OPT, _ERROR_OPT, _FINISHED_OPT, _ARCHIVED_OPT],
    "[job_id ...]",
    "Lists the jobs and their status. The available fields can be shown"
    " using the \"list-fields\" command (see the man page for details)."
    " The default field list is (in order): %s." %
    utils.CommaJoin(_LIST_DEF_FIELDS)),
  "list-fields": (
    ListJobFields, [ArgUnknown()],
    [NOHDR_OPT, SEP_OPT],
    "[fields...]",
    "Lists all available fields for jobs"),
  "archive": (
    ArchiveJobs, [ArgJobId(min=1)], [],
    "<job-id> [<job-id> ...]", "Archive specified jobs"),
  "autoarchive": (
    AutoArchiveJobs,
    [ArgSuggest(min=1, max=1, choices=["1d", "1w", "4w", "all"])],
    [],
    "<age>", "Auto archive jobs older than the given age"),
  "cancel": (
    CancelJobs, [ArgJobId()],
    [FORCE_OPT, _KILL_OPT, _PENDING_OPT, _QUEUED_OPT, _WAITING_OPT,
     _YES_DOIT_OPT],
    "{[--force] [--kill --yes-do-it] {--pending | --queued | --waiting} |"
    " <job-id> [<job-id> ...]}",
    "Cancel jobs"),
  "info": (
    ShowJobs, [ArgJobId(min=1)], [],
    "<job-id> [<job-id> ...]",
    "Show detailed information about the specified jobs"),
  "wait": (
    WaitJob, [ArgJobId(min=1, max=1)], [],
    "<job-id>", "Wait for a job to finish"),
  "watch": (
    WatchJob, [ArgJobId(min=1, max=1)], [],
    "<job-id>", "Follows a job and prints its output as it arrives"),
  "change-priority": (
    ChangePriority, [ArgJobId()],
    [PRIORITY_OPT, FORCE_OPT, _PENDING_OPT, _QUEUED_OPT, _WAITING_OPT],
    "--priority <priority> {[--force] {--pending | --queued | --waiting} |"
    " <job-id> [<job-id> ...]}",
    "Change the priority of jobs"),
  }
#: dictionary with aliases for commands
aliases = {
  "show": "info",
  }
def Main():
  """Entry point of the gnt-job tool; dispatches via L{GenericMain}."""
  return GenericMain(commands, aliases=aliases)
|
{
"content_hash": "4643f7f96b3ca2856f443b429a0fa6a7",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 79,
"avg_line_length": 29.41485507246377,
"alnum_prop": 0.6161852558970253,
"repo_name": "leshchevds/ganeti",
"id": "3dd4eff0db11942d6afdcfc46e67a7d2ae1fd53a",
"size": "17590",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/client/gnt_job.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "2664853"
},
{
"name": "JavaScript",
"bytes": "8855"
},
{
"name": "M4",
"bytes": "32087"
},
{
"name": "Makefile",
"bytes": "97737"
},
{
"name": "Python",
"bytes": "6099533"
},
{
"name": "Shell",
"bytes": "122593"
}
],
"symlink_target": ""
}
|
from __future__ import division
import cvxopt
import numpy as np
from pylab import *
import math
from cvxpy import *
# Taken from CVX website http://cvxr.com/cvx/examples/
# Example: Section 5.2.5: Mixed strategies for matrix games (LP formulation)
# Ported from cvx matlab to cvxpy by Misrab Faizullah-Khan
# Original comments below
# Boyd & Vandenberghe, "Convex Optimization"
# Joelle Skaf - 08/24/05
#
# Player 1 wishes to choose u to minimize his expected payoff u'Pv, while
# player 2 wishes to choose v to maximize u'Pv, where P is the payoff
# matrix, u and v are the probability distributions of the choices of each
# player (i.e. u>=0, v>=0, sum(u_i)=1, sum(v_i)=1)
# LP formulation: minimize t
# s.t. u >=0 , sum(u) = 1, P'*u <= t*1
# maximize t
# s.t. v >=0 , sum(v) = 1, P*v >= t*1
# Input data: random payoff matrix for an n-by-m matrix game
n = 12
m = 12
P = cvxopt.normal(n,m)
# Variables for two players: mixed strategies x, y and game values t1, t2
x = Variable(n)
y = Variable(m)
t1 = Variable()
t2 = Variable()
# Note in one case we are maximizing; in the other we are minimizing
objective1 = Minimize(t1)
objective2 = Maximize(t2)
# Player 1: probabilities, sum to 1, worst-case payoff bounded above by t1
constraints1 = [ x>=0, sum_entries(x)==1, P.T*x <= t1 ]
# Player 2: probabilities, sum to 1, worst-case payoff bounded below by t2
constraints2 = [ y>=0, sum_entries(y)==1, P*y >= t2 ]
p1 = Problem(objective1, constraints1)
p2 = Problem(objective2, constraints2)
# Optimal strategy for Player 1
print 'Computing the optimal strategy for player 1 ... '
result1 = p1.solve()
print 'Done!'
# Optimal strategy for Player 2
print 'Computing the optimal strategy for player 2 ... '
result2 = p2.solve()
print 'Done!'
# Displaying results
print '------------------------------------------------------------------------'
print 'The optimal strategies for players 1 and 2 are respectively: '
print x.value, y.value
print 'The expected payoffs for player 1 and player 2 respectively are: '
print result1, result2
print 'They are equal as expected!'
## ISSUE: THEY AREN'T EXACTLY EQUAL FOR SOME REASON!
## NOTE(review): LP duality guarantees equality only in exact arithmetic;
## numerical solvers return the two values equal up to solver tolerance.
|
{
"content_hash": "4cdfccac2cf505aa415df0ca09727559",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 29.328358208955223,
"alnum_prop": 0.6529262086513995,
"repo_name": "riadnassiffe/Simulator",
"id": "54ab67e080fccf5bcb1f39cd4b5969305d9378d7",
"size": "1988",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "src/tools/ecos/cvxpy/examples/matrix_games_LP.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66812"
}
],
"symlink_target": ""
}
|
'''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.utils.http import urlencode
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from datetime import datetime
from django.shortcuts import redirect
from django.forms.models import model_to_dict
from django.contrib import messages
from contest.contest_info import get_scoreboard
from contest.contest_info import get_scoreboard_csv
from contest.contest_info import get_public_user_password_csv
from contest.contest_info import get_clarifications
from contest.contest_info import can_ask
from contest.contest_info import can_reply
from contest.contest_info import can_create_contest
from contest.contest_info import can_edit_contest
from contest.contest_info import can_delete_contest
from contest.contest_info import get_contest_or_404
from contest.contest_archive import get_contests
from contest.contest_archive import add_contestants
from contest.models import Contest
from contest.models import Contestant
from contest.models import Clarification
from contest.forms import ContestForm
from contest.forms import ClarificationForm
from contest.forms import ReplyForm
from contest.register_contest import user_register_contest
from contest.register_contest import group_register_contest
from contest.register_contest import public_user_register_contest
from contest.public_user import is_integer
from contest.contest_info import can_create_contest
from contest.contest_info import can_edit_contest
from contest.contest_info import can_delete_contest
from contest.contest_info import get_contest_or_404
from contest.public_user import get_public_contestant
from problem.problem_info import get_testcase
from group.models import Group
from group.group_info import get_owned_group
from group.group_info import get_group_or_404
from utils.log_info import get_logger
from utils import user_info
from utils.render_helper import render_index, get_current_page
from status.views import *
from django.conf import settings
logger = get_logger()
def archive(request):
    """Renders the paginated contest archive page."""
    visible_contests = get_contests(request.user)
    page_of_contests = get_current_page(request, visible_contests)
    return render_index(request, 'contest/contestArchive.html',
                        {'contests': page_of_contests})
# dynamically loaded contest info panel
def contest_info(request, cid):
    """Renders the info panel of one contest, including its contestants."""
    target = get_contest_or_404(cid)
    target = add_contestants(target)
    return render_index(request, 'contest/contestInfo.html',
                        {'contest': target})
def register_page(request, cid):
    """Renders the registration page for a contest, listing the
    requester's owned groups and the current public-user count."""
    contest = get_contest_or_404(cid)
    context = {
        'contest': contest,
        'groups': get_owned_group(request.user),
        'max_public_user': settings.MAX_PUBLIC_USER,
        'public_user': len(get_public_contestant(contest)),
    }
    return render_index(request, 'contest/register.html', context)
# contest detail page
def contest(request, cid):
    """Renders the contest detail page: problems (with testcases),
    scoreboard, submission status and clarifications.

    Visible once the contest has started; contest owners (per
    user_info.has_contest_ownership) and admins can see it at any time.
    """
    user = user_info.validate_user(request.user)
    try:
        contest = Contest.objects.get(id = cid)
    except Contest.DoesNotExist:
        logger.warning('Contest: Can not find contest %s!' % cid)
        raise Http404('Contest does not exist')
    now = datetime.now()
    # Allow access if the contest has started, OR the requester has
    # contest ownership, OR the requester is an admin.
    if ((contest.start_time < now) or\
        user_info.has_contest_ownership(user,contest) or\
        user.has_admin_auth()):
        for problem in contest.problem.all():
            problem.testcase = get_testcase(problem)
        scoreboard = get_scoreboard(contest)
        status = contest_status(request, contest)
        clarifications = get_clarifications(user,contest)
        initial_form = {'contest':contest,'asker':user}
        form = ClarificationForm(initial=initial_form)
        initial_reply_form = {'contest':contest,'replier':user}
        reply_form = ReplyForm(initial = initial_reply_form)
        return render_index(request, 'contest/contest.html',
                {'contest':contest, 'clarifications':clarifications,
                'form':form, 'reply_form':reply_form,
                'scoreboard':scoreboard, 'status': status})
    else:
        raise PermissionDenied
@login_required
def new(request):
    """Creates a new contest: GET renders the empty form, POST validates
    and saves it.  Only users permitted by can_create_contest() may
    enter; everyone else gets PermissionDenied.
    """
    title = "New Contest"
    if can_create_contest(request.user):
        if request.method == 'GET':
            form = ContestForm(initial=\
                {'owner':request.user, 'user':request.user, 'method':request.method})
            return render_index(request,'contest/editContest.html',
                    {'form':form,'title':title})
        if request.method == 'POST':
            form = ContestForm(request.POST, initial={'method':request.method})
            if form.is_valid():
                new_contest = form.save()
                logger.info('Contest: User %s Create a new contest %s!' %
                    (request.user ,new_contest.id))
                message = 'Contest %s- "%s" created!' % (new_contest.id, new_contest.cname)
                messages.success(request, message)
                return redirect('contest:contest', new_contest.id)
            else:
                message = 'Some fields are invalid!'
                messages.error(request, message)
                return render_index(request,'contest/editContest.html',
                        {'form':form,'title':title})
    # Not permitted, or an HTTP method other than GET/POST.
    raise PermissionDenied
@login_required
def edit(request, cid):
    """Edits an existing contest: GET renders the form pre-filled with
    the contest's current values, POST validates and saves the changes.
    Only users permitted by can_edit_contest() may enter.
    """
    try:
        contest = Contest.objects.get(id = cid)
    except Contest.DoesNotExist:
        logger.warning('Contest: Can not edit contest %s! Contest not found!' % cid)
        raise Http404('Contest does not exist, can not edit.')
    title = "Edit Contest"
    if can_edit_contest(request.user,contest):
        contest_dic = model_to_dict(contest)
        # The form's own validation uses the requesting user and the
        # HTTP method, so both are injected into the initial data.
        contest_dic['user'] = request.user
        contest_dic['method'] = request.method
        if request.method == 'GET':
            form = ContestForm(initial = contest_dic)
            return render_index(request,'contest/editContest.html',
                    {'form':form, 'title':title, 'contest':contest})
        if request.method == 'POST':
            form = ContestForm(request.POST, instance = contest,
                initial={'method':request.method})
            if form.is_valid():
                modified_contest = form.save()
                logger.info('Contest: User %s edited contest %s!' %
                    (request.user, modified_contest.id))
                message = 'Contest %s- "%s" edited!' % \
                    (modified_contest.id, modified_contest.cname)
                messages.success(request, message)
                return redirect('contest:contest', modified_contest.id)
            else:
                message = 'Some fields are invalid!'
                messages.error(request, message)
                return render_index(request,'contest/editContest.html',
                        {'form':form,'title':title, 'contest':contest})
    # Not permitted, or an HTTP method other than GET/POST.
    raise PermissionDenied
@login_required
def delete(request, cid):
    """Deletes a contest; only users permitted by can_delete_contest()
    may do so."""
    try:
        contest = Contest.objects.get(id=cid)
    except Contest.DoesNotExist:
        logger.warning('Contest: Can not delete contest %s! Contest not found!' % cid)
        raise Http404('Contest does not exist, can not delete.')
    if not can_delete_contest(request.user, contest):
        raise PermissionDenied
    # Remember the id before the row disappears, for the user message.
    deleted_cid = contest.id
    contest.delete()
    messages.warning(request, 'Contest %s deleted!' % (deleted_cid))
    logger.info('Contest: User %s delete contest %s!' %
        (request.user, deleted_cid))
    return redirect('contest:archive')
@login_required
def register(request, cid):
    """Registers into a contest: a group (``group`` in POST), a batch of
    public users (``public_user`` in POST), or the requesting user alone.
    """
    contest = get_contest_or_404(cid)
    group_id = request.POST.get('group')
    public_user = request.POST.get('public_user')
    # Dispatch on which POST field was supplied.
    if(group_id is not None):
        return register_group(request, group_id, contest)
    elif(public_user is not None):
        return register_public_user(request, public_user, contest)
    else:
        # Single-user self registration.
        if user_register_contest(request.user, contest):
            message = 'User %s register Contest %s- "%s"!' % \
                (request.user.username, contest.id, contest.cname)
            messages.success(request, message)
        else:
            message = 'Register Error!'
            messages.error(request, message)
        return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required
def register_group(request, group_id, contest):
    """Registers a group into a contest (presumably enrolling its
    members -- see contest.register_contest.group_register_contest).
    The requester must own the group.
    """
    group = get_group_or_404(group_id)
    if user_info.has_group_ownership(request.user, group):
        if group_register_contest(group, contest):
            message = 'Group %s- "%s" registered Contest %s- "%s"!' % \
                (group.id, group.gname, contest.id, contest.cname)
            messages.success(request, message)
        else:
            message = 'Register Error!'
            messages.error(request, message)
    else:
        message = 'Register Error! %s does not have Group %s- "%s" ownership' % \
            (request.user.username, group.id, group.gname)
        messages.error(request, message)
        logger.warning('Contest: User %s can not register group %s. Does not have ownership!'
            % (request.user.username, group_id))
    return redirect('contest:archive')
@login_required
def register_public_user(request, public_user, contest):
    """Registers (or trims down to) ``public_user`` public users for a
    contest.  ``public_user`` is the requested count as a string from
    POST data; only contest owners or admins may do this.
    """
    user = user_info.validate_user(request.user)
    if (user_info.has_contest_ownership(user, contest) or
        user.has_admin_auth()):
        if not is_integer(public_user):
            message = 'invalid input!'
            messages.warning(request, message)
            return redirect('contest:archive')
        user_registered = public_user_register_contest(public_user, contest)
        if user_registered:
            message = 'User %s registered %s public users to Contest %s- "%s"!' % \
                (user.username, user_registered, contest.id, contest.cname)
            messages.success(request, message)
            if int(public_user) > settings.MAX_PUBLIC_USER:
                message = 'Requested more than max! Set public users to %s' % \
                    (settings.MAX_PUBLIC_USER)
                messages.warning(request, message)
            # Redirect to the download view -- presumably serving the
            # public users' credentials CSV; confirm in contest:download.
            download_url = reverse('contest:download') + '?cid=' + str(contest.id)
            return HttpResponseRedirect(download_url)
        else:
            if int(public_user) == 0:
                message = 'Remove all public users!'
                messages.warning(request, message)
                return redirect('contest:archive')
            else:
                message = 'Cannot register public user to Contest %s- "%s"!' % \
                    (contest.id, contest.cname)
                messages.error(request, message)
                return redirect('contest:archive')
    raise PermissionDenied
@login_required
def ask(request):
    """Creates a clarification question for a contest (POST only).

    The contest id comes from POST data; the asker must be allowed by
    can_ask().  On any failure the user is redirected back to the
    contest page with an error message.
    """
    try:
        contest = request.POST['contest']
        contest_obj = Contest.objects.get(pk = contest)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed here; missing POST
        # key or unknown contest id still becomes a 404.
        logger.warning('Clarification: User %s can not create Clarification!' %
            request.user.username)
        raise Http404('Contest does not exist, can not ask.')
    if can_ask(request.user,contest_obj):
        if request.method == 'POST':
            form = ClarificationForm(request.POST)
            if form.is_valid():
                new_clarification = form.save()
                # Placeholder reply until a staff member answers.
                new_clarification.reply = ' '
                new_clarification.save()
                logger.info('Clarification: User %s create Clarification %s!'
                    % (request.user.username, new_clarification.id))
                message = 'User %s successfully asked!' % \
                    (request.user.username)
                messages.success(request, message)
                return redirect('contest:contest', contest)
    # Not allowed to ask, wrong method, or invalid form data.
    message = 'User %s cannot ask!' % \
        (request.user.username)
    messages.error(request, message)
    return redirect('contest:contest', contest)
@login_required
def reply(request):
    """Reply to an existing clarification.

    Expects POST data with a `clarification` id plus ReplyForm fields.
    Users with reply permission are redirected to the contest page;
    everyone else gets an error flash and the archive page.
    """
    try:
        clarification = request.POST['clarification']
        instance = Clarification.objects.get(pk = clarification)
        contest_obj = instance.contest
        contest = contest_obj.id
    # NOTE(review): bare except also swallows KeyError/programming errors,
    # mapping them all to 404.
    except:
        logger.warning('Clarification: User %s can not reply Clarification!'
            % (request.user.username))
        raise Http404('Contest does not exist, can not reply.')
    if can_reply(request.user,contest_obj):
        if request.method == 'POST':
            form = ReplyForm(request.POST, instance = instance)
            if form.is_valid():
                replied_clarification = form.save()
                replied_clarification.reply_time = datetime.now()
                replied_clarification.save()
                logger.info('Clarification: User %s reply Clarification %s!'
                    % (request.user.username, replied_clarification.id))
                message = 'User %s successfully replied!' % \
                    (request.user.username)
                messages.success(request, message)
            else:
                # BUG FIX: the original logged replied_clarification.id here,
                # but that name is only bound when the form is valid, so an
                # invalid form raised NameError. Use the fetched instance's id.
                logger.warning('Clarification: User %s can not reply Clarification %s!'
                    % (request.user.username, instance.id))
                message = 'Some fields are wrong!'
                messages.error(request, message)
            return redirect('contest:contest',contest)
    message = 'User %s cannot reply!' % \
        (request.user.username)
    messages.error(request, message)
    return redirect('contest:archive')
def download(request):
    """Serve contest downloads.

    POST with type=scoreboard returns a scoreboard CSV; POST with
    type=public_user_password returns the generated-password CSV (contest
    owner or admin only). GET with ?cid= renders the download page.
    Raises Http404 for anything else and PermissionDenied on bad ownership.
    """
    user = user_info.validate_user(request.user)
    if request.method == 'POST':
        what = request.POST.get('type')
        if what == 'scoreboard':
            scoreboard_type = request.POST.get('scoreboard_type')
            cid = request.POST.get('contest')
            scoreboard_file = get_scoreboard_csv(cid, scoreboard_type)
            return scoreboard_file
        elif what == 'public_user_password':
            cid = request.POST.get('contest')
            contest = get_contest_or_404(cid)
            if user_info.has_contest_ownership(user, contest) or\
                user.has_admin_auth():
                logger.info('Contest:User %s download Contest %s - %s public user password!' %
                    (request.user, contest.id, contest.cname))
                return get_public_user_password_csv(contest)
            else:
                raise PermissionDenied
        raise Http404('file not found')
    elif request.method == 'GET':
        if request.GET.get('cid'):
            cid = request.GET.get('cid')
            contest = get_contest_or_404(cid)
            if user_info.has_contest_ownership(user, contest) or user.has_admin_auth():
                return render_index(request,'contest/download.html',{'contest':contest})
            else:
                raise PermissionDenied
    # BUG FIX: the original fell off the end here (GET without ?cid=, or any
    # other HTTP method), returning None, which Django rejects with a 500.
    raise Http404('file not found')
|
{
"content_hash": "08fac7a83df6aa93db42a04774fab34e",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 94,
"avg_line_length": 42.056994818652846,
"alnum_prop": 0.6419243562892695,
"repo_name": "drowsy810301/NTHUOJ_web",
"id": "464693553ea4bc3d2951be6539bccfb6d0d58cb4",
"size": "16234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contest/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17630"
},
{
"name": "HTML",
"bytes": "118458"
},
{
"name": "JavaScript",
"bytes": "47460"
},
{
"name": "Python",
"bytes": "236617"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Group, Permission
from mks.models import Member
from links.models import LinkType
from committees.models import Committee, Topic
from committees.models import TOPIC_REJECTED
class TopicsTest(TestCase):
    """Tests for committee Topics: visibility, edit permissions, the
    add/edit views, the list view and rating-based ordering."""
    def setUp(self):
        """Create two committees, two meetings, two users, two topics and a
        default link type as shared fixtures."""
        self.committee_1 = Committee.objects.create(name='c1')
        self.committee_2 = Committee.objects.create(name='c2')
        self.meeting_1 = self.committee_1.meetings.create(date=datetime.now(),
                             protocol_text='''jacob:
I am a perfectionist
adrian:
I have a deadline''')
        self.meeting_1.create_protocol_parts()
        self.meeting_2 = self.committee_1.meetings.create(date=datetime.now(),
                                                         protocol_text='m2')
        self.meeting_2.create_protocol_parts()
        self.jacob = User.objects.create_user('jacob', 'jacob@example.com',
                                              'JKM')
        self.ofri = User.objects.create_user('ofri', 'ofri@example.com',
                                              'ofri')
        # Only members of the 'Valid Email' group may add topics.
        (self.group, created) = Group.objects.get_or_create(name='Valid Email')
        if created:
            self.group.save()
        self.group.permissions.add(Permission.objects.get(name='Can add Topic'))
        self.jacob.groups.add(self.group)
        self.mk_1 = Member.objects.create(name='mk 1')
        self.topic = self.committee_1.topic_set.create(creator=self.jacob,
                                                title="hello", description="hello world")
        self.topic2 = self.committee_1.topic_set.create(creator=self.ofri,
                                                title="bye", description="goodbye")
        self.linktype = LinkType.objects.create(title='default')
    def testBasic(self):
        """Rejected topics disappear from the public queryset."""
        self.topic2.set_status(TOPIC_REJECTED, "just because")
        self.assertEqual(self.committee_1.topic_set.get_public().count(), 1)
        self.assertEqual(Topic.objects.get_public().count(), 1)
        self.topic.set_status(TOPIC_REJECTED, "because I feel like it")
        self.assertEqual(self.committee_1.topic_set.get_public().count(), 0)
    def testPermissions(self):
        """Only the creator -- or an explicitly added editor -- may edit."""
        self.assertTrue(self.topic.can_edit(self.jacob))
        self.assertFalse(self.topic.can_edit(self.ofri))
        self.topic.editors.add(self.ofri)
        self.assertTrue(self.topic.can_edit(self.ofri))
        self.topic.editors.remove(self.ofri)
    def test_edit_topic_form(self):
        """GET on the edit view: 302 anonymous, 403 non-editor, 200 editor."""
        res = self.client.get(reverse('edit-committee-topic',
                                 kwargs={'committee_id': self.committee_1.id,
                                         'topic_id': self.topic.id}))
        self.assertEqual(res.status_code, 302)  # login required
        self.assertTrue(self.client.login(username='ofri',
                                          password='ofri'))
        res = self.client.get(reverse('edit-committee-topic',
                                 kwargs={'committee_id': self.committee_1.id,
                                         'topic_id': self.topic.id}))
        self.assertEqual(res.status_code, 403)  # user is not an editor
        self.assertTrue(self.client.login(username='jacob',
                                          password='JKM'))
        res = self.client.get(reverse('edit-committee-topic',
                                 kwargs={'committee_id': self.committee_1.id,
                                         'topic_id': self.topic.id}))
        self.assertEqual(res.status_code, 200)  # user is an editor
        self.assertTemplateUsed(res, 'committees/edit_topic.html')
    def test_edit_topic_logged_required(self):
        """Anonymous POST to the edit view redirects to the login page."""
        res = self.client.post(reverse('edit-committee-topic',
                               kwargs={'committee_id': self.committee_1.id,
                                       'topic_id': self.topic.id}),
                               {'title': 'test topic title',
                                'description': 'test topic description',
                                'committees': self.committee_1.id,
                                'form-INITIAL_FORMS': 0,
                                'form-MAX_NUM_FORMS': '',
                                'form-TOTAL_FORMS': 3})
        self.assertEqual(res.status_code, 302)  # redirect to login
        self.assertTrue(res['location'].startswith('%s%s' %
                                       ('http://testserver', settings.LOGIN_URL)))
    def test_edit_topic(self):
        """A valid POST by an editor updates the topic in place."""
        self.assertTrue(self.client.login(username='jacob',
                                          password='JKM'))
        res = self.client.post(reverse('edit-committee-topic',
                               kwargs={'committee_id': self.committee_1.id,
                                       'topic_id': self.topic.id}),
                               {'title': 'test topic title',
                                'description': 'test topic description',
                                'committees': self.committee_1.id,
                                'form-INITIAL_FORMS': 0,
                                'form-MAX_NUM_FORMS': '',
                                'form-TOTAL_FORMS': 3})
        self.assertEqual(res.status_code, 302)  # redirect after POST
        t = Topic.objects.get(pk=self.topic.id)
        self.assertEqual(t.title, 'test topic title')
        self.assertEqual(t.description, 'test topic description')
        self.assertEqual(Topic.objects.count(), 2)  # make sure we didn't create
                                                    # a new topic
    def test_add_topic(self):
        """A valid POST without a topic_id creates a brand-new topic."""
        self.assertTrue(self.client.login(username='jacob',
                                          password='JKM'))
        res = self.client.post(reverse('edit-committee-topic',
                               kwargs={'committee_id': self.committee_1.id}),
                               {'title': 'test topic title',
                                'description': 'test topic description',
                                'committees': self.committee_1.id,
                                'form-INITIAL_FORMS': 0,
                                'form-MAX_NUM_FORMS': '',
                                'form-TOTAL_FORMS': 3})
        self.assertEqual(res.status_code, 302)  # redirect after POST
        topic_id = res['location'].split('/')[-2]  # id of the new topic
        t = Topic.objects.get(pk=topic_id)
        self.assertEqual(t.title, 'test topic title')
        self.assertEqual(t.description, 'test topic description')
        self.assertEqual(Topic.objects.count(), 3)  # make sure we created
                                                    # a new topic
        # cleanup
        t.delete()
    def testListView(self):
        """The topic list view renders and contains both fixture topics."""
        res = self.client.get(reverse('topic-list'))
        self.assertEqual(res.status_code, 200)
        self.assertTemplateUsed(res, 'committees/topic_list.html')
        self.assertQuerysetEqual(res.context['topics'].order_by('pk'),
                                 ["<Topic: hello>", "<Topic: bye>"])
    def testRanking(self):
        """A rated topic sorts ahead of an unrated one in by_rank()."""
        self.assertQuerysetEqual(Topic.objects.order_by('pk'),
                                 ["<Topic: hello>", "<Topic: bye>"])
        self.topic2.rating.add(score=4, user=self.ofri, ip_address="127.0.0.1")
        self.assertQuerysetEqual(Topic.objects.by_rank(),
                                 ["<Topic: bye>", "<Topic: hello>"])
    def tearDown(self):
        """Remove the fixtures created in setUp."""
        self.meeting_1.delete()
        self.meeting_2.delete()
        self.committee_1.delete()
        self.committee_2.delete()
        self.jacob.delete()
        self.group.delete()
        self.mk_1.delete()
        self.topic.delete()
|
{
"content_hash": "a2d05eea0ef4aaa9f08289d5eeebebd3",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 96,
"avg_line_length": 52.513333333333335,
"alnum_prop": 0.5277389869239558,
"repo_name": "OriHoch/Open-Knesset",
"id": "2d96a15813cd758f0c0858ad30c582e29bc645ca",
"size": "7877",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "committees/tests/test_topics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "350330"
},
{
"name": "HTML",
"bytes": "763338"
},
{
"name": "JavaScript",
"bytes": "220620"
},
{
"name": "Python",
"bytes": "4504481"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import cx_Oracle
import db_config
con = cx_Oracle.connect(db_config.user, db_config.pw, db_config.dsn)
cur = con.cursor()
# Create table
cur.execute("""begin
execute immediate 'drop table testgeometry';
exception when others then
if sqlcode <> -942 then
raise;
end if;
end;""")
cur.execute("""create table testgeometry (
id number(9) not null,
geometry MDSYS.SDO_GEOMETRY not null)""")
# Create and populate Oracle objects
typeObj = con.gettype("MDSYS.SDO_GEOMETRY")
elementInfoTypeObj = con.gettype("MDSYS.SDO_ELEM_INFO_ARRAY")
ordinateTypeObj = con.gettype("MDSYS.SDO_ORDINATE_ARRAY")
obj = typeObj.newobject()
obj.SDO_GTYPE = 2003
obj.SDO_ELEM_INFO = elementInfoTypeObj.newobject()
obj.SDO_ELEM_INFO.extend([1, 1003, 3])
obj.SDO_ORDINATES = ordinateTypeObj.newobject()
obj.SDO_ORDINATES.extend([1, 1, 5, 7])
pointTypeObj = con.gettype("MDSYS.SDO_POINT_TYPE")
obj.SDO_POINT = pointTypeObj.newobject()
obj.SDO_POINT.X = 1
obj.SDO_POINT.Y = 2
obj.SDO_POINT.Z = 3
print("Created object", obj)
# Add a new row
print("Adding row to table...")
cur.execute("insert into testgeometry values (1, :objbv)", objbv = obj)
print("Row added!")
# Define a function to dump the contents of an Oracle object
def dumpobject(obj, prefix = "  "):
    """Recursively pretty-print an Oracle object.

    Collections are printed as '[' ... ']' with one element per line;
    non-collections as '{' ... '}' with 'name :' per attribute. Nested
    cx_Oracle.Object values recurse with two extra spaces of indent.
    """
    indent = prefix + "  "
    if obj.type.iscollection:
        print(prefix, "[")
        for element in obj.aslist():
            if isinstance(element, cx_Oracle.Object):
                dumpobject(element, indent)
            else:
                print(indent, repr(element))
        print(prefix, "]")
    else:
        print(prefix, "{")
        for attribute in obj.type.attributes:
            element = getattr(obj, attribute.name)
            if isinstance(element, cx_Oracle.Object):
                print(indent + attribute.name + " :")
                dumpobject(element, indent)
            else:
                print(indent + attribute.name + " :", repr(element))
        print(prefix, "}")
# Query the row back out and dump the fetched SDO_GEOMETRY object.
print("Querying row just inserted...")
cur.execute("select id, geometry from testgeometry")
for (id, obj) in cur:  # NOTE(review): `id` shadows the builtin; harmless here
    print("Id: ", id)
    dumpobject(obj)
|
{
"content_hash": "86abf6a50ef131532c73321fefd663dc",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 71,
"avg_line_length": 31.95774647887324,
"alnum_prop": 0.6024680475980608,
"repo_name": "kawamon/hue",
"id": "4f21eda07f313fc2b523bdc3648bc3b2df8a935f",
"size": "2694",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/cx_Oracle-6.4.1/samples/tutorial/solutions/bind_sdo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
from gnuradio import gr
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import time, struct, sys
from gnuradio import digital
# from current dir
from uhd_interface import uhd_transmitter
class my_top_block(gr.top_block):
    """Flow graph: message source -> constant amplifier -> sink.

    The sink is a UHD transmitter when --tx-freq is given, a file sink when
    --to-file is given, and a null sink otherwise (Python 2 / GNU Radio).
    """
    def __init__(self, options):
        gr.top_block.__init__(self)
        if(options.tx_freq is not None):
            # Transmit over the air via a UHD-driven USRP.
            self.sink = uhd_transmitter(options.args,
                                        options.bandwidth,
                                        options.tx_freq, options.tx_gain,
                                        options.spec, options.antenna,
                                        options.verbose)
        elif(options.to_file is not None):
            # Write the modulated complex samples to a file instead.
            self.sink = gr.file_sink(gr.sizeof_gr_complex, options.to_file)
        else:
            self.sink = gr.null_sink(gr.sizeof_gr_complex)
        # do this after for any adjustments to the options that may
        # occur in the sinks (specifically the UHD sink)
        self.txpath = gr.message_source(gr.sizeof_gr_complex, 3)
        self.amp = gr.multiply_const_cc(options.amp)
        self.connect(self.txpath, self.amp, self.sink)
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
def main():
    """Read a complex-sample file and transmit it repeatedly as packets.

    Python 2 script: parses options, builds the flow graph, then pushes the
    file contents into the message source --num times, --gap seconds apart,
    optionally with USRP timestamps (--time).
    """
    def send_pkt(payload='', timestamp=None, eof=False):
        # A type-1 gr.message signals end-of-transmission to the source.
        if eof:
            msg = gr.message(1)
        else:
            msg = gr.message_from_string(payload)
            if timestamp is not None:
                secs = long(timestamp)
                frac_secs = timestamp - long(timestamp)
                msg.set_timestamp(secs, frac_secs)
        return tb.txpath.msgq().insert_tail(msg)
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-n", "--num", type="eng_float", default=1,
                      help="set number of packets [default=%default]")
    # NOTE(review): the --gap help text looks copy-pasted from --num; it is
    # actually the inter-packet delay in seconds.
    parser.add_option("-g", "--gap", type="eng_float", default=0.005,
                      help="set number of packets [default=%default]")
    parser.add_option("","--data-file", default=None,
                      help="use complex input file for transmission")
    parser.add_option("","--to-file", default=None,
                      help="Output file for modulated samples")
    parser.add_option("-W", "--bandwidth", type="eng_float",
                      default=4e6,
                      help="set symbol bandwidth [default=%default]")
    parser.add_option("", "--amp", type="eng_float", default=0.1,
                      help="set gain factor for complex baseband floats [default=%default]")
    parser.add_option("", "--time", action="store_true", default=False,
                      help="set timed tx mode")
    #transmit_path.add_options(parser, expert_grp)
    #digital.ofdm_mod.add_options(parser, expert_grp)
    uhd_transmitter.add_options(parser)
    (options, args) = parser.parse_args ()
    # build the graph
    tb = my_top_block(options)
    r = gr.enable_realtime_scheduling()
    if r != gr.RT_OK:
        print "Warning: failed to enable realtime scheduling"
    tb.start()                       # start flow graph
    ###########################################################################
    if options.data_file is None:
        sys.stderr.write("You must specify data file\n")
        parser.print_help(sys.stderr)
        sys.exit(1)
    # Read the whole payload into memory (bounded by MAX_READ_BYTES).
    MAX_READ_BYTES = 1000000000
    file_object = open(options.data_file)
    data = file_object.read(MAX_READ_BYTES)
    print "Length of payload = ", len(data), " | MAX_READ = ", MAX_READ_BYTES
    file_object.close()
    secs, frac = tb.sink.get_usrp_time()
    print "USRP Time: ", secs
    cnt = 0
    GAP = options.gap
    # Schedule the first timed packet slightly in the future.
    startTime = secs+0.1
    while cnt < options.num:
        if options.time:
            send_pkt(data, startTime+cnt*GAP, eof=False)
        else:
            send_pkt(data, eof=False)
            if (options.gap > 0.0):
                sys.stdout.flush()
                time.sleep(options.gap)
        #print "Send pkt no.", cnt
        cnt = cnt + 1
    send_pkt(eof=True)
    print "End of Tx | cnt = ", cnt
    time.sleep(1)
    ###########################################################################
    tb.wait()                       # wait for it to finish
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop transmission without a traceback.
        pass
|
{
"content_hash": "4466f8ccddc341413d80c84711bf6bcc",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 92,
"avg_line_length": 37.21311475409836,
"alnum_prop": 0.5231277533039648,
"repo_name": "YaguangZhang/EarsMeasurementCampaignCode",
"id": "96b52a092aad9845297433c7dec842a583ad7a58",
"size": "5423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PostProcessing/lib/ext/gnuradio-tools/python/raw_msgqtx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "CMake",
"bytes": "2141"
},
{
"name": "HTML",
"bytes": "149170"
},
{
"name": "Java",
"bytes": "1053"
},
{
"name": "MATLAB",
"bytes": "1925561"
},
{
"name": "Python",
"bytes": "486745"
},
{
"name": "Shell",
"bytes": "199"
},
{
"name": "TeX",
"bytes": "88978"
}
],
"symlink_target": ""
}
|
""" Snapcast Client. """
import logging
import queue
import socket
import threading
import time
from snapcast.client.messages import (hello_packet, request_packet,
command_packet, packet,
basemessage, BASE_SIZE)
from snapcast.client.gstreamer import GstreamerAppSrc
__version__ = '0.0.1-py'
# Default Snapcast server TCP port (informational; host/port are passed in).
SERVER_PORT = 1704
# Seconds between periodic time-sync requests to the server.
SYNC_AFTER = 1
# Number of queued audio chunks before playback is considered buffered.
BUFFER_SIZE = 30
# Protocol command and message-type names used on the wire.
CMD_START_STREAM = 'startStream'
MSG_SERVER_SETTINGS = 'ServerSettings'
MSG_SAMPLE_FORMAT = 'SampleFormat'
MSG_WIRE_CHUNK = 'WireChunk'
MSG_HEADER = 'Header'
MSG_TIME = 'Time'
_LOGGER = logging.getLogger(__name__)
def mac():
    """Return this host's MAC address as a colon-separated hex string."""
    from uuid import getnode
    hex_mac = "%012x" % getnode()
    return ':'.join(hex_mac[i:i + 2] for i in range(0, 12, 2))
class Client:
    """ Snapcast client: connects to a server, buffers incoming audio
    chunks and relays them to a Gstreamer app source. """
    def __init__(self, host, port):
        """ Connect to host:port and start the reader/writer/player threads. """
        self._queue = queue.Queue()    # outgoing packets to the server
        self._buffer = queue.Queue()   # incoming audio chunks awaiting playback
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.connect((host, port))
        self._source = GstreamerAppSrc()
        self._last_sync = time.time()
        self._connected = False
        self._buffered = False
        # Daemon threads so the process can exit without joining them.
        threading.Thread(target=self._read_socket, daemon=True).start()
        threading.Thread(target=self._write_socket, daemon=True).start()
        threading.Thread(target=self._play, daemon=True).start()
        _LOGGER.info('Connected to %s:%s', host, port)
    def register(self):
        """ Send the hello handshake and initial settings/format requests. """
        self._queue.put(hello_packet(socket.gethostname(), mac(), __version__))
        self._queue.put(request_packet(MSG_SERVER_SETTINGS))
        self._queue.put(request_packet(MSG_SAMPLE_FORMAT))
        self._queue.put(request_packet(MSG_HEADER))
    def request_start(self):
        """ Indicate readiness to receive stream.
        This is a blocking call.
        """
        self._queue.put(command_packet(CMD_START_STREAM))
        _LOGGER.info('Requesting stream')
        self._source.run()
    def _read_socket(self):
        """ Process incoming messages from socket. """
        while True:
            base_bytes = self._socket.recv(BASE_SIZE)
            base = basemessage.parse(base_bytes)
            # NOTE(review): recv() may return fewer than payload_length bytes;
            # large payloads could be truncated -- confirm against the server's
            # framing before relying on this.
            payload_bytes = self._socket.recv(base.payload_length)
            self._handle_message(packet.parse(base_bytes + payload_bytes))
    def _handle_message(self, data):
        """ Dispatch one parsed message by its wire type. """
        if data.type == MSG_SERVER_SETTINGS:
            _LOGGER.info(data.payload)
        elif data.type == MSG_SAMPLE_FORMAT:
            _LOGGER.info(data.payload)
            self._connected = True
        elif data.type == MSG_TIME:
            if not self._buffered:
                _LOGGER.info('Buffering')
        elif data.type == MSG_HEADER:
            # Push to app source and start playing.
            _LOGGER.info(data.payload.codec.decode('ascii'))
            self._source.push(data.payload.header)
            self._source.play()
        elif data.type == MSG_WIRE_CHUNK:
            # Add chunks to play queue; flip the buffered flag once the
            # queue is deep enough, clear it when the queue drains.
            self._buffer.put(data.payload.chunk)
            if self._buffer.qsize() > BUFFER_SIZE:
                self._buffered = True
            if self._buffer.empty():
                self._buffered = False
    def _write_socket(self):
        """ Pass messages from queue to socket. """
        while True:
            now = time.time()
            # Periodically request the server time (acts as sync/keep-alive).
            if self._connected and (self._last_sync + SYNC_AFTER) < now:
                self._queue.put(request_packet(MSG_TIME))
                self._last_sync = now
            if not self._queue.empty():
                self._socket.send(self._queue.get())
    def _play(self):
        """ Relay buffered chunks to the app source once buffering is done. """
        while True:
            if self._buffered:
                self._source.push(self._buffer.get())
|
{
"content_hash": "b4e7f9c5c97aa7826a5994c83888c93d",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 79,
"avg_line_length": 33.51724137931034,
"alnum_prop": 0.5787037037037037,
"repo_name": "happyleavesaoc/python-snapcast",
"id": "0d6f599cdfcca6657b474dbaafbc13e9fb620d00",
"size": "3888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snapcast/client/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "65108"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import tempfile
from contextlib import contextmanager
from jinja2 import Template
from cloudify_rest_client import exceptions as rest_exceptions
from cloudify import ctx
from cloudify.state import ctx_parameters as inputs
from cloudify import exceptions
from cloudify import utils
# Destination for the rendered ghost configuration file.
CONFIG_PATH = '/opt/ghost/config.js'
# Blueprint-relative path of the Jinja2 config template.
TEMPLATE_RESOURCE_NAME = 'resources/ghost/ghost.default.template'
def configure(subject=None):
    """Download and unpack ghost into /opt/ghost, then render its config.

    `subject` is forwarded to writeConfig (defaults to the current ctx there).
    """
    ctx.logger.info('Configuring ghost.')
    setup_steps = [
        ('sudo apt-get -y install unzip',
         'Failed installing unzip'),
        ('sudo curl -L https://ghost.org/zip/ghost-latest.zip -o /opt/ghost.zip',
         'Failed downloading ghost'),
        ('sudo unzip -uo /opt/ghost.zip -d /opt/ghost',
         'Failed unzipping ghost'),
    ]
    for shell_command, failure_text in setup_steps:
        _run(shell_command, error_message=failure_text)
    writeConfig(subject)
def writeConfig(subject=None):
    """Render the ghost config template and install it at CONFIG_PATH.

    Pulls the PostgreSQL address from the subject's runtime properties,
    merges it into the node properties, renders the Jinja2 template to a
    temp file and moves it into place with sudo.
    """
    subject = subject or ctx
    postgre_ip_address = subject.instance.runtime_properties["postgre_ip_address"]
    template = Template(ctx.get_resource(TEMPLATE_RESOURCE_NAME))
    ctx.logger.debug('Building a dict object that will contain variables '
                     'to write to the Jinja2 template.')
    # Copy so the node's stored properties are not mutated.
    config = subject.node.properties.copy()
    config.update(dict(
        postgre_ip=postgre_ip_address))
    ctx.logger.debug('Rendering the Jinja2 template to {0}.'.format(CONFIG_PATH))
    ctx.logger.debug('The config dict: {0}.'.format(config))
    # delete=False so the file survives the context exit for the mv below.
    with tempfile.NamedTemporaryFile(delete=False) as temp_config:
        temp_config.write(template.render(config))
    #_run('sudo /usr/sbin/haproxy -f {0} -c'.format(temp_config.name),
    #     error_message='Failed to Configure')
    _run('sudo mv {0} {1}'.format(temp_config.name, CONFIG_PATH),
         error_message='Failed to write to {0}.'.format(CONFIG_PATH))
def stop():
    """Stop ghost by killing every running node process on the host."""
    _run('sudo killall node',error_message='Failed killing nodejs')
def _run(command, error_message):
    """Execute a shell command locally.

    Raises NonRecoverableError (prefixed with `error_message`) if the
    command fails.
    """
    try:
        utils.LocalCommandRunner(logger=ctx.logger).run(command)
    except exceptions.CommandExecutionException as error:
        raise exceptions.NonRecoverableError(
            '{0}: {1}'.format(error_message, error))
def _main():
    """Dispatch to the module-level function named in the 'invocation' input,
    passing along its optional 'args' and 'kwargs'."""
    invocation = inputs['invocation']
    target = globals()[invocation['function']]
    target(*invocation.get('args', []), **invocation.get('kwargs', {}))
# Entry point when invoked as a Cloudify script task.
if __name__ == '__main__':
    _main()
|
{
"content_hash": "f1f44b352e0f62ad7aad766b42433e0e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 122,
"avg_line_length": 31.723684210526315,
"alnum_prop": 0.6943177104935712,
"repo_name": "Frank-G/orchestration_comparision",
"id": "f9e2d549201810b826e993ac14832af3266a2d99",
"size": "3134",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudify/ghost-openstack/scripts/ghost/ghost.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "3962"
},
{
"name": "Pascal",
"bytes": "4287"
},
{
"name": "Puppet",
"bytes": "103292"
},
{
"name": "Python",
"bytes": "7144"
},
{
"name": "Ruby",
"bytes": "178323"
},
{
"name": "Shell",
"bytes": "7053"
}
],
"symlink_target": ""
}
|
import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import color_style
from django.template.loader import get_template
from django_extensions.compat import add_to_builtins_compat
from django_extensions.management.utils import signalcommand
from django_extensions.utils import validatingtemplatetags
#
# TODO: Render the template with fake request object ?
#
class Command(BaseCommand):
    """Management command: load every template under the configured template
    dirs and report syntax/compile errors (optionally url-tag misuse)."""
    args = ''
    help = "Validate templates on syntax and compile errors"
    option_list = BaseCommand.option_list + (
        make_option('--break', '-b', action='store_true', dest='break',
                    default=False, help="Break on first error."),
        make_option('--check-urls', '-u', action='store_true', dest='check_urls',
                    default=False, help="Check url tag view names are quoted appropriately"),
        make_option('--force-new-urls', '-n', action='store_true', dest='force_new_urls',
                    default=False, help="Error on usage of old style url tags (without {% load urls from future %}"),
        make_option('--include', '-i', action='append', dest='includes',
                    default=[], help="Append these paths to TEMPLATE_DIRS")
    )
    @signalcommand
    def handle(self, *args, **options):
        """Collect template dirs, walk them, and try to load each template,
        counting (and optionally breaking on) errors."""
        from django.conf import settings
        style = color_style()
        # Merge TEMPLATE_DIRS, --include paths and any extra configured dirs.
        template_dirs = set(settings.TEMPLATE_DIRS)
        template_dirs |= set(options.get('includes', []))
        template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))
        # Load in Templates from 1.8
        if hasattr(settings, 'TEMPLATES'):
            for template_engine in settings.TEMPLATES:
                if 'DIRS' in template_engine:
                    template_dirs |= set(template_engine['DIRS'])
        settings.TEMPLATE_DIRS = list(template_dirs)
        settings.TEMPLATE_DEBUG = True
        verbosity = int(options.get('verbosity', 1))
        errors = 0
        # Replace built in template tags with our own validating versions
        if options.get('check_urls', False):
            add_to_builtins_compat(
                'django_extensions.utils.validatingtemplatetags')
        for template_dir in template_dirs:
            for root, dirs, filenames in os.walk(template_dir):
                for filename in filenames:
                    # Skip editor swap/backup files.
                    if filename.endswith(".swp"):
                        continue
                    if filename.endswith("~"):
                        continue
                    filepath = os.path.join(root, filename)
                    if verbosity > 1:
                        print(filepath)
                    validatingtemplatetags.before_new_template(options.get('force_new_urls', False))
                    try:
                        get_template(filename, [root])
                    except Exception as e:
                        errors += 1
                        print("%s: %s" % (filepath, style.ERROR("%s %s" % (e.__class__.__name__, str(e)))))
                    # url-tag problems are recorded by the validating tags
                    # rather than raised, so collect them separately.
                    template_errors = validatingtemplatetags.get_template_errors()
                    for origin, line, message in template_errors:
                        errors += 1
                        print("%s(%s): %s" % (origin, line, style.ERROR(message)))
                    if errors and options.get('break', False):
                        raise CommandError("Errors found")
        if errors:
            raise CommandError("%s errors found" % errors)
        print("%s errors found" % errors)
|
{
"content_hash": "ae82f6fde27fe8e2d9c59bc6e40ffd0e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 117,
"avg_line_length": 44.22222222222222,
"alnum_prop": 0.5787269681742043,
"repo_name": "pabulumm/neighbors",
"id": "e5d4ed0b87c7c2421748f9abd1bf2a7cacb43595",
"size": "3582",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python3.4/site-packages/django_extensions/management/commands/validate_templates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "167622"
},
{
"name": "HTML",
"bytes": "221496"
},
{
"name": "JavaScript",
"bytes": "325471"
},
{
"name": "Python",
"bytes": "7896264"
},
{
"name": "Shell",
"bytes": "12645"
},
{
"name": "Smarty",
"bytes": "789"
}
],
"symlink_target": ""
}
|
""" setup.py
Basic setup file to enable pip install
http://python-distribute.org/distribute_setup.py
python setup.py register sdist upload
"""
from setuptools import setup, find_packages
setup(
    # Distribution identity.
    name = 'demoing',
    version = '0.0.1',
    description = 'OpenWest Demo Application',
    url = 'https://github.com/SmithSamuelM/openwestdemo.git',
    packages = find_packages(exclude=[]),
    # Ship static assets (templates, JS/CSS, images) alongside the code.
    package_data={'':       ['*.txt',  '*.ico', '*.json', '*.md', '*.conf', ''
                             '*.js', '*.html', '*.css', '*.png', 'libs/*.txt',
                             'libs/angular/*.txt',
                             'libs/angular/*.js', 'libs/angular/i18n/*.js',
                             'libs/angular-gold/*.js', 'libs/bootstrap/css/*.css',
                             'libs/bootstrap/img/*.png', 'libs/bootstrap/js/*.js',]},
    install_requires = ['bottle', 'simplejson', 'gevent', 'brining', ],
    extras_require = { },
    # Tests run under nose via `python setup.py test`.
    tests_require = ['webtest', 'nose'],
    test_suite = 'nose.collector',
    author='Samuel M Smith',
    author_email='smith.samuel.m@gmail.com',
    license="MIT",
    keywords='AngularJS BottlePY NoSQL',
)
|
{
"content_hash": "e07432d5675286f9b1ae8f99ea56d8c0",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 33.64705882352941,
"alnum_prop": 0.5524475524475524,
"repo_name": "SmithSamuelM/openwestdemo",
"id": "9729fa61bc59cef96a50a41f5fb417f3eda067fb",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16735"
},
{
"name": "CoffeeScript",
"bytes": "21350"
},
{
"name": "Python",
"bytes": "24399"
},
{
"name": "Shell",
"bytes": "778"
}
],
"symlink_target": ""
}
|
"""
WSGI config for Djember Sample project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
sys.path.append(BASE_DIR)
application = get_wsgi_application()
|
{
"content_hash": "e64aaeab0fbe0838c97add7cd5d99c1f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 24.45,
"alnum_prop": 0.754601226993865,
"repo_name": "drf-forms/ember_sample",
"id": "140dded7627c237537fcaf860aa5fbde44e82085",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "back/djember_sample/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "214646"
},
{
"name": "HTML",
"bytes": "25570"
},
{
"name": "JavaScript",
"bytes": "161408"
},
{
"name": "Python",
"bytes": "36929"
},
{
"name": "Shell",
"bytes": "471"
}
],
"symlink_target": ""
}
|
import numpy as np
import pickle
import ray
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.misc import normc_initializer
from ray.rllib.models.tf.recurrent_net import RecurrentNetwork
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf
tf1, tf, tfv = try_import_tf()
class SpyLayer(tf.keras.layers.Layer):
    """A keras Layer that intercepts its inputs and stores them pickled.

    Wraps a single Dense output layer; on every call it forwards the
    non-LSTM-output tensors (observations, seq_lens, and the in/out LSTM
    states) to :meth:`spy`, which records them in Ray's internal KV store
    so a test suite can inspect what the model actually saw.
    """

    # Dummy int64 value returned by the spy py_func op; its dtype must
    # match the `tf.int64` declared in `call()` below.
    output = np.array(0, dtype=np.int64)

    def __init__(self, num_outputs, **kwargs):
        # The wrapped Dense layer that produces the actual layer output.
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            units=num_outputs, kernel_initializer=normc_initializer(0.01)
        )

    def call(self, inputs, **kwargs):
        """Does a forward pass through our Dense, but also intercepts inputs.

        `inputs` is the 7-element list assembled in RNNSpyModel.__init__:
        [obs, lstm_out, seq_lens, h_in, c_in, h_out, c_out].  Only
        `inputs[1]` (lstm_out) feeds the Dense layer; the rest are captured
        as a side effect via `spy`.
        """
        del kwargs
        # Side-effecting op that snapshots every input except lstm_out.
        spy_fn = tf1.py_func(
            self.spy,
            [
                inputs[0],  # observations
                inputs[2],  # seq_lens
                inputs[3],  # h_in
                inputs[4],  # c_in
                inputs[5],  # h_out
                inputs[6],  # c_out
            ],
            tf.int64,  # Must match SpyLayer.output's type.
            stateful=True,
        )

        # Compute outputs; the control dependency forces the spy op to run
        # before (or alongside) the Dense forward pass in graph mode.
        with tf1.control_dependencies([spy_fn]):
            return self.dense(inputs[1])

    @staticmethod
    def spy(inputs, seq_lens, h_in, c_in, h_out, c_out):
        """The actual spy operation: Store inputs in internal_kv."""
        if len(inputs) == 1:
            # don't capture inference inputs
            # (presumably a length-1 leading dim marks single-step
            # inference batches -- TODO confirm against the caller)
            return SpyLayer.output
        # TF runs this function in an isolated context, so we have to use
        # redis to communicate back to our suite
        ray.experimental.internal_kv._internal_kv_put(
            "rnn_spy_in_{}".format(RNNSpyModel.capture_index),
            pickle.dumps(
                {
                    "sequences": inputs,
                    "seq_lens": seq_lens,
                    "state_in": [h_in, c_in],
                    "state_out": [h_out, c_out],
                }
            ),
            overwrite=True,
        )
        # Advance the key suffix so successive captures don't overwrite.
        RNNSpyModel.capture_index += 1
        return SpyLayer.output
class RNNSpyModel(RecurrentNetwork):
    """LSTM policy model whose forward-pass inputs are recorded by SpyLayer.

    Used for testing: every forward pass stores its sequences, seq_lens and
    LSTM states in Ray's internal KV under keys "rnn_spy_in_<capture_index>".
    """

    # Monotonically increasing suffix for the internal_kv keys written by
    # SpyLayer.spy; reset at the start of each forward_rnn call.
    capture_index = 0
    # LSTM hidden-state size (kept tiny; this model exists for testing).
    cell_size = 3

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        super().__init__(obs_space, action_space, num_outputs, model_config, name)
        self.cell_size = RNNSpyModel.cell_size

        # Create a keras LSTM model.
        inputs = tf.keras.layers.Input(shape=(None,) + obs_space.shape, name="input")
        state_in_h = tf.keras.layers.Input(shape=(self.cell_size,), name="h")
        state_in_c = tf.keras.layers.Input(shape=(self.cell_size,), name="c")
        seq_lens = tf.keras.layers.Input(shape=(), name="seq_lens", dtype=tf.int32)

        # LSTM over the (padded) time dimension; masked by seq_lens.
        lstm_out, state_out_h, state_out_c = tf.keras.layers.LSTM(
            self.cell_size, return_sequences=True, return_state=True, name="lstm"
        )(
            inputs=inputs,
            mask=tf.sequence_mask(seq_lens),
            initial_state=[state_in_h, state_in_c],
        )
        # SpyLayer computes logits from lstm_out and, as a side effect,
        # captures all the other tensors for later inspection.
        logits = SpyLayer(num_outputs=self.num_outputs)(
            [
                inputs,
                lstm_out,
                seq_lens,
                state_in_h,
                state_in_c,
                state_out_h,
                state_out_c,
            ]
        )

        # Value branch.
        value_out = tf.keras.layers.Dense(
            units=1, kernel_initializer=normc_initializer(1.0)
        )(lstm_out)

        self.base_model = tf.keras.Model(
            [inputs, seq_lens, state_in_h, state_in_c],
            [logits, value_out, state_out_h, state_out_c],
        )
        self.base_model.summary()

    @override(RecurrentNetwork)
    def forward_rnn(self, inputs, state, seq_lens):
        # Previously, a new class object was created during
        # deserialization and this `capture_index`
        # variable would be refreshed between class instantiations.
        # This behavior is no longer the case, so we manually refresh
        # the variable.
        RNNSpyModel.capture_index = 0
        model_out, value_out, h, c = self.base_model(
            [inputs, seq_lens, state[0], state[1]]
        )
        # Stash the value branch output for value_function().
        self._value_out = value_out
        return model_out, [h, c]

    @override(ModelV2)
    def value_function(self):
        # Flatten the per-timestep value predictions into a 1-D tensor.
        return tf.reshape(self._value_out, [-1])

    @override(ModelV2)
    def get_initial_state(self):
        # Zero-initialized [h, c] LSTM state.
        return [
            np.zeros(self.cell_size, np.float32),
            np.zeros(self.cell_size, np.float32),
        ]
|
{
"content_hash": "2ccd30554dd8924c413daf2a3eb4fb1e",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 85,
"avg_line_length": 33.276595744680854,
"alnum_prop": 0.558610400682012,
"repo_name": "ray-project/ray",
"id": "fdf280f043f833285c96b5d354edb902e8af4de6",
"size": "4692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/examples/models/rnn_spy_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
"""Tf-Idf similarity measure"""
from __future__ import division
from math import log, sqrt
import collections
from py_stringmatching import utils
from py_stringmatching.similarity_measure.token_similarity_measure import \
TokenSimilarityMeasure
class TfIdf(TokenSimilarityMeasure):
    """Tf-Idf similarity measure class.

    Computes the cosine similarity between two token bags after weighting
    each token by its TF/IDF score with respect to a corpus.

    Parameters:
        corpus_list (list of lists): Corpus list (default is set to None) of strings. If set to None,
                                     the input lists are considered the only corpus.
        dampen (boolean): Flag to indicate whether 'log' should be applied to tf and idf measure.
    """

    def __init__(self, corpus_list=None, dampen=False):
        self.__corpus_list = corpus_list
        # document frequency of every token appearing in the corpus
        self.__document_frequency = {}
        self.__compute_document_frequency()
        self.__corpus_size = (0 if self.__corpus_list is None
                              else len(self.__corpus_list))
        self.dampen = dampen
        super(TfIdf, self).__init__()

    def _tfidf_weight(self, idf, tf_count):
        """Return the weight of one term: (log-)idf times (log-)tf.

        Honors self.dampen; a zero term frequency always yields 0.
        """
        if tf_count == 0:
            return 0.0
        if self.dampen:
            return log(idf) * log(tf_count + 1)
        return idf * tf_count

    def get_raw_score(self, bag1, bag2):
        """
        Compute TF-IDF measure between two lists given the corpus information.

        This measure employs the notion of TF/IDF score commonly used in information retrieval (IR) to
        find documents that are relevant to keyword queries. The intuition underlying the TF/IDF measure
        is that two strings are similar if they share distinguishing terms.

        Args:
            bag1,bag2 (list): Input lists

        Returns:
            TF-IDF measure between the input lists (float)

        Raises:
            TypeError : If the inputs are not lists or if one of the inputs is None

        Examples:
            >>> tfidf = TfIdf([['a', 'b', 'a'], ['a', 'c'], ['a']])
            >>> tfidf.get_raw_score(['a', 'b', 'a'], ['a', 'c'])
            0.17541160386140586
            >>> tfidf.get_raw_score(['a', 'b', 'a'], ['a'])
            0.5547001962252291
            >>> tfidf = TfIdf([['a', 'b', 'a'], ['a', 'c'], ['a'], ['b']], True)
            >>> tfidf.get_raw_score(['a', 'b', 'a'], ['a', 'c'])
            0.11166746710505392
            >>> tfidf = TfIdf([['x', 'y'], ['w'], ['q']])
            >>> tfidf.get_raw_score(['a', 'b', 'a'], ['a'])
            0.0
            >>> tfidf = TfIdf([['x', 'y'], ['w'], ['q']], True)
            >>> tfidf.get_raw_score(['a', 'b', 'a'], ['a'])
            0.0
            >>> tfidf = TfIdf()
            >>> tfidf.get_raw_score(['a', 'b', 'a'], ['a'])
            0.7071067811865475
        """
        # input validations
        utils.sim_check_for_none(bag1, bag2)
        utils.sim_check_for_list_or_set_inputs(bag1, bag2)

        # if the strings match exactly return 1.0
        if utils.sim_check_for_exact_match(bag1, bag2):
            return 1.0

        # if one of the strings is empty return 0
        if utils.sim_check_for_empty(bag1, bag2):
            return 0

        # term frequency for input strings
        tf_x, tf_y = collections.Counter(bag1), collections.Counter(bag2)

        # unique elements of the inputs and their local document frequency
        # (1 if the element appears in one bag, 2 if in both)
        local_df = {}
        for element in tf_x:
            local_df[element] = local_df.get(element, 0) + 1
        for element in tf_y:
            local_df[element] = local_df.get(element, 0) + 1

        # if no corpus was supplied, treat the two input bags as the corpus
        if self.__corpus_list is None:
            curr_df, corpus_size = local_df, 2
        else:
            curr_df, corpus_size = (self.__document_frequency,
                                    self.__corpus_size)

        # cosine similarity over the tf-idf weighted vectors
        v_x_y = v_x_2 = v_y_2 = 0.0
        for element in local_df:
            df_element = curr_df.get(element)
            if df_element is None:
                # element never seen in the corpus -- contributes nothing
                continue
            idf_element = corpus_size * 1.0 / df_element
            # Counter returns 0 for absent keys, so these are 0 for
            # one-sided elements.
            v_x = self._tfidf_weight(idf_element, tf_x[element])
            v_y = self._tfidf_weight(idf_element, tf_y[element])
            v_x_y += v_x * v_y
            v_x_2 += v_x * v_x
            v_y_2 += v_y * v_y
        return 0.0 if v_x_y == 0 else v_x_y / (sqrt(v_x_2) * sqrt(v_y_2))

    def get_sim_score(self, bag1, bag2):
        """
        Compute normalized TF-IDF similarity between two lists given the corpus information.

        This measure employs the notion of TF/IDF score commonly used in information retrieval (IR) to
        find documents that are relevant to keyword queries. The intuition underlying the TF/IDF measure
        is that two strings are similar if they share distinguishing terms.

        The raw score is already a cosine similarity in [0, 1], so this
        simply delegates to get_raw_score.

        Args:
            bag1,bag2 (list): Input lists

        Returns:
            Normalized TF-IDF similarity between the input lists (float)

        Raises:
            TypeError : If the inputs are not lists or if one of the inputs is None

        Examples:
            >>> tfidf = TfIdf([['a', 'b', 'a'], ['a', 'c'], ['a']])
            >>> tfidf.get_sim_score(['a', 'b', 'a'], ['a', 'c'])
            0.17541160386140586
            >>> tfidf.get_sim_score(['a', 'b', 'a'], ['a'])
            0.5547001962252291
            >>> tfidf = TfIdf([['a', 'b', 'a'], ['a', 'c'], ['a'], ['b']], True)
            >>> tfidf.get_sim_score(['a', 'b', 'a'], ['a', 'c'])
            0.11166746710505392
            >>> tfidf = TfIdf([['x', 'y'], ['w'], ['q']])
            >>> tfidf.get_sim_score(['a', 'b', 'a'], ['a'])
            0.0
            >>> tfidf = TfIdf([['x', 'y'], ['w'], ['q']], True)
            >>> tfidf.get_sim_score(['a', 'b', 'a'], ['a'])
            0.0
            >>> tfidf = TfIdf()
            >>> tfidf.get_sim_score(['a', 'b', 'a'], ['a'])
            0.7071067811865475
        """
        return self.get_raw_score(bag1, bag2)

    def get_dampen(self):
        """
        Get dampen flag

        Returns:
            dampen flag (boolean)
        """
        return self.dampen

    def get_corpus_list(self):
        """
        Get corpus list

        Returns:
            corpus list (list of lists)
        """
        return self.__corpus_list

    def set_dampen(self, dampen):
        """
        Set dampen flag

        Args:
            dampen (boolean): Flag to indicate whether 'log' should be applied to tf and idf measure.
        """
        self.dampen = dampen
        return True

    def set_corpus_list(self, corpus_list):
        """
        Set corpus list and recompute the cached document frequencies.

        Args:
            corpus_list (list of lists): Corpus list
        """
        self.__corpus_list = corpus_list
        self.__document_frequency = {}
        self.__compute_document_frequency()
        self.__corpus_size = (0 if self.__corpus_list is None
                              else len(self.__corpus_list))
        return True

    def __compute_document_frequency(self):
        """Count, per token, how many corpus documents contain it."""
        # was `!= None`; `is not None` is the PEP 8 singleton comparison
        if self.__corpus_list is not None:
            for document in self.__corpus_list:
                for element in set(document):
                    self.__document_frequency[element] = (
                        self.__document_frequency.get(element, 0) + 1)
|
{
"content_hash": "4192060ab26bd4cd3575203d1f67d02a",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 113,
"avg_line_length": 37.81218274111675,
"alnum_prop": 0.5069136796885488,
"repo_name": "Anson-Doan/py_stringmatching",
"id": "d3dded2b63bd4a2143ac3e416fa209f6cc262d46",
"size": "7449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py_stringmatching/similarity_measure/tfidf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1843"
},
{
"name": "PowerShell",
"bytes": "3112"
},
{
"name": "Python",
"bytes": "234429"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.