prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in complianc | e with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the Li | cense.
# Convenience imports for public API components.
# Importing non-modules that are not used explicitly
from horizon.tables.actions import Action
from horizon.tables.actions import BatchAction
from horizon.tables.actions import DeleteAction
from horizon.tables.actions import FilterAction
from horizon.tables.actions import FixedFilterAction
from horizon.tables.actions import LinkAction
from horizon.tables.actions import NameFilterAction
from horizon.tables.base import Column
from horizon.tables.base import DataTable
from horizon.tables.base import Row
from horizon.tables.base import WrappingColumn
from horizon.tables.views import DataTableView
from horizon.tables.views import MixedDataTableView
from horizon.tables.views import MultiTableMixin
from horizon.tables.views import MultiTableView
from horizon.tables.views import PagedTableMixin
# Explicit public API of this package; mirrors the convenience imports
# above so that "from horizon.tables import *" stays bounded.
__all__ = [
    'Action',
    'BatchAction',
    'DeleteAction',
    'FilterAction',
    'FixedFilterAction',
    'LinkAction',
    'NameFilterAction',
    'Column',
    'DataTable',
    'Row',
    'WrappingColumn',
    'DataTableView',
    'MixedDataTableView',
    'MultiTableMixin',
    'MultiTableView',
    'PagedTableMixin',
]
|
#!/usr/bin/env python
"""Packaging script for the ``natural`` library."""
try:
    # Prefer setuptools (supports install_requires).
    from setuptools import setup
except ImportError:
    # Fix: was a bare "except:"; only the missing-setuptools case should
    # fall back to distutils — anything else must propagate.
    from distutils.core import setup

setup(name='natural',
      version='0.2.0',
      description='Convert data to their natural (human-readable) format',
      long_description='''
Example Usage
=============

Basic usage::

    >>> from natural.file import accessed
    >>> print accessed(__file__)
    just now

We speak your language (with `your support`_)::

    >>> import locale
    >>> locale.setlocale(locale.LC_MESSAGES, 'nl_NL')
    >>> print accessed(__file__)
    zojuist

Bugs/Features
=============

You can issue a ticket in GitHub: https://github.com/tehmaze/natural/issues

Documentation
=============

The project documentation can be found at http://natural.rtfd.org/

.. _your support: http://natural.readthedocs.org/en/latest/locales.html
''',
      author='Wijnand Modderman-Lenstra',
      author_email='maze@pyth0n.org',
      license='MIT',
      keywords='natural data date file number size',
      url='https://github.com/tehmaze/natural',
      packages=['natural'],
      package_data={'natural': ['locale/*/LC_MESSAGES/*.mo']},
      install_requires=['six'],
      )
|
import asyncio
import discord
from discord.ext import commands
def setup(bot):
# Disabled for now
return
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Monitor(bot, settings))
# This is the Monitor module. It keeps track of how many messages fail
class Monitor(commands.Cog):
    """Tracks the success rate of recent commands and force-restarts the
    bot when too many of them fail."""

    # Init with the bot reference
    def __init__(self, bot, settings):
        self.bot = bot
        self.settings = settings
        # Rolling window of {'Message': ..., 'Success': bool} records.
        self.commands = []
        self.commandCount = 25  # Keep 25 commands in the list max
        self.threshold = 0.9  # If we fall below 90% success - reboot the bot

    async def oncommand(self, command, ctx):
        """Record a newly-invoked command (initially marked failed) and
        reboot the bot if the recent success ratio fell below threshold."""
        # Check previous commands and see if we need to reboot
        passed = 0
        checked = 0
        # NOTE(review): the loop variable shadows the `command` parameter,
        # which is never used afterwards.
        for command in self.commands:
            checked += 1
            if command['Success'] == True:
                passed += 1
        if checked > 1 and float(passed/checked) < self.threshold:
            # We checked at least one command - and are below threshold
            print('Command success below threshold - rebooting...')
            # Persist settings before tearing everything down.
            self.settings.flushSettings(self.settings.file, True)
            # Logout, stop the event loop, close the loop, quit
            try:
                # Python < 3.9 location of all_tasks()
                tasks = asyncio.Task.all_tasks()
            except AttributeError:
                tasks = asyncio.all_tasks()
            for task in tasks:
                try:
                    task.cancel()
                except Exception:
                    continue
            try:
                await self.bot.logout()
                self.bot.loop.stop()
                self.bot.loop.close()
            except Exception:
                pass
            # NOTE(review): exit() is not awaitable; this relies on the
            # surrounding try/except to swallow the resulting error.
            try:
                await exit(0)
            except Exception:
                pass
        # Once we're here - we add our new command
        # Save the command to a list with the message
        newCommand = { 'Message': ctx.message, 'Success': False }
        self.commands.append(newCommand)
        while len(self.commands) > self.commandCount:
            # Remove the first item in the array until we're at our limit
            self.commands.pop(0)

    async def oncommandcompletion(self, command, ctx):
        """Mark the record matching this context's message as successful."""
        for command in self.commands:
            # command passed
            if command['Message'] == ctx.message:
                command['Success'] = True
|
##############################################################################
# Parte do livro Introdução à | Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2014
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Primeira reimpressão - Outubro/2011
# Segunda reimpressão - Novembro/2012
# Terceira reimpressão - Agosto/2013
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Site: http://python.nilo.pro.br/
#
# Arquivo: exercicios_resolvidos\capitul | o 05\exercicio-05-17.py
##############################################################################
# O programa pára logo após imprimir a quantidade de cédulas de R$50,00
|
# -*- coding: utf-8 -*-
import scrapy
import logging
from datetime import datetime
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from .delta_helper import DeltaHelper
class BaseSpider(scrapy.Spider):
    """Common helpers shared by the site spiders.

    Subclasses are expected to call ``init()`` before using ``self.delta``.
    """
    # need overwrite in subclass
    logger = logging.getLogger(__name__)

    def init(self):
        """Create the delta helper and open its database connection."""
        self.delta = DeltaHelper(self)
        self.delta.connect_db()

    def extract_int(self, text):
        """Return the first run of consecutive digits in *text* as an int.

        Logs a warning and returns None when *text* has no digits.

        Bug fix: the previous two-loop scan reused the loop variable as the
        end index and dropped the final digit whenever the digit run
        extended to the end of the string (e.g. 'a12' -> 1 instead of 12).
        """
        for start, ch in enumerate(text):
            if ch.isdigit():
                break
        else:
            # No digits at all (also covers the empty string).
            self.logger.warning('cannot extract integer from "%s"', text)
            return None
        end = start + 1
        while end < len(text) and text[end].isdigit():
            end += 1
        return int(text[start:end])

    def aa2urls(self, aa):
        """Map a list of <a> selectors to their href attribute values."""
        return [a.attrib['href'] for a in aa]

    def add_host(self, s):
        """Join a (possibly relative) path with the configured HOST."""
        return urljoin(self.settings['HOST'], s)

    # return index of the first exists class
    def find_classes_exists(self, d, classes):
        """Return the index of the first selector in *classes* that matches
        in document *d*, or None when none match."""
        for i, cls in enumerate(classes):
            if d(cls):
                return i
        return None

    def text2date(self, date):
        """Parse a '[yy-]mm-dd' string into a datetime, or None on failure.

        A bare 'mm-dd' value is assumed to be in the current year.
        """
        if date.count('-') == 1:
            date = '{}-{}'.format(datetime.now().year % 100, date)
        try:
            date = datetime.strptime(date, '%y-%m-%d')
        except ValueError:
            self.logger.warning('not a valid date: "%s"', date)
            date = None
        return date
|
#!/usr/bin | /env python
# encoding: utf-8
from random import choice
def random_string(length, random_range):
    """Return *length* characters drawn at random (with replacement)
    from the sequence *random_range*."""
    return "".join(choice(random_range) for _ in range(length))
|
'''
Created on 2015-01-19
@author: Guan-yu Willie Chen
'''
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.keys import Keys
import time

# Selenium smoke-test script: logs in, opens the barcode-printing page,
# runs a query and jumps to result page 3.
# NOTE(review): URL and the credentials below are blank; fill them in
# before running.
#browser = webdriver.Firefox()
#browser = webdriver.Ie()
browser = webdriver.Chrome("chromedriver.exe")
URL = ""
browser.get(URL+"/insurance/gs/sp/spLogin")
# Log in (user name, then password + Enter).
browser.find_element_by_xpath("//input[@id='login:userName']").send_keys('')
browser.find_element_by_xpath("//input[@id='login:password']").send_keys('' + Keys.RETURN)
# Enter the administration front end.
browser.find_element_by_xpath("//img[@name='Adminstration']").click()
# Open the barcode (QR-code) printing page.
# NOTE(review): unlike the login URL above, this path has no leading "/" -
# confirm whether "insurance/..." vs "/insurance/..." is intentional.
browser.get(URL+"insurance/eclaim/qrcodePrint.do")
# Claim number (start of range).
# NOTE(review): send_keys() returns None, so claimStartNo is always None.
claimStartNo = browser.find_element_by_name("claimStartNo").send_keys("CLBR14V000000")
# Document-name drop-down: dump its options, then pick the second one.
docId = browser.find_element_by_name("docId")
for n in enumerate(docId.text.split("\n")):
    print(n)
select = Select(docId)
select.select_by_index(1)
# Run the query.
browser.find_element_by_xpath("//input[@name='queryBtn']").click()
# Pagination: clear the page-number box, type "3" and click the
# "跳至" (jump-to-page) button.
browser.find_element_by_xpath("//input[@id='gotoPageNo']").send_keys(Keys.BACKSPACE)
browser.find_element_by_xpath("//input[@id='gotoPageNo']").send_keys("3")
browser.find_element_by_xpath("//div[@id='turnpage']/table/tbody/tr/td/input[@value='跳至']").click()
|
# | !/usr/bin/env python
import os
import shutil
import sys
import ratemyflight
class ProjectException(Exception):
    """Raised when a project cannot be created (missing, illegal or
    clashing project name)."""
def create_project():
    """
    Copies the contents of the project_template directory to a new directory
    specified as an argument to the command line.

    Raises ProjectException when no name (or an illegal name) is given on
    the command line, or when the name clashes with an importable module.
    """
    # Ensure a directory name is specified.
    # NOTE(review): script_name is assigned but never used.
    script_name = os.path.basename(sys.argv[0])
    usage_text = "Usage: ratemyflight project_name"
    usage_text += "\nProject names beginning with \"-\" are illegal."
    if len(sys.argv) != 2:
        raise ProjectException(usage_text)
    project_name = sys.argv[1]
    if project_name.startswith("-"):
        raise ProjectException(usage_text)
    # Ensure the given directory name doesn't clash with an existing Python
    # package/module.
    try:
        __import__(project_name)
    except ImportError:
        # Not importable - safe to use as a project name.
        pass
    else:
        raise ProjectException("'%s' conflicts with the name of an existing "
            "Python module and cannot be used as a project name. Please try "
            "another name." % project_name)
    # Copy the bundled template into the current working directory and
    # activate the local settings file by renaming the .template copy.
    ratemyflight_path = os.path.dirname(os.path.abspath(ratemyflight.__file__))
    from_path = os.path.join(ratemyflight_path, "project_template")
    to_path = os.path.join(os.getcwd(), project_name)
    shutil.copytree(from_path, to_path)
    shutil.move(os.path.join(to_path, "local_settings.py.template"),
        os.path.join(to_path, "local_settings.py"))
if __name__ == "__main__":
    # Fix: the original used Python 2-only syntax ("except X, e" and bare
    # print statements), which are syntax errors under Python 3. The error
    # is printed framed by blank lines, as before.
    try:
        create_project()
    except ProjectException as e:
        print()
        print(e)
        print()
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; wi | thout even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################ | ###
# Odoo module manifest for the MostlyOpen Survey module.
{
    'name': 'Survey',
    'summary': 'Survey Module used by MostlyOpen Solutions.',
    'version': '2.0.0',
    'author': 'Carlos Eduardo Vercelino - CLVsol',
    'category': 'Generic Modules/Others',
    'license': 'AGPL-3',
    'website': 'http://mostlyopen.org',
    'depends': [
        'survey',
        'myo_base',
    ],
    'data': [
        'security/survey_security.xml',
        'views/survey_survey_view.xml',
        'views/survey_page_view.xml',
        'views/survey_question_view.xml',
        'views/survey_label_view.xml',
        'views/survey_user_input_view.xml',
        'wizard/survey_update_wizard_view.xml',
    ],
    'demo': [],
    # Fix: the original literal listed the 'test' key twice; a duplicated
    # dict key silently discards the first entry.
    'test': [],
    'init_xml': [],
    'update_xml': [],
    'installable': True,
    'application': False,
    'active': False,
    'css': [],
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
impo | rt pytest
# pylint: disable=attribute-defined-outside-init
class TestOutput(object):
    """Unit tests for ssh_audit's Output and OutputBuffer helpers."""

    @pytest.fixture(autouse=True)
    def init(self, ssh_audit):
        # Grab the classes under test from the ssh_audit fixture.
        self.Output = ssh_audit.Output
        self.OutputBuffer = ssh_audit.OutputBuffer

    def test_output_buffer_no_lines(self, output_spy):
        # An empty buffer produces no output, whether flushed or not.
        output_spy.begin()
        with self.OutputBuffer() as obuf:
            pass
        assert output_spy.flush() == []
        output_spy.begin()
        with self.OutputBuffer() as obuf:
            pass
        obuf.flush()
        assert output_spy.flush() == []

    def test_output_buffer_no_flush(self, output_spy):
        # Buffered output is swallowed unless flush() is called.
        output_spy.begin()
        with self.OutputBuffer():
            print(u'abc')
        assert output_spy.flush() == []

    def test_output_buffer_flush(self, output_spy):
        # flush() replays captured lines in order, preserving blank lines.
        output_spy.begin()
        with self.OutputBuffer() as obuf:
            print(u'abc')
            print()
            print(u'def')
        obuf.flush()
        assert output_spy.flush() == [u'abc', u'', u'def']

    def test_output_defaults(self):
        out = self.Output()
        # default: on
        assert out.batch is False
        assert out.colors is True
        assert out.minlevel == 'info'

    def test_output_colors(self, output_spy):
        out = self.Output()
        # test without colors
        out.colors = False
        output_spy.begin()
        out.info('info color')
        assert output_spy.flush() == [u'info color']
        output_spy.begin()
        out.head('head color')
        assert output_spy.flush() == [u'head color']
        output_spy.begin()
        out.good('good color')
        assert output_spy.flush() == [u'good color']
        output_spy.begin()
        out.warn('warn color')
        assert output_spy.flush() == [u'warn color']
        output_spy.begin()
        out.fail('fail color')
        assert output_spy.flush() == [u'fail color']
        if not out.colors_supported:
            # Environment without color support: nothing more to check.
            return
        # test with colors (ANSI SGR escapes; info stays uncolored)
        out.colors = True
        output_spy.begin()
        out.info('info color')
        assert output_spy.flush() == [u'info color']
        output_spy.begin()
        out.head('head color')
        assert output_spy.flush() == [u'\x1b[0;36mhead color\x1b[0m']
        output_spy.begin()
        out.good('good color')
        assert output_spy.flush() == [u'\x1b[0;32mgood color\x1b[0m']
        output_spy.begin()
        out.warn('warn color')
        assert output_spy.flush() == [u'\x1b[0;33mwarn color\x1b[0m']
        output_spy.begin()
        out.fail('fail color')
        assert output_spy.flush() == [u'\x1b[0;31mfail color\x1b[0m']

    def test_output_sep(self, output_spy):
        # sep() emits exactly one empty line per call.
        out = self.Output()
        output_spy.begin()
        out.sep()
        out.sep()
        out.sep()
        assert output_spy.flush() == [u'', u'', u'']

    def test_output_levels(self):
        # Numeric severity mapping; unknown names map above 'fail'.
        out = self.Output()
        assert out.getlevel('info') == 0
        assert out.getlevel('good') == 0
        assert out.getlevel('warn') == 1
        assert out.getlevel('fail') == 2
        assert out.getlevel('unknown') > 2

    def test_output_minlevel_property(self):
        # Setting minlevel normalizes to the level's canonical name.
        out = self.Output()
        out.minlevel = 'info'
        assert out.minlevel == 'info'
        out.minlevel = 'good'
        assert out.minlevel == 'info'
        out.minlevel = 'warn'
        assert out.minlevel == 'warn'
        out.minlevel = 'fail'
        assert out.minlevel == 'fail'
        out.minlevel = 'invalid level'
        assert out.minlevel == 'unknown'

    def test_output_minlevel(self, output_spy):
        # minlevel filters info/good/warn/fail; head is only hidden by batch.
        out = self.Output()
        # visible: all
        out.minlevel = 'info'
        output_spy.begin()
        out.info('info color')
        out.head('head color')
        out.good('good color')
        out.warn('warn color')
        out.fail('fail color')
        assert len(output_spy.flush()) == 5
        # visible: head, warn, fail
        out.minlevel = 'warn'
        output_spy.begin()
        out.info('info color')
        out.head('head color')
        out.good('good color')
        out.warn('warn color')
        out.fail('fail color')
        assert len(output_spy.flush()) == 3
        # visible: head, fail
        out.minlevel = 'fail'
        output_spy.begin()
        out.info('info color')
        out.head('head color')
        out.good('good color')
        out.warn('warn color')
        out.fail('fail color')
        assert len(output_spy.flush()) == 2
        # visible: head
        out.minlevel = 'invalid level'
        output_spy.begin()
        out.info('info color')
        out.head('head color')
        out.good('good color')
        out.warn('warn color')
        out.fail('fail color')
        assert len(output_spy.flush()) == 1

    def test_output_batch(self, output_spy):
        # batch mode suppresses head() output only.
        out = self.Output()
        # visible: all
        output_spy.begin()
        out.minlevel = 'info'
        out.batch = False
        out.info('info color')
        out.head('head color')
        out.good('good color')
        out.warn('warn color')
        out.fail('fail color')
        assert len(output_spy.flush()) == 5
        # visible: all except head
        output_spy.begin()
        out.minlevel = 'info'
        out.batch = True
        out.info('info color')
        out.head('head color')
        out.good('good color')
        out.warn('warn color')
        out.fail('fail color')
        assert len(output_spy.flush()) == 4
|
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    """One element of a disjoint-set forest.

    A freshly created node is the root of its own singleton set: it is
    its own parent and carries rank 0.
    """

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) structure with union by rank and
    path compression."""

    def __init__(self) -> None:
        # payload value -> its DisjointSetTreeNode wrapper
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        """Start a new singleton set containing *data*."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        """Return the root of the set containing *data*, compressing the
        path to the root along the way."""
        node = self.map[data]
        if node is not node.parent:
            node.parent = self.find_set(node.parent.data)
        return node.parent

    def link(
        self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]
    ) -> None:
        """Attach the lower-rank root beneath the higher-rank one
        (helper for union)."""
        if node1.rank > node2.rank:
            node2.parent = node1
            return
        node1.parent = node2
        if node1.rank == node2.rank:
            node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        """Merge the sets containing *data1* and *data2*."""
        self.link(self.find_set(data1), self.find_set(data2))
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as a nested adjacency mapping."""

    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        """Add *node* to the graph ONLY if it is not already present."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge of the given weight (creating the
        endpoints as needed); a repeated edge overwrites the weight."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's Algorithm to generate a Minimum Spanning Tree (MST) of a graph
        """
        Details: https://en.wikipedia.org/wiki/Kruskal%27s_algorithm

        Example:
        >>> g1 = GraphUndirectedWeighted[int]()
        >>> g1.add_edge(1, 2, 1)
        >>> g1.add_edge(2, 3, 2)
        >>> g1.add_edge(3, 4, 1)
        >>> g1.add_edge(3, 5, 100) # Removed in MST
        >>> g1.add_edge(4, 5, 5)
        >>> assert 5 in g1.connections[3]
        >>> mst = g1.kruskal()
        >>> assert 5 not in mst.connections[3]
        >>> g2 = GraphUndirectedWeighted[str]()
        >>> g2.add_edge('A', 'B', 1)
        >>> g2.add_edge('B', 'C', 2)
        >>> g2.add_edge('C', 'D', 1)
        >>> g2.add_edge('C', 'E', 100) # Removed in MST
        >>> g2.add_edge('D', 'E', 5)
        >>> assert 'E' in g2.connections["C"]
        >>> mst = g2.kruskal()
        >>> assert 'E' not in mst.connections['C']
        """
        # getting the edges in ascending order of weights; each undirected
        # edge is recorded only once via the `seen` set.
        edges = []
        seen = set()
        for start in self.connections:
            for end, weight in self.connections[start].items():
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, weight))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        # Robustness fix: also stop when the edge list is exhausted, so a
        # disconnected graph yields a minimum spanning *forest* instead of
        # raising IndexError (the original loop indexed past the end).
        while num_edges < len(self.connections) - 1 and index < len(edges):
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 4

    def run_test(self):
        # Initially the whole network agrees on a single active tip.
        tips = self.nodes[0].getchaintips()
        assert_equal(len(tips), 1)
        assert_equal(tips[0]['branchlen'], 0)
        assert_equal(tips[0]['height'], 200)
        assert_equal(tips[0]['status'], 'active')

        # Split the network and build two chains of different lengths.
        self.split_network()
        self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
        self.nodes[2].generatetoaddress(20, self.nodes[2].get_deterministic_priv_key().address)
        self.sync_all([self.nodes[:2], self.nodes[2:]])

        # First half sees only its (shorter) chain.
        tips = self.nodes[1].getchaintips()
        assert_equal(len(tips), 1)
        short_tip = tips[0]
        assert_equal(short_tip['branchlen'], 0)
        assert_equal(short_tip['height'], 210)
        assert_equal(tips[0]['status'], 'active')

        # Second half sees only its (longer) chain.
        tips = self.nodes[3].getchaintips()
        assert_equal(len(tips), 1)
        long_tip = tips[0]
        assert_equal(long_tip['branchlen'], 0)
        assert_equal(long_tip['height'], 220)
        assert_equal(tips[0]['status'], 'active')

        # Join the network halves and check that we now have two tips
        # (at least at the nodes that previously had the short chain).
        self.join_network()
        tips = self.nodes[0].getchaintips()
        assert_equal(len(tips), 2)
        assert_equal(tips[0], long_tip)
        assert_equal(tips[1]['branchlen'], 10)
        assert_equal(tips[1]['status'], 'valid-fork')
        # After normalizing branchlen/status the fork entry must equal the
        # former short tip.
        tips[1]['branchlen'] = 0
        tips[1]['status'] = 'active'
        assert_equal(tips[1], short_tip)
if __name__ == '__main__':
    GetChainTipsTest().main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the unused ``hosted_url`` field from the ``webapp`` model."""

    dependencies = [
        ('webapps', '0008_remove_china_queue'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='webapp',
            name='hosted_url',
        ),
    ]
|
import unittest
import transaction
import os
import csv
from pyramid import testing
from thesis.models import DBSession
from sqlalchemy import create_engine
from thesis.models import (
Base,
GriddedMappablePoint,
Layer
)
class TestGriddedMappableItem(unittest.TestCase):
    """Integration tests for GriddedMappablePoint clustering and GeoJSON
    output, backed by a live PostGIS test database.

    NOTE(review): the PostGIS connection string (including credentials) is
    hard-coded below and repeated in setUp/tearDown; consider moving it to
    test configuration.
    """

    def setUp(self):
        self.config = testing.setUp()
        engine = create_engine('postgresql+psycopg2://thesis_db_user:_89_hHh_989g2988h08g2As@127.0.0.1:5432/thesis_test_db')
        DBSession.configure(bind=engine)
        Base.metadata.create_all(engine)
        with transaction.manager:
            # Add TestLayer1
            test_layer_1 = Layer(name='TestLayer1')
            test_layer_1.mappable_points = [
                GriddedMappablePoint('Point(30 10)'),
                GriddedMappablePoint('Point(20 10)'),
            ]
            DBSession.add(test_layer_1)
            # Add TestLayer2
            test_layer_2 = Layer(name='TestLayer2')
            test_layer_2.mappable_points = [
                GriddedMappablePoint('Point(10 15)'),
                GriddedMappablePoint('Point(10 15)'),
                GriddedMappablePoint('Point(30 15)'),
            ]
            DBSession.add(test_layer_2)
            # Add Emu Layer, populated from the bundled CSV fixture.
            tests_path = os.path.dirname(os.path.abspath(__file__))
            test_fixtures_path = os.path.join(tests_path, 'fixtures')
            emu_csv_path = os.path.join(test_fixtures_path, 'emu.csv')
            emu_layer = Layer(name='Emu')
            # NOTE(review): opening in 'rb' and feeding csv.reader is the
            # Python 2 idiom; under Python 3 this must be text mode with
            # newline='' - confirm the target interpreter.
            with open(emu_csv_path, 'rb') as csvfile:
                emu_reader = csv.reader(csvfile)
                rownum = 0
                header = None
                for row in emu_reader:
                    # Save header row.
                    if rownum == 0:
                        header = row
                    else:
                        colnum = 0
                        latitude = 0
                        longitude = 0
                        # Pull coordinates out of the LNGDEC/LATDEC columns.
                        for col in row:
                            column_label = header[colnum]
                            if column_label == "LNGDEC":
                                longitude = col
                            elif column_label == "LATDEC":
                                latitude = col
                            # print '%-8s: %s' % (column_label, col)
                            colnum += 1
                        # Skip rows with a missing/empty coordinate.
                        if longitude and latitude:
                            mappable_point = GriddedMappablePoint('Point(%s %s)' % (longitude, latitude))
                            emu_layer.mappable_points.append(mappable_point)
                    rownum += 1
            DBSession.add(emu_layer)

    def tearDown(self):
        DBSession.remove()
        testing.tearDown()
        engine = create_engine('postgresql+psycopg2://thesis_db_user:_89_hHh_989g2988h08g2As@127.0.0.1:5432/thesis_test_db')
        DBSession.configure(bind=engine)
        # Drop all the models
        Base.metadata.drop_all(engine)

    def test_search_layers_by_name(self):
        # Each fixture layer is retrievable by name with its point count.
        test_layer_1 = DBSession.query(Layer).\
            filter_by(name='TestLayer1').one()
        self.assertEqual(test_layer_1.name, 'TestLayer1')
        self.assertEqual(len(test_layer_1.mappable_points), 2)
        test_layer_2 = DBSession.query(Layer).\
            filter_by(name='TestLayer2').one()
        self.assertEqual(test_layer_2.name, 'TestLayer2')
        self.assertEqual(len(test_layer_2.mappable_points), 3)

    def test_emu_fixure_loaded(self):
        # The CSV-driven layer should have imported a non-trivial number
        # of points.
        test_emu_layer = DBSession.query(Layer).\
            filter_by(name='Emu').one()
        self.assertGreater(len(test_emu_layer.mappable_points), 5)

    def test_get_layer_points_as_geo_json(self):
        # Cluster sizes depend on the grid size: small grids keep points
        # separate, a large grid merges them.
        test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one()
        test_layer_2 = DBSession.query(Layer).filter_by(name='TestLayer2').one()
        q = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=1)
        result = q.all()
        # self.assertEqual(result[0].locations, '{"type":"MultiPoint","coordinates":[[20,10]]}')
        # self.assertEqual(result[1].locations, '{"type":"MultiPoint","coordinates":[[30,10]]}')
        self.assertEqual(result[0].cluster_size, 1)
        self.assertEqual(result[1].cluster_size, 1)
        q2 = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=100)
        result2 = q2.all()
        # self.assertEqual(result2[0].locations, '{"type":"MultiPoint","coordinates":[[30,10],[20,10]]}')
        self.assertEqual(result2[0].cluster_size, 2)
        q3 = GriddedMappablePoint.get_points_as_geojson(test_layer_2, grid_size=1)
        result3 = q3.all()
        # self.assertEqual(result3[0].locations, '{"type":"MultiPoint","coordinates":[[10,15],[10,15]]}')
        # self.assertEqual(result3[1].locations, '{"type":"MultiPoint","coordinates":[[30,15]]}')
        self.assertEqual(result3[0].cluster_size, 2)
        self.assertEqual(result3[1].cluster_size, 1)

    def test_get_cluster_centroids_as_geo_json(self):
        # Centroids: identity for singleton clusters, averaged coordinates
        # for merged clusters.
        test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one()
        test_layer_2 = DBSession.query(Layer).filter_by(name='TestLayer2').one()
        q = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=1)
        result = q.all()
        self.assertEqual(result[0].centroid, '{"type":"Point","coordinates":[20,10]}')
        self.assertEqual(result[1].centroid, '{"type":"Point","coordinates":[30,10]}')
        q2 = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=100)
        result2 = q2.one()
        self.assertEqual(result2.centroid, '{"type":"Point","coordinates":[25,10]}')
        q3 = GriddedMappablePoint.get_points_as_geojson(test_layer_2, grid_size=100)
        result3 = q3.one()
        self.assertEqual(result3.centroid, '{"type":"Point","coordinates":[16.6666666666667,15]}')

    def test_get_layer_points_as_wkt(self):
        # NOTE(review): assertions are commented out, so this only checks
        # that the query executes without raising.
        test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one()
        q = GriddedMappablePoint.get_points_as_wkt(test_layer_1, grid_size=1)
        result = q.all()
        # self.assertEqual(result[0].locations, 'MULTIPOINT(20 10)')
        # self.assertEqual(result[1].locations, 'MULTIPOINT(30 10)')

    def test_normalise_grid_size(self):
        # Grid sizes snap to discrete values (0, powers-of-two style steps).
        grid_size_1 = GriddedMappablePoint.normalise_grid_size(10)
        self.assertEqual(grid_size_1, 8)
        grid_size_2 = GriddedMappablePoint.normalise_grid_size(0.00001)
        self.assertEqual(grid_size_2, 0)
        grid_size_3 = GriddedMappablePoint.normalise_grid_size(0.9)
        self.assertEqual(grid_size_3, 0.5)
        grid_size_4 = GriddedMappablePoint.normalise_grid_size(1.1)
        self.assertEqual(grid_size_4, 1)
|
for attr in dir(self):
attr = getattr(self, attr)
if isinstance(attr, Attribute):
yield attr
def GetAttribute(self, name):
for i in self.ListAttributes():
# Attributes are accessible by predicate or name
if i.name == name or i.predicate == name:
return i
def __getattr__(self, attr):
"""For unknown attributes just return None.
Often the actual object returned is not the object that is expected. In
those cases attempting to retrieve a specific named attribute will raise,
e.g.:
fd = aff4.FACTORY.Open(urn)
fd.Get(fd.Schema.SOME_ATTRIBUTE, default_value)
This simply ensures that the default is chosen.
Args:
attr: Some ignored attribute.
"""
return None
  # Make sure that when someone references the schema, they receive an instance
  # of the class.
  @property
  def Schema(self):  # pylint: disable=g-bad-name
    """Return a fresh instance of this class's SchemaCls."""
    return self.SchemaCls()
  def __init__(self, urn, mode="r", parent=None, clone=None, token=None,
               local_cache=None, age=NEWEST_TIME, follow_symlinks=True):
    """Construct the AFF4 object, optionally preloading its attributes.

    Args:
      urn: Location of this object; coerced to an RDFURN unless None.
      mode: Access mode string; when it contains "r" attributes are
        preloaded (from local_cache or the data store).
      parent: Optional parent object, flushed/closed together with this one.
      clone: Either a dict of attributes or another AFF4Object whose
        attribute caches are copied, skipping the data store entirely.
      token: Access token forwarded to data-store calls.
      local_cache: Optional mapping of urn -> (attribute, value, ts)
        triples used instead of the data store when reading.
      age: Age policy applied when fetching attributes (default NEWEST_TIME).
      follow_symlinks: Stored on the instance; not used in this method.

    Raises:
      RuntimeError: if *clone* is neither a dict nor an AFF4Object.
    """
    if urn is not None:
      urn = rdfvalue.RDFURN(urn)
    self.urn = urn
    self.mode = mode
    self.parent = parent
    self.token = token
    self.age_policy = age
    self.follow_symlinks = follow_symlinks
    self.lock = utils.PickleableLock()
    # This flag will be set whenever an attribute is changed that has the
    # creates_new_object_version flag set.
    self._new_version = False
    # Mark out attributes to delete when Flushing()
    self._to_delete = set()
    # We maintain two attribute caches - self.synced_attributes reflects the
    # attributes which are synced with the data_store, while self.new_attributes
    # are new attributes which still need to be flushed to the data_store. When
    # this object is instantiated we populate self.synced_attributes with the
    # data_store, while the finish method flushes new changes.
    if clone is not None:
      if isinstance(clone, dict):
        # Just use these as the attributes, do not go to the data store. This is
        # a quick way of creating an object with data which was already fetched.
        self.new_attributes = {}
        self.synced_attributes = clone
      elif isinstance(clone, AFF4Object):
        # We were given another object to clone - we do not need to access the
        # data_store now.
        self.new_attributes = clone.new_attributes.copy()
        self.synced_attributes = clone.synced_attributes.copy()
      else:
        raise RuntimeError("Cannot clone from %s." % clone)
    else:
      self.new_attributes = {}
      self.synced_attributes = {}
      if "r" in mode:
        if local_cache:
          # Read attributes from the supplied cache; missing entries are
          # simply ignored.
          try:
            for attribute, value, ts in local_cache[utils.SmartUnicode(urn)]:
              self.DecodeValueFromAttribute(attribute, value, ts)
          except KeyError:
            pass
        else:
          # Populate the caches from the data store.
          for urn, values in FACTORY.GetAttributes([urn], age=age,
                                                   token=self.token):
            for attribute_name, value, ts in values:
              self.DecodeValueFromAttribute(attribute_name, value, ts)
    if clone is None:
      self.Initialize()
  def Initialize(self):
    """The method is called after construction to initialize the object.

    This will be called after construction, and each time the object is
    unserialized from the datastore.

    An AFF4 object contains attributes which can be populated from the
    database. This method is called to obtain a fully fledged object from
    a collection of attributes. The base implementation is a no-op;
    subclasses override it as needed.
    """
  def DecodeValueFromAttribute(self, attribute_name, value, ts):
    """Given a serialized value, decode the attribute.

    Only attributes which have been previously defined are permitted.

    Args:
      attribute_name: The string name of the attribute.
      value: The serialized attribute value.
      ts: The timestamp of this attribute.
    """
    try:
      # Get the Attribute object from our schema.
      attribute = Attribute.PREDICATES[attribute_name]
      cls = attribute.attribute_type
      # Decode lazily - the value is only deserialized when accessed.
      self._AddAttributeToCache(attribute, LazyDecoder(cls, value, ts),
                                self.synced_attributes)
    except KeyError:
      # Unknown attributes are skipped; "index:" entries are expected noise
      # and are not even logged.
      if not attribute_name.startswith("index:"):
        logging.debug("Attribute %s not defined, skipping.", attribute_name)
    except (ValueError, rdfvalue.DecodeError):
      logging.debug("%s: %s invalid encoding. Skipping.",
                    self.urn, attribute_name)
def _AddAttributeToCache(self, attribute_name, value, cache):
"""Helper to add a new attribute to a cache."""
cache.setdefault(attribute_name, []).append(value)
  def CheckLease(self):
    """Raise LockError if this object's lock lease has already expired.

    No-op when the object is not currently locked.

    Raises:
      LockError: the lease expiry (Schema.LEASED_UNTIL) lies in the past.
    """
    if self.locked:
      leased_until = self.Get(self.Schema.LEASED_UNTIL)
      now = rdfvalue.RDFDatetime().Now()
      if leased_until < now:
        raise LockError("Lease for this object is expired "
                        "(leased until %s, now %s)!" % (leased_until, now))
  def UpdateLease(self, duration):
    """Updates the lease and flushes the object.

    The lease is set to expire after the "duration" time from the present
    moment.

    This method is supposed to be used when an operation that requires
    locking may run for a time that exceeds the lease time specified in
    OpenWithLock(). See flows/hunts locking for an example.

    Args:
      duration: Integer number of seconds. Lease expiry time will be set
        to "time.time() + duration".

    Raises:
      LockError: if the object is not currently locked or the lease has
        expired.
    """
    if not self.locked:
      raise LockError(
          "Object must be locked to update the lease: %s." % self.urn)
    # Check that current lease has not expired yet
    self.CheckLease()
    self.Set(
        self.Schema.LEASED_UNTIL,
        rdfvalue.RDFDatetime().FromSecondsFromEpoch(time.time() + duration))
    self.Flush()
  def Flush(self, sync=True):
    """Syncs this object with the data store, maintaining object validity.

    Args:
      sync: Write the attributes synchronously to the data store.

    Raises:
      LockError: the lock lease has expired (via CheckLease).
    """
    self.CheckLease()
    self._WriteAttributes(sync=sync)
    self._SyncAttributes()
    # Flushing propagates upward through the parent chain.
    if self.parent:
      self.parent.Flush(sync=sync)
  def Close(self, sync=True):
    """Close and destroy the object.

    This is similar to Flush, but does not maintain object validity. Hence
    the object should not be interacted with after Close().

    Args:
      sync: Write the attributes synchronously to the data store.

    Raises:
      LockError: The lease for this object has expired.
    """
    self.CheckLease()
    # Release the lock by zeroing the lease timestamp.
    if self.locked:
      self.Set(self.Schema.LEASED_UNTIL, rdfvalue.RDFDatetime(0))
    # Locked objects must always be written synchronously so the lease
    # release is visible immediately.
    self._WriteAttributes(sync=self.locked or sync)
    if self.parent:
      self.parent.Close(sync=sync)
    # Interacting with a closed object is a bug. We need to catch this ASAP so
    # we remove all mode permissions from this object.
    self.mode = ""
@utils.Synchronized
def _WriteAttributes(self, sync=True):
"""Write the dirty attributes to the data store."""
# If the object is not opened for writing we do not need to flush it to the
# data_store.
if "w" not in self.mode:
return
if self.urn is None:
raise RuntimeError("Storing of anonymous AFF4 objects not supported.")
to_set = {}
for attribute_name, value_array in self.new_attributes.iteritems():
to_set_list = to_set.setdefault(attribute_name, [])
for value in value_array:
to_set_list.append((value.SerializeToDataStore(), value.age))
if self._dirty:
# We determine this object has a new version only if any of the versioned
# attributes have changed. Non-versioned attributes do not represent a new
# object version. The type of an object is versioned and represents a
# version point in the life of the object.
if self._new_version:
to_set[self.Schema.TYPE] = [
(rdfvalue.RDFString(self.__class__.__name__).SerializeToDataStore(),
rdfvalue.RDFDatetime(). |
# This file is part of the Simulation Manager project for VecNet.
# For copyright and licensing information about this project, see the
# NOTICE.txt and LICENSE.md files in its top-level directory; they are
# available at https://github.com/vecnet/simulation-manager
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License (MPL), version 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Tests for the submit_group.py script.
"""
import random
import sys
from crc_nd.utils.test_io import WritesOutputFiles
from django.test import LiveServerTestCase
from mock import patch
from path import path
from vecnet.simulation import ExecutionRequest, sim_model, Simulation, SimulationGroup as SimGroup, submission_status
from .constants import TEST_OUTPUT_ROOT
from .mixins import UsesDatabaseApi
from sim_manager import scripts, working_dirs
from sim_manager.models import SimulationGroup
from sim_manager.scripts import api_urls, batch, input_files, submit_group
from sim_manager.scripts.batch import test_utils
from sim_manager.scripts.constants import SIMULATION_DEFINITION_FILENAME, SIMULATION_SCRIPT
class MainTests(LiveServerTestCase, UsesDatabaseApi, WritesOutputFiles):
    """
    Tests for the script's main function.
    """

    @classmethod
    def setUpClass(cls):
        super(MainTests, cls).setUpClass()
        cls.setup_database_api_user()
        cls.set_output_root(TEST_OUTPUT_ROOT)
        working_dirs.TestingApi.use_testing_root()
        # Add the scripts package's directory to the module search path so the loading of the batch system in the
        # submit_group.py script works. When the script is executed at the command line, the package directory will
        # automatically be added to the search path. But here in the test suite, the package is imported, so its
        # directory is not added automatically. Therefore, we explicitly add it.
        scripts_dir = path(scripts.__file__).dirname()
        sys.path.append(scripts_dir)
        cls.simulation_script = scripts_dir / SIMULATION_SCRIPT

    @classmethod
    def tearDownClass(cls):
        cls.remove_database_api_user()
        working_dirs.TestingApi.reset_root_to_default()
        # Undo the sys.path addition made in setUpClass.
        sys.path.pop()

    @patch('sim_manager.scripts.submit_group.BATCH_SYSTEM', batch.MOCK)
    def test_run_script(self):
        # End-to-end run of submit_group.main with the batch system mocked out.
        group = SimulationGroup.objects.create(submitter=self.test_user)
        self.group_id = group.id
        self.assertEqual(group.script_status, submission_status.READY_TO_RUN)
        # Build an execution request with two simulations for different models.
        self.sim_group = SimGroup()
        simulation_1 = Simulation(model=sim_model.OPEN_MALARIA, model_version='32', id_on_client='349',
                                  output_url='http://ingestor.example.com/output-files/')
        simulation_1.input_files['scenario.xml'] = 'http://www.example.com/data/scenarios/1234/scenario.xml'
        simulation_2 = Simulation(model=sim_model.EMOD, model_version='1.6', cmd_line_args=['--foo', 'bar'],
                                  id_on_client='350', output_url=simulation_1.output_url)
        simulation_2.input_files['config.json'] = 'https://files.vecnet.org/4710584372'
        simulation_2.input_files['campaign.json'] = 'https://files.vecnet.org/678109'
        self.sim_group.simulations = [simulation_1, simulation_2]
        self.execution_request = ExecutionRequest(simulation_group=self.sim_group)
        group.setup_working_dir(self.execution_request)
        group_url = self.live_server_url + ('/api/v1/sim-groups/%s/' % group.id)
        simulations_url = self.live_server_url + '/api/v1/simulations/'
        api_urls.write_for_group(group.working_dir, group_url, simulations_url)
        # The script's test_callback walks through the expect_* methods below,
        # one per checkpoint, starting with expect_script_started.
        self.check_expected_state = self.expect_script_started
        group.working_dir.chdir()
        self.initialize_output_dir()
        stdout = self.get_output_dir() / 'stdout.txt'
        with stdout.open('w') as f:
            exit_status = submit_group.main('foo', 'bar', stdout=f, test_callback=self.callback)
        self.assertEqual(exit_status, 0)
        group = SimulationGroup.objects.get(id=group.id)
        self.assertEqual(group.script_status, submission_status.SCRIPT_DONE)

    def callback(self):
        # Dispatch to the state check expected at this point of the script run.
        if self.check_expected_state:
            self.check_expected_state()
        else:
            self.fail('callback unexpectedly called')

    def expect_script_started(self):
        """
        Confirm that the submission script was started.
        """
        self.assertGroupScriptStatus(submission_status.STARTED_SCRIPT)
        self.check_expected_state = self.expect_cached_files

    def expect_cached_files(self):
        """
        Confirm that the submission script cached input files.
        """
        self.assertGroupScriptStatus(submission_status.CACHING_FILES)
        self.assertTrue(input_files.TestingApi.add_to_cache_mock.called)
        args, kwargs = input_files.TestingApi.add_to_cache_mock.call_args
        self.assertEqual((self.execution_request.input_files,), args)
        self.check_expected_state = self.expect_simulation_created
        self.simulations_created = 0
        test_utils.Mocks.submit_job.reset_mock()
        test_utils.Mocks.submit_job.return_value = generate_job_id()

    def expect_simulation_created(self):
        """
        Confirm that the submission script has created a new simulation in the database.
        """
        self.assertGroupScriptStatus(submission_status.SUBMITTING_JOBS)
        group = SimulationGroup.objects.get(id=self.group_id)
        self.assertEqual(group.simulation_set.count(), self.simulations_created + 1)
        self.simulations_created += 1
        # Check that the working directory is set up properly for the simulation that was just created
        simulation = group.simulation_set.order_by('created_when').last()
        self.assertTrue(simulation.working_dir.isdir())
        sim_definition_path = simulation.working_dir / SIMULATION_DEFINITION_FILENAME
        self.assertTrue(sim_definition_path.isfile())
        sim_definition = Simulation.read_json_file(sim_definition_path)
        expected_sim_definition = self.sim_group.simulations[self.simulations_created - 1]
        self.assertEqual(sim_definition.model, expected_sim_definition.model)
        self.assertEqual(sim_definition.model_version, expected_sim_definition.model_version)
        self.assertEqual(sim_definition.input_files, expected_sim_definition.input_files)
        self.assertEqual(sim_definition.cmd_line_args, expected_sim_definition.cmd_line_args)
        self.assertEqual(sim_definition.id_on_client, expected_sim_definition.id_on_client)
        self.assertEqual(sim_definition.output_url, expected_sim_definition.output_url)
        # Check that the simulation was submitted to the batch system.
        self.assertTrue(test_utils.Mocks.submit_job.called)
        args, kwargs = test_utils.Mocks.submit_job.call_args
        executable, working_dir, cmd_args = args[0], args[1], args[2:]
        self.assertEqual(executable, sys.executable)
        self.assertEqual(working_dir, simulation.working_dir)
        self.assertEqual(list(cmd_args), [self.simulation_script])
        self.assertEqual(simulation.batch_job_id, test_utils.Mocks.submit_job.return_value)
        test_utils.Mocks.submit_job.reset_mock()
        if self.simulations_created < len(self.sim_group.simulations):
            test_utils.Mocks.submit_job.return_value = generate_job_id()
        else:
            # All simulations were seen; no further callbacks are expected.
            self.check_expected_state = None

    def assertGroupScriptStatus(self, expected_status):
        # Reload the group from the database and compare its script status.
        group = SimulationGroup.objects.get(id=self.group_id)
        self.assertEqual(group.script_status, expected_status)
def generate_job_id():
    """Return a random batch-job identifier as a decimal string."""
    job_number = random.randint(1, 100000)
    return str(job_number)
SECR | ET_KEY | = "foo" |
from subprocess import check_call
import logging
import | datetime
def setup(name):
formatter = logging.Formatter(fmt='%(asctime)s - %(le | velname)s - %(message)s')
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
timestamp = datetime.datetime.now().strftime('%Y_%m_%d-%H%M')
file_handler = logging.FileHandler("data/logs/{}.log".format(timestamp))
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
# Module-level logger shared by the helpers below.
log = setup(__name__)
def logged_check_call(parameters):
    """Run *parameters* via subprocess.check_call, logging start and finish."""
    command = ' '.join(parameters)
    log.info("run {command}".format(command=command))
    check_call(parameters)
    log.info("finished")
|
#
# CvHelp.py -- help classes for the Cv drawing
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import math
import numpy
import cv2
from ginga import colors
class Pen(object):
    """Line-drawing attributes: color, line width and opacity."""

    def __init__(self, color='black', linewidth=1, alpha=1.0):
        self.alpha = alpha
        self.linewidth = linewidth
        self.color = color
class Brush(object):
    """Fill attributes: color, whether to fill, and opacity."""

    def __init__(self, color='black', fill=False, alpha=1.0):
        self.alpha = alpha
        self.fill = fill
        self.color = color
class Font(object):
    """Text-drawing attributes for the OpenCV backend."""

    def __init__(self, fontname='ariel', fontsize=12.0, color='black',
                 linewidth=1, alpha=1.0):
        self.alpha = alpha
        self.linewidth = linewidth
        self.color = color
        self.fontname = fontname
        self.fontsize = fontsize
        # OpenCV sizes text with a scale factor relative to a 12pt base font.
        self.scale = fontsize / 12.0
        # TODO: currently there is only support for some simple built-in
        # fonts. What kind of fonts/lookup can we use for this?
        self.font = cv2.FONT_HERSHEY_SIMPLEX
class CvContext(object):
def __init__(self, canvas):
self.canvas = canvas
def set_canvas(self, canvas):
self.canvas = canvas
def get_color(self, color, alpha=1.0):
if isinstance(color, str) or isinstance(color, type(u"")):
r, g, b = colors.lookup_color(color)
elif isinstance(color, tuple):
# color is assumed to be a 3-tuple of RGB values as floats
# between 0 and 1
r, g, b = color
else:
r, g, b = 1.0, 1.0, 1.0
# According to documentation, OpenCV expects colors as BGRA tuple
# BUT, seems we need to specify RGBA--I suppose we need to match
# what is defined as _rgb_order attribute in ImageViewCv class
#return (int(alpha*255), int(b*255), int(g*255), int(r*255))
return (int(r*255), int(g*255), int(b*255), int(alpha*255))
def get_pen(self, color, linewidth=1, alpha=1.0):
    """Build a Pen with *color* resolved to an int RGBA tuple."""
    # NOTE: dashed line styles are not supported by this backend yet.
    rgba = self.get_color(color, alpha=alpha)
    return Pen(color=rgba, linewidth=linewidth, alpha=alpha)
def get_brush(self, color, alpha=1.0):
    """Build a filled Brush with *color* resolved to an int RGBA tuple."""
    rgba = self.get_color(color, alpha=alpha)
    return Brush(color=rgba, fill=True, alpha=alpha)
def get_font(self, name, size, color, linewidth=1, alpha=1.0):
color = self.get_color(color, alpha=alpha)
return Font(fontname=name, fontsize=size, color=color,
linewidth=linewidth, alpha=alpha)
def text_extents(self, text, font):
## retval, baseline = cv2.getTextSize(text, font.font, font.fontsize,
## | font.linewidth)
retval, baseline = cv2.getTextSize(text, font.font, font.scale,
font.linewidth)
wd, ht = retval
return wd, ht
def text(self, pt, text, font):
x, y = pt
## cv2.putText(self.canvas, text, (x, y), font.font, font.scale,
## font.color, thickness=font.linewidth,
## lineType=cv2.CV_AA)
cv2.putText(self.canvas, text, (x, y), font.font, font.scale,
font.color, thickness=font.linewidth)
def line(self, pt1, pt2, pen):
x1, y1 = int(round(pt1[0])), int(round(pt1[1]))
x2, y2 = int(round(pt2[0])), int(round(pt2[1]))
cv2.line(self.canvas, (x1, y1), (x2, y2), pen.color, pen.linewidth)
def circle(self, pt, radius, pen, brush):
x, y = pt
radius = int(radius)
if (brush is not None) and brush.fill:
cv2.circle(self.canvas, (x, y), radius, brush.color, -1)
cv2.circle(self.canvas, (x, y), radius, pen.color, pen.linewidth)
def rectangle(self, pt1, pt2, pen, brush):
x1, y1 = pt1
x2, y2 = pt2
cv2.rectangle(self.canvas, (x1, y1), (x2, y2), pen.color, pen.linewidth)
def ellipse(self, pt, xr, yr, theta, pen, brush):
x, y = pt
if (brush is not None) and brush.fill:
cv2.ellipse(self.canvas, (x, y), (xr, yr), theta, 0.0, 360.0,
brush.color, -1)
cv2.ellipse(self.canvas, (x, y), (xr, yr), theta, 0.0, 360.0,
pen.color, pen.linewidth)
def polygon(self, points, pen, brush):
pts = numpy.array(points, numpy.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(self.canvas, [pts], True, pen.color, pen.linewidth)
if (brush is not None) and brush.fill:
cv2.fillPoly(self.canvas, [pts], brush.color)
def path(self, points, pen):
pts = numpy.array(points, numpy.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(self.canvas, [pts], False, pen.color, pen.linewidth)
#END
|
__all__ = (
'Net',
)
import builtins
import collections
import itertools
from .errors import InternalError, NodeError
from .marking import Marking
from .net_element import NamedNetElement
from .node import Node
from .place import Place
from .transition import Transition
class Net(NamedNetElement):
    """A Petri net: named places and transitions connected by arcs."""

    # Factory for the place/transition registries (insertion-ordered).
    __dict_factory__ = collections.OrderedDict
    __defaultdict_factory__ = collections.defaultdict
    # Default globals namespace: a snapshot of the builtins module.
    __globals__ = {attr_name: getattr(builtins, attr_name) for attr_name in dir(builtins)}

    def __init__(self, name=None, globals_d=None):
        """Create a net.

        Args:
            name: optional net name.
            globals_d: optional globals dict; defaults to a copy of builtins.
        """
        super().__init__(name=name, net=self)
        self._places = self.__dict_factory__()
        self._transitions = self.__dict_factory__()
        self._engines = []
        if globals_d is None:
            globals_d = self.__globals__.copy()
        self._globals_d = globals_d

    ### declare names:
    def declare(self, name, value):
        """Bind *value* to *name* in the net's globals."""
        self._globals_d[name] = value

    @property
    def globals_d(self):
        """The globals dict used by this net."""
        return self._globals_d

    ### add engine:
    def add_engine(self, engine):
        """Register an engine to receive net notifications."""
        self._engines.append(engine)

    def engines(self):
        """Iterate over the registered engines."""
        yield from self._engines

    ### add nodes:
    def _check_node(self, node_type, node, **kwargs):
        """Coerce *node* (a name or an instance) to a node bound to this net."""
        if isinstance(node, str):
            # A string names a brand-new node of the requested type.
            node = node_type(net=self, name=node, **kwargs)
        elif isinstance(node, node_type):
            if node.net is not None and node.net is not self:
                raise NodeError("cannot add {!r}: already bound".format(node))
            if kwargs:
                node.update(**kwargs)
        else:
            raise NodeError("cannot add {!r}: not a {}".format(node, node_type.__name__))
        # Node names must be unique across both places and transitions.
        if node.name in self._places or node.name in self._transitions:
            raise NodeError("cannot add {!r}: a node with the same name already exists".format(node))
        node.bind(net=self)
        return node

    def add_place(self, place, tokens=None, **kwargs):
        """Add a place (by name or instance) and return it."""
        self.notify_net_changed()
        place = self._check_node(Place, place, tokens=tokens, **kwargs)
        self._places[place.name] = place
        return place

    def add_transition(self, transition, guard=None, **kwargs):
        """Add a transition (by name or instance) and return it."""
        self.notify_net_changed()
        transition = self._check_node(Transition, transition, guard=guard, **kwargs)
        self._transitions[transition.name] = transition
        return transition

    def add_node(self, node):
        """Add a Place or Transition instance, dispatching on its type."""
        if isinstance(node, Place):
            return self.add_place(node)
        elif isinstance(node, Transition):
            return self.add_transition(node)
        else:
            raise NodeError("cannot add {!r}: not a valid node".format(node))

    ### get nodes:
    def place(self, name):
        """Return the place called *name* (KeyError if absent)."""
        return self._places[name]

    def places(self):
        """Iterate over all places."""
        yield from self._places.values()

    def transition(self, name):
        """Return the transition called *name* (KeyError if absent)."""
        return self._transitions[name]

    def transitions(self):
        """Iterate over all transitions."""
        yield from self._transitions.values()

    def node(self, name):
        """Return the place or transition called *name* (KeyError if absent)."""
        if name in self._places:
            return self._places[name]
        elif name in self._transitions:
            return self._transitions[name]
        else:
            raise KeyError(name)

    def nodes(self):
        """Iterate over all nodes: places first, then transitions."""
        yield from self.places()
        yield from self.transitions()

    ### add arcs:
    def add_input(self, place, transition, annotation):
        """Add an input arc place -> transition and return the arc."""
        self.notify_net_changed()
        if isinstance(place, str):
            place = self.place(place)
        if isinstance(transition, str):
            transition = self.transition(transition)
        arc = transition.add_input(place, annotation=annotation)
        return arc

    def add_output(self, place, transition, annotation):
        """Add an output arc transition -> place and return the arc."""
        self.notify_net_changed()
        if isinstance(place, str):
            place = self.place(place)
        if isinstance(transition, str):
            transition = self.transition(transition)
        arc = transition.add_output(place, annotation=annotation)
        return arc

    def _get_node(self, node):
        """Resolve *node* (a name or an instance) to a node bound to this net."""
        if isinstance(node, Node):
            if node.net is not self:
                raise NodeError("{!r}: node {!r} not bound to this net".format(self, node))
        else:
            node = self.node(node)
        return node

    def input_arcs(self, node):
        """The input arcs of *node* (name or instance)."""
        node = self._get_node(node)
        return node.input_arcs()

    def inputs(self, node):
        """The input nodes of *node* (name or instance)."""
        node = self._get_node(node)
        return node.inputs()

    def output_arcs(self, node):
        """The output arcs of *node* (name or instance)."""
        node = self._get_node(node)
        return node.output_arcs()

    def outputs(self, node):
        """The output nodes of *node* (name or instance)."""
        node = self._get_node(node)
        return node.outputs()

    ### root nodes:
    def root_places(self):
        """Iterate over places that have no inputs."""
        for node in self.places():
            if len(node.inputs()) == 0:
                yield node

    def root_transitions(self):
        """Iterate over transitions that have no inputs."""
        for node in self.transitions():
            if len(node.inputs()) == 0:
                yield node

    def root_nodes(self):
        """Iterate over all input-less nodes: places first, then transitions."""
        yield from self.root_places()
        yield from self.root_transitions()

    ### dict interface:
    def __getitem__(self, name):
        return self.node(name)

    def __iter__(self):
        yield from self.nodes()

    def __len__(self):
        return len(self._places) + len(self._transitions)

    ### walk:
    def walk(self, *, depth_first=False, first_only=True):
        """Walk the net from its root nodes, following output arcs.

        Args:
            depth_first: traverse depth-first instead of breadth-first.
            first_only: yield each node at most once.
        """
        if first_only:
            seen = set()

            def not_seen(node):
                # Remember visited nodes so each is yielded only once.
                b = node not in seen
                if b:
                    seen.add(node)
                return b
        else:
            not_seen = lambda node: True
        nodes = itertools.chain(self.root_nodes())
        while True:
            if depth_first:
                try:
                    node = next(nodes)
                except StopIteration:
                    break
                yield node
                # Prepend unvisited successors so they are visited first.
                next_nodes = itertools.chain(filter(not_seen, self.outputs(node)), nodes)
                nodes = iter(next_nodes)
            else:
                # Breadth-first: exhaust the frontier, then move one level out.
                next_nodes = []
                for node in nodes:
                    yield node
                    new_nodes = filter(not_seen, self.outputs(node))
                    next_nodes.extend(new_nodes)
                if not next_nodes:
                    break
                nodes = iter(next_nodes)

    ### marking:
    def get_marking(self):
        """Snapshot the current token marking (only non-empty places)."""
        marking = Marking()
        for place in self._places.values():
            if place.tokens:
                marking[place.name] = place.tokens.copy()
        return marking

    def set_marking(self, marking):
        """Replace the tokens of every place with those given in *marking*."""
        for place in self._places.values():
            tokens = marking.get(place.name)
            place.tokens.clear()
            if tokens:
                place.tokens.extend(tokens)

    ### notifications:
    def notify_transition_fired(self, transition):
        """Inform all registered engines that *transition* has fired."""
        for engine in self._engines:
            engine.notify_transition_fired(transition)

    def notify_net_changed(self):
        """Inform all registered engines that the net structure changed."""
        for engine in self._engines:
            engine.notify_net_changed()
|
"""
We are playing the Guess Game. The game is as follows:
I pick a number from 1 to n. You have to guess which number I picked.
Every time you guess wrong, I'll tell you whether the number I picked is higher or lower.
However, when you guess a particular number x, and you guess wrong, you pay $x. You win the game when you guess the number I picked.
Example:
n = 10, I pick 8.
First round: You guess 5, I tell you that it's higher. You pay $5.
Second round: You guess 7, I tell you that it's higher. You pay $ | 7.
Third round: You guess 9, I tell you that it's lower. You pay $9.
Game over. 8 is the number I picked.
You end up paying $5 + $7 + $9 = $21.
Given a particular n ≥ 1, find out how much money you need to have to guarantee a win.
"""
class Solution(object):
    """Minimax cost to guarantee a win in the pay-what-you-guess game."""

    def getMoneyAmount(self, n):
        """
        :type n: int
        :rtype: int
        """
        # dp[s][e] memoizes the answer for the sub-range [s, e]; 0 = unsolved.
        self.dp = [[0] * (n + 1) for _ in range(n + 1)]
        return self.helper(1, n)

    def helper(self, s, e):
        """Worst-case optimal cost when the number lies in [s, e]."""
        if s >= e:
            return 0
        if self.dp[s][e] != 0:
            return self.dp[s][e]
        # Choose the guess minimizing the worse of the two resulting ranges.
        best = min(
            guess + max(self.helper(s, guess - 1), self.helper(guess + 1, e))
            for guess in range(s, e + 1)
        )
        self.dp[s][e] = best
        return best
gs, pearl_output, \
pearl_param_mapping
class Pearl(AbstractInst):
def __init__(self, **kwargs):
    """Build the PEARL instrument from user kwargs plus advanced config."""
    self._inst_settings = instrument_settings.InstrumentSettings(
        param_map=pearl_param_mapping.attr_mapping, adv_conf_dict=pearl_advanced_config.get_all_adv_variables(),
        kwargs=kwargs)
    super(Pearl, self).__init__(user_name=self._inst_settings.user_name,
                                calibration_dir=self._inst_settings.calibration_dir,
                                output_dir=self._inst_settings.output_dir, inst_prefix="PEARL")
    # Run details cache, keyed by the fingerprint built in _get_run_details.
    self._cached_run_details = {}
def focus(self, **kwargs):
    """Focus the configured run(s); kwargs update the instrument settings."""
    # long_mode changes other settings, so it must be applied first.
    self._switch_long_mode_inst_settings(kwargs.get("long_mode"))
    self._inst_settings.update_attributes(kwargs=kwargs)
    return self._focus(run_number_string=self._inst_settings.run_number,
                       do_absorb_corrections=self._inst_settings.absorb_corrections,
                       do_van_normalisation=self._inst_settings.van_norm)
def create_vanadium(self, **kwargs):
    """Create vanadium calibration(s); tt_mode 'all' runs every tt mode."""
    self._switch_long_mode_inst_settings(kwargs.get("long_mode"))
    kwargs["perform_attenuation"] = None  # Hard code this off as we do not need an attenuation file
    self._inst_settings.update_attributes(kwargs=kwargs)
    if str(self._inst_settings.tt_mode).lower() == "all":
        # 'all' expands into one vanadium run per concrete tt mode.
        for new_tt_mode in ["tt35", "tt70", "tt88"]:
            self._inst_settings.tt_mode = new_tt_mode
            self._run_create_vanadium()
    else:
        self._run_create_vanadium()
def create_cal(self, **kwargs):
    """Create a calibration via cross-correlation and detector offsets."""
    self._switch_long_mode_inst_settings(kwargs.get("long_mode"))
    self._inst_settings.update_attributes(kwargs=kwargs)
    run_details = self._get_run_details(self._inst_settings.run_number)
    # Parameters forwarded to Mantid's CrossCorrelate algorithm.
    cross_correlate_params = {"ReferenceSpectra": self._inst_settings.reference_spectra,
                              "WorkspaceIndexMin": self._inst_settings.cross_corr_ws_min,
                              "WorkspaceIndexMax": self._inst_settings.cross_corr_ws_max,
                              "XMin": self._inst_settings.cross_corr_x_min,
                              "XMax": self._inst_settings.cross_corr_x_max}
    # Parameters forwarded to Mantid's GetDetectorOffsets algorithm.
    get_detector_offsets_params = {"DReference": self._inst_settings.d_reference,
                                   "Step": self._inst_settings.get_det_offsets_step,
                                   "XMin": self._inst_settings.get_det_offsets_x_min,
                                   "XMax": self._inst_settings.get_det_offsets_x_max}
    return pearl_calibration_algs.create_calibration(calibration_runs=self._inst_settings.run_number,
                                                     instrument=self,
                                                     offset_file_name=run_details.offset_file_path,
                                                     grouping_file_name=run_details.grouping_file_path,
                                                     calibration_dir=self._inst_settings.calibration_dir,
                                                     rebin_1_params=self._inst_settings.cal_rebin_1,
                                                     rebin_2_params=self._inst_settings.cal_rebin_2,
                                                     cross_correlate_params=cross_correlate_params,
                                                     get_det_offset_params=get_detector_offsets_params)
def _run_create_vanadium(self):
    """Create a single vanadium run using the current instrument settings."""
    # Provides a minimal wrapper so if we have tt_mode 'all' we can loop round
    return self._create_vanadium(run_number_string=self._inst_settings.run_in_range,
                                 do_absorb_corrections=self._inst_settings.absorb_corrections)
def _get_run_details(self, run_number_string):
    """Return (and memoize) the run details for *run_number_string*."""
    # The cache key must include file extension and tt_mode, since both
    # affect the resulting run details.
    run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
                                                                   self._inst_settings.file_extension,
                                                                   self._inst_settings.tt_mode)
    if run_number_string_key in self._cached_run_details:
        return self._cached_run_details[run_number_string_key]
    self._cached_run_details[run_number_string_key] = pearl_algs.get_run_details(
        run_number_string=run_number_string, inst_settings=self._inst_settings, is_vanadium_run=self._is_vanadium)
    return self._cached_run_details[run_number_string_key]
def _generate_output_file_name(self, run_number_string):
    """Build the output name for *run_number_string* (long/tt mode aware)."""
    inst = self._inst_settings
    return pearl_algs.generate_out_name(run_number_string=run_number_string,
                                        long_mode_on=inst.long_mode, tt_mode=inst.tt_mode)
def _normalise_ws_current(self, ws_to_correct):
    """Normalise *ws_to_correct* by its extracted monitor spectrum."""
    monitor_spectra = self._inst_settings.monitor_spec_no
    monitor_ws = common.extract_single_spectrum(ws_to_process=ws_to_correct,
                                                spectrum_number_to_extract=monitor_spectra)
    normalised_ws = pearl_algs.normalise_ws_current(ws_to_correct=ws_to_correct, monitor_ws=monitor_ws,
                                                    spline_coeff=self._inst_settings.monitor_spline,
                                                    integration_range=self._inst_settings.monitor_integration_range,
                                                    lambda_values=self._inst_settings.monitor_lambda,
                                                    ex_regions=self._inst_settings.monitor_mask_regions)
    # The extracted monitor workspace is intermediate; drop it once applied.
    common.remove_intermediate_workspace(monitor_ws)
    return normalised_ws
def _get_current_tt_mode(self):
    """Return the currently selected tt mode from the instrument settings."""
    return self._inst_settings.tt_mode
def _spline_vanadium_ws(self, focused_vanadium_spectra):
    """Strip Bragg peaks, spline the vanadium spectra, and rename uniquely."""
    focused_vanadium_spectra = pearl_algs.strip_bragg_peaks(focused_vanadium_spectra)
    splined_list = common.spline_workspaces(focused_vanadium_spectra=focused_vanadium_spectra,
                                            num_splines=self._inst_settings.spline_coefficient)
    # Ensure the name is unique if we are in tt_mode all
    new_workspace_names = []
    for ws in splined_list:
        new_name = ws.getName() + '_' + self._inst_settings.tt_mode
        new_workspace_names.append(mantid.RenameWorkspace(InputWorkspace=ws, OutputWorkspace=new_name))
    return new_workspace_names
def _output_focused_ws(self, processed_spectra, run_details, output_mode=None):
    """Save focused spectra and return them grouped in d-spacing."""
    if not output_mode:
        output_mode = self._inst_settings.focus_mode
    # Attenuation is optional; only pass a file path when it is enabled.
    if self._inst_settings.perform_atten:
        attenuation_path = self._inst_settings.attenuation_file_path
    else:
        attenuation_path = None
    output_spectra = \
        pearl_output.generate_and_save_focus_output(self, processed_spectra=processed_spectra,
                                                    run_details=run_details, focus_mode=output_mode,
                                                    attenuation_filepath=attenuation_path)
    group_name = "PEARL" + str(run_details.output_run_string)
    group_name += '_' + self._inst_settings.tt_mode + "-Results-D-Grp"
    grouped_d_spacing = mantid.GroupWorkspaces(InputWorkspaces=output_spectra, OutputWorkspace=group_name)
    # NOTE(review): the second element of the pair is always None here --
    # presumably a TOF grouping used by other instruments; confirm.
    return grouped_d_spacing, None
def _crop_banks_to_user_tof(self, focused_banks):
    """Crop each focused bank to the configured per-bank TOF windows."""
    return common.crop_banks_using_crop_list(focused_banks, self._inst_settings.tof_cropping_values)
def _crop_raw_to_expected_tof_range(self, ws_to_crop):
    """Crop raw data to the instrument's expected TOF range."""
    out_ws = common.crop_in_tof(ws_to_crop=ws_to_crop, x_min=self._inst_settings.raw_data_crop_vals[0],
                                x_max=self._inst_settings.raw_data_crop_vals[-1])
    return out_ws
def _crop_van_to_expected_tof_range(self, van_ws_to_crop):
cropped_ws = common.crop_in_tof(ws_to_crop=van_ws_to_crop, x_min=self._inst_settings.van_tof_cropping[0],
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# compute/__init__.py
"""
See |compute.subsystem|, |compute.network|, |compute.distance|, and
|compute.parallel| for documentation.
Attributes:
all_complexes: Alias for :func:`pyphi.compute.network.all_complexes`.
ces: Alias for :func:`pyphi.compute.subsystem.ces`.
ces_distance: Alias for :func:`pyphi.compute.distance.ces_distance`.
complexes: Alias for :func:`pyphi.compute.network.complexes`.
concept_distance: Alias for
:func:`pyphi.compute.distance.concept_distance`.
conceptual_info: Alias for :func:`pyphi.compute.subsystem.conceptual_info`.
condensed: Alias for :func:`pyphi.compute.network.condensed`.
evaluate_cut: Alias for :func:`pyphi.compute.subsystem.evaluate_cut`.
major_complex: Alias for :func:`pyphi.compute.network.major_complex`.
phi: Alias for :func:`pyphi.compute.subsystem.phi`.
possible_complexes: Alias for
| :func:`pyphi.compute.network.possible_complex | es`.
sia: Alias for :func:`pyphi.compute.subsystem.sia`.
subsystems: Alias for :func:`pyphi.compute.network.subsystems`.
"""
# pylint: disable=unused-import
from .distance import ces_distance, concept_distance
from .network import (
all_complexes,
complexes,
condensed,
major_complex,
possible_complexes,
subsystems,
)
from .subsystem import (
ConceptStyleSystem,
SystemIrreducibilityAnalysisConceptStyle,
ces,
concept_cuts,
conceptual_info,
evaluate_cut,
phi,
sia,
sia_concept_style,
)
|
# GPL, (c) Reinout van Rees
#
# | Script to show the diff with the last relevant tag.
import logging
import sys
import zest.releaser.choose
from | zest.releaser.utils import system
from zest.releaser import utils
logger = logging.getLogger(__name__)
def main():
    """Show the VCS diff of the working tree against the last relevant tag.

    Python 2 script: note the print statements at the end.
    """
    logging.basicConfig(level=utils.loglevel(),
                        format="%(levelname)s: %(message)s")
    vcs = zest.releaser.choose.version_control()
    # An explicit tag may be given as the last command line argument;
    # otherwise fall back to the most recent tag known to the VCS.
    if len(sys.argv) > 1:
        found = sys.argv[-1]
    else:
        found = utils.get_last_tag(vcs)
    name = vcs.name
    full_tag = vcs.tag_url(found)
    logger.debug("Picked tag %r for %s (currently at %r).",
                 full_tag, name, vcs.version)
    logger.info("Showing differences from the last commit against tag %s",
                full_tag)
    diff_command = vcs.cmd_diff_last_commit_against_tag(found)
    # Echo the command being run, then its output.
    print diff_command
    print system(diff_command)
|
import copy
import sys
import random
import itertools
def rotate_matrix(A):
    """Rotate the square matrix A by 90 degrees clockwise, in place."""
    n = len(A)
    for i in range(n // 2):
        for j in range(i, n - i - 1):
            # Cycle the four cells of each rotation orbit in one tuple
            # assignment; the right-hand side is fully evaluated first.
            (A[i][j], A[-1 - j][i],
             A[-1 - i][-1 - j], A[j][-1 - i]) = (A[-1 - j][i],
                                                 A[-1 - i][-1 - j],
                                                 A[j][-1 - i], A[i][j])
# @include
class RotatedMatrix:
    """A logical 90-degree clockwise rotation: a view over a square matrix."""

    def __init__(self, square_matrix):
        self._square_matrix = square_matrix

    def read_entry(self, i, j):
        """Entry (i, j) of the rotated matrix, read from the backing matrix."""
        # Row i of the rotation is column i of the original, read bottom-up.
        return self._square_matrix[-(j + 1)][i]

    def write_entry(self, i, j, v):
        """Write v at entry (i, j) of the rotated matrix."""
        self._square_matrix[-(j + 1)][i] = v
# @exclude
def check_answer(A, B):
    """Assert that the rotated view of A equals the materialized rotation B."""
    view = RotatedMatrix(A)
    n = len(A)
    for row in range(n):
        for col in range(n):
            assert view.read_entry(row, col) == B[row][col]
def main():
    """Self-test rotate_matrix against the RotatedMatrix view.

    With one command line argument n, checks a single 2**n x 2**n matrix;
    otherwise checks 100 matrices of random power-of-two sizes. The
    previously duplicated build/rotate/verify logic now lives in one helper.
    """
    def run_case(n):
        # Build a 2**n x 2**n matrix filled with 1, 2, 3, ... row by row.
        counter = itertools.count(1)
        size = 1 << n
        A = [[next(counter) for _ in range(size)] for _ in range(size)]
        B = copy.deepcopy(A)
        rotate_matrix(B)
        check_answer(A, B)

    if len(sys.argv) == 2:
        run_case(int(sys.argv[1]))
    else:
        for _ in range(100):
            run_case(random.randint(1, 10))
# Run the self-test when executed as a script.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from mcfw.properties import unicode_property, long_property
class SolutionPharmacyOrderTO(object):
    """Transfer object for a pharmacy order.

    The string arguments to the property descriptors are wire-format field
    numbers used by the mcfw serialization layer.
    """
    key = unicode_property('1')
    description = unicode_property('2')
    status = long_property('3')
    sender_name = unicode_property('4')
    sender_avatar_url = unicode_property('5')
    timestamp = long_property('6')
    picture_url = unicode_property('7')
    remarks = unicode_property('8')
    solution_inbox_message_key = unicode_property('9')

    @staticmethod
    def fromModel(model):
        """Build a SolutionPharmacyOrderTO from a pharmacy order model."""
        to = SolutionPharmacyOrderTO()
        to.key = unicode(model.solution_order_key)
        to.description = model.description
        to.status = model.status
        to.sender_name = model.get_sender().name
        to.sender_avatar_url = model.get_sender().avatar_url
        to.timestamp = model.timestamp
        to.picture_url = model.picture_url
        to.remarks = model.remarks
        to.solution_inbox_message_key = model.solution_inbox_message_key
        return to
|
#
#
# Copyright (C) 2004 Philip J Freeman
#
# This file is part of halo_radio
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the G | NU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANT | ABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import HaloRadio.TopListMaker as TopListMaker
class PlaylistListMaker(TopListMaker.TopListMaker):
    """List maker for working with rows of the "playlists" table."""

    def __init__(self):
        # Start with no results; list-building queries populate self.list.
        self.list = []
        self.tablename = "playlists"
|
from nose.tool | s import ok_
def fail(msg):
    """Unconditionally fail the current test with *msg*."""
    raise AssertionError(msg)
def assert_in(thing, seq, msg=None):
    """Assert that *thing* occurs in *seq*, with an optional custom message."""
    message = msg or "'%s' not found in %s" % (thing, seq)
    ok_(thing in seq, message)
def assert_not_in(thing, seq, msg=None):
    """Assert that *thing* does not occur in *seq*."""
    message = msg or "unexpected '%s' found in %s" % (thing, seq)
    ok_(thing not in seq, message)
def assert_has_keys(dict, required=(), optional=()):
    """Assert that *dict* has every required key and no unexpected keys.

    Args:
        dict: the mapping under test (parameter name kept for backward
            compatibility, although it shadows the builtin).
        required: keys that must be present.
        optional: keys that may also be present.

    Fixes: the defaults are now immutable (mutable default arguments are a
    latent bug), and the previously computed-but-unused allowed_keys set is
    actually used for the extra-key check.
    """
    keys = dict.keys()
    for k in required:
        assert_in(k, keys, "required key %s missing from %s" % (k, dict))
    # Anything outside required + optional is an error.
    allowed_keys = set(required) | set(optional)
    extra_keys = set(keys).difference(allowed_keys)
    if extra_keys:
        fail("found unexpected keys: %s" % list(extra_keys))
def assert_isinstance(thing, kls):
    """Assert that *thing* is an instance of class *kls*."""
    message = "%s is not an instance of %s" % (thing, kls)
    ok_(isinstance(thing, kls), message)
|
import pytest
# TODO: re-enable
# from app import render
# from app.models import SourceLine
class AttrDict(dict):
    """dict whose items are also readable as attributes (d.key == d['key'])."""

    def __getattr__(self, name):
        # Missing keys raise KeyError, exactly like plain item access.
        return self[name]
# http://stackoverflow.com/a/11924754/143880
def is_subset(d1, obj) | :
d2 = vars(obj)
return set(d1.items()).issubset(set(d2.items()))
@pytest.mark.skip("OLD: uses SourceLine")
@pytest.mark.django_db
def test_make_skeleton():
    """Skipped legacy test: renders a position skeleton from SourceLine rows.

    NOTE(review): the SourceLine/render imports are commented out at the
    top of this file, so this body would NameError if unskipped —
    re-enable those imports before removing the skip marker.
    """
    symbols = [
        SourceLine(path="a.py", length=3),
        SourceLine(path="a.py", length=3),
        SourceLine(path="b.py", length=3),
    ]
    # line_number is required by the model; -1 marks "unknown" here.
    for sym in symbols:
        sym.line_number = -1
    SourceLine.objects.bulk_create(symbols)
    skeleton = list(render.make_skeleton(SourceLine.objects.all()))
    result = [(sk.position, sk.x, sk.y) for sk in skeleton]
    # X BUG: should be two-pixel "smudge" after a.py ends
    assert result == [(0, 0, 0), (3, 0, 1), (6, 1, 3)]
|
#!/usr/bin/env python
#-.- encoding: utf-8 -.-
import csv,re
# Input CSV export of the member list; older exports kept for reference.
#medlemsfil = 'medlemmer_20032014.csv'
#medlemsfil = 'Medlemsliste 08.03.2015.csv'
medlemsfil = 'Medlemsliste 03.09.2015.csv'
def parse():
f = open(medlemsfil)
r = csv.reader(f)
index = 0
headings = None
members = None
category = None
for row in r:
print "ROW", row
if row[0] == 'Hjelpekorps':
headings.append('category_id')
return headings, members
if row[0].find('korps')!=-1:
category = row[0]
if members != None:
if category != None:
row.append(category)
members.append(row)
if index == 5:
headings = row
members = []
index += 1
return headings,members
# Raw string: '\d' is an invalid escape in a plain string literal
# (DeprecationWarning and later SyntaxWarning); behavior is unchanged.
postnr_re = re.compile(r'(\d{4}) ')
# Map school-class codes from the CSV to display labels.
klasse_til_dato = {'2' : '2. kl',
                   '3' : '3. kl',
                   '4' : '4. kl',
                   '5' : '5. kl',
                   '6' : '6. kl',
                   '8' : '8. kl',
                   '9' : '9. kl',
                   'VGS' : 'VGS'}
def hent_postnr(postnr):
    """Extract the four-digit postal code from a raw address field.

    Falls back to the default '1406 Ski' when the field is empty or when
    no four-digit code is found (the old code crashed with
    AttributeError on non-matching input).
    NOTE(review): the fallback includes the city name while the matched
    path returns digits only — pre-existing inconsistency, kept as-is.
    """
    if not postnr:
        return '1406 Ski'
    m = postnr_re.search(postnr)
    if m is None:
        return '1406 Ski'
    return m.group(1)
def get_members(headings, members):
    """Yield one OpenERP-style partner dict per member row.

    Skips rows without a surname, repeated heading rows, and the
    corps-name separator rows.  (Python 2 generator.)
    NOTE(review): assumes every column referenced below exists in the CSV.
    """
    for m in members:
        oerp = {}
        # Pair each heading with the row value into a plain dict.
        o = zip(headings, m)
        d = {}
        for k,v in o:
            d[k] = v
        if not d['Etternavn'] or d['Etternavn']=='Etternavn' \
           or d['Etternavn'] == 'Aspirantkorps' or d['Etternavn'] == 'Juniorkorps':
            continue
        """
        {'Etternavn': 'Refsdal', 'Postnr': '1400 Ski', 'Postadresse': 'Lysneveien 8 A', 'Telefon': '97544646', 'Kl': '3', 'Instrument': 'Slagverk', 'Start\xc3\xa5r': '2012', 'Fornavn': 'Adrian Normann', 'Epost': 'mona@refsdal.org'}
        """
        oerp['name'] = ' '.join([d['Fornavn'], d['Etternavn']])
        oerp['street'] = d['Postadresse']
        oerp['city'] = 'Ski'
        oerp['zip'] = hent_postnr(d['Postnr'])
        #oerp['email'] = d['Epost']
        # The e-mail field may hold two addresses separated by '/'.
        epost = d['Epost'].split('/')
        oerp['email'] = epost[0]
        if len(epost)>1:
            oerp['email2'] = epost[1]
        # Same '/'-separated convention for phone numbers.
        tlf = d['Telefon'].split('/')
        print "TLF", d['Telefon'], tlf, d['Telefon'].split('/')
        oerp['mobile'] = tlf[0]
        if len(tlf)>1:
            oerp['mobile2'] = tlf[1]
        print "D_CATEG", d['category_id']
        oerp['category_id'] = d['category_id']
        # Startår
        joined = d['Startår']
        print "STARTÅR", joined
        oerp['join_date'] = '01-01-%s' % joined
        # Kl
        # NOTE(review): 'birthdate' actually stores a school-class label.
        oerp['birthdate'] = klasse_til_dato[d['Kl']]
        oerp['instrument'] = d['Instrument']
        #print "OERP", oerp
        yield oerp
# Script entry point: parse the CSV and dump the converted member dicts.
if __name__=='__main__':
    headings, members = parse()
    print "HE", headings
    #
    for mem in get_members(headings, members):
        print mem
|
"""
Instructions:
1) Set up testing/config.py (copy from config.py.example and fill in the fields)
2) Run this script
3) Look | inside your GCP_BUCKET under test_doodad and you should see results in secret.txt
"""
import os
import doodad
from doodad.utils import TESTING_DIR
from testing.config import GCP_PROJECT, GCP_BUCKET, GCP_IMAGE
def run():
    """Launch a GCP job that copies /data/secret.txt into the GCP bucket.

    Success is verified manually: the file should appear under
    test_doodad in GCP_BUCKET (see the module docstring).
    """
    # Output mount: anything written under /output lands in gcp_path.
    output_mount = doodad.MountGCP(
        gcp_path='secret_output',
        mount_point='/output'
    )
    # Input mount: expose the local testing dir (read-only) at /data.
    input_mount = doodad.MountLocal(
        local_dir=TESTING_DIR,
        mount_point='/data',
        output=False
    )
    mode = doodad.GCPMode(
        gcp_bucket=GCP_BUCKET,
        gcp_log_path='test_doodad/gcp_test',
        gcp_project=GCP_PROJECT,
        instance_type='f1-micro',
        zone='us-west1-a',
        gcp_image=GCP_IMAGE,
        gcp_image_project=GCP_PROJECT
    )
    doodad.run_command(
        command='cat /data/secret.txt > /output/secret.txt',
        mode=mode,
        mounts=[input_mount, output_mount],
        verbose=True
    )


if __name__ == '__main__':
    run()
|
from django.conf.urls import include, url |
from django.contrib import admin
from django.core.urlresolvers import reverse_lazy
from django.views.generic import RedirectView
# URL routes: admin UI, then the product/user apps mounted at the site
# root, with the bare root redirecting permanently to the product list.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('products.urls', namespace='products')),
    url(r'^', include('users.urls', namespace='users')),
    # NOTE(review): this '^$' route is shadowed if products.urls or
    # users.urls already match the empty path — confirm ordering is intended.
    url(r'^$', RedirectView.as_view(permanent=True,
        url=reverse_lazy('products:products_list')), name='home'),
]
|
ertificates are properly updated in the course.
certificates = self.course.certificates['certificates']
self.assertEqual(len(certificates), 1)
self.assertEqual(certificates[0].get('name'), 'Name 0')
self.assertEqual(certificates[0].get('description'), 'Description 0')
    def test_can_delete_certificate_with_slash_prefix_signatory(self):
        """
        Deleting a certificate whose signatory asset path has a leading
        slash returns 204, emits the deletion event, and leaves only the
        first certificate in the course.
        """
        self._add_course_certificates(count=2, signatory_count=1, asset_path_format="/" + SIGNATORY_PATH)
        response = self.client.delete(
            self._url(cid=1),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 204)
        self.assert_event_emitted(
            'edx.certificate.configuration.deleted',
            course_id=six.text_type(self.course.id),
            configuration_id='1',
        )
        self.reload_course()
        # Verify that certificates are properly updated in the course.
        certificates = self.course.certificates['certificates']
        self.assertEqual(len(certificates), 1)
        self.assertEqual(certificates[0].get('name'), 'Name 0')
        self.assertEqual(certificates[0].get('description'), 'Description 0')
    @ddt.data("not_a_valid_asset_key{}.png", "/not_a_valid_asset_key{}.png")
    def test_can_delete_certificate_with_invalid_signatory(self, signatory_path):
        """
        Deletion still succeeds (204) and emits the deletion event when
        the signatory's signature-image path is not a parseable asset key.
        """
        self._add_course_certificates(count=2, signatory_count=1, asset_path_format=signatory_path)
        response = self.client.delete(
            self._url(cid=1),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 204)
        self.assert_event_emitted(
            'edx.certificate.configuration.deleted',
            course_id=six.text_type(self.course.id),
            configuration_id='1',
        )
        self.reload_course()
        # Verify that certificates are properly updated in the course.
        certificates = self.course.certificates['certificates']
        self.assertEqual(len(certificates), 1)
        self.assertEqual(certificates[0].get('name'), 'Name 0')
        self.assertEqual(certificates[0].get('description'), 'Description 0')
    @ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
    def test_delete_certificate_without_write_permissions(self, signatory_path):
        """
        A user without write permission on the course receives 403 when
        attempting to delete a certificate.
        """
        self._add_course_certificates(count=2, signatory_count=1, asset_path_format=signatory_path)
        # Log in as a fresh user with no course role at all.
        user = UserFactory()
        self.client.login(username=user.username, password='test')
        response = self.client.delete(
            self._url(cid=1),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 403)
    @ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
    def test_delete_certificate_without_global_staff_permissions(self, signatory_path):
        """
        Deleting an ACTIVE certificate requires global staff rights:
        course instructor/staff roles alone get 403.
        """
        self._add_course_certificates(count=2, signatory_count=1, is_active=True, asset_path_format=signatory_path)
        # Course-level roles only — deliberately not global staff.
        user = UserFactory()
        for role in [CourseInstructorRole, CourseStaffRole]:
            role(self.course.id).add_users(user)
        self.client.login(username=user.username, password='test')
        response = self.client.delete(
            self._url(cid=1),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 403)
    @ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
    def test_update_active_certificate_without_global_staff_permissions(self, signatory_path):
        """
        Updating an ACTIVE certificate requires global staff rights:
        course instructor/staff roles alone get 403 on PUT.
        """
        self._add_course_certificates(count=2, signatory_count=1, is_active=True, asset_path_format=signatory_path)
        cert_data = {
            u'id': 1,
            u'version': CERTIFICATE_SCHEMA_VERSION,
            u'name': u'New test certificate',
            u'description': u'New test description',
            u'course_title': u'Course Title Override',
            u'org_logo_path': '',
            u'is_active': False,
            u'signatories': []
        }
        # Course-level roles only — deliberately not global staff.
        user = UserFactory()
        for role in [CourseInstructorRole, CourseStaffRole]:
            role(self.course.id).add_users(user)
        self.client.login(username=user.username, password='test')
        response = self.client.put(
            self._url(cid=1),
            data=json.dumps(cert_data),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 403)
    def test_delete_non_existing_certificate(self):
        """
        Try to delete a non existing certificate. It should return status code 404 Not found.
        """
        self._add_course_certificates(count=2)
        # cid=100 is far beyond the two configured certificates.
        response = self.client.delete(
            self._url(cid=100),
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 404)
    @ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
    def test_can_delete_signatory(self, signatory_path):
        """
        Deleting one signatory of a certificate returns 204, removes it
        from the certificate, and also deletes its signature image from
        the contentstore.
        """
        self._add_course_certificates(count=2, signatory_count=3, asset_path_format=signatory_path)
        certificates = self.course.certificates['certificates']
        signatory = certificates[1].get("signatories")[1]
        # The signature image must exist before the delete.
        image_asset_location = AssetKey.from_string(signatory['signature_image_path'])
        content = contentstore().find(image_asset_location)
        self.assertIsNotNone(content)
        test_url = '{}/signatories/1'.format(self._url(cid=1))
        response = self.client.delete(
            test_url,
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 204)
        self.reload_course()
        # Verify that certificates are properly updated in the course.
        certificates = self.course.certificates['certificates']
        self.assertEqual(len(certificates[1].get("signatories")), 2)
        # make sure signatory signature image is deleted too
        self.assertRaises(NotFoundError, contentstore().find, image_asset_location)
    @ddt.data(C4X_SIGNATORY_PATH, SIGNATORY_PATH)
    def test_deleting_signatory_without_signature(self, signatory_path):
        """
        Deleting a signatory whose signature image is already removed (or
        never existed) still succeeds with 204.
        """
        self._add_course_certificates(count=2, signatory_count=4, asset_path_format=signatory_path)
        test_url = '{}/signatories/3'.format(self._url(cid=1))
        response = self.client.delete(
            test_url,
            content_type="application/json",
            HTTP_ACCEPT="application/json",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest",
        )
        self.assertEqual(response.status_code, 204)
def test_delete_signatory_non_existing_certificate(self):
"""
Try to delete a non existing certificate signatory. It should return status code 404 Not found.
"""
self._add_course_certificates(count=2)
test_url = '{}/signatories/1'.format(self._url(cid=100))
response = self.client.delete(
test_url,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTE |
accessblock_def_i_bstate_start', 'bstate', 'startts'),
Index('accessblock_def_i_startts', 'startts'),
Trigger('before', 'insert', 't_accessblock_def_bi'),
Trigger('before', 'update', 't_accessblock_def_bu'),
Trigger('after', 'insert', 't_accessblock_def_ai'),
Trigger('after', 'update', 't_accessblock_def_au'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_ENTITIES', # FIXME
'cap_read' : 'ENTITIES_LIST', # FIXME
'cap_create' : 'ENTITIES_EDIT', # FIXME
'cap_edit' : 'ENTITIES_EDIT', # FIXME
'cap_delete' : 'ENTITIES_EDIT', # FIXME
'menu_name' : _('Access Blocks'),
'default_sort' : ({ 'property': 'startts' ,'direction': 'ASC' },),
'grid_view' : ('abid', 'entity', 'startts', 'endts', 'bstate'),
'grid_hidden' : ('abid',),
'form_view' : ('entity', 'startts', 'endts', 'bstate', 'oldstate'),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : SimpleWizard(title=_('Add new access block'))
}
}
)
id = Column(
'abid',
UInt32(),
Sequence('accessblock_def_abid_seq'),
Comment('Access block ID'),
primary_key=True,
nullable=False,
info={
'header_string' : _('ID')
}
)
entity_id = Column(
'entityid',
UInt32(),
Comment('Access entity ID'),
ForeignKey('entities_access.entityid', name='accessblock_def_fk_entityid', ondelete='CASCADE', onupdate='CASCADE'),
nullable=False,
info={
'header_string' : _('Account'),
'column_flex' : 2,
'filter_type' : 'none'
}
)
start = Column(
'startts',
TIMESTAMP(),
Comment('Start of block'),
CurrentTimestampDefault(),
nullable=False,
info={
'header_string' : _('Start'),
'column_flex' : 1
}
)
end = Column(
'endts',
TIMESTAMP(),
Comment('End of block'),
nullable=False,
info={
'header_string' : _('End'),
'column_flex' : 1
}
)
state = Column(
'bstate',
AccessBlockState.db_type(),
Comment('Block state'),
nullable=False,
default=AccessBlockState.expired,
server_default=AccessBlockState.expired,
info={
'header_string' : _('State')
}
)
old_entity_state = Column(
'oldstate',
UInt8(),
Comment('Old entity state'),
nullable=False,
default=0,
server_default=text('0'),
info={
'header_string' : _('Access State')
}
)
	def __str__(self):
		"""Textual form: the blocked entity's name followed by a colon."""
		# FIXME: use datetime range with formats
		return '%s:' % str(self.entity)
class AccessEntityLinkType(Base):
	"""
	Access entity link type object.

	Defines the kinds of external identifiers that can be attached to an
	access entity via AccessEntityLink.
	"""
	__tablename__ = 'entities_access_linktypes'
	__table_args__ = (
		Comment('Access entity link types'),
		Index('entities_access_linktypes_u_name', 'name', unique=True),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
				'cap_menu'      : 'BASE_ENTITIES', # FIXME
				'cap_read'      : 'ENTITIES_LIST', # FIXME
				'cap_create'    : 'ENTITIES_EDIT', # FIXME
				'cap_edit'      : 'ENTITIES_EDIT', # FIXME
				'cap_delete'    : 'ENTITIES_EDIT', # FIXME
				'show_in_menu'  : 'admin',
				'menu_name'     : _('Link Types'),
				'default_sort'  : ({ 'property': 'name' ,'direction': 'ASC' },),
				'grid_view'     : ('ltid', 'name'),
				'grid_hidden'   : ('ltid',),
				'form_view'     : ('name', 'descr'),
				'easy_search'   : ('name',),
				'detail_pane'   : ('netprofile_core.views', 'dpane_simple'),
				'create_wizard' : SimpleWizard(title=_('Add new link type'))
			}
		}
	)
	# Surrogate primary key.
	id = Column(
		'ltid',
		UInt32(),
		Sequence('entities_access_linktypes_ltid_seq'),
		Comment('Link type ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Unique display name (see unique index above).
	name = Column(
		Unicode(255),
		Comment('Link type name'),
		nullable=False,
		info={
			'header_string' : _('Name'),
			'column_flex'   : 1
		}
	)
	# Optional free-form description.
	description = Column(
		'descr',
		UnicodeText(),
		Comment('Link type description'),
		nullable=True,
		default=None,
		server_default=text('NULL'),
		info={
			'header_string' : _('Description')
		}
	)
	def __str__(self):
		return '%s' % self.name
class AccessEntityLink(Base):
	"""
	Access entity link object.

	Associates an external identifier value (of a given
	AccessEntityLinkType) with an access entity.
	"""
	__tablename__ = 'entities_access_links'
	__table_args__ = (
		Comment('Access entity links'),
		Index('entities_access_links_i_entityid', 'entityid'),
		Index('entities_access_links_i_ltid', 'ltid'),
		Index('entities_access_links_i_value', 'value'),
		{
			'mysql_engine'  : 'InnoDB',
			'mysql_charset' : 'utf8',
			'info'          : {
				'cap_menu'      : 'BASE_ENTITIES', # FIXME
				'cap_read'      : 'ENTITIES_LIST', # FIXME
				'cap_create'    : 'ENTITIES_EDIT', # FIXME
				'cap_edit'      : 'ENTITIES_EDIT', # FIXME
				'cap_delete'    : 'ENTITIES_EDIT', # FIXME
				'menu_name'     : _('Links'),
				'default_sort'  : ({ 'property': 'ltid' ,'direction': 'ASC' },),
				'grid_view'     : ('lid', 'entity', 'type', 'ts', 'value'),
				'grid_hidden'   : ('lid',),
				'easy_search'   : ('value',),
				'create_wizard' : SimpleWizard(title=_('Add new link'))
			}
		}
	)
	# Surrogate primary key.
	id = Column(
		'lid',
		UInt32(),
		Sequence('entities_access_links_lid_seq'),
		Comment('Link ID'),
		primary_key=True,
		nullable=False,
		info={
			'header_string' : _('ID')
		}
	)
	# Owning access entity; cascades with entity deletion.
	entity_id = Column(
		'entityid',
		UInt32(),
		Comment('Access entity ID'),
		ForeignKey('entities_access.entityid', name='entities_access_links_fk_entityid', ondelete='CASCADE', onupdate='CASCADE'),
		nullable=False,
		info={
			'header_string' : _('Entity'),
			'column_flex'   : 2
		}
	)
	# Kind of link (see AccessEntityLinkType).
	type_id = Column(
		'ltid',
		UInt32(),
		Comment('Link type ID'),
		ForeignKey('entities_access_linktypes.ltid', name='entities_access_links_fk_ltid', ondelete='CASCADE', onupdate='CASCADE'),
		nullable=False,
		info={
			'header_string' : _('Type'),
			'column_flex'   : 2
		}
	)
	# Optional creation/service timestamp.
	timestamp = Column(
		'ts',
		TIMESTAMP(),
		Comment('Service timestamp'),
		CurrentTimestampDefault(),
		nullable=True,
		default=None,
		info={
			'header_string' : _('Timestamp'),
			'column_flex'   : 1
		}
	)
	# The linked identifier itself.
	value = Column(
		Unicode(255),
		Comment('Link value'),
		nullable=False,
		info={
			'header_string' : _('Value'),
			'column_flex'   : 3
		}
	)
	# ORM relationships; links are removed together with their entity/type.
	entity = relationship(
		'AccessEntity',
		innerjoin=True,
		backref=backref(
			'links',
			cascade='all, delete-orphan',
			passive_deletes=True
		)
	)
	type = relationship(
		'AccessEntityLinkType',
		innerjoin=True,
		backref=backref(
			'links',
			cascade='all, delete-orphan',
			passive_deletes=True
		)
	)
# Stored SQL routines used by the authentication/accounting subsystem.
# Verify a username/password pair on the database side.
CheckAuthFunction = SQLFunction(
	'check_auth',
	args=(
		SQLFunctionArgument('name', Unicode(255)),
		SQLFunctionArgument('pass', Unicode(255)),
	),
	returns=Boolean(),
	comment='Check auth information',
	writes_sql=False
)
# Record ingress/egress traffic for an account at a given timestamp.
AcctAddProcedure = SQLFunction(
	'acct_add',
	args=(
		InArgument('aeid', UInt32()),
		InArgument('username', Unicode(255)),
		InArgument('tin', Traffic()),
		InArgument('teg', Traffic()),
		InArgument('ts', DateTime())
	),
	comment='Add accounting information',
	label='aafunc',
	is_procedure=True
)
# Fetch authorization data for an account by name (read-only).
AcctAuthzProcedure = SQLFunction(
	'acct_authz',
	args=(
		InArgument('name', Unicode(255)),
	),
	comment='Get authorized account info',
	writes_sql=False,
	label='authzfunc',
	is_procedure=True
)
# Periodic sweep applying time-based account state changes.
AcctPollProcedure = SQLFunction(
	'acct_poll',
	args=(
		InArgument('ts', DateTime()),
	),
	comment='Poll accounts for time-based changes',
	is_procedure=True
)
# Apply rate modifiers; quota sums and policies are in/out parameters.
AcctRateModsProcedure = SQLFunction(
	'acct_rate_mods',
	args=(
		InArgument('ts', DateTime()),
		InArgument('rateid', UInt32()),
		InArgument('entityid', UInt32()),
		InOutArgument('oqsum_in', Money()),
		InOutArgument('oqsum_eg', Money()),
		InOutArgument('oqsum_sec', Money()),
		InOutArgument('pol_in', ASCIIString(255)),
		InOutArgument('pol_eg', ASCIIString(255))
	),
	comment='Apply rate modifiers',
	writes_sql=False,
	label='armfunc',
	is_procedure=True
)
AcctRollbackProcedure = SQLFunction(
'acct_rollback',
args=(
InArgument('aeid', UInt32()),
InArgument('ts', DateTime()),
InOutArgument('xstashid', UInt32()),
InArgument('xrateid_old', UInt32()),
InOutArgument('xrateid_new', UInt32()),
InOutArgument('uti', Traffic()),
InOutArgument('ute', Traffic()),
InOutArgument('xqpend', DateTime()),
InOutArgument(' |
from datetime import date
class YearInfo(object):
    """Payment status of a single calendar year.

    months_ok: months with a registered payment.
    months_na: months that do not apply (e.g. before the user joined).
    months_er: the remaining months, i.e. those with a missing payment.
    Future months of the current year never count as missing.
    """

    def __init__(self, year, months_ok, months_na):
        self.year = year
        self.months = set(range(1, 13))
        self.months_ok = set(months_ok)
        self.months_na = set(months_na)
        covered = self.months_ok | self.months_na
        self.months_er = self.months - covered
        now = date.today()
        if now.year == self.year:
            # Months from the current one onward cannot be overdue yet.
            self.months_er.difference_update(range(now.month, 13))

    def __unicode__(self):
        return u'%s' % self.year

    def missing(self):
        """True when at least one month lacks a payment."""
        return bool(self.months_er)
def payments_by_month(payments_list):
    """Build a list of YearInfo objects from a user's payments.

    Each payment contributes (year, month) pairs via formonths().
    NOTE(review): since_year/since_month are taken from the LAST
    payment's user — assumes all payments belong to the same user.
    """
    monthly_data = set()
    if not payments_list:
        return []
    for payment in payments_list:
        for m in payment.formonths():
            monthly_data.add(m)
        since_year = payment.user.date_joined.year
        since_month = payment.user.date_joined.month
    years = set(range(since_year, date.today().year+1))
    out = []
    for y in years:
        # Months paid in year y; presumably m is a (year, month) tuple.
        ok = map(lambda x: x[1],
                 filter(lambda x: x[0] == y, monthly_data))
        na = []
        if y == since_year:
            # Months before the join month don't apply.
            na = range(1, since_month)
        yi = YearInfo(y, ok, na)
        out.append(yi)
    return out
def no_missing_payments(payments_list):
    """True when no year since the user joined has a missing payment."""
    yearly = payments_by_month(payments_list)
    return not any(year_info.missing() for year_info in yearly)
def missing_months(payments_list):
    """Return the (year, month) pairs that have no registered payment."""
    return [(year_info.year, month)
            for year_info in payments_by_month(payments_list)
            if year_info.missing()
            for month in year_info.months_er]
|
#!/usr/bin/env python
import os
import sys
# Standard Django management entry point: point at the project settings,
# then delegate to Django's CLI (runserver, migrate, ...).
if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DiscreteDistributions.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
# -*- coding: utf-8 -*-
import abc
import tensorflow as tf
from inferbeddings.models import embeddings
import sys
class BaseModel(metaclass=abc.ABCMeta):
    def __init__(self, entity_embeddings=None, predicate_embeddings=None, similarity_function=None,
                 reuse_variables=False, *args, **kwargs):
        """
        Abstract class inherited by all models.
        :param entity_embeddings: (batch_size, 2, entity_embedding_size) Tensor.
        :param predicate_embeddings: (batch_size, walk_size, predicate_embedding_size) Tensor.
        :param similarity_function: similarity function.
        :param reuse_variables: States whether the variables within the model need to be reused.
        """
        self.entity_embeddings = entity_embeddings
        self.predicate_embeddings = predicate_embeddings
        self.similarity_function = similarity_function
        self.reuse_variables = reuse_variables
    @abc.abstractmethod
    def __call__(self):
        """Score the stored walks; subclasses must implement."""
        raise NotImplementedError
    @property
    def parameters(self):
        """Extra trainable variables owned by the model (none by default)."""
        return []
class TranslatingModel(BaseModel):
    def __init__(self, *args, **kwargs):
        """
        Compositional extension of the Translating Embeddings model [1].
        [1] Bordes, A. et al. - Translating Embeddings for Modeling Multi-relational Data - NIPS 2013
        """
        super().__init__(*args, **kwargs)

    def __call__(self):
        """
        :return: (batch_size) Tensor containing the scores associated by the models to the walks.
        """
        head = self.entity_embeddings[:, 0, :]
        tail = self.entity_embeddings[:, 1, :]
        # Compose the walk additively, then translate the head by it.
        relation = embeddings.additive_walk_embedding(self.predicate_embeddings)
        return self.similarity_function(head + relation, tail)
class BilinearDiagonalModel(BaseModel):
    def __init__(self, *args, **kwargs):
        """
        Compositional extension of the Bilinear-Diagonal (DistMult) model [1].
        [1] Yang, B. et al. - Embedding Entities and Relations for Learning and Inference in Knowledge Bases - ICLR 2015
        """
        super().__init__(*args, **kwargs)

    def __call__(self):
        """
        :return: (batch_size) Tensor containing the scores associated by the models to the walks.
        """
        head = self.entity_embeddings[:, 0, :]
        tail = self.entity_embeddings[:, 1, :]
        # Diagonal bilinear form: elementwise scaling of the head.
        relation = embeddings.bilinear_diagonal_walk_embedding(self.predicate_embeddings)
        return self.similarity_function(head * relation, tail)
class BilinearModel(BaseModel):
    def __init__(self, *args, **kwargs):
        """
        Implementation of a compositional extension of the Bilinear model [1]
        [1] Nickel, M. et al. - A Three-Way Model for Collective Learning on Multi-Relational Data - ICML 2011
        """
        super().__init__(*args, **kwargs)
    def __call__(self):
        """
        :return: (batch_size) Tensor containing the scores associated by the models to the walks.
        """
        subject_embedding, object_embedding = self.entity_embeddings[:, 0, :], self.entity_embeddings[:, 1, :]
        # NOTE: .value on get_shape() is TensorFlow 1.x API.
        entity_embedding_size = subject_embedding.get_shape()[-1].value
        walk_embedding = embeddings.bilinear_walk_embedding(self.predicate_embeddings, entity_embedding_size)
        # Batched s^T W: expand subject to (batch, 1, dim), matmul, squeeze.
        es = tf.expand_dims(subject_embedding, 1)
        sW = tf.matmul(es, walk_embedding)[:, 0, :]
        return self.similarity_function(sW, object_embedding)
class ComplexModel(BaseModel) | :
def __init__(self, *args, **kwargs):
"""
Implementation of a compositional extension of the ComplEx model [1]
[1] Trouillon, T. et al. - Complex Embeddings for Simple Link Prediction - ICML 2016
"""
super().__init__(*args, **kwargs)
def __call__(self):
"""
:return: (batch_size) Tensor containing the scores associated by the models to the walks.
"""
subject_embedding, object_embedding = self.entity_embeddings[:, 0, :], self.entity_embeddings[:, 1, :]
walk_embedding = embeddings.complex_walk_embedding(self.predicate_embeddings)
es_re, es_im = tf.split(value=subject_embedding, num_or_size_splits=2, axis=1)
eo_re, eo_im = tf.split(value=object_embedding, num_or_size_splits=2, axis=1)
ew_re, ew_im = tf.split(value=walk_embedding, num_or_size_splits=2, axis=1)
def dot3(arg1, rel, arg2):
return self.similarity_function(arg1 * rel, arg2)
score = dot3(es_re, ew_re, eo_re) + dot3(es_re, ew_im, eo_im) + dot3(es_im, ew_re, eo_im) - dot3(es_im, ew_im, eo_re)
return score
class ERMLP(BaseModel):
    def __init__(self, hidden_size=None, f=tf.tanh, *args, **kwargs):
        """
        Implementation of the ER-MLP model described in [1, 2]
        [1] Dong, X. L. et al. - Knowledge Vault: A Web-Scale Approach to Probabilistic Knowledge Fusion - KDD 2014
        [2] Nickel, M. et al. - A Review of Relational Machine Learning for Knowledge Graphs - IEEE 2016
        :param hidden_size: width of the single hidden layer.
        :param f: hidden-layer activation (default tanh).
        """
        super().__init__(*args, **kwargs)
        self.f = f
        # ent_emb_size, pred_emb_size = self.entity_embeddings_size, self.predicate_embeddings_size
        # NOTE: .value on get_shape() is TensorFlow 1.x API.
        ent_emb_size = self.entity_embeddings.get_shape()[-1].value
        pred_emb_size = self.predicate_embeddings.get_shape()[-1].value
        # MLP input is [subject; object; predicate] concatenated.
        input_size = ent_emb_size + ent_emb_size + pred_emb_size
        with tf.variable_scope("ERMLP", reuse=self.reuse_variables) as _:
            self.C = tf.get_variable('C', shape=[input_size, hidden_size], initializer=tf.contrib.layers.xavier_initializer())
            self.w = tf.get_variable('w', shape=[hidden_size, 1], initializer=tf.contrib.layers.xavier_initializer())
    def __call__(self):
        """
        :return: (batch_size) Tensor containing the scores associated by the models to the walks.
        """
        subject_embedding, object_embedding = self.entity_embeddings[:, 0, :], self.entity_embeddings[:, 1, :]
        # This model is non-compositional in nature, so it might not be trivial to represent a walk embedding
        walk_embedding = self.predicate_embeddings[:, 0, :]
        e_ijk = tf.concat(values=[subject_embedding, object_embedding, walk_embedding], axis=1)
        h_ijk = tf.matmul(e_ijk, self.C)
        f_ijk = tf.squeeze(tf.matmul(self.f(h_ijk), self.w), axis=1)
        return f_ijk
    @property
    def parameters(self):
        # Expose the MLP weights as trainable parameters.
        params = super().parameters + [self.C, self.w]
        return params
# Aliases
# Common names from the literature mapped onto the implementations above.
TransE = TranslatingEmbeddings = TranslatingModel
DistMult = BilinearDiagonal = BilinearDiagonalModel
RESCAL = Bilinear = BilinearModel
ComplEx = ComplexE = ComplexModel
ER_MLP = ERMLP
def get_function(function_name):
    """Resolve a model class in this module by name.

    :raises ValueError: when no attribute with that name exists here.
    """
    current_module = sys.modules[__name__]
    try:
        return getattr(current_module, function_name)
    except AttributeError:
        raise ValueError('Unknown model: {}'.format(function_name))
|
# $HeadURL$
import sys
def t | est_import():
""" Test to make sure the project imports OK.
"""
import pp.testing
def test_app():
    """ Test the command-line app runs OK.
    """
    from pp.testing.scripts import app
    # Clear argv so app.main() doesn't pick up the test runner's arguments.
    sys.argv = []
    app.main()
if __name__ == '__main__':
    # Run this test file through py.test if executed on the cmdline
    import pytest
    pytest.main(args=[sys.argv[0]])
|
import math
import torch
from .optimizer import Optimizer
class SparseAdam(Optimizer):
    r"""Implements lazy version of Adam algorithm suitable for sparse tensors.
    In this variant, only moments that show up in the gradient get updated, and
    only those portions of the gradient get applied to the parameters.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8):
        # Validate hyper-parameters up front so bad values fail loudly.
        if not 0.0 < lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 < eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps)
        super(SparseAdam, self).__init__(params, defaults)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if not grad.is_sparse:
                    raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                state['step'] += 1
                grad = grad.coalesce()  # the update is non-linear so indices must be unique
                grad_indices = grad._indices()
                grad_values = grad._values()
                size = grad.size()
                def make_sparse(values):
                    # Rebuild a sparse tensor with grad's indices but new values.
                    constructor = grad.new
                    if grad_indices.dim() == 0 or values.dim() == 0:
                        return constructor().resize_as_(grad)
                    return constructor(grad_indices, values, size)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                # Decay the first and second moment running average coefficient
                # old <- b * old + (1 - b) * new
                # <==> old += (1 - b) * (new - old)
                # Only positions present in grad are touched (the "lazy" part).
                old_exp_avg_values = exp_avg.sparse_mask(grad)._values()
                exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1)
                exp_avg.add_(make_sparse(exp_avg_update_values))
                old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values()
                exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2)
                exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values))
                # Dense addition again is intended, avoiding another sparse_mask
                numer = exp_avg_update_values.add_(old_exp_avg_values)
                exp_avg_sq_update_values.add_(old_exp_avg_sq_values)
                denom = exp_avg_sq_update_values.sqrt_().add_(group['eps'])
                del exp_avg_update_values, exp_avg_sq_update_values
                # Bias corrections counteract the zero-initialised moments.
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.add_(make_sparse(-step_size * numer.div_(denom)))
        return loss
|
import random

import EoN
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import scipy
print(r"Warning, book says \tau=2\gamma/<K>, but it's really 1.5\gamma/<K>")
print(r"Warning - for the power law graph the text says k_{max}=110, but I believe it is 118.")
# Shared simulation parameters for all network types.
N=1000
gamma = 1.
iterations = 200
rho = 0.05
tmax = 15
tcount = 101
kave = 20
# Transmission rate chosen relative to the recovery rate and mean degree.
tau = 1.5*gamma/kave
def simulate_process(graph_function, iterations, tmax, tcount, rho, kave, tau, gamma, symbol):
    """Average `iterations` fast_SIS runs on fresh graphs and plot prevalence.

    :param graph_function: zero-argument callable returning a fresh networkx graph.
    :param symbol: matplotlib format string for the plotted curve.
    """
    # Fixed: scipy.zeros/scipy.linspace were deprecated aliases removed
    # from the top-level scipy namespace; use numpy directly.
    Isum = np.zeros(tcount)
    report_times = np.linspace(0, tmax, tcount)
    for counter in range(iterations):
        G = graph_function()
        t, S, I = EoN.fast_SIS(G, tau, gamma, rho=rho, tmax=tmax)
        # Resample each run onto the common time grid before accumulating.
        I = EoN.subsample(report_times, t, I)
        Isum += I
    plt.plot(report_times, Isum*1./(N*iterations), symbol)
#regular
# Each scenario plots its averaged prevalence with a distinct marker.
symbol = 'o'
graph_function = lambda : nx.configuration_model(N*[kave])
simulate_process(graph_function, iterations, tmax, tcount, rho, kave, tau, gamma, symbol)
#bimodal
symbol='x'
graph_function = lambda: nx.configuration_model([5,35]*int(N/2+0.01))
simulate_process(graph_function, iterations, tmax, tcount, rho, kave, tau, gamma, symbol)
#erdos-renyi
symbol = 's'
graph_function = lambda : nx.fast_gnp_random_graph(N, kave/(N-1.))
simulate_process(graph_function, iterations, tmax, tcount, rho, kave, tau, gamma, symbol)
symbol = 'd'
# Truncated power-law degree distribution parameters.
pl_kmax = 118
pl_kmin = 7
pl_alpha = 2.
Pk={}
for k in range(pl_kmin, pl_kmax+1):
    Pk[k] = k**(-pl_alpha)
# Normalise Pk into a probability distribution.
valsum = sum(Pk.values())
for k in Pk.keys():
    Pk[k] /= valsum
#print sum(k*Pk[k] for k in Pk.keys())
def generate_sequence(Pk, N):
    """Draw N degrees from the distribution Pk by inverse-transform
    sampling, redrawing the whole sequence until its sum is even
    (a requirement for the configuration model)."""
    while True:
        degrees = []
        for _ in range(N):
            threshold = random.random()
            for degree in Pk.keys():
                if threshold < Pk[degree]:
                    break
                threshold -= Pk[degree]
            # If no bucket matched, the last key is used (loop variable persists).
            degrees.append(degree)
        if sum(degrees) % 2 == 0:
            return degrees
graph_function = lambda : nx.configuration_model(generate_sequence(Pk,N))
simulate_process(graph_function, iterations, tmax, tcount, rho, kave, tau, gamma, symbol)
# Homogeneous mean-field prediction (dashed curve).
symbol = '--'
S0 = (1-rho)*N
I0 = rho*N
t, S, I = EoN.SIS_homogeneous_meanfield(S0, I0, kave, tau, gamma, tmax=tmax, tcount=tcount)
plt.plot(t, I/N, symbol)
# Homogeneous pairwise prediction (solid curve).
symbol = '-'
S0 = (1-rho)*N
I0 = rho*N
SI0 = (1-rho)*N*kave*rho
SS0 = (1-rho)*N*kave*(1-rho)
t, S, I = EoN.SIS_homogeneous_pairwise(S0, I0, SI0, SS0, kave, tau, gamma, tmax=tmax, tcount=tcount)
plt.plot(t, I/N, symbol)
plt.xlabel('$t$')
plt.ylabel('Prevalence')
plt.savefig('fig4p11.png')
# coding: utf8
# OeQ autogenerated lookup function for 'Window/Wall Ratio East in correlation to year of construction', based on the source data of the survey for the "German Building Typology" developed by the "Institut für Wohnen und Umwelt", Darmstadt/Germany, 2011-2013
import math
import numpy as np
import oeqLookuptable as oeq
def get(*xin):
    """Look up the east-facing window/wall ratio for a construction year.

    Autogenerated from the IWU "German Building Typology" survey data
    (Darmstadt, 2011-2013).  Do not edit the table by hand; regenerate it
    from the source data instead.

    :param xin: argument(s) forwarded to ``oeq.lookuptable.lookup`` --
        typically the year of construction.
    :return: the looked-up window/wall ratio (east).
    """
    # Flat list of alternating (year, ratio) values covering 1849-2021.
    l_lookup = oeq.lookuptable(
        [
        1849,0,
        1850,0,
        1851,0,
        1852,0,
        1853,0,
        1854,0,
        1855,0,
        1856,0,
        1857,0,
        1858,0,
        1859,0,
        1860,0,
        1861,0,
        1862,0,
        1863,0,
        1864,0,
        1865,0,
        1866,0,
        1867,0,
        1868,0,
        1869,0,
        1870,0,
        1871,0,
        1872,0,
        1873,0,
        1874,0,
        1875,0,
        1876,0,
        1877,0,
        1878,0,
        1879,0,
        1880,0,
        1881,0,
        1882,0,
        1883,0,
        1884,0,
        1885,0,
        1886,0,
        1887,0,
        1888,0,
        1889,0,
        1890,0,
        1891,0,
        1892,0,
        1893,0,
        1894,0,
        1895,0,
        1896,0,
        1897,0,
        1898,0,
        1899,0,
        1900,0,
        1901,0,
        1902,0,
        1903,0,
        1904,0,
        1905,0,
        1906,0,
        1907,0,
        1908,0,
        1909,0,
        1910,0,
        1911,0,
        1912,0,
        1913,0,
        1914,0,
        1915,0,
        1916,0,
        1917,0,
        1918,0,
        1919,0,
        1920,0,
        1921,0,
        1922,0,
        1923,0,
        1924,0,
        1925,0,
        1926,0,
        1927,0,
        1928,0,
        1929,0,
        1930,0,
        1931,0,
        1932,0,
        1933,0,
        1934,0,
        1935,0,
        1936,0,
        1937,0,
        1938,0,
        1939,0,
        1940,0,
        1941,0,
        1942,0,
        1943,0,
        1944,0,
        1945,0,
        1946,0,
        1947,0,
        1948,0,
        1949,0,
        1950,0,
        1951,0,
        1952,0,
        1953,0,
        1954,0,
        1955,0,
        1956,0,
        1957,0,
        1958,0.001,
        1959,0.002,
        1960,0.002,
        1961,0,
        1962,0,
        1963,0,
        1964,0,
        1965,0,
        1966,0.019,
        1967,0.046,
        1968,0.077,
        1969,0.11,
        1970,0.141,
        1971,0.169,
        1972,0.195,
        1973,0.22,
        1974,0.22,
        1975,0.22,
        1976,0.22,
        1977,0.22,
        1978,0.161,
        1979,0.089,
        1980,0.028,
        1981,0,
        1982,0.019,
        1983,0.07,
        1984,0.131,
        1985,0.18,
        1986,0.2,
        1987,0.199,
        1988,0.188,
        1989,0.18,
        1990,0.184,
        1991,0.192,
        1992,0.195,
        1993,0.18,
        1994,0.142,
        1995,0.09,
        1996,0.038,
        1997,0,
        1998,0,
        1999,0,
        2000,0.007,
        2001,0.025,
        2002,0.038,
        2003,0.045,
        2004,0.049,
        2005,0.05,
        2006,0.05,
        2007,0.051,
        2008,0.05,
        2009,0.05,
        2010,0.05,
        2011,0.05,
        2012,0.05,
        2013,0.05,
        2014,0.05,
        2015,0.05,
        2016,0.05,
        2017,0.05,
        2018,0.05,
        2019,0.05,
        2020,0.05,
        2021,0.05])
    return(l_lookup.lookup(xin))
|
"linear_transform"))
def test_parameterized_mixing_encoder(self, model_arch,
mixing_layer_name):
config = dummy_config(model_arch=model_arch)
frozen_config = ml_collections.FrozenConfigDict(config)
encoder = models.EncoderModel(config=frozen_config)
rng = jax.random.PRNGKey(0)
init_batch = init_encoder_batch(config)
params = init_model_params(rng, encoder, init_batch)
expected_keys = {
"embedder", "encoder_0", "encoder_1", "feed_forward_0",
"feed_forward_1", f"{mixing_layer_name}_0", f"{mixing_layer_name}_1",
"pooler"
}
self.assertEqual(params.keys(), expected_keys)
inputs = dummy_inputs(rng, config)
hidden_states, pooled_output = encoder.apply({"params": params},
rngs={"dropout": rng},
**inputs)
expected_hidden_states_shape = (config.train_batch_size,
config.max_seq_length, config.d_model)
self.assertEqual(hidden_states.shape, expected_hidden_states_shape)
expected_pooled_output_shape = (config.train_batch_size, config.d_model)
self.assertEqual(pooled_output.shape, expected_pooled_output_shape)
@parameterized.parameters(
dict(
attention_layout=HybridAttentionLayout.BOTTOM,
num_attention_layers=0,
expected_attention_layers=[]),
dict(
attention_layout=HybridAttentionLayout.MIDDLE,
num_attention_layers=2,
expected_attention_layers=[1, 2]),
dict(
attention_layout=HybridAttentionLayout.MIXED,
num_attention_layers=2,
expected_attention_layers=[0, 2]),
dict(
attention_layout=HybridAttentionLayout.TOP,
num_attention_layers=1,
expected_attention_layers=[3]))
def test_hybrid_encoder(self, attention_layout,
num_attention_layers,
expected_attention_layers):
config = dummy_config(model_arch=ModelArchitecture.F_NET)
with config.unlocked():
config.num_layers = 4
config.attention_layout = attention_layout
config.num_attention_layers = num_attention_layers
frozen_config = ml_collections.FrozenConfigDict(config)
encoder = models.EncoderModel(config=frozen_config)
rng = jax.random.PRNGKey(0)
init_batch = init_encoder_batch(config)
params = init_model_params(rng, encoder, init_batch)
expected_keys = {
"embedder", "encoder_0", "encoder_1", "encoder_2", "encoder_3",
"feed_forward_0", "feed_forward_1", "feed_forward_2", "feed_forward_3",
"pooler"
}
for expected_attention_layer in expected_attention_layers:
expected_keys.add(f"self_attention_{expected_attention_layer}")
self.assertEqual(params.keys(), expected_keys)
inputs = dummy_inputs(rng, config)
hidden_states, pooled_output = encoder.apply({"params": params},
rngs={"dropout": rng},
**inputs)
expected_hidden_states_shape = (config.train_batch_size,
config.max_seq_length, config.d_model)
self.assertEqual(hidden_states.shape, expected_hidden_states_shape)
expected_pooled_output_shape = (config.train_batch_size, config.d_model)
self.assertEqual(pooled_output.shape, expected_pooled_output_shape)
  def test_pretraining_model(self):
    """Smoke-tests PreTrainingModel: parameter tree, metric keys and signs."""
    config = dummy_config(model_arch=ModelArchitecture.F_NET)
    with config.unlocked():
      config.max_predictions_per_seq = 7
    frozen_config = ml_collections.FrozenConfigDict(config)
    model = models.PreTrainingModel(config=frozen_config)
    rng = jax.random.PRNGKey(0)
    init_batch = init_encoder_batch(config)
    # Pre-training model needs MLM and NSP inputs to be initialized.
    init_batch.update({
        "masked_lm_positions":
            jnp.ones((1, config.max_predictions_per_seq), jnp.int32),
        "masked_lm_labels":
            jnp.ones((1, config.max_predictions_per_seq), jnp.int32),
        "masked_lm_weights":
            jnp.ones((1, config.max_predictions_per_seq), jnp.float32),
        "next_sentence_labels":
            jnp.ones((1, 1), jnp.int32)
    })
    params = init_model_params(rng, model, init_batch)
    expected_keys = {
        "encoder", "predictions_dense", "predictions_output", "classification",
        "predictions_layer_norm"
    }
    self.assertEqual(params.keys(), expected_keys)
    inputs = dummy_inputs(rng, config)
    # Full-batch MLM/NSP inputs.  NOTE(review): masked_lm_weights is int32
    # here but float32 in the init batch above -- confirm this is intended.
    inputs.update({
        "masked_lm_positions":
            jnp.ones((config.train_batch_size, config.max_predictions_per_seq),
                     jnp.int32),
        "masked_lm_labels":
            jnp.ones((config.train_batch_size, config.max_predictions_per_seq),
                     jnp.int32),
        "masked_lm_weights":
            jnp.ones((config.train_batch_size, config.max_predictions_per_seq),
                     jnp.int32),
        "next_sentence_labels":
            jnp.ones((config.train_batch_size, 1), jnp.int32)
    })
    metrics = model.apply({"params": params}, rngs={"dropout": rng}, **inputs)
    expected_metrics = {
        "loss", "masked_lm_loss", "masked_lm_normalization",
        "masked_lm_correct", "masked_lm_total", "next_sentence_loss",
        "num_next_sentence_labels", "next_sentence_correct"
    }
    self.assertEqual(metrics.keys(), expected_metrics)
    # Because model is randomly initialized, we can only check the sign of most
    # metrics.
    self.assertGreater(metrics["loss"], 0.0)
    self.assertGreater(metrics["masked_lm_loss"], 0.0)
    self.assertGreater(metrics["next_sentence_loss"], 0.0)
    self.assertGreater(metrics["masked_lm_normalization"], 0.0)
    self.assertGreater(metrics["num_next_sentence_labels"], 0.0)
    self.assertGreater(metrics["masked_lm_total"], 0.0)
    # Number of correct labels is bound by the batch size.
    self.assertLessEqual(
        metrics["masked_lm_correct"],
        config.train_batch_size * config.max_predictions_per_seq)
    self.assertLessEqual(metrics["num_next_sentence_labels"],
                         config.train_batch_size)
def test_classification_model(self):
n_classes = 2
config = dummy_config(model_arch=ModelArchitecture.BERT)
with config.unlocked():
config.dataset_name = "dummy/classification_dataset"
frozen_config = ml_collections.FrozenConfigDict(config) |
model = models.SequenceClassificationModel(
config=frozen_config, n_classes=n_classes)
rng = jax.random.PRNGKey(0)
init_batch = init_encoder_batch(config)
params = init_model_params(rng, model, init_batch)
self.as | sertEqual(params.keys(), {"encoder", "classification"})
# Logits for eval/prediction (no labels supplied).
eval_inputs = dummy_inputs(rng, config)
eval_inputs["deterministic"] = True
logits = model.apply({"params": params}, **eval_inputs)
expected_logits_shape = (config.train_batch_size, n_classes)
self.assertEqual(jnp.shape(logits), expected_logits_shape)
# Metrics for training (labels supplied).
train_inputs = dummy_inputs(rng, config)
train_inputs["labels"] = jnp.ones(config.train_batch_size, jnp.int32)
metrics = model.apply({"params": params},
rngs={"dropout": rng},
**train_inputs)
self.assertEqual(metrics.keys(),
{"loss", "correct_predictions", "num_labels"})
def test_regression_model(self):
n_classes = 1 # Only one label for regression
config = dummy_config(model_arch=ModelArchitecture.F_NET)
with config.unlocked():
config.dataset_name = "glue/stsb" # regression task dataset
frozen_config = ml_collections.FrozenConfigDict(config)
model = models.SequenceClassificationModel(
config=frozen_config, n_classes=n_classes)
rng = jax.random.PRNGKey(0)
init_batch = init_encoder_batch(config)
params = init_model_params(rng, model, init_batch)
self.assertEqual(params.keys(), {"encoder", "classification"})
# Logit |
# i | mport ipdb; ipdb.set_trace()
from .posts import PostAPIHandler, Post | CategoriesAPIHandler
from .tweets import TweetsAPIHandler
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.o | rg/licenses/LICENSE-2.0
#
# Unless requir | ed by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import _magic
|
#!/usr/bin/env python
#to create a file in codesnippets folder
import pyperclip
import os
import re
import subprocess
def get_extension(file_name):
    """Return the extension of *file_name*, defaulting to 'txt'.

    BUG FIX: the original split on ALL dots and returned element [1], so a
    multi-dot name such as 'archive.tar.gz' yielded 'tar' instead of 'gz'.
    Splitting from the right on the last dot returns the real extension.

    :param file_name: file name, possibly containing dots.
    :return: the text after the last dot, or 'txt' when there is no dot.
    """
    if '.' in file_name:
        return file_name.rsplit('.', 1)[-1]
    return 'txt'
def cut(str, len1):
    """Return *str* with its first ``len1 + 1`` characters removed.

    Used to strip the leading directive line (of length ``len1``) together
    with the newline that follows it, leaving only the snippet body.
    """
    start = len1 + 1
    return str[start:]
# Locate a file by name anywhere under the given directory tree.
def find(name, path):
    """Walk *path* recursively and return the full path of the first file
    called *name*; returns None when no such file exists."""
    for dirpath, _dirnames, filenames in os.walk(path):
        if name in filenames:
            return os.path.join(dirpath, name)
#ubuntu notification (message sending)
def sendmessage(message):
    """Show *message* as an Ubuntu desktop notification via ``notify-send``.

    Fire-and-forget: the spawned process is not waited on, and failures
    (e.g. notify-send not installed) are not detected.
    """
    subprocess.Popen(['notify-send', message])
    return
# Clipboard watcher (Python 2): polls the clipboard and files away snippets
# whose FIRST line carries the "add ... code snippets ... -safe" directive.
while True:
    str = pyperclip.paste()
    if (str==" "):
        # " " is the sentinel written back after an entry has been consumed.
        continue
    str_low = str.lower()
    # Only the first line may carry the directive, so text that merely
    # mentions the keywords further down is never saved by accident.
    str_lower=str_low.split("\n")
    if(str_lower[0]=="stop -safe"):
        sendmessage("Stopped the background process for code snippet management...byebye")
        # NOTE(review): os.exit() does not exist -- this raises AttributeError
        # at runtime; it should be os._exit()/sys.exit().  Confirm intent.
        os.exit()
    if (str_lower[0].find("add") != -1 and str_lower[0].find("code")!=-1 and
        str_lower[0].find("snippets") !=-1 and str_lower[0].find("-safe") !=-1 ):
        # Warn when the directive line contains no "name.ext" pattern.
        if re.search(r'\w+\.[a-z,A-Z]',str_lower[0])==None:
            sendmessage("SPECIFY FILEEXTENSION (default file type is txt)")
        str1 = str.split('\n')
        str2 = str1[0].split(' ')
        length = len(str2)
        # The second-to-last word of the directive line is the file name.
        file_name = str2[length - 2]
        # Snippet body: everything after the directive line.
        new_str = cut(str, len(str1[0]))
        # print new_str
        try:
            # code_snippets is the head folder
            if not os.path.exists('/home/nikhil/code_snippets'):
                os.makedirs('/home/nikhil/code_snippets') # creating the directory if not exists
            extension = get_extension(file_name)
            # one sub-folder per extension, named in upper case
            if not os.path.exists('/home/nikhil/code_snippets/'
                                  + extension.upper()):
                os.makedirs('/home/nikhil/code_snippets/' + extension.upper())
            print
            # create the snippet file only when it does not exist yet
            if not os.path.exists('/home/nikhil/code_snippets/' + extension.upper() + '/'
                                  + file_name):
                name = open('/home/nikhil/code_snippets/' + extension.upper() + '/'
                            + file_name, 'w')
                name.write(new_str)
                name.truncate()
                name.close()
            sendmessage("successfully added to code snippets collection")
            pyperclip.copy(" ")
        except Exception:
            # Fallback path: append to an already existing file of this name.
            try:
                already_exists = open('/home/nikhil/code_snippets/' + extension.upper() + '/'
                                      + file_name, 'a+')
                #new_str = cut(str, len(str1[0]))
                str_from_file = already_exists.read()
                #already_exists.seek(0) #http://stackoverflow.com/questions/6648493/open-file-for-both-reading-and-writing#answer-15976014
                already_exists.write('\n\n@@\n'+new_str)
                already_exists.truncate()
                already_exists.close()
                sendmessage("successfully added to code snippets collection (code has been appended to already existing file with same name)")
                str=pyperclip.copy(" ")
            except:
                # Python 2 print statement; bare except hides the real error.
                print "oops some error in finding file to append content"
                sendmessage("ERROR OCCURED")
                pyperclip.copy(" ")
                # NOTE(review): indentation reconstructed -- appears to
                # relaunch the watcher script after an error; confirm placement.
                os.system('python /home/nikhil/Desktop/haha.py')
|
# encoding: utf8
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial autogenerated migration: creates the Contact and Commits
    tables.  Edit with care -- Django compares these operations against the
    recorded migration history."""

    dependencies = []

    operations = [
        # Contact: simple contact-form submissions (date auto-set on save).
        migrations.CreateModel(
            fields = [(u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True),), ('name', models.CharField(max_length=255),), ('email', models.EmailField(max_length=75),), ('message', models.TextField(),), ('date', models.DateField(auto_now=True),)],
            bases = (models.Model,),
            options = {},
            name = 'Contact',
        ),
        # Commits: VCS commit metadata (code presumably holds the hash --
        # confirm against the model definition).
        migrations.CreateModel(
            fields = [(u'id', models.AutoField(verbose_name=u'ID', serialize=False, auto_created=True, primary_key=True),), ('date', models.DateTimeField(),), ('title', models.CharField(max_length=255),), ('code', models.CharField(max_length=255),), ('summary', models.TextField(),)],
            bases = (models.Model,),
            options = {},
            name = 'Commits',
        ),
    ]
|
from django.db.backends import BaseDatabaseIntrospection
import pyodbc as Database
import types
import datetime
import decimal
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """Introspection for a Netezza-style backend.

    Python 2 era code: relies on the ``types.StringType``-family aliases
    (removed in Python 3) and a print statement in get_field_type.
    """

    # Map type codes to Django Field types.
    data_types_reverse = {
        types.StringType: 'TextField',
        types.UnicodeType: 'TextField',
        types.LongType: 'IntegerField',
        types.IntType: 'IntegerField',
        types.BooleanType: 'BooleanField',
        types.FloatType: 'FloatField',
        datetime.datetime: 'DateTimeField',
        datetime.date: 'DateField',
        datetime.time: 'TimeField',
        decimal.Decimal: 'DecimalField',
    }

    def get_table_list(self, cursor):
        """
        Returns a list of table names in the current database.
        """
        # db = cursor.db.alias
        # if db == 'default':
        # NOTE(review): `db` is assigned but never used in the query below.
        db = 'public'
        # The objclass ids select the relation kinds exposed by
        # _v_obj_relation (tables, views, ...).
        cursor.execute("""
SELECT distinct objname
FROM _v_obj_relation
WHERE objclass IN (4905,4906,4908,4907,4909,4940,4911,4913,4953);""")
        return [row[0] for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name, identity_check=True):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # LIMIT 1 keeps the probe cheap; only cursor.description is consumed.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        return cursor.description

    def _name_to_index(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: field_index} for the given table.
        Indexes are 0-based.
        """
        return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])

    def get_relations(self, cursor, table_name):
        # NOTE(review): the early return disables relation introspection; the
        # query below is (intentionally?) unreachable dead code.
        return []
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        cursor.execute("""
SELECT fk.ORDINAL_POSITION, col.ORDINAL_POSITION, fk.REFERENCE_TABLE_NAME
FROM FOREIGN_KEYS fk
INNER JOIN COLUMNS col on fk.REFERENCE_COLUMN_NAME = col.COLUMN_NAME
and fk.REFERENCE_TABLE_NAME = col.TABLE_NAME
WHERE fk.TABLE_NAME = %s
""", [table_name])
        relations = {}
        for row in cursor.fetchall():
            # row[0] and row[1] are like "{2}", so strip the curly braces.
            relations[row[0]] = (row[1], row[2])
        return relations

    def get_indexes(self, cursor, table_name):
        # NOTE(review): early return disables index introspection too; the
        # query below (which targets Vertica's V_CATALOG) is unreachable.
        return []
        """
        Returns a dictionary of fieldname -> infodict for the given table,
        where each infodict is in the format:
        {'primary_key': boolean representing whether it's the primary key,
        'unique': boolean representing whether it's a unique index,
        'db_index': boolean representing whether it's a non-unique index}
        """
        cursor.execute("""
SELECT col.COLUMN_NAME,pk.CONSTRAINT_TYPE
FROM V_CATALOG.COLUMNS col
left join V_CATALOG.PRIMARY_KEYS pk
ON col.TABLE_NAME = pk.TABLE_NAME AND col.COLUMN_NAME = pk.COLUMN_NAME
WHERE col.TABLE_NAME = %s""", [table_name])
        indexes = {}
        for row in cursor.fetchall():
            indexes[row[0]] = {'primary_key': row[1] == 'p', 'unique': False}
        return indexes

    def get_field_type(self, data_type, description):
        """Hook for a database backend to use the cursor description to
        match a Django field type to a database column.
        For Oracle, the column data_type on its own is insufficient to
        distinguish between a FloatField and IntegerField, for example."""
        try:
            return self.data_types_reverse[data_type]
        except:
            # Bare except logs the unmapped type code (py2 print) and
            # re-raises, so new column types surface loudly.
            print '*' * 10,'DEBUG add the type', data_type, 'to introspection.py'
            raise
|
# coding: utf-8
from __future__ import unicode_literals, absolute_import
from ..exception import TigrisException
import urllib.parse
class Permission(object):
    """ Tigris Permission object """

    BASE_ENDPOINT = 'permissions'

    def __init__(self, permission_obj, session):
        """
        :param permission_obj:
            The permission data.
        :type permission_obj:
            `dict`
        :param session:
            The network session.
        :type session:
            :class:`TigrisSession`
        """
        self._session = session
        self._populate(permission_obj)

    @property
    def id(self):
        """Server-side id; ``False`` while the permission is unsaved."""
        return self._id

    def _populate(self, permission_obj):
        """
        Copies fields out of *permission_obj*, defaulting missing ones.

        ``id`` defaults to ``False`` (the "unsaved" sentinel); the other
        fields default to ``None``.
        """
        self._id = permission_obj.get('id', False)
        self.name = permission_obj.get('name')
        self.description = permission_obj.get('description')
        self.is_active = permission_obj.get('is_active')

    def _set_active(self, state):
        """
        Persists ``is_active = state`` via a PUT with an ``activate`` query.

        :raises TigrisException: when the permission has not been saved yet.
        """
        if not self._id:
            # BUG FIX: the original message was ungrammatical ("You are
            # activate") and deactivate() wrongly complained about "activate".
            verb = 'activate' if state else 'deactivate'
            raise TigrisException(
                'ERROR: You are attempting to {0} an unsaved permission. '
                'Please save it first, then {0}.'.format(verb))
        self.is_active = state
        query = '?' + urllib.parse.urlencode({'activate': self.is_active})
        url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id) + query
        self._session._put(url, data={})

    def activate(self):
        """
        Changes `is_active` to `True`
        """
        self._set_active(True)

    def deactivate(self):
        """
        Changes `is_active` to `False`
        """
        self._set_active(False)

    def destroy(self):
        """
        Deletes Permission
        """
        url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
        self._session._delete(url)

    def get(self):
        """
        Retrieves Permission
        :rtype:
            `dict` (or None when the permission was never saved)
        """
        if not self._id:
            return None
        url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
        content, status_code, headers = self._session._get(url)
        self._populate(content)
        return content

    def save(self, new=False):
        """
        Upserts the Permission object.

        :param new:
            Determines whether or not this Permission is to be inserted
            (True) or updated (False).
        :type new:
            `bool`
        :rtype:
            :class:`Permission` (self, repopulated from the server reply)
        :raises TigrisException: when the server reply contains an error.
        """
        # Strip the non-field attributes before shipping the payload.
        permission_obj = dict(vars(self))
        del permission_obj['_session']
        del permission_obj['_id']
        if new:
            content, status_code, headers = self._session._post(
                self.BASE_ENDPOINT,
                data={'fields': permission_obj})
            if 'error' in content:
                raise TigrisException(content['error'])
            self._populate(content)
        else:
            url = '{0}/{1}'.format(self.BASE_ENDPOINT, self._id)
            content, status_code, headers = self._session._patch(
                url,
                data={'fields': permission_obj})
            if 'error' in content:
                raise TigrisException(content['error'])
            self.get()
        return self
|
import json
from json_url_rewriter import config
from json_url_rewriter.rewrite import URLRewriter
class HeaderToPathPrefixRewriter(object):
    """
    A rewriter to take the value of a header and prefix any path.
    """

    def __init__(self, keys, base, header_name):
        self.keys = keys
        self.base = base
        self.header_name = header_name

    @property
    def regex(self):
        # Group 1 captures the base URL, group 2 the rest of the path.
        return '(%s)(.*)' % self.base

    def header(self):
        # e.g. 'X-Forwarded-Host' -> 'HTTP_X_FORWARDED_HOST' (WSGI style).
        wsgi_name = self.header_name.upper().replace('-', '_')
        return 'HTTP_' + wsgi_name

    def __call__(self, doc, environ):
        env_key = self.header()
        if env_key not in environ:
            # Header absent: nothing to rewrite.
            return doc
        prefix = environ[env_key]

        def insert_prefix(match):
            base, path = match.groups()
            return '%s/%s%s' % (base, prefix, path)

        return URLRewriter(self.keys, self.regex, insert_prefix)(doc)
class RewriteMiddleware(object):
    """WSGI middleware that rewrites successful JSON responses.

    Wraps *app*; when a response is 2xx with a JSON content type, the body
    is joined, parsed, passed through *rewriter* (a callable taking
    ``(doc, environ)``) and re-serialized.
    """

    def __init__(self, app, rewriter):
        self.app = app
        self.rewriter = rewriter

    @staticmethod
    def content_type(headers):
        """Return the content-type value (header names case-insensitive)."""
        return dict([(k.lower(), v) for k, v in headers]).get('content-type')

    def is_json(self, headers):
        """True when the response declares a JSON content type."""
        # BUG FIX: a response with no content-type header used to raise
        # TypeError ('json' in None); treat it as not-JSON instead.
        return 'json' in (self.content_type(headers) or '')

    @staticmethod
    def ok(status):
        """True for 2xx WSGI status strings such as '200 OK'."""
        return status.startswith('20')

    def rewrite(self, resp, environ):
        """Parse *resp*, apply the rewriter, and dump back to JSON text."""
        doc = self.rewriter(self.json(resp), environ)
        return json.dumps(doc)

    def json(self, resp):
        """Join the response iterable and parse it as JSON."""
        return json.loads(''.join(resp))

    def __call__(self, environ, start_response):
        # BUG FIX: the flag used to be stored on `self` ("self.do_rewrite"),
        # i.e. instance state shared by ALL in-flight requests, so one
        # request's decision could leak into a concurrent one.  A per-call
        # closure cell makes the decision request-local.
        do_rewrite = [False]

        # Request-local start_response wrapper that inspects the headers.
        def sr(status, response_headers, exc_info=None):
            if self.ok(status) and self.is_json(response_headers):
                do_rewrite[0] = True
            # Call the original start_response
            return start_response(status, response_headers, exc_info)

        resp = self.app(environ, sr)
        if do_rewrite[0]:
            return [self.rewrite(resp, environ)]
        return resp
def json_url_rewriter_filter_factory(global_conf, *args, **kw):
    """Paste filter-factory stub: dumps its arguments and always raises.

    NOTE(review): appears to be a debugging placeholder ('Blastoff') rather
    than a working factory -- confirm before deploying.
    """
    print(global_conf, args, kw)
    raise Exception('Blastoff')
|
#!/usr/bin/env python
from __future__ import print_functio | n
# Demo script: read, overwrite, append to and re-read ../File_example.txt.

# Read the example file line by line, stripping the trailing newline.
with open("../File_example.txt") as file_in:
    for line in file_in:
        print(line.strip())
print('#' * 40)

# "wt" truncates: the file is rewritten from scratch.
file_to_write = open("../File_example.txt", "wt")
print(file_to_write)
file_to_write.write("Line one\nLine two\nLine three\n")
file_to_write.flush()
file_to_write.close()
print('#' * 40)

# "at" appends.  NOTE(review): seek(0) has no effect on writes in append
# mode -- the following write still lands at the end of the file.
append_file = open("../File_example.txt", "at")
append_file.write("Line Three and Half\n")
append_file.flush()
append_file.seek(0)
append_file.write("Line addition\n")
append_file.flush()
append_file.close()
print(append_file)

# Re-read to show the final contents.
file_read = open("../File_example.txt")
print(file_read.read())
|
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from pypowervm.utils import uuid as uuid_utils
import unittest
class TestUUID(unittest.TestCase):
    """Unit tests for the uuid."""

    def test_uuid_conversion(self):
        # Shared tail; only the leading hex digit differs between cases.
        tail = '89ffb20-5d19-4a8c-bb80-13650627d985'
        # Leading '0' is already PowerVM-safe and passes through unchanged.
        self.assertEqual('0' + tail,
                         uuid_utils.convert_uuid_to_pvm('0' + tail))
        # '9' (0b1001) maps to '1' (0b0001).
        self.assertEqual('1' + tail,
                         uuid_utils.convert_uuid_to_pvm('9' + tail))
        # 'c' (0b1100) maps to '4' (0b0100).
        self.assertEqual('4' + tail,
                         uuid_utils.convert_uuid_to_pvm('c' + tail))

    def test_id_or_uuid(self):
        self.assertEqual((False, 123), uuid_utils.id_or_uuid(123))
        # Test all stringish permutations
        for conv in (lambda x: x, six.text_type):
            self.assertEqual((False, 123), uuid_utils.id_or_uuid(conv('123')))
            # Dashed and dashless forms are both recognised as UUIDs.
            for candidate in ('12345678-abcd-ABCD-0000-0a1B2c3D4e5F',
                              '12345678abcdABCD00000a1B2c3D4e5F'):
                uuid = conv(candidate)
                self.assertEqual((True, uuid), uuid_utils.id_or_uuid(uuid))
        # This one has too many digits
        self.assertRaises(ValueError, uuid_utils.id_or_uuid,
                          conv('12345678-abcd-ABCD-0000-0a1B2c3D4e5F0'))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-28 15:17
from __future__ import unicode_literals
import DjangoUeditor.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Autogenerated (Django 1.11.3, 2017-08-28): adds the BookReview,
    Essay and FilmReview models (all sharing the same field layout) and
    updates Article's options and category choices."""

    dependencies = [
        ('blogadmin', '0006_auto_20170827_1142'),
    ]

    operations = [
        migrations.CreateModel(
            name='BookReview',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='\u6807\u9898')),
                ('tag', models.CharField(blank=True, max_length=32, null=True, verbose_name='\u6807\u7b7e')),
                ('pub_time', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
                ('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
                ('content', DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\u6b63\u6587')),
            ],
            options={
                'ordering': ['-update_time'],
                'verbose_name': '\u4e66\u520a\u8bc4\u8bba',
                'verbose_name_plural': '\u4e66\u520a\u8bc4\u8bba',
            },
        ),
        migrations.CreateModel(
            name='Essay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='\u6807\u9898')),
                ('tag', models.CharField(blank=True, max_length=32, null=True, verbose_name='\u6807\u7b7e')),
                ('pub_time', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
                ('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
                ('content', DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\u6b63\u6587')),
            ],
            options={
                'ordering': ['-update_time'],
                'verbose_name': '\u6742\u6587',
                'verbose_name_plural': '\u6742\u6587',
            },
        ),
        migrations.CreateModel(
            name='FilmReview',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='\u6807\u9898')),
                ('tag', models.CharField(blank=True, max_length=32, null=True, verbose_name='\u6807\u7b7e')),
                ('pub_time', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
                ('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
                ('content', DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\u6b63\u6587')),
            ],
            options={
                'ordering': ['-update_time'],
                'verbose_name': '\u5f71\u89c6\u8bc4\u8bba',
                'verbose_name_plural': '\u5f71\u89c6\u8bc4\u8bba',
            },
        ),
        migrations.AlterModelOptions(
            name='article',
            options={'ordering': ['-update_time'], 'verbose_name': '\u6280\u672f\u7c7b\u535a\u5ba2', 'verbose_name_plural': '\u6280\u672f\u7c7b\u535a\u5ba2'},
        ),
        migrations.AlterField(
            model_name='article',
            name='category',
            field=models.CharField(choices=[('web', 'Web\u5f00\u53d1'), ('linux', '\u7cfb\u7edf\u8fd0\u7ef4'), ('algorithm', '\u7b97\u6cd5'), ('language', '\u7f16\u7a0b\u8bed\u8a00'), ('others', '\u5176\u4ed6')], default='web', max_length=64, verbose_name='\u7c7b\u522b'),
        ),
    ]
|
"""SCons.Tool.pdf
Common PDF Builder definition for various other Tool modules that use it.
Add an explicit action to | run epstopdf to convert .eps files to .pdf
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associate | d documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/pdf.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"

import SCons.Builder
import SCons.Tool

# Shared PDF builder instance; created lazily by generate() below so all
# tools that call generate() register actions on the same builder.
PDFBuilder = None

# NOTE(review): SCons.Action is not imported here directly -- this relies on
# SCons.Builder/SCons.Tool having made it available as a package attribute.
EpsPdfAction = SCons.Action.Action('$EPSTOPDFCOM', '$EPSTOPDFCOMSTR')
def generate(env):
    """Install the shared skeleton PDF builder into *env* (idempotent).

    The builder starts with empty action/emitter dicts; other tools
    (pdftex, epstopdf via generate2) add their entries afterwards.
    """
    try:
        # Already installed (EAFP probe) -- nothing to do.
        env['BUILDERS']['PDF']
    except KeyError:
        global PDFBuilder
        if PDFBuilder is None:
            PDFBuilder = SCons.Builder.Builder(action = {},
                                               source_scanner = SCons.Tool.PDFLaTeXScanner,
                                               prefix = '$PDFPREFIX',
                                               suffix = '$PDFSUFFIX',
                                               emitter = {},
                                               source_ext_match = None,
                                               single_source=True)
        env['BUILDERS']['PDF'] = PDFBuilder
        env['PDFPREFIX'] = ''
        env['PDFSUFFIX'] = '.pdf'
# put the epstopdf builder in this routine so we can add it after
# the pdftex builder so that one is the default for no source suffix
def generate2(env):
    """Register the .eps -> .pdf (epstopdf) action on the shared builder."""
    bld = env['BUILDERS']['PDF']
    #bld.add_action('.ps', EpsPdfAction) # this is covered by direct Ghostcript action in gs.py
    bld.add_action('.eps', EpsPdfAction)
    env['EPSTOPDF'] = 'epstopdf'
    env['EPSTOPDFFLAGS'] = SCons.Util.CLVar('')
    env['EPSTOPDFCOM'] = '$EPSTOPDF $EPSTOPDFFLAGS ${SOURCE} --outfile=${TARGET}'
def exists(env):
    # This only puts a skeleton Builder in place, so if someone
    # references this Tool directly, it's always "available."
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
# pylin | t: disable= | redefined-outer-name, missing-docstring
import sys
import pytest
sys.path.append('..')
from batchflow import Config
@pytest.fixture
def config():
    """A Config with one flat key and one nested key."""
    return Config({'key1': 'val1', 'key2': {'subkey1': 'val21'}})
class TestConfig:
    """Behavioural tests for Config's flat and '/'-separated nested access."""

    def test_getitem_key(self, config):
        assert config.config['key1'] == config['key1']

    def test_getitem_missing_key(self, config):
        with pytest.raises(KeyError):
            _ = config['missing key']

    def test_getitem_nested_key(self, config):
        assert config.config['key2']['subkey1'] == config['key2/subkey1']

    def test_get_key(self, config):
        assert config.config.get('key1') == config.get('key1')

    def test_get_nested_key(self, config):
        assert config.config['key2']['subkey1'] == config.get('key2/subkey1')

    def test_get_missing_key(self, config):
        assert config.get('missing key') is None

    def test_get_missing_key_with_default(self, config):
        assert config.get('missing key', default=1) == 1

    def test_get_nested_missing_key_with_default(self, config):
        assert config.get('key2/missing key', default=1) == 1

    def test_pop_key(self, config):
        expected = config.config.get('key1')
        assert expected == config.pop('key1')
        assert 'key1' not in config, 'key should have been deleted'

    def test_pop_nested_key(self, config):
        expected = config.config['key2']['subkey1']
        assert expected == config.pop('key2/subkey1')
        assert 'subkey1' not in config, 'nested key should have been deleted'
        assert 'key2' in config, 'outer key should remain'

    def test_pop_missing_key(self, config):
        with pytest.raises(KeyError):
            _ = config.pop('missing key')

    def test_pop_missing_key_with_default(self, config):
        assert config.pop('missing key', default=1) == 1

    def test_pop_nested_missing_key_with_default(self, config):
        assert config.pop('key2/missing key', default=1) == 1

    def test_setitem_key(self, config):
        config['key1'] = 'new_val1'
        assert config.config['key1'] == config['key1']
        assert 'new_val1' == config.config['key1']

    def test_setitem_nested_key(self, config):
        config['key2/subkey1'] = 'new_val21'
        assert config.config['key2']['subkey1'] == config['key2/subkey1']
        assert 'new_val21' == config.config['key2']['subkey1']

    def test_setitem_new_key(self, config):
        config['key0'] = 'new_val0'
        assert config.config['key0'] == config['key0']
        assert 'new_val0' == config.config['key0']

    def test_setitem_nested_new_key(self, config):
        config['key2/subkey2'] = 'new_val22'
        assert config.config['key2']['subkey2'] == config['key2/subkey2']
        assert 'new_val22' == config.config['key2']['subkey2']
|
# Copyright Vertex.AI
import ctypes
import json
class Context(object):
    """Owns a VAI operation-context handle.

    The instance is directly usable as a ctypes call argument through
    ``_as_parameter_``; the handle is released by :meth:`shutdown`
    (also triggered from ``__del__``).
    """

    def __init__(self, lib):
        self._as_parameter_ = lib.vai_alloc_ctx()
        if not self._as_parameter_:
            raise MemoryError('PlaidML operation context')
        # Cache the C entry points on the instance (presumably so __del__
        # can still free the handle during interpreter teardown -- confirm).
        self._free = lib.vai_free_ctx
        self._cancel = lib.vai_cancel_ctx
        self._set_eventlog = lib.vai_set_eventlog

    def __del__(self):
        self.shutdown()

    def cancel(self):
        """Cancel outstanding work on this context."""
        self._cancel(self)

    def set_eventlog_filename(self, filename):
        """Route eventing output to *filename* via a file EventLog config."""
        eventlog_config = {
            '@type': 'type.vertex.ai/vertexai.eventing.file.proto.EventLog',
            'filename': filename
        }
        self._set_eventlog(self, json.dumps(eventlog_config))

    def shutdown(self):
        """Release the context handle; safe to call more than once."""
        if not hasattr(self, '_free'):
            # __init__ raised before the entry points were cached.
            return
        if self._as_parameter_:
            self._free(self)
            self._as_parameter_ = None
|
import pytest # noqa
import python_jsonschema_objects as pjo
def test_regression_156(markdown_examples):
    """Instances of named classes must round-trip through the oneOf class."""
    builder = pjo.ObjectBuilder(
        markdown_examples["MultipleObjects"], resolved=markdown_examples
    )
    ns = builder.build_classes(named_only=True)
    error = ns.ErrorResponse(message="Danger!", status=99)
    version = ns.VersionGetResponse(local=False, version="1.2.3")
    # round-trip serialize-deserialize into named classes
    ns.ErrorResponse.from_json(error.serialize())
    ns.VersionGetResponse.from_json(version.serialize())
    # round-trip serialize-deserialize into class defined with `oneOf`
    ns.Multipleobjects.from_json(error.serialize())
    ns.Multipleobjects.from_json(version.serialize())
def test_toplevel_oneof_gets_a_name(markdown_examples):
    """A top-level oneOf schema must still be assigned a class title."""
    ns = pjo.ObjectBuilder(
        markdown_examples["MultipleObjects"], resolved=markdown_examples
    ).build_classes(named_only=True)
    assert ns.Multipleobjects.__title__ is not None
|
"""
Gnome keyring parser.
Sources:
- Gnome Keyring source code,
function generate_file() in keyrings/gkr-keyring.c,
Author: Victor Stinner
Creation date: 2008-04-09
"""
from hachoir_core.tools import paddingSize
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet,
Bit, NullBits, NullBytes,
UInt8, UInt32, String, RawBytes, Enum,
TimestampUnix64, CompressedField,
SubFile)
from hachoir_core.endian import BIG_ENDIAN
try:
    import hashlib

    def sha256(data):
        """Return the raw SHA-256 digest of *data*."""
        return hashlib.new('sha256', data).digest()
except ImportError:
    def sha256(data):
        # Fallback stub: fail loudly only when hashing is actually needed.
        raise ImportError("hashlib module is missing")
try:
    from Crypto.Cipher import AES
    class DeflateStream:
        # Despite the name, this stream decrypts with AES-128-CBC.
        # NOTE(review): password/salt/iteration count are hard-coded
        # placeholders, not real keyring credentials -- confirm intent.
        def __init__(self, stream):
            hash_iterations = 1234
            password = "x" * 8
            salt = "\0" * 8
            key, iv = generate_key(password, salt, hash_iterations)
            self.cipher = AES.new(key, AES.MODE_CBC, iv)
        def __call__(self, size, data=None):
            # Called by CompressedField; no data means nothing to emit.
            if data is None:
                return ''
            return self.cipher.decrypt(data)
    def Deflate(field):
        # Attach the decrypting stream to the field (CompressedField mutates it).
        CompressedField(field, DeflateStream)
        return field
except ImportError:
    # Without PyCrypto the encrypted payload is left untouched.
    def Deflate(field):
        return field
class KeyringString(FieldSet):
    """Length-prefixed UTF-8 string; a length of 0xffffffff means "no string"."""
    def createFields(self):
        yield UInt32(self, "length")
        size = self["length"].value
        if size != 0xffffffff:
            yield String(self, "text", size, charset="UTF-8")
    def createValue(self):
        return self["text"].value if "text" in self else u''
    def createDescription(self):
        return self["text"].value if "text" in self else u"(empty string)"
class Attribute(FieldSet):
    """Keyring item attribute: a name plus a string (type 0) or uint32 (type 1) value."""
    def createFields(self):
        yield KeyringString(self, "name")
        yield UInt32(self, "type")
        type = self["type"].value
        if type == 0:
            # Type 0: string-valued attribute.
            yield KeyringString(self, "value")
        elif type == 1:
            # Type 1: 32-bit unsigned integer attribute.
            yield UInt32(self, "value")
        else:
            # Unknown type codes abort parsing of this field set.
            raise TypeError("Unknown attribute type (%s)" % type)
    def createDescription(self):
        return 'Attribute "%s"' % self["name"].value
class ACL(FieldSet):
    """Access-control entry: allowed operation bitmask plus application identity."""
    def createFields(self):
        yield UInt32(self, "types_allowed")  # bitmask of permitted access types
        yield KeyringString(self, "display_name")
        yield KeyringString(self, "pathname")
        yield KeyringString(self, "reserved[]")
        yield UInt32(self, "reserved[]")
class Item(FieldSet):
    """Unencrypted keyring item header: id, type and its attribute list."""
    def createFields(self):
        yield UInt32(self, "id")
        yield UInt32(self, "type")
        yield UInt32(self, "attr_count")
        for index in xrange(self["attr_count"].value):
            yield Attribute(self, "attr[]")
    def createDescription(self):
        return "Item #%s: %s attributes" % (self["id"].value, self["attr_count"].value)
class Items(FieldSet):
    """Counted sequence of Item entries."""
    def createFields(self):
        yield UInt32(self, "count")
        for index in xrange(self["count"].value):
            yield Item(self, "item[]")
class EncryptedItem(FieldSet):
    """Decrypted item payload: secret, timestamps, attributes and ACL entries."""
    def createFields(self):
        yield KeyringString(self, "display_name")
        yield KeyringString(self, "secret")
        yield TimestampUnix64(self, "mtime")
        yield TimestampUnix64(self, "ctime")
        yield KeyringString(self, "reserved[]")
        for index in xrange(4):
            yield UInt32(self, "reserved[]")
        yield UInt32(self, "attr_count")
        for index in xrange(self["attr_count"].value):
            yield Attribute(self, "attr[]")
        yield UInt32(self, "acl_count")
        for index in xrange(self["acl_count"].value):
            yield ACL(self, "acl[]")
    # size = 8 # paddingSize((self.stream.size - self.current_size) // 8, 16)
    # if size:
    # yield NullBytes(self, "hash_padding", size, "16 bytes alignment")
class EncryptedData(Parser):
    """Parser for the decrypted blob: an MD5 digest, items, then alignment padding."""
    PARSER_TAGS = {
        "id": "gnomeencryptedkeyring",
        "min_size": 16*8,
        "description": u"Gnome encrypted keyring",
    }
    endian = BIG_ENDIAN
    def validate(self):
        # Only reachable after decryption; there is no magic to check here.
        return True
    def createFields(self):
        yield RawBytes(self, "md5", 16)
        while True:
            size = (self.size - self.current_size) // 8
            # 77 presumably is the minimum encoded size of an
            # EncryptedItem -- TODO confirm against gkr-keyring.c.
            if size < 77:
                break
            yield EncryptedItem(self, "item[]")
        size = paddingSize(self.current_size // 8, 16)
        if size:
            yield NullBytes(self, "padding_align", size)
class GnomeKeyring(Parser):
    """Top-level parser for Gnome keyring files, identified by MAGIC at offset 0."""
    MAGIC = "GnomeKeyring\n\r\0\n"
    PARSER_TAGS = {
        "id": "gnomekeyring",
        "category": "misc",
        "magic": ((MAGIC, 0),),
        "min_size": 47*8,
        "description": u"Gnome keyring",
    }
    # Known cipher / hash identifiers from the file header.
    CRYPTO_NAMES = {
        0: u"AEL",
    }
    HASH_NAMES = {
        0: u"MD5",
    }
    endian = BIG_ENDIAN
    def validate(self):
        if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
            return u"Invalid magic string"
        return True
    def createFields(self):
        yield String(self, "magic", len(self.MAGIC), 'Magic string (%r)' % self.MAGIC, charset="ASCII")
        yield UInt8(self, "major_version")
        yield UInt8(self, "minor_version")
        yield Enum(UInt8(self, "crypto"), self.CRYPTO_NAMES)
        yield Enum(UInt8(self, "hash"), self.HASH_NAMES)
        yield KeyringString(self, "keyring_name")
        yield TimestampUnix64(self, "mtime")
        yield TimestampUnix64(self, "ctime")
        yield Bit(self, "lock_on_idle")
        yield NullBits(self, "reserved[]", 31, "Reserved for future flags")
        yield UInt32(self, "lock_timeout")
        yield UInt32(self, "hash_iterations")
        yield RawBytes(self, "salt", 8)
        yield NullBytes(self, "reserved[]", 16)
        yield Items(self, "items")
        yield UInt32(self, "encrypted_size")
        # The encrypted payload is exposed as a sub-file; Deflate attaches
        # AES decryption when PyCrypto is available.
        yield Deflate(SubFile(self, "encrypted", self["encrypted_size"].value, "AES128 CBC", parser_class=EncryptedData))
def generate_key(password, salt, hash_iterations):
    """Derive (key, iv) for AES-128 by iterated SHA-256 of password+salt.

    The 32-byte final digest is split: first 16 bytes are the key,
    last 16 bytes the IV.
    """
    digest = sha256(password + salt)
    for _ in xrange(hash_iterations - 1):
        digest = sha256(digest)
    return digest[:16], digest[16:]
|
tact_from_active_monitor(dbcon: DBConnection, contact_id: int, monitor_id: int) -> None:
"""Disconnect a contact and an active monitor."""
q = """delete from active_monitor_contacts where active_monitor_id=%s and contact_id=%s"""
q_args = (monitor_id, contact_id)
await dbcon.operation(q, q_args)
async def set_active_monitor_contacts(dbcon: DBConnection,
                                      contact_ids: Iterable[int], monitor_id: int) -> None:
    """(Re-)set contacts for an active monitor.

    Delete existing contacts for an active monitor and set the given new
    contacts, all within a single transaction.

    Raises:
        errors.InvalidArguments: if the monitor does not exist.
    """
    # Fixed: return annotation was missing; siblings declare `-> None`.
    async def _run(cur: Cursor) -> None:
        q = """delete from active_monitor_contacts where active_monitor_id=%s"""
        await cur.execute(q, (monitor_id,))
        # The insert statement is loop-invariant; only the args change.
        q = """insert into active_monitor_contacts (active_monitor_id, contact_id) values (%s, %s)"""
        for contact_id in contact_ids:
            await cur.execute(q, (monitor_id, contact_id))

    if not await active_monitor_exists(dbcon, monitor_id):
        raise errors.InvalidArguments('monitor does not exist')
    await dbcon.transact(_run)
async def get_contacts_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.Contact]:
    """Get contacts for an active monitor.

    Return a list of Contact objects, one per connected contact.
    """
    q = """select
        contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
        from active_monitor_contacts, contacts
        where active_monitor_contacts.active_monitor_id = %s
        and active_monitor_contacts.contact_id = contacts.id"""
    rows = await dbcon.fetch_all(q, (monitor_id,))
    return [object_models.Contact(*row) for row in rows]
async def add_contact_group_to_active_monitor(dbcon: DBConnection, contact_group_id: int, monitor_id: int) -> None:
    """Connect a contact group and an active monitor.

    Raises:
        errors.InvalidArguments: if the monitor or contact group does not exist.
    """
    if not await active_monitor_exists(dbcon, monitor_id):
        raise errors.InvalidArguments('monitor does not exist')
    if not await contact_group_exists(dbcon, contact_group_id):
        # Fixed: message previously said 'contact does not exist' even though
        # this branch checks for a missing contact *group*.
        raise errors.InvalidArguments('contact group does not exist')
    q = """replace into active_monitor_contact_groups (active_monitor_id, contact_group_id) values (%s, %s)"""
    await dbcon.operation(q, (monitor_id, contact_group_id))
async def delete_contact_group_from_active_monitor(dbcon: DBConnection, contact_group_id: int, monitor_id: int) -> None:
    """Disconnect a contact group and an active monitor."""
    await dbcon.operation(
        """delete from active_monitor_contact_groups where active_monitor_id=%s and contact_group_id=%s""",
        (monitor_id, contact_group_id))
async def set_active_monitor_contact_groups(dbcon: DBConnection,
                                            contact_group_ids: Iterable[int], monitor_id: int) -> None:
    """(Re-)set contact groups for an active monitor.

    Delete existing contact groups for an active monitor and set the given
    new contact groups, all within a single transaction.
    """
    async def _run(cur: Cursor) -> None:
        await cur.execute(
            """delete from active_monitor_contact_groups where active_monitor_id=%s""",
            (monitor_id,))
        for group_id in contact_group_ids:
            await cur.execute(
                """insert into active_monitor_contact_groups (active_monitor_id, contact_group_id) values (%s, %s)""",
                (monitor_id, group_id))

    if not await active_monitor_exists(dbcon, monitor_id):
        raise errors.InvalidArguments('monitor does not exist')
    await dbcon.transact(_run)
async def get_contact_groups_for_active_monitor(
        dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.ContactGroup]:
    """Get contact groups for an active monitor."""
    q = """select
        contact_groups.id, contact_groups.name, contact_groups.active
        from active_monitor_contact_groups, contact_groups
        where active_monitor_contact_groups.active_monitor_id = %s
        and active_monitor_contact_groups.contact_group_id = contact_groups.id"""
    rows = await dbcon.fetch_all(q, (monitor_id,))
    return [object_models.ContactGroup(*row) for row in rows]
async def get_all_contacts(dbcon: DBConnection) -> Iterable[object_models.Contact]:
    """Get all contacts."""
    rows = await dbcon.fetch_all("""select id, name, email, phone, active from contacts""")
    return [object_models.Contact(*row) for row in rows]
async def get_contact(dbcon: DBConnection, id: int) -> Any:  # Use any because optional returns suck.
    """Get a single contact if it exists, otherwise None."""
    q = """select id, name, email, phone, active from contacts where id=%s"""
    row = await dbcon.fetch_row(q, (id,))
    if not row:
        return None
    return object_models.Contact(*row)
async def get_contacts_for_metadata(
        dbcon: DBConnection, meta_key: str, meta_value: str) -> Iterable[object_models.Contact]:
    """Get contacts whose object metadata contains the given key/value pair."""
    q = """select c.id, c.name, c.email, c.phone, c.active
        from contacts as c, object_metadata as meta
        where meta.key=%s and meta.value=%s and meta.object_type="contact" and meta.object_id=c.id"""
    rows = await dbcon.fetch_all(q, (meta_key, meta_value))
    return [object_models.Contact(*row) for row in rows]
async def add_contact_to_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:
    """Connect a contact and a contact group.

    Raises:
        errors.InvalidArguments: if the contact group or contact does not exist.
    """
    if not await contact_group_exists(dbcon, contact_group_id):
        raise errors.InvalidArguments('contact group does not exist')
    if not await contact_exists(dbcon, contact_id):
        raise errors.InvalidArguments('contact does not exist')
    await dbcon.operation(
        """replace into contact_group_contacts (contact_group_id, contact_id) values (%s, %s)""",
        (contact_group_id, contact_id))
async def delete_contact_from_contact_group(dbcon: DBConnection, contact_group_id: int, contact_id: int) -> None:
    """Disconnect a contact and a contact_group."""
    await dbcon.operation(
        """delete from contact_group_contacts where contact_group_id=%s and contact_id=%s""",
        (contact_group_id, contact_id))
async def set_contact_group_contacts(dbcon: DBConnection,
                                     contact_group_id: int, contact_ids: Iterable[int]) -> None:
    """(Re-)set contacts for a contact group.

    Delete existing contacts for a contact group and set the given new
    contacts, all within a single transaction.
    """
    async def _run(cur: Cursor) -> None:
        await cur.execute(
            """delete from contact_group_contacts where contact_group_id=%s""",
            (contact_group_id,))
        for contact_id in contact_ids:
            await cur.execute(
                """insert into contact_group_contacts (contact_group_id, contact_id) values (%s, %s)""",
                (contact_group_id, contact_id))

    if not await contact_group_exists(dbcon, contact_group_id):
        raise errors.InvalidArguments('contact group does not exist')
    await dbcon.transact(_run)
async def get_contacts_for_contact_group(dbcon: DBConnection, contact_group_id: int) -> Iterable[object_models.Contact]:
    """Get contacts for a contact group."""
    q = """select
        contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active
        from contact_group_contacts, contacts
        where contact_group_contacts.contact_group_id = %s
        and contact_group_contacts.contact_id = contacts.id"""
    rows = await dbcon.fetch_all(q, (contact_group_id,))
    return [object_models.Contact(*row) for row in rows]
async def get_all_contact_groups(dbcon: DBConnection) -> Iterable[object_models.ContactGroup]:
    """Get all contact groups."""
    rows = await dbcon.fetch_all("""select id, name, active from contact_groups""")
    return [object_models.ContactGroup(*row) for row in rows]
async def get_contact_group(dbcon: DBConnection, id: int) -> Any: # Use any because optional returns suck.
"""Get a single contact if it exists.
Return a list of d |
# -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to th | e Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from . import exercise
from .S_Structure imp | ort S_Structure
FONT_SIZE_OFFSET = -1  # offset applied to the sheet's base font size
SHEET_LAYOUT_TYPE = 'default'
SHEET_LAYOUT_UNIT = "cm"
# EXAMPLE OF A SHEET NOT USING ANY LAYOUT
# ----------------------- lines_nb col_widths exercises
SHEET_LAYOUT = {'exc': [None, 'all'],
                'ans': [None, 'all']
                }
# ------------------------------------------------------------------------------
# --------------------------------------------------------------------------
# ------------------------------------------------------------------------------
##
# @class AlgebraMiniTest0
# @brief A simple algebra mini-test
class AlgebraMiniTest0(S_Structure):
    """A simple algebra mini-test: ten expansion exercises, no custom layout."""

    def __init__(self, **options):
        """Build the sheet and populate its exercises list.

        @param **options Any options
        @return One instance of sheet.Model
        """
        self.derived = True
        S_Structure.__init__(self, FONT_SIZE_OFFSET,
                             SHEET_LAYOUT_UNIT, SHEET_LAYOUT,
                             SHEET_LAYOUT_TYPE)
        # Sheet titling is intentionally blank except for the answers title.
        self.header = ""
        self.title = ""
        self.subtitle = ""
        self.text = ""
        self.answers_title = _("Examples of answers")
        # Ten expansion exercises in mini-test form.
        for _unused in range(10):
            self.exercises_list.append(
                exercise.X_AlgebraExpressionExpansion(
                    x_kind='mini_test', x_subkind='two_randomly'))
|
# ECB wrapper skeleton file for 50.020 S | ecurity
# Oka, SUTD, 2014
| from present import *
import argparse
nokeybits=80  # PRESENT-80 key length, in bits
blocksize=64  # PRESENT block size, in bits
def ecb(infile, outfile, keyfile, mode):
    """Run the PRESENT cipher over *infile* in ECB mode, writing to *outfile*.

    mode 'c' encrypts; anything else decrypts. The key is read
    little-endian from *keyfile*; a short final block is zero-padded.

    Note: byte handling (ord/chr on str) follows Python 2 semantics,
    matching the rest of this script.
    """
    # Use '//' so the counts stay integers: identical on Python 2,
    # and it avoids float range() arguments under Python 3.
    key_bytes = nokeybits // 8
    block_bytes = blocksize // 8
    key = 0x0
    with open(keyfile, 'rb') as fkey:
        for i in range(key_bytes):
            key |= ord(fkey.read(1)) << i * 8
    with open(infile, 'rb') as fin:
        with open(outfile, 'wb') as fout:
            while True:
                buf = fin.read(block_bytes)
                if buf == '':
                    break
                if len(buf) != block_bytes:
                    # Zero-pad the trailing partial block.
                    buf += '\0' * (block_bytes - len(buf))
                chunk = 0x0
                for i in range(block_bytes):
                    chunk |= ord(buf[i]) << i * 8
                if mode == 'c':
                    result = present(chunk, key)
                else:
                    result = present_inv(chunk, key)
                for i in range(block_bytes):
                    fout.write(chr((result >> i * 8) & 0xff))
if __name__ == "__main__":
    # Command-line front end: collect the four file/mode options and run.
    parser = argparse.ArgumentParser(description='Block cipher using ECB mode.')
    parser.add_argument('-i', dest='infile', help='input file')
    parser.add_argument('-o', dest='outfile', help='output file')
    parser.add_argument('-k', dest='keyfile', help='key file')
    parser.add_argument('-m', dest='mode', help='mode')
    args = parser.parse_args()
    ecb(args.infile, args.outfile, args.keyfile, args.mode)
estAvatar} raises L{NotImplementedError}
unless it is supplied with an L{pop3.IMailbox} interface.
When called with an L{pop3.IMailbox}, it returns a 3-tuple
containing L{pop3.IMailbox}, an implementation of that interface
and a NOOP callable.
"""
class ISomething(Interface):
pass
self.D.addUser('user', 'password')
self.assertRaises(
NotImplementedError,
self.D.requestAvatar, 'user', None, ISomething
)
t = self.D.requestAvatar('user', None, pop3.IMailbox)
self.assertEqual(len(t), 3)
self.assertTrue(t[0] is pop3.IMailbox)
self.assertTrue(pop3.IMailbox.providedBy(t[1]))
t[2]()
def test_requestAvatarId(self):
    """
    L{DirdbmDatabase.requestAvatarId} rejects invalid credentials with
    L{UnauthorizedLogin} and returns the username for valid credentials.
    """
    self.D.addUser('user', 'password')
    checker = self.D.getCredentialsCheckers()[0]
    bad = cred.credentials.UsernamePassword('user', 'wrong password')
    self.assertRaises(
        cred.error.UnauthorizedLogin,
        checker.requestAvatarId, bad
    )
    good = cred.credentials.UsernamePassword('user', 'password')
    self.assertEqual(checker.requestAvatarId(good), 'user')
def test_userDirectory(self):
    """
    L{MaildirDirdbmDomain.userDirectory} maps an existing user to their
    maildir subdirectory. For unknown users it falls back to the
    'postmaster' directory when there is a postmaster, else L{None}.
    """
    self.D.addUser('user', 'password')
    expected = os.path.join(self.D.root, 'user')
    self.assertEqual(self.D.userDirectory('user'), expected)
    self.D.postmaster = False
    self.assertIdentical(self.D.userDirectory('nouser'), None)
    self.D.postmaster = True
    fallback = os.path.join(self.D.root, 'postmaster')
    self.assertEqual(self.D.userDirectory('nouser'), fallback)
@implementer(mail.mail.IAliasableDomain)
class StubAliasableDomain(object):
    """
    Minimal IAliasableDomain stand-in: records its alias group and stubs
    everything else.
    """

    def exists(self, user):
        """
        Not exercised by any test; deliberately left unimplemented.
        """
        raise NotImplementedError()

    def addUser(self, user, password):
        """
        Not exercised by any test; deliberately left unimplemented.
        """
        raise NotImplementedError()

    def getCredentialsCheckers(self):
        """
        Return no checkers. This only needs to succeed so portal setup in
        other tests completes; nothing asserts on its behavior yet.
        """
        return []

    def setAliasGroup(self, aliases):
        """
        Remember the alias group so tests can assert on it later.
        """
        self.aliasGroup = aliases
class ServiceDomainTests(unittest.TestCase):
    """Tests for DomainDeliveryBase backed by a maildir domain with one user."""
    def setUp(self):
        # One mail service with a single maildir-backed domain and one user.
        self.S = mail.mail.MailService()
        self.D = mail.protocols.DomainDeliveryBase(self.S, None)
        self.D.service = self.S
        self.D.protocolName = 'TEST'
        self.D.host = 'hostname'
        self.tmpdir = self.mktemp()
        domain = mail.maildir.MaildirDirdbmDomain(self.S, self.tmpdir)
        domain.addUser('user', 'password')
        self.S.addDomain('test.domain', domain)
    def tearDown(self):
        # Remove the maildir created in setUp.
        shutil.rmtree(self.tmpdir)
    def testAddAliasableDomain(self):
        """
        Test that adding an IAliasableDomain to a mail service properly sets
        up alias group references and such.
        """
        aliases = object()
        domain = StubAliasableDomain()
        self.S.aliases = aliases
        self.S.addDomain('example.com', domain)
        self.assertIdentical(domain.aliasGroup, aliases)
    def testReceivedHeader(self):
        # The generated Received header must parse as exactly one header.
        hdr = self.D.receivedHeader(
            ('remotehost', '123.232.101.234'),
            smtp.Address('<someguy@someplace>'),
            ['user@host.name']
        )
        fp = StringIO.StringIO(hdr)
        emailParser = email.parser.Parser()
        m = emailParser.parse(fp)
        self.assertEqual(len(m.items()), 1)
        self.assertIn('Received', m)
    def testValidateTo(self):
        # A known user in a known domain yields a callable message factory.
        user = smtp.User('user@test.domain', 'helo', None, 'wherever@whatever')
        return defer.maybeDeferred(self.D.validateTo, user
            ).addCallback(self._cbValidateTo
            )
    def _cbValidateTo(self, result):
        self.assertTrue(callable(result))
    def testValidateToBadUsername(self):
        # Unknown user in a known domain is rejected with SMTPBadRcpt.
        user = smtp.User('resu@test.domain', 'helo', None, 'wherever@whatever')
        return self.assertFailure(
            defer.maybeDeferred(self.D.validateTo, user),
            smtp.SMTPBadRcpt)
    def testValidateToBadDomain(self):
        # Unknown domain is rejected with SMTPBadRcpt.
        user = smtp.User('user@domain.test', 'helo', None, 'wherever@whatever')
        return self.assertFailure(
            defer.maybeDeferred(self.D.validateTo, user),
            smtp.SMTPBadRcpt)
    def testValidateFrom(self):
        # Any origin (matching, mismatching, or empty) passes through
        # unchanged; a missing HELO raises SMTPBadSender.
        helo = ('hostname', '127.0.0.1')
        origin = smtp.Address('<user@hostname>')
        self.assertTrue(self.D.validateFrom(helo, origin) is origin)
        helo = ('hostname', '1.2.3.4')
        origin = smtp.Address('<user@hostname>')
        self.assertTrue(self.D.validateFrom(helo, origin) is origin)
        helo = ('hostname', '1.2.3.4')
        origin = smtp.Address('<>')
        self.assertTrue(self.D.validateFrom(helo, origin) is origin)
        self.assertRaises(
            smtp.SMTPBadSender,
            self.D.validateFrom, None, origin
        )
class VirtualPOP3Tests(unittest.TestCase):
def setUp(self):
    """Create a mail service with one user and register its checkers."""
    self.tmpdir = self.mktemp()
    self.S = mail.mail.MailService()
    self.D = mail.maildir.MaildirDirdbmDomain(self.S, self.tmpdir)
    self.D.addUser('user', 'password')
    self.S.addDomain('test.domain', self.D)
    portal = cred.portal.Portal(self.D)
    # Fixed: previously used map() for its side effects; map() is lazy on
    # Python 3, which would silently register no checkers.
    for checker in self.D.getCredentialsCheckers():
        portal.registerChecker(checker)
    self.S.portals[''] = self.S.portals['test.domain'] = portal
    self.P = mail.protocols.VirtualPOP3()
    self.P.service = self.S
    self.P.magic = '<unit test magic>'
def tearDown(self):
    # Remove the maildir created in setUp.
    shutil.rmtree(self.tmpdir)
def testAuthenticateAPOP(self):
    """A correct APOP digest for a known user authenticates successfully."""
    digest = md5(self.P.magic + 'password').hexdigest()
    d = self.P.authenticateUserAPOP('user', digest)
    return d.addCallback(self._cbAuthenticateAPOP)
def _cbAuthenticateAPOP(self, result):
    """The avatar triple is (IMailbox, mailbox provider, logout callable)."""
    self.assertEqual(len(result), 3)
    interface, avatar, logout = result
    self.assertEqual(interface, pop3.IMailbox)
    self.assertTrue(pop3.IMailbox.providedBy(avatar))
    logout()
def testAuthenticateIncorrectUserAPOP(self):
    """APOP with an unknown username fails with UnauthorizedLogin."""
    digest = md5(self.P.magic + 'password').hexdigest()
    d = self.P.authenticateUserAPOP('resu', digest)
    return self.assertFailure(d, cred.error.UnauthorizedLogin)
def testAuthenticateIncorrectResponseAPOP(self):
    """APOP with a wrong digest fails with UnauthorizedLogin."""
    digest = md5('wrong digest').hexdigest()
    d = self.P.authenticateUserAPOP('user', digest)
    return self.assertFailure(d, cred.error.UnauthorizedLogin)
def testAuthenticatePASS(self):
    """USER/PASS with correct credentials authenticates successfully."""
    d = self.P.authenticateUserPASS('user', 'password')
    return d.addCallback(self._cbAuthenticatePASS)
def _cbAuthenticatePASS(self, result):
    """The avatar triple is (IMailbox, mailbox provider, logout callable)."""
    self.assertEqual(len(result), 3)
    interface, avatar, logout = result
    self.assertEqual(interface, pop3.IMailbox)
    self.assertTrue(pop3.IMailbox.providedBy(avatar))
    logout()
def testAuthenticateBadUserPASS(self):
return self.assertFailure(
|
from datetime import timedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db.models import Count, F
from django.utils.timezone import now
from hc.accounts.models import Profile
class Command(BaseCommand):
    help = """Prune old, inactive user accounts.

    Conditions for removing an user account:
    - created 1 month ago and never logged in. Does not belong
    to any team.

    Use case: visitor types in their email at the website but
    never follows through with login.

    """

    def handle(self, *args, **options):
        """Delete never-logged-in accounts and profiles past their notice."""
        cutoff = now() - timedelta(days=30)
        # Accounts older than a month that never logged in and belong
        # to no team.
        stale = (User.objects.order_by("id")
                 .annotate(n_teams=Count("memberships"))
                 .filter(date_joined__lt=cutoff, last_login=None, n_teams=0))
        _, summary = stale.delete()
        pruned = summary.get("auth.User", 0)
        self.stdout.write("Pruned %d never-logged-in user accounts." % pruned)
        # Profiles whose deletion notice is a month old, skipping anyone
        # who logged in after receiving the notice.
        doomed = (Profile.objects.order_by("id")
                  .filter(deletion_notice_date__lt=cutoff)
                  .exclude(user__last_login__gt=F("deletion_notice_date")))
        for profile in doomed:
            self.stdout.write("Deleting inactive %s" % profile.user.email)
            profile.user.delete()
        return "Done!"
|
#!/usr/bin/python
import Proxy_Hours, proxyhours_gather_all_data
try:
from PyQt4 import QtCore, QtGui
qtplatform = "PyQt4"
except:
from PySide import Qt | Core, QtGui
| qtplatform = "PySide"
import os
def which(pgm):
    """Return the full path of *pgm* if an executable with that name is on
    PATH, otherwise None (implicitly)."""
    for directory in os.getenv('PATH').split(os.path.pathsep):
        candidate = os.path.join(directory, pgm)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
os.which=which  # attach for convenient access as os.which elsewhere
print os.which("pdftohtml")  # Python 2 print; sanity-check the pdftohtml dependency
def selectFile():
    """Prompt for a file, run the proxy-hours extraction and fill the UI."""
    name = QtGui.QFileDialog.getOpenFileName()
    if qtplatform == "PySide":
        # PySide returns (filename, selected_filter); PyQt4 returns a string.
        name = name[0]
    print name
    ui.FilelineEdit.setText(name)
    nametxt = str(ui.FilelineEdit.text())
    nametxt = os.path.abspath(nametxt)
    print "Nametxt:", nametxt
    # write_out_0: table rows; 1-3: log / all-data / time summaries.
    write_out_0, write_out_1, write_out_2, write_out_3 = proxyhours_gather_all_data.proxy_hours(nametxt)
    ui.log_lineEdit.setText(write_out_1)
    ui.all_data_lineEdit.setText(write_out_2)
    ui.time_lineEdit.setText(write_out_3)
    ui.tableWidget.setRowCount(len(write_out_0))
    for pos, row in enumerate(write_out_0):
        add_row(pos,row)
def add_row(pos, row):
    """Fill row *pos* of the global ui.tableWidget with the values in *row*."""
    for col, cell in enumerate(row):
        ui.tableWidget.setItem(pos, col, QtGui.QTableWidgetItem(str(cell)))
if __name__ == "__main__":
    import sys
    # Build the Qt application and the generated main window.
    app = QtGui.QApplication(sys.argv)
    MainWindow = Proxy_Hours.QtGui.QMainWindow()
    ui = Proxy_Hours.Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    # Wire both the push button and the menu action to the file picker.
    ui.OpenpushButton.clicked.connect(selectFile)
    ui.actionOpen.triggered.connect(selectFile)
    ui.actionQuit.triggered.connect(QtCore.QCoreApplication.instance().quit)
    sys.exit(app.exec_())
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Vote inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from luigi.configuration import LuigiTomlParser, get_config, add_config_path
from helpers import LuigiTestCase
class TomlConfigParserTest(LuigiTestCase):
    """Behavioral tests for LuigiTomlParser backed by two layered TOML files."""
    @classmethod
    def setUpClass(cls):
        # The second (local) file overrides values from the first.
        add_config_path('test/testconfig/luigi.toml')
        add_config_path('test/testconfig/luigi_local.toml')
    def setUp(self):
        # Reset the parser singleton so each test re-reads the files.
        LuigiTomlParser._instance = None
        super(TomlConfigParserTest, self).setUp()
    def test_get_config(self):
        config = get_config('toml')
        self.assertIsInstance(config, LuigiTomlParser)
    def test_file_reading(self):
        config = get_config('toml')
        self.assertIn('hdfs', config.data)
    def test_get(self):
        config = get_config('toml')
        # test getting
        self.assertEqual(config.get('hdfs', 'client'), 'hadoopcli')
        self.assertEqual(config.get('hdfs', 'client', 'test'), 'hadoopcli')
        # test default
        self.assertEqual(config.get('hdfs', 'test', 'check'), 'check')
        with self.assertRaises(KeyError):
            config.get('hdfs', 'test')
        # test override
        self.assertEqual(config.get('hdfs', 'namenode_host'), 'localhost')
        # test non-string values
        self.assertEqual(config.get('hdfs', 'namenode_port'), 50030)
    def test_set(self):
        config = get_config('toml')
        self.assertEqual(config.get('hdfs', 'client'), 'hadoopcli')
        config.set('hdfs', 'client', 'test')
        self.assertEqual(config.get('hdfs', 'client'), 'test')
        # set() may also create keys that were not in the files.
        config.set('hdfs', 'check', 'test me')
        self.assertEqual(config.get('hdfs', 'check'), 'test me')
    def test_has_option(self):
        config = get_config('toml')
        self.assertTrue(config.has_option('hdfs', 'client'))
        self.assertFalse(config.has_option('hdfs', 'nope'))
        self.assertFalse(config.has_option('nope', 'client'))
class HelpersTest(LuigiTestCase):
    """The config helpers must raise ImportError when TOML support is disabled."""

    def test_add_without_install(self):
        saved = LuigiTomlParser.enabled
        LuigiTomlParser.enabled = False
        with self.assertRaises(ImportError):
            add_config_path('test/testconfig/luigi.toml')
        LuigiTomlParser.enabled = saved

    def test_get_without_install(self):
        saved = LuigiTomlParser.enabled
        LuigiTomlParser.enabled = False
        with self.assertRaises(ImportError):
            get_config('toml')
        LuigiTomlParser.enabled = saved
|
# coding: utf8
# qtplayer.py
# 10/1/2014 jichi
__all__ = 'HiddenPlayer',  # trailing comma makes this a one-element tuple
from PySide.QtCore import QUrl
from sakurakit.skdebug import dprint
class _HiddenPlayer:
    """Private implementation: lazily owns a zero-sized QWebView."""
    def __init__(self, parent):
        self.parent = parent # QWidget
        self._webView = None # QWebView, created on first access
    @property
    def webView(self):
        # Lazy creation so no web view exists until it is actually needed.
        if not self._webView:
            dprint("create web view")
            from PySide.QtWebKit import QWebView
            view = QWebView(self.parent)
            update_web_settings(view.settings())
            view.resize(0, 0) # zero size keeps it invisible
            self._webView = view
        return self._webView
    def setParent(self, value):
        self.parent = value
        if self._webView:
            self._webView.setParent(value)
    def stop(self):
        if self._webView:
            self._webView.stop()
class HiddenPlayer(object):
    """Public facade over _HiddenPlayer (pimpl pattern)."""
    def __init__(self, parent=None):
        self.__d = _HiddenPlayer(parent)
    def parentWidget(self): return self.__d.parent
    def setParentWidget(self, value): self.__d.setParent(value)
    def webView(self): return self.__d.webView
    def stop(self):
        self.__d.stop()
    def play(self, url, **kwargs):
        """
        @param url str or QUrl
        @param* kwargs extra query parameters appended to the URL
        """
        if not isinstance(url, QUrl):
            url = QUrl(url)
        for k,v in kwargs.iteritems():  # Python 2 dict iteration
            #url.addQueryItem(k, v)
            if not isinstance(v, basestring):
                # Stringify non-string values before percent-encoding.
                v = "%s" % v
            url.addEncodedQueryItem(k, QUrl.toPercentEncoding(v))
        self.__d.webView.load(url)
def update_web_settings(settings=None):
    """
    Enable plugins/Java/DNS prefetch, disable image loading, cap page cache.
    @param settings QWebSettings or None (None targets the global settings)
    """
    from PySide.QtWebKit import QWebSettings
    ws = settings or QWebSettings.globalSettings()
    ws.setAttribute(QWebSettings.PluginsEnabled, True)
    ws.setAttribute(QWebSettings.JavaEnabled, True)
    ws.setAttribute(QWebSettings.DnsPrefetchEnabled, True) # better performance
    ws.setAttribute(QWebSettings.AutoLoadImages, False) # do NOT load images
    #ws.setAttribute(QWebSettings.JavascriptCanOpenWindows, True)
    #ws.setAttribute(QWebSettings.JavascriptCanAccessClipboard, True)
    #ws.setAttribute(QWebSettings.DeveloperExtrasEnabled, True)
    #ws.setAttribute(QWebSettings.OfflineStorageDatabaseEnabled, True)
    #ws.setAttribute(QWebSettings.OfflineWebApplicationCacheEnabled, True)
    #ws.setAttribute(QWebSettings.LocalStorageEnabled, True)
    #ws.setAttribute(QWebSettings.LocalContentCanAccessRemoteUrls, True)
    #ws.setAttribute(QWebSettings.ZoomTextOnly, False)
    #ws.setDefaultTextEncoding("SHIFT-JIS")
    #ws.setDefaultTextEncoding("EUC-JP")
    #ws.setLocalStoragePath(G_PATH_CACHES)
    #QWebSettings.setIconDatabasePath(G_PATH_CACHES)
    #QWebSettings.setOfflineStoragePath(G_PATH_CACHES)
    #QWebSettings.setOfflineWebApplicationCachePath(G_PATH_CACHES)
    # See: http://webkit.org/blog/427/webkit-page-cache-i-the-basics/
    ws.setMaximumPagesInCache(10) # do not cache lots of pages
# EOF
|
import json
from collections import abc
# Item 26: use multiple inheritance for mix-ins only.
# A mixin that transforms a Python object into a dictionary ready for serialization.
class ToDictMixin(object):
    """Mixin that flattens an object graph into plain dicts/lists/scalars."""

    def to_dict(self):
        """Return a dictionary representation of this object."""
        return self._traverse('none', self.__dict__)

    def _traverse(self, key, obj):
        """Recursively convert ``obj`` into serializable primitives."""
        if isinstance(obj, ToDictMixin):
            return obj.to_dict()
        if isinstance(obj, dict):
            return {name: self._traverse(name, value)
                    for name, value in obj.items()}
        if isinstance(obj, (tuple, list)):
            # Tuples are deliberately flattened to lists (JSON has no tuple).
            return [self._traverse(key, element) for element in obj]
        # Any other object carrying a __dict__ is flattened the same way.
        if hasattr(obj, '__dict__'):
            return self._traverse(key, obj.__dict__)
        return obj
class BinaryTreeNode(ToDictMixin):
    """A binary-tree node that can serialize itself via ToDictMixin."""

    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right
class BinaryTreeWithParent(BinaryTreeNode):
    """Binary-tree node holding a back-reference to its parent node."""

    def __init__(self, value, left=None, right=None, parent=None):
        super().__init__(value, left, right)
        self.parent = parent

    def _traverse(self, key, obj):
        """Serialize as usual, but collapse the parent backref to its bare
        value so the cycle does not recurse forever."""
        if isinstance(obj, BinaryTreeNode) and key == 'parent':
            return obj.value
        return super()._traverse(key, obj)
class NamedSubTree(ToDictMixin):
    """Associates a human-readable name with a (possibly cyclic) subtree."""

    def __init__(self, name, tree):
        self.name, self.tree = name, tree
# Mixins can also play together
class ToJsonMixin(object):
    """Mixin adding JSON round-tripping on top of a to_dict()/constructor pair."""

    @classmethod
    def from_json(cls, kwargs):
        """Build an instance from a JSON object string of keyword arguments."""
        return cls(**json.loads(kwargs))

    def to_json(self):
        """Serialize via to_dict() (expected from a sibling mixin) to JSON."""
        return json.dumps(self.to_dict())
class BinaryTreeWithJson(BinaryTreeNode, ToJsonMixin):
    # Composes the two mixins: nodes gain to_dict() (via BinaryTreeNode's
    # ToDictMixin base) plus to_json()/from_json() (via ToJsonMixin).
    pass
class EqualityMixin(object):
    """Mixin providing value equality by comparing instance attribute dicts."""

    def __eq__(self, other):
        # Two instances compare equal when every attribute matches.
        # NOTE(review): assumes `other` exposes __dict__ — raises otherwise.
        return self.__dict__ == other.__dict__
class Switch(EqualityMixin):
    """Plain value object describing a network switch."""

    def __init__(self, ports, speed):
        self.ports, self.speed = ports, speed
class Machine(EqualityMixin):
    """Plain value object describing a single machine's hardware."""

    def __init__(self, ram, cpu, disk):
        self.ram, self.cpu, self.disk = ram, cpu, disk
class DatacenterRack(ToJsonMixin, ToDictMixin, EqualityMixin):
    """A rack of machines behind one switch, JSON round-trippable by value."""

    def __init__(self, switch, machines):
        # Keyword-argument dicts are hydrated into the value objects so the
        # class can be rebuilt directly from from_json().
        self.switch = Switch(**switch)
        self.machines = [Machine(**spec) for spec in machines]
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from edctf.api.models import challengeboard, category, challenge
from edctf.api.serializers import challengeboard_serializer, category_serializer, challenge_serializer
class challengeboard_view(APIView):
    """
    Manages challengeboard requests
    """
    permission_classes = (IsAuthenticated,)

    def get(self, request, id=None, format=None):
        """
        Gets all challengeboards or gets one challengeboard via
        challengeboards/:id.
        """
        if not id:
            # No id given: list every challengeboard in the database.
            boards = challengeboard.objects.all()
            serializer = challengeboard_serializer(boards, many=True, context={'request': request})
            return Response({
                'challengeboards': serializer.data,
            })
        # A specific board was requested: return it together with its
        # categories and every challenge inside those categories.
        boards = challengeboard.objects.filter(id=id)
        boards_serialized = challengeboard_serializer(boards, many=True, context={'request': request})
        categories = category.objects.filter(challengeboard=boards.first())
        categories_serialized = category_serializer(categories, many=True, context={'request': request})
        challenges = []
        for cat in categories:
            challenges += challenge.objects.filter(category=cat)
        challenges_serialized = challenge_serializer(challenges, many=True, context={'request': request})
        return Response({
            'challengeboards': boards_serialized.data,
            'categories': categories_serialized.data,
            'challenges': challenges_serialized.data,
        })
|
"""
WSGI config for Incubator project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of anot | her
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "Incubator.settings"
# (setdefault keeps any value the server environment already exported).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Incubator.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
#!/usr/bin/env python
#
# Copyright 2015, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import unittest
import environment
import utils
import tablet
# shards
# Module-level tablet fixtures: one master and one rdonly tablet per shard.
shard_0_master = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_rdonly = tablet.Tablet()
def setUpModule():
  """Start the topo server, per-tablet MySQL, vtctld and vtgate."""
  try:
    environment.topo_server().setup()
    procs = [
        shard_0_master.init_mysql(),
        shard_0_rdonly.init_mysql(),
        shard_1_master.init_mysql(),
        shard_1_rdonly.init_mysql(),
    ]
    utils.Vtctld().start()
    utils.VtGate().start()
    utils.wait_procs(procs)
  except:
    # Deliberately bare: tear everything down on *any* failure,
    # then re-raise so the harness still reports the error.
    tearDownModule()
    raise
def tearDownModule():
  """Stop MySQL and the topo server, then remove temp files and tablet trees."""
  if utils.options.skip_teardown:
    return
  all_tablets = [shard_0_master, shard_0_rdonly,
                 shard_1_master, shard_1_rdonly]
  utils.wait_procs([t.teardown_mysql() for t in all_tablets],
                   raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()
class TestCustomSharding(unittest.TestCase):
  """End-to-end test of the common operations on a custom sharded keyspace."""

  def _insert_data(self, shard, start, count, table='data'):
    # Insert `count` rows with consecutive ids beginning at `start` into the
    # given shard, going through vtgate.
    sql = 'insert into %s(id, name) values (:id, :name)' % table
    for x in xrange(count):
      bindvars = {
        'id': start+x,
        'name': 'row %d' % (start+x),
      }
      utils.vtgate.execute_shard(sql, 'test_keyspace', shard,
                                 bindvars=bindvars)

  def _check_data(self, shard, start, count, table='data'):
    # Read back the rows written by _insert_data and verify their contents.
    sql = 'select name from %s where id=:id' % table
    for x in xrange(count):
      bindvars = {
        'id': start+x,
      }
      qr = utils.vtgate.execute_shard(sql, 'test_keyspace', shard,
                                      bindvars=bindvars)
      self.assertEqual(len(qr['Rows']), 1)
      v = qr['Rows'][0][0]
      self.assertEqual(v, 'row %d' % (start+x))

  def test_custom_end_to_end(self):
    """Runs through the common operations of a custom sharded keyspace.
    Tests creation with one shard, schema change, reading / writing
    data, adding one more shard, reading / writing data from both
    shards, applying schema changes again, and reading / writing data
    from both shards again.
    """
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    # start the first shard only for now
    shard_0_master.init_tablet('master', 'test_keyspace', '0')
    shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '0')
    for t in [shard_0_master, shard_0_rdonly]:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None)
    for t in [shard_0_master, shard_0_rdonly]:
      t.wait_for_vttablet_state('SERVING')
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
                     shard_0_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    # One shard should now be serving for all three tablet types.
    ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
    self.assertEqual(len(ks['Partitions']['master']['ShardReferences']), 1)
    self.assertEqual(len(ks['Partitions']['rdonly']['ShardReferences']), 1)
    s = utils.run_vtctl_json(['GetShard', 'test_keyspace/0'])
    self.assertEqual(len(s['served_types']), 3)
    # create a table on shard 0
    sql = '''create table data(
id bigint auto_increment,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
                    auto_log=True)
    # insert data on shard 0
    self._insert_data('0', 100, 10)
    # re-read shard 0 data
    self._check_data('0', 100, 10)
    # create shard 1
    shard_1_master.init_tablet('master', 'test_keyspace', '1')
    shard_1_rdonly.init_tablet('rdonly', 'test_keyspace', '1')
    for t in [shard_1_master, shard_1_rdonly]:
      t.start_vttablet(wait_for_state=None)
    for t in [shard_1_master, shard_1_rdonly]:
      t.wait_for_vttablet_state('NOT_SERVING')
    s = utils.run_vtctl_json(['GetShard', 'test_keyspace/1'])
    self.assertEqual(len(s['served_types']), 3)
    utils.run_vtctl(['InitShardMaster', 'test_keyspace/1',
                     shard_1_master.tablet_alias], auto_log=True)
    utils.run_vtctl(['CopySchemaShard', shard_0_rdonly.tablet_alias,
                     'test_keyspace/1'], auto_log=True)
    for t in [shard_1_master, shard_1_rdonly]:
      utils.run_vtctl(['RefreshState', t.tablet_alias], auto_log=True)
      t.wait_for_vttablet_state('SERVING')
    # rebuild the keyspace serving graph now that the new shard was added
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    # insert data on shard 1
    self._insert_data('1', 200, 10)
    # re-read shard 1 data
    self._check_data('1', 200, 10)
    # create a second table on all shards
    sql = '''create table data2(
id bigint auto_increment,
name varchar(64),
primary key (id)
) Engine=InnoDB'''
    utils.run_vtctl(['ApplySchema', '-sql=' + sql, 'test_keyspace'],
                    auto_log=True)
    # insert and read data on all shards
    self._insert_data('0', 300, 10, table='data2')
    self._insert_data('1', 400, 10, table='data2')
    self._check_data('0', 300, 10, table='data2')
    self._check_data('1', 400, 10, table='data2')
    # reload schema everywhere so the QueryService knows about the tables
    for t in [shard_0_master, shard_0_rdonly, shard_1_master, shard_1_rdonly]:
      utils.run_vtctl(['ReloadSchema', t.tablet_alias], auto_log=True)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
    self.assertEqual(len(ks['Partitions']['master']['ShardReferences']), 2)
    self.assertEqual(len(ks['Partitions']['rdonly']['ShardReferences']), 2)
    # Now test SplitQuery API works (used in MapReduce usually, but bringing
    # up a full MR-capable cluster is too much for this test environment)
    sql = 'select id, name from data'
    s = utils.vtgate.split_query(sql, 'test_keyspace', 4)
    self.assertEqual(len(s), 4)
    shard0count = 0
    shard1count = 0
    for q in s:
      if q['QueryShard']['Shards'][0] == '0':
        shard0count += 1
      if q['QueryShard']['Shards'][0] == '1':
        shard1count += 1
    self.assertEqual(shard0count, 2)
    self.assertEqual(shard1count, 2)
    # run the queries, aggregate the results, make sure we have all rows
    rows = {}
    for q in s:
      qr = utils.vtgate.execute_shard(
          q['QueryShard']['Sql'],
          'test_keyspace', ','.join(q['QueryShard']['Shards']),
          tablet_type='master', bindvars=q['QueryShard']['BindVariables'])
      for r in qr['Rows']:
        id = int(r[0])
        rows[id] = r[1]
    self.assertEqual(len(rows), 20)
    expected = {}
    for i in xrange(10):
      expected[100 + i] = 'row %d' % (100 + i)
      expected[200 + i] = 'row %d' % (200 + i)
    self.assertEqual(rows, expected)
# Entry point: delegate flag parsing and test running to the vitess harness.
if __name__ == '__main__':
  utils.main()
|
#!/bin/python
import sys
from decimal import Decimal, getcontext,Context
from math import pi as PI
# 60-digit Decimal pi; deliberately rebinds the float `pi`/`PI` imported above.
pi = Context(prec=60).create_decimal('3.1415926535897932384626433832795028841971693993751')
PI = pi
def calc(fun, n):
    """Evaluate a continued fraction a0 + b1/(a1 + b2/(a2 + ...)) to depth n.

    ``fun(i)`` must return the pair (a_i, b_i): the i-th partial denominator
    and numerator.  The fraction is evaluated bottom-up (from the innermost
    term outward) with Decimal arithmetic for numerical stability.
    Returns a Decimal.
    """
    temp = Decimal("0.0")
    # range() works on both Python 2 and 3; the original used the
    # Python-2-only xrange() builtin.
    for ni in range(n + 1, 0, -1):
        (a, b) = fun(ni)
        temp = Decimal(b) / (a + temp)
    return fun(0)[0] + temp
def fpi(n):
    """Partial-fraction pair (a_n, b_n) of Lange's continued fraction for pi."""
    a_n = 6 if n > 0 else 3
    b_n = (2 * n - 1) ** 2
    return (a_n, b_n)
#print "%.50f"%(calc(fpi, 1001))
#mini,maxi = raw_input().strip().split(' ')
mini,maxi = 200,231#[long(mini),long(maxi)]
# your code goes here
# Search every denominator d in [mini, maxi] for the fraction n/d that is
# closest to pi, tracking the best (numerator, denominator) pair seen so far.
minifraction = (3,1)
minidecimal = Decimal(3.0)
#print PI
for d in xrange(mini,maxi+1):
    #print d
    # n/d is the best under-approximation for this d; (n+1)/d the one above.
    n = int(pi*d)
    d1 = n/Decimal(d)
    d2 = (n+1)/Decimal(d)
    #print n,d,d1,d2
    if abs(d1-pi)<abs(d2-pi):
        if abs(d1-pi)<abs(minidecimal-pi):
            minifraction = (n,d)
            minidecimal = n/Decimal(d)
    if abs(d1-pi)>abs(d2-pi):
        if abs(d2-pi)<abs(minidecimal-pi):
            #print n,d,d1,d2
            minifraction = (n+1,d)
            minidecimal = (n+1)/Decimal(d)
#print minifraction
# NOTE: Python 2 print statement — this whole script targets Python 2
# (see the xrange() call above).
print "%d/%d"%(minifraction[0],minifraction[1])
|
from django.db.backends import BaseDatabaseClient
from django.conf i | mport settings
import os
class Datab | aseClient(BaseDatabaseClient):
executable_name = 'sqlite3'
def runshell(self):
args = ['', settings.DATABASE_NAME]
os.execvp(self.executable_name, args)
|
# Stellar Magnate - A space-themed commodity trading game
# Copyright (C) 2017 Toshio Kuratomi <toshio@fedoraproject.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with thi | s program. If not, see <http://www.gnu.org/licenses/ | >.
"""Utility functions for dealing with numbers"""
import locale
def format_number(number, max_chars=7):
    """
    Format a number in a human readable form.  This adds locale-specific
    separators.  If the number is too long, use scientific notation.

    :arg number: The number to format.
    :kwarg max_chars: The maximum number of characters a number can take on
        the screen before it is turned into scientific notation.
    :returns: The formatted number as a string.
    """
    # locale.format() was deprecated in Python 3.7 and removed in 3.12;
    # format_string() behaves identically for a plain '%d' format.
    formatted_number = locale.format_string('%d', number, grouping=True)
    if len(formatted_number) > max_chars:
        formatted_number = '{:.1E}'.format(number)
    return formatted_number
|
from baseplate.events import FieldKind
from pylons import app_globals as g
from r2.lib.eventcollector import (
EventQueue,
Event,
squelch_exceptions,
)
from r2.lib.utils import sampled
from r2.models import (
FakeSubreddit,
)
class AdEvent(Event):
    """Ad-serving event that records the client's Do-Not-Track preference."""

    @classmethod
    def get_context_data(cls, request, context):
        data = super(AdEvent, cls).get_context_data(request, context)
        dnt = request.headers.get("DNT")
        if dnt is not None:
            # "1" is the only header value that signals tracking refusal.
            data["dnt"] = (dnt == "1")
        return data
class AdzerkAPIEvent(Event):
    """Event describing a single request made to the Adzerk API."""

    def add_target_fields(self, thing):
        # Record which Thing the API call operated on.
        self.add("target_fullname", thing._fullname)
        self.add("target_type", thing.__class__.__name__)
        self.add("is_deleted", thing._deleted)

    def add_caller_fields(self, user):
        if not user:
            # No user means the call came from an automated job.
            self.add("is_automated", True)
            return
        self.add("caller_user_id", user._id)
        self.add("caller_user_name", user.name)

    def add_error_fields(self, error):
        if error:
            self.add("error_status_code", error.status_code)
            self.add("error_body", error.response_body)
class AdEventQueue(EventQueue):
    """Event queue producing ad-serving and Adzerk API instrumentation events."""
    @squelch_exceptions
    @sampled("events_collector_ad_serving_sample_rate")
    def ad_request(
        self,
        keywords,
        properties,
        platform,
        placements,
        is_refresh,
        subreddit=None,
        request=None,
        context=None,
    ):
        """Create an `ad_request` for event-collector.
        keywords: Array of keywords used to select the ad.
        properties: Object contain custom targeting parameters.
        platform: The platform the ad was requested for.
        placements: Array of placement objects (name, types) to be filled.
        is_refresh: Whether or not the request is for the initial ad or a
            refresh after refocusing the page.
        subreddit: The Subreddit of the ad was displayed on.
        request, context: Should be pylons.request & pylons.c respectively;
        """
        event = AdEvent(
            topic="ad_serving_events",
            event_type="ss.ad_request",
            request=request,
            context=context,
        )
        # keywords are case insensitive, normalize and sort them
        # for easier equality testing.
        keywords = sorted(k.lower() for k in keywords)
        event.add("keywords", keywords)
        event.add("properties", properties)
        event.add("platform", platform)
        event.add("placements", placements)
        event.add("is_refresh", is_refresh)
        # FakeSubreddit (e.g. the front page) carries no real subreddit fields.
        if not isinstance(subreddit, FakeSubreddit):
            event.add_subreddit_fields(subreddit)
        self.save_event(event)
    @squelch_exceptions
    @sampled("events_collector_ad_serving_sample_rate")
    def ad_response(
        self,
        keywords,
        properties,
        platform,
        placement_name,
        placement_type,
        adserver_ad_id,
        adserver_campaign_id,
        adserver_creative_id,
        adserver_flight_id,
        impression_id,
        matched_keywords,
        rate_type,
        clearing_price,
        link_fullname=None,
        campaign_fullname=None,
        subreddit=None,
        priority=None,
        ecpm=None,
        request=None,
        context=None,
    ):
        """Create an `ad_response` for event-collector.
        keywords: Array of keywords used to select the ad.
        properties: Object contain custom targeting parameters.
        platform: The platform the ad was requested for.
        placement_name: The identifier of the placement.
        placement_type: The type of placement the ad is.
        adserver_ad_id: Unique id of the ad response (from the ad server).
        adserver_campaign_id: Unique id of the ad campaign (from the ad server).
        adserver_creative_id: Unique id of the ad creative (from the ad server).
        adserver_flight_id: Unique id of the ad flight (from the ad server).
        impression_id: Unique id of the impression.
        matched_keywords: An array of the keywords which matched for the ad.
        rate_type: Flat/CPM/CPC/etc.
        clearing_price: What was paid for the rate type.
        link_fullname: The fullname of the promoted link.
        campaign_fullname: The fullname of the PromoCampaign.
        subreddit: The Subreddit of the ad was displayed on.
        priority: The priority name of the ad.
        ecpm: The effective cpm of the ad.
        request, context: Should be pylons.request & pylons.c respectively;
        """
        event = AdEvent(
            topic="ad_serving_events",
            event_type="ss.ad_response",
            request=request,
            context=context,
        )
        event.add("properties", properties)
        event.add("platform", platform)
        event.add("placement_name", placement_name)
        event.add("placement_type", placement_type)
        event.add("adserver_ad_id", adserver_ad_id)
        event.add("adserver_campaign_id", adserver_campaign_id)
        event.add("adserver_creative_id", adserver_creative_id)
        event.add("adserver_flight_id", adserver_flight_id)
        # Marked high-cardinality so the collector can treat it differently.
        event.add("impression_id",
                  impression_id, kind=FieldKind.HIGH_CARDINALITY)
        event.add("rate_type", rate_type)
        event.add("clearing_price", clearing_price)
        event.add("link_fullname", link_fullname)
        event.add("campaign_fullname", campaign_fullname)
        event.add("priority", priority)
        event.add("ecpm", ecpm)
        # keywords are case insensitive, normalize and sort them
        # for easier equality testing.
        keywords = sorted(k.lower() for k in keywords)
        event.add("keywords", keywords)
        # don't send empty arrays.
        if matched_keywords:
            matched_keywords = sorted(k.lower() for k in matched_keywords)
            event.add("matched_keywords", matched_keywords)
        # FakeSubreddit (e.g. the front page) carries no real subreddit fields.
        if not isinstance(subreddit, FakeSubreddit):
            event.add_subreddit_fields(subreddit)
        self.save_event(event)
    @squelch_exceptions
    def adzerk_api_request(
        self,
        request_type,
        thing,
        request_body,
        triggered_by=None,
        additional_data=None,
        request_error=None,
    ):
        """
        Create an `adzerk_api_events` event for event-collector.
        request_type: The type of request being made
        thing: The `Thing` which the request data is derived from
        request_body: The JSON payload to be sent to adzerk
        triggered_by: The user who triggered the API call
        additional_data: A dict of any additional meta data that may be
            relevant to the request
        request_error: An `adzerk_api.AdzerkError` if the request fails
        """
        event = AdzerkAPIEvent(
            topic='adzerk_api_events',
            event_type='ss.%s_request' % request_type,
        )
        event.add_target_fields(thing)
        event.add_caller_fields(triggered_by)
        event.add_error_fields(request_error)
        event.add("request_body", request_body)
        if additional_data:
            # Python 2 dict iteration (this module targets the Py2 reddit stack).
            for key, value in additional_data.iteritems():
                event.add(key, value)
        self.save_event(event)
|
import inspect
import os.path
import django
import SocketServer
import sys
from django.conf import settings
from django.views.debug import linebreak_iter
# Figure out some paths
# Real (symlink-resolved) install dirs, used by tidy_stacktrace() below to
# recognize and hide frames that belong to Django itself or to SocketServer.
django_path = os.path.realpath(os.path.dirname(django.__file__))
socketserver_path = os.path.realpath(os.path.dirname(SocketServer.__file__))
def ms_from_timedelta(td):
    """
    Given a timedelta object, returns a float representing milliseconds
    """
    # Include the days component, which the original silently dropped
    # (a timedelta normalizes to days/seconds/microseconds, so any span
    # >= 24h lost its whole-day part).  Written as int + int + float so
    # sub-day inputs round exactly as before.
    return (td.days * 86400000) + (td.seconds * 1000) + (td.microseconds / 1000.0)
def tidy_stacktrace(stack):
    """
    Clean up stacktrace and remove all entries that:
        1. Are part of Django (except contrib apps)
        2. Are part of SocketServer (used by Django's dev server)
        3. Are the last entry (which is part of our stacktracing code)

    ``stack`` should be a list of frame tuples from ``inspect.stack()``
    """
    hide_django_sql = getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {}) \
        .get('HIDE_DJANGO_SQL', True)
    trace = []
    for frame, path, line_no, func_name, text in (record[:5] for record in stack):
        real_path = os.path.realpath(path)
        # Frames can opt out of display by defining __traceback_hide__.
        if '__traceback_hide__' in frame.f_locals:
            continue
        # Hide Django internals (but keep contrib apps) when configured to.
        if hide_django_sql and django_path in real_path \
                and 'django/contrib' not in real_path:
            continue
        if socketserver_path in real_path:
            continue
        text = (''.join(text)).strip() if text else ''
        trace.append((path, line_no, func_name, text))
    return trace
def get_template_info(source, context_lines=3):
    """Map a template error span to its line plus surrounding context lines.

    ``source`` is a (origin, (start, end)) pair, where start/end are character
    offsets of the offending range inside the template's text.
    Returns {'name': template name, 'context': list of line dicts}.
    """
    line = 0
    upto = 0
    source_lines = []
    # before = during = after = ""
    origin, (start, end) = source
    template_source = origin.reload()
    # `upto`..`next` bound the current line's character span, so the line
    # containing (start, end) can be identified while collecting all lines.
    for num, next in enumerate(linebreak_iter(template_source)):
        if start >= upto and end <= next:
            line = num
            # before = template_source[upto:start]
            # during = template_source[start:end]
            # after = template_source[end:next]
        source_lines.append((num, template_source[upto:next]))
        upto = next
    # Slice out the highlighted line plus context_lines on either side.
    top = max(1, line - context_lines)
    bottom = min(len(source_lines), line + 1 + context_lines)
    context = []
    for num, content in source_lines[top:bottom]:
        context.append({
            'num': num,
            'content': content,
            'highlight': (num == line),
        })
    return {
        'name': origin.name,
        'context': context,
    }
def get_name_from_obj(obj):
    """Return a best-effort dotted name ("module.name") for any object."""
    if hasattr(obj, '__name__'):
        name = obj.__name__
    elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
        name = obj.__class__.__name__
    else:
        name = '<unknown>'
    if not hasattr(obj, '__module__'):
        return name
    # Qualify with the defining module when the object advertises one.
    return '%s.%s' % (obj.__module__, name)
def getframeinfo(frame, context=1):
    """
    Get information about a frame or traceback object.

    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line.

    This originally comes from ``inspect`` but is modified to handle issues
    with ``findsource()``.
    """
    if inspect.istraceback(frame):
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not inspect.isframe(frame):
        raise TypeError('arg is not a frame or traceback object')
    filename = inspect.getsourcefile(frame) or inspect.getfile(frame)
    if context > 0:
        start = lineno - 1 - context//2
        try:
            lines, lnum = inspect.findsource(frame)
        except (IOError, IndexError):
            lines = index = None
        else:
            # Unlike stock inspect.getframeinfo(), clamp `start` so a short
            # source listing cannot produce an out-of-range slice.
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return inspect.Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def get_stack(context=1):
    """
    Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context.

    Modified version of ``inspect.stack()`` which calls our own ``getframeinfo()``
    """
    # Start one level up so this helper's own frame is excluded.
    frame = sys._getframe(1)
    records = []
    while frame is not None:
        records.append((frame,) + getframeinfo(frame, context))
        frame = frame.f_back
    return records
import env
import numpy as np
import metaomr
import metaomr.kanungo as kan
from metaomr.page import Page
import glob
import pandas as pd
import itertools
import os.path
import sys
from datetime import datetime
from random import random, randint
# Ideal (undegraded) modern-notation test pages; the staff-removed variants
# ("nostaff") are excluded.
IDEAL = [path for path in sorted(glob.glob('testset/modern/*.png'))
         if 'nostaff' not in path]
def random_params():
    """Draw a random Kanungo degradation parameter tuple (nu, a0, a, b0, b, k).

    Each noise component (nu, the a-pair, the b-pair) is disabled outright
    with probability 1/4; otherwise it is drawn uniformly from its range.
    """
    nu = 0 if random() < 0.25 else random() * 0.05
    if random() < 0.25:
        a0 = a = 0
    else:
        a0 = random() * 0.2
        a = 0.5 + random() * 2
    if random() < 0.25:
        b0 = b = 0
    else:
        b0 = random() * 0.2
        b = 0.5 + random() * 2
    k = randint(0, 4)
    return nu, a0, a, b0, b, k
# Result columns: the 6 true parameters, the 6 estimated ones, then the
# optimizer's objective value, wall-clock time, exit status and call count.
columns = pd.MultiIndex.from_product([['real', 'estimate'], 'nu a0 a b0 b k'.split()])
columns = columns.append(pd.MultiIndex.from_product([['estimate'],['stat','time','status','nfev']]))
cols = []
results = []
fun = 'ks'
method = 'Nelder-Mead'
for image in IDEAL:
    name = os.path.basename(image).split('.')[0]
    page, = metaomr.open(image)
    kimg = kan.KanungoImage(kan.normalized_page(page)[0])
    # Three random degradations per page, each estimated at two maxfev budgets.
    for i in xrange(3):
        params = random_params()
        synth = Page(kimg.degrade(params))
        synth.staff_dist = 8
        for maxfev in [25, 50]:
            start = datetime.now()
            est_params = kan.est_parameters(synth, test_fn=kan.test_hists_ks if fun == 'ks' else kan.test_hists_chisq, opt_method=method, maxfev=maxfev)
            end = datetime.now()
            cols.append((name, fun, maxfev, i))
            results.append(list(params) + list(est_params.x) + [est_params.fun, (end - start).total_seconds(), est_params.status, est_params.nfev])
            # Progress dot per optimization run.
            sys.stderr.write('.')
res = pd.DataFrame(results, columns=columns)
res.index = pd.MultiIndex.from_tuples(cols)
res.index.names = 'doc test maxfev num'.split()
res.to_csv('kanungo_eval.csv')
sys.stderr.write('\n')
|
#!/bin/env/python
#
# This file is part of CRISIS, an economics simulator.
#
# Copyright (C) 2015 John Kieran Phillips
#
# CRISIS is free software: you can redistribute i | t and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CRISIS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CRISIS. If not, see <http://www.gnu.org/licenses/>.
import math
class Market:
    """A toy single-buyer goods market.

    Sellers post ask prices and supply, the buyer posts bid prices and
    demand, and process() clears each goods type at the bid/ask midpoint.
    """

    def __init__(self, buyer):
        self.buyer = buyer
        self.askPrices = {}
        self.bidPrices = {}
        self.supply = {}
        self.demand = {}

    def setAskPrice(self, type, price):
        self.askPrices[type] = price

    def setBidPrice(self, type, price):
        self.bidPrices[type] = price

    def setSupply(self, type, supply):
        self.supply[type] = supply

    def setDemand(self, type, demand):
        self.demand[type] = demand

    def process(self, type):
        """Clear the market for one goods type, crediting and stocking the buyer."""
        # dict.get() replaces the Python-2-only dict.has_key() idiom.
        demandFor = self.demand.get(type, 0)
        supplyOf = self.supply.get(type, 0)
        askPrice = self.askPrices.get(type, 0)
        bidPrice = self.bidPrices.get(type, 0)
        # Trades clear at the midpoint price; volume is limited by the short side.
        price = (bidPrice + askPrice) / 2.
        trade = min(demandFor, supplyOf)
        self.buyer.credit(price * trade)
        self.buyer.addGoods(type, trade)
        # Bug fix: the original message swapped units and total cost.
        print("bought {} units of goods, total cost {}".format(trade, price * trade))

    def clear(self):
        """Reset all posted prices, supply and demand.

        Bug fix: the original definition omitted ``self`` and raised a
        TypeError whenever it was called on an instance.
        """
        self.askPrices.clear()
        self.bidPrices.clear()
        self.supply.clear()
        self.demand.clear()
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
def test_col_names(cleaned_csv, tmcf):
" | ""Check if all the column names specified in the template mcf
is found in the CSV file."""
cols = pd.read_csv(cleaned_csv, nrows=0).columns
with open(tmcf, "r") as file:
for line in file:
if " C:" in line:
col_name = line[:-1].split("->")[1]
asser | t col_name in cols
|
lue is provided for the `galaxy_instance`, use the default provided via
`load_input_file`.
"""
if not gi:
gi = galaxy_instance()
return ToolShedClient(gi)
def the_same_tool(tool_1_info, tool_2_info):
    """
    Given two dicts containing info about tools, determine if they are the same
    tool.

    Each of the dicts must have the following keys: `name`, `owner`, and
    (either `tool_shed` or `tool_shed_url`).
    """
    shed_1 = tool_1_info.get('tool_shed', tool_1_info.get('tool_shed_url', None))
    shed_2 = tool_2_info.get('tool_shed', tool_2_info.get('tool_shed_url', None))
    if tool_1_info.get('name') != tool_2_info.get('name'):
        return False
    if tool_1_info.get('owner') != tool_2_info.get('owner'):
        return False
    # The shed may be recorded with or without a scheme/prefix, so
    # substring-match in both directions.
    if shed_1 in shed_2 or shed_2 in shed_1:
        return True
    return False
def installed_tool_revisions(gi=None, omit=None):
    """
    Get a list of tool revisions installed from a Tool Shed on a Galaxy instance.
    Included are all the tool revisions that were installed from a Tool
    Shed and are available from `/api/tool_shed_repositories` url on the
    given instance of Galaxy.
    :type gi: GalaxyInstance object
    :param gi: A GalaxyInstance object as returned by `galaxy_instance` method.
    :type omit: list of strings
    :param omit: A list of strings that, if found in a tool name, will result
                 in the tool not being included in the returned list.
    :rtype: list of dicts
    :return: Each dict in the returned list will have the following keys:
             `name`, `owner`, `tool_shed_url`, `revisions`.
    .. seealso:: this method returns a subset of data returned by
                 `installed_tools` function
    """
    if not omit:
        omit = []
    tsc = tool_shed_client(gi)
    installed_revisions_list = []
    itl = tsc.get_repositories()
    # Only fully-installed repositories are considered; other states
    # (cloning, error, uninstalled...) are ignored.
    for it in itl:
        if it['status'] == 'Installed':
            skip = False
            # Check if we already processed this tool and, if so, add the new
            # revision to the existing list entry
            for ir in installed_revisions_list:
                if the_same_tool(it, ir):
                    ir['revisions'].append(it.get('changeset_revision', None))
                    skip = True
            # Check if the repo name is contained in the 'omit' list
            for o in omit:
                if o in it['name']:
                    skip = True
            # We have not processed this tool so create a list entry
            if not skip:
                ti = {'name': it['name'],
                      'owner': it['owner'],
                      'revisions': [it.get('changeset_revision', None)],
                      'tool_shed_url': 'https://' + it['tool_shed']}
                installed_revisions_list.append(ti)
    return installed_revisions_list
def installed_tools(gi, omit=None):
    """
    Get a list of tools on a Galaxy instance.
    :type gi: GalaxyInstance object
    :param gi: A GalaxyInstance object as returned by `galaxy_instance` method.
    :type omit: list of strings
    :param omit: A list of strings that, if found in a tool name, will result
                 in the tool not being included in the returned list.
    :rtype: dict
    :return: The returned dictionary contains the following keys, each
             containing a list of dictionaries:
             - `tool_panel_shed_tools` with a list of tools available in the
               tool panel that were installed on the target Galaxy instance
               from the Tool Shed;
             - `tool_panel_custom_tools` with a list of tools available in
               the tool panel that were not installed via the Tool Shed;
             - `shed_tools` with a list of tools returned from the
               `installed_tool_revisions` function and complemented with a
               `tool_panel_section_id` key as matched with the list of tools
               from the first element of the returned triplet. Note that the
               two lists (`shed_tools` and `tool_panel_shed_tools`) are likely
               to be different and hence not every element in the `shed_tools`
               will have the `tool_panel_section_id`!
    .. seealso:: `installed_tool_revisions` (this function also returns the
                 output of the `installed_tool_revisions` function, as
                 `shed_tools` key).
    """
    if not omit:
        omit = []
    tp_tools = []  # Tools available in the tool panel and installed via a TS
    custom_tools = []  # Tools available in the tool panel but custom-installed
    tl = gi.tools.get_tool_panel()  # In-panel tool list
    for ts in tl:  # ts -> tool section
        # print "%s (%s): %s" % (ts['name'], ts['id'], len(ts.get('elems', [])))
        # Parse the tool panel to get the tool lists
        for t in ts.get('elems', []):
            # Tool ID is either a tool name (in case of custom-installed tools)
            # or a URI (in case of Tool Shed-installed tools) so differentiate
            # among those
            tid = t['id'].split('/')
            if len(tid) > 3:
                skip = False
                # Check if we already encountered this tool
                for added_tool in tp_tools:
                    if tid[3] in added_tool['name']:
                        skip = True
                # Check if the repo name is contained in the 'omit' list
                for o in omit:
                    if o in tid[3]:
                        skip = True
                if not skip:
                    tp_tools.append({'tool_shed_url': "https://{0}".format(tid[0]),
                                     'owner': tid[2],
                                     'name': tid[3],
                                     'tool_panel_section_id': ts['id']})
                # print "\t%s, %s, %s" % (tid[0], tid[2], tid[3])
            else:
                # print "\t%s" % t['id']
                custom_tools.append(t['id'])
    # Match tp_tools with the tool list available from the Tool Shed Clients on
    # the given Galaxy instance and add tool section IDs to it
    ts_tools = installed_tool_revisions(gi, omit)  # Tool revisions installed via a TS
    for it in ts_tools:
        for t in tp_tools:
            if the_same_tool(it, t):
                it['tool_panel_section_id'] = t['tool_panel_section_id']
    return {'tool_panel_shed_tools': tp_tools,
            'tool_panel_custom_tools': custom_tools,
            'shed_tools': ts_tools}
def _list_tool_categories(tl):
"""
Given a list of dicts `tl` as returned by the `installed_tools` method and
where each list element holds a key `tool_panel_section_id`, return a list
of unique section IDs.
"""
category_list = []
for t in tl:
category_list.append(t.get('id'))
return set(category_list)
def _parse_cli_options():
"""
Parse command line options, returning `parse_args` from `ArgumentParser`.
"""
parser = ArgumentParser(usage="usage: python %(prog)s <options>")
parser.add_argument("-d", "--dbkeysfile",
dest="dbkeys_list_file",
help="Reference genome dbkeys to install (see "
"dbkeys_list.yaml.sample)",)
parser.add_argument("-g", "--galaxy",
dest="galaxy_url",
help="Target Galaxy instance URL/IP address (required "
"if not defined in the tools list file)",)
parser.add_argument("-a", "--apikey",
dest="api_key",
help="Galaxy admin user API key (required if not "
"defined in the tools list file)",)
parser.add_argument("-t", "--toolsfile",
dest="tool_list_file",
help="Tools file to use (see tool_list.yaml.sample)",)
parser.add_argument("-y", "--yaml_tool",
dest="tool_yaml",
help="Install tool represented by yaml string",)
parser.add_argument("--name",
help="The name of the tool to install (onl |
(h0_new, c0_new) = hidden_
beam_seq = Variable(torch.LongTensor(
batch_size, beam_size, trg_len+1).fill_(vocab2id['<pad>'])).cuda()
beam_seq[:, :, 0] = vocab2id['<s>']
beam_prb = torch.FloatTensor(batch_size, beam_size).fill_(0.0)
last_wd = Variable(torch.LongTensor(
batch_size, beam_size, 1).fill_(vocab2id['<s>'])).cuda()
beam_h_attn = Variable(torch.FloatTensor(
trg_len, batch_size, beam_size, h_attn_new.size(1)).fill_(0.0)).cuda()
for j in range(trg_len):
last_emb = model_emb(last_wd.view(-1, 1))
output_s2s, (h0, c0), h_attn, past_attn = model_s2s.forward_onestep_decoder1(
j,
last_emb,
(h0_new, c0_new),
h_attn_new,
encoder_hy,
p_gen_new,
past_attn_new,
pt_idx
)
p_gen_new.fill_(0.0)
(h0, c0) = repackage_hidden((h0, c0))
prob, wds = output_s2s.data.topk(k=beam_size)
prob = prob.view(batch_size, beam_size, prob.size(1), prob.size(2))
wds = wds.view(batch_size, beam_size, wds.size(1), wds.size(2))
if j == 0:
beam_prb = prob[:, 0, 0]
beam_seq[:, :, 1] = wds[:, 0, 0]
last_wd = Variable(wds[:, 0, 0].unsqueeze(2).clone()).cuda()
h0_new = h0
c0_new = c0
h_attn_new = h_attn
past_attn_new = past_attn
beam_h_attn[j] = h_attn_new.view(batch_size, beam_size, h_attn_new.size(-1))
continue
cand_seq = tensor_transformer(beam_seq, batch_size, beam_size)
cand_seq[:, :, j+1] = wds.squeeze(2).view(batch_size, -1)
cand_last_wd = wds.squeeze(2).view(batch_size, -1)
cand_prob = beam_prb.unsqueeze(1).repeat(1, beam_size, 1).transpose(1,2)
cand_prob += prob[:, :, 0]
cand_prob = cand_prob.contiguous().view(batch_size, beam_size*beam_size)
h0_new = h0_new.view(batch_size, beam_size, h0_new.size(-1))
c0_new = c0_new.view(batch_size, beam_size, c0_new.size(-1))
h_attn_new = h_attn_new.view(batch_size, beam_size, h_attn_new.size(-1))
past_attn_new = past_attn_new.view(batch_size, beam_size, past_attn_new.size(-1))
h0 = h0.view(batch_size, beam_size, h0.size(-1))
h0 = tensor_transformer(h0, batch_size, beam_size)
c0 = c0.view(batch_size, beam_size, c0.size(-1))
c0 = tensor_transformer(c0, batch_size, beam_size)
h_attn = h_attn.view(batch_size, beam_size, h_attn.size(-1))
h_attn = tensor_transformer(h_attn, batch_size, beam_size)
past_attn = past_attn.view(batch_size, beam_size, past_attn.size(-1))
past_attn = tensor_transformer(past_attn, batch_size, beam_size)
tmp_prb, tmp_idx = cand_prob.topk(k=beam_size, dim=1)
for x in range(batch_size):
for b in range(beam_size):
last_wd[x, b] = cand_last_wd[x, tmp_idx[x, b]]
beam_seq[x, b] = cand_seq[x, tmp_idx[x, b]]
beam_prb[x, b] = tmp_prb[x, b]
h0_new[x, b] = h0[x, tmp_idx[x, b]]
c0_new[x, b] = c0[x, tmp_idx[x, b]]
h_attn_new[x, b] = h_attn[x, tmp_idx[x, b]]
past_attn_new[x, b] = past_attn[x, tmp_idx[x, b]]
beam_h_attn[j] = h_attn_new
h0_new = h0_new.view(-1, h0_new.size(-1))
c0_new = c0_new.view(-1, c0_new.size(-1))
h_attn_new = h_attn_new.view(-1, h_attn_new.size(-1))
past_attn_new = past_attn_new.view(-1, past_attn_new.size(-1))
return beam_seq, beam_prb, beam_h_attn
'''
second beam search
'''
def fast_beam_search_2(
model_emb,
model_s2s,
src_text_rep,
vocab2id,
batch_size,
beam_size,
trg_len,
encoder_hy,
hidden_,
h_attn21_new,
h_attn22_new,
p_gen21_new,
past_attn21_new,
past_attn22_new,
beam_h_attn1,
pt_idx
):
(h0_new, c0_new) = hidden_
beam_seq = Variable(torch.LongTensor(batch_size, beam_size, trg_len+1).fill_(vocab2id['<pad>'])).cuda()
beam_seq[:, :, 0] = vocab2id['<s>']
beam_prb = torch.FloatTensor(batch_size, beam_size).fill_(0.0)
last_wd = Variable(torch.LongTensor(batch_size, beam_size, 1).fill_(vocab2id['<s>'])).cuda()
for j in range(trg_len):
last_emb = model_emb(last_wd.view(-1, 1))
output_s2s, (h0, c0), h_attn21, h_attn22, past_attn21, past_attn22 = model_s2s.forward_onestep_decoder2(
j,
last_emb,
(h0_new, c0_new),
h_attn21_new,
h_attn22_new,
encoder_hy,
p_gen21_new,
past_attn21_new,
past_attn22_new,
beam_h_attn1,
pt_idx
)
p_gen21_new.fill_(0.0)
(h0, c0) = repackage_hidden((h0, c0))
prob, wds = output_s2s.data.topk(k=beam_size)
prob = prob.view(batch_size, beam_size, prob.size(1), prob.size(2))
wds = wds.view(batch_size, beam_size, wds.size(1), wds.size(2))
if j == 0:
beam_prb = prob[:, 0, 0]
beam_seq[:, :, 1] = wds[:, 0, 0]
last_wd = Variable(wds[:, 0, 0].unsqueeze(2).clone()).cuda()
h0_new = h0
c0_new = c0
h_attn21_new = h_attn21
h_attn22_new = h_attn22
past_attn21_new = past_attn21
past_attn22_new = past_attn22
continue
cand_seq = tensor_transformer(beam_seq, batch_size, beam_size)
cand_seq[:, :, j+1] = wds.squeeze(2).view(batch_size, - | 1)
cand_last_wd = wds.squeeze(2).view(batch_size, -1)
cand_prob = beam_prb.unsqueeze(1).repeat(1, beam_size, 1).transpose(1,2)
cand_prob += prob[:, :, 0]
cand_prob = cand_prob.contiguous().view(batch_size, beam | _size*beam_size)
h0_new = h0_new.view(batch_size, beam_size, h0_new.size(-1))
c0_new = c0_new.view(batch_size, beam_size, c0_new.size(-1))
h_attn21_new = h_attn21_new.view(batch_size, beam_size, h_attn21_new.size(-1))
h_attn22_new = h_attn22_new.view(batch_size, beam_size, h_attn22_new.size(-1))
past_attn21_new = past_attn21_new.view(batch_size, beam_size, past_attn21_new.size(-1))
past_attn22_new = past_attn22_new.view(batch_size, beam_size, past_attn22_new.size(-1))
h0 = h0.view(batch_size, beam_size, h0.size(-1))
h0 = tensor_transformer(h0, batch_size, beam_size)
c0 = c0.view(batch_size, beam_size, c0.size(-1))
c0 = tensor_transformer(c0, batch_size, beam_size)
h_attn21 = h_attn21.view(batch_size, beam_size, h_attn21.size(-1))
h_attn21 = tensor_transformer(h_attn21, batch_size, beam_size)
h_attn22 = h_attn22.view(batch_size, beam_size, h_attn22.size(-1))
h_attn22 = tensor_transformer(h_attn22, batch_size, beam_size)
past_attn21 = past_attn21.view(batch_size, beam_size, past_attn21.size(-1))
past_attn21 = tensor_transformer(past_attn21, batch_size, beam_size)
past_attn22 = past_attn22.view(batch_size, beam_size, past_attn22.size(-1))
past_attn22 = tensor_transformer(past_attn22, batch_size, beam_size)
tmp_prb, tmp_idx = cand_prob.topk(k=beam_size, dim=1)
for x in range(batch_size):
for b in range(beam_size):
last_wd[x, b] = cand_last_wd[x, tmp_idx[x, b]]
beam_seq[x, b] = cand_seq[x, tmp_idx[x, b]]
beam_prb[x, b] = tmp_prb[x, b]
h0_new[x, b] = h0[x, tmp_idx[x, b]]
c0_new[x, b] = c0[x, tmp_idx[x, b]]
h_attn21_new[x, b] = h_attn21[x, tmp_idx[x, b]]
h_attn22_new[x, b] = h_attn22[x, tmp_idx[x, b]]
past_attn21_new[x, b] = past_attn21[x, tmp_idx[x, b]]
past_attn22_new[x, b] = past_attn22[x, tmp_idx[x, b]]
h0_new = h0_new.view(-1, h0_new.size(-1))
c0_new = c0_new.view(-1, c0_new.size(-1))
h_attn21_new = h_attn21_new.view(-1, h_attn21_new.size(-1))
|
#coding:utf-8
"""
"""
class Task(object):
    """A single task record: identity, project, scheduling and status data."""

    def __init__(self, id_, project_name, title, serial_no, timelimit,
                 timestamp, note, status):
        # Plain value holder; every attribute is stored as given.
        self.id_ = id_
        self.project_name = project_name
        self.title = title
        self.serial_no = serial_no
        self.timelimit = timelimit
        self.timestamp = timestamp
        self.note = note
        self.status = status

    def __str__(self):
        # Render as "[id, project, title, serial, limit, created, note, status]".
        fields = (self.id_, self.project_name, self.title, self.serial_no,
                  self.timelimit, self.timestamp, self.note, self.status)
        return "[%s]" % ", ".join("%s" % field for field in fields)

    def get_id(self):
        return self.id_

    def get_project_name(self):
        return self.project_name

    def get_serial_no(self):
        return self.serial_no

    def get_title(self):
        return self.title

    def get_timelimit(self):
        return self.timelimit

    def get_status(self):
        return self.status

    def get_note(self):
        return self.note

    def get_created(self):
        # Creation time is stored under 'timestamp'.
        return self.timestamp
|
#-*- coding:utf-8 -*-
| import wx
if evt_handler == None:
evt_h | andler = wx.EvtHandler() |
#!/usr/bin/env python
# Extract activation bytes from a licenseForCustomerToken response file.
import traceback
import binascii
import sys

if __name__ == "__main__":
    if len(sys.argv) < 2:
        sys.stderr.write("Usage: %s <licenseForCustomerToken file>\n"
                         % sys.argv[0])
        sys.exit(-1)
    try:
        # Read the whole token file; close the handle promptly.
        with open(sys.argv[1], "rb") as fp:
            data = fp.read()
        if (b"BAD_LOGIN" in data or b"Whoops" in data) or \
                b"group_id" not in data:
            print(data)
            print("\nActivation failed! ;(")
            sys.exit(-1)
        # Key material starts just past the last "group_id...)" marker.
        k = data.rfind(b"group_id")
        l = data[k:].find(b")")
        keys = data[k + l + 1 + 1:]
        output_keys = []
        # each key is of 70 bytes (plus one separator byte per key)
        for i in range(0, 8):
            key = keys[i * 70 + i:(i + 1) * 70 + i]
            h = binascii.hexlify(bytes(key))
            h = [h[i:i + 2] for i in range(0, len(h), 2)]
            h = b",".join(h)
            output_keys.append(h)
    except SystemExit:
        # Propagate the deliberate exits above with their original codes
        # (the old ``sys.exit(e)`` re-raised with a mangled exit status).
        raise
    except Exception:
        # The original bare ``except:`` printed the traceback and then fell
        # through to a NameError on ``output_keys``; exit non-zero instead.
        traceback.print_exc()
        sys.exit(1)
    # only 4 bytes of output_keys[0] are necessary for decryption! ;)
    activation_bytes = output_keys[0].replace(b",", b"")[0:8]
    # get the endianness right (reverse string in pairs of 2);
    # b"".join works on both Python 2 and 3 (the old "".join broke on 3).
    activation_bytes = b"".join(reversed([activation_bytes[i:i + 2] for i in
                                          range(0, len(activation_bytes), 2)]))
    print(activation_bytes.decode("ascii"))
|
import click
from bitshares.amount import Amount
from .decorators import online, unlock
from .main import main, config
from .ui import print_tx
@main.group()
def htlc():
    # Parent click group for the HTLC subcommands defined below
    # (create, create_from_secret, redeem); it does nothing itself.
    pass
@htlc.command()
@click.argument("to")
@click.argument("amount")
@click.argument("symbol")
@click.option(
    "--type", type=click.Choice(["ripemd160", "sha1", "sha256", "hash160"]),
    default="sha256", prompt="Hash algorithm", show_default=True,
    help="Hash algorithm"
)
@click.option(
    "--hash", prompt="Hash (hex string)", hide_input=False, confirmation_prompt=True,
    help="Hash value as string of hex digits"
)
@click.option(
    "--expiration", default=60 * 60, prompt="Expiration (seconds)",
    help="Duration of HTLC in seconds"
)
@click.option(
    "--length", help="Length of PREIMAGE (not of hash). Generally OK " +
    "to leave this as 0 for unconstrained.", default=0, show_default=True
)
@click.option("--account")
@click.pass_context
@online
@unlock
def create(ctx, to, amount, symbol, type, hash, expiration, length, account):
    """ Create an HTLC contract from a hash and lock-time
    """
    # Wait for the transaction to be included so operation_results is
    # populated with the new HTLC object id below.
    ctx.blockchain.blocking = True
    tx = ctx.blockchain.htlc_create(
        Amount(amount, symbol),
        to,
        hash_type=type,
        hash_hex=hash,
        expiration=expiration,
        account=account,
        preimage_length=length
    )
    # Drop the raw transaction body before printing; only the summary and
    # the operation results are interesting here.
    tx.pop("trx", None)
    print_tx(tx)
    results = tx.get("operation_results", {})
    if results:
        # First result of the first operation carries the new HTLC's id.
        htlc_id = results[0][1]
        print("Your htlc_id is: {}".format(htlc_id))
@htlc.command()
@click.argument("to")
@click.argument("amount")
@click.argument("symbol")
@click.option(
    "--type", type=click.Choice(["ripemd160", "sha1", "sha256", "hash160"]),
    default="sha256", prompt="Hash algorithm", show_default=True,
    help="Hash algorithm"
)
@click.option(
    "--secret", prompt="Redeem Password", hide_input=True, confirmation_prompt=True,
    help="Ascii-text preimage"
)
@click.option("--expiration", default=60 * 60, prompt="Expiration (seconds)",
              help="Duration of HTLC in seconds"
              )
@click.option(
    "--length", help="Length of PREIMAGE (not of hash). Generally OK " +
    "to leave this as 0 for unrestricted. If non-zero, must match length " +
    "of provided preimage", default=0, show_default=True
)
@click.option("--account")
@click.pass_context
@online
@unlock
def create_from_secret(ctx, to, amount, symbol, type, secret, expiration,
                       length, account):
    """Create an HTLC contract from a secret preimage
    If you are the party choosing the preimage, this version of
    htlc_create will compute the hash for you from the supplied
    preimage, and create the HTLC with the resulting hash.
    """
    # A non-zero --length constrains the preimage size and must therefore
    # agree with the length of the secret actually supplied.
    if length != 0 and length != len(secret):
        raise ValueError("Length must be zero or agree with actual preimage length")
    # Wait for inclusion so operation_results carries the new HTLC id.
    ctx.blockchain.blocking = True
    tx = ctx.blockchain.htlc_create(
        Amount(amount, symbol),
        to,
        preimage=secret,
        preimage_length=length,
        hash_type=type,
        expiration=expiration,
        account=account,
    )
    # Strip the raw transaction body before displaying.
    tx.pop("trx", None)
    print_tx(tx)
    results = tx.get("operation_results", {})
    if results:
        htlc_id = results[0][1]
        print("Your htlc_id is: {}".format(htlc_id))
@htlc.command()
@click.argument("htlc_id")
@click.option(
    "--secret", prompt="Redeem Password", hide_input=False, confirmation_prompt=False,
    type=str, help="The preimage, as ascii-text, unless --hex is passed"
)
@click.option(
    "--hex", is_flag=True, help="Interpret preimage as hex-encoded bytes"
)
@click.option("--account")
@click.pass_context
@online
@unlock
def redeem(ctx, htlc_id, secret, hex, account):
    """ Redeem an HTLC contract by providing preimage
    """
    # --hex means the secret is a hex-encoded byte string; otherwise it is
    # treated as plain utf-8 text.
    encoding = "hex" if hex else "utf-8"
    print_tx(ctx.blockchain.htlc_redeem(htlc_id, secret, encoding=encoding,
                                        account=account)
             )
|
# Copyright (c) 2014, Sven Thiele <sthiele78@gmail.com>
#
# This file is part of shogen.
#
# shogen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# th | e Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# shogen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Y | ou should have received a copy of the GNU General Public License
# along with shogen. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
from setuptools import setup

# Read the long description up front with a context manager; the original
# passed an open() handle straight into setup(), leaking the file handle.
with open('README.rst') as readme:
    long_description = readme.read()

setup(
    name = 'shogen',
    version = '2.0.0',
    url = 'http://pypi.python.org/pypi/shogen/',
    license = 'GPLv3+',
    description = 'Finding shortest genome segments that regulate metabolic pathways',
    long_description = long_description,
    author = 'Sven Thiele',
    author_email = 'sthiele78@gmail.com',
    packages = ['__shogen__'],
    package_dir = {'__shogen__' : 'src'},
    # Bundle the ASP encodings shipped with the package.
    package_data = {'__shogen__' : ['encodings/*.lp']},
    scripts = ['shogen.py'],
    install_requires = ['pyasp == 1.4' ]
)
|
# Abbreviation -> full club name for AFL teams.
team_mapping = {
    "SY": "Sydney",
    "WB": "Western Bulldogs",
    "WC": "West Coast",
    "HW": "Hawthorn",
    "GE": "Geelong",
    "FR": "Fremantle",
    "RI": "Richmond",
    "CW": "Collingwood",
    "CA": "Carlton",
    "GW": "Greater Western Sydney",
    "AD": "Adelaide",
    "GC": "Gold Coast",
    "ES": "Essendon",
    "ME": "Melbourne",
    "NM": "North Melbourne",
    "PA": "Port Adelaide",
    "BL": "Brisbane Lions",
    "SK": "St Kilda"
}


def get_team_name(code):
    """Return the full club name for an abbreviation (KeyError if unknown)."""
    return team_mapping[code]


def get_team_code(full_name):
    """Return the abbreviation for a full club name, or the input unchanged
    when no club matches."""
    return next(
        (code for code, name in team_mapping.items() if name == full_name),
        full_name,
    )
def get_match_description(response):
    """Build a dict describing one match from its stats page response.

    Pulls round/venue/date/attendance from the centred header cell and the
    team names and final scores from their fixed positions in the page.
    """
    header_cell = response.xpath("//td[@colspan = '5' and @align = 'center']")[0]
    details = header_cell.xpath(".//text()").extract()
    home_team = response.xpath("(//a[contains(@href, 'teams/')])[1]/text()").extract_first()
    away_team = response.xpath("(//a[contains(@href, 'teams/')])[2]/text()").extract_first()
    home_score = int(response.xpath("//table[1]/tr[2]/td[5]/b/text()").extract_first())
    away_score = int(response.xpath("//table[1]/tr[3]/td[5]/b/text()").extract_first())
    return {
        "round": details[1],
        "venue": details[3],
        "date": details[6],
        "attendance": details[8],
        "homeTeam": home_team,
        "awayTeam": away_team,
        "homeScore": home_score,
        "awayScore": away_score,
    }
def get_match_urls(response):
    """Yield an absolute URL for every per-match stats page linked on the page."""
    links = response.xpath("//a[contains(@href, 'stats/games/')]/@href").extract()
    for href in links:
        yield response.urljoin(href)
_libs' : [],
'jacoco_home' : '',
'coverage_reporter' : '',
},
'scala_config': {
'scala_home' : '',
'target_platform' : '',
'warnings' : '',
'source_encoding' : None,
},
'scala_test_config': {
'scalatest_libs' : '',
},
'go_config' : {
'go' : '',
'go_home' : '', # GOPATH
},
'thrift_config': {
'thrift': 'thrift',
'thrift_libs': [],
'thrift_incs': [],
},
'fbthrift_config': {
'fbthrift1': 'thrift1',
'fbthrift2': 'thrift2',
'fbthrift_libs': [],
'fbthrift_incs': [],
},
'proto_library_config': {
'protoc': 'thirdparty/protobuf/bin/protoc',
'protoc_java': '',
'protobuf_libs': [],
'protobuf_path': '',
'protobuf_incs': [],
'protobuf_php_path': '',
'protoc_php_plugin': '',
'protobuf_java_libs' : [],
'protoc_go_plugin': '',
# All the generated go source files will be placed
# into $GOPATH/src/protobuf_go_path
'protobuf_go_path': '',
},
'protoc_plugin_config' : {
},
'cc_config': {
'extra_incs': [],
'cppflags': [],
'cflags': [],
'cxxflags': [],
'linkflags': [],
'c_warnings': [],
'cxx_warnings': [],
'warnings': [],
'cpplint': 'cpplint.py',
'optimize': [],
'benchmark_libs': [],
'benchmark_main_libs': [],
'securecc' : None,
},
'cc_library_config': {
'generate_dynamic' : None,
# Options passed to ar/ranlib to control how
# the archive is created, such as, let ar operate
# in deterministic mode discarding timestamps
'arflags': [],
'ranlibflags': [],
}
}
    def _try_parse_file(self, filename):
        """Load and execute one configuration file, if it exists.

        Config files are plain Python run via ``execfile`` (this module
        targets Python 2); the module-level config functions below are
        what those files call. ``current_file_name`` is recorded first so
        the config functions can report where they were invoked from.
        A ``SystemExit`` raised while executing the file is reported as a
        parse error and aborts the program.
        """
        try:
            self.current_file_name = filename
            if os.path.exists(filename):
                execfile(filename)
        except SystemExit:
            console.error_exit('Parse error in config file %s, exit...' % filename)
def parse(self):
"""load the configuration file and parse. """
self._try_parse_file(os.path.join(os.path.dirname(sys.argv[0]), 'blade.conf'))
self._try_parse_file(os.path.expanduser('~/.bladerc'))
self._try_parse_file(os.path.join(self.current_source_dir, 'BLADE_ROOT'))
def update_config(self, section_name, append, user_config):
"""update config section by name. """
config = self.configs.get(section_name, {})
if config:
if append:
self._append_config(section_name, config, append)
self._replace_config(section_name, config, user_config)
else:
console.error('%s: %s: unknown config section name' % (
self.current_file_name, section_name))
def _append_config(self, section_name, config, append):
"""Append config section items"""
if not isinstance(append, dict):
console.error('%s: %s: append must be a dict' %
(self.current_file_name, section_name))
else:
for k in append:
if k in config:
if isinstance(config[k], list):
config[k] += var_to_list(append[k])
else:
console.warning('%s: %s: config item %s is not a list' %
(self.current_file_name, section_name, k))
else:
console.warning('%s: %s: unknown config item name: %s' %
(self.current_file_name, section_name, k))
def _replace_config(self, section_name, config, user_config):
"""Replace config section items"""
unknown_keys = []
for k in user_config:
if k in config:
if isinstance(config[k], list):
user_config[k] = var_to_list(user_config[k])
else:
console.warning('%s: %s: unknown config item name: %s' %
(self.current_file_name, section_name, k))
unknown_keys.append(k)
for k in unknown_keys:
del user_config[k]
config.update(user_config)
def get_config(self, section_name):
"""get config section, returns default values if not set """
return self.configs.get(section_name, {})
def cc_test_config(append=None, **kwargs):
    """Register settings for the cc_test_config section.

    ``heap_check``, when given, must be one of HEAP_CHECK_VALUES.
    """
    heap_check = kwargs.get('heap_check')
    if not (heap_check is None or heap_check in HEAP_CHECK_VALUES):
        console.error_exit('cc_test_config: heap_check can only be in %s' %
                           HEAP_CHECK_VALUES)
    blade_config.update_config('cc_test_config', append, kwargs)
def cc_binary_config(append=None, **kwargs):
    """cc_binary_config section. """
    # Thin registration shim; validation and merging happen in update_config.
    blade_config.update_config('cc_binary_config', append, kwargs)


def cc_library_config(append=None, **kwargs):
    """cc_library_config section. """
    blade_config.update_config('cc_library_config', append, kwargs)
# Valid values for global_config.duplicated_source_action; None means unset.
__DUPLICATED_SOURCE_ACTION_VALUES = {'warning', 'error', 'none', None}


def global_config(append=None, **kwargs):
    """global_config section. """
    # Validate duplicated_source_action before handing the section off;
    # a missing key yields None, which is explicitly allowed above.
    duplicated_source_action = kwargs.get('duplicated_source_action')
    if duplicated_source_action not in __DUPLICATED_SOURCE_ACTION_VALUES:
        console.error_exit('Invalid global_config.duplicated_source_action '
                           'value, can only be in %s' % __DUPLICATED_SOURCE_ACTION_VALUES)
    blade_config.update_config('global_config', append, kwargs)
# The functions below are thin registration shims for their respective
# config sections; all merging/validation happens in update_config.
def distcc_config(append=None, **kwargs):
    """distcc_config. """
    blade_config.update_config('distcc_config', append, kwargs)


def link_config(append=None, **kwargs):
    """link_config. """
    blade_config.update_config('link_config', append, kwargs)


def java_config(append=None, **kwargs):
    """java_config. """
    blade_config.update_config('java_config', append, kwargs)


def java_binary_config(append=None, **kwargs):
    """java_binary_config. """
    blade_config.update_config('java_binary_config', append, kwargs)


def java_test_config(append=None, **kwargs):
    """java_test_config. """
    blade_config.update_config('java_test_config', append, kwargs)


def scala_config(append=None, **kwargs):
    """scala_config. """
    blade_config.update_config('scala_config', append, kwargs)


def scala_test_config(append=None, **kwargs):
    """scala_test_config. """
    blade_config.update_config('scala_test_config', append, kwargs)


def go_config(append=None, **kwargs):
    """go_config. """
    blade_config.update_config('go_config', append, kwargs)
def proto_library_config(append=None, **kwargs):
    """protoc config. """
    # Backward compatibility: 'protobuf_include_path' was renamed to
    # 'protobuf_incs' (and became a list); translate the old spelling
    # here and warn about it.
    path = kwargs.get('protobuf_include_path')
    if path:
        console.warning(('%s: proto_library_config: protobuf_include_path has '
                         'been renamed to protobuf_incs, and become a list') %
                        blade_config.current_file_name)
        del kwargs['protobuf_include_path']
        # A space-separated string becomes multiple include dirs.
        # NOTE: ``basestring`` makes this Python 2 only.
        if isinstance(path, basestring) and ' ' in path:
            kwargs['protobuf_incs'] = path.split()
        else:
            kwargs['protobuf_incs'] = [path]
    blade_config.update_config('proto_library_config', append, kwargs)
def protoc_plugin(**kwargs):
"""protoc_plugin. """
if 'name' not in kwargs:
console.error_exit("Missing 'name' in protoc_plugin parameters: %s" % kwargs)
config = blade_config.get_config('protoc_plug |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################# | #############################################################
# OpenERP/Odoo addon manifest: module metadata and the data files to load.
{
    'name': 'MRP-Custom Module',
    'version': '1.1',
    'category': 'mrp_repair',
    # 'sequence': 19,
    # 'summary': 'Purchase Orders, Receptions, Supplier Invoices',
    'description': """
For customized Partner & product screen with button link to Repair Order.
""",
    'author': '4devnet.com',
    'website': 'http://www.4devnet.com',
    # 'images' : ['images/purchase_order.jpeg', 'images/purchase_analysis.jpeg', 'images/request_for_quotation.jpeg'],
    'depends': ['product','base','mrp_repair'],
    'data': [
        #'abc_report.xml',
        'mrp_custom.xml'
    ],
    'installable': True,
    'auto_install': False,
    'application': True,
}
|
"""AFOS Database Workflow."""
# 3rd Party
from twisted.internet import reactor
from txyam.client import YamClient
from pyiem.util import LOG
from pyiem.nws import product
# Local
from pywwa import common
from pywwa.ldm import bridge
from pywwa.database import get_database
# Connection pool for the 'afos' database used by real_parser.
DBPOOL = get_database("afos", cp_max=5)
# PIL prefixes whose products are skipped by the memcache step in
# real_parser — presumably high-volume products not worth caching; see
# the comment at the insert site. TODO confirm against IEM usage.
MEMCACHE_EXCLUDE = [
    "RR1",
    "RR2",
    "RR3",
    "RR4",
    "RR5",
    "RR6",
    "RR7",
    "RR8",
    "RR9",
    "ROB",
    "HML",
]
# Shared memcached client; the connection is opened at import time.
MEMCACHE_CLIENT = YamClient(reactor, ["tcp:iem-memcached3:11211"])
MEMCACHE_CLIENT.connect()
def process_data(data):
    """Queue one raw product for database insert, then memcache write.

    Database failures are emailed (with the offending buffer attached)
    and logged.
    """
    deferred = DBPOOL.runInteraction(real_parser, data)
    deferred.addCallback(write_memcache)
    deferred.addErrback(common.email_error, data)
    deferred.addErrback(LOG.error)
def write_memcache(nws):
    """write our TextProduct to memcached"""
    if nws is None:
        return
    pid = nws.get_product_id()
    # 10 minutes should be enough time
    LOG.debug("writing %s to memcache", pid)
    payload = nws.unixtext.replace("\001\n", "").encode("utf-8")
    deferred = MEMCACHE_CLIENT.set(
        pid.encode("utf-8"),
        payload,
        expireTime=600,
    )
    deferred.addErrback(LOG.error)
def real_parser(txn, buf):
    """Insert one raw text product into the AFOS products table.

    Runs inside a database interaction (``txn`` is a cursor). Returns the
    parsed TextProduct for the memcache step, or None when nothing should
    be cached (empty buffer, non-K/P source without an AFOS pil, excluded
    PIL prefix). Raises for very latent products or a missing AFOS pil.
    """
    if buf.strip() == "":
        return None
    utcnow = common.utcnow()
    nws = product.TextProduct(buf, utcnow=utcnow, parse_segments=False)
    # When we are in realtime processing, do not consider old data, typically
    # when a WFO fails to update the date in their MND
    if not common.replace_enabled() and (
        (utcnow - nws.valid).days > 180 or (utcnow - nws.valid).days < -180
    ):
        raise Exception(f"Very Latent Product! {nws.valid}")
    if nws.warnings:
        common.email_error("\n".join(nws.warnings), buf)
    if nws.afos is None:
        # Sources not starting with K or P are dropped quietly; others with
        # no AFOS pil are treated as an error.
        if nws.source[0] not in ["K", "P"]:
            return None
        raise Exception("TextProduct.afos is null")
    if common.replace_enabled():
        # Replace mode: purge any previously stored copy of this product
        # (optionally narrowed by the BBB field) before re-inserting.
        args = [nws.afos.strip(), nws.source, nws.valid]
        bbb = ""
        if nws.bbb:
            bbb = " and bbb = %s "
            args.append(nws.bbb)
        txn.execute(
            "DELETE from products where pil = %s and source = %s and "
            f"entered = %s {bbb}",
            args,
        )
        LOG.info("Removed %s rows for %s", txn.rowcount, nws.get_product_id())
    txn.execute(
        "INSERT into products (pil, data, entered, "
        "source, wmo, bbb) VALUES(%s, %s, %s, %s, %s, %s)",
        (nws.afos.strip(), nws.text, nws.valid, nws.source, nws.wmo, nws.bbb),
    )
    # Excluded PIL prefixes are stored but never cached.
    if nws.afos[:3] in MEMCACHE_EXCLUDE:
        return None
    return nws
def main():
    """Fire up our workflow."""
    # Standard pywwa startup; no jabber connection is requested here.
    common.main(with_jabber=False)
    # Feed LDM products on stdin into process_data.
    bridge(process_data)
    reactor.run()  # @UndefinedVariable


# See how we are called.
if __name__ == "__main__":
    main()
|
"""
sentry.models.file
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.db import models
from django.utils import timezone
from hashlib import md5
from uuid import uuid4
from sentry.db.models import (
BoundedPositiveIntegerField, GzippedDictField, Model
)
ONE_DAY = 60 * 60 * 24
class File(Model):
    """A stored file blob, deduplicated by (name, checksum).

    The actual bytes live in the Django storage backend whose dotted class
    path is recorded in ``storage`` (instantiated with ``storage_options``)
    under ``path``.
    """
    name = models.CharField(max_length=128)
    # Dotted path of the Django storage backend class used for this file.
    storage = models.CharField(max_length=128, null=True)
    # Kwargs used to instantiate the storage backend.
    storage_options = GzippedDictField()
    # Backend-relative location; None until putfile() runs (or after delete).
    path = models.TextField(null=True)
    type = models.CharField(max_length=64)
    size = BoundedPositiveIntegerField(null=True)
    # MD5 hex digest of the file contents, set by putfile().
    checksum = models.CharField(max_length=32, null=True)
    timestamp = models.DateTimeField(default=timezone.now, db_index=True)

    class Meta:
        unique_together = (('name', 'checksum'),)
        app_label = 'sentry'
        db_table = 'sentry_file'

    def delete(self, *args, **kwargs):
        # Remove the backing blob first; commit=False because the row is
        # about to be deleted anyway.
        if self.path:
            self.deletefile(commit=False)
        super(File, self).delete(*args, **kwargs)

    def generate_unique_path(self):
        # Layout: <type parts>/<days-since-epoch>/<seconds-in-day>/<uuid>-<name>.
        # NOTE(review): strftime('%s') is a platform-specific epoch-seconds
        # extension (glibc); confirm target platforms support it.
        pieces = self.type.split('.')
        pieces.extend(map(str, divmod(int(self.timestamp.strftime('%s')), ONE_DAY)))
        pieces.append('%s-%s' % (uuid4().hex, self.name))
        return '/'.join(pieces)

    def get_storage(self):
        # Instantiate the recorded backend class with the recorded options.
        backend = self.storage
        options = self.storage_options
        storage = get_storage_class(backend)
        return storage(**options)

    def deletefile(self, commit=False):
        # Delete the stored blob and clear ``path``; optionally persist.
        assert self.path
        storage = self.get_storage()
        storage.delete(self.path)
        self.path = None
        if commit:
            self.save()

    def putfile(self, fileobj, commit=True):
        """
        Upload this given File's contents.
        A file's content is idempotent and you may not re-save a given file.
        >>> my_file = File(name='app.dsym', type='objc.dsym')
        >>> my_file.putfile(fileobj, commit=False)
        >>> my_file.save()
        """
        assert not self.path
        self.path = self.generate_unique_path()
        self.storage = settings.SENTRY_FILESTORE
        self.storage_options = settings.SENTRY_FILESTORE_OPTIONS
        # NOTE: md5('') only works on Python 2 (str == bytes there); the
        # checksum is accumulated over the file object's chunks.
        checksum = md5('')
        for chunk in fileobj.chunks():
            checksum.update(chunk)
        self.checksum = checksum.hexdigest()
        storage = self.get_storage()
        storage.save(self.path, fileobj)
        if commit:
            self.save()

    def getfile(self):
        """
        Return a file-like object for this File's content.
        >>> fileobj = my_file.getfile()
        >>> with open('/tmp/localfile', 'wb') as fp:
        >>>     for chunk in fileobj.chunks():
        >>>         fp.write(chunk)
        """
        assert self.path
        storage = self.get_storage()
        return storage.open(self.path)
|
nstances.
'slave' sets a redis instance in slave or master mode.
'flush' flushes all the instance or a specified db.
'config' (new in 1.6), ensures a configuration setting on an instance.
version_added: "1.3"
options:
command:
description:
- The selected redis command
required: true
default: null
choices: [ "slave", "flush", "config" ]
login_password:
description:
- The password used to authenticate with (usually not used)
required: false
default: null
login_host:
description:
- The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 6379
master_host:
description:
- The host of the master instance [slave command]
required: false
default: null
master_port:
description:
- The port of the master instance [slave command]
required: false
default: null
slave_mode:
description:
- the mode of the redis instance [slave command]
required: false
default: slave
choices: [ "master", "slave" ]
db:
description:
- The database to flush (used in db mode) [flush command]
required: false
default: null
flush_mode:
description:
- Type of flush (all the dbs in a redis instance or a specific one | )
[flush command]
required: false
| default: all
choices: [ "all", "db" ]
name:
version_added: 1.6
description:
- A redis config key.
required: false
default: null
value:
version_added: 1.6
description:
- A redis config value.
required: false
default: null
notes:
- Requires the redis-py Python package on the remote host. You can
install it with pip (pip install redis) or with a package manager.
https://github.com/andymccurdy/redis-py
- If the redis master instance we are making slave of is password protected
this needs to be in the redis.conf in the masterauth variable
requirements: [ redis ]
author: "Xabier Larrakoetxea (@slok)"
'''
# Usage examples rendered by ansible-doc for this module.
EXAMPLES = '''
# Set local redis instance to be slave of melee.island on port 6377
- redis: command=slave master_host=melee.island master_port=6377
# Deactivate slave mode
- redis: command=slave slave_mode=master
# Flush all the redis db
- redis: command=flush flush_mode=all
# Flush only one db in a redis instance
- redis: command=flush db=1 flush_mode=db
# Configure local redis to have 10000 max clients
- redis: command=config name=maxclients value=10000
# Configure local redis to have lua time limit of 100 ms
- redis: command=config name=lua-time-limit value=100
'''
# Soft dependency: when the redis client library is missing, main() reports
# a friendly failure instead of crashing at import time.
try:
    import redis
    redis_found = True
except ImportError:
    redis_found = False
# ===========================================
# Redis module specific support methods.
#
def set_slave_mode(client, master_host, master_port):
    """Make the redis instance behind *client* a slave of master_host:master_port.

    Returns whatever ``SLAVEOF`` returns on success, or ``False`` when the
    command raises for any reason (connection trouble, bad arguments, ...).
    """
    try:
        outcome = client.slaveof(master_host, master_port)
    except Exception:
        return False
    return outcome
def set_master_mode(client):
    """Promote the redis instance behind *client* back to master.

    Calling ``slaveof()`` with no arguments issues ``SLAVEOF NO ONE``.
    Returns the command result, or ``False`` if it raises.
    """
    try:
        outcome = client.slaveof()
    except Exception:
        outcome = False
    return outcome
def flush(client, db=None):
    """Flush redis data through *client*.

    When *db* is an integer, only that database is flushed (FLUSHDB); the
    caller is expected to have connected *client* to that database already.
    Otherwise every database is flushed (FLUSHALL).  Returns the command
    result, or ``False`` if the command raises.
    """
    try:
        # isinstance() is the idiomatic type check; the original
        # ``type(db) != int`` rejects int subclasses.  db is produced by
        # AnsibleModule with type='int', so it is an int or None here.
        if not isinstance(db, int):
            return client.flushall()
        # The passed client has been connected to the database already
        return client.flushdb()
    except Exception:
        return False
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec = dict(
command=dict(default=None, choices=['slave', 'flush', 'config']),
login_password=dict(default=None, no_log=True),
login_host=dict(default='localhost'),
login_port=dict(default=6379, type='int'),
master_host=dict(default=None),
master_port=dict(default=None, type='int'),
slave_mode=dict(default='slave', choices=['master', 'slave']),
db=dict(default=None, type='int'),
flush_mode=dict(default='all', choices=['all', 'db']),
name=dict(default=None),
value=dict(default=None)
),
supports_check_mode = True
)
if not redis_found:
module.fail_json(msg="python redis module is required")
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
command = module.params['command']
# Slave Command section -----------
if command == "slave":
master_host = module.params['master_host']
master_port = module.params['master_port']
mode = module.params['slave_mode']
#Check if we have all the data
if mode == "slave": # Only need data if we want to be slave
if not master_host:
module.fail_json(
msg='In slave mode master host must be provided')
if not master_port:
module.fail_json(
msg='In slave mode master port must be provided')
#Connect and check
r = redis.StrictRedis(host=login_host,
port=login_port,
password=login_password)
try:
r.ping()
except Exception:
e = get_exception()
module.fail_json(msg="unable to connect to database: %s" % e)
#Check if we are already in the mode that we want
info = r.info()
if mode == "master" and info["role"] == "master":
module.exit_json(changed=False, mode=mode)
elif mode == "slave" and\
info["role"] == "slave" and\
info["master_host"] == master_host and\
info["master_port"] == master_port:
status = {
'status': mode,
'master_host': master_host,
'master_port': master_port,
}
module.exit_json(changed=False, mode=status)
else:
# Do the stuff
# (Check Check_mode before commands so the commands aren't evaluated
# if not necessary)
if mode == "slave":
if module.check_mode or\
set_slave_mode(r, master_host, master_port):
info = r.info()
status = {
'status': mode,
'master_host': master_host,
'master_port': master_port,
}
module.exit_json(changed=True, mode=status)
else:
module.fail_json(msg='Unable to set slave mode')
else:
if module.check_mode or set_master_mode(r):
module.exit_json(changed=True, mode=mode)
else:
module.fail_json(msg='Unable to set master mode')
# flush Command section -----------
elif command == "flush":
db = module.params['db']
mode = module.params['flush_mode']
#Check if we have all the data
if mode == "db":
if db is None:
module.fail_json(
msg="In db mode the db number must be provided")
#Connect and check
r = redis.StrictRedis(host=login_host,
port=login_port,
password=login_password,
db=db)
try:
r.ping()
except Exception:
e = get_exception()
module.fail_json(msg="unable to connect to database: %s" % e)
# Do the stuff
# (Check Check_mode before commands so the commands aren't evaluated
# if not necessary)
if mode == "all":
if module.check_mode or flush(r):
module.exit_json(changed=True, flushed=True) |
"""
Django settings for central_service project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd$^6$7ybljkbz@b#7j&4cz_46dhe$=uiqnxuz+h3yoyj6u$$fk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# Application definition
INSTALLED_APPS = (
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'services',
'service_pages',
'rest_framework',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'central_service.urls'
WSGI_APPLICATION = 'central_service.wsgi.application'
# Templates
TEMPLATE_DIRS = (
os.path.join(PR | OJECT_PATH, 'templates'),
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Cache
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
# Internationalizati | on
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Used in production to define where collectstatic stores stuff
STATIC_ROOT = os.path.join(PROJECT_PATH, '../static')
ADMIN_MEDIA_PREFIX = '/static/admin/'
# STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# )
# Used in development to force django to serve static files
STATICFILES_DIRS = [
os.path.join(PROJECT_PATH, "static"),
]
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAdminUser',
]
}
|
"""
Observe the effect in the perturbations of Laplacians
"""
import sys
import logging
import numpy
import scipy
import itertools
import copy
import matplotlib.pyplot as plt
from apgl.graph import *
from sandbox.util.PathDefaults import PathDefaults
from sandbox.misc.IterativeSpectralClustering import IterativeSpectralClustering
from apgl.graph.GraphUtils import GraphUtils
from apgl.generator.SmallWorldGenerator import SmallWorldGenerator
from apgl.generator.ErdosRenyiGenerator import ErdosRenyiGenerator
from sandbox.util.Util import Util
from wallhack.clusterexp.BoundGraphIterator import BoundGraphIterator
numpy.random.seed(21)
#numpy.seterr("raise")
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.set_printoptions(suppress=True, linewidth=200, precision=3)
k1 = 3
k2 = 3
logging.debug("k=" + str(k1))
numRepetitions = 50
numGraphs = 80
saveResults = Fal | se
resultsDir = PathDefaults.getOutputDir() + "cluster/"
fileName = resultsDir + "ErrorBoundTheorem44.npy"
if saveResults:
errors = numpy.zeros((nu | mGraphs, numRepetitions))
allBoundLists = numpy.zeros((numRepetitions, numGraphs, 5))
for r in range(numRepetitions):
iterator = BoundGraphIterator(numGraphs=numGraphs)
clusterer = IterativeSpectralClustering(k1, k2, T=100, computeBound=True, alg="IASC")
clusterer.nb_iter_kmeans = 20
logging.debug("Starting clustering")
clusterList, timeList, boundList = clusterer.clusterFromIterator(iterator, verbose=True)
allBoundLists[r, :, :] = numpy.array(boundList)
for i in range(len(clusterList)):
errors[i, r] = GraphUtils.randIndex(clusterList[i], iterator.realClustering)
print(allBoundLists.mean(0))
numpy.save(fileName, allBoundLists)
logging.debug("Saved results as " + fileName)
else:
allBoundLists = numpy.load(fileName)
boundList = allBoundLists.mean(0)
stdBoundList = allBoundLists.std(0)
stdBoundList[:, 0] = boundList[:, 0]
plotStyles1 = ['k-', 'k--', 'k-.', 'k:', 'b--', 'b-.', 'g-', 'g--', 'g-.', 'r-', 'r--', 'r-.']
print(boundList)
print(stdBoundList)
plt.figure(0)
plt.plot(boundList[:, 0], boundList[:, 1], plotStyles1[0], label="Frobenius approx")
plt.plot(boundList[:, 0], boundList[:, 2], plotStyles1[1], label="2-norm approx")
plt.plot(boundList[:, 0], boundList[:, 3], plotStyles1[2], label="Frobenius precise")
plt.plot(boundList[:, 0], boundList[:, 4], plotStyles1[3], label="2-norm precise")
plt.xlabel("Graph no.")
plt.ylabel(r"$||\sin \; \Theta(\mathcal{R}(U_k), \mathcal{R}(V_k) )||$")
plt.legend(loc="upper left")
plt.grid(True)
#plt.figure(1)
#plt.plot(numpy.arange(errors.shape[0]), errors)
plt.show()
|
# encoding: utf-8
"""
Place holder for all workers
"""
from | .integration_ | tester import IntegrationTestWorker
from .db_writer import DatabaseWriterWorker
from .deploy import BeforeDeploy, Deploy, Restart, GithubDeploy |
import numpy as np
import math
import sys
import os
# The project lives under $learningml/GoF; make its modules importable.
sys.path.insert(0,os.environ['learningml']+'/GoF/')
import classifier_eval
from classifier_eval import name_to_nclf, nclf, experiment, make_keras_model
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from rep.estimators import XGBoostClassifier
from keras.wrappers.scikit_learn import KerasClassifier
import time
# Alternative classifier line-ups kept for reference; only the SVM run below
# is active in this configuration.
#nclf_list = [nclf()]
#nclf_list = [nclf(), name_to_nclf("bdt"), nclf('xgb',XGBoostClassifier(),['n_estimators','eta'], [[10,1000],[0.01,1.0]]) ]
#nclf_list = [nclf('xgb',XGBoostClassifier(),['n_estimators','eta'], [[10,1000],[0.01,1.0]], param_opt=[1000.,0.9738])]
#nclf_list = [nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]],param_opt=[0,500])]
#nclf_list = [name_to_nclf("nn")]
#nclf_list = [name_to_nclf("bdt"), name_to_nclf("xgb"), name_to_nclf("svm"), name_to_nclf("nn")]
#nclf_list = [name_to_nclf("bdt"), name_to_nclf("xgb"), name_to_nclf("nn")]
nclf_list = [name_to_nclf("svm")]
#nclf_list = [nclf('bdt',AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2)), ['learning_rate','n_estimators'], [[0.01,2.0],[1,1000]], param_opt=[1.181, 319]), nclf('xgb',XGBoostClassifier(), ['n_estimators','eta'], [[10,1000],[0.01,1.0]], param_opt=[524, 0.151]), nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]],param_opt=[0,455])]
#nclf_list = [nclf('nn',"no classifier needed for nn", ['n_hidden_layers','dimof_middle'], [[0,1],[100,500]],param_opt=[0,455])]
systematics_fraction = 0.01
# Input templates: {0} is the dimensionality, {1} the sample index.  The two
# patterns are the no-CPV (1.0/1.0) and CPV (0.95/0.95) gaussian datasets.
file_name_patterns= [ os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{0}D_10000_0.0_1.0_1.0_{1}_euclidean.txt", os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_{0}D_10000_0.0_0.95_0.95_{1}_euclidean.txt" ]
#file_name_patterns= [ os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{0}D_10000_0.0_1.0_1.0_optimisation_{1}.txt", os.environ['learningml']+"/GoF/data/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{0}D_10000_0.0_1.0_0.9_optimisation_{1}.txt" ]
# Labels/titles used by `experiment` for output naming and plots.
name_CPV= "{0}Dgauss__0_95__0_95_CPV_not_redefined_euclidean"
name_noCPV= "{0}Dgauss__1_0__1_0_noCPV_not_redefined_euclidean"
#name_CPV= "{0}Dgauss__1_0__0_95_CPV_chi2scoringopt"
#name_noCPV= "{0}Dgauss__1_0__1_0_noCPV_chi2scoringopt"
title_CPV = "Gauss 0.95 0.95 euclidean"
title_noCPV="Gauss 1.0 1.0 euclidean"
directory_name = "_0_95__0_95_not_redefined_euclidean"
expt = experiment(nclf_list=nclf_list, file_name_patterns=file_name_patterns, scoring='chi2',single_no_bins_list = [5], systematics_fraction = systematics_fraction, only_mod=False, title_CPV=title_CPV, title_noCPV=title_noCPV, name_CPV=name_CPV, name_noCPV=name_noCPV, directory_name=directory_name)
start_time = time.time()
# Hyper-parameter search first, then the timed evaluation sweep over 1-10D.
expt.optimise(optimisation_dimension = 4, keras_optimisation_dimension = 1, number_of_iterations=50)
#optimisation gave nn param_opt
evaluation_start_time = time.time()
print(50*"-"+"\noptimisation took ", (evaluation_start_time - start_time)/60. , " minutes\n" +50*"-")
expt.evaluate(evaluation_dimensions = range(1,11), keras_evaluation_dimensions = [1]*10, number_of_evaluations=100)
end_time = time.time()
print(50*"-"+"\nevaluation took ", (end_time - evaluation_start_time)/60. , " minutes\n" +50*"-")
# zFCP configuration dialog
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Samantha N. Bueno <sbueno@redhat.com>
#
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.utils import gtk_action_nowait
from pyanaconda.storage_utils import try_populate_devicetree
__all__ = ["ZFCPDialog"]
class ZFCPDialog(GUIObject):
    """ Gtk dialog which allows users to manually add zFCP devices without
        having previously specified them in a parm file.

        .. inheritance-diagram:: ZFCPDialog
           :parts: 3
    """
    builderObjects = ["zfcpDialog"]
    mainWidgetName = "zfcpDialog"
    uiFile = "spokes/advstorage/zfcp.glade"

    def __init__(self, data, storage):
        GUIObject.__init__(self, data)
        self.storage = storage
        self.zfcp = self.storage.zfcp()
        # Error text produced by the last discovery attempt, or None.
        self._discoveryError = None
        # Set once a device was successfully added, so run() knows to
        # repopulate the devicetree.
        self._update_devicetree = False
        # grab all of the ui objects
        self._configureGrid = self.builder.get_object("configureGrid")
        self._conditionNotebook = self.builder.get_object("conditionNotebook")
        self._startButton = self.builder.get_object("startButton")
        self._okButton = self.builder.get_object("okButton")
        self._cancelButton = self.builder.get_object("cancelButton")
        self._retryButton = self.builder.get_object("retryButton")
        self._deviceEntry = self.builder.get_object("deviceEntry")
        self._wwpnEntry = self.builder.get_object("wwpnEntry")
        self._lunEntry = self.builder.get_object("lunEntry")

    def refresh(self):
        """Reset the dialog to its initial, editable state."""
        self._deviceEntry.set_text("")
        self._deviceEntry.set_sensitive(True)
        self._startButton.set_sensitive(True)

    def run(self):
        """Run the dialog; returns the Gtk response code."""
        rc = self.window.run()
        self.window.destroy()
        # We need to call this to get the device nodes to show up
        # in our devicetree.
        if self._update_devicetree:
            try_populate_devicetree(self.storage.devicetree)
        return rc

    def _set_configure_sensitive(self, sensitivity):
        """ Set entries to a given sensitivity. """
        for child in self._configureGrid.get_children():
            child.set_sensitive(sensitivity)

    def on_start_clicked(self, *args):
        """ Go through the process of validating entry contents and then
            attempt to add the device.
        """
        # First update widgets
        self._startButton.hide()
        self._cancelButton.set_sensitive(False)
        self._okButton.set_sensitive(False)
        self._conditionNotebook.set_current_page(1)
        self._set_configure_sensitive(False)
        self._deviceEntry.set_sensitive(False)
        self._conditionNotebook.set_current_page(1)
        # below really, really is ugly and needs to be re-factored, but this
        # should give a good base idea as far as expected behavior should go
        try:
            device = blockdev.s390.sanitize_dev_input(self._deviceEntry.get_text())
            wwpn = blockdev.s390.zfcp_sanitize_wwpn_input(self._wwpnEntry.get_text())
            lun = blockdev.s390.zfcp_sanitize_lun_input(self._lunEntry.get_text())
        except blockdev.S390Error as err:
            _config_error = str(err)
            self.builder.get_object("deviceErrorLabel").set_text(_config_error)
            self._conditionNotebook.set_current_page(2)
            # BUG FIX: the original fell through here and called
            # self._discover(device, wwpn, lun) with locals that may never
            # have been bound, raising NameError.  Instead, restore the
            # widgets so the user can fix the input, and bail out.
            self._set_configure_sensitive(True)
            self._deviceEntry.set_sensitive(True)
            self._cancelButton.set_sensitive(True)
            self._startButton.show()
            return
        spinner = self.builder.get_object("waitSpinner")
        spinner.start()
        self._discover(device, wwpn, lun)
        self._check_discover()

    @gtk_action_nowait
    def _check_discover(self, *args):
        """ After the zFCP discover thread runs, check to see whether a valid
            device was discovered. Display an error message if not.
        """
        spinner = self.builder.get_object("waitSpinner")
        spinner.stop()
        if self._discoveryError:
            # Failure, display a message and leave the user on the dialog so
            # they can try again (or cancel)
            self.builder.get_object("deviceErrorLabel").set_text(self._discoveryError)
            self._discoveryError = None
            self._conditionNotebook.set_current_page(2)
            self._set_configure_sensitive(True)
        else:
            # Great success. Just return to the advanced storage window and let the
            # UI update with the newly-added device
            self.window.response(1)
            return True
        self._cancelButton.set_sensitive(True)
        return False

    def _discover(self, *args):
        """ Given the configuration options from a user, attempt to discover
            a zFCP device. This includes searching black-listed devices.
        """
        # attempt to add the device
        try:
            self.zfcp.addFCP(args[0], args[1], args[2])
            self._update_devicetree = True
        except ValueError as e:
            self._discoveryError = str(e)
            return

    def on_entry_activated(self, entry, user_data=None):
        """Pressing Enter in an entry triggers the visible action button."""
        # When an entry is activated, press the discover or retry button
        current_page = self._conditionNotebook.get_current_page()
        if current_page == 0:
            self._startButton.clicked()
        elif current_page == 2:
            self._retryButton.clicked()
|
from unittest import TestCase
import re
from iperflexer import oatbran
# Short alias for the module under test, plus a reusable non-matching fixture.
bran = oatbran
COW = 'cow'
class TestOatBran(TestCase):
    """Regression tests for the regex fragments exposed by `oatbran`.

    NOTE(review): assertRegexpMatches / assertNotRegexpMatches are the
    pre-Python-3.2 names of assertRegex / assertNotRegex; they are kept
    unchanged here to match the codebase's vintage.
    """
    def test_brackets(self):
        """L_BRACKET and R_BRACKET each match only their own literal."""
        L_BRACKET = '['
        R_BRACKET = "]"
        self.assertRegexpMatches(L_BRACKET, bran.L_BRACKET)
        self.assertNotRegexpMatches(R_BRACKET, bran.L_BRACKET)
        self.assertRegexpMatches(R_BRACKET, bran.R_BRACKET)
        self.assertNotRegexpMatches(L_BRACKET, bran.R_BRACKET)
        return
    def test_spaces(self):
        """SPACE/SPACES require whitespace; OPTIONAL_SPACES matches anything."""
        space = ' '
        empty_string = ''
        spaces = '   '
        self.assertRegexpMatches(space, bran.SPACE)
        self.assertNotRegexpMatches(empty_string, bran.SPACE)
        self.assertNotRegexpMatches(COW, bran.SPACE)
        self.assertRegexpMatches(spaces, bran.SPACE)
        self.assertRegexpMatches(spaces, bran.SPACES)
        self.assertNotRegexpMatches(empty_string, bran.SPACES)
        self.assertNotRegexpMatches(COW, bran.SPACES)
        self.assertRegexpMatches(spaces, bran.OPTIONAL_SPACES)
        self.assertRegexpMatches(empty_string, bran.OPTIONAL_SPACES)
        self.assertRegexpMatches(COW, bran.OPTIONAL_SPACES)
        return
    def test_named(self):
        """NAMED(n, e) produces a named group retrievable via match.group."""
        name = "boy"
        expression = COW
        match = re.search(bran.NAMED(n=name, e=expression), "a cow for liebowitz")
        self.assertEqual(expression, match.group(name))
        return
    def test_digit(self):
        """DIGIT matches each single decimal digit but not letters."""
        digits = "1 2 3 4 5 6 7 8 9 0".split()
        for digit in digits:
            self.assertRegexpMatches(digit, bran.DIGIT)
        self.assertNotRegexpMatches(COW, bran.DIGIT)
        return
    def test_integer(self):
        """INTEGER matches whole numbers but not decimal fractions."""
        n1 = "112345"
        n2 = "0.1"
        self.assertRegexpMatches(n1, bran.INTEGER)
        match = re.search(bran.GROUP(e=bran.INTEGER), n2)
        self.assertIsNone(match)
        return
    def test_float(self):
        """FLOAT requires a decimal point."""
        n1 = '12.3'
        n2 = "11"
        self.assertRegexpMatches(n1, bran.FLOAT)
        self.assertNotRegexpMatches(n2, bran.FLOAT)
        return
    def test_real(self):
        """REAL accepts both integers and floats; the group captures fully."""
        n1 = "0.340"
        n2 = "123"
        match = re.search(bran.GROUP(e=bran.REAL), n1)
        self.assertEqual(n1, match.groups()[0])
        self.assertRegexpMatches(n2, bran.REAL)
        self.assertNotRegexpMatches(COW, bran.REAL)
        return
    def test_class(self):
        """CLASS builds a character class matching any one of its chars."""
        s = "Bboy"
        e = bran.CLASS(e="Bb") + "boy"
        self. assertRegexpMatches(s, e)
    def test_single_digit(self):
        """SINGLE_DIGIT matches a lone digit."""
        self.assertRegexpMatches("0", bran.SINGLE_DIGIT)
        return
    def test_two_digits(self):
        """TWO_DIGITS matches exactly two digits, not one or three."""
        self.assertRegexpMatches("19", bran.TWO_DIGITS)
        self.assertRegexpMatches("99", bran.TWO_DIGITS)
        self.assertNotRegexpMatches("9", bran.TWO_DIGITS)
        self.assertNotRegexpMatches("100", bran.TWO_DIGITS)
        return
    def test_zero_or_one(self):
        """ZERO_OR_ONE ('?') allows zero or one occurrence, never two."""
        s = "Gb"
        s2 = "Gab"
        s3 = "Gaab"
        e = "G(a)" + bran.ZERO_OR_ONE + 'b'
        self.assertRegexpMatches(s, e)
        match = re.search(e, s)
        self.assertIsNone(match.groups()[0])
        self.assertRegexpMatches(s2, e)
        match = re.search(e, s2)
        self.assertEqual("a", match.groups()[0])
        self.assertNotRegexpMatches(s3, e)
        return
    def test_range(self):
        """M_TO_N matches m..n repetitions; the longest run is captured."""
        s = "1"
        s3 = "315"
        s2 = "a" + s3 + "21"
        e = bran.NAMED(n="octet", e=bran.M_TO_N(m=1, n=3, e=bran.DIGIT))
        self.assertRegexpMatches(s, e)
        self.assertRegexpMatches(s2, e)
        self.assertRegexpMatches(s3,e)
        match = re.search(e, s2)
        self.assertEqual(s3, match.group("octet"))
        return
    def test_absolute_range(self):
        """M_TO_N_ONLY rejects strings with adjacent extra characters."""
        s = "a123"
        e = bran.NAMED(n="octet", e=bran.M_TO_N_ONLY(m=1, n=3, e=bran.DIGIT))
        self.assertNotRegexpMatches(s, e)
        return
    def test_octet(self):
        """OCTET matches every value 0-255 and rejects 256."""
        name = "octet"
        e = re.compile(bran.NAMED(name,bran.OCTET))
        sources = (str(i) for i in range(256))
        for source in sources:
            match = e.search(source)
            self.assertEqual(source, match.group(name))
        s = "256"
        self.assertNotRegexpMatches(s, bran.OCTET)
        return
    def test_ip_address(self):
        """IP_ADDRESS matches dotted quads with valid octets only."""
        s = "0.0.0.0"
        self.assertRegexpMatches(s, bran.IP_ADDRESS)
        self.assertNotRegexpMatches("256.255.255.255", bran.IP_ADDRESS)
        return
    def test_not(self):
        """NOT(c) matches runs of characters other than c."""
        source = ",,323.5,"
        match = re.search(bran.NAMED('not',bran.NOT(",")), source)
        self.assertEqual(match.group('not'), '323.5')
        self.assertRegexpMatches(",,3,", bran.NOT(","))
        self.assertNotRegexpMatches(",,,,,", bran.NOT(','))
# end class TestOatBran
|
ho@gmail.com>")
__version__ = "2.3"
__license__ = "MIT"
"""
Implements a single-threaded longpoll client
"""
import select
import socket
import json
import httplib
import threading
import time
import vkapi as api
import utils
from __main__ import *
# Socket-check timeout in seconds — its use is outside this view; TODO confirm.
SOCKET_CHECK_TIMEOUT = 10
# handleUser() retries longpoll initialization this many times...
LONGPOLL_RETRY_COUNT = 10
# ...sleeping this many seconds between attempts.
LONGPOLL_RETRY_TIMEOUT = 10
# Presumably the select() timeout for poll sockets — usage not visible here.
SELECT_WAIT = 25
# Presumably how long (seconds) a poll opener is kept alive — TODO confirm.
OPENER_LIFETIME = 60
# Return codes used by processPollResult() (see its docstring).
CODE_SKIP = -1
CODE_FINE = 0
CODE_ERROR = 1
# VK longpoll update type codes (first element of each update entry).
TYPE_MSG = 4
TYPE_MSG_EDIT = 5
TYPE_MSG_READ_IN = 6 # we read the message
TYPE_MSG_READ_OUT = 7 # they read the message
TYPE_PRS_IN = 8
TYPE_PRS_OUT = 9
TYPE_TYPING = 61
# Message flag bits (FLAG_OUT is tested in processPollResult).
FLAG_OUT = 2
FLAG_CHAT = 16
# Peer ids above this value denote group chats.
MIN_CHAT_UID = 2000000000
# TCP keepalive tuning applied by configureSocket(); see man(7) tcp.
TCP_KEEPINTVL = 60
TCP_KEEPIDLE = 60
def debug(message, *args):
    """Forward *message* to logger.debug, but only when DEBUG_POLL is set."""
    if not DEBUG_POLL:
        return
    logger.debug(message, *args)
def read(opener, source):
    """
    Read a socket ignoring errors
    Args:
        opener: a socket to read
        source: the user's jid
    Returns:
        JSON data or an empty string
    """
    # Default to an empty payload; any transport-level error downgrades to
    # a warning and the empty string is returned to the caller.
    payload = ""
    try:
        payload = opener.read()
    except (httplib.BadStatusLine, socket.error, socket.timeout) as e:
        logger.warning("longpoll: got error `%s` (jid: %s)", e.message, source)
    return payload
def processPollResult(user, data):
    """
    Processes a poll result
    Decides whether to send a chat/groupchat message or presence or just pass the iteration
    Args:
        user: the User object
        data: a valid json with poll result
    Returns:
        CODE_SKIP: just skip iteration, not adding the user to poll again
        CODE_FINE: add user for the next iteration
        CODE_ERROR: user should be added to the init buffer
    """
    debug("longpoll: processing result (jid: %s)", user.source)
    retcode = CODE_FINE
    try:
        data = json.loads(data)
    except ValueError:
        # Empty or garbled body: tell the caller to re-init the poll.
        logger.error("longpoll: no data. Gonna request again (jid: %s)",
            user.source)
        retcode = CODE_ERROR
        return retcode
    if "failed" in data:
        # The longpoll server asked us to reconnect / fetch a new server.
        logger.debug("longpoll: failed. Searching for a new server (jid: %s)", user.source)
        retcode = CODE_ERROR
    else:
        # Remember the new timestamp for the next longpoll request.
        user.vk.pollConfig["ts"] = data["ts"]
        for evt in data.get("updates", ()):
            # The first element of every update entry is its event type code.
            typ = evt.pop(0)
            debug("longpoll: got updates, processing event %s with arguments %s (jid: %s)",
                typ, str(evt), user.source)
            if typ == TYPE_MSG: # new message
                message = None
                mid, flags, uid, date, body, subject, attachments = evt
                if subject:
                    subject = subject.get("title")
                out = flags & FLAG_OUT
                chat = (uid > MIN_CHAT_UID) # a groupchat always has uid > 2000000000
                # there is no point to request messages if there's only a single emoji attachment
                # we actually only need to request for new messages if there are complex attachments in it (e.g. photos)
                if len(attachments) == 1 and "emoji" in attachments:
                    attachments = None
                if not out:
                    # Simple private message: forward it directly; otherwise
                    # sendMessages() re-fetches history (message stays None).
                    if not attachments and not chat:
                        message = [{"out": 0, "from_id": uid, "id": mid, "date": date, "text": body}]
                    # we substract 1 from msg id b/c VK now has reverse history so we need to ask what happened before this exact message
                    utils.runThread(user.sendMessages, (False, message, mid - 1, uid), "sendMessages-%s" % user.source)
            elif typ == TYPE_MSG_READ_OUT:
                # The peer read our message: forward an XMPP chat marker.
                uid, mid, _ = evt
                cache = user.msgCacheByUser.get(uid)
                if cache:
                    xmppMID = cache["xmpp"]
                    cache.clear()
                    sendChatMarker(user.source, vk2xmpp(uid), xmppMID)
            elif typ == TYPE_PRS_IN: # user has joined
                uid = abs(evt[0])
                sendPresence(user.source, vk2xmpp(uid), hash=USER_CAPS_HASH)
            elif typ == TYPE_PRS_OUT: # user has left
                uid = abs(evt[0])
                sendPresence(user.source, vk2xmpp(uid), "unavailable")
            elif typ == TYPE_TYPING: # user is typing
                uid = evt[0]
                if uid not in user.typing:
                    sendMessage(user.source, vk2xmpp(uid), typ="composing")
                user.typing[uid] = time.time()
                # NOTE(review): redundant — retcode is already CODE_FINE here.
                retcode = CODE_FINE
    return retcode
def configureSocket(sock):
    """Best-effort TCP keepalive tuning for *sock*; see man(7) tcp."""
    debug("setting socket parameters...")
    try:
        # The attribute lookups must stay inside the try block: SOL_TCP /
        # TCP_KEEP* are missing on some platforms and raise AttributeError.
        options = (
            # enable keepalive probes
            (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
            # interval between subsequent keepalive probes, regardless of
            # traffic exchanged meanwhile (overrides tcp_keepalive_intvl)
            (socket.SOL_TCP, socket.TCP_KEEPINTVL, TCP_KEEPINTVL),
            # idle time after the last data packet before the first probe
            # (overrides tcp_keepalive_time)
            (socket.SOL_TCP, socket.TCP_KEEPIDLE, TCP_KEEPIDLE),
        )
        for level, option, value in options:
            sock.setsockopt(level, option, value)
    except (AttributeError, OSError):
        debug("unable to set socket parameters")
# TODO: make it abstract, to reuse in Steampunk
class Poll(object):
"""
Class used to handle longpoll
"""
__list = {}
__buff = set()
__lock = threading.Lock()
clear = staticmethod(__list.clear)
watchdogRunning = False
@classmethod
def init(cls):
cls.watchdogRunning ^= True
cls.watchdog()
@classmethod
def __add(cls, user):
"""
Issues a readable socket to use it in select()
Adds user in buffer if a error occurred
Adds user in cls.__list if no errors
"""
if user.source in Users:
# in case the new instance was created
user = Users[user.source]
opener = user.vk.makePoll()
debug("longpoll: user has been added to poll (jid: %s)", user.source)
if opener:
sock = opener.sock
configureSocket(sock)
cls.__list[sock] = (user, opener)
return opener
logger.warning("longpoll: got null opener! (jid: %s)", user.source)
cls.__addToBuffer(user)
@classmethod
def add(cls, some_user):
"""
Adds the User class object to poll
"""
debug("longpoll: adding user to poll (jid: %s)", some_user.source)
with cls.__lock:
if some_user in cls.__buff:
return None
# check if someone is trying to add an already existing user
for sock, (user, opener) in cls.__list.iteritems():
if some_user == user:
break
else:
try:
cls.__add(some_user)
except api.LongPollError as e:
logger.debug("longpoll: failed to make poll: %s (jid: %s)", e.message, some_user.source)
cls.__addToBuffer(some_user)
except Exception:
crashLog("poll.add")
@classmethod
def __addToBuffer(cls, user):
"""
Adds user to the list of "bad" users
The list is mostly contain users whose poll
request was failed for some reasons
Args:
user: the user object
"""
cls.__buff.add(user)
logger.debug("longpoll: adding user to the init buffer (jid: %s)", user.source)
utils.runThread(cls.handleUser, (user,), "handleBuffer-%s" % user.source)
@classmethod
def __removeFromBuffer(cls, user):
"""
Instantly removes a user from the buffer
Args:
user: the user object
"""
if user in cls.__buff:
cls.__buff.remove(user)
@classmethod
def removeFromBuffer(cls, user):
"""
Removes a user from the buffer
Args:
user: the user object
"""
with cls.__lock:
cls.__removeFromBuffer(user)
@classmethod
def handleUser(cls, user):
"""
Tries to reinitialize poll for LONGPOLL_RETRY_COUNT every LONGPOLL_RETRY_TIMEOUT seconds
As soon as poll is initialized the user will be removed from buffer
Args:
user: the user object
"""
for _ in xrange(LONGPOLL_RETRY_COUNT):
if user.source in Users:
user = Users[user.source] # we might have a new instance here
if user.vk.initPoll():
with cls.__lock:
logger.debug("longpoll: successfully initialized longpoll (jid: %s)",
user.source)
cls.__add(user)
cls.__removeFromBuffer(user)
break
else:
logger.debug("longpoll: while we were wasting our time"
", the user has left (jid: %s)", user.source)
cls.removeFromBuffer(user)
return None
time.sleep(LONGPOLL_RETRY_TIMEOUT)
else:
cls.removeFromBuffer(user)
logger.error("longpoll: failed to add user to poll in 10 retries"
" (jid: %s)", user.source)
@classmethod
def process(cls):
"""
Processes poll sockets by select.select()
As soon as socket will be ready for reading, user.processPollResult() is called
Read processPollResult.__doc__ to learn more about status codes
"""
while ALIVE:
socks = cls.__list.keys()
if not socks:
time.sleep(0.02)
continue
try:
ready, error = |
import pytest
@pytest.mark.usefixtures('tmpdir')
@pytest.mark.filecopy('test.torrent', '__tmp__/')
class TestContentFilter:
    """Tests for the content_filter plugin's reject/require/require_all modes.

    Each task feeds the same fixture torrent through a different
    content_filter configuration and checks the accept/reject outcome.
    """
    config = """
        tasks:
          test_reject1:
            mock:
              - {title: 'test', file: '__tmp__/test.torrent'}
            accept_all: yes
            content_filter:
              reject: '*.iso'
          test_reject2:
            mock:
              - {title: 'test', file: '__tmp__/test.torrent'}
            accept_all: yes
            content_filter:
              reject: '*.avi'
          test_require1:
            mock:
              - {title: 'test', file: '__tmp__/test.torrent'}
            accept_all: yes
            content_filter:
              require:
                - '*.bin'
                - '*.iso'
          test_require2:
            mock:
              - {title: 'test', file: '__tmp__/test.torrent'}
            accept_all: yes
            content_filter:
              require: '*.avi'
          test_require_all1:
            mock:
              - {title: 'test', file: '__tmp__/test.torrent'}
            accept_all: yes
            content_filter:
              require_all:
                - 'ubu*'
                - '*.iso'
          test_require_all2:
            mock:
              - {title: 'test', file: '__tmp__/test.torrent'}
            accept_all: yes
            content_filter:
              require_all:
                - '*.iso'
                - '*.avi'
          test_strict:
            mock:
              - {title: 'test'}
            accept_all: yes
            content_filter:
              require: '*.iso'
              strict: true
          test_cache:
            mock:
              - {title: 'test', url: 'http://localhost/', file: '__tmp__/test.torrent'}
            accept_all: yes
            content_filter:
              reject: ['*.iso']
    """

    def test_reject1(self, execute_task):
        """Content Filter: reject matching mask"""
        task = execute_task('test_reject1')
        assert task.find_entry('rejected', title='test'), 'should have rejected, contains *.iso'

    def test_reject2(self, execute_task):
        """Content Filter: reject non-matching mask leaves entry accepted"""
        task = execute_task('test_reject2')
        # BUG FIX: the original message used '\t' inside a single-quoted
        # string, embedding a literal TAB ("doesn<TAB> contain").
        assert task.find_entry(
            'accepted', title='test'
        ), "should have accepted, doesn't contain *.avi"

    def test_require1(self, execute_task):
        """Content Filter: require satisfied by one of several masks"""
        task = execute_task('test_require1')
        assert task.find_entry('accepted', title='test'), 'should have accepted, contains *.iso'

    def test_require2(self, execute_task):
        """Content Filter: require with no matching file rejects"""
        task = execute_task('test_require2')
        # BUG FIX: same '\t' escape problem as test_reject2.
        assert task.find_entry(
            'rejected', title='test'
        ), "should have rejected, doesn't contain *.avi"

    def test_require_all1(self, execute_task):
        """Content Filter: require_all with every mask satisfied"""
        task = execute_task('test_require_all1')
        assert task.find_entry(
            'accepted', title='test'
        ), 'should have accepted, both masks are satisfied'

    def test_require_all2(self, execute_task):
        """Content Filter: require_all with one unsatisfied mask"""
        task = execute_task('test_require_all2')
        assert task.find_entry(
            'rejected', title='test'
        ), 'should have rejected, one mask isn\'t satisfied'

    def test_strict(self, execute_task):
        """Content Filter: strict enabled"""
        task = execute_task('test_strict')
        assert task.find_entry('rejected', title='test'), 'should have rejected non torrent'

    def test_cache(self, execute_task):
        """Content Filter: caching"""
        task = execute_task('test_cache')
        assert task.find_entry('rejected', title='test'), 'should have rejected, contains *.iso'
        # Test that remember_rejected rejects the entry before us next time
        task = execute_task('test_cache')
        assert task.find_entry(
            'rejected', title='test', rejected_by='remember_rejected'
        ), 'should have rejected, content files present from the cache'
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import unittest
from manifestparser import ManifestParser
here = os.path.dirname(os.path.abspath(__file__))
class TestDefaultSkipif(unittest.TestCase):
    """test applying a skip-if condition in [DEFAULT] and || with the value for the test"""

    def test_defaults(self):
        # Each test in default-skipif.ini should have its own skip-if
        # condition OR-combined with the one from the [DEFAULT] section.
        manifest = os.path.join(here, 'default-skipif.ini')
        parser = ManifestParser(manifests=(manifest,))
        expected = {
            'test1': "(os == 'win' && debug ) || (debug)",
            'test2': "(os == 'win' && debug ) || (os == 'linux')",
            'test3': "(os == 'win' && debug ) || (os == 'win')",
            'test4': "(os == 'win' && debug ) || (os == 'win' && debug)",
            'test5': "os == 'win' && debug # a pesky comment",
            'test6': "(os == 'win' && debug ) || (debug )",
        }
        for test in parser.tests:
            name = test['name']
            if name in expected:
                self.assertEqual(test['skip-if'], expected[name])
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
class Shift(function_node.FunctionNode):

    """Zero-parameter spatial shift applied per channel group.

    The input's channels are divided into ``kh * kw`` groups and each
    group is gathered at a different spatial offset taken from a
    ``kh x kw`` grid scaled by the dilation, instead of convolving.
    """

    def __init__(self, ksize=3, dilate=1):
        # ksize/dilate accept a scalar or an explicit (height, width) pair.
        super(Shift, self).__init__()
        self.kh, self.kw = _pair(ksize)
        # Odd kernel sizes guarantee a centered (zero-offset) group exists.
        if self.kh % 2 != 1:
            raise ValueError('kh must be odd')
        if self.kw % 2 != 1:
            raise ValueError('kw must be odd')
        self.dy, self.dx = _pair(dilate)
    def check_type_forward(self, in_types):
        # Exactly one input: a floating-point 4-D array (n, c, h, w) with
        # enough channels to populate every shift group at least once.
        n_in = in_types.size()
        type_check.expect(n_in == 1)
        x_type = in_types[0]
        type_check.expect(
            x_type.dtype.kind == 'f',
            x_type.ndim == 4,
            x_type.shape[1] >= self.kh * self.kw,
        )
    def forward_cpu(self, inputs):
        # CPU path: zero-pad once, then take one shifted crop per channel
        # group and concatenate the crops back along the channel axis.
        x = inputs[0]
        b, c, h, w = x.shape
        # Half the kernel extent (times |dilate|) of padding keeps every
        # shifted crop inside the padded array.
        py = self.kh // 2 * abs(self.dy)
        px = self.kw // 2 * abs(self.dx)
        x = numpy.pad(x, ((0, 0), (0, 0), (py, py), (px, px)),
                      'constant')
        n_groups = self.kh * self.kw
        group_size = c // n_groups
        ret = []
        for i, group_idx in enumerate(range(n_groups)):
            # Make sure that center group is last
            if group_idx == (n_groups - 1) // 2:
                group_idx = n_groups - 1
            elif group_idx == (n_groups - 1):
                group_idx = (n_groups - 1) // 2
            # py // |dy| == kh // 2 and px // |dx| == kw // 2, so (ky, kx)
            # ranges over [-(kh//2), kh//2] x [-(kw//2), kw//2].
            ky = (group_idx // self.kw) - py // abs(self.dy)
            kx = (group_idx % self.kw) - px // abs(self.dx)
            hs = py + -ky * self.dy
            ws = px + -kx * self.dx
            he = hs + h
            we = ws + w
            cs = i * group_size
            # The final slice is open-ended so leftover channels (when c is
            # not divisible by n_groups) land in the last group.
            ce = (i + 1) * group_size if i < n_groups - 1 else None
            ret.append(x[:, cs:ce, hs:he, ws:we])
        return numpy.concatenate(ret, axis=1),
    def forward_gpu(self, inputs):
        # GPU path: a single elementwise kernel gathers each output value
        # from its shifted input location, writing zero when the shifted
        # coordinate falls outside the input.
        x = inputs[0]
        b, c, h, w = x.shape
        y = cuda.cupy.empty_like(x)
        cuda.elementwise(
            'raw T x, int32 c, int32 h, int32 w,'
            'int32 kh, int32 kw,'
            'int32 dy, int32 dx',
            'T y',
            '''
               int b0 = i / (c * h * w);
               int rest = i % (c * h * w);
               int c0 = rest / (h * w);
               rest %= h * w;
               int out_row = rest / w;
               int out_col = rest % w;
               int n_groups = kh * kw;
               int group_size = c / n_groups;
               int group_idx = c0 / group_size;
               // Make sure that center group is last
               if (group_idx == (n_groups - 1) / 2) {
                   group_idx = n_groups - 1;
               } else if (group_idx == n_groups - 1) {
                   group_idx = (n_groups - 1) / 2;
               }
               int ky = (group_idx / kw) - kh / 2;
               int kx = (group_idx % kw) - kw / 2;
               if (group_idx >= n_groups) {
                   ky = 0;
                   kx = 0;
               }
               int in_row = -ky * dy + out_row;
               int in_col = -kx * dx + out_col;
               if (in_row >= 0 && in_row < h && in_col >= 0 && in_col < w) {
                   y = x[b0 * c * h * w + c0 * h * w + in_row * w + in_col];
               } else {
                   y = 0;
               }
            ''',
            'shift_gpu')(x, c, h, w, self.kh, self.kw, self.dy, self.dx, y)
        return y,
    def backward(self, indexes, grad_outputs):
        # Shifting the gradient with the dilation negated undoes the
        # forward shift, so the backward pass is itself a Shift.
        return shift(grad_outputs[0], ksize=(self.kh, self.kw),
                     dilate=(-self.dy, -self.dx)),
def shift(x, ksize=3, dilate=1):
    """Shift function.

    See: `Shift: A Zero FLOP, Zero Parameter Alternative to Spatial \
Convolutions <https://arxiv.org/abs/1711.08141>`_

    Args:
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`):
            Input variable of shape :math:`(n, c, h, w)`.
        ksize (int or pair of ints): Size of filters (a.k.a. kernels).
            ``ksize=k`` and ``ksize=(k, k)`` are equivalent.
        dilate (int or pair of ints): Dilation factor of filter applications.
            ``dilate=d`` and ``dilate=(d, d)`` are equivalent.

    Returns:
        ~chainer.Variable:
            Output variable of same shape as ``x``.

    """
    # Build the function node and apply it to the single input; apply()
    # returns a one-element tuple of output variables.
    outputs = Shift(ksize, dilate).apply((x,))
    return outputs[0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.