text stringlengths 0 1.05M | meta dict |
|---|---|
""" All Targets Defined Unit Tests for annotated indicators in Program Page indicator list
Requirements for "True":
- Indicator has target frequency
- if LOP, indicator has lop_target
- if MID_END, indicator has both MID and END periodic_target and also target values for both
- if EVENT, indicator has at least one periodic_target and all have target values
- if time aware, indicator has all periods from reporting_period_start to reporting_period_end with targets
"""
import datetime
from factories import (
indicators_models as fact_i,
workflow_models as fact_w
)
from indicators.models import (
Indicator,
PeriodicTarget
)
from indicators.queries import MetricsIndicator
from django import test
class TargetsDefinedTestBase(object):
    """Mixin providing indicator/target factories for all_targets_defined tests."""
    # Subclasses set the frequency under test; None exercises the "no frequency" case.
    target_frequency = None
    # Lazily-created program shared by indicators within one test.
    program = None

    def get_indicator(self):
        """Return a new indicator on self.program (creating the program if needed)."""
        if not self.program:
            self.program = fact_w.ProgramFactory()
        return fact_i.IndicatorFactory(
            program=self.program,
            target_frequency=self.target_frequency
        )

    def get_target(self, indicator, target=100, start_date=None, end_date=None):
        """Attach a periodic target to indicator; dates default to 2014-01-01..2015-01-01."""
        if start_date is None:
            start_date = datetime.date(2014, 1, 1)
        if end_date is None:
            end_date = datetime.date(2015, 1, 1)
        return fact_i.PeriodicTargetFactory(
            indicator=indicator,
            start_date=start_date,
            end_date=end_date,
            target=target
        )

    def get_annotated_indicator(self, indicator=None):
        """Re-fetch the indicator through MetricsIndicator with the 'targets' annotations."""
        if indicator is None:
            indicator = self.get_indicator()
        return MetricsIndicator.objects.with_annotations('targets').get(pk=indicator.pk)
@test.tag('targets', 'metrics', 'fast', 'core')
class TestNoTargetFrequency(test.TestCase, TargetsDefinedTestBase):
    """if indicator has no target frequency set, it should report all_targets_defined=False"""

    def test_no_target_frequency_false(self):
        """no target frequency set => all_targets_defined=False"""
        annotated_indicator = self.get_annotated_indicator()
        self.assertFalse(annotated_indicator.all_targets_defined)
@test.tag('targets', 'metrics', 'fast', 'core')
class TestLOPIndicatorTargetsDefined(test.TestCase, TargetsDefinedTestBase):
    """if indicator is LOP frequency, it should report all_targets_defined iff lop_target is set"""
    target_frequency = Indicator.LOP

    def test_no_lop_target(self):
        """LOP indicator with no lop_target should report all_targets_defined=False"""
        annotated_indicator = self.get_annotated_indicator()
        self.assertFalse(annotated_indicator.all_targets_defined)

    def test_lop_target_set(self):
        """LOP indicator with lop_target set should report all_targets_defined=True"""
        good_indicator = self.get_indicator()
        good_indicator.lop_target = 100
        good_indicator.save()
        annotated_indicator = self.get_annotated_indicator(good_indicator)
        self.assertTrue(annotated_indicator.all_targets_defined)
class RequiresTargetsBase(TargetsDefinedTestBase):
    """Mixin adding the shared "no targets at all" failure case for non-LOP frequencies."""

    def test_no_targets_set(self):
        """non-LOP indicator with no targets set should report all_targets_defined=False"""
        annotated_indicator = self.get_annotated_indicator()
        self.assertFalse(annotated_indicator.all_targets_defined)
@test.tag('targets', 'metrics', 'fast', 'core')
class TestMIDENDIndicatorTargetsDefined(test.TestCase, RequiresTargetsBase):
    """if indicator is MID_END frequency, it should report all_targets_defined iff:
        - both targets exist"""
    target_frequency = Indicator.MID_END

    def get_midline_target(self, indicator):
        """Attach a MIDLINE periodic target to indicator and return it."""
        target = self.get_target(indicator)
        target.period = PeriodicTarget.MIDLINE
        target.customsort = 0
        target.save()
        return target

    def get_endline_target(self, indicator):
        """Attach an ENDLINE periodic target to indicator and return it."""
        target = self.get_target(indicator)
        target.period = PeriodicTarget.ENDLINE
        target.customsort = 1
        target.save()
        return target

    def test_no_endline_target(self):
        """MID_END indicator with no endline target should report all_targets_defined=False"""
        indicator = self.get_indicator()
        self.get_midline_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertFalse(annotated_indicator.all_targets_defined)

    def test_no_midline_target(self):
        """MID_END indicator with no midline target should report all_targets_defined=False"""
        indicator = self.get_indicator()
        self.get_endline_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertFalse(annotated_indicator.all_targets_defined)

    def test_midline_and_endline_targets(self):
        """MID_END indicator with both midline and endline targets should report all_targets_defined=True"""
        indicator = self.get_indicator()
        self.get_midline_target(indicator)
        self.get_endline_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertTrue(annotated_indicator.all_targets_defined)
@test.tag('targets', 'metrics', 'fast')
class TestEVENTIndicatorTargetsDefined(test.TestCase, RequiresTargetsBase):
    """if indicator is EVENT frequency, it should report all_targets_defined iff:
        - at least 1 target"""
    target_frequency = Indicator.EVENT

    def get_event_target(self, indicator):
        """Attach one event periodic target to indicator."""
        return self.get_target(indicator)

    def test_one_event_target(self):
        """EVENT indicator with 1 event target should report all_targets_defined=True"""
        indicator = self.get_indicator()
        self.get_event_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertTrue(annotated_indicator.all_targets_defined)

    def test_two_event_targets(self):
        """EVENT indicator with 2 event targets should report all_targets_defined=True"""
        indicator = self.get_indicator()
        self.get_event_target(indicator)
        self.get_event_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertTrue(annotated_indicator.all_targets_defined)
class TestReportingPeriodAnnotations(test.TestCase):
    """indicator metrics queries require a months annotation based on program dates"""

    def get_indicator_for_program(self, start_date, end_date):
        """Return a months-annotated indicator on a program with the given reporting period."""
        program = fact_w.ProgramFactory(reporting_period_start=start_date, reporting_period_end=end_date)
        indicator = fact_i.IndicatorFactory(
            program=program
        )
        return MetricsIndicator.objects.with_annotations('months').get(pk=indicator.pk)

    def test_one_year_in_the_past_program_annotation(self):
        """one year program in the past should show program_months=12"""
        annotated_indicator = self.get_indicator_for_program(
            datetime.date(2015, 1, 1),
            datetime.date(2015, 12, 31)
        )
        self.assertEqual(annotated_indicator.program_months, 12)

    def test_one_year_overlapping_present_annotation(self):
        """one year program ending in the future should show program_months=12"""
        # Anchor to the first of last month so the one-year span always covers "now".
        last_month = datetime.date.today() - datetime.timedelta(days=40)
        start_date = datetime.date(last_month.year, last_month.month, 1)
        end_date = datetime.date(last_month.year+1, last_month.month, 1) - datetime.timedelta(days=1)
        annotated_indicator = self.get_indicator_for_program(
            start_date,
            end_date
        )
        self.assertEqual(annotated_indicator.program_months, 12)

    def test_35_month_in_the_past_annotation(self):
        """35 month program should show program_months=35"""
        annotated_indicator = self.get_indicator_for_program(
            datetime.date(2014, 7, 1),
            datetime.date(2017, 5, 31)
        )
        self.assertEqual(annotated_indicator.program_months, 35)
class TimeAwareTargetsBase(RequiresTargetsBase):
    """Shared tests for time-aware target frequencies.

    Subclasses set `month_count`, the length in months of one target period.
    """

    def get_program(self, months):
        """Create a program spanning `months` whole months starting 2012-04-01.

        Stores the program on self.program (so get_indicator() attaches to it)
        and also returns it, so callers can bind the result directly.
        (Previously returned None although callers assigned the result.)
        """
        start_date = datetime.date(2012, 4, 1)
        # End date is the last day of the final month: compute the first day
        # of the month just past the span, then step back one day.
        end_year = 2012 + (4 + months) // 12
        end_month = (4 + months) % 12
        if end_month == 0:
            end_month = 12
            end_year -= 1
        end_date = datetime.date(end_year, end_month, 1) - datetime.timedelta(days=1)
        self.program = fact_w.ProgramFactory(
            reporting_period_start=start_date,
            reporting_period_end=end_date
        )
        return self.program

    def test_two_period_program_one_target_fails(self):
        """TIME_AWARE indicator with a two period program and 1 target set should show all_targets_defined=False"""
        self.get_program(months=2*self.month_count)
        indicator = self.get_indicator()
        self.get_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertFalse(annotated_indicator.all_targets_defined)

    def test_two_period_program_two_targets_passes(self):
        """TIME_AWARE indicator with a two period program and 2 targets set should show all_targets_defined=True"""
        self.get_program(months=2*self.month_count)
        indicator = self.get_indicator()
        self.get_target(indicator)
        self.get_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertTrue(annotated_indicator.all_targets_defined)

    def test_two_period_plus_program_two_targets_fails(self):
        """TIME_AWARE indicator with a 2+ period program and 2 targets set should show all_targets_defined=False"""
        self.get_program(months=(2*self.month_count)+1)
        indicator = self.get_indicator()
        self.get_target(indicator)
        self.get_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertFalse(annotated_indicator.all_targets_defined)

    def test_one_period_plus_program_two_targets_passes(self):
        """TIME_AWARE indicator with a 1+ period program and 2 targets set should show all_targets_defined=True"""
        self.get_program(months=(2*self.month_count)-1)
        indicator = self.get_indicator()
        self.get_target(indicator)
        self.get_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertTrue(annotated_indicator.all_targets_defined)
@test.tag('targets', 'metrics', 'fast')
class TestANNUALIndicatorTargetsDefined(test.TestCase, TimeAwareTargetsBase):
    """if indicator is ANNUAL frequency, it should report all_targets_defined iff:
        - target count = number of years in program"""
    target_frequency = Indicator.ANNUAL
    month_count = 12  # one target period per 12 months
@test.tag('targets', 'metrics', 'fast', 'core')
class TestSEMIANNUALIndicatorTargetsDefined(test.TestCase, TimeAwareTargetsBase):
    """if indicator is SEMIANNUAL frequency, it should report all_targets_defined iff:
        - target count = number of half-years in program"""
    target_frequency = Indicator.SEMI_ANNUAL
    month_count = 6  # one target period per 6 months
@test.tag('targets', 'metrics', 'fast')
class TestTRIANNUALIndicatorTargetsDefined(test.TestCase, TimeAwareTargetsBase):
    """if indicator is TRI-ANNUAL frequency, it should report all_targets_defined iff:
        - target count = number of tri-annual periods in program"""
    target_frequency = Indicator.TRI_ANNUAL
    month_count = 4  # one target period per 4 months
@test.tag('targets', 'metrics', 'fast')
class TestQUARTERLYIndicatorTargetsDefined(test.TestCase, TimeAwareTargetsBase):
    """if indicator is QUARTERLY frequency, it should report all_targets_defined iff:
        - target count = number of quarters in program"""
    target_frequency = Indicator.QUARTERLY
    month_count = 3  # one target period per 3 months
@test.tag('targets', 'metrics', 'fast')
class TestMONTHLYIndicatorTargetsDefined(test.TestCase, TimeAwareTargetsBase):
    """if indicator is MONTHLY frequency, it should report all_targets_defined iff:
        - target count = number of months in program"""
    target_frequency = Indicator.MONTHLY
    month_count = 1  # one target period per month

    def test_thirteen_month_thirteen_targets_passes(self):
        """thirteen months, thirteen targets - should show all_targets_defined=True"""
        # get_program stores the program on self; the previous unused
        # `program =` binding (get_program returned None) is dropped.
        self.get_program(13)
        indicator = self.get_indicator()
        for _ in range(13):
            self.get_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertTrue(annotated_indicator.all_targets_defined)

    def test_thirteen_month_twelve_targets_fails(self):
        """thirteen months, twelve targets - should show all_targets_defined=False"""
        self.get_program(13)
        indicator = self.get_indicator()
        for _ in range(12):
            self.get_target(indicator)
        annotated_indicator = self.get_annotated_indicator(indicator)
        self.assertFalse(annotated_indicator.all_targets_defined)
"repo_name": "mercycorps/TolaActivity",
"path": "indicators/tests/program_metric_tests/indicator_unit/all_targets_defined_queries_unit_tests.py",
"copies": "1",
"size": "12923",
"license": "apache-2.0",
"hash": 4851417763569795000,
"line_mean": 43.412371134,
"line_max": 115,
"alpha_frac": 0.6907064923,
"autogenerated": false,
"ratio": 3.9459541984732827,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002442175904962507,
"num_lines": 291
} |
"""All Task related methods."""
class Tasks:
    """Accessor for a database's server-side task HTTP endpoints."""

    # Base path of the tasks API.
    URL = '/_api/tasks'

    def __init__(self, database):
        """Remember the database whose connection performs the requests."""
        self.database = database

    def __call__(self):
        """Return all active tasks registered on the server."""
        resp = self.database.action.get(self.URL)
        resp.raise_for_status()
        return resp.json()

    def fetch(self, task_id):
        """Return the task identified by task_id."""
        resp = self.database.action.get('%s/%s' % (self.URL, task_id))
        resp.raise_for_status()
        return resp.json()

    def create(
        self, name, command, params=None,
        period=None, offset=None, task_id=None
    ):
        """Register a task running `command`.

        `period` and `offset` are sent only when given; supplying `task_id`
        creates the task under that explicit id.
        """
        task = {'name': name, 'command': command, 'params': params}
        if period is not None:
            task['period'] = period
        if offset is not None:
            task['offset'] = offset
        url = self.URL
        if task_id is not None:
            task['id'] = task_id
            url = '%s/%s' % (self.URL, task_id)
        resp = self.database.action.post(url, json=task)
        resp.raise_for_status()
        return resp.json()

    def delete(self, task_id):
        """Remove the task identified by task_id."""
        resp = self.database.action.delete('%s/%s' % (self.URL, task_id))
        resp.raise_for_status()
        return resp.json()
| {
"repo_name": "tariqdaouda/pyArango",
"path": "pyArango/tasks.py",
"copies": "1",
"size": "1754",
"license": "apache-2.0",
"hash": 2775297097048492500,
"line_mean": 29.2413793103,
"line_max": 67,
"alpha_frac": 0.5421892816,
"autogenerated": false,
"ratio": 3.9954441913439633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5037633472943963,
"avg_score": null,
"num_lines": null
} |
__all__ = ['TasProcessor']
import logging
from indra.statements import Inhibition, Agent, Evidence
from indra.statements.validate import assert_valid_db_refs
from indra.ontology.standardize import standardize_name_db_refs
from indra.databases import hgnc_client, chembl_client, lincs_client
logger = logging.getLogger(__name__)

# Human-readable affinity bins keyed by the TAS class codes used in the data.
CLASS_MAP = {'1': 'Kd < 100nM', '2': '100nM < Kd < 1uM',
             '3': '1uM < Kd < 10uM', '10': 'Kd > 10uM'}

# Shared client instance used to resolve HMS-LINCS small molecule names.
lincs_client_obj = lincs_client.LincsClient()
class TasProcessor(object):
    """A processor for the Target Affinity Spectrum data table.

    Iterates over `data` rows and collects INDRA Inhibition statements in
    self.statements. Rows whose affinity class exceeds affinity_class_limit
    are skipped; named_only / standardized_only further restrict which drug
    agents are kept (see _extract_drugs).
    """

    def __init__(self, data, affinity_class_limit=2, named_only=False,
                 standardized_only=False):
        self._data = data
        self.affinity_class_limit = affinity_class_limit
        self.named_only = named_only
        self.standardized_only = standardized_only
        self.statements = []
        for row in data:
            # Skip rows that are above the affinity class limit
            if int(row['tas']) > affinity_class_limit:
                continue
            self._process_row(row)
        return

    def _process_row(self, row):
        """Turn one data row into zero or more Inhibition statements."""
        drugs = self._extract_drugs(row['compound_ids'], row['lspci_id'])
        prot = self._extract_protein(row['entrez_gene_symbol'],
                                     row['entrez_gene_id'])
        evidences = self._make_evidences(row['tas'], row['references'])
        # NOTE: there are several entries in this data set that refer to
        # non-human Entrez genes, e.g.
        # https://www.ncbi.nlm.nih.gov/gene/3283880
        # We skip these for now because resources for Entrez-based
        # mappings for non-human genes are not integrated, and would cause
        # pre-assembly issues.
        if 'HGNC' not in prot.db_refs:
            return
        for drug in drugs:
            self.statements.append(Inhibition(drug, prot, evidence=evidences))

    def _extract_drugs(self, compound_ids, lspci_id):
        """Return deduplicated drug Agents for a pipe-separated compound id list."""
        drugs = []
        for id_ in compound_ids.split('|'):
            db_refs = {'LSPCI': lspci_id}
            if id_.startswith('CHEMBL'):
                db_refs['CHEMBL'] = id_
            elif id_.startswith('HMSL'):
                db_refs['HMS-LINCS'] = id_.split('HMSL')[1]
            else:
                logger.warning('Unhandled ID type: %s' % id_)
            # Name standardization finds correct names but because
            # ChEMBL is incomplete as a local resource, we don't
            # universally standardize its names, instead, we look
            # it up explicitly when necessary.
            name, db_refs = standardize_name_db_refs(db_refs)
            if name is None:
                # This is one way to detect that the drug could not be
                # standardized beyond just its name so in the
                # standardized_only condition, we skip this drug
                if self.standardized_only:
                    continue
                elif 'HMS-LINCS' in db_refs:
                    name = \
                        lincs_client_obj.get_small_molecule_name(
                            db_refs['HMS-LINCS'])
                elif 'CHEMBL' in db_refs:
                    name = chembl_client.get_chembl_name(db_refs['CHEMBL'])
            # If name is still None, we just use the ID as the name
            if name is None:
                # With the named_only restriction, we skip drugs without
                # a proper name.
                if self.named_only:
                    continue
                name = id_
            assert_valid_db_refs(db_refs)
            drugs.append(Agent(name, db_refs=db_refs))
        # Deduplicate by matches_key, keeping one Agent per distinct key.
        drugs = list({agent.matches_key():
                      agent for agent in drugs}.values())
        return drugs

    def _extract_protein(self, name, gene_id):
        """Return a protein Agent grounded to Entrez (and HGNC when mappable)."""
        refs = {'EGID': gene_id}
        hgnc_id = hgnc_client.get_hgnc_from_entrez(gene_id)
        if hgnc_id is not None:
            refs['HGNC'] = hgnc_id
        standard_name, db_refs = standardize_name_db_refs(refs)
        # Only override the given name if standardization produced one.
        if standard_name:
            name = standard_name
        assert_valid_db_refs(db_refs)
        return Agent(name, db_refs=db_refs)

    def _make_evidences(self, class_min, references):
        """Build one Evidence per pipe-separated 'scheme:id' reference string."""
        evidences = []
        for reference in references.split('|'):
            pmid, source_id, text_refs = None, None, None
            annotations = {'class_min': CLASS_MAP[class_min]}
            ref, id_ = reference.split(':')
            if ref == 'pubmed':
                pmid = id_
                text_refs = {'PMID': pmid}
            elif ref == 'doi':
                text_refs = {'DOI': id_}
            else:
                # Unknown reference scheme: keep the raw string as source_id.
                source_id = reference
            ev = Evidence(source_api='tas', source_id=source_id, pmid=pmid,
                          annotations=annotations, epistemics={'direct': True},
                          text_refs=text_refs)
            evidences.append(ev)
        return evidences
| {
"repo_name": "johnbachman/belpy",
"path": "indra/sources/tas/processor.py",
"copies": "1",
"size": "4974",
"license": "mit",
"hash": 6030239107312334000,
"line_mean": 40.1074380165,
"line_max": 79,
"alpha_frac": 0.5526739043,
"autogenerated": false,
"ratio": 3.7230538922155687,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47757277965155687,
"avg_score": null,
"num_lines": null
} |
# All-Terrain Pi control code
# By Ian Renton
# http://robots.ianrenton.com/atp
# This is the main control code. It runs a web server to serve the control web
# page, then listens for websocket connections. The data received over the
# websockets is used to drive the motors.
# This script runs the web server on port 80 and assumes it can exec
# 'shutdown -h now' so should be run as root. To run as non-root, change port
# to a high number and set up sudo appropriately if shutdown function is
# needed. (Port number also needs setting on 'ws://' URLs in interface.html.)
import os
import cherrypy
import PicoBorgRev
from time import time, sleep
from threading import Thread
from cherrypy.lib.static import serve_file
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import WebSocket
# Make sure we can serve static files out of current dir
current_dir = os.path.dirname(os.path.abspath(__file__))

# Set up and run the server
cherrypy.server.socket_host = '0.0.0.0'
cherrypy.server.socket_port = 80  # requires root; see header note for non-root use
cherrypy.engine.timeout_monitor.unsubscribe()
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()

# Setup the PicoBorg Reverse
PBR = PicoBorgRev.PicoBorgRev()
PBR.Init()
PBR.ResetEpo()

# Watchdog timer
stopWatchdog = False  # set True at shutdown to end the Watchdog thread's loop
lastRxTime = int(round(time() * 1000))  # ms timestamp of last websocket message
# CherryPy server main class
class CherryPy(object):
    """HTTP request handlers for the control web app."""

    # When the root URL is visited, open interface.html
    @cherrypy.expose
    def index(self):
        return serve_file(current_dir + '/interface.html')

    # When the websocket URL is visited, run the handler
    @cherrypy.expose
    def ws(self):
        # The websocket tool upgrades the connection; the handler instance
        # is exposed on the request by the plugin.
        handler = cherrypy.request.ws_handler

    # When the "/s" URL is visited, shut down the computer. This isn't linked
    # anywhere on the web interface to avoid unintentional clicks, but can be
    # visited from the browser address bar. Allows a soft shutdown of the Pi
    # instead of pulling the power or SSHing in to run the shutdown command.
    @cherrypy.expose
    def s(self):
        os.system('shutdown -h now')
        return 'Shutting down.'
# Class that handles the websocket requests and acts on the data supplied
class CommandWebSocket(WebSocket):
    """Websocket handler translating client messages into motor demands."""

    def received_message(self, message):
        global lastRxTime
        # Received a demand, so reset watchdog
        lastRxTime = int(round(time() * 1000))
        # Convert message to motor demand
        # NOTE(review): assumes message.data behaves as a str (command char +
        # number) - confirm under the Python/ws4py versions in use.
        command = message.data[:1]  # 's': speed (motor 1), 't': turn (motor 2)
        value = float(message.data[1:])  # -100 to 100
        if (command == 's'):
            PBR.SetMotor1(value / 100.0)
        elif (command == 't'):
            PBR.SetMotor2(value / 100.0)
        print('Received command: ' + message.data + ' at ' + str(lastRxTime))
        # Respond with something so the client knows we're listening
        self.send('')
# Watchdog function that will demand motors are zeroed if we lose comms
class Watchdog(Thread):
    """Background thread that stops both motors if no command arrives for 1s."""

    def run(self):
        global stopWatchdog
        while not stopWatchdog:
            print('Watchdog check, now ' + str(int(round(time() * 1000))) + ' last Rx at ' + str(lastRxTime))
            # Over a second since the last demand: fail safe to stopped.
            if (int(round(time() * 1000)) > lastRxTime + 1000):
                print('No comms, zeroing motor output')
                PBR.SetMotor1(0)
                PBR.SetMotor2(0)
            sleep(1)
# Quick fire of motors to show that we're up and running
print('Starting up...')
PBR.SetMotor2(100)
sleep(0.05)
PBR.SetMotor2(0)

# Start watchdog timer
print('Starting watchdog timer...')
w = Watchdog()
w.daemon = True  # don't block interpreter exit
w.start()

# Run CherryPy (blocks until the server is stopped)
print('Starting web server...')
cherrypy.quickstart(CherryPy(), '/', config={'/ws': {'tools.websocket.on': True, 'tools.websocket.handler_cls': CommandWebSocket}, '/gyro.js': {'tools.staticfile.on': True, 'tools.staticfile.filename': current_dir + '/gyro.js'}})

# Cancel watchdog
print('Shutting down...')
stopWatchdog = True
| {
"repo_name": "ianrenton/All-Terrain-Pi",
"path": "home/pi/atp/atp.py",
"copies": "1",
"size": "3800",
"license": "bsd-3-clause",
"hash": 7275963528613456000,
"line_mean": 32.3333333333,
"line_max": 229,
"alpha_frac": 0.7134210526,
"autogenerated": false,
"ratio": 3.47985347985348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9653241944219066,
"avg_score": 0.00800651764688251,
"num_lines": 114
} |
__all__ = ['TestPost', 'TestDraft', 'TestTag', 'TestPostOperator']
import time
import unittest
from datetime import datetime
from ..helpers import TaoblogTestCase
from taoblog.models.post import Post, PostText, PostOperator, Tag, Draft
from taoblog.models import ModelError
class TestTag(TaoblogTestCase):
    """Tag name validation tests."""

    def setUp(self):
        self.db_setup()

    def tearDown(self):
        self.db_teardown()

    def test_name(self):
        """Whitespace characters and '+' are rejected in tag names."""
        self.assertRaises(ModelError, Tag, 'hello world')
        self.assertRaises(ModelError, Tag, 'hello\tworld')
        self.assertRaises(ModelError, Tag, 'hello\nworld')
        self.assertRaises(ModelError, Tag, 'hello\rworld')
        self.assertRaises(ModelError, Tag, 'hello+world')
class TestDraft(TaoblogTestCase):
    """Draft model tests."""

    def setUp(self):
        self.db_setup()

    def tearDown(self):
        self.db_teardown()

    def test_autoupdate(self):
        """saved_at is set on commit and advances when the draft changes."""
        draft = Draft(title='title', text='text')
        self.session.add(draft)
        self.session.commit()
        self.assertIsNotNone(draft.saved_at)
        old_date = draft.saved_at
        draft.title = 'new title'
        # Wait before re-committing so saved_at visibly advances.
        # NOTE(review): presumably the column has second resolution - confirm.
        time.sleep(1)
        self.session.commit()
        self.assertTrue(draft.saved_at>old_date)
class TestPost(TaoblogTestCase):
    """Post model tests: slug validation, timestamps, tags, content, queries."""

    def setUp(self):
        self.db_setup()

    def tearDown(self):
        self.db_teardown()

    def test_slug(self):
        """Invalid slugs raise ModelError; permalink embeds year/month/slug."""
        post = Post(title='hello world', text='world',
                    slug='hello-world', author_id=1)
        self.assertEqual(post.slug, 'hello-world')
        # invalid slug
        def _set_slug(slug):
            post.slug = slug
        self.assertRaises(ModelError, _set_slug, 'this contains spaces')
        self.assertRaises(ModelError, _set_slug, 'this-contains-newline\n')
        self.assertRaises(ModelError, _set_slug, 'this-contains-newline\r')
        self.assertRaises(ModelError, _set_slug, 'this-contains/slash')
        self.assertRaises(ModelError, _set_slug, 'this-contains-?')
        self.assertRaises(ModelError, _set_slug, '')
        self.assertRaises(ModelError, _set_slug, ' ')
        self.assertRaises(ModelError, _set_slug, '\t')
        self.assertEqual(post.permalink, '%d/%d/%s' % (datetime.utcnow().year,
                                                       datetime.utcnow().month,
                                                       post.slug))

    def test_date(self):
        """created_at is set on first commit; updated_at only after an edit."""
        post = Post(title='hello world', text='world',
                    slug='hello-world', author_id=1)
        self.assertIsNone(post.created_at)
        self.assertIsNone(post.updated_at)
        self.session.add(post)
        self.session.commit()
        self.assertIsNotNone(post.created_at)
        self.assertIsNone(post.updated_at)
        post.text = 'new world'
        self.session.commit()
        self.assertIsNotNone(post.updated_at)

    def test_tag(self):
        """Tag add/remove/set/clear with case-insensitive dedup and orphan cleanup."""
        clojure = Post(title='clojure lisp', text='',
                       slug='clojure-lisp', author_id=1)
        scheme = Post(title='scheme lisp', text='',
                      slug='scheme-lisp', author_id=1)
        # post not added to session, raise error
        self.assertRaises(RuntimeError, clojure.add_tags, ['clojure'])
        self.assertRaises(RuntimeError, clojure.remove_tags, ['clojure'])
        self.assertRaises(RuntimeError, clojure.set_tags, ['clojure'])
        self.assertRaises(RuntimeError, clojure.clear_tags)
        self.session.add(clojure)
        self.session.add(scheme)
        # add tags
        # post tags
        # clojure: Clojure, LISP
        # scheme: Scheme, LISP
        self.assertEqual(clojure.add_tags(['Clojure'])[0].name, 'Clojure')
        self.assertEqual(clojure.add_tags(['LISP'])[0].name, 'LISP')
        self.assertEqual(set(clojure.tags), {'Clojure', 'LISP'})
        self.assertEqual(scheme.add_tags(['Scheme'])[0].name, 'Scheme')
        self.assertEqual(scheme.add_tags(['SCHEME']), [])  # no new tag added
        self.assertEqual(scheme.add_tags(['scheme']), [])  # no new tag added
        self.assertEqual(scheme.add_tags(['lisp'])[0].name, 'LISP')
        self.assertEqual(set(scheme.tags), {'Scheme', 'LISP'})
        self.assertEqual(set(clojure.tags), {'Clojure', 'LISP'})
        # remove tags
        scheme.remove_tags(['SCHEME'])
        self.assertIsNone(self.session.query(Tag).filter_by(name='Scheme').first())
        scheme.remove_tags(['lisp'])
        self.assertEqual(self.session.query(Tag).filter_by(name='LISP').first().name, 'LISP')
        self.assertEqual(scheme.tags, [])
        # clear tags
        clojure.clear_tags()
        self.assertEqual(clojure.tags, [])
        self.assertIsNone(self.session.query(Tag).filter_by(name='Clojure').first())
        self.assertIsNone(self.session.query(Tag).first())
        scheme.set_tags(['SCHEME', 'LISP', 'Scheme', 'Lisp'])
        self.assertEqual(set(tag.name for tag in self.session.query(Tag).all()), {'SCHEME', 'LISP'})
        self.assertEqual(scheme.set_tags(['scheme', 'lisp', 'scheme', 'lisp']), ([], []))  # add none, remove none

    def test_content(self):
        """content wraps text in a <p> element and tracks text changes."""
        post = Post(title='hello world', text='world',
                    slug='hello-world', author_id=1)
        self.assertEqual(post.content, '<p>%s</p>\n' % post.text)
        post.text = 'new world'
        self.assertEqual(post.content, '<p>%s</p>\n' % post.text)

    def test_query(self):
        """Posts can be queried by title and, via the PostText join, by text."""
        post = Post(title='a title', text='the first post',
                    slug='a-title', author_id=1)
        self.session.add(post)
        self.session.commit()
        result = self.session.query(Post).filter_by(title='a title').one()
        self.assertEqual(result.title, post.title)
        post = Post(title='a title', text='the second post',
                    slug='a-title', author_id=1)
        self.session.add(post)
        self.session.commit()
        result = self.session.query(Post).join(PostText)\
            .filter(PostText.text=='the second post').one()
        self.assertEqual(result.text, post.text)
class TestPostOperator(TaoblogTestCase):
    """PostOperator behaviour tests."""

    def setUp(self):
        self.db_setup()

    def tearDown(self):
        self.db_teardown()

    def test_create_post(self):
        """Created posts are retrievable; duplicate slugs are rejected."""
        post = Post(title='hello', text='world',
                    slug='hello', author_id=1)
        op = PostOperator(self.session)
        op.create_post(post)
        self.assertEqual(op.get_post(post.id), post)
        # same slug is not allowed
        another_post = Post(title='hello', text='world',
                            slug='hello', author_id=1)
        self.assertRaises(ModelError, op.create_post, another_post)

    def test_get_posts(self):
        """Public post listing and tag sets update as posts are trashed."""
        op = PostOperator(self.session)
        # create post
        post = Post(title='hello', text='world',
                    slug='hello-world', author_id=1)
        op.create_post(post)
        self.assertEqual(op.get_post(post.id), post)
        # get public posts
        haskell = Post(title='haskell-2012', text='world3',
                       slug='hehe', author_id=1)
        haskell.created_at = datetime(year=2012, month=4, day=29)
        op.create_post(haskell)
        haskell.add_tags(['haskell', 'fp'])
        scheme = Post(title='scheme-2010', text='world2',
                      slug='haha', author_id=1)
        scheme.created_at = datetime(year=2010, month=1, day=16)
        op.create_post(scheme)
        scheme.add_tags(['scheme', 'fp'])
        clojure = Post(title='clojure-2009', text='world1',
                       slug='haha', author_id=1)
        clojure.created_at = datetime(year=2009, month=12, day=13)
        op.create_post(clojure)
        clojure.add_tags(['clojure', 'fp'])
        # Listing is newest-first.
        posts, more = op.get_public_posts()
        self.assertEqual(4, len(posts))
        self.assertEqual(posts, [post, haskell, scheme, clojure])
        self.assertFalse(more)  # no more
        self.assertEqual(set([str(tag) for tag in op.get_public_tags()]),
                         {'clojure', 'fp', 'scheme', 'haskell'})
        op.trash_post(post)
        posts, more = op.get_public_posts()
        self.assertEqual(posts, [haskell, scheme, clojure])
        self.assertFalse(more)
        # scheme will be removed from public tags
        op.trash_post(scheme)
        self.assertEqual(set([tag.name for tag in op.get_public_tags()]),
                         {'clojure', 'fp', 'haskell'})
        self.assertEqual(set([str(tag) for tag in op.get_trash_tags()]),
                         {'scheme', 'fp'})
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "ptpt/taoblog",
"path": "tests/models/post.py",
"copies": "1",
"size": "8547",
"license": "mit",
"hash": -2229711651081767000,
"line_mean": 38.7534883721,
"line_max": 114,
"alpha_frac": 0.5883935884,
"autogenerated": false,
"ratio": 3.6124260355029585,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47008196239029587,
"avg_score": null,
"num_lines": null
} |
"""All tests for views."""
from django.http import HttpRequest
from django.test import TestCase
from django.template.loader import render_to_string
from django.contrib.messages.storage.fallback import FallbackStorage
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.shortcuts import get_current_site
from geokey import version
from geokey.users.tests.model_factories import UserFactory
from ..views import IndexPage
class IndexPageTest(TestCase):
    """Test index page."""

    def setUp(self):
        """Set up test."""
        self.request = HttpRequest()
        self.request.method = 'GET'
        self.view = IndexPage.as_view()
        # Attach session + message storage so views using the messages
        # framework work outside the full middleware stack.
        setattr(self.request, 'session', 'session')
        messages = FallbackStorage(self.request)
        setattr(self.request, '_messages', messages)

    def test_get_with_anonymous(self):
        """
        Test GET with anonymous user.

        It should redirect to login page.
        """
        self.request.user = AnonymousUser()
        response = self.view(self.request)
        self.assertEqual(response.status_code, 302)
        self.assertIn('/admin/account/login/', response['location'])

    def test_get_with_user(self):
        """
        Test GET with authenticated user.

        It should render the page with the name of my awesome extension.
        """
        user = UserFactory.create()
        self.request.user = user
        response = self.view(self.request).render()
        rendered = render_to_string(
            'ext_index.html',
            {
                'user': user,
                'PLATFORM_NAME': get_current_site(self.request).name,
                'GEOKEY_VERSION': version.get_version(),
                'name': 'My Awesome Extension'
            }
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content.decode('utf-8'), rendered)
| {
"repo_name": "ExCiteS/geokey-extension-boilerplate",
"path": "geokey_extension/tests/test_views.py",
"copies": "1",
"size": "1899",
"license": "mit",
"hash": -3011853210522531300,
"line_mean": 29.6290322581,
"line_max": 72,
"alpha_frac": 0.6313849394,
"autogenerated": false,
"ratio": 4.286681715575621,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5418066654975621,
"avg_score": null,
"num_lines": null
} |
__all__ = ['TestUser', 'TestUserOperator']
import unittest
from ..helpers import TaoblogTestCase
from taoblog.models.user import *
from taoblog.models import ModelError
class TestUser(TaoblogTestCase):
    """User model validation tests."""

    def setUp(self):
        self.db_setup()

    def tearDown(self):
        self.db_teardown()

    def test_name(self):
        """Blank, one-character and 37-character names are rejected."""
        # empty name
        self.assertRaises(ModelError, User,
                          name='\n\t ',
                          email='pt@taopeng.me',
                          provider='twitter',
                          identity='asas')
        # short name
        self.assertRaises(ModelError, User,
                          name='a',
                          email='pt@taopeng.me',
                          provider='twitter',
                          identity='aaa')
        # long name
        self.assertRaises(ModelError, User,
                          name='a' * 37,
                          email='pt@taopeng.com',
                          provider='twitter',
                          identity='aaa')

    def test_email(self):
        """Blank emails and emails missing '@' or '.' are rejected."""
        # empty email
        self.assertRaises(ModelError, User,
                          name='pt',
                          email='\t\n ',
                          provider='twitter',
                          identity='aaa')
        # no @
        self.assertRaises(ModelError, User,
                          name='pt',
                          email='pttaopeng.me',
                          provider='twitter',
                          identity='aaa')
        # no dot
        self.assertRaises(ModelError, User,
                          name='pt',
                          email='pt@taopengme',
                          provider='twitter',
                          identity='hhhh')
class TestUserOperator(TaoblogTestCase):
    """Tests for creating and fetching users through UserOperator."""

    def setUp(self):
        self.db_setup()

    def tearDown(self):
        self.db_teardown()

    def test_create_user(self):
        """Created users can be fetched back; duplicate display names
        are allowed as long as provider/identity differ."""
        op = UserOperator(self.session)
        first = User(name='pt',
                     email='pt@gmail.com',
                     provider='openid',
                     identity='a secret')
        op.create_user(first)
        self.assertEqual(op.get_user(first.id), first)
        # same name is ok
        second = User(name='pt',
                      email='pt2@gmail.com',
                      provider='openid2',
                      identity='haha')
        op.create_user(second)
        self.assertEqual(op.get_user(second.id), second)
| {
"repo_name": "ptpt/taoblog",
"path": "tests/models/user.py",
"copies": "1",
"size": "2549",
"license": "mit",
"hash": 5376371276195707000,
"line_mean": 31.2658227848,
"line_max": 68,
"alpha_frac": 0.4464495881,
"autogenerated": false,
"ratio": 4.818525519848771,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5764975107948771,
"avg_score": null,
"num_lines": null
} |
# all the C decls for pyvips
# we keep these together to make switching between ABI and API modes simpler
# we have to pass in the libvips version, since it can come from either
# pkg-config in compile.py (in API mode) or libvips itself in __init__.py
# (in ABI mode)
import sys
def _at_least(features, x, y):
return features['major'] > x or (features['major'] == x and
features['minor'] >= y)
def cdefs(features):
    """Return the C API declarations for libvips.

    features is a dict with the features we want. Some features were only
    added in later libvips versions, for example, and some need to be
    disabled in some FFI modes.

    The returned string is C source suitable for cffi's cdef().
    """

    code = ''

    # apparently the safest way to do this
    is_64bits = sys.maxsize > 2 ** 32

    # GType is an int the size of a pointer ... I don't think we can just use
    # size_t, sadly
    if is_64bits:
        code += '''
            typedef uint64_t GType;
        '''
    else:
        code += '''
            typedef uint32_t GType;
        '''

    code += '''
        typedef void (*GLogFunc) (const char* log_domain,
            int log_level,
            const char* message, void* user_data);
        int g_log_set_handler (const char* log_domain,
            int log_levels,
            GLogFunc log_func, void* user_data);
        extern "Python" void _log_handler_callback (const char*, int,
            const char*, void*);
        void g_log_remove_handler (const char* log_domain, int handler_id);

        typedef struct _VipsImage VipsImage;
        typedef struct _GValue GValue;

        void* g_malloc (size_t size);
        void g_free (void* data);
        int vips_free (void* a);

        void vips_leak_set (int leak);

        char* vips_path_filename7 (const char* path);
        char* vips_path_mode7 (const char* path);

        GType vips_type_find (const char* basename, const char* nickname);
        const char* vips_nickname_find (GType type);

        const char* g_type_name (GType gtype);
        GType g_type_from_name (const char* name);

        typedef void* (*VipsTypeMap2Fn) (GType type, void* a, void* b);
        void* vips_type_map (GType base, VipsTypeMap2Fn fn, void* a, void* b);

        const char* vips_error_buffer (void);
        void vips_error_clear (void);

        typedef struct _GValue {
            GType g_type;
            uint64_t data[2];
        } GValue;

        void g_value_init (GValue* value, GType gtype);
        void g_value_unset (GValue* value);
        GType g_type_fundamental (GType gtype);

        int vips_enum_from_nick (const char* domain,
            GType gtype, const char* str);
        const char *vips_enum_nick (GType gtype, int value);

        void g_value_set_boolean (GValue* value, int v_boolean);
        void g_value_set_int (GValue* value, int i);
        void g_value_set_double (GValue* value, double d);
        void g_value_set_enum (GValue* value, int e);
        void g_value_set_flags (GValue* value, unsigned int f);
        void g_value_set_string (GValue* value, const char* str);
        void vips_value_set_ref_string (GValue* value, const char* str);
        void g_value_set_object (GValue* value, void* object);
        void vips_value_set_array_double (GValue* value,
            const double* array, int n );
        void vips_value_set_array_int (GValue* value,
            const int* array, int n );
        void vips_value_set_array_image (GValue *value, int n);
        typedef int (*VipsCallbackFn)(void* a, void* b);
        void vips_value_set_blob (GValue* value,
            VipsCallbackFn free_fn, void* data, size_t length);

        int g_value_get_boolean (const GValue* value);
        int g_value_get_int (GValue* value);
        double g_value_get_double (GValue* value);
        int g_value_get_enum (GValue* value);
        unsigned int g_value_get_flags (GValue* value);
        const char* g_value_get_string (GValue* value);
        const char* vips_value_get_ref_string (const GValue* value,
            size_t* length);
        void* g_value_get_object (GValue* value);
        double* vips_value_get_array_double (const GValue* value, int* n);
        int* vips_value_get_array_int (const GValue* value, int* n);
        VipsImage** vips_value_get_array_image (const GValue* value, int* n);
        void* vips_value_get_blob (const GValue* value, size_t* length);

        // need to make some of these by hand
        GType vips_interpretation_get_type (void);
        GType vips_operation_flags_get_type (void);
        GType vips_band_format_get_type (void);
        GType vips_token_get_type (void);
        GType vips_saveable_get_type (void);
        GType vips_image_type_get_type (void);

        typedef struct _GData GData;

        typedef struct _GTypeClass GTypeClass;

        typedef struct _GTypeInstance {
            GTypeClass *g_class;
        } GTypeInstance;

        typedef struct _GObject {
            GTypeInstance g_type_instance;
            unsigned int ref_count;
            GData *qdata;
        } GObject;

        typedef struct _GParamSpec {
            GTypeInstance g_type_instance;

            const char* name;
            unsigned int flags;
            GType value_type;
            GType owner_type;

            // private, but cffi in API mode needs these to be able to get the
            // offset of any member
            char* _nick;
            char* _blurb;
            GData* qdata;
            unsigned int ref_count;
            unsigned int param_id;
        } GParamSpec;

        typedef struct _GEnumValue {
            int value;

            const char *value_name;
            const char *value_nick;
        } GEnumValue;

        typedef struct _GEnumClass {
            GTypeClass *g_type_class;

            int minimum;
            int maximum;
            unsigned int n_values;
            GEnumValue *values;
        } GEnumClass;

        void* g_type_class_ref (GType type);

        void g_object_ref (void* object);
        void g_object_unref (void* object);

        void g_object_set_property (GObject* object,
            const char *name, GValue* value);
        void g_object_get_property (GObject* object,
            const char* name, GValue* value);

        typedef struct _VipsObject {
    '''

    # this field changed name in libvips 8.4
    if _at_least(features, 8, 4):
        code += '''
            GObject parent_instance;
        '''
    else:
        code += '''
            GObject parent_object;
        '''

    code += '''
            bool constructed;
            bool static_object;
            void *argument_table;
            char *nickname;
            char *description;
            bool preclose;
            bool close;
            bool postclose;
            size_t local_memory;
        } VipsObject;

        typedef struct _VipsObjectClass VipsObjectClass;

        typedef struct _VipsArgument {
            GParamSpec *pspec;
        } VipsArgument;

        typedef struct _VipsArgumentInstance {
            VipsArgument parent;

            // more
        } VipsArgumentInstance;

        typedef enum _VipsArgumentFlags {
            VIPS_ARGUMENT_NONE = 0,
            VIPS_ARGUMENT_REQUIRED = 1,
            VIPS_ARGUMENT_CONSTRUCT = 2,
            VIPS_ARGUMENT_SET_ONCE = 4,
            VIPS_ARGUMENT_SET_ALWAYS = 8,
            VIPS_ARGUMENT_INPUT = 16,
            VIPS_ARGUMENT_OUTPUT = 32,
            VIPS_ARGUMENT_DEPRECATED = 64,
            VIPS_ARGUMENT_MODIFY = 128
        } VipsArgumentFlags;

        typedef struct _VipsArgumentClass {
            VipsArgument parent;

            VipsObjectClass *object_class;
            VipsArgumentFlags flags;
            int priority;
            unsigned int offset;
        } VipsArgumentClass;

        int vips_object_get_argument (VipsObject* object,
            const char *name, GParamSpec** pspec,
            VipsArgumentClass** argument_class,
            VipsArgumentInstance** argument_instance);

        void vips_object_print_all (void);

        int vips_object_set_from_string (VipsObject* object,
            const char* options);

        const char* vips_object_get_description (VipsObject* object);

        const char* g_param_spec_get_blurb (GParamSpec* pspec);

        typedef struct _VipsImage {
    '''

    # this field changed name in libvips 8.4
    if _at_least(features, 8, 4):
        code += '''
            VipsObject parent_instance;
        '''
    else:
        code += '''
            VipsObject parent_object;
        '''

    code += '''
            // more
        } VipsImage;

        const char* vips_foreign_find_load (const char* name);
        const char* vips_foreign_find_load_buffer (const void* data,
            size_t size);
        const char* vips_foreign_find_save (const char* name);
        const char* vips_foreign_find_save_buffer (const char* suffix);

        VipsImage* vips_image_new_matrix_from_array (int width, int height,
            const double* array, int size);
        VipsImage* vips_image_new_from_memory (const void* data, size_t size,
            int width, int height, int bands, int format);

        VipsImage* vips_image_copy_memory (VipsImage* image);

        GType vips_image_get_typeof (const VipsImage* image,
            const char* name);
        int vips_image_get (const VipsImage* image,
            const char* name, GValue* value_copy);
        void vips_image_set (VipsImage* image,
            const char* name, GValue* value);
        int vips_image_remove (VipsImage* image, const char* name);

        char* vips_filename_get_filename (const char* vips_filename);
        char* vips_filename_get_options (const char* vips_filename);

        VipsImage* vips_image_new_temp_file (const char* format);

        int vips_image_write (VipsImage* image, VipsImage* out);
        void* vips_image_write_to_memory (VipsImage* in, size_t* size_out);

        typedef struct _VipsInterpolate {
            VipsObject parent_object;

            // more
        } VipsInterpolate;

        VipsInterpolate* vips_interpolate_new (const char* name);

        typedef struct _VipsOperation {
            VipsObject parent_instance;

            // more
        } VipsOperation;

        VipsOperation* vips_operation_new (const char* name);

        typedef void* (*VipsArgumentMapFn) (VipsObject* object,
            GParamSpec* pspec,
            VipsArgumentClass* argument_class,
            VipsArgumentInstance* argument_instance,
            void* a, void* b);
        void* vips_argument_map (VipsObject* object,
            VipsArgumentMapFn fn, void* a, void* b);
    '''

    # this field was added in libvips 8.7
    if _at_least(features, 8, 7):
        code += '''
            int vips_object_get_args (VipsObject* object,
                const char*** names, int** flags, int* n_args);
        '''

    code += '''
        VipsOperation* vips_cache_operation_build (VipsOperation* operation);
        void vips_object_unref_outputs (VipsObject* object);
        int vips_operation_get_flags (VipsOperation* operation);

        void vips_cache_set_max (int max);
        void vips_cache_set_max_mem (size_t max_mem);
        void vips_cache_set_max_files (int max_files);
        void vips_cache_set_trace (int trace);
    '''

    if _at_least(features, 8, 5):
        code += '''
            char** vips_image_get_fields (VipsImage* image);
            int vips_image_hasalpha (VipsImage* image);
        '''

    if _at_least(features, 8, 6):
        code += '''
            GType vips_blend_mode_get_type (void);
            void vips_value_set_blob_free (GValue* value,
                void* data, size_t length);
        '''

    # we must only define these in API mode ... in ABI mode we need to call
    # these things earlier
    if features['api']:
        code += '''
            int vips_init (const char* argv0);
            int vips_version (int flag);
        '''

        # ... means inherit from C defines
        code += '#define VIPS_MAJOR_VERSION ...\n'
        code += '#define VIPS_MINOR_VERSION ...\n'
        code += '#define VIPS_MICRO_VERSION ...\n'

    # add contents of features as a comment ... handy for debugging
    for key, value in features.items():
        code += '//%s = %s\n' % (key, value)

    return code
# only the declaration builder is public
__all__ = [
    'cdefs'
]
| {
"repo_name": "jcupitt/pyvips",
"path": "pyvips/decls.py",
"copies": "1",
"size": "12388",
"license": "mit",
"hash": 2718728523199166000,
"line_mean": 30.7641025641,
"line_max": 78,
"alpha_frac": 0.5754762674,
"autogenerated": false,
"ratio": 3.7178871548619448,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4793363422261945,
"avg_score": null,
"num_lines": null
} |
""" All the class models for the objects we need to represent the games """
from enum import Enum, auto
from logbook import Logger
from . helpers import get_datetime
class State(Enum):
    """ Enumerate the state a game can be in """
    # explicit values, identical to what auto() assigned (1..4)
    STARTED = 1
    STOPPED = 2
    CRASHED = 3
    ABORTED = 4
class Game(object):
    """ Simple container for calculating game stuff like players, teams, winners """

    def __init__(self, start_action):
        # start_action is the parsed server-start log entry; it must carry
        # at least 'timestamp' and 'version'.
        self.sid = get_datetime(start_action['timestamp'])
        self._log = [start_action]
        self.mc_version = start_action['version']
        self._stop_log = []
        self.stopped = False
        self.game_started = None
        self.game_info = {}
        self._teams = {}
        self._players = []
        self._deaths = []
        self._player_info = {}
        self.log = Logger('Game ' + start_action['timestamp'])
        self._state = State.STARTED
        self.winning_team = None

    def __repr__(self):
        return "<Game {start}>".format(start=self.sid.isoformat())

    @property
    def state(self):
        """ Expose the game/server state, can only be set to Class State instance """
        return self._state

    @state.setter
    def state(self, state):
        # silently ignore anything that is not a State member
        if isinstance(state, State):
            self._state = state

    def game_start(self, action):
        """ The game is ON! """
        self._players = list(self.get_playing_players())
        self.game_started = len(self._log)
        self.game_info = action

    def add_action(self, action):
        """ Add an action to the log if active game, else save it for prosperity """
        if not self.stopped:
            # save the action to the general log
            self._log.append(action)
            # check if this did something to alter state
            self.check_action(action)
        else:
            self._stop_log.append(action)

    def check_action(self, action):
        """ Check if the action did something we should register """
        # dispatch to the method named after the action, if we define one
        method = getattr(self, action['action'], None)
        if method is not None:
            # call the method with the action
            method(action)

    def death(self, action):
        """ Someone did something that should be counted as a score """
        # NOTE(review): a plain death is assumed to carry fewer than five
        # keys while a kill carries more -- confirm against parser output.
        if len(action) < 5:
            self.log.debug("Player {p} died reason {r} "
                           .format(p=action['player'], r=action['reason']))
        else:
            # killed
            self.log.debug("Player {p} was killed by {p2} "
                           .format(p=action['player'], p2=action['killed_by']))
        self._deaths.append(action['player'])
        # Was this the winning move?
        self.check_for_winner()

    def player_mode(self, action):
        """ Handle a player mode change """
        self.log.debug("Player {p} mode changed: {m}"
                       .format(p=action['player'], m=action['mode']))
        if action['mode'] == 'Survival':
            if action['player'] not in self._players:
                self._players.append(action['player'])
        else:
            if action['player'] in self._players and not self.stopped:
                self._players.remove(action['player'])

    def player_join(self, action):
        """ Register player when someone joins """
        self.log.debug("Player {p} joined game {g}"
                       .format(p=action['player'], g=self))
        player = self._player_info.get(action['player'])
        if player is not None:
            player['uuid'] = action['uuid']
        else:
            self._player_info[action['player']] = {'uuid': action['uuid']}

    def player_ip(self, action):
        """ Sets a players ip address """
        self.log.debug("Player {p} have ip-address {ip}"
                       .format(p=action['player'], ip=action['ipaddress']))
        # BUG FIX: the old code did {**player, ...} on the result of
        # _player_info.get() and crashed with a TypeError when the player
        # had no record yet; create the record on demand like player_join.
        info = self._player_info.setdefault(action['player'], {})
        info['ipaddress'] = action['ipaddress']

    def team_members(self, action):
        """ Set team info on players """
        for player in action['players']:
            g_player = self._player_info.get(player)
            if g_player is not None:
                g_player['team'] = action['team']
            else:
                self._player_info[player] = {'team': action['team']}
            self.log.debug("Player {p} in team {team}"
                           .format(p=player, team=action['team']))

    def check_for_winner(self):
        """ Check the info if we only have one team left (and the winners!) """
        # just loop through all players and see if they belong to the same team
        t_players = list(self.get_playing_players())
        if len(t_players) < 3:
            # Not a valid game, can't decide on a winner.
            self.stopped = True
            self.state = State.ABORTED
            return
        team = None
        players = set(t_players) - set(self._deaths)
        for player in players:
            if team is not None:
                if team != self._player_info[player]['team']:
                    # We do not have a winner, more teams still alive
                    return
            else:
                team = self._player_info[player]['team']
        # If we get here, all remaining players are on the same team
        tm_pl = []
        for player in self.get_playing_players():
            if self._player_info[player]['team'] == team:
                tm_pl.append(player)
        self._log.append({'action': 'team_win', 'team': team, 'players': tm_pl})
        self._log.append({'action': 'survivors', 'players': list(players)})
        self.stopped = True
        self.state = State.STOPPED
        self.winning_team = team

    def get_playing_players(self):
        """ Return all players as a list that have joined the server and has a team set
        This can be used when player modes is not set in the beginning for some reason """
        for player, info in self._player_info.items():
            if info.get('uuid') is not None and info.get('team') is not None:
                yield player

    def to_json(self):
        """ Return the game as an JSON object """
        raise NotImplementedError

    def get_actions(self):
        """ returns actions that we should count towards scoring """
        for action in self._log:
            yield action

    def player_info(self, player):
        """ Returns the players information """
        return self._player_info.get(player)
class Store(object):
    """ Store class for storing games """

    def __init__(self):
        self._store = []

    @property
    def last(self):
        """ Expose the last item in store as Store.last """
        return self._store[-1] if self._store else None

    @property
    def count(self):
        """ The number of games in the store """
        return len(self._store)

    def add(self, game):
        """ Add a game to the store """
        self._store.append(game)

    def items(self):
        """ Generator for returning the games in the store """
        # only games that finished normally are yielded
        return (game for game in self._store if game.state == State.STOPPED)
| {
"repo_name": "gylle/uhc_parser",
"path": "parser/models.py",
"copies": "1",
"size": "7385",
"license": "mit",
"hash": 7915668903009639000,
"line_mean": 36.1105527638,
"line_max": 90,
"alpha_frac": 0.5577522004,
"autogenerated": false,
"ratio": 4.172316384180791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0028907442215815662,
"num_lines": 199
} |
# All the code needed to implement pattern masking
# only a subset of the clearest colors are used
from ledlib.helpers import debugprint, verboseprint
from ledlib import colordefs
# A mask is a list of colors and transparencies
# Simple Colors:
# kw = black, white
# roygbiv = basic colors # might even simplfy this to wcmyk
# X = Portal Faction
# R = RES
# E = ENL
# N = NEUTRAL
# - = completely transparent (i.e. non-masked)
# 0 = Passed color 0
# 1 = Passed color 1 [...]
# map from single mask characters to concrete RGB triplets; only the
# clearest colors are represented (see the legend above)
maskcolortable = {
    "R": colordefs.colortable["RES"],
    "E": colordefs.colortable["ENL"],
    "w": colordefs.colortable["WHITISH"],
    "k": colordefs.colortable["BLACKISH"],
    "-": colordefs.colortable["NOTACOLOR"]
}
class MaskPos(object):
    """One position of a mask: an RGB color plus an opacity.

    maskchar selects the color (see the legend above): "-" is fully
    transparent, 0 keeps the caller-supplied rgb/opacity, "N" and the
    entries of maskcolortable pick fixed colors, and anything else falls
    back to transparent.
    """

    def __init__(self, maskchar, opacity=1.00, rgb=(0, 0, 0)):
        self.opacity = opacity
        self.rgb = rgb
        if maskchar == "-":
            # completely transparent (i.e. non-masked) position
            self.opacity = 0.00
            self.rgb = (0, 0, 0)
        elif maskchar == 0:
            # passed color 0: keep the rgb/opacity the caller supplied
            pass
        elif maskchar == "N":
            self.rgb = colordefs.colortable["NEUTRAL"]
        elif maskchar in maskcolortable:
            self.rgb = maskcolortable[maskchar]
        else:
            self.opacity = 0.00
            self.rgb = (0, 0, 0)
            # BUG FIX: this branch used an undefined name `log` and raised
            # NameError; report through the module's debug helper instead.
            debugprint(("bad or unimplemented mask", maskchar,
                        "using transparent"))

    def apply_single(self, rgb_triplet):
        # not sure what kind of compositing to use. Let's at least do the
        # right thing for 0 and 1 opacity.
        # NOTE(review): opacity 0 returns the mask color (black) and 1.0
        # returns the underlying triplet untouched -- confirm this
        # inversion of the usual alpha convention is intended.
        if self.opacity <= 0.001:
            return self.rgb
        elif self.opacity == 1.00:
            return rgb_triplet
        # not sure what to do. Try additive blending, clamped at 255.
        r = (self.rgb[0] * self.opacity) + rgb_triplet[0]
        if r > 255:
            r = 255
        g = (self.rgb[1] * self.opacity) + rgb_triplet[1]
        if g > 255:
            g = 255
        b = (self.rgb[2] * self.opacity) + rgb_triplet[2]
        if b > 255:
            b = 255
        return (r, g, b)
# shared fully-transparent position, used to pre-fill Mask.pos
defaultmaskpos = MaskPos("-")
class Mask(object):
    """A sequence of MaskPos positions built from a mask string."""

    # NOTE: the mutable default arguments are never mutated here, so the
    # shared-default pitfall does not bite; kept for interface stability.
    def __init__(self, string, opacity=[1.00],
                 rgbtable=[colordefs.colortable["NOTACOLOR"]]):
        # TODO: implement passed color(s)
        self.name = string
        self.size = len(string)
        self.opacity = [1.00] * self.size
        # TODO: implement variable (or even passed static) opacity
        debugprint(("Creating a mask from ", self.name))
        self.pos = [defaultmaskpos] * self.size
        for i in range(self.size):
            self.pos[i] = MaskPos(self.name[i], self.opacity[i])

    def apply(self, rgb_list):
        """Apply the mask to rgb_list, returning the masked color list."""
        rgb_list_size = len(rgb_list)
        scope = min(rgb_list_size, self.size)
        result = [(150, 150, 150)] * scope  # dimension result list
        # BUG FIX: the old loop ran over the whole rgb_list (indexing past
        # self.pos when rgb_list was longer than the mask) and discarded
        # each apply_single result, returning only the fill values.
        for i in range(scope):
            result[i] = self.pos[i].apply_single(rgb_list[i])
        return result
| {
"repo_name": "bbulkow/MagnusFlora",
"path": "led/ledlib/masking.py",
"copies": "1",
"size": "3345",
"license": "mit",
"hash": -3973125859962543600,
"line_mean": 36.1666666667,
"line_max": 131,
"alpha_frac": 0.5491778774,
"autogenerated": false,
"ratio": 3.4237461617195497,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44729240391195496,
"avg_score": null,
"num_lines": null
} |
# All the code samples below have one parameter, which is where the protection level name
# for that storage type will be inserted, e.g. NSDataWritingFileProtectionCompleteUnlessOpen
CODE_SAMPLE_CORE_DATA = """
- (NSPersistentStoreCoordinator *)persistentStoreCoordinator {
if (persistentStoreCoordinator_ != nil) {
return persistentStoreCoordinator_;
}
persistentStoreCoordinator_ = [[NSPersistentStoreCoordinator alloc]
initWithManagedObjectModel:[self managedObjectModel]];
NSURL *storeURL = [NSURL fileURLWithPath:
[[self applicationDocumentsDirectory] stringByAppendingPathComponent: @"MyStore.sqlite"]];
[persistentStoreCoordinator_ addPersistentStoreWithType:NSSQLiteStoreType
configuration:nil URL:storeURL options:nil error:&error];
NSDictionary *fileAttributes = [NSDictionary
dictionaryWithObject:%s
forKey:NSFileProtectionKey];
[[NSFileManager defaultManager] setAttributes:fileAttributes
ofItemAtPath:[storeURL path] error: &error];
return persistentStoreCoordinator_;
}"""
CODE_SAMPLE_SQL = """
int flags = SQLITE_OPEN_CREATE |
SQLITE_OPEN_READWRITE |
%s;
sqlite3_open_v2(path, &database, flags, NULL)
// Or, if you prefer FMDB:
FMDatabase *database = [FMDatabase databaseWithPath:dbPath];
[database openWithFlags:flags]
"""
CODE_SAMPLE_RAW_DATA = """
NSData *contents = [@"secret file contents" dataUsingEncoding:NSUTF8StringEncoding];
[contents writeToFile:path
options:%s
error:&error];
"""
CODE_SAMPLE_KEYCHAIN = """
// Note that metadata, like the account name, is not encrypted.
NSDictionary *item = @{
(__bridge id)kSecAttrAccount: account,
(__bridge id)kSecClass: (__bridge id)kSecClassGenericPassword,
(__bridge id)kSecAttrAccessible: (__bridge id)%s,
(__bridge id)kSecValueData: data,
};
OSStatus error = SecItemAdd((__bridge CFDictionaryRef)item, NULL);
"""
| {
"repo_name": "erikr/howtostoreiosdata",
"path": "howtostoreiosdata/wizard/code_samples.py",
"copies": "1",
"size": "1995",
"license": "mit",
"hash": -554655422729914940,
"line_mean": 31.1774193548,
"line_max": 98,
"alpha_frac": 0.7047619048,
"autogenerated": false,
"ratio": 3.8737864077669903,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5078548312566991,
"avg_score": null,
"num_lines": null
} |
# all the constants
import numpy as np
import re
from os import listdir
from collections import defaultdict
# special tokens used when linearizing AMR graphs
RET = '-RET-'
TOP = '-TOP-'  # top relation for the overall bracket
LBR = '('
RBR = ')'
SURF = '-SURF-'  # surface form
CONST = '-CONST-'
VERB = '-VERB-'
END = '-END-'
UNK = 'UNK'  # unknown words
# AMR relations whose values describe dates/times
date_relations = set(['time', 'year', 'month', 'day', 'weekday', 'century', 'era', 'decade', 'dayperiod', 'season', 'timezone'])
WORD2VEC_EMBEDDING_PATH = '/home/j/llc/cwang24/Tools/word2vec/GoogleNews-vectors-negative300.bin'
PATH_TO_VERB_LIST = '../resources/verbalization-list-v1.01.txt'
# module-level accumulators filled in as a side effect of _load_verb_list
all_relations = set()
num_set = set()
def _load_verb_list(path_to_file):
    """Parse the AMR verbalization list into {lemma: [subgraph, ...]}.

    Each subgraph is {root_concept: {relation: concept, ...}}. Comment
    lines ('#'), blank lines and DO-NOT-VERBALIZE entries are skipped.
    Side effects: adds every relation seen to the module-level
    `all_relations` set and every per-entry relation count to `num_set`.
    """
    verbdict = {}
    with open(path_to_file, 'r') as f:
        for line in f:
            if not line.startswith('#') and line.strip():
                if not line.startswith('DO-NOT-VERBALIZE'):
                    # e.g. "VERBALIZE sinner TO person :ARG0-of sin-01"
                    verb_type, lemma, _, subgraph_str = re.split('\s+', line, 3)
                    subgraph = {}
                    #if len(l) == 1:
                    #else: # have sub-structure
                    root = re.split('\s+', subgraph_str, 1)[0]
                    subgraph[root] = {}
                    num_relations = 0
                    for match in re.finditer(':([^\s]+)\s*([^\s:]+)', subgraph_str):
                        relation = match.group(1)
                        all_relations.add(relation)
                        concept = match.group(2)
                        subgraph[root][relation] = concept
                        num_relations += 1
                    #if num_relations == 2:
                    #    print subgraph_str
                    num_set.add(num_relations)
                    # a lemma may verbalize to several alternative subgraphs
                    verbdict[lemma] = verbdict.get(lemma, [])
                    verbdict[lemma].append(subgraph)
    return verbdict
# parsed verbalization table, loaded once at import time
VERB_LIST = _load_verb_list(PATH_TO_VERB_LIST)
#for relation in all_relations:
#    print relation
#
#for num in num_set:
#    print num
# PATH_TO_COUNTRY_LIST='./resources/country-list.csv'
# def _load_country_list(path_to_file):
#     countrydict = {}
#     with open(path_to_file,'r') as f:
#         for line in f:
#             line = line.strip()
#             country_name, country_adj, _ = line.split(',', 2)
#             countrydict[country_adj] = country_name
#     return countrydict
# COUNTRY_LIST=_load_country_list(PATH_TO_COUNTRY_LIST)
# given different domain, return range of split corpus #TODO: move this part to config file
def get_corpus_range(corpus_section, corpus_type):
    """Return the (start, end) index range of `corpus_section` inside the
    split corpus named by `corpus_type` ('train', 'dev' or 'test')."""
    # TODO: move this table to a config file
    table = {
        'train': {'proxy': (0, 6603),
                  'bolt': (6603, 7664),
                  'dfa': (7664, 9367),
                  'mt09sdf': (9367, 9571),
                  'xinhua': (9571, 10312)},
        'dev': {'proxy': (0, 826),
                'bolt': (826, 959),
                'consensus': (959, 1059),
                'dfa': (1059, 1269),
                'xinhua': (1269, 1368)},
        'test': {'proxy': (0, 823),
                 'bolt': (823, 956),
                 'consensus': (956, 1056),
                 'dfa': (1056, 1285),
                 'xinhua': (1285, 1371)},
    }
    return table[corpus_type][corpus_section]
| {
"repo_name": "masterkeywikz/seq2graph",
"path": "amr2seq/data_prep/constants.py",
"copies": "1",
"size": "3153",
"license": "mit",
"hash": -4690781043471946000,
"line_mean": 29.9117647059,
"line_max": 128,
"alpha_frac": 0.5214081827,
"autogenerated": false,
"ratio": 3.383047210300429,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4404455393000429,
"avg_score": null,
"num_lines": null
} |
# all the constants
import numpy as np
import re
from os import listdir
from collections import defaultdict
# special tokens used when linearizing AMR graphs
RET = '-RET-'
TOP = '-TOP-'  # top relation for the overall bracket
LBR = '('
RBR = ')'
SURF = '-SURF-'  # surface form
CONST = '-CONST-'
VERB = '-VERB-'
END = '-END-'
UNK = 'UNK'  # unknown words
WORD2VEC_EMBEDDING_PATH = '/home/j/llc/cwang24/Tools/word2vec/GoogleNews-vectors-negative300.bin'
# DEFAULT_RULE_FILE = './rules/dep2amrLabelRules'
# def _load_rules(rule_file):
# rf = open(rule_file,'r')
# d = {}
# for line in rf.readlines():
# if line.strip():
# dep_rel,amr_rel,_ = line.split()
# if dep_rel not in d: d[dep_rel] = amr_rel[1:]
# else:
# pass
# return d
# __DEP_AMR_REL_TABLE = _load_rules(DEFAULT_RULE_FILE)
# def get_fake_amr_relation_mapping(dep_rel):
# return __DEP_AMR_REL_TABLE[dep_rel]
# DEFAULT_NOM_FILE = './resources/nombank-dict.1.0'
# def _read_nom_list(nombank_dict_file):
# nomdict = open(nombank_dict_file,'r')
# nomlist = []
# token_re = re.compile('^\\(PBNOUN :ORTH \\"([^\s]+)\\" :ROLE-SETS')
# for line in nomdict.readlines():
# m = token_re.match(line.rstrip())
# if m:
# nomlist.append(m.group(1))
# return nomlist
# NOMLIST = _read_nom_list(DEFAULT_NOM_FILE)
# DEFAULT_BROWN_CLUSTER = './resources/wclusters-engiga'
# def _load_brown_cluster(dir_path,cluster_num=1000):
# cluster_dict = defaultdict(str)
# for fn in listdir(dir_path):
# if re.match('^.*c(\d+).*$',fn).group(1) == str(cluster_num) and fn.endswith('.txt'):
# with open(dir_path+'/'+fn,'r') as f:
# for line in f:
# bitstring, tok, freq = line.split()
# cluster_dict[tok]=bitstring
# return cluster_dict
# BROWN_CLUSTER=_load_brown_cluster(DEFAULT_BROWN_CLUSTER)
# location of the AMR verbalization resource parsed by _load_verb_list below
PATH_TO_VERB_LIST = './resources/verbalization-list-v1.01.txt'
def _load_verb_list(path_to_file):
verbdict = {}
with open(path_to_file,'r') as f:
for line in f:
if not line.startswith('#') and line.strip():
if not line.startswith('DO-NOT-VERBALIZE'):
verb_type, lemma, _, subgraph_str = re.split('\s+',line,3)
subgraph = {}
#if len(l) == 1:
#else: # have sub-structure
root = re.split('\s+', subgraph_str, 1)[0]
subgraph[root] = {}
for match in re.finditer(':([^\s]+)\s*([^\s:]+)',subgraph_str):
relation = match.group(1)
concept = match.group(2)
subgraph[root][relation] = concept
verbdict[lemma] = verbdict.get(lemma,[])
verbdict[lemma].append(subgraph)
return verbdict
# parsed verbalization table, loaded once at import time
VERB_LIST = _load_verb_list(PATH_TO_VERB_LIST)
# PATH_TO_COUNTRY_LIST='./resources/country-list.csv'
# def _load_country_list(path_to_file):
# countrydict = {}
# with open(path_to_file,'r') as f:
# for line in f:
# line = line.strip()
# country_name, country_adj, _ = line.split(',', 2)
# countrydict[country_adj] = country_name
# return countrydict
# COUNTRY_LIST=_load_country_list(PATH_TO_COUNTRY_LIST)
# given different domain, return range of split corpus #TODO: move this part to config file
def get_corpus_range(corpus_section, corpus_type):
    """Return the (start, end) index range of `corpus_section` inside the
    split corpus named by `corpus_type` ('train', 'dev' or 'test')."""
    # TODO: move these tables to a config file
    train = {'proxy': (0, 6603), 'bolt': (6603, 7664), 'dfa': (7664, 9367),
             'mt09sdf': (9367, 9571), 'xinhua': (9571, 10312)}
    dev = {'proxy': (0, 826), 'bolt': (826, 959), 'consensus': (959, 1059),
           'dfa': (1059, 1269), 'xinhua': (1269, 1368)}
    test = {'proxy': (0, 823), 'bolt': (823, 956), 'consensus': (956, 1056),
            'dfa': (1056, 1285), 'xinhua': (1285, 1371)}
    ranges = {'train': train, 'dev': dev, 'test': test}
    return ranges[corpus_type][corpus_section]
"repo_name": "masterkeywikz/seq2graph",
"path": "amr2seq/constants.py",
"copies": "1",
"size": "4178",
"license": "mit",
"hash": 8559728821032438000,
"line_mean": 30.4210526316,
"line_max": 95,
"alpha_frac": 0.5306366683,
"autogenerated": false,
"ratio": 3.1366366366366365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4167273304936637,
"avg_score": null,
"num_lines": null
} |
"""All the database related entities are in this module."""
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import text
import numpy as np
import math
from scipy.stats import lognorm
from sqlalchemy.ext.hybrid import hybrid_property
# single shared SQLAlchemy handle; bound to the Flask app elsewhere
db = SQLAlchemy()
class Ethnicity(db.Model):
    """Ethnicity Model. We will be using the Bayes factors from this model to compute utility function"""
    __tablename__ = 'ethnicity'
    unitID = db.Column(db.Integer, primary_key=True)
    College = db.Column(db.String(80), nullable=False)
    UGDS = db.Column(db.Integer)
    # fraction of undergraduates in each ethnicity bucket
    UGDS_WHITE = db.Column(db.Float)
    UGDS_BLACK = db.Column(db.Float)
    UGDS_HISP = db.Column(db.Float)
    UGDS_ASIAN = db.Column(db.Float)
    UGDS_AIAN = db.Column(db.Float)
    UGDS_NHPI = db.Column(db.Float)
    UGDS_2MOR = db.Column(db.Float)
    UGDS_NRA = db.Column(db.Float)
    UGDS_UNKN = db.Column(db.Float)

    @hybrid_property
    def totprob(self):
        # sum of all ethnicity fractions
        return self.UGDS_WHITE + self.UGDS_2MOR + self.UGDS_AIAN + self.UGDS_ASIAN + self.UGDS_BLACK + self.UGDS_HISP + \
            self.UGDS_NHPI + self.UGDS_NRA + self.UGDS_UNKN

    @hybrid_property
    def probSchool(self):
        # NOTE(review): iterating `self` looks like it expects a query
        # result rather than a single row -- confirm how this is called.
        return self.UGDS / sum(row.UGDS for row in self)

    def __repr__(self):
        """String representation of the class."""
        return '<ethnicity %r>' % self.College

    @hybrid_property
    def BF_WHITE(self):
        return self.make_bf(self.UGDS_WHITE)

    @hybrid_property
    def BF_BLACK(self):
        return self.make_bf(self.UGDS_BLACK)

    @hybrid_property
    def BF_2MOR(self):
        return self.make_bf(self.UGDS_2MOR)

    @hybrid_property
    def BF_AIAN(self):
        return self.make_bf(self.UGDS_AIAN)

    @hybrid_property
    def BF_ASIAN(self):
        return self.make_bf(self.UGDS_ASIAN)

    @hybrid_property
    def BF_HISP(self):
        # BUG FIX: this previously passed UGDS_WHITE (copy-paste error),
        # making the Hispanic Bayes factor identical to the white one.
        return self.make_bf(self.UGDS_HISP)

    @hybrid_property
    def BF_NHPI(self):
        return self.make_bf(self.UGDS_NHPI)

    @hybrid_property
    def BF_NRA(self):
        return self.make_bf(self.UGDS_NRA)

    @hybrid_property
    def BF_UNKN(self):
        return self.make_bf(self.UGDS_UNKN)

    def make_bf(self, attr):
        """Utility function to create bayes factors"""
        # small epsilon keeps log10 defined for zero fractions
        attr = 1.0E-9 + attr
        return math.log10(attr / sum(attr * row.probSchool for row in self))
class Academic(db.Model):
    """Academic Model"""
    __tablename__ = 'academic'
    unitID = db.Column(db.Integer, primary_key=True)
    College = db.Column(db.String(80), nullable=False)
    UGDS = db.Column(db.Integer)
    # SAT verbal/math scores at the 25th and 75th percentiles
    SATVR25 = db.Column(db.Integer)
    SATMT25 = db.Column(db.Integer)
    SATVR75 = db.Column(db.Integer)
    SATMT75 = db.Column(db.Integer)
    C150_4_POOLED_SUPP = db.Column(db.Integer)
    SAT_AVG = db.Column(db.Integer)

    @hybrid_property
    def SAT_25(self):
        # composite 25th-percentile SAT (verbal + math)
        return self.SATVR25 + self.SATMT25

    @hybrid_property
    def SAT_75(self):
        # composite 75th-percentile SAT (verbal + math)
        return self.SATVR75 + self.SATMT75

    @hybrid_property
    def pSAT_25(self):
        # NOTE(review): `for row in self` looks like it expects a query
        # result rather than a single row -- confirm how this is called.
        return self.SAT_25 / sum(row.SAT_25 for row in self)

    @hybrid_property
    def pSAT_75(self):
        return self.SAT_75 / sum(row.SAT_75 for row in self)

    @hybrid_property
    def pSAT_AVG(self):
        return self.SAT_AVG / sum(row.SAT_AVG for row in self)

    @hybrid_property
    def probSchool(self):
        # school's share of total undergraduate enrolment
        return self.UGDS / sum(row.UGDS for row in self)
class StudentAid(db.Model):
    """Student AID model.

    Pell-grant history plus family-income-bracket shares (fsend_1..fsend_5)
    per school, with Bayes-factor helpers mirroring the other models.
    """
    __tablename__ = 'student_aid'
    unitID = db.Column(db.Integer, primary_key=True)
    College = db.Column(db.String(80), nullable=False)
    UGDS = db.Column(db.Integer)
    # BUGFIX: was db.Column(db.Model) -- db.Model is the declarative base,
    # not a column type; all sibling share columns use db.REAL.
    pell_ever_2005 = db.Column(db.REAL)
    fsend_1_2005 = db.Column(db.REAL)
    fsend_2_2005 = db.Column(db.REAL)
    fsend_3_2005 = db.Column(db.REAL)
    fsend_4_2005 = db.Column(db.REAL)
    fsend_5_2005 = db.Column(db.REAL)
    @hybrid_property
    def probSchool(self):
        # School's share of total undergraduate enrollment.
        # NOTE(review): `for row in self` assumes iteration over a collection.
        return self.UGDS / sum(row.UGDS for row in self)
    @hybrid_property
    def totprob(self):
        # Sum of income-bracket shares; sanity value near 1.0.
        return self.fsend_1_2005 + self.fsend_2_2005 + self.fsend_3_2005 + self.fsend_4_2005 + self.fsend_5_2005
    @hybrid_property
    def BF_fsend_1_2005(self):
        return self.make_bf(self.fsend_1_2005)
    @hybrid_property
    def BF_fsend_2_2005(self):
        return self.make_bf(self.fsend_2_2005)
    @hybrid_property
    def BF_fsend_3_2005(self):
        return self.make_bf(self.fsend_3_2005)
    @hybrid_property
    def BF_fsend_4_2005(self):
        return self.make_bf(self.fsend_4_2005)
    @hybrid_property
    def BF_fsend_5_2005(self):
        return self.make_bf(self.fsend_5_2005)
    @hybrid_property
    def BF_pell_ever_2005(self):
        return self.make_bf(self.pell_ever_2005)
    def make_bf(self, attr):
        """Utility to create a Bayes factor; epsilon avoids log10(0)."""
        attr = 1.0E-9 + attr
        return math.log10(attr/sum(attr * row.probSchool for row in self))
class Earnings(db.Model):
    """Earnings after graduation model.

    Fits a log-normal distribution to the reported mean/sd of earnings six
    years out, and exposes probabilities for disjoint income bands.
    """
    __tablename__ = 'earnings'
    unitID = db.Column(db.Integer, primary_key=True)
    College = db.Column(db.String(80), nullable=False)
    UGDS = db.Column(db.Integer)
    CDR3 = db.Column(db.REAL)
    RPY_3YR_RT = db.Column(db.REAL)
    RPY_5YR_RT = db.Column(db.REAL)
    RPY_7YR_RT = db.Column(db.REAL)
    mn_earn_wne_p6_2005 = db.Column(db.REAL)
    md_earn_wne_p6_2005 = db.Column(db.REAL)
    pct10_earn_wne_p6_2005 = db.Column(db.REAL)
    pct25_earn_wne_p6_2005 = db.Column(db.REAL)
    pct75_earn_wne_p6_2005 = db.Column(db.REAL)
    pct90_earn_wne_p6_2005= db.Column(db.REAL)
    sd_earn_wne_p6_2005= db.Column(db.REAL)
    @hybrid_property
    def probSchool(self):
        # School's share of total undergraduate enrollment.
        return self.UGDS / sum(row.UGDS for row in self)
    @hybrid_property
    def sdlog(self):
        # Log-normal shape parameter from mean/sd via the standard method of moments.
        return math.sqrt(math.log((self.sd_earn_wne_p6_2005 / self.mn_earn_wne_p6_2005) ** 2 + 1))
    @hybrid_property
    def meanlog(self):
        # Log-normal scale parameter (log of the scale) from mean/sd.
        return math.log(self.mn_earn_wne_p6_2005 ** 2 / math.sqrt(self.mn_earn_wne_p6_2005 ** 2 + self.sd_earn_wne_p6_2005 ** 2))
    @hybrid_property
    def p_le30K (self):
        # P(earnings <= $30K).
        return lognorm.cdf(30.0E3, self.sdlog, 0, math.exp(self.meanlog))
    @hybrid_property
    def p_gt30Kle48K(self):
        return lognorm.cdf(48.0E3, self.sdlog, 0, math.exp(self.meanlog)) - self.p_le30K
    @hybrid_property
    def p_gt48Kle75K(self):
        return lognorm.cdf(75.0E3, self.sdlog, 0, math.exp(self.meanlog)) - lognorm.cdf(48.0E3, self.sdlog, 0, math.exp(self.meanlog))
    @hybrid_property
    def p_gt75Kle110K(self):
        return lognorm.cdf(110.0E3, self.sdlog, 0, math.exp(self.meanlog)) - lognorm.cdf(75.0E3, self.sdlog, 0, math.exp(self.meanlog))
    @hybrid_property
    def p_gt110K(self):
        # BUGFIX: the upper tail is 1 - CDF (the survival probability); the
        # original returned cdf(110K) itself, so totprob summed to well over 1.
        return 1 - lognorm.cdf(110.0E3, self.sdlog, 0, math.exp(self.meanlog))
    @hybrid_property
    def totprob(self):
        # Sum of the five disjoint bands; should now be ~1.0.
        return self.p_le30K + self.p_gt30Kle48K + self.p_gt48Kle75K + self.p_gt75Kle110K + self.p_gt110K
# def getCollegesPerYear():
# query = "SELECT INSTNM college FROM Scorecard Limit 5 "
#
# sql = text(query)
# result = db.engine.execute(sql)
# names = [["Name"]]
# for row in result:
# names.append([row[0]])
# return names
| {
"repo_name": "vishalbedi/CollegeScorecard",
"path": "backend/flask_app/models.py",
"copies": "1",
"size": "7158",
"license": "mit",
"hash": 2137919662898776800,
"line_mean": 29.2025316456,
"line_max": 136,
"alpha_frac": 0.6422184968,
"autogenerated": false,
"ratio": 2.7573189522342063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8868988345335733,
"avg_score": 0.006109820739694572,
"num_lines": 237
} |
"""All the database related entities are in this module."""
from flask_security import RoleMixin, SQLAlchemyUserDatastore, UserMixin
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Base(db.Model):
    """Base class for all the tables.

    Consists of two default columns `created_at` and `modified_at` .
    Abstract: no table is created for Base itself.
    """
    __abstract__ = True
    id = db.Column(db.Integer, primary_key=True)
    created_at = db.Column(db.DateTime, default=db.func.current_timestamp())
    # modified_at is bumped by the database on every UPDATE via onupdate.
    modified_at = db.Column(db.DateTime,
                            default=db.func.current_timestamp(),
                            onupdate=db.func.current_timestamp())
# Association table for the many-to-many User <-> Role relationship.
roles_users = db.Table('roles_users', db.Column('user_id', db.Integer(),
                                                db.ForeignKey('auth_user.id')),
                       db.Column('role_id', db.Integer(),
                                 db.ForeignKey('auth_role.id')))
class Role(Base, RoleMixin):
    """Create roles in the database (flask-security role model)."""
    __tablename__ = 'auth_role'
    name = db.Column(db.String(80), nullable=False, unique=True)
    description = db.Column(db.String(255))
    def __init__(self, name, description=None):
        """Initialize the Role object.

        Accepts an optional description so datastore calls such as
        create_role(name=..., description=...) keep working; the original
        signature rejected the description keyword.
        """
        self.name = name
        self.description = description
    def __repr__(self):
        """String representation of the class."""
        return '<Role %r>' % self.name
class User(Base, UserMixin):
    """Create users in the database (flask-security user model)."""
    __tablename__ = 'auth_user'
    email = db.Column(db.String(255), nullable=False, unique=True)
    password = db.Column(db.String(255), nullable=False)
    first_name = db.Column(db.String(255))
    last_name = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    confirmed_at = db.Column(db.DateTime())
    # Login-tracking columns used by flask-security's "trackable" feature.
    last_login_at = db.Column(db.DateTime())
    current_login_at = db.Column(db.DateTime())
    last_login_ip = db.Column(db.String(45))
    current_login_ip = db.Column(db.String(45))
    login_count = db.Column(db.Integer)
    # Many-to-many to Role via the roles_users association table.
    roles = db.relationship('Role',
                            secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))
    def __repr__(self):
        """String representation of the class."""
        return '<User %r>' % self.email
# Flask-Security datastore wiring the User and Role models to the db session.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
| {
"repo_name": "stevenaubertin/angular2-flask",
"path": "backend/flask_app/models.py",
"copies": "1",
"size": "2313",
"license": "mit",
"hash": 376823964820684540,
"line_mean": 32.5217391304,
"line_max": 79,
"alpha_frac": 0.598357112,
"autogenerated": false,
"ratio": 3.8358208955223883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9932906713344916,
"avg_score": 0.0002542588354945334,
"num_lines": 69
} |
"""All the data-importing functions are listed here for various colleges.
"""
import re
import urllib2
import dateutil.parser
import logging
import logging.handlers
import sys
import datetime
import rpi_calendars
from contextlib import closing
from icalendar import Calendar, Event
import pytz
from django.core.mail import send_mail
from django.conf import settings
from django.db.utils import IntegrityError
from django.db import transaction
from courses.models import (
Semester, Course, Department, Section,
Period, SectionPeriod, OfferedFor,
SectionCrosslisting, SemesterDepartment
)
from courses.signals import sections_modified
from courses.utils import Synchronizer, DAYS
# TODO: remove import *
from catalogparser import *
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
# Compatibility shim: logging.NullHandler only exists on Python >= 2.7;
# define a no-op fallback for older interpreters.
try:
    NullHandler = logging.NullHandler
except AttributeError:
    # NOTE(review): `level` appears unused after assignment -- confirm leftover.
    level = logging.INFO
    class NullHandler(object):
        def emit(self, record):
            pass
        handle = emit
# fallback, so there's no warning of no handlers
logger.addHandler(NullHandler())
logger.addHandler(logging.StreamHandler(sys.stdout))
class SemesterNotifier(object):
    """Emails the admins once if any newly-imported semester needs review."""
    def __init__(self):
        self.subject = "[YACS] New Semester for Review"
        self.message = """Hey Humans,
There's a new semester to approve. Go to the admin interface and check it out!
Love,
YACS.me
"""
        self.from_email = settings.FROM_EMAIL
        self.recipient_list = settings.ADMINS
        self.__notify = False
    def requires_notification(self):
        """Record that at least one new semester was created."""
        self.__notify = True
    def notify(self):
        """Send the notification email, but only if one was requested."""
        if not self.__notify:
            return
        send_mail(self.subject, self.message, self.from_email,
                  self.recipient_list, fail_silently=True)
class ROCSRPIImporter(object):
"Handles the importation of RPI course data into the database."
FILE_RE = re.compile(r'(\d+)\.xml')
    def __init__(self, notifier):
        """Cache known ROCS semesters and set up the SectionPeriod synchronizer."""
        self.semesters = {}  # semester.ref: semester obj
        self.notifier = notifier
        for semester in Semester.objects.filter(ref__startswith='http://sis.rpi.edu/reg/rocs/'):
            self.semesters[semester.ref] = semester
        self.latest_semester = None
        if len(self.semesters) > 0:
            self.latest_semester = max(self.semesters.values())
        self.sections_changed = False
        # Tracks which SectionPeriod rows are touched so stale ones can be trimmed.
        self.SectionPeriod = Synchronizer(SectionPeriod, SectionPeriod.objects.values_list('id', flat=True))
    def clear_unused(self, semester):
        # Delete SectionPeriod rows for this semester that were not seen during sync.
        self.SectionPeriod.trim(semester=semester)
    def sync(self, get_files=None, get_catalog=None):
        "Performs the updating of the database data from RPI's SIS"
        if get_files is None:
            from rpi_courses import list_rocs_xml_files
            get_files = list_rocs_xml_files
        if get_catalog is None:
            from rpi_courses import ROCSCourseCatalog
            get_catalog = ROCSCourseCatalog.from_url
        for filename in get_files():
            # Python 2 iterator protocol (.next()); extracts the numeric file id.
            name = self.FILE_RE.finditer(filename).next().groups()[0]
            semester = self.semesters.get(name + '.xml')
            # if latest semester or newer semester
            if (not semester) or semester == self.latest_semester:
                catalog = get_catalog(filename)
                if self.latest_semester and semester == self.latest_semester and catalog.datetime <= self.latest_semester.date_updated:
                    continue # already up-to-date
                logger.debug('found catalog for: %r %r' % (catalog.year, catalog.month))
                semester_obj = None
                try:
                    semester_obj, created = Semester.objects.get_or_create(
                        year=catalog.year,
                        month=catalog.month,
                        defaults={
                            'visible': True,
                            'name': catalog.name,
                            'ref': filename,
                        })
                except IntegrityError as error:
                    logger.debug(' DUPLICATE SEMESTER ' + repr(semester_obj) + ': ' + repr(error))
                    continue
                self.create_courses(catalog, semester_obj)
                self.create_crosslistings(semester_obj, set(catalog.crosslistings.values()))
                semester_obj.save() # => update date_updated property
                if created:
                    logger.debug(' CREATE SEMESTER ' + repr(semester_obj))
                    # A brand-new semester needs admin review.
                    self.notifier.requires_notification()
                else:
                    logger.debug(' EXISTS SEMESTER ' + repr(semester_obj))
                if self.sections_changed:
                    sections_modified.send(sender=self, semester=semester_obj)
                self.clear_unused(semester_obj)
    def create_courses(self, catalog, semester_obj):
        "Inserts all the course data, including section information, into the database from the catalog."
        # NOTE(review): `list` shadows the builtin -- holds comm-intensive course names.
        list = self.add_comm_intense(catalog, semester_obj)
        for course in catalog.get_courses():
            comm = (course.name in list)
            department = self.get_or_create_department(semester_obj, code=course.dept, name=course.full_dept)
            # we use our OfferedFor.ref to determine if we need to create a new
            # course or not.
            ref_name = '%r:%r:%r' % (course.name, course.dept, course.num)
            qs = OfferedFor.objects.filter(semester=semester_obj, course__department__code=course.dept, course__number=course.num)
            qs = qs.select_related('course')
            try:
                offered_for = qs.get(ref=ref_name)
                course_obj = offered_for.course
                created = False
            except OfferedFor.DoesNotExist:
                course_obj = None
            if not course_obj:
                # for migration support... set empty refs.
                try:
                    offered_for = qs.get(ref='')
                    offered_for.ref = ref_name
                    offered_for.save()
                    course_obj = offered_for.course
                    created = False
                except OfferedFor.DoesNotExist:
                    course_obj = None
            if not course_obj:
                course_obj = Course.objects.create(
                    name=course.name,
                    number=course.num,
                    department=department,
                    min_credits=course.cred[0],
                    max_credits=course.cred[1],
                    grade_type=course.grade_type,
                    is_comm_intense=comm,
                )
                created = True
            if not created:
                # NOTE(review): self.forced is only assigned in SISRPIImporter.sync();
                # the ROCS sync() path would raise AttributeError here -- confirm.
                if self.forced:
                    course_obj.name = course.name
                    course_obj.min_credits, course_obj.max_credits = course.cred
                    course_obj.grade_type = course.grade_type
                    course_obj.is_comm_intense = comm
                    course_obj.save()
            else:
                OfferedFor.objects.get_or_create(
                    course=course_obj, semester=semester_obj, ref=ref_name
                )
            self.create_sections(course, course_obj, semester_obj)
            crns = [str(s.crn) for s in course_obj.sections.all()]
            logger.debug(' %s %s (crns: %s)' % (('+' if created else ' '), course.name, ', '.join(crns)))
    def add_comm_intense(self, catalog, semester):
        """Return names of communication-intensive courses parsed from the comm PDF."""
        from rpi_courses import get_comm_file
        pdf = get_comm_file(semester)
        # NOTE(review): `list` shadows the builtin.
        list = []
        # Each match is a 5-digit CRN followed by a 4-letter department code.
        crns = re.findall(r"\d{5}\s[A-Z]{4}", pdf)
        print "Found " + str(len(crns)) + " communication intensive sections"
        for i in crns:
            course = catalog.find_course_by_crn(int(i.split()[0]))
            if course is not None:
                print course.name
                list.append(course.name)
        return list
def create_sections(self, course, course_obj, semester_obj):
"Inserts all section data, including time period information, into the database from the catalog."
for section in course.sections:
# TODO: encode prereqs / notes
remove_prereq_notes(section)
section_obj, created = Section.objects.get_or_create(
crn=section.crn,
semester=semester_obj,
defaults=dict(
notes='\n'.join(section.notes),
number=section.num,
seats_taken=section.seats_taken,
seats_total=section.seats_total,
course=course_obj,
)
)
if not created:
section_obj.number = section.num
section_obj.seats_taken = section.seats_taken
section_obj.seats_total = section.seats_total
section_obj.course = course_obj
section_obj.notes = '\n'.join(section.notes)
section_obj.save()
else:
self.sections_changed = False
self.create_timeperiods(semester_obj, section, section_obj)
# maps from catalog data to database representation
DOW_MAPPER = {
'Monday': Period.MONDAY,
'Tuesday': Period.TUESDAY,
'Wednesday': Period.WEDNESDAY,
'Thursday': Period.THURSDAY,
'Friday': Period.FRIDAY,
'Saturday': Period.SATURDAY,
'Sunday': Period.SUNDAY,
}
def compute_dow(self, days_of_week):
"""Assists in converting rpi_course's representation of days of the week to the database kind."""
value = 0
for dow in days_of_week:
value = value | self.DOW_MAPPER.get(dow, 0)
return value
    def create_timeperiods(self, semester_obj, section, section_obj):
        """Creates all the SectionPeriod and Period instances for the given section object from
        the catalog and the section_obj database equivalent to refer to.
        """
        for period in section.periods:
            if None in (period.start, period.end):
                continue # invalid period for all we care about... ignore.
            period_obj, pcreated = Period.objects.get_or_create(
                start=period.start_time,
                end=period.end_time,
                days_of_week_flag=self.compute_dow(period.days),
            )
            # Goes through self.SectionPeriod (the Synchronizer) so untouched
            # rows can be trimmed afterwards by clear_unused().
            sectionperiod_obj, created = self.SectionPeriod.get_or_create(
                period=period_obj,
                section=section_obj,
                semester=semester_obj,
                defaults=dict(
                    instructor=period.instructor,
                    location=period.location,
                    kind=period.type,
                )
            )
            if not created:
                # Refresh fields that may change between catalog snapshots.
                sectionperiod_obj.instructor = period.instructor
                sectionperiod_obj.location = period.location
                sectionperiod_obj.kind = period.type
                sectionperiod_obj.save()
    def get_or_create_department(self, semester_obj, code, name=None):
        """Fetch/create the Department for `code` and link it to the semester."""
        dept, created = Department.objects.get_or_create(
            code=code,
            defaults={
                'name': name or ''
            }
        )
        # Ensure the semester <-> department association exists.
        SemesterDepartment.objects.get_or_create(
            semester=semester_obj,
            department=dept
        )
        return dept
    def create_crosslistings(self, semester_obj, crosslistings):
        "Creates all crosslisting information into the database for all the sections."
        for crosslisting in crosslistings:
            # Stable ref id: sorted, comma-joined CRNs of the crosslisted group.
            refid = ','.join(map(str, sorted(tuple(crosslisting.crns))))
            crosslisting_obj, created = SectionCrosslisting.objects.get_or_create(semester=semester_obj, ref=refid)
            Section.objects.filter(crn__in=crosslisting.crns).update(crosslisted=crosslisting_obj)
class SISRPIImporter(ROCSRPIImporter):
def get_files(self, latest_semester):
from rpi_courses import list_sis_files_for_date
files = list_sis_files_for_date()
if latest_semester:
files.append(latest_semester.ref)
return list(set(files))
    def sync(self, get_files=None, get_catalog=None, force=False):
        """Fetch SIS files and import any new/updated semesters.

        force=True bypasses the last-modified short-circuit checks.
        """
        if get_files is None:
            get_files = self.get_files
        if get_catalog is None:
            from rpi_courses import CourseCatalog
            get_catalog = CourseCatalog.from_string
        self.forced = force
        for filename in get_files(self.latest_semester):
            semester = self.semesters.get(filename)
            # if latest semester or newer semester
            if (not semester) or semester == self.latest_semester:
                try:
                    with closing(urllib2.urlopen(filename)) as page:
                        logger.debug("OPEN " + filename)
                        if force or (semester and semester.date_updated is not None):
                            # Skip unchanged files via the HTTP Last-Modified header.
                            last_mod = dateutil.parser.parse(dict(page.info())['last-modified']).replace(tzinfo=None)
                            if not force and last_mod <= semester.date_updated:
                                logger.debug("Skipping b/c of mod date: %r <= %r" % (last_mod, semester.date_updated))
                                continue
                        catalog = get_catalog(page.read(), filename)
                except urllib2.URLError:
                    logger.debug("Failed to fetch url (%r)" % (filename))
                    continue
                if not force and self.latest_semester and semester == self.latest_semester:
                    continue # already up-to-date
                logger.debug("Semester: %r => %s-%s" % (
                    filename,
                    catalog.year,
                    catalog.month,
                ))
                semester_obj = None
                try:
                    semester_obj, created = Semester.objects.get_or_create(
                        year=catalog.year,
                        month=catalog.month,
                        defaults={
                            'visible': False,
                            'name': catalog.name,
                            'ref': filename,
                        })
                except IntegrityError as error:
                    logger.debug(' DUPLICATE SEMESTER ' + repr(semester_obj) + ': ' + repr(error))
                    continue
                self.create_courses(catalog, semester_obj)
                # catalog doesn't support this for now.
                # self.create_crosslistings(semester_obj, set(catalog.crosslistings.values()))
                semester_obj.save() # => update date_updated property
                if created:
                    logger.debug(' CREATE SEMESTER ' + repr(semester_obj))
                    self.notifier.requires_notification()
                else:
                    logger.debug(' EXISTS SEMESTER ' + repr(semester_obj))
                if self.sections_changed:
                    sections_modified.send(sender=self, semester=semester_obj)
                self.clear_unused(semester_obj)
def remove_prereq_notes(section):
    """Blank out every note that begins with 'PRE-REQ: '.

    The note is replaced with an empty string (not removed) so the list
    length and positions are preserved, matching the original behavior.
    """
    section.notes = [
        "" if note.startswith("PRE-REQ: ") else note
        for note in section.notes
    ]
def import_latest_semester(force=False):
    "Imports RPI data into the database."
    logger.debug('Importing latest semester: %s' % datetime.datetime.now().strftime('%A %x %X %f%Z'))
    notifier = SemesterNotifier()
    # ROCSRPIImporter().sync() # slower.. someone manually updates this I think?
    # All-or-nothing: roll back the whole import on failure.
    with transaction.atomic():
        SISRPIImporter(notifier).sync(force=force)
    notifier.notify()
def import_all_semesters(force=False):
    """Import every known SIS and ROCS semester file, each in its own transaction."""
    from rpi_courses import list_sis_files, list_rocs_xml_files
    logger.debug('Importing ALL semesters: %s' % datetime.datetime.now().strftime('%A %x %X %f%Z'))
    notifier = SemesterNotifier()
    urls = []
    urls.extend(list_sis_files())
    urls.extend(list_rocs_xml_files())
    for url in urls:
        print url
        # ROCS files use the XML importer; everything else goes through SIS.
        if 'rocs' in url:
            importer = ROCSRPIImporter(notifier)
        else:
            importer = SISRPIImporter(notifier)
        with transaction.atomic():
            importer.sync(get_files=lambda *a, **k: [url])
    notifier.notify()
def import_catalog(all=False):
    """Merge catalog descriptions, titles and prereqs into existing Course rows.

    Only courses with a non-empty catalog description are updated.
    """
    catalog = parse_catalog(all)
    courses = Course.objects.all()
    for c in courses:
        key = str(c.department.code) + str(c.number)
        # `key in catalog` instead of `key in catalog.keys()`: O(1) membership
        # and avoids materializing the key list on every course.
        if key not in catalog:
            continue
        entry = catalog[key]
        if 'description' in entry and entry['description'] != "":
            c.description = entry['description']
            c.name = entry['title']
            c.prereqs = entry['prereqs']
            c.save()
    # uses >1GB of ram - currently unacceptable
    # add_cross_listing()
def import_data(force=False, all=False, catalog=False):
    """Entry point: import semesters (all or just latest) and optionally the catalog."""
    if all:
        print 'Importing all semesters'
        import_all_semesters(force=force)
    else:
        import_latest_semester(force=force)
    if catalog:
        import_catalog(all=all)
def add_cross_listing():
    """Heuristically crosslink sections of a course that share time and instructors.

    NOTE: flagged as too memory-hungry and currently not called (see the
    commented-out call in import_catalog). The original body crashed:
    it indexed a set with an undefined `i`, referenced `courses.sections`
    instead of `c.sections`, and never saved the crosslisting row.
    """
    from itertools import product
    courses = Course.objects.all().prefetch_related('sections')
    for c in courses:
        sections = c.sections.all()
        cross_list = set()
        for s1, s2 in product(sections, sections):
            if s1 != s2 and s1.conflicts_with(s2) and s1.instructors == s2.instructors:
                cross_list.add(str(s1.id))
                cross_list.add(str(s2.id))
        if not cross_list:
            continue
        # Sorted for a deterministic ref; save before assigning the FK.
        sc = SectionCrosslisting(semester=sections[0].semester,
                                 ref=",".join(sorted(cross_list)))
        sc.save()
        for section_id in cross_list:
            section = c.sections.get(id=section_id)
            section.crosslisted = sc
            section.save()
def export_schedule(crns):
    """Build an iCalendar (.ics) string for the sections with the given CRNs.

    Pulls the academic calendar to find the real semester start/end and the
    days off, then emits one weekly-recurring event per section period.
    """
    weekday_offset = {}
    for i, day in enumerate(DAYS):
        weekday_offset[day] = i
    tzinfo = pytz.timezone("America/New_York")
    calendar = Calendar()
    calendar.add('prodid', '-//YACS Course Schedule//EN')
    calendar.add('version', '2.0')
    sections = Section.objects.filter(crn__in=crns).prefetch_related('periods', 'section_times', 'section_times__period', 'course', 'semester')
    # Provisional bounds: 1st of the semester month, plus 150 days.
    semester_start = datetime.datetime(sections[0].semester.year, sections[0].semester.month, 1, 0, tzinfo=pytz.timezone("America/New_York")).astimezone(pytz.utc)
    current = datetime.datetime.utcnow()
    semester_end = semester_start + datetime.timedelta(150)
    start_date = str(semester_start.date()).replace('-', '')
    end_date = str(current.date()).replace('-', '')
    # events = list(rpi_calendars.filter_related_events(rpi_calendars.download_events(rpi_calendars.get_url_by_range(start_date, end_date))))
    # events.extend(list(rpi_calendars.filter_related_events(rpi_calendars.download_events(rpi_calendars.get_url()))))
    events = list(rpi_calendars.filter_related_events(rpi_calendars.download_events(rpi_calendars.get_url())))
    days_off = []
    break_start = None
    break_end = None
    logger.debug('== Events ==')
    # Refine start/end/days-off from named academic-calendar events.
    for e in events:
        is_after_start = e.start.date() > semester_start.date()
        logger.debug(' %s %s' % (e.start.date(), e.name))
        if re.search(str(sections[0].semester.name.split(' ')[0]) + ' ' + str(sections[0].semester.year), e.name) is not None:
            semester_start = e.start
            logger.debug(' -> Semester Start')
        if re.search(".*(no classes).*", e.name.lower()) is not None and is_after_start:
            days_off.append(e.start.date())
            logger.debug(' -> Day Off')
        if re.search(".*(spring break)|(thanksgiving).*", e.name.lower()) is not None and is_after_start:
            break_start = e.start
            logger.debug(' -> Range of days off')
        if re.search(".*(classes resume).*", e.name.lower()) is not None and break_start is not None:
            break_end = e.start
            logger.debug(' -> Range of days end')
        if re.search("(.*)study-review days", str(e.name).lower()) is not None and is_after_start:
            semester_end = e.start
            logger.debug(' -> Semester End')
            break
    # Expand a multi-day break into individual days off.
    if break_start is not None and break_end is not None:
        length = break_end - break_start
        for i in range(length.days):
            days_off.append((break_start + datetime.timedelta(i)).date())
    logger.debug('Semester start: %s' % semester_start)
    logger.debug('Semester end: %s' % semester_end)
    logger.debug('days off: %s' % days_off)
    for s in sections:
        for p in s.periods.all():
            event = Event()
            # First occurrence: the first matching weekday on/after semester start.
            offset = weekday_offset[p.days_of_week[0]] - semester_start.weekday()
            if offset < 0:
                offset = 7 + offset
            begin = semester_start + datetime.timedelta(offset)
            event.add('summary', '%s - %s (%s)' % (s.course.code, s.course.name, s.crn))
            event.add('dtstart', datetime.datetime(begin.year, begin.month, begin.day, p.start.hour, p.start.minute, tzinfo=tzinfo))
            event.add('dtend', datetime.datetime(begin.year, begin.month, begin.day, p.end.hour, p.end.minute, tzinfo=tzinfo))
            days = []
            for d in p.days_of_week:
                days.append(d[:2])
            event.add('rrule', dict(
                freq='weekly',
                interval=1,
                byday=days,
                until=datetime.datetime(semester_end.year, semester_end.month, semester_end.day, p.end.hour, p.end.minute, tzinfo=tzinfo)))
            event.add('exdate', days_off)
            calendar.add_component(event)
    # Work around icalendar emitting EXDATE without VALUE=DATE for date lists.
    return calendar.to_ical().replace("EXDATE", "EXDATE;VALUE=DATE")
| {
"repo_name": "JGrippo/YACS",
"path": "courses/bridge/rpi.py",
"copies": "2",
"size": "21713",
"license": "mit",
"hash": 2860979662781118500,
"line_mean": 40.1231060606,
"line_max": 162,
"alpha_frac": 0.5779486943,
"autogenerated": false,
"ratio": 3.8919161139989247,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5469864808298924,
"avg_score": null,
"num_lines": null
} |
"""All the db operations for storing and retrieving boog data"""
from collections import Counter
import itertools
import operator
from copy import copy
import os.path
from lib.sort_data import sorted_series, sorted_titles, sorted_filters
from lib.sort_data import sorted_apg
from lib.variables import VARIABLES
import lib.db_sql as db_sql
def T_(message):
    """Marker for gettext string extraction; returns *message* unchanged."""
    return message
def insert_new_book(username, data):
    """Make a new db entry.

    Only keys listed in VARIABLES.dbnames are persisted.
    Returns the new row id as a string.
    NOTE(review): `username` is interpolated into SQL as a table name and
    cannot be parameterized -- ensure it is validated upstream.
    """
    cursor, conn = db_sql.connect('books.db')
    sql = ("INSERT INTO " + username + " (")
    temp_list = []
    for key, value in data.items():
        if key in VARIABLES.dbnames:
            temp_list.append(value)
            sql += key + ", "
    # Drop the trailing ", " before closing the column list.
    sql = sql[:-2]
    sql += ") VALUES (" + ", ".join(['?']*len(temp_list)) + ")"
    cursor.execute(sql, tuple(temp_list))
    conn.commit()
    conn.close()
    return str(cursor.lastrowid)
def insert_many_new(username, data):
    """Make many new db entrys (when importing books).

    Assumes every dict in *data* has the same keys as the first one.
    """
    cursor, conn = db_sql.connect('books.db')
    keys = list(data[0].keys())
    sql = ("INSERT INTO " + username + " (" + ", ".join(keys) +
           ") VALUES (" + ", ".join(['?']*len(keys)) + ")")
    insert_data = []
    for book in data:
        # Normalize string fields into their list form before storage.
        book = str_to_array(book)
        temp_list = []
        for key in keys:
            temp_list.append(book[key])
        insert_data.append(tuple(temp_list))
    cursor.executemany(sql, insert_data)
    conn.commit()
    conn.close()
    return
def update_book(username, book_id, data):
    """Update book data.

    NOTE(review): returns cursor.lastrowid after an UPDATE, which is not
    meaningful for updates -- confirm whether callers use the return value.
    """
    cursor, conn = db_sql.connect('books.db')
    keys = list(data.keys())
    sql = ("UPDATE " + username + " SET " + " = ?, ".join(keys) +
           " = ? WHERE _id = ?")
    temp_list = []
    for key in keys:
        temp_list.append(data[key])
    temp_list.append(book_id)
    cursor.execute(sql, tuple(temp_list))
    conn.commit()
    conn.close()
    return cursor.lastrowid
def array_to_str(data):
    """Flatten list-valued fields into their stored string form.

    Name fields (see VARIABLES.name_fields) are joined with ' & ', the genre
    list with ', '. Absent or None fields are left untouched (the original
    crashed on a None genre).
    """
    for field in VARIABLES.name_fields:
        if data.get(field) is not None:
            data[field] = ' & '.join(data[field])
    if data.get('genre') is not None:
        data['genre'] = ', '.join(data['genre'])
    return data
def str_to_array(data):
    """Inverse of array_to_str: split stored strings back into lists.

    Name fields split on ' & ', genre on ', '. Fields that are absent or not
    strings are left untouched (the original crashed on a None value).
    """
    for field in VARIABLES.name_fields:
        if isinstance(data.get(field), str):
            data[field] = data[field].split(' & ')
    if isinstance(data.get('genre'), str):
        data['genre'] = data['genre'].split(', ')
    return data
def rearrange_data(data, warning):
    """Split out the group whose main sorting field is empty.

    Every row gets 'empty_field' and 'sub_items' flags. Rows with an empty or
    None '_id' are relabelled with *warning* and removed from the main list;
    the (at most one, since data comes pre-grouped) matching group is returned
    separately.

    Returns (data, data_temp) where data_temp is None when no empty group exists.
    """
    data_temp = None
    for row in data:
        # Idiom fix: `is None` instead of `== None`; behavior unchanged.
        if row['_id'] == '' or row['_id'] is None:
            data_temp = row
            row['_id'] = warning
            row['empty_field'] = True
        else:
            row['empty_field'] = False
        row['sub_items'] = True
    data = [x for x in data if x['_id'] != warning]
    return data, data_temp
def sort_append_rearranged(data_temp, data):
    """Sort the separated empty-field group by title and append it to *data*."""
    if data_temp is None:
        return data
    data_temp['books'] = sorted_titles(data_temp['books'], 'title')
    data.append(data_temp)
    return data
def query_builder(active_filters):
    """Build a WHERE clause and parameter tuple for the active shelf/filters.

    Status filters become fixed SQL snippets; language/format/shelf filters
    collect parameterized IN (...) lists. The sentinel names ('No language',
    'No format', 'Not shelfed') match rows whose column holds ''.

    Returns (query, para_list); query is '' when no filter is active.
    """
    status_sql = {
        'stat_Read': " AND read_count > 0",
        'stat_Unread': " AND read_count = 0",
        'stat_Currently reading': " AND read_current = 1",
    }
    paras = {}
    para_list = ()
    query = ""
    for _filter in active_filters:
        if _filter in status_sql:
            query += status_sql[_filter]
        elif _filter.startswith('lang_'):
            name = _filter[5:]
            if name == 'No language':
                name = ''
            paras['lang'] = paras.get('lang', ()) + (name, )
        elif _filter.startswith('form_'):
            name = _filter[5:]
            if name == 'No format':
                name = ''
            paras['form'] = paras.get('form', ()) + (name, )
        elif _filter.startswith('shelf_'):
            name = _filter[6:]
            if name == 'Not shelfed':
                name = ''
            paras['shelf'] = paras.get('shelf', ()) + (name, )
    for key, column in (('lang', 'language'), ('form', 'form'),
                        ('shelf', 'shelf')):
        if key in paras:
            query += " AND %s IN (%s)" % (column, ','.join('?'*len(paras[key])))
            para_list += paras[key]
    if query != '':
        # Turn the leading " AND" into " WHERE".
        query = " WHERE" + query[4:]
    return query, para_list
def update(username, data):
    """Prepare updated or new data to be stored in the db.

    Normalizes the start/finish/abdoned form fields into a reading_stats
    list and derives read_count / read_current, then inserts or updates.
    Returns the book id.
    """
    book_id = data['book_id']
    del data['book_id']
    data = str_to_array(data)
    if 'start_date' in data:
        data['reading_stats'] = []
        # A single reading comes in as a scalar; coerce to lists.
        if not isinstance(data['start_date'], list):
            data['start_date'] = [data['start_date']]
        if not isinstance(data['finish_date'], list):
            data['finish_date'] = [data['finish_date']]
        temp = [False] * len(data['start_date'])
        if 'abdoned' in data:
            # 'abdoned' holds 1-based indexes of abandoned readings.
            if not isinstance(data['abdoned'], list):
                data['abdoned'] = [data['abdoned']]
            for i in data['abdoned']:
                temp[int(i)-1] = True
        data['abdoned'] = temp
        data['read_count'] = 0
        data['read_current'] = 0
        for start, finish, abdoned in zip(data['start_date'],
                                          data['finish_date'],
                                          data['abdoned']):
            data['reading_stats'].append({'start_date' : start,
                                          'finish_date' : finish,
                                          'abdoned' : abdoned})
            # Finished (or never started) and not abandoned counts as a read.
            if (finish != '' or start == '') and not abdoned:
                data['read_count'] += 1
            if start != '' and finish == '' and not abdoned:
                data['read_current'] = 1
            else:
                data['read_current'] = 0
        del data['start_date']
        del data['finish_date']
        del data['abdoned']
    else:
        data['reading_stats'] = []
        data['read_count'] = 0
    if book_id == 'new_book':
        book_id = insert_new_book(username, data)
    else:
        update_book(username, book_id, data)
    return book_id
def change_field(username, edit, old_name, new_name):
    """Updated a field for multiple books (used for mass edit).

    Series are stored as plain strings and swapped directly; name fields
    are stored as lists, so each matching book's list is rewritten.
    """
    cursor, conn = db_sql.connect('books.db')
    if edit == 'series':
        sql = ("UPDATE " + username + " SET series = ? WHERE series = ?")
        cursor.execute(sql, (new_name, old_name, ))
    elif edit in VARIABLES.name_fields:
        # LIKE on the serialized list; '"name"' narrows to exact entries.
        sql = ("SELECT " + edit + ", _id FROM " + username + " WHERE " + edit +
               " LIKE ?")
        cursor.execute(sql, ('%"' + old_name + '"%', ))
        sql = ("UPDATE " + username + " SET " + edit + " = ? WHERE _id = ?")
        for book in cursor.fetchall():
            if len(book[edit]) == 1:
                cursor.execute(sql, ([new_name], book['_id'], ))
            else:
                # Replace only the matching entry, preserving order.
                field_new = []
                for name in book[edit]:
                    if name == old_name:
                        field_new.append(new_name)
                    else:
                        field_new.append(name)
                cursor.execute(sql, (field_new, book['_id'], ))
    conn.commit()
    conn.close()
def star_series(username, series_name, status):
    """Toggle the 'series complete' flag for every book in *series_name*.

    status == 'star' clears the flag; anything else sets it.
    """
    cursor, conn = db_sql.connect('books.db')
    flag = 0 if status == 'star' else 1
    sql = ("UPDATE " + username + " SET series_complete = ? WHERE series = ?")
    cursor.execute(sql, (flag, series_name, ))
    conn.commit()
    conn.close()
def get_by_id(username, book_id, field=None):
    """Get a book by its id and return its data.

    With *field* set, only that column is selected; either way the row is
    converted to a dict and list fields are flattened to strings.
    """
    cursor, conn = db_sql.connect('books.db')
    if field == None:
        sql = "SELECT * FROM " + username + " WHERE _id = ?"
        cursor.execute(sql, (book_id, ))
    else:
        sql = "SELECT " + field + " FROM " + username + " WHERE _id = ?"
        cursor.execute(sql, (book_id,))
    data = array_to_str(dict(cursor.fetchone()))
    conn.close()
    return data
def get_all(username):
    """Return every book row in the user's table as a list of dicts."""
    cursor, conn = db_sql.connect('books.db')
    cursor.execute("SELECT * FROM " + username)
    rows = [array_to_str(dict(row)) for row in cursor.fetchall()]
    conn.close()
    return rows
def aggregate_items(username, group_by, get_fields, active_filters,
                    array=False):
    """Return a list of all books that statisfy the current filters.

    Books are grouped by *group_by*; with array=True a list-valued field is
    fanned out so each value forms its own group (row duplicated per value).
    Result shape: [{'_id': group_key, 'books': [row, ...]}, ...].
    """
    cursor, conn = db_sql.connect('books.db')
    sql = ("SELECT " + group_by + ", " +
           ", ".join(get_fields) + " FROM " + username)
    query, paras = query_builder(active_filters)
    cursor.execute(sql + query, paras)
    data = [dict(x) for x in cursor.fetchall()]
    data_temp = []
    if array:
        for row in data:
            if row[group_by] == None:
                row[group_by] = ['']
            # Keep the first value on the original row, clone for the rest.
            temp_field = row[group_by][1:]
            row[group_by] = row[group_by][0]
            for value in temp_field:
                data_temp.append(copy(row))
                data_temp[-1][group_by] = value
        data += data_temp
    # groupby requires the data to be pre-sorted on the grouping key.
    data = sorted(data, key=operator.itemgetter(group_by))
    list1 = []
    for key, items in itertools.groupby(data, operator.itemgetter(group_by)):
        list1.append({'_id': key, 'books': list(items)})
    conn.close()
    return list1
def series(username, variant, active_filters):
    """Return the user's books grouped and ordered by series.

    ``variant`` is "<layout>_<order>", e.g. ``variant1_year``: the order part
    selects the sort column, the layout part controls whether books outside
    any series are flattened into the main list.
    """
    order_by = variant.split('_')[1]
    if order_by == 'year':
        order_by = 'release_date AS order_nr'
    elif order_by == 'order':
        order_by = 'order_nr'
    data = aggregate_items(username, 'series',
                           ["title", "_id", order_by, "series_complete"],
                           active_filters)
    # data_temp holds the 'Not in a series' group pulled out of the list.
    data, data_temp = rearrange_data(data, 'Not in a series')
    # Fix: identity comparison with None instead of `!= None`.
    if variant.split('_')[0] == 'variant1' and data_temp is not None:
        # Layout 1: series-less books become flat, title-keyed entries.
        for row in data_temp['books']:
            data.append({'_id' : row['title'],
                         'sub_items' : False,
                         'books' : {'_id' : row['_id']}})
        data = sorted_series(data, variant)
    else:
        data = sorted_series(data, variant)
        data = sort_append_rearranged(data_temp, data)
    return data
def author_and_more(username, sortby, variant, active_filters):
    """Return books grouped by author (or a similar field).

    :param sortby: grouping column; name-like fields and genres are stored as
        arrays and get fanned out by aggregate_items.
    :param variant: 'year' (ordered by release date) or 'title'
    :raises ValueError: for any other variant (previously this fell through
        and crashed later with a NameError on undefined locals).
    """
    array = sortby in VARIABLES.name_fields or sortby == 'genre'
    if variant == 'year':
        data = aggregate_items(
            username, sortby,
            ["title", "_id", "release_date AS order_nr"],
            active_filters, array
        )
        sort_by_order = True
    elif variant == 'title':
        data = aggregate_items(
            username, sortby,
            ["title", "_id"],
            active_filters, array
        )
        sort_by_order = False
    else:
        raise ValueError("unknown variant: " + repr(variant))
    data, data_temp = rearrange_data(data, 'No ' + sortby)
    data = sorted_apg(data, sort_by_order, sortby)
    data = sort_append_rearranged(data_temp, data)
    return data
def titles(username, variant, active_filters):
    """Return one flat, title-keyed entry per book for the title view."""
    # Which column feeds the secondary sort key, per variant.
    order_column = {'year': 'release_date',
                    'pages': 'pages',
                    'added': 'add_date'}
    cursor, conn = db_sql.connect('books.db')
    sql = ("SELECT title, release_date, pages, add_date, _id FROM " + username)
    query, paras = query_builder(active_filters)
    cursor.execute(sql + query, paras)
    rows = [dict(x) for x in cursor.fetchall()]
    conn.close()
    data = []
    for row in rows:
        entry = {'_id' : row['title'],
                 'sub_items' : False,
                 'books' : {'_id' : row['_id']}}
        if variant in order_column:
            entry['order_nr'] = row[order_column[variant]]
        data.append(entry)
    return sorted_titles(data, '_id', variant)
def covers(username, active_filters):
    """Return ``{book_id: thumbnail path}`` for the cover-grid view.

    Books without a stored cover get a placeholder icon.
    """
    cursor, conn = db_sql.connect('books.db')
    sql = ("SELECT front, _id FROM " + username)
    query, paras = query_builder(active_filters)
    cursor.execute(sql + query, paras)
    data_temp = [dict(x) for x in cursor.fetchall()]
    data = {}
    for row in data_temp:
        # Fix: identity comparison with None instead of `!= None`.
        if row["front"] is not None:
            # NOTE(review): the 'thumbnails/' prefix is added only when the
            # thumb file is NOT found next to the cover -- looks intentional
            # (legacy vs. new layout) but confirm against the upload code.
            if os.path.isfile(row["front"] + '_thumb.jpg'):
                data[str(row["_id"])] = row["front"] + '_thumb.jpg'
            else:
                data[str(row["_id"])] = ('thumbnails/' + row["front"] +
                                         '_thumb.jpg')
        else:
            data[str(row["_id"])] = 'static/icons/circle-x.svg'
    conn.close()
    return data
def statistic_date(username, active_filters, _type):
    """Date statistics: histogram of years, or of months within one year.

    ``_type`` is "<column>" or "<column>#<year>"; columns other than
    release_date/add_date are looked up inside each row's reading_stats.
    Returns (labels, counts) for charting.
    """
    _type = _type.split('#')
    cursor, conn = db_sql.connect('books.db')
    query, paras = query_builder(active_filters)
    if _type[0] in ['release_date', 'add_date']:
        sql = ("SELECT " + _type[0] + " FROM " + username)
    else:
        sql = ("SELECT reading_stats FROM " + username)
    cursor.execute(sql + query, paras)
    data_temp = [dict(x) for x in cursor.fetchall()]
    data = []
    if len(_type) == 1:
        # No year given: bucket by year ([0:4] of an ISO date string).
        for row in data_temp:
            if _type[0] in ['release_date', 'add_date']:
                if row[_type[0]] != '':
                    data.append(row[_type[0]][0:4])
            else:
                for row2 in row["reading_stats"]:
                    if row2[_type[0]] != '':
                        data.append(row2[_type[0]][0:4])
    elif len(_type) == 2:
        # Year given: bucket by month ([5:7]) of dates inside that year.
        for row in data_temp:
            if _type[0] in ['release_date', 'add_date']:
                if row[_type[0]] != '' and row[_type[0]][0:4] == _type[1]:
                    data.append(row[_type[0]][5:7])
            else:
                for row2 in row["reading_stats"]:
                    if (row2[_type[0]] != '' and
                        row2[_type[0]][0:4] == _type[1]):
                        data.append(row2[_type[0]][5:7])
    data = Counter(data)
    labels = sorted(list(data))
    data = [data[x] for x in labels]
    # An empty-string bucket sorts first; present it as "Unknown".
    if labels != [] and labels[0] == "":
        labels[0] = "Unknown"
    conn.close()
    return labels, data
def statistic_pages_read(username, active_filters, _type):
    """Pages-read statistics, spread evenly over the reading interval.

    ``_type`` is "<anything>" or "<anything>#<year>": with a year, pages are
    distributed per month of that year, otherwise per year. A book read over
    several periods contributes pages/period to each one.
    """
    _type = _type.split('#')
    cursor, conn = db_sql.connect('books.db')
    query, paras = query_builder(active_filters)
    sql = ("SELECT reading_stats, pages FROM " + username)
    cursor.execute(sql + query, paras)
    data_temp = [dict(x) for x in cursor.fetchall()]
    data = []
    # Collect [start_date, finish_date, pages] for every completed read.
    for row in data_temp:
        for row2 in row["reading_stats"]:
            if row2["finish_date"] != '' and row2["start_date"] != '':
                data.append([row2["start_date"], row2["finish_date"],
                             row["pages"]])
    data_temp = {}
    if len(_type) == 1:
        # Yearly buckets.
        for row in data:
            # `pages` is free text; take only the leading digits.
            i = 0
            while len(row[2]) > i and row[2][i].isdigit():
                i = i+1
            if i > 0:
                row[0] = int(row[0][0:4])
                row[1] = int(row[1][0:4])
                row[2] = int(row[2][0:i])
                if row[1]-row[0]+1 > 0:
                    # Spread the pages evenly over every year of the read.
                    for i in range(row[0], row[1]+1):
                        if i in data_temp:
                            data_temp[i] = (data_temp[i] +
                                            row[2]/(row[1]-row[0]+1))
                        else:
                            data_temp[i] = row[2]/(row[1]-row[0]+1)
    elif len(_type) == 2:
        # Monthly buckets within the requested year _type[1].
        for row in data:
            i = 0
            while len(row[2]) > i and row[2][i].isdigit():
                i = i+1
            # Only reads touching the requested year contribute.
            if i > 0 and (row[0][0:4] == _type[1] or
                          row[1][0:4] == _type[1]):
                row[0] = [int(row[0][0:4]), int(row[0][5:7])]
                row[1] = [int(row[1][0:4]), int(row[1][5:7])]
                row[2] = int(row[2][0:i])
                if row[1][0]-row[0][0]+1 > 1 or (
                        row[1][0]-row[0][0]+1 > 0 and
                        row[1][1]-row[0][1]+1 > 0):
                    # Clamp the month range to the part inside this year.
                    if row[0][0] == row[1][0]:
                        i_start = row[0][1]
                        i_end = row[1][1]
                    elif row[0][0] == int(_type[1]):
                        i_start = row[0][1]
                        i_end = 12
                    elif row[1][0] == int(_type[1]):
                        i_start = 1
                        i_end = row[1][1]
                    # Per-month share over the whole (multi-year) read span.
                    for i in range(i_start, i_end+1):
                        if i in data_temp:
                            data_temp[i] = (data_temp[i] +
                                            row[2]/(row[1][1]-row[0][1]+1+
                                                    12*(row[1][0]-row[0][0])))
                        else:
                            data_temp[i] = row[2]/(row[1][1]-row[0][1]+1+
                                                   12*(row[1][0]-row[0][0]))
    data = data_temp
    labels = sorted(list(data))
    data = [data[x] for x in labels]
    labels = [str(x) for x in labels]
    conn.close()
    return labels, data
def statistic_pages_book(username, active_filters):
    """Histogram of page counts, bucketed into 100-page bins."""
    cursor, conn = db_sql.connect('books.db')
    query, paras = query_builder(active_filters)
    sql = ("SELECT pages FROM " + username)
    cursor.execute(sql + query, paras)
    rows = [dict(x) for x in cursor.fetchall()]
    conn.close()
    bins = []
    for row in rows:
        # `pages` is free text: keep only the leading digits ("320 pp" -> "320").
        digits = "".join(itertools.takewhile(str.isdigit, row["pages"]))
        if digits:
            bins.append(round(int(digits)/100-0.5)*100)
    counts = Counter(bins)
    labels = sorted(list(counts))
    data = [counts[x] for x in labels]
    labels = [str(x) + "-" + str(x+99) for x in labels]
    return labels, data
def delete_by_id(username, book_id):
    """Remove the book with primary key *book_id* from the user's table."""
    cursor, conn = db_sql.connect('books.db')
    delete_sql = ("DELETE FROM " + username + " WHERE _id=?")
    cursor.execute(delete_sql, (book_id, ))
    conn.commit()
    conn.close()
def autocomplete(username, query, field, array):
    """Return prefix suggestions for *field* matching *query*.

    :param array: True when the column stores a JSON-ish list; the LIKE
        pattern then anchors on the opening quote of a list element and each
        element is prefix-checked in Python.
    :return: ``{'suggestions': [...]}`` with sorted, de-duplicated values
    """
    cursor, conn = db_sql.connect('books.db')
    # Field/table names are interpolated -- both must be trusted input.
    sql = ("SELECT DISTINCT " + field + " FROM " + username + " WHERE " +
           field + " LIKE ?")
    if array:
        cursor.execute(sql, ('%"' + query + '%', ))
        ac_list = []
        for book in cursor.fetchall():
            if len(book[field]) == 1:
                ac_list.append(book[field][0])
            else:
                # Several values in the list: keep only real prefix matches.
                for name in book[field]:
                    if name[0:len(query)] == query:
                        ac_list.append(name)
    else:
        cursor.execute(sql, (query + '%', ))
        ac_list = [x[field] for x in cursor.fetchall()]
    # sort + groupby == sorted de-duplication.
    ac_list = [key for key, _ in itertools.groupby(sorted(ac_list))]
    conn.close()
    return {'suggestions' : ac_list}
def filter_list(username, field, active_filters):
    """Return all selectable filter values for *field*, with item counts.

    An empty value is shown as a translated placeholder ('No format',
    'No language', 'Not shelfed') depending on the field.
    """
    cursor, conn = db_sql.connect('books.db')
    sql = "SELECT DISTINCT " + field + " AS 'name' FROM " + username
    cursor.execute(sql)
    all_filters = [dict(x) for x in cursor.fetchall()]
    conn.close()
    filters1 = sorted_filters([x for x in all_filters if x['name'] != ''])
    for _filter in all_filters:
        if _filter['name'] == '':
            if field == 'form':
                filters1.append({'name': T_('No format')})
            elif field == 'language':
                filters1.append({'name': T_('No language')})
            elif field == 'shelf':
                filters1.append({'name': T_('Not shelfed')})
    # The filter-tag namespace uses 'lang' where the column is 'language'.
    if field == 'language':
        field = 'lang'
    return add_count(username, field, filters1, active_filters)
def filter_list_stat(username, active_filters):
    """Build the fixed read-status filter entries, annotated with counts."""
    status_labels = (T_('Unread'), T_('Read'), T_('Currently reading'))
    choices = [{'name': label} for label in status_labels]
    return add_count(username, 'stat', choices, active_filters)
def add_count(username, field, filters1, active_filters):
    """Annotate each filter with the item count it would yield if toggled on."""
    for entry in filters1:
        tag = field + '_' + entry['name']
        if tag in active_filters:
            preview = active_filters
        else:
            preview = active_filters + [tag]
        entry['#items'] = count_items(username, preview)
    return filters1
def count_items(username, active_filters):
    """Count the books matching the given filters; returned as a string."""
    cursor, conn = db_sql.connect('books.db')
    count_sql = "SELECT COUNT(*) AS items FROM " + username
    query, paras = query_builder(active_filters)
    cursor.execute(count_sql + query, paras)
    total = cursor.fetchone()['items']
    conn.close()
    return str(total)
def drop(username):
    """Delete the user's whole book table (no-op when it doesn't exist)."""
    cursor, conn = db_sql.connect('books.db')
    cursor.execute('DROP TABLE IF EXISTS ' + username)
    conn.commit()
    conn.close()
| {
"repo_name": "JB26/Bibthek",
"path": "lib/db_books.py",
"copies": "1",
"size": "21873",
"license": "mit",
"hash": 2673404510419482000,
"line_mean": 36.5824742268,
"line_max": 79,
"alpha_frac": 0.5084350569,
"autogenerated": false,
"ratio": 3.592215470520611,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4600650527420611,
"avg_score": null,
"num_lines": null
} |
# All the default / common settings to be here
"""
Django settings for linkresting project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; production should
# load it from the environment instead.
SECRET_KEY = 'p26xba=pq%nk8jts&q@z1fel0sdah-29i^-fwbc^e!(5d_+9&0'
# NOTE(review): '*' disables host-header validation -- acceptable for dev
# only; restrict in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    #'south',
    'stories',
    'crispy_forms',
    'auth',
    'social.apps.django_app.default'
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'linkresting.urls'
WSGI_APPLICATION = 'linkresting.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.mysql',
#         'NAME': 'linkresting',
#         'USER': 'root',
#         'PASSWORD': 'root',
#         'HOST': 'localhost',
#         'PORT': '3306',
#     }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = ''
STATICFILES_DIRS = (
    os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'static'),
)
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'templates'),
)
LOGIN_URL = '/auth/'
LOGIN_REDIRECT_URL = '/'
APPEND_SLASH = True
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Extend Django's default context processors with request/social/site ones.
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS +=(
    'django.core.context_processors.request',
    'social.apps.django_app.context_processors.backends',
    'social.apps.django_app.context_processors.login_redirect',
    'linkresting.context_processors.disqus',
    'linkresting.context_processors.globals',
)
AUTHENTICATION_BACKENDS = (
    'auth.backends.EmailOrUsernameModelBackend',
    'social.backends.twitter.TwitterOAuth',
    'social.backends.facebook.FacebookOAuth2',
)
| {
"repo_name": "rajeshvaya/linkresting.com",
"path": "src/linkresting/linkresting/settings/defaults.py",
"copies": "1",
"size": "3027",
"license": "mit",
"hash": -4872468172410754000,
"line_mean": 24.225,
"line_max": 92,
"alpha_frac": 0.6987115956,
"autogenerated": false,
"ratio": 3.220212765957447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4418924361557447,
"avg_score": null,
"num_lines": null
} |
# All the imports necessary to run this game:
import random
import os
#Global variables used in our program
# Full 52-card deck as (sort_rank, code) tuples; code is rank+suit, e.g. 'TC'
# = Ten of Clubs. Suits are grouped C, D, S, H and ranks ascend within a suit,
# so sorting by the integer sorts a hand into display order.
cards = [(1,'2C'),(2,'3C'),(3,'4C'),(4,'5C'),(5,'6C'),(6,'7C'),(7,'8C'),
         (8,'9C'),(9,'TC'),(10,'JC'),(11,'QC'),(12,'KC'),(13,'AC'),
         (14,'2D'),(15,'3D'),(16,'4D'),(17,'5D'),(18,'6D'),(19,'7D'),(20,'8D'),
         (21,'9D'),(22,'TD'),(23,'JD'),(24,'QD'),(25,'KD'),(26,'AD'),
         (27,'2S'),(28,'3S'),(29,'4S'),(30,'5S'),(31,'6S'),(32,'7S'),(33,'8S'),
         (34,'9S'),(35,'TS'),(36,'JS'),(37,'QS'),(38,'KS'),(39,'AS'),
         (40,'2H'),(41,'3H'),(42,'4H'),(43,'5H'),(44,'6H'),(45,'7H'),(46,'8H'),
         (47,'9H'),(48,'TH'),(49,'JH'),(50,'QH'),(51,'KH'),(52,'AH')]
# Main-menu loop flag; set False when the player quits.
gameplay = True
# Per-player hands as (rank, code) tuples; cleared and refilled each deal.
user = []
alpha = []
bravo = []
charlie = []
# Run the game
def run_game():
    """Play full games of Hearts until someone reaches 100 points.

    Drives the deal, the three-card pass (cycle of 4 hands: left, across,
    right, no pass), the trick loop with rule enforcement for the human
    player, AI turns, scoring, and the winner screen.
    """
    user_sorted = []
    alpha_sorted = []
    bravo_sorted = []
    charlie_sorted = []
    decision = ["NA","NA","NA","NA"]
    score = [0,0,0,0,0]
    #[highest score, user score, alpha score, bravo score, charlie score]
    round = 1
    round_h = 1
    while score[0] < 100:
        alpha_decision = "NA"
        bravo_decision = "NA"
        charlie_decision = "NA"
        totem = "N"
        (user_sorted, alpha_sorted, bravo_sorted, charlie_sorted) = deck_distribute()
        # Here is the decision making loop of the game,
        # totem holder always goes first, go in clockwise fashion (U->A->B->C),
        # Keep going till 13 cards are exhausted,
        # check to make sure user can't break laws
        # run AI script when necessary, always print screen before user input
        first_play = "2C"
        first_hand = True
        hearts_broken = False
        # NOTE(review): queen_played is initialised but never set to True
        # anywhere visible -- the AI's queen logic may never trigger; verify.
        queen_played = False
        # Run the 3 card swap phase
        (user_sorted,alpha_sorted,bravo_sorted,charlie_sorted) = swap_three(user_sorted,alpha_sorted,bravo_sorted,charlie_sorted,round_h)
        # Resort players cards
        (user_sorted,alpha_sorted,bravo_sorted,charlie_sorted) = resorting(user_sorted,alpha_sorted,bravo_sorted,charlie_sorted)
        totem = check_two(user_sorted,alpha_sorted,bravo_sorted,charlie_sorted)
        while len(user_sorted) != 0:
            for j in range(0,4):
                # NOTE: after matching with an if or elif it returns (reason for loop consistency)
                if totem == "U":
                    # Re-prompt until the input passes every legality check;
                    # violations overwrite user_decision with an error string,
                    # which then fails remove() and lands in the except arm.
                    while True:
                        try:
                            user_decision = "NA"
                            print_screen(round,decision,score,user_sorted,first_play,hearts_broken,first_hand)
                            print "Choose a card to play (input a number)"
                            user_input = int(raw_input(">"))
                            user_decision = user_sorted[user_input]
                            if user_sorted[user_input] == "NA":
                                user_decision = "You need to input something"
                            if first_hand == True:
                                if "C" not in "".join(user_sorted):
                                    if "H" in user_sorted[user_input]:
                                        user_decision = "You can't play a heart on the first hand"
                                    if "QS" in user_sorted[user_input]:
                                        user_decision = "You can't play the QS on the first hand"
                            if j != 0:
                                if first_play[1:] in "".join(user_sorted):
                                    if first_play[1:] not in user_sorted[user_input]:
                                        user_decision = "Please choose a suit that matches the first card played"
                                elif "H" in user_sorted[user_input]:
                                    hearts_broken = True
                            if "2C" in "".join(user_sorted):
                                if user_sorted[user_input] != "2C":
                                    user_decision = "You must play the Two of Clubs"
                            if hearts_broken == False:
                                if "H" in user_sorted[user_input]:
                                    user_decision = "The Hearts have not been broken yet"
                            user_sorted.remove(user_decision)
                            break
                        except (TypeError, NameError, IndexError, ValueError):
                            print user_decision
                            raw_input("Press Enter to Continue")
                    # input the decision
                    decision[0] = user_decision
                    # pass totem to Alpha player
                    totem = "A"
                    # if this player was the first card, set decision to first
                    if j == 0:
                        first_play = user_decision
                elif totem == "A":
                    (alpha_sorted, alpha_decision, hearts_broken) = A_I(first_play,first_hand,hearts_broken,queen_played,decision,alpha_sorted)
                    # input player decision
                    decision[1] = alpha_decision
                    # pass totem to Bravo player
                    totem = "B"
                    # if this player was the first card, set decision to first
                    if j == 0:
                        first_play = alpha_decision
                elif totem == "B":
                    (bravo_sorted, bravo_decision, hearts_broken) = A_I(first_play,first_hand,hearts_broken,queen_played,decision,bravo_sorted)
                    # input player decision
                    decision[2] = bravo_decision
                    # pass totem to Charlie player
                    totem = "C"
                    # if this player was the first card, set decision to first
                    if j == 0:
                        first_play = bravo_decision
                elif totem == "C":
                    (charlie_sorted, charlie_decision, hearts_broken) = A_I(first_play,first_hand,hearts_broken,queen_played,decision,charlie_sorted)
                    # input player decision
                    decision[3] = charlie_decision
                    # pass totem to User
                    totem = "U"
                    # if this player was the first card, set decision to first
                    if j == 0:
                        first_play = charlie_decision
            # now that all decisions have been made, decide who gets points
            print_screen(round,decision,score,user_sorted,first_play,hearts_broken,first_hand)
            print "Press anything to continue..."
            raw_input(">")
            (score, totem) = point_allocation(first_play, score, totem, decision)
            decision = ["NA","NA","NA","NA"]
            first_play = "NA"
            first_hand = False
        round += 1
        round_h += 1
        # The passing direction cycles with period 4 (left/across/right/hold).
        if round_h == 5:
            round_h = 1
    print_winner(score)
    score = [0,0,0,0,0]
# This part of the code will create a deck and distribute to the four players
def deck_distribute():
    """Shuffle a fresh deck and deal 13 cards to each of the four players.

    Refills the module-level hands (user/alpha/bravo/charlie) in place and
    returns the four hands as sorted lists of card codes.
    """
    # The module-level hands persist between deals, so empty them first.
    del user[:]
    del alpha[:]
    del bravo[:]
    del charlie[:]
    deck = cards[:]
    random.shuffle(deck)
    random.shuffle(deck)
    hands = (user, alpha, bravo, charlie)
    # Deal one card to each player per pass, 13 passes.
    for _ in range(0, 13):
        for hand in hands:
            hand.append(deck.pop())
    del deck[:]
    sorted_codes = []
    for hand in hands:
        hand.sort()
        sorted_codes.append([code for _rank, code in hand])
    u_s, a_s, b_s, c_s = sorted_codes
    return u_s, a_s, b_s, c_s
# Check who has the two of clubs and mark them as the totem holder
def check_two(u, a, b, c):
    """Return the label ('U', 'A', 'B' or 'C') of the hand holding the 2C.

    The holder of the Two of Clubs leads the first trick.
    """
    keyword = '2C'
    for label, hand in (("U", u), ("A", a), ("B", b), ("C", c)):
        if keyword in "\n".join(hand):
            t = label
    return t
#Players must pick three cards and swap
def swap_three(u_s,a_s,b_s,c_s,r):
ua = []
aa = []
ba = []
ca = []
if r == 4:
return u_s,a_s,b_s,c_s
# Run a definition here for user to input 3 cards:
(u_s,ua) = user_three_remove(u_s,r)
# Run through each AI and have them all pick 3 cards:
(a_s,aa) = ai_three_remove(a_s)
(b_s,ba) = ai_three_remove(b_s)
(c_s,ca) = ai_three_remove(c_s)
#Now swap out the cards and let the user know what he got:
if r == 1:
u_s.extend(aa)
a_s.extend(ba)
b_s.extend(ca)
c_s.extend(ua)
if r == 2:
u_s.extend(ba)
a_s.extend(ca)
b_s.extend(ua)
c_s.extend(aa)
if r == 3:
u_s.extend(ca)
a_s.extend(ua)
b_s.extend(aa)
c_s.extend(ba)
return u_s,a_s,b_s,c_s
def user_three_remove(u,r):
    """Interactively ask the human for the three cards to pass away.

    :param u: the user's hand (card codes); mutated in place
    :param r: pass-cycle round (1/2/3) -- only used to name the recipient
    :return: (remaining hand, list of the three chosen cards)
    """
    a = []
    passing = ""
    if r == 1:
        passing = "Alpha"
    if r == 2:
        passing = "Bravo"
    if r == 3:
        passing = "Charlie"
    for x in range(0,3):
        cards_count = len(u)
        print """\n\n\n\n\n\n
   .--------------------------------------------------------------------.
   |                                                                    |
   |      _|_|_|                                                        |
   |      _|    _|    _|_|_|    _|_|_|    _|_|_|                        |
   |      _|_|_|    _|    _|  _|_|      _|_|                            |
   |      _|        _|    _|      _|_|      _|_|                        |
   |      _|          _|_|_|  _|_|_|    _|_|_|                          |
   |                                                                    |
   |                                                                    |
   |                                                                    |
   |      _|_|_|          _|                                            |
   |      _|    _|    _|_|_|    _|_|_|    _|_|_|    _|_|                |
   |      _|_|_|    _|    _|  _|        _|_|      _|_|_|_|              |
   |      _|        _|    _|  _|            _|_|  _|                    |
   |      _|          _|_|_|    _|_|_|  _|_|_|      _|_|_|              |
   |                                                                    |
   `--------------------------------------------------------------------'
    """
        first,second,third ="","",""
        # Render the hand as a row of card faces with their pick indices.
        for i in range(0,cards_count):
            first += "   %d    " % i
        print first
        print ".-----. " * (cards_count)
        for j in range(0,cards_count):
            second += "| %s| " % u[j]
        print second
        print "|     | " * (cards_count)
        print "| CTO | " * (cards_count)
        print "|     | " * (cards_count)
        for k in range(0,cards_count):
            third += "|%s | " % u[k]
        print third
        print "`-----' " * (cards_count)
        print "Choose a card to pass to player %s:" % passing
        # Loop until the index parses and is in range.
        while True:
            try:
                user_input = int(raw_input("> "))
                a.append(u[user_input])
                u.remove(u[user_input])
                break
            except (TypeError, NameError, IndexError, ValueError):
                print "That is not a valid input..."
                raw_input("Press Enter to Continue")
    return u,a
def ai_three_remove(s):
    """AI passing strategy: pick the three cards to give away.

    With fewer than six spades the Queen of Spades can't be defended, so
    QS/AS/KS are shipped first; otherwise the top card of the shortest
    non-spade suit goes. Returns (remaining hand, three chosen cards).
    """
    a = []
    for _ in range(0, 3):
        joined = "".join(s)
        if joined.count("S") < 6 and "QS" in joined:
            pick = "QS"
        elif joined.count("S") < 6 and "AS" in joined:
            pick = "AS"
        elif joined.count("S") < 6 and "KS" in joined:
            pick = "KS"
        else:
            # Shortest populated suit among clubs/diamonds/hearts, highest card.
            suit_array = [[x for x in s if suit in x] for suit in "CDH"]
            while len(min(suit_array, key=len)) == 0:
                suit_array.remove(min(suit_array))
            pick = min(suit_array, key=len)[-1]
        a.append(pick)
        s.remove(pick)
    return s, a
#Now that the user gets cards from another player, they are at the end, let us fix this:
def resorting(u_s,a_s,b_s,c_s):
u_n = []
a_n = []
b_n = []
c_n = []
for x,y in enumerate(cards):
if y[1] in "".join(u_s):
u_n.append(cards[x])
u = [x for y, x in u_n]
for x,y in enumerate(cards):
if y[1] in "".join(a_s):
a_n.append(cards[x])
a = [x for y, x in a_n]
for x,y in enumerate(cards):
if y[1] in "".join(b_s):
b_n.append(cards[x])
b = [x for y, x in b_n]
for x,y in enumerate(cards):
if y[1] in "".join(c_s):
c_n.append(cards[x])
c = [x for y, x in c_n]
return u,a,b,c
# A_I input
def A_I(f_p,f_h,h_b,q_p,d,i_s):
# Make a decision based upon what would be best for player/rules
# first_play,first_hand,hearts_broken,queen_played,decision,charlie_sorted
decision = "NA"
# before checking anything else, if its the first hand ever, limits to clubs or
# highest spade if king or ace than goes diamonds
if f_h == True:
if "2C" in "".join(i_s):
decision = i_s[0]
elif "C" in "".join(i_s):
update_sorted = [x for x in i_s if "C" in x]
decision = update_sorted[-1]
else:
if ("KS" in "".join(i_s) or "AS" in "".join(i_s)):
update_sorted = [x for x in i_s if "S" in x]
decision = update_sorted[-1]
elif "D" in "".join(i_s):
update_sorted = [x for x in i_s if "D" in x]
decision = update_sorted[-1]
elif "S" in "".join(i_s):
update_sorted = [x for x in i_s if "S" in x]
if "QS" in "".join(update_sorted):
update_sorted.remove('QS')
decision = update_sorted[-1]
else:
decision = i_s[-1]
# IS the first card played a club?
elif "C" in "".join(f_p):
decision = card_to_play("C",f_p,i_s,d)
# IS the first card played a Diamond?
elif "D" in "".join(f_p):
decision = card_to_play("D",f_p,i_s,d)
# IS the first card played a Spade?
elif "S" in "".join(f_p):
decision = card_to_play("S",f_p,i_s,d)
# IS the first card played a Heart?
elif "H" in "".join(f_p):
decision = card_to_play("H",f_p,i_s,d)
# This means no ones played a card yet, free reign
else:
C = [x for x in i_s if "C" in x]
D = [x for x in i_s if "D" in x]
S = [x for x in i_s if "S" in x]
H = [x for x in i_s if "H" in x]
suit_number = [len(C),len(D),len(S),len(H)]
if "S" in "".join(i_s):
if ("QS" not in "".join(i_s) and "KS" not in "".join(i_s) and "AS" not in "".join(i_s)):
decision = S[0]
else:
decision = i_s[0]
else:
decision = i_s[0]
if "H" in decision:
h_b = True
i_s.remove(decision)
return i_s, decision, h_b
# Point system
def point_allocation(f_p,s,t,d):
#score = point_allocation(first_play, score, totem, decision)
points = 0
# Here is where the cards battle out
for suit in d:
if "H" in suit:
points += 1
if "QS" in suit:
points += 13
#Add up the points
for n,i in enumerate(d):
if f_p[1:] not in i:
d[n] = "00"
updated_d = [x[:-1] for x in d]
for n,i in enumerate(updated_d):
if "T" in i:
updated_d[n] = "10"
elif "J" in i:
updated_d[n] = "11"
elif "Q" in i:
updated_d[n] = "12"
elif "K" in i:
updated_d[n] = "13"
elif "A" in i:
updated_d[n] = "14"
updated_d = map(int, updated_d)
max_index = updated_d.index(max(updated_d))
if max_index == 0:
t = "U"
elif max_index == 1:
t = "A"
elif max_index == 2:
t = "B"
elif max_index == 3:
t = "C"
s[max_index + 1] += points
s[0] = max(s)
return s, t
# Common Card decision to pick the best card of that suit, you may have 5 cards that beat the highest card played so play the highest that still beats it:
def best_lowest(card_type,dec,updated_sorted):
decision = "NA"
update_d = [x[:-1] for x in dec if card_type in x]
for m,j in enumerate(update_d):
if j == "T":
update_d[m] = "10"
elif j == "J":
update_d[m] = "11"
elif j == "Q":
update_d[m] = "12"
elif j == "K":
update_d[m] = "13"
elif j == "A":
update_d[m] = "14"
update_d = map(int, update_d)
update_d = max(update_d)
updated_i_s = [x[:-1] for x in updated_sorted]
for n,i in enumerate(updated_i_s):
if i == "T":
updated_i_s[n] = "10"
elif i == "J":
updated_i_s[n] = "11"
elif i == "Q":
updated_i_s[n] = "12"
elif i == "K":
updated_i_s[n] = "13"
elif i == "A":
updated_i_s[n] = "14"
updated_i_s = map(int, updated_i_s)
for x in range(0,len(updated_i_s)):
if updated_i_s[x] < update_d:
decision = updated_i_s[x]
decision = str(decision)
updated_i_s = map(str, updated_i_s)
if decision == "NA":
decision = updated_i_s[-1]
if decision == "10":
decision = "T"
elif decision == "11":
decision = "J"
elif decision == "12":
decision = "Q"
elif decision == "13":
decision = "K"
elif decision == "14":
decision = "A"
decision = decision + card_type
return decision
# After a suit has been played, this the deciding factor on which card to choose as an AI:
def card_to_play (suit, f_p, i_s, d):
if suit in "".join(i_s):
update_sorted = [x for x in i_s if suit in x]
last_flag = d.count("NA")
if last_flag == 1:
if "H" in "".join(d) or "QS" in "".join(d):
#Play lowest card IF it beats the lowest diamond
decision = best_lowest(suit,d,update_sorted)
else:
#Otherwise play the highest diamond value because you are getting no points
decision = update_sorted[-1]
else:
#Play lowest card because we have unknowns, no reason to gamble
decision = update_sorted[0]
else:
#if player has the "QS" play it
if "QS" in i_s:
decision = "QS"
#if player has a card higher than "QS" and Queen of Spades has not been played
elif "KS" in i_s or "AS" in i_s:
update_decision = [x for x in i_s if "S" in x]
decision = update_decision[-1]
#Just play highest card or if any of the suits are 2 or less and are safe to play, play them
#make sure to check that hearts are allowed to be played...
else:
check_clubs = [x for x in i_s if "C" in x]
check_hearts = [x for x in i_s if "H" in x]
if len(check_clubs) < 3 and len(check_clubs) != 0:
decision = check_clubs[-1]
elif len(check_hearts) != 0:
decision = check_hearts[-1]
else:
decision = i_s[-1]
return decision
# Printing the screen to visualize whats happening
def print_screen(r,d,s,u_s,f_p,h_b,f_h):
#Structure that user sees
os.system('clear')
q = ""
if len(str(r)) == 1:
q = " "
p = ["","","",""]
#make sure points system works for the spacing
for x in range(0,4):
if len(str(s[x+1])) == 1:
p[x] = " "
elif len(str(s[x+1])) == 2:
p[x] = " "
cards_count = len(u_s)
print """\n\n\n\n\n\n\n\n
.----------------------------------------.
| Round %d %s | HEARTS |
`----------------------------------------'
.---------------Card Field---------------.
| You: %s |
| Alpha: %s |
| Bravo: %s |
| Charlie: %s |
|-----------------HEARTS-----------------|
| User's points: %r %s |
| Alphas's points: %r %s |
| Bravo's points: %r %s |
| Charlie's points: %r %s |
`----------------------------------------'
Hand:
""" % (r, q, d[0], d[1], d[2], d[3], s[1], p[0], s[2], p[1], s[3], p[2], s[4], p[3])
first, second, can_use, third = "","","",""
for i in range(0,cards_count):
first += " %d " % i
print first
print ".-----. " * (cards_count)
for j in range(0,cards_count):
second += "| %s| " % u_s[j]
print second
print "| | " * (cards_count)
if f_h == True and "C" not in "".join(u_s):
for k in range(0,cards_count):
if "H" in u_s[k] or "QS" in u_s[k]:
can_use += "| | "
else:
can_use += "| CTO | "
elif "2C" in "".join(u_s):
can_use += "| CTO | "
can_use += "| | " * (cards_count - 1)
elif f_p == "NA":
for k in range(0,cards_count):
if h_b == False and "H" in u_s[k]:
can_use += "| | "
else:
can_use += "| CTO | "
else:
for k in range(0,cards_count):
if f_p[1:] in u_s[k] or f_p[1:] not in "".join(u_s):
can_use += "| CTO | "
else:
can_use += "| | "
print can_use
print "| | " * (cards_count)
for k in range(0,cards_count):
third += "|%s | " % u_s[k]
print third
print "`-----' " * (cards_count)
def print_winner(s):
    """Announce the winner -- the player with the LOWEST score, per Hearts rules.

    NOTE(review): s[0] holds the maximum score, so if every player is tied
    at the minimum, s.index(min(s)) can return 0 and leave `winner` unbound
    (NameError). Confirm the caller guarantees a decided game.
    """
    win = s.index(min(s))
    if win == 1:
        winner = "User!          "
    elif win == 2:
        winner = "Player Alpha!  "
    elif win == 3:
        winner = "Player Bravo!  "
    elif win == 4:
        winner = "Player Charlie!"
    print """
   .---------------------------------------------------.
   |  .--------------------------------------------.   |
   | (      THE WINNER IS: %s        )|
   |  `--------------------------------------------'   |
   |                      |                            |
   |                      `.                           |
   |                        `.    . --- .              |
   |                          /         \              |
   |                         |  O  _  O  |             |
   |                         |   ./ \.   |             |
   |                         /  `-._.-'  \             |
   |                       .' /         \ `.           |
   |                   .-~.-~/           \~-.~-.       |
   |               .-~ ~    |             |    ~ ~-.   |
   |             `- .       |             |       . -' |
   |                  ~ -   |             |   - ~      |
   |                        \             /            |
   |                      ___\           /___          |
   |                      ~;_  >- . . -<  _i~          |
   |                         `'         `'             |
   |                                                   |
   `---------------------------------------------------'
    """ % winner
    raw_input("> ")
# This is the start of the game, built on function calls
while gameplay == True:
print """\n\n\n\n
.---------------------------------------------------.
| |
| _ _ _ |
| | | | | | | |
| | |__| | ___ __ _ _ __| |_ ___ |
| | __ |/ _ \/ _` | '__| __/ __| |
| | | | | __/ (_| | | | |_\__ \ |
| |_| |_|\___|\__,_|_| \__|___/ |
| |
| |\_/|,,_____,~~` |
| 'P' - Play (.".)~~ )`~}} |
| \o/\ /---~\\ ~}} |
| 'Q' - Quit _// _// ~} |
`---------------------------------------------------'
"""
user_input = raw_input("> ")
if (user_input != 'P' and user_input != 'p'):
print "Thanks for playing, bye!"
gameplay = False
else:
run_game() | {
"repo_name": "CraigglesO/Hearts-Trick-Game",
"path": "Hearts_trick_card_game.py",
"copies": "1",
"size": "20904",
"license": "mit",
"hash": 5142168173791110000,
"line_mean": 31.1615384615,
"line_max": 154,
"alpha_frac": 0.4899540758,
"autogenerated": false,
"ratio": 2.6830958798613787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8344383400438906,
"avg_score": 0.06573331104449451,
"num_lines": 650
} |
# all the imports
from __future__ import with_statement
from sqlite3 import dbapi2 as sqlite3
from contextlib import closing
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
import string, random, socket, logging, json
from tasks import *
# create our little application :)
app = Flask(__name__)
# Base config from the dev module; MEET_STREET_CONFIG env var points at the
# per-deployment overrides file.
app.config.from_object('development_config')
app.config.from_envvar('MEET_STREET_CONFIG')
if not app.config['DEBUG']:
    # In production, log warnings and above to a file instead of stderr.
    file_handler = logging.FileHandler(filename="logs/production.log")
    app.logger.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)
@app.route('/', methods=['POST', 'GET'])
def index():
    """Landing page: GET shows the form, POST geocodes the addresses and
    renders a map of meeting spots around their midpoint."""
    if request.method != 'POST':
        return render_template('index.html')
    raw_addresses = request.form.getlist("address[]")
    coords = []
    for entry in sanitizeAddresses(raw_addresses):
        parts = entry.split(',')
        resolved = None
        if len(parts) == 2:
            # Accept raw "lat,lng" pairs directly, skipping geocoding.
            try:
                resolved = [float(parts[0]), float(parts[1])]
            except ValueError:
                resolved = None
        if resolved is None:
            resolved = getCoordinates(entry)
        coords.append(resolved)
    coords = getConvexHullPoints(coords)
    centroid = findMidpoint(coords)
    # Only return top 4 results
    locations = getPointsOfInterest(centroid[0], centroid[1])[:4]
    for location in locations:
        location["details"] = getDetails(location["place_id"])
    zoom = getZoomLevel(shortestDistance(coords))
    return render_template('maps.html', coords=coords, locations=locations, centroid=centroid, zoom=zoom)
@app.route('/form', methods=['POST', 'GET'])
def form():
if request.method == 'POST':
addresses = request.form.getlist("address[]")
for address in addresses:
print getCoordinates(address)
string = "blah"
return render_template('form.html', string=string)
if __name__ == '__main__':
app.run()
| {
"repo_name": "vincentt143/meet-street",
"path": "meet-street.py",
"copies": "1",
"size": "1917",
"license": "apache-2.0",
"hash": 1207380178441324000,
"line_mean": 33.2321428571,
"line_max": 105,
"alpha_frac": 0.676056338,
"autogenerated": false,
"ratio": 3.8035714285714284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9905519899819305,
"avg_score": 0.014821573350424694,
"num_lines": 56
} |
# all the imports
import json
import sqlite3
from flask import Flask, request, session, g, redirect,url_for, \
abort, render_template, flash, Response
from contextlib import closing
import re
# configuration
# create our little application :)
app = Flask(__name__)
# app.config.from_object(__name__)
app.config['FREEZER_RELATIVE_URLS'] = True
app.config['DEBUG'] = True
app.config['DATABASE'] = 'HH-Kreis-Offenbach.sqlite'
def connect_db():
    """Open a new connection to the configured SQLite database file."""
    db_path = app.config['DATABASE']
    return sqlite3.connect(db_path)
def init_db():
    """(Re)create the database schema by executing schema.sql."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql') as schema:
            db.cursor().executescript(schema.read())
            db.commit()
def query_db(query, args=(), one=False):
    """Run *query* with *args* on the request connection and return rows as
    dicts keyed by column name; with one=True return the first row or None."""
    cursor = g.db.execute(query, args)
    columns = [col[0] for col in cursor.description]
    rows = [dict(zip(columns, values)) for values in cursor.fetchall()]
    if one:
        return rows[0] if rows else None
    return rows
def tremapCalc(t):
    """Assign a treemap color and visibility flag based on t['anteil'] (a
    percentage share); returns the mutated dict.

    Shares <= 10% are hidden (show="false") and pale; larger shares get
    progressively darker reds.

    Fixed: the original used strict comparisons on both ends of every band
    ("> 10 and < 20", "> 20 and < 40", ...), so a share of exactly 20, 40,
    60 or 80 fell through all branches and left t['color'] unset.
    """
    share = t['anteil']
    t['show'] = "true"
    if share <= 10:
        t['color'] = "#e3d6d6"
        t['show'] = "false"
    elif share < 20:
        t['color'] = "#cfbaba"
    elif share < 40:
        t['color'] = "#b69595"
    elif share < 60:
        t['color'] = "#9c7070"
    elif share < 80:
        t['color'] = "#834b4b"
    else:
        t['color'] = "#4F0000"
    return t
@app.before_request
def before_request():
    """Open a fresh database connection for the incoming request."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request database connection, if one was opened."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
@app.template_filter()
def number_trunc(num):
    """Template filter: format *num* with no decimal places."""
    return "%.0f" % num
@app.template_filter()
def number_anteil(num):
    """Template filter: format *num* with exactly two decimal places."""
    return "%.2f" % num
def get_years():
    """Return the distinct budget years present in the haushalt table."""
    return query_db('select jahr from haushalt group by jahr')
def get_sektoren():
    """Return all departments (fachdienste), sorted alphabetically by name."""
    return query_db('select * from fachdienste order by fachdienst ASC')
@app.template_filter()
def number_format(value, tsep='.', dsep=','):
    """Template filter: insert *tsep* thousands separators into *value*,
    keeping any decimal part after *dsep* (German-style "1.234.567,89").

    NOTE(review): relies on the Python 2 builtin `unicode`; this module is
    Python 2 only (see the print statements elsewhere in the file).
    """
    s = unicode(value)
    cnt = 0
    numchars = dsep + '0123456789'
    ls = len(s)
    # Skip any leading non-numeric prefix (e.g. sign or currency symbol).
    while cnt < ls and s[cnt] not in numchars:
        cnt += 1
    lhs = s[:cnt]
    s = s[cnt:]
    if not dsep:
        cnt = -1
    else:
        cnt = s.rfind(dsep)
    # Split off the decimal part: everything from the last dsep onwards.
    if cnt > 0:
        rhs = dsep + s[cnt+1:]
        s = s[:cnt]
    else:
        rhs = ''
    splt = ''
    # Insert tsep every three digits, walking the integer part from the right.
    while s != '':
        splt = s[-3:] + tsep + splt
        s = s[:-3]
    # splt carries one trailing tsep; drop it when reassembling.
    return lhs + splt[:-1] + rhs
@app.route('/')
def index():
    """Redirect the landing page to the 2012 overall-spending view."""
    return redirect('/gesamt/A/2012/')
@app.route('/gesamt/<flow>/<year>/')
def show_gesamt(flow, year):
    """Overview page: per-department totals for one budget year.

    *flow* is "A" (Ausgaben/spending) or "E" (Einnahmen/income).

    Fixed: the aggregate-by-year query concatenated the URL-supplied *flow*
    straight into the SQL string (injection hole); it is now bound as a
    parameter. All other queries were already parameterized.
    """
    # NOTE(review): this total is hard-coded to E_A = "A" even when flow is
    # "E" -- looks intentional for the layout, but worth confirming.
    info = query_db('select sum(euro) as main_value, jahr, E_A from haushalt where haushalt.jahr = ? and haushalt.E_A = "A"', [year], one=True)
    total = info['main_value']
    info['flow'] = flow
    vorjahr = int(year)-1
    einnahmen = query_db('select jahr, sum(euro) as main_value from haushalt where jahr = ? and E_A = "E"', [year], one=True)
    entries = []
    for t in query_db('select jahr, sum(euro) as main_value, produktgruppe, haushalt.produktbereich, haushalt.rowid as id, fachdienst as title from haushalt join fachdienste on haushalt.produktbereich = fachdienste.produktbereich where jahr = ? and E_A = ? group by fachdienst order by sum(euro) desc', [year, flow]):
        # Year-over-year change in percent, relative to `vorjahr`.
        prozent = query_db('select 100 - ((select sum(euro) from haushalt where jahr = ? and E_A = ? and produktbereich = hh.produktbereich)) * 100 / sum(euro) as prozent from haushalt as hh where jahr = ? and E_A = ? and produktbereich = ?', [vorjahr, flow, year, flow, t['produktbereich']], one=True)
        t['prozent'] = prozent['prozent']
        t['anteil'] = (float(t['main_value']) / total) * 100
        t = tremapCalc(t)
        entries.append(t)
    years_agg = query_db('select jahr, sum( case when produktbereich = 20 THEN euro else 0 end) as finanzen, sum( case when produktbereich = 65 THEN euro else 0 end) as gebaeude, sum( case when produktbereich = 51 THEN euro else 0 end) as jugend, sum( case when produktbereich = 50 THEN euro else 0 end) as arbeitsmarkt, sum( case when produktbereich = 40 THEN euro else 0 end) as schule from haushalt where E_A = ? group by jahr', [flow])
    return render_template('start.html', sektoren=get_sektoren(), einnahmen=einnahmen, entries=entries, info=info, years_agg=years_agg, years=get_years())
@app.route('/produktgruppe/<flow>/<produkt>/<year>/')
def show_produktgruppe(flow, produkt, year):
    """Drill-down page: product groups within one department/product area.

    NOTE(review), several issues to confirm and fix:
    - `file` and `input` shadow builtins, and the log file is opened without
      a `with` block.
    - `produkt` and `flow` (URL-supplied) are concatenated into the
      `years_agg` SQL string -- injection risk; bind as parameters.
    - the `if t['main_value'] != 0 or total != 0` guard still divides by
      zero when total == 0 but main_value != 0; should guard on `total`.
    """
    file = open('log.txt', 'a')
    input = produkt + ": " + year + " " + flow + "\n\n"
    file.write(input)
    file.close()
    info = query_db('select sum(euro) as main_value, jahr, fachdienst, haushalt.produktbereich, E_A from haushalt join fachdienste on haushalt.produktbereich = fachdienste.produktbereich where haushalt.produktbereich = ? and jahr = ? and E_A = ?', [produkt, year, flow], one=True)
    total = info['main_value']
    info['flow'] = flow
    vorjahr = int(year)-1
    einnahmen = query_db('select sum(euro) as main_value from haushalt where produktbereich = ? and jahr = ? and E_A = "E"', [produkt, year], one=True)
    entries = []
    # year_query accumulates one CASE column per product group for the
    # per-year aggregate below.
    year_query = "select jahr "
    alpha = map(chr, range(97, 123))
    i = 1
    for t in query_db('select sum(euro) as main_value, produkt, rowid as id, jahr, produktgruppe, produktgruppe_bez as title from haushalt where produktbereich = ? and jahr = ? and E_A = ? group by produktgruppe_bez order by sum(euro) desc', [produkt, year, flow]):
        prozent = query_db('select 100 - ((select sum(euro) from haushalt where jahr = ? and E_A = ? and produktgruppe = hh.produktgruppe)) * 100 / sum(euro) as prozent from haushalt as hh where jahr = ? and E_A = ? and produktgruppe = ?', [vorjahr, flow, year, flow, t['produktgruppe']], one=True)
        t['prozent'] = prozent['prozent']
        year_query+= ", sum( case when produkt = '" + t['produkt'] + "' THEN euro else 0 end) as produkt" + str(i) + " "
        if t['main_value'] != 0 or total != 0:
            t['anteil'] = (float(t['main_value']) / total) * 100
            t = tremapCalc(t)
            entries.append(t)
        i +=1
    info['count'] = i
    years_agg = query_db(year_query + ' from haushalt where produktbereich = "' + produkt + '" and E_A = "' + flow + '" group by jahr')
    return render_template('sektor.html', years_agg=years_agg, alphabet=alpha, sektoren=get_sektoren(), entries=entries, info=info, einnahmen=einnahmen, years=get_years())
    #return str(year_query)
@app.route('/haushaltsposition/<flow>/<produkt>/<year>/')
def show_haushaltsposition(flow, produkt, year):
    """Deepest drill-down: individual budget positions of one product for a
    given year and flow ("A" spending / "E" income).

    Fixed: the share guard was `main_value != 0 or total != 0`, which still
    divided by zero whenever total was 0 but main_value was not; it now
    guards on `total` alone (entries with a nonzero total are kept exactly
    as before).
    """
    info = query_db('select sum(euro) as main_value, jahr, produkt, fachdienst, haushalt.produktgruppe_bez, E_A from haushalt join fachdienste on haushalt.produktbereich = fachdienste.produktbereich where haushalt.produkt = ? and jahr = ? and E_A = ?', [produkt, year, flow], one=True)
    total = info['main_value']
    info['flow'] = flow
    vorjahr = int(year)-1
    einnahmen = query_db('select sum(euro) as main_value from haushalt where produkt = ? and jahr = ? and E_A = "E"', [produkt, year], one=True)
    entries = []
    for t in query_db('select sum(euro) as main_value, produkt, rowid as id, jahr, produktgruppe, haushaltsposition as title from haushalt where produkt = ? and jahr = ? and E_A = ? group by haushaltsposition order by sum(euro) desc', [produkt, year, flow]):
        # Year-over-year change relative to `vorjahr`.
        prozent = query_db('select 100 - ((select sum(euro) from haushalt where jahr = ? and E_A = ? and haushaltsposition = hh.haushaltsposition and produkt = hh.produkt)) * 100 / sum(euro) as prozent from haushalt as hh where jahr = ? and E_A = ? and haushaltsposition = ? and produkt = ?', [vorjahr, flow, year, flow, t['title'], produkt], one=True)
        t['prozent'] = prozent['prozent']
        if total:
            t['anteil'] = (float(t['main_value']) / total ) * 100
            t = tremapCalc(t)
            entries.append(t)
    return render_template('haushaltsposition.html', sektoren=get_sektoren(), entries=entries, info=info, einnahmen=einnahmen, years=get_years())
@app.route('/hinweis/')
def show_hinweis():
    """Static notes/disclaimer page."""
    sektoren = get_sektoren()
    return render_template('hinweis.html', sektoren=sektoren)
# @app.errorhandler(404)
# def page_not_found(error):
# return render_template('page_not_found.html'), 404
if __name__ == '__main__':
app.run()
| {
"repo_name": "okfde/haushalt-kreis-offenbach",
"path": "of_hh.py",
"copies": "1",
"size": "8213",
"license": "mit",
"hash": 3186453904418565600,
"line_mean": 40.6903553299,
"line_max": 444,
"alpha_frac": 0.6560331182,
"autogenerated": false,
"ratio": 2.7312936481543066,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8780606376837494,
"avg_score": 0.021344077903362395,
"num_lines": 197
} |
# all the imports
import logging
import os
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
from BT4221 import *
app = Flask(__name__) # create the application instance :)
app.config.from_object(__name__) # load config from this file , flaskr.py
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'flaskr.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default'
))
@app.route('/')
def main():
    """Landing page."""
    return render_template('index.html')
@app.route('/livedemo', methods=['GET', 'POST'])
def demo():
    """Live-demo page: on POST, run the submitted incident features through
    the three trained models and render their predictions.

    Improvement: the identical nine-element feature vector was written out
    three times (once per model call); it is now built once.
    """
    if request.method == 'POST':
        logging.info("POST")
        month = int(request.form["Month"])
        day = int(request.form["Day"])
        region = int(request.form["Region"])
        properties = int(request.form["Property"])
        extproperties = int(request.form["extproperty"])
        specificity = int(request.form["spec"])
        att = int(request.form["att"])
        weapon = int(request.form["weapon"])
        prep = int(request.form["numprep"])
        # Same ordering the models were trained with; built once, used thrice.
        features = [month, day, region, properties, extproperties, att, weapon, prep, specificity]
        res = lm.predict(features)
        res_rf = forest.predict_proba(features)
        res_lr = model2.predict_proba(features)
        counter = plots(res, res_rf, res_lr)
        return render_template('pages/livedemo.html', result=res[0], lr=res_lr[0][1], rf=res_rf[0][0], plot=counter)
    logging.info("GET")
    return render_template('pages/livedemo.html', result=None, lr=None, rf=None)
@app.route('/dataset')
def dataset():
    """Dataset description page."""
    return render_template('pages/dataset.html')

@app.route('/regression')
def regression():
    """Regression results page."""
    return render_template('pages/regression.html')

@app.route('/randomforest')
def randomforest():
    """Random-forest results page."""
    return render_template('pages/randomforest.html')

@app.route('/marketbasket')
def marketbasket():
    """Market-basket analysis page."""
    return render_template('pages/marketbasket.html')

@app.route('/prelim')
def prelim():
    """Preliminary analysis page."""
    return render_template('pages/prelim.html')
if __name__ == "__main__":
app.run(debug=True)
| {
"repo_name": "chloeyangu/BigDataAnalytics",
"path": "Terrorisks/flaskr/flaskr/flaskr.py",
"copies": "1",
"size": "2304",
"license": "mit",
"hash": -4723873931551625000,
"line_mean": 33.3880597015,
"line_max": 117,
"alpha_frac": 0.6697048611,
"autogenerated": false,
"ratio": 3.496206373292868,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9592024058790675,
"avg_score": 0.014777435120438769,
"num_lines": 67
} |
# all the imports
import os,binascii
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash, Blueprint, stream_with_context, Response
from flaskext.mysql import MySQL
from config import config
from werkzeug.utils import secure_filename
from flask import send_from_directory
import datetime
import logging
from logging.handlers import SMTPHandler
from collections import Counter
import requests
import json
credentials = None
mysql = MySQL()
# create our little application :)
app = Flask(__name__)
for key in config:
app.config[key] = config[key]
mysql.init_app(app)
def tup2float(tup):
    """Join the tuple's elements with '.' and parse the result as a float,
    e.g. (1, 5) -> 1.5."""
    parts = [str(item) for item in tup]
    return float('.'.join(parts))
def get_cursor():
    """Open a new MySQL connection and return a cursor on it.

    NOTE(review): a new connection per call is never explicitly closed.
    """
    connection = mysql.connect()
    return connection.cursor()
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page."""
    return render_template('404.html'), 404
# Add new username to leaderboard DB
@app.route('/add/<username>/', methods=['GET', 'POST'])
def addUser(username):
    """Insert *username* with a zero score and return the new row as JSON.

    Fixed: the INSERT was assembled with %-string formatting from a
    URL-supplied value (SQL injection); values are now bound as query
    parameters.
    """
    db = get_cursor()
    score = '0'
    db.execute('SELECT COUNT(*) FROM Leaderboard')
    # NOTE(review): row-count + 1 as the new id is racy under concurrent
    # inserts; an AUTO_INCREMENT column would be the robust fix.
    count = db.fetchone()[0] + 1
    db.execute(
        'INSERT INTO `Leaderboard` (`idLeaderboard`, `username`, `score`) VALUES (%s, %s, %s)',
        (count, username, score))
    db.execute("COMMIT")
    json_data = {}
    json_data['idLeaderboard'] = str(count)
    json_data['username'] = username
    json_data['score'] = score
    return json.dumps(json_data)
# Update the score of a particular username
@app.route('/update/<username>/<score>', methods = ['GET', 'POST'])
def scoreUpdate(username, score):
    """Set *username*'s score to *score* and report success as JSON.

    Fixed: the UPDATE was assembled with %-string formatting from
    URL-supplied values (SQL injection); they are now bound as query
    parameters.
    """
    db = get_cursor()
    db.execute('UPDATE `Leaderboard` SET `score`=%s where `username`=%s',
               (score, username))
    db.execute("COMMIT")
    json_data = {}
    json_data['success'] = "True"
    return json.dumps(json_data)
# Show leaderboard
@app.route('/')
def screen():
    """Return the whole leaderboard as a JSON list of row dicts.

    NOTE(review): the ORDER BY sorts by username first, so `Score desc`
    only breaks ties -- confirm that is the intended ranking.
    """
    db = get_cursor()
    showQuery = 'select * from Leaderboard order by username, Score desc'
    db.execute(showQuery)
    data = db.fetchall()
    users = []
    # Rows come back as tuples (id, username, score); re-key them for JSON.
    for userObject in data:
        temp = {}
        temp['idLeaderboard'] = userObject[0]
        temp['username'] = userObject[1]
        temp['score'] = userObject[2]
        users.append(temp)
    # Python 2 print statement: debug echo of the response body.
    print json.dumps(users)
    return json.dumps(users)
@app.teardown_appcontext
def close_db(exception=None):
    """Closes the database again at the end of the request.

    Fixed: Flask invokes teardown_appcontext callbacks with the active
    exception (or None), so the old zero-argument signature raised
    TypeError at the end of every request.

    NOTE(review): get_cursor() opens a brand-new connection, so closing its
    cursor does not release the connection actually used during the request;
    tracking the per-request connection (e.g. on `g`) is the real fix.
    """
    get_cursor().close()
if __name__ == '__main__':
app.debug = True
app.secret_key=os.urandom(24)
# app.permanent_session_lifetime = datetime.timedelta(seconds=200)
app.run(host='0.0.0.0') | {
"repo_name": "sudheesh001/Cove-Services",
"path": "server.py",
"copies": "1",
"size": "2533",
"license": "mit",
"hash": 3793120318097310700,
"line_mean": 25.3958333333,
"line_max": 97,
"alpha_frac": 0.7031188314,
"autogenerated": false,
"ratio": 3.0890243902439023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42921432216439026,
"avg_score": null,
"num_lines": null
} |
# all the imports
import os
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, jsonify
import base64
import cv2
import numpy as np
import ctypes
import time
import re
import httplib, urllib
import json
from review import wrap_Doreview, wrap_ReadVocabulary, wrap_ReadTfidf, wrap_ReadInvertedList, wrap_ReadIdfMat, wrap_ReadFileName
import sys
from flaskr2 import app
reload(sys)
sys.setdefaultencoding('utf-8')
# create our little application :)
#def allowed_file(filename):
#app = Flask(__name__)
#app.config.from_object(__name__)
@app.before_first_request
def init():
    """Load the sketch-retrieval model data (vocabulary, tf-idf weights,
    inverted list, idf matrix, file names) once before the first request;
    each wrapper's return value is printed (Python 2 print statements)."""
    print wrap_ReadVocabulary('./flaskr2/static/trainsave/S_vocabulary.xml')
    print wrap_ReadTfidf('./flaskr2/static/trainsave/SwordFre_10000.xml')
    print wrap_ReadInvertedList('./flaskr2/static/trainsave/SinvertedList_10000.txt')
    print wrap_ReadIdfMat('./flaskr2/static/trainsave/IDF_10000.xml')
    print wrap_ReadFileName('./flaskr2/static/trainsave/filename.txt')
localtime=""
imagefile = './flaskr2/static/img/'
url='./static/img/'
global tmpImgUrl
global tmpImgUrl2
def grabcut(rect,filename):
    """Run OpenCV GrabCut on the image at *filename*, seeded with *rect*
    (x, y, w, h), and return the masked foreground crop of that rectangle.

    Side effect: writes the crop's binary mask to
    imagefile + localtime + "mask.jpg".
    NOTE(review): relies on the module-global `localtime`, which is set by
    get_data() and is "" at import time.
    """
    img = cv2.imread(filename)
    img2 = img.copy()
    mask = np.zeros(img.shape[:2],dtype = np.uint8) # mask initialized to PR_BG
    output = np.zeros(img.shape,np.uint8) # output image to be shown
    bgdmodel = np.zeros((1,65),np.float64)
    fgdmodel = np.zeros((1,65),np.float64)
    cv2.grabCut(img2,mask,rect,bgdmodel,fgdmodel,1,cv2.GC_INIT_WITH_RECT)
    # GrabCut labels 1 and 3 are (probable) foreground -> 255 in the mask.
    mask2 = np.where((mask==1) + (mask==3),255,0).astype('uint8')
    # Crop both the image and the mask to the seed rectangle.
    img3=img2[rect[1]:rect[1]+rect[3],rect[0]:rect[0]+rect[2]]
    mask3=mask2[rect[1]:rect[1]+rect[3],rect[0]:rect[0]+rect[2]]
    output = cv2.bitwise_and(img3,img3,mask=mask3)
    cv2.imwrite(imagefile+localtime+"mask.jpg",mask3)
    return output
def grabcur_mask(filename):
    """Refine a GrabCut segmentation using a user-drawn correction overlay.

    *filename* is a 4-tuple of paths: (user overlay, resized image,
    previous mask, output matting). Overwrites the previous mask
    (filename[2]) and writes the matted foreground to filename[3].
    """
    mask_re = cv2.imread(filename[0],-1) #base64
    img2 = cv2.imread(filename[1]) #resize.jpg
    mask_org =cv2.imread(filename[2],0) #mask.jpg
    # Previous mask: bright pixels -> probable FG (3), others probable BG (2).
    mask = np.where((mask_org>20),3,2).astype('uint8')
    # Opaque bright strokes in the overlay force definite FG (1)...
    mask1 = np.where((mask_re[:,:,0]>10)|(mask_re[:,:,1]>10)|(mask_re[:,:,2]>10)&(mask_re[:,:,3]>200),1,mask).astype('uint8')
    # ...and opaque dark strokes force definite BG (0).
    mask2 = np.where((mask_re[:,:,0]<10)&(mask_re[:,:,1]<10)&(mask_re[:,:,2]<10)&(mask_re[:,:,3]>200),0,mask1).astype('uint8')
    bgdmodel = np.zeros((1,65),np.float64)
    fgdmodel = np.zeros((1,65),np.float64)
    rect=(0,0,img2.shape[1],img2.shape[0])
    cv2.grabCut(img2,mask2,rect,bgdmodel,fgdmodel,2,cv2.GC_INIT_WITH_MASK)
    mask5 = np.where((mask2==1)+(mask2==3),255,0).astype('uint8')
    output = cv2.bitwise_and(img2,img2,mask=mask5)
    cv2.imwrite(filename[2],mask5)
    cv2.imwrite(filename[3],output) #matting.jpg
def fusion(filename,rect):
    """Blend the matted foreground into a background image via the native
    libfusion shared library.

    *filename* is a 4-tuple of image paths and *rect* the placement
    parameters -- argument meanings are opaque here; confirm against
    libfusion's doClone signature before changing.
    """
    so=ctypes.cdll.LoadLibrary
    lib=so("./flaskr2/../lib/libfusion.so")
    lib.doClone(filename[1],filename[2],filename[0],filename[3],rect[0],rect[1],rect[2],rect[3])
@app.route('/flaskr2/',methods=['GET','POST'])
@app.route('/flaskr2/index',methods=['GET','POST'])
def show_entries():
    """Serve the application's main page."""
    return render_template('index.html')

@app.route('/flaskr2/draft')
def draft():
    """Serve the sketch-drawing page."""
    return render_template("draft.html")
@app.route('/flaskr2/draftResult', methods = ['POST'])
def Controldddddd():
    """For each base64 image POSTed under 'value[]', save it with a
    timestamped name and run the sketch-retrieval review; returns the list
    of per-image results as JSON."""
    res1=[]
    i=0
    while i<len(dict(request.form).values()[0]):
        # Take the i-th value of every repeated form field (Python 2
        # dict.values() returns a list).
        json_data = {key:dict(request.form)[key][i] for key in dict(request.form)}
        imgData = base64.b64decode(json_data.get('value[]'))
        localtime = time.strftime("%Y%m%d%H%M%S", time.localtime())
        f = open('./flaskr2/static/img/'+localtime+'.jpg','wb')
        f.write(imgData)
        f.close()
        result1=wrap_Doreview('./flaskr2/static/img/'+localtime+'.jpg')
        # wrap_Doreview returns a space-separated string (contents opaque here).
        res=result1.split(" ")
        res1.append(res)
        i=i+1
    print res1
    return jsonify(result = res1)
@app.route('/flaskr2/draftAndroid', methods = ['POST'])
def draftAndroid():
    """Android variant of draftResult: decode one base64 image from the JSON
    body, save it with a timestamped name, and return the review result."""
    imgData = base64.b64decode(request.json['value'])
    localtime = time.strftime("%Y%m%d%H%M%S", time.localtime())
    f = open('./flaskr2/static/img/'+localtime+'.jpg','wb')
    f.write(imgData)
    f.close()
    result1=wrap_Doreview('./flaskr2/static/img/'+localtime+'.jpg')
    # wrap_Doreview returns a space-separated string (contents opaque here).
    res=result1.split(" ")
    print res
    return jsonify(result = res)
@app.route('/flaskr2/result1', methods = ['POST'])
def Control():
    """Store the POSTed image URL in the module-global tmpImgUrl."""
    global tmpImgUrl
    tmpImgUrl = request.form.get('value')
    return "success"

@app.route('/flaskr2/result11', methods = ['POST'])
def Control11():
    """Store the POSTed image URL in the module-global tmpImgUrl2."""
    global tmpImgUrl2
    tmpImgUrl2 = request.form.get('value')
    return "success"
@app.route('/flaskr2/result4', methods = ['POST'])
def SelfControl():
    """Decode a base64-posted image, save it under a timestamped name, and
    record its path in the module-global tmpImgUrl."""
    global tmpImgUrl
    decoded = base64.b64decode(request.form.get('value'))
    stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    path = './flaskr2/static/img/' + stamp + 'self.jpg'
    with open(path, 'wb') as out:
        out.write(decoded)
    tmpImgUrl = path
    return jsonify(result = tmpImgUrl)

@app.route('/flaskr2/result41', methods = ['POST'])
def SelfControl1():
    """Same as SelfControl, but records the path in tmpImgUrl2."""
    global tmpImgUrl2
    decoded = base64.b64decode(request.form.get('value'))
    stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    path = './flaskr2/static/img/' + stamp + 'self.jpg'
    with open(path, 'wb') as out:
        out.write(decoded)
    tmpImgUrl2 = path
    return jsonify(result = tmpImgUrl2)
@app.route('/flaskr2/tmp', methods = ['POST'])
def sendURL():
    """Report the currently stored image URL (tmpImgUrl) as JSON."""
    global tmpImgUrl
    return jsonify(result=tmpImgUrl)

@app.route('/flaskr2/tmp1', methods = ['POST'])
def sendURL1():
    """Report the currently stored second image URL (tmpImgUrl2) as JSON."""
    global tmpImgUrl2
    return jsonify(result=tmpImgUrl2)

@app.route('/flaskr2/imageCombine')
def imageCombine1():
    """Serve the image-combination page."""
    return render_template('imageCombine.html')
@app.route('/flaskr2/getdata',methods=['POST','GET'])
def get_data():
    """Receive a base64 image plus a crop rectangle (form or JSON body),
    resize the image to <= 600px on the long side, run GrabCut on the
    rectangle, and return URLs of the matting and resized crop.

    SECURITY NOTE(review): eval() is called on client-supplied form data
    below; json.loads would be the safe equivalent -- flagging, not changing.
    """
    global localtime
    localtime = time.strftime("%Y%m%d%H%M%S", time.localtime())
    filename=imagefile+localtime+'input.jpg'
    #print request.headers
    #print request.json
    if request.form:
        data=request.form.get('imagedata')
        data_tmp = eval(data)
        #print data_tmp['x1']
        imgData=base64.b64decode(request.form.get('value'))
    else:
        # JSON body path (e.g. the Android client).
        data = request.json['imagedata']
        #print data
        data_tmp = json.loads(data)
        #print data_tmp
        #print data_tmp['value']
        imgData=base64.b64decode(data_tmp['value'])
    #print imgData
    fileImg = open('1.png','wb')
    fileImg.write(imgData)
    fileImg.close()
    uploadimg = open(filename,'wb')
    uploadimg.write(imgData)
    uploadimg.close()
    img1 = cv2.imread(filename)
    shape=img1.shape
    max_leg=max(shape)
    ratio=1.0
    # Downscale so the longest side is at most 600px; the same ratio is
    # applied to the crop rectangle below.
    if(max_leg>600):
        ratio=600.0/max_leg
        img1=cv2.resize(img1,(int(shape[1]*ratio),int(shape[0]*ratio)))
        cv2.imwrite(filename,img1)
    # Clamp the rectangle to the image bounds.
    if data_tmp['x1']<0:
        data_tmp['x1']=0
    if data_tmp['y']<0:
        data_tmp['y']=0
    if data_tmp['height']>shape[0]:
        data_tmp['height']=shape[0]
    if data_tmp['width']>shape[1]:
        data_tmp['width']=shape[1]
    rect=(int(data_tmp['x1']*ratio),int(data_tmp['y']*ratio),int(data_tmp['width']*ratio),int(data_tmp['height']*ratio))
    img2=img1[rect[1]:rect[1]+rect[3],rect[0]:rect[0]+rect[2]]
    cv2.imwrite(imagefile+localtime+"resize.jpg",img2)
    dst=grabcut(rect,filename)
    cv2.imwrite(imagefile+localtime+"matting.jpg",dst)
    #result={"state":200,'message':"ok",'result':"http://10.108.126.130:5000/static/img/matting.jpg"}
    result={"state":200,'message':"ok",'result':url+localtime+'matting.jpg','result1':url+localtime+'resize.jpg'}
    return jsonify(result)
@app.route('/flaskr2/further',methods=['POST','GET'])
def further():
    """Accept a base64 user-drawn refinement mask and re-run mask-based
    GrabCut for the current session's images (keyed by the module-global
    localtime); returns the refined matting URL."""
    global localtime
    filename=(imagefile+localtime+'remask.jpg',imagefile+localtime+"resize.jpg",imagefile+localtime+"mask.jpg",imagefile+localtime+'mattings.jpg')
    imgData=base64.b64decode(request.form.get('value'))
    uploadimg = open(filename[0],'wb')
    uploadimg.write(imgData)
    uploadimg.close()
    grabcur_mask(filename)
    result={"state":200,'message':"ok",'result':url+localtime+'mattings.jpg'}
    return jsonify(result)
@app.route('/flaskr2/getimgdata',methods=['POST','GET'])
def get_imagedata():
    """Receive a base64 background plus per-item placement fields
    (imagedata[k][id/x1/y/width/height/rotate]) and fuse each foreground
    into the background via fusion(); returns the final fusion image URL.

    NOTE(review): assumes six form fields per item (len/6) and that [id]
    always precedes use of `filename` -- confirm against the client.
    """
    if request.form:
        imgData=base64.b64decode(request.form.get('value'))
        k=0
        while(k<(len(request.form.keys())/6)):
            if(request.form.get('imagedata['+str(k)+'][id]')):
                fID = request.form.get('imagedata['+str(k)+'][id]').encode("utf-8")
                #print type(fID.encode("utf-8"))
                #print fID
                filename=(imagefile+fID+'background.jpg',imagefile+fID+"resize.jpg",imagefile+fID+"mask.jpg",imagefile+fID+'fusion.jpg')
            if(request.form.get('imagedata['+str(k)+'][x1]')):
                xx = request.form.get('imagedata['+str(k)+'][x1]').encode("utf-8")
                #print type(xx)
            if(request.form.get('imagedata['+str(k)+'][y]')):
                yy = request.form.get('imagedata['+str(k)+'][y]').encode("utf-8")
            if(request.form.get('imagedata['+str(k)+'][width]')):
                width = request.form.get('imagedata['+str(k)+'][width]').encode("utf-8")
            if(request.form.get('imagedata['+str(k)+'][height]')):
                height = request.form.get('imagedata['+str(k)+'][height]').encode("utf-8")
            if(request.form.get('imagedata['+str(k)+'][rotate]')):
                rotate = request.form.get('imagedata['+str(k)+'][rotate]').encode("utf-8")
            uploadimg = open(filename[0],'wb')
            uploadimg.write(imgData)
            uploadimg.close()
            img1 = cv2.imread(filename[1])
            shape=img1.shape
            # Clamp negative placement coordinates to the image origin.
            #if (int(xx)<0):
            if (int(round(float(xx)))<0):
                xx=0
            if (int(round(float(yy)))<0):
                yy=0
            r1=float(height)/shape[0]
            rect=(int(round(float(xx))),int(round(float(yy))),ctypes.c_float(r1),int(rotate))
            fusion(filename,rect)
            # The fused output becomes the background for the next item.
            f = open(filename[3],'rb')
            imgData = f.read()
            f.close()
            k=k+1
        result={"state":200,'message':"ok",'result':url+fID+"fusion.jpg"}
        return jsonify(result)
    #else:
    #    print "Android Combine"
    #    imgData=base64.b64decode(request.json['value'])
    #    data=request.json['imagedata']
    #    data_tmp = data
@app.route('/flaskr2/result3', methods = ['POST'])
def imageComb():
    """Download the POSTed remote image URL to a timestamped local file and
    record the local path in the module-global tmpImgUrl."""
    global tmpImgUrl
    tmpImgUrl = request.form.get('value')
    stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    local_path = './flaskr2/static/img/' + stamp + 'text.jpg'
    urllib.urlretrieve(tmpImgUrl, local_path)
    tmpImgUrl = local_path
    return jsonify(result = tmpImgUrl)

@app.route('/flaskr2/result31', methods = ['POST'])
def imageComb1():
    """Same as imageComb, but records the local path in tmpImgUrl2."""
    global tmpImgUrl2
    tmpImgUrl2 = request.form.get('value')
    stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    local_path = './flaskr2/static/img/' + stamp + 'text.jpg'
    urllib.urlretrieve(tmpImgUrl2, local_path)
    tmpImgUrl2 = local_path
    return jsonify(result = tmpImgUrl2)
@app.route('/flaskr2/resAndroid',methods=['POST','GET'])
def testAndroid():
    """Android variant of Control2: query Bing image search for the
    'background' or 'good' term in the JSON body (selected by 'type') and
    return content + thumbnail URLs.

    SECURITY NOTE(review): the API subscription key is hard-coded in
    source; it should live in configuration.
    """
    headers = {
        'Content-Type': 'multipart/form-data',
        'Ocp-Apim-Subscription-Key': 'b4f268f76170485ebc1e78a045554fae',
    }
    imgURL1=[]
    imgURL2=[]
    typeJson = json.loads(request.json['queryexpression'])['type']
    if(typeJson=='0'):
        values={}
        values['q'] = json.loads(request.json['queryexpression'])['background']
        params = urllib.urlencode(values)
        #print params
        #try:
        conn = httplib.HTTPSConnection('api.cognitive.microsoft.com')
        conn.request("POST", "/bing/v5.0/images/search?%s" % params, "{body}", headers)
        response = conn.getresponse()
        data = response.read()
        ImgUrl = []
        hjsons = json.loads(data)
        ImgLength = len(hjsons['value'])
        # First the full-size content URLs...
        i=0
        while(i<ImgLength):
            ImgUrl.append(hjsons['value'][i]['contentUrl'])
            #localUrl = './flaskr2/static/img/text'+str(i)+'.jpg'
            #urllib.urlretrieve(hjsons['value'][i]['contentUrl'],localUrl)
            #ImgUrl.append(localUrl)
            i=i+1
        # ...then the thumbnails, appended to the same list.
        i=0
        while(i<ImgLength):
            ImgUrl.append(hjsons['value'][i]['thumbnailUrl'])
            i=i+1
        imgURL1.append(ImgUrl)
    if(typeJson=='1'):
        values={}
        values['q'] = json.loads(request.json['queryexpression'])['good']
        params = urllib.urlencode(values)
        conn = httplib.HTTPSConnection('api.cognitive.microsoft.com')
        conn.request("POST", "/bing/v5.0/images/search?%s" % params, "{body}", headers)
        response = conn.getresponse()
        data = response.read()
        ImgUrlA = []
        hjsons = json.loads(data)
        ImgLength = len(hjsons['value'])
        i=0
        while(i<ImgLength):
            ImgUrlA.append(hjsons['value'][i]['contentUrl'])
            #localUrl = './flaskr2/static/img/textA'+str(i)+'.jpg'
            #urllib.urlretrieve(hjsons['value'][i]['contentUrl'],localUrl)
            #ImgUrlA.append(localUrl)
            i=i+1
        i=0
        while(i<ImgLength):
            ImgUrlA.append(hjsons['value'][i]['thumbnailUrl'])
            i=i+1
        imgURL2.append(ImgUrlA)
    return jsonify(result = imgURL1,result1 = imgURL2)
@app.route('/flaskr2/result2', methods = ['POST'])
def Control2():
    """For each indexed form entry queryexpression[k][background|good], run a
    Bing image search and collect content + thumbnail URLs; 'background'
    results go to imgURL1, 'good' results to imgURL2.

    SECURITY NOTE(review): the API subscription key is hard-coded in
    source; it should live in configuration.
    """
    headers = {
        # Request headers
        'Content-Type': 'multipart/form-data',
        'Ocp-Apim-Subscription-Key': 'b4f268f76170485ebc1e78a045554fae',
    }
    #values={}
    #print request.form.get('queryexpression[0][background]')
    #json_data = {key:dict(request.form)[key][0] for key in dict(request.form)}
    #print json_data
    imgURL1=[]
    imgURL2 = []
    k=0
    while(k<len(request.form.keys())):
        if(request.form.get('queryexpression['+str(k)+'][background]')):
            values={}
            values['q'] = request.form.get('queryexpression['+str(k)+'][background]')
            params = urllib.urlencode(values)
            #print params
            #try:
            conn = httplib.HTTPSConnection('api.cognitive.microsoft.com')
            conn.request("POST", "/bing/v5.0/images/search?%s" % params, "{body}", headers)
            response = conn.getresponse()
            data = response.read()
            ImgUrl = []
            hjsons = json.loads(data)
            ImgLength = len(hjsons['value'])
            # First the full-size content URLs...
            i=0
            while(i<ImgLength):
                ImgUrl.append(hjsons['value'][i]['contentUrl'])
                #localUrl = './flaskr2/static/img/text'+str(i)+'.jpg'
                #urllib.urlretrieve(hjsons['value'][i]['contentUrl'],localUrl)
                #ImgUrl.append(localUrl)
                i=i+1
            # ...then the thumbnails, appended to the same list.
            i=0
            while(i<ImgLength):
                ImgUrl.append(hjsons['value'][i]['thumbnailUrl'])
                i=i+1
            imgURL1.append(ImgUrl)
        elif(request.form.get('queryexpression['+str(k)+'][good]')):
            values={}
            values['q'] = request.form.get('queryexpression['+str(k)+'][good]')
            params = urllib.urlencode(values)
            conn = httplib.HTTPSConnection('api.cognitive.microsoft.com')
            conn.request("POST", "/bing/v5.0/images/search?%s" % params, "{body}", headers)
            response = conn.getresponse()
            data = response.read()
            ImgUrlA = []
            hjsons = json.loads(data)
            ImgLength = len(hjsons['value'])
            i=0
            while(i<ImgLength):
                ImgUrlA.append(hjsons['value'][i]['contentUrl'])
                #localUrl = './flaskr2/static/img/textA'+str(i)+'.jpg'
                #urllib.urlretrieve(hjsons['value'][i]['contentUrl'],localUrl)
                #ImgUrlA.append(localUrl)
                i=i+1
            i=0
            while(i<ImgLength):
                ImgUrlA.append(hjsons['value'][i]['thumbnailUrl'])
                i=i+1
            imgURL2.append(ImgUrlA)
        k=k+1
    #conn.close()
    return jsonify(result = imgURL1,result1 = imgURL2)
| {
"repo_name": "yuzhao12/MindCamera",
"path": "flaskr2/flaskr.py",
"copies": "1",
"size": "14702",
"license": "mit",
"hash": 3294035222193640400,
"line_mean": 31.170678337,
"line_max": 143,
"alpha_frac": 0.6755543463,
"autogenerated": false,
"ratio": 2.645672125247436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3821226471547436,
"avg_score": null,
"num_lines": null
} |
# all the imports
import os
import re
import sys
import glob
import time
import uuid
import atexit
import requests
import subprocess
import pandas as pd
import seaborn as sns
import pybtex.database
from datetime import datetime
from contextlib import closing
from numpy import array, random
from os.path import abspath, dirname, join
from bokeh.client import pull_session
from bokeh.embed import autoload_server
from werkzeug.contrib.fixers import ProxyFix
from flask import (Flask, request, session, g, redirect, url_for, send_file,
abort, render_template, render_template_string, flash, current_app)
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker, contains_eager
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
sys.path.append( os.path.join( os.path.dirname(__file__), os.path.pardir ) )
# Flask app configuration
# DATADIR = '/Users/brian/Research/projects/uhcs'
SQLALCHEMY_DATABASE_URI = 'uhcsdb/microstructures.sqlite'
MICROGRAPH_PATH = 'static/micrographs'
UPLOAD_FOLDER = join('uhcsdb', MICROGRAPH_PATH)
EXTRACT_PATH = join('static', 'pdf_stage')
PDF_STAGE = join('uhcsdb', EXTRACT_PATH)
ALLOWED_EXTENSIONS = set(['pdf', 'png', 'jpg', 'jpeg', 'gif', 'tif'])
def load_secret_key():
    """Read the Flask secret key from the `secret_key` file next to this module."""
    keyfile = os.path.join(os.path.dirname(__file__), 'secret_key')
    with open(keyfile, 'rb') as fh:
        return fh.read()
app.config.update(dict(
DATABASE=SQLALCHEMY_DATABASE_URI,
MICROGRAPH_PATH = MICROGRAPH_PATH,
DEBUG=False,
SECRET_KEY=load_secret_key(),
))
app.config.from_envvar('UHCSDB_SETTINGS', silent=True)
_cwd = dirname(abspath(__file__))
print(app.config)
from . import features
from .models import Base, User, Collection, Sample, Micrograph
from uhcsdb import features
from uhcsdb.models import Base, User, Collection, Sample, Micrograph
@app.before_first_request
def build_search_tree():
    """Build the image-feature search index once, before the first request."""
    print('building search tree...')
    feature_file = 'vgg16_multiscale_block5_conv3-vlad-32.h5'
    features.build_search_tree('uhcsdb/static/representations',
                               featurename=feature_file)
def connect_db(dbpath):
    """Open a SQLAlchemy session bound to the SQLite file at *dbpath*."""
    engine = create_engine('sqlite:///' + dbpath)
    Base.metadata.bind = engine
    make_session = sessionmaker(bind=engine)
    return make_session()
def get_db():
    """Return the per-context database session, creating it on first use."""
    if not hasattr(g, '_database'):
        g._database = connect_db(app.config['DATABASE'])
    return g._database
def paginate(results, page, PER_PAGE):
    """Slice *results* to the 1-indexed *page* of size *PER_PAGE*.

    Returns (page_results, page_data) where page_data holds prev/next page
    numbers and has_prev/has_next flags for the template.

    Fixed: out-of-range pages used to return a bare [] while valid pages
    returned a 2-tuple, so callers that unpack the result crashed; an empty
    page now returns ([], page_data) with both flags reflecting reality.
    """
    start = (page - 1) * PER_PAGE
    page_data = {'prev_num': page - 1, 'next_num': page + 1,
                 'has_prev': start > 0, 'has_next': False}
    if start < 0 or start > len(results):
        return [], page_data
    end = min(start + PER_PAGE, len(results))
    page_data['has_next'] = end < len(results)
    return results[start:end], page_data
ENTRIES_PER_PAGE = 24
@app.route('/')
@app.route('/index')
@app.route('/entries/') #, defaults={'page': 1})
@app.route('/entries/<int:page>')
def entries(page=1):
    """Paginated gallery of micrographs with a recognized primary label."""
    # only show micrographs with these class labels
    unique_labels = {
        'spheroidite', 'spheroidite+widmanstatten', 'martensite', 'network',
        'pearlite', 'pearlite+spheroidite', 'pearlite+widmanstatten'
    }
    db = get_db()
    labeled = db.query(Micrograph).filter(
        Micrograph.primary_microconstituent.in_(unique_labels))
    page_results, page_data = paginate(labeled.all(), page, ENTRIES_PER_PAGE)
    page_entries = [entry.info() for entry in page_results]
    return render_template('show_entries.html', entries=page_entries, pg=page_data)
@app.route('/micrograph/<int:entry_id>')
def show_entry(entry_id):
    """Detail page for a single micrograph and its contributor."""
    db = get_db()
    entry = db.query(Micrograph).filter(Micrograph.micrograph_id == entry_id).first()
    # NOTE(review): entry is None for an unknown id, so entry.info() raises
    # AttributeError (HTTP 500) instead of a 404 -- confirm intended.
    return render_template('show_entry.html', entry=entry.info(), author=entry.contributor.info())
@app.route('/visual_query/<int:entry_id>')
def visual_query(entry_id):
    """Show the micrographs nearest to *entry_id* in feature space."""
    db = get_db()
    query = db.query(Micrograph).filter(Micrograph.micrograph_id == entry_id).first()
    author = query.contributor
    scores, nearest = features.query(entry_id)
    # write a single query and sort results on feature-space distance after
    # entries = db.query(Micrograph).filter(Micrograph.id.in_(nearest)).all()
    # write an individual query for each result -- won't scale
    entries = map(db.query(Micrograph).get, nearest)
    results = [entry.info() for entry in entries]
    # zip() is lazy on py3: the template must iterate it exactly once
    results = zip(results, scores)
    return render_template('query_results.html', query=query.info(),
                           author=author.info(), results=results)
@app.route('/visualize')
def bokeh_plot():
    """Embed the external Bokeh server app for dataset visualization."""
    # NOTE(review): Bokeh server URL is hard-coded to one host --
    # consider moving it into app.config.
    bokeh_script=autoload_server(None,app_path="/visualize", url="http://rsfern.materials.cmu.edu")
    return render_template('visualize.html', bokeh_script=bokeh_script)
@app.route('/writeup')
def writeup():
    """Redirect to the arXiv paper describing the dataset."""
    return redirect('https://arxiv.org/abs/1702.01117')
def format_bib_entry(entry):
    """Render a pybtex *entry* as a plain-text citation fragment.

    Fix: the original body returned the undefined name ``markup``, so
    every call raised NameError.  Build a minimal "Title. Journal. Year."
    string from whichever of those fields are present instead.
    """
    fields = getattr(entry, 'fields', {}) or {}
    parts = [str(fields[k]) for k in ('title', 'journal', 'year') if fields.get(k)]
    markup = '. '.join(parts)
    return markup
def author_list(entry):
    """Return a human-readable author string for a pybtex *entry*.

    Joins last names with commas and an Oxford ", and" before the final
    author.  Fixes the original's handling of short lists: it crashed
    with IndexError on an empty author list and produced ", and X" for a
    single author.
    """
    authors = [' '.join(p.last_names) for p in entry.persons['author']]
    if not authors:
        return ''
    if len(authors) == 1:
        return authors[0]
    return '{}, and {}'.format(', '.join(authors[:-1]), authors[-1])
def load_publication_data(path):
    """ use pybtex to display relevant publications """
    # Parse the .bib file and flatten each entry's fields into a dict,
    # adding a formatted author string for the template.
    pub_db = pybtex.database.parse_file(path)
    publication_data = []
    for key, entry in pub_db.entries.items():
        pub = dict(entry.fields)
        pub['authors'] = author_list(entry)
        publication_data.append(pub)
    return publication_data
@app.route('/publications')
def publications():
    """Render the publications page from three static BibTeX files."""
    documentation = load_publication_data('uhcsdb/static/documentation.bib')
    sources = load_publication_data('uhcsdb/static/sources.bib')
    publications = load_publication_data('uhcsdb/static/publications.bib')
    return render_template('publications.html',
                           documentation=documentation,
                           sources=sources,
                           publications=publications)
# Dev entry point: load config and run the built-in server.
if __name__ == '__main__':
    app.config.from_object('config')
    with app.app_context():
        app.run(debug=False)
| {
"repo_name": "bdecost/uhcsdb",
"path": "uhcsdb/uhcsdb.py",
"copies": "1",
"size": "6267",
"license": "mit",
"hash": -786510791594927500,
"line_mean": 31.8115183246,
"line_max": 99,
"alpha_frac": 0.6743258337,
"autogenerated": false,
"ratio": 3.4434065934065936,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4617732427106594,
"avg_score": null,
"num_lines": null
} |
# all the imports
import os
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
app = Flask(__name__) # create the application instance :)
app.config.from_object(__name__) # load config from this file , flaskr.py
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'flaskr.db'),
SECRET_KEY='fake secret key that is top secret',
USERNAME='admin',
PASSWORD='admin'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
# DATABASE STUFF
def get_db():
    """Opens a new database connection if there is none yet for the
    current application context.
    """
    # Cache the connection on flask.g so a request reuses one handle.
    if not hasattr(g, 'sqlite_db'):
        g.sqlite_db = connect_db()
    return g.sqlite_db
def connect_db():
    """Connects to the specific database."""
    rv = sqlite3.connect(app.config['DATABASE'])
    # sqlite3.Row gives dict-style access (row['title']) in templates.
    rv.row_factory = sqlite3.Row
    return rv
# Runs automatically when the application context is torn down.
@app.teardown_appcontext
def close_db(error):
    """Closes the database again at the end of the request."""
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()
def init_db():
    """Create the schema by executing schema.sql against the configured DB."""
    db = get_db()
    with app.open_resource('schema.sql', mode='r') as f:
        db.cursor().executescript(f.read())
    db.commit()
# Exposed on the command line as ``flask initdb``.
@app.cli.command('initdb')
def initdb_command():
    """Initializes the database."""
    init_db()
    print('Initialized the database.')
# VIEW FUNCTIONS
@app.route('/')
def show_entries():
    """List all entries, newest first."""
    db = get_db()
    cur = db.execute('SELECT title, text FROM entries ORDER BY id DESC')
    entries = cur.fetchall()
    return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
    """Insert a new entry; only for logged-in sessions (401 otherwise)."""
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    # parameterized query -- form input is never interpolated into SQL
    db.execute('INSERT INTO entries (title, text) VALUES (?, ?)',
               [request.form['title'], request.form['text']])
    db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Check posted credentials against config and set the session flag.

    NOTE(review): plaintext password comparison against config -- fine
    for the tutorial, not for production.
    """
    error = None
    if request.method == 'POST':
        if request.form['username'] != app.config['USERNAME']:
            error = 'Invalid username'
        elif request.form['password'] != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the login flag (no-op if absent) and return to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
| {
"repo_name": "sobeckley/Wallpaper_Sorter",
"path": "flaskr/flaskr/flaskr.py",
"copies": "1",
"size": "2765",
"license": "mit",
"hash": 6644359817448868000,
"line_mean": 29.0543478261,
"line_max": 73,
"alpha_frac": 0.6394213382,
"autogenerated": false,
"ratio": 3.638157894736842,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9751845743824882,
"avg_score": 0.005146697822392089,
"num_lines": 92
} |
# all the imports
import os
import sqlite3
import datetime
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
from flask.ext.babel import Babel, lazy_gettext
from passlib.apps import custom_app_context as pwd_context
from config import LANGUAGES
app = Flask(__name__)
app.config.from_object(__name__)
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'kant.db'),
DEBUG=True,
SECRET_KEY='development key',
BABEL_DEFAULT_LOCALE='de',
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
babel = Babel(app)
@babel.localeselector
def get_locale():
    """Pick the UI locale: logged-in user's setting, else Accept-Language."""
    # if a user is logged in, use the locale from the user settings
    user = getattr(g, 'user', None)
    if user is not None:
        return user.locale
    return request.accept_languages.best_match(LANGUAGES.keys())
@babel.timezoneselector
def get_timezone():
    """Return the logged-in user's timezone; implicit None keeps the default."""
    user = getattr(g, 'user', None)
    if user is not None:
        return user.timezone
def connect_db():
    """Connects to the specific database."""
    rv = sqlite3.connect(app.config['DATABASE'])
    # sqlite3.Row gives dict-style access to columns (row['name']).
    rv.row_factory = sqlite3.Row
    return rv
def init_db():
    """Initializes the database."""
    db = get_db()
    with app.open_resource('schema.sql', mode='r') as f:
        db.cursor().executescript(f.read())
    db.commit()
def get_db():
    """Opens a new database connection if there is none yet for the
    current application context.
    """
    # Cached on flask.g so one request reuses a single connection.
    if not hasattr(g, 'sqlite_db'):
        g.sqlite_db = connect_db()
    return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
    """Closes the database again at the end of the request."""
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()
@app.route('/')
def show_keys():
    """List all keys with their holders, most recently updated first."""
    db = get_db()
    cur = db.execute('SELECT key.id, key.name, key.last_update, key.user, ' +
                     'user.name AS user_name ' +
                     'FROM keys AS key ' +
                     'JOIN users AS user ON key.user = user.id ' +
                     'ORDER BY key.last_update DESC')
    keys = cur.fetchall()
    return render_template('show_keys.html', keys=keys)
@app.route('/key_history/<int:id>')
def show_key_history(id):
    """Show the audit trail (key_history rows) for one key."""
    db = get_db()
    cur = db.execute('SELECT * FROM key_history WHERE key = ?', [id])
    history = cur.fetchall()
    cur = db.execute('SELECT name, user FROM keys WHERE id = ?', [id])
    key = cur.fetchall()
    return render_template('show_key_history.html', history=history, key=key)
@app.route('/users')
def show_users():
    """List all users alphabetically."""
    db = get_db()
    cur = db.execute('SELECT id, name, mail, phone FROM users ORDER BY name')
    users = cur.fetchall()
    return render_template('show_users.html', users=users)
@app.route('/new_key', methods=['GET'])
def new_key():
    """Form for creating a key; needs the user list for the assignee select."""
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    cur = db.execute('SELECT id, name FROM users ORDER BY name')
    users = cur.fetchall()
    return render_template('new_key.html', users=users)
@app.route('/edit_key/<int:id>', methods=['GET'])
def edit_key(id):
    """Form for editing an existing key."""
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    cur = db.execute('SELECT id, name, user FROM keys WHERE id = ?', [id])
    key = cur.fetchone()
    cur = db.execute('SELECT id, name FROM users')
    users = cur.fetchall()
    return render_template('edit_key.html', key=key, users=users)
@app.route('/save_key', methods=['POST'])
def save_key():
    """Create or update a key ('id' in the form means update).

    Updates also append a row to key_history recording the old and new
    holder/name.
    """
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    if 'id' in request.form.keys():
        key = {
            'id': int(request.form['id']),
            'name': str(request.form['name']),
            'user': int(request.form['user'])
        }
        cur = db.execute('SELECT user, name FROM keys WHERE id = ?',
                         [request.form['id']])
        old = cur.fetchone()
        # NOTE(review): change_user is hard-coded to 0 -- presumably a
        # placeholder for the acting admin's id; confirm.
        db.execute('INSERT INTO key_history (key, user_before, user_after, '+
                   'name_before, name_after, change_user) ' +
                   'VALUES (?, ?, ?, ?, ?, ?)',
                   [key['id'], old['user'], key['user'], old['name'], key['name'], 0])
        db.commit()
        # NOTE(review): last_update is not refreshed on this path, only on
        # insert below -- confirm whether edits should bump it.
        db.execute('UPDATE keys SET name = ?, user = ? WHERE id = ?',
                   [key['name'], key['user'], key['id']])
        db.commit()
    else:
        db.execute('INSERT INTO keys (name, user, last_update) VALUES (?, ?, ?)',
                   [request.form['name'], request.form['user'],
                    datetime.datetime.now()])
        db.commit()
    flash(lazy_gettext('Changes to the user where saved successfully!'))
    return redirect(url_for('show_keys'))
@app.route('/new_user', methods=['GET'])
def new_user():
    """Form for creating a user."""
    if not session.get('logged_in'):
        abort(401)
    return render_template('new_user.html')
@app.route('/save_user', methods=['POST'])
def save_user():
    """Create or update a user from the posted form ('id' present => update)."""
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    if 'id' in request.form.keys():
        db.execute('UPDATE users SET name = ?, mail = ?, phone = ? WHERE id = ?',
                   [request.form['name'], request.form['mail'],
                    request.form['phone'], request.form['id']])
    else:
        db.execute('INSERT INTO users (name, mail, phone) VALUES (?, ?, ?)',
                   [request.form['name'], request.form['mail'],
                    request.form['phone']])
    db.commit()
    flash(lazy_gettext('Changes to the user where saved successfully!'))
    return redirect(url_for('show_users'))
@app.route('/edit_user/<int:id>', methods=['GET'])
def edit_user(id):
    """Form for editing an existing user."""
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    cur = db.execute('SELECT id, name, mail, phone FROM users WHERE id = ?', [id])
    user = cur.fetchone()
    return render_template('edit_user.html', user=user)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log an admin in by verifying the passlib hash stored in ``admins``.

    Idiom fixes: ``res == None`` -> ``res is None`` and
    ``verify(...) != True`` -> ``not verify(...)``; behavior unchanged.
    """
    error = None
    if request.method == 'POST':
        db = get_db()
        cur = db.execute('SELECT password FROM admins WHERE name = ?',
                         [request.form['username']])
        res = cur.fetchone()
        if res is None:
            error = lazy_gettext('Invalid username')
        elif not pwd_context.verify(request.form['password'], res['password']):
            error = lazy_gettext('Invalid password')
        else:
            session['logged_in'] = True
            flash(lazy_gettext('You were logged in'))
            return redirect(url_for('show_keys'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the login flag (no-op if absent) and return to the key list."""
    session.pop('logged_in', None)
    flash(lazy_gettext('You were logged out'))
    return redirect(url_for('show_keys'))
@app.route('/new_admin', methods=['GET'])
def new_admin():
    """Form for creating an admin account."""
    if not session.get('logged_in'):
        abort(401)
    return render_template('new_admin.html')
@app.route('/edit_admin/<int:id>', methods=['GET'])
def edit_admin(id):
    """Form for editing an existing admin account.

    Fix: the original selected from the ``users`` table, but admin
    accounts live in ``admins`` (see login()/save_admin()), so the form
    showed the wrong record.
    """
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    cur = db.execute('SELECT id, name, mail FROM admins WHERE id = ?', [id])
    admin = cur.fetchone()
    return render_template('edit_admin.html', admin=admin)
@app.route('/save_admin', methods=['POST'])
def save_admin():
    """Create or update an admin account.

    Bugs fixed against the original:
    - the two UPDATE branches were swapped: the '******' placeholder
      password (meaning "unchanged") was being hashed and stored, while a
      genuinely new password was silently dropped;
    - the UPDATE statements targeted the misspelled ``adminss`` table and
      the wrong ``users`` table instead of ``admins``.
    """
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    password = pwd_context.encrypt(request.form['password'])
    if 'id' in request.form.keys():
        if request.form['password'] == '******':
            # placeholder => keep stored password, update name/mail only
            db.execute('UPDATE admins SET name = ?, mail = ? WHERE id = ?',
                       [request.form['name'], request.form['mail'],
                        request.form['id']])
        else:
            db.execute('UPDATE admins SET name = ?, mail = ?, password = ? WHERE id = ?',
                       [request.form['name'], request.form['mail'],
                        password, request.form['id']])
    else:
        db.execute('INSERT INTO admins (name, mail, password) VALUES (?, ?, ?)',
                   [request.form['name'], request.form['mail'],
                    password])
    db.commit()
    flash(lazy_gettext('Changes to the admin where saved successfully!'))
    # NOTE(review): no 'show_admins' view exists in this file -- this
    # redirect raises BuildError at runtime; confirm the intended target.
    return redirect(url_for('show_admins'))
@app.route('/install')
def install():
    """(Re)create the database schema from schema.sql.

    NOTE(review): unauthenticated -- anyone who hits /install can reset
    the schema; confirm this is only exposed during initial setup.
    """
    init_db()
    return render_template('install.html')
# Dev entry point.
if __name__ == '__main__':
    app.run()
| {
"repo_name": "Bytespeicher/KANT",
"path": "src/kant.py",
"copies": "1",
"size": "8385",
"license": "mit",
"hash": 9074012061975698000,
"line_mean": 28.628975265,
"line_max": 90,
"alpha_frac": 0.5749552773,
"autogenerated": false,
"ratio": 3.6142241379310347,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9675276629614387,
"avg_score": 0.0027805571233296073,
"num_lines": 283
} |
# all the imports
import os
import sqlite3
import pandas as pd
from flask import Flask, g, render_template
from contextlib import closing
# create our little application :)
app = Flask(__name__)
# configuration
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'flaskr.db'),
))
def connect_db():
    """Open a sqlite3 connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
def init_db():
    """Create the schema by executing schema.sql; connection is closed after."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
@app.route('/')
def stacked_bar_chart():
    """Render the line-graph template from the ``entries`` table.

    Fixes: uses the configured DATABASE path via connect_db() (the
    original hard-coded a relative "flaskr.db", diverging from
    ``app.config``) and closes the connection even if the query raises.
    The debug ``print(df.to_json())`` was dropped.
    """
    # Read sqlite query results into a pandas DataFrame
    with closing(connect_db()) as con:
        df = pd.read_sql_query("SELECT * from entries", con)
    weeks = df['week'].values.tolist()   # x axis
    data1 = df['data1'].values.tolist()
    data2 = df['data2'].values.tolist()
    return render_template('linegraph.html', weeks=weeks, data1=data1, data2=data2)
if __name__ == '__main__':
init_db()
app.run(debug=True) | {
"repo_name": "johnsliao/D3.js-flask",
"path": "flaskr/flaskr.py",
"copies": "1",
"size": "1128",
"license": "mit",
"hash": 3398159330241524000,
"line_mean": 24.6590909091,
"line_max": 83,
"alpha_frac": 0.655141844,
"autogenerated": false,
"ratio": 3.40785498489426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9529135963511655,
"avg_score": 0.006772173076520904,
"num_lines": 44
} |
#all the imports
import sqlite3
#for a heavier-duty app, we could use sqlalchemy
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
import local_settings
import re
from datetime import date, datetime
#Link to config settings
ALVA_SETTINGS = 'local_settings.py'
#create our application
app = Flask(__name__)
app.config.from_pyfile('local_settings.py')
@app.template_filter('dateformat')
def dateformat(value):
    """Jinja filter: render a date as MM/DD/YY.

    Accepts a datetime/date object or an ISO "YYYY-MM-DD..." string (the
    form sqlite returns); unparseable values pass through unchanged so a
    template never crashes on bad data.  Fixes the original (flagged
    "TODO obviously doesn't work"), which computed a formatted string but
    returned *value* untouched.
    """
    if isinstance(value, (datetime, date)):
        return value.strftime("%m/%d/%y")
    try:
        return datetime.strptime(str(value)[:10], "%Y-%m-%d").strftime("%m/%d/%y")
    except ValueError:
        return value
#connect to db
def connect_db():
    """Open a sqlite3 connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
#initialize the db
def init_db():
    """Create the schema from schema.sql; connection closed afterwards."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql') as f:
            db.cursor().executescript(f.read())
        db.commit()
#create db connections for functions
@app.before_request
def before_request():
    """Open a fresh DB connection on flask.g for every request."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection, if one was opened."""
    if hasattr(g, 'db'):
        g.db.close()
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 page with the correct HTTP status.

    Fix: the original returned the template with an implicit 200, so
    clients saw a success status for missing pages.
    """
    return render_template('404.html'), 404
def sluggify(string):
    """Turn arbitrary text into a lowercase, hyphen-separated URL slug.

    Runs of non-word characters become separators, surrounding and
    repeated whitespace collapses, and the words are joined with single
    hyphens.
    """
    cleaned = re.sub(r"[^\w]+", " ", string)
    words = cleaned.lower().strip().split()
    return "-".join(words)
@app.route('/')
def show_entries():
    """List all entries (including private ones), newest first."""
    cur = g.db.execute('select title, subhed, publishdate, private, id, slug from entries order by id desc')
    entries = [dict(title=row[0], subhed=row[1], publishdate=row[2], private=row[3], id=row[4], slug=row[5]) for row in cur.fetchall()]
    return render_template('show_entries.html', entries=entries)
@app.route('/colophon')
def colophon():
    """Static colophon page."""
    return render_template('colophon.html')
def query_db(query, args=(), one=False):
    """Execute *query* and return rows as dicts keyed by column name.

    With ``one=True``, return only the first row, or None when the query
    matched nothing.
    """
    cur = g.db.execute(query, args)
    columns = [d[0] for d in cur.description]
    rows = [dict(zip(columns, row)) for row in cur.fetchall()]
    if one:
        return rows[0] if rows else None
    return rows
@app.route('/entries/<slug>.html')
def show_entry(slug):
    """Render one entry looked up by slug; 404 if unknown."""
    entry = query_db('select * from entries where slug=?', [slug], one=True)
    if entry is None:
        abort(404)
    else:
        return render_template('entry.html', entry=entry)
@app.route('/add', methods=['POST'])
def add_entry():
    """Create an entry from the posted form; the slug derives from the title."""
    if not session.get('logged_in'):
        abort(401)
    g.db.execute('insert into entries (title, subhed, publishdate, status, descript, private, slug) values (?, ?, ?, ?, ?, ?, ?)', [request.form['title'], request.form['subhed'], request.form['publishdate'], request.form['status'], request.form['descript'], request.form['private'], sluggify(request.form['title'])])
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/edit/<slug>.html')
def entry_edit(slug):
    """Edit form for the entry with *slug*; 404 if unknown."""
    entry = query_db('select * from entries where slug=?', [slug], one=True)
    if entry is None:
        abort(404)
    else:
        return render_template('edit_entry.html', entry=entry)
@app.route('/edit', methods=['POST'])
def make_edit():
    """Update an existing entry; the slug is the immutable lookup key."""
    if not session.get('logged_in'):
        abort(401)
    g.db.execute('update entries set title=?, subhed=?, publishdate=?, status=?, descript=?, private=? where slug=?', [request.form['title'], request.form['subhed'], request.form['publishdate'], request.form['status'], request.form['descript'], request.form['private'], request.form['slug']])
    g.db.commit()
    slug=request.form['slug']
    flash('Entry was successfully edited')
    return redirect(url_for('entry_edit', slug=slug))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Check posted credentials against config and set the session flag."""
    error = None
    if request.method == 'POST':
        if request.form['username'] != app.config['USERNAME']:
            error = 'Invalid username'
        elif request.form['password'] != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the login flag (no-op if absent) and return to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
#fire up development server as a standalone application
if __name__ == '__main__':
    app.run()
| {
"repo_name": "tommeagher/alva",
"path": "v.5/alva.py",
"copies": "1",
"size": "4375",
"license": "mit",
"hash": 1804331070691387100,
"line_mean": 32.9147286822,
"line_max": 316,
"alpha_frac": 0.648,
"autogenerated": false,
"ratio": 3.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4648,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
from contextlib import closing
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
# configuration
DATABASE = '/tmp/wekndpln.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open a sqlite3 connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
def init_db():
    """Create the schema from schema.sql; connection closed afterwards."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
def get_db():
    """Return a cached connection on g._database, opening one on first use.

    NOTE(review): before_request() also opens its own connection as g.db,
    and teardown_request() closes only g.db -- a connection cached here
    (used by query_db) appears to be left unclosed; confirm.
    """
    db = getattr(g, '_database', None)
    if db is None:
        db = g._database = connect_db()
        # Row gives dict-style access to columns in templates.
        db.row_factory = sqlite3.Row
    return db
@app.before_request
def before_request():
    """Open a fresh DB connection on flask.g for every request."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection, if one was opened."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
# generic query helper
def query_db(query, args=(), one=False):
    """Run *query* with *args*; return all rows, or with ``one=True`` just
    the first row (None when nothing matched)."""
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
###################################### app routes #############################
@app.route('/')
def show_entries():
    """List all entries, newest first."""
    entries = query_db('select title, text from entries order by id desc')
    return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
    """Insert a new entry; only for logged-in sessions (401 otherwise)."""
    if not session.get('logged_in'):
        abort(401)
    g.db.execute('insert into entries (title, text) values (?, ?)',
                 [request.form['title'], request.form['text']])
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Check posted credentials against config and set the session flag."""
    error = None
    if request.method == 'POST':
        if request.form['username'] != app.config['USERNAME']:
            error = 'Invalid username'
        elif request.form['password'] != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the login flag (no-op if absent) and return to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run() | {
"repo_name": "Pinkii-/weekendplan",
"path": "wekndpln.py",
"copies": "1",
"size": "2507",
"license": "mit",
"hash": -7258128797942725000,
"line_mean": 27.5,
"line_max": 79,
"alpha_frac": 0.6039090546,
"autogenerated": false,
"ratio": 3.6228323699421967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4726741424542197,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
from flask import Flask, request, g, redirect, render_template
from contextlib import closing
app = Flask(__name__)
@app.route('/')
@app.route('/index')
def front_page():
    """Landing page.

    NOTE(review): print_people() below also registers '/'; the first
    registered rule wins, so this view shadows it -- confirm which is
    intended.
    """
    return render_template('index.html')
@app.route('/about')
def about_page():
    """Static about page."""
    return render_template('about.html')
@app.route('/search')
def search_page():
    """Static search page."""
    return render_template('search.html')
# Edit this
@app.route('/sign_up', methods=["POST"])
def sign_up():
    """Store the posted name/'cheep' pair and show the sign-up page."""
    print(request.form)
    db_add_person(request.form['name'], request.form['cheep'])
    return render_template('sign_up.html')
@app.route('/user/<username>')
def show_user_profile(username):
    # show the user profile for that user
    return 'User %s' % username
#Data Manipulation
def db_read_people():
    """Return every row of the ``people`` table."""
    cursor = get_db().cursor()
    cursor.execute("SELECT * FROM people")
    rows = cursor.fetchall()
    return rows
# Edit this
def db_add_person(name, people):
    """Insert one (name, people) row into ``people`` and commit.

    Fix: the original INSERT had three '?' placeholders but bound only
    two values, so every call raised sqlite3.ProgrammingError.  Both
    callers (sign_up, receive_people) pass exactly two values.
    # NOTE(review): assumes the people table has two columns -- if it has
    # a third (e.g. an id), supply DEFAULT via explicit column names.
    """
    cur = get_db().cursor()
    person = (name, people)
    cur.execute("INSERT INTO people VALUES (?, ?)", person)
    get_db().commit()
# Check this
@app.route("/")
def print_people():
    """Render the index with all people.

    NOTE(review): '/' is already routed to front_page() above; that
    earlier rule wins, making this view unreachable via '/' -- confirm.
    """
    people = db_read_people()
    print(people)
    return render_template('index.html', people=people)
# process people
@app.route("/api/people", methods=["POST"])
def receive_people():
    """API endpoint: store a posted person and redirect to the index."""
    print(request.form)
    db_add_person(request.form['name'], request.form['people'])
    return redirect("/")
# configuration
DATABASE = '/db/flaskr.db'
DEBUG = True
SECRET_KEY = 'secret'
USERNAME = 'admin'
PASSWORD = 'default'
def connect_db():
    """Open a sqlite3 connection via app.config.

    NOTE(review): this file never calls app.config.from_object, so
    'DATABASE' is not in app.config and this would raise KeyError;
    get_db() below uses the module-level DATABASE constant instead --
    confirm which path is live.
    """
    return sqlite3.connect(app.config['DATABASE'])
def get_db():
    """Return the connection cached on g._database, opening one if needed."""
    db = getattr(g, '_database', None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
    return db
@app.teardown_appcontext
def close_connection(exception):
    """Close the cached DB connection when the app context tears down."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
if __name__ == '__main__':
app.run() | {
"repo_name": "daliu/Tuber",
"path": "flaskr.py",
"copies": "1",
"size": "1935",
"license": "mit",
"hash": -1883523486637747700,
"line_mean": 21,
"line_max": 63,
"alpha_frac": 0.6516795866,
"autogenerated": false,
"ratio": 3.359375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9447343304789868,
"avg_score": 0.012742256362026472,
"num_lines": 88
} |
# all the imports
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, jsonify
from contextlib import closing
# configuration
DATABASE = '/tmp/demo.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# creating the app
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open a sqlite3 connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
def init_db():
    """Create the schema from schema.sql; connection closed afterwards."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
@app.before_request
def before_request():
    """Open a fresh DB connection on flask.g for every request."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection, if one was opened."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
@app.route('/', methods=['GET'])
def show_entries():
    """Return all entries as JSON, newest first."""
    cur = g.db.execute("SELECT title, body FROM entries ORDER BY id DESC") # This is where you make the query
    entries = [dict(title=row[0], body=row[1]) for row in cur.fetchall()]
    return jsonify(entries=entries)
@app.route('/', methods=['POST'])
def add_entry():
    """Insert a new entry; requires a logged-in session (401 otherwise).

    Fix: the INSERT has two placeholders (title, body) but the original
    bound only ``request.form['text']``, raising
    sqlite3.ProgrammingError on every post.
    """
    if not session.get('logged_in'):
        abort(401)
    g.db.execute('INSERT INTO entries (title, body) VALUES (?,?)',
                 [request.form['title'], request.form['text']])
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Check posted credentials against config and set the session flag.

    Fix: the original set session['Logged_in'] (capital L) while
    add_entry() checks and logout() pops 'logged_in', so logging in
    never actually unlocked posting.
    """
    error = None
    if request.method == 'POST':
        if request.form['username'] != app.config['USERNAME']:
            error = 'Invalid username'
        elif request.form['password'] != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return jsonify(error=error)
@app.route('/logout')
def logout():
    """Drop the login flag (no-op if absent) and return to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
# Dev entry point.
if __name__ == '__main__':
    app.run()
| {
"repo_name": "Blackmagicbox/Microblog_app_in_flask",
"path": "microblog.py",
"copies": "1",
"size": "2101",
"license": "mit",
"hash": -1801190383246015000,
"line_mean": 24.6219512195,
"line_max": 110,
"alpha_frac": 0.626368396,
"autogenerated": false,
"ratio": 3.5914529914529916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4717821387452991,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
from contextlib import closing
# configuration
DATABASE = '/tmp/flaskr.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__) # look for all uppercase variables defined there
def connect_db():
    """Open a sqlite3 connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
def init_db():
    """Create the schema from schema.sql; connection closed afterwards."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
@app.before_request
def before_request():
    """Open a fresh DB connection on flask.g for every request."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection, if one was opened."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
@app.route('/')
def show_entries():
    """List all entries, newest first."""
    cur = g.db.execute('select title, text from entries order by id desc')
    entries = [dict(title=row[0], text=row[1]) for row in cur.fetchall()]
    return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
    """Insert a new entry; only for logged-in sessions (401 otherwise)."""
    if not session.get('logged_in'):
        abort(401)
    g.db.execute('insert into entries (title, text) values (?, ?)',
                 [request.form['title'], request.form['text']])
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Check posted credentials against config and set the session flag."""
    error = None
    if request.method == 'POST':
        if request.form['username'] != app.config['USERNAME']:
            error = 'Invalid username'
        elif request.form['password'] != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the login flag (no-op if absent) and return to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run() | {
"repo_name": "radiumweilei/flaskr",
"path": "flaskr.py",
"copies": "1",
"size": "2200",
"license": "apache-2.0",
"hash": 377923041844347300,
"line_mean": 25.8414634146,
"line_max": 94,
"alpha_frac": 0.6290909091,
"autogenerated": false,
"ratio": 3.648424543946932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4777515453046932,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
from contextlib import closing
#configuration
DATABASE = './flaskr.db'
DEBUG = True
SECRET_KEY = 'dev key'
USERNAME = 'admin'
PASSWORD = 'admin'
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open a sqlite3 connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
def init_db():
    """Create the schema from schema.sql; connection closed afterwards."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
@app.before_request
def before_request():
    """Open a fresh DB connection on flask.g for every request."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection, if one was opened."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
@app.route('/')
def show_entries():
    """List all entries, newest first."""
    cur = g.db.execute('select title, text from entries order by id desc')
    entries = [dict(title=row[0], text=row[1]) for row in cur.fetchall()]
    return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
    """Insert a new entry; only for logged-in sessions (401 otherwise).

    Fix: corrected the misspelled user-facing flash message
    ('succesfully' -> 'successfully').
    """
    if not session.get('logged_in'):
        abort(401)
    g.db.execute('insert into entries (title, text) values (?,?)', [request.form['title'], request.form['text']])
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET','POST'])
def login():
    """Check posted credentials against config and set the session flag."""
    error = None
    if request.method == 'POST':
        if request.form['username'] != app.config['USERNAME']:
            error = 'Invalid username'
        elif request.form['password'] != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash ('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the login flag (no-op if absent) and return to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
#runs the server
if __name__ == '__main__':
    app.run()
| {
"repo_name": "vmseba/flaskr",
"path": "flaskr.py",
"copies": "1",
"size": "2130",
"license": "mit",
"hash": 5327306830372261000,
"line_mean": 26.6623376623,
"line_max": 113,
"alpha_frac": 0.6305164319,
"autogenerated": false,
"ratio": 3.5678391959798996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46983556278798994,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
#configuration
DATABASE = '/tmp/flasr.db'
DEBUG = True
SECRET_KEY= 'development key'
USERNAME = 'admin'
PASSWORD= 'default'
# create our little application
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open a sqlite3 connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
def init_db():
    """Create the schema from schema.sql; connection closed afterwards."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
@app.before_request
def before_request():
    """Open a fresh DB connection on flask.g for every request."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection, if one was opened."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
@app.route('/')
def show_entries():
    """List all entries, newest first."""
    cur = g.db.execute('select title, text from entries order by id desc')
    entries = [dict(title=row[0], text=row[1]) for row in cur.fetchall()]
    return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
    """Insert a new entry; only for logged-in sessions (401 otherwise)."""
    if not session.get('logged_in'):
        abort(401)
    g.db.execute('insert into entries(title, text) values(?, ?)',
                 [request.form['title'], request.form['text']])
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Validate credentials against the app config; set the session flag on success."""
    error = None
    if request.method == 'POST':
        submitted_user = request.form['username']
        submitted_pass = request.form['password']
        if submitted_user != app.config['USERNAME']:
            error = 'Invalid username'
        elif submitted_pass != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the login flag and go back to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
if __name__ == '__main__':
    app.run()
| {
"repo_name": "mhcrnl/PmwTkEx",
"path": "pywebframework/pyflask/flaskr/faskr.py",
"copies": "1",
"size": "2144",
"license": "apache-2.0",
"hash": 9151373654720099000,
"line_mean": 27.5866666667,
"line_max": 74,
"alpha_frac": 0.6263992537,
"autogenerated": false,
"ratio": 3.591289782244556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4717689035944556,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
# configuration
DATABASE = '/tmp/flaskr.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
#app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
    """Return a fresh SQLite connection for the configured DATABASE path."""
    db_path = app.config['DATABASE']
    return sqlite3.connect(db_path)
def init_db():
    """Build the schema from schema.sql using a short-lived connection."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as schema:
            db.cursor().executescript(schema.read())
        db.commit()
@app.before_request
def before_request():
    """Open one database handle per request and stash it on g."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the request's handle if before_request managed to open one."""
    handle = getattr(g, 'db', None)
    if handle is None:
        return
    handle.close()
@app.route('/')
def show_entries():
    """List every entry, most recent first."""
    cursor = g.db.execute('select title, text from entries order by id desc')
    entries = [{'title': t, 'text': body} for t, body in cursor.fetchall()]
    return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
    """Store a new entry for a logged-in user; anonymous posts get a 401."""
    if not session.get('logged_in'):
        abort(401)
    params = [request.form['title'], request.form['text']]
    g.db.execute('insert into entries (title, text) values (?, ?)', params)
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in by comparing form values with the configured credentials."""
    error = None
    if request.method == 'POST':
        user = request.form['username']
        pwd = request.form['password']
        if user != app.config['USERNAME']:
            error = 'Invalid username'
        elif pwd != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Forget the login flag and show the entry list again."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
if __name__ == '__main__':
    app.run()
| {
"repo_name": "michaelsnook/flaskr-heroku",
"path": "flaskr.py",
"copies": "1",
"size": "2209",
"license": "cc0-1.0",
"hash": 5348196205348368000,
"line_mean": 26.9620253165,
"line_max": 74,
"alpha_frac": 0.6278859212,
"autogenerated": false,
"ratio": 3.574433656957929,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47023195781579286,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
# configuration
DATABASE = 'app.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# create our little application
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open and return a new SQLite connection to the configured database."""
    db_path = app.config['DATABASE']
    return sqlite3.connect(db_path)
from contextlib import closing
def init_db():
    """Initialise the database schema from schema.sql."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as schema:
            db.cursor().executescript(schema.read())
        db.commit()
# Per-request connection management: open before, close after every request.
@app.before_request
def before_request():
    """Open the per-request database handle on g."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request handle if it exists."""
    handle = getattr(g, 'db', None)
    if handle is None:
        return
    handle.close()
@app.route('/')
def show_entries():
    """Render all entries ordered newest-first."""
    cursor = g.db.execute('select title, text from entries order by id desc')
    entries = [{'title': t, 'text': body} for t, body in cursor.fetchall()]
    return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
    """Persist a new entry; requires an active login session (else 401)."""
    if not session.get('logged_in'):
        abort(401)
    params = [request.form['title'], request.form['text']]
    g.db.execute('insert into entries (title, text) values (?, ?)', params)
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Check the posted credentials against app config and start a session."""
    error = None
    if request.method == 'POST':
        user = request.form['username']
        pwd = request.form['password']
        if user != app.config['USERNAME']:
            error = 'Invalid username'
        elif pwd != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """End the session and return to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run() | {
"repo_name": "sergeimoiseev/egrixcalc",
"path": "old/app.py",
"copies": "2",
"size": "2164",
"license": "mit",
"hash": 8504746313751289000,
"line_mean": 27.8666666667,
"line_max": 74,
"alpha_frac": 0.6284658041,
"autogenerated": false,
"ratio": 3.6187290969899664,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5247194901089967,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, Request, Response
from contextlib import closing
from datetime import datetime
# configuration
DATABASE = '/tmp/flaskr.db'
DEBUG = False
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
app = Flask(__name__)
app.config.from_object(__name__)
Request.charset = "shift-jis"
Response.charset = "shift-jis"
DATABASE = '/home/ruin0x11/elona.db'
def connect_to_database():
    """Open a new connection to the elona database file."""
    return sqlite3.connect(DATABASE)
def get_db():
    """Return the app-context connection, creating and caching it on first use."""
    conn = getattr(g, '_database', None)
    if conn is None:
        conn = g._database = connect_to_database()
        # Row access by column name (line['time'], line['kind'], ...).
        conn.row_factory = sqlite3.Row
    return conn
@app.teardown_appcontext
def close_connection(exception):
    """Close the cached connection when the application context tears down."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
def init_db():
    """Create the schema and seed it with ten sample chat/vote rows."""
    with app.app_context():
        db = get_db()
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        # Unix timestamp; "%s" is a platform-dependent strftime extension —
        # NOTE(review): not portable to Windows, confirm deployment target.
        date = int(datetime.now().strftime("%s"))
        for i in range(0,10):
            db.execute('insert into chat (time, kind, text, addr) values (?, ?, ?, ?)',
                    [int(date), 1, "弱気ものprinは猫に殺された「なむ」", "127.0.0.1"])
            db.execute('insert into vote (name, votes, addr, time, totalvotes) values (?, ?, ?, ?, ?)',
                    ["弱気ものprin" + str(i), 100 - i, '127.0.0.1', date, 1000])
        db.commit()
def query_db(query, args=(), one=False):
    """Run *query* with *args* and return all rows.

    With one=True, return only the first row, or None when the result
    set is empty.
    """
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
def chat_type_from_num(x):
    """Map a numeric chat kind (0-2) to its string label.

    Unknown kinds fall back to 'chat'.  The original default was the int 0,
    which is inconsistent (every known kind maps to a str) and made callers
    such as get_log() raise TypeError when concatenating the result into
    the response string.  'chat' also mirrors chat_type_from_string, where
    unknown labels map to code 0, i.e. 'chat'.
    """
    return {
        0: 'chat',
        1: 'dead',
        2: 'wish',
    }.get(x, 'chat')
def chat_type_from_string(x):
    """Map a chat-kind label back to its numeric code (unknown labels -> 0)."""
    codes = {
        'chat': 0,
        'dead': 1,
        'wish': 2,
    }
    return codes.get(x, 0)
@app.route("/text.txt", methods=["GET"])
def text():
response = "<!--START-->\n%\n素敵な異名コンテスト♪1 [1ヶ月で自動リセット]%\nYour favorite alias♪1 [Auto reset every month]%"
return Response(response, mimetype='text/plain')
@app.route("/log.txt", methods=["GET"])
def get_log():
response = ""
first = query_db('select * from chat order by id desc limit 1', one=True)
no = first['id']+1 if first else 1
response += str(no) + "<C>\n<!--START-->\n"
for line in query_db('select * from chat order by id desc limit 30'):
date = datetime.fromtimestamp(line['time']).strftime("%m/%d(%I)")
response += str(line['id']) + '%' + date + '%' + chat_type_from_num(line['kind']) + line['text'] + '%' + line['addr'] + '%\n'
response += "<!--END-->\n<!-- WebTalk v1.6 --><center><small><a href='http://www.kent-web.com/' target='_top'>WebTalk</a></small></center>"
return Response(response, mimetype='text/plain')
@app.route("/vote.txt", methods=["GET"])
def get_vote():
response = ""
for line in query_db('select * from vote limit 100'):
date = datetime.fromtimestamp(line['time']).strftime("%s")
response += str(line['id']) + '<>' + line['name'] + '<>' + str(line['votes']) + '<>' + line['addr'] + '<>' + date + '#' + str(line['totalvotes']) + '#' + '1' + '#<>\n'
return Response(response, mimetype='text/plain')
@app.route("/cgi-bin/wtalk/wtalk2.cgi", methods=["GET"])
def add_chat():
db = get_db()
mode = request.args.get('mode')
comment = request.args.get('comment')
chat_type = chat_type_from_string(comment[:4])
text = comment[4:]
time = int(datetime.now().strftime("%s"))
addr = request.remote_addr
db.execute('insert into chat (time, kind, text, addr) values (?, ?, ?, ?)',
[time, chat_type, text, addr])
db.commit()
return get_log()
@app.route("/cgi-bin/vote/votec.cgi", methods=["GET"])
def add_vote():
db = get_db()
namber = request.args.get('namber')
mode = request.args.get('mode')
name = request.args.get('vote')
addr = request.remote_addr
time = int(datetime.now().strftime("%s"))
if mode != 'wri':
return Response(status=501)
if name:
first = query_db('select * from vote where name = ?', [name], one=True)
if first:
return get_vote()
db.execute('insert into vote (name, votes, addr, time, totalvotes) values (?, ?, ?, ?, ?)',
[name, 0, addr, time, 0])
db.commit()
elif namber:
vote = query_db('select * from vote where id = ?', [namber], one=True)
if not vote or vote['addr'] == request.remote_addr:
return Response(status=400)
db.execute('update vote set votes = ?, totalvotes = ? where id = ?',
[vote['votes'] + 1, vote['totalvotes'] + 1, namber])
db.commit()
return get_vote()
if __name__ == "__main__":
app.run()
| {
"repo_name": "Ruin0x11/elona_server",
"path": "elona.py",
"copies": "1",
"size": "4933",
"license": "bsd-3-clause",
"hash": 7117966210903689000,
"line_mean": 32,
"line_max": 175,
"alpha_frac": 0.5683364255,
"autogenerated": false,
"ratio": 3.152046783625731,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9194466683188911,
"avg_score": 0.005183305187363934,
"num_lines": 147
} |
# all the imports
import sqlite3
from os.path import dirname, abspath
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
# configuration
DATABASE = abspath(dirname(__file__)) + '/rsvp.db'
DEBUG = True
SECRET_KEY = 'Hmm. Should autogen this, right?'
USERNAME = 'admin'
PASSWORD = 'default'
app = Flask(__name__)
app.config.from_object(__name__)
#app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def get_db():
    """Return the app-context SQLite connection, creating it lazily."""
    conn = getattr(g, '_database', None)
    if conn is None:
        conn = g._database = sqlite3.connect(app.config['DATABASE'])
        # Name-based column access for query_db callers.
        conn.row_factory = sqlite3.Row
    return conn
def query_db(query, args=(), one=False):
    """Execute *query* with *args*; return all rows, or the first row
    (None when empty) if one=True."""
    cursor = get_db().execute(query, args)
    rows = cursor.fetchall()
    cursor.close()
    if one:
        return rows[0] if rows else None
    return rows
@app.teardown_appcontext
def close_connection(exception):
    """Close the cached connection at application-context teardown."""
    conn = getattr(g, '_database', None)
    if conn is not None:
        conn.close()
@app.route('/')
def index():
    """Render the RSVP list plus attendance statistics.

    NOTE: render_template is fed **locals(), so every local name here
    (rsvps, present, absent, total) is an implicit template variable —
    renaming any local silently breaks the template.
    """
    rsvps = query_db("SELECT * from rsvp where rsvp=? or rsvp=? order by name ", ('yes', 'waitlist'))
    present = query_db("SELECT count(*) as num from rsvp where present=?", (1,), True)['num']
    absent = query_db("SELECT count(*) as num from rsvp where present<>?", (1,), True)['num']
    total = present + absent
    return render_template('index.html', **locals())
@app.route('/ajax', methods=['POST'])
def ajax():
    """Persist a checkbox toggle: update the `present` flag of one RSVP row."""
    conn = get_db()
    rsvp_id = request.form['id']
    present_flag = request.form['present']
    conn.cursor().execute("UPDATE rsvp set present=? where id=?", (present_flag, rsvp_id))
    conn.commit()
    return "OK"
@app.route('/ajax/stats')
def stats():
    """Return 'present / total' as plain text for the live counter."""
    present = query_db("SELECT count(*) as num from rsvp where present=?", (1,), True)['num']
    absent = query_db("SELECT count(*) as num from rsvp where present<>?", (1,), True)['num']
    total = present + absent
    return "%s / %s" % (present, total)
if __name__ == '__main__':
    app.run()
| {
"repo_name": "simeonf/meetup-checkin",
"path": "rsvp/rsvp.py",
"copies": "1",
"size": "1962",
"license": "apache-2.0",
"hash": -5175704498818030000,
"line_mean": 29.65625,
"line_max": 101,
"alpha_frac": 0.618756371,
"autogenerated": false,
"ratio": 3.3769363166953528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44956926876953524,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
import time
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
# for db initiation
from contextlib import closing
# configuration
DATABASE = '/tmp/portal.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open a new connection to the configured SQLite database."""
    return sqlite3.connect(app.config['DATABASE'])
def init_db():
    """Create the schema from schema.sql on a short-lived connection."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as schema:
            db.cursor().executescript(schema.read())
        db.commit()
# g carries per-request state: open a handle before and close it after each request.
@app.before_request
def before_request():
    """Attach the per-request database handle to g."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request handle if it was opened."""
    handle = getattr(g, 'db', None)
    if handle is None:
        return
    handle.close()
# Request handlers.
@app.route('/')
def show_entries():
    """Render the 100 most recent entries, ordered by date then rank."""
    rows = g.db.execute(
        'select url, title, description, type,rank from entries order by date,rank desc limit 100')
    keys = ('url', 'title', 'description', 'type', 'rank')
    entries = [dict(zip(keys, row)) for row in rows.fetchall()]
    return render_template('show_entries.html', entries=entries)
# Store a new entry, stamped with today's date.
@app.route('/add', methods=['POST'])
def add_entry():
    """Persist a submitted entry; requires login (401 otherwise)."""
    if not session.get('logged_in'):
        abort(401)
    form = request.form
    g.db.execute('insert into entries (url,title,description,type,date) values (?, ?,?,?,?)',
            [form['url'], form['title'], form['description'], form['type'], time.strftime("%Y-%m-%d")])
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
# Login: compare posted credentials with the configured USERNAME/PASSWORD.
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate against app config and set the session flag on success."""
    error = None
    if request.method == 'POST':
        user = request.form['username']
        pwd = request.form['password']
        if user != app.config['USERNAME']:
            error = 'Invalid username'
        elif pwd != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
# Logout: drop the session flag.
@app.route('/logout')
def logout():
    """End the session and return to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
# Script entry point.
if __name__ == '__main__':
    app.run()
| {
"repo_name": "rakeshsingh/trend-in",
"path": "portal/portal.py",
"copies": "1",
"size": "2718",
"license": "mit",
"hash": 8277177270227273000,
"line_mean": 25.1346153846,
"line_max": 140,
"alpha_frac": 0.6309786608,
"autogenerated": false,
"ratio": 3.672972972972973,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4803951633772973,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3
from contextlib import closing
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
# configuration
DATABASE = '/tmp/flaskr.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
# create application
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open and return a connection to the configured SQLite database."""
    db_path = app.config['DATABASE']
    return sqlite3.connect(db_path)
def init_db():
    """Initialise the schema from schema.sql."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as schema:
            db.cursor().executescript(schema.read())
        db.commit()
@app.before_request
def before_request():
    """Open a database handle for the current request."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the request's handle if one was opened."""
    handle = getattr(g, 'db', None)
    if handle is None:
        return
    handle.close()
@app.route('/')
def show_entries():
    """Render all entries, newest first."""
    cursor = g.db.execute('select title, text from entries order by id desc')
    entries = [{'title': t, 'text': body} for t, body in cursor.fetchall()]
    return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
    """Insert a submitted entry for a logged-in user (401 for anonymous)."""
    if not session.get('logged_in'):
        abort(401)
    params = [request.form['title'], request.form['text']]
    g.db.execute('insert into entries (title, text) values (?, ?)', params)
    g.db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate against the configured credentials; start a session on success."""
    error = None
    if request.method == 'POST':
        user = request.form['username']
        pwd = request.form['password']
        if user != app.config['USERNAME']:
            error = 'Invalid username'
        elif pwd != app.config['PASSWORD']:
            error = 'Invalid password'
        else:
            session['logged_in'] = True
            flash('You were logged in')
            return redirect(url_for('show_entries'))
    return render_template('login.html', error=error)
@app.route('/logout')
def logout():
    """Drop the session flag and return to the entry list."""
    session.pop('logged_in', None)
    flash('You were logged out')
    return redirect(url_for('show_entries'))
if __name__ == '__main__':
    app.run()
| {
"repo_name": "drincruz/flask-tutorial-flaskr",
"path": "flaskr.py",
"copies": "1",
"size": "2220",
"license": "mit",
"hash": -6150076342959416000,
"line_mean": 25.1176470588,
"line_max": 74,
"alpha_frac": 0.6189189189,
"autogenerated": false,
"ratio": 3.6097560975609757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47286750164609753,
"avg_score": null,
"num_lines": null
} |
# all the imports
import sqlite3,re,json,ast
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
# configuration
DATABASE = '/home/flavorshare/mysite/flavorshare.db'
DEBUG = True
SECRET_KEY = 'development key'
USERNAME = 'admin'
PASSWORD = 'default'
CId=2
value=1
entries = []
# create our little application :)
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open a connection to the flavorshare SQLite database."""
    return sqlite3.connect(app.config['DATABASE'])
def init_db():
    """Create the schema from schema.sql on a throwaway connection."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as schema:
            db.cursor().executescript(schema.read())
        db.commit()
@app.before_request
def before_request():
    """Open the per-request database handle on g."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request handle if it was opened."""
    handle = getattr(g, 'db', None)
    if handle is None:
        return
    handle.close()
@app.route('/')
def main_page():
    """Landing page: logged-in users go home, others get the login/register choice."""
    if session.get('logged_in'):
        return redirect(url_for('homePage'))
    return render_template('login_or_register.html')
@app.route('/', methods=['POST'])
def login_or_register():
    """Dispatch the landing-page button press to the login or register page."""
    if request.method == 'POST':
        # NOTE: any other button value leaves the target unbound (NameError),
        # matching the original behavior.
        if request.form['login_register'] == "Login":
            target = 'loginPage'
        elif request.form['login_register'] == "Register":
            target = 'registerPage'
        return redirect(url_for(target))
@app.route('/register')
def registerPage():
    """Show the registration form, or go home if already logged in."""
    if session.get('logged_in'):
        return redirect(url_for('homePage'))
    return render_template('register.html')
EMAIL_REGEX = re.compile(r"[^@|\s]+@[^@]+\.[^@|\s]+")
@app.route('/register', methods=['POST'])
def register():
    """Create an account from the registration form.

    On success the user is stored, logged in, and sent home.  On validation
    failure the form is re-rendered with an error message.
    NOTE(review): the password is stored in plain text — should be hashed.
    """
    error = None
    if request.method == 'POST':
        if request.form['register'] == "Register":
            # Passwords must match and the email must look valid.
            if request.form['password'] == request.form['confirm_password'] and EMAIL_REGEX.match(request.form['email']):
                g.db.execute('insert into users (name, email, password) values (?, ?, ?)',
                    [request.form['name'], request.form['email'], request.form['password']])
                g.db.commit()
                session['username'] = request.form['email']
                session['logged_in'] = True
                flash('Successfully Registered')
                return redirect(url_for('homePage'))
            else:
                error = 'Incorrect Details'
                # BUG FIX: redirect() accepts no `error` keyword (the original
                # call raised TypeError) and 'register' is a POST-only endpoint;
                # render the form with the error instead, matching login().
                return render_template('register.html', error=error)
@app.route('/login')
def loginPage():
    """Show the login form, or go home if a session already exists."""
    if session.get('logged_in'):
        return redirect(url_for('homePage'))
    return render_template('login.html')
@app.route('/login', methods=['POST'])
def login():
    """Authenticate a user and start a session.

    SECURITY FIX: the credential lookup is now parameterized — the original
    concatenated raw form input into the SQL string, allowing SQL injection.
    NOTE(review): passwords are still compared in plain text.
    """
    error = None
    if request.method == 'POST':
        if request.form['login'] == "Login":
            # Bound parameters: user input never touches the SQL text.
            cur = g.db.execute('select email, password from users where email = ? and password = ?',
                [request.form['username'], request.form['password']])
            user_detail = [row for row in cur.fetchall()]
            if user_detail:
                flash('Successfully Logged In')
                session['username'] = request.form['username']
                session['logged_in'] = True
                return redirect(url_for('homePage'))
            if not user_detail:
                error = 'Invalid Login Details'
                return render_template('login.html', error=error)
@app.route('/home')
def homePage():
    """Render the home page with the user's display name and a notification flag.

    SECURITY FIX: both queries are now parameterized — the original built
    SQL by concatenating the session email (which originates from user
    input at login), an injection risk.
    """
    if not session.get('logged_in'):
        return render_template('login_or_register.html')
    email = session.get('username')
    cur = g.db.execute('select name from users where email = ?', [email])
    names = [row for row in cur.fetchall()]
    # names[0] raises IndexError if the session email no longer exists,
    # matching the original flow.
    display_name = names[0][0]
    login = True
    cur_count = g.db.execute(
        'select count(*) from notification where mid_assignee in (select mid from users where email = ?)',
        [email])
    for row in cur_count:
        notification = row[0] != 0
    return render_template('home.html', display_name=display_name, login=login, notification=notification)
@app.route('/logout')
def logout():
    """End the session (drop both login flag and username) and go to the landing page."""
    session.pop('logged_in', None)
    session.pop('username', None)
    return redirect(url_for('main_page'))
@app.route('/notification', methods=['GET'])
def notificationPage():
    """List the logged-in user's notifications with assignor and group names.

    NOTE(review): all queries below build SQL by string concatenation of
    session/database values — SQL injection risk; should be parameterized.
    """
    error = None
    if request.method == 'GET':
        cur = g.db.execute('select mid_assignor,gid,description,nid from notification where mid_assignee in (select mid from users where email=\''+ session.get('username') + '\')')
        mids = [row for row in cur.fetchall()]
        notification_list = []
        for row in mids:
            notification = {}
            # Resolve assignor name and group name for this notification row.
            cur_name = g.db.execute('select name from users where mid= \''+ str(row[0]) + '\'')
            cur_group = g.db.execute('select name from groups where gid= \''+ str(row[1]) + '\'')
            for x in cur_name:
                for y in cur_group:
                    notification = [dict(name=str(x[0]),group=str(y[0]),desc=str(row[2]),nid=str(row[3]))]
                    notification_list.append(notification)
        return render_template('notification.html',notification_list=notification_list)
@app.route('/notification', methods=['POST'])
def notification():
    """Delete a notification (if requested) and re-render the remaining list.

    NOTE(review): SQL built by concatenating form/session input — SQL
    injection risk; should be parameterized.
    """
    error = None
    if request.method == 'POST':
        if "delete" in request.form:
            nid = request.form["delete"]
            g.db.execute('delete from notification where nid=\''+ request.form["delete"] + '\'')
            g.db.commit()
        # Re-fetch the user's remaining notifications (same logic as the GET page).
        cur = g.db.execute('select mid_assignor,gid,description,nid from notification where mid_assignee in (select mid from users where email=\''+ session.get('username') + '\')')
        mids = [row for row in cur.fetchall()]
        notification_list = []
        for row in mids:
            notification = {}
            cur_name = g.db.execute('select name from users where mid= \''+ str(row[0]) + '\'')
            cur_group = g.db.execute('select name from groups where gid= \''+ str(row[1]) + '\'')
            for x in cur_name:
                for y in cur_group:
                    notification = [dict(name=str(x[0]),group=str(y[0]),desc=str(row[2]),nid=str(row[3]))]
                    notification_list.append(notification)# = [dict(list=notification)]
        return render_template('notification.html',notification_list=notification_list)
@app.route('/myProfile')
def myProfile():
    """Show the logged-in user's name and email.

    SECURITY FIX: the name lookup is parameterized — the original
    concatenated the session email directly into the SQL string.
    """
    if not session.get('logged_in'):
        return render_template('login_or_register.html')
    user_email = session.get('username')
    cur_users = g.db.execute('select name from users where email = ?', [user_email])
    user_name = [row for row in cur_users.fetchall()]
    # First (and only) matching row, as in the original.
    user_name = user_name[0]
    return render_template('my_profile.html', user_name=user_name, user_email=user_email)
@app.route('/group_listing')
def group_listingPage():
    """List every group the logged-in user is a member of.

    NOTE(review): SQL built by string concatenation — injection risk.
    """
    error = None
    cur_users = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
    mids = [row for row in cur_users.fetchall()]
    # First row: the user's member id.
    mid=mids[0]
    cur_groups = g.db.execute('select name from groups where gid in ( select gid from group_members where mid = \''+ str(mid[0]) + '\')')
    group_names = [row for row in cur_groups.fetchall()]
    return render_template('group_listing.html', group_names=group_names)
@app.route('/group_listing', methods=['GET','POST'])
def group_listing():
    """GET: list groups the user administers.  POST: open a group's summary,
    or jump to the add-group page.

    NOTE(review): Python 2 code (statement-form print); SQL built by string
    concatenation — injection risk; should be parameterized.
    """
    error = None
    if request.method == 'GET':
        cur_users = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
        mids = [row for row in cur_users.fetchall()]
        mid=mids[0]
        # Groups where this user is the administrator.
        cur_groups = g.db.execute('select name from groups where admin_id = \''+ str(mid[0]) + '\'')
        group_names = [row for row in cur_groups.fetchall()]
        return render_template('group_listing.html', group_names=group_names)
    elif request.method == 'POST':
        if 'listing' in request.form:
            print request.form['listing']
        if 'listing' in request.form:
            if request.form['listing'] == "add_group":
                return redirect(url_for('add_group'))
            else:
                # A specific group button was pressed; gather its details.
                group = request.form['listing']
                cur = g.db.execute('select name from users where mid in (select mid from group_members where gid in (select gid from groups where name =\"' + group+ '\"))')
                g.db.commit()
                # NOTE(review): this query is executed twice back-to-back —
                # the first call appears redundant.
                cur_details=g.db.execute('select description,venue,eventdate from groups where gid in (select gid from groups where name =\"' + group+ '\")')
                cur_details=g.db.execute('select description,venue,eventdate from groups where gid in (select gid from groups where name =\"' + group+ '\")')
                mids = [row for row in cur_details.fetchall()]
                mid=mids[0]
                groups = [dict(gname=group)]
                names = [dict(name=row[0]) for row in cur.fetchall()]
                desc=[dict(desc=row[0]) for row in mids]
                venue=[dict(venue=row[1]) for row in mids]
                eventdate=[dict(eventdate=row[2]) for row in mids]
                # Locals above are computed but unused here; the summary page
                # recomputes them from the 'groups' query parameter.
                return redirect(url_for('group_summary_init',groups=group))
@app.route('/add_group', methods=['GET','POST'])
def add_group():
    """GET: show the add-group form.  POST: create the group with the
    current user as admin, then proceed to member entry.

    NOTE(review): Python 2 code; the insert interpolates the admin id with
    '%d' % mid (mid is a 1-tuple, so this works) while binding the rest —
    mixed parameterization; should use ? throughout.
    """
    error = None
    if request.method == 'GET':
        return render_template('add_group.html')
    elif request.method == 'POST':
        if request.form['group_members'] == "Next":
            cur = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
            mids = [row for row in cur.fetchall()]
            mid=mids[0]
            # Remember the new group's name for the member-entry flow.
            session['grpname']=request.form['name']
            print session['grpname']
            g.db.execute('insert into groups (name,admin_id, description, venue, eventdate) values (?,%d,?,?,?)'%mid,[ request.form['name'],request.form['description'],request.form['venue'],request.form['eventdate'] ])
            g.db.commit()
            return redirect(url_for('group_membersPage'))
        else:
            flash('Try Again')
            return redirect(url_for('add_group'))
@app.route('/group_members_summary')
def group_members_summaryPage():
    """Show the member-entry summary form for the group in session['grpname']."""
    error = None
    # Python 2 debug print; relies on session['grpname'] being set earlier.
    print session['grpname']
    return render_template('group_members_summary.html')
@app.route('/group_members_summary', methods=['POST'])
def group_members_summary():
    """Insert the submitted member emails into the current group.

    NOTE(review): SQL built by string concatenation — injection risk.
    """
    error = None
    print session['grpname']
    if request.method == 'POST':
        if request.form['display_group_members'] == "next":
            number_of_members=int(request.form['number_members'])
            # Form fields are named email1..emailN.
            for i in range(1,(number_of_members+1)) :
                f = "email{0}".format(i)
                g.db.execute('insert into group_members(mid,gid) values ((select mid from users where email=\"' + request.form[f]+ '\") ,(select gid from groups where name=\"' + session['grpname']+ '\"))')
            g.db.commit()
            print "go to hell"
            flash('Group Members Added Successfully')
            # NOTE(review): reads session['gname'] although this flow sets
            # session['grpname'] — looks like a KeyError if group_summary_init
            # has not run first; confirm whether 'grpname' was intended.
            return redirect(url_for('group_summary_init',groups=session['gname']))
@app.route('/group_members')
def group_membersPage():
    """Add the group's admin as its first member, then show the member form.

    NOTE(review): SQL built by string concatenation — injection risk.
    """
    error = None
    print session['grpname']
    g.db.execute('insert into group_members(mid,gid) values ((select mid from users where mid in (select admin_id from groups where gid in(select gid from groups where name=\"' + session['grpname']+ '\"))) ,(select gid from groups where name=\"' + session['grpname']+ '\"))')
    g.db.commit()
    return render_template('group_members.html')
@app.route('/group_members', methods=['POST'])
def group_members():
    """Insert submitted member emails and create a notification for each.

    NOTE(review): SQL built by string concatenation — injection risk.
    """
    error = None
    print session['grpname']
    if request.method == 'POST':
        if request.form['display_group_members'] == "next":
            number_of_members=int(request.form['number_members'])
            # Form fields are named email1..emailN; each new member also
            # receives an "added to a group" notification from the admin.
            for i in range(1,(number_of_members+1)) :
                f = "email{0}".format(i)
                g.db.execute('insert into group_members(mid,gid) values ((select mid from users where email=\"' + request.form[f]+ '\") ,(select gid from groups where name=\"' + session['grpname']+ '\"))')
                g.db.execute('insert into notification(mid_assignee,mid_assignor,gid,description) values ((select mid from users where email=\"' + request.form[f]+ '\") ,(select mid from users where email = \''+ session.get('username') + '\'),(select gid from groups where name=\"' + session['grpname']+ '\"),("You have been added to a group!!"))')
            g.db.commit()
            print "go to hell"
            flash('Group Members Added Successfully')
            return redirect(url_for('display_group_membersPage'))
@app.route('/display_group_members')
def display_group_membersPage():
    """Show the names of everyone in the group currently held in session['grpname'].

    NOTE(review): SQL built by string concatenation — injection risk.
    """
    error = None
    # Always true for this GET-only route; kept for byte fidelity.
    if request.method == 'GET':
        cur = g.db.execute('select name from users where mid in (select mid from group_members where gid in (select gid from groups where name =\"' + session['grpname']+ '\"))')
        # commit() after a SELECT is a no-op; preserved as-is.
        g.db.commit()
        entries = [dict(name=row[0]) for row in cur.fetchall()]
        return render_template('display_group_members.html', entries=entries)
@app.route('/display_group_members', methods=['POST'])
def display_group_members():
    """Route the member-list buttons: add more members, or move on to group config."""
    if request.method == 'POST':
        choice = request.form['redirect_to']
        if choice == "add_more":
            return render_template('group_members.html')
        elif choice == "next":
            return redirect(url_for('group_configPage'))
@app.route('/group_summary')
def group_summary_init():
    """Build the group-summary page for the group named in ?groups=...

    Collects member names, group details, per-category item counts and the
    category->recipes mapping, then renders the admin or normal-member
    template depending on whether the viewer is the group's admin.

    NOTE(review): every query below builds SQL by string concatenation from
    request/session values -- SQL injection risk; should be parameterized.
    """
    error = None
    group = request.args['groups']
    print group
    # Remember the selected group for the POST handler (group_summary).
    session['gname'] = group
    cur = g.db.execute('select name from users where mid in (select mid from group_members where gid in (select gid from groups where name =\"' + group+ '\"))')
    names = [dict(name=row[0]) for row in cur.fetchall()]
    g.db.commit()
    # Group metadata: description, venue and event date.
    cur_details=g.db.execute('select description,venue,eventdate from groups where gid in (select gid from groups where name =\"' + group+ '\")')
    mids = [row for row in cur_details.fetchall()]
    mid=mids[0]
    desc=[dict(desc=row[0]) for row in mids]
    venue=[dict(venue=row[1]) for row in mids]
    eventdate=[dict(eventdate=row[2]) for row in mids]
    groups = [dict(gname=group)]
    # Category name -> requested item count (only categories with items).
    category_details=g.db.execute('select category.name,group_category.no_of_items from category,group_category where category.cid=group_category.cid and group_category.gid in (select gid from groups where name=\"' + group + '\") and group_category.no_of_items>0')
    categories = [row for row in category_details.fetchall()]
    cat_name={row[0]:row[1] for row in categories}
    print cat_name
    # Category name -> list of recipe names chosen for this group; cids
    # 307-310 are excluded (presumably non-recipe categories -- TODO confirm).
    category_recipe_details=g.db.execute('select category.name,recipes.name from category,group_category_recipes,recipes where category.cid=group_category_recipes.cid and recipes.rid=group_category_recipes.rid and gid in (select gid from groups where name=\"' + group+ '\") and category.cid not in (307,308,309,310)')
    category_recipe = [row for row in category_recipe_details.fetchall()]
    category_recipe_list={}
    for item in category_recipe:
        if item[0] in category_recipe_list:
            category_recipe_list[item[0]].append(item[1])
        else :
            category_recipe_list[item[0]]=[item[1]]
    # Resolve the viewer's member id and the group admin's id and name.
    cur_user = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
    user_id = [row for row in cur_user.fetchall()]
    user_id = user_id[0]
    cur_admin = g.db.execute('select admin_id from groups where gid in (select gid from groups where name =\"' + group+ '\")')
    admin_id = [row for row in cur_admin.fetchall()]
    admin_id = admin_id[0]
    cur_admin_name = g.db.execute('select name from users where mid = \''+ str(admin_id[0]) + '\'')
    admin_name = [row for row in cur_admin_name.fetchall()]
    admin_name = admin_name[0]
    a_name = [dict(aname=admin_name[0])]
    print admin_id
    print user_id
    # Admins get the editable template; other members get the read-only one.
    if admin_id[0] == user_id[0]:
        print "In admin_id==user_id"
        return render_template('group_summary.html',groups=groups,names=names,desc=desc,venue=venue,eventdate=eventdate,cat_name=cat_name,category_recipe_list=category_recipe_list,a_name=a_name)
    else:
        return render_template('group_summary_normal.html',groups=groups,names=names,desc=desc,venue=venue,eventdate=eventdate,cat_name=cat_name,category_recipe_list=category_recipe_list,a_name=a_name)
@app.route('/group_summary', methods=['POST'])
def group_summary():
    """Handle actions posted from the group-summary page.

    The form carries exactly one of: 'member' (remove that member),
    'edit', 'remove_recipe' (delete the checked recipes), 'done', or
    'addrecipe' (show the add-recipe form for categories that still have
    unfilled slots).  The acting group comes from session['gname'].

    NOTE(review): all SQL below is built by string concatenation from
    form/session values -- SQL injection risk; should be parameterized.
    """
    error = None
    if request.method == 'POST':
        group = session['gname']
        print group
        #print request.form['remove_recipe']
        if 'member' in request.form:
            # Remove the named member from this group.
            print request.form['member']
            memberName = request.form['member']
            g.db.execute('delete from group_members where gid in (select gid from groups where name=\"' + group+ '\") and mid in ((select mid from users where name=\"' + memberName+ '\"))')
            g.db.commit()
            flash('Group Member Deleted Successfully')
            return redirect(url_for('group_summary_init',groups=session['gname']))
        elif 'edit' in request.form:
            print "In edit"
            return redirect(url_for('group_members_summaryPage'))
        elif 'remove_recipe' in request.form:
            # Delete every recipe whose checkbox was ticked.
            print "Akshay!!!"
            checked_recipes = request.form.getlist('checkbox-recipe')
            print checked_recipes
            for recipe in checked_recipes :
                g.db.execute('delete from group_category_recipes where gid in (select gid from groups where name=\"' + group+ '\") and rid in ((select rid from recipes where name=\"' + recipe+ '\"))')
                g.db.commit()
            flash('Recipes Deleted Successfully')
            return redirect(url_for('group_summary_init',groups=session['gname']))
        elif 'done' in request.form:
            return redirect(url_for('group_listingPage'))
        elif 'addrecipe' in request.form:
            # Categories that still have open slots: requested count minus
            # recipes already chosen (LEFT JOIN so zero-chosen counts too).
            cur_category_name = g.db.execute('select name from category where cid in (select A.cid from (select cid,no_of_items from group_category where cid not in (307,308,309,310) and gid in (select gid from groups where name=\"' + group+ '\")) A LEFT OUTER JOIN (select cid, count(rid) as C from group_category_recipes where gid in (select gid from groups where name=\"' + group+ '\") group by cid) B ON A.cid=B.cid where (A.no_of_items - ifnull(B.C,0)) > 0 )')
            category = [row for row in cur_category_name.fetchall()]
            recipe_list = {}
            # Map each open category to its available recipe names.
            for name in category:
                print "hello i am here"
                print name
                cur_recipe = g.db.execute('select name from recipes where cid in (select cid from category where name=\''+str(name[0])+'\')')
                recipe_name = [row for row in cur_recipe.fetchall()]
                recipes = [recipe[0] for recipe in recipe_name]
                recipe_list[name[0]] = recipes
            print recipe_list
            # JSON copy is consumed by client-side script in the template.
            jsondump = json.dumps(recipe_list)
            print jsondump
            #print recipe
            return render_template('add_recipe.html',category=category,jsondump=jsondump,recipe_list=recipe_list)
@app.route('/add_recipe')
def add_recipePage():
    """Serve the blank add-recipe form."""
    error = None
    template = 'add_recipe.html'
    return render_template(template)
@app.route('/add_recipe', methods=['POST'])
def add_recipe():
    """Attach the selected recipe to the session group under a category.

    Resolves four ids (current user, group, category, recipe) by name and
    inserts a group_category_recipes row, then bounces back to the summary.

    NOTE(review): the id-lookup SQL concatenates raw form/session values --
    SQL injection risk; should be parameterized.
    """
    error = None
    if request.method == 'POST':
        if request.form['add_recipe'] == "save":
            category_name = request.form['select-group']
            recipe_name = request.form['select-members']
            print "In add recipe"
            print category_name
            print recipe_name
            group = session['gname']
            # Current user's member id.
            cur = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
            mids = [row for row in cur.fetchall()]
            mid=mids[0]
            print mid[0]
            # Group, category and recipe ids resolved from their names.
            cur_gid=g.db.execute('select gid from groups where name = \''+group + '\'')
            gids = [row for row in cur_gid.fetchall()]
            gid=gids[0]
            print gid[0]
            cur_cid=g.db.execute('select cid from category where name = \''+category_name + '\'')
            cids = [row for row in cur_cid.fetchall()]
            cid=cids[0]
            print cid[0]
            cur_rid=g.db.execute('select rid from recipes where name = \''+recipe_name + '\'')
            rids = [row for row in cur_rid.fetchall()]
            rid=rids[0]
            print rid[0]
            g.db.execute('insert into group_category_recipes(gid,cid,rid,mid) values('+str(gid[0])+','+str(cid[0])+','+str(rid[0])+',' + str(mid[0])+')')
            g.db.commit()
            flash('Recipe Added Successfully')
            return redirect(url_for('group_summary_init',groups=session['gname']))
            #return redirect(url_for('homePage'))
@app.route('/group_config')
def group_configPage():
error = None
if request.method == 'GET':
groups = [dict(gname=session['grpname'])]
print groups
return render_template('group_config.html',groups=groups)
@app.route('/group_config', methods=['POST'])
def group_config():
    """Persist the per-category item counts for the group being created.

    Category ids 301-310 map to form fields "category301".."category310".
    Fixes: SQL is now parameterized (the original concatenated raw form
    input), the obscure "{000}" format field is written as "{0}" (same
    output), and the ten inserts commit once so they succeed together.
    """
    error = None
    if request.method == 'POST':
        if request.form['finish_group'] == "save":
            for i in range(301, 311):
                field = "category{0}".format(i)
                g.db.execute(
                    'insert into group_category(gid,cid,no_of_items) values '
                    '((select gid from groups where name = ?), ?, ?)',
                    (session['grpname'], i, request.form[field]))
            g.db.commit()
            flash('Group Created Successfully')
            return redirect(url_for('homePage'))
@app.route('/saved_recipes')
def savedRecipesPage():
    """List the recipes the logged-in user has attached to any group.

    Fix: both lookups now use parameterized SQL instead of concatenating
    the session email / member id into the query text.
    """
    error = None
    cur = g.db.execute('select mid from users where email = ?',
                       (session.get('username'),))
    mids = [row for row in cur.fetchall()]
    mid = mids[0]
    cur_recipe = g.db.execute(
        'select name from recipes where rid in '
        '(select rid from group_category_recipes where mid = ?)',
        (mid[0],))
    recipe_names = [row for row in cur_recipe.fetchall()]
    return render_template('saved_recipes.html', recipe_names=recipe_names)
@app.route('/recipe/<recipe_name>')
def recipe(recipe_name):
error = None
print "In recipe/ page"
cur = g.db.execute('select * from recipes where name = \''+ recipe_name + '\'')
recipe_details = [row for row in cur.fetchall()]
recipe_details = recipe_details[0]
rid = recipe_details[0]
cid = recipe_details[1]
rating = recipe_details[4]
cook_time = recipe_details[5]
servings = recipe_details[6]
instructions = recipe_details[3]
cur_ingredients = g.db.execute('select name,quantity from ingredients,recipe_ingredients where rid = ' + str(rid) + ' and recipe_ingredients.iid = ingredients.iid')
ingredient_list = [row for row in cur_ingredients.fetchall()]
return render_template('recipe.html',recipe_name = recipe_name, rating=rating, cook_time=cook_time, servings=servings, instructions=instructions,ingredient_list=ingredient_list)
@app.route('/recipe/<recipe_name>', methods=['POST'])
def recipePost(recipe_name):
    """Save the checked ingredients to the user's bag, or start sharing.

    'Save' inserts one my_saved_bag row per checked ingredient; 'Share'
    builds a group -> other-members mapping and renders the share form.

    NOTE(review): the SQL below concatenates raw URL/session values --
    SQL injection risk; should be parameterized.
    """
    error = None
    print "In post of recipe"
    if request.method == 'POST':
        # Checked ingredient names from the recipe page's checkboxes.
        value = request.form.getlist('ingredients')
        print "checkbox values"
        print value
        cur_users = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
        mids = [row for row in cur_users.fetchall()]
        print mids
        mid=mids[0]
        print mid[0]
        cur_recipe = g.db.execute('select rid from recipes where name =\'' + recipe_name+ '\'')
        print cur_recipe
        rids = [row for row in cur_recipe.fetchall()]
        print rids
        rid=rids[0]
        if request.form['save_or_share'] == "Save":
            print "In Save of recipe"
            print "recipe id"
            print rid[0]
            # One row (and commit) per checked ingredient.
            for i in value:
                g.db.execute('insert into my_saved_bag(mid,rid,ingredient) values('+str(mid[0])+','+str(rid[0])+ ',' +'\"'+i+'\")')
                g.db.commit()
            flash('Ingredients Saved to My Bag')
            return redirect(url_for('showBag'))
        elif request.form['save_or_share'] == "Share":
            print "In Share of recipe"
            # Every group the user belongs to, mapped to its OTHER members.
            cur_group_names = g.db.execute('select name from groups where gid in(select gid from group_members where mid = ' + str(mid[0])+')')
            group_names = [row for row in cur_group_names.fetchall()]
            print group_names
            group_list = {}
            for name in group_names:
                print name[0]
                cur_group_members = g.db.execute('select name from users where mid != ' + str(mid[0]) + ' and mid in(select mid from group_members where gid =(select gid from groups where name=\''+str(name[0])+'\'))' )
                member_name = [row for row in cur_group_members.fetchall()]
                print member_name
                member_names=[ member[0] for member in member_name]
                group_list[name[0]]=member_names
            print group_list
            # JSON copy for the client-side member picker.
            jsonGroupList = json.dumps(group_list)
            return render_template('share_ingredients.html', ingredients = value, group_list=group_list, jsonGroupList = jsonGroupList, recipe_name=recipe_name )
@app.route('/share', methods=['POST'])
def share():
    """Share a list of ingredients with one member of one of the user's
    groups, inserting a shared-bag row per ingredient plus a notification.

    The posted 'ingredients' field is a Python-literal list string and is
    decoded with ast.literal_eval (safe, literals only).

    NOTE(review): the id-lookup and insert SQL concatenates raw form
    values -- SQL injection risk; should be parameterized.
    """
    error = None
    print 'In share'
    print request.form
    ingredient_list = request.form['ingredients']
    group_name = request.form['select-group']
    group_member = request.form['select-members']
    recipe_name = request.form['recipe_name']
    print ingredient_list
    print group_name
    print group_member
    # Decode the stringified list, then trim whitespace per ingredient.
    ingredients_list1=ast.literal_eval(ingredient_list)
    ingredients_list1 = [i.strip() for i in ingredients_list1]
    print ingredients_list1
    # Resolve ids: sharer (mid), recipe (rid), group (gid), recipient.
    cur_users = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
    mids = [row for row in cur_users.fetchall()]
    print mids
    mid=mids[0]
    print mid[0]
    cur_recipe = g.db.execute('select rid from recipes where name =\'' + recipe_name+ '\'')
    print cur_recipe
    rids = [row for row in cur_recipe.fetchall()]
    print rids
    rid=rids[0]
    print "recipe id"
    print rid[0]
    cur_group = g.db.execute('select gid from groups where name = \''+ group_name + '\'')
    gids = [row for row in cur_group.fetchall()]
    print gids
    gid=gids[0]
    print "group id"
    print gid[0]
    cur_users1 = g.db.execute('select mid from users where name = \''+ group_member + '\'')
    mids1 = [row for row in cur_users1.fetchall()]
    print mids1
    mid_assignee=mids1[0]
    print "assignee"
    print mid_assignee[0]
    # One shared-bag row and one notification row per ingredient.
    for i in ingredients_list1:
        #print i
        g.db.execute('insert into my_shared_bag(mid_assignee,mid_assignor,rid,gid,ingredient) values('+str(mid_assignee[0])+','+str(mid[0])+','+str(rid[0])+',' + str(gid[0])+ ',' +'\"'+i+'\")')
        g.db.execute('insert into notification(mid_assignee,mid_assignor,gid,description) values('+str(mid_assignee[0])+','+str(mid[0])+',' + str(gid[0])+ ',' +'\"A bag has been shared with you!!!")')
    g.db.commit()
    flash('Ingredients Shared Successfully')
    return redirect(url_for('homePage'))
@app.route('/showBag')
def showBag():
    """Render the user's bag: their own saved ingredients plus ingredients
    other members shared with them (annotated with group and sharer name).

    NOTE(review): the SQL concatenates values into the query text; the
    user-lookup uses the raw session email -- should be parameterized.
    """
    error = None
    print "IN SHOWBAG"
    cur_users = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
    mids = [row for row in cur_users.fetchall()]
    mid=mids[0]
    # Ingredients the user saved for themselves.
    cur_saved_bag = g.db.execute('select ingredient from my_saved_bag where mid = ' + str(mid[0]))
    saved_bag = [row[0] for row in cur_saved_bag.fetchall()]
    # Ingredients shared TO this user; each row is (assignor mid, gid, ingredient).
    cur_shared_bag = g.db.execute('select mid_assignor,gid,ingredient from my_shared_bag where mid_assignee =' + str(mid[0]))
    temp_shared_bag = [row for row in cur_shared_bag.fetchall()]
    shared_bag = []
    # Resolve gid -> group name and assignor mid -> member name per row
    # (one extra query pair per shared row).
    for row in temp_shared_bag:
        print row
        cur_group_name = g.db.execute('select name from groups where gid = ' + str(row[1]))
        group_name = [row1[0] for row1 in cur_group_name.fetchall() ]
        group_name = group_name[0]
        print group_name
        cur_member_name = g.db.execute('select name from users where mid = ' + str(row[0]))
        member_name = [row2[0] for row2 in cur_member_name.fetchall() ]
        member_name = member_name[0]
        print member_name
        shared_bag.append((row[2], group_name, member_name))
    return render_template('showBag.html', saved_bag = saved_bag,shared_bag = shared_bag)
@app.route('/showBag', methods=['POST'])
def showBagPost():
error = None
print "IN SHOWBAG POST"
if request.method == 'POST':
group = session['gname']
print group
#print request.form['remove_recipe']
if 'saved_ingredient' in request.form:
print "In saved_ingredient"
cur_user = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
mid = [row for row in cur_user.fetchall()]
mid=mid[0]
ingredient=request.form['saved_ingredient']
print mid[0]
print ingredient
g.db.execute('delete from my_saved_bag where mid=' + str(mid[0]) + ' and ingredient = \'' + ingredient + '\'' )
g.db.commit()
return redirect(url_for('showBag'))
elif 'shared_ingredient' in request.form:
print "In shared_ingredient"
cur_user = g.db.execute('select mid from users where email = \''+ session.get('username') + '\'')
mid = [row for row in cur_user.fetchall()]
mid=mid[0]
ingredient=request.form['shared_ingredient']
print mid[0]
print ingredient
g.db.execute('delete from my_shared_bag where mid_assignee=' + str(mid[0]) + ' and ingredient = \'' + ingredient + '\'' )
g.db.commit()
return redirect(url_for('showBag'))
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0') | {
"repo_name": "anshikam/FlavorShare",
"path": "app.py",
"copies": "1",
"size": "30292",
"license": "mit",
"hash": 2460550007392793000,
"line_mean": 42.1524216524,
"line_max": 457,
"alpha_frac": 0.6284497557,
"autogenerated": false,
"ratio": 3.4442296759522457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9476042368735378,
"avg_score": 0.01932741258337367,
"num_lines": 702
} |
# all the imports
import sys
import json
import socket
import errno
from flask import session, redirect, url_for, render_template, flash, request, jsonify
from flask.ext.login import login_required, login_user, logout_user, current_user
from ops import app, facebook, twitter, mongo
from auth import User
import time
from mock_data import jobs_data, updates
import db
import xmlrpclib
import techwriting
running_jobs = []
dist_proxy = xmlrpclib.ServerProxy('http://localhost:8090')
@app.route('/techwriting')
def tech_writing():
    """Render the tech-writing decision matrix, coloring every cell by its
    score normalized against the column weight (red = low, green = high)."""
    weights = [30, 25, 20, 10, 7, 5, 3]
    # Color bands over t = score / weight as (lower, upper, hex) triples.
    # Anything outside every band (t >= 1, or t < 0) falls through to the
    # brightest green, exactly as the original if/elif ladder did.
    bands = [
        (.0, .1, 'ff5555'),
        (.1, .2, 'ff6666'),
        (.2, .3, 'ff7777'),
        (.3, .4, 'ff9999'),
        (.4, .5, 'ffbbbb'),
        (.5, .6, 'dddddd'),
        (.6, .7, 'ccffcc'),
        (.7, .8, '99ff99'),
        (.8, .9, '66ff66'),
        (.9, 1., '33ff33'),
    ]
    cellcolors = []
    for row in techwriting.breakdown['rows']:
        colored_row = []
        for col, score in enumerate(row):
            t = score / float(weights[col])
            for lo, hi, color in bands:
                if lo <= t < hi:
                    colored_row.append(color)
                    break
            else:
                colored_row.append('00ff00')
        cellcolors.append(colored_row)
    return render_template('techwriting.html',
                           enumerate=enumerate,
                           criteria=techwriting.criteria,
                           ranking=techwriting.ranking,
                           breakdown=techwriting.breakdown,
                           proscons=techwriting.proscons,
                           algorithm=techwriting.algorithm,
                           specs=techwriting.specs,
                           cellcolors=cellcolors,
                           weights=weights)
.9
.7
.1
.4
.2
@app.route('/')
def show_landing():
    """Serve the public landing page with its background image enabled."""
    template = 'landing.html'
    return render_template(template, showbg=True)
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Send authenticated users to their home page; route everyone else
    through the Twitter OAuth flow."""
    if not current_user.is_authenticated():
        return redirect(url_for('twitter_login'))
    return redirect(url_for('home', username=current_user.username))
@app.route('/logout')
@login_required
def logout():
    """Terminate the session and return to the public landing page."""
    logout_user()
    landing = url_for('show_landing')
    return redirect(landing)
@app.route('/progress', methods=['POST'])
def progress():
    """Report per-job task progress for every tracked running job."""
    report = [{'jid': job['id'], 'progress': get_job_progress(job['id'])}
              for job in running_jobs]
    return jsonify(jobs_progress=report)
def get_job_progress(job_id):
    """Summarize one job's task states via the dist_proxy XMLRPC API.

    Returns a dict with 'active', 'finished', 'error' counts and the
    job's 'total' task count.
    """
    statuses = dist_proxy.system.getTaskStatuses(job_id)
    summary = {label: statuses.count(state)
               for label, state in (('active', 'EXECUTING'),
                                    ('finished', 'COMPLETE'),
                                    ('error', 'FAILED'))}
    summary['total'] = dist_proxy.system.getTotalTasks(job_id)
    return summary
@app.route('/submit_job', methods=['POST'])
def submit_job():
    """Submit a randomized test job to the driver and start tracking it.

    On connection failure, reports a friendly message for a refused
    connection and the raw error otherwise.
    """
    job_name = request.form['job_name']
    try:
        new_id = dist_proxy.system.submitRandomizedTestJob("oats", 10, 10)
    except socket.error:
        etype, evalue, etrace = sys.exc_info()
        if evalue.errno == errno.ECONNREFUSED:
            error = 'The Job driver XMLRPC server must not be running.'
        else:
            error = str(evalue)
        return jsonify(job_doc=None, error=error)
    job_doc = {'id': new_id, 'name': job_name}
    running_jobs.append(job_doc)
    return jsonify(job_doc=job_doc)
@app.route('/kill_job', methods=['POST'])
def kill_job():
job_id = request.form['job_id']
try:
toremove = None
for i in running_jobs:
if i['id'] == job_id:
toremove = i
break
if toremove:
running_jobs.remove(toremove)
dist_proxy.system.cancelJob(job_id)
return jsonify(nailedit=True)
except:
etype, evalue, etrace = sys.exc_info()
print 'caught unhandled exception: %s' % evalue
return jsonify(nailedit=False, error=evalue)
@app.route('/signup', methods=['GET', 'POST'])
def sign_up():
    """Show the signup form; on POST create the participant and log in."""
    if current_user.is_authenticated():
        return redirect(url_for('home'))
    if request.method != 'POST':
        return render_template('signup.html')
    new_user = User(request.form['username'], request.form['name'])
    mongo.db.participants.insert(new_user.save_participant())
    login_user(new_user)
    return redirect(url_for('home', username=new_user.username))
@app.route('/docs')
def docs():
    """Placeholder documentation endpoint."""
    body = 'Docs'
    return body
@app.route('/fetchitems', methods=['POST'])
def fetch_items():
item_type = request.form.get('item_type')
query = request.form.get('query')
if query:
query = json.loads(query)
query = query or {}
username = request.form.get('username') or query.get('username') or current_user.username
item_data = find_items_by_type(item_type, q=query)
print 'fetching items for "%s"' % item_type, item_data
return jsonify(item_data=item_data,
username=username,
html=render_template('items.html', item_type=item_type, items=item_data))
def find_items_by_type(item_type, q=None):
item_map = {}
item_map['friends'] = lambda q: contributors(current_user.username, q or {})
item_map['friend_suggestions'] = lambda q: suggest_friends(current_user.username, q or {})
item_map['jobs'] = get_active_jobs
item_map['friends_results'] = search_friends
item_map['friends_suggestions_results'] = search_participants
item_map['user_updates'] = lambda q: updates['user_updates']
item_map['user_updates'] = lambda q: updates['user_updates']
item_map['user_action_items'] = lambda q: updates['user_action_items']
item_map['community_updates'] = lambda q: updates['community_updates']
try:
return list(item_map[item_type](q))
except KeyError:
etype, evalue, trace = sys.exc_info()
print 'recieved bad item_type "%s": got %s' % (item_type, evalue)
return []
def search_friends(q):
    """Case-insensitively filter the current user's contributors by
    q['name'] and return their full user documents."""
    me = db.find_user(current_user.username)
    needle = q['name'].lower()
    matches = []
    for username in me['contributors']:
        friend = db.find_user(username, strip_id=True)
        if needle in friend['name'].lower():
            matches.append(friend)
    return matches
def search_participants(q):
    """Case-insensitively filter all participants by q['name']."""
    needle = q['name'].lower()
    return [p for p in db.find_participants()
            if needle in p['name'].lower()]
def contributors(username, q=None):
ret = []
for f in db.find_user(username, q)['contributors']:
f_doc = db.find_user(f, strip_id=True)
if f_doc:
ret.append(f_doc)
else:
print 'could not find', f
return ret
def get_active_jobs(q=None):
    """Return the in-memory list of running jobs; *q* is accepted for
    dispatch-table compatibility but unused.

    Fix: the original declared a mutable default argument (``q={}``).
    """
    return running_jobs
@app.route('/user_prefs/<username>')
@login_required
def user_prefs(username):
    """Return a user's preference document (or a blank template for
    unknown users) together with its rendered HTML."""
    doc = db.find_user(username, strip_id=True)
    if not doc:
        doc = db.base_user_template(username)
    return jsonify(user_doc=doc,
                   html=render_template('user_prefs.html', user_doc=doc))
@app.route('/computers/<username>')
@login_required
def computers(username):
    """Return the user's computer list as JSON plus its rendered HTML."""
    machines = db.find_user(username)['computers']
    return jsonify(computers=machines,
                   html=render_template('computers.html', computers=machines))
@app.route('/fetch_user_computers/<username>')
def fetch_user_computers(username):
print '>>', username
user = db.find_user(username)
print user['computers']
if user:
return jsonify(html=render_template('items.html', items=user['computers'], item_type='computers'),
item_data=user['computers'])
else:
return jsonify(html='No items found.',
item_data=[])
@app.route('/deleteitem', methods=['POST'])
def delete_item():
    """Mark one item of the posted type invisible and re-render its pane.

    NOTE(review): ``items`` is not defined anywhere in this module -- this
    handler raises NameError as written; presumably a leftover mock data
    store (cf. mock_data import) -- TODO confirm and wire to the database.
    """
    item_id = request.form['item_id']
    item_type = request.form['item_type']
    pane_id = request.form['pane_id']
    # This will be much simpler with a call to the database using the item id.
    for item in items[item_type]:
        if item['id'] == int(item_id):
            item['visible'] = False
            break
    return render_template('items.html', items=items[item_type],
                           pane_id=pane_id,
                           item_type=item_type)
@app.route('/jobs/<username>')
@login_required
def jobs(username):
    """Render the jobs page with counts from the in-memory job list."""
    job_count = len(running_jobs)
    return render_template('jobs.html',
                           username=username,
                           num_jobs=job_count,
                           has_running_jobs=job_count > 0)
@app.route('/home/<username>')
@login_required
def home(username):
    """Render the signed-in home page for *username*."""
    context = {'username': username}
    return render_template('home.html', **context)
@app.route('/downloads/<username>')
@login_required
def downloads(username):
    """Render the downloads page for *username*."""
    context = {'username': username}
    return render_template('downloads.html', **context)
@app.route('/settings/<username>')
@login_required
def settings(username):
    """Render the settings page for *username*."""
    context = {'username': username}
    return render_template('settings.html', **context)
@app.route('/friends/<username>')
@login_required
def friends(username):
print 'friends(%s)' % username
return render_template('friends.html', username=username)
@app.route('/add_friend', methods=['POST'])
@login_required
def add_friend():
    """Add the posted friend_username to the current user's contributors."""
    target = request.form['friend_username']
    return manage_friend(target, add=True)
@app.route('/remove_friend', methods=['POST'])
@login_required
def remove_friend():
    """Remove the posted friend_username from the current user's contributors."""
    target = request.form['friend_username']
    return manage_friend(target, remove=True)
def manage_friend(friend_username, add=False, remove=False):
print 'add_friend(%s)' % friend_username
user = db.Participant(current_user.username, load=True)
if db.find_user(friend_username):
try:
if add:
user['contributors'].append(friend_username)
elif remove:
user['contributors'].pop(user['contributors'].index(friend_username))
user.save()
except:
print 'unknown error occured when managing friend'
return jsonify(nailedit=False)
return jsonify(nailedit=True)
else:
return jsonify(nailedit=False)
@app.route('/request_friend', methods=['POST'])
@login_required
def request_friend():
    """Record a friend request from the current user to friend_username.

    Fix: the original returned None, which Flask rejects with a 500;
    success is now reported as JSON like the other friend endpoints.
    """
    friend_username = request.form['friend_username']
    db.FriendRequest(current_user.username, friend_username).insert()
    return jsonify(nailedit=True)
def suggest_friends(username, q={}):
'''returns 2nd degree connections as suggestions or
all participants if no friends are found'''
user = db.find_user(username)
if not user:
print 'could not find user', username
return []
ret = []
if len(user['contributors']) == 0:
return find_non_friends(user)
for cname in user['contributors']:
c = db.find_user(cname)
if c:
for suggestion in [db.find_user(i, strip_id=True) for i in c['contributors']]:
if suggestion and not suggestion['username'] in user['contributors']:
ret.append(suggestion)
if len(ret) == 0:
return find_non_friends(user)
return ret
def find_non_friends(user):
    """Return every other participant who is not already a contributor
    of *user*."""
    others = db.find_participants({'username': {'$ne': user['username']}})
    return [candidate for candidate in list(others)
            if candidate['username'] not in user['contributors']]
@app.route('/facebook')
def facebook_login():
    """Kick off the Facebook OAuth flow, preserving the 'next' target."""
    next_url = request.args.get('next')
    return facebook.authorize(callback=url_for('facebook_auth', next=next_url))
@app.route('/facebook_auth')
@facebook.authorized_handler
def facebook_auth(resp):
    """OAuth callback for Facebook: create/login the user, then redirect.

    NOTE(review): ``users`` (appended to below) is not defined in this
    module -- NameError at runtime; presumably a leftover in-memory user
    list -- TODO confirm.  The resp keys ('screen_name', 'oauth_token')
    look copied from the Twitter handler; verify Facebook's actual
    response shape.
    """
    if resp is None:
        flash('You denied the request to sign in.')
        return redirect(request.args.get('next') or url_for('show_landing'))
    user = User(username=resp['screen_name'], name=resp['screen_name'],
                token=resp['oauth_token'], secret=resp['oauth_token_secret'])
    login_user(user)
    user.user_id = session['user_id']
    users.append(user)
    mongo.db.participants.insert(user.save_participant())
    return redirect(request.args.get('next') or url_for('home', username=user.username))
@app.route('/twitter')
def twitter_login():
    """Kick off the Twitter OAuth flow, preserving the 'next' target."""
    next_url = request.args.get('next')
    return twitter.authorize(callback=url_for('twitter_auth', next=next_url))
@app.route('/startpage/<username>')
@login_required
def startpage(username):
    """Render the first-run start page for a newly created user."""
    doc = db.find_user(username, strip_id=True)
    return render_template('startpage.html', user_doc=doc,
                           computers=[], username=username)
@app.route('/twitter_auth')
@twitter.authorized_handler
def twitter_auth(resp):
    """OAuth callback for Twitter.

    Loads an existing participant by screen name or inserts a new one,
    logs the user in, and sends first-time users to the start page.
    """
    if resp is None:
        flash('You denied the request to sign in.')
        return redirect(request.args.get('next') or url_for('show_landing'))
    stored_user = mongo.db.participants.find_one({'username': resp['screen_name']})
    if stored_user:
        # Returning user: rebuild the User from the stored participant doc.
        new_user = False
        user = User(username=resp['screen_name'],
                    token=resp['oauth_token'], secret=resp['oauth_token_secret'])
        user.load_participant(stored_user)
    else:
        # First sign-in: persist a fresh participant document.
        new_user = True
        user = User(username=resp['screen_name'], name=resp['screen_name'],
                    token=resp['oauth_token'], secret=resp['oauth_token_secret'])
        mongo.db.participants.insert(user.save_participant())
    login_user(user)
    if new_user:
        return redirect(url_for('startpage', username=user.username))
    else:
        return redirect(request.args.get('next') or url_for('home', username=user.username))
@app.route('/google')
def google():
    """Placeholder Google auth endpoint."""
    label = 'Google'
    return label
if __name__ == '__main__':
if len(sys.argv) == 1:
print 'USAGE: python commpute.py <address> <port>'
app.run(host=sys.argv[1], port=int(sys.argv[2]))
| {
"repo_name": "nickpope/commpute",
"path": "commpute.py",
"copies": "1",
"size": "13917",
"license": "apache-2.0",
"hash": 7902317899134936000,
"line_mean": 29.8580931264,
"line_max": 106,
"alpha_frac": 0.6053747216,
"autogenerated": false,
"ratio": 3.659479358401262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4764854080001262,
"avg_score": null,
"num_lines": null
} |
#all the imports
from GenSched import Schedule, Theater_Room
from RegularSchedule import RegularSchedule
from DontWantSched import DontWantSchedule
from SpecGenreSched import SpecGenreSchedule
from SpecTimeSched import SpecTimeSchedule
# Make the initial schedule database of (title, genre) pairs.
# NOTE(review): `s` is rebound to a fresh *Schedule subclass per request
# below, so this seed database appears to be replaced after the first
# choice -- TODO confirm against the Schedule classes.
s = Schedule([("The Little Mermaid", "Family"), ("Saw 3", "Horror"), ("The Hangover", "Comedy"), ("Requiem For A Dream", "Drama"), ("2001: A Space Odyssey", "Sci-Fi"), ("Forgetting Sarah Marshall", "Romantic Comedy")])
print("Welcome to Sturman Studios!")
# Interactive loop: one scheduling request per iteration until the user quits.
while(1):
    genre = ""
    response = ""
    num1 = 0
    num2 = 0
    moviename = ""
    #print("Movies: " + s.get_names() + "Genres: " + s.get_genres())
    print("Movies: " + s.get_names() + "Genres: Family\t\t\tHorror\tComedy\t\tDrama\t\t\tSci-Fi\t\t\tRomantic Comedy")
    # Keep prompting until one of the four menu letters is entered.
    while(response.upper() != "A" and response.upper() != "B" and response.upper() != "C" and response.upper() != "D"):
        response = input("What would you like to do? \n A: Watch a specific movie at a specific time\n B: Watch a specific Genre\n C: Avoid a Specific Genre\n D: Watch anything within a specific time range\n\n Enter the appropriate letter: ")
    person_name = input("Enter your name: ")
    # Genre is only needed for options B and C; loop until it is valid.
    while((response.upper() == "B" or response.upper() == "C") and (genre.lower() != "horror" and genre.lower() != "family" and genre.lower() != "comedy" and genre.lower() != "drama" and genre.lower() != "romantic comedy" and genre.lower() != "sci-fi")):
        genre = input("Please enter a genre: Horror, Family, Comedy, Drama, Romantic Comedy, Sci-Fi: ")
    if(response.upper() == "A"):
        # Specific movie at a specific (military) hour.
        moviename = input("Enter what movie you're looking for: ")
        try:
            num1 = int(input("Enter what time you're looking for (military hours): "))
        except ValueError:
            # NOTE(review): on bad input num1 keeps its default 0 and the
            # request still proceeds -- TODO confirm intended behavior.
            print("bad input. Try again.")
        s = RegularSchedule()
        print(s.schedule(person_name, num1, moviename))
    elif(response.upper() == "B"):
        # Any showing of the chosen genre.
        s = SpecGenreSchedule()
        print(s.schedule(person_name, genre))
    elif(response.upper() == "C"):
        # Any showing avoiding the chosen genre.
        s = DontWantSchedule()
        print(s.schedule(person_name, genre))
    elif(response.upper() == "D"):
        # Anything inside a [num1, num2] time range.
        #while(num1 <= num2 or num2 <0 or num1 < 0):
        try:
            num1 = int(input("Enter the beginning time in your range that you're looking for (military hours): "))
        except ValueError:
            print("bad input. Try again.")
        try:
            num2 = int(input("Enter the ending time in your range that you're looking for (military hours): "))
        except ValueError:
            print("bad input. Try again.")
        s = SpecTimeSchedule()
        print(s.schedule(person_name, num1, num2))
    if(input("Do you want to print the schedules out so far? Type Y for yes, anything else if you don't: ").lower() == 'y'):
        s.print_theaters()
    response = input("Do you want to quit? Type Q if you do, anything else if you don't: ")
    if(response == "Q" or response == "q"):
        print("K pce.\n")
        exit(0)
print("\n") | {
"repo_name": "askii93/Strategy-Movies",
"path": "overall.py",
"copies": "1",
"size": "2826",
"license": "apache-2.0",
"hash": 5339533688148490000,
"line_mean": 41.1940298507,
"line_max": 251,
"alpha_frac": 0.6733899505,
"autogenerated": false,
"ratio": 2.9345794392523366,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8866962848986268,
"avg_score": 0.04820130815321378,
"num_lines": 67
} |
# all the imports
import os
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash, request, jsonify
from flask_mail import Message, Mail
app = Flask(__name__)  # create the application instance :)
app.config.from_object(__name__)  # load config from this file , flaskr.py
# Load default config and override config from an environment variable
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'alex.db'),
    SECRET_KEY='development key',
    USERNAME='admin',
    PASSWORD='default'
))
app.config.from_envvar('alex_SETTINGS', silent=True)
# NOTE(review): live SMTP credentials are hardcoded below and committed
# to source control -- these should be rotated immediately and loaded
# from environment variables / a secrets store instead.
app.config.update(
    #EMAIL SETTINGS
    MAIL_SERVER='email-smtp.us-east-1.amazonaws.com',
    MAIL_PORT=465,
    MAIL_USE_SSL=True,
    MAIL_USE_TLS=False,
    MAIL_USERNAME='AKIAJQD3DQBN6Q677DUQ',
    MAIL_PASSWORD='Ai6mnfN0JxYMD9akF0y8s9PhMmP+woi9hd4AHMpyHHMU'
)
mail=Mail(app)
def connect_db():
    """Open a connection to the configured SQLite database, returning
    rows as sqlite3.Row so columns are addressable by name."""
    conn = sqlite3.connect(app.config['DATABASE'])
    conn.row_factory = sqlite3.Row
    return conn
def init_db():
    """(Re)create the schema by running schema.sql against the app DB."""
    database = get_db()
    with app.open_resource('schema.sql', mode='r') as schema:
        database.cursor().executescript(schema.read())
    database.commit()
@app.cli.command('initdb')
def initdb_command():
    """CLI entry point: build the database tables and confirm on stdout."""
    init_db()
    message = 'Initialized the database.'
    print(message)
def get_db():
    """Return the SQLite connection for the current application context,
    creating and caching it on ``g`` on first use."""
    if hasattr(g, 'sqlite_db'):
        return g.sqlite_db
    g.sqlite_db = connect_db()
    return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
    """Teardown hook: close the context's SQLite connection, if opened."""
    conn = getattr(g, 'sqlite_db', None)
    if conn is not None:
        conn.close()
@app.route('/', methods=['GET', 'POST'])
def show_entries():
    """Render the index page.

    NOTE(review): entries are fetched from the database but never passed
    to the template, so the query result is discarded -- either pass
    entries=entries to render_template or drop the query; TODO confirm
    which the template expects.
    """
    db = get_db()
    cur = db.execute('select title, text from entries order by id desc')
    entries = cur.fetchall()
    return render_template('index.html')
@app.route('/form', methods=['POST'])
def form():
    """Handle the contact form: email its contents via Flask-Mail."""
    fields = {key: request.form[key]
              for key in ('first_name', 'last_name', 'email',
                          'phone', 'address', 'state', 'comment')}
    msg = Message('Hello', sender='lexloulou@gmail.com',
                  recipients=['lexloulou@gmail.com'])
    msg.body = "Hello Flask message sent from Flask-Mail"
    msg.html = render_template('form.html',
                               name=fields['first_name'],
                               lastname=fields['last_name'],
                               email=fields['email'],
                               phone=fields['phone'],
                               address=fields['address'],
                               state=fields['state'],
                               message=fields['comment'])
    mail.send(msg)
    return "Sent"
# BUG FIX: the guard compared against '__alex__', which __name__ never equals,
# so `app.run()` was unreachable when executing this file directly.
if __name__ == '__main__':
    app.run(debug=True)
| {
"repo_name": "loutwo17/mySite",
"path": "alex/alex/alex.py",
"copies": "1",
"size": "2650",
"license": "mit",
"hash": 1001446993668458500,
"line_mean": 25.0408163265,
"line_max": 135,
"alpha_frac": 0.6758490566,
"autogenerated": false,
"ratio": 3.0355097365406642,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8868686615433807,
"avg_score": 0.06853443554137152,
"num_lines": 98
} |
"""All the index classes.
Currentlly:
PKIndex: Primary Key Index
TreeIndex: Tree based index using a AVL tree for high cardinality fields
BitmapIndex: Bitmap index for low cardinality fields
vptreeIndex: for similarity search
"""
# from .persistentdb import FILES_DIR
# from .baseclasses import BaseIndex
from collections import defaultdict
import pickle
import os
import bintrees
import numpy as np
from tsdb.tsdb_constants import *
import abc
class Index(metaclass=abc.ABCMeta):
    """Abstract interface that every index implementation must satisfy.

    Concrete subclasses provide construction/persistence setup, insertion,
    removal, and equality lookup of primary keys by field value.
    """
    @abc.abstractmethod
    def __init__(self):
        """Initialize DB parameters and any on-disk persistence structures."""
        pass
    @abc.abstractmethod
    def insert(self, fieldValue, pk):
        """Record ``pk`` under ``fieldValue`` in the index."""
        pass
    @abc.abstractmethod
    def remove(self, fieldValue, pk):
        """Delete ``pk`` from ``fieldValue``'s entry in the index."""
        pass
    @abc.abstractmethod
    def getEqual(self, fieldValue):
        """Return the set of primary keys stored under ``fieldValue``."""
        pass
class PKIndex(Index):
    """
    PK Index using Pickle serialization and a write ahead log.
    Essentially a (pk: offset) dictionary with writelog and disk storage:
    the pickle holds the last folded snapshot, the log holds "pk:offset"
    lines written since.
    """
    def __init__(self, db_name='default'):
        self.filename = FILES_DIR + '/' + db_name + '/' + 'pks.idx'
        self.writelog = FILES_DIR + '/' + db_name + '/' + 'idx.log'
        # If the pickled index was never created before, create it now.
        if not os.path.exists(self.filename):
            self.dict = dict()
            self.save_pickle(new=True)
        else:
            self.dict = self.load_pickle()
        if not os.path.exists(self.writelog):
            self.fd = open(self.writelog, 'x')
        else:  # if the log had values: load them, save the pickle, wipe the log
            self.fd = self.load_and_clear_log()
        self.pk_count = len(self.dict)
        # Position the write pointer at the end of the log.
        self.fd.seek(0, 2)
        self.writeptr = self.fd.tell()
    def load_and_clear_log(self, loaded=False, close=False):
        """
        Fold the writelog's "pk:offset" lines into the in-memory dict,
        persist the pickle, and truncate the log.
        If ``loaded``, the writelog is assumed to be open; otherwise closed.
        If ``close``, no new file descriptor is returned.
        """
        if loaded:
            self.fd.close()
        self.fd = open(self.writelog, 'r')
        items = [l.strip().split(':') for l in self.fd.readlines()]
        writelog_dict = {k: int(v) for k, v in items}
        self.dict.update(writelog_dict)
        self.save_pickle()
        self.fd.close()
        open(self.writelog, 'w').close()  # this wipes the log contents
        if not close:
            return open(self.writelog, 'w')
    def save_pickle(self, new=False):
        """Persist the full dict ('x' mode on first creation, 'w' after)."""
        form = 'xb' if new else 'wb'
        with open(self.filename, form, buffering=0) as fd:
            pickle.dump(self.dict, fd)
    def load_pickle(self):
        """Load and return the pickled dict from disk."""
        with open(self.filename, 'rb', buffering=0) as fd:
            return pickle.load(fd)
    def keys(self):
        return self.dict.keys()
    def __len__(self):
        return self.pk_count
    def insert(self, key, value):
        # Delegates to __setitem__; present to match the Index interface.
        self[key] = value
    def remove(self, key, value=None):
        # ``value`` is accepted only to match the Index interface.
        self.fd = self.load_and_clear_log(loaded=True)
        del self.dict[key]
        # BUG FIX: keep the cached count in sync so __len__ stays correct
        # after removals.
        self.pk_count -= 1
        self.save_pickle()
    def getEqual(self, key):
        return self[key]
    def close(self):
        # Flush the log into the pickle without reopening the log.
        self.load_and_clear_log(loaded=True, close=True)
    def __getitem__(self, key):
        return self.dict[key]
    def __setitem__(self, key, value):
        if key not in self.dict:
            self.pk_count += 1
            self.dict[key] = value
            self.fd.write(key + ':' + str(value) + '\n')
            # Periodically fold the log into the pickle snapshot.
            if self.pk_count % REFRESH_RATE == 0:
                self.fd = self.load_and_clear_log(loaded=True)
        else:
            # Known key: update in memory only; next fold persists it.
            self.dict[key] = value
    def __iter__(self):
        return iter(self.dict)
    def __contains__(self, key):
        return key in self.dict
class TreeIndex(Index):
    """AVL-tree-backed index for high cardinality fields.

    Maps fieldValue -> list of primary keys inside a pickled
    ``bintrees.AVLTree``; the tree is re-persisted after every mutation.
    """
    def __init__(self, fieldName='default', db_name='default'):
        self.db_name = db_name
        self.name = fieldName
        self.filename = FILES_DIR + '/' + db_name + '/' + fieldName + '.idx'
        # Kept for backwards compatibility; not used internally.
        self.dict = defaultdict(set)
        self.tree = self._load()
        # True whenever the on-disk copy may be newer than the in-memory tree.
        self._stale = False
    def _persist(self):
        """Write the current tree to disk."""
        with open(self.filename, 'wb') as f:
            pickle.dump(self.tree, f)
    def _load(self):
        """Load the tree from file if it exists, else return an empty AVL tree."""
        if os.path.isfile(self.filename):
            with open(self.filename, 'rb') as f:
                return pickle.load(f)
        else:
            return bintrees.AVLTree()
    def _checkStale(self):
        # BUG FIX: the reloaded tree was previously discarded
        # (``self._load()`` return value was never assigned).
        if self._stale:
            self.tree = self._load()
            self._stale = False
    def insert(self, fieldValue, pk):
        """Add ``pk`` under ``fieldValue`` (no-op if already present)."""
        self._checkStale()
        if fieldValue in self.tree:
            if pk not in self.tree[fieldValue]:
                self.tree[fieldValue] = self.tree[fieldValue] + [pk]
        else:
            self.tree[fieldValue] = [pk]
        self._persist()
        self._stale = True
    def remove(self, fieldValue, pk):
        """Remove ``pk`` from ``fieldValue``'s entry; raise ValueError if absent."""
        self._checkStale()
        if fieldValue in self.tree:
            matchingPks = self.tree[fieldValue]
            if pk in matchingPks:
                del matchingPks[matchingPks.index(pk)]
                self.tree[fieldValue] = matchingPks
            else:
                raise ValueError("primary_key {} does not exist in Treeindex for {}".format(pk, self.name))
        else:
            # BUG FIX: removed a leftover `import pdb; pdb.set_trace()` debug hook.
            raise ValueError("Field value {} does not have index".format(self.name))
        self._persist()
        self._stale = True
    def get(self, fieldValue, operator_num=2):
        """
        'get' wrapper function to delegate to other functions
        based on submitted operator
        """
        if operator_num == 0:
            return self.getLower(fieldValue)
        elif operator_num == 1:
            return self.getHigher(fieldValue)
        elif operator_num == 2:
            return self.getEqual(fieldValue)
        elif operator_num == 3:
            return self.getNotEq(fieldValue)
        elif operator_num == 4:
            return self.getLowerOrEq(fieldValue)
        elif operator_num == 5:
            return self.getHigherOrEq(fieldValue)
        raise RuntimeError("should be impossible")
    def getEqual(self, fieldValue):
        """Set of pks stored exactly under ``fieldValue``."""
        self._checkStale()
        if fieldValue in self.tree:
            return set(self.tree[fieldValue])
        else:
            return set()
    def getLower(self, fieldValue):
        """Set of pks stored under keys strictly below ``fieldValue``."""
        self._checkStale()
        retList = set()
        # Tree slicing yields the keys below fieldValue.
        for v in self.tree[:fieldValue]:
            retList = retList | self.getEqual(v)
        return set(retList)
    def getHigher(self, fieldValue):
        """Set of pks stored under keys strictly above ``fieldValue``."""
        self._checkStale()
        retList = set()
        for v in self.tree[fieldValue:]:
            if v != fieldValue:
                retList = retList | self.getEqual(v)
        return set(retList)
    def getHigherOrEq(self, fieldValue):
        return self.getHigher(fieldValue) | self.getEqual(fieldValue)
    def getLowerOrEq(self, fieldValue):
        return self.getLower(fieldValue) | self.getEqual(fieldValue)
    def getNotEq(self, fieldValue):
        return self.getAllKeys() - self.getEqual(fieldValue)
    def getAllKeys(self):
        """Return the set of every pk stored in this index."""
        self._checkStale()
        pks = set()
        # BUG FIX: was ``self.trees`` (nonexistent attribute) and unioned the
        # raw pk lists into a set, which raises TypeError; convert each list.
        for value in self.tree.values():
            pks |= set(value)
        return pks
    def deleteIndex(self):
        """Delete the on-disk index file, if present."""
        if os.path.isfile(self.filename):
            os.remove(self.filename)
class BitmapIndex(Index):
    """Bitmap index for low cardinality fields.

    The backing file is an append-only sequence of fixed-size records:
    a ``pk_len``-byte primary key followed by one '0'/'1' byte per possible
    field value (or all '-' sentinel bytes for a deleted entry). Reloading
    replays the file; later records for a pk supersede earlier ones.
    """
    def __init__(self, values, pk_len=4, fieldName='default', db_name='default'):
        # Requires the user to specify pk_len, the fixed length of the primary keys.
        self.pks_len = pk_len
        # 'values' is the list of possible fieldValues.
        self.values = values
        self.values_len = len(values)
        # File containing the bitmap records for this field.
        self.filename = 'persistent_files/' + db_name + '/' + fieldName + '_index.txt'
        # One list of booleans per field value. Lists are used (rather than
        # numpy arrays) because appending is cheaper than re-stacking arrays.
        self.bmindex_list = [[] for ii in range(self.values_len)]
        # pk -> record index, and record index -> pk.
        self.pks_dict = {}
        self.dict_pks = {}
        # Create the backing file if needed, otherwise replay it.
        if not os.path.exists(self.filename):
            self.bmindex = open(self.filename, "xb+", buffering=0)
            self.last_pk_idx = 0
        else:
            item_counter = 0
            self.bmindex = open(self.filename, "r+b", buffering=0)
            while True:
                # Try to read the primary key for this record.
                prim_key = self.bmindex.read(self.pks_len)
                if prim_key == b'':
                    # Reached end of file.
                    break
                elif prim_key in self.pks_dict:
                    # A later record supersedes an earlier one for the same pk.
                    del self.dict_pks[self.pks_dict[prim_key]]
                self.pks_dict.update({prim_key: item_counter})
                self.dict_pks.update({item_counter: prim_key})
                for ii in range(self.values_len):
                    try:
                        boolean = bool(int(self.bmindex.read(1)))
                        self.bmindex_list[ii].append(boolean)
                    except ValueError:
                        # Thrown when the b'-' deletion sentinel is hit.
                        # BUG FIX: consume the rest of the sentinel bytes so
                        # the file cursor stays aligned with record boundaries.
                        self.bmindex.read(self.values_len - ii - 1)
                        # BUG FIX: pad this record's columns with False so the
                        # in-memory lists stay aligned with item_counter (the
                        # deleted index is absent from dict_pks, so the padded
                        # row is never returned by lookups).
                        for jj in range(ii, self.values_len):
                            self.bmindex_list[jj].append(False)
                        del self.pks_dict[prim_key]
                        del self.dict_pks[item_counter]
                        break
                item_counter += 1
            self.last_pk_idx = item_counter
    def insert(self, fieldValue, pk):
        """Add (or re-add) ``pk`` with ``fieldValue``, appending a new record.

        Updates the in-memory lists/dicts and the backing file. Raises
        ValueError for a wrong-length pk or an unknown field value.
        """
        # Pks are stored as bytes; convert ints/strings passed by the caller.
        if not isinstance(pk, bytes):
            pk_str = bytes(str(pk), 'utf-8')
        else:
            pk_str = pk
        if len(pk_str) != self.pks_len:
            raise ValueError("Primary key {} is not of the pre-specified length {}".format(pk, self.pks_len))
        # Check if the fieldValue is valid. If not, throw an error.
        if fieldValue not in self.values:
            raise ValueError('\"{}\" not in the set of user-specified values: {}'.format(fieldValue, self.values))
        else:
            # Find the field's column index from the configured values.
            field_index = self.values.index(fieldValue)
            # Add the pk to the dictionaries along with its record index.
            if pk_str in self.pks_dict:
                # Superseding an existing record: drop the old mapping.
                del self.dict_pks[self.pks_dict[pk_str]]
                del self.pks_dict[pk_str]
            self.pks_dict.update({pk_str: self.last_pk_idx})
            self.dict_pks.update({self.last_pk_idx: pk_str})
            self.last_pk_idx += 1
            # Append the pk and the booleans to the end of the file.
            self.bmindex.seek(0, 2)
            self.bmindex.write(pk_str)
            # Update the in-memory lists and write to file.
            for ii in range(self.values_len):
                if ii == field_index:
                    self.bmindex_list[ii].append(True)
                    self.bmindex.write(b'1')
                else:
                    self.bmindex_list[ii].append(False)
                    self.bmindex.write(b'0')
    def remove(self, pk):
        """Delete ``pk`` by appending a sentinel record.

        NOTE: the signature differs from Index.remove(fieldValue, pk); kept
        as-is for backwards compatibility with existing callers.
        """
        # BUG FIX: the original compared ``type(pk) != 'bytes'`` (a type
        # against a string, always True), which re-encoded bytes pks via
        # str() and corrupted them. Use isinstance and keep bytes as-is.
        if isinstance(pk, bytes):
            pk_str = pk
        else:
            pk_str = bytes(str(pk), 'utf-8')
        # Check if the pk is in the index or not.
        if pk_str not in self.pks_dict:
            raise KeyError('\"{}\" not a valid primary key'.format(pk))
        else:
            # Remove the primary key from the dictionaries.
            del self.dict_pks[self.pks_dict[pk_str]]
            del self.pks_dict[pk_str]
            # Append a sentinel record; make sure we are at end-of-file first
            # so the record boundaries in the file stay intact.
            self.bmindex.seek(0, 2)
            self.bmindex.write(pk_str + (self.values_len * b'-'))
    def getEqual(self, fieldValue):
        """Return the set of primary keys stored under ``fieldValue``."""
        if fieldValue not in self.values:
            raise ValueError('\"{}\" not in the set of user-specified values: {}'.format(fieldValue, self.values))
        else:
            fieldValue_index = self.values.index(fieldValue)
            matching_keys = []
            for ii in self.dict_pks.keys():
                if self.bmindex_list[fieldValue_index][ii]:
                    matching_keys.append(self.dict_pks[ii])
            return set(matching_keys)
    def getNotEq(self, fieldValue):
        """Return the set of primary keys NOT stored under ``fieldValue``."""
        if fieldValue not in self.values:
            raise ValueError('\"{}\" not in the set of user-specified values: {}'.format(fieldValue, self.values))
        else:
            fieldValue_index = self.values.index(fieldValue)
            matching_keys = []
            for ii in self.dict_pks.keys():
                if not self.bmindex_list[fieldValue_index][ii]:
                    matching_keys.append(self.dict_pks[ii])
            return set(matching_keys)
    def getAllKeys(self):
        """Return the set of every live primary key."""
        return set(self.pks_dict.keys())
    def deleteIndex(self):
        """Delete the on-disk index file, if present."""
        if os.path.isfile(self.filename):
            os.remove(self.filename)
    def get(self, pk):
        """Return the field value currently stored for ``pk`` (a str)."""
        index = self.pks_dict[bytes(pk, 'utf-8')]
        for ii in range(self.values_len):
            if self.bmindex_list[ii][index]:
                return self.values[ii]
    def __del__(self):
        # Best-effort close of the backing file on garbage collection.
        self.bmindex.close()
| {
"repo_name": "cs207-project/TimeSeries",
"path": "tsdb/utils_indices.py",
"copies": "1",
"size": "14583",
"license": "mit",
"hash": -9030452057666894000,
"line_mean": 32.5195402299,
"line_max": 114,
"alpha_frac": 0.5710856594,
"autogenerated": false,
"ratio": 3.9429421308815575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001979884626456438,
"num_lines": 435
} |
# All the individual config players have their own tests, but this tests the
# generic ConfigPlayer functionality
from collections import namedtuple
from mpf.assets.show import Show
from mpf.config_players.device_config_player import DeviceConfigPlayer
from mpf.tests.MpfTestCase import MpfTestCase
from mpf.core.config_player import ConfigPlayer
# One recorded invocation of BananaPlayer.play(), kept for test assertions.
PlayCall = namedtuple('PlayCall', ['settings', 'key', 'priority', 'kwargs'])
class BananaPlayer(DeviceConfigPlayer):
    """Stub config player used to exercise generic ConfigPlayer plumbing.

    Every play() call is recorded on the machine so tests can assert on it.
    """
    config_file_section = 'banana_player'
    show_section = 'bananas'
    machine_collection_name = 'bananas'
    def __init__(self, machine):
        super().__init__(machine)
        # Containers the tests inspect after posting events.
        self.machine.bananas = {}
        self.machine.banana_play_calls = []
    def play(self, settings, context, calling_context, key=None, priority=0, start_time=None, **kwargs):
        del start_time  # unused; accepted for interface compatibility
        call_record = PlayCall(settings, key, priority, kwargs)
        self.machine.banana_play_calls.append(call_record)
    def _clear(self, key):
        # Nothing to clean up for this stub player.
        pass
    def get_express_config(self, value):
        """Wrap an express-config value in the expected dict shape."""
        return {'banana': value}
    def get_full_config(self, value):
        """Full configs pass through unchanged."""
        return value
class TestConfigPlayers(MpfTestCase):
    """Exercises generic ConfigPlayer behavior through the BananaPlayer stub."""
    def get_config_file(self):
        # Machine config that wires events/shows to the banana_player sections.
        return 'test_config_players.yaml'
    def get_machine_path(self):
        return 'tests/machine_files/config_players/'
    def setUp(self):
        # Register BananaPlayer as the 'banana' config player before boot.
        self.machine_config_patches['mpf']['config_players'] = dict()
        self.machine_config_patches['mpf']['config_players']['banana'] = \
            'mpf.tests.test_ConfigPlayers.BananaPlayer'
        self.machine_spec_patches['banana_player'] = dict(__valid_in__='machine, mode')
        # Hack around globals in shows
        Show.next_id = 0
        super().setUp()
    def test_config_player(self):
        """Post events and verify the banana player's recorded play() calls."""
        self.assertIn('bananas', self.machine.show_controller.show_players)
        # post events to make sure banana_player is called
        self.machine.events.post('event1')
        self.advance_time_and_run()
        play_call = self.machine.banana_play_calls.pop()
        # event1's settings arrive as the express form with an empty dict.
        self.assertEqual(play_call.settings, {'express': {}})
        self.assertEqual(play_call.key, None)
        self.assertEqual(play_call.kwargs, {})
        self.machine.events.post('event2')
        self.advance_time_and_run()
        play_call = self.machine.banana_play_calls.pop()
        self.assertEqual(play_call.settings,
                         {'some': {'banana': 'key'}})
        self.assertEqual(play_call.key, None)
        self.assertEqual(play_call.kwargs, {})  # todo
        self.machine.events.post('event3')
        self.advance_time_and_run()
        play_call = self.machine.banana_play_calls.pop()
        # event3 configures two devices in one play call.
        self.assertEqual(play_call.settings,
                         {'this_banana': {'some': 'key'},
                          'that_banana': {'some': 'key'}})
        self.assertEqual(play_call.key, None)
        self.assertEqual(play_call.kwargs, {})  # todo
        # event5 is in mode1, so make sure it is not called now
        self.assertEqual(0, len(self.machine.banana_play_calls))
        self.machine.events.post('event5')
        self.advance_time_and_run()
        self.assertEqual(0, len(self.machine.banana_play_calls))
        # Start the mode, make sure the mode player enables
        self.machine.modes['mode1'].start()
        self.advance_time_and_run()
        self.machine.events.post('event5')
        self.advance_time_and_run()
        play_call = self.machine.banana_play_calls.pop()
        self.assertEqual(play_call.settings, {'express': {}})
        # Mode should be passed properly
        # self.assertEqual(play_call.key, 'mode1')
        self.assertEqual(play_call.kwargs, {})  # todo
        # stop the mode, make sure the event doesn't fire
        self.machine.modes['mode1'].stop()
        self.advance_time_and_run()
        self.machine.events.post('event5')
        self.advance_time_and_run()
        self.assertEqual(0, len(self.machine.banana_play_calls))
        # Start a show
        self.machine.events.post('event4')
        self.advance_time_and_run()
        play_call = self.machine.banana_play_calls.pop()
        self.assertEqual(play_call.settings, {'banana1': {'banana': 'express'}})
        # self.assertEqual(play_call.key, 'show1.1')
        self.assertEqual(play_call.kwargs, {'show_tokens': {}})  # todo
        self.assertEqual(1, len(self.machine.show_player.instances['_global']['show_player']))
        # todo add tests for mode 1 show, make sure the mode is passed
        # todo make sure it stops when the mode ends, that banana clear is
        # called when it stops, and that it doesn't start again once the mode
        # is not running
    def test_empty_config_player_section(self):
        """A mode with an empty banana_player section must start and stop cleanly."""
        self.machine.modes["mode2"].start()
        self.advance_time_and_run()
        self.machine.modes["mode2"].stop()
        self.advance_time_and_run()
| {
"repo_name": "missionpinball/mpf",
"path": "mpf/tests/test_ConfigPlayers.py",
"copies": "1",
"size": "4919",
"license": "mit",
"hash": 9003929969340159000,
"line_mean": 33.1597222222,
"line_max": 104,
"alpha_frac": 0.632445619,
"autogenerated": false,
"ratio": 3.7068575734740015,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9837783767450068,
"avg_score": 0.00030388500478656426,
"num_lines": 144
} |
"""All the MapReduce magic."""
from collections import defaultdict, deque
import itertools as it
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing.pool import Pool
import operator as op
from tinymr import _compat
from tinymr.errors import ClosedTaskError, KeyCountError
from tinymr.tools import popitems
class _SerialPool(object):
    """Like ``multiprocessing.Pool()`` but without any of the overhead or
    debugging complexities.
    """
    def imap_unordered(self, func, stream, chunksize):
        # Runs serially; ``chunksize`` is accepted only for API compatibility
        # with the real pools and is ignored.
        return _compat.map(func, stream)
class _MRInternal(object):
    """A lot of the helper methods on ``tinymr.MapReduce()`` are not relevant
    to subclassers so they are hidden away.
    This class cannot be directly subclassed. Use
    ``tinymr.mapreduce.MapReduce()``.
    """
    # NOTE(review): attributes like ``mapper``, ``reducer``, ``n_sort_keys``,
    # ``closed`` and ``close()`` are provided by the concrete task class
    # elsewhere — confirm against ``tinymr.mapreduce.MapReduce``.
    def _run_map(self, item):
        """For use with ``multiprocessing.Pool.imap_unordered()``."""
        return tuple(self.mapper(item))
    def _run_reduce(self, kv):
        """For use with ``multiprocessing.Pool.imap_unordered()``."""
        key, values = kv
        # When sort keys are in play, values arrive as (sort_key, value)
        # pairs: order by the sort key, then strip it before reducing.
        if self.n_sort_keys != 0:
            values = sorted(values, key=op.itemgetter(0))
            values = _compat.map(op.itemgetter(1), values)
        return tuple(self.reducer(key, values))
    @property
    def _ptn_key_idx(self):
        """Used internally by the key grouper. When dealing with multiple
        partition keys a ``slice()`` has to be passed to
        ``operator.itemgetter()``.
        """
        if self.n_partition_keys == 1:
            return 0
        else:
            return slice(0, self.n_partition_keys)
    @property
    def _sort_key_idx(self):
        """Used internally by the key grouper. When dealing with multiple
        sort keys a ``slice()`` has to be passed to ``operator.itemgetter()``.
        """
        # Ensure a lack of sort keys is properly handled down the line by
        # letting something fail spectacularly
        if self.n_sort_keys == 0:
            return None
        elif self.n_sort_keys == 1:
            # Given keys like: ('partition', 'sort', 'data')
            # the number of partition keys equals the index of the single
            # sort key
            return self.n_partition_keys
        else:
            start = self.n_partition_keys
            stop = start + self.n_sort_keys
            return slice(start, stop)
    @property
    def _map_key_grouper(self):
        """Provides a function that re-groups keys from the map phase. Makes
        partitioning easier.
        """
        # Always grab the partition key(s) and the trailing data element;
        # insert the sort key(s) in between only when they exist.
        getter_args = [self._ptn_key_idx, -1]
        if self.n_sort_keys > 0:
            getter_args.insert(1, self._sort_key_idx)
        return op.itemgetter(*getter_args)
    @property
    def _map_job_pool(self):
        """Get the processing pool for the map phase."""
        if self.map_jobs == 1:
            return _SerialPool()
        elif self.threaded_map:
            return ThreadPool(self.map_jobs)
        else:
            return Pool(self.map_jobs)
    @property
    def _reduce_job_pool(self):
        """Get the processing pool for the reduce phase."""
        if self.reduce_jobs == 1:
            return _SerialPool()
        elif self.threaded_reduce:
            return ThreadPool(self.reduce_jobs)
        else:
            return Pool(self.reduce_jobs)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Cleanup is delegated to close(), defined on the task class.
        self.close()
    def __call__(self, stream):
        """Run the MapReduce task.
        Parameters
        ----------
        stream : iter
            Input data.
        Yields
        ------
        tuple
            A stream of ``(key, value)`` tuples.
        """
        if self.closed:
            raise ClosedTaskError("Task is closed.")
        # --- Map phase ---
        self.init_map()
        results = self._map_job_pool.imap_unordered(
            self._run_map,
            stream,
            self.map_chunksize)
        results = it.chain.from_iterable(results)
        # Parallelized jobs can be difficult to debug so the first set of
        # keys get a sniff check for some obvious potential problems.
        # Exceptions here prevent issues with multiprocessing getting confused
        # when a job fails.
        first = next(results)
        results = it.chain([first], results)
        expected_key_count = self.n_partition_keys + self.n_sort_keys + 1
        if len(first) != expected_key_count:
            raise KeyCountError(
                "Expected {expected} keys from the map phase, not {actual} - "
                "first keys: {keys}".format(
                    expected=expected_key_count,
                    actual=len(first),
                    keys=first))
        self.check_map_keys(first)
        # --- Partition phase ---
        partitioned = defaultdict(deque)
        mapped = _compat.map(self._map_key_grouper, results)
        # Only sort when required
        if self.n_sort_keys == 0:
            for ptn, val in mapped:
                partitioned[ptn].append(val)
            partitioned_items = partitioned.items()
        else:
            # Keep (sort_key, value) pairs; _run_reduce sorts and unwraps them.
            for ptn, srt, val in mapped:
                partitioned[ptn].append((srt, val))
            if self.n_partition_keys > 1:
                # NOTE(review): only the first component of the compound
                # partition key is forwarded to the reducer — confirm this is
                # the intended contract.
                partitioned_items = it.starmap(
                    lambda _ptn, srt_val: (_ptn[0], srt_val),
                    partitioned.items())
            else:
                partitioned_items = partitioned.items()
        # Reduce phase
        self.init_reduce()
        results = self._reduce_job_pool.imap_unordered(
            self._run_reduce, partitioned_items, self.reduce_chunksize)
        results = it.chain.from_iterable(results)
        # Same as with the map phase, issue a more useful error
        first = next(results)
        results = it.chain([first], results)
        if len(first) != 2:
            raise KeyCountError(
                "Expected 2 keys from the reduce phase, not {} - first "
                "keys: {}".format(len(first), first))
        self.check_reduce_keys(first)
        # Final regroup of reducer output before handing to output().
        partitioned = defaultdict(deque)
        for k, v in results:
            partitioned[k].append(v)
        return self.output(popitems(partitioned))
| {
"repo_name": "geowurster/tinymr",
"path": "tinymr/_base.py",
"copies": "2",
"size": "6202",
"license": "bsd-3-clause",
"hash": 8822808646637030000,
"line_mean": 31.3020833333,
"line_max": 78,
"alpha_frac": 0.5744920993,
"autogenerated": false,
"ratio": 4.199052132701421,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5773544232001422,
"avg_score": null,
"num_lines": null
} |
"""All the models used by DrawWrite."""
from django.db import models
from django.utils import timezone
class Game(models.Model):
    """
    Games have a number of players, a unique name, and a
    created time.
    """
    # Display name of the game. NOTE(review): max_length=50 but no DB-level
    # uniqueness constraint despite the docstring — confirm intent.
    name = models.CharField('Name', max_length=50)
    # Count of players in this game.
    num_players = models.SmallIntegerField('Number of Players', default=0)
    # Whether the game has been started.
    started = models.BooleanField('Started?', default=False)
    # Creation timestamp; defaults to now.
    time_created = models.DateTimeField('Time Created', default=timezone.now)
    # The round currently in progress (starts at 0).
    round_num = models.SmallIntegerField('Current Round', default=0)
    # How many players have finished the current round.
    num_finished_current_round = models.SmallIntegerField(
        'Number of players who have finished the current round',
        default=0
    )
    def __str__(self):
        return self.name
class Player(models.Model):
    """
    Holds information about a user, such as their name, a reference
    to their game, and their position in the cycle.
    """
    name = models.CharField('Name', max_length=50)
    # Explicit on_delete matches the pre-Django-2.0 implicit default and is
    # mandatory from Django 2.0 onwards; behavior is unchanged.
    game = models.ForeignKey(Game, on_delete=models.CASCADE)
    # Position of this player in the pass-around cycle.
    position = models.SmallIntegerField('Position')
    was_creator = models.BooleanField('Created game')
    # The round this player is currently working on (starts at 0).
    current_round = models.SmallIntegerField('Current Round', default=0)
    def __str__(self):
        return self.name
class Chain(models.Model):
    """
    A Chain is used to connect WriteLinks and DrawLinks. It keeps track
    of how many links it has, and who created it.
    """
    time_created = models.DateTimeField('Time Created', default=timezone.now)
    # Position the next appended link will occupy.
    next_link_position = models.SmallIntegerField('Next Link Position', default=0)
    # Explicit on_delete matches the pre-Django-2.0 implicit default and is
    # mandatory from Django 2.0 onwards; behavior is unchanged.
    player = models.OneToOneField(Player, on_delete=models.CASCADE)
    def __str__(self):
        return '{0}\'s chain'.format(self.player)
class DrawLink(models.Model):
    """
    A DrawLink holds data for a single 'draw' step of a DrawWrite game.
    """
    # Uploaded drawing file.
    drawing = models.FileField('File')
    link_position = models.SmallIntegerField('Link Position')
    # Explicit on_delete matches the pre-Django-2.0 implicit default and is
    # mandatory from Django 2.0 onwards; behavior is unchanged.
    chain = models.ForeignKey(Chain, on_delete=models.CASCADE)
    added_by = models.ForeignKey(Player, on_delete=models.CASCADE)
class WriteLink(models.Model):
    """
    A WriteLink holds data for a single 'write' step of a DrawWrite game.
    """
    # The player's written description.
    text = models.TextField('Description')
    link_position = models.SmallIntegerField('Link Position')
    # Explicit on_delete matches the pre-Django-2.0 implicit default and is
    # mandatory from Django 2.0 onwards; behavior is unchanged.
    chain = models.ForeignKey(Chain, on_delete=models.CASCADE)
    added_by = models.ForeignKey(Player, on_delete=models.CASCADE)
| {
"repo_name": "RMMoreton/drawwrite",
"path": "drawwritesite/drawwrite/models.py",
"copies": "1",
"size": "2278",
"license": "mit",
"hash": 1853833444171951400,
"line_mean": 33.5151515152,
"line_max": 82,
"alpha_frac": 0.6848112379,
"autogenerated": false,
"ratio": 4.1343012704174225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5319112508317423,
"avg_score": null,
"num_lines": null
} |
#all the stuff related to Bot₵oin
import requests
import time
import sys
import datetime
import csv
import random
import re
import json
import traceback
import os
import telegram
from pydblite import Base #The PyDbLite stuff
import builtins
from . import atbSendFunctions
from . import atbMiscFunctions
def handleBTC(bot, chat_id, parsedCommand, messageText, currentMessage, update, instanceAge, btcInstanceArray):
def sendText(givenText, replyingMessageID=0, keyboardLayout=[]):
atbSendFunctions.sendText(bot, chat_id, givenText, replyingMessageID, keyboardLayout)
def sendTextTo(givenText, recipientMessageID):
atbSendFunctions.sendText(bot, recipientMessageID, givenText, 0, [])
def sendPhoto(imageName):
atbSendFunctions.sendPhoto(bot, chat_id, "images/" + imageName)
def sendSticker(stickerName):
atbSendFunctions.sendSticker(bot, chat_id, "stickers/" + stickerName)
def passSpamCheck(timeDelay=15):
return atbMiscFunctions.spamCheck(chat_id, currentMessage.date, timeDelay)
messageText.replace("@AdamTestBot", "")
messageText.replace("@TestAdamTestBot", "")
messageText.replace("@", "")
newCommand = re.split(r'[@\s:,\'*]+', messageText[len("/btc"):].lstrip())
strBtc = "Bt₵"
strBotCoin = "Bot₵oin"
strPerHour = "⁄hr"
strPerShare = "⁄share"
def floatRound(fuck):
return round(fuck, 3)
def getLedger(keyField='myYield'):
outputString = "```\n" + strBotCoin + " Ledger:\n"
K = list()
for user in builtins.btcDB:
K.append(user)
sortedK = sorted(K, key=lambda x: float(x[keyField]), reverse=True)
for user in sortedK:
outputString += user['username'] + " (" + user['name'] + ")"
try:
sym = user['other']['alliance']
if sym != "":
outputString += " [" + sym + "]"
except:
pass
outputString += "\n\t"
outputString += str((round(user['money'], 3))) + strBtc + " - "
if float(user['positiveYields']) > 0:
outputString += "(" + str(user['positiveMultiplier']) + "x) "
if float(user['zeroYields']) > 0:
outputString += "(0x) "
if float(user['negativeYields']) > 0:
outputString += "(" + str(user['negativeMultiplier']) + "x) "
outputString += str(user['myYield']) + strBtc + "⁄hr\n"
return outputString + "----------------\nType '/btc help' in a private chat with me to play!```"
def getUser(userID):
for user in builtins.btcDB:
if int(user['id']) == int(userID):
return user
return None
def getUserByUsername(userName):
for user in builtins.btcDB:
if user['username'] == userName:
return user
return None
def getStockBySymbol(sym):
for stock in builtins.btcStockDB:
if stock['letter'] == sym:
return stock
return None
def defineStocks():
#'name', 'description', 'letter', 'fullRange', 'subRangeSize', 'selectedSubRangeStart', 'currentValue', 'history', 'forecast'
haveDoneThis = False
for stock in builtins.btcStockDB:
if stock['letter'] == 'A':
haveDoneThis = True
if not haveDoneThis:
builtins.btcStockDB.insert("ATB Industrial Average", "The average of all other traded stocks.", 'A', [0,0], 0, 0, 10, [], True)
builtins.btcStockDB.insert("Blaze Corp.", "Legal in Colorado.", 'B', [-.2, 0], 0.23, 0, 4.20, [], True)
builtins.btcStockDB.insert("Consistent Co.", "A slow but safe investment.", 'C', [-0.05, 0.1], 0.05, 0, 12.25, [], True)
builtins.btcStockDB.insert("Deez Nutz Ltd.", "High risk, high reward.", 'D', [-1, 0], 1, -4, 30, [], True)
builtins.btcStockDB.insert("ScrubSoft Inc.", "/scrub 999999999", 'S', [-0.23, 0.15], 0.1, 0, 6.66, [], True)
def getStockInfo(sym):
stock = getStockBySymbol(sym)
outputString = "Stock " + sym + ":\n"
outputString += "Name: " + stock['name'] + "\n"
outputString += "Desc: " + stock['description'] + "\n"
outputString += "Symbol: " + stock['letter'] + "\n"
outputString += "Full Range: " + str(stock['fullRange']) + '\n'
outputString += "subRangeSize: " + str(stock['subRangeSize']) + '\n'
outputString += "Current Subrange Start: " + str(stock['selectedSubRangeStart']) + '\n'
outputString += "Current Value: " + str(stock['currentValue']) + '\n'
outputString += "Forecast: " + str(stock['forecast']) + '\n'
totalOut = 0
for user in builtins.btcDB:
totalOut += user['stocks'][sym]
outputString += "Total Sold: " + str(totalOut) + '\n'
return [outputString, ""]
def updateStockRange(sym, low, high):
stock = getStockBySymbol(sym)
newRange = [low, high]
builtins.btcStockDB.update(stock, fullRange=newRange)
return ["Set Stock " + sym + "'s Range to " + str(newRange), ""]
def updateStockRangeSize(sym, size):
stock = getStockBySymbol(sym)
builtins.btcStockDB.update(stock, subRangeSize=size)
return ["Set Stock " + sym + "'s subRangeSize to " + str(size), ""]
def updateStocks():
stockValueTotal = 0
stockNum = 0
stockA = -1
for stock in builtins.btcStockDB:
if stock['letter'] != 'A':
stockNum += 1
stockValueTotal += stock['currentValue']
lowerBound = stock['selectedSubRangeStart']
builtins.btcStockDB.update(stock, selectedSubRangeStart=round(random.uniform(stock['fullRange'][0], stock['fullRange'][1]), 2))
upperBound = stock['subRangeSize'] + lowerBound
delta = round(random.uniform(lowerBound, upperBound), 2)
forecastUp = True
if abs(stock['selectedSubRangeStart']) > abs(stock['selectedSubRangeStart'] + stock['subRangeSize']):
forecastUp = False
currentHistory = stock['history']
builtins.btcStockDB.update(stock, currentValue=round(max(stock['currentValue'] + delta, 2.5), 3))
builtins.btcStockDB.update(stock, forecast=forecastUp)
currentHistory.append(stock['currentValue'])
builtins.btcStockDB.update(stock, history=currentHistory)
else:
stockA = stock
currentHistory = stockA['history']
builtins.btcStockDB.update(stockA, currentValue=round((stockValueTotal / stockNum), 2))
currentHistory.append(stockA['currentValue'])
builtins.btcStockDB.update(stockA, history=currentHistory)
builtins.btcStockDB.commit()
def stockQuote():
outputString = "```\n" + strBotCoin + " Stock Quote:\n"
K = list()
for stock in builtins.btcStockDB:
K.append(stock)
sortedK = sorted(K, key=lambda x: x['letter'], reverse=False)
for stock in sortedK:
outputString += stock['name'] + "\n\t"
outputString += stock['description'] + "\n\t\t"
outputString += str(stock['currentValue']) + strBtc + " ["
if stock['letter'] == 'A':
outputString += "-"
elif stock['forecast']:
outputString += "↑"
else:
outputString += "↓"
outputString += "]\n"
return outputString + "--------------------\nType '/btc quote' for a quote, or go to '/btc shop' to buy/sell Stocks!```"
def getPortfolio(dialog=False):
outputString = "```\nYour Stock Portfolio:\n"
user = getUser(currentMessage.from_user.id)
K = list()
for stock in builtins.btcStockDB:
K.append(stock)
sortedK = sorted(K, key=lambda x: x['letter'], reverse=False)
for stock in sortedK:
forecast = "↓"
if stock['letter'] == 'A':
forecast = "-"
elif stock['forecast']:
forecast = "↑"
outputString += stock['name'] + " (" + stock['letter'] + ") [" + forecast + "]\n\t"
outputString += str(stock['currentValue']) + strBtc + strPerShare + " - " + str(user['stocks'][stock['letter']]) + " (" + str(round(stock['currentValue'] * user['stocks'][stock['letter']], 2)) + ")\n"
if dialog:
return outputString + "--------------\nWhat Stock Symbol do you want to manage?```\nYou currently have " + str(floatRound(user['money'])) + strBtc
else:
return outputString + "```"
def getMe():
    """Render the calling user's profile as a markdown code block.

    Shows username/name, optional alliance tag, balance, any pending yield
    multipliers (boost / zero / negative attacks), hourly yield, and every
    stock the user holds at least one share of.
    """
    user = getUser(currentMessage.from_user.id)
    outputString = "```\n"
    outputString += user['username'] + " (" + user['name'] + ")"
    # Alliance tag is optional: older records may lack the 'other' mapping or
    # the 'alliance' key, so a narrow lookup failure is tolerated here
    # (bug fix: was a bare except that swallowed everything).
    try:
        sym = user['other']['alliance']
        if sym != "":
            outputString += " [" + sym + "]"
    except (KeyError, TypeError):
        pass
    outputString += "\n\t"
    outputString += str(floatRound(user['money'])) + strBtc + " - "
    # Show any pending multipliers ahead of the yield itself.
    if float(user['positiveYields']) > 0:
        outputString += "(" + str(user['positiveMultiplier']) + "x) "
    if float(user['zeroYields']) > 0:
        outputString += "(0x) "
    if float(user['negativeYields']) > 0:
        outputString += "(" + str(user['negativeMultiplier']) + "x) "
    outputString += str(user['myYield']) + strBtc + strPerHour + "\n\n"
    # List only stocks actually held, sorted by symbol.
    K = list()
    for stock in builtins.btcStockDB:
        K.append(stock)
    sortedK = sorted(K, key=lambda x: x['letter'], reverse=False)
    for stock in sortedK:
        if user['stocks'][stock['letter']] > 0:
            forecast = "↓"
            if stock['forecast']:
                forecast = "↑"
            outputString += stock['name'] + " (" + stock['letter'] + ") [" + forecast + "]\n\t"
            outputString += str(stock['currentValue']) + strBtc + strPerShare + " - " + str(user['stocks'][stock['letter']]) + " (" + str(round(stock['currentValue'] * user['stocks'][stock['letter']], 2)) + strBtc + ")\n"
    return outputString + "```"
def getHelp():
    """Return the command reference shown by '/btc help'."""
    commands = [
        "/btc | Ledger sorted by yield\n",
        "/btc me | Information about you\n",
        "/btc list | Ledger sorted by money\n",
        "/btc join | Join the " + strBotCoin + " Ledger\n",
        "/btc intro | Rules and description\n",
        "/btc shop | Buy items\n",
        "/btc quote | Quote stocks\n",
        "/btc alliance | Enter an alliance\n",
        "/btc pay | pay another user\n",
        "/btc remove | leave the game :(",
    ]
    return "```\n" + strBotCoin + " Help:\n" + "".join(commands) + "```"
def getIntro():
    """Return the plain-text introduction / rules blurb for new players."""
    paragraphs = (
        "Welcome to " + strBotCoin + "!\n\n",
        "This game is a progression game, in the vein of Cookie Clicker or Bitcoin Billionaire.\n",
        "Every hour, on the hour, your yield is added to your total. You start with 1.0" + strBtc + " and a yield of 0.1" + strBtc + " per hour.\n",
        "Typing '/btc shop' will let you browse the shop, and will let you buy permanent upgrades, consumable boosts, or weapons to attack others.\n",
        "There's also a stock market; various stocks fluctuate up and down and can help you turn your money into more money! You can find the stock market through the shop. Happy investing!",
    )
    return "".join(paragraphs)
def getItemInfo(itemName):
    """Look up shop-item metadata by exact name.

    Returns [description, price, category, yieldDelta] for upgrades and
    consumables, [description, price, category, multiplier, effect] for
    weapons, and [] for an unknown name.  The 'Steroid' price depends on
    the caller's current yield, so it is computed lazily on request.
    """
    static_items = {
        "Toothpick": ["Toothpick", 2, "upgrade", 0.025],
        "Nothing": ["Nothing", 0, "upgrade", 0],
        "Q-Tip": ["Q-Tip", 0.1, "upgrade", 0.001],
        "Chisel": ["Chisel", 75.0, "upgrade", 1.25],
        "Pickaxe": ["Pickaxe", 200.0, "upgrade", 3.5],
        "Jackhammer": ["Jackhammer", 2500.0, "upgrade", 50],
        "Laser": ["Laser", 1000000, "upgrade", 22500],
        "Hammer": ["Hammer", "1.1x Target's Yield", "weapon", 1.1, 0],
        "Crowbar": ["Crowbar", "1.5x Target's Yield", "weapon", 1.5, -0.5],
    }
    if itemName == "Steroid":
        info = ["Steroid", floatRound(getUser(currentMessage.from_user.id)['myYield'] * 0.8), "consumable", 2]
    elif itemName in static_items:
        info = static_items[itemName]
    else:
        return []
    category = info[2]
    if category == "upgrade":
        return [info[0] + " (+" + str(round(float(info[3]), 3)) + " " + strPerHour + "): " + str(floatRound(float(info[1]))) + strBtc + "\n", info[1], info[2], info[3]]
    if category == "consumable":
        return [info[0] + " (yield x" + str(info[3]) + "): " + str(info[1]) + strBtc + "\n", info[1], info[2], info[3]]
    if category == "weapon":
        # desc, useless, category, multiplier, effect
        return [info[0] + " (yield x " + str(info[4]) + "): " + info[1], info[1], info[2], info[3], info[4]]
    return []
def shop(newCommand):
    """Render a shop page for '/btc shop [category]'.

    Returns [text, messageType, keyboardLayout].
    """
    keyboardLayout = []
    if len(newCommand) == 1:
        # Top-level shop menu.
        returnText = "Welcome to the shop! You have " + str(round(float(getUser(currentMessage.from_user.id)['money']), 3)) + strBtc + ".\n"
        returnText += "What kind of item do you want to buy?\n\n"
        returnText += "(Tip: you can type '/btc buy itemName x' where x is the number of that item you want)"
        keyboardLayout = [["/btc shop upgrades", "/btc shop consumables"], ["/btc shop weapons", "/btc shop stocks"], ["/btc exit"]]
        return [returnText, "keyboardnm", keyboardLayout]
    category = newCommand[1]
    if category == "upgrades":
        upgrade_names = ["Q-Tip", "Toothpick", "Chisel", "Pickaxe", "Jackhammer", "Laser"]
        returnText = "Upgrades! Page 1:\n"
        for item in upgrade_names:
            returnText += getItemInfo(item)[0]
        for item in upgrade_names:
            keyboardLayout.append(["/btc buy " + item + " 1"])
        keyboardLayout.append(["/btc exit"])
        return [returnText, "keyboardnm", keyboardLayout]
    if category == "consumables":
        returnText = "Consumables! Page 1:\n" + getItemInfo("Steroid")[0]
        keyboardLayout = [["/btc buy Steroid"], ["/btc exit"]]
        return [returnText, "keyboardnm", keyboardLayout]
    if category == "stocks":
        returnText = getPortfolio(True)
        for symbol in ("A", "B", "C", "D", "S"):
            keyboardLayout.append(["/btc stock " + symbol])
        keyboardLayout.append(["/btc exit"])
        # Stocks page uses the plain keyboard message type.
        return [returnText, "keyboard", keyboardLayout]
    if category == "weapons":
        returnText = "Out to deal some damage, eh?\n"
        returnText += getItemInfo("Hammer")[0] + "\n"
        returnText += getItemInfo("Crowbar")[0]
        keyboardLayout = [["/btc buy Hammer"], ["/btc buy Crowbar"], ["/btc exit"]]
        return [returnText, "keyboardnm", keyboardLayout]
    return ["Sorry! Not implemented yet.", "", keyboardLayout]
def stock(newCommand):
    """Handle '/btc stock SYM [buy|sell|history] [qty]'.

    With only a symbol, shows a buy/sell/history keyboard; with an action,
    performs it against the caller's portfolio.
    Returns [text, messageType] or [text, "keyboardnm", keyboardLayout].
    """
    user = getUser(currentMessage.from_user.id)
    if len(newCommand) > 1: #managing stock
        if len(newCommand) > 2: #actual buy or sell command has been sent
            stock = getStockBySymbol(newCommand[1])
            if newCommand[2] == "buy" and stock != None:
                print("We're buying!")
                # Last token is the quantity when parseable; defaults to 1.
                quantityPurchased = 1
                try:
                    quantityPurchased = int(newCommand[-1])
                except:
                    pass
                if quantityPurchased < 0:
                    quantityPurchased *= -1
                # Clamp the order to however many shares the user can afford.
                if stock['currentValue'] * quantityPurchased > float(user['money']):
                    quantityPurchased = int(float(user['money'])/stock['currentValue'])
                if float(user['money']) < stock['currentValue'] * quantityPurchased or quantityPurchased == 0: #can't afford
                    return ["I'm afraid you can't afford that.", ""]
                builtins.btcDB.update(user, money=user['money'] - (stock['currentValue'] * quantityPurchased))
                portfolio = user['stocks']
                portfolio[stock['letter']] += quantityPurchased
                builtins.btcDB.update(user, stocks=portfolio)
                builtins.btcDB.commit()
                return ["You bought " + str(quantityPurchased) + " shares of " + stock['name'] + " at " + str(stock['currentValue']) + strBtc + ". You now have " + str(user['stocks'][stock['letter']]) + " shares (" + str(floatRound(stock['currentValue'] * user['stocks'][stock['letter']])) + strBtc + ") in that stock.", ""]
            elif newCommand[2] == "sell" and stock != None:
                quantitySold = 1
                try:
                    quantitySold = int(newCommand[-1])
                except:
                    pass
                if quantitySold < 0:
                    quantitySold *= -1
                # Can't sell more shares than are held.
                if quantitySold > user['stocks'][stock['letter']]:
                    quantitySold = user['stocks'][stock['letter']]
                builtins.btcDB.update(user, money=floatRound(user['money'] + (stock['currentValue'] * quantitySold)))
                portfolio = user['stocks']
                portfolio[stock['letter']] -= quantitySold
                builtins.btcDB.update(user, stocks=portfolio)
                builtins.btcDB.commit()
                return ["You sold " + str(quantitySold) + " shares of " + stock['name'] + ", making " + str(floatRound(stock['currentValue'] * quantitySold)) + strBtc + ". You have " + str(user['stocks'][stock['letter']]) + " shares (" + str(floatRound(user['stocks'][stock['letter']] * stock['currentValue'])) + strBtc + ") left.", ""]
            elif newCommand[2] == "history" and stock != None:
                # NOTE(review): this branch returns a *list* (the last ~24
                # recorded values plus the latest), not a formatted string
                # like the other branches — confirm callers render it.
                return [stock['history'][max(0, len(stock['history']) - 24):-1] + [stock['history'][-1]], ""]
            else:
                return ["Invalid stock symbol.", ""]
        else:
            # Only a symbol given: offer buy / sell / history buttons.
            keyboardLayout = []
            message = "Would you like to buy or sell this stock, or view its history?"
            keyboardLayout.append(["/btc stock " + newCommand[1] + " buy 1"])
            keyboardLayout.append(["/btc stock " + newCommand[1] + " sell 1"])
            keyboardLayout.append(["/btc stock " + newCommand[1] + " history"])
            return [message, "keyboardnm", keyboardLayout]
    else:
        return ["Go to the shop to start buying stocks.", ""]
def buy(newCommand):
    """Handle '/btc buy item [quantity|target] [yes]'.

    Upgrades and consumables are purchased immediately (quantity clamped
    to what the buyer can afford).  The weapon targeting flow below its
    early return is currently dead code — weapons are disabled.
    Returns [text, messageType(, keyboardLayout)].
    """
    if len(newCommand) > 1: #buying something
        itemInfo = getItemInfo(newCommand[1])
        # Last token is the quantity when parseable; defaults to 1.
        quantity = 1
        try:
            quantity = int(newCommand[-1])
        except:
            pass
        if quantity < 0:
            quantity *= -1
        quantityPurchased = 0
        if itemInfo != []:
            user = getUser(currentMessage.from_user.id)
            if itemInfo[2] != "weapon":
                #if float(itemInfo[1]) * quantity > float(user['money']):
                # NOTE(review): a zero-priced item (e.g. "Nothing") would
                # divide by zero here — confirm it is never purchasable.
                quantity = int(float(user['money'])/float(itemInfo[1])) #always assume max now
                if float(user['money']) >= float(itemInfo[1]) * quantity and quantity != 0: #can afford
                    quantityPurchased = quantity
                else:
                    return ["Come back when you're a little mmmm...richer.", ""]
            if itemInfo[2] == "upgrade":
                builtins.btcDB.update(user, money=user['money'] - (itemInfo[1] * quantity))
                builtins.btcDB.update(user, myYield=round(user['myYield'] + (itemInfo[3] * quantity), 3))
                builtins.btcDB.commit()
                return ["You bought " + str(quantityPurchased) + " " + newCommand[1] + "(s)!\nYour yield is now " + str(user['myYield']) + strBtc + strPerHour+ "\nYou now have " + str(round(user['money'], 3)) + strBtc + ".", ""]
            elif itemInfo[2] == "consumable":
                # Only one positive multiplier may be pending at a time.
                if user['positiveYields'] == 0:
                    builtins.btcDB.update(user, money=floatRound(user['money'] - itemInfo[1]))
                    builtins.btcDB.update(user, positiveMultiplier=itemInfo[3])
                    builtins.btcDB.update(user, positiveYields=1)
                    builtins.btcDB.commit()
                    return ["You bought a " + newCommand[1] + ". Your next yield will be multiplied by " + str(itemInfo[3]) + ", making it " + str(floatRound(user['myYield'] * itemInfo[3])) + strBtc + ". You now have " + str(user['money']) + strBtc + ".", ""]
                else:
                    return ["You already have a consumable active.", ""]
            elif itemInfo[2] == "weapon":
                return ["Sorry, weapons have been disabled.", ""]
                # NOTE(review): everything below is unreachable — the return
                # above disables the weapon flow.  If re-enabled, note the
                # confirmation step reads targetUser['myYield'] *before* its
                # None check, which would raise for an unknown target.
                isConfirming = False
                target = ""
                try:
                    isConfirming = newCommand[3] == "yes"
                except:
                    pass
                try:
                    target = newCommand[2]
                except:
                    pass
                if target == "":
                    # Step 1: no target yet — list everyone else with the
                    # cost to attack them, richest yield first.
                    outputString = "\n```\nYou want a " + newCommand[1] + "? Select a target."
                    if itemInfo[4] < 0:
                        outputString += " (Just know, you can't bring anyone's balance below 0" + strBtc + ".)\n"
                    keyboardLayout = []
                    prefix = "/btc buy "
                    keyboardLayout.append(["/btc exit"])
                    K = list()
                    for u in builtins.btcDB:
                        K.append(u)
                    sortedK = sorted(K, key=lambda x: float(x['myYield']), reverse=True)
                    print("Sorted K?")
                    for userA in sortedK:
                        if user['username'] != userA['username']:
                            outputString += userA['username'] + " (" + userA['name'] + ")\n\t" + str(floatRound(userA['money'])) + strBtc + " - " + str(userA['myYield']) + strBtc + strPerHour + ": "
                            outputString += str(floatRound(userA['myYield'] * float(itemInfo[3]))) + strBtc + "\n"
                            keyboardLayout.append([prefix + newCommand[1] + " " + userA['username']])
                    outputString += "```"
                    return [outputString, "keyboard", keyboardLayout]
                elif target != "yes" and target != "" and not isConfirming:
                    # Step 2: target chosen — quote the cost and ask to confirm.
                    targetUser = getUserByUsername(newCommand[2])
                    if targetUser == None:
                        return ["How are you going to use a weapon on someone who doesn't exist?", ""]
                    cost = floatRound(targetUser['myYield'] * float(itemInfo[3]))
                    outputString = "You want to use a " + newCommand[1] + " on " + newCommand[2] + "?\nThat's going to cost you " + str(cost) + strBtc + ".\nYou sure?"
                    keyboardLayout = []
                    keyboardLayout.append(["/btc buy " + newCommand[1] + " " + newCommand[2] + " yes"])
                    keyboardLayout.append(["/btc exit"])
                    return [outputString, "keyboardnm", keyboardLayout]
                elif isConfirming:
                    # Step 3: confirmed — charge the attacker, flag the victim.
                    targetUser = getUserByUsername(newCommand[2])
                    cost = floatRound(targetUser['myYield'] * float(itemInfo[3]))
                    effect = itemInfo[4]
                    if targetUser == None:
                        return ["How are you going to use a weapon on someone who doesn't exist?", ""]
                    elif targetUser['username'] == user['username']:
                        return ["Sorry, I'm not going to let you use a weapon on yourself.", ""]
                    elif cost > user['money']:
                        return ["Why are you looking to attack others when you're so poor yourself?", ""]
                    builtins.btcDB.update(user, money=floatRound(user['money'] - cost))
                    if effect == 0:
                        if targetUser['zeroYields'] > 0:
                            return ["They've already had their yield set to 0 this hour.", ""]
                        builtins.btcDB.update(targetUser, zeroYields=1)
                    elif effect <= 0:
                        if targetUser['negativeYields'] > 0:
                            return ["They've already had their yield set negative this hour.", ""]
                        builtins.btcDB.update(targetUser, negativeYields=1)
                        builtins.btcDB.update(targetUser, negativeMultiplier=effect)
                    builtins.btcDB.commit()
                    if int(targetUser['chat_id']) != 0:
                        sendTextTo(user['name'] + " attacked you with a " + newCommand[1] + ", multiplying your next yield by " + str(effect) + ".", int(targetUser['chat_id']))
                    return ["You attacked " + targetUser['name'] + " with a " + newCommand[1] + ".", ""]
        else:
            return ["Invalid item name.", ""]
    else:
        return ["What're you buyin', stranger?", ""]
def pay(newCommand):
    """Transfer money between users: '/btc pay username amount'.

    Validates the recipient and the amount, applies the transfer, and
    notifies both sides in their private chats when known.
    """
    if len(newCommand) != 3:
        return ["USAGE: '/btc pay username amount'", ""]
    sender = getUser(currentMessage.from_user.id)
    recipient = getUserByUsername(str(newCommand[1]))
    try:
        amount = round(float(newCommand[2]), 3)
    except:
        return ["I couldn't parse " + str(newCommand[2]) + " as an amount of money.", ""]
    # Guard clauses: unknown recipient, overdraft, negative, or zero amounts.
    if recipient == None:
        return ["I couldn't find " + str(newCommand[1]) + " on my ledger.", ""]
    if amount > float(sender['money']):
        return ["You can't afford to give that kind of money away, " + sender['name'] + ".", ""]
    if amount < 0:
        return ["I'm afraid only you can repay your debt, " + sender['name'] + ".", ""]
    if amount == 0:
        return ["How helpful. " + sender['name'] + " is paying " + recipient['name'] + " nothing.", ""]
    builtins.btcDB.update(sender, money=sender['money'] - amount)
    builtins.btcDB.update(recipient, money=recipient['money'] + amount)
    builtins.btcDB.commit()
    # A chat_id of 0 means we have never seen that user in a private chat.
    if int(recipient['chat_id']) != 0:
        sendTextTo(sender['name'] + " paid you " + str(amount) + strBtc + ".", int(recipient['chat_id']))
    if int(sender['chat_id']) != 0:
        sendTextTo("You paid " + recipient['name'] + " " + str(amount) + strBtc + ".", int(sender['chat_id']))
    return [sender['name'] + " has paid " + recipient['name'] + " " + str(amount) + strBtc + ".", ""]
def updateUserChat(userID, chat_id):
    """Record the user's latest private chat id when it has changed."""
    target_id = int(userID)
    new_chat = int(chat_id)
    for record in builtins.btcDB:
        if int(record['id']) == target_id and new_chat != int(record['chat_id']):
            builtins.btcDB.update(record, chat_id=new_chat)
            builtins.btcDB.commit()
# -------- end helper function declarations ------- #
# Command dispatch.  The game has officially ended: only the winner
# (Dark_Shadowfall) can still play, AdamZG keeps admin/maintenance
# commands, and everyone else gets the end-of-game notice.
if currentMessage.from_user.username == "Dark_Shadowfall":
    if newCommand[0] == '' and int(chat_id) < 0:
        # Bare '/btc' in a group chat: point the user to a private chat.
        return ["Check the Ledger by typing /btc in a private chat with me. Join " + strBotCoin + " by typing '/btc join'!", ""]
    elif newCommand[0] == 'list':
        return [getLedger("money"), "markdown"]
    elif newCommand[0] == "join":
        if getUser(currentMessage.from_user.id) == None:
            # New player: zeroed portfolio entry for every stock symbol.
            defaultStockArray = {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0, 'H': 0, 'I': 0, 'J': 0, 'K': 0, 'L': 0, 'M': 0, 'N': 0, 'O': 0, 'P': 0, 'Q': 0, 'R': 0, 'S': 0, 'T': 0, 'U': 0, 'V': 0, 'W': 0, 'X': 0, 'Y': 0, 'Z': 0}
            username = currentMessage.from_user.username
            if username == "":
                # No Telegram username: synthesize one from name + id digits.
                username = currentMessage.from_user.first_name + str(int(currentMessage.from_user.id / 100000))
            name = currentMessage.from_user.first_name
            userLastInitial = ""
            try:
                userLastInitial = currentMessage.from_user.last_name[0].upper()
                name += " " + userLastInitial
            except:
                pass
            builtins.btcDB.insert(currentMessage.from_user.id, username, name, 1.0, 0.1, 0, 0, -1, 0, 0, 0, 000000, defaultStockArray, {})
            builtins.btcDB.commit()
            return [name + " has joined " + strBotCoin + "!\nType '/btc help' in a private chat with me to get started.", ""]
        else:
            return [currentMessage.from_user.first_name + ", you're already on the ledger.", ""]
    elif newCommand[0] == "pay":
        return pay(newCommand)
    elif getUser(currentMessage.from_user.id) == None:
        return ["Type '/btc join' to play " + strBotCoin + "!", ""]
    if int(chat_id) > 0: # if we're in a private chat
        try:
            print(newCommand)
        except:
            pass
        updateUserChat(currentMessage.from_user.id, chat_id) # make sure we know their chat_id
        if newCommand[0] == "":
            return [getLedger(), "markdown"]
        elif newCommand[0] == "help":
            return [getHelp(), "markdown"]
        elif newCommand[0] == "intro":
            return [getIntro(), ""]
        elif newCommand[0] == "shop":
            return shop(newCommand)
        elif newCommand[0] == "buy":
            return buy(newCommand)
        elif newCommand[0] == "stock":
            return stock(newCommand)
        elif newCommand[0] == "portfolio":
            return [getPortfolio(), "markdown"]
        elif newCommand[0] == "me":
            return [getMe(), "markdown"]
        elif newCommand[0] == "exit":
            return ["Bye!", ""]
        elif newCommand[0] == "quote":
            return [stockQuote(), "markdown"]
        elif newCommand[0] == "alliance":
            hasAlliance = False
            try:
                hasAlliance = newCommand[1] != ""
            except:
                pass
            if not hasAlliance:
                return ["Usage: '/btc alliance [Symbol]'\nType '/btc alliance remove' or '/btc alliance leave' to leave an alliance.", ""]
            # Alliance tags are the first three characters, upper-cased.
            sym = newCommand[1][0:3].upper()
            user = getUser(currentMessage.from_user.id)
            o = user['other']
            if newCommand[1] == "remove" or newCommand[1] == "leave":
                o['alliance'] = ""
                builtins.btcDB.update(user, other=o)
                builtins.btcDB.commit()
                return ["You have left your alliance.", ""]
            o['alliance'] = sym
            builtins.btcDB.update(user, other=o)
            builtins.btcDB.commit()
            return ["You are now part of the " + sym + " alliance.", ""]
        elif newCommand[0] == "remove":
            builtins.btcDB.delete(getUser(currentMessage.from_user.id))
            return["Sorry to see you go. :(", ""]
elif currentMessage.from_user.username == "AdamZG":
    # Admin / maintenance commands.
    if newCommand[0] == "commit":
        builtins.btcDB.commit()
        return ["Committed the BTC database.", ""]
    elif newCommand[0] == "debug":
        print(getUserByUsername("AdamZG")['stocks'])
        return ["Printed the thing?", ""]
    elif newCommand[0] == "migrate":
        # Copy every user into the new DB with a fresh stock portfolio.
        defaultStockArray = {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0, 'H': 0, 'I': 0, 'J': 0, 'K': 0, 'L': 0, 'M': 0, 'N': 0, 'O': 0, 'P': 0, 'Q': 0, 'R': 0, 'S': 0, 'T': 0, 'U': 0, 'V': 0, 'W': 0, 'X': 0, 'Y': 0, 'Z': 0}
        for user in builtins.btcDB:
            builtins.btcDB1.insert(user['id'], user['username'], user['name'], user['money'], user['myYield'], user['positiveMultiplier'], user['positiveYields'], user['zeroMultiplier'], user['zeroYields'], user['negativeMultiplier'], user['negativeYields'], user['chat_id'], defaultStockArray, {})
        builtins.btcDB1.commit()
        return ["Migrated.", ""]
    elif newCommand[0] == "resetStocks":
        defaultStockArray = {'A': 0, 'B': 0, 'C': 0, 'D': 0, 'E': 0, 'F': 0, 'G': 0, 'H': 0, 'I': 0, 'J': 0, 'K': 0, 'L': 0, 'M': 0, 'N': 0, 'O': 0, 'P': 0, 'Q': 0, 'R': 0, 'S': 0, 'T': 0, 'U': 0, 'V': 0, 'W': 0, 'X': 0, 'Y': 0, 'Z': 0}
        for user in builtins.btcDB:
            builtins.btcDB.update(user, stocks=defaultStockArray.copy())
        builtins.btcDB.commit()
        return ["reset stocks. economies that exist: not ours", ""]
    elif newCommand[0] == "iS":
        defineStocks()
        return ["We did the stocks", ""]
    elif newCommand[0] == "uS":
        # Run the stock update i times (default 1).
        i = 1
        try:
            i = int(newCommand[-1])
        except:
            pass
        for a in range(0, i):
            print(a)
            updateStocks()
        return ["Updated stocks " + str(i) + " times", ""]
    elif newCommand[0] == "give":
        user = getUserByUsername(newCommand[1])
        builtins.btcDB.update(user, money=user['money'] + int(newCommand[2]))
        sendTextTo("Admin gave you " + newCommand[2] + strBtc, user['chat_id'])
        return ["Wow, cheater.", ""]
    elif newCommand[0] == "updateStockRange":
        return updateStockRange(newCommand[1], float(newCommand[2]), float(newCommand[3]))
    elif newCommand[0] == "updateStockRangeSize":
        return updateStockRangeSize(newCommand[1], float(newCommand[2]))
    elif newCommand[0] == "getStockInfo":
        return getStockInfo(newCommand[-1])
    else:
        print("Not valid private chat command.")
        return ["", "no"]
else:
    if (newCommand[0] == "ledger"):
        return [getLedger(), "markdown"]
    else:
        return [strBotCoin + " has officially ended. Matt Mahoney, aka @Dark_Shadowfall, is the undisputed winner. A new game will be coming to replace it at some point in the future. Thanks for playing!\n\nType /btc ledger to see the ledger as it is preserved.", ""]
#('id', 'username', 'name', 'money', 'myYield', 'positiveMultiplier', 'positiveYields', 'zeroMultiplier', 'zeroYields', 'negativeMultiplier', 'negativeYields', 'chat_id')
| {
"repo_name": "agincel/AdamTestBot",
"path": "src/atbBTC.py",
"copies": "1",
"size": "37331",
"license": "mit",
"hash": -3603995628688494000,
"line_mean": 53.3516320475,
"line_max": 340,
"alpha_frac": 0.5044093602,
"autogenerated": false,
"ratio": 3.936167967925723,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49405773281257226,
"avg_score": null,
"num_lines": null
} |
"""all the things that we need"""
from functools import partial
import itertools
from random import random
from kivy.clock import Clock
from kivy.properties import (BooleanProperty, ListProperty, NumericProperty,
ReferenceListProperty, StringProperty)
from kivy.vector import Vector
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.widget import Widget
class AimLine(Widget):
    """Line drawn while the player aims, from the touch-down point to the drag point."""
    # Both endpoints start at the touch-down position; end_pt follows the drag.
    start_pt = ListProperty([0, 0])
    end_pt = ListProperty([0, 0])

    def __init__(self, start_pt, **kwargs):
        super(AimLine, self).__init__(**kwargs)
        self.start_pt = start_pt
        self.end_pt = start_pt
class BlackHole(Widget):
    """A circular gravity well; points within radius ``r`` collide with it."""
    r = NumericProperty(25.)
    mass = NumericProperty(50.)

    def __init__(self, **kwargs):
        super(BlackHole, self).__init__(**kwargs)

    def collide_point(self, x, y):
        """Return whether (x, y) lies within ``r`` of the hole's position.

        Bug fix: always returns a bool instead of implicitly returning
        None on a miss (truthiness is unchanged for existing callers).
        """
        return (Vector(x, y) - Vector(self.pos)).length() < self.r
class GoalPoint(Widget):
    """The small moving target the player must hit with a shot."""
    r = NumericProperty(5.)
    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)
    velocity = ReferenceListProperty(velocity_x, velocity_y)

    def __init__(self, **kwargs):
        super(GoalPoint, self).__init__(**kwargs)

    def collide_point(self, x, y):
        """Return whether (x, y) lies within ``r`` of this point.

        Bug fix: always returns a bool instead of implicitly returning
        None on a miss (truthiness is unchanged for existing callers).
        """
        return (Vector(x, y) - Vector(self.pos)).length() < self.r

    def move(self):
        """Advance one tick along the current velocity."""
        self.x += self.velocity_x
        self.y += self.velocity_y
class MainMenu(BoxLayout):
    """Start-screen menu: new game, optional level restart, instructions."""

    def __init__(self, fling_board, current_level=0, **kwargs):
        super(MainMenu, self).__init__(**kwargs)
        # (label, event name, handler) for each button, in display order.
        entries = [('start new game', 'on_release', fling_board.start_game)]
        if current_level:
            entries.append(('restart level', 'on_release', fling_board.restart_level))
        entries.append(('instructions', 'on_press', fling_board.display_instructions))
        for label, event, handler in entries:
            button = Button(text=label)
            button.bind(**{event: handler})
            self.add_widget(button)
class Shot(Widget):
    """The projectile the player flings; bounces off walls, pulled by holes."""
    r = NumericProperty(10.)
    mass = NumericProperty(1.)
    velocity_x = NumericProperty(0)
    velocity_y = NumericProperty(0)
    velocity = ReferenceListProperty(velocity_x, velocity_y)

    def __init__(self, **kwargs):
        super(Shot, self).__init__(**kwargs)
        # Debounce state so we don't re-bounce off the same wall every tick.
        self.last_bounced_ticks = 0
        self.last_bounced_wall = None

    def move(self):
        """Advance one tick along the current velocity."""
        self.x += self.velocity_x
        self.y += self.velocity_y
        self.last_bounced_ticks += 1

    def gravitate_towards(self, body):
        """Pull the shot toward ``body`` (needs .pos and .mass), inverse-square falloff."""
        velocity_v = Vector(self.velocity)
        gravity_v = Vector(body.pos) - Vector(self.pos)
        self.velocity = velocity_v + \
            (gravity_v * 1. / gravity_v.length2()) * body.mass

    def collide_wall(self, wall):
        """Reflect the shot's velocity off ``wall`` when touching it.

        Corner hits deflect about the normal from the shot to the corner;
        edge hits deflect about the edge direction.
        """
        # don't collide with this wall if we just did so; this
        # eliminates a huge class of weird behaviors
        if self.last_bounced_wall == wall and self.last_bounced_ticks < 5:
            return
        deflect_edge = None
        velocity_v = Vector(self.velocity)
        pos_v = Vector(self.pos)
        edge_points = zip(wall.quad_points[0::2], wall.quad_points[1::2])
        edges = [
            (edge_points[0], edge_points[1]),
            (edge_points[1], edge_points[2]),
            (edge_points[2], edge_points[3]),
            (edge_points[3], edge_points[0]),
        ]
        # Corner check: pick the corner nearest the shot.  Bug fixes: the
        # original compared against dist(closest_point, point) instead of
        # dist(pos, closest_point), and then deflected off the leaked loop
        # variable `point` rather than the chosen closest corner.
        closest_point = None
        for point in edge_points:
            dist = (pos_v - Vector(point)).length()
            if dist < self.r:
                if closest_point is None or \
                        dist < (pos_v - Vector(closest_point)).length():
                    closest_point = point
        if closest_point is not None:
            # take the deflection edge to be the normal of here to the corner
            deflect_edge = (pos_v - Vector(closest_point)).rotate(90)
        else:
            for edge in edges:
                e0 = Vector(edge[0])
                e1 = Vector(edge[1])
                ortho_v = (e0 - e1).rotate(90).normalize()
                dist_v = Vector.line_intersection(self.pos, pos_v + ortho_v,
                                                 edge[0], edge[1])
                # dist_v will be None if we happen to be parallel
                if not dist_v:
                    continue
                dist_from_edge = (pos_v - dist_v).length()
                # if the shot touches the wall here
                if min(e0[0], e1[0]) <= dist_v[0] <= max(e0[0], e1[0]) and \
                        min(e0[1], e1[1]) <= dist_v[1] <= max(e0[1], e1[1]) and \
                        dist_from_edge < self.r + (wall.thickness / 2.):
                    # keep the nearest touching edge
                    if not deflect_edge or dist_from_edge < dist_from_deflect_edge:
                        deflect_edge = e0 - e1
                        dist_from_deflect_edge = dist_from_edge
        if deflect_edge:
            self.velocity = velocity_v.rotate(-2 * velocity_v.angle(deflect_edge))
            self.last_bounced_wall = wall
            self.last_bounced_ticks = 0
class ShotCounter(Label):
    """Label displaying remaining shots as 'shots: remaining / max'."""
    num_shots = NumericProperty(0)
    max_shots = NumericProperty(1)
    format = StringProperty('shots: %s / %s')

    def __init__(self, font_size=15, **kwargs):
        super(ShotCounter, self).__init__(**kwargs)
        self.font_size = font_size
        # Re-render whenever either counter changes.
        self.bind(num_shots=self.update_text, max_shots=self.update_text)
        self.update_text()

    def increment(self):
        """Consume one shot."""
        self.num_shots += 1

    def update_text(self, *args):
        """Refresh the label from the counters."""
        remaining = self.max_shots - self.num_shots
        self.text = self.format % (remaining, self.max_shots)
class Stars(Widget):
    """Background starfield: three layers of randomly placed points."""
    points = ListProperty([(0, 0), (0, 0), (0, 0)])

    def __init__(self, number_of_stars, **kwargs):
        super(Stars, self).__init__(**kwargs)
        # Defer placement until the widget has a parent so its size is known.
        Clock.schedule_once(partial(self.add_stars, number_of_stars), -1)

    def add_stars(self, number_of_stars, dt):
        """Scatter the three star layers across the parent's area."""
        width = self.parent.width
        height = self.parent.height

        def scatter(count):
            # Flat [x0, y0, x1, y1, ...] list of random positions.
            return list(itertools.chain(*[
                (random() * width, random() * height)
                for i in xrange(count)]))

        # Progressively sparser layers (integer division is intentional).
        self.points[0] = scatter(number_of_stars)
        self.points[1] = scatter(number_of_stars / 3)
        self.points[2] = scatter(number_of_stars / 50)
class Wall(Widget):
    """A thick line segment rendered as a quad, with a padded bounding box."""
    start_point = ListProperty([0, 0])
    end_point = ListProperty([0, 0])
    thickness = NumericProperty(4.)
    quad_points = ListProperty([0, 0, 0, 0, 0, 0, 0, 0])

    def __init__(self, **kwargs):
        super(Wall, self).__init__(**kwargs)
        self.update_points()

    def update_points(self):
        """Recompute the quad corners and bounding box from the endpoints."""
        sx, sy = self.start_point[0], self.start_point[1]
        ex, ey = self.end_point[0], self.end_point[1]
        direction = Vector(sx - ex, sy - ey)
        # Half-thickness offset orthogonal to the wall direction.
        offset = direction.normalize().rotate(90) * self.thickness / 2.
        self.quad_points = [
            sx + offset[0], sy + offset[1],
            sx - offset[0], sy - offset[1],
            ex - offset[0], ey - offset[1],
            ex + offset[0], ey + offset[1],
        ]
        # Bounding box padded by thickness on every side.
        xs = self.quad_points[::2]
        ys = self.quad_points[1::2]
        self.x = min(xs) - self.thickness
        self.y = min(ys) - self.thickness
        self.width = max(xs) - min(xs) + 2 * self.thickness
        self.height = max(ys) - min(ys) + 2 * self.thickness
| {
"repo_name": "wilsaj/flingy",
"path": "widgets.py",
"copies": "1",
"size": "8007",
"license": "bsd-3-clause",
"hash": -2543282086618853400,
"line_mean": 34.2731277533,
"line_max": 103,
"alpha_frac": 0.5597602098,
"autogenerated": false,
"ratio": 3.5010931351114998,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45608533449114996,
"avg_score": null,
"num_lines": null
} |
# All the times here are in hours
# These are the times for non-pgo
import requests
import logging
from mozci.mozci import query_repo_url_from_buildername, query_repo_name_from_buildername, \
trigger_all_talos_jobs, trigger_range, set_query_source
from mozci.query_jobs import TreeherderApi
from mozci.platforms import build_talos_buildernames_for_repo
from mozci.utils import transfer
from thclient import TreeherderClient
from store_alerts import getAlerts, updateAlert
from utils import fetch_json
from mozci.utils.misc import setup_logging
from managed_settings import TBPL_TESTS
LOG = setup_logging(logging.INFO)
# Use memory-saving mode
transfer.MEMORY_SAVING_MODE = True
TIME_TO_BUILD = 2
TIME_TO_TEST = 1
PENDING_TIME = 1
TIME_TO_WAIT = 2
CYCLE_TIME = 0.25
JSON_PUSHES = "%(repo_url)s/json-pushes"
WEEK = 604800
TWO_WEEKS = 1209600
SIXTY_DAYS = 5184000
SIGNATURE_URL = "https://treeherder.mozilla.org/api/project/%s/performance/signatures/?interval=%s"
PERFORMANCE_DATA = "https://treeherder.mozilla.org/api/project/%s/performance/data/?interval=%s&signatures=%s"
OPTION_COLLECTION_HASH = "https://treeherder.mozilla.org/api/optioncollectionhash/"
SUCCESS = 0
DRY_RUN = False
REVERSE_TESTS = ['dromaeo_css', 'dromaeo_dom', 'v8_7', 'canvasmark']
# TODO: Ask about e10s
TREEHERDER_PLATFORM = {
'linux32': 'Ubuntu HW 12.04',
'linux64': 'Ubuntu HW 12.04 x64',
'windowsxp': 'Windows XP 32-bit',
'windows7-32': 'Windows 7 32-bit',
'windows8-64': 'Windows 8 64-bit',
'osx-10-10': 'Rev5 MacOSX Yosemite 10.10',
'osx-10-10-5': 'Rev7 MacOSX Yosemite 10.10.5',
'android-4-0-armv7-api11': 'Android 4.0 Tegra',
}
# Checked
def checkMerge(revision, buildername):
"This function returns whether an alert is a merge or not."
# Query Revision on pushlog, and check if all the authors of changesets are same.
# We are not doing on the number of changesets, because a merge can have just 2 changesets
# whereas a patch author can have 6-7 changeset.
repo_url = query_repo_url_from_buildername(buildername)
url = "%s?changeset=%s&version=2&full=1" % (JSON_PUSHES % {"repo_url": repo_url}, revision)
req = requests.get(url).json()
push_id = req["pushes"].keys()[0]
changesets = req["pushes"][push_id]["changesets"]
author = ""
for changeset in changesets:
if not author:
author = changeset["author"]
if changeset["author"] != author:
return True
return False
# Checked
def getRevisions(revision, buildername, start=0, end=0):
"This function returns list of revisions for an alert."
# Query Revision on pushlog, and get the revision. This should be easy.
repo_url = query_repo_url_from_buildername(buildername)
url = "%s?changeset=%s&version=2" % (JSON_PUSHES % {"repo_url": repo_url}, revision)
req = requests.get(url).json()
pushid = int(req["pushes"].keys()[0])
startID = pushid + start - 1
endID = pushid + end
url = "%s?startID=%s&endID=%s&version=2&tipsonly=1" % (JSON_PUSHES % {"repo_url": repo_url}, startID, endID)
req = requests.get(url).json()
response = req["pushes"]
revisionList = []
for push in sorted(response.keys()):
revisionList.append(response[push]["changesets"][0][0:40])
return revisionList
# Checked
def getSuccessfulJobs(revision, buildername):
"This function returns the number of data points for an alert."
# Query TH client get_jobs method to get all jobs for a particular buildername
# Then Query mozci function: https://github.com/armenzg/mozilla_ci_tools/blob/master/mozci/query_jobs.py#L187
# to get the status of job for each job
treeherder_api = TreeherderApi()
repo_name = query_repo_name_from_buildername(buildername)
matching_jobs = treeherder_api.get_matching_jobs(repo_name, revision, buildername)
successful_jobs = 0
for job in matching_jobs:
status = treeherder_api.get_job_status(job)
if status == SUCCESS:
successful_jobs += 1
return successful_jobs
def compare(test, buildername, revision, previous_revision):
    """Compare talos data of *revision* against *previous_revision*.

    Looks up the performance signature matching *test* and the platform
    encoded in *buildername*, fetches the raw data points for both
    revisions, and returns the regression as a percentage of the previous
    revision's mean (positive means a regression). Returns 0 when either
    revision has no data points yet.

    NOTE(review): if no signature matches, ``test_signature`` (and possibly
    ``typeOfTest``) is never assigned and the code below raises NameError --
    presumably callers only reach this with known test/platform pairs;
    verify.
    """
    repo_name = query_repo_name_from_buildername(buildername)
    # Using TWO_WEEKS as interval, may change it afterwards
    signature_request_url = SIGNATURE_URL % (repo_name, TWO_WEEKS)
    signatures = fetch_json(signature_request_url)
    options_collection_hash_list = fetch_json(OPTION_COLLECTION_HASH)
    # Scan all signatures for the one matching this suite + platform.
    for signature, value in signatures.iteritems():
        # Skip processing subtests. They are identified by 'test' key in the dicitonary.
        if 'test' in value:
            continue
        # Ignoring e10s here.
        # TODO: Revisit this later
        if TBPL_TESTS[test]['testname'].lower() == value['suite'].lower() and \
                TREEHERDER_PLATFORM[value["machine_platform"]] in buildername and \
                'test_options' not in value:
            test_signature = signature
        else:
            continue
        # Resolve the option-collection hash to a build type name (pgo/opt/...).
        hash_signature = value['option_collection_hash']
        for key in options_collection_hash_list:
            if hash_signature == key["option_collection_hash"]:
                typeOfTest = key["options"][0]["name"]
                break
        if typeOfTest == 'pgo' and typeOfTest not in buildername:
            # if pgo, it should be present in buildername
            continue
        elif typeOfTest == 'opt':
            # if opt, nothing present in buildername
            break
        else:
            # We do not run talos on any branch other than pgo and opt.
            continue
    # Using TWO_WEEKS as interval, may change it afterwards
    req = fetch_json(PERFORMANCE_DATA % (repo_name, TWO_WEEKS, test_signature))
    performance_data = req[test_signature]
    # Map each revision to its resultset id so we can pick out its data points.
    treeherder_client = TreeherderClient()
    revision_resultset_id = treeherder_client.get_resultsets(repo_name, revision=revision)[0]["id"]
    previous_revision_resultset_id = treeherder_client.get_resultsets(repo_name, revision=previous_revision)[0]["id"]
    revision_perfdata = []
    previous_revision_perfdata = []
    for data in performance_data:
        if data["result_set_id"] == revision_resultset_id:
            revision_perfdata.append(data["value"])
        elif data["result_set_id"] == previous_revision_resultset_id:
            previous_revision_perfdata.append(data["value"])
    if revision_perfdata and previous_revision_perfdata:
        mean_revision_perfdata = sum(revision_perfdata) / float(len(revision_perfdata))
        mean_previous_revision_perfdata = sum(previous_revision_perfdata) / float(len(previous_revision_perfdata))
    else:
        # No data for one side yet: report "no change" so the caller retries later.
        print "previous_revision_perfdata: %s" % previous_revision_perfdata
        print "revision_perfdata: %s" % revision_perfdata
        return 0
    if test in REVERSE_TESTS:
        # lower value results in regression
        return (mean_revision_perfdata - mean_previous_revision_perfdata) * 100.0 / mean_previous_revision_perfdata
    else:
        # higher value results in regression
        return (mean_previous_revision_perfdata - mean_revision_perfdata) * 100.0 / mean_previous_revision_perfdata
def main():
    """Advance every open alert one step through the bisection pipeline.

    Stages (persisted on the alert record via updateAlert):
      0  triage -- merges and pgo jobs go straight to a human (-1)
      1  trigger backfill jobs around the alerting revision
      2  verify the backfill, then compare revisions to find the bad one
      3  trigger all talos jobs on the bad revision and its parent
      4  verify the all-talos runs completed
      5  automation done; a sheriff takes over
     -1  needs manual (human) inspection

    ``alert['loop']`` counts polling retries while waiting for jobs; when
    it exceeds the time budget the alert is handed to a human.
    """
    alerts = getAlerts()
    for alert in alerts:
        # new alert
        LOG.info("Running alert for: [%s, %s, %s]" % (alert['test'], alert['buildername'], alert['revision']))
        if alert['stage'] == 0:
            LOG.info("We are in stage 0.")
            if checkMerge(alert['revision'], alert['buildername']) or 'pgo' in alert['buildername']:
                LOG.info("We are ignoring alert: %s since it is either a merge or a pgo job." % alert['test'])
                alert['stage'] = -1 # We need to have manual inspection in this case.
                alert['user'] = 'human'
                updateAlert(alert['id'], alert['revision'], alert['buildername'], alert['test'],
                            alert['stage'], alert['loop'], alert['user'])
            else:
                alert['stage'] = 1
        # trigger jobs for backfill
        if alert['stage'] == 1:
            LOG.info("We are in stage 1, and going to backfill jobs.")
            revisionList = getRevisions(alert['revision'], alert['buildername'], start=-2, end=2)
            # Setting Treeherder as the source for querying.
            set_query_source("treeherder")
            trigger_range(alert['buildername'], revisionList, times=6, dry_run=DRY_RUN)
            alert['stage'] = 2
            # We want some time interval between stage 1 and 2, so we exit.
            updateAlert(alert['id'], alert['revision'], alert['buildername'], alert['test'],
                        alert['stage'], alert['loop'], alert['user'])
            continue
        # verify jobs for backfill
        if alert['stage'] == 2:
            LOG.info("We are in stage 2, and going to verify if jobs are backfilled.")
            revisionList = getRevisions(alert['revision'], alert['buildername'], start=-2, end=2)
            for revision in revisionList:
                dataPoints = getSuccessfulJobs(revision, alert['buildername'])
                # If dataPoints are less than 6, it means that builds/jobs are still running.
                if dataPoints < 6:
                    print "data points <6 for revision: %s" % revision
                    # We wait for 6 hours for all triggered tests to complete,
                    # And if they don't then we mark them for manual intervention/
                    alert['loop'] += 1
                    if alert['loop'] > (TIME_TO_BUILD + TIME_TO_TEST + PENDING_TIME) / CYCLE_TIME:
                        LOG.info("The jobs did not complete backfilling in time, assigning for human inspection.")
                        alert['stage'] = -1
                        alert['user'] = 'human'
                    else:
                        LOG.info("The jobs have not completed backfilling. Looping back to stage 1.")
                        alert['stage'] = 1
                    break
            # A stage change above means we must persist and wait for the next cycle.
            if alert['stage'] != 2:
                print "updating alert and then continue, not stage 2"
                updateAlert(alert['id'], alert['revision'], alert['buildername'], alert['test'],
                            alert['stage'], alert['loop'], alert['user'])
                continue
            badRevisions = []
            # Reset the loop for upcoming stages
            alert['loop'] = 0
            # Compare each adjacent pair of revisions; < -2.0% counts as a regression.
            for i in range(1, len(revisionList)):
                print "getting results for revision number: %s" % i
                results = compare(alert['test'], alert['buildername'], revisionList[i], revisionList[i-1])
                print "compare returned: %s" % results
                if results < -2.0:
                    print "appending bad revision to list: %s"% revisionList[i]
                    badRevisions.append(revisionList[i])
            if len(badRevisions) != 1:
                LOG.info("There are too many bad revisions: %s for alert %s on buildername %s, "
                         "assigning for human inspection." % (badRevisions, alert['test'], alert['buildername']))
                alert['stage'] = -1 # too noisy, something bad happened
                alert['user'] = 'human'
                print "too many bad revisions, update alert to human"
                updateAlert(alert['id'], alert['revision'], alert['buildername'], alert['test'],
                            alert['stage'], alert['loop'], alert['user'])
                continue
            if checkMerge(badRevisions[0], alert['buildername']):
                LOG.info("The bad revision %s identified for alert %s on buildername %s is a merge, "
                         "assigning for human inspection" % (badRevisions[0], alert['test'], alert['buildername']))
                alert['stage'] = -1 # A merge revision is a bad revision, manually inspect
                alert['user'] = 'human'
            if alert['revision'] != badRevisions[0]:
                LOG.info("Alert_Manager misreported the bad revision. The actual bad revision is %s "
                         "for alert %s on %s buildername." % (badRevisions[0], alert['test'], alert['buildername']))
                alert['revision'] = badRevisions[0] # we misreported initially, change the actual regression revision
            print "setting stage = 3!"
            alert['stage'] = 3
        # Trigger all talos stage
        if alert['stage'] == 3:
            LOG.info("We are in stage 3, and going to trigger all_talos jobs.")
            repo_name = query_repo_name_from_buildername(alert['buildername'])
            # Setting Treeherder as the source for querying.
            set_query_source("treeherder")
            trigger_all_talos_jobs(repo_name, alert['revision'], times=6, dry_run=DRY_RUN)
            previousRevision = getRevisions(alert['revision'], alert['buildername'], start=-1, end=-1)[0]
            trigger_all_talos_jobs(repo_name, previousRevision, times=6, dry_run=DRY_RUN)
            alert['stage'] = 4
            updateAlert(alert['id'], alert['revision'], alert['buildername'], alert['test'],
                        alert['stage'], alert['loop'], alert['user'])
            continue
        # Verify All talos stage is completed
        if alert['stage'] == 4:
            LOG.info("We are in stage 4, and going to verify if all_talos ran successfully.")
            previousRevision = getRevisions(alert['revision'], alert['buildername'], start=-1, end=-1)[0]
            repo_name = query_repo_name_from_buildername(alert['buildername'])
            all_buildernames = build_talos_buildernames_for_repo(repo_name)
            for revision in [alert['revision'], previousRevision]:
                for buildername in all_buildernames:
                    dataPoints = getSuccessfulJobs(revision, buildername)
                    if dataPoints < 6:
                        # We wait for 8 hours for all talos tests to complete,
                        # And if they don't then we mark them for manual intervention
                        alert['loop'] += 1
                        if alert['loop'] > (TIME_TO_BUILD + TIME_TO_TEST + PENDING_TIME + TIME_TO_WAIT) / CYCLE_TIME:
                            LOG.info("The all talos jobs for alert %s on %s revision did not complete in time, "
                                     " assigning for human inspection." % (alert['test'], alert['revision']))
                            alert['stage'] = -1
                            alert['user'] = 'human'
                        else:
                            alert['stage'] = 3
                        break
                # Break out of the outer revision loop too once the stage changed.
                if alert['stage'] != 4:
                    break
            if alert['stage'] != 4:
                updateAlert(alert['id'], alert['revision'], alert['buildername'], alert['test'],
                            alert['stage'], alert['loop'], alert['user'])
                continue
            alert['stage'] = 5 # final stage, sheriff will check for this.
            alert['user'] = 'human'
            LOG.info("All automated parts are complete.")
            updateAlert(alert['id'], alert['revision'], alert['buildername'], alert['test'], alert['stage'], alert['loop'], alert['user'])
# Script entry point: run the alert pipeline once per invocation.
if __name__ == "__main__":
    main()
| {
"repo_name": "jmaher/alert_manager",
"path": "alerts.py",
"copies": "1",
"size": "15324",
"license": "mpl-2.0",
"hash": 3666538438271824400,
"line_mean": 47.4936708861,
"line_max": 134,
"alpha_frac": 0.6030409815,
"autogenerated": false,
"ratio": 3.9761286974571872,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5079169678957187,
"avg_score": null,
"num_lines": null
} |
"""All the views for DrawWrite."""
# Imports {{{
import logging
from base64 import b64decode
from itertools import zip_longest
from django.core.files.base import ContentFile
from django.db import IntegrityError
from django.http import HttpResponseBadRequest, HttpResponseNotAllowed, JsonResponse
from django.shortcuts import redirect, render
from drawwrite.forms import CreateGameForm, JoinGameForm
from drawwrite.models import Chain, Game, DrawLink, Player, WriteLink
from . import services
from .bracefmt import BraceFormatter as __
# }}}
LOG = logging.getLogger(__name__)
# index {{{
def index(request):
    """Render the front page with its create-game and join-game forms.

    Any error title/description left in the session by another view is
    popped (shown once) and passed to the template.
    """
    LOG.debug("enter index")
    # TODO errors shouldn't get added by title and description, but by number.
    # Then I should look up the title and description from that number.
    context = {
        'create_form': CreateGameForm(),
        'join_form': JoinGameForm(),
        'error_title': request.session.pop('error_title', None),
        'error_description': request.session.pop('error_description', None),
    }
    return render(request, 'drawwrite/index.html', context)
# }}}
# join_game {{{
def join_game(request):
    """
    Process the form a user POSTs when they want to join a game.

    On success, redirects to the play view for the newly created player.
    On any failure, stores an error title/description in the session and
    redirects to the index.
    """
    LOG.debug('enter')
    # Send all non-POSTs to the index.
    if request.method != 'POST':
        LOG.info(__('attempted non-supported method {0}', request.method))
        request.session['error_title'] = 'Unsupported method'
        # BUGFIX: a trailing comma previously made this a 1-tuple instead of
        # a string, so the template rendered the tuple's repr.
        request.session['error_description'] = (
            'You\'re not allowed to send {0} requests to that endpoint.'.format(request.method)
        )
        return redirect('drawwrite:index')
    # Get the form from the POSTed data.
    form = JoinGameForm(request.POST)
    # Invalid forms redirect to the index with an error.
    if not form.is_valid():
        LOG.debug(__(
            'name {0} or gamename {1} invalid',
            form.data['username'],
            form.data['gamename'],
        ))
        request.session['error_title'] = 'Invalid input'
        request.session['error_description'] = ' '.join((
            'Your Name and the Game Name must only contain letters, numbers,',
            'underscores, and hyphens.',
        ))
        return redirect('drawwrite:index')
    # Valid forms are processed.
    gamename = form.cleaned_data['gamename']
    username = form.cleaned_data['username']
    # Get the game. Only games that have not started may be joined. On error,
    # add error objects to the session and redirect to index.
    # TODO extract this, possibly to services.py
    games = Game.objects.filter( #pylint: disable=no-member
        name=gamename,
    ).filter(
        started=False,
    )
    if len(games) > 1:
        LOG.error(__('somehow, two games with name {0} are being created', gamename))
        request.session['error_title'] = 'Non-unique game name'
        request.session['error_description'] = 'Could not find a unique game for you to join'
        return redirect('drawwrite:index')
    if len(games) < 1:
        LOG.error(__('tried to join non-existant game {0}', gamename))
        request.session['error_title'] = 'Non-existent game'
        request.session['error_description'] = ' '.join((
            'The game that you attempted to join, {0},'.format(gamename),
            'does not exist. Please check that you entered it correctly.',
        ))
        return redirect('drawwrite:index')
    game = games[0]
    LOG.debug(__('got game for player {0}', username))
    # Add a player to the game. On error, add error objects to the session and
    # redirect to index.
    player = None
    try:
        player = services.new_player(game, username, False)
    except services.GameAlreadyStarted:
        LOG.debug(__('could not add {0} to game {1}', username, game.name))
        request.session['error_title'] = 'Game started'
        request.session['error_description'] = ' '.join((
            'The game that you attempted to join has already started. Please',
            'either join a different game or start your own game.',
        ))
        return redirect('drawwrite:index')
    # TODO don't assume that all IntegrityError's mean that the game name is
    #   already taken. There are plenty of other explanations that I'm
    #   silencing by doing this.
    except IntegrityError:
        LOG.exception(__(
            'player with {0} already exists in game {1}',
            username,
            gamename,
        ))
        request.session['error_title'] = 'Player exists'
        request.session['error_description'] = ' '.join((
            'The player name that you entered is already in use in the game',
            'that you are trying to join. Please choose a new player name',
            'and try again.',
        ))
        return redirect('drawwrite:index')
    # Redirect to that game's page.
    LOG.debug('exiting join game view')
    return redirect('drawwrite:play', player.pk)
# }}}
# create_game {{{
def create_game(request):
    """
    Create a game according to the values the user specified in the form.

    Creates the game plus its first (creator) player, then redirects to
    the play view. On failure, stores an error title/description in the
    session and redirects to the index.
    """
    LOG.debug('entering create game view')
    # Send all non-POSTs to the index.
    if request.method != 'POST':
        LOG.debug(__('attempted non-supported method {0}', request.method))
        return redirect('drawwrite:index')
    # Get the form from the POSTed data.
    form = CreateGameForm(request.POST)
    # Invalid forms redirect to the index with an error.
    if not form.is_valid():
        LOG.debug(__(
            'form error: {0}',
            form.errors,
        ))
        request.session['error_title'] = 'Invalid input'
        request.session['error_description'] = ' '.join((
            'Your Name and the Game Name must only contain letters, numbers,',
            'underscores, and hyphens.',
        ))
        return redirect('drawwrite:index')
    # Valid forms are processed.
    gamename = form.cleaned_data['gamename']
    username = form.cleaned_data['username']
    # Create game. On error, add error objects to the session and redirect
    # to index.
    # TODO handle other errors that could happen?
    game = services.new_game(gamename)
    if game is None:
        request.session['error_title'] = 'Game being created'
        request.session['error_description'] = (
            'The game you are trying to join, {0}, is already being created'
        ).format(gamename)
        # BUGFIX: previously fell through with game=None and crashed inside
        # services.new_player; redirect to the index like every other error.
        return redirect('drawwrite:index')
    # Create a player for that game. On error, add error objects to the
    # session and redirect to index.
    player = None
    try:
        player = services.new_player(game, username, True)
    # TODO don't assume that all IntegrityError's mean that the user name is
    #   already taken. There are plenty of other explanations that I'm
    #   silencing by doing this.
    except services.NameTaken as exception:
        LOG.error('player name already taken')
        request.session['error_title'] = 'Player name taken'
        request.session['error_description'] = exception.message()
        return redirect('drawwrite:index')
    except IntegrityError:
        LOG.error(__('a new game has an invalid player {0}', username))
        request.session['error_title'] = 'Player name taken'
        request.session['error_description'] = ' '.join((
            'The player name that you entered, {0},'.format(username),
            ' is already taken for the game that you entered. Please',
            'try a different one.',
        ))
        return redirect('drawwrite:index')
    # Redirect to that game's page.
    LOG.debug('exiting create game view')
    return redirect('drawwrite:play', player.pk)
# }}}
# play {{{
def play(request, player_id):
    """
    The page on which players play the game.

    Routes the player to the correct screen for the current game state:
    the pre-game waiting room, the finished-game view, a between-round
    waiting page, or the form to add the next link to a chain.
    """
    LOG.debug('enter play view')
    # Get their player from the database using the id in the path. On error,
    # set error session attributes and redirect to index.
    player = None
    try:
        player = Player.objects.get(pk=player_id) #pylint: disable=no-member
    except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('non-existant player attempt: {0}', player_id))
        request.session['error_title'] = 'Player Does Not Exist'
        request.session['error_description'] = ' '.join((
            'You attempted to access a non-existant player. Plase do not',
            'do that.',
        ))
        return redirect('drawwrite:index')
    LOG.debug(__('successfully retreived player {0}', player_id))
    # Get the game from the player object.
    game = player.game
    LOG.debug(__('successfully retreived game for player {0}', player_id))
    # If the game hasn't started, show the player the waiting screen.
    if not game.started:
        LOG.debug(__('game for player {0} has not started', player_id))
        # Get a list of all players in this game.
        all_players = Player.objects.filter(game=game) #pylint: disable=no-member
        LOG.debug(__('got players in game with player {0}', player_id))
        # Find the creator of the game. BUGFIX: this loop previously reused
        # the name 'player', clobbering the requesting player so 'created'
        # below was computed for the last player in the game instead.
        creator = None
        for candidate in all_players:
            if candidate.was_creator:
                creator = candidate
        # NOTE(review): assumes every game has a creator; creator.name
        # raises AttributeError otherwise -- confirm games can't lose theirs.
        LOG.debug(__('creator of game is {0}', creator.name))
        # Render the waiting screen with all of those players.
        LOG.debug(__('showing player {0} the waiting screen', player_id))
        return render(request, 'drawwrite/waiting.html', {
            'all_players' : all_players,
            'player_id' : player_id,
            'created' : player.was_creator,
            'creator' : creator,
        })
    LOG.debug(__('game for player {0} has started', player_id))
    # The game has started. Check if it's also finished.
    if game.round_num >= game.num_players:
        LOG.debug('game finished, redirect to view page')
        return redirect('drawwrite:showGame', game.pk)
    # The game has started, so decide whether to show the waiting page.
    if player.current_round == game.round_num + 1:
        # If the player's round equals the number of players in the game,
        # show the 'wait for game completion' game.
        if player.current_round == player.game.num_players:
            LOG.debug('show game finished waiting page')
            return render(request, 'drawwrite/gameWaiting.html', {
                'game_id' : game.pk,
            })
        # If the game isn't finished, show the waiting page for the next round.
        LOG.debug('show waiting page, this user is done with current round')
        return render(request, 'drawwrite/roundWaiting.html', {
            'player_id' : player_id,
        })
    # If the player's round doesn't equal the game's round, something is fishy.
    elif player.current_round != game.round_num:
        LOG.error(__(
            'player {0} has round {1}, while game {2} has round {3}',
            player_id,
            player.current_round,
            game.pk,
            game.round_num,
        ))
        # TODO come up with a better thing to show the user in this case
        return HttpResponseBadRequest()
    # Figure out which position's chain this player should have access to next.
    chain_pos_to_get = (player.position + game.round_num) % game.num_players
    LOG.debug(__('player {0} needs position {1}s chain', player_id, chain_pos_to_get))
    # Get the owner of the chain that player will edit.
    chain_owner = None
    try:
        chain_owner = Player.objects.filter( #pylint: disable=no-member
            game=game,
        ).get(
            position=chain_pos_to_get,
        )
    except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__(
            'player with game {0} and pos {1} does not exist',
            game.pk,
            chain_pos_to_get,
        ))
        request.session['error_title'] = 'Player Does Not Exist'
        request.session['error_description'] = ' '.join((
            'You tried to get a player that does not exist. Sorry for',
            'the inconvenience.',
        ))
        return redirect('drawwrite:index')
    LOG.debug(__('got chain_owner ({0}) for player {1}', chain_owner.pk, player_id))
    # Get the chain for the player, creating it on first access.
    chain = None
    try:
        chain = Chain.objects.get(player=chain_owner) #pylint: disable=no-member
    except Chain.DoesNotExist: #pylint: disable=no-member
        # Make a chain for this player.
        chain = services.new_chain(player)
    LOG.debug(__('got chain for user {0}', player_id))
    # If the chain has no links, show the player a screen to enter their first
    # text link.
    if chain.next_link_position == 0:
        LOG.debug(__('returning page for first link for user {0}', player_id))
        return render(request, 'drawwrite/chainAdd.html', {
            'prev_link_type': '',
            'prev_link': None,
            'player_id': player_id,
        })
    # Figure out what type of link the player needs to make: links alternate,
    # even positions are writes and odd positions are drawings.
    prev_link_pos = chain.next_link_position - 1
    prev_link = None
    prev_link_type = ''
    if prev_link_pos % 2 == 0:
        prev_link_type = 'write'
        prev_link = WriteLink.objects.get( #pylint: disable=no-member
            chain=chain,
            link_position=prev_link_pos
        )
    else:
        prev_link_type = 'draw'
        prev_link = DrawLink.objects.get( #pylint: disable=no-member
            chain=chain,
            link_position=prev_link_pos
        )
    # Show the player a page to add the next link type.
    LOG.debug('exit add to chain view')
    return render(request, 'drawwrite/chainAdd.html', {
        'prev_link_type': prev_link_type,
        'prev_link': prev_link,
        'player_id': player_id,
    })
# }}}
# check_game_start {{{
def check_game_start(request, player_id): #pylint: disable=unused-argument
    """Check if the passed player's game has started.

    Returns JSON {'started': bool, 'names': [...]}; the name list is only
    populated while the game is still waiting for players.
    """
    LOG.debug(__('checking game status for player {0}', player_id))
    # Look up the player; unknown ids are a client error.
    try:
        player = Player.objects.get(pk=player_id) #pylint: disable=no-member
    except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('non-existant player: {0}', player_id))
        return HttpResponseBadRequest()
    LOG.debug(__('successfully found player {0}', player_id))
    if player.game.started:
        # A started game needs no roster; the client moves on.
        return JsonResponse({'started': True, 'names': []})
    LOG.debug(__('player {0} game has not started', player_id))
    # Not started yet: report the current roster so the waiting page updates.
    all_players = Player.objects.filter(game=player.game) #pylint: disable=no-member
    LOG.debug(__('got all players in game with {0}', player_id))
    names = [entrant.name for entrant in all_players]
    LOG.debug('made list of all player names')
    return JsonResponse({'started': False, 'names': names})
# }}}
# start_game {{{
def start_game(request, player_id):
    """Start the game that the player identified by player_id belongs to."""
    LOG.debug(__('starting game of player {0}', player_id))
    # Starting a game mutates state, so only POST is accepted.
    if request.method != 'POST':
        LOG.error('attempted to GET to start game')
        return HttpResponseBadRequest()
    # Look up the player; unknown ids are a client error.
    try:
        player = Player.objects.get(pk=player_id) #pylint: disable=no-member
    except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('non-existant player {0}', player_id))
        return HttpResponseBadRequest()
    LOG.debug(__('successfully got player {0}', player_id))
    # Flip the game to 'started', then send the player to the play view.
    services.start_game(player.game)
    LOG.debug('set players game to started')
    LOG.debug('redirecting to play')
    return redirect('drawwrite:play', player_id)
# }}}
# create_link {{{
def create_link(request, player_id):
    """
    Accept POST data and create a new link in the chain that player_id should
    be adding to.

    Even link positions are write links (expects 'description' in the POST);
    odd positions are draw links (expects base64 PNG data in 'drawing').
    """
    LOG.debug(__('creating link for player {0}', player_id))
    # Only accept POSTs
    if not request.method == 'POST':
        LOG.error('should have POSTed data')
        return HttpResponseNotAllowed(['POST'])
    LOG.debug(__('got POST data for player {0}', player_id))
    # Get the player.
    player = None
    try:
        player = Player.objects.get(pk=player_id) #pylint: disable=no-member
    except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('non-existant player {0}', player_id))
        request.session['error_title'] = 'Player Does Not Exist'
        request.session['error_description'] = ' '.join((
            'The player that you tried to create a link for does not exist.',
            'We apologize for the inconvenience.',
        ))
        return redirect('drawwrite:index')
    LOG.debug(__('got the player with pk {0}', player_id))
    # Calculate the position of the player that this player_id is adding to.
    chain_owner_pos = (player.position + player.game.round_num) % player.game.num_players
    LOG.debug(__('player {0} needs chain of player {1}', player_id, chain_owner_pos))
    # Get the owner of the chain this player is adding to.
    try:
        chain_owner = Player.objects.filter( #pylint: disable=no-member
            game=player.game,
        ).get(
            position=chain_owner_pos,
        )
    except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__(
            'player with game {0} and position {1} does not exist',
            player.game.pk,
            chain_owner_pos,
        ))
        request.session['error_title'] = 'Player Does Not Exist'
        # BUGFIX: was stored under 'description', which no template reads;
        # every other handler uses 'error_description'.
        request.session['error_description'] = ' '.join((
            'You attempted to access a player that does not exist. We are',
            'sorry for the inconvenience.',
        ))
        return redirect('drawwrite:index')
    LOG.debug(__('successfully got chain owner for player {0}', player_id))
    # Get the player's chain.
    chain = None
    try:
        chain = Chain.objects.get(player=chain_owner) #pylint: disable=no-member
    except Chain.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('player {0} should have a chain but does not', player_id))
        request.session['error_title'] = 'Player Has No Chain'
        request.session['error_description'] = ' '.join((
            'The player that you tried to create a link for does not have',
            'a chain, but that should not be possible. We apologize for',
            'the inconvenience.',
        ))
        return redirect('drawwrite:index')
    LOG.debug(__('got the chain for player with pk {0}', player_id))
    # Figure out what type of link to make.
    if chain.next_link_position % 2 == 0:
        # The POST data needs to have the 'description' field or something
        # is wrong.
        if 'description' not in request.POST.keys():
            LOG.error(' '.join((
                'should be making write link, but did not receive any',
                'writing in the POSTed data',
            )))
            return HttpResponseBadRequest()
        LOG.debug(__('making new write link for player {0}', player_id))
        # Make the new write link.
        services.new_write_link(chain, request.POST.get('description'), player)
    else:
        # The POST data needs to have the 'drawing' field or something
        # is wrong.
        if 'drawing' not in request.POST.keys():
            LOG.error(' '.join((
                'should be making a draw link, but did not receive any',
                'drawing data in the POSTed data',
            )))
            return HttpResponseBadRequest()
        LOG.debug('got image data to save')
        # Make sure the data starts with 'data:image/png;base64,'
        data_string = request.POST.get('drawing')
        if not data_string.startswith('data:image/png;base64,'):
            LOG.error(__('got bad image data: started with {0}', data_string[0:15]))
            return HttpResponseBadRequest()
        LOG.debug('got good(ish) image data')
        # Shave off the data-URL prefix; only the base64 payload remains.
        data_string = data_string.split(';base64,')[1]
        LOG.debug('split off the ;base64, stuff')
        # Decode the base64 data.
        binary_data = b64decode(data_string)
        LOG.debug('decoded base64 data')
        # Make a file-like object out of the data.
        file_name = "link-{0}-{1}.png".format(player_id, chain.next_link_position)
        file_obj = ContentFile(binary_data, name=file_name)
        LOG.debug(__('made file with name {0}', file_name))
        # Make the draw link.
        services.new_draw_link(chain, file_obj, player)
        LOG.debug(__('created draw link, file has name {0}', file_name))
    # Increase the 'num_players_finished_current_round' of this game.
    services.player_finished(player)
    # Redirect to 'play'.
    return redirect('drawwrite:play', player_id)
# }}}
# check_round_done {{{
def check_round_done(request, player_id):
    """
    Check if the round of the current game is completed.

    Returns JSON: {'finished': True} once the game's round number has
    caught up to this player's; otherwise {'finished': False,
    'still_playing': [...]} naming the players still working.
    """
    LOG.debug(__('checking if round is completed for player {0}', player_id))
    # Look up the player; unknown ids bounce back to the index with an error.
    try:
        player = Player.objects.get(pk=player_id) #pylint: disable=no-member
    except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error('attempted to get player that does not exist')
        request.session['error_title'] = 'Player Does Not Exist'
        request.session['error_description'] = ' '.join((
            'The player that you attempted to get does not exist. We are',
            'sorry for the inconvenience.',
        ))
        return redirect('drawwrite:index')
    LOG.debug(__('successfully got player {0}', player_id))
    # The player may advance once the game's round equals theirs.
    if player.game.round_num == player.current_round:
        LOG.debug('round is completed')
        return JsonResponse({'finished': True})
    LOG.debug('round is not completed')
    # Anyone whose round is behind this player's is still working.
    try:
        laggards = Player.objects.filter( #pylint: disable=no-member
            game=player.game,
        ).filter(
            current_round__lt=player.current_round,
        )
    except BaseException as exception:
        LOG.error(exception)
        raise
    LOG.debug('got list of players still playing')
    names_still_playing = [laggard.name for laggard in laggards]
    LOG.debug('got list of names of players still playing')
    return JsonResponse({
        'finished': False,
        'still_playing': names_still_playing,
    })
# }}}
# check_game_done {{{
def check_game_done(request, game_id): #pylint: disable=unused-argument
    """Check if the game with the passed game_id is finished.

    Returns JSON: {'finished': True} once the round counter reaches the
    player count; otherwise {'finished': False, 'still_playing': [...]}
    with the names of players still on the final round.
    """
    LOG.debug(__('checking if game {0} is done', game_id))
    # Get the game.
    game = None
    try:
        game = Game.objects.get(pk=game_id) #pylint: disable=no-member
    except Game.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('tried to get non-existant game {0}', game_id))
        # TODO better error stuff
        # BUGFIX: previously returned the class object itself; Django needs
        # an HttpResponse *instance*.
        return HttpResponseBadRequest()
    LOG.debug(__('got game {0}', game_id))
    # The game is done when every player has completed every round.
    if game.round_num == game.num_players:
        return JsonResponse({'finished': True})
    # Get a list of players whose current round equals the game's round.
    try:
        players_still_playing = Player.objects.filter( #pylint: disable=no-member
            game=game,
        ).filter(
            current_round=game.round_num,
        )
    except BaseException as exception:
        LOG.error(exception)
        raise
    LOG.debug('got list of players still playing')
    # Turn that list of players into a list of names.
    names_still_playing = [p.name for p in players_still_playing]
    LOG.debug('created list of names of players still playing')
    # Return an object saying that the game is not done.
    return JsonResponse({
        'finished': False,
        'still_playing': names_still_playing,
    })
# }}}
# show_game {{{
def show_game(request, game_id):
    """Show a completed game: every player's chain on one page."""
    LOG.debug(__('showing game {0}', game_id))
    # Look up the game; unknown ids are a client error.
    try:
        game = Game.objects.get(pk=game_id) #pylint: disable=no-member
    except Game.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('tried to get non-existant game {0}', game_id))
        # TODO better error here
        return HttpResponseBadRequest()
    LOG.debug(__('got game {0}', game_id))
    # Every participant appears on the results page.
    participants = Player.objects.filter(game=game) #pylint: disable=no-member
    return render(request, 'drawwrite/game.html', {
        'players': participants,
        'game_name': game.name,
    })
# }}}
# show_chain {{{
def show_chain(request, player_id):
    """Show a completed chain: the player's links in play order."""
    LOG.debug(__('showing chain of player {0}', player_id))
    # Look up the player; unknown ids are a client error.
    try:
        player = Player.objects.get(pk=player_id) #pylint: disable=no-member
    except Player.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('tried to get non-existant player {0}', player_id))
        # TODO better error messege
        return HttpResponseBadRequest()
    LOG.debug(__('got player {0}', player_id))
    # Look up the player's chain.
    try:
        chain = Chain.objects.get(player=player) #pylint: disable=no-member
    except Chain.DoesNotExist: #pylint: disable=no-member
        LOG.error(__('tried to get non-existant chain for player {0}', player_id))
        # TODO better error message
        return HttpResponseBadRequest()
    LOG.debug(__('got chain for player {0}', player_id))
    # Fetch both kinds of links for the chain.
    write_links = WriteLink.objects.filter(chain=chain) #pylint: disable=no-member
    draw_links = DrawLink.objects.filter(chain=chain) #pylint: disable=no-member
    # Interleave write/draw links in play order; zip_longest pads the
    # shorter sequence with None, which we drop.
    links = [
        link
        for pair in zip_longest(write_links, draw_links)
        for link in pair
        if link is not None
    ]
    LOG.debug(__('made list of all links for player {0}', player_id))
    # Render the chain view.
    return render(request, 'drawwrite/chain.html', {
        'links': links,
        'player': player,
    })
# }}}
# get_available_games {{{
def get_available_games(request):
    """Return (as JSON) the names of games that can still be joined."""
    # A game is joinable only while it has not yet been started.
    names = [
        game.name
        for game in Game.objects.filter(started=False)  #pylint: disable=no-member
    ]
    LOG.debug('returning list of available games')
    return JsonResponse({'options': names})
# }}}
| {
"repo_name": "RMMoreton/drawwrite",
"path": "drawwritesite/drawwrite/views.py",
"copies": "1",
"size": "27825",
"license": "mit",
"hash": 8370612826824721000,
"line_mean": 36.2489959839,
"line_max": 96,
"alpha_frac": 0.6201617251,
"autogenerated": false,
"ratio": 3.9648047876888004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50849665127888,
"avg_score": null,
"num_lines": null
} |
"""All the weapon pickups.
Weapons are unusual, they don't directly specify the models.
Instead it's specified in the weapon script.
"""
from srctools._class_resources import *
# Resource registrations for weapon entities.  Each res(...) call registers
# an entity classname together with the extra materials/models/sounds/
# particles it needs and any other entities it can spawn ("includes").
# NOTE(review): the commented-out classnames appear to be weapons from other
# Source games/branches with no resources registered here — confirm before
# enabling any of them.
res('weapon_357')
# res('weapon_adrenaline_spawn')
# res('weapon_ak47')
res('weapon_alyxgun')
# res('weapon_ammo_spawn')
res('weapon_annabelle')
res('weapon_ar2',
    includes='prop_combine_ball env_entity_dissolver',
    )
# res('weapon_aug')
# res('weapon_autoshotgun_spawn')
# res('weapon_awp')
# res('weapon_bizon')
res('weapon_bugbait',
    sound("Weapon_Bugbait.Splat"),
    includes="npc_grenade_bugbait",
    )
# res('weapon_c4')
# res('weapon_chainsaw_spawn')
res('weapon_citizenpackage')
res('weapon_citizensuitcase')
res('weapon_crossbow',
    mat('sprites/blueflare1.vmt'),
    mat('sprites/light_glow02_noz.vmt'),
    sound('Weapon_Crossbow.BoltHitBody'),
    sound('Weapon_Crossbow.BoltHitWorld'),
    sound('Weapon_Crossbow.BoltSkewer'),
    includes='crossbow_bolt',
    )
res('weapon_crowbar')
res('weapon_cubemap')
# res('weapon_cz75a')
# res('weapon_deagle')
# res('weapon_decoy')
# res('weapon_defibrillator_spawn')
# res('weapon_elite')
# res('weapon_famas')
# res('weapon_first_aid_kit')
# res('weapon_first_aid_kit_spawn')
# res('weapon_fiveseven')
# res('weapon_flashbang')
res('weapon_frag',
    sound('WeaponFrag.Throw'),
    sound('WeaponFrag.Roll'),
    includes='npc_grenade_frag',
    )
# res('weapon_g3sg1')
# res('weapon_galilar')
# res('weapon_gascan_spawn')
# res('weapon_glock')
# res('weapon_grenade_launcher')
# res('weapon_grenade_launcher_spawn')
# res('weapon_healthshot')
# res('weapon_hegrenade')
# res('weapon_hkp2000')
# res('weapon_hunting_rifle_spawn')
# res('weapon_incgrenade')
# res('weapon_item_spawn')
# res('weapon_knife')
# res('weapon_m249')
# res('weapon_m4a1')
# res('weapon_m4a1_silencer')
# res('weapon_mac10')
# res('weapon_mag7')
# res('weapon_melee_spawn')
# res('weapon_molotov')
# res('weapon_molotov_spawn')
# res('weapon_mp7')
# res('weapon_mp9')
# res('weapon_negev')
# res('weapon_nova')
# res('weapon_p250')
# res('weapon_p90')
# res('weapon_pain_pills_spawn')
# The gravity gun also needs the mega/super variant's view model and sounds.
res('weapon_physcannon',
    mat("materials/sprites/orangelight1.vmt"),
    mat("materials/sprites/glow04_noz.vmt"),
    mat("materials/sprites/orangeflare1.vmt"),
    mat("materials/sprites/orangecore1.vmt"),
    mat("materials/sprites/orangecore2.vmt"),
    mat("materials/sprites/lgtning_noz.vmt"),
    mat("materials/sprites/blueflare1_noz.vmt"),
    mat("materials/effects/fluttercore.vmt"),
    mdl("models/weapons/v_superphyscannon.mdl"),
    sound("Weapon_PhysCannon.HoldSound"),
    sound("Weapon_Physgun.Off"),
    sound("Weapon_MegaPhysCannon.DryFire"),
    sound("Weapon_MegaPhysCannon.Launch"),
    sound("Weapon_MegaPhysCannon.Pickup"),
    sound("Weapon_MegaPhysCannon.Drop"),
    sound("Weapon_MegaPhysCannon.HoldSound"),
    sound("Weapon_MegaPhysCannon.ChargeZap"),
    )
# res('weapon_pipe_bomb_spawn')
res('weapon_pistol')
# res('weapon_pistol_magnum_spawn')
# res('weapon_pistol_spawn')
# res('weapon_portalgun')
# res('weapon_pumpshotgun_spawn')
# res('weapon_revolver')
# res('weapon_rifle_ak47_spawn')
# res('weapon_rifle_desert_spawn')
# res('weapon_rifle_m60_spawn')
# res('weapon_rifle_spawn')
res('weapon_rpg',
    sound("Missile.Ignite"),
    sound("Missile.Accelerate"),
    mat("materials/effects/laser1_noz.vmt"),
    mat("materials/sprites/redglow1.vmt"),
    includes="rpg_missile",
    )
# res('weapon_sawedoff')
# res('weapon_scar20')
# res('weapon_scavenge_item_spawn')
# res('weapon_sg556')
res('weapon_shotgun')
# res('weapon_shotgun_chrome_spawn')
# res('weapon_shotgun_spas_spawn')
res('weapon_smg1', includes="grenade_ar2")
# res('weapon_smg_silenced_spawn')
# res('weapon_smg_spawn')
# res('weapon_smokegrenade')
# res('weapon_sniper_military_spawn')
# res('weapon_spawn')
# res('weapon_ssg08')
res('weapon_striderbuster',
    mdl("models/magnusson_device.mdl"),
    sound("Weapon_StriderBuster.StickToEntity"),
    sound("Weapon_StriderBuster.Detonate"),
    sound("Weapon_StriderBuster.Dud_Detonate"),
    sound("Weapon_StriderBuster.Ping"),
    mat("materials/sprites/orangeflare1.vmt"),
    mat("materials/sprites/lgtning.vmt"),
    part("striderbuster_attach"),
    part("striderbuster_attached_pulse"),
    part("striderbuster_explode_core"),
    part("striderbuster_explode_dummy_core"),
    part("striderbuster_break_flechette"),
    part("striderbuster_trail"),
    part("striderbuster_shotdown_trail"),
    part("striderbuster_break"),
    part("striderbuster_flechette_attached"),
    includes="env_citadel_energy_core sparktrail",
    aliases="prop_stickybomb",
    )
res('weapon_stunstick',
    sound("Weapon_StunStick.Activate"),
    sound("Weapon_StunStick.Deactivate"),
    )
# res('weapon_tagrenade')
# res('weapon_taser')
# res('weapon_tec9')
# res('weapon_ump45')
| {
"repo_name": "TeamSpen210/srctools",
"path": "srctools/_class_resources/weapons.py",
"copies": "1",
"size": "4838",
"license": "unlicense",
"hash": -6384353297203735000,
"line_mean": 28.5,
"line_max": 60,
"alpha_frac": 0.6903679206,
"autogenerated": false,
"ratio": 2.689271817676487,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.38796397382764874,
"avg_score": null,
"num_lines": null
} |
"""all things PeerAssets protocol."""
from enum import Enum
from operator import itemgetter
from typing import List, Optional, Generator, cast, Callable
from pypeerassets.kutil import Kutil
from pypeerassets.paproto_pb2 import DeckSpawn as deckspawnproto
from pypeerassets.paproto_pb2 import CardTransfer as cardtransferproto
from pypeerassets.exceptions import (
InvalidCardIssue,
OverSizeOPReturn,
RecieverAmountMismatch,
)
from pypeerassets.card_parsers import parsers
from pypeerassets.networks import net_query
class IssueMode(Enum):
    """Bit-flag issue modes from the PeerAssets deck-spawn specification.

    Each value mirrors the ``issue_mode`` field of the protobuf deck spawn
    message; SUBSCRIPTION and SINGLET are bitwise combinations of base flags.
    """

    NONE = 0x00
    # https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L19
    # No issuance allowed.
    CUSTOM = 0x01
    # https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L20
    # Custom issue mode, verified by client aware of this.
    ONCE = 0x02
    # https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L21
    # A single card_issue transaction allowed.
    MULTI = 0x04
    # https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L22
    # Multiple card_issue transactions allowed.
    MONO = 0x08
    # https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L23
    # All card transaction amounts are equal to 1.
    UNFLUSHABLE = 0x10
    # https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L24
    # The UNFLUSHABLE issue mode invalidates any card transfer transaction except for the card issue transaction.
    # Meaning that only the issuing entity is able to change the balance of a specific address.
    # To correctly calculate the balance of a PeerAssets addres a client should only consider the card transfer
    # transactions originating from the deck owner.
    SUBSCRIPTION = 0x34  # SUBSCRIPTION (34 = 20 | 4 | 10)
    # https://github.com/PeerAssets/rfcs/blob/master/0001-peerassets-transaction-specification.proto#L26
    # The SUBSCRIPTION issue mode marks an address holding tokens as subscribed for a limited timeframe. This timeframe is
    # defined by the balance of the account and the time at which the first cards of this token are received.
    # To check validity of a subscription one should take the timestamp of the first received cards and add the address' balance to it in hours.
    SINGLET = 0x0a  # SINGLET is a combination of ONCE and MONO (2 | 8)
    # Singlet deck, one MONO card issunce allowed
class Deck:

    def __init__(self, name: str,
                 number_of_decimals: int,
                 issue_mode: int,
                 network: str,
                 production: bool,
                 version: int,
                 asset_specific_data: bytes=None,
                 issuer: str="",
                 issue_time: int=None,
                 id: str=None,
                 tx_confirmations: int=None) -> None:
        '''
        Initialize deck object, load from dictionary Deck(**dict) or initilize
        with kwargs Deck("deck", 3, "ONCE")
        '''
        # NOTE(review): parameter `id` shadows the builtin, but it is part of
        # the public keyword interface (Deck(**dict)) and must stay.
        self.version = version  # protocol version
        self.name = name  # deck name
        self.issue_mode = issue_mode  # deck issue mode (IssueMode bit flags)
        self.number_of_decimals = number_of_decimals
        self.asset_specific_data = asset_specific_data  # optional metadata for the deck
        self.id = id  # deck spawn txid; doubles as the P2TH private key below
        self.issuer = issuer
        self.issue_time = issue_time
        self.tx_confirmations = tx_confirmations
        self.network = network
        self.production = production

    @property
    def p2th_address(self) -> Optional[str]:
        '''P2TH address of this deck (None until the deck has an id).'''
        if self.id:
            return Kutil(network=self.network,
                         privkey=bytearray.fromhex(self.id)).address
        else:
            return None

    @property
    def p2th_wif(self) -> Optional[str]:
        '''P2TH privkey in WIF format (None until the deck has an id).'''
        if self.id:
            return Kutil(network=self.network,
                         privkey=bytearray.fromhex(self.id)).wif
        else:
            return None

    @property
    def metainfo_to_protobuf(self) -> bytes:
        '''encode deck into protobuf

        Raises OverSizeOPReturn when the serialized message would not fit
        into this network's OP_RETURN limit.
        '''
        deck = deckspawnproto()
        deck.version = self.version
        deck.name = self.name
        deck.number_of_decimals = self.number_of_decimals
        deck.issue_mode = self.issue_mode
        if self.asset_specific_data:
            # Accept either str or bytes; protobuf requires bytes.
            if not isinstance(self.asset_specific_data, bytes):
                deck.asset_specific_data = self.asset_specific_data.encode()
            else:
                deck.asset_specific_data = self.asset_specific_data
        if deck.ByteSize() > net_query(self.network).op_return_max_bytes:
            raise OverSizeOPReturn('''
Metainfo size exceeds maximum of {max} bytes supported by this network.'''
                                   .format(max=net_query(self.network)
                                           .op_return_max_bytes))
        return deck.SerializeToString()

    @property
    def metainfo_to_dict(self) -> dict:
        '''encode deck into dictionary (only protocol-level fields)'''
        r = {
            "version": self.version,
            "name": self.name,
            "number_of_decimals": self.number_of_decimals,
            "issue_mode": self.issue_mode
        }
        if self.asset_specific_data:
            r.update({'asset_specific_data': self.asset_specific_data})
        return r

    def to_json(self) -> dict:
        '''export the Deck object to json-ready format'''
        d = self.__dict__
        # Include the derived P2TH key so it round-trips through storage.
        d['p2th_wif'] = self.p2th_wif
        return d

    @classmethod
    def from_json(cls, json: dict):
        '''load the Deck object from json'''
        # Drop the derived field added by to_json(); it is not a ctor kwarg.
        try:
            del json['p2th_wif']
        except KeyError:
            pass
        return cls(**json)

    def __str__(self) -> str:
        r = []
        for key in self.__dict__:
            r.append("{key}='{value}'".format(key=key, value=self.__dict__[key]))
        return ', '.join(r)
class CardBundle:
    '''On the low level, cards come in bundles.
    A single transaction can contain dozens of cards.
    CardBundle is an object which is a precursor to CardTransfer.'''

    def __init__(self,
                 deck: Deck,
                 sender: str,
                 txid: str,
                 blockhash: str,
                 blocknum: int,
                 blockseq: int,
                 timestamp: int,
                 tx_confirmations: int,
                 vouts: list=None,
                 ) -> None:
        self.deck = deck
        self.txid = txid
        self.sender = sender
        # Fix: the original used a mutable default argument (vouts: list=[]),
        # so every bundle created without vouts shared one list object.
        # A None sentinel gives each instance its own fresh list.
        self.vouts = [] if vouts is None else vouts
        if blockhash:
            # Confirmed transaction: record its position in the chain.
            self.blockhash = blockhash
            self.blockseq = blockseq
            self.timestamp = timestamp
            self.blocknum = blocknum
            self.tx_confirmations = tx_confirmations
        else:
            # Unconfirmed transaction: zero out all block metadata.
            self.blockhash = ""
            self.blockseq = 0
            self.blocknum = 0
            self.timestamp = 0
            self.tx_confirmations = 0

    def to_json(self) -> dict:
        '''export the CardBundle object to json-ready format'''
        return self.__dict__
class CardTransfer:

    def __init__(self, deck: Deck,
                 receiver: list=[],
                 amount: List[int]=[],
                 version: int=1,
                 blockhash: str=None,
                 txid: str=None,
                 sender: str=None,
                 asset_specific_data: bytes=None,
                 number_of_decimals: int=None,
                 blockseq: int=None,
                 cardseq: int=None,
                 blocknum: int=None,
                 timestamp: int=None,
                 tx_confirmations: int=None,
                 type: str=None) -> None:
        '''CardTransfer object, used when parsing card_transfers from the blockchain
        or when sending out new card_transfer.
        It can be initialized by passing the **kwargs and it will do the parsing,
        or it can be initialized with passed arguments.

        * deck - instance of Deck object
        * receiver - list of receivers
        * amount - list of amounts to be sent, must be integer
        * version - protocol version, default 1
        * txid - transaction ID of CardTransfer
        * sender - transaction sender
        * blockhash - block ID where the tx was first included
        * blockseq - order in which tx was serialized into block
        * timestamp - unix timestamp of the block where it was first included
        * tx_confirmations - number of confirmations of the transaction
        * asset_specific_data - extra metadata
        * number_of_decimals - number of decimals for amount, inherited from Deck object
        : type: card type [CardIssue, CardTransfer, CardBurn]

        Raises RecieverAmountMismatch when receiver/amount lengths differ and
        InvalidCardIssue when the issuer sends cards to its own address.
        '''
        # NOTE(review): receiver/amount are mutable default arguments ([]).
        # They are never mutated here, but callers holding a reference could
        # mutate the shared default — consider None sentinels.
        if not len(receiver) == len(amount):
            raise RecieverAmountMismatch
        self.version = version
        self.network = deck.network
        self.deck_id = deck.id
        self.deck_p2th = deck.p2th_address
        self.txid = txid
        self.sender = sender
        self.asset_specific_data = asset_specific_data
        # Decimal precision defaults to the deck's setting.
        if not number_of_decimals:
            self.number_of_decimals = deck.number_of_decimals
        else:
            self.number_of_decimals = number_of_decimals
        self.receiver = receiver
        self.amount = amount
        if blockhash:
            # Confirmed transaction: record its position in the chain.
            self.blockhash = blockhash
            self.blockseq = blockseq
            self.timestamp = timestamp
            self.blocknum = blocknum
            self.cardseq = cardseq
            self.tx_confirmations = tx_confirmations
        else:
            # Unconfirmed transaction: zero out all block metadata.
            self.blockhash = ""
            self.blockseq = 0
            self.blocknum = 0
            self.timestamp = 0
            self.cardseq = 0
            self.tx_confirmations = 0
        # Classify the card by who sent it and who receives it.
        if self.sender == deck.issuer:
            # if deck issuer is issuing cards to the deck issuing address,
            # card is burn and issue at the same time - which is invalid!
            if deck.issuer in self.receiver:
                raise InvalidCardIssue
            else:
                # card was sent from deck issuer to any random address,
                # card type is CardIssue
                self.type = "CardIssue"
        # card was sent back to issuing address
        # card type is CardBurn
        elif self.receiver[0] == deck.issuer and not self.sender == deck.issuer:
            self.type = "CardBurn"
        # issuer is anyone else,
        # card type is CardTransfer
        else:
            self.type = "CardTransfer"
        # An explicitly supplied type overrides the deduced one.
        if type:
            self.type = type

    @property
    def metainfo_to_protobuf(self) -> bytes:
        '''encode card_transfer info to protobuf

        Raises OverSizeOPReturn when the serialized message would not fit
        into this network's OP_RETURN limit.
        '''
        card = cardtransferproto()
        card.version = self.version
        card.amount.extend(self.amount)
        card.number_of_decimals = self.number_of_decimals
        if self.asset_specific_data:
            # Accept either str or bytes; protobuf requires bytes.
            if not isinstance(self.asset_specific_data, bytes):
                card.asset_specific_data = self.asset_specific_data.encode()
            else:
                card.asset_specific_data = self.asset_specific_data
        if card.ByteSize() > net_query(self.network).op_return_max_bytes:
            raise OverSizeOPReturn('''
Metainfo size exceeds maximum of {max} bytes supported by this network.'''
                                   .format(max=net_query(self.network)
                                           .op_return_max_bytes))
        return card.SerializeToString()

    @property
    def metainfo_to_dict(self) -> dict:
        '''encode card into dictionary (only protocol-level fields)'''
        r = {
            "version": self.version,
            "amount": self.amount,
            "number_of_decimals": self.number_of_decimals
        }
        if self.asset_specific_data:
            r.update({'asset_specific_data': self.asset_specific_data})
        return r

    def to_json(self) -> dict:
        '''export the CardTransfer object to json-ready format'''
        return self.__dict__

    @classmethod
    def from_json(cls, json: dict):
        '''load the Deck object from json'''
        return cls(**json)

    def __str__(self) -> str:
        r = []
        for key in self.__dict__:
            r.append("{key}='{value}'".format(key=key, value=self.to_json()[key]))
        return ', '.join(r)
def validate_card_issue_modes(issue_mode: int, cards: list) -> list:
    """Run *cards* through the parser of every flag set in *issue_mode*.

    Each matching parser receives the currently surviving cards and returns
    the subset it considers valid.  An unsupported mode, or any parser
    rejecting everything, yields an empty list.
    """
    supported_mask = 63  # sum of all issue_mode values
    if not issue_mode & supported_mask:
        return []  # return empty list
    for bit in (1 << shift for shift in range(len(IssueMode))):
        if not bit & issue_mode:
            continue
        try:
            mode_name = IssueMode(bit).name
        except ValueError:
            # Bit is not a standalone IssueMode member (e.g. part of a combo).
            continue
        parser_fn = cast(
            Callable[[list], Optional[list]],
            parsers[mode_name]
        )
        surviving = parser_fn(cards)
        if not surviving:
            return []
        cards = surviving
    return cards
class DeckState:
    # Replays a deck's card transfers in block order to derive per-address
    # balances, the total supply and the burned amount.

    def __init__(self, cards: Generator) -> None:
        self.cards = cards
        self.total = 0   # net issued supply (issues minus burns)
        self.burned = 0  # total amount sent back to the issuer
        self.balances = cast(dict, {})  # address -> balance
        # Sets of already-seen card ids, to skip duplicates per card type.
        self.processed_issues = set()
        self.processed_transfers = set()
        self.processed_burns = set()
        self.calc_state()
        # Sanity check: total supply must equal the sum of all balances.
        self.checksum = not bool(self.total - sum(self.balances.values()))

    def _process(self, card: dict, ctype: str) -> bool:
        # Apply one card to the balance table; returns True when the card is
        # valid (issues always are; transfers/burns need sufficient funds).
        # NOTE(review): only the first receiver/amount pair is considered —
        # cards are expected to be flattened one-receiver records here.
        sender = card["sender"]
        receiver = card["receiver"][0]
        amount = card["amount"][0]
        if ctype != 'CardIssue':
            # Transfers and burns require the sender to hold enough balance.
            balance_check = sender in self.balances and self.balances[sender] >= amount
            if balance_check:
                self.balances[sender] -= amount
                if 'CardBurn' not in ctype:
                    # Burns remove funds entirely; transfers credit the receiver.
                    self._append_balance(amount, receiver)
                return True
            return False
        if 'CardIssue' in ctype:
            # Issues mint new funds straight to the receiver.
            self._append_balance(amount, receiver)
            return True
        return False

    def _append_balance(self, amount: int, receiver: str) -> None:
        # Credit `amount` to `receiver`, creating the entry if needed.
        try:
            self.balances[receiver] += amount
        except KeyError:
            self.balances[receiver] = amount

    def _sort_cards(self, cards: Generator) -> list:
        '''sort cards by blocknum and blockseq'''
        return sorted([card.__dict__ for card in cards],
                      key=itemgetter('blocknum', 'blockseq', 'cardseq'))

    def calc_state(self) -> None:
        for card in self._sort_cards(self.cards):
            # txid + blockseq + cardseq, as unique ID
            cid = str(card["txid"] + str(card["blockseq"]) + str(card["cardseq"]))
            ctype = card["type"]
            amount = card["amount"][0]
            if ctype == 'CardIssue' and cid not in self.processed_issues:
                validate = self._process(card, ctype)
                self.total += amount * validate  # This will set amount to 0 if validate is False
                self.processed_issues |= {cid}
            if ctype == 'CardTransfer' and cid not in self.processed_transfers:
                self._process(card, ctype)
                self.processed_transfers |= {cid}
            if ctype == 'CardBurn' and cid not in self.processed_burns:
                validate = self._process(card, ctype)
                # A valid burn shrinks supply and grows the burn counter.
                self.total -= amount * validate
                self.burned += amount * validate
                self.processed_burns |= {cid}
| {
"repo_name": "PeerAssets/pypeerassets",
"path": "pypeerassets/protocol.py",
"copies": "1",
"size": "15807",
"license": "bsd-3-clause",
"hash": -8201133129104425000,
"line_mean": 32.7756410256,
"line_max": 144,
"alpha_frac": 0.5770861011,
"autogenerated": false,
"ratio": 4.152088258471237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5229174359571237,
"avg_score": null,
"num_lines": null
} |
"""all things PeerAssets protocol."""
import warnings
from .kutil import Kutil
from . import paproto
from .pautils import amount_to_exponent, issue_mode_to_enum
from operator import itemgetter
class Deck:

    def __init__(self, name: str, number_of_decimals: int, issue_mode: str,
                 network: str, production: bool, version=1,
                 asset_specific_data="", issuer="", time=None, asset_id=None):
        '''
        Initialize deck object, load from dictionary Deck(**dict) or initilize
        with kwargs Deck("deck", 3, "ONCE")
        '''
        self.version = version  # protocol version
        self.name = name  # deck name
        self.issue_mode = issue_mode  # deck issue mode (string, e.g. "ONCE")
        # NOTE(review): assert is stripped under `python -O`; raising
        # TypeError would be more robust input validation.
        assert isinstance(number_of_decimals, int), {"error": "number_of_decimals must be an integer"}
        self.number_of_decimals = number_of_decimals
        self.asset_specific_data = asset_specific_data  # optional metadata for the deck
        self.asset_id = asset_id  # also used as the P2TH private key below
        self.issuer = issuer
        self.issue_time = time
        self.network = network
        self.production = production
        # Network names starting with "t" are treated as testnets.
        if self.network.startswith("t"):
            self.testnet = True
        else:
            self.testnet = False

    @property
    def p2th_address(self):
        '''P2TH address of this deck'''
        return Kutil(network=self.network, privkey=self.asset_id).address

    @property
    def p2th_wif(self):
        '''P2TH privkey in WIF format'''
        return Kutil(network=self.network, privkey=self.asset_id).wif

    @property
    def metainfo_to_protobuf(self):
        '''encode deck into protobuf; warns when it exceeds 80 OP_RETURN bytes'''
        deck = paproto.DeckSpawn()
        deck.version = self.version
        deck.name = self.name
        deck.number_of_decimals = self.number_of_decimals
        deck.issue_mode = issue_mode_to_enum(deck, self.issue_mode)
        # Accept either str or bytes; protobuf requires bytes.
        if not isinstance(self.asset_specific_data, bytes):
            deck.asset_specific_data = self.asset_specific_data.encode()
        else:
            deck.asset_specific_data = self.asset_specific_data
        proto = deck.SerializeToString()
        if len(proto) > 80:
            warnings.warn('\nMetainfo size exceeds maximum of 80bytes that fit into OP_RETURN.')
        return proto

    @property
    def metainfo_to_dict(self):
        '''encode deck into dictionary'''
        return {
            "version": self.version,
            "name": self.name,
            "number_of_decimals": self.number_of_decimals,
            "issue_mode": self.issue_mode
        }
class CardTransfer:

    def __init__(self, deck: Deck, receiver=[], amount=[], version=1,
                 blockhash=None, txid=None, sender=None, asset_specific_data="",
                 number_of_decimals=None, blockseq=None, cardseq=None,
                 blocknum=None, timestamp=None):
        '''CardTransfer object, used when parsing card_transfers from the blockchain
        or when sending out new card_transfer.
        It can be initialized by passing the **kwargs and it will do the parsing,
        or it can be initialized with passed arguments.

        * deck - instance of Deck object
        * receivers - list of receivers
        * amounts - list of amounts to be sent, must be float
        * version - protocol version, default 1
        * txid - transaction ID of CardTransfer
        * sender - transaction sender
        * blockhash - block ID where the tx was first included
        * blockseq - order in which tx was serialized into block
        * timestamp - unix timestamp of the block where it was first included
        * asset_specific_data - extra metadata
        * number_of_decimals - number of decimals for amount, inherited from Deck object'''
        # NOTE(review): receiver/amount are mutable default arguments ([]),
        # and assert-based validation disappears under `python -O`.
        assert len(amount) == len(receiver), {"error": "Amount must match receiver."}
        self.version = version
        self.deck_id = deck.asset_id
        self.txid = txid
        self.sender = sender
        self.asset_specific_data = asset_specific_data
        # Decimal precision defaults to the deck's setting.
        if not number_of_decimals:
            self.number_of_decimals = deck.number_of_decimals
        else:
            self.number_of_decimals = number_of_decimals
        self.receiver = receiver
        assert len(self.receiver) < 20, {"error": "Too many receivers."}
        self.amount = amount
        if blockhash:
            # Confirmed transaction: record its position in the chain.
            self.blockhash = blockhash
            self.blockseq = blockseq
            self.timestamp = timestamp
            self.blocknum = blocknum
            self.cardseq = cardseq
        else:
            # Unconfirmed transaction: zero out all block metadata.
            self.blockhash = 0
            self.blockseq = 0
            self.blocknum = 0
            self.timestamp = 0
            self.cardseq = 0
        # Classify by sender/receiver relative to the deck issuer.
        if self.sender == deck.issuer:
            self.type = "CardIssue"
        elif self.receiver[0] == deck.issuer:
            self.type = "CardBurn"
        else:
            self.type = "CardTransfer"

    @property
    def metainfo_to_protobuf(self):
        '''encode card_transfer info to protobuf; warns when over 80 bytes'''
        card = paproto.CardTransfer()
        card.version = self.version
        card.amount.extend(self.amount)
        card.number_of_decimals = self.number_of_decimals
        # Accept either str or bytes; protobuf requires bytes.
        if not isinstance(self.asset_specific_data, bytes):
            card.asset_specific_data = self.asset_specific_data.encode()
        else:
            card.asset_specific_data = self.asset_specific_data
        proto = card.SerializeToString()
        if len(proto) > 80:
            warnings.warn('\nMetainfo size exceeds maximum of 80bytes that fit into OP_RETURN.')
        return proto
def validate_card_issue_modes(deck: Deck, cards: list) -> list:
    """Validate card transfers against the deck's issue mode.

    Returns the subset of *cards* valid under ``deck.issue_mode``, an
    ``{"error": ...}`` dict for invalid mode combinations, or ``None`` for a
    SINGLET deck whose first issue amount is not 1 (the original return
    conventions are preserved for callers).
    """
    error = {"error": "Invalid issue mode."}
    # ONCE and MULTI are mutually exclusive.  Fix: the original tested
    # `("ONCE", "MULTI") in deck.issue_mode`, which raises TypeError on a
    # string issue_mode and never expressed the intended "both present" check.
    if "ONCE" in deck.issue_mode and "MULTI" in deck.issue_mode:
        return error
    # first card is single and amount is 1 for SINGLET
    if deck.issue_mode == "SINGLET":
        c = next(i for i in cards if i.type == "CardIssue")
        # Fix: CardTransfer stores the list as `amount`, not `amounts`;
        # the original attribute access would raise AttributeError.
        if c.amount[0] != 1:
            return None
        else:
            return [c]
    # only first is valid for ONCE
    if "ONCE" in deck.issue_mode:
        return [next(i for i in cards if i.type == "CardIssue")]
    if "MULTI" in deck.issue_mode:  # everything goes for multi
        return cards
    if "CUSTOM" in deck.issue_mode:  # custom issuance mode
        return cards  # what to do with this?
    else:
        return error
class DeckState:
    # Replays a deck's card transfers to derive per-address balances,
    # total supply and the burned amount (older dict-based variant).

    def __init__(self, cards: list):
        self.sort_cards(cards)  # populates self.cards, sorted by block order
        self.total = 0   # net issued supply (issues minus burns)
        self.burned = 0  # total amount sent back to the issuer
        self.balances = {}  # address -> balance
        # Seen-card maps per type: card id -> timestamp of first processing.
        self.processed_issues = {}
        self.processed_transfers = {}
        self.processed_burns = {}
        self.calc_state()
        # Sanity check: total supply must equal the sum of all balances.
        self.checksum = not bool(self.total - sum(self.balances.values()))

    def process(self, card, ctype):
        # Apply one card; returns True when valid (issues always are;
        # transfers/burns need the sender to hold a sufficient balance).
        sender = card["sender"]
        receivers = card["receiver"]
        amount = sum(card["amount"])
        if 'CardIssue' not in ctype:
            balance_check = sender in self.balances and self.balances[sender] >= amount
            if balance_check:
                self.balances[sender] -= amount
                if 'CardBurn' not in ctype:
                    # Burns remove funds entirely; transfers credit receivers.
                    self.to_receivers(card, receivers)
                return True
            return False
        if 'CardIssue' in ctype:
            self.to_receivers(card, receivers)
            return True
        return False

    def to_receivers(self, card, receivers):
        # Credit each receiver with its matching per-receiver amount.
        for i, receiver in enumerate(receivers):
            amount = card["amount"][i]
            try:
                self.balances[receiver] += amount
            except KeyError:
                self.balances[receiver] = amount

    def sort_cards(self, cards):
        # Order cards deterministically by chain position.
        self.cards = sorted([card.__dict__ for card in cards], key=itemgetter('blocknum', 'blockseq'))

    def calc_state(self):
        for card in self.cards:
            # txid + cardseq as a (mostly) unique card id.
            cid = card["txid"] + str(card["cardseq"])
            ctype = card["type"]
            amount = sum(card["amount"])
            if ctype == 'CardIssue' and cid not in self.processed_issues:
                validate = self.process(card, ctype)
                self.total += amount * validate  # This will set amount to 0 if validate is False
                self.processed_issues[cid] = card["timestamp"]
            if ctype == 'CardTransfer' and cid not in self.processed_transfers:
                self.process(card, ctype)
                self.processed_transfers[cid] = card["timestamp"]
            if ctype == 'CardBurn' and cid not in self.processed_burns:
                validate = self.process(card, ctype)
                # A valid burn shrinks supply and grows the burn counter.
                self.total -= amount * validate
                self.burned += amount * validate
                self.processed_burns[cid] = card["timestamp"]
| {
"repo_name": "backpacker69/pypeerassets",
"path": "pypeerassets/protocol.py",
"copies": "1",
"size": "8780",
"license": "bsd-3-clause",
"hash": 4814869228457875000,
"line_mean": 32.6398467433,
"line_max": 102,
"alpha_frac": 0.5908883827,
"autogenerated": false,
"ratio": 4.044219253800092,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5135107636500091,
"avg_score": null,
"num_lines": null
} |
# All this craziness is so that we can allow the classes in nested_admin.formsets
# to be importable directly from this module, e.g.:
#
# from nested_admin import NestedInlineFormSet
#
# without running afoul of the strict import order required by Django 1.9+.
# This implementation is shamelessly stolen from werkzeug's ``__init__.py``.
#
# Also included is a monkey-patch for django.forms.formsets.all_valid().
import pkg_resources
import sys
from types import ModuleType
import django.forms.formsets
import monkeybiz
try:
__version__ = pkg_resources.get_distribution('django-nested-admin').version
except pkg_resources.DistributionNotFound:
__version__ = None
# import mapping to objects in other modules
all_by_module = {
    'nested_admin.formsets': (
        'NestedInlineFormSet', 'NestedBaseGenericInlineFormSet'),
    'nested_admin.nested': (
        'NestedModelAdmin', 'NestedModelAdminMixin', 'NestedInlineAdminFormset',
        'NestedInlineModelAdmin', 'NestedStackedInline', 'NestedTabularInline',
        'NestedInlineModelAdminMixin', 'NestedGenericInlineModelAdmin',
        'NestedGenericStackedInline', 'NestedGenericTabularInline')
}

# modules that should be imported when accessed as attributes of nested_admin
attribute_modules = frozenset(['formsets', 'nested'])

# Reverse index: exported object name -> dotted module path that defines it.
# Consumed by module.__getattr__ below for lazy importing.
object_origins = {}
for module, items in all_by_module.items():
    for item in items:
        object_origins[item] = module
class module(ModuleType):
    """Module stand-in that lazily imports exported objects on first access."""

    def __dir__(self):
        """Just show what we want to show."""
        result = list(new_module.__all__)
        result.extend(('__file__', '__path__', '__doc__', '__all__',
                       '__docformat__', '__name__', '__path__',
                       '__package__', '__version__'))
        return result

    def __getattr__(self, name):
        if name in object_origins:
            # Import the submodule that defines `name`, then cache all of its
            # exports on this module object so later lookups bypass
            # __getattr__ entirely.
            module = __import__(object_origins[name], None, None, [name])
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        elif name in attribute_modules:
            # Accessing e.g. `nested_admin.formsets` triggers the submodule
            # import; the attribute then resolves normally below.
            __import__('nested_admin.' + name)
        return ModuleType.__getattribute__(self, name)
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules[__name__]

# setup the new module and patch it into the dict of loaded modules
new_module = sys.modules[__name__] = module(__name__)
# Copy across the metadata attributes the replacement module must expose.
new_module.__dict__.update({
    '__file__': __file__,
    '__package__': 'nested_admin',
    '__path__': __path__,
    '__doc__': __doc__,
    '__version__': __version__,
    '__all__': tuple(object_origins) + tuple(attribute_modules),
    '__docformat__': 'restructuredtext en',
})
@monkeybiz.patch(django.forms.formsets)
def all_valid(original_all_valid, formsets):
    """
    Checks validation on formsets, then handles a case where an inline
    has new data but one of its parent forms is blank.
    This causes a bug when one of the parent forms has empty_permitted == True,
    which happens if it is an "extra" form in the formset and its index
    is >= the formset's min_num.
    """
    # First pass: Django's stock validation across all formsets.
    if not original_all_valid(formsets):
        return False
    for formset in formsets:
        # Only formsets with changed data and a known parent form need fixing.
        if formset.has_changed() and getattr(formset, 'parent_form', None):
            parent_form = formset.parent_form
            # Walk up the chain of nested parent forms, forcing each to be
            # validated as non-empty and clearing cached errors so the
            # second validation pass re-runs from scratch.
            while True:
                if parent_form.empty_permitted:
                    parent_form.empty_permitted = False
                    # Reset the validation errors
                    parent_form._errors = None
                if not hasattr(parent_form, 'parent_formset'):
                    break
                parent_form.parent_formset._errors = None
                if not hasattr(parent_form.parent_formset, 'parent_form'):
                    break
                parent_form = parent_form.parent_formset.parent_form
    # Second pass: re-validate now that blank parents may no longer be empty.
    if not original_all_valid(formsets):
        return False
    return True
| {
"repo_name": "sbussetti/django-nested-admin",
"path": "nested_admin/__init__.py",
"copies": "1",
"size": "4049",
"license": "bsd-2-clause",
"hash": 3653661570130393600,
"line_mean": 35.4774774775,
"line_max": 81,
"alpha_frac": 0.6243516918,
"autogenerated": false,
"ratio": 4.23979057591623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.536414226771623,
"avg_score": null,
"num_lines": null
} |
## All this functions were grenerated to dynamically print output.
import sys
import numpy as np
import time
import pylab as plt
class Printer():
    """
    Print things to stdout on one line dynamically.

    Keeps a start timestamp so progress calls can report elapsed and
    estimated total time; `data` records the estimate history.
    """

    def __init__(self):
        # Fix: time.clock() was removed in Python 3.8; perf_counter() is the
        # portable monotonic replacement for measuring elapsed time.
        self.tic = time.perf_counter()
        sys.stdout.flush()
        self.data = []

    def printtextoneline(self, string):
        """Overwrite the current terminal line with *string*."""
        # "\r" returns to column 0; "\x1b[K" clears to end of line.
        sys.stdout.write("\r\x1b[K" + string.__str__())
        sys.stdout.flush()

    def timepercentprint(self, minv, maxv, step, i, neddies, loop2=None, diagnostics=False):
        """Draw a one-line progress bar for iteration *i* out of *maxv*.

        When *loop2* is a (start, total, index) triple for an inner loop,
        its progress marker is embedded in the bar and the time estimate is
        derived from the inner loop instead.  `minv` and `diagnostics` are
        kept for interface compatibility but are currently unused.
        """
        percent = (float(i + 1) / maxv) * 100.0
        etime = round(time.perf_counter() - self.tic)
        # Naive estimate: scale elapsed time up to 100%.
        stmtime = round((etime / percent) * 100)
        progress = int(10 / (maxv / (step * (i + 1))))
        emptyprog = 10 - progress
        if loop2 is not None:
            # Inner-loop progress in tenths (0..10).
            percent2ndloop = float((float(loop2[2] + 1) / loop2[1]) * 10)
            x = 2 * np.pi
            if percent2ndloop < 9.5:
                # Too early in the inner loop for a meaningful estimate;
                # report a large placeholder instead.
                stmtime = round(np.exp(x))
            else:
                stmtime = round((((etime / percent) * 100) / (percent2ndloop * 10)) * 100)
            if percent2ndloop == 10:
                percent2ndloop = '>'
            else:
                percent2ndloop = int(percent2ndloop)
            sys.stdout.write("\r 0% [{0}{1}{2}]{3}% | Elapsed Time: {4} s | Estimated Time: {5} s | Info: {6} |".format("="*progress, percent2ndloop, ' '*emptyprog, round(percent), etime, stmtime, neddies))
            self.data.append(stmtime)
            if i != maxv and loop2[2] != loop2[1]:
                sys.stdout.flush()
            else:
                print('')
        else:
            sys.stdout.write("\r 0% [{0}>{1}]{2}% | Elapsed Time: {3} s | Estimated Time: {4} s | Info: {5} |".format("="*progress, ' '*emptyprog, round(percent), etime, stmtime, neddies))
            if percent != 100 and i != maxv:
                sys.stdout.flush()
            else:
                print('')
| {
"repo_name": "Josue-Martinez-Moreno/trackeddy",
"path": "trackeddy/printfunc.py",
"copies": "1",
"size": "2353",
"license": "mit",
"hash": 7913191188884385000,
"line_mean": 37.5901639344,
"line_max": 200,
"alpha_frac": 0.5303867403,
"autogenerated": false,
"ratio": 3.4653902798232696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44957770201232694,
"avg_score": null,
"num_lines": null
} |
# all this shamelessly ripped off from flask-security plugin cause it was well done over there
import base64
import hashlib
import hmac
from passlib.context import CryptContext
class PasswordUtils(object):
    """Helpers for hashing and verifying passwords.

    Passwords are first HMAC-SHA512-signed with a configured salt, then the
    base64 signature is hashed through a passlib CryptContext. (Adapted from
    the flask-security plugin.)
    """

    def __init__(self, config):
        """Read the salt and hash scheme from *config*.

        Raises RuntimeError when either mandatory setting is missing.
        """
        self.salt = config.get('SECURITY_PASSWORD_SALT', None)
        self.pw_hash = config.get('SECURITY_PASSWORD_HASH', None)
        if self.salt is None:
            raise RuntimeError("The configuration value 'SECURITY_PASSWORD_SALT' must be set")
        if self.pw_hash is None:
            raise RuntimeError("The configuration value 'SECURITY_PASSWORD_HASH' must be set")
        self.pwd_context = CryptContext(schemes=[self.pw_hash])

    def get_hmac(self, password):
        """Return the base64-encoded HMAC-SHA512 of *password*, keyed by the salt."""
        h = hmac.new(self.encode_string(self.salt), self.encode_string(password), hashlib.sha512)
        return base64.b64encode(h.digest())

    def encrypt_password(self, password):
        """Return a CryptContext hash of the HMAC-signed password."""
        signed = self.get_hmac(password).decode('ascii')
        return self.pwd_context.encrypt(signed)

    def verify_password(self, password, password_hash):
        """Check *password* (after HMAC signing) against *password_hash*."""
        password = self.get_hmac(password)
        return self.pwd_context.verify(password, password_hash)

    def encode_string(self, string):
        """Return *string* as UTF-8 bytes.

        Fixed to test against ``bytes`` instead of the Python-2-only
        ``unicode`` name (which raised NameError on Python 3): text input is
        encoded, byte input is passed through, on both Python versions.
        """
        if not isinstance(string, bytes):
            string = string.encode('utf-8')
        return string
| {
"repo_name": "LandRegistry/lr-utils",
"path": "lrutils/password/password_utils.py",
"copies": "1",
"size": "1329",
"license": "mit",
"hash": 8065265410896025000,
"line_mean": 29.9069767442,
"line_max": 97,
"alpha_frac": 0.6772009029,
"autogenerated": false,
"ratio": 3.943620178041543,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5120821080941543,
"avg_score": null,
"num_lines": null
} |
__all__ = ["ThreadLocal", "Registry", "WeakValuedRegistry", "SyncDict", "encoded_path", "verify_directory"]
try:
import thread as _thread
import threading as _threading
except ImportError:
import dummy_thread as _thread
import dummy_threading as _threading
from datetime import datetime, timedelta
import hashlib
import os
import sha
import string
import types
import weakref
try:
Set = set
except NameError:
from sets import Set
from beaker.converters import asbool
def verify_directory(dir):
    """Verify that *dir* exists, creating it (mode 0o750) when necessary.

    Tolerates collisions with other threads and processes creating the same
    directory concurrently: a failed attempt is retried (up to 5 times)
    after re-checking existence.
    """
    tries = 0
    while not os.access(dir, os.F_OK):
        try:
            tries += 1
            # 0o750 literal (valid on py2.6+ and py3) replaces the old-style
            # 0750 form, which is a syntax error on Python 3
            os.makedirs(dir, 0o750)
        except OSError:
            # narrowed from a bare except: another thread/process may have
            # created the directory first -- loop re-checks and gives up
            # only after several attempts
            if tries > 5:
                raise
class ThreadLocal(object):
    """Stores a value on a per-thread basis, keyed by thread ident.

    An optional static ``default`` or lazy ``creator`` callable supplies the
    value for threads that have not stored one yet.
    """
    def __init__(self, value = None, default = None, creator = None):
        # per-thread storage: {thread ident: value}
        self.dict = {}
        self.default = default
        self.creator = creator
        # identity test instead of truthiness, so falsy initial values
        # (0, '', False) are stored instead of being silently dropped
        if value is not None:
            self.put(value)

    def __call__(self, *arg):
        """With an argument, store it; with no argument, return the value."""
        if len(arg):
            self.put(arg[0])
        else:
            return self.get()

    def __str__(self):
        return str(self.get())

    def assign(self, value):
        self.dict[_thread.get_ident()] = value

    def put(self, value):
        self.assign(value)

    def exists(self):
        # 'in' replaces dict.has_key(), which was removed in Python 3
        return _thread.get_ident() in self.dict

    def get(self, *args, **params):
        """Return this thread's value, lazily filling from default/creator."""
        ident = _thread.get_ident()
        if ident not in self.dict:
            if self.default is not None:
                self.put(self.default)
            elif self.creator is not None:
                self.put(self.creator(*args, **params))
        return self.dict[ident]

    def remove(self):
        del self.dict[_thread.get_ident()]
class SyncDict(object):
    """
    an efficient/threadsafe singleton map algorithm, a.k.a.
    "get a value based on this key, and create if not found or not valid" paradigm:
        exists && isvalid ? get : create

    works with weakref dictionaries and the LRUCache to handle items asynchronously
    disappearing from the dictionary.

    use python 2.3.3 or greater ! a major bug was just fixed in Nov. 2003 that
    was driving me nuts with garbage collection/weakrefs in this section.
    """
    def __init__(self, mutex, dictionary):
        self.mutex = mutex
        self.dict = dictionary

    def clear(self):
        self.dict.clear()

    def get(self, key, createfunc, isvalidfunc = None):
        """regular get method. returns the object asynchronously, if present
        and also passes the optional isvalidfunc,
        else defers to the synchronous get method which will create it."""
        try:
            # 'in' replaces dict.has_key(), which was removed in Python 3
            if key in self.dict:
                return self._get_obj(key, createfunc, isvalidfunc)
            else:
                return self.sync_get(key, createfunc, isvalidfunc)
        except KeyError:
            # the value vanished between the membership test and the read
            # (possible with weak-valued dictionaries): create it under lock
            return self.sync_get(key, createfunc, isvalidfunc)

    def sync_get(self, key, createfunc, isvalidfunc = None):
        """locked variant of get(): creates the value if missing/invalid."""
        self.mutex.acquire()
        try:
            try:
                if key in self.dict:
                    return self._get_obj(key, createfunc, isvalidfunc, create = True)
                else:
                    return self._create(key, createfunc)
            except KeyError:
                return self._create(key, createfunc)
        finally:
            self.mutex.release()

    def _get_obj(self, key, createfunc, isvalidfunc, create = False):
        obj = self[key]
        if isvalidfunc is not None and not isvalidfunc(obj):
            # stale value: rebuild it (taking the lock first when not held)
            if create:
                return self._create(key, createfunc)
            else:
                return self.sync_get(key, createfunc, isvalidfunc)
        else:
            return obj

    def _create(self, key, createfunc):
        obj = createfunc()
        self[key] = obj
        return obj

    def has_key(self, key):
        # kept for backwards API compatibility; implemented via 'in' so the
        # wrapped dictionary does not need a py2-only has_key() method
        return key in self.dict

    def __contains__(self, key):
        return self.dict.__contains__(key)

    def __getitem__(self, key):
        return self.dict.__getitem__(key)

    def __setitem__(self, key, value):
        self.dict.__setitem__(key, value)

    def __delitem__(self, key):
        return self.dict.__delitem__(key)
class Registry(SyncDict):
    """A plain registry: a SyncDict over an ordinary dict guarded by a Lock."""
    def __init__(self):
        super(Registry, self).__init__(_threading.Lock(), {})
class WeakValuedRegistry(SyncDict):
    """A registry whose entries vanish once nobody holds a strong reference."""
    def __init__(self):
        # weakrefs apparently can trigger the __del__ method of other
        # unreferenced objects when a new reference is created -- e.g. while
        # inserting into the WeakValueDictionary. If such a __del__ happens
        # to touch this same registry we re-enter while already holding the
        # lock at the point of dictionary insertion, so an RLock is required
        # rather than a plain Lock.
        super(WeakValuedRegistry, self).__init__(
            _threading.RLock(), weakref.WeakValueDictionary())
def encoded_path(root, identifiers, extension = ".enc", depth = 3, digest = True):
    """Generate a unique file-accessible path from the given list of
    identifiers starting at the given root directory.

    The identifiers are joined with '_' and (optionally) SHA1-digested;
    the first *depth*-1 prefixes of the resulting name become nested
    directories, which are created if missing.
    """
    # str.join replaces string.join(), which only exists on Python 2
    ident = "_".join(identifiers)
    if digest:
        # hashlib.sha1 replaces the long-removed 'sha' module; encode first
        # so this works on py3 (and on py2 for ascii text)
        ident = hashlib.sha1(ident.encode('utf-8')).hexdigest()
    tokens = []
    for d in range(1, depth):
        tokens.append(ident[0:d])
    dir = os.path.join(root, *tokens)
    verify_directory(dir)
    return os.path.join(dir, ident + extension)
def verify_options(opt, types, error):
    """Return *opt* coerced to one of *types*.

    If *opt* is not already an instance of one of *types*, each type is
    tried in turn as a converter (bool is special-cased through asbool).
    Raises Exception(*error*) when no conversion succeeds.
    """
    if not isinstance(opt, types):
        if not isinstance(types, tuple):
            types = (types,)
        coerced = False
        for typ in types:
            try:
                if typ == bool:
                    typ = asbool
                opt = typ(opt)
                coerced = True
            except Exception:
                # narrowed from a bare except so KeyboardInterrupt and
                # SystemExit are no longer swallowed
                pass
            if coerced:
                break
        if not coerced:
            raise Exception(error)
    return opt
def verify_rules(params, ruleset):
    """Coerce *params* in place: every (key, types, message) rule whose key
    is present is pushed through verify_options. Returns *params*."""
    for rule_key, allowed_types, err_msg in ruleset:
        if rule_key in params:
            params[rule_key] = verify_options(params[rule_key], allowed_types, err_msg)
    return params
def coerce_session_params(params):
    """Validate and coerce session configuration *params* in place.

    Each rule is (key, accepted types, error message); present values are
    coerced via verify_rules/verify_options.
    """
    # type(None) replaces types.NoneType, which does not exist on Python 3
    rules = [
        ('data_dir', (str, type(None)), "data_dir must be a string referring to a directory."),
        ('lock_dir', (str,), "lock_dir must be a string referring to a directory."),
        ('type', (str, type(None)), "Session type must be a string."),
        ('cookie_expires', (bool, datetime, timedelta), "Cookie expires was not a boolean, datetime, or timedelta instance."),
        ('id', (str,), "Session id must be a string."),
        ('key', (str,), "Session key must be a string."),
        ('secret', (str, type(None)), "Session secret must be a string."),
        ('timeout', (int, type(None)), "Session timeout must be an integer."),
    ]
    return verify_rules(params, rules)
def coerce_cache_params(params):
    """Validate and coerce cache configuration *params* in place via verify_rules."""
    # type(None) replaces types.NoneType, which does not exist on Python 3
    rules = [
        ('data_dir', (str, type(None)), "data_dir must be a string referring to a directory."),
        ('lock_dir', (str,), "lock_dir must be a string referring to a directory."),
        ('type', (str,), "Session type must be a string."),
    ]
    return verify_rules(params, rules)
| {
"repo_name": "santisiri/popego",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/Beaker-0.7.5-py2.5.egg/beaker/util.py",
"copies": "1",
"size": "7582",
"license": "bsd-3-clause",
"hash": -4407470462856582000,
"line_mean": 32.1091703057,
"line_max": 126,
"alpha_frac": 0.5859931417,
"autogenerated": false,
"ratio": 4.152245345016429,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5238238486716429,
"avg_score": null,
"num_lines": null
} |
__all__ = ["ThreadWrap", "ExecuteGetResponse"]
import threading
import subprocess
import time
import Queue
import sys
import re
from browser.status import *
from log import VLOG
""" wrapper of basic thread where commands enqueued should run on same thread with
same session id for avoiding race condition.
since it uses condition var to sync the timeline between threads, it's thread safety.
ThreadWrap is truly run by ThreadWrap.start(), and then it in sequence runs its
task(aka, command), it does not quit until receive quit command wrapped in task.
Finally you can call its you can dynamically append new task by ThreadWrap.PostTask(cmd) """
class ThreadWrap(threading.Thread):
    """Worker thread that serially executes queued commands for one session.

    Commands posted via PostTask() all run on this single thread (named after
    the session id) to avoid races. A condition variable shared with the
    parent thread synchronizes around send-type commands. The thread keeps
    running until it executes a task flagged with ``is_quit_func_``.
    """
    def __init__(self, condition, session_id, session):
        threading.Thread.__init__(self, name=session_id)
        # use to sync between main thread and itself
        self.condition = condition
        # use to control its own state
        self.queue = Queue.Queue()
        self.session = session
        # tracing shared vars by any command
        self.status = Status(kOk)
        self.value = {}
        # delay enough time to make sure its parents thread acquire the condition first, so
        # that parent thread can add itself to notify table
        self.is_ready = False
    def run(self):
        # consume tasks forever until a quit-flagged command is executed
        while True:
            if not self.is_ready:
                # NOTE(review): busy-wait until the parent flips is_ready;
                # burns CPU -- a threading.Event would be cheaper. Confirm
                # whether the spin is intentional.
                continue
            if self.queue:
                # NOTE(review): a Queue.Queue instance is always truthy, so
                # this branch is taken unconditionally and the sleep branch
                # below is dead code; queue.get() blocks while empty anyway.
                cmd = self.queue.get()
                self.status = cmd.Run()
                if hasattr(cmd, 'is_send_func_'):
                    # since in low level, switching between threads makes socket reset to NoneType, we
                    # use a condition var to sync between threads to make safety of socket
                    self.condition.acquire()
                    self.condition.notify()
                    self.condition.release()
                if hasattr(cmd, 'is_quit_func_'):
                    # is_quit_func_ is a dynamically attr where it is easily glued to cmd by
                    # cmd.is_quit_func_ = True, when run() notice the attr of cmd, the thread
                    # wrapper is finaly quit
                    return
            else:
                # block itself until waked by self.PostTask()
                # release the ownership of cpu
                time.sleep(0.05)
    def PostTask(self, cmd):
        # enqueue a command for run() to execute on this thread
        self.queue.put(cmd)
        return
""" since python' subprocess module does not support manual timeout setting.
This class binds the wanted commands and post the task to another thread which
can be under control in timeout setting calling thread.join(timeout) """
class ExecuteGetResponse(object):
    """Run a shell command with a manually enforced timeout.

    subprocess alone offers no timeout here, so the command is executed on a
    worker thread and the constructor joins it with a deadline; on expiry
    the child process is terminated and the timeout flag is set.
    """
    def __init__(self, cmd="", timeout=3):
        self.cmd = cmd
        self.timeout = timeout
        self.process = None
        self.stdout = ""
        self.stderr = ""
        self.is_timeout = False
        # run eagerly so GetResponse() can be called right after construction
        self.Run()

    def Task(self):
        """Worker body: spawn the command and capture both output streams."""
        self.process = subprocess.Popen(self.cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (self.stdout, self.stderr) = self.process.communicate()
        return

    def Run(self):
        """Execute Task() on a helper thread, enforcing self.timeout."""
        worker = threading.Thread(target=self.Task)
        worker.start()
        worker.join(self.timeout)
        if worker.is_alive():
            # deadline exceeded: kill the child, then wait for the worker
            # to finish winding down
            self.is_timeout = True
            self.process.terminate()
            worker.join()
        return

    # return status and response<string>
    def GetResponse(self):
        """Map the captured outcome to a (Status, stdout) pair."""
        if self.is_timeout:
            # handle timeout error
            msg = "Xdb command timed out after %s seconds" % str(self.timeout)
            return (Status(kTimeout, msg), "")
        if self.stderr:
            # shell-level failure, e.g. unrecognized or misspelled command
            VLOG(3, "Xdb: %s - %s" % (self.cmd, self.stderr))
            return (Status(kUnknownError, "Failed to run Xdb command, is the Xdb server running?"), "")
        # the tool itself may report an error on stdout
        matchObj = re.search(r'error', self.stdout, re.I)
        if matchObj:
            VLOG(3, "Xdb: %s - %s" % (self.cmd, self.stdout))
            return (Status(kUnknownError, "Failed to run Xdb command, detailed message:" + self.stdout), "")
        return (Status(kOk), self.stdout)
| {
"repo_name": "PeterWangIntel/crosswalk-webdriver-python",
"path": "base/thread_wrap.py",
"copies": "1",
"size": "3865",
"license": "bsd-3-clause",
"hash": -4655848634263552000,
"line_mean": 34.787037037,
"line_max": 105,
"alpha_frac": 0.6698576973,
"autogenerated": false,
"ratio": 3.8961693548387095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.506602705213871,
"avg_score": null,
"num_lines": null
} |
__all__ = ['threshold_adaptive',
'threshold_otsu',
'threshold_yen',
'threshold_isodata']
import numpy as np
import scipy.ndimage
from skimage.exposure import histogram
def threshold_adaptive(image, block_size, method='gaussian', offset=0,
                       mode='reflect', param=None):
    """Apply local (adaptive) thresholding to a 2-D image.

    The threshold at each pixel is a weighted mean of its neighborhood,
    minus *offset*; alternatively the per-pixel threshold can be produced by
    a custom function ('generic' method).

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.
    block_size : int
        Uneven size of the pixel neighborhood used for the local statistic
        (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        Statistic used for the weighted-mean image: custom function
        ('generic', see *param*), gaussian filter ('gaussian', sigma via
        *param*), arithmetic mean ('mean') or median rank filter ('median').
        Default: 'gaussian'.
    offset : float, optional
        Constant subtracted from the weighted mean of the neighborhood to
        calculate the local threshold value. Default offset is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        Border handling of the underlying filter; cval applies when mode is
        'constant'. Default is 'reflect'.
    param : {int, function}, optional
        Sigma for the 'gaussian' method, or a function object for the
        'generic' method mapping the flat neighborhood array to the centre
        pixel's threshold.

    Returns
    -------
    threshold : (N, M) ndarray
        Thresholded binary image (True where ``image`` exceeds the local
        threshold).

    References
    ----------
    .. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()[:50, :50]
    >>> binary_image1 = threshold_adaptive(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func)
    """
    local_stat = np.zeros(image.shape, 'double')
    if method == 'generic':
        scipy.ndimage.generic_filter(image, param, block_size,
                                     output=local_stat, mode=mode)
    elif method == 'gaussian':
        # default sigma chosen so the kernel covers > 99% of the distribution
        sigma = param if param is not None else (block_size - 1) / 6.0
        scipy.ndimage.gaussian_filter(image, sigma, output=local_stat,
                                      mode=mode)
    elif method == 'mean':
        # two separable 1-D passes are cheaper than one 2-D convolution
        kernel = 1. / block_size * np.ones((block_size,))
        scipy.ndimage.convolve1d(image, kernel, axis=0, output=local_stat,
                                 mode=mode)
        scipy.ndimage.convolve1d(local_stat, kernel, axis=1,
                                 output=local_stat, mode=mode)
    elif method == 'median':
        scipy.ndimage.median_filter(image, block_size, output=local_stat,
                                    mode=mode)
    return image > (local_stat - offset)
def threshold_otsu(image, nbins=256):
    """Compute Otsu's threshold for *image*.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate the histogram. This value is
        ignored for integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value: all pixel intensities less than or equal to
        this value are assumed to be foreground.

    References
    ----------
    .. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_otsu(image)
    >>> binary = image <= thresh
    """
    counts, bin_centers = histogram(image, nbins)
    counts = counts.astype(float)
    # cumulative class probabilities for every candidate threshold
    w0 = np.cumsum(counts)
    w1 = np.cumsum(counts[::-1])[::-1]
    # cumulative class means for every candidate threshold
    mu0 = np.cumsum(counts * bin_centers) / w0
    mu1 = (np.cumsum((counts * bin_centers)[::-1]) / w1[::-1])[::-1]
    # Drop the ends so that class-0 statistics at t pair with class-1
    # statistics above t (the extreme entries have no counterpart).
    inter_class_var = w0[:-1] * w1[1:] * (mu0[:-1] - mu1[1:]) ** 2
    best = np.argmax(inter_class_var)
    return bin_centers[:-1][best]
def threshold_yen(image, nbins=256):
    """Compute a threshold using Yen's maximum correlation criterion.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate the histogram. This value is
        ignored for integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value: all pixel intensities less than or equal to
        this value are assumed to be foreground.

    References
    ----------
    .. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion
           for Automatic Multilevel Thresholding" IEEE Trans. on Image
           Processing, 4(3): 370-378
    .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_yen(image)
    >>> binary = image <= thresh
    """
    counts, bin_centers = histogram(image, nbins)
    # A single-bin histogram (e.g. a constant-valued int image) needs no
    # search at all.
    if bin_centers.size == 1:
        return bin_centers[0]
    # Probability mass function and its cumulative forms.
    pmf = counts.astype(np.float32) / counts.sum()
    P1 = np.cumsum(pmf)
    P1_sq = np.cumsum(pmf ** 2)
    # cumulative sum of squares accumulated from the high end of the histogram
    P2_sq = np.cumsum(pmf[::-1] ** 2)[::-1]
    # The +1 shift on P2_sq (paired with P1[:-1]) avoids -inf in the
    # criterion; ImageJ's Yen implementation zeroes those entries instead.
    crit = np.log(((P1_sq[:-1] * P2_sq[1:]) ** -1) *
                  (P1[:-1] * (1.0 - P1[:-1])) ** 2)
    return bin_centers[crit.argmax()]
def threshold_isodata(image, nbins=256, return_all=False):
    """Return threshold value(s) based on ISODATA method.

    Histogram-based threshold, known as Ridler-Calvard method or inter-means.
    Threshold values returned satisfy the following equality:

    threshold = (image[image <= threshold].mean() +
                 image[image > threshold].mean()) / 2.0

    That is, returned thresholds are intensities that separate the image into
    two groups of pixels, where the threshold intensity is midway between the
    mean intensities of these groups.

    For integer images, the above equality holds to within one; for floating-
    point images, the equality holds to within the histogram bin-width.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    return_all: bool, optional
        If False (default), return only the lowest threshold that satisfies
        the above equality. If True, return all valid thresholds.

    Returns
    -------
    threshold : float or int or array
        Threshold value(s).

    References
    ----------
    .. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an
           iterative selection method"
    .. [2] IEEE Transactions on Systems, Man and Cybernetics 8: 630-632,
           http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4310039
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [4] ImageJ AutoThresholder code,
           http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import coins
    >>> image = coins()
    >>> thresh = threshold_isodata(image)
    >>> binary = image > thresh
    """
    hist, bin_centers = histogram(image, nbins)
    # image only contains one unique value
    if len(bin_centers) == 1:
        if return_all:
            return bin_centers
        else:
            return bin_centers[0]
    hist = hist.astype(np.float32)
    # csuml and csumh contain the count of pixels in that bin or lower, and
    # in all bins strictly higher than that bin, respectively
    csuml = np.cumsum(hist)
    csumh = np.cumsum(hist[::-1])[::-1] - hist
    # intensity_sum contains the total pixel intensity from each bin
    intensity_sum = hist * bin_centers
    # l and h contain average value of all pixels in that bin or lower, and
    # in all bins strictly higher than that bin, respectively.
    # Note that since exp.histogram does not include empty bins at the low or
    # high end of the range, csuml and csumh are strictly > 0, except in the
    # last bin of csumh, which is zero by construction.
    # So no worries about division by zero in the following lines, except
    # for the last bin, but we can ignore that because no valid threshold
    # can be in the top bin. So we just patch up csumh[-1] to not cause 0/0
    # errors.
    csumh[-1] = 1
    l = np.cumsum(intensity_sum) / csuml
    h = (np.cumsum(intensity_sum[::-1])[::-1] - intensity_sum) / csumh
    # isodata finds threshold values that meet the criterion t = (l + m)/2
    # where l is the mean of all pixels <= t and h is the mean of all pixels
    # > t, as calculated above. So we are looking for places where
    # (l + m) / 2 equals the intensity value for which those l and m figures
    # were calculated -- which is, of course, the histogram bin centers.
    # We only require this equality to be within the precision of the bin
    # width, of course.
    all_mean = (l + h) / 2.0
    bin_width = bin_centers[1] - bin_centers[0]
    # Look only at thresholds that are below the actual all_mean value,
    # for consistency with the threshold being included in the lower pixel
    # group. Otherwise can get thresholds that are not actually fixed-points
    # of the isodata algorithm. For float images, this matters less, since
    # there really can't be any guarantees anymore anyway.
    distances = all_mean - bin_centers
    thresholds = bin_centers[(distances >= 0) & (distances < bin_width)]
    if return_all:
        return thresholds
    else:
        # default: report only the lowest fixed-point threshold
        return thresholds[0]
| {
"repo_name": "SamHames/scikit-image",
"path": "skimage/filter/thresholding.py",
"copies": "1",
"size": "11634",
"license": "bsd-3-clause",
"hash": 1078266607003098500,
"line_mean": 37.78,
"line_max": 126,
"alpha_frac": 0.6320268179,
"autogenerated": false,
"ratio": 3.973360655737705,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5105387473637705,
"avg_score": null,
"num_lines": null
} |
__all__ = ['threshold_adaptive',
'threshold_otsu',
'threshold_yen',
'threshold_isodata',
'threshold_li', ]
import numpy as np
from scipy import ndimage as ndi
from ..exposure import histogram
from .._shared.utils import assert_nD
import warnings
def threshold_adaptive(image, block_size, method='gaussian', offset=0,
                       mode='reflect', param=None):
    """Applies an adaptive threshold to an array.

    Also known as local or dynamic thresholding where the threshold value is
    the weighted mean for the local neighborhood of a pixel subtracted by a
    constant. Alternatively the threshold can be determined dynamically by a a
    given function using the 'generic' method.

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.
    block_size : int
        Uneven size of pixel neighborhood which is used to calculate the
        threshold value (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        Method used to determine adaptive threshold for local neighbourhood in
        weighted mean image.

        * 'generic': use custom function (see `param` parameter)
        * 'gaussian': apply gaussian filter (see `param` parameter for custom\
                      sigma value)
        * 'mean': apply arithmetic mean filter
        * 'median': apply median rank filter

        By default the 'gaussian' method is used.
    offset : float, optional
        Constant subtracted from weighted mean of neighborhood to calculate
        the local threshold value. Default offset is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled, where
        cval is the value when mode is equal to 'constant'.
        Default is 'reflect'.
    param : {int, function}, optional
        Either specify sigma for 'gaussian' method or function object for
        'generic' method. This functions takes the flat array of local
        neighbourhood as a single argument and returns the calculated
        threshold for the centre pixel.

    Returns
    -------
    threshold : (N, M) ndarray
        Thresholded binary image

    References
    ----------
    .. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()[:50, :50]
    >>> binary_image1 = threshold_adaptive(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func)
    """
    # reject non-2-D input early
    assert_nD(image, 2)
    thresh_image = np.zeros(image.shape, 'double')
    if method == 'generic':
        ndi.generic_filter(image, param, block_size,
                           output=thresh_image, mode=mode)
    elif method == 'gaussian':
        if param is None:
            # automatically determine sigma which covers > 99% of distribution
            sigma = (block_size - 1) / 6.0
        else:
            sigma = param
        ndi.gaussian_filter(image, sigma, output=thresh_image, mode=mode)
    elif method == 'mean':
        mask = 1. / block_size * np.ones((block_size,))
        # separation of filters to speedup convolution
        ndi.convolve1d(image, mask, axis=0, output=thresh_image, mode=mode)
        ndi.convolve1d(thresh_image, mask, axis=1,
                       output=thresh_image, mode=mode)
    elif method == 'median':
        ndi.median_filter(image, block_size, output=thresh_image, mode=mode)
    # binary result: pixel is foreground where it exceeds the local threshold
    return image > (thresh_image - offset)
def threshold_otsu(image, nbins=256):
    """Return threshold value based on Otsu's method.

    Parameters
    ----------
    image : array
        Grayscale input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities that less or equal of
        this value assumed as foreground.

    References
    ----------
    .. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_otsu(image)
    >>> binary = image <= thresh

    Notes
    -----
    The input image must be grayscale.
    """
    # Heuristic color check: a trailing axis of size 3 or 4 suggests an
    # RGB(A) image, for which Otsu on the raveled values is not meaningful.
    if image.shape[-1] in (3, 4):
        msg = "threshold_otsu is expected to work correctly only for " \
              "grayscale images; image shape {0} looks like an RGB image"
        warnings.warn(msg.format(image.shape))
    hist, bin_centers = histogram(image.ravel(), nbins)
    hist = hist.astype(float)
    # class probabilities for all possible thresholds
    weight1 = np.cumsum(hist)
    weight2 = np.cumsum(hist[::-1])[::-1]
    # class means for all possible thresholds
    mean1 = np.cumsum(hist * bin_centers) / weight1
    mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
    # Clip ends to align class 1 and class 2 variables:
    # The last value of `weight1`/`mean1` should pair with zero values in
    # `weight2`/`mean2`, which do not exist.
    variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
    # the bin maximizing the inter-class variance is Otsu's threshold
    idx = np.argmax(variance12)
    threshold = bin_centers[:-1][idx]
    return threshold
def threshold_yen(image, nbins=256):
    """Return threshold value based on Yen's method.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities that less or equal of
        this value assumed as foreground.

    References
    ----------
    .. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion
           for Automatic Multilevel Thresholding" IEEE Trans. on Image
           Processing, 4(3): 370-378
    .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_yen(image)
    >>> binary = image <= thresh
    """
    hist, bin_centers = histogram(image.ravel(), nbins)
    # On blank images (e.g. filled with 0) with int dtype, `histogram()`
    # returns `bin_centers` containing only one value. Speed up with it.
    if bin_centers.size == 1:
        return bin_centers[0]
    # Calculate probability mass function
    pmf = hist.astype(np.float32) / hist.sum()
    P1 = np.cumsum(pmf)  # Cumulative normalized histogram
    P1_sq = np.cumsum(pmf ** 2)
    # Get cumsum calculated from end of squared array:
    P2_sq = np.cumsum(pmf[::-1] ** 2)[::-1]
    # P2_sq indexes is shifted +1. I assume, with P1[:-1] it's help avoid '-inf'
    # in crit. ImageJ Yen implementation replaces those values by zero.
    crit = np.log(((P1_sq[:-1] * P2_sq[1:]) ** -1) *
                  (P1[:-1] * (1.0 - P1[:-1])) ** 2)
    # the bin maximizing Yen's criterion is the threshold
    return bin_centers[crit.argmax()]
def threshold_isodata(image, nbins=256, return_all=False):
    """Return threshold value(s) based on ISODATA method.

    Histogram-based threshold, known as Ridler-Calvard method or inter-means.
    Threshold values returned satisfy the following equality:

    `threshold = (image[image <= threshold].mean() +`
                 `image[image > threshold].mean()) / 2.0`

    That is, returned thresholds are intensities that separate the image into
    two groups of pixels, where the threshold intensity is midway between the
    mean intensities of these groups.

    For integer images, the above equality holds to within one; for floating-
    point images, the equality holds to within the histogram bin-width.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    return_all: bool, optional
        If False (default), return only the lowest threshold that satisfies
        the above equality. If True, return all valid thresholds.

    Returns
    -------
    threshold : float or int or array
        Threshold value(s).

    References
    ----------
    .. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an
           iterative selection method"
    .. [2] IEEE Transactions on Systems, Man and Cybernetics 8: 630-632,
           http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4310039
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [4] ImageJ AutoThresholder code,
           http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import coins
    >>> image = coins()
    >>> thresh = threshold_isodata(image)
    >>> binary = image > thresh
    """
    hist, bin_centers = histogram(image.ravel(), nbins)
    # image only contains one unique value
    if len(bin_centers) == 1:
        if return_all:
            return bin_centers
        else:
            return bin_centers[0]
    hist = hist.astype(np.float32)
    # csuml and csumh contain the count of pixels in that bin or lower, and
    # in all bins strictly higher than that bin, respectively
    csuml = np.cumsum(hist)
    csumh = np.cumsum(hist[::-1])[::-1] - hist
    # intensity_sum contains the total pixel intensity from each bin
    intensity_sum = hist * bin_centers
    # l and h contain average value of all pixels in that bin or lower, and
    # in all bins strictly higher than that bin, respectively.
    # Note that since exp.histogram does not include empty bins at the low or
    # high end of the range, csuml and csumh are strictly > 0, except in the
    # last bin of csumh, which is zero by construction.
    # So no worries about division by zero in the following lines, except
    # for the last bin, but we can ignore that because no valid threshold
    # can be in the top bin. So we just patch up csumh[-1] to not cause 0/0
    # errors.
    csumh[-1] = 1
    l = np.cumsum(intensity_sum) / csuml
    h = (np.cumsum(intensity_sum[::-1])[::-1] - intensity_sum) / csumh
    # isodata finds threshold values that meet the criterion t = (l + m)/2
    # where l is the mean of all pixels <= t and h is the mean of all pixels
    # > t, as calculated above. So we are looking for places where
    # (l + m) / 2 equals the intensity value for which those l and m figures
    # were calculated -- which is, of course, the histogram bin centers.
    # We only require this equality to be within the precision of the bin
    # width, of course.
    all_mean = (l + h) / 2.0
    bin_width = bin_centers[1] - bin_centers[0]
    # Look only at thresholds that are below the actual all_mean value,
    # for consistency with the threshold being included in the lower pixel
    # group. Otherwise can get thresholds that are not actually fixed-points
    # of the isodata algorithm. For float images, this matters less, since
    # there really can't be any guarantees anymore anyway.
    distances = all_mean - bin_centers
    thresholds = bin_centers[(distances >= 0) & (distances < bin_width)]
    if return_all:
        return thresholds
    else:
        # default: report only the lowest fixed-point threshold
        return thresholds[0]
def threshold_li(image):
    """Return threshold value based on adaptation of Li's Minimum Cross Entropy method.
    Parameters
    ----------
    image : array
        Input image.
    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities more than
        this value are assumed to be foreground.
    References
    ----------
    .. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
           Pattern Recognition, 26(4): 617-625
    .. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
           Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165
           http://citeseer.ist.psu.edu/sezgin04survey.html
    .. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold
    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_li(image)
    >>> binary = image > thresh
    """
    # Copy to ensure input image is not modified
    image = image.copy()
    # Requires positive image (because of log(mean))
    immin = np.min(image)
    image -= immin
    imrange = np.max(image)
    # Degenerate case: a constant image has no contrast to threshold.
    # Without this guard the while loop below never executes (tolerance
    # would be 0) and `threshold` would be referenced while unbound.
    if imrange == 0:
        return immin
    tolerance = 0.5 * imrange / 256
    # Calculate the mean gray-level
    mean = np.mean(image)
    # Initial estimate
    new_thresh = mean
    old_thresh = new_thresh + 2 * tolerance
    # Stop the iterations when the difference between the
    # new and old threshold values is less than the tolerance.
    # Since |new - old| starts at 2 * tolerance > tolerance, the loop
    # always runs at least once, so `threshold` is guaranteed to be bound.
    while abs(new_thresh - old_thresh) > tolerance:
        old_thresh = new_thresh
        threshold = old_thresh + tolerance   # range
        # Calculate the means of background and object pixels.
        # NOTE(review): if one side of the split is empty, .mean() of an
        # empty slice yields NaN and the iteration degenerates — same as
        # the original behavior; confirm inputs always have both classes.
        mean_back = image[image <= threshold].mean()
        mean_obj = image[image > threshold].mean()
        temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj))
        if temp < 0:
            new_thresh = temp - tolerance
        else:
            new_thresh = temp + tolerance
    # Shift back into the original intensity range.
    return threshold + immin
| {
"repo_name": "ClinicalGraphics/scikit-image",
"path": "skimage/filters/thresholding.py",
"copies": "1",
"size": "14107",
"license": "bsd-3-clause",
"hash": 6337271314505982000,
"line_mean": 36.3201058201,
"line_max": 126,
"alpha_frac": 0.6339405969,
"autogenerated": false,
"ratio": 3.9088390135771682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5042779610477168,
"avg_score": null,
"num_lines": null
} |
__all__ = ['threshold_adaptive', 'threshold_otsu', 'threshold_yen']
import numpy as np
import scipy.ndimage
from skimage.exposure import histogram
def threshold_adaptive(image, block_size, method='gaussian', offset=0,
                       mode='reflect', param=None):
    """Apply an adaptive (local/dynamic) threshold to an array.

    The threshold for each pixel is a weighted mean of its local
    neighborhood minus the constant ``offset``; alternatively a custom
    function supplied through ``param`` computes it ('generic' method).

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.
    block_size : int
        Uneven size of the pixel neighborhood used to calculate the
        threshold value (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        How the weighted mean image is determined:
        * 'generic': use custom function (see `param` parameter)
        * 'gaussian': apply gaussian filter (see `param` parameter for custom\
                      sigma value)
        * 'mean': apply arithmetic mean filter
        * 'median': apply median rank filter
        By default the 'gaussian' method is used.
    offset : float, optional
        Constant subtracted from the weighted neighborhood mean to obtain
        the local threshold value. Default offset is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        Determines how array borders are handled; cval is used when mode
        equals 'constant'. Default is 'reflect'.
    param : {int, function}, optional
        Sigma for the 'gaussian' method, or a function object for the
        'generic' method mapping the flat local neighbourhood array to the
        threshold for the centre pixel.

    Returns
    -------
    threshold : (N, M) ndarray
        Thresholded binary image

    References
    ----------
    .. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()[:50, :50]
    >>> binary_image1 = threshold_adaptive(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func)
    """
    weighted = np.zeros(image.shape, 'double')
    if method == 'generic':
        scipy.ndimage.generic_filter(image, param, block_size,
                                     output=weighted, mode=mode)
    elif method == 'gaussian':
        # Default sigma covers > 99% of the distribution within the block.
        sigma = param if param is not None else (block_size - 1) / 6.0
        scipy.ndimage.gaussian_filter(image, sigma, output=weighted,
                                      mode=mode)
    elif method == 'mean':
        kernel = np.full((block_size,), 1. / block_size)
        # Two separable 1D convolutions are faster than one 2D convolution.
        scipy.ndimage.convolve1d(image, kernel, axis=0, output=weighted,
                                 mode=mode)
        scipy.ndimage.convolve1d(weighted, kernel, axis=1,
                                 output=weighted, mode=mode)
    elif method == 'median':
        scipy.ndimage.median_filter(image, block_size, output=weighted,
                                    mode=mode)
    return image > (weighted - offset)
def threshold_otsu(image, nbins=256):
    """Return threshold value based on Otsu's method.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of histogram bins; ignored for integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities that less or equal of
        this value assumed as foreground.

    References
    ----------
    .. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_otsu(image)
    >>> binary = image <= thresh
    """
    counts, centers = histogram(image, nbins)
    counts = counts.astype(float)
    # Cumulative pixel counts at-or-below / strictly-above each candidate
    # threshold (i.e. class probabilities up to a normalization).
    w_low = np.cumsum(counts)
    w_high = np.cumsum(counts[::-1])[::-1]
    # Class means for every candidate threshold.
    mu_low = np.cumsum(counts * centers) / w_low
    mu_high = (np.cumsum((counts * centers)[::-1]) / w_high[::-1])[::-1]
    # Clip ends to align the two classes: the last low-class entry would
    # pair with an empty high class, which does not exist.
    between_var = w_low[:-1] * w_high[1:] * (mu_low[:-1] - mu_high[1:]) ** 2
    # Otsu's threshold maximizes the between-class variance.
    return centers[:-1][np.argmax(between_var)]
def threshold_yen(image, nbins=256):
    """Return threshold value based on Yen's method.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of histogram bins; ignored for integer arrays.

    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities that less or equal of
        this value assumed as foreground.

    References
    ----------
    .. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion
           for Automatic Multilevel Thresholding" IEEE Trans. on Image
           Processing, 4(3): 370-378
    .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_yen(image)
    >>> binary = image <= thresh
    """
    hist, bin_centers = histogram(image, nbins)
    # Probability mass function and its cumulative sums.
    pmf = hist.astype(float) / hist.sum()
    P1 = np.cumsum(pmf)
    P1_sq = np.cumsum(pmf ** 2)
    # Cumulative sum of squares taken from the high end of the histogram.
    P2_sq = np.cumsum(pmf[::-1] ** 2)[::-1]
    # P2_sq is shifted by +1; pairing it with P1[:-1] helps avoid '-inf'
    # in the criterion. ImageJ's Yen implementation replaces those values
    # by zero instead.
    inv = (P1_sq[:-1] * P2_sq[1:]) ** -1
    crit = np.log(inv * (P1[:-1] * (1.0 - P1[:-1])) ** 2)
    # Yen's threshold maximizes the criterion.
    return bin_centers[:-1][np.argmax(crit)]
| {
"repo_name": "almarklein/scikit-image",
"path": "skimage/filter/thresholding.py",
"copies": "1",
"size": "7022",
"license": "bsd-3-clause",
"hash": 2678145550383971300,
"line_mean": 36.752688172,
"line_max": 126,
"alpha_frac": 0.6152093421,
"autogenerated": false,
"ratio": 3.9897727272727272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5104982069372728,
"avg_score": null,
"num_lines": null
} |
__all__ = ['ThresholdingFeatClass']
import logging
import numpy as np
from .base import BaseFeatClass
from ..classifiers import ThresholdRescaler, BinaryLabelsClassifier
from ..classifiers import get_thresholds_from_file
logger = logging.getLogger(__name__)
class ThresholdingFeatClass(BaseFeatClass):
    """Featurizing classifier (see BaseFeatClass) whose predicted
    probabilities are rescaled around per-class decision thresholds via a
    ThresholdRescaler.

    Thresholds come either directly (`thresholds`) or from a file
    (`thresholds_uri`); supplying both raises ValueError. When neither is
    given, a flat 0.5 threshold is used for every class.
    """
    def __init__(self, thresholds=None, thresholds_uri=None, **kwargs):
        super(ThresholdingFeatClass, self).__init__(**kwargs)
        if thresholds is not None and thresholds_uri is not None: raise ValueError('Only one of thresholds/thresholds_uri should be specified.')
        self.thresholds_uri = thresholds_uri
        if thresholds is None:
            if self.thresholds_uri:
                thresholds = get_thresholds_from_file(self.thresholds_uri, self.classifier.classes_)
                # For binary classifiers whose two thresholds do not sum to
                # ~1.0, replace any threshold still at the 0.5 default with
                # the complement of the other class's threshold.
                # NOTE(review): the second `if` reads the possibly-updated
                # thresholds[0], so statement order matters here — confirm
                # that is intended.
                if len(self.classifier.classes_) == 2 and not np.isclose(thresholds.sum(), 1.0) and isinstance(self.classifier, BinaryLabelsClassifier):
                    if thresholds[0] == 0.5: thresholds[0] = 1.0 - thresholds[1]
                    if thresholds[1] == 0.5: thresholds[1] = 1.0 - thresholds[0]
                    logger.warning('Thresholds were set automatically for BinaryLabelsClassifier to {}={} and {}={}.'.format(self.classifier.classes_[0], thresholds[0], self.classifier.classes_[1], thresholds[1]))
                #end if
            else: thresholds = 0.5
            #end if
        self.rescaler = ThresholdRescaler(thresholds, len(self.classifier.classes_))
    #end def
    def predict(self, X, **kwargs):
        # Hard labels: per-class thresholds applied to the raw
        # (un-rescaled) probabilities from the underlying classifier.
        return self.rescaler.predict(super(ThresholdingFeatClass, self).predict_proba(X, **kwargs))
    #end def
    def predict_proba(self, X, *, rescale=True, **kwargs):
        """Class probabilities, rescaled around the thresholds unless
        ``rescale=False``."""
        Y_proba = super(ThresholdingFeatClass, self).predict_proba(X, **kwargs)
        if rescale:
            return self.rescaler.rescale(Y_proba)
        return Y_proba
    #end def
    def predict_and_proba(self, X, *, rescale=True, **kwargs):
        """Return ``(Y_proba, Y_predict)`` computed in a single pass.

        NOTE(review): with ``rescale=True`` labels use ``>= 0.5`` on the
        rescaled probabilities, while ``rescale=False`` uses the rescaler's
        strict ``>`` comparison on raw probabilities — confirm the boundary
        asymmetry is intended.
        """
        Y_proba = super(ThresholdingFeatClass, self).predict_proba(X, **kwargs)
        if rescale:
            Y_proba = self.rescaler.rescale(Y_proba)
            Y_predict = Y_proba >= 0.5
        else:
            Y_predict = self.rescaler.predict(Y_proba)
        #end if
        return Y_proba, Y_predict
    #end def
    def decision_function(self, X, **kwargs):
        # Decision scores are simply the (rescaled) probabilities.
        return self.predict_proba(X, **kwargs)
    def __str__(self):
        return 'ThresholdingFeatClass(featurizer={}, classifier={}, thresholds_uri={})'.format(self.featurizer, self.classifier, self.thresholds_uri if self.thresholds_uri else self.rescaler.thresholds)
    #end def
#end class
| {
"repo_name": "skylander86/ycml",
"path": "ycml/featclass/thresholds.py",
"copies": "1",
"size": "2642",
"license": "apache-2.0",
"hash": 7652813940318329000,
"line_mean": 36.2112676056,
"line_max": 213,
"alpha_frac": 0.6483724451,
"autogenerated": false,
"ratio": 3.8124098124098125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9905730212472865,
"avg_score": 0.011010409007389529,
"num_lines": 71
} |
__all__ = ['threshold_otsu', 'threshold_adaptive']
import numpy as np
import scipy.ndimage
from skimage.exposure import histogram
def threshold_adaptive(image, block_size, method='gaussian', offset=0,
                       mode='reflect', param=None):
    """Apply an adaptive (local/dynamic) threshold to an array.

    The threshold value for each pixel is the weighted mean of its local
    neighborhood minus the constant ``offset``; with the 'generic' method a
    user-supplied function computes it instead.

    Parameters
    ----------
    image : NxM ndarray
        Input image.
    block_size : int
        Uneven size of the pixel neighborhood used to calculate the
        threshold value (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        How the weighted mean image is determined:
        * 'generic': use custom function (see `param` parameter)
        * 'gaussian': apply gaussian filter (see `param` parameter for
          custom sigma value)
        * 'mean': apply arithmetic mean filter
        * 'median': apply median rank filter
        By default the 'gaussian' method is used.
    offset : float, optional
        Constant subtracted from the weighted neighborhood mean to obtain
        the local threshold value. Default offset is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        Determines how array borders are handled; cval is used when mode
        equals 'constant'. Default is 'reflect'.
    param : {int, function}, optional
        Sigma for the 'gaussian' method, or a function object for the
        'generic' method mapping the flat local neighbourhood array to the
        threshold for the centre pixel.

    Returns
    -------
    threshold : NxM ndarray
        Thresholded binary image

    References
    ----------
    http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations
    .html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> binary_image1 = threshold_adaptive(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func)
    """
    weighted = np.zeros(image.shape, 'double')
    if method == 'generic':
        scipy.ndimage.generic_filter(image, param, block_size,
                                     output=weighted, mode=mode)
    elif method == 'gaussian':
        # Default sigma covers > 99% of the distribution within the block.
        sigma = param if param is not None else (block_size - 1) / 6.0
        scipy.ndimage.gaussian_filter(image, sigma, output=weighted,
                                      mode=mode)
    elif method == 'mean':
        kernel = np.full((block_size,), 1. / block_size)
        # Two separable 1D convolutions are faster than one 2D convolution.
        scipy.ndimage.convolve1d(image, kernel, axis=0, output=weighted,
                                 mode=mode)
        scipy.ndimage.convolve1d(weighted, kernel, axis=1,
                                 output=weighted, mode=mode)
    elif method == 'median':
        scipy.ndimage.median_filter(image, block_size, output=weighted,
                                    mode=mode)
    return image > (weighted - offset)
def threshold_otsu(image, nbins=256):
    """Return threshold value based on Otsu's method.

    Parameters
    ----------
    image : array
        Input image.
    nbins : int
        Number of histogram bins; ignored for integer arrays.

    Returns
    -------
    threshold : float
        Threshold value.

    References
    ----------
    .. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_otsu(image)
    >>> binary = image > thresh
    """
    counts, centers = histogram(image, nbins)
    counts = counts.astype(float)
    # Cumulative pixel counts at-or-below / strictly-above each candidate
    # threshold (i.e. class probabilities up to a normalization).
    w_low = np.cumsum(counts)
    w_high = np.cumsum(counts[::-1])[::-1]
    # Class means for every candidate threshold.
    mu_low = np.cumsum(counts * centers) / w_low
    mu_high = (np.cumsum((counts * centers)[::-1]) / w_high[::-1])[::-1]
    # Clip ends to align the two classes: the last low-class entry would
    # pair with an empty high class, which does not exist.
    between_var = w_low[:-1] * w_high[1:] * (mu_low[:-1] - mu_high[1:]) ** 2
    # Otsu's threshold maximizes the between-class variance.
    return centers[:-1][np.argmax(between_var)]
| {
"repo_name": "emmanuelle/scikits.image",
"path": "skimage/filter/thresholding.py",
"copies": "2",
"size": "4881",
"license": "bsd-3-clause",
"hash": 1433080453095765000,
"line_mean": 35.4253731343,
"line_max": 80,
"alpha_frac": 0.6295841016,
"autogenerated": false,
"ratio": 4.143463497453311,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014047388587730357,
"num_lines": 134
} |
__all__ = ['ThresholdRescaler', 'get_thresholds_from_file']
import numpy as np
from ..utils import load_dictionary_from_file
class ThresholdRescaler(object):
    """Rescale class probabilities around per-class decision thresholds.

    Parameters
    ----------
    thresholds : float, list, tuple or ndarray
        Per-class thresholds. A float is broadcast to `n_classes` entries;
        a 2D array is collapsed to its first column.
    n_classes : int, optional
        Number of classes; inferred from `thresholds` when omitted,
        otherwise must match its length.
    """
    def __init__(self, thresholds, n_classes=None):
        if isinstance(thresholds, float): self.thresholds = np.full(n_classes, thresholds)
        elif isinstance(thresholds, (list, tuple)): self.thresholds = np.array(thresholds)
        else: self.thresholds = thresholds
        if self.thresholds.ndim == 2:
            # Collapse an (n_classes, 1)-style array to one threshold per class.
            self.thresholds = self.thresholds[:, 0]
        if self.thresholds.ndim != 1: raise ValueError('`thresholds` should be a 1D array.')
        if n_classes is None: n_classes = self.thresholds.shape[0]
        else: assert n_classes == self.thresholds.shape[0]
        # Precomputed denominators for the upper branch of the rescaling map.
        self.denominators = 2.0 * (1.0 - self.thresholds)
    #end def
    def rescale(self, Y_proba):
        """Piecewise-linearly map probabilities so each threshold lands at 0.5."""
        return rescale_proba_with_thresholds(Y_proba, self.thresholds, denominators=self.denominators)
    #end def
    def predict(self, Y_proba):
        """Boolean predictions: True where probability strictly exceeds the
        class threshold."""
        # `np.bool` was removed in NumPy 1.24 (it raised AttributeError here);
        # broadcasting the comparison also replaces the original per-row loop
        # and directly yields a bool array.
        return Y_proba > self.thresholds
    #end def
#end class
def rescale_proba_with_thresholds(Y_proba, thresholds, *, denominators=None):
    """Piecewise-linearly rescale probabilities so each class threshold maps to 0.5.

    For an entry p with class threshold t:
    p >= t  ->  0.5 + (p - t) / (2 * (1 - t))
    p <  t  ->  p / (2 * t)

    Parameters
    ----------
    Y_proba : ndarray of shape (n_samples, n_classes)
    thresholds : ndarray of shape (n_classes,)
    denominators : ndarray, optional
        Precomputed ``2 * (1 - thresholds)``; derived here when omitted.
    """
    assert Y_proba.shape[1] == thresholds.shape[0]
    if denominators is None: denominators = 2.0 * (1.0 - thresholds)
    # Vectorized replacement for the original O(n_samples * n_classes)
    # Python double loop; masked assignment keeps each branch's division
    # restricted to the entries that actually use it.
    T = np.broadcast_to(thresholds, Y_proba.shape)
    D = np.broadcast_to(denominators, Y_proba.shape)
    rescaled = np.zeros(Y_proba.shape)
    hi = Y_proba >= T
    rescaled[hi] = 0.5 + (Y_proba[hi] - T[hi]) / D[hi]
    lo = ~hi
    rescaled[lo] = Y_proba[lo] / (T[lo] * 2.0)
    return rescaled
#end def
def get_thresholds_from_file(f, classes, *, default=0.5):
    """Load per-class thresholds from a dictionary file.

    Classes missing from the file fall back to `default`. Returns a float
    array ordered like `classes`.
    """
    mapping = load_dictionary_from_file(f)
    values = [float(mapping.get(label, default)) for label in classes]
    return np.array(values)
#end def
| {
"repo_name": "skylander86/ycml",
"path": "ycml/classifiers/thresholds.py",
"copies": "1",
"size": "1965",
"license": "apache-2.0",
"hash": -8733335217416255000,
"line_mean": 32.3050847458,
"line_max": 121,
"alpha_frac": 0.6351145038,
"autogenerated": false,
"ratio": 3.2372322899505765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43723467937505767,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Tick']
class Tick(object):
    '''Tick Class.
    Carries per-traversal state through a behavior tree. BehaviorTree
    instantiates a new Tick for every tick signal and passes it as a
    parameter to the nodes visited during traversal, so all nodes can
    access the tree, target, blackboard and debug instances.
    Internally, the Tick also records which nodes are open after the tick
    signal, so BehaviorTree can keep track of them and close them when
    necessary. It additionally acts as a bridge between nodes and the
    debug instance, when one is provided.
    '''
    def __init__(self, tree=None, target=None, blackboard=None, debug=None):
        '''Constructor.
        :param tree: a BehaviorTree instance.
        :param target: a target object.
        :param blackboard: a Blackboard instance.
        :param debug: a debug instance.
        '''
        self.debug = debug
        self.blackboard = blackboard
        self.target = target
        self.tree = tree
        self._node_count = 0
        self._open_nodes = []
    def _enter_node(self, node):
        '''Records that a node has been entered (called by BaseNode).
        :param node: a node instance.
        '''
        self._open_nodes.append(node)
        self._node_count = self._node_count + 1
    def _open_node(self, node):
        '''Hook invoked when a node opens (called by BaseNode).
        :param node: a node instance.
        '''
        pass
    def _tick_node(self, node):
        '''Hook invoked when a node is ticked (called by BaseNode).
        :param node: a node instance.
        '''
        pass
    def _close_node(self, node):
        '''Removes the most recently entered node (called by BaseNode).
        :param node: a node instance.
        '''
        self._open_nodes.pop()
    def _exit_node(self, node):
        '''Hook invoked when a node exits (called by BaseNode).
        :param node: a node instance.
        '''
pass | {
"repo_name": "renatopp/behavior3py",
"path": "b3/core/tick.py",
"copies": "2",
"size": "2005",
"license": "mit",
"hash": 3079876645558248000,
"line_mean": 27.6571428571,
"line_max": 79,
"alpha_frac": 0.6034912718,
"autogenerated": false,
"ratio": 4.238900634249472,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5842391906049471,
"avg_score": null,
"num_lines": null
} |
__all__ = ['TiledCompound']
import itertools as it
import numpy as np
from mbuild.compound import Compound
from mbuild.exceptions import MBuildError
from mbuild.port import Port
from mbuild.coordinate_transform import translate
from mbuild.periodic_kdtree import PeriodicCKDTree
from mbuild import clone
class TiledCompound(Compound):
    """Replicates a Compound in any cartesian direction(s).
    Correctly updates connectivity while respecting periodic boundary
    conditions.
    Parameters
    -----------
    tile : mb.Compound
        The Compound to be replicated.
    n_tiles : array-like, shape=(3,), dtype=int, optional, default=(1, 1, 1)
        Number of times to replicate tile in the x, y and z-directions.
    name : str, optional, default=tile.name
        Descriptive string for the compound.
    """
    def __init__(self, tile, n_tiles, name=None):
        super(TiledCompound, self).__init__()
        n_tiles = np.asarray(n_tiles)
        if not np.all(n_tiles > 0):
            raise ValueError('Number of tiles must be positive.')
        # Check that the tile is periodic in the requested dimensions.
        if np.any(np.logical_and(n_tiles != 1, tile.periodicity == 0)):
            raise ValueError('Tile not periodic in at least one of the '
                             'specified dimensions.')
        if name is None:
            name = tile.name + '-'.join(str(d) for d in n_tiles)
        self.name = name
        # The replicated system's periodic box scales with the tile counts.
        self.periodicity = np.array(tile.periodicity * n_tiles)
        if all(n_tiles == 1):
            self._add_tile(tile, [(0, 0, 0)])
            self._hoist_ports(tile)
            return  # Don't waste time copying and checking bonds.
        # For every tile, assign temporary ID's to particles which are internal
        # to that tile. E.g., when replicating a tile with 1800 particles, every
        # tile will contain particles with ID's from 0-1799. These ID's are used
        # below to fix bonds crossing periodic boundary conditions where a new
        # tile has been placed.
        for idx, particle in enumerate(tile.particles(include_ports=True)):
            particle.index = idx
        # Replicate and place periodic tiles.
        # -----------------------------------
        # Each clone is shifted by its integer grid position (ijk) times
        # the tile's periodic lengths.
        for ijk in it.product(range(n_tiles[0]),
                              range(n_tiles[1]),
                              range(n_tiles[2])):
            new_tile = clone(tile)
            translate(new_tile, np.array(ijk * tile.periodicity))
            self._add_tile(new_tile, ijk)
            self._hoist_ports(new_tile)
        # Fix bonds across periodic boundaries.
        # -------------------------------------
        # Cutoff for long bonds is half the shortest periodic distance.
        bond_dist_thres = min(tile.periodicity[tile.periodicity > 0]) / 2
        # Bonds that were periodic in the original tile.
        indices_of_periodic_bonds = set()
        for particle1, particle2 in tile.bonds():
            if np.linalg.norm(particle1.pos - particle2.pos) > bond_dist_thres:
                indices_of_periodic_bonds.add((particle1.index,
                                               particle2.index))
        # Build a periodic kdtree of all particle positions.
        self.particle_kdtree = PeriodicCKDTree(data=self.xyz, bounds=self.periodicity)
        all_particles = np.asarray(list(self.particles(include_ports=False)))
        # Store bonds to remove/add since we'll be iterating over all bonds.
        bonds_to_remove = set()
        bonds_to_add = set()
        for particle1, particle2 in self.bonds():
            # Only bond index pairs that were periodic in the original tile
            # are candidates for re-stitching after replication.
            if (particle1.index, particle2.index) not in indices_of_periodic_bonds \
                    and (particle2.index, particle1.index) not in indices_of_periodic_bonds:
                continue
            # Remove the bond only when it is still long under the new
            # (enlarged) periodicity.
            if self.min_periodic_distance(particle1.pos, particle2.pos) > bond_dist_thres:
                bonds_to_remove.add((particle1, particle2))
            # Re-bond each endpoint to the matching particle image; the
            # membership checks avoid adding the same pair twice.
            image2 = self._find_particle_image(particle1, particle2,
                                               all_particles)
            image1 = self._find_particle_image(particle2, particle1,
                                               all_particles)
            if (image2, particle1) not in bonds_to_add:
                bonds_to_add.add((particle1, image2))
            if (image1, particle2) not in bonds_to_add:
                bonds_to_add.add((particle2, image1))
        for bond in bonds_to_remove:
            self.remove_bond(bond)
        for bond in bonds_to_add:
            self.add_bond(bond)
        # Clean up temporary data.
        for particle in self._particles(include_ports=True):
            particle.index = None
        del self.particle_kdtree
    def _add_tile(self, new_tile, ijk):
        """Add a tile with a label indicating its tiling position. """
        tile_label = "{0}_{1}".format(self.name, '-'.join(str(d) for d in ijk))
        self.add(new_tile, label=tile_label, inherit_periodicity=False)
    def _hoist_ports(self, new_tile):
        """Add labels for all the ports to the parent (TiledCompound). """
        for port in new_tile.children:
            if isinstance(port, Port):
                self.add(port, containment=False)
    def _find_particle_image(self, query, match, all_particles):
        """Find particle with the same index as match in a neighboring tile. """
        # NOTE(review): only the 10 nearest neighbors are searched; assumes
        # the matching image is always among them — confirm for dense tiles.
        _, idxs = self.particle_kdtree.query(query.pos, k=10)
        neighbors = all_particles[idxs]
        for particle in neighbors:
            if particle.index == match.index:
                return particle
        raise MBuildError('Unable to find matching particle image while'
                          ' stitching bonds.')
| {
"repo_name": "sallai/mbuild",
"path": "mbuild/recipes/tiled_compound.py",
"copies": "1",
"size": "5774",
"license": "mit",
"hash": -6407374002826200000,
"line_mean": 40.5395683453,
"line_max": 92,
"alpha_frac": 0.5888465535,
"autogenerated": false,
"ratio": 4.02649930264993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.511534585614993,
"avg_score": null,
"num_lines": null
} |
__all__ = ['TiledCompound']
import itertools as it
import numpy as np
from mbuild.compound import Compound
from mbuild.exceptions import MBuildError
from mbuild.port import Port
from mbuild.periodic_kdtree import PeriodicCKDTree
from mbuild import clone
class TiledCompound(Compound):
    """Replicates a Compound in any cartesian direction(s).
    Correctly updates connectivity while respecting periodic boundary
    conditions.
    Parameters
    -----------
    tile : mb.Compound
        The Compound to be replicated.
    n_tiles : array-like, shape=(3,), dtype=int, optional, default=(1, 1, 1)
        Number of times to replicate tile in the x, y and z-directions.
    name : str, optional, default=tile.name
        Descriptive string for the compound.
    """
    def __init__(self, tile, n_tiles, name=None):
        super(TiledCompound, self).__init__()
        n_tiles = np.asarray(n_tiles)
        if not np.all(n_tiles > 0):
            raise ValueError('Number of tiles must be positive.')
        # Check that the tile is periodic in the requested dimensions.
        if np.any(np.logical_and(n_tiles != 1, tile.periodicity == 0)):
            raise ValueError('Tile not periodic in at least one of the '
                             'specified dimensions.')
        if name is None:
            name = tile.name + '-'.join(str(d) for d in n_tiles)
        self.name = name
        # The replicated system's periodic box scales with the tile counts.
        self.periodicity = np.array(tile.periodicity * n_tiles)
        if all(n_tiles == 1):
            self._add_tile(tile, [(0, 0, 0)])
            self._hoist_ports(tile)
            return  # Don't waste time copying and checking bonds.
        # For every tile, assign temporary ID's to particles which are internal
        # to that tile. E.g., when replicating a tile with 1800 particles, every
        # tile will contain particles with ID's from 0-1799. These ID's are used
        # below to fix bonds crossing periodic boundary conditions where a new
        # tile has been placed.
        for idx, particle in enumerate(tile.particles(include_ports=True)):
            particle.index = idx
        # Replicate and place periodic tiles.
        # -----------------------------------
        # Each clone is shifted by its integer grid position (ijk) times
        # the tile's periodic lengths.
        for ijk in it.product(range(n_tiles[0]),
                              range(n_tiles[1]),
                              range(n_tiles[2])):
            new_tile = clone(tile)
            new_tile.translate(np.array(ijk * tile.periodicity))
            self._add_tile(new_tile, ijk)
            self._hoist_ports(new_tile)
        # Fix bonds across periodic boundaries.
        # -------------------------------------
        # Cutoff for long bonds is half the shortest periodic distance.
        bond_dist_thres = min(tile.periodicity[tile.periodicity > 0]) / 2
        # Bonds that were periodic in the original tile.
        indices_of_periodic_bonds = set()
        for particle1, particle2 in tile.bonds():
            if np.linalg.norm(particle1.pos - particle2.pos) > bond_dist_thres:
                indices_of_periodic_bonds.add((particle1.index,
                                               particle2.index))
        # Build a periodic kdtree of all particle positions.
        self.particle_kdtree = PeriodicCKDTree(data=self.xyz, bounds=self.periodicity)
        all_particles = np.asarray(list(self.particles(include_ports=False)))
        # Store bonds to remove/add since we'll be iterating over all bonds.
        bonds_to_remove = set()
        bonds_to_add = set()
        for particle1, particle2 in self.bonds():
            # Only bond index pairs that were periodic in the original tile
            # are candidates for re-stitching after replication.
            if (particle1.index, particle2.index) not in indices_of_periodic_bonds \
                    and (particle2.index, particle1.index) not in indices_of_periodic_bonds:
                continue
            # Remove the bond only when it is still long under the new
            # (enlarged) periodicity.
            if self.min_periodic_distance(particle1.pos, particle2.pos) > bond_dist_thres:
                bonds_to_remove.add((particle1, particle2))
            # Re-bond each endpoint to the matching particle image; the
            # membership checks avoid adding the same pair twice.
            image2 = self._find_particle_image(particle1, particle2,
                                               all_particles)
            image1 = self._find_particle_image(particle2, particle1,
                                               all_particles)
            if (image2, particle1) not in bonds_to_add:
                bonds_to_add.add((particle1, image2))
            if (image1, particle2) not in bonds_to_add:
                bonds_to_add.add((particle2, image1))
        for bond in bonds_to_remove:
            self.remove_bond(bond)
        for bond in bonds_to_add:
            self.add_bond(bond)
        # Clean up temporary data.
        for particle in self._particles(include_ports=True):
            particle.index = None
        del self.particle_kdtree
    def _add_tile(self, new_tile, ijk):
        """Add a tile with a label indicating its tiling position. """
        tile_label = "{0}_{1}".format(self.name, '-'.join(str(d) for d in ijk))
        self.add(new_tile, label=tile_label, inherit_periodicity=False)
    def _hoist_ports(self, new_tile):
        """Add labels for all the ports to the parent (TiledCompound). """
        for port in new_tile.children:
            if isinstance(port, Port):
                self.add(port, containment=False)
    def _find_particle_image(self, query, match, all_particles):
        """Find particle with the same index as match in a neighboring tile. """
        # NOTE(review): only the 10 nearest neighbors are searched; assumes
        # the matching image is always among them — confirm for dense tiles.
        _, idxs = self.particle_kdtree.query(query.pos, k=10)
        neighbors = all_particles[idxs]
        for particle in neighbors:
            if particle.index == match.index:
                return particle
        raise MBuildError('Unable to find matching particle image while'
                          ' stitching bonds.')
| {
"repo_name": "tcmoore3/mbuild",
"path": "mbuild/recipes/tiled_compound.py",
"copies": "3",
"size": "5723",
"license": "mit",
"hash": 8688001058264066000,
"line_mean": 40.4710144928,
"line_max": 92,
"alpha_frac": 0.5864057313,
"autogenerated": false,
"ratio": 4.018960674157303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006924000613588458,
"num_lines": 138
} |
__all__ = [ 'Tile' ]
from .Coordinate import Coordinate
from .Exceptions import HandsomeException
from .Interval import Interval
from .Pixel import Pixel, array_view, pixel_view
from .capi import generate_numpy_begin, c_void_p
from handsome.capi import Rectangle, downsample_tile, generate_numpy_begin
import math
import numpy as np
class Tile:
    """Axis-aligned pixel rectangle backed by a lazily-allocated buffer.

    The tile covers ``shape`` units of world space starting at ``origin``;
    the backing array has ``sample_rate`` samples per unit in each axis.
    """

    def __init__(self, origin, shape, sample_rate = 1, dtype=Pixel):
        """Create a tile at ``origin`` (x, y) with ``shape`` (width, height).

        ``dtype`` is the structured record type of the backing array
        (defaults to Pixel).
        """
        self.shape = shape
        self.sample_rate = sample_rate
        self.dtype = dtype
        self.set_origin(origin)
        # Lazily-computed state; filled in on first property access.
        self.__buffer = None
        self.__coordinate_image = None
        self.__tile_bounds = None
        self.__buffer_ptr = None

    def set_origin(self, origin):
        """Reposition the tile and invalidate origin-dependent caches."""
        self.origin = origin
        self.horizontal = Interval(origin[0], origin[0] + self.shape[0])
        self.vertical = Interval(origin[1], origin[1] + self.shape[1])
        # These depend on the origin, so they must be recomputed lazily.
        self.__coordinate_image = None
        self.__tile_bounds = None

    def contains_point(self, point):
        """Return True if (x, y) ``point`` lies within both tile intervals."""
        return (
            self.horizontal.contains(point[0])
            and self.vertical.contains(point[1])
        )

    def overlaps(self, other):
        """Return True if this tile and ``other`` overlap in both axes."""
        return (
            self.horizontal.overlaps(other.horizontal)
            and self.vertical.overlaps(other.vertical)
        )

    def intersection_slices(self, other):
        """Return (self_slice, other_slice) addressing the shared region.

        Each slice indexes the corresponding tile's buffer.  Returns None
        when the tiles do not intersect in either axis.
        """
        horizontal = self.horizontal.intersection(other.horizontal)
        if horizontal is None:
            return None
        vertical = self.vertical.intersection(other.vertical)
        if vertical is None:
            return None
        return (
            self.slice_from_intervals(horizontal, vertical),
            other.slice_from_intervals(horizontal, vertical)
        )

    def slice_from_intervals(self, horizontal, vertical):
        """Map world-space intervals to a numpy slice into this buffer.

        Rows are flipped relative to world y: row 0 of the buffer is the
        top edge (origin y + height), hence the ``top - vertical.*`` terms.
        """
        left = self.origin[0]
        top = self.origin[1] + self.shape[1]
        sample_rate = self.sample_rate
        return np.s_[
            (top - vertical.end) * sample_rate: (top - vertical.start) * sample_rate: 1,
            (horizontal.start - left) * sample_rate : (horizontal.end - left) * sample_rate : 1
        ]

    @property
    def buffer(self):
        """The backing array, allocated on first access from a shared cacher.

        Buffers for identical (shape, dtype) come from the same cacher so
        that many tiles can share a few large backing allocations.
        """
        if self.__buffer is not None:
            return self.__buffer
        dtype = self.dtype
        # Buffer shape is (rows, columns) = (height, width) in samples.
        shape = (self.shape[1] * self.sample_rate, self.shape[0] * self.sample_rate)
        cacher = tile_cachers.get((shape, dtype))
        if cacher is None:
            cacher = tile_cachers[(shape, dtype)] = make_tile_cacher(shape, dtype)
        ptr, buffer = next(cacher)
        self.__buffer_ptr = ptr
        self.__buffer = buffer
        return buffer

    @property
    def buffer_ptr(self):
        """C pointer to the start of the buffer, allocating it if needed."""
        if self.__buffer_ptr is not None:
            return self.__buffer_ptr
        # Touching self.buffer allocates the backing array and, as a side
        # effect, fills in self.__buffer_ptr; the local binding is unused.
        buffer = self.buffer
        return self.__buffer_ptr

    @property
    def coordinate_image(self):
        """Per-sample world coordinates for this tile (computed lazily)."""
        if self.__coordinate_image is not None:
            return self.__coordinate_image
        self.__coordinate_image = make_coordinate_image(self.origin, self.shape, self.sample_rate)
        return self.__coordinate_image

    @property
    def bounds(self):
        """World-space Rectangle covered by the tile (computed lazily)."""
        if self.__tile_bounds is not None:
            return self.__tile_bounds
        self.__tile_bounds = Rectangle(
            self.origin[0],
            self.origin[1],
            self.origin[0] + self.shape[0],
            self.origin[1] + self.shape[1]
        )
        return self.__tile_bounds

    def downsample(self, sample_rate):
        """Return a pixel view of this buffer reduced to ``sample_rate``.

        Delegates the averaging to the C routine ``downsample_tile``; the
        result is a float32 array wrapped back into a pixel view.
        """
        # Number of input samples folded into one output sample per axis.
        downrate = int(math.ceil(self.sample_rate / sample_rate))
        in_height, in_width = self.buffer.shape
        buffer = array_view(self.buffer)
        new_shape = [
            int(math.ceil(o / downrate))
            for o in buffer.shape
        ]
        # Keep the trailing channel axis un-reduced.
        new_shape[-1] = buffer.shape[-1]
        out_height, out_width = new_shape[:2]
        out = np.zeros(shape=new_shape, dtype=np.float32)
        downsample_tile(
            generate_numpy_begin(buffer),
            in_width, in_height,
            downrate, downrate,
            generate_numpy_begin(out),
            out_width, out_height
        )
        return pixel_view(out)

    def composite_from(self, from_tile):
        """Alpha-blend the overlapping region of ``from_tile`` into self.

        Raises HandsomeException when sample rates differ; does nothing
        when the tiles do not overlap.
        """
        if self.sample_rate != from_tile.sample_rate:
            raise HandsomeException(
                'sample rates do not match',
                {
                    'self.sample_rate' : self.sample_rate,
                    'from_tile.sample_rate' : from_tile.sample_rate
                }
            )
        slices = self.intersection_slices(from_tile)
        if slices is None:
            return
        target_slice, source_slice = slices
        # Blend per-sample using the source alpha channel:
        # result = (1 - A) * target + A * source
        alpha = np.copy(from_tile.buffer[source_slice]['A'])
        alpha = alpha.reshape(alpha.shape + (1,))
        target = array_view(self.buffer[target_slice])
        target[:] = (
            (1 - alpha) * target
            + alpha * array_view(from_tile.buffer[source_slice])
        )
def make_coordinate_image(origin, shape, sample_rate):
    """Build an array of per-sample world coordinates for a tile.

    The result has shape (height * sample_rate, width * sample_rate) and
    dtype Coordinate.  x increases left-to-right from origin[0]; the y
    sequence is reversed so that row 0 holds the largest y (top edge).
    """
    width, height = shape
    xs = np.linspace(
        origin[0], float(origin[0] + width), width * sample_rate,
        endpoint = False, dtype = np.float32
    )
    ys = np.linspace(
        origin[1], origin[1] + height, height * sample_rate,
        endpoint = False, dtype = np.float32
    )[::-1]
    out = np.zeros((len(ys), len(xs)), dtype=Coordinate)
    # Each row shares one y value; each column shares one x value.
    for row, y_value in enumerate(ys):
        out[row, :]['y'] = y_value
    for column, x_value in enumerate(xs):
        out[:, column]['x'] = x_value
    return out
def strides(start, stop, step=1):
    """Yield consecutive (begin, end) pairs partitioning [start, stop).

    Interior pairs span ``step`` units; the final pair is clamped so its
    end is exactly ``stop``.  Yields nothing when the range is empty.
    """
    edges = list(range(start, stop, step))
    for begin, end in zip(edges, edges[1:]):
        yield (begin, end)
    if edges:
        # Close the last partial stride against the requested stop.
        yield (edges[-1], stop)
# Shared registry of buffer cachers, keyed by (buffer shape, dtype).
tile_cachers = { }

def make_tile_cacher(shape, dtype, cache_size=int(4 * 2 ** 20)):
    """Infinite generator yielding (c_void_p, ndarray) tile buffers.

    Buffers are carved as views out of large backing allocations of about
    ``cache_size`` bytes, so many tiles share a few big arrays.  When all
    slots of one backing array have been handed out, a fresh one is
    allocated; previously yielded views keep their old backing array alive.

    Nothing is ever reclaimed or reused here — each ``next()`` returns a
    brand-new slot.
    """
    from functools import reduce
    from operator import mul
    item_size = dtype.itemsize
    # dtype may be a record class rather than a np.dtype instance, in which
    # case itemsize is only available on an instance — TODO confirm which
    # types are passed in practice.
    if not isinstance(item_size, int):
        item_size = dtype().itemsize
    items_per_tile = reduce(mul, shape)
    tile_size = item_size * items_per_tile
    # Always make room for at least one tile, even if it exceeds cache_size.
    tiles_per_cache = max(int(cache_size // tile_size), 1)
    while True:
        cache = np.zeros(tiles_per_cache * items_per_tile, dtype=dtype)
        begin = generate_numpy_begin(cache)
        for offset in range(tiles_per_cache):
            start, end = offset * items_per_tile, (offset + 1) * items_per_tile
            # Pointer arithmetic mirrors the slice: byte offset of slot start.
            ptr = c_void_p(begin.value + start * item_size)
            out = cache[start:end].reshape(shape)
            yield (ptr, out)
| {
"repo_name": "bracket/handsome",
"path": "handsome/Tile.py",
"copies": "2",
"size": "6576",
"license": "bsd-2-clause",
"hash": -6082180440706165000,
"line_mean": 25.5161290323,
"line_max": 98,
"alpha_frac": 0.5734489051,
"autogenerated": false,
"ratio": 3.834402332361516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004659363568965101,
"num_lines": 248
} |
__all__ = ['TimestepperBD']
import os
import numpy as np
import miles.default as default
from miles import (BaseTimestepper, ChunksBD, get_random_name)
class TimestepperBD(BaseTimestepper):
    """Runs a trajectory from an initial phase space point until it hits a
    milestone.

    """
    def __init__(self, trajectory_parser, configuration,
                 max_chunks=default.max_chunks):
        super().__init__(trajectory_parser, configuration, max_chunks)
        self.chunks_class = ChunksBD

    def find_transitions_in_file(self, file_name):
        """Find transitions in an output file."""
        # Columns 1 and 2 of the output hold the collective variables.
        data = np.loadtxt(file_name)
        return self.trajectory_parser.parse(data[:, 1:3])

    def find_transitions(self, chunk):
        """Find transitions in a chunk."""
        # Returning a list (rather than yielding) keeps the transitions
        # easy to ship over MPI in one piece.
        found = self.find_transitions_in_file(chunk.output_name)
        for item in found:
            self.save(item)
        return found

    def save(self, transition):
        """Store files associated to a transition."""
        destination = os.path.join(self.configuration.temp_dir,
                                   get_random_name())
        transition.file_name = destination
        x, y = transition.colvars
        with open(destination, 'w') as f:
            print('{:f} {:f}'.format(x, y), file=f)
| {
"repo_name": "clsb/miles",
"path": "miles/timestepper_bd.py",
"copies": "1",
"size": "1542",
"license": "mit",
"hash": 7410523479204792000,
"line_mean": 31.8085106383,
"line_max": 74,
"alpha_frac": 0.6452658885,
"autogenerated": false,
"ratio": 3.9844961240310077,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5129762012531007,
"avg_score": null,
"num_lines": null
} |
# __all__ = ['TimestepperMOIL']
#
# from miles import BaseTimestepper
#
#
# class TimestepperMOIL(BaseTimestepper):
# """Runs a trajectory from an initial phase space point until it hits a
# milestone.
#
# """
# def __init__(self, reporter, configuration,
# max_chunks=default.max_chunks):
# super().__init__(reporter, configuration, max_chunks)
#
# self.chunks_class = ChunksMOIL
#
# def find_transitions(self, chunk):
# """Find transitions in sequences of DCD/DVD files.
#
# """
# out = chunk.output_name
# dcd_file_name = path_ext_join(out, 'dcd')
# dvd_file_name = path_ext_join(out, 'dvd')
#
# transitions = []
#
# with DCDReader(dcd_file_name) as dcd, DVDReader(dvd_file_name) as dvd:
# for x, v in zip(dcd, dvd):
# p = PhaseSpacePoint(x, v, self._projection_mapping)
#
# self._current_time += self.configuration.time_step_length
#
# transition = self._reporter.next(p.colvars,
# self._current_time)
#
# if transition:
# self.save(transition, dcd, dvd)
# self._current_time = 0
# transitions.append(transition)
#
# return transitions
#
# def save(self, transition, dcd, dvd):
# """Store files associated to a transition into the database."""
# file_name = get_random_name()
# transition.file_name = file_name
#
# database_dir = self.configuration.database_dir
# dcd_file_name = path_ext_join(database_dir, file_name, 'dcd')
# dvd_file_name = path_ext_join(database_dir, file_name, 'dvd')
#
# dcd.save_current_frame_to(dcd_file_name)
# dvd.save_current_frame_to(dvd_file_name)
| {
"repo_name": "clsb/miles",
"path": "miles/timestepper_moil.py",
"copies": "1",
"size": "1852",
"license": "mit",
"hash": -2229327631735111200,
"line_mean": 33.9433962264,
"line_max": 80,
"alpha_frac": 0.5572354212,
"autogenerated": false,
"ratio": 3.289520426287744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43467558474877444,
"avg_score": null,
"num_lines": null
} |
__all__ = ['TimestepperNAMD']
import logging
import os
import shutil
from typing import List, Optional, Union
import numpy as np
import miles.default as default
from miles import (BaseTimestepper, Chunk, ChunksNAMD, CollectiveVariables, Clock, Configuration, Mapping, PhaseSpacePoint, Transition, TrajectoryParser, get_random_name, read_namdbin) # noqa: E501
class TimestepperNAMD(BaseTimestepper):
    """Timestepper using NAMD2.

    Extracts collective-variable trajectories from NAMD output files,
    locates milestone transitions in them, and archives the NAMD restart
    files (coor/vel/xsc) associated with each transition.
    """
    def __init__(self, trajectory_parser: TrajectoryParser,
                 configuration: Configuration,
                 collective_variables: CollectiveVariables,
                 max_chunks: int = default.max_chunks) -> None:
        super().__init__(trajectory_parser, configuration,
                         collective_variables, max_chunks)
        self.chunks_class = ChunksNAMD
        self.mapping = None  # type: Optional[Mapping]
        self.steps_per_chunk = self.configuration.steps_per_chunk

    def find_transitions(self, chunk_or_file_name: Union[Chunk, str]) \
            -> List[Transition]:
        """Find transitions within sequences of NAMD binary files.

        Accepts either a Chunk (its output_name is used) or a file-name
        prefix directly.  Timing of each phase is logged at debug level.
        """
        if isinstance(chunk_or_file_name, Chunk):
            file_name = chunk_or_file_name.output_name
        else:
            file_name = chunk_or_file_name

        with Clock() as clock:
            colvars = self._get_colvars_from_colvars_traj(file_name)
        logging.debug('get_colvars_from_colvars_traj completed after '
                      '{} seconds'.format(clock))

        # if self.collective_variables.indices:
        #     colvars = self._get_colvars_from_colvars_traj(file_name)
        # else:
        #     colvars = self._get_colvars_from_phase_space_point(file_name)

        return self.find_transitions_in_colvars(colvars, file_name)

    def _get_colvars_from_colvars_traj(self, file_name: str) -> np.array:
        """Load the selected collective-variable columns from a .colvars.traj file."""
        full_file_name = os.path.extsep.join([file_name, 'colvars.traj'])
        colvars = np.loadtxt(full_file_name)

        steps_per_chunk = self.configuration.steps_per_chunk

        # Ignore the first line which corresponds to the phase space
        # point used as initial condition (i.e., before the first time
        # step).
        assert colvars.shape[0] == steps_per_chunk + 1
        # NOTE(review): this slice also drops the final row (index
        # steps_per_chunk), not just the first — with steps_per_chunk + 1
        # rows, [1:steps_per_chunk] keeps steps_per_chunk - 1 of them.
        # Confirm whether [1:] was intended.
        return colvars[1:steps_per_chunk, self.collective_variables.indices]

    # def _get_colvars_from_phase_space_point(self, file_name: str) -> np.array:
    #     N, D = self.configuration.steps_per_chunk, self.mapping.colvars_dim
    #
    #     # We first read all but the last frame in the trajectory. Then
    #     # we read the last frame. This is due to the way NAMD stores
    #     # trajectories.
    #     colvars = np.empty((N, D), dtype=np.float64)
    #
    #     for n in range(N - 1):
    #         colvars[n, :] = self._read_colvars(file_name, n + 1)
    #     colvars[-1, :] = self._read_colvars(file_name)
    #
    #     return colvars

    def _read_colvars(self, file_name: str, n: Optional[int] = None) \
            -> np.array:
        """Read phase space point and return collective variables.

        When ``n`` is given (and non-zero), reads the intermediate frame
        ``file_name.n.coor``; otherwise reads ``file_name.coor``.
        Velocities are not read (passed as None to PhaseSpacePoint).
        """
        extsep = os.path.extsep
        if n:
            name = extsep.join([file_name, str(n)])
        else:
            name = file_name
        x = read_namdbin(extsep.join([name, 'coor']))
        # v = read_namdbin(extsep.join([name, 'vel']))
        # p = PhaseSpacePoint(x, v, self.mapping)
        p = PhaseSpacePoint(x, None, self.mapping)
        return p.colvars

    def find_transitions_in_colvars(self, colvars: np.array,
                                    prefix: str) -> List[Transition]:
        """Find transitions within sequences of collective variables.

        For each transition at step ``n``, the matching NAMD restart files
        are archived via save().  The final step's files carry the bare
        ``prefix`` name; intermediate steps are suffixed — presumably
        NAMD numbers them ``prefix.1`` .. ``prefix.(steps-1)`` with the
        last frame unsuffixed (TODO confirm against the NAMD config).
        """
        with Clock() as clock:
            pairs = self.trajectory_parser.parse(colvars)
        logging.debug('trajectory_parser.parse completed after {} seconds'
                      .format(clock))

        transitions = []

        with Clock() as clock:
            for n, transition in pairs:
                if n == self.steps_per_chunk:
                    input_file_name = prefix
                else:
                    input_file_name = '{}.{}'.format(prefix, n+1)
                self.save(transition, input_file_name)
                transitions.append(transition)
        logging.debug('timestepper.save completed after {} seconds'
                      .format(clock))

        return transitions

    def save(self, transition: Transition, output_name: str) -> None:
        """Store files associated to a transition into the database.

        Copies the coor/vel/xsc triple for ``output_name`` into the
        simulation directory under a fresh random name, then records the
        destination paths on the transition.
        """
        output_dir = self.configuration.simulation_dir
        file_name = os.path.join(output_dir, get_random_name())

        dest_file_names = []

        for suffix in ('coor', 'vel', 'xsc'):
            orig_file_name = full_file_name(output_name, suffix)
            dest_file_name = full_file_name(file_name, suffix)
            shutil.copy(orig_file_name, dest_file_name)
            dest_file_names.append(dest_file_name)

        transition.set_files(dest_file_names)
def full_file_name(name: str, suffix: str) -> str:
    """Join *name* and *suffix* with the platform extension separator."""
    return f'{name}{os.extsep}{suffix}'
| {
"repo_name": "clsb/miles",
"path": "miles/timestepper_namd.py",
"copies": "1",
"size": "5192",
"license": "mit",
"hash": -865141221824599800,
"line_mean": 35.8226950355,
"line_max": 198,
"alpha_frac": 0.5976502311,
"autogenerated": false,
"ratio": 3.7623188405797103,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48599690716797106,
"avg_score": null,
"num_lines": null
} |
__all__ = ['time_zone_by_country_and_region']
_country = {}
_country["AD"] = "Europe/Andorra"
_country["AE"] = "Asia/Dubai"
_country["AF"] = "Asia/Kabul"
_country["AG"] = "America/Antigua"
_country["AI"] = "America/Anguilla"
_country["AL"] = "Europe/Tirane"
_country["AM"] = "Asia/Yerevan"
_country["AO"] = "Africa/Luanda"
_country["AR"] = {}
_country["AR"]["01"] = "America/Argentina/Buenos_Aires"
_country["AR"]["02"] = "America/Argentina/Catamarca"
_country["AR"]["03"] = "America/Argentina/Tucuman"
_country["AR"]["04"] = "America/Argentina/Rio_Gallegos"
_country["AR"]["05"] = "America/Argentina/Cordoba"
_country["AR"]["06"] = "America/Argentina/Tucuman"
_country["AR"]["07"] = "America/Argentina/Buenos_Aires"
_country["AR"]["08"] = "America/Argentina/Buenos_Aires"
_country["AR"]["09"] = "America/Argentina/Tucuman"
_country["AR"]["10"] = "America/Argentina/Jujuy"
_country["AR"]["11"] = "America/Argentina/San_Luis"
_country["AR"]["12"] = "America/Argentina/La_Rioja"
_country["AR"]["13"] = "America/Argentina/Mendoza"
_country["AR"]["14"] = "America/Argentina/Buenos_Aires"
_country["AR"]["15"] = "America/Argentina/San_Luis"
_country["AR"]["16"] = "America/Argentina/Buenos_Aires"
_country["AR"]["17"] = "America/Argentina/Salta"
_country["AR"]["18"] = "America/Argentina/San_Juan"
_country["AR"]["19"] = "America/Argentina/San_Luis"
_country["AR"]["20"] = "America/Argentina/Rio_Gallegos"
_country["AR"]["21"] = "America/Argentina/Buenos_Aires"
_country["AR"]["22"] = "America/Argentina/Catamarca"
_country["AR"]["23"] = "America/Argentina/Ushuaia"
_country["AR"]["24"] = "America/Argentina/Tucuman"
_country["AS"] = "US/Samoa"
_country["AT"] = "Europe/Vienna"
_country["AU"] = {}
_country["AU"]["01"] = "Australia/Canberra"
_country["AU"]["02"] = "Australia/NSW"
_country["AU"]["03"] = "Australia/North"
_country["AU"]["04"] = "Australia/Queensland"
_country["AU"]["05"] = "Australia/South"
_country["AU"]["06"] = "Australia/Tasmania"
_country["AU"]["07"] = "Australia/Victoria"
_country["AU"]["08"] = "Australia/West"
_country["AW"] = "America/Aruba"
_country["AX"] = "Europe/Mariehamn"
_country["AZ"] = "Asia/Baku"
_country["BA"] = "Europe/Sarajevo"
_country["BB"] = "America/Barbados"
_country["BD"] = "Asia/Dhaka"
_country["BE"] = "Europe/Brussels"
_country["BF"] = "Africa/Ouagadougou"
_country["BG"] = "Europe/Sofia"
_country["BH"] = "Asia/Bahrain"
_country["BI"] = "Africa/Bujumbura"
_country["BJ"] = "Africa/Porto-Novo"
_country["BL"] = "America/St_Barthelemy"
_country["BM"] = "Atlantic/Bermuda"
_country["BN"] = "Asia/Brunei"
_country["BO"] = "America/La_Paz"
_country["BQ"] = "America/Curacao"
_country["BR"] = {}
_country["BR"]["01"] = "America/Rio_Branco"
_country["BR"]["02"] = "America/Maceio"
_country["BR"]["03"] = "America/Sao_Paulo"
_country["BR"]["04"] = "America/Manaus"
_country["BR"]["05"] = "America/Bahia"
_country["BR"]["06"] = "America/Fortaleza"
_country["BR"]["07"] = "America/Sao_Paulo"
_country["BR"]["08"] = "America/Sao_Paulo"
_country["BR"]["11"] = "America/Campo_Grande"
_country["BR"]["13"] = "America/Belem"
_country["BR"]["14"] = "America/Cuiaba"
_country["BR"]["15"] = "America/Sao_Paulo"
_country["BR"]["16"] = "America/Belem"
_country["BR"]["17"] = "America/Recife"
_country["BR"]["18"] = "America/Sao_Paulo"
_country["BR"]["20"] = "America/Fortaleza"
_country["BR"]["21"] = "America/Sao_Paulo"
_country["BR"]["22"] = "America/Recife"
_country["BR"]["23"] = "America/Sao_Paulo"
_country["BR"]["24"] = "America/Porto_Velho"
_country["BR"]["25"] = "America/Boa_Vista"
_country["BR"]["26"] = "America/Sao_Paulo"
_country["BR"]["27"] = "America/Sao_Paulo"
_country["BR"]["28"] = "America/Maceio"
_country["BR"]["29"] = "America/Sao_Paulo"
_country["BR"]["30"] = "America/Recife"
_country["BR"]["31"] = "America/Araguaina"
_country["BS"] = "America/Nassau"
_country["BT"] = "Asia/Thimphu"
_country["BW"] = "Africa/Gaborone"
_country["BY"] = "Europe/Minsk"
_country["BZ"] = "America/Belize"
_country["CA"] = {}
_country["CA"]["AB"] = "America/Edmonton"
_country["CA"]["BC"] = "America/Vancouver"
_country["CA"]["MB"] = "America/Winnipeg"
_country["CA"]["NB"] = "America/Halifax"
_country["CA"]["NL"] = "America/St_Johns"
_country["CA"]["NS"] = "America/Halifax"
_country["CA"]["NT"] = "America/Yellowknife"
_country["CA"]["NU"] = "America/Rankin_Inlet"
_country["CA"]["ON"] = "America/Rainy_River"
_country["CA"]["PE"] = "America/Halifax"
_country["CA"]["QC"] = "America/Montreal"
_country["CA"]["SK"] = "America/Regina"
_country["CA"]["YT"] = "America/Whitehorse"
_country["CC"] = "Indian/Cocos"
_country["CD"] = {}
_country["CD"]["02"] = "Africa/Kinshasa"
_country["CD"]["05"] = "Africa/Lubumbashi"
_country["CD"]["06"] = "Africa/Kinshasa"
_country["CD"]["08"] = "Africa/Kinshasa"
_country["CD"]["10"] = "Africa/Lubumbashi"
_country["CD"]["11"] = "Africa/Lubumbashi"
_country["CD"]["12"] = "Africa/Lubumbashi"
_country["CF"] = "Africa/Bangui"
_country["CG"] = "Africa/Brazzaville"
_country["CH"] = "Europe/Zurich"
_country["CI"] = "Africa/Abidjan"
_country["CK"] = "Pacific/Rarotonga"
_country["CL"] = "Chile/Continental"
_country["CM"] = "Africa/Lagos"
_country["CN"] = {}
_country["CN"]["01"] = "Asia/Shanghai"
_country["CN"]["02"] = "Asia/Shanghai"
_country["CN"]["03"] = "Asia/Shanghai"
_country["CN"]["04"] = "Asia/Shanghai"
_country["CN"]["05"] = "Asia/Harbin"
_country["CN"]["06"] = "Asia/Chongqing"
_country["CN"]["07"] = "Asia/Shanghai"
_country["CN"]["08"] = "Asia/Harbin"
_country["CN"]["09"] = "Asia/Shanghai"
_country["CN"]["10"] = "Asia/Shanghai"
_country["CN"]["11"] = "Asia/Chongqing"
_country["CN"]["12"] = "Asia/Shanghai"
_country["CN"]["13"] = "Asia/Urumqi"
_country["CN"]["14"] = "Asia/Chongqing"
_country["CN"]["15"] = "Asia/Chongqing"
_country["CN"]["16"] = "Asia/Chongqing"
_country["CN"]["18"] = "Asia/Chongqing"
_country["CN"]["19"] = "Asia/Harbin"
_country["CN"]["20"] = "Asia/Harbin"
_country["CN"]["21"] = "Asia/Chongqing"
_country["CN"]["22"] = "Asia/Harbin"
_country["CN"]["23"] = "Asia/Shanghai"
_country["CN"]["24"] = "Asia/Chongqing"
_country["CN"]["25"] = "Asia/Shanghai"
_country["CN"]["26"] = "Asia/Chongqing"
_country["CN"]["28"] = "Asia/Shanghai"
_country["CN"]["29"] = "Asia/Chongqing"
_country["CN"]["30"] = "Asia/Chongqing"
_country["CN"]["31"] = "Asia/Chongqing"
_country["CN"]["32"] = "Asia/Chongqing"
_country["CN"]["33"] = "Asia/Chongqing"
_country["CO"] = "America/Bogota"
_country["CR"] = "America/Costa_Rica"
_country["CU"] = "America/Havana"
_country["CV"] = "Atlantic/Cape_Verde"
_country["CW"] = "America/Curacao"
_country["CX"] = "Indian/Christmas"
_country["CY"] = "Asia/Nicosia"
_country["CZ"] = "Europe/Prague"
_country["DE"] = "Europe/Berlin"
_country["DJ"] = "Africa/Djibouti"
_country["DK"] = "Europe/Copenhagen"
_country["DM"] = "America/Dominica"
_country["DO"] = "America/Santo_Domingo"
_country["DZ"] = "Africa/Algiers"
_country["EC"] = {}
_country["EC"]["01"] = "Pacific/Galapagos"
_country["EC"]["02"] = "America/Guayaquil"
_country["EC"]["03"] = "America/Guayaquil"
_country["EC"]["04"] = "America/Guayaquil"
_country["EC"]["05"] = "America/Guayaquil"
_country["EC"]["06"] = "America/Guayaquil"
_country["EC"]["07"] = "America/Guayaquil"
_country["EC"]["08"] = "America/Guayaquil"
_country["EC"]["09"] = "America/Guayaquil"
_country["EC"]["10"] = "America/Guayaquil"
_country["EC"]["11"] = "America/Guayaquil"
_country["EC"]["12"] = "America/Guayaquil"
_country["EC"]["13"] = "America/Guayaquil"
_country["EC"]["14"] = "America/Guayaquil"
_country["EC"]["15"] = "America/Guayaquil"
_country["EC"]["17"] = "America/Guayaquil"
_country["EC"]["18"] = "America/Guayaquil"
_country["EC"]["19"] = "America/Guayaquil"
_country["EC"]["20"] = "America/Guayaquil"
_country["EC"]["22"] = "America/Guayaquil"
_country["EE"] = "Europe/Tallinn"
_country["EG"] = "Africa/Cairo"
_country["EH"] = "Africa/El_Aaiun"
_country["ER"] = "Africa/Asmera"
_country["ES"] = {}
_country["ES"]["07"] = "Europe/Madrid"
_country["ES"]["27"] = "Europe/Madrid"
_country["ES"]["29"] = "Europe/Madrid"
_country["ES"]["31"] = "Europe/Madrid"
_country["ES"]["32"] = "Europe/Madrid"
_country["ES"]["34"] = "Europe/Madrid"
_country["ES"]["39"] = "Europe/Madrid"
_country["ES"]["51"] = "Africa/Ceuta"
_country["ES"]["52"] = "Europe/Madrid"
_country["ES"]["53"] = "Atlantic/Canary"
_country["ES"]["54"] = "Europe/Madrid"
_country["ES"]["55"] = "Europe/Madrid"
_country["ES"]["56"] = "Europe/Madrid"
_country["ES"]["57"] = "Europe/Madrid"
_country["ES"]["58"] = "Europe/Madrid"
_country["ES"]["59"] = "Europe/Madrid"
_country["ES"]["60"] = "Europe/Madrid"
_country["ET"] = "Africa/Addis_Ababa"
_country["FI"] = "Europe/Helsinki"
_country["FJ"] = "Pacific/Fiji"
_country["FK"] = "Atlantic/Stanley"
_country["FO"] = "Atlantic/Faeroe"
_country["FR"] = "Europe/Paris"
_country["GA"] = "Africa/Libreville"
_country["GB"] = "Europe/London"
_country["GD"] = "America/Grenada"
_country["GE"] = "Asia/Tbilisi"
_country["GF"] = "America/Cayenne"
_country["GG"] = "Europe/Guernsey"
_country["GH"] = "Africa/Accra"
_country["GI"] = "Europe/Gibraltar"
_country["GL"] = {}
_country["GL"]["01"] = "America/Thule"
_country["GL"]["02"] = "America/Godthab"
_country["GL"]["03"] = "America/Godthab"
_country["GM"] = "Africa/Banjul"
_country["GN"] = "Africa/Conakry"
_country["GP"] = "America/Guadeloupe"
_country["GQ"] = "Africa/Malabo"
_country["GR"] = "Europe/Athens"
_country["GS"] = "Atlantic/South_Georgia"
_country["GT"] = "America/Guatemala"
_country["GU"] = "Pacific/Guam"
_country["GW"] = "Africa/Bissau"
_country["GY"] = "America/Guyana"
_country["HK"] = "Asia/Hong_Kong"
_country["HN"] = "America/Tegucigalpa"
_country["HR"] = "Europe/Zagreb"
_country["HT"] = "America/Port-au-Prince"
_country["HU"] = "Europe/Budapest"
_country["ID"] = {}
_country["ID"]["01"] = "Asia/Pontianak"
_country["ID"]["02"] = "Asia/Makassar"
_country["ID"]["03"] = "Asia/Jakarta"
_country["ID"]["04"] = "Asia/Jakarta"
_country["ID"]["05"] = "Asia/Jakarta"
_country["ID"]["06"] = "Asia/Jakarta"
_country["ID"]["07"] = "Asia/Jakarta"
_country["ID"]["08"] = "Asia/Jakarta"
_country["ID"]["09"] = "Asia/Jayapura"
_country["ID"]["10"] = "Asia/Jakarta"
_country["ID"]["11"] = "Asia/Pontianak"
_country["ID"]["12"] = "Asia/Makassar"
_country["ID"]["13"] = "Asia/Makassar"
_country["ID"]["14"] = "Asia/Makassar"
_country["ID"]["15"] = "Asia/Jakarta"
_country["ID"]["16"] = "Asia/Makassar"
_country["ID"]["17"] = "Asia/Makassar"
_country["ID"]["18"] = "Asia/Makassar"
_country["ID"]["19"] = "Asia/Pontianak"
_country["ID"]["20"] = "Asia/Makassar"
_country["ID"]["21"] = "Asia/Makassar"
_country["ID"]["22"] = "Asia/Makassar"
_country["ID"]["23"] = "Asia/Makassar"
_country["ID"]["24"] = "Asia/Jakarta"
_country["ID"]["25"] = "Asia/Pontianak"
_country["ID"]["26"] = "Asia/Pontianak"
_country["ID"]["30"] = "Asia/Jakarta"
_country["ID"]["31"] = "Asia/Makassar"
_country["ID"]["33"] = "Asia/Jakarta"
_country["IE"] = "Europe/Dublin"
_country["IL"] = "Asia/Jerusalem"
_country["IM"] = "Europe/Isle_of_Man"
_country["IN"] = "Asia/Calcutta"
_country["IO"] = "Indian/Chagos"
_country["IQ"] = "Asia/Baghdad"
_country["IR"] = "Asia/Tehran"
_country["IS"] = "Atlantic/Reykjavik"
_country["IT"] = "Europe/Rome"
_country["JE"] = "Europe/Jersey"
_country["JM"] = "America/Jamaica"
_country["JO"] = "Asia/Amman"
_country["JP"] = "Asia/Tokyo"
_country["KE"] = "Africa/Nairobi"
_country["KG"] = "Asia/Bishkek"
_country["KH"] = "Asia/Phnom_Penh"
_country["KI"] = "Pacific/Tarawa"
_country["KM"] = "Indian/Comoro"
_country["KN"] = "America/St_Kitts"
_country["KP"] = "Asia/Pyongyang"
_country["KR"] = "Asia/Seoul"
_country["KW"] = "Asia/Kuwait"
_country["KY"] = "America/Cayman"
_country["KZ"] = {}
_country["KZ"]["01"] = "Asia/Almaty"
_country["KZ"]["02"] = "Asia/Almaty"
_country["KZ"]["03"] = "Asia/Qyzylorda"
_country["KZ"]["04"] = "Asia/Aqtobe"
_country["KZ"]["05"] = "Asia/Qyzylorda"
_country["KZ"]["06"] = "Asia/Aqtau"
_country["KZ"]["07"] = "Asia/Oral"
_country["KZ"]["08"] = "Asia/Qyzylorda"
_country["KZ"]["09"] = "Asia/Aqtau"
_country["KZ"]["10"] = "Asia/Qyzylorda"
_country["KZ"]["11"] = "Asia/Almaty"
_country["KZ"]["12"] = "Asia/Qyzylorda"
_country["KZ"]["13"] = "Asia/Aqtobe"
_country["KZ"]["14"] = "Asia/Qyzylorda"
_country["KZ"]["15"] = "Asia/Almaty"
_country["KZ"]["16"] = "Asia/Aqtobe"
_country["KZ"]["17"] = "Asia/Almaty"
_country["LA"] = "Asia/Vientiane"
_country["LB"] = "Asia/Beirut"
_country["LC"] = "America/St_Lucia"
_country["LI"] = "Europe/Vaduz"
_country["LK"] = "Asia/Colombo"
_country["LR"] = "Africa/Monrovia"
_country["LS"] = "Africa/Maseru"
_country["LT"] = "Europe/Vilnius"
_country["LU"] = "Europe/Luxembourg"
_country["LV"] = "Europe/Riga"
_country["LY"] = "Africa/Tripoli"
_country["MA"] = "Africa/Casablanca"
_country["MC"] = "Europe/Monaco"
_country["MD"] = "Europe/Chisinau"
_country["ME"] = "Europe/Podgorica"
_country["MF"] = "America/Marigot"
_country["MG"] = "Indian/Antananarivo"
_country["MK"] = "Europe/Skopje"
_country["ML"] = "Africa/Bamako"
_country["MM"] = "Asia/Rangoon"
_country["MN"] = "Asia/Choibalsan"
_country["MO"] = "Asia/Macao"
_country["MP"] = "Pacific/Saipan"
_country["MQ"] = "America/Martinique"
_country["MR"] = "Africa/Nouakchott"
_country["MS"] = "America/Montserrat"
_country["MT"] = "Europe/Malta"
_country["MU"] = "Indian/Mauritius"
_country["MV"] = "Indian/Maldives"
_country["MW"] = "Africa/Blantyre"
_country["MX"] = {}
_country["MX"]["01"] = "America/Mexico_City"
_country["MX"]["02"] = "America/Tijuana"
_country["MX"]["03"] = "America/Hermosillo"
_country["MX"]["04"] = "America/Merida"
_country["MX"]["05"] = "America/Mexico_City"
_country["MX"]["06"] = "America/Chihuahua"
_country["MX"]["07"] = "America/Monterrey"
_country["MX"]["08"] = "America/Mexico_City"
_country["MX"]["09"] = "America/Mexico_City"
_country["MX"]["10"] = "America/Mazatlan"
_country["MX"]["11"] = "America/Mexico_City"
_country["MX"]["12"] = "America/Mexico_City"
_country["MX"]["13"] = "America/Mexico_City"
_country["MX"]["14"] = "America/Mazatlan"
_country["MX"]["15"] = "America/Chihuahua"
_country["MX"]["16"] = "America/Mexico_City"
_country["MX"]["17"] = "America/Mexico_City"
_country["MX"]["18"] = "America/Mazatlan"
_country["MX"]["19"] = "America/Monterrey"
_country["MX"]["20"] = "America/Mexico_City"
_country["MX"]["21"] = "America/Mexico_City"
_country["MX"]["22"] = "America/Mexico_City"
_country["MX"]["23"] = "America/Cancun"
_country["MX"]["24"] = "America/Mexico_City"
_country["MX"]["25"] = "America/Mazatlan"
_country["MX"]["26"] = "America/Hermosillo"
_country["MX"]["27"] = "America/Merida"
_country["MX"]["28"] = "America/Monterrey"
_country["MX"]["29"] = "America/Mexico_City"
_country["MX"]["30"] = "America/Mexico_City"
_country["MX"]["31"] = "America/Merida"
_country["MX"]["32"] = "America/Monterrey"
_country["MY"] = {}
_country["MY"]["01"] = "Asia/Kuala_Lumpur"
_country["MY"]["02"] = "Asia/Kuala_Lumpur"
_country["MY"]["03"] = "Asia/Kuala_Lumpur"
_country["MY"]["04"] = "Asia/Kuala_Lumpur"
_country["MY"]["05"] = "Asia/Kuala_Lumpur"
_country["MY"]["06"] = "Asia/Kuala_Lumpur"
_country["MY"]["07"] = "Asia/Kuala_Lumpur"
_country["MY"]["08"] = "Asia/Kuala_Lumpur"
_country["MY"]["09"] = "Asia/Kuala_Lumpur"
_country["MY"]["11"] = "Asia/Kuching"
_country["MY"]["12"] = "Asia/Kuala_Lumpur"
_country["MY"]["13"] = "Asia/Kuala_Lumpur"
_country["MY"]["14"] = "Asia/Kuala_Lumpur"
_country["MY"]["15"] = "Asia/Kuching"
_country["MY"]["16"] = "Asia/Kuching"
_country["MZ"] = "Africa/Maputo"
_country["NA"] = "Africa/Windhoek"
_country["NC"] = "Pacific/Noumea"
_country["NE"] = "Africa/Niamey"
_country["NF"] = "Pacific/Norfolk"
_country["NG"] = "Africa/Lagos"
_country["NI"] = "America/Managua"
_country["NL"] = "Europe/Amsterdam"
_country["NO"] = "Europe/Oslo"
_country["NP"] = "Asia/Katmandu"
_country["NR"] = "Pacific/Nauru"
_country["NU"] = "Pacific/Niue"
_country["NZ"] = {}
_country["NZ"]["85"] = "Pacific/Auckland"
_country["NZ"]["E7"] = "Pacific/Auckland"
_country["NZ"]["E8"] = "Pacific/Auckland"
_country["NZ"]["E9"] = "Pacific/Auckland"
_country["NZ"]["F1"] = "Pacific/Auckland"
_country["NZ"]["F2"] = "Pacific/Auckland"
_country["NZ"]["F3"] = "Pacific/Auckland"
_country["NZ"]["F4"] = "Pacific/Auckland"
_country["NZ"]["F5"] = "Pacific/Auckland"
_country["NZ"]["F7"] = "Pacific/Chatham"
_country["NZ"]["F8"] = "Pacific/Auckland"
_country["NZ"]["F9"] = "Pacific/Auckland"
_country["NZ"]["G1"] = "Pacific/Auckland"
_country["NZ"]["G2"] = "Pacific/Auckland"
_country["NZ"]["G3"] = "Pacific/Auckland"
_country["OM"] = "Asia/Muscat"
_country["PA"] = "America/Panama"
_country["PE"] = "America/Lima"
_country["PF"] = "Pacific/Marquesas"
_country["PG"] = "Pacific/Port_Moresby"
_country["PH"] = "Asia/Manila"
_country["PK"] = "Asia/Karachi"
_country["PL"] = "Europe/Warsaw"
_country["PM"] = "America/Miquelon"
_country["PN"] = "Pacific/Pitcairn"
_country["PR"] = "America/Puerto_Rico"
_country["PS"] = "Asia/Gaza"
_country["PT"] = {}
_country["PT"]["02"] = "Europe/Lisbon"
_country["PT"]["03"] = "Europe/Lisbon"
_country["PT"]["04"] = "Europe/Lisbon"
_country["PT"]["05"] = "Europe/Lisbon"
_country["PT"]["06"] = "Europe/Lisbon"
_country["PT"]["07"] = "Europe/Lisbon"
_country["PT"]["08"] = "Europe/Lisbon"
_country["PT"]["09"] = "Europe/Lisbon"
_country["PT"]["10"] = "Atlantic/Madeira"
_country["PT"]["11"] = "Europe/Lisbon"
_country["PT"]["13"] = "Europe/Lisbon"
_country["PT"]["14"] = "Europe/Lisbon"
_country["PT"]["16"] = "Europe/Lisbon"
_country["PT"]["17"] = "Europe/Lisbon"
_country["PT"]["18"] = "Europe/Lisbon"
_country["PT"]["19"] = "Europe/Lisbon"
_country["PT"]["20"] = "Europe/Lisbon"
_country["PT"]["21"] = "Europe/Lisbon"
_country["PT"]["22"] = "Europe/Lisbon"
_country["PW"] = "Pacific/Palau"
_country["PY"] = "America/Asuncion"
_country["QA"] = "Asia/Qatar"
_country["RE"] = "Indian/Reunion"
_country["RO"] = "Europe/Bucharest"
_country["RS"] = "Europe/Belgrade"
_country["RU"] = {}
_country["RU"]["01"] = "Europe/Volgograd"
_country["RU"]["02"] = "Asia/Irkutsk"
# --- Legacy GeoIP region-to-timezone lookup table (continued) ---
# _country maps an ISO 3166-1 alpha-2 country code either directly to an
# IANA time zone name (single-zone countries), or to a nested dict keyed
# by a legacy GeoIP region code (multi-zone countries).
# NOTE(review): these values mirror the upstream legacy GeoIP data set;
# several entries differ from current IANA practice (e.g. "VN" ->
# "Asia/Phnom_Penh", "WS" -> "Pacific/Samoa") -- confirm against the
# upstream source before "correcting" any of them.
_country["RU"]["03"] = "Asia/Novokuznetsk"
_country["RU"]["04"] = "Asia/Novosibirsk"
_country["RU"]["05"] = "Asia/Vladivostok"
_country["RU"]["06"] = "Europe/Moscow"
_country["RU"]["07"] = "Europe/Volgograd"
_country["RU"]["08"] = "Europe/Samara"
_country["RU"]["09"] = "Europe/Moscow"
_country["RU"]["10"] = "Europe/Moscow"
_country["RU"]["11"] = "Asia/Irkutsk"
_country["RU"]["13"] = "Asia/Yekaterinburg"
_country["RU"]["14"] = "Asia/Irkutsk"
_country["RU"]["15"] = "Asia/Anadyr"
_country["RU"]["16"] = "Europe/Samara"
_country["RU"]["17"] = "Europe/Volgograd"
_country["RU"]["18"] = "Asia/Krasnoyarsk"
_country["RU"]["20"] = "Asia/Irkutsk"
_country["RU"]["21"] = "Europe/Moscow"
_country["RU"]["22"] = "Europe/Volgograd"
_country["RU"]["23"] = "Europe/Kaliningrad"
_country["RU"]["24"] = "Europe/Volgograd"
_country["RU"]["25"] = "Europe/Moscow"
_country["RU"]["26"] = "Asia/Kamchatka"
_country["RU"]["27"] = "Europe/Volgograd"
_country["RU"]["28"] = "Europe/Moscow"
_country["RU"]["29"] = "Asia/Novokuznetsk"
_country["RU"]["30"] = "Asia/Vladivostok"
_country["RU"]["31"] = "Asia/Krasnoyarsk"
_country["RU"]["32"] = "Asia/Omsk"
_country["RU"]["33"] = "Asia/Yekaterinburg"
_country["RU"]["34"] = "Asia/Yekaterinburg"
_country["RU"]["35"] = "Asia/Yekaterinburg"
_country["RU"]["36"] = "Asia/Anadyr"
_country["RU"]["37"] = "Europe/Moscow"
_country["RU"]["38"] = "Europe/Volgograd"
_country["RU"]["39"] = "Asia/Krasnoyarsk"
_country["RU"]["40"] = "Asia/Yekaterinburg"
_country["RU"]["41"] = "Europe/Moscow"
_country["RU"]["42"] = "Europe/Moscow"
_country["RU"]["43"] = "Europe/Moscow"
_country["RU"]["44"] = "Asia/Magadan"
_country["RU"]["45"] = "Europe/Samara"
_country["RU"]["46"] = "Europe/Samara"
_country["RU"]["47"] = "Europe/Moscow"
_country["RU"]["48"] = "Europe/Moscow"
_country["RU"]["49"] = "Europe/Moscow"
_country["RU"]["50"] = "Asia/Yekaterinburg"
_country["RU"]["51"] = "Europe/Moscow"
_country["RU"]["52"] = "Europe/Moscow"
_country["RU"]["53"] = "Asia/Novosibirsk"
_country["RU"]["54"] = "Asia/Omsk"
_country["RU"]["55"] = "Europe/Samara"
_country["RU"]["56"] = "Europe/Moscow"
_country["RU"]["57"] = "Europe/Samara"
_country["RU"]["58"] = "Asia/Yekaterinburg"
_country["RU"]["59"] = "Asia/Vladivostok"
_country["RU"]["60"] = "Europe/Kaliningrad"
_country["RU"]["61"] = "Europe/Volgograd"
_country["RU"]["62"] = "Europe/Moscow"
_country["RU"]["63"] = "Asia/Yakutsk"
_country["RU"]["64"] = "Asia/Sakhalin"
_country["RU"]["65"] = "Europe/Samara"
_country["RU"]["66"] = "Europe/Moscow"
_country["RU"]["67"] = "Europe/Samara"
_country["RU"]["68"] = "Europe/Volgograd"
_country["RU"]["69"] = "Europe/Moscow"
_country["RU"]["70"] = "Europe/Volgograd"
_country["RU"]["71"] = "Asia/Yekaterinburg"
_country["RU"]["72"] = "Europe/Moscow"
_country["RU"]["73"] = "Europe/Samara"
_country["RU"]["74"] = "Asia/Krasnoyarsk"
_country["RU"]["75"] = "Asia/Novosibirsk"
_country["RU"]["76"] = "Europe/Moscow"
_country["RU"]["77"] = "Europe/Moscow"
_country["RU"]["78"] = "Asia/Yekaterinburg"
_country["RU"]["79"] = "Asia/Irkutsk"
_country["RU"]["80"] = "Asia/Yekaterinburg"
_country["RU"]["81"] = "Europe/Samara"
_country["RU"]["82"] = "Asia/Irkutsk"
_country["RU"]["83"] = "Europe/Moscow"
_country["RU"]["84"] = "Europe/Volgograd"
_country["RU"]["85"] = "Europe/Moscow"
_country["RU"]["86"] = "Europe/Moscow"
_country["RU"]["87"] = "Asia/Novosibirsk"
_country["RU"]["88"] = "Europe/Moscow"
_country["RU"]["89"] = "Asia/Vladivostok"
# Single-zone countries, RW..TZ: one IANA zone name per country code.
_country["RW"] = "Africa/Kigali"
_country["SA"] = "Asia/Riyadh"
_country["SB"] = "Pacific/Guadalcanal"
_country["SC"] = "Indian/Mahe"
_country["SD"] = "Africa/Khartoum"
_country["SE"] = "Europe/Stockholm"
_country["SG"] = "Asia/Singapore"
_country["SH"] = "Atlantic/St_Helena"
_country["SI"] = "Europe/Ljubljana"
_country["SJ"] = "Arctic/Longyearbyen"
_country["SK"] = "Europe/Bratislava"
_country["SL"] = "Africa/Freetown"
_country["SM"] = "Europe/San_Marino"
_country["SN"] = "Africa/Dakar"
_country["SO"] = "Africa/Mogadishu"
_country["SR"] = "America/Paramaribo"
_country["ST"] = "Africa/Sao_Tome"
_country["SV"] = "America/El_Salvador"
_country["SX"] = "America/Curacao"
_country["SY"] = "Asia/Damascus"
_country["SZ"] = "Africa/Mbabane"
_country["TC"] = "America/Grand_Turk"
_country["TD"] = "Africa/Ndjamena"
_country["TF"] = "Indian/Kerguelen"
_country["TG"] = "Africa/Lome"
_country["TH"] = "Asia/Bangkok"
_country["TJ"] = "Asia/Dushanbe"
_country["TK"] = "Pacific/Fakaofo"
_country["TL"] = "Asia/Dili"
_country["TM"] = "Asia/Ashgabat"
_country["TN"] = "Africa/Tunis"
_country["TO"] = "Pacific/Tongatapu"
_country["TR"] = "Asia/Istanbul"
_country["TT"] = "America/Port_of_Spain"
_country["TV"] = "Pacific/Funafuti"
_country["TW"] = "Asia/Taipei"
_country["TZ"] = "Africa/Dar_es_Salaam"
# Ukraine: per-region zones keyed by legacy GeoIP numeric region code.
_country["UA"] = {}
_country["UA"]["01"] = "Europe/Kiev"
_country["UA"]["02"] = "Europe/Kiev"
_country["UA"]["03"] = "Europe/Uzhgorod"
_country["UA"]["04"] = "Europe/Zaporozhye"
_country["UA"]["05"] = "Europe/Zaporozhye"
_country["UA"]["06"] = "Europe/Uzhgorod"
_country["UA"]["07"] = "Europe/Zaporozhye"
_country["UA"]["08"] = "Europe/Simferopol"
_country["UA"]["09"] = "Europe/Kiev"
_country["UA"]["10"] = "Europe/Zaporozhye"
_country["UA"]["11"] = "Europe/Simferopol"
_country["UA"]["13"] = "Europe/Kiev"
_country["UA"]["14"] = "Europe/Zaporozhye"
_country["UA"]["15"] = "Europe/Uzhgorod"
_country["UA"]["16"] = "Europe/Zaporozhye"
_country["UA"]["17"] = "Europe/Simferopol"
_country["UA"]["18"] = "Europe/Zaporozhye"
_country["UA"]["19"] = "Europe/Kiev"
_country["UA"]["20"] = "Europe/Simferopol"
_country["UA"]["21"] = "Europe/Kiev"
_country["UA"]["22"] = "Europe/Uzhgorod"
_country["UA"]["23"] = "Europe/Kiev"
_country["UA"]["24"] = "Europe/Uzhgorod"
_country["UA"]["25"] = "Europe/Uzhgorod"
_country["UA"]["26"] = "Europe/Zaporozhye"
_country["UA"]["27"] = "Europe/Kiev"
_country["UG"] = "Africa/Kampala"
# United States: per-state zones keyed by the two-letter postal code.
# (States spanning two zones are mapped to a single representative zone.)
_country["US"] = {}
_country["US"]["AK"] = "America/Anchorage"
_country["US"]["AL"] = "America/Chicago"
_country["US"]["AR"] = "America/Chicago"
_country["US"]["AZ"] = "America/Phoenix"
_country["US"]["CA"] = "America/Los_Angeles"
_country["US"]["CO"] = "America/Denver"
_country["US"]["CT"] = "America/New_York"
_country["US"]["DC"] = "America/New_York"
_country["US"]["DE"] = "America/New_York"
_country["US"]["FL"] = "America/New_York"
_country["US"]["GA"] = "America/New_York"
_country["US"]["HI"] = "Pacific/Honolulu"
_country["US"]["IA"] = "America/Chicago"
_country["US"]["ID"] = "America/Denver"
_country["US"]["IL"] = "America/Chicago"
_country["US"]["IN"] = "America/Indianapolis"
_country["US"]["KS"] = "America/Chicago"
_country["US"]["KY"] = "America/New_York"
_country["US"]["LA"] = "America/Chicago"
_country["US"]["MA"] = "America/New_York"
_country["US"]["MD"] = "America/New_York"
_country["US"]["ME"] = "America/New_York"
_country["US"]["MI"] = "America/New_York"
_country["US"]["MN"] = "America/Chicago"
_country["US"]["MO"] = "America/Chicago"
_country["US"]["MS"] = "America/Chicago"
_country["US"]["MT"] = "America/Denver"
_country["US"]["NC"] = "America/New_York"
_country["US"]["ND"] = "America/Chicago"
_country["US"]["NE"] = "America/Chicago"
_country["US"]["NH"] = "America/New_York"
_country["US"]["NJ"] = "America/New_York"
_country["US"]["NM"] = "America/Denver"
_country["US"]["NV"] = "America/Los_Angeles"
_country["US"]["NY"] = "America/New_York"
_country["US"]["OH"] = "America/New_York"
_country["US"]["OK"] = "America/Chicago"
_country["US"]["OR"] = "America/Los_Angeles"
_country["US"]["PA"] = "America/New_York"
_country["US"]["RI"] = "America/New_York"
_country["US"]["SC"] = "America/New_York"
_country["US"]["SD"] = "America/Chicago"
_country["US"]["TN"] = "America/Chicago"
_country["US"]["TX"] = "America/Chicago"
_country["US"]["UT"] = "America/Denver"
_country["US"]["VA"] = "America/New_York"
_country["US"]["VT"] = "America/New_York"
_country["US"]["WA"] = "America/Los_Angeles"
_country["US"]["WI"] = "America/Chicago"
_country["US"]["WV"] = "America/New_York"
_country["US"]["WY"] = "America/Denver"
_country["UY"] = "America/Montevideo"
# Uzbekistan: per-region zones keyed by legacy GeoIP numeric region code.
_country["UZ"] = {}
_country["UZ"]["01"] = "Asia/Tashkent"
_country["UZ"]["02"] = "Asia/Samarkand"
_country["UZ"]["03"] = "Asia/Tashkent"
_country["UZ"]["06"] = "Asia/Tashkent"
_country["UZ"]["07"] = "Asia/Samarkand"
_country["UZ"]["08"] = "Asia/Samarkand"
_country["UZ"]["09"] = "Asia/Samarkand"
_country["UZ"]["10"] = "Asia/Samarkand"
_country["UZ"]["12"] = "Asia/Samarkand"
_country["UZ"]["13"] = "Asia/Tashkent"
_country["UZ"]["14"] = "Asia/Tashkent"
# Remaining single-zone countries, VA..ZW (includes the obsolete
# country code "YU" for the former Yugoslavia).
_country["VA"] = "Europe/Vatican"
_country["VC"] = "America/St_Vincent"
_country["VE"] = "America/Caracas"
_country["VG"] = "America/Tortola"
_country["VI"] = "America/St_Thomas"
_country["VN"] = "Asia/Phnom_Penh"
_country["VU"] = "Pacific/Efate"
_country["WF"] = "Pacific/Wallis"
_country["WS"] = "Pacific/Samoa"
_country["YE"] = "Asia/Aden"
_country["YT"] = "Indian/Mayotte"
_country["YU"] = "Europe/Belgrade"
_country["ZA"] = "Africa/Johannesburg"
_country["ZM"] = "Africa/Lusaka"
_country["ZW"] = "Africa/Harare"
def time_zone_by_country_and_region(country_code, region_name=None):
    """Return the IANA time zone name for *country_code* (ISO 3166-1 alpha-2).

    For countries spanning several zones, *region_name* (a legacy GeoIP
    region code) selects the zone.  Returns ``None`` when the country is
    unknown, when a multi-zone country is queried without a usable region
    code, or when the region code is not present in the table.
    """
    if country_code not in _country:
        return None
    # '00' is the legacy GeoIP placeholder for "region unknown".
    if not region_name or region_name == '00':
        region_name = None
    timezones = _country[country_code]
    if isinstance(timezones, str):
        # Single-zone country: the entry is the zone name itself.
        return timezones
    if region_name:
        return timezones.get(region_name)
    # Multi-zone country but no region supplied: nothing sensible to return.
    # (Previously this fell off the end and returned None implicitly.)
    return None
| {
"repo_name": "mygu/django-googlemap",
"path": "googlemap/ip2geo/timezone.py",
"copies": "2",
"size": "27245",
"license": "mit",
"hash": -7943400278920077000,
"line_mean": 37.1582633053,
"line_max": 68,
"alpha_frac": 0.6175445036,
"autogenerated": false,
"ratio": 2.4112753341003628,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4028819837700363,
"avg_score": null,
"num_lines": null
} |
__all__ = ['TokensToIndexTransformer']
import logging
from .base import PureTransformer
from .text import ListCountVectorizer
logger = logging.getLogger(__name__)
class TokensToIndexTransformer(PureTransformer):
    """Transforms token sequences into sequences of integer vocabulary indexes.

    A :class:`ListCountVectorizer` is fitted to learn the vocabulary.  At
    transform time each token is mapped to ``vocabulary index + 1`` (0 is
    reserved for unknown tokens), or unknown tokens are dropped entirely
    when ``skip_unknown`` is true.  If ``pad_sequences`` is set, the index
    sequences are padded/truncated with Keras' ``pad_sequences``:
    ``True`` pads to the longest sequence seen (remembered for later
    calls), an integer pads to that fixed length.
    """

    def __init__(self, skip_unknown=False, pad_sequences=None, count_vectorizer_args=None, pad_sequences_args=None, **kwargs):
        super(TokensToIndexTransformer, self).__init__(**kwargs)

        self.skip_unknown = skip_unknown
        self.pad_sequences = pad_sequences
        # BUGFIX: these were mutable default arguments ({}), which Python
        # shares across every instance; default to None and normalize here.
        self.count_vectorizer_args = {} if count_vectorizer_args is None else count_vectorizer_args
        self.pad_sequences_args = {} if pad_sequences_args is None else pad_sequences_args
    #end def

    def fit(self, X, *args, **kwargs):
        """Learn the vocabulary from the token sequences in X."""
        self.count_vectorizer_ = ListCountVectorizer(**self.count_vectorizer_args).fit(X)
        logger.debug('TokensToIndexTransformer vocabulary fitted with size {}.'.format(len(self.vocabulary_)))

        return self
    #end def

    def _transform(self, X, y=None, **kwargs):
        """Map each token sequence in X to a list (or padded array) of indexes."""
        if 'maxlen' in self.pad_sequences_args:
            raise ValueError('The `maxlen` argument should not be set in `pad_sequences_args`. Set it in `pad_sequences` instead.')

        analyzer = self.count_vectorizer_.build_analyzer()
        V = self.vocabulary_

        X_transformed = []
        for seq in X:
            indexes = []
            for j, tok in enumerate(analyzer(seq)):
                index = V.get(tok)
                # getattr() guards against unpickled instances from before
                # skip_unknown existed.  Index 0 marks unknown tokens, so
                # known tokens are shifted up by one.
                if not getattr(self, 'skip_unknown', False): indexes.append(0 if index is None else (index + 1))
                elif index is not None: indexes.append(index)
            #end for

            X_transformed.append(indexes)
        #end for

        if self.pad_sequences is not None:
            from keras.preprocessing.sequence import pad_sequences as keras_pad_sequences

            # pad_sequences=True: reuse the length discovered on a previous
            # call (pad_sequences_maxlen_), or let Keras pick the max length.
            maxlen = getattr(self, 'pad_sequences_maxlen_', None if self.pad_sequences is True else self.pad_sequences)
            X_transformed = keras_pad_sequences(X_transformed, maxlen=maxlen, **self.pad_sequences_args)
            if self.pad_sequences is True or maxlen is not None:
                logger.debug('TokensToIndexTransformer transformed sequences has max length {}.'.format(X_transformed.shape[1]))
                self.pad_sequences_maxlen_ = X_transformed.shape[1]
        #end if

        return X_transformed
    #end def

    @property
    def vocabulary_(self): return self.count_vectorizer_.vocabulary_

    @property
    def stop_words_(self): return self.count_vectorizer_.stop_words_

    def __repr__(self):
        count_vectorizer_repr = '{}(vocabulary_={}, stop_words_={})'.format(self.count_vectorizer_.__class__.__name__, len(getattr(self.count_vectorizer_, 'vocabulary_', [])), len(getattr(self.count_vectorizer_, 'stop_words_', []))) if hasattr(self, 'count_vectorizer_') else None

        return '{}(skip_unknown={}, pad_sequences={}, count_vectorizer_args={}, pad_sequences_args={}, count_vectorizer_={})'.format(self.__class__.__name__, self.skip_unknown, self.pad_sequences, self.count_vectorizer_args, self.pad_sequences_args, count_vectorizer_repr)
    #end def
#end class
| {
"repo_name": "skylander86/ycml",
"path": "ycml/transformers/sequences.py",
"copies": "1",
"size": "3076",
"license": "apache-2.0",
"hash": 6265458517541118000,
"line_mean": 41.1369863014,
"line_max": 280,
"alpha_frac": 0.6518205462,
"autogenerated": false,
"ratio": 3.9588159588159586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5110636505015959,
"avg_score": null,
"num_lines": null
} |
__all__ = ["to_nibabel", "from_nibabel", "nifti_to_ants"]
import os
from tempfile import mkstemp
import numpy as np
import nibabel as nib
from ..core import ants_image_io as iio2
def to_nibabel(image):
    """
    Convert an ANTsImage to a Nibabel image.

    The image is round-tripped through a temporary NIfTI file on disk.
    The temporary file is intentionally left behind because nibabel
    lazily loads the voxel data from it.
    """
    # Removed the redundant local `import nibabel as nib`: the module
    # already imports nibabel as nib at top level.
    fd, tmpfile = mkstemp(suffix=".nii.gz")
    image.to_filename(tmpfile)
    new_img = nib.load(tmpfile)
    os.close(fd)
    # os.remove(tmpfile) ## Don't remove tmpfile as nibabel lazy loads the data.
    return new_img
def from_nibabel(nib_image):
    """
    Convert a nibabel image to an ANTsImage.

    Round-trips through a temporary NIfTI file, which is always removed
    afterwards (even if writing or reading raises).
    """
    fd, tmpfile = mkstemp(suffix=".nii.gz")
    try:
        nib_image.to_filename(tmpfile)
        new_img = iio2.image_read(tmpfile)
    finally:
        # Previously the temp file leaked when to_filename/image_read raised.
        os.close(fd)
        os.remove(tmpfile)
    return new_img
def nifti_to_ants( nib_image ):
    """
    Converts a given Nifti image into an ANTsPy image

    Parameters
    ----------
    img: NiftiImage

    Returns
    -------
    ants_image: ANTsImage, or None when the input has fewer than 3 dimensions
    """
    ndim = nib_image.ndim

    if ndim < 3:
        print("Dimensionality is less than 3.")
        return None

    # Recover origin/spacing/direction from the qform affine and header.
    q_form = nib_image.get_qform()
    spacing = nib_image.header["pixdim"][1 : ndim + 1]

    origin = np.zeros((ndim))
    origin[:3] = q_form[:3, 3]

    direction = np.diag(np.ones(ndim))
    direction[:3, :3] = q_form[:3, :3] / spacing[:3]

    # BUGFIX: `np.float` was removed in NumPy 1.24 and `get_data()` was
    # removed in nibabel 5.0.  np.asanyarray(dataobj) is nibabel's
    # documented replacement for get_data(), and np.float64 is what
    # np.float aliased; the resulting float64 array is unchanged.
    ants_img = iio2.from_numpy(
        data = np.asanyarray(nib_image.dataobj).astype( np.float64 ),
        origin = origin.tolist(),
        spacing = spacing.tolist(),
        direction = direction )

    return ants_img
| {
"repo_name": "ANTsX/ANTsPy",
"path": "ants/utils/convert_nibabel.py",
"copies": "1",
"size": "1572",
"license": "apache-2.0",
"hash": -8781720737191637000,
"line_mean": 21.7826086957,
"line_max": 80,
"alpha_frac": 0.6017811705,
"autogenerated": false,
"ratio": 3.1377245508982035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42395057213982035,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Tool']
from typing import Dict, Optional, Any
import yaml
import docker
class Tool(object):
    """Describes a tool: a named Docker image plus the environment it needs."""

    def __init__(self,
                 name: str,
                 image: str,
                 environment: Dict[str, str],
                 source: Optional[str] = None
                 ) -> None:
        """
        Builds a new tool description.

        Parameters:
            name: the name of the tool.
            image: the name of the Docker image that provides the tool.
            environment: environment variables that should be injected
                when the tool is loaded inside a container.
            source: the name of the source providing the tool, if any.
        """
        self._name = name
        self._image = image
        self._environment = environment
        self._source = source

    @staticmethod
    def from_dict(d: Dict[str, Any]) -> 'Tool':
        """Reconstructs a tool description from its dictionary form."""
        return Tool(name=d['name'],
                    image=d['image'],
                    environment=d['environment'].copy(),
                    source=d.get('source'))

    def to_dict(self) -> Dict[str, Any]:
        """Serialises this tool description to a dictionary."""
        return {'name': self._name,
                'image': self._image,
                'environment': dict(self._environment),
                'source': self._source}

    @property
    def name(self) -> str:
        """The name of this tool."""
        return self._name

    @property
    def image(self) -> str:
        """The name of the Docker image for this tool."""
        return self._image

    @property
    def environment(self) -> Dict[str, str]:
        """
        A copy of the environment variables that should be used when the
        tool is mounted inside a container.
        """
        return dict(self._environment)

    @property
    def source(self) -> Optional[str]:
        """The name of the source that provides this tool, if any."""
        return self._source
| {
"repo_name": "ChrisTimperley/AutomatedRepairBenchmarks.c",
"path": "bugzoo/core/tool.py",
"copies": "3",
"size": "1906",
"license": "mit",
"hash": -5149253597954320000,
"line_mean": 26.2285714286,
"line_max": 77,
"alpha_frac": 0.5162644281,
"autogenerated": false,
"ratio": 4.717821782178218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 70
} |
__all__ = ['ToolTip']
try:
import tkinter as tk
from tkinter import ttk
except:
import Tkinter as tk
import ttk
class ToolTip(object):
    """Simple hover tooltip for a Tkinter widget.

    ``showtip(text)`` pops up a borderless Toplevel near the widget and
    ``hidetip()`` dismisses it.  Use the module-level ``create`` helper to
    wire these to <Enter>/<Leave> events.
    """
    def __init__(self, widget):
        # Widget the tooltip is attached to; the tip window is created lazily.
        self.widget = widget
        self.tipwindow = None
        self.id = None
        self.x = self.y = 0
    def showtip(self, text):
        "Display text in tooltip window"
        self.text = text
        # Already showing, or nothing to show: do nothing.
        if self.tipwindow or not self.text:
            return
        # Place the tip slightly below/right of the "insert" position.
        # NOTE(review): bbox("insert") assumes a widget with an "insert"
        # index (Entry/Text); on other widgets it may return None -- confirm.
        x, y, cx, cy = self.widget.bbox("insert")
        x = x + self.widget.winfo_rootx() + 27
        y = y + cy + self.widget.winfo_rooty() +27
        self.tipwindow = tw = tk.Toplevel(self.widget)
        # Strip window decorations so the tip looks like a floating label.
        tw.wm_overrideredirect(1)
        tw.wm_geometry("+%d+%d" % (x, y))
        try:
            # For Mac OS
            tw.tk.call("::tk::unsupported::MacWindowStyle",
                       "style", tw._w,
                       "help", "noActivates")
        except tk.TclError:
            pass
        label = tk.Label(tw, text=self.text, justify=tk.LEFT,
                      background="#ffffe0", relief=tk.SOLID, borderwidth=1,
                      font=("tahoma", "8", "normal"))
        label.pack(ipadx=1)
    def hidetip(self):
        # Destroy the tip window, if one is currently showing.
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()
def create(widget, text):
    """Attach a hover tooltip displaying *text* to *widget*."""
    tip = ToolTip(widget)
    widget.bind('<Enter>', lambda event: tip.showtip(text))
    widget.bind('<Leave>', lambda event: tip.hidetip())
if __name__ == '__main__':
    # Demo: two buttons, each with a tooltip attached.
    root = tk.Tk()
    for idx in range(0, 2):
        b = tk.Button(root, text='A button')
        b.grid()
        create(b, 'A tooltip !!')
    root.mainloop()
| {
"repo_name": "ArcherSys/ArcherSys",
"path": "Lib/site-packages/pygubu/widgets/simpletooltip.py",
"copies": "2",
"size": "1722",
"license": "mit",
"hash": -7479057489036518000,
"line_mean": 25.90625,
"line_max": 75,
"alpha_frac": 0.5197444832,
"autogenerated": false,
"ratio": 3.5578512396694215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0047758575900507015,
"num_lines": 64
} |
__all__ = ('TopCachedEncryptedHeapStorage',)
import logging
import tempfile
import mmap
import pyoram
from pyoram.util.virtual_heap import SizedVirtualHeap
from pyoram.encrypted_storage.encrypted_heap_storage import \
(EncryptedHeapStorageInterface,
EncryptedHeapStorage)
import tqdm
import six
from six.moves import xrange
log = logging.getLogger("pyoram")
class TopCachedEncryptedHeapStorage(EncryptedHeapStorageInterface):
    """
    An encrypted block storage device for accessing memory
    organized as a heap, where the top 1 or more levels can
    be cached in local memory. This achieves two things:

      (1) Reduces the number of buckets that need to be read
          from or written to external storage for a given
          path I/O operation.
      (2) Allows certain block storage devices to achieve
          concurrency across path writes by partioning the
          storage space into independent subheaps starting
          below the cache line.

    This devices takes as input an existing encrypted heap
    storage device. This class should not be cloned or used
    to setup storage, but rather used as a wrapper class for
    an existing heap storage device to speed up a bulk set
    of I/O requests. The original heap storage device should
    not be used after it is wrapped by this class. This
    class will close the original device when closing
    itself.

    The number of cached levels (starting from the root
    bucket at level 0) can be set with the 'cached_levels'
    keyword (>= 1).

    By default, this will create an independent storage
    device capable of reading from and writing to the
    original storage devices memory for each independent
    subheap (if any) below the last cached level. The
    'concurrency_level' keyword can be used to limit the
    number of concurrent devices to some level below the
    cache line (>= 0, <= 'cached_levels').

    Values for 'cached_levels' and 'concurrency_level' will
    be automatically reduced when they are larger than what
    is allowed by the heap size.
    """
    def __new__(cls, *args, **kwds):
        # With cached_levels == 0 there is nothing to cache: return the
        # wrapped device itself (augmented with an empty cache attribute)
        # instead of constructing a wrapper.
        if kwds.get("cached_levels", 1) == 0:
            assert len(args) == 1
            storage = args[0]
            storage.cached_bucket_data = bytes()
            return storage
        else:
            return super(TopCachedEncryptedHeapStorage, cls).\
                __new__(cls)
    def __init__(self,
                 heap_storage,
                 cached_levels=1,
                 concurrency_level=None):
        # Wrap *heap_storage*, downloading the top *cached_levels* levels
        # of buckets into a local memory-mapped temp file and cloning one
        # sub-device per subheap root at *concurrency_level*.
        assert isinstance(heap_storage, EncryptedHeapStorage)
        assert cached_levels != 0
        vheap = heap_storage.virtual_heap
        # cached_levels < 0 means "cache the entire heap".
        if cached_levels < 0:
            cached_levels = vheap.levels
        if concurrency_level is None:
            concurrency_level = cached_levels
        assert concurrency_level >= 0
        cached_levels = min(vheap.levels, cached_levels)
        concurrency_level = min(cached_levels, concurrency_level)
        self._external_level = cached_levels
        total_buckets = sum(vheap.bucket_count_at_level(l)
                            for l in xrange(cached_levels))
        self._root_device = heap_storage
        # clone before we download the cache so that we can
        # track bytes transferred during read/write requests
        # (separate from the cached download)
        self._concurrent_devices = \
            {vheap.first_bucket_at_level(0): self._root_device.clone_device()}
        self._cached_bucket_count = total_buckets
        self._cached_buckets_tempfile = tempfile.TemporaryFile()
        self._cached_buckets_tempfile.seek(0)
        with tqdm.tqdm(desc=("Downloading %s Cached Heap Buckets"
                             % (self._cached_bucket_count)),
                       total=self._cached_bucket_count*self._root_device.bucket_size,
                       unit="B",
                       unit_scale=True,
                       disable=not pyoram.config.SHOW_PROGRESS_BAR) as progress_bar:
            for b, bucket in enumerate(
                    self._root_device.bucket_storage.yield_blocks(
                        xrange(vheap.first_bucket_at_level(cached_levels)))):
                self._cached_buckets_tempfile.write(bucket)
                progress_bar.update(self._root_device.bucket_size)
        self._cached_buckets_tempfile.flush()
        # Memory-map the temp file so cached buckets can be sliced in place.
        self._cached_buckets_mmap = mmap.mmap(
            self._cached_buckets_tempfile.fileno(), 0)
        log.info("%s: Cloning %s sub-heap devices"
                 % (self.__class__.__name__, vheap.bucket_count_at_level(concurrency_level)))
        # Avoid cloning devices when the cache line is at the root
        # bucket or when the entire heap is cached
        if (concurrency_level > 0) and \
           (concurrency_level <= vheap.last_level):
            for b in xrange(vheap.first_bucket_at_level(concurrency_level),
                            vheap.first_bucket_at_level(concurrency_level+1)):
                try:
                    self._concurrent_devices[b] = self._root_device.clone_device()
                except:                                    # pragma: no cover
                    log.error(                             # pragma: no cover
                        "%s: Exception encountered "       # pragma: no cover
                        "while cloning device. "           # pragma: no cover
                        "Closing storage."                 # pragma: no cover
                        % (self.__class__.__name__))       # pragma: no cover
                    self.close()                           # pragma: no cover
                    raise                                  # pragma: no cover
        self._subheap_storage = {}
        # Avoid populating this dictionary when the entire
        # heap is cached
        if self._external_level <= vheap.last_level:
            # Map each first-external-level bucket to the concurrent device
            # owning its ancestor subheap root at concurrency_level.
            for b in xrange(vheap.first_bucket_at_level(self._external_level),
                            vheap.first_bucket_at_level(self._external_level+1)):
                node = vheap.Node(b)
                while node.bucket not in self._concurrent_devices:
                    node = node.parent_node()
                assert node.bucket >= 0
                assert node.level == concurrency_level
                self._subheap_storage[b] = self._concurrent_devices[node.bucket]
    #
    # Additional Methods
    #
    @property
    def cached_bucket_data(self):
        # Raw memory-mapped bytes of all locally cached buckets.
        return self._cached_buckets_mmap
    #
    # Define EncryptedHeapStorageInterface Methods
    #
    @property
    def key(self):
        return self._root_device.key
    @property
    def raw_storage(self):
        return self._root_device.raw_storage
    #
    # Define HeapStorageInterface Methods
    #
    def clone_device(self, *args, **kwds):
        raise NotImplementedError(                  # pragma: no cover
            "Class is not designed for cloning")    # pragma: no cover
    @classmethod
    def compute_storage_size(cls, *args, **kwds):
        return EncryptedHeapStorage.compute_storage_size(*args, **kwds)
    @classmethod
    def setup(cls, *args, **kwds):
        raise NotImplementedError(                      # pragma: no cover
            "Class is not designed to setup storage")   # pragma: no cover
    @property
    def header_data(self):
        return self._root_device.header_data
    @property
    def bucket_count(self):
        return self._root_device.bucket_count
    @property
    def bucket_size(self):
        return self._root_device.bucket_size
    @property
    def blocks_per_bucket(self):
        return self._root_device.blocks_per_bucket
    @property
    def storage_name(self):
        return self._root_device.storage_name
    @property
    def virtual_heap(self):
        return self._root_device.virtual_heap
    @property
    def bucket_storage(self):
        return self._root_device.bucket_storage
    def update_header_data(self, new_header_data):
        self._root_device.update_header_data(new_header_data)
    def close(self):
        # Flush the locally cached buckets back to external storage, then
        # close every cloned device and the wrapped root device.
        log.info("%s: Uploading %s cached bucket data before closing"
                 % (self.__class__.__name__, self._cached_bucket_count))
        with tqdm.tqdm(desc=("Uploading %s Cached Heap Buckets"
                             % (self._cached_bucket_count)),
                       total=self._cached_bucket_count*self.bucket_size,
                       unit="B",
                       unit_scale=True,
                       disable=not pyoram.config.SHOW_PROGRESS_BAR) as progress_bar:
            self.bucket_storage.\
                write_blocks(
                    xrange(self._cached_bucket_count),
                    (self._cached_buckets_mmap[(b*self.bucket_size):
                                               ((b+1)*self.bucket_size)]
                     for b in xrange(self._cached_bucket_count)),
                    callback=lambda i: progress_bar.update(self._root_device.bucket_size))
            for b in self._concurrent_devices:
                self._concurrent_devices[b].close()
            self._root_device.close()
            # forces the bar to become full at close
            # even if the write_blocks action was faster
            # than the mininterval time
            progress_bar.mininterval = 0
        self._cached_buckets_mmap.close()
        self._cached_buckets_tempfile.close()
    def read_path(self, b, level_start=0):
        # Read the buckets along the root-to-b path, serving buckets above
        # the cache line from the local mmap and the rest from the
        # appropriate subheap device.
        assert 0 <= b < self.virtual_heap.bucket_count()
        bucket_list = self.virtual_heap.Node(b).bucket_path_from_root()
        if len(bucket_list) <= self._external_level:
            # Entire path is cached locally.
            return [self._cached_buckets_mmap[(bb*self.bucket_size):
                                              ((bb+1)*self.bucket_size)]
                    for bb in bucket_list[level_start:]]
        elif level_start >= self._external_level:
            # Entire requested portion lives in external storage.
            return self._subheap_storage[bucket_list[self._external_level]].\
                bucket_storage.read_blocks(bucket_list[level_start:])
        else:
            # Path straddles the cache line: combine both sources.
            local_buckets = bucket_list[:self._external_level]
            external_buckets = bucket_list[self._external_level:]
            buckets = []
            for bb in local_buckets[level_start:]:
                buckets.append(
                    self._cached_buckets_mmap[(bb*self.bucket_size):
                                              ((bb+1)*self.bucket_size)])
            if len(external_buckets) > 0:
                buckets.extend(
                    self._subheap_storage[external_buckets[0]].\
                    bucket_storage.read_blocks(external_buckets))
            assert len(buckets) == len(bucket_list[level_start:])
            return buckets
    def write_path(self, b, buckets, level_start=0):
        # Write the buckets along the root-to-b path; mirror of read_path.
        assert 0 <= b < self.virtual_heap.bucket_count()
        bucket_list = self.virtual_heap.Node(b).bucket_path_from_root()
        if len(bucket_list) <= self._external_level:
            # Entire path is cached locally.
            for bb, bucket in zip(bucket_list[level_start:], buckets):
                self._cached_buckets_mmap[(bb*self.bucket_size):
                                          ((bb+1)*self.bucket_size)] = bucket
        elif level_start >= self._external_level:
            # Entire requested portion lives in external storage.
            self._subheap_storage[bucket_list[self._external_level]].\
                bucket_storage.write_blocks(bucket_list[level_start:], buckets)
        else:
            # Path straddles the cache line: split writes across sources.
            buckets = list(buckets)
            assert len(buckets) == len(bucket_list[level_start:])
            local_buckets = bucket_list[:self._external_level]
            external_buckets = bucket_list[self._external_level:]
            ndx = -1
            for ndx, bb in enumerate(local_buckets[level_start:]):
                self._cached_buckets_mmap[(bb*self.bucket_size):
                                          ((bb+1)*self.bucket_size)] = buckets[ndx]
            if len(external_buckets) > 0:
                self._subheap_storage[external_buckets[0]].\
                    bucket_storage.write_blocks(external_buckets,
                                                buckets[(ndx+1):])
    @property
    def bytes_sent(self):
        # Sums over the cloned devices only; the initial cache download and
        # final upload are presumably not counted here -- see the clone
        # comment in __init__.
        return sum(device.bytes_sent for device
                   in self._concurrent_devices.values())
    @property
    def bytes_received(self):
        # See bytes_sent regarding what is (and is not) counted.
        return sum(device.bytes_received for device
                   in self._concurrent_devices.values())
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/encrypted_storage/top_cached_encrypted_heap_storage.py",
"copies": "1",
"size": "12369",
"license": "mit",
"hash": -430434162053232960,
"line_mean": 40.9288135593,
"line_max": 93,
"alpha_frac": 0.5796749939,
"autogenerated": false,
"ratio": 4.369127516778524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5448802510678523,
"avg_score": null,
"num_lines": null
} |
__all__ = ["TopologicalSorter", "CycleError"]
_NODE_OUT = -1
_NODE_DONE = -2
class _NodeInfo:
__slots__ = "node", "npredecessors", "successors"
def __init__(self, node):
# The node this class is augmenting.
self.node = node
# Number of predecessors, generally >= 0. When this value falls to 0,
# and is returned by get_ready(), this is set to _NODE_OUT and when the
# node is marked done by a call to done(), set to _NODE_DONE.
self.npredecessors = 0
# List of successor nodes. The list can contain duplicated elements as
# long as they're all reflected in the successor's npredecessors attribute).
self.successors = []
class CycleError(ValueError):
    """Subclass of ValueError raised by TopologicalSorter if cycles exist in the graph

    If multiple cycles exist, only one undefined choice among them will be reported
    and included in the exception. The detected cycle can be accessed via the second
    element in the *args* attribute of the exception instance and consists in a list
    of nodes, such that each node is, in the graph, an immediate predecessor of the
    next node in the list. In the reported list, the first and the last node will be
    the same, to make it clear that it is cyclic.
    """
    pass
class TopologicalSorter:
"""Provides functionality to topologically sort a graph of hashable nodes"""
def __init__(self, graph=None):
self._node2info = {}
self._ready_nodes = None
self._npassedout = 0
self._nfinished = 0
if graph is not None:
for node, predecessors in graph.items():
self.add(node, *predecessors)
def _get_nodeinfo(self, node):
result = self._node2info.get(node)
if result is None:
self._node2info[node] = result = _NodeInfo(node)
return result
def add(self, node, *predecessors):
"""Add a new node and its predecessors to the graph.
Both the *node* and all elements in *predecessors* must be hashable.
If called multiple times with the same node argument, the set of dependencies
will be the union of all dependencies passed in.
It is possible to add a node with no dependencies (*predecessors* is not provided)
as well as provide a dependency twice. If a node that has not been provided before
is included among *predecessors* it will be automatically added to the graph with
no predecessors of its own.
Raises ValueError if called after "prepare".
"""
if self._ready_nodes is not None:
raise ValueError("Nodes cannot be added after a call to prepare()")
# Create the node -> predecessor edges
nodeinfo = self._get_nodeinfo(node)
nodeinfo.npredecessors += len(predecessors)
# Create the predecessor -> node edges
for pred in predecessors:
pred_info = self._get_nodeinfo(pred)
pred_info.successors.append(node)
def prepare(self):
"""Mark the graph as finished and check for cycles in the graph.
If any cycle is detected, "CycleError" will be raised, but "get_ready" can
still be used to obtain as many nodes as possible until cycles block more
progress. After a call to this function, the graph cannot be modified and
therefore no more nodes can be added using "add".
"""
if self._ready_nodes is not None:
raise ValueError("cannot prepare() more than once")
self._ready_nodes = [
i.node for i in self._node2info.values() if i.npredecessors == 0
]
# ready_nodes is set before we look for cycles on purpose:
# if the user wants to catch the CycleError, that's fine,
# they can continue using the instance to grab as many
# nodes as possible before cycles block more progress
cycle = self._find_cycle()
if cycle:
raise CycleError("nodes are in a cycle", cycle)
def get_ready(self):
"""Return a tuple of all the nodes that are ready.
Initially it returns all nodes with no predecessors; once those are marked
as processed by calling "done", further calls will return all new nodes that
have all their predecessors already processed. Once no more progress can be made,
empty tuples are returned.
Raises ValueError if called without calling "prepare" previously.
"""
if self._ready_nodes is None:
raise ValueError("prepare() must be called first")
# Get the nodes that are ready and mark them
result = tuple(self._ready_nodes)
n2i = self._node2info
for node in result:
n2i[node].npredecessors = _NODE_OUT
# Clean the list of nodes that are ready and update
# the counter of nodes that we have returned.
self._ready_nodes.clear()
self._npassedout += len(result)
return result
def is_active(self):
"""Return True if more progress can be made and ``False`` otherwise.
Progress can be made if cycles do not block the resolution and either there
are still nodes ready that haven't yet been returned by "get_ready" or the
number of nodes marked "done" is less than the number that have been returned
by "get_ready".
Raises ValueError if called without calling "prepare" previously.
"""
if self._ready_nodes is None:
raise ValueError("prepare() must be called first")
return self._nfinished < self._npassedout or bool(self._ready_nodes)
def __bool__(self):
return self.is_active()
def done(self, *nodes):
"""Marks a set of nodes returned by "get_ready" as processed.
This method unblocks any successor of each node in *nodes* for being returned
in the future by a a call to "get_ready"
Raises :exec:`ValueError` if any node in *nodes* has already been marked as
processed by a previous call to this method, if a node was not added to the
graph by using "add" or if called without calling "prepare" previously or if
node has not yet been returned by "get_ready".
"""
if self._ready_nodes is None:
raise ValueError("prepare() must be called first")
n2i = self._node2info
for node in nodes:
nodeinfo = n2i.get(node)
# Check if we know about this node (it was added previously using add()
if nodeinfo is None:
raise ValueError(
"node {node} was not added using add()".format(node=node)
)
# If the node has not being returned (marked as ready) previously, inform the user.
stat = nodeinfo.npredecessors
if stat != _NODE_OUT:
if stat >= 0:
raise ValueError(
"node {node} was not passed out (still not ready)".format(
node=node
)
)
elif stat == _NODE_DONE:
raise ValueError(
"node {node} was already marked done".format(node=node)
)
else:
assert False, "node {node}: unknown status {stat}".format(
node=node, stat=stat
)
# Mark the node as processed
nodeinfo.npredecessors = _NODE_DONE
# Go to all the successors and reduce the number of predecessors, collecting all the ones
# that are ready to be returned in the next get_ready() call.
for successor in nodeinfo.successors:
successor_info = n2i[successor]
successor_info.npredecessors -= 1
if successor_info.npredecessors == 0:
self._ready_nodes.append(successor)
self._nfinished += 1
    def _find_cycle(self):
        """Return a list of nodes forming a cycle (with the starting node
        repeated at the end), or None if the graph is acyclic.

        Iterative depth-first search over every node with an explicit stack,
        so arbitrarily deep graphs do not hit the recursion limit.
        """
        n2i = self._node2info
        stack = []        # nodes on the current DFS path
        itstack = []      # bound successor-iterator __next__ per stack entry
        seen = set()      # nodes whose exploration has started
        node2stacki = {}  # node -> index in `stack`, only for nodes on the path
        for node in n2i:
            if node in seen:
                continue
            while True:
                if node in seen:
                    # If we have seen already the node and is in the
                    # current stack we have found a cycle.
                    if node in node2stacki:
                        val = node2stacki[node]
                        # Slice from the first occurrence on the path and
                        # close the loop by repeating the node.
                        return stack[val:] + [node]
                    # else go on to get next successor
                else:
                    seen.add(node)
                    # Store the bound __next__ so advancing the iterator is a
                    # plain call in the backtracking loop below.
                    itstack.append(iter(n2i[node].successors).__next__)
                    node2stacki[node] = len(stack)
                    stack.append(node)
                # Backtrack to the topmost stack entry with
                # at least another successor.
                while stack:
                    try:
                        node = itstack[-1]()
                        break
                    except StopIteration:
                        del node2stacki[stack.pop()]
                        itstack.pop()
                else:
                    # Stack exhausted: this DFS tree is cycle-free.
                    break
        return None
def static_order(self):
"""Returns an iterable of nodes in a topological order.
The particular order that is returned may depend on the specific
order in which the items were inserted in the graph.
Using this method does not require to call "prepare" or "done". If any
cycle is detected, :exc:`CycleError` will be raised.
"""
self.prepare()
while self.is_active():
node_group = self.get_ready()
for each in node_group:
yield each
self.done(*node_group)
| {
"repo_name": "djrobstep/schemainspect",
"path": "schemainspect/graphlib/__init__.py",
"copies": "1",
"size": "9896",
"license": "unlicense",
"hash": -6929130829176618000,
"line_mean": 37.3565891473,
"line_max": 101,
"alpha_frac": 0.5851859337,
"autogenerated": false,
"ratio": 4.5415328132170725,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5626718746917072,
"avg_score": null,
"num_lines": null
} |
__all__ = ['tqdm', 'trange']
import sys
import time
def format_interval(t):
    """Render *t* seconds as 'H:MM:SS', or 'MM:SS' when under an hour."""
    secs = int(t)
    h, rem = divmod(secs, 3600)
    m, s = divmod(rem, 60)
    if h:
        return '%d:%02d:%02d' % (h, m, s)
    return '%02d:%02d' % (m, s)
def format_meter(n, total, elapsed):
    """Return a short progress-bar string such as '|#####-----| 5/10  50%'.

    n       -- number of finished iterations
    total   -- expected number of iterations, or None when unknown
    elapsed -- number of seconds passed since start
    """
    # Bug fix: `n > None` raises TypeError on Python 3, so only sanity-check
    # n against total when a real total was supplied.
    if total is not None and n > total:
        total = None
    if total:
        frac = float(n) / total
        N_BARS = 10
        bar_length = int(frac * N_BARS)
        bar = '#' * bar_length + '-' * (N_BARS - bar_length)
        percentage = '%3d%%' % (frac * 100)
        return '|%s| %d/%d %s' % (bar, n, total, percentage)
    else:
        # Without a total we can only report the raw count, the elapsed
        # time and the average iteration rate.
        elapsed_str = format_interval(elapsed)
        rate = '%5.2f' % (n / elapsed) if elapsed else '?'
        return '%d [elapsed: %s, %s iters/sec]' % (n, elapsed_str, rate)
class StatusPrinter(object):
    """Redraws a single in-place status line on standard output."""

    def __init__(self):
        # Length of the previously printed status; used to blank out
        # leftovers when a shorter message replaces a longer one.
        self.last_printed_len = 0

    def print_status(self, s):
        padding = max(self.last_printed_len - len(s), 0)
        sys.stdout.write('\r' + s + ' ' * padding)
        sys.stdout.flush()
        self.last_printed_len = len(s)
def tqdm(iterable, desc='', total=None, leave=False, mininterval=0.5, miniters=1):
    """
    Get an iterable object, and return an iterator which acts exactly like the
    iterable, but prints a progress meter and updates it every time a value is
    requested.
    'desc' can contain a short string, describing the progress, that is added
    in the beginning of the line.
    'total' can give the number of expected iterations. If not given,
    len(iterable) is used if it is defined.
    If leave is False, tqdm deletes its traces from screen after it has finished
    iterating over all elements.
    If less than mininterval seconds or miniters iterations have passed since
    the last progress meter update, it is not updated again.
    """
    if total is None:
        # Generators and other unsized iterables have no len(); fall back
        # to an unknown total (format_meter handles None).
        try:
            total = len(iterable)
        except TypeError:
            total = None
    prefix = desc+': ' if desc else ''
    # This revision always prints to sys.stdout (StatusPrinter takes no
    # output-stream argument here).
    sp = StatusPrinter()
    sp.print_status(prefix + format_meter(0, total, 0))
    start_t = last_print_t = time.time()
    last_print_n = 0  # iteration count at the last redraw
    n = 0             # iterations completed so far
    for obj in iterable:
        yield obj
        # Now the object was created and processed, so we can print the meter.
        n += 1
        if n - last_print_n >= miniters:
            # We check the counter first, to reduce the overhead of time.time().
            cur_t = time.time()
            if cur_t - last_print_t >= mininterval:
                sp.print_status(prefix + format_meter(n, total, cur_t-start_t))
                last_print_n = n
                last_print_t = cur_t
    if not leave:
        # Erase the meter and return the cursor to the start of the line.
        sp.print_status('')
        sys.stdout.write('\r')
    else:
        # Make sure the final count is displayed even if the last updates
        # were throttled away by miniters/mininterval.
        if last_print_n < n:
            cur_t = time.time()
            sp.print_status(prefix + format_meter(n, total, cur_t-start_t))
def trange(*args, **kwargs):
    """A shortcut for writing tqdm(xrange)"""
    # NOTE(review): xrange is Python-2-only, so this raises NameError on
    # Python 3; the later revisions of this module in this file fall back
    # to range via try/except. Left as-is here.
    return tqdm(xrange(*args), **kwargs) | {
"repo_name": "Plasticoo/RandomImgur",
"path": "lib/tqdm.py",
"copies": "1",
"size": "3354",
"license": "mit",
"hash": -2724210150147272000,
"line_mean": 32.2178217822,
"line_max": 82,
"alpha_frac": 0.5700655933,
"autogenerated": false,
"ratio": 3.541710665258712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.95181221867119,
"avg_score": 0.01873081436936224,
"num_lines": 101
} |
__all__ = ['tqdm', 'trange']
import sys
import time
def format_interval(t):
    """Format *t* seconds as a clock string: 'H:MM:SS', or 'MM:SS' if h == 0."""
    total_minutes, seconds = divmod(int(t), 60)
    hours, minutes = divmod(total_minutes, 60)
    if hours:
        return '%d:%02d:%02d' % (hours, minutes, seconds)
    else:
        return '%02d:%02d' % (minutes, seconds)
def format_meter(n, total, elapsed):
    """Format a one-line progress meter with bar, counts, timing and rate.

    n       -- number of finished iterations
    total   -- total number of iterations, or None when unknown
    elapsed -- number of seconds passed since start
    """
    # Bug fix: comparing n against a None total raises TypeError on
    # Python 3; only sanity-check when a real total was supplied.
    if total is not None and n > total:
        total = None
    elapsed_str = format_interval(elapsed)
    rate = '%5.2f' % (n / elapsed) if elapsed else '?'
    if total:
        frac = float(n) / total
        N_BARS = 10
        bar_length = int(frac*N_BARS)
        bar = '#'*bar_length + '-'*(N_BARS-bar_length)
        percentage = '%3d%%' % (frac * 100)
        # Estimated remaining time, extrapolated from the average rate so far.
        left_str = format_interval(elapsed / n * (total-n)) if n else '?'
        return '|%s| %d/%d %s [elapsed: %s left: %s, %s iters/sec]' % (
            bar, n, total, percentage, elapsed_str, left_str, rate)
    else:
        return '%d [elapsed: %s, %s iters/sec]' % (n, elapsed_str, rate)
class StatusPrinter(object):
    """Rewrites a single status line in place on the given file object."""

    def __init__(self, file):
        # Output stream; must support write() and flush().
        self.file = file
        # Length of the previous status, used to blank leftover characters.
        self.last_printed_len = 0

    def print_status(self, s):
        pad = max(self.last_printed_len - len(s), 0)
        self.file.write('\r' + s + ' ' * pad)
        self.file.flush()
        self.last_printed_len = len(s)
def tqdm(iterable, desc='', total=None, leave=False, file=sys.stderr,
         mininterval=0.5, miniters=1):
    """
    Get an iterable object, and return an iterator which acts exactly like the
    iterable, but prints a progress meter and updates it every time a value is
    requested.
    'desc' can contain a short string, describing the progress, that is added
    in the beginning of the line.
    'total' can give the number of expected iterations. If not given,
    len(iterable) is used if it is defined.
    'file' can be a file-like object to output the progress message to.
    If leave is False, tqdm deletes its traces from screen after it has
    finished iterating over all elements.
    If less than mininterval seconds or miniters iterations have passed since
    the last progress meter update, it is not updated again.
    """
    if total is None:
        # Generators and other unsized iterables have no len(); fall back
        # to an unknown total (format_meter handles None).
        try:
            total = len(iterable)
        except TypeError:
            total = None
    prefix = desc+': ' if desc else ''
    sp = StatusPrinter(file)
    sp.print_status(prefix + format_meter(0, total, 0))
    start_t = last_print_t = time.time()
    last_print_n = 0  # iteration count at the last redraw
    n = 0             # iterations completed so far
    for obj in iterable:
        yield obj
        # Now the object was created and processed, so we can print the meter.
        n += 1
        if n - last_print_n >= miniters:
            # We check the counter first, to reduce the overhead of time.time()
            cur_t = time.time()
            if cur_t - last_print_t >= mininterval:
                sp.print_status(prefix + format_meter(n, total, cur_t-start_t))
                last_print_n = n
                last_print_t = cur_t
    if not leave:
        sp.print_status('')
        # Bug fix: write the carriage return to the same stream the meter
        # was printed on (was sys.stdout, which is wrong for file=stderr).
        file.write('\r')
    else:
        # Ensure the final count is shown even if the last update was
        # throttled away by miniters/mininterval.
        if last_print_n < n:
            cur_t = time.time()
            sp.print_status(prefix + format_meter(n, total, cur_t-start_t))
        file.write('\n')
def trange(*args, **kwargs):
    """A shortcut for writing tqdm(range()) on py3 or tqdm(xrange()) on py2"""
    # Prefer the lazy Python-2 xrange; on Python 3 it does not exist and
    # the NameError tells us to use the (already lazy) range builtin.
    try:
        make_range = xrange
    except NameError:
        make_range = range
    return tqdm(make_range(*args), **kwargs)
| {
"repo_name": "Djabbz/tqdm",
"path": "tqdm.py",
"copies": "8",
"size": "3549",
"license": "mit",
"hash": -4310563474555024400,
"line_mean": 30.1315789474,
"line_max": 79,
"alpha_frac": 0.5700197239,
"autogenerated": false,
"ratio": 3.5740181268882174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8144037850788218,
"avg_score": null,
"num_lines": null
} |
__all__ = ['tqdm', 'trange']
import sys
import time
def format_interval(t):
    """Human-readable clock string for *t* seconds ('MM:SS' or 'H:MM:SS')."""
    m, s = divmod(int(t), 60)
    h, m = divmod(m, 60)
    if not h:
        return '%02d:%02d' % (m, s)
    return '%d:%02d:%02d' % (h, m, s)
def format_meter(n, total, elapsed):
    """Format a one-line progress meter with bar, counts, timing and rate.

    n       -- number of finished iterations
    total   -- total number of iterations, or None when unknown
    elapsed -- number of seconds passed since start
    """
    # Bug fix: comparing n against a None total raises TypeError on
    # Python 3; only sanity-check when a real total was supplied.
    if total is not None and n > total:
        total = None
    elapsed_str = format_interval(elapsed)
    rate = '%5.2f' % (n / elapsed) if elapsed else '?'
    if total:
        frac = float(n) / total
        N_BARS = 10
        bar_length = int(frac*N_BARS)
        bar = '#'*bar_length + '-'*(N_BARS-bar_length)
        percentage = '%3d%%' % (frac * 100)
        # Estimated remaining time, extrapolated from the average rate so far.
        left_str = format_interval(elapsed / n * (total-n)) if n else '?'
        return '|%s| %d/%d %s [elapsed: %s left: %s, %s iters/sec]' % (
            bar, n, total, percentage, elapsed_str, left_str, rate)
    else:
        return '%d [elapsed: %s, %s iters/sec]' % (n, elapsed_str, rate)
class StatusPrinter(object):
    """Prints a status line, overwriting the previous one via '\\r'."""

    def __init__(self, file):
        self.file = file           # target stream (needs write()/flush())
        self.last_printed_len = 0  # chars printed last time, for blanking

    def print_status(self, s):
        eraser = ' ' * max(self.last_printed_len - len(s), 0)
        self.file.write('\r%s%s' % (s, eraser))
        self.file.flush()
        self.last_printed_len = len(s)
def tqdm(iterable, desc='', total=None, leave=False, file=sys.stderr,
         mininterval=0.5, miniters=1):
    """
    Get an iterable object, and return an iterator which acts exactly like the
    iterable, but prints a progress meter and updates it every time a value is
    requested.
    'desc' can contain a short string, describing the progress, that is added
    in the beginning of the line.
    'total' can give the number of expected iterations. If not given,
    len(iterable) is used if it is defined.
    'file' can be a file-like object to output the progress message to.
    If leave is False, tqdm deletes its traces from screen after it has
    finished iterating over all elements.
    If less than mininterval seconds or miniters iterations have passed since
    the last progress meter update, it is not updated again.
    """
    if total is None:
        # Generators and other unsized iterables have no len(); fall back
        # to an unknown total (format_meter handles None).
        try:
            total = len(iterable)
        except TypeError:
            total = None
    prefix = desc+': ' if desc else ''
    sp = StatusPrinter(file)
    sp.print_status(prefix + format_meter(0, total, 0))
    start_t = last_print_t = time.time()
    last_print_n = 0  # iteration count at the last redraw
    n = 0             # iterations completed so far
    for obj in iterable:
        yield obj
        # Now the object was created and processed, so we can print the meter.
        n += 1
        if n - last_print_n >= miniters:
            # We check the counter first, to reduce the overhead of time.time()
            cur_t = time.time()
            if cur_t - last_print_t >= mininterval:
                sp.print_status(prefix + format_meter(n, total, cur_t-start_t))
                last_print_n = n
                last_print_t = cur_t
    if not leave:
        sp.print_status('')
        # Bug fix: write the carriage return to the same stream the meter
        # was printed on (was sys.stdout, which is wrong for file=stderr).
        file.write('\r')
    else:
        # Ensure the final count is shown even if the last update was
        # throttled away by miniters/mininterval.
        if last_print_n < n:
            cur_t = time.time()
            sp.print_status(prefix + format_meter(n, total, cur_t-start_t))
        file.write('\n')
def trange(*args, **kwargs):
    """A shortcut for writing tqdm(range()) on py3 or tqdm(xrange()) on py2"""
    # Pick the lazy range builtin: xrange on Python 2 (NameError on Python 3
    # routes us to the already-lazy range builtin).
    try:
        f = xrange
    except NameError:
        f = range
    return tqdm(f(*args), **kwargs) | {
"repo_name": "fras2560/Baseball-Simulator",
"path": "simulator/tqdm.py",
"copies": "1",
"size": "3661",
"license": "apache-2.0",
"hash": -2656214811171791000,
"line_mean": 30.1315789474,
"line_max": 79,
"alpha_frac": 0.552581262,
"autogenerated": false,
"ratio": 3.686807653575025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47393889155750246,
"avg_score": null,
"num_lines": null
} |
# Amazon catalog identifiers (ASINs and ISBN-10/ISBN-13 strings) used for
# tracking. From inspection, all_tracking is the flat concatenation of the
# business titles (business_tracking) followed by the Amish-fiction titles
# (amish_asin).
# NOTE(review): '30773000X' below is only 9 characters — one short for an
# ISBN-10 (perhaps '030773000X'?) — TODO confirm against the data source.
all_tracking = ('B0020HRVG2', 'B0050DIWHU', 'B00C0ALZ0W', 'B00GL3PZ0K', 'B00H6JBFOS', 'B00ALBR6LO', 'B00BVJG2P6',
'B00G1J1D28', 'B00M60PIXQ', 'B00N6PESWC', '9780385521338','0385521332', '9780307888907','0307888908',
'9780804137508', '0804137501', '0804138079', '9780804141444', '0804141444', '9780385346658', '0385346654',
'9780770434007','0770434002', '9780770436735','0770436730', '9780804137386','0804137382', '9780385347594','0385347596',
'9780804136723', '0804136726', 'B00J1ILFVA','B007REKL1A','B00SPW377S','B0083DJVGU','B00C0AM1GY','B00L6YJZV4','B00G1IV9T6',
'B004JHYR54','B003EI2E9K','B004J4XGD6','B00CGI3FMY','B004P8JPTK','B002KJA964','B00540NW8S','B002PMW112',
'B009MYATJM','B00RKO6OXG','B00N6PBH6C','B00RKVWRJY','B00JNQIYNW','B0019O6J2E','B0019O6J2O','B00139XSKQ',
'0307446549','0307732134','0307446557','0307446530','1601427115','1601427131','1601427123','30773000X',
'0307730026','1601428146','0307730999','0307730069','1601427670','0307729982','0307729958','1400073979',
'1400073987','1400073960','0307730042','1601426992','0307731332','0307731359','0307731316','1400072921',
'140007293X','1400072948')
# Business-title subset of all_tracking (mix of ASINs and ISBNs).
business_tracking = ('B0020HRVG2', 'B0050DIWHU', 'B00C0ALZ0W', 'B00GL3PZ0K', 'B00H6JBFOS', 'B00ALBR6LO', 'B00BVJG2P6',
'B00G1J1D28', 'B00M60PIXQ', 'B00N6PESWC', '9780385521338','0385521332', '9780307888907','0307888908',
'9780804137508', '0804137501', '0804138079', '9780804141444', '0804141444', '9780385346658', '0385346654',
'9780770434007','0770434002', '9780770436735','0770436730', '9780804137386','0804137382', '9780385347594','0385347596',
'9780804136723', '0804136726')
# Amish-fiction subset of all_tracking (mix of ASINs and ISBNs).
amish_asin = ('B00J1ILFVA','B007REKL1A','B00SPW377S','B0083DJVGU','B00C0AM1GY','B00L6YJZV4','B00G1IV9T6',
'B004JHYR54','B003EI2E9K','B004J4XGD6','B00CGI3FMY','B004P8JPTK','B002KJA964','B00540NW8S','B002PMW112',
'B009MYATJM','B00RKO6OXG','B00N6PBH6C','B00RKVWRJY','B00JNQIYNW','B0019O6J2E','B0019O6J2O','B00139XSKQ',
'0307446549','0307732134','0307446557','0307446530','1601427115','1601427131','1601427123','30773000X',
'0307730026','1601428146','0307730999','0307730069','1601427670','0307729982','0307729958','1400073979',
'1400073987','1400073960','0307730042','1601426992','0307731332','0307731359','0307731316','1400072921',
'140007293X','1400072948')
all_search_asins = ('B00J1ILFVA','B007REKL1A','B00SPW377S','B0083DJVGU','B00C0AM1GY','B00L6YJZV4','B00G1IV9T6',
'B004JHYR54','B003EI2E9K','B004J4XGD6','B00CGI3FMY','B004P8JPTK','B002KJA964','B00540NW8S','B002PMW112',
'B009MYATJM','B00RKO6OXG','B00N6PBH6C','B00RKVWRJY','B00JNQIYNW','B0019O6J2E','B0019O6J2O','B00139XSKQ',
'0307446549','0307732134','0307446557','0307446530','1601427115','1601427131','1601427123','30773000X',
'0307730026','1601428146','0307730999','0307730069','1601427670','0307729982','0307729958','1400073979',
'1400073987','1400073960','0307730042','1601426992','0307731332','0307731359','0307731316','1400072921',
'140007293X','1400072948','9781101904145','B00TWEMGE8',
'9780553419030','055341903X',
'9780553419047','B00S3RD3ZG',
'9780307720252','B00RRT332Y',
'9780307720214','0307720217',
'9780804139137','B00R04ME4O',
'9780804139120','0804139121',
'9780553417784','0553417789',
'9780804141246','B00N6PEN0Y',
'9780804141239','0804141231',
'9780804137751','0804137757',
'9780804137768','B00N6PET3A',
'9780553446975','B00N6PD3EQ',
'9780553446968','0553446967',
'9780385347419','B00N6PCWY8',
'9780385347402','0385347405',
'9780385346559','B00N6PD4CM',
'9780385346542','0385346549',
'9780804136730','B00N6PESWC',
'9780804136723','0804136726',
'9780385347594','0385347596',
'9780385347600','B00M60PIXQ',
'9780804140782','0804140782',
'9780804140799','B00LYXV5OW',
'9780804138383','0804138389',
'9780804138390','B00JTCH6YS',
'9780804138734','0804138737',
'9780804138741','B00JNQMKXW',
'9780385346467','B00JI54HCU',
'9780385346450','038534645X',
'9780804139298','0804139296',
'9780804139304','B00J6YBOFQ',
'9780385346658','0385346654',
'9780385346665','B00H6JBFOS',
'9780385348546','0385348541',
'9780307815514','B00D5BDK4Y',
'9780804137386','0804137382',
'9780804137393','B00G1J1D28',
'9780804141444','0804141444',
'9780804141451','B00GL3PZ0K',
'9780804137362','0804137366',
'9780804137379','B00FO60EMO',
'9780804138888','B00FDS7B3I',
'9780804138871','0804138877',
'9780385347020','0385347022',
'9780385347037','B00EGMQIDG',
'9780385347624','B00DTEMGF6',
'9780385347617','0385347618',
'9780804137515','B00C0ALZ0W',
'9780804137508','0804137501',
'9780804138079','0804138079',
'9780385349369','038534936X',
'9780385349376','B00CGI3DWQ',
'9780770436735','0770436730',
'9780770436742','B00BVJG2P6',
'9780770437497','0770437494',
'9780770437503','B00BRUQ3WG',
'9780307886675','0307886670',
'9780307886699','B00BH0VSIU',
'9780385348348','B00B3GMH4W',
'9780385348331','0385348339',
'9780307951618','0307951618',
'9780307951625','B00AUSCOR4',
'9780307951601','030795160X',
'9780770434021','B00ALBR6LO',
'9780770434007','0770434002',
'9780770437169','B00ALBR6JG',
'9780770437176','0770437176',
'9780770437152','077043715X',
'9780385346955','B009Y4I9C4',
'9780385346948','0385346948',
'9780385349390','0385349394',
'9780307956392','0307956393',
'9780307956415','B009JU6UPG',
'9780770435615','B00985DWVM',
'9780770435608','0770435602',
'9780307889140','B009QJNRTM',
'9780307889126','0307889122',
'9780307986078','0307986071',
'9780307986092','B008WOUFIS',
'9780307956439','B008IU9WLC',
'9780307956422','0307956423',
'9780307720962','0307720969',
'9780307720979','B0083DJUMA',
'9780307720955','0307720950',
'9780307888341','0307888347',
'9780307888365','B00540NWYM',
'9780307951595','B006LSZ7BM',
'9780307951571','030795157X',
'9780385518222','0385518226',
'9780385521864','B004FGMD9Q',
'9780770436148','0770436145',
'9780770436155','B0076PGLDC',
'9780770436162','0770436161',
'9780307986146','0307986144',
'9780307986153','B006ZAZ2NE',
'9780770436032','B0076P7Z96',
'9780770436018','0770436013',
'9780770435691','0770435696',
'9780770435707','B0060AY6H2',
'9780307887184','0307887189',
'9780307887177','0307887170',
'9780307887191','B006OFHLG6',
'9780307885302','0307885305',
'9780307885326','B0064C3VF4',
'9780307952578','B005DXOQMC',
'9780307952561','0307952568',
'9780307952554','030795255X',
'9780307951540','B0067TGSOK',
'9780307951526','0307951529',
'9780307719577','B005DXOR08',
'9780307719560','0307719561',
'9780307719232','B0058Z4NR8',
'9780307719225','0307719227',
'9780307719218','0307719219',
'9780307886071','B005723KGW',
'9780307886064','0307886069',
'9780307886057','0307886050',
'9780307888907','0307888908',
'9780307888921','B0050DIWHU',
'9780307592422','B00540PAUQ',
'9780307592392','0307592391',
'9780307720351','0307720357',
'9780307720375','B004J4XGNG',
'9780307588739','0307588734',
'9780307588746','B004UHYH1C',
'9780307953599','B0061UC83A',
'9780307589897','0307589897',
'9780307589910','B004J4WKXI',
'9780307452733','B004KPM1N0',
'9780307452719','0307452719',
'9780307587794','0307587797',
'9780307587817','B004KPM1P8',
'9780307887344','B004J4WKOM',
'9780770433499','0770433499',
'9780307956330','0307956334',
'9780307887320','0307887324',
'9780385531740','B004J4X2VM',
'9780385531733','0385531737',
'9780307956323','0307956326',
'9780307887672','B004KPM1FI',
'9780307461278','0307461270',
'9780307887894','0307887898',
'9780307887658','0307887650',
'9780307887917','B004J4XGN6',
'9780307461285','B004J4WN9E',
'9780385525763','0385525761',
'9780307590190','B004J4WKPQ',
'9780385531665','0385531664',
'9780385531672','B004J4WM1S',
'9780307886231','0307886239',
'9780307886255','B004J4WKEC',
'9780307589101','0307589102',
'9780307589125','B004J4WK2E',
'9780385525077','0385525079',
'9780307720788','B004J4WKHE',
'9780307951793','0307951790',
'9780385531696','B004DEPHGQ',
'9780385531689','0385531680',
'9780307587954','0307587959',
'9780307587978','B003F3PK9K',
'9780307591272','B004C43F3E',
'9780307591265','0307591263',
'9780307453792','0307453790',
'9780307716903','0307716902',
'9780307716910','B003F3PKSQ',
'9780307716897','0307716899',
'9780307460264','0307460266',
'9780307460271','B003EI2E8Q',
'9780307591753','B003F3PMXO',
'9780307591746','0307591743',
'9780307717863','0307717860',
'9780307717870','0307717879',
'9780307717887','B003F3PL7Q',
'9780307452993','0307452999',
'9780307453013','B003E8AJXI',
'9780307886026','0307886026',
'9780307591135','B003B0W1SK',
'9780307591548','0307591549',
'9780307591562','B003F3PMYI',
'9780307591111','0307591115',
'9780307591128','0307591123',
'9780307588005','B003F3PK90',
'9780307587992','0307587991',
'9780307717658','B003F3PKQS',
'9780307453723','0307453723',
'9780307590183','B0036S4EWI',
'9780385526272','038552627X',
'9780307460110','0307460118',
'9780307888310','0307888312',
'9780307460127','B0036S4B8A',
'9780385529068','0385529066',
'9780307715845','B0036S4BXU',
'9780307589132','0307589137',
'9780307589156','B0036S4BEE',
'9780307591630','B0036S49YQ',
'9780307591623','030759162X',
'9780307460875','B0036S4CNE',
'9780307460868','030746086X',
'9780307460851','0307460851',
'9780307452535','0307452530',
'9780307590572','0307590577',
'9780307588579','B0036S4B12',
'9780307587886','0307587886',
'9780307452542','B0036S4CHU',
'9780307590596','B0036S49LY',
'9780307590176','B0036S4CM0',
'9780385528177','0385528175',
'9780385528184','0385528183',
'9780307463760','B002MUAJ2A',
'9780307463746','0307463745',
'9780307461865','B004JCL67Q',
'9780307461889','B0030DHPG6',
'9780307453679','B0036S4D7O',
'9780307453662','0307453669',
'9780307590169','B0030DHPGQ',
'9780307885036','0307885038',
'9780385528757','0385528752',
'9780307453372','0307453375',
'9780307453389','0307453383',
'9780307453396','B0036894XC',
'9780307406514','0307406512',
'9780307462404','B00362XLH8',
'9780307406521','0307406520',
'9780307464224','0307464229',
'9780307464231','0307464237',
'9780307464248','B0034DGPLS',
'9780307591234','B0034DGPKY',
'9780307591227','0307591220',
'9780385531016','038553101X',
'9780307408938','0307408930',
'9780307462589','B002W8QXE0',
'9780307589477','B002W8QXH2',
'9780385531030','B002WA4O9E',
'9780307591203','B002W8QXHM',
'9780307591197','0307591190',
'9780385528115','0385528116',
'9780307589934','B002UM5BF0',
'9780307589941','0307589943',
'9780307589927','0307589927',
'9780307463098','0307463095',
'9780385529945','0385529945',
'9780385529938','B002UBRFFU',
'9780307463111','B002UZ5J3G',
'9780307463104','0307463109',
'9780385529914','0385529910',
'9780385520522','0385520522',
'9780307589439','B002SE643I',
'9780307409386','0307409384',
'9780307462473','B002RLBKBI',
'9780307461704','030746170X',
'9780307461711','B002PYFWAW',
'9780761501640','0761501649',
'9781881052104','1881052109',
'9780307461698','0307461696',
'9780307587763','B002NXORZQ',
'9780307587688','0307587681',
'9780307589422','B002OK2OQ2',
'9780307588173','0307588173',
'9780307588180','B002OK2OOE',
'9780307589460','B002NXOR6K',
'9780385529044','038552904X',
'9780385529006','0385529007',
'9780385525947','038552594X',
'9780385525954','0385525958',
'9780767930536','0767930533',
'9780767932158','B002OK2ON0',
'9780307452276','B002LLRE2I',
'9780307453310','0307453316',
'9780307382351','0307382354',
'9780307453327','B002LLRDYC',
'9780307459701','B002JCJ72E',
'9780307459688','0307459683',
'9780307459695','0307459691',
'9780385527804','0385527802',
'9780307588333','0307588335',
'9780385530392','B002HMJZ9G',
'9780307588357','B002HHPVHQ',
'9780307588340','0307588343',
'9780307450388','0307450384',
'9780307450401','B002FQOHVK',
'9780307450395','0307450392',
'9780307452290','B002C1Z3GG',
'9780307408440','0307408442',
'9780307408457','0307408450',
'9780307407078','0307407071',
'9780307459985','B002AWX6JI',
'9780385530224','B0020HRVG2',
'9780385521338','0385521332',
'9780385526494','0385526490',
'9780385526500','0385526504',
'9780385530354','B00296SVTA',
'9780307459800','B0028MBKUC',
'9780385527019','0385527012',
'9780385527026','0385527020',
'9780385530378','B0027G6XEW',
'9780385526555','0385526555',
'9780385527828','0385527829',
'9780385527835','0385527837',
'9780307449405','0307449408',
'9780385529440','B0026LTNFY',
'9780307449412','B0018QOYQ6',
'9780385530408','B0026LTNHC',
'9780307409508','0307409503',
'9780307452665','B0024NP59M',
'9780307452283','B0023ZLG2Q',
'9780307409492','030740949X',
'9780385529433','B001VT3L5A',
'9780385526005','0385526008',
'9780307452269','B001UFP6RG',
'9780307407139','0307407136',
'9780307407146','0307407144',
'9780385522618','0385522614',
'9780385529402','B001NLL4S2',
'9780767929844','0767929845',
'9780767931526','B001TSZ6LK',
'9780385526326','0385526326',
'9780385528337','B001NLL6QM',
'9780385529426','B001NLKSD4',
'9780385525794','0385525796',
'9780385525787','0385525788',
'9780307407702','0307407705',
'9780767931489','B001NLKYN8',
'9780307452306','B001RS8KMO',
'9780767928557','0767928555',
'9780385529419','B001NLL9HS',
'9780307449344','B001RR5KIW',
'9780385524698','0385524692',
'9780767931601','B001NLL9Q4',
'9780767927413','0767927419',
'9780307449825','B001NLKT4W',
'9780307450234','B001NBEWMM',
'9780307405920','0307405923',
'9780609608289','0609608282',
'9780307461896','0307461890',
'9780307452009','B001M2FTGK',
'9780385528320','B001FA0KMW',
'9780385526302','038552630X',
'9780385523882','0385523882',
'9780385528290','B001FA0VWG',
'9780385523899','0385523890',
'9780385523868','0385523866',
'9780385528283','B0018QQQIU',
'9780307875044','B003F3PKOK',
'9780307451583','0307451585',
'9780385528276','B001FA0KF4',
'9780385523578','0385523572',
'9780307395771','0307395774',
'9780385519052','0385519052',
'9780307449818','B0017SUYWS',
'9780307396013','0307396010',
'9780385528306','B001EW52FG',
'9780307449337','B001E7BLRO',
'9780307339737','0307339734',
'9780767929974','0767929977',
'9780767931021','B001EUTOXO',
'9780307396204','0307396207',
'9780307396211','0307396215',
'9780307449320','B001BAJ2LQ',
'9780385521031','0385521030',
'9780385522724','038552272X',
'9780385526746','B001CBMXAG',
'9780385526753','B001ANUOOS',
'9780307406224','0307406229',
'9780307406187','0307406180',
'9780307449313','B001BANK1Y',
'9780385519014','038551901X',
'9780385519045','0385519044',
'9780385525305','B0018QSO94',
'9780385530606','0385530609',
'9780385526777','B0013TTK1W',
'9780385524384','0385524382',
'9780385526975','B0016H388M',
'9780385526029','0385526024',
'9780307590299','0307590291',
'9780307406194','0307406199',
'9780307409294','B0016H38MI',
'9780307381736','0307381730',
'9780767930246','B0015DWJJG',
'9780307409218','B00139VUVA',
'9780767929738','076792973X',
'9780307409850','B00179FNT6',
'9780307409843','B0010SKT3O',
'9780307393531','0307393534',
'9780767926126','0767926129',
'9780385520430','0385520433',
'9780767930154','B0015KGX50',
'9780385525282','B0015KGWNI',
'9780307395085','0307395081',
'9780307409713','B000UZQH3O',
'9780307339867','0307339866',
'9780307339874','0307339874',
'9780307409812','B0014XDMDE',
'9780385525299','B0010SEN3Q',
'9780385519274','0385519273',
'9780385519281','0385519281',
'9780767923149','0767923146',
'9780767929080','B0013SSPVO',
'9780385512244','0385512244',
'9780307483119','B001LOEFYQ',
'9780385525329','B0010SIPPS',
'9780385523370','0385523378',
'9780307409829','B0010SIPLW',
'9780307352187','0307352188',
'9780767927666','0767927664',
'9780767927673','0767927672',
'9780307352194','0307352196',
'9780767929141','B0012P2OMY',
'9780307588777','0307588777',
'9780385525312','B0012YJYNM',
'9780307409836','B0011UGLJ2',
'9780385524193','B000W918DC',
'9780307451804','0307451801',
'9780307383266','0307383261',
'9780385520553','0385520557',
'9780385520546','0385520549',
'9780385524247','B000W94H3U',
'9780767929240','B00122HF6C',
'9780767926959','0767926951',
'9780307351135','0307351130',
'9780307405715','B000W93CSG',
'9780307405647','B000W917BA',
'9780307351104','0307351106',
'9780307451798','0307451798',
'9780307405654','B000W9167K',
'9780307393692','0307393690',
'9780307451781','030745178X',
'9780307396037','B000UZNR00',
'9780307351661','0307351661',
'9780307395580','B000VRBBE6',
'9780307337207','0307337200',
'9780307338532','0307338533',
'9780307394484','B000UDNBPS',
'9780307481085','B001M60BO6',
'9780385515474','0385515472',
'9780307345745','0307345742',
'9780307394897','B000SCHB6Q',
'9780307350992','0307350991',
'9780307394477','B000RNKMVW',
'9780307394491','B000RRA8J4',
'9780385523011','B000RG1NVW',
'9780307341457','0307341453',
'9780307409263','B0013TRQD6',
'9780385520812','0385520816',
'9780385520805','B001E1O7XA',
'9780307351876','0307351874',
'9780307497802','B001PSEQCE',
'9780307394224','B000QCQ94I',
'9780307351012','0307351017',
'9780307383457','0307383458',
'9780385521024','0385521022',
'9780307719539','0307719537',
'9780385523165','B000QFBXDC',
'9780385518437','0385518439',
'9780385519960','B000SEIFCI',
'9780385518406','0385518404',
'9780385521437','B000OYF016',
'9780385519229','0385519222',
'9780307394118','B000OYF002',
'9780307339720','0307339726',
'9780307548962','B000SEIFJQ',
'9780385516228','0385516223',
'9780385521260','038552126X',
'9780385513579','0385513577',
'9780307492425','B000SEGIIQ',
'9780385521918','B000OI0G5C',
'9780767923309','0767923308',
'9780767927321','B000OI0G7A',
'9781400082476','1400082471',
'9780307339454','0307339459',
'9780767922487','0767922484',
'9780767929288','B0012P2OMO',
'9780767926744','0767926749',
'9780307381965','B000PDZF6G',
'9780307352675','B000N2HCJM',
'9780307341518','0307341518',
'9780385516204','0385516207',
'9780767926300','B000MAHBSM',
'9780767923132','0767923138',
'9780767926331','B000MAHBUA',
'9780385518499','0385518498',
'9780385519922','B000N2HCJC',
'9780307453150','B0019O6IYI',
'9780767922586','0767922581',
'9780385520034','B000N2HCLK',
'9780307345622','0307345629',
'9780385518321','0385518323',
'9780385521536','B000PDZFIE',
'9780307352699','B000JMKVGW',
'9781400082995','1400082994',
'9780307341532','0307341534',
'9780307449610','B0013TXA0E',
'9780385520157','B000JMKRBG',
'9780385518925','0385518927',
'9780385517478','0385517475',
'9780385520102','B000JMKR9S',
'9780307336699','0307336697',
'9780307347121','B000JMKN9C',
'9780385519816','B000JMKN92',
'9780385519809','B000JMKN8S',
'9780385519694','0385519699',
'9780385519687','0385519680',
'9780767920568','0767920562',
'9780767924924','B000GCFW8M',
'9780767920575','0767920570',
'9780385517546','0385517548',
'9780385518918','B000PDZFMA',
'9780767922425','0767922425',
'9780767927857','B000QCQ948',
'9780767927864','B000QCQ8VW',
'9780385518444','B000GCFWBO',
'9780385517096','0385517092',
'9780385522076','038552207X',
'9780767922852','0767922859',
'9780767922845','0767922840',
'9780767918688','0767918681',
'9780385517614','B000PDZFFW',
'9780767924948','B000GCFWGO',
'9780307424136','B000XU4U9O',
'9780385504669','0385504667',
'9781400049660','1400049660',
'9780307498878','B000SEFJ1I',
'9780385517621','B000GCFBOC',
'9780307345493','B000GCFCIC',
'9780307336002','030733600X',
'9780385516518','0385516517',
'9780307498892','B000SEI7F8',
'9780307336132','0307336131',
'9780307345752','B000GCFCQE',
'9780307336842','0307336840',
'9780767921206','0767921208',
'9780767921213','0767921216',
'9780767923880','B000FCKPEE',
'9780307424228','B0012SMGME',
'9780385517775','B000FCKPO4',
'9780385516662','0385516665',
'9780385514354','0385514352',
'9780385513562','0385513569',
'9780385517607','B000FCKPFI',
'9780385516969','B000FCKNP0',
'9780307236999','0307236994',
'9780385514828','0385514824',
'9780307498861','B000S1LEQ0',
'9780385513593','0385513593',
'9780307423993','B0012RMVD4',
'9780385517010','B000FCKPKS',
'9781400098392','1400098394',
'9780385527316','0385527314',
'9780385512077','0385512074',
'9780385516884','B000FCKGJS',
'9780385516853','B000FCKFA8',
'9780385514781','0385514786',
'9781400080939','1400080932',
'9780307337320','B000FCKDKK',
'9781400052202','1400052203',
'9781400052196','140005219X',
'9780307336484','B000FCKCAQ',
'9780307238344','B000FCKCCO',
'9780385516242','038551624X',
'9780767915878','0767915879',
'9780385516808','B000FCKBNO',
'9780385516303','0385516304',
'9780385515351','0385515359',
'9780385515733','B000FCK8W8',
'9780385507578','0385507577',
'9780385515160','B000FCK90O',
'9780385507585','0385507585',
'9780385510301','0385510306',
'9781400050123','140005012X',
'9780307237262','B004SOVC7E',
'9780307237187','B000FCK91S',
'9781400082940','1400082943',
'9780307337023','B000FCK4SQ',
'9781400054626','1400054621',
'9780307423825','B0012RMVFM',
'9780385512480','0385512481',
'9781400080502','1400080509',
'9780307421012','B000XUDG12',
'9780767911788','0767911784',
'9780307236647','B000FCK1T8',
'9781400081981','140008198X',
'9780385512060','0385512066',
'9780385515290','B000FCJZ4K',
'9780385512053','0385512058',
'9780385514347','0385514344',
'9780385515276','B000FCK0HG',
'9781400098309','B000FC2Q9G',
'9781400048663','1400048664',
'9780767920308','B000FC2OM0',
'9780767919463','0767919467',
'9780767919470','0767919475',
'9780767919487','0767919483',
'9780767920247','B000FCJXTM',
'B0020HRVG2', 'B0050DIWHU', 'B00C0ALZ0W', 'B00GL3PZ0K', 'B00H6JBFOS', 'B00ALBR6LO', 'B00BVJG2P6',
'B00G1J1D28', 'B00M60PIXQ', 'B00N6PESWC', '9780385521338', '9780307888907', '9780804137508', '9780804141444',
'9780385346658', '9780770434007', '9780770436735', '9780804137386', '9780385347594', '9780804136723')
business_search_asins = ('9781101904145','B00TWEMGE8',
'9780553419030','055341903X',
'9780553419047','B00S3RD3ZG',
'9780307720252','B00RRT332Y',
'9780307720214','0307720217',
'9780804139137','B00R04ME4O',
'9780804139120','0804139121',
'9780553417784','0553417789',
'9780804141246','B00N6PEN0Y',
'9780804141239','0804141231',
'9780804137751','0804137757',
'9780804137768','B00N6PET3A',
'9780553446975','B00N6PD3EQ',
'9780553446968','0553446967',
'9780385347419','B00N6PCWY8',
'9780385347402','0385347405',
'9780385346559','B00N6PD4CM',
'9780385346542','0385346549',
'9780804136730','B00N6PESWC',
'9780804136723','0804136726',
'9780385347594','0385347596',
'9780385347600','B00M60PIXQ',
'9780804140782','0804140782',
'9780804140799','B00LYXV5OW',
'9780804138383','0804138389',
'9780804138390','B00JTCH6YS',
'9780804138734','0804138737',
'9780804138741','B00JNQMKXW',
'9780385346467','B00JI54HCU',
'9780385346450','038534645X',
'9780804139298','0804139296',
'9780804139304','B00J6YBOFQ',
'9780385346658','0385346654',
'9780385346665','B00H6JBFOS',
'9780385348546','0385348541',
'9780307815514','B00D5BDK4Y',
'9780804137386','0804137382',
'9780804137393','B00G1J1D28',
'9780804141444','0804141444',
'9780804141451','B00GL3PZ0K',
'9780804137362','0804137366',
'9780804137379','B00FO60EMO',
'9780804138888','B00FDS7B3I',
'9780804138871','0804138877',
'9780385347020','0385347022',
'9780385347037','B00EGMQIDG',
'9780385347624','B00DTEMGF6',
'9780385347617','0385347618',
'9780804137515','B00C0ALZ0W',
'9780804137508','0804137501',
'9780804138079','0804138079',
'9780385349369','038534936X',
'9780385349376','B00CGI3DWQ',
'9780770436735','0770436730',
'9780770436742','B00BVJG2P6',
'9780770437497','0770437494',
'9780770437503','B00BRUQ3WG',
'9780307886675','0307886670',
'9780307886699','B00BH0VSIU',
'9780385348348','B00B3GMH4W',
'9780385348331','0385348339',
'9780307951618','0307951618',
'9780307951625','B00AUSCOR4',
'9780307951601','030795160X',
'9780770434021','B00ALBR6LO',
'9780770434007','0770434002',
'9780770437169','B00ALBR6JG',
'9780770437176','0770437176',
'9780770437152','077043715X',
'9780385346955','B009Y4I9C4',
'9780385346948','0385346948',
'9780385349390','0385349394',
'9780307956392','0307956393',
'9780307956415','B009JU6UPG',
'9780770435615','B00985DWVM',
'9780770435608','0770435602',
'9780307889140','B009QJNRTM',
'9780307889126','0307889122',
'9780307986078','0307986071',
'9780307986092','B008WOUFIS',
'9780307956439','B008IU9WLC',
'9780307956422','0307956423',
'9780307720962','0307720969',
'9780307720979','B0083DJUMA',
'9780307720955','0307720950',
'9780307888341','0307888347',
'9780307888365','B00540NWYM',
'9780307951595','B006LSZ7BM',
'9780307951571','030795157X',
'9780385518222','0385518226',
'9780385521864','B004FGMD9Q',
'9780770436148','0770436145',
'9780770436155','B0076PGLDC',
'9780770436162','0770436161',
'9780307986146','0307986144',
'9780307986153','B006ZAZ2NE',
'9780770436032','B0076P7Z96',
'9780770436018','0770436013',
'9780770435691','0770435696',
'9780770435707','B0060AY6H2',
'9780307887184','0307887189',
'9780307887177','0307887170',
'9780307887191','B006OFHLG6',
'9780307885302','0307885305',
'9780307885326','B0064C3VF4',
'9780307952578','B005DXOQMC',
'9780307952561','0307952568',
'9780307952554','030795255X',
'9780307951540','B0067TGSOK',
'9780307951526','0307951529',
'9780307719577','B005DXOR08',
'9780307719560','0307719561',
'9780307719232','B0058Z4NR8',
'9780307719225','0307719227',
'9780307719218','0307719219',
'9780307886071','B005723KGW',
'9780307886064','0307886069',
'9780307886057','0307886050',
'9780307888907','0307888908',
'9780307888921','B0050DIWHU',
'9780307592422','B00540PAUQ',
'9780307592392','0307592391',
'9780307720351','0307720357',
'9780307720375','B004J4XGNG',
'9780307588739','0307588734',
'9780307588746','B004UHYH1C',
'9780307953599','B0061UC83A',
'9780307589897','0307589897',
'9780307589910','B004J4WKXI',
'9780307452733','B004KPM1N0',
'9780307452719','0307452719',
'9780307587794','0307587797',
'9780307587817','B004KPM1P8',
'9780307887344','B004J4WKOM',
'9780770433499','0770433499',
'9780307956330','0307956334',
'9780307887320','0307887324',
'9780385531740','B004J4X2VM',
'9780385531733','0385531737',
'9780307956323','0307956326',
'9780307887672','B004KPM1FI',
'9780307461278','0307461270',
'9780307887894','0307887898',
'9780307887658','0307887650',
'9780307887917','B004J4XGN6',
'9780307461285','B004J4WN9E',
'9780385525763','0385525761',
'9780307590190','B004J4WKPQ',
'9780385531665','0385531664',
'9780385531672','B004J4WM1S',
'9780307886231','0307886239',
'9780307886255','B004J4WKEC',
'9780307589101','0307589102',
'9780307589125','B004J4WK2E',
'9780385525077','0385525079',
'9780307720788','B004J4WKHE',
'9780307951793','0307951790',
'9780385531696','B004DEPHGQ',
'9780385531689','0385531680',
'9780307587954','0307587959',
'9780307587978','B003F3PK9K',
'9780307591272','B004C43F3E',
'9780307591265','0307591263',
'9780307453792','0307453790',
'9780307716903','0307716902',
'9780307716910','B003F3PKSQ',
'9780307716897','0307716899',
'9780307460264','0307460266',
'9780307460271','B003EI2E8Q',
'9780307591753','B003F3PMXO',
'9780307591746','0307591743',
'9780307717863','0307717860',
'9780307717870','0307717879',
'9780307717887','B003F3PL7Q',
'9780307452993','0307452999',
'9780307453013','B003E8AJXI',
'9780307886026','0307886026',
'9780307591135','B003B0W1SK',
'9780307591548','0307591549',
'9780307591562','B003F3PMYI',
'9780307591111','0307591115',
'9780307591128','0307591123',
'9780307588005','B003F3PK90',
'9780307587992','0307587991',
'9780307717658','B003F3PKQS',
'9780307453723','0307453723',
'9780307590183','B0036S4EWI',
'9780385526272','038552627X',
'9780307460110','0307460118',
'9780307888310','0307888312',
'9780307460127','B0036S4B8A',
'9780385529068','0385529066',
'9780307715845','B0036S4BXU',
'9780307589132','0307589137',
'9780307589156','B0036S4BEE',
'9780307591630','B0036S49YQ',
'9780307591623','030759162X',
'9780307460875','B0036S4CNE',
'9780307460868','030746086X',
'9780307460851','0307460851',
'9780307452535','0307452530',
'9780307590572','0307590577',
'9780307588579','B0036S4B12',
'9780307587886','0307587886',
'9780307452542','B0036S4CHU',
'9780307590596','B0036S49LY',
'9780307590176','B0036S4CM0',
'9780385528177','0385528175',
'9780385528184','0385528183',
'9780307463760','B002MUAJ2A',
'9780307463746','0307463745',
'9780307461865','B004JCL67Q',
'9780307461889','B0030DHPG6',
'9780307453679','B0036S4D7O',
'9780307453662','0307453669',
'9780307590169','B0030DHPGQ',
'9780307885036','0307885038',
'9780385528757','0385528752',
'9780307453372','0307453375',
'9780307453389','0307453383',
'9780307453396','B0036894XC',
'9780307406514','0307406512',
'9780307462404','B00362XLH8',
'9780307406521','0307406520',
'9780307464224','0307464229',
'9780307464231','0307464237',
'9780307464248','B0034DGPLS',
'9780307591234','B0034DGPKY',
'9780307591227','0307591220',
'9780385531016','038553101X',
'9780307408938','0307408930',
'9780307462589','B002W8QXE0',
'9780307589477','B002W8QXH2',
'9780385531030','B002WA4O9E',
'9780307591203','B002W8QXHM',
'9780307591197','0307591190',
'9780385528115','0385528116',
'9780307589934','B002UM5BF0',
'9780307589941','0307589943',
'9780307589927','0307589927',
'9780307463098','0307463095',
'9780385529945','0385529945',
'9780385529938','B002UBRFFU',
'9780307463111','B002UZ5J3G',
'9780307463104','0307463109',
'9780385529914','0385529910',
'9780385520522','0385520522',
'9780307589439','B002SE643I',
'9780307409386','0307409384',
'9780307462473','B002RLBKBI',
'9780307461704','030746170X',
'9780307461711','B002PYFWAW',
'9780761501640','0761501649',
'9781881052104','1881052109',
'9780307461698','0307461696',
'9780307587763','B002NXORZQ',
'9780307587688','0307587681',
'9780307589422','B002OK2OQ2',
'9780307588173','0307588173',
'9780307588180','B002OK2OOE',
'9780307589460','B002NXOR6K',
'9780385529044','038552904X',
'9780385529006','0385529007',
'9780385525947','038552594X',
'9780385525954','0385525958',
'9780767930536','0767930533',
'9780767932158','B002OK2ON0',
'9780307452276','B002LLRE2I',
'9780307453310','0307453316',
'9780307382351','0307382354',
'9780307453327','B002LLRDYC',
'9780307459701','B002JCJ72E',
'9780307459688','0307459683',
'9780307459695','0307459691',
'9780385527804','0385527802',
'9780307588333','0307588335',
'9780385530392','B002HMJZ9G',
'9780307588357','B002HHPVHQ',
'9780307588340','0307588343',
'9780307450388','0307450384',
'9780307450401','B002FQOHVK',
'9780307450395','0307450392',
'9780307452290','B002C1Z3GG',
'9780307408440','0307408442',
'9780307408457','0307408450',
'9780307407078','0307407071',
'9780307459985','B002AWX6JI',
'9780385530224','B0020HRVG2',
'9780385521338','0385521332',
'9780385526494','0385526490',
'9780385526500','0385526504',
'9780385530354','B00296SVTA',
'9780307459800','B0028MBKUC',
'9780385527019','0385527012',
'9780385527026','0385527020',
'9780385530378','B0027G6XEW',
'9780385526555','0385526555',
'9780385527828','0385527829',
'9780385527835','0385527837',
'9780307449405','0307449408',
'9780385529440','B0026LTNFY',
'9780307449412','B0018QOYQ6',
'9780385530408','B0026LTNHC',
'9780307409508','0307409503',
'9780307452665','B0024NP59M',
'9780307452283','B0023ZLG2Q',
'9780307409492','030740949X',
'9780385529433','B001VT3L5A',
'9780385526005','0385526008',
'9780307452269','B001UFP6RG',
'9780307407139','0307407136',
'9780307407146','0307407144',
'9780385522618','0385522614',
'9780385529402','B001NLL4S2',
'9780767929844','0767929845',
'9780767931526','B001TSZ6LK',
'9780385526326','0385526326',
'9780385528337','B001NLL6QM',
'9780385529426','B001NLKSD4',
'9780385525794','0385525796',
'9780385525787','0385525788',
'9780307407702','0307407705',
'9780767931489','B001NLKYN8',
'9780307452306','B001RS8KMO',
'9780767928557','0767928555',
'9780385529419','B001NLL9HS',
'9780307449344','B001RR5KIW',
'9780385524698','0385524692',
'9780767931601','B001NLL9Q4',
'9780767927413','0767927419',
'9780307449825','B001NLKT4W',
'9780307450234','B001NBEWMM',
'9780307405920','0307405923',
'9780609608289','0609608282',
'9780307461896','0307461890',
'9780307452009','B001M2FTGK',
'9780385528320','B001FA0KMW',
'9780385526302','038552630X',
'9780385523882','0385523882',
'9780385528290','B001FA0VWG',
'9780385523899','0385523890',
'9780385523868','0385523866',
'9780385528283','B0018QQQIU',
'9780307875044','B003F3PKOK',
'9780307451583','0307451585',
'9780385528276','B001FA0KF4',
'9780385523578','0385523572',
'9780307395771','0307395774',
'9780385519052','0385519052',
'9780307449818','B0017SUYWS',
'9780307396013','0307396010',
'9780385528306','B001EW52FG',
'9780307449337','B001E7BLRO',
'9780307339737','0307339734',
'9780767929974','0767929977',
'9780767931021','B001EUTOXO',
'9780307396204','0307396207',
'9780307396211','0307396215',
'9780307449320','B001BAJ2LQ',
'9780385521031','0385521030',
'9780385522724','038552272X',
'9780385526746','B001CBMXAG',
'9780385526753','B001ANUOOS',
'9780307406224','0307406229',
'9780307406187','0307406180',
'9780307449313','B001BANK1Y',
'9780385519014','038551901X',
'9780385519045','0385519044',
'9780385525305','B0018QSO94',
'9780385530606','0385530609',
'9780385526777','B0013TTK1W',
'9780385524384','0385524382',
'9780385526975','B0016H388M',
'9780385526029','0385526024',
'9780307590299','0307590291',
'9780307406194','0307406199',
'9780307409294','B0016H38MI',
'9780307381736','0307381730',
'9780767930246','B0015DWJJG',
'9780307409218','B00139VUVA',
'9780767929738','076792973X',
'9780307409850','B00179FNT6',
'9780307409843','B0010SKT3O',
'9780307393531','0307393534',
'9780767926126','0767926129',
'9780385520430','0385520433',
'9780767930154','B0015KGX50',
'9780385525282','B0015KGWNI',
'9780307395085','0307395081',
'9780307409713','B000UZQH3O',
'9780307339867','0307339866',
'9780307339874','0307339874',
'9780307409812','B0014XDMDE',
'9780385525299','B0010SEN3Q',
'9780385519274','0385519273',
'9780385519281','0385519281',
'9780767923149','0767923146',
'9780767929080','B0013SSPVO',
'9780385512244','0385512244',
'9780307483119','B001LOEFYQ',
'9780385525329','B0010SIPPS',
'9780385523370','0385523378',
'9780307409829','B0010SIPLW',
'9780307352187','0307352188',
'9780767927666','0767927664',
'9780767927673','0767927672',
'9780307352194','0307352196',
'9780767929141','B0012P2OMY',
'9780307588777','0307588777',
'9780385525312','B0012YJYNM',
'9780307409836','B0011UGLJ2',
'9780385524193','B000W918DC',
'9780307451804','0307451801',
'9780307383266','0307383261',
'9780385520553','0385520557',
'9780385520546','0385520549',
'9780385524247','B000W94H3U',
'9780767929240','B00122HF6C',
'9780767926959','0767926951',
'9780307351135','0307351130',
'9780307405715','B000W93CSG',
'9780307405647','B000W917BA',
'9780307351104','0307351106',
'9780307451798','0307451798',
'9780307405654','B000W9167K',
'9780307393692','0307393690',
'9780307451781','030745178X',
'9780307396037','B000UZNR00',
'9780307351661','0307351661',
'9780307395580','B000VRBBE6',
'9780307337207','0307337200',
'9780307338532','0307338533',
'9780307394484','B000UDNBPS',
'9780307481085','B001M60BO6',
'9780385515474','0385515472',
'9780307345745','0307345742',
'9780307394897','B000SCHB6Q',
'9780307350992','0307350991',
'9780307394477','B000RNKMVW',
'9780307394491','B000RRA8J4',
'9780385523011','B000RG1NVW',
'9780307341457','0307341453',
'9780307409263','B0013TRQD6',
'9780385520812','0385520816',
'9780385520805','B001E1O7XA',
'9780307351876','0307351874',
'9780307497802','B001PSEQCE',
'9780307394224','B000QCQ94I',
'9780307351012','0307351017',
'9780307383457','0307383458',
'9780385521024','0385521022',
'9780307719539','0307719537',
'9780385523165','B000QFBXDC',
'9780385518437','0385518439',
'9780385519960','B000SEIFCI',
'9780385518406','0385518404',
'9780385521437','B000OYF016',
'9780385519229','0385519222',
'9780307394118','B000OYF002',
'9780307339720','0307339726',
'9780307548962','B000SEIFJQ',
'9780385516228','0385516223',
'9780385521260','038552126X',
'9780385513579','0385513577',
'9780307492425','B000SEGIIQ',
'9780385521918','B000OI0G5C',
'9780767923309','0767923308',
'9780767927321','B000OI0G7A',
'9781400082476','1400082471',
'9780307339454','0307339459',
'9780767922487','0767922484',
'9780767929288','B0012P2OMO',
'9780767926744','0767926749',
'9780307381965','B000PDZF6G',
'9780307352675','B000N2HCJM',
'9780307341518','0307341518',
'9780385516204','0385516207',
'9780767926300','B000MAHBSM',
'9780767923132','0767923138',
'9780767926331','B000MAHBUA',
'9780385518499','0385518498',
'9780385519922','B000N2HCJC',
'9780307453150','B0019O6IYI',
'9780767922586','0767922581',
'9780385520034','B000N2HCLK',
'9780307345622','0307345629',
'9780385518321','0385518323',
'9780385521536','B000PDZFIE',
'9780307352699','B000JMKVGW',
'9781400082995','1400082994',
'9780307341532','0307341534',
'9780307449610','B0013TXA0E',
'9780385520157','B000JMKRBG',
'9780385518925','0385518927',
'9780385517478','0385517475',
'9780385520102','B000JMKR9S',
'9780307336699','0307336697',
'9780307347121','B000JMKN9C',
'9780385519816','B000JMKN92',
'9780385519809','B000JMKN8S',
'9780385519694','0385519699',
'9780385519687','0385519680',
'9780767920568','0767920562',
'9780767924924','B000GCFW8M',
'9780767920575','0767920570',
'9780385517546','0385517548',
'9780385518918','B000PDZFMA',
'9780767922425','0767922425',
'9780767927857','B000QCQ948',
'9780767927864','B000QCQ8VW',
'9780385518444','B000GCFWBO',
'9780385517096','0385517092',
'9780385522076','038552207X',
'9780767922852','0767922859',
'9780767922845','0767922840',
'9780767918688','0767918681',
'9780385517614','B000PDZFFW',
'9780767924948','B000GCFWGO',
'9780307424136','B000XU4U9O',
'9780385504669','0385504667',
'9781400049660','1400049660',
'9780307498878','B000SEFJ1I',
'9780385517621','B000GCFBOC',
'9780307345493','B000GCFCIC',
'9780307336002','030733600X',
'9780385516518','0385516517',
'9780307498892','B000SEI7F8',
'9780307336132','0307336131',
'9780307345752','B000GCFCQE',
'9780307336842','0307336840',
'9780767921206','0767921208',
'9780767921213','0767921216',
'9780767923880','B000FCKPEE',
'9780307424228','B0012SMGME',
'9780385517775','B000FCKPO4',
'9780385516662','0385516665',
'9780385514354','0385514352',
'9780385513562','0385513569',
'9780385517607','B000FCKPFI',
'9780385516969','B000FCKNP0',
'9780307236999','0307236994',
'9780385514828','0385514824',
'9780307498861','B000S1LEQ0',
'9780385513593','0385513593',
'9780307423993','B0012RMVD4',
'9780385517010','B000FCKPKS',
'9781400098392','1400098394',
'9780385527316','0385527314',
'9780385512077','0385512074',
'9780385516884','B000FCKGJS',
'9780385516853','B000FCKFA8',
'9780385514781','0385514786',
'9781400080939','1400080932',
'9780307337320','B000FCKDKK',
'9781400052202','1400052203',
'9781400052196','140005219X',
'9780307336484','B000FCKCAQ',
'9780307238344','B000FCKCCO',
'9780385516242','038551624X',
'9780767915878','0767915879',
'9780385516808','B000FCKBNO',
'9780385516303','0385516304',
'9780385515351','0385515359',
'9780385515733','B000FCK8W8',
'9780385507578','0385507577',
'9780385515160','B000FCK90O',
'9780385507585','0385507585',
'9780385510301','0385510306',
'9781400050123','140005012X',
'9780307237262','B004SOVC7E',
'9780307237187','B000FCK91S',
'9781400082940','1400082943',
'9780307337023','B000FCK4SQ',
'9781400054626','1400054621',
'9780307423825','B0012RMVFM',
'9780385512480','0385512481',
'9781400080502','1400080509',
'9780307421012','B000XUDG12',
'9780767911788','0767911784',
'9780307236647','B000FCK1T8',
'9781400081981','140008198X',
'9780385512060','0385512066',
'9780385515290','B000FCJZ4K',
'9780385512053','0385512058',
'9780385514347','0385514344',
'9780385515276','B000FCK0HG',
'9781400098309','B000FC2Q9G',
'9781400048663','1400048664',
'9780767920308','B000FC2OM0',
'9780767919463','0767919467',
'9780767919470','0767919475',
'9780767919487','0767919483',
'9780767920247','B000FCJXTM',
'B0020HRVG2', 'B0050DIWHU', 'B00C0ALZ0W', 'B00GL3PZ0K', 'B00H6JBFOS', 'B00ALBR6LO', 'B00BVJG2P6',
'B00G1J1D28', 'B00M60PIXQ', 'B00N6PESWC', '9780385521338', '9780307888907', '9780804137508', '9780804141444',
'9780385346658', '9780770434007', '9780770436735', '9780804137386', '9780385347594', '9780804136723') | {
"repo_name": "gabelev/MISMI",
"path": "asins.py",
"copies": "1",
"size": "39327",
"license": "mit",
"hash": 2475938425944966700,
"line_mean": 31.0521597392,
"line_max": 126,
"alpha_frac": 0.7616650139,
"autogenerated": false,
"ratio": 2.010171744019628,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.32718367579196284,
"avg_score": null,
"num_lines": null
} |
__all__ = ["TrainerConfig", "ModelConfig"]
class TrainerConfig(object):
    """Hyper-parameters that control the training loop (optimizer, batching,
    shuffling and logging).  All values are plain class attributes so callers
    can read or override them without instantiating the class."""

    # Whether to use GPU in training or not.
    use_gpu = False
    # The number of computing threads.
    trainer_count = 1
    # The training batch size.
    batch_size = 32
    # The epoch number.
    num_passes = 10
    # The global learning rate.
    learning_rate = 1e-3
    # The decay rate for L2Regularization
    l2_learning_rate = 1e-3
    # This parameter is used for the averaged SGD.
    # About the average_window * (number of the processed batch) parameters
    # are used for average.
    # To be accurate, between average_window *(number of the processed batch)
    # and 2 * average_window * (number of the processed batch) parameters
    # are used for average.
    average_window = 0.5
    # The buffer size of the data reader.
    # The number of buffer size samples will be shuffled in training.
    buf_size = 1000
    # The parameter is used to control logging period.
    # Training log will be printed every log_period.
    log_period = 100
class ModelConfig(object):
    """Network-architecture hyper-parameters for the model built elsewhere in
    this package."""

    # The dimension of embedding vector.
    emb_size = 28
    # The hidden size of sentence vectors.
    hidden_size = 128
| {
"repo_name": "qingqing01/models",
"path": "nested_sequence/text_classification/config.py",
"copies": "4",
"size": "1206",
"license": "apache-2.0",
"hash": -3848514369348313600,
"line_mean": 25.2173913043,
"line_max": 77,
"alpha_frac": 0.6683250415,
"autogenerated": false,
"ratio": 4.02,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.66883250415,
"avg_score": null,
"num_lines": null
} |
__all__ = ["TrainerConfig", "ModelConfig"]
class TrainerConfig(object):
    """Hyper-parameters that control the training loop (device, batching,
    momentum, shuffling and logging).  Values are plain class attributes so
    callers can read or override them without instantiating the class."""

    # Whether to use GPU in training or not.
    use_gpu = True
    # The number of computing threads.
    trainer_count = 1
    # The training batch size.
    batch_size = 10
    # The epoch number.
    num_passes = 10
    # Parameter updates momentum.
    momentum = 0
    # The shape of images.
    # NOTE(review): presumably (width, height) in pixels of the input
    # images — confirm against the data reader.
    image_shape = (173, 46)
    # The buffer size of the data reader.
    # The number of buffer size samples will be shuffled in training.
    buf_size = 1000
    # The parameter is used to control logging period.
    # Training log will be printed every log_period.
    log_period = 50
class ModelConfig(object):
    """Network-architecture hyper-parameters: image convolution group,
    block-expand layer, GRU and CTC settings."""

    # Number of the filters for convolution group.
    filter_num = 8
    # Use batch normalization or not in image convolution group.
    with_bn = True
    # The number of channels for block expand layer.
    num_channels = 128
    # The parameter stride_x in block expand layer.
    stride_x = 1
    # The parameter stride_y in block expand layer.
    stride_y = 1
    # The parameter block_x in block expand layer.
    block_x = 1
    # The parameter block_y in block expand layer.
    block_y = 11
    # The hidden size for gru.
    # Deliberately tied to num_channels so the GRU width matches the
    # block-expand output.
    hidden_size = num_channels
    # Use norm_by_times or not in warp ctc layer.
    norm_by_times = True
    # The list for number of filter in image convolution group layer.
    filter_num_list = [16, 32, 64, 128]
    # The parameter conv_padding in image convolution group layer.
    conv_padding = 1
    # The parameter conv_filter_size in image convolution group layer.
    conv_filter_size = 3
    # The parameter pool_size in image convolution group layer.
    pool_size = 2
    # The parameter pool_stride in image convolution group layer.
    pool_stride = 2
| {
"repo_name": "lcy-seso/models",
"path": "scene_text_recognition/config.py",
"copies": "4",
"size": "1817",
"license": "apache-2.0",
"hash": 8108204166392936000,
"line_mean": 23.2266666667,
"line_max": 70,
"alpha_frac": 0.660429279,
"autogenerated": false,
"ratio": 4.011037527593819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6671466806593819,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Transaction']
import time
import thread
import threading
class Transaction(object):
    """Thread-local write transaction over ``store``.

    Operations are buffered in ``self._log`` while the transaction is open
    (via :meth:`log`) and replayed once it ends.  Before committing, the
    transaction spins until no concurrently committing transaction holds a
    conflicting operation.

    NOTE(review): this code targets Python 2 (``thread`` module) and relies
    on ``Table`` being importable at module level — neither is defined in
    this file; verify against the surrounding package.
    """

    def __init__(self, store):
        self.store = store
        # Buffered operations; commit() shows the layout of each entry:
        # (db, table, function, args, kwargs).
        self._log = []

    def __enter__(self):
        self.begin()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.end()
        # Returning False never suppresses exceptions from the with-body.
        return False

    def begin(self):
        """Push this transaction onto the current thread's stack."""
        # thread local transaction
        tx_queue = self.store.transactions[thread.get_ident()]
        tx_queue.append(self)

    def end(self):
        """Pop this transaction off the thread's stack and execute it."""
        # thread local transaction (popped value is this transaction itself).
        self.store.transactions[thread.get_ident()].pop()
        self.execute()

    def log(self, inst):
        """Buffer one operation tuple (db, table, function, args, kwargs)."""
        self._log.append(inst)

    def check(self):
        """Return True when no committing transaction conflicts with ours.

        Two log entries conflict when they target the same database (index 0)
        and table (index 1) and both operations are ``Table._commit_insert``.
        """
        with self.store.check_lock:
            for tx in self.store.commiting_transactions:
                for a in self._log:
                    for b in tx._log:
                        # detect conflict: compare databases and tables
                        if a[0] == b[0] and a[1] == b[1]:
                            # Compare the logged operation's function, which
                            # lives at index 2 of the entry (see commit()).
                            # Bug fix: the original compared index 3 — the
                            # args tuple — against Table._commit_insert,
                            # which could never match, so conflicts were
                            # silently undetectable.
                            if a[2] == b[2] == Table._commit_insert:
                                return False
        return True

    def commit(self):
        """Replay every buffered operation in insertion order."""
        for inst in self._log:
            db, table, f, args, kwargs = inst
            f(*args, **kwargs)

    def execute(self):
        """Busy-wait until check() passes, then commit."""
        while not self.check():
            time.sleep(0.001)
        self.commit()
| {
"repo_name": "yadb/yadb",
"path": "backup/store/transaction.py",
"copies": "1",
"size": "1795",
"license": "mit",
"hash": -1324444956722477000,
"line_mean": 25.0144927536,
"line_max": 68,
"alpha_frac": 0.4746518106,
"autogenerated": false,
"ratio": 4.626288659793815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5600940470393815,
"avg_score": null,
"num_lines": null
} |
# All transforms used to answer the four questions of exercise 1, broken up by question.
# These are repeated for clarity in the question answers.
# Python import to ensure HiveContext is ready
# Stop any SparkContext left over from a previous (interactive) run before
# creating a fresh one; NameError means no context existed yet.
try:
    sc.stop()
except NameError:
    pass
from pyspark import SparkContext
sc = SparkContext()
from pyspark.sql import HiveContext
sqlContext = HiveContext(sc)

# Every DROP below uses "IF EXISTS" so the script is idempotent: plain
# "DROP TABLE x" raises an error on a fresh metastore where the table was
# never created, aborting the whole run.

# Best Hospitals Transforms
## Create hospital performance table
sqlContext.sql("DROP TABLE IF EXISTS hosp_perf_score")
sqlContext.sql("CREATE TABLE hosp_perf_score AS SELECT hospitals.provider_id, hospitals.hospital_name, hospitals.state, CAST(total_performance.total_performance_score AS FLOAT) AS score FROM hospitals JOIN total_performance ON total_performance.provider_number = hospitals.provider_id")
## Create effective_care_simple table: removes most fields
sqlContext.sql("DROP TABLE IF EXISTS effective_care_simple")
sqlContext.sql("CREATE TABLE effective_care_simple AS SELECT effective_care_full.provider_id, effective_care_full.condition, effective_care_full.measure_id, effective_care_full.measure_name, CAST(effective_care_full.score AS float) AS score FROM effective_care_full")
## Create hosp_condition_count table
sqlContext.sql("DROP TABLE IF EXISTS hosp_condition_count")
sqlContext.sql("CREATE TABLE hosp_condition_count AS SELECT effective_care_simple.provider_id, COUNT (DISTINCT effective_care_simple.condition) AS num_conditions, COUNT (effective_care_simple.measure_id) AS num_measures FROM effective_care_simple WHERE effective_care_simple.score IS NOT NULL GROUP BY effective_care_simple.provider_id")

# Best State Transforms
## Create table of hospital counts per state
sqlContext.sql("DROP TABLE IF EXISTS state_all_hosp_count")
sqlContext.sql("CREATE TABLE state_all_hosp_count AS SELECT hospitals.state, COUNT(*) AS count FROM hospitals GROUP BY hospitals.state ORDER BY count DESC")
## Create table of descriptive stats for the number of hospitals in each state
sqlContext.sql("DROP TABLE IF EXISTS state_all_hosp_summary")
sqlContext.sql("CREATE TABLE state_all_hosp_summary AS SELECT sum(count) as total_hospitals, avg(count) as avg_hosp_per_state, min(count) as min_hosp, max(count) as max_hosp, stddev_pop(count) as std_dev, percentile(count, 0.5) as median FROM state_all_hosp_count")
## Create temporary table of hospital scores
sqlContext.sql("DROP TABLE IF EXISTS TEMP_all_hosp_perf_score")
sqlContext.sql("CREATE TABLE TEMP_all_hosp_perf_score AS SELECT hospitals.provider_id, hospitals.hospital_name, hospitals.state, CAST(total_performance.total_performance_score AS float) AS score FROM hospitals LEFT OUTER JOIN total_performance ON hospitals.provider_id = total_performance.provider_number")
## Replace nulls with 0
sqlContext.sql("DROP TABLE IF EXISTS all_hosp_perf_score")
sqlContext.sql("CREATE TABLE all_hosp_perf_score AS SELECT provider_id, hospital_name, state, CASE WHEN score IS NOT NULL THEN score ELSE 0 END AS score FROM TEMP_all_hosp_perf_score")
sqlContext.sql("DROP TABLE IF EXISTS TEMP_all_hosp_perf_score")
## Count non-reports per state
sqlContext.sql("DROP TABLE IF EXISTS state_non_reports")
sqlContext.sql("CREATE TABLE state_non_reports AS SELECT state, COUNT(*) as missing_scores FROM all_hosp_perf_score WHERE score = 0 GROUP BY state ORDER BY state")
## Calculate the percentage of non-reports by state:
sqlContext.sql("DROP TABLE IF EXISTS state_report_percentage")
sqlContext.sql("CREATE TABLE state_report_percentage AS SELECT state_non_reports.state as state, ((state_all_hosp_count.count - state_non_reports.missing_scores) / state_all_hosp_count.count) as report_percentage FROM state_non_reports JOIN state_all_hosp_count ON state_non_reports.state = state_all_hosp_count.state")
## Create a table showing each state's average and variance scores
sqlContext.sql("DROP TABLE IF EXISTS state_score")
sqlContext.sql("CREATE TABLE state_score AS SELECT state, avg(score) AS average_score, variance(score) AS variance FROM all_hosp_perf_score GROUP BY state ORDER BY state")
## Create a table showing each state's average, variance score, and report percentage
sqlContext.sql("DROP TABLE IF EXISTS state_summary")
sqlContext.sql("CREATE TABLE state_summary AS SELECT state_score.state as state, state_score.average_score, state_score.variance, state_report_percentage.report_percentage FROM state_score LEFT OUTER JOIN state_report_percentage ON (state_score.state = state_report_percentage.state)")

# Hospital Variability Transforms
## Create temp condition table
sqlContext.sql("DROP TABLE IF EXISTS TEMP_condition")
sqlContext.sql("CREATE TABLE TEMP_condition AS SELECT condition, count(*) as full_condition_cnt, COUNT(CASE WHEN score >= 0 AND score < 10000000 THEN score ELSE null END) as restricted_cnt, variance(score) as score_variance FROM effective_care_simple GROUP BY condition")
## Create final condition table with percentage of valid scores and a multiplier for final adjusted score use
sqlContext.sql("DROP TABLE IF EXISTS condition_variance")
sqlContext.sql("CREATE TABLE condition_variance AS SELECT condition, full_condition_cnt, restricted_cnt, score_variance, (restricted_cnt / full_condition_cnt) as valid_percent, (2 - (restricted_cnt / full_condition_cnt)) AS scalar_adj FROM TEMP_condition")
sqlContext.sql("DROP TABLE IF EXISTS TEMP_condition")

# Hospitals and Patients Transforms
## Create table with procedure, outcome, composite, and patient survey scores with the help of a temp table
sqlContext.sql("DROP TABLE IF EXISTS TEMP_hosp_patient_scores")
sqlContext.sql("CREATE TABLE TEMP_hosp_patient_scores AS SELECT provider_number, CAST(unweighted_normalized_clinical_process_of_care_domain_score AS FLOAT) AS process_score, CAST(unweighted_normalized_outcome_domain_score AS FLOAT) AS outcome_score, CAST(unweighted_patient_experience_of_care_domain_score AS FLOAT) AS patient_score FROM total_performance")
sqlContext.sql("DROP TABLE IF EXISTS hosp_patient_scores")
sqlContext.sql("CREATE TABLE hosp_patient_scores AS SELECT provider_number, process_score, outcome_score, (0.5 * process_score + 0.5 * outcome_score) AS composite_score, patient_score FROM TEMP_hosp_patient_scores")
sqlContext.sql("DROP TABLE IF EXISTS TEMP_hosp_patient_scores")
| {
"repo_name": "bruinAlex/indoor-stomach",
"path": "exercise_1/transforming/all_transforms.py",
"copies": "1",
"size": "6048",
"license": "mit",
"hash": -5985247810553818000,
"line_mean": 79.64,
"line_max": 357,
"alpha_frac": 0.7953042328,
"autogenerated": false,
"ratio": 3.595719381688466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48910236144884656,
"avg_score": null,
"num_lines": null
} |
__all__ = ['TreekvParser']
class Logger(object):
    """Default message sink used by the parser: echoes messages to stdout."""

    def __init__(self):
        pass

    def write(self, message):
        """Emit *message* on its own line via print()."""
        print(message)
class KeyValue(object):
    """A node in the parsed key/value tree.

    Each node has a ``key``, an optional string ``value``, an ordered list of
    child nodes, an optional :class:`Lineinfo`, and a '/'-joined ``path``
    computed from its ancestors.  Nodes are flagged "used" when retrieved via
    the ``get_*`` accessors so leftovers can be reported by get_unused().
    """

    def __init__(self, key, value=None, lineinfo=None, logger=Logger()):
        # NOTE(review): the default Logger() is created once at class
        # definition time and shared by every instance; Logger is
        # stateless, so this is harmless, but pass an explicit logger if
        # that ever changes.
        self.key = key
        self.value = value
        self._list = []            # child KeyValue nodes, insertion order
        self.lineinfo = lineinfo
        self.logger = logger
        self._flag = False         # True once this node has been consumed
        self._parent = None
        # Provisional path until set_parent() links us into a tree.
        self.path = '<unkown>/{}'.format(key)

    def value_list(self):
        """Split the value on commas, stripping whitespace from each part."""
        return [v.strip() for v in self.value.split(',')]

    def __iter__(self):
        # Supports ``key, value = node`` unpacking.
        yield self.key
        yield self.value

    def set_parent(self, parent):
        """Attach this node under *parent* and recompute its full path."""
        self._parent = parent
        paths = []
        p = self
        while p:
            paths.append(p.key)
            p = p._parent
        self.path = '/'.join(paths[::-1])

    def items(self):
        """Iterate over the direct children."""
        return iter(self._list)

    def descendants(self):
        """Yield this node and, depth-first, every node below it."""
        yield self
        for child in self._list:
            for node in child.descendants():
                yield node

    def get_path(self, path):
        """Return the list of nodes matching a '/'-separated *path*.

        '.' matches this node itself; each path segment matches child keys.
        """
        if path == '.':
            return [self]
        elif '/' not in path:
            return self.get(path)
        else:
            head, sep, rest = path.partition('/')
            ret = []
            for child in self.get(head):
                ret += child.get_path(rest)
            return ret

    def add(self, kv):
        """Append *kv* as a child, fixing up its parent link and path."""
        kv.set_parent(self)
        self._list.append(kv)

    def has_item(self):
        """True when this node has at least one child."""
        return len(self._list) > 0

    def last(self):
        """Return the most recently added child.

        Raises IndexError when there are none — the original's bare
        try/except only re-raised a message-less IndexError, so plain
        indexing is both simpler and more informative.
        """
        return self._list[-1]

    def get(self, key):
        """Return all direct children whose key equals *key*."""
        return [item for item in self._list if item.key == key]

    def __repr__(self):
        return "KeyValue({},{},{})".format(self.key, self.value, self._list)

    def text(self, indent=0):
        """Render this subtree as an indented multi-line string.

        The original had two identical format branches; children simply add
        nothing when the list is empty.
        """
        ret = "{}: {} {}\n".format(self.key, self.value, self.path)
        for child in self._list:
            ret += child.text(indent + 4)
        return ''.join(' ' * indent + line + '\n' for line in ret.splitlines())

    def set_used(self):
        """Mark this node as consumed."""
        self._flag = True

    def get_one(self, path):
        """Return exactly one node for *path*, or None.

        Logs a complaint (and returns None) when the path is ambiguous.
        """
        r = self.get_path(path)
        if not r:
            return None
        elif len(r) == 1:
            r[0].set_used()
            return r[0]
        else:
            self.logger.write('Too many items {}'.format(path))
            return None

    def get_many(self, path):
        """Return every node matching *path*, marking each as used."""
        r = self.get_path(path)
        for node in r:
            node.set_used()
        return r

    def get_tree(self, path):
        """Return nodes matching *path*, marking their whole subtrees used.

        Bug fix: get_path() returns a *list*, so the original call
        ``r.descendants()`` raised AttributeError; we must iterate each
        matched node's descendants instead.
        """
        r = self.get_path(path)
        for node in r:
            for sub in node.descendants():
                sub.set_used()
        return r

    def get_unused(self):
        """Yield every descendant that was never marked used."""
        for node in self.descendants():
            if not node._flag:
                yield node
class Lineinfo(object):
    """Source location: file name, raw line text, and 1-based line number."""

    def __init__(self, filename, line, lineno):
        self.filename = filename
        self.line = line
        self.lineno = lineno

    def error_msg(self, msg):
        """Format *msg* together with this source location for diagnostics."""
        location = 'in "{0}", line {1}: '.format(self.filename, self.lineno)
        return msg + '\n' + location + '\n' + self.line
def count_until(items, value):
    """Count how many leading elements of *items* equal *value*."""
    n = 0
    for element in items:
        if element != value:
            break
        n += 1
    return n
class TreekvParser(object):
    """Parser for indentation-structured "key: value" text.

    Indentation (multiples of *tab_size* spaces) denotes nesting; lines
    starting with '#', '//' or ';' are comments.  Malformed lines are
    logged via *logger* and skipped rather than raising.
    """
    def __init__(self, tab_size=4, logger=None):
        self.tab_size = tab_size
        # Fix: default used to be one Logger() shared across all parsers.
        self.logger = logger if logger is not None else Logger()
    def read(self, filename):
        """Parse *filename* and return the root KeyValue node."""
        # Fix: 'rU' mode was removed in Python 3.11 ('r' already gives
        # universal newlines); also close the file deterministically.
        with open(filename, 'r') as fileobj:
            return self.readfp(fileobj, filename)
    def readfp(self, fileobj, filename=None):
        """Parse an open file object; returns the root KeyValue node."""
        if not filename:
            try:
                filename = fileobj.name
            except AttributeError:  # e.g. StringIO has no .name
                filename = '<unknown>'
        lineno = 0
        root = KeyValue('root')
        # tab_stops[i] is the parent node for entries indented i levels.
        tab_stops = [root]
        for line in fileobj:
            lineno += 1
            li = Lineinfo(filename, line, lineno)
            tab = count_until(line, ' ')
            if tab % self.tab_size != 0:
                self.logger.write(li.error_msg(
                    'Ignoring the line due to unknown tab stop {}. '
                    'tab stops must be {}*n'.format(tab, self.tab_size)))
                continue
            l = line.lstrip()
            # Skip blanks and comments.  (The original repeated the
            # "if not l" check twice; the duplicate was dead code.)
            if not l or l.startswith(('#', '//', ';')):
                continue
            if ':' not in l:
                self.logger.write(li.error_msg(
                    'Unknown line. line format must be "key:value"'))
                continue
            key, sep, value = l.partition(':')
            item = KeyValue(key.strip(), value.strip(), li, self.logger)
            level = tab // self.tab_size
            current_level = len(tab_stops) - 1
            current_parent = tab_stops[-1]
            if level == current_level:
                current_parent.add(item)
            elif level == current_level + 1:
                # Fix: was a bare assert (stripped under -O); log & skip
                # instead, consistent with the other error paths.
                if not current_parent.has_item():
                    self.logger.write(li.error_msg(
                        'Indented line has no parent entry'))
                    continue
                new_parent = current_parent.last()
                new_parent.add(item)
                tab_stops.append(new_parent)
            elif level > current_level:
                # Fix: the original message formatted the wrong bounds
                # (level instead of the current maximum allowed indent).
                self.logger.write(li.error_msg(
                    'Too many indent spaces. This indent must be at most '
                    '{}, but got {}'.format(
                        self.tab_size * (current_level + 1), tab)))
                continue
            else:  # level < current_level: dedent to an earlier tab stop
                tab_stops = tab_stops[:level + 1]
                tab_stops[-1].add(item)
        return root
sample = """
general:
gene: NDRG2
primers: primers.txt
bsa_data: bsa_data.txt
tss: TSS
tissues: Brian, Liver, Colon
motifs: motifs
p53BS: GTGCAAGGTCCGGGGCGCTTGGCA
TATAbox: TATAWAW
mir650: TGCCTCC
BamHI: GGATCC
XhoI: CTCGAG
ecorv: GATATC
ecori: GAATTC
WT1: GTGTGTGTGTGTG
HRE3: GCGTG
HRE2: GCGTG
HRE1: GCGTCC
probe: CGGGCGGCTGGACGCTTCCAGGCTCTGCTCGGCTCACCAAAACATTCCAC
pcr: Genomic-PCR
ChIP1: ChIP1-FW, ChIP1-RV
ChIP2: ChIP2-FW, ChIP2-RV
ChIP2-dash: BSP4-FW, ChIP2-RV
ChIP3: ChIP3-FW, ChIP3-RV
upstream: genome-up-stream-FW, NDRG2 cDNA 1ab-3 RV
"""
if __name__=='__main__':
    # Demo: parse the bundled sample document and pretty-print the tree.
    import io
    parser = TreekvParser()
    kv = parser.readfp(io.StringIO(sample))
    print(kv.text())
| {
"repo_name": "mizuy/seqtool",
"path": "seqtool/util/parser.py",
"copies": "1",
"size": "6396",
"license": "mit",
"hash": 8440031361017789000,
"line_mean": 24.8947368421,
"line_max": 169,
"alpha_frac": 0.5129768605,
"autogenerated": false,
"ratio": 3.6361569073337123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.962732705245815,
"avg_score": 0.004361343075112394,
"num_lines": 247
} |
# Public API: the two concrete tree-ORAM storage managers.
__all__ = ('TreeORAMStorageManagerExplicitAddressing',
           'TreeORAMStorageManagerPointerAddressing')
import struct
import copy
from pyoram.util.virtual_heap import \
SizedVirtualHeap
from six.moves import xrange
class TreeORAMStorage(object):
    """Shared path-load/shuffle/evict machinery for tree-based ORAM.

    Maintains zero-copy memoryviews over the bytes of the currently
    loaded bucket path (root down to a stop bucket), plus per-slot
    bookkeeping (block ids, eviction levels, pending reordering) used
    when shuffling blocks and writing the path back.  Subclasses define
    the on-storage block header format via get_block_info().
    """
    # Sentinel block id marking an unoccupied slot.
    empty_block_id = -1
    # struct formats ('!' = network byte order): '?' real-block flag,
    # 'L' unsigned block id.
    block_status_storage_string = "!?"
    block_id_storage_string = "!L"
    block_info_storage_string = "!?L"
    block_status_storage_size = \
        struct.calcsize(block_status_storage_string)
    block_info_storage_size = \
        struct.calcsize(block_info_storage_string)
    # Pre-packed status tags prepended to empty/real blocks.
    empty_block_bytes_tag = \
        struct.pack(block_status_storage_string, False)
    real_block_bytes_tag = \
        struct.pack(block_status_storage_string, True)
    def __init__(self,
                 storage_heap,
                 stash):
        """Bind to a bucket heap and a stash mapping (block id -> bytes)."""
        self.storage_heap = storage_heap
        self.stash = stash
        vheap = self.storage_heap.virtual_heap
        self.bucket_size = self.storage_heap.bucket_size
        self.block_size = self.bucket_size // vheap.blocks_per_bucket
        # Buckets must divide evenly into fixed-size blocks.
        assert self.block_size * vheap.blocks_per_bucket == \
            self.bucket_size
        self.path_stop_bucket = None
        self.path_bucket_count = 0
        # One flat byte buffer holding a full path; the views below alias it.
        self.path_byte_dataview = \
            bytearray(self.bucket_size * vheap.levels)
        dataview = memoryview(self.path_byte_dataview)
        self.path_bucket_dataview = \
            [dataview[(i*self.bucket_size):((i+1)*self.bucket_size)]
             for i in xrange(vheap.levels)]
        # Per-block views, level-major then block-within-bucket.
        self.path_block_dataview = []
        for i in xrange(vheap.levels):
            bucketview = self.path_bucket_dataview[i]
            for j in xrange(vheap.blocks_per_bucket):
                self.path_block_dataview.append(
                    bucketview[(j*self.block_size):((j+1)*self.block_size)])
        max_blocks_on_path = vheap.levels * vheap.blocks_per_bucket
        assert len(self.path_block_dataview) == max_blocks_on_path
        # Parallel per-slot bookkeeping arrays (rebuilt by load_path()).
        self.path_block_ids = [-1] * max_blocks_on_path
        self.path_block_eviction_levels = [None] * max_blocks_on_path
        self.path_block_reordering = [None] * max_blocks_on_path
        self.path_blocks_inserted = []
    def load_path(self, b):
        """Read the bucket path from the root to bucket *b* into the local
        views, reusing buckets shared with the previously loaded path, and
        rebuild the per-slot bookkeeping arrays."""
        vheap = self.storage_heap.virtual_heap
        Z = vheap.blocks_per_bucket
        lcl = vheap.clib.calculate_last_common_level
        k = vheap.k
        read_level_start = 0
        if self.path_stop_bucket is not None:
            # don't download the root and any other buckets
            # that are common between the previous bucket path
            # and the new one
            read_level_start = lcl(k, self.path_stop_bucket, b)
        assert 0 <= b < vheap.bucket_count()
        self.path_stop_bucket = b
        new_buckets = self.storage_heap.read_path(
            self.path_stop_bucket,
            level_start=read_level_start)
        self.path_bucket_count = read_level_start + len(new_buckets)
        pos = 0
        for i in xrange(self.path_bucket_count):
            if i >= read_level_start:
                self.path_bucket_dataview[i][:] = \
                    new_buckets[i-read_level_start][:]
            for j in xrange(Z):
                block_id, block_addr = \
                    self.get_block_info(self.path_block_dataview[pos])
                self.path_block_ids[pos] = block_id
                if block_id != self.empty_block_id:
                    # Deepest path level this block may legally occupy.
                    self.path_block_eviction_levels[pos] = \
                        lcl(k, self.path_stop_bucket, block_addr)
                else:
                    self.path_block_eviction_levels[pos] = None
                self.path_block_reordering[pos] = None
                pos += 1
        max_blocks_on_path = vheap.levels * Z
        # Clear bookkeeping for slots beyond the loaded path length.
        while pos != max_blocks_on_path:
            self.path_block_ids[pos] = None
            self.path_block_eviction_levels[pos] = None
            self.path_block_reordering[pos] = None
            pos += 1
        self.path_blocks_inserted = []
    def push_down_path(self):
        """Greedily move real blocks toward the deep end of the loaded path,
        into empty slots at levels their eviction level allows, recording
        each move in path_block_reordering (bytes move later, in
        evict_path())."""
        vheap = self.storage_heap.virtual_heap
        Z = vheap.blocks_per_bucket
        bucket_count = self.path_bucket_count
        block_ids = self.path_block_ids
        block_eviction_levels = self.path_block_eviction_levels
        block_reordering = self.path_block_reordering
        def _do_swap(write_pos, read_pos):
            # Move bookkeeping from read_pos to write_pos; mark the
            # vacated read_pos with -1 so it is tagged empty on eviction.
            block_ids[write_pos], block_eviction_levels[write_pos] = \
                block_ids[read_pos], block_eviction_levels[read_pos]
            block_ids[read_pos], block_eviction_levels[read_pos] = \
                self.empty_block_id, None
            block_reordering[write_pos] = read_pos
            block_reordering[read_pos] = -1
        def _new_write_pos(current):
            # Scan toward the root for the next empty slot; returns
            # (position, level) or (None, None).
            current -= 1
            if current < 0:
                return None, None
            while (block_eviction_levels[current] is not None):
                current -= 1
                if current < 0:
                    return None, None
            assert block_ids[current] == \
                self.empty_block_id
            return current, current // Z
        def _new_read_pos(current):
            # Scan toward the root for the next slot holding a real block.
            current -= 1
            if current < 0:
                return None
            while (block_eviction_levels[current] is None):
                current -= 1
                if current < 0:
                    return None
            assert block_ids[current] != \
                self.empty_block_id
            return current
        write_pos, write_level = _new_write_pos(bucket_count * Z)
        while write_pos is not None:
            read_pos = _new_read_pos(write_pos)
            if read_pos is None:
                break
            # Skip candidates already in this bucket, or whose eviction
            # level forbids placement this deep.
            while ((read_pos // Z) == write_level) or \
                  (write_level > block_eviction_levels[read_pos]):
                read_pos = _new_read_pos(read_pos)
                if read_pos is None:
                    break
            if read_pos is not None:
                _do_swap(write_pos, read_pos)
            else:
                # Jump directly to the start of this
                # bucket. There is not point in checking
                # for other empty slots because no blocks
                # can be evicted to this level.
                write_pos = Z * (write_pos//Z)
            write_pos, write_level = _new_write_pos(write_pos)
    def fill_path_from_stash(self):
        """Fill remaining empty path slots, deepest first, with stash blocks
        whose eviction level permits that depth; chosen blocks are queued
        in path_blocks_inserted and removed from the stash."""
        vheap = self.storage_heap.virtual_heap
        lcl = vheap.clib.calculate_last_common_level
        k = vheap.k
        Z = vheap.blocks_per_bucket
        bucket_count = self.path_bucket_count
        stop_bucket = self.path_stop_bucket
        block_ids = self.path_block_ids
        block_eviction_levels = self.path_block_eviction_levels
        blocks_inserted = self.path_blocks_inserted
        # Memoized eviction levels for stash entries (computed lazily).
        stash_eviction_levels = {}
        largest_write_position = (bucket_count * Z) - 1
        for write_pos in xrange(largest_write_position,-1,-1):
            write_level = write_pos // Z
            if block_ids[write_pos] == self.empty_block_id:
                del_id = None
                for id_ in self.stash:
                    if id_ not in stash_eviction_levels:
                        block_id, block_addr = \
                            self.get_block_info(self.stash[id_])
                        assert id_ != self.empty_block_id
                        eviction_level = stash_eviction_levels[id_] = \
                            lcl(k, stop_bucket, block_addr)
                    else:
                        eviction_level = stash_eviction_levels[id_]
                    if write_level <= eviction_level:
                        block_ids[write_pos] = id_
                        block_eviction_levels[write_pos] = \
                            eviction_level
                        blocks_inserted.append(
                            (write_pos, self.stash[id_]))
                        del_id = id_
                        break
                # Deletion deferred: mutating the stash while iterating
                # over it above would be invalid.
                if del_id is not None:
                    del self.stash[del_id]
    def evict_path(self):
        """Apply the pending reordering and stash insertions to the path
        byte views, then write the whole path back to the storage heap."""
        vheap = self.storage_heap.virtual_heap
        Z = vheap.blocks_per_bucket
        bucket_count = self.path_bucket_count
        stop_bucket = self.path_stop_bucket
        bucket_dataview = self.path_bucket_dataview
        block_dataview = self.path_block_dataview
        block_reordering = self.path_block_reordering
        blocks_inserted = self.path_blocks_inserted
        # Copy moved blocks deepest-first so sources are not clobbered
        # before they are read (moves always go deeper).
        for i, read_pos in enumerate(
                reversed(block_reordering)):
            if (read_pos is not None) and \
               (read_pos != -1):
                write_pos = len(block_reordering) - 1 - i
                block_dataview[write_pos][:] = block_dataview[read_pos][:]
        # Tag vacated slots (-1) as empty.
        for write_pos, read_pos in enumerate(block_reordering):
            if read_pos == -1:
                self.tag_block_as_empty(block_dataview[write_pos])
        # Splice in the blocks pulled from the stash.
        for write_pos, block in blocks_inserted:
            block_dataview[write_pos][:] = block[:]
        self.storage_heap.write_path(
            stop_bucket,
            (bucket_dataview[i].tobytes()
             for i in xrange(bucket_count)))
    def extract_block_from_path(self, id_):
        """Remove and return a copy of block *id_*'s bytes from the loaded
        path, or None when the id is not on the path."""
        block_ids = self.path_block_ids
        block_dataview = self.path_block_dataview
        try:
            pos = block_ids.index(id_)
            # make a copy
            block = bytearray(block_dataview[pos])
            self._set_path_position_to_empty(pos)
            return block
        except ValueError:
            return None
    def _set_path_position_to_empty(self, pos):
        # Mark slot *pos* vacated; the -1 reordering entry makes
        # evict_path() tag the slot's bytes as empty.
        self.path_block_ids[pos] = self.empty_block_id
        self.path_block_eviction_levels[pos] = None
        self.path_block_reordering[pos] = -1
    @staticmethod
    def tag_block_as_empty(block):
        """Overwrite *block*'s status header with the empty tag, in place."""
        block[:TreeORAMStorage.block_status_storage_size] = \
            TreeORAMStorage.empty_block_bytes_tag[:]
    @staticmethod
    def tag_block_with_id(block, id_):
        """Write a (real=True, id_) header into *block*, in place."""
        assert id_ >= 0
        struct.pack_into(TreeORAMStorage.block_info_storage_string,
                         block,
                         0,
                         True,
                         id_)
    def get_block_info(self, block):
        """Return (block_id, heap_address) decoded from *block*'s header;
        implemented by the addressing-specific subclasses."""
        raise NotImplementedError                      # pragma: no cover
class TreeORAMStorageManagerExplicitAddressing(TreeORAMStorage):
    """Tree-ORAM storage manager backed by an external position map.

    Blocks are assumed to begin with the packed (status, id) header;
    a real block's heap address is looked up in ``position_map`` by id.
    """
    block_info_storage_string = TreeORAMStorage.block_info_storage_string
    block_info_storage_size = struct.calcsize(block_info_storage_string)

    def __init__(self, storage_heap, stash, position_map):
        super(TreeORAMStorageManagerExplicitAddressing, self).__init__(
            storage_heap, stash)
        self.position_map = position_map

    def get_block_info(self, block):
        """Decode (id, heap address) from *block*'s header.

        Empty blocks yield (empty_block_id, None).
        """
        is_real, block_id = struct.unpack_from(
            self.block_info_storage_string, block)
        if not is_real:
            return self.empty_block_id, None
        return block_id, self.position_map[block_id]
class TreeORAMStorageManagerPointerAddressing(TreeORAMStorage):
    """Tree-ORAM storage manager with a pointer-based position map.

    Each block stores its own current heap bucket address immediately
    after the (status, id) header, so no external position map is kept.
    """
    block_info_storage_string = \
        TreeORAMStorage.block_info_storage_string + "L"
    block_info_storage_size = struct.calcsize(block_info_storage_string)

    def __init__(self, storage_heap, stash):
        super(TreeORAMStorageManagerPointerAddressing, self).__init__(
            storage_heap, stash)
        # No external map; addresses live inside the blocks themselves.
        self.position_map = None

    def get_block_info(self, block):
        """Decode (id, heap address) from *block*'s header.

        Empty blocks yield (empty_block_id, 0).
        """
        is_real, block_id, address = struct.unpack_from(
            self.block_info_storage_string, block)
        if is_real:
            return block_id, address
        return self.empty_block_id, 0
| {
"repo_name": "ghackebeil/PyORAM",
"path": "src/pyoram/oblivious_storage/tree/tree_oram_helper.py",
"copies": "1",
"size": "12252",
"license": "mit",
"hash": -3026134481358254600,
"line_mean": 36.2401215805,
"line_max": 76,
"alpha_frac": 0.5562357166,
"autogenerated": false,
"ratio": 3.8735377805880495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9927807390207902,
"avg_score": 0.0003932213960293718,
"num_lines": 329
} |
# Public API of this module.
__all__ = ['TrendClassifier',
           'TrendNormalizer',
           'BaselineNormalizer',
           'SpikeNormalizer',
           'SmoothingNormalizer',
           'LogNormalizer']
from collections import OrderedDict
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.base import TransformerMixin as _TransformerMixin
from sklearn.utils import array2d, column_or_1d, check_arrays
from ..algorithms.nikolov import detect_stream
from ..normalizations import *
class TrendClassifier(BaseEstimator, ClassifierMixin):
    """Binary classifier wrapping Nikolov's ``detect_stream`` algorithm.

    fit() partitions the training series into positive (y == 1) and
    negative (y == 0) reference sets; detect()/predict() compare each
    observed series against those references.
    """
    def __init__(self, gamma=1, theta=1, D_req=1, N_obs=None, N_ref=None):
        self.gamma = gamma
        self.theta = theta
        self.D_req = D_req
        self.N_obs = N_obs
        self.N_ref = N_ref
    def fit(self, X, y):
        """Store positive/negative reference series from (X, y).

        y must contain exactly the labels 0 and 1.
        """
        X, y = check_arrays(X, y, sparse_format='dense')
        y = np.asarray(column_or_1d(y), dtype='int8')
        n_samples, n_features = X.shape
        # NOTE: mutating the N_ref hyperparameter inside fit() violates
        # sklearn conventions, but predict()/detect() rely on it; kept.
        if self.N_ref is None:
            self.N_ref = n_features
        if self.N_ref < n_features:
            X = X[:, :self.N_ref]
        unique_y = np.unique(y)
        if not np.array_equal(unique_y, (0, 1)):
            # Fix: the original raised ValueError with no message.
            raise ValueError(
                'y must contain exactly the labels 0 and 1, got %r'
                % (unique_y,))
        R_pos = X[(y == 1)]
        R_neg = X[(y == 0)]
        self.R_pos_ = R_pos
        self.R_neg_ = R_neg
        return self
    def predict(self, X):
        """Return 1 for rows where a detection occurred, else 0."""
        i_pred = self.detect(X)
        y_pred = np.array([i is not None for i in i_pred], dtype='int8')
        return y_pred
    def detect(self, X):
        """Run detect_stream on each row; returns one detection (index or
        None) per row."""
        X = array2d(X)
        n_samples, n_features = X.shape
        N_obs = self.N_obs if self.N_obs is not None else n_features
        if N_obs > self.N_ref:
            # Fix: the original raised ValueError with no message.
            raise ValueError(
                'N_obs (%d) cannot exceed N_ref (%d)' % (N_obs, self.N_ref))
        i_pred = []
        for X_i in X:
            detection = detect_stream(X_i, N_obs,
                                      self.R_pos_, self.R_neg_,
                                      self.gamma, self.theta, self.D_req)
            i_pred.append(detection)
        return i_pred
class TransformerMixin(_TransformerMixin):
    """sklearn TransformerMixin whose fit() is a no-op.

    The normalizers below are stateless, so fitting does nothing.
    """
    def fit(self, X, y=None, **kwargs):
        return self
class TrendNormalizer(BaseEstimator, TransformerMixin):
    """Chain of normalizers applied in order: baseline, spike,
    smoothing, then log; each stage is included only when its
    controlling parameter enables it."""
    def __init__(self, beta=1, alpha=1.2, N_smooth=1, log=True,
                 mode='online', epsilon=0.01):
        self.beta = beta
        self.alpha = alpha
        self.N_smooth = N_smooth
        self.log = log
        self.epsilon = epsilon
        steps = OrderedDict()
        if beta is not None:
            steps['baseline'] = BaselineNormalizer(beta, mode=mode,
                                                   epsilon=epsilon)
        if alpha is not None:
            steps['spike'] = SpikeNormalizer(alpha)
        if N_smooth is not None and N_smooth > 1:
            steps['smoothing'] = SmoothingNormalizer(N_smooth)
        if log:
            steps['log'] = LogNormalizer(epsilon=epsilon)
        self._transformers = steps
    def transform(self, X, y=None):
        """Apply every configured stage to X, in insertion order."""
        result = array2d(X)
        for stage in self._transformers.values():
            result = stage.transform(result)
        return result
class BaselineNormalizer(BaseEstimator, TransformerMixin):
    """Applies ``normalize_baseline`` (see ..normalizations) with the
    configured beta/mode/epsilon."""
    def __init__(self, beta=1, mode='online', epsilon=0.01):
        self.beta = beta
        self.mode = mode
        self.epsilon = epsilon
    def transform(self, X, y=None):
        data = array2d(X)
        return normalize_baseline(data, self.beta, mode=self.mode,
                                  epsilon=self.epsilon)
class SpikeNormalizer(BaseEstimator, TransformerMixin):
    """Applies ``normalize_spikes`` with parameter alpha."""
    def __init__(self, alpha=1.2):
        self.alpha = alpha
    def transform(self, X, y=None):
        data = array2d(X)
        return normalize_spikes(data, self.alpha)
class SmoothingNormalizer(BaseEstimator, TransformerMixin):
    """Applies ``moving_average`` with parameter N."""
    def __init__(self, N=1):
        self.N = N
    def transform(self, X, y=None):
        data = array2d(X)
        return moving_average(data, self.N)
class LogNormalizer(BaseEstimator, TransformerMixin):
    """Elementwise ``log(X + epsilon)`` transform."""
    def __init__(self, epsilon=0.01):
        self.epsilon = epsilon
    def transform(self, X, y=None):
        data = array2d(X)
        return np.log(data + self.epsilon)
| {
"repo_name": "norbert/hearsay",
"path": "hearsay/models/nikolov.py",
"copies": "1",
"size": "4158",
"license": "mit",
"hash": 7295942375702504000,
"line_mean": 26.72,
"line_max": 74,
"alpha_frac": 0.5738335738,
"autogenerated": false,
"ratio": 3.5937770095073467,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46676105833073467,
"avg_score": null,
"num_lines": null
} |
__all__ = ['Tripcode']
class Tripcode (object):
"""
Base class for tripcodes.
"""
def __init__ (self, cipher, key=None):
"""
Initializes a new instance from a ciphertext and an optional key.
"""
self.cipher = cipher
self.key = key
def __cmp__ (self, other):
"""
Compares two tripcodes.
"""
if isinstance(other, Tripcode):
return cmp(self.cipher, other.cipher)
else:
return cmp(type(self), type(other))
def __hash__ (self):
"""
Returns a hash value for this tripcode.
"""
return hash(self.cipher)
def __repr__ (self):
"""
Returns a string representation fit for eval.
"""
return '{self.__class__.__name__}({})'.format (
', '.join(map(repr, (self.cipher, self.key))),
self=self
)
def __str__ (self):
"""
Returns a string representation.
"""
return self.cipher
def solve (self, solver):
"""
Attempts to solve tripcode using a solver.
If the tripcode is already solved (key is not None) then no action is
taken.
"""
if self.key is None:
self.key = solver.solve(self.cipher)
def solved (self):
"""
Returns whether the tripcode is solved or not.
"""
return self.key is not None
| {
"repo_name": "crypt3lx2k/Imageboard-Image-Scraper",
"path": "iwi/core/Tripcode.py",
"copies": "3",
"size": "1447",
"license": "mit",
"hash": 7247972173553072000,
"line_mean": 23.9482758621,
"line_max": 77,
"alpha_frac": 0.5044920525,
"autogenerated": false,
"ratio": 4.243401759530792,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6247893812030791,
"avg_score": null,
"num_lines": null
} |
# Public API of this module.
__all__ = ['Triple', 'Quad', 'q_as_t', 't_as_q', 'Literal', 'NamedNode',
           'Prefix', 'BlankNode', 'Graph', 'Dataset', 'PrefixMap', 'TermMap',
           'parse_curie', 'is_language', 'lang_match', 'to_curie']
import collections
import datetime
from operator import itemgetter
import urllib
import urlparse
import pymantic.uri_schemes as uri_schemes
from pymantic.util import quote_normalized_iri
from pymantic.serializers import nt_escape
def is_language(lang):
    """Is something a valid XML language?"""
    # Anything that is not a NamedNode is accepted as a language token.
    return not isinstance(lang, NamedNode)
def lang_match(lang1, lang2):
    """Determines if two languages are, in fact, the same language.
    Eg: en is the same as en-us and en-uk."""
    if lang1 is None or lang2 is None:
        # Two missing languages match; one missing does not.
        return lang1 is None and lang2 is None
    primary1, _, sub1 = lang1.partition('-')
    primary2, _, sub2 = lang2.partition('-')
    if primary1 != primary2:
        return False
    # Subtags match when either is absent or they are equal.
    return sub1 == '' or sub2 == '' or sub1 == sub2
def parse_curie(curie, prefixes):
    """
    Parses a CURIE within the context of the given namespaces. Will also accept
    explicit URIs and wrap them in a NamedNode.
    Specifically:
    1) If the CURIE is not of the form [stuff] and the prefix is in the list of
       standard URI schemes, it is wrapped in a NamedNode and returned unchanged.
    2) Otherwise, the CURIE is parsed by the rules of CURIE Syntax 1.0:
       http://www.w3.org/TR/2007/WD-curie-20070307/ The default namespace is
       the namespace keyed by the empty string in the namespaces dictionary.
    3) If the CURIE's namespace cannot be resolved, a ValueError is raised.
    """
    # NOTE(review): an empty string raises IndexError on curie[0].
    definitely_curie = False
    if curie[0] == '[' and curie[-1] == ']':
        # [bracketed] form is unambiguously a CURIE, never a plain URI.
        curie = curie[1:-1]
        definitely_curie = True
    prefix, sep, reference = curie.partition(':')
    if not definitely_curie:
        if prefix in uri_schemes.schemes:
            # Looks like an absolute URI (http:, ftp:, ...): wrap directly.
            return NamedNode(curie)
        if not reference and '' in prefixes:
            # No colon at all: resolve the whole token against the
            # default (empty-string-keyed) namespace.
            reference = prefix
            return Prefix(prefixes[''])(reference)
    if prefix in prefixes:
        return Prefix(prefixes[prefix])(reference)
    else:
        raise ValueError('Could not parse CURIE prefix %s from prefixes %s' %
                         (prefix, prefixes))
def parse_curies(curies, namespaces):
    """Parse multiple CURIEs at once, yielding one term per input."""
    for entry in curies:
        yield parse_curie(entry, namespaces)
def to_curie(uri, namespaces, seperator=":", explicit=False):
    """Abbreviate *uri* to a CURIE using the prefix -> namespace mappings
    in *namespaces*; return the URI unchanged when no namespace matches.

    The longest matching namespace wins.  With explicit=True the
    abbreviated form is wrapped in []s to mark it as definitely a CURIE.
    (Note: the historical parameter spelling "seperator" is preserved.)
    """
    candidates = [(prefix, namespace)
                  for prefix, namespace in namespaces.items()
                  if uri.startswith(namespace)]
    if not candidates:
        return uri
    prefix, namespace = max(candidates, key=lambda pair: len(pair[1]))
    abbreviated = uri.replace(namespace, prefix + seperator)
    if explicit:
        return '[' + abbreviated + ']'
    return abbreviated
class Triple(tuple):
    """Triple(subject, predicate, object)
    The Triple interface represents an RDF Triple. The stringification of a
    Triple results in an N-Triples.
    """
    # Hand-expanded namedtuple (predates typing.NamedTuple); the protocol
    # methods (__new__/__getnewargs__/_make/_replace) must keep
    # collections.namedtuple semantics for pickling and copying.
    __slots__ = ()
    _fields = ('subject', 'predicate', 'object')
    def __new__(_cls, subject, predicate, object):
        return tuple.__new__(_cls, (subject, predicate, object))
    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new Triple object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != 3:
            raise TypeError('Expected 3 arguments, got %d' % len(result))
        return result
    def __repr__(self):
        return 'Triple(subject=%r, predicate=%r, object=%r)' % self
    def _asdict(t):
        'Return a new dict which maps field names to their values'
        return {'subject': t[0], 'predicate': t[1], 'object': t[2]}
    def _replace(_self, **kwds):
        'Return a new Triple object replacing specified fields with new values'
        result = _self._make(map(kwds.pop, ('subject', 'predicate', 'object'),
                                 _self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % kwds.keys())
        return result
    def __getnewargs__(self):
        return tuple(self)
    # Read-only positional accessors.
    subject = property(itemgetter(0))
    predicate = property(itemgetter(1))
    object = property(itemgetter(2))
    def __str__(self):
        # N-Triples line; requires term objects exposing toNT().
        return self.subject.toNT() + ' ' + self.predicate.toNT() + ' ' + \
            self.object.toNT() + ' .\n'
    def toString(self):
        return str(self)
class Quad(tuple):
    'Quad(subject, predicate, object, graph)'
    # Hand-expanded namedtuple; see Triple above for the protocol notes.
    __slots__ = ()
    _fields = ('subject', 'predicate', 'object', 'graph')
    def __new__(_cls, subject, predicate, object, graph):
        return tuple.__new__(_cls, (subject, predicate, object, graph))
    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new Quad object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != 4:
            raise TypeError('Expected 4 arguments, got %d' % len(result))
        return result
    def __repr__(self):
        return 'Quad(subject=%r, predicate=%r, object=%r, graph=%r)' % self
    def _asdict(t):
        'Return a new dict which maps field names to their values'
        return {'subject': t[0], 'predicate': t[1], 'object': t[2],
                'graph': t[3], }
    def _replace(_self, **kwds):
        'Return a new Quad object replacing specified fields with new values'
        result = _self._make(map(kwds.pop, ('subject', 'predicate', 'object',
                                            'graph'), _self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % kwds.keys())
        return result
    def __getnewargs__(self):
        return tuple(self)
    # Read-only positional accessors.
    subject = property(itemgetter(0))
    predicate = property(itemgetter(1))
    object = property(itemgetter(2))
    graph = property(itemgetter(3))
    def __str__(self):
        # NOTE(review): unlike Triple.__str__, this uses str() on the
        # terms rather than toNT() -- confirm intended.
        return str(self.subject) + ' ' + str(self.predicate) + ' ' + \
            str(self.object) + ' ' + str(self.graph) + ' .\n'
def q_as_t(quad):
    """Project a Quad onto its (subject, predicate, object) Triple."""
    return Triple(quad[0], quad[1], quad[2])
def t_as_q(graph_name, triple):
    """Wrap *triple* into a Quad belonging to *graph_name*."""
    return Quad(triple[0], triple[1], triple[2], graph_name)
class Literal(tuple):
    """Literal(`value`, `language`, `datatype`)
    Literals represent values such as numbers, dates and strings in RDF data. A
    Literal is comprised of three attributes:
    * a lexical representation of the nominalValue
    * an optional language represented by a string token
    * an optional datatype specified by a NamedNode
    Literals representing plain text in a natural language may have a language
    attribute specified by a text string token, as specified in [BCP47],
    normalized to lowercase (e.g., 'en', 'fr', 'en-gb').
    Literals may not have both a datatype and a language."""
    # Hand-expanded namedtuple; see Triple above for the protocol notes.
    __slots__ = ()
    _fields = ('value', 'language', 'datatype')
    # Conversions for non-string constructor values: each maps a Python
    # type to (lexical form, default XSD datatype).
    types = {
        int: lambda v: (str(v), XSD('integer')),
        datetime.datetime: lambda v: (v.isoformat(), XSD('dateTime'))
    }
    def __new__(_cls, value, language=None, datatype=None):
        # Python 2 module: 'unicode' is the py2 text type.
        if not isinstance(value, str) and not isinstance(value, unicode):
            value, auto_datatype = _cls.types[type(value)](value)
            if datatype is None:
                datatype = auto_datatype
        return tuple.__new__(_cls, (value, language, datatype))
    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new Literal object from a sequence or iterable'
        result = new(cls, iterable)
        if len(result) != 3:
            raise TypeError('Expected 3 arguments, got %d' % len(result))
        return result
    def __repr__(self):
        return 'Literal(value=%r, language=%r, datatype=%r)' % self
    def _asdict(t):
        'Return a new dict which maps field names to their values'
        return {'value': t[0], 'language': t[1], 'datatype': t[2]}
    def _replace(_self, **kwds):
        'Return a new Literal object replacing specified fields with new value'
        result = _self._make(map(kwds.pop, ('value', 'language', 'datatype'),
                                 _self))
        if kwds:
            raise ValueError('Got unexpected field names: %r' % kwds.keys())
        return result
    def __getnewargs__(self):
        return tuple(self)
    # Read-only positional accessors.
    value = property(itemgetter(0))
    language = property(itemgetter(1))
    datatype = property(itemgetter(2))
    interfaceName = "Literal"
    def __str__(self):
        return unicode(self.value)
    def toNT(self):
        """Serialize as an N-Triples literal (quoted, with optional
        @language or ^^datatype suffix)."""
        quoted = '"' + nt_escape(self.value) + '"'
        if self.language:
            return quoted + '@' + self.language
        elif self.datatype:
            # NOTE(review): emits the bare datatype IRI via str(), not
            # <...> via toNT() -- confirm against the N-Triples grammar.
            return quoted + '^^' + str(self.datatype)
        else:
            return quoted
class NamedNode(unicode):
    """A node identified by an IRI.  (Python 2: subclasses ``unicode``,
    so the node *is* its IRI string.)"""
    interfaceName = "NamedNode"
    @property
    def value(self):
        # The IRI itself.
        return self
    def __repr__(self):
        return 'NamedNode(' + self.toNT() + ')'
    def __str__(self):
        return self.value
    def toNT(self):
        """Serialize as an N-Triples IRI: normalized, escaped, in <>."""
        return '<' + nt_escape(quote_normalized_iri(self.value)) + '>'
class Prefix(NamedNode):
    """A NamedNode usable as a namespace prefix: calling it appends a
    local name, producing a new NamedNode (e.g. ``XSD('integer')``)."""
    def __call__(self, name):
        expanded = self + name
        return NamedNode(expanded)
# Common prefix for XML Schema datatype IRIs (used by Literal.types).
XSD = Prefix("http://www.w3.org/2001/XMLSchema#")
class BlankNode(object):
    """A BlankNode is a reference to an unnamed resource (one for which an
    IRI is not known) and may be used in a Triple as a unique reference
    to that unnamed resource.

    Stringified as "_:" plus a per-object identifier ("blank node
    identifier"), e.g. _:b142.
    """
    interfaceName = "BlankNode"
    @property
    def value(self):
        # Identifier derived from this object's memory address, with each
        # hex digit shifted by 17 code points.
        shifted = [chr(ord(c) + 17) for c in hex(id(self))[2:]]
        return ''.join(shifted)
    def __repr__(self):
        return 'BlankNode()'
    def __str__(self):
        return '_:' + self.value
    def toNT(self):
        # Blank nodes serialize identically in N-Triples and str().
        return str(self)
from collections import defaultdict
def Index():
    # Autovivifying nested mapping: missing keys create sub-Indexes.
    # Used for the Graph triple indexes (spo / pos / osp).
    return defaultdict(Index)
class Graph(object):
    """A `Graph` holds a set of one or more `Triple`. Implements the Python
    set/sequence API for `in`, `for`, and `len`"""
    def __init__(self, graph_uri=None):
        # NOTE(review): a graph_uri of None is still wrapped, producing
        # NamedNode(u'None') rather than staying None -- confirm intended.
        if not isinstance(graph_uri, NamedNode):
            graph_uri = NamedNode(graph_uri)
        self._uri = graph_uri
        self._triples = set()
        # Three nested indexes, one per lookup order:
        # subject->predicate->object, predicate->object->subject and
        # object->subject->predicate; every leaf stores the Triple itself.
        self._spo = Index()
        self._pos = Index()
        self._osp = Index()
        self._actions = set()
    @property
    def uri(self):
        """URI name of the graph, if it has been given a name"""
        return self._uri
    def addAction(self, action):
        """Register an action object; returns the graph for chaining."""
        self._actions.add(action)
        return self
    def add(self, triple):
        """Adds the specified Triple to the graph. This method returns the
        graph instance it was called on."""
        self._triples.add(triple)
        # Keep all three indexes in sync with the triple set.
        self._spo[triple.subject][triple.predicate][triple.object] = triple
        self._pos[triple.predicate][triple.object][triple.subject] = triple
        self._osp[triple.object][triple.subject][triple.predicate] = triple
        return self
    def remove(self, triple):
        """Removes the specified Triple from the graph. This method returns the
        graph instance it was called on."""
        self._triples.remove(triple)
        del self._spo[triple.subject][triple.predicate][triple.object]
        del self._pos[triple.predicate][triple.object][triple.subject]
        del self._osp[triple.object][triple.subject][triple.predicate]
        return self
    def match(self, subject=None, predicate=None, object=None):
        """This method returns a new sequence of triples which is comprised of
        all those triples in the current instance which match the given
        arguments, that is, for each triple in this graph, it is included in
        the output graph, if:
        * calling triple.subject.equals with the specified subject as an
        argument returns true, or the subject argument is null, AND
        * calling triple.property.equals with the specified property as an
        argument returns true, or the property argument is null, AND
        * calling triple.object.equals with the specified object as an argument
        returns true, or the object argument is null
        This method implements AND functionality, so only triples matching all
        of the given non-null arguments will be included in the result.
        """
        # Dispatch on which terms are bound, choosing the index whose key
        # order resolves the bound terms first.
        if subject:
            if predicate:  # s, p, ???
                if object:  # s, p, o
                    if Triple(subject, predicate, object) in self:
                        yield Triple(subject, predicate, object)
                else:  # s, p, ?var
                    for triple in self._spo[subject][predicate].itervalues():
                        yield triple
            else:  # s, ?var, ???
                if object:  # s, ?var, o
                    for triple in self._osp[object][subject].itervalues():
                        yield triple
                else:  # s, ?var, ?var
                    for predicate in self._spo[subject]:
                        for triple in \
                                self._spo[subject][predicate].itervalues():
                            yield triple
        elif predicate:  # ?var, p, ???
            if object:  # ?var, p, o
                for triple in self._pos[predicate][object].itervalues():
                    yield triple
            else:  # ?var, p, ?var
                for object in self._pos[predicate]:
                    for triple in self._pos[predicate][object].itervalues():
                        yield triple
        elif object:  # ?var, ?var, o
            for subject in self._osp[object]:
                for triple in self._osp[object][subject].itervalues():
                    yield triple
        else:
            # Nothing bound: full scan.
            for triple in self._triples:
                yield triple
    def removeMatches(self, subject, predicate, object):
        """This method removes those triples in the current graph which match
        the given arguments."""
        # NOTE(review): removes while match() is still generating over the
        # underlying sets; consider materializing with list(...) first.
        for triple in self.match(subject, predicate, object):
            self.remove(triple)
        return self
    def addAll(self, graph_or_triples):
        """Imports the graph or set of triples in to this graph. This method
        returns the graph instance it was called on."""
        for triple in graph_or_triples:
            self.add(triple)
        return self
    def merge(self, graph):
        """Returns a new Graph which is a concatenation of this graph and the
        graph given as an argument."""
        new_graph = Graph()
        for triple in graph:
            new_graph.add(triple)
        for triple in self:
            new_graph.add(triple)
        return new_graph
    def __contains__(self, item):
        return item in self._triples
    def __len__(self):
        return len(self._triples)
    def __iter__(self):
        return iter(self._triples)
    def toArray(self):
        """Return the set of :py:class:`Triple` within the :py:class:`Graph`"""
        return frozenset(self._triples)
    def subjects(self):
        """Returns an iterator over subjects in the graph."""
        return self._spo.iterkeys()
    def predicates(self):
        """Returns an iterator over predicates in the graph."""
        return self._pos.iterkeys()
    def objects(self):
        """Returns an iterator over objects in the graph."""
        return self._osp.iterkeys()
class Dataset(object):
    """A collection of named Graphs keyed by graph IRI (an RDF dataset).

    Quads are stored by delegating their triple part to per-graph Graph
    instances.  q_as_t / t_as_q (defined elsewhere in this module)
    presumably convert between Quad and Triple -- confirm there.
    """
    def __init__(self):
        # defaultdict: looking up an unknown graph name implicitly creates
        # an empty Graph for it (relied upon by add()/remove()/match()).
        self._graphs = defaultdict(Graph)
    def add(self, quad):
        # Record the graph's name on the Graph object itself, then store
        # the quad's triple part in that graph.
        self._graphs[quad.graph]._uri = quad.graph
        self._graphs[quad.graph].add(q_as_t(quad))
    def remove(self, quad):
        # NOTE: via defaultdict this creates an empty Graph first when the
        # named graph does not exist yet.
        self._graphs[quad.graph].remove(q_as_t(quad))
    def add_graph(self, graph, named=None):
        # Attach an existing Graph under `named` (or its own uri).
        name = named or graph.uri
        if name:
            graph._uri = name
            self._graphs[graph.uri] = graph
        else:
            raise ValueError("Graph must be named")
    def remove_graph(self, graph_or_uri):
        # NOTE(review): unimplemented stub -- removing a graph is a no-op.
        pass
    @property
    def graphs(self):
        # All Graph instances currently held by the dataset.
        return self._graphs.values()
    def match(self, subject=None, predicate=None, object=None, graph=None):
        # Yield quads matching the pattern; None/falsy acts as a wildcard.
        # With a graph name, only that graph is searched (and implicitly
        # created if absent -- defaultdict); otherwise all graphs are.
        if graph:
            matches = self._graphs[graph].match(subject, predicate, object)
            for match in matches:
                yield t_as_q(graph, match)
        else:
            for graph_uri, graph in self._graphs.iteritems():  # Python 2 dict API
                for match in graph.match(subject, predicate, object):
                    yield t_as_q(graph_uri, match)
    def removeMatches(self, subject=None, predicate=None, object=None,
                      graph=None):
        """This method removes those quads in the dataset which match
        the given arguments.  Returns self for chaining."""
        for quad in self.match(subject, predicate, object, graph):
            self.remove(quad)
        return self
    def addAll(self, dataset_or_quads):
        """Imports the dataset or set of quads in to this dataset.  This
        method returns the instance it was called on."""
        for quad in dataset_or_quads:
            self.add(quad)
        return self
    def __len__(self):
        # Total triple count across every named graph.
        return sum(len(g) for g in self.graphs)
    def __contains__(self, item):
        # Quads (anything with a .graph attribute) are looked up in their
        # named graph; bare triples are searched across all graphs.
        if hasattr(item, "graph"):
            if item.graph in self._graphs:
                graph = self._graphs[item.graph]
                return q_as_t(item) in graph
            # NOTE(review): falls through to an implicit None (falsy) when
            # the named graph is absent, rather than an explicit False.
        else:
            for graph in self._graphs.itervalues():
                if item in graph:
                    return True
            # NOTE(review): likewise implicitly returns None on a miss.
    def __iter__(self):
        # Yield every quad: each graph's triples tagged with its uri.
        for graph in self._graphs.itervalues():
            for triple in graph:
                yield t_as_q(graph.uri, triple)
    def toArray(self):
        # Immutable snapshot of all quads in the dataset.
        return frozenset(self)
# RDF Enviroment Interfaces
class PrefixMap(dict):
    """A dict mapping namespace prefixes to IRIs, with helpers to expand
    CURIEs to IRIs and shrink IRIs back to CURIEs.

    Setting ``prefixes['rdfs'] = "http://www.w3.org/2000/01/rdf-schema#"``
    lets ``resolve("rdfs:label")`` produce the full IRI and ``shrink`` do
    the inverse.  ``setDefault`` installs the IRI used for CURIEs with an
    empty prefix such as ":me".
    """
    def resolve(self, curie):
        """Expand a CURIE (e.g. "rdfs:label") whose prefix is known into
        the corresponding IRI."""
        return parse_curie(curie, self)
    def shrink(self, iri):
        """Compress an IRI into a CURIE when a matching prefix is known;
        otherwise the IRI is returned unchanged."""
        return to_curie(iri, self)
    def addAll(self, other, override=False):
        """Merge another mapping into this one.  Existing entries are kept
        unless ``override`` is true.  Returns self for chaining."""
        if override:
            self.update(other)
            return self
        for prefix, iri in other.iteritems():  # Python 2 dict API, as elsewhere in this module
            if prefix not in self:
                self[prefix] = iri
        return self
    def setDefault(self, iri):
        """Install the IRI used when resolving CURIEs that carry an empty
        prefix, for example ":this"."""
        self[''] = iri
class TermMap(dict):
    """A dict mapping simple string terms to IRIs, with helpers to turn
    one into the other.

    ``terms['member'] = "http://www.w3.org/ns/org#member"`` lets
    ``resolve("member")`` return the IRI and ``shrink`` do the inverse.
    Unknown terms resolve to None unless a default vocabulary has been
    installed with ``setDefault``, in which case the IRI is formed by
    concatenating the default and the term.
    """
    def addAll(self, other, override=False):
        """Merge another mapping into this one.  Existing entries are kept
        unless ``override`` is true.  Returns self for chaining."""
        if override:
            self.update(other)
            return self
        for term, iri in other.iteritems():  # Python 2 dict API, as elsewhere in this module
            if term not in self:
                self[term] = iri
        return self
    def resolve(self, term):
        """Return the IRI for a known term; for unknown terms return the
        default vocabulary concatenated with the term, or None when no
        default has been set."""
        if not hasattr(self, 'default'):
            return self.get(term)
        return self.get(term, self.default + term)
    def setDefault(self, iri):
        """Install the vocabulary IRI used for terms that cannot be
        resolved; the result is this iri concatenated with the term."""
        self.default = iri
    def shrink(self, iri):
        """Return the term mapped to ``iri`` when one is known, otherwise
        the IRI itself."""
        for known_term, known_iri in self.iteritems():  # Python 2 dict API
            if known_iri == iri:
                return known_term
        return iri
class Profile(object):
"""Profiles provide an easy to use context for negotiating between CURIEs,
Terms and IRIs."""
def __init__(self, prefixes=None, terms=None):
self.prefixes = prefixes or PrefixMap()
self.terms = terms or TermMap()
if 'rdf' not in self.prefixes:
self.prefixes['rdf'] = \
'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
if 'xsd' not in self.prefixes:
self.prefixes['xsd'] = 'http://www.w3.org/2001/XMLSchema#'
def resolve(self, toresolve):
"""Given an Term or CURIE this method will return an IRI, or null if it
cannot be resolved.
If toresolve contains a : (colon) then this method returns the result
of calling prefixes.resolve(toresolve)
otherwise this method returns the result of calling
terms.resolve(toresolve)"""
if ':' in toresolve:
return self.prefixes.resolve(toresolve)
else:
return self.terms.resolve(toresolve)
def setDefaultVocabulary(self, iri):
"""This method sets the default vocabulary for use when resolving
unknown terms, it is identical to calling the setDefault method on
terms."""
self.terms.setDefault(iri)
def setDefaultPrefix(self, iri):
"""This method sets the default prefix for use when resolving CURIEs
without a prefix, for example ":me", it is identical to calling the
setDefault method on prefixes."""
self.prefixes.setDefault(iri)
def setTerm(self, term, iri):
"""This method associates an IRI with a term, it is identical to
calling the set method on term."""
self.terms[term] = iri
def setPrefix(self, prefix, iri):
"""This method associates an IRI with a prefix, it is identical to
calling the set method on prefixes."""
self.prefixes[prefix] = iri
def importProfile(self, profile, override=False):
"""This method functions the same as calling
prefixes.addAll(profile.prefixes, override) and
terms.addAll(profile.terms, override), and allows easy updating and
merging of different profiles.
This method returns the instance on which it was called."""
self.prefixes.addAll(profile.prefixes, overide)
self.terms.addAll(profile.terms, override)
return self
class RDFEnvironment(Profile):
    """The RDF Environment is an interface which exposes a high level API for
    working with RDF in a programming environment."""
    def createBlankNode(self):
        """Creates a new :py:class:`BlankNode`."""
        return BlankNode()
    def createNamedNode(self, value):
        """Creates a new :py:class:`NamedNode`."""
        return NamedNode(value)
    def createLiteral(self, value, language=None, datatype=None):
        """Creates a :py:class:`Literal` given a value, an optional language
        and/or an optional datatype."""
        return Literal(value, language, datatype)
    def createTriple(self, subject, predicate, object):
        """Creates a :py:class:`Triple` given a subject, predicate and
        object."""
        return Triple(subject, predicate, object)
    def createGraph(self, triples=tuple()):
        """Creates a new :py:class:`Graph`, an optional sequence of
        :py:class:`Triple` to include within the graph may be specified, this
        allows easy transition between native sequences and Graphs and is the
        counterpart for :py:meth:`Graph.toArray`."""
        g = Graph()
        g.addAll(triples)
        return g
    def createAction(self, test, action):
        """Not supported by this implementation."""
        # BUG FIX: `raise NotImplemented` raised TypeError, because
        # NotImplemented is a comparison sentinel, not an exception class.
        raise NotImplementedError("createAction is not supported")
    def createProfile(self, empty=False):
        """Create a new :py:class:`Profile`; unless ``empty`` is true it is
        seeded with this environment's prefixes and terms."""
        if empty:
            return Profile()
        else:
            return Profile(self.prefixes, self.terms)
    def createTermMap(self, empty=False):
        """Create a new :py:class:`TermMap`; unless ``empty`` is true it is
        seeded with this environment's terms."""
        if empty:
            return TermMap()
        else:
            return TermMap(self.terms)
    def createPrefixMap(self, empty=False):
        """Create a new :py:class:`PrefixMap`; unless ``empty`` is true it
        is seeded with this environment's prefixes."""
        if empty:
            return PrefixMap()
        else:
            return PrefixMap(self.prefixes)
    # Pymantic DataSet Extensions
    def createQuad(self, subject, predicate, object, graph):
        """Creates a :py:class:`Quad` given a subject, predicate, object and
        graph name."""
        return Quad(subject, predicate, object, graph)
    def createDataset(self, quads=tuple()):
        """Creates a new :py:class:`Dataset`, optionally seeded with a
        sequence of :py:class:`Quad`."""
        ds = Dataset()
        ds.addAll(quads)
        return ds
| {
"repo_name": "igor-kim/blazegraph-python",
"path": "pymantic/primitives.py",
"copies": "4",
"size": "27194",
"license": "bsd-3-clause",
"hash": 4022649712399411700,
"line_mean": 32.1230207065,
"line_max": 80,
"alpha_frac": 0.6028535706,
"autogenerated": false,
"ratio": 4.074617920287683,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6677471490887684,
"avg_score": null,
"num_lines": null
} |
__all__ = ['triplot','lineplot','lineplot2','cube_quivers','simplex_quivers']
try:
import matplotlib.collections, matplotlib.pylab
except ImportError:
import warnings
warnings.warn("matplotlib not installed, some loss of functionality will result")
from scipy import rand,asarray,zeros,empty,average
from pydec import barycentric_gradients,combinations,Simplex
import numpy
def triplot(vertices, indices, labels=False):
    """
    Plot a 2D triangle mesh
    """
    vertices, indices = asarray(vertices), asarray(indices)
    # Gather each triangle's three (x, y) corner coordinates:
    # shape (num_triangles, 3, 2).
    corner_xy = vertices[numpy.ravel(indices), :].reshape((indices.shape[0], 3, 2))
    patches = matplotlib.collections.PolyCollection(corner_xy)
    patches.set_facecolor('grey')
    patches.set_alpha(0.5)
    patches.set_linewidth(1)
    axes = matplotlib.pylab.gca()
    axes.add_collection(patches, autolim=True)
    matplotlib.pylab.axis('off')
    axes.autoscale_view()
    if labels:
        # Annotate each triangle with its index at its barycenter.
        text_style = {'color' : 'k', 'fontsize' : 8,
                      'horizontalalignment' : 'center',
                      'verticalalignment' : 'center'
                      }
        for tri_num, center in enumerate(numpy.average(corner_xy, axis=1)):
            matplotlib.pylab.text(center[0], center[1], str(tri_num), text_style)
def lineplot2(tails,heads,labels=False,linewidths=1):
    """
    Plot 2D line segments given as separate tail and head point arrays.

    Parameters:
        tails, heads : (N,2) arrays of segment start and end points.
        labels : if True, draw each segment's index at its midpoint.
        linewidths : line width(s) passed to the LineCollection.
    """
    #3d tensor [segment index][vertex index][x/y value]
    data = empty((len(tails),2,2))
    data[:,0,:] = tails
    data[:,1,:] = heads
    col = matplotlib.collections.LineCollection(data)
    col.set_color('k')
    col.set_linewidth(linewidths)
    sub = matplotlib.pylab.gca()
    sub.add_collection(col,autolim=True)
    matplotlib.pylab.axis('off')
    sub.autoscale_view()
    if labels:
        # BUG FIX: this branch referenced the undefined name `lines`
        # (copied from lineplot); the segment array in this function is
        # `data`, so labels=True raised NameError.
        barycenters = numpy.average(data,axis=1)
        for n,bc in enumerate(barycenters):
            matplotlib.pylab.text(bc[0], bc[1], str(n), {'color' : 'k', 'fontsize' : 8,
                                                         'horizontalalignment' : 'center',
                                                         'verticalalignment' : 'center'
                                                         })
def lineplot(vertices,indices,labels=False,linewidths=1):
    """
    Plot 2D line segments
    """
    vertices, indices = asarray(vertices), asarray(indices)
    # Gather each segment's two (x, y) endpoints: shape (num_segments, 2, 2).
    segment_xy = vertices[numpy.ravel(indices), :].reshape((indices.shape[0], 2, 2))
    segments = matplotlib.collections.LineCollection(segment_xy)
    segments.set_color('k')
    segments.set_linewidth(linewidths)
    axes = matplotlib.pylab.gca()
    axes.add_collection(segments, autolim=True)
    matplotlib.pylab.axis('off')
    axes.autoscale_view()
    if labels:
        # Annotate each segment with its index at its midpoint.
        text_style = {'color' : 'k', 'fontsize' : 8,
                      'horizontalalignment' : 'center',
                      'verticalalignment' : 'center'
                      }
        for seg_num, center in enumerate(numpy.average(segment_xy, axis=1)):
            matplotlib.pylab.text(center[0], center[1], str(seg_num), text_style)
def cube_quivers(cmplx, vals):
    """Convert per-edge values of a cube complex into quiver (base,
    direction) pairs at face centers.

    Assumes `cmplx` is a pydec regular cube complex whose cube_array's
    last column stores the edge axis -- confirm against pydec docs.
    """
    dim = cmplx.complex_dimension()
    num_faces = cmplx[2].cube_array.shape[0]
    num_edges = cmplx[1].cube_array.shape[0]
    quiver_dirs = zeros((num_faces, dim))
    # |boundary|^T maps edge values onto incident faces.
    edge_to_face = cmplx[2].boundary.T.tocsr()
    edge_to_face.data = numpy.abs(edge_to_face.data)
    for axis in range(dim):
        on_axis = (cmplx[1].cube_array[:, -1] == axis)
        axis_vals = zeros(num_edges)
        axis_vals[on_axis] = vals[on_axis]
        quiver_dirs[:, axis] = 0.5 * (edge_to_face * axis_vals)
    quiver_bases = cmplx[2].cube_array[:, :dim] + 0.5
    return quiver_bases, quiver_dirs
def simplex_quivers(sc, form):
    """
    Sample a Whitney 1-form at simplex barycenters
    """
    quiver_bases = average(sc.vertices[sc[-1].simplices], axis=1)
    quiver_dirs = zeros((sc[-1].num_simplices, sc.embedding_dimension()))
    edge_index = sc[1].simplex_to_index
    for row, simplex in enumerate(sc[-1].simplices):
        verts = sorted(simplex)
        grads = barycentric_gradients(sc.vertices[verts, :])
        # Edge values of the 1-form, in combinations() order.
        edge_vals = [form[edge_index[Simplex(pair)]]
                     for pair in combinations(simplex, 2)]
        for (i, j), val in zip(combinations(range(len(verts)), 2), edge_vals):
            quiver_dirs[row, :] += val * (grads[j] - grads[i])
    quiver_dirs /= (sc.complex_dimension() + 1)
    return quiver_bases, quiver_dirs
##from scipy import *
##from pydec import *
##from pylab import quiver,show
##v = array([[0,0],[1,0],[0,1]])
##s = array([[0,1,2]])
##sc = SimplicialComplex(v,s)
##b,d = simplex_quivers(sc,array([1.0,0.0,0.0]))
##quiver(b[:,0],b[:,1],d[:,0],d[:,1])
##show()
| {
"repo_name": "DongliangGao/pydec",
"path": "pydec/vis/draw.py",
"copies": "6",
"size": "5172",
"license": "bsd-3-clause",
"hash": 9190885721886063000,
"line_mean": 28.2203389831,
"line_max": 90,
"alpha_frac": 0.5732791957,
"autogenerated": false,
"ratio": 3.4388297872340425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7012108982934042,
"avg_score": null,
"num_lines": null
} |
__all__ = ['TRMM3B40RTFile', 'TRMM3B41RTFile', 'TRMM3B42RTFile']
import sys
import warnings
from gzip import GzipFile
import numpy as np
from numpy import ma
class TRMM3B4XRTFile(object):
    """Base Class for read operations on TRMM 3B4XRT files.

    This class should not be used directly, use one of the derived
    classes instead.

    NOTE(review): the header parsing below relies on Python 2 str/bytes
    equivalence; running under Python 3 would need explicit decoding.
    """
    def __init__(self, filename):
        self.filename = filename
        # The ASCII key=value header occupies a fixed 2880-byte block.
        self._header_offset = 2880
        self._read_header()
        self._rows = int(self._hdr['number_of_latitude_bins'])
        self._cols = int(self._hdr['number_of_longitude_bins'])
    def _read_binary(self):
        """Read the whole file (transparently decompressing .gz) and
        return its contents as a binary string.
        """
        if self.filename.split('.')[-1] == 'gz':
            fp = GzipFile(self.filename)
        else: # assume decompressed binary file
            fp = open(self.filename, 'rb')
        # Close the handle even if read() fails (previously leaked).
        try:
            data_string = fp.read()
        finally:
            fp.close()
        return data_string
    def _read_header(self):
        """Parse the fixed-size header into the self._hdr dict of
        key -> raw string value.
        """
        data_string = self._read_binary()
        self._hdr = {}
        for item in data_string[:self._header_offset].split():
            key, val = item.split('=')
            self._hdr[key] = val
    def _read_scaled_masked_field(self, field_num, dtype=np.float32):
        """Return field `field_num` as a masked array of `dtype`, with the
        header's flag value masked out and values divided by the header's
        per-field scale factor.
        """
        scale_factor = float(self._hdr['variable_scale'].split(',')[field_num])
        raw_field = self.read_raw_field(field_num)
        field = np.ma.masked_equal(raw_field, int(self._hdr['flag_value']))
        field = np.ma.asarray(field, dtype)
        field /= scale_factor
        return field
    def read_raw_field(self, field_num):
        """Read a raw data field from the file.

        Reads the requested field from file if possible. The returned
        field is unscaled and unmasked integer data.

        Parameters
        ----------
        field_num : int
            The zero-indexed field number to read.

        Returns
        -------
        field : Numpy ndarray
            The unprocessed integer data contained in the file.
        """
        dtype_list = self._hdr['variable_type'].split(',')
        # Trailing digit of e.g. 'signed_integer2' is the bytes-per-value.
        dtype_list = [int(s[-1]) for s in dtype_list]
        nfields = int(self._hdr['number_of_variables'])
        if field_num in range(nfields):
            # Start offset = header + sizes of all preceding fields.
            strt_offset = self._header_offset
            k = field_num - 1
            while k >= 0:
                strt_offset += dtype_list[k]*self._rows*self._cols
                k = k-1
        else:
            raise IOError("Can't read field number %d. File %s only contains %d fields, and fields are indexed from 0." \
                          % (field_num, self.filename, nfields))
        var_type = self._hdr['variable_type'].split(',')[field_num]
        if var_type == 'signed_integer1':
            dtype = np.int8
            end_offset = strt_offset + self._rows*self._cols
        elif var_type == 'signed_integer2':
            dtype = np.int16
            end_offset = strt_offset + 2*self._rows*self._cols
        else:
            # BUG FIX: was the Python-2-only statement form
            # `raise IOError, msg`, a SyntaxError under Python 3; the
            # call form below behaves identically on both.
            raise IOError('Badly formed header in %s' % self.filename)
        data_string = self._read_binary()
        # NOTE(review): np.fromstring was removed in NumPy 2.0; the modern
        # np.frombuffer returns a read-only view, so migrating needs a copy
        # before the in-place division in _read_scaled_masked_field.
        field = np.fromstring(data_string[strt_offset:end_offset], dtype)
        if sys.byteorder == 'little':
            # File data is stored in the opposite (big-endian) byte order.
            field = field.byteswap()
        field = field.reshape(self._rows, self._cols)
        return field
    def header(self):
        """Return a copy of the file header in a dictionary.
        """
        return dict(self._hdr)
class TRMM3B40RTFile(TRMM3B4XRTFile):
    """Reader for TRMM 3B40RT data files.

    Typical usage::

        from pytrmm import TRMM3B40RTFile
        trmm_file = TRMM3B40RTFile(file_name)
        print(trmm_file.header())
        precip = trmm_file.precip()
        print('Array dimensions:', precip.shape)

    """
    def __init__(self, filename):
        TRMM3B4XRTFile.__init__(self, filename)
        reported = self._hdr['algorithm_ID']
        if reported == '3B40RT':
            return
        # Warn (but continue) when the header claims another product.
        algo_warning = """\
The file %s is apparently not a 3B40RT file.
Reported algorithm ID is %s. Try using pytrmm.TRMM%sFile instead.
"""
        warnings.warn(algo_warning % (self.filename, reported, reported))
    def precip(self):
        """Precipitation in mm/hr as a 2D masked array (invalid data
        masked out)."""
        return self._read_scaled_masked_field(0)
    def precip_error(self):
        """Precipitation RMS error estimate in mm/hr as a 2D masked
        array (invalid data masked out)."""
        return self._read_scaled_masked_field(1)
    def total_pixels(self):
        """Total-pixel counts as a 2D masked integer array (invalid data
        masked out)."""
        return self._read_scaled_masked_field(2, dtype=np.int8)
    def ambiguous_pixels(self):
        """Ambiguous-pixel counts as a 2D masked integer array (invalid
        data masked out)."""
        return self._read_scaled_masked_field(3, dtype=np.int8)
    def rain_pixels(self):
        """Rain-pixel counts as a 2D masked integer array (invalid data
        masked out)."""
        return self._read_scaled_masked_field(4, dtype=np.int8)
    def source(self):
        """Data-source identifiers as a 2D masked integer array (invalid
        data masked out)."""
        return self._read_scaled_masked_field(5, dtype=np.int8)
class TRMM3B41RTFile(TRMM3B4XRTFile):
    """Reader for TRMM 3B41RT data files.

    Typical usage::

        from pytrmm import TRMM3B41RTFile
        trmm_file = TRMM3B41RTFile(file_name)
        print(trmm_file.header())
        precip = trmm_file.precip()
        print('Array dimensions:', precip.shape)

    """
    def __init__(self, filename):
        TRMM3B4XRTFile.__init__(self, filename)
        reported = self._hdr['algorithm_ID']
        if reported == '3B41RT':
            return
        # Warn (but continue) when the header claims another product.
        algo_warning = """\
The file %s is apparently not a 3B41RT file.
Reported algorithm ID is %s. Try using pytrmm.TRMM%sFile instead.
"""
        warnings.warn(algo_warning % (self.filename, reported, reported))
    def precip(self):
        """Precipitation in mm/hr as a 2D masked array (invalid data
        masked out)."""
        return self._read_scaled_masked_field(0)
    def precip_error(self):
        """Precipitation RMS error estimate in mm/hr as a 2D masked
        array (invalid data masked out)."""
        return self._read_scaled_masked_field(1)
    def total_pixels(self):
        """Total-pixel counts as a 2D masked integer array (invalid data
        masked out)."""
        return self._read_scaled_masked_field(2, dtype=np.int8)
class TRMM3B42RTFile(TRMM3B4XRTFile):
    """Reader for TRMM 3B42RT data files.

    Typical usage::

        from pytrmm import TRMM3B42RTFile
        trmm_file = TRMM3B42RTFile(file_name)
        print(trmm_file.header())
        precip = trmm_file.precip()
        print('Array dimensions:', precip.shape)

    """
    def __init__(self, filename):
        TRMM3B4XRTFile.__init__(self, filename)
        reported = self._hdr['algorithm_ID']
        if reported == '3B42RT':
            return
        # Warn (but continue) when the header claims another product.
        algo_warning = """\
The file %s is apparently not a 3B42RT file.
Reported algorithm ID is %s. Try using pytrmm.TRMM%sFile instead.
"""
        warnings.warn(algo_warning % (self.filename, reported, reported))
    def precip(self):
        """Precipitation in mm/hr as a 2D masked array (invalid data
        masked out)."""
        return self._read_scaled_masked_field(0)
    def precip_error(self):
        """Precipitation RMS error estimate in mm/hr as a 2D masked
        array (invalid data masked out)."""
        return self._read_scaled_masked_field(1)
    def source(self):
        """Data-source identifiers as a 2D masked integer array (invalid
        data masked out)."""
        return self._read_scaled_masked_field(2, dtype=np.int8)
    def uncalibrated_precip(self):
        """Uncalibrated precipitation in mm/hr as a 2D masked array
        (invalid data masked out)."""
        return self._read_scaled_masked_field(3)
| {
"repo_name": "sahg/pytrmm",
"path": "pytrmm/trmm3b4xrt.py",
"copies": "1",
"size": "10026",
"license": "bsd-3-clause",
"hash": 5509368062701922000,
"line_mean": 29.5670731707,
"line_max": 121,
"alpha_frac": 0.5821863156,
"autogenerated": false,
"ratio": 3.8770301624129933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4959216478012993,
"avg_score": null,
"num_lines": null
} |
__all__ = ['tsort']
from functools import reduce
def tsort(data, smallest_first=False, fewest_edges_first=False, flatten=False):
    """Topologically sort a dependency mapping.

    Parameters
    ----------
    data : dict mapping vertex -> set of vertices it depends on.
    smallest_first : sort each batch of ready vertices ascending.
    fewest_edges_first : not yet supported (kept for API compatibility).
    flatten : return a flat list instead of a list of per-level batches.

    Returns the sorted vertices; raises ValueError when the graph is
    cyclic.  The input mapping and its value sets are left unmodified.
    """
    # FIXME: support fewest_edges_first
    # BUG FIX: a shallow data.copy() shared the value sets, so the
    # discard() below mutated the caller's data; copy each set instead.
    tmp = {k: set(v) for k, v in data.items()}
    # remove self-references
    for k, v in tmp.items():
        v.discard(k)
    # initially find vertices that do not point to anything
    # (initial set() also makes the empty-input case return [] instead of
    # crashing reduce()).
    all_vertices = reduce(set.union, tmp.values(), set())
    starting_vertices = set(tmp.keys())
    empty_vertices = all_vertices - starting_vertices
    # insert empty vertices
    for k in empty_vertices:
        tmp[k] = set()
    # algorithm starts here
    sorted_vertices = []
    while True:
        # get all vertices that do not point to anything
        empty_vertices = {k for k, v in tmp.items() if not v}
        if not empty_vertices:
            break
        # if required, sort by smallest-numbered available vertex first
        if smallest_first:
            _empty_vertices = sorted(empty_vertices)
        else:
            _empty_vertices = (v for v in empty_vertices)
        # add current vertices that do not point to any other vertices
        if flatten:
            sorted_vertices.extend(_empty_vertices)
        else:
            sorted_vertices.append(_empty_vertices)
        # drop resolved vertices and remove them from every dependency set
        tmp = {
            k: (v - empty_vertices)
            for k, v in tmp.items()
            if k not in empty_vertices
        }
    if tmp:
        raise ValueError('Cyclic dependencies found')
    return sorted_vertices
if __name__ == '__main__':
    from pprint import pprint
    # Demo: each key depends on the vertices in its set; print the
    # resulting level-by-level topological ordering.
    data = {
        2: {11},
        9: {11, 8},
        10: {11, 3},
        11: {7, 5},
        8: {7, 3},
    }
    out = tsort(data, smallest_first=True)
    pprint(out)
"repo_name": "mtasic85/pytsort",
"path": "tsort.py",
"copies": "1",
"size": "2082",
"license": "mit",
"hash": 213660974581948830,
"line_mean": 25.7051282051,
"line_max": 79,
"alpha_frac": 0.5667627281,
"autogenerated": false,
"ratio": 4.06640625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.51331689781,
"avg_score": null,
"num_lines": null
} |
# __all__ = ['tvwap_trades','online_tvwap_trades']
import numpy as np
import ctypes
def _getpath():
return r"~/dev/signals/build"
def _load_signals_lib():
    # Load the compiled C "signals" shared library (libsignals) from the
    # build directory returned by _getpath().
    return np.ctypeslib.load_library("libsignals", _getpath())
def tvwap_trades(window, t, px, sz):
    """Time-windowed VWAP over trades, computed by the C signals library.

    Parameters
    ----------
    window : float
        Averaging window length (units defined by the C library).
    t, px, sz : 1-D aligned, contiguous float arrays of equal length
        (timestamps, prices, sizes).

    Returns
    -------
    res : 1-D float ndarray of the same length as `t`.
    """
    lib = _load_signals_lib()
    lib.c_tvwap.restype = None
    lib.c_tvwap.argtypes = [ctypes.c_double,
                            np.ctypeslib.c_intp,
                            np.ctypeslib.ndpointer(float, ndim=1,
                                                   flags="aligned, contiguous"),
                            np.ctypeslib.ndpointer(float, ndim=1,
                                                   flags="aligned, contiguous"),
                            np.ctypeslib.ndpointer(float, ndim=1,
                                                   flags="aligned, contiguous"),
                            np.ctypeslib.ndpointer(float, ndim=1,
                                                   flags="aligned, contiguous,"
                                                   "writeable")]
    # np.alen was removed in NumPy 1.23; builtin len() is equivalent here.
    datalen = len(t)
    res = np.empty(datalen)
    lib.c_tvwap(window, datalen, t, px, sz, res)
    return res
def online_tvwap_trades(window, t, px, sz):
    """Online (streaming-style) time-windowed VWAP over trades, computed
    by the C signals library.

    Parameters
    ----------
    window : float
        Averaging window length (units defined by the C library).
    t, px, sz : 1-D aligned, contiguous float arrays of equal length
        (timestamps, prices, sizes).

    Returns
    -------
    res : 1-D float ndarray of the same length as `t`.
    """
    lib = _load_signals_lib()
    lib.c_online_tvwap.restype = None
    lib.c_online_tvwap.argtypes = [ctypes.c_double,
                                   np.ctypeslib.c_intp,
                                   np.ctypeslib.ndpointer(float, ndim=1,
                                                          flags="aligned, contiguous"),
                                   np.ctypeslib.ndpointer(float, ndim=1,
                                                          flags="aligned, contiguous"),
                                   np.ctypeslib.ndpointer(float, ndim=1,
                                                          flags="aligned, contiguous"),
                                   np.ctypeslib.ndpointer(float, ndim=1,
                                                          flags="aligned, contiguous,"
                                                          "writeable")]
    # np.alen was removed in NumPy 1.23; builtin len() is equivalent here.
    datalen = len(t)
    res = np.empty(datalen)
    lib.c_online_tvwap(window, datalen, t, px, sz, res)
    return res
| {
"repo_name": "tambu-j/signals",
"path": "python/tvwap.py",
"copies": "1",
"size": "2280",
"license": "apache-2.0",
"hash": 2123022970537283000,
"line_mean": 39,
"line_max": 80,
"alpha_frac": 0.4456140351,
"autogenerated": false,
"ratio": 4.427184466019417,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5372798501119417,
"avg_score": null,
"num_lines": null
} |
# BUG FIX: __all__ must be a sequence of names; a bare string makes
# `from module import *` iterate over its characters and fail.
__all__ = ['tyrving_score']
import typing
from athlib import parse_hms, normalize_gender, normalize_event_code, is_hand_timing
from athlib.codes import PAT_RUN
from typing import Union, Dict, Tuple
class TyrvingCalculator:
    """Computes Tyrving points for one (gender, event, kind) table entry.

    `kind` selects the scoring method ('race', 'jump', 'stav', 'throw',
    'pv'); `args` holds that method's table parameters.
    """
    def __init__(self, gender, event_code, kind, args):
        self.gender = gender
        self.event_code = event_code  # so we can talk about it in errors
        self.kind = kind  # race jump pv & throw
        self.args = args

    def points(self, age: int, perf: Union[int, float, str], timing_kind: str = 'automatic') -> int:
        '''compute points according to kind and params'''
        meth = getattr(self, self.kind + '_points', self.bad_points)
        self.timing_kind = timing_kind  # read later by race_points
        age = int(age)
        return meth(age, perf)

    @property
    def ident(self) -> str:
        # Human-readable identity used in error messages.
        return '%s(%r,%r,%r)' % (self.__class__.__name__, self.event_code, self.gender, self.kind)

    def bad_points(self, *args, **kwds):
        # Fallback when self.kind names no *_points method.
        raise ValueError('cannot compute points for %s' % self.ident)

    def get_base_perf(self, age: int, yv: Union[Dict, Tuple]) -> int:
        """Look up the 1000-point base performance for `age`.

        `yv` is either a dict {age: base} or a pair (first_age, values)
        listing bases for consecutive ages starting at first_age.
        """
        if isinstance(yv, dict):
            base_perf = yv.get(age, None)
        else:
            y, v = yv
            base_perf = v[age - y] if y <= age < y + len(v) else None
        if base_perf is None:
            raise ValueError('cannot obtain base performance for %s at age=%s' % (self.ident, age))
        return base_perf

    def race_points(self, age: int, perf: Union[int, float, str]) -> int:
        """Points for a timed event; perf may be seconds or a time string
        (comma decimal marks and '.'/':' separators accepted)."""
        dist, multiplier, yv = self.args
        base_perf = self.get_base_perf(age, yv)
        #perf is a time
        v = perf
        if not isinstance(v, (int, float)):
            # BUG FIX: the result of replace() was discarded, so comma
            # decimal marks ('9,25') crashed float() below.
            v = v.replace(',', '.')
            while v.count('.') > 1:
                v = v.replace('.', ':', 1)  # e.g. '1.45.3' -> '1:45.3'
            v = parse_hms(v) if ':' in v else float(v)
        if self.timing_kind == 'manual':
            # Distance-dependent hand-timing correction.
            inc = 0.24 if dist in (100, 110, 200) else 0.20 if dist in (40, 60, 80, 300) else 0.14 if dist == 400 else 0
            v += inc  #correct for manual timing
        return max(0, int(1000 + 1e-8 + (base_perf - v) * (multiplier / (0.01 if dist <= 500 else 0.1))))

    def jump_points(self, age, perf: Union[int, float, str]) -> int:
        """Points for a horizontal/vertical jump; perf is a distance."""
        multiplier, yv = self.args
        base_perf = self.get_base_perf(age, yv)
        #perf is a distance
        v = perf
        if not isinstance(v, (int, float)):
            v = float(v.replace(',', '.'))
        return max(0, int(1000 + 1e-8 + multiplier * (v - base_perf) * 100))

    def stav_points(self, age, perf: Union[int, float, str]) -> int:
        '''this works for all the piecewise linear distance events'''
        multipliers, yvs = self.args
        levels = [self.get_base_perf(age, yv) for yv in yvs]
        v = perf
        if not isinstance(v, (int, float)):
            v = float(v.replace(',', '.'))
        diffs = 100 * (v - levels[0]), 100 * (v - levels[1])
        return max(0, int(1000 + 1e-8 + (diffs[0] * multipliers[0] if diffs[0] >= 0
                                         else diffs[0] * multipliers[1] if diffs[1] > 0
                                         else diffs[1] * multipliers[2] + levels[2] - 1000)))

    # Throws and pole vault share the piecewise-linear scheme.
    throw_points = stav_points
    pv_points = stav_points
def tyrving_score(gender: str, age: int, event_code: str, perf: Union[int, float, str]) -> int:
    """Compute the Tyrving points for one performance, looking up the
    scoring parameters in the gender/event tables."""
    # Hand-timed marks in running events get a timing correction later.
    timing = 'manual' if (PAT_RUN.match(event_code)
                          and is_hand_timing(perf)) else 'automatic'
    event_code = normalize_event_code(event_code)
    gender = normalize_gender(gender)
    by_gender = _tyrvingTables.get(gender, None)
    if by_gender is None:
        raise ValueError('Cannot get a Tyrving table for gender=%r' % gender)
    entry = by_gender.get(event_code, None)
    if not entry:
        raise ValueError('Cannot get a Tyrving calculation for gender=%r event_code=%r' % (gender, event_code))
    kind, args = entry
    return TyrvingCalculator(gender, event_code, kind, args).points(age, perf, timing_kind=timing)
#start tyrving tables created by tyrving-translate.py Mon Feb 24 13:59:28 2020
# Parameter tables for tyrving_score(), keyed first by normalised gender
# ('F' or 'M') and then by normalised event code.  Each value is a
# [kind, args] pair: ``kind`` names the TyrvingCalculator scoring method
# family ('race', 'jump', 'pv', 'throw' — matching the *_points methods
# above) and ``args`` are its parameters.
# NOTE(review): the trailing [N, [values...]] lists appear to be a first
# age N followed by one reference performance per age — confirm against
# TyrvingCalculator.get_base_perf, which is not visible here.
# This region is machine-generated by tyrving-translate.py (see the
# '#start tyrving tables' marker above); do not hand-edit — regenerate.
_tyrvingTables = {
'F': {
'40': ['race', [40, 3.5, [10, [6.6, 6.4]]]],
'60': ['race', [60, 2.7, [10, [9.25, 8.85, 8.55, 8.4, 8.25, 8.15, 8.05, 8,
7.95, 7.9]]]],
'80': ['race', [80, 2.1, [10, [12.2, 11.6, 11.1, 10.85, 10.65, 10.5, 10.35,
10.25, 10.15, 10.1]]]],
'100': ['race', [100, 1.6, [12, [13.8, 13.4, 13.1, 12.9, 12.75, 12.6, 12.5,
12.4]]]],
'200': ['race', [200, 0.78, [11, [29.7, 28.5, 27.7, 27.1, 26.5, 26.15,
25.9, 25.7, 25.55]]]],
'300': ['race', [300, 0.5, [12, [45.8, 44.5, 43.4, 42.5, 41.7, 41.2, 40.8,
40.5]]]],
'400': ['race', [400, 0.38, [14, [61.6, 60, 58.8, 58, 57.5, 57]]]],
'600': ['race', [600, 2.4, [10, [115, 110, 105, 102, 100, 98.5, 97, 96, 95,
94]]]],
'800': ['race', [800, 1.5, [14, [141, 138.5, 136, 134, 133, 132.5]]]],
'1000': ['race', [1000, 1, [14, [189, 186.5, 184, 181.5, 180, 179]]]],
'1500': ['race', [1500, 0.6, [12, [315, 305, 295, 286, 282, 279, 277,
275]]]],
'2000': ['race', [2000, 0.4, [14, [418, 406, 398, 393, 389, 385]]]],
'3000': ['race', [3000, 0.23, [16, [625, 615, 607, 600]]]],
'5000': ['race', [5000, 0.13, [17, [1080, 1065, 1055]]]],
'10000': ['race', [10000, 0.06, [18, [2310, 2280]]]],
'1000W': ['race', [1000, 0.7, [11, [320, 303, 288, 280, 275, 272, 270, 268,
266]]]],
'2000W': ['race', [2000, 0.3, [13, [640, 620, 600, 588, 578, 574, 570]]]],
'3000W': ['race', [3000, 0.18, [13, [1002, 962, 936, 920, 906, 900,
895]]]],
'5000W': ['race', [5000, 0.1, [14, [1660, 1615, 1585, 1555, 1538, 1524]]]],
'10000W': ['race', [10000, 0.05, [15, [3540, 3450, 3390, 3330, 3300]]]],
'20000W': ['race', [20000, 0.02, [18, [3210, 3120]]]],
'60H68cm6.5m': ['race', [60, 1.9, [10, [11.6, 10.9]]]],
'60H76.2cm7m': ['race', [60, 1.9, [12, [10.5]]]],
'60H76.2cm7.5m': ['race', [60, 1.9, [13, [10.1, 9.7]]]],
'80H76.2cm8m': ['race', [80, 1.4, [15, [12.5, 12.2]]]],
'100H76.2cm8.5m': ['race', [100, 1, [17, [15]]]],
'100H84cm8.5m': ['race', [100, 1, [18, [14.9, 14.7]]]],
'200H68cm19m': ['race', [200, 0.5, [11, [36, 34, 32.4]]]],
'200H76.2cm19m': ['race', [200, 0.5, [14, [31.6, 31, 30.4, 30, 29.7,
29.5]]]],
'300H76.2cm35m': ['race', [300, 0.4, [14, [48, 47, 46.4, 45.9, 45.5,
45.2]]]],
'400H76.2cm35m': ['race', [400, 0.3, [18, [63.1, 62.5]]]],
'1500SC': ['race', [1500, 0.46, [14, [340, 330, 325]]]],
'2000SC': ['race', [2000, 0.35, [17, [430, 420, 410]]]],
'3000SC': ['race', [3000, 0.2, [17, [700, 670, 650]]]],
'HJ': ['jump', [7.5, [10, [1.22, 1.34, 1.44, 1.52, 1.58, 1.61, 1.64, 1.66,
1.68, 1.7]]]],
'SHJ': ['jump', [9.5, [10, [1, 1.1, 1.2, 1.26, 1.3, 1.33, 1.35, 1.36, 1.37,
1.38]]]],
'PV': ['pv', [[2, 4, 8],
[[10, [1.8, 2, 2.2, 2.5, 2.8, 2.95, 3.1, 3.2, 3.3, 3.4]],
[10, [1.44, 1.6, 1.76, 2, 2.24, 2.36, 2.48, 2.56, 2.64, 2.72]],
[10, [856, 840, 824, 800, 776, 764, 752, 744, 736, 728]]]]],
'LJ': ['jump', [2.1, [10, [4.1, 4.35, 4.7, 5, 5.25, 5.36, 5.47, 5.55, 5.63,
5.7]]]],
'SLJ': ['jump', [6, [10, [2.05, 2.18, 2.3, 2.4, 2.48, 2.54, 2.59, 2.62,
2.64, 2.66]]]],
'TJ': ['jump', [1, [10, [8, 8.8, 9.5, 10.2, 10.8, 11.4, 11.9, 12.2, 12.4,
12.6]]]],
'SP4K': ['throw', [[0.3, 0.6, 1.2], [[18, [11.4, 11.7]],
[18, [9.12, 9.36]], [18, [863, 859]]]]],
'SP3K': ['throw', [[0.3, 0.6, 1.2], [[14, [10.8, 11.4, 12, 12.6]],
[14, [8.64, 9.12, 9.6, 10.08]], [14, [870, 863, 856, 848]]]]],
'SP2K': ['throw', [[0.3, 0.6, 1.2], [[10, [6.8, 8.8, 10.2, 11.2]],
[10, [5.44, 7.04, 8.16, 8.96]], [10, [918, 894, 877, 865]]]]],
'DT1K': ['throw', [[0.15, 0.3, 0.6], [[16, [37, 40, 42, 43]],
[16, [29.6, 32, 33.6, 34.4]], [16, [778, 760, 748, 742]]]]],
'DT0.75K': ['throw', [[0.15, 0.3, 0.6], [[14, [38, 43]],
[14, [30.4, 34.4]], [14, [772, 742]]]]],
'DT0.6K': ['throw', [[0.15, 0.3, 0.6], [[10, [18, 26, 32, 38]],
[10, [14.4, 20.8, 25.6, 30.4]], [10, [892, 844, 808, 772]]]]],
'HT4K': ['throw', [[0.12, 0.25, 0.5], [[18, [46, 48]], [18, [36.8, 38.4]],
[18, [770, 760]]]]],
'HT3K': ['throw', [[0.12, 0.25, 0.5], [[14, [34, 38, 41, 44]],
[14, [27.2, 30.4, 32.8, 35.2]], [14, [830, 810, 795, 780]]]]],
'HT2K': ['throw', [[0.12, 0.25, 0.5], [[10, [20, 27, 33, 38]],
[10, [16, 21.6, 26.4, 30.4]], [10, [900, 865, 835, 810]]]]],
'JT600': ['throw', [[0.13, 0.25, 0.5], [[18, [42, 43]],
[18, [33.6, 34.4]], [18, [790, 785]]]]],
'JT500': ['throw', [[0.13, 0.25, 0.5], [[15, [38, 40, 42]],
[15, [30.4, 32, 33.6]], [15, [810, 800, 790]]]]],
'JT400': ['throw', [[0.13, 0.25, 0.5], [[10, [20, 27, 32, 36, 39]],
[10, [16, 21.6, 25.6, 28.8, 31.2]], [10, [900, 865, 840, 820, 805]]]]],
'OT150': ['throw', [[0.1, 0.2, 0.4], [[10, [38, 43, 50, 56, 62]],
[10, [30.4, 34.4, 40, 44.8, 49.6]], [10, [848, 828, 800, 776, 752]]]]],
'BT1K': ['throw', [[0.13, 0.25, 0.5], [[10, [20, 26, 31]],
[10, [16, 20.8, 24.8]], [10, [900, 870, 845]]]]]
},
'M': {
'40': ['race', [40, 3.5, [10, [6.6, 6.4]]]],
'60': ['race', [60, 2.7, [10, [9.2, 8.8, 8.4, 8, 7.75, 7.55, 7.4, 7.3,
7.25, 7.2]]]],
'80': ['race', [80, 2.2, [10, [12.15, 11.55, 10.9, 10.4, 10.05, 9.8, 9.6,
9.45, 9.35, 9.3]]]],
'100': ['race', [100, 1.7, [12, [13.5, 12.8, 12.35, 11.95, 11.7, 11.5,
11.35, 11.25]]]],
'200': ['race', [200, 0.85, [11, [29.2, 27.6, 26, 24.8, 24, 23.5, 23,
22.65, 22.5]]]],
'300': ['race', [300, 0.6, [12, [45, 42, 40, 38.6, 37.6, 36.85, 36.45,
36.15]]]],
'400': ['race', [400, 0.4, [14, [56.5, 54.3, 52.5, 51.5, 51, 50.4]]]],
'600': ['race', [600, 2.6, [10, [112, 105, 99, 94.5, 91, 88.5, 86, 84, 83,
82]]]],
'800': ['race', [800, 1.8, [14, [129, 124, 120, 117.5, 115.5, 114]]]],
'1000': ['race', [1000, 1.1, [14, [169, 164, 159, 156, 153, 150]]]],
'1500': ['race', [1500, 0.7, [12, [294, 278, 268, 260, 252, 246, 242,
240]]]],
'2000': ['race', [2000, 0.45, [14, [380, 365, 355, 346, 341, 338]]]],
'3000': ['race', [3000, 0.27, [16, [552, 540, 530, 523]]]],
'5000': ['race', [5000, 0.15, [17, [950, 930, 910]]]],
'10000': ['race', [10000, 0.06, [18, [1980, 1950]]]],
'1000W': ['race', [1000, 0.7, [11, [320, 300, 282, 271, 262, 254, 246, 242,
240]]]],
'2000W': ['race', [2000, 0.3, [13, [594, 570, 548, 531, 518, 510, 505]]]],
'3000W': ['race', [3000, 0.18, [13, [924, 892, 854, 830, 810, 796, 788]]]],
'5000W': ['race', [5000, 0.11, [14, [1600, 1500, 1440, 1395, 1365,
1350]]]],
'10000W': ['race', [10000, 0.05, [15, [3180, 3060, 2960, 2890, 2850]]]],
'20000W': ['race', [20000, 0.02, [18, [6000, 5880]]]],
'60H68cm6.5m': ['race', [60, 2, [10, [11.3, 10.5]]]],
'60H76.2cm7m': ['race', [60, 2, [12, [10.2]]]],
'60H76.2cm7.5m': ['race', [60, 2, [13, [9.8]]]],
'80H84cm8m': ['race', [80, 1.4, [14, [12.05]]]],
'100H84cm8.5m': ['race', [100, 1.1, [15, [14]]]],
'100H91.4cm8.5m': ['race', [100, 1.1, [16, [14.5]]]],
'110H91.4cm9.14m': ['race', [110, 1, [17, [15.3]]]],
'110H1cm9.14m': ['race', [110, 1, [17, [15.8, 15.3, 15]]]],
'110H106.7cm9.14m': ['race', [110, 1, [18, [15.7, 15.4]]]],
'200H68cm18.29m': ['race', [200, 0.6, [11, [34.8, 32.8, 31.2]]]],
'200H76.2cm18.29m': ['race', [200, 0.6, [14, [29.2, 28.4, 27.6, 27, 26.5,
26]]]],
'300H76.2cm35m': ['race', [300, 0.46, [14, [44.7, 43]]]],
'300H84cm35m': ['race', [300, 0.46, [16, [41.8, 41]]]],
'300H91.4cm35m': ['race', [300, 0.46, [18, [40.5, 40]]]],
'400H84cm35m': ['race', [400, 0.34, [17, [57]]]],
'400H91.4cm35m': ['race', [400, 0.34, [18, [56, 55.5]]]],
'1500SC': ['race', [1500, 0.55, [14, [300, 288, 280]]]],
'2000SC': ['race', [2000, 0.41, [17, [385, 375, 370]]]],
'3000SC': ['race', [3000, 0.27, [17, [610, 595, 585]]]],
'HJ': ['jump', [7, [10, [1.25, 1.38, 1.5, 1.61, 1.72, 1.8, 1.88, 1.93,
1.96, 1.99]]]],
'SHJ': ['jump', [8.5, [10, [1, 1.1, 1.2, 1.29, 1.37, 1.44, 1.5, 1.55, 1.59,
1.61]]]],
'PV': ['pv', [[1.7, 3.5, 7],
[[10, [2, 2.3, 2.6, 2.9, 3.2, 3.5, 3.8, 4.05, 4.3, 4.5]],
[10, [1.6, 1.84, 2.08, 2.32, 2.56, 2.8, 3.04, 3.24, 3.44, 3.6]],
[10, [860, 839, 818, 797, 776, 755, 734, 716, 699, 685]]]]],
'LJ': ['jump', [2, [10, [4.15, 4.55, 4.95, 5.35, 5.75, 6.15, 6.4, 6.6, 6.8,
6.95]]]],
'SLJ': ['jump', [5, [10, [2.1, 2.28, 2.45, 2.6, 2.75, 2.88, 2.98, 3.05,
3.1, 3.15]]]],
'TJ': ['jump', [1, [10, [8.2, 9.2, 10.1, 11.1, 12, 12.8, 13.3, 13.7, 14,
14.3]]]],
'SP6K': ['throw', [[0.3, 0.6, 1.2], [[18, [14.6, 15.2]],
[18, [11.68, 12.16]], [18, [824, 817]]]]],
'SP5K': ['throw', [[0.3, 0.6, 1.2], [[16, [14.6, 15.8]],
[16, [11.68, 12.64]], [16, [824, 810]]]]],
'SP4K': ['throw', [[0.3, 0.6, 1.2], [[14, [13.3, 15.3]],
[14, [10.64, 12.24]], [14, [840, 816]]]]],
'SP3K': ['throw', [[0.3, 0.6, 1.2], [[12, [11.3, 13.5]],
[12, [9.04, 10.8]], [12, [864, 838]]]]],
'SP2K': ['throw', [[0.3, 0.6, 1.2], [[10, [8.5, 10.5]], [10, [6.8, 8.4]],
[10, [898, 874]]]]],
'DT1.75K': ['throw', [[0.13, 0.25, 0.5], [[18, [46, 48]],
[18, [36.8, 38.4]], [18, [770, 760]]]]],
'DT1.5K': ['throw', [[0.13, 0.25, 0.5], [[16, [45, 48]], [16, [36, 38.4]],
[16, [775, 760]]]]],
'DT1K': ['throw', [[0.13, 0.25, 0.5], [[14, [44, 51]], [14, [35.2, 40.8]],
[14, [780, 745]]]]],
'DT0.75K': ['throw', [[0.13, 0.25, 0.5], [[12, [32, 40]],
[12, [25.6, 32]], [12, [840, 800]]]]],
'DT0.6K': ['throw', [[0.13, 0.25, 0.5], [[10, [22, 28]],
[10, [17.6, 22.4]], [10, [890, 860]]]]],
'HT6K': ['throw', [[0.1, 0.2, 0.4], [[18, [56, 62]], [18, [44.8, 49.6]],
[18, [776, 752]]]]],
'HT5K': ['throw', [[0.1, 0.2, 0.4], [[16, [52, 59]], [16, [41.6, 47.2]],
[16, [792, 764]]]]],
'HT4K': ['throw', [[0.1, 0.2, 0.4], [[14, [41, 50]], [14, [32.8, 40]],
[14, [836, 800]]]]],
'HT3K': ['throw', [[0.1, 0.2, 0.4], [[13, [37]], [13, [29.6]],
[13, [852]]]]],
'HT2K': ['throw', [[0.1, 0.2, 0.4], [[10, [22, 30, 38]],
[10, [17.6, 24, 30.4]], [10, [912, 880, 848]]]]],
'JT800': ['throw', [[0.1, 0.2, 0.4], [[18, [59, 62]], [18, [47.2, 49.6]],
[18, [764, 752]]]]],
'JT700': ['throw', [[0.1, 0.2, 0.4], [[16, [55, 60]], [16, [44, 48]],
[16, [780, 760]]]]],
'JT600': ['throw', [[0.1, 0.2, 0.4], [[14, [46, 52]], [14, [36.8, 41.6]],
[14, [816, 792]]]]],
'JT400': ['throw', [[0.1, 0.2, 0.4], [[10, [25, 32, 38, 45]],
[10, [20, 25.6, 30.4, 36]], [10, [900, 872, 848, 820]]]]],
'OT150': ['throw', [[0.08, 0.15, 0.3], [[10, [48, 55, 63, 70, 80]],
[10, [38.4, 44, 50.4, 56, 64]], [10, [856, 835, 811, 790, 760]]]]],
'BT1K': ['throw', [[0.08, 0.15, 0.3], [[10, [24, 30, 36]],
[10, [19.2, 24, 28.8]], [10, [928, 910, 892]]]]]
}
}
#end tyrving tables
| {
"repo_name": "openath/athlib",
"path": "athlib/tyrving_score.py",
"copies": "1",
"size": "15114",
"license": "apache-2.0",
"hash": 5321353391485518000,
"line_mean": 49.5484949833,
"line_max": 115,
"alpha_frac": 0.4559348948,
"autogenerated": false,
"ratio": 2.2057793345008756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3161714229300876,
"avg_score": null,
"num_lines": null
} |
__all__ = ['UniformBlock']


class UniformBlock:
    '''
        UniformBlock

        Introspection handle for a uniform block declared in a shader
        program.  Instances are created by the renderer internals — the
        constructor is not meant to be called directly and raises
        ``TypeError``.
    '''

    __slots__ = ['mglo', '_index', '_size', '_name', 'extra']

    def __init__(self):
        self.mglo = None  # Internal representation for debug purposes only.
        self._index = None  # uniform block index within the program
        self._size = None  # block size in bytes
        self._name = None  # block name as declared in the shader
        self.extra = None  # Any - Attribute for storing user defined objects
        # Direct construction is forbidden; instances come from the internals.
        raise TypeError()

    def __repr__(self):
        return '<UniformBlock: %d>' % self._index

    @property
    def binding(self) -> int:
        '''
            int: The binding of the uniform block.
        '''
        return self.mglo.binding

    @binding.setter
    def binding(self, binding):
        self.mglo.binding = binding

    @property
    def value(self) -> int:
        '''
            int: The value of the uniform block.
        '''
        return self.mglo.value

    @value.setter
    def value(self, value):
        # BUG FIX: the setter previously assigned ``self.mglo.binding``,
        # while the getter reads ``self.mglo.value`` — write the same
        # attribute the getter reads so the property round-trips.
        self.mglo.value = value

    @property
    def name(self) -> str:
        '''
            str: The name of the uniform block.
        '''
        return self._name

    @property
    def index(self) -> int:
        '''
            int: The index of the uniform block.
        '''
        return self._index

    @property
    def size(self) -> int:
        '''
            int: The size of the uniform block.
        '''
        return self._size
| {
"repo_name": "cprogrammer1994/ModernGL",
"path": "moderngl/program_members/uniform_block.py",
"copies": "1",
"size": "1409",
"license": "mit",
"hash": 7500322488437588000,
"line_mean": 19.7205882353,
"line_max": 78,
"alpha_frac": 0.5117104329,
"autogenerated": false,
"ratio": 4.256797583081571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5268508015981571,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.