index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
990,700 | 6f1631db7ac048861f8b30f209e0297d580c221b | import turtle
turtle.speed(100)          # fastest animation setting
turtle.shape('turtle')
# Draw 9 concentric squares of growing side (50, 100, ... 450); after each
# square, hop (pen up) along the diagonal to the next square's corner.
# NOTE(review): indentation was lost in this dump — the loop body below is a
# reconstruction (all statements inside the for); confirm against the original.
for i in range (1, 10, 1):
    turtle.forward(50*i)
    turtle.left(90)
    turtle.forward(50*i)
    turtle.left(90)
    turtle.forward(50*i)
    turtle.left(90)
    turtle.forward(50*i)
    # turn onto the diagonal and move half the diagonal of a 50x50 cell
    turtle.right(45)
    turtle.penup()
    turtle.forward(50*2**(1/2)/2)
    turtle.left(135)
    turtle.pendown()
|
990,701 | 9688b7734208ed97d5ee7c625164e1c53c9f50f2 | import re
# '^<HTML>' anchored per-line: re.MULTILINE makes ^ also match right after \n.
pattern = re.compile(r'^<HTML>', re.MULTILINE)

# Probe the anchor behaviour: matches at string start and after a newline,
# but not when the tag is preceded by a space on the same line.
for sample in ("<HTML>", " <HTML>", " \n<HTML>"):
    pattern.search(sample)
|
990,702 | af1ce09e1642ce1686a7f19b8cb1fc807e4b69d0 | import os, sys
new_path = os.path.dirname(os.getcwd())
sys.path.append(new_path+ "/scripts")
import unittest
from math import pi
from circles import circles_area
class TestCircleArea(unittest.TestCase):
    """Unit tests for circles.circles_area (imported from ../scripts)."""

    def test_area(self):
        # Unit circle has area pi; a zero radius yields area 0.
        self.assertAlmostEqual(circles_area(1), pi)
        self.assertAlmostEqual(circles_area(0), 0)
|
990,703 | 803e710bf8683d0787be44a51f20d34281fb7cb7 | import csv
from load_trucks import get_hash_table
# Package hash table, pre-filled by load_trucks.
hash_table = get_hash_table()
# Read distance & address csv files
# Big O = O(1)
# distance_list: 2-D matrix of distance strings (cells may be '' — the file
# only fills one triangle; see get_current_distance for the mirror lookup).
with open('./data/distances.csv', 'r', encoding='utf-8-sig') as distance_csv:
    distance_list = list(csv.reader(distance_csv))
# address_list rows: column 0 is the location id, column 2 the street address
# (see address_lookup).
with open('./data/addresses.csv', 'r', encoding='utf-8-sig') as address_csv:
    address_list = list(csv.reader(address_csv))
# Lookup address
# Big O = O(n)
# Lookup address
# Big O = O(n)
def address_lookup(address):
    """Return the integer location id whose row matches *address*.

    Returns None when the address is not present (same as the original's
    implicit fall-through).
    """
    return next(
        (int(row[0]) for row in address_list if row[2] == address),
        None,
    )
# Get total distance traveled
# Big O = O(1)
# Get total distance traveled
# Big O = O(1)
def get_total_distance(total, curr_index, dest_index):
    """Return *total* plus the distance between the two location indices.

    Bug fix: when the cell is empty, the original read `[dest_index][curr_index]`
    (a bare literal list, dropping `distance_list`), which raised IndexError or
    returned garbage; the mirrored matrix cell is what was intended (compare
    get_current_distance).
    """
    distance = distance_list[curr_index][dest_index]
    if distance == '':
        # the CSV only fills one triangle of the matrix — use the mirror cell
        distance = distance_list[dest_index][curr_index]
    return total + float(distance)
# Get distance from current location to next location
# Big O = O(1)
# Get distance from current location to next location
# Big O = O(1)
def get_current_distance(curr_index, dest_index):
    """Distance between two location indices, as a float.

    Empty cells fall back to the mirrored cell (the CSV matrix is triangular).
    """
    cell = distance_list[curr_index][dest_index]
    if cell != '':
        return float(cell)
    return float(distance_list[dest_index][curr_index])
# Truck lists for packages: optimized delivery order (package ids) for each
# truck, appended to by calc_shortest_distance().
first_truck_indices = []
second_truck_indices = []
third_truck_indices = []
# Recursive function that caculates the shortest distance to the next delivery point using a "Greedy" Algorithm.
# The outer loop looks up every package in the hash table then checks if the distances to it is the closest distance
# from the current distance, if so it sets that is the new lowest_distance
#
# The inner loop then checks if the current package distance is equal to the lowest distance, if so it places it on the
# truck and pops that id from the list then sets the current_location to the location of the package that was placed on
#
# the truck. Then it recursively calls calc_shortest_distance() with the shorten load list, truck number, and current location
# Big O = O(n^2)
# Recursive "Greedy" nearest-neighbour routing.
# Big O = O(n^2)
def calc_shortest_distance(load, truck, curr_location):
    """Greedily order *load* (a list of package ids) onto *truck* (1-3).

    Pass 1 finds the closest remaining delivery location from
    *curr_location*; pass 2 places the first package at that distance on the
    truck's index list, removes it from *load*, and recurses from the new
    location until *load* is empty.

    Fixes over the original: no longer pops from `load` while iterating it,
    no longer shadows the builtin `id`, the three copy-pasted per-truck
    branches are collapsed into a dict lookup, and the blanket
    `except IndexError: pass` (which hid real lookup failures) is removed.
    """
    if not load:
        return load
    target = {1: first_truck_indices,
              2: second_truck_indices,
              3: third_truck_indices}.get(truck)
    if target is None:
        # unknown truck number: nothing to place (original silently did nothing)
        return load
    # Pass 1: distance to the closest remaining package.
    # 50.0 is the assumed upper bound on any single leg — TODO confirm.
    lowest_distance = 50.0
    location = 0
    for package_id in load:
        package = hash_table.lookup(package_id)
        next_location = address_lookup(package.address)
        dist = get_current_distance(curr_location, next_location)
        if dist <= lowest_distance:
            lowest_distance = dist
            location = next_location
    # Pass 2: place the first package at that distance, then recurse.
    for package_id in load:
        package = hash_table.lookup(package_id)
        next_location = address_lookup(package.address)
        if get_current_distance(curr_location, next_location) == lowest_distance:
            target.append(package.id)
            load.remove(package_id)
            return calc_shortest_distance(load, truck, location)
    return load
# Get filled hash table
# Big O = O(1)
def get_hash_table():
    """Return the module-level package hash table (filled by load_trucks)."""
    return hash_table
# Get optimized first truck package indices
# Big O = O(1)
def get_first_truck_indices():
    """Package ids routed onto truck 1, in greedy delivery order."""
    return first_truck_indices
# Get optimized second truck package indices
# Big O = O(1)
def get_second_truck_indices():
    """Package ids routed onto truck 2, in greedy delivery order."""
    return second_truck_indices
# Get optimized third truck package indices
# Big O = O(1)
def get_third_truck_indices():
    """Package ids routed onto truck 3, in greedy delivery order."""
    return third_truck_indices
|
990,704 | 6511be134d4641f351052dc567f29ac852731f04 | #! /usr/bin/python
###################
# ConfigParser.py #
###################
import logging
log = logging.getLogger('ConfigParser')
class ParseConfig:
    """ Simple config file support -> main-app passes file for required args

    The file is scanned for a "## db_args ##" marker and an optional
    "## table_args ##" marker; "key = value" lines inside each section are
    collected into separate dicts, reachable via db_args()/table_args().

    Fixes over the original: the dicts and region marker are now *instance*
    attributes (as class attributes every ParseConfig shared one dict), the
    file is closed after parsing, and split uses maxsplit=1 so values may
    themselves contain '=' (maxsplit=2 raised ValueError on such lines).
    """

    def __init__(self, conf_to_parse):
        self._db_args = {}
        self._table_args = {}
        self.region = []
        # `with` closes the handle; the attribute is kept for compatibility.
        with open(conf_to_parse, 'rt') as opened_conf_file:
            self.opened_conf_file = opened_conf_file
            self.parse(opened_conf_file)

    def parse(self, opened_conf_file):
        """Feed every line of the open file through parseline()."""
        for line in opened_conf_file:
            self.parseline(line)

    def parseline(self, line):
        """Handle one line: section markers switch region, key=value is stored."""
        if not self.region:
            # everything before the db_args marker is ignored
            if "## db_args ##" not in line:
                return
            self.region = ['db_args']
        elif "## table_args ##" in line:
            self.region = ['table_args']
            return
        if "=" not in line:
            return
        conf_line_key, conf_line_value = line.split('=', 1)
        target = self._db_args if self.region == ['db_args'] else self._table_args
        target[conf_line_key.strip()] = conf_line_value.strip()

    def db_args(self):
        """Dict of key/value pairs found in the db_args section."""
        return self._db_args

    def table_args(self):
        """Dict of key/value pairs found in the table_args section."""
        return self._table_args
def test():
    """Parse the config named on the command line (or the default) and print it.

    Bug fix: the module only imports sys under the __main__ guard, so calling
    test() from an importing module raised NameError; import it locally here.
    """
    import sys  # local: module-level sys is only imported under __main__
    conf_to_parse = sys.argv[1] if len(sys.argv) > 1 else 'PriorityManager.conf'
    try:
        conf = ParseConfig(conf_to_parse)
    except IOError as e:
        print('could not open {},'.format(conf_to_parse), e)
    else:
        db_args, table_args = conf.db_args(), conf.table_args()
        print("DB_args:")
        for k in sorted(db_args):
            print('\t{} is [{}]'.format(k, db_args[k]))
        print("\nTable_args:")
        for k in sorted(table_args):
            print('\t{} is [{}]'.format(k, table_args[k]))
if __name__ == "__main__":
import sys
test()
|
990,705 | 0ef3b06383bd0da51efa44014a8fc03bb518ee75 | #!/usr/local/bin/python3
# -*- coding = 'utf-8' -*-
# This is time line;
import sys
import os
import curses
from time import sleep
class Timelinebar:
    """Console progress bar: ``****----NN%<info>`` redrawn in place with \\r,
    with an alternative curses-pad renderer (curses_run)."""

    # NOTE(review): these are class attributes, so info/count state is shared
    # across instances until shadowed by assignment — confirm single-instance
    # use is intended. Identifier spellings (lenth/totlecount) kept as-is.
    progress_bar_lenth = 25 # progress bar progress_bar_lenth******-------
    info = ""        # suffix text shown after the percentage
    info_lenth = 0   # cached len(info)
    count = 0        # steps completed so far (advanced by update())
    # terminal size sampled once, at class-definition time
    console_col, console_lin = os.get_terminal_size()

    def __init__(self, totlecount):
        # totlecount: the step count that corresponds to 100%
        self.totlecount = totlecount

    def setinfo(self, info):
        """Set the info suffix (space-prefixed) and cache its length."""
        self.info = " " + info
        self.info_lenth = len(self.info)

    def flush(self, count, padding=''):
        """Redraw the bar for *count* completed steps; *padding* is appended."""
        rate = count/self.totlecount
        # blank the current console line, then draw over it
        print('\r' + ' '*self.console_col + '\r', end='', flush=True)
        print('\r'
        + '*'*int(rate*self.progress_bar_lenth)
        + '-'*(self.progress_bar_lenth - int(rate*self.progress_bar_lenth))
        + str(int(rate*100))
        + '%'
        + self.info
        + padding
        + '\r'
        , end=''
        , flush=True)

    def toString(self, count):
        """Return the bar text for *count* steps (no carriage returns)."""
        rate = count/self.totlecount
        return(
        '*'*int(rate*self.progress_bar_lenth)
        + '-'*(self.progress_bar_lenth - int(rate*self.progress_bar_lenth))
        + str(int(rate*100))
        + '%'
        + self.info
        )

    def update(self, pad=''):
        """Advance the bar by one step and redraw."""
        self.count += 1
        self.flush(self.count, pad)

    def run(self, result_queue):
        """Drive the bar from a queue: each received message advances one step."""
        while(True):
            if(self.count == self.totlecount): break
            self.update(result_queue.get())

    def curses_run(self, result_queue):
        """Queue-driven rendering into a curses pad instead of plain prints.

        NOTE(review): refresh() runs before addstr(), so the final strings
        drawn may never be flushed to screen — confirm the intended order.
        """
        wholescr = curses.initscr()
        stdscr = curses.newpad(100, 100)
        while(True):
            self.count += 1
            if(self.count == self.totlecount): break
            messege_string = result_queue.get()
            title_string = self.toString(self.count)
            # pad to terminal width so older, longer output is overwritten
            messege_string += ' ' * (self.console_col - len(messege_string))
            stdscr.refresh(0, 0, 5, 5, 20, 75)
            stdscr.addstr(2,0,title_string)
            stdscr.addstr(3,0,messege_string)
        curses.endwin()
if __name__ == "__main__":
    # Demo: animate a 100-step bar with a fixed info suffix.
    # NOTE(review): indentation reconstructed — sleep assumed inside the loop.
    t = Timelinebar(100)
    t.setinfo("haha")
    for i in range(100):
        t.flush(i)
        sleep(0.05)
    print("\n3")
|
990,706 | b9f07c144261cf9d830ca9f93fee752684c39343 | from Persistence.DBCon.connection import *
#relProcedimietnoMaterial
def create_procedimiento_material(procedimiento, material):
    """Insert a procedimiento<->material relation row.

    Uses a parameterized query (the original string-built quoted '%d'
    values, an injection-prone pattern). Table-name spelling kept to match
    the existing schema.
    """
    cnx = dbconnect()
    cursor = cnx.cursor(buffered=True)
    cursor.execute(
        "INSERT INTO relProcedimietnoMaterial VALUES(%s, %s)",
        (procedimiento.id, material.id),
    )
    cnx.commit()
    dbdisconect(cnx)
def delete_procedimiento_mataterial(procedimiento, material):
    """Delete the procedimiento<->material relation row.

    Bug fix: the original interpolated with `% id_a, id_b` (missing tuple
    parentheses), so `query` was actually a 2-tuple and cursor.execute()
    failed. Now a parameterized query, which also removes the injection
    risk. Function name spelling kept (public interface).
    """
    cnx = dbconnect()
    cursor = cnx.cursor(buffered=True)
    cursor.execute(
        "DELETE FROM relProcedimietnoMaterial"
        " WHERE id_procedimiento = %s AND id_material = %s",
        (procedimiento.id, material.id),
    )
    cnx.commit()
    dbdisconect(cnx)
#relEpisodioProcedimiento
def create_episodio_procedimiento(episodio, procedimiento):
    """Insert an episodio<->procedimiento relation row (parameterized)."""
    cnx = dbconnect()
    cursor = cnx.cursor(buffered=True)
    cursor.execute(
        "INSERT INTO relEpisodioProcedimiento VALUES(%s, %s)",
        (episodio.id, procedimiento.id),
    )
    cnx.commit()
    dbdisconect(cnx)
def delete_episodio_procedimiento(episodio, procedimiento):
    """Delete the episodio<->procedimiento relation row.

    Bug fix: `% id_a, id_b` lacked tuple parentheses, making `query` a
    2-tuple; now parameterized.
    """
    cnx = dbconnect()
    cursor = cnx.cursor(buffered=True)
    cursor.execute(
        "DELETE FROM relEpisodioProcedimiento"
        " WHERE id_episodio = %s AND id_procedimiento = %s",
        (episodio.id, procedimiento.id),
    )
    cnx.commit()
    dbdisconect(cnx)
#relComplicacionProcedimiento
def create_complicacion_procedimiento(complicacion, procedimiento):
    """Insert a complicacion<->procedimiento relation row (parameterized).

    NOTE(review): the original inserted (procedimiento.id, complicacion.id)
    in that order — opposite of the parameter order and of the other
    create_* helpers. The order is preserved here; confirm it matches the
    table's column order.
    """
    cnx = dbconnect()
    cursor = cnx.cursor(buffered=True)
    cursor.execute(
        "INSERT INTO relComplicacionProcedimiento VALUES(%s, %s)",
        (procedimiento.id, complicacion.id),
    )
    cnx.commit()
    dbdisconect(cnx)
def delete_complicacion_procedimiento(complicacion, procedimiento):
    """Delete the complicacion<->procedimiento relation row.

    Bug fix: `% id_a, id_b` lacked tuple parentheses, making `query` a
    2-tuple; now parameterized.
    """
    cnx = dbconnect()
    cursor = cnx.cursor(buffered=True)
    cursor.execute(
        "DELETE FROM relComplicacionProcedimiento"
        " WHERE id_complicacion = %s AND id_procedimiento = %s",
        (complicacion.id, procedimiento.id),
    )
    cnx.commit()
    dbdisconect(cnx)
#relEpisodioPdiagnostica
def create_episodio_pdiagnostica(episodio, pdiagnostica):
    """Insert an episodio<->pdiagnostica relation row (parameterized)."""
    cnx = dbconnect()
    cursor = cnx.cursor(buffered=True)
    cursor.execute(
        "INSERT INTO relEpisodioPdiagnostica VALUES(%s, %s)",
        (episodio.id, pdiagnostica.id),
    )
    cnx.commit()
    dbdisconect(cnx)
def delete_episodio_pdiagnostica(episodio, pdiagnostica):
    """Delete the episodio<->pdiagnostica relation row.

    Bug fix: `% id_a, id_b` lacked tuple parentheses, making `query` a
    2-tuple; now parameterized.
    """
    cnx = dbconnect()
    cursor = cnx.cursor(buffered=True)
    cursor.execute(
        "DELETE FROM relEpisodioPdiagnostica"
        " WHERE id_episodio = %s AND id_pdiagnostica = %s",
        (episodio.id, pdiagnostica.id),
    )
    cnx.commit()
    dbdisconect(cnx)
990,707 | e0053979daa8cc86b23c3ec6a692d416a434a3b3 | from __future__ import with_statement
import logging
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config, pool
from ultron8.api import settings
from ultron8.api.db.u_sqlite.base import Base
from ultron8.api.middleware.logging import log
from ultron8.web import app
log.setup_logging()
##############################
# EVERYTHING YOU NEED TO KNOW ABOUT SQLITE
# https://docs.sqlalchemy.org/en/13/dialects/sqlite.html
# https://docs.sqlalchemy.org/en/13/dialects/sqlite.html#module-sqlalchemy.dialects.sqlite.pysqlite
##############################
# NOTE: If debug logging is enabled, then turn on debug logging for everything in app
if settings.LOG_LEVEL == logging.DEBUG:
# Enable connection pool logging
# SOURCE: https://docs.sqlalchemy.org/en/13/core/engines.html#dbengine-logging
SQLALCHEMY_POOL_LOGGER = logging.getLogger("sqlalchemy.pool")
SQLALCHEMY_ENGINE_LOGGER = logging.getLogger("sqlalchemy.engine")
SQLALCHEMY_ORM_LOGGER = logging.getLogger("sqlalchemy.orm")
SQLALCHEMY_DIALECTS_LOGGER = logging.getLogger("sqlalchemy.dialects")
SQLALCHEMY_POOL_LOGGER.setLevel(logging.DEBUG)
SQLALCHEMY_ENGINE_LOGGER.setLevel(logging.DEBUG)
SQLALCHEMY_ORM_LOGGER.setLevel(logging.DEBUG)
SQLALCHEMY_DIALECTS_LOGGER.setLevel(logging.DEBUG)
if settings.DEBUG_REQUESTS:
# import requests.packages.urllib3.connectionpool as http_client
# http_client.HTTPConnection.debuglevel = 1
REQUESTS_LOGGER = logging.getLogger("requests")
REQUESTS_LOGGER.setLevel(logging.DEBUG)
REQUESTS_LOGGER.propagate = True
URLLIB3_LOGGER = logging.getLogger("urllib3")
URLLIB3_LOGGER.setLevel(logging.DEBUG)
LOGGER = logging.getLogger(__name__)
# from ultron8.debugger import debug_dump_exclude
# https://stackoverflow.com/questions/15648284/alembic-alembic-revision-says-import-error
# parent_dir = os.path.abspath(os.path.join(os.getcwd(), ".."))
# here = os.path.abspath(os.path.dirname(__file__))
# print(f"here: {here}")
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# print(f"parent_dir: {parent_dir}")
# sys.path.append(parent_dir)
# from ultron8.api.db.u_sqlite import metadata
# pylint: disable=no-name-in-module
# from ultron8.api.db.base import Base # noqa
# from ultron8.api.db.u_sqlite.base_class import Base
# pylint: disable=maybe-no-member
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
if settings.DATABASE_URL is None:
raise ValueError(
"You are attempting to run a migration without having 'settings.DATABASE_URL' set, please set environment value and try again."
)
LOGGER.info("settings.DATABASE_URL = %s" % str(settings.DATABASE_URL))
config.set_main_option("sqlalchemy.url", str(settings.DATABASE_URL))
# debug_dump_exclude(settings)
# Interpret the config file for Python logging.
# This line sets up loggers basically.
# fileConfig(config.config_file_name)
fileConfig(config.config_file_name, disable_existing_loggers=False)
# import pdb;pdb.set_trace()
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = Base.metadata
# def get_url():
# user = os.getenv("POSTGRES_USER", "postgres")
# password = os.getenv("POSTGRES_PASSWORD", "")
# server = os.getenv("POSTGRES_SERVER", "db")
# db = os.getenv("POSTGRES_DB", "app")
# return f"postgresql://{user}:{password}@{server}/{db}"
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the context with just a URL and no Engine — no DBAPI needs
    to be available; context.execute() calls emit SQL to the script output.
    """
    # TODO: Enable postgres version 7/23/2019 — switch to get_url() and pass
    # compare_type=True when the postgres backend is re-enabled.
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
    )
    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    Creates an Engine and associates a live connection with the context.
    """

    def process_revision_directives(context, revision, directives):
        # Prevent an empty auto-migration from being generated when there
        # are no changes to the schema.
        # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
        if getattr(config.cmd_opts, "autogenerate", False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                directives[:] = []
                LOGGER.info("No changes in schema detected.")

    # TODO: Enable postgres version 7/23/2019 — build the configuration dict
    # and inject get_url() here when switching backends.
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
    # The `with` block closes the connection on exit; the original wrapped
    # the transaction in a redundant try/finally connection.close().
    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            process_revision_directives=process_revision_directives,
        )
        with context.begin_transaction():
            context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
990,708 | 60b1a6eb2478c640d45718185d1d3c8c5ba9ee4f | # Generated by Django 3.1.3 on 2020-12-07 15:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Moves the token relation: drops User.token, adds Token.token
    (a nullable one-to-one to myapi.user). Auto-generated by Django 3.1.3."""

    dependencies = [
        ('myapi', '0003_auto_20201207_2308'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='token',
        ),
        migrations.AddField(
            model_name='token',
            name='token',
            # NOTE(review): a field named 'token' on the token model that
            # points at myapi.user reads oddly — confirm it should not be
            # named 'user'. SET_NULL requires null=True (present).
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='myapi.user'),
        ),
    ]
|
990,709 | 56ca2649424d5aad49a17dd5a698ba4d8441208d | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-07 14:19
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10.5): widens restaurant address/location
    fields, adds a phone validator and image defaults."""

    dependencies = [
        ('restaurant', '0009_restaurant_restaurant_image_thumbnail'),
    ]

    operations = [
        migrations.AlterField(
            model_name='restaurant',
            name='address_1',
            field=models.CharField(max_length=500),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='address_2',
            field=models.CharField(max_length=500),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='city',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='country',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='locality',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='phone_number_1',
            # E.164-style phone: optional '+', up to 15 digits
            field=models.CharField(max_length=15, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.", regex='^\\+?1?\\d{9,15}$')]),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='restaurant_image',
            # NOTE(review): default 'imagesrestaurant_pic/...' looks like a
            # missing '/' (upload_to is 'images/restaurant_pic/') — confirm.
            field=models.ImageField(default='imagesrestaurant_pic/restaurant_image.jpg', upload_to='images/restaurant_pic/'),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='restaurant_image_thumbnail',
            # NOTE(review): same suspected missing '/' in the default path.
            field=models.ImageField(default='imagesrestaurant_pic/thumbnail/restaurant_image_thumbnail.jpg', upload_to='images/restaurant_pic/thumbnail/'),
        ),
        migrations.AlterField(
            model_name='restaurant',
            name='state',
            field=models.CharField(max_length=255),
        ),
    ]
|
990,710 | 0f9e0de265708d2dec9cd506cd1ff63e0a52dead | from flask import Flask
import ratingscrape
app = Flask(__name__)
@app.route("/analysis/<user>")
def hello(user):
    """Scrape and return *user*'s ratings.

    Bug fix: the original assigned `userlist` but returned None, so Flask
    raised "The view function did not return a valid response" on every
    request. The list is now returned as the response body.
    """
    userlist = ratingscrape.getRatings(user)
    return str(userlist)
if __name__ == "__main__":
app.run(host='0.0.0.0') |
990,711 | 0b026da0b84d7c9cd184ddfc1727f3e0e4f2f4db | # -*- coding:utf-8 -*-
import asyncio
import urllib.request
url_imglist = [
    'https://ss0.bdstatic.com/70cFvHSh_Q1YnxGkpoWK1HF6hhy/it/u=4858554,2092434492&fm=26&gp=0.jpg',
    'https://ss2.bdstatic.com/70cFvnSh_Q1YnxGkpoWK1HF6hhy/it/u=1115057027,1261114857&fm=26&gp=0.jpg',
    'https://ss2.bdstatic.com/70cFvnSh_Q1YnxGkpoWK1HF6hhy/it/u=1578307669,1098408709&fm=26&gp=0.jpg',
]
# Download each image synchronously and dump the response object and raw
# bytes to stdout. NOTE(review): the responses are never closed, and the
# loop-body extent is reconstructed (indentation lost in this dump).
for i_url in url_imglist:
    img_pic = urllib.request.urlopen(i_url)
    print ("keke111:%s"%img_pic)
    print("keke111:%s" % img_pic.read())
# Create or fetch an event loop (translated from: 去生成或获取一个时间循环)
# print ("keke11",loop)
# Put the task onto the "task list" (translated from: 将任务放到'任务列表')
async def haha():
    print ("jaja")
    return
alist = haha()
# print (alist)
loop = asyncio.get_event_loop()
loop.run_until_complete(alist)
#print ("keke11",asyncio)
def GetNearNumber(n, iterobj, down = True):
    """Return the element of *iterobj* nearest to *n*.

    down=True  -> nearest element <= n
    down=False -> nearest element >= n
    any other value -> nearest element in either direction

    Ties resolve to the later element (the comparison is <=, matching the
    original). Returns None when no element qualifies.

    Idiom fix: `== None` comparisons replaced with `is None`. The
    `down == True` / `== False` equality tests are kept deliberately so
    truthy numerics (1/0) keep selecting the same branches as before.
    """
    best_delta = None
    best = None
    if down == True:
        for candidate in iterobj:
            if candidate <= n:
                delta = n - candidate
                if best_delta is None or delta <= best_delta:
                    best_delta = delta
                    best = candidate
    elif down == False:
        for candidate in iterobj:
            if candidate >= n:
                delta = candidate - n
                if best_delta is None or delta <= best_delta:
                    best_delta = delta
                    best = candidate
    else:
        for candidate in iterobj:
            delta = abs(candidate - n)
            if best_delta is None or delta <= best_delta:
                best_delta = delta
                best = candidate
    return best
|
990,712 | 38c25a28031cb53a1096d34c8001397d5748cf85 | import xml.etree.ElementTree as ET
import zipfile
from io import BytesIO
ns = {
"office": "urn:oasis:names:tc:opendocument:xmlns:office:1.0",
"style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0",
"text": "urn:oasis:names:tc:opendocument:xmlns:text:1.0",
"fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0",
"loext": "urn:org:documentfoundation:names:experimental:office:xmlns:loext:1.0",
}
def to_ns(value):
    """Expand a "prefix:local" name to Clark notation using the `ns` map."""
    prefix, local = value.split(':')
    return "{%s}%s" % (ns[prefix], local)
known_styles = {
to_ns("style:header-style"): [],
to_ns("style:footer-style"): [],
to_ns("style:graphic-properties"): [],
to_ns("loext:graphic-properties"): [],
to_ns("text:outline-level-style"): [],
to_ns("text:list-level-style-number"): [],
to_ns("style:page-layout-properties"): ["fo:page-width", "fo:page-height", "fo:print-orientation", "fo:margin-top", "fo:margin-bottom", "fo:margin-right", "fo:margin-left", "fo:line-height"],
to_ns("style:paragraph-properties"): ["fo:text-align", "fo:break-before", "fo:margin-left", "fo:margin-right", "fo:margin-top", "fo:margin-bottom", "fo:text-indent"],
to_ns("style:text-properties"): ["style:font-name", "fo:font-style", "fo:font-weight", "fo:font-size", "style:text-underline-style", "style:text-position"],
}
def to_inches(value):
    """Convert an ODT length string like "1.25in" to a float number of inches.

    The integer 0 (the default used for absent style values) passes through
    unchanged; anything else is rejected.

    Fix: `type(value) == str` replaced with isinstance() so str subclasses
    are converted instead of falling through to the assertion.
    """
    if isinstance(value, str):
        # only inch-denominated lengths are supported
        assert value[-2:] == "in"
        return float(value[:-2])
    elif value == 0:
        return value
    else:
        assert False, "unsupported length value: %r" % (value,)
def parse_style(style):
    """Flatten one style element into a plain dict of known attributes.

    Only attributes whitelisted in `known_styles` for each child tag are
    copied; keys are stored without their namespace prefix. Page-layout
    elements also record their page-usage, and a parent style link is kept
    under 'parent-style-name' for the later merge pass.
    """
    result = {}
    if style.tag == to_ns("style:page-layout"):
        result["page-usage"] = style.attrib.get(to_ns("style:page-usage"))
    parent_name = style.attrib.get(to_ns("style:parent-style-name"))
    if parent_name:
        result['parent-style-name'] = parent_name
    for child in style:
        attr_names = known_styles.get(child.tag)
        assert attr_names is not None, "Unknown style tag %s" % child.tag
        for qualified in attr_names:
            full_key = to_ns(qualified)
            if full_key in child.attrib:
                # drop the namespace prefix for the result key
                result[qualified.split(':')[-1]] = child.attrib.get(full_key)
    return result
def merge_styles(*styles):
    """Merge style dicts left-to-right; later dicts win on key conflicts."""
    merged = {}
    for style in styles:
        merged.update(style)
    return merged
class Paragraph:
    """Layout data for one text:p / text:h element (lengths in inches)."""

    def __init__(self):
        self.alignment = 'start'      # fo:text-align value
        self.margin_top = 0           # inches
        self.margin_bottom = 0
        self.margin_left = 0
        self.margin_right = 0
        self.text_indent = 0
        self.line_height_factor = 1   # 1.0 == 100% line height
        self.is_break = False         # a page break precedes this paragraph
        self.element = None           # source XML element
        self.style = None             # flattened style dict

    @staticmethod
    def from_odt_element(element, style, index):
        """Build a Paragraph from an XML element and its flattened style.

        *index* is the element's position in document order; the first
        paragraph never counts as starting a new page.
        """
        result = Paragraph()
        result.alignment = style.get('text-align', 'start')
        result.margin_top = to_inches(style.get('margin-top', 0))
        result.margin_bottom = to_inches(style.get('margin-bottom', 0))
        result.margin_left = to_inches(style.get('margin-left', 0))
        result.margin_right = to_inches(style.get('margin-right', 0))
        result.text_indent = to_inches(style.get('text-indent', 0))
        # line-height is stored as a percentage string, e.g. "150%"
        line_height = style.get('line-height', "100%")
        assert line_height[-1] == "%"
        result.line_height_factor = int(line_height[:-1]) / 100
        result.is_break = style.get('break-before') == "page"
        if index == 0:
            result.is_break = False
        result.element = element
        result.style = style
        return result
class ODT:
    """Loads an .odt file: flattened styles, body text, and page layout.

    Styles from content.xml and styles.xml are flattened via parse_style();
    the master page-layout values are additionally copied onto the instance
    as attributes (dashes become underscores, "…in" lengths become floats).
    """

    def __init__(self, filename):
        # style name -> flattened style dict
        self.styles = {}
        z = zipfile.ZipFile(filename)
        content = z.read('content.xml')
        root = ET.fromstring(content)
        # document-local automatic styles from content.xml
        styles = root.find('office:automatic-styles', ns)
        for s in styles:
            self.styles[s.attrib[to_ns("style:name")]] = parse_style(s)
        self.body = root.find('office:body', ns)
        self.text = self.body.find('office:text', ns)
        # styles.xml: named styles
        styles = z.read('styles.xml')
        root = ET.fromstring(styles)
        styles = root.find('office:styles', ns)
        for s in styles:
            style_name = s.attrib.get(to_ns("style:name"))
            if style_name is None: continue
            self.styles[style_name] = parse_style(s)
        # parse page style (automatic styles inside styles.xml)
        styles = root.find('office:automatic-styles', ns)
        for s in styles:
            self.styles[s.attrib[to_ns("style:name")]] = parse_style(s)
        # resolve the master page's layout and copy its values onto self
        master = root.find('office:master-styles', ns)
        master_page = master.find('style:master-page', ns)
        master_page_style = master_page.attrib.get(to_ns('style:page-layout-name'))
        mps = self.styles[master_page_style]
        for key, value in mps.items():
            try:
                # "8.5in" -> 8.5; non-length values pass through unchanged
                value = float(value.replace('in',''))
            except:
                pass
            setattr(self, key.replace('-', '_'), value)
        # 2nd pass: merge all parent styles recursively
        def merge_parent_style(style):
            # pull missing keys from the parent chain, then drop the link
            if style.get('parent-style-name'):
                parent = self.styles[style['parent-style-name']]
                merge_parent_style(parent)
                for key, value in parent.items():
                    if not key in style:
                        style[key] = value
                del style['parent-style-name']
        for style in self.styles.values():
            merge_parent_style(style)

    def parse_paragraphs(self):
        """Return a Paragraph for every text:h / text:p element, in order."""
        paragraphs = self.text.iter()
        result = []
        for i, p in enumerate(paragraphs):
            if p.tag in [to_ns("text:h"), to_ns("text:p")]:
                style_name = p.attrib.get(to_ns('text:style-name'))
                style = self.styles[style_name]
                result.append(Paragraph.from_odt_element(p, style, i))
        return result

    # returns text and style information recursivly from the given xml element
    # returns a list of (style, text) pairs
    def parse(self, element, style):
        result = []
        if element.text:
            result.append([style, element.text])
        for child in element:
            # per-child style overrides are merged over the inherited style
            el_style_name = child.attrib.get(to_ns('text:style-name'))
            if el_style_name:
                el_style = self.styles[el_style_name]
                sub_style = merge_styles(style, el_style)
            else:
                sub_style = merge_styles(style)
            tag = child.tag;
            if tag == to_ns("text:line-break"):
                result.append([sub_style, "\r\n"])
            elif tag == to_ns("text:tab"):
                result.append([sub_style, "\t"])
            elif tag == to_ns("text:s"):
                # <text:s text:c="N"> encodes N consecutive spaces
                c = child.attrib.get(to_ns('text:c'))
                if c:
                    spaceCount = int(c)
                else:
                    spaceCount = 1
                result.append([sub_style, " " * spaceCount])
            # NOTE(review): this `if` starts a NEW chain, so for the three
            # special tags above the `else` below ALSO recurses into the
            # child — an `elif` here looks intended; confirm before fixing.
            if tag == to_ns("text:soft-page-break"):
                result.append([sub_style, "\f"])
            else:
                result.extend(self.parse(child, sub_style))
            if child.tail:
                result.append([style, child.tail])
        return result
|
990,713 | 84e3380a60593ea8817f47cc59539647975349c0 | """empty message
Revision ID: a561c41b9a5d
Revises:
Create Date: 2019-07-25 20:49:30.563270
"""
import sqlalchemy_utils
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a561c41b9a5d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the customer, event_record and vehicle tables."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('customer',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('address', sa.String(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # event-sourcing log: one row per event, ordered by (sequence_id, position)
    op.create_table('event_record',
    sa.Column('sequence_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
    sa.Column('position', sa.BigInteger(), nullable=False),
    sa.Column('topic', sa.String(length=255), nullable=False),
    sa.Column('state', sa.Text(), nullable=False),
    sa.PrimaryKeyConstraint('sequence_id')
    )
    op.create_index('index', 'event_record', ['sequence_id', 'position'], unique=True)
    # vehicle id is a 17-char VIN; reg_no a 6-char registration plate
    op.create_table('vehicle',
    sa.Column('id', sa.String(length=17), nullable=False),
    sa.Column('reg_no', sa.String(length=6), nullable=True),
    sa.Column('customer_id', sa.Integer(), nullable=True),
    sa.Column('heartbeat_ts', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['customer_id'], ['customer.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the tables in reverse dependency order (vehicle references customer)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('vehicle')
    op.drop_index('index', table_name='event_record')
    op.drop_table('event_record')
    op.drop_table('customer')
    # ### end Alembic commands ###
|
990,714 | e2d0271d5659a5a18427a1a85f333edc6f4c1ed3 | """
TCP服务端
1,导入模块
2,创建套接字
3,设置地址重用
4,绑定端口
5,设置监听,让套接字由主动变为被动接收
6,接受客户端连接 定义函数 request_handler()
7,接收客户端游览器发送的请求协议
8,判断协议是否为空
9,拼接响应的报文
10,发送发送响应报文
11,关闭操作
"""
import socket
from application import app_基础框架2
import sys
import threading
"""
1,在类的初始化方法中配置当前的项目
{"2048":"./2048", "植物大战僵尸v1":"./zwdzjs-v1", ...}
2, 在类增加一个初始化项目配置的方法 init_project()
2.1 显示所有可以发布的游戏 菜单
2.2 接收用户的选择
2.3 根据用户的选择发布指定的项目 (保存用户选择的游戏对应的本地目录)
3, 更改Web服务器打开的文件目录
"""
class WebServer(object):
    """Minimal threaded TCP web server that serves one user-selected game
    project directory (delegating request handling to the application module)."""

    def __init__(self, port):
        # Create the listening TCP socket.
        tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow address reuse so restarts don't hit TIME_WAIT.
        tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        # Bind to the given port on all interfaces.
        tcp_server_socket.bind(("", port))
        # Switch the socket from active to passive (listening) mode.
        tcp_server_socket.listen(128)
        # Keep the socket on the instance.
        self.tcp_server_socket = tcp_server_socket
        # Mapping of game display name -> local project directory.
        self.projects_dict = dict()
        # Directory of the project currently being served.
        self.current_dir = ""
        self.projects_dict['植物大战僵尸-普通版'] = "zwdzjs-v1"
        self.projects_dict['植物大战僵尸-外挂板'] = "zwdzjs-v2"
        self.projects_dict['保卫萝卜'] = "tafang"
        self.projects_dict['2048'] = "2048"
        self.projects_dict['读心术'] = "dxs"
        # Ask the user which project to publish.
        self.init_project()

    def init_project(self):
        """Show the project menu and record the chosen project directory."""
        # List of game display names (the dict keys).
        keys_list = list(self.projects_dict.keys())
        # Print an indexed menu, e.g. "0.植物大战僵尸-普通版".
        for index, game_name in enumerate(keys_list):
            print("%d.%s" % (index, game_name))
        # Read the user's selection (an index into the menu).
        sel_no = input("请选择要发布的游戏序号:\n")
        # Map the selection back to a game name, then to its directory.
        key = keys_list[int(sel_no)]
        self.current_dir = self.projects_dict[key]

    def start(self):
        """Accept clients forever, handling each on its own daemon thread."""
        while True:
            # Accept the next client connection.
            new_client_socket, ip_port = self.tcp_server_socket.accept()
            # Handle the request concurrently.
            t1 = threading.Thread(target=self.request_handler, args=(new_client_socket, ip_port))
            # Daemon thread: do not block interpreter shutdown.
            t1.setDaemon(True)
            t1.start()

    def request_handler(self, new_client_socket, ip_port):
        """Receive one HTTP request and send back the response."""
        # Read the browser's request (up to 1024 bytes).
        recv_data = new_client_socket.recv(1024)
        # Empty data means the client closed the connection.
        if not recv_data:
            print(f"{ip_port}客户端已下线!")
            new_client_socket.close()
            return
        # Delegate to the application module to build the response bytes.
        response_data = app_基础框架2.appllication(self.current_dir, recv_data, ip_port)
        # Send the response back.
        new_client_socket.send(response_data)
        # One request per connection: close it.
        new_client_socket.close()
def main():
    """Entry point: validate argv and start the web server on the given port.

    Usage: python xxx.py <port>
    """
    # The script requires exactly one argument (the port).
    if len(sys.argv) != 2:
        print("启动失败,参数格式错误!正确格式:python xxx.py 端口号")
        return
    # The port argument must be a plain number.
    if not sys.argv[1].isdigit():
        print("启动失败,端口号不是一个纯数字!")
        return
    # Start the web server on the requested port.
    port = int(sys.argv[1])
    ws = WebServer(port)
    ws.start()
if __name__ == '__main__':
main()
|
990,715 | fc4991d3fda556c4b9650b7cf356178369af994b | print('* Write a function in Python code that adds 2+2 and returns the result:')
def sum(num):
    """Return *num* added to itself (2+2 -> 4 for the prompt's example).

    NOTE: deliberately shadows the builtin `sum`, as in the original.
    """
    doubled = num + num
    return doubled


result = sum(2)
print('Result: ', result)
|
990,716 | da58c9cbe3c55a0000110e4fa36cb033b44d996f | import sys
sys.dont_write_bytecode = True
from flask import Flask, g, Blueprint, url_for, request, jsonify, render_template
#import connection
from sqlalchemy.ext.declarative import declarative_base
from flask.ext.sqlalchemy import SQLAlchemy
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import time
from sqlalchemy import Column,Integer,Text,ForeignKey,Boolean,Float
#Create and configure app object.
api = Flask(__name__, static_folder='static')
api.config.from_object('config.Config')
#Throws a warning if we don't set this.
api.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(api)
#We'll use this db instance throughout the API.
#engine = create_engine('postgresql://localhost:5432')
#Session = sessionmaker(bind=engine)
Base = declarative_base()
class Connection(Base):
    """One observed network connection between a source process and a destination."""
    __tablename__ = 'connection'
    id = Column(Integer,primary_key=True)
    uuid = Column(Text)
    source_ip = Column(Text)
    source_port = Column(Text)
    source_deployment = Column(Text)
    source_job = Column(Text)
    source_index = Column(Integer)
    source_user = Column(Text)
    source_group = Column(Text)
    source_pid = Column(Integer)
    source_process_name = Column(Text)
    source_age = Column(Integer)
    destination_ip = Column(Text)
    destination_port = Column(Text)
    def __init__(self,**kwargs):
        # NOTE(review): blindly copies every kwarg onto the instance, so a
        # misspelled column name is accepted silently as a stray attribute
        # and the real column stays NULL — confirm this is intentional.
        self.__dict__.update(**kwargs)
        # NOTE(review): created_at is set but has no Column mapping above.
        self.created_at = time.time()
    def serialize(self):
        """Return this connection as a nested, JSON-serializable dict."""
        return {
            'connection': {
                'source': {
                    'ip': self.source_ip,
                    'port': self.source_port,
                    'deployment': self.source_deployment,
                    'job': self.source_job,
                    'index': self.source_index,
                    'user': self.source_user,
                    'group': self.source_group,
                    'pid': self.source_pid,
                    'process_name': self.source_process_name,
                    'age': self.source_age
                },
                'destination': {
                    'ip': self.destination_ip,
                    'port': self.destination_port
                }
            },
            'connection_uuid': self.uuid
        }
Base.metadata.create_all(bind=db.engine)
@api.route('/connections',methods=['GET'])
def get_connections():
    """GET /connections — return every stored connection, serialized, as JSON."""
    connections = db.session.query(Connection).all()
    # Comprehension replaces the original manual append loop.
    connection_list = [con.serialize() for con in connections]
    return jsonify({"code":200,"resource":connection_list})
@api.route('/connections',methods=['POST'])
def create_connections():
    """POST /connections — create a Connection row from the JSON request body.

    Expects {"source": {...}, "destination": {...}} matching the Connection columns.
    """
    params = request.json
    source = params['source']
    destination = params['destination']
    # BUG FIX: was `source_deployment_name=...`, but the model column is
    # `source_deployment`; because __init__ copies arbitrary kwargs, the
    # deployment value was silently stored on a stray attribute and the
    # real column stayed NULL.
    new_connection = Connection(
        source_ip = source['ip'],
        source_port = source['port'],
        source_deployment = source['deployment'],
        source_job = source['job'],
        source_index = source['index'],
        source_user = source['user'],
        source_group = source['group'],
        source_pid = source['pid'],
        source_process_name = source['process_name'],
        source_age = source['age'],
        destination_ip = destination['ip'],
        destination_port = destination['port']
    )
    db.session.add(new_connection)
    db.session.commit()
    return jsonify({"code":200,"message":"Resources created."})
@api.route("/connections",methods=['DELETE'])
def delete_connection():
params = request.json
uuid_to_delete = params["uuid"]
return jsonify({"code":200,"message":"Resource deleted."})
@api.route('/',methods=['GET'])
def index():
    """GET / — render the main index page."""
    return render_template("index.html")
@api.route('/login',methods=['GET'])
def login():
    """GET /login — render the login page."""
    return render_template("login.html")
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Requires port number as argument."
api.run(host='0.0.0.0',port=int(sys.argv[1]),debug=True)
|
990,717 | e3fd7395ee1b08c88155cb1a4bb2ac0937fb0ff7 | from typing import List
import collections
class Solution:
    def removeBoxes(self, boxes: List[int]) -> int:
        """Maximum points from removing runs of equal boxes (LeetCode 546).

        Removing k adjacent equal boxes scores k*k points; boxes may be
        removed in any order. The original body was incomplete (an empty
        loop over merged runs); replaced with the standard interval DP.

        :param boxes: list of box colors (ints)
        :return: the maximum total score
        """
        if not boxes:
            return 0
        from functools import lru_cache

        @lru_cache(maxsize=None)
        def dp(l, r, k):
            # Best score for boxes[l..r] with k extra boxes equal to
            # boxes[l] already attached on the left.
            if l > r:
                return 0
            # Absorb the run of equal boxes starting at l.
            while l < r and boxes[l + 1] == boxes[l]:
                l += 1
                k += 1
            # Option 1: remove the (k+1)-long run now.
            best = (k + 1) ** 2 + dp(l + 1, r, 0)
            # Option 2: merge with a later equal box m, clearing the middle first.
            for m in range(l + 1, r + 1):
                if boxes[m] == boxes[l]:
                    best = max(best, dp(l + 1, m - 1, 0) + dp(m, r, k + 1))
            return best

        result = dp(0, len(boxes) - 1, 0)
        dp.cache_clear()  # don't retain the closure cache between calls
        return result

if __name__ == "__main__":
    solution = Solution()
    print(solution.removeBoxes([1,3,2,2,2,3,4,3,1]))
|
990,718 | 4b4d626c9b350ad4f6887838f3e54b99c5f08d40 | from pyspark.sql import SparkSession
from datetime import datetime, timezone
from io import StringIO
import csv,time
def split_complex(x):
    """Split one CSV-formatted line *x* into fields, honoring quoted commas."""
    parsed_rows = list(csv.reader(StringIO(x), delimiter=','))
    return parsed_rows[0]
def get_esoda(x):
    """Return field 6 of row *x*, converted to int."""
    value = x[6]
    return int(value)
def get_eksoda(x):
    """Return field 5 of row *x*, converted to int."""
    value = x[5]
    return int(value)
spark = SparkSession.builder.appName("q2").getOrCreate()
sc = spark.sparkContext
sc.setLogLevel("ERROR")
t1= time.time()
# For every rating tuple: reduceByKey on userId, summing the ratings and
# counting them with a 1 per record, then keep users whose average rating
# is > 3.0 and count how many there are.
# NOTE(review): the variable name says "median" but the computation is the
# arithmetic mean (sum / count) — confirm which statistic was intended.
numberOfUsersWithMedianRatingGreaterThanThree = sc.textFile("hdfs://master:9000/ratings.csv") \
    .map(lambda line : line.split(','))\
    .map(lambda rating: (rating[0], [rating[2] ,1] ))\
    .reduceByKey(lambda x,y: [float(x[0]) + float(y[0]),x[1]+1] )\
    .map(lambda x : [ float( x[1][0]) / float( x[1][1]), x[0] ])\
    .filter(lambda x : x[0] > 3.0 )\
    .sortByKey()\
    .count()
# Finally count the total number of distinct users for the ratio below.
numberOfAllUsers = sc.textFile("hdfs://master:9000/ratings.csv") \
    .map(lambda line : line.split(','))\
    .map(lambda rating: (rating[0]) )\
    .distinct()\
    .count()
print("Number Greater than 3: ", numberOfUsersWithMedianRatingGreaterThanThree)
print("Number of all Users: ", numberOfAllUsers)
# ... and the ratio:
print("RESULT :::::: ", int(numberOfUsersWithMedianRatingGreaterThanThree) / int(numberOfAllUsers))
t2 = time.time()
print("**************")
print("Total time: ", t2-t1)
|
990,719 | f3ed3f13178a5e866fdb5873b8b7d1ff24d31e37 | import sys
import socket
import argparse
from threading import Timer
from time import sleep
def getCmdArg():
    """Parse command-line options.

    -p/--port       port to bind (default 9000)
    -i/--interface  address to listen on, stored as ``host`` (default 127.0.0.1)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--port", help="Give a port to bind.", action="store", default=9000)
    parser.add_argument("-i", "--interface", help="Address to open a port on. Default = 127.0.0.1", dest="host", action="store", default="127.0.0.1")
    return parser.parse_args()
def createSocket():
    """Create a TCP socket with SO_REUSEADDR enabled.

    :return: the configured socket
    :raises socket.error: if the socket cannot be created.

    BUG FIX: on socket.error the original only printed a message and then
    executed ``return sock`` with ``sock`` unbound, raising a confusing
    UnboundLocalError; now the original error is re-raised.
    """
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    except socket.error:
        print("FAILED TO CREATE SOCKET")
        raise
    return sock
def main():
    """Accept TCP connections and echo each payload back prefixed with "OK --> "."""
    args = getCmdArg()  # getting cmdline arguments from function
    port = int(args.port)
    host = args.host
    #now create a socket
    sock = createSocket()
    print(type(sock))
    #bind socket
    sock.bind((host,port))
    sock.listen(5)
    while True:
        conn , addr = sock.accept()
        print("Connected with {}:{}".format(addr[0],addr[1]))
        data = conn.recv(1024)
        print("Host sent data with size of {}".format(sys.getsizeof(data)))
        reply = "OK --> " + data.decode('ascii')
        if not data:
            break # if data is empty.. no use of sending smth back
        conn.send(reply.encode('ascii'))
        # NOTE(review): per-connection sockets (`conn`) are never closed, and
        # `sock.close()` below is only reached via the empty-data break.
    sock.close()

if __name__ == '__main__':
    main()
990,720 | 6961fdac8a2bbb8e9ea6e97c40a4d56e4ddd538f | import logging
from book.models import Book
from home.models import Utilities
from home.views import all_filters_from_db
from datetime import timedelta,datetime
from redisClient import Client
def categorySchedular():
    """Refresh the distinct book-category list in the Utilities row and Redis.

    Reads all non-null distinct Book categories, stores them on the
    Utilities row keyed "categories", and mirrors them to Redis with a
    one-day TTL. Best-effort: any failure is logged and swallowed.
    """
    try:
        categories = Book.objects.values_list("category").filter(category__isnull=False).distinct()
        categoryKey = Utilities.objects.get(key="categories")
        # values_list yields 1-tuples; unpack with `item,`.
        categoryKey.value={"categories":[item for item, in categories.all() ]}
        Client.setkey("categories",categoryKey.value.get("categories"),timedelta(days=1))
        categoryKey.save()
    except Exception as e:
        # Scheduled job boundary: log and continue rather than crash the scheduler.
        logging.error(str(e))
|
990,721 | 1df33dae9ea47f3a6f637e611bee3c534caea1fb | '''
Description :
Author : CagedBird
Date : 2021-10-10 20:43:19
FilePath : /rl/src/utils/show_chinese.py
'''
def show_chinese():
    """Update matplotlib rcParams so Chinese (SimSun) text and minus signs render correctly."""
    from matplotlib import rcParams
    rcParams.update({
        "font.family": 'serif',
        "font.size": 14,
        "mathtext.fontset": 'stix',
        "font.serif": ['SimSun'],
        "axes.unicode_minus": False,
    })
990,722 | 256f1e9a17d837dfdf1d5b7daf36382b458baf7c | """
Have the function MaximumSquare(strArr) take the strArr parameter being passed which will be a 2D matrix
of 0 and 1's, and determine the area of the largest square submatrix that contains all 1's.
A square submatrix is one of equal width and height, and your program should return the area of the
largest submatrix that contains only 1's. For example: if strArr is ["10100", "10111", "11111", "10010"]
then this looks like the following matrix:
1 0 1 0 0
1 0 1 1 1
1 1 1 1 1
1 0 0 1 0
For the input above, you can see that the largest square submatrix is of size 2x2, so your program should
return the area which is 4.
You can assume the input will not be empty.
"""
def MaximumSquare(strArr):
    """Return the area of the largest all-'1' square submatrix of strArr.

    strArr is a list of equal-length strings of '0'/'1' characters.
    Replaces the original brute-force scan (which relied on an
    exception-masked, row/column-swapped bounds check in check_square)
    with the standard O(rows*cols) dynamic program.

    :param strArr: non-empty list of '0'/'1' strings
    :return: area (side**2) of the largest all-'1' square
    """
    rows = len(strArr)
    cols = len(strArr[0])
    # side[r][c] = side length of the largest all-'1' square whose
    # bottom-right corner is at (r, c).
    side = [[0] * cols for _ in range(rows)]
    best = 0
    for r in range(rows):
        for c in range(cols):
            if strArr[r][c] == '1':
                if r == 0 or c == 0:
                    side[r][c] = 1
                else:
                    side[r][c] = 1 + min(side[r - 1][c], side[r][c - 1], side[r - 1][c - 1])
                best = max(best, side[r][c])
    return best * best
def check_square(strArr, point, check_size):
    """Return True if the check_size x check_size block at top-left `point` is all '1's.

    point is (row, col): point[0] indexes into strArr (rows), point[1]
    indexes within a row (columns).

    BUG FIX: the bounds check compared the row coordinate against the
    column count and vice versa, wrongly rejecting valid squares in
    non-square matrices; the broad IndexError catch masked the defect.
    With correct bounds, the try/except is no longer needed.
    """
    if point[0] + check_size > len(strArr) or point[1] + check_size > len(strArr[0]):
        return False
    for x in range(point[0], point[0] + check_size):
        for y in range(point[1], point[1] + check_size):
            if int(strArr[x][y]) != 1:
                return False
    return True
990,723 | 33fce609bd35a7e258fefdedac4e459f406fa03d | """
Convolutional Denoising Autoencoder
Contains functions to read in preprocessed data, split according to training parameters,
train models, and save model outputs
"""
import os
from numpy.random import seed
seed(1)
import tensorflow
tensorflow.random.set_seed(2)
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense, Flatten, Reshape, Input, InputLayer, Conv1D, MaxPooling1D, Conv1DTranspose
from tensorflow.keras.models import Sequential, Model
from src.models.autoencoders.patient_split import *
from sklearn.model_selection import train_test_split
from src.utils.plotting_utils import *
# set_font_size()
def read_in(file_index, normalized, train, split_ratio):
    """
    Reads in a file and can toggle between normalized and original files
    :param file_index: [int] patient number as string
    :param normalized: [boolean] that determines whether the files should be normalized or not
    :param train: [int] 0 for full data for training, 1 for tuning model, 2 for full noisy data for training, 3 for adaptive training
    :param split_ratio: [float] ratio to split the files into train and test
    :return: returns npy array of patient data across 4 leads
    """
    filepath = "Working_Data/Normalized_Fixed_Dim_HBs_Idx" + str(file_index) + ".npy"
    if normalized:
        if train == 0:
            # returns data without modification for training models
            training, test, full = patient_split_all(filepath, split_ratio)
            return training, test, full
        elif train == 1:
            # returns normal data split into a train and test, and abnormal data
            normal_train, normal_test, abnormal = patient_split_train(filepath, split_ratio)
            return normal_train, normal_test, abnormal
        elif train == 2: # used for model pipeline CDAE
            # 3x the data, adding gaussian noise to the 2 duplicated train arrays
            train_, test, full = patient_split_all(filepath, split_ratio)
            noise_factor = 0.5
            noise_train = train_ + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=train_.shape)
            noise_train2 = train_ + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=train_.shape)
            train_ = np.concatenate((train_, noise_train, noise_train2))
            return train_, test, full
        elif train == 3: # used for adaptive training
            # 3x the data, adding gaussian noise to the 2 duplicated train arrays
            train_, remaining = patient_split_adaptive(filepath, split_ratio)
            noise_factor = 0.5
            noise_train = train_ + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=train_.shape)
            noise_train2 = train_ + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=train_.shape)
            train_ = np.concatenate((train_, noise_train, noise_train2))
            return train_, remaining
    else:
        # returns the full array
        # NOTE(review): this branch concatenates file_index without str(),
        # so it requires file_index to already be a string — confirm callers.
        data = np.load(os.path.join("Working_Data", "Fixed_Dim_HBs_Idx" + file_index + ".npy"))
        return data
def build_model(encode_size):
    """
    Builds a convolutional autoencoder model, returning both the encoder and decoder models
    :param encode_size: [int] dimension that we want to reduce to
    :return: encoder, decoder models

    (A stale commented-out 100-sample variant of this function was removed.)
    """
    # Build the encoder: two conv stacks with pooling, then dense layers
    # compressing down to encode_size. Input is (1000 samples, 4 leads).
    encoder = Sequential()
    encoder.add(InputLayer((1000,4)))
    encoder.add(Conv1D(5, 11, activation="tanh", padding="same"))
    encoder.add(Conv1D(7, 7, activation="relu", padding="same"))
    encoder.add(MaxPooling1D(2))
    encoder.add(Conv1D(11, 5, activation="tanh", padding="same"))
    encoder.add(Conv1D(11, 3, activation="tanh", padding="same"))
    encoder.add(MaxPooling1D(2))
    encoder.add(Flatten())
    encoder.add(Dense(750, activation = 'tanh', kernel_initializer='glorot_normal'))
    encoder.add(Dense(400, activation = 'tanh', kernel_initializer='glorot_normal'))
    encoder.add(Dense(200, activation = 'tanh', kernel_initializer='glorot_normal'))
    encoder.add(Dense(encode_size))
    # Build the decoder: dense layers back up, reshape, then transposed
    # convolutions to reconstruct the (1000, 4) signal.
    decoder = Sequential()
    decoder.add(InputLayer((encode_size,)))
    decoder.add(Dense(200, activation='tanh', kernel_initializer='glorot_normal'))
    decoder.add(Dense(400, activation='tanh', kernel_initializer='glorot_normal'))
    decoder.add(Dense(750, activation='tanh', kernel_initializer='glorot_normal'))
    decoder.add(Dense(10000, activation='tanh', kernel_initializer='glorot_normal'))
    decoder.add(Reshape((1000, 10)))
    decoder.add(Conv1DTranspose(8, 11, activation="relu", padding="same"))
    decoder.add(Conv1DTranspose(4, 5, activation="linear", padding="same"))
    return encoder, decoder
def tuning_ae(num_epochs, encode_size, file_index, plot_loss, save_files):
    """
    Assist in tuning a model parameters and checking for overfit / underfit
    :param num_epochs: [int] number of epochs to use for training
    :param encode_size: [int] encoded dimension that model will compress to
    :param file_index: [int] patient id to run on
    :param plot_loss: [boolean] if true will plot the loss curve for the model
    :param save_files: [boolean] if true will save the .npy arrays for encoded and reconstructed heartbeats
    :return: None
    """
    # mode 2: noisy-augmented train split, 30% held out.
    # NOTE(review): `all` shadows the builtin of the same name.
    normal, abnormal, all = read_in(file_index, True, 2, 0.3)
    normal_train, normal_valid = train_test_split(normal, train_size=0.85, random_state=1)
    signal_shape = normal.shape[1:]
    batch_size = round(len(normal) * 0.15)
    encoder, decoder = build_model(encode_size)
    # Wire encoder and decoder into one trainable autoencoder.
    inp = Input(signal_shape)
    encode = encoder(inp)
    reconstruction = decoder(encode)
    autoencoder = Model(inp, reconstruction)
    opt = keras.optimizers.Adam(learning_rate=0.001)
    autoencoder.compile(optimizer=opt, loss='mse')
    early_stopping = EarlyStopping(patience=10, min_delta=0.001, mode='min')
    model = autoencoder.fit(x=normal_train, y=normal_train, epochs=num_epochs, batch_size=batch_size,
                            validation_data=(normal_valid, normal_valid), callbacks=early_stopping)
    if plot_loss:
        SMALLER_SIZE = 10
        MED_SIZE = 12
        BIG_SIZE = 18
        plt.figure()
        # plt.rc('font', size=SMALL_SIZE)         # controls default text sizes
        plt.rc('axes', titlesize=MED_SIZE)        # fontsize of the axes title
        plt.rc('axes', labelsize=MED_SIZE)        # fontsize of the x and y labels
        plt.rc('xtick', labelsize=SMALLER_SIZE)   # fontsize of the tick labels
        plt.rc('ytick', labelsize=SMALLER_SIZE)   # fontsize of the tick labels
        plt.rc('legend', fontsize=MED_SIZE)       # legend fontsize
        plt.rc('figure', titlesize=BIG_SIZE)      # fontsize of the figure title
        plt.plot(model.history['loss'])
        plt.plot(model.history['val_loss'])
        # plt.title('Example of Training and Validation Loss')
        plt.ylabel('Mean Squared Error')
        plt.xlabel('Epochs')
        plt.legend(['Train', 'Validation'], loc='upper right')
        plt.savefig("images/CDAE_" + file_index + "_loss.png", dpi=500)
        plt.show()
    if save_files:
        # using autoencoder to encode all of the patient data
        encoded = encoder.predict(all)
        reconstruction = decoder.predict(encoded)
        # save reconstruction and encoded files
        reconstruction_save = "Working_Data/reconstructed_tuning_10hb_cae_" + str(file_index) + ".npy"
        encoded_save = "Working_Data/encoded_tuning_10hb_cae_" + str(file_index) + ".npy"
        np.save(reconstruction_save, reconstruction)
        np.save(encoded_save, encoded)
def training_ae(num_epochs, reduced_dim, file_index, save_model):
    """
    Training function for convolutional autoencoder model, saves encoded hbs, reconstructed hbs, and model files
    :param num_epochs: [int] number of epochs to use
    :param reduced_dim: [int] encoded dimension that model will compress to
    :param file_index: [int] patient id to run on
    :param save_model: [boolean] if true saves model
    :return: None
    """
    # mode 3: adaptive split (noisy-augmented train + remaining data).
    normal, post_normal = read_in(file_index, 1, 3, 0.3)
    # NOTE(review): `split` is presumed to come from patient_split's
    # star-import; quarters of the post-normal data — confirm.
    three, four, five, six = split(post_normal, 4)
    signal_shape = normal.shape[1:]
    batch_size = round(len(normal) * 0.15)
    encoder, decoder = build_model(reduced_dim)
    # Wire encoder and decoder into one trainable autoencoder.
    inp = Input(signal_shape)
    encode = encoder(inp)
    reconstruction = decoder(encode)
    autoencoder = Model(inp, reconstruction)
    opt = keras.optimizers.Adam(learning_rate=0.001)
    autoencoder.compile(optimizer=opt, loss='mse')
    autoencoder.fit(x=normal, y=normal, epochs=num_epochs, batch_size=batch_size)
    if save_model:
        # save out the model
        filename = 'Working_Data/CDAE_patient_' + str(file_index) + '_iter' + str(0) + '_model'
        autoencoder.save_weights(filename, save_format = "tf")
        print('Model saved for patient: ' + str(file_index))
    # using autoencoder to encode all of the patient data
    encoded = encoder.predict(three)
    reconstruction = decoder.predict(encoded)
    # save reconstruction and encoded files
    reconstruction_save = "Working_Data/reconstructed_10hb_cae_" + str(file_index) + "_hour2_4" + ".npy"
    # encoded_save = "Working_Data/encoded_10hb_cae_" + str(file_index) + ".npy"
    np.save(reconstruction_save, reconstruction)
    # np.save(encoded_save, encoded)
def load_model(file_index):
    """
    Loads pre-trained model and saves npy files for reconstructed heartbeats
    :param file_index: [int] patient id for model
    :return: None
    """
    normal, abnormal, all = read_in(file_index, 1, 2, 0.3)
    # NOTE(review): loads the dense-AE checkpoint with a hardcoded dim of
    # 100, not the CDAE weights saved by training_ae — confirm intended.
    autoencoder = keras.models.load_model('Working_Data/ae_patient_' + str(file_index) + '_dim' + str(100) + '_model.h5')
    reconstructed = autoencoder.predict(all)
    reconstruction_save = "Working_Data/reconstructed_cdae_10d_Idx" + str(file_index) + ".npy"
    np.save(reconstruction_save, reconstructed)
def run(num_epochs, encoded_dim):
    """
    Run training autoencoder over all dims in list
    :param num_epochs: number of epochs to train for
    :param encoded_dim: dimension to run on
    :return None, saves arrays for reconstructed and dim reduced arrays
    """
    # for patient_ in get_patient_ids():
    # Currently restricted to a single patient for iteration speed.
    for patient_ in ['16']:
        print("Starting on index: " + str(patient_))
        training_ae(num_epochs, encoded_dim, patient_, True)
        print("Completed " + str(patient_) + " reconstruction and encoding, saved test data to assess performance")

# trains and saves a model for each patient from get_patient_ids
if __name__ == "__main__":
    # load_model(16) # for use with pre trained models
    run(110, 10) # to train a whole new set of models
|
990,724 | 064e5dd8352a16b07425c7758a816ef509ca52df | import matplotlib.pyplot as pyplot
import numpy
from ode_cheb import ode_cheb
# Zero right-hand side (homogeneous ODE).
f = lambda x: x*0
# Pairs (a, b) to sweep — presumably boundary-condition coefficients for
# ode_cheb; confirm against the ode_cheb module.
ab = [(1, 1), (-2, 2), (3, 4), (1, 0), (0, 1)]
for a,b in ab:
    n = 20
    x, L, rhs = ode_cheb(a, b, f, n)
    u = numpy.linalg.solve(L, rhs)
    # Keep only the first n entries (drop trailing boundary rows).
    u = u[0:n]
    pyplot.plot(x, u, label="a = {}, b = {}".format(a, b))
pyplot.legend(loc="lower left")
pyplot.show()
|
990,725 | 6205c4244a827aeb430de9c581b26723b018f27d | #!/usr/bin/env python
from autoware_msgs.msg import VehicleStatus, Gear
from itolab_senior_car_msgs.msg import Servo
import std_msgs.msg
import math
import rospy
class Mpc_subscriber(object):
    """Publishes fake /vehicle_status messages, echoing steering from /servo_cmd."""
    def __init__(self):
        print("RUN Vehicle Status")
        # Servo value (degrees) corresponding to wheels-straight.
        self.center_steering = 85
        self.steering = 0
        self.accel = 0
        self.reverse = 0
    def servo_callback(self, data):
        """Cache the latest servo command received on /servo_cmd."""
        print("steering is", data.steering)
        self.steering = data.steering
        self.accel = data.accel
        self.reverse = data.reverse
    def fake_vehicle(self):
        """Publish a VehicleStatus message in a loop until ROS shutdown."""
        rospy.init_node("fake_status")
        pub = rospy.Publisher("/vehicle_status", VehicleStatus, queue_size=100)
        rospy.Subscriber("/servo_cmd", Servo, self.servo_callback)
        # NOTE(review): this Rate is created but r.sleep() is never called,
        # so the publish loop below runs unthrottled — confirm intended.
        r = rospy.Rate(5)
        gear_msg = Gear()
        gear_msg.gear = 0
        std_header = std_msgs.msg.Header()
        # NOTE(review): stamp set once before the loop; every published
        # message carries the same timestamp — confirm intended.
        std_header.stamp = rospy.Time.now()
        std_header.frame_id = "base_link"
        msg = VehicleStatus()
        while not rospy.is_shutdown():
            msg.header = std_header
            msg.tm = ""
            msg.drivemode = 0
            msg.steeringmode = 0
            msg.current_gear = gear_msg
            msg.speed = 1.5 # m/s
            msg.drivepedal = 0
            msg.brakepedal = 0
            # Convert servo degrees (offset from center) to radians.
            msg.angle = (self.steering - self.center_steering) * math.pi /180 # rad
            msg.lamp = 0
            msg.light = 0
            pub.publish(msg)

if __name__=="__main__":
    Mpc_sub = Mpc_subscriber()
    try:
        Mpc_sub.fake_vehicle()
    except rospy.ROSInterruptException: pass
|
990,726 | 55f7f6b202d34d43a1a88f3bd51cddb2f14b8190 | #!/usr/bin/env python3
from common import *
# input: 8-bit entity (1 means single UTF8 byte, 0 means two UTF8 bytes)
def pshufb_const(pattern):
    """Build a 16-entry pshufb shuffle for an 8-bit pattern.

    Each pattern bit describes one entity (1 = single UTF-8 byte, 0 = two
    UTF-8 bytes); the bit order maps through [0, 4, 1, 5, 2, 6, 3, 7].
    The result is padded with -1 up to 16 entries.

    :param pattern: int in [0, 255]
    :return: list of 16 byte indices (-1 for unused slots)
    """
    # BUG FIX: the original assert was `0x00 << pattern <= 0xff`, which is
    # always true (0 shifted by anything is 0); the intent is a range check.
    assert 0x00 <= pattern <= 0xff
    tmp = {}
    for bit, index in enumerate([0, 4, 1, 5, 2, 6, 3, 7]):
        byte0_index = 2*index
        byte1_index = 2*index + 1
        if pattern & (1 << bit):
            tmp[index] = [byte0_index]
        else:
            tmp[index] = [byte1_index, byte0_index]
    result = []
    for index in range(8):
        result.extend(tmp[index])
    while len(result) != 16:
        result.append(-1)
    return result
def generate():
    """Print C tables: per-pattern pshufb shuffles and their output lengths.

    cpp_array_initializer, indent, and fill come from the local `common`
    module (star-imported at the top of the file).
    """
    print("static const int8_t compress_16bit_lookup[256][16] = {")
    for pattern in range(256):
        arr = pshufb_const(pattern)
        cpp = cpp_array_initializer(arr)
        # No trailing comma after the final initializer.
        if pattern < 255:
            comma = ","
        else:
            comma = ""
        print(f"{indent}{cpp}{comma}")
    print("};")
    print()
    # Output length per pattern = 16 minus the number of -1 padding slots.
    arr = []
    for pattern in range(256):
        tmp = pshufb_const(pattern)
        arr.append(16 - tmp.count(-1))
    print("static const uint8_t compress_16bit_length[256] = ")
    print(fill(cpp_array_initializer(arr)) + ";")

if __name__ == '__main__':
    generate()
|
990,727 | d771af2f651623f89313e37d1390670f3258c21c | # -*- coding: utf-8 -*-
__author__ = """Joe Walsh"""
__email__ = 'j.thomas.walsh@gmail.com'
__version__ = '0.1.0'
|
990,728 | 7224f1d04bde72466b624b0ed7448ebc21453a5c | # https://atcoder.jp/contests/abc042/tasks/abc042_b
N, L = map(int, input().split())
S = [input() for _ in range(N)]
S.sort()
ans = ""
for i in range(N):
ans += S[i]
print(ans) |
990,729 | 7656ac9c838e6713df680f9cd244e15fa9e37ebf |
#!/usr/bin/env python
from http.server import BaseHTTPRequestHandler, HTTPServer
import http.client
import json
import requests
# HTTPRequestHandler class
# HTTPRequestHandler class
class testHTTPServer_RequestHandler(BaseHTTPRequestHandler):
    """Minimal handler: every GET returns 200 text/html with "Hello world!"."""
    # GET
    def do_GET(self):
        # Send response status code
        self.send_response(200)
        # Send headers
        self.send_header('Content-type','text/html')
        self.end_headers()
        # Send message back to client
        message = "Hello world!"
        # Write content as utf-8 data
        self.wfile.write(bytes(message, "utf8"))
        return
def run():
    """Interactively collect one flight query, run it, then serve HTTP forever.

    Starts an HTTPServer on 127.0.0.1:8081 with the hello-world handler,
    prompts on stdin for origin/destination/date, issues the search, and
    then blocks in serve_forever().

    (Two large commented-out blocks — an inline request payload and a raw
    http.client POST — were dead code and have been removed.)
    """
    print('starting server...')
    # Port 8081 avoids needing root (port 80 requires elevated privileges).
    server_address = ('127.0.0.1', 8081)
    httpd = HTTPServer(server_address, testHTTPServer_RequestHandler)
    print('running server...')
    origin = input("origin: ")
    destin = input("destination: ")
    date = input("YYYY-MM-DD: ")
    search(origin, destin, date)
    httpd.serve_forever()
def search(origin, destin, date):
    """POST a one-way, one-adult flight query to Google QPX Express and print the JSON reply.

    :param origin: IATA airport code, e.g. "JFK"
    :param destin: IATA airport code, e.g. "LAX"
    :param date: departure date as "YYYY-MM-DD"
    """
    # SECURITY(review): API key hardcoded in source — move to an environment
    # variable or config file before sharing this code.
    api_key = "AIzaSyDotnuacvhryCdrIoYJ5b-yYPyN6tm4t-4"
    url = "https://www.googleapis.com/qpxExpress/v1/trips/search?key=" + api_key
    headers = {'content-type': 'application/json'}
    params = {
        "request": {
            "slice": [
                {
                    "origin": origin,
                    "destination": destin,
                    "date": date
                }
            ],
            "passengers": {
                "adultCount": 1
            },
            "solutions": 2,
            "refundable": "false"
        }
    }
    response = requests.post(url, data=json.dumps(params), headers=headers)
    data = response.json()
    print(data)

# NOTE: module-level call — the server starts as soon as this file is imported/run.
run()
990,730 | a425be0f34f303550cd9a2230c187b45eb282b2e | import array
# Demonstration of slicing on array objects; out-of-range slice bounds are
# clamped rather than raising (slicing never raises IndexError).
a = array.array('i',[10,20,40,30,50])
print(a[:])
print(a[2:])
print(a[:3])
print(a[1:4])
print(a[2:10])
print(a[-10:2])
print(a[::])
print(a[::1])
print(a[::2])
print(a[2::2])
print(a[:10:3])
print(a[::-1])
print(a[-2:-5:-1])
# Intentionally raises ValueError: slice step cannot be zero.
print(a[::0])
"""output:
array('i', [10, 20, 40, 30, 50])
array('i', [40, 30, 50])
array('i', [10, 20, 40])
array('i', [20, 40, 30])
array('i', [40, 30, 50])
array('i', [10, 20])
array('i', [10, 20, 40, 30, 50])
array('i', [10, 20, 40, 30, 50])
array('i', [10, 40, 50])
array('i', [40, 50])
array('i', [10, 30])
array('i', [50, 30, 40, 20, 10])
array('i', [30, 40, 20])
Traceback (most recent call last):
File "slicing.py", line 18, in <module>
print(a[::0])
ValueError: slice step cannot be zero
"""
""" note: 1)for going in forward direction stepzize should be +ve
2)and -ve to go in backward direction
3)if stepsize is 0 we get value error
4)in slicing we never get IndexErroe
5)while slicing if the data is in the range we get element else []"""
|
990,731 | 6d1143e2e0b876707062552ad3390997d15eb1be | from django.shortcuts import render,get_object_or_404
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from django.contrib.auth.models import User
from .models import Post
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin,UserPassesTestMixin
# dummy data
'''posts=[
{
'author':'SNEHA SINGH',
'title':'DJANGO',
'content':'I finally started this project',
'date_of_post':'3rd may'
},
{
'author':'AUASS',
'title':'STARTUP',
'content':'Think about it',
'date_of_post':'23rd may'
}
]'''
#home func will handle the traffic from our home page blog
#it will take the request arg,even if we don't use request we need to add it in order for our home func to work
#and within the func we will return what the user has to see when they are sent to this route
def home(request):
    """Render the blog home page with every post in context."""
    return render(request, 'blog/home.html', {'posts': Post.objects.all()})
class PostListView(ListView):
    """Paginated list of all posts, newest first (main blog page)."""
    model=Post
    template_name='blog/home.html' #<app>/<model>_<viewtype>.html
    context_object_name= 'posts'
    ordering=['-date_of_post'] #minus sign is given so that the posts are seen from new to old
    paginate_by=5
class UserPostListView(ListView):
    """Paginated list of one user's posts (username taken from the URL), newest first."""
    model=Post
    template_name='blog/user_posts.html' #<app>/<model>_<viewtype>.html
    context_object_name= 'posts'
    paginate_by=5
    def get_queryset(self):
        # 404 if the username in the URL does not exist.
        user=get_object_or_404(User,username=self.kwargs.get('username'))
        return Post.objects.filter(author=user).order_by('-date_of_post')
# now this view manages detail of each post,
# and we write this code by sticking to conventions so the code becomes shorter,doing it by another method
# not giving the template name instead creating the new template
class PostDetailView(DetailView):
    """Detail page for a single post; uses the default <app>/post_detail.html template."""
    model=Post
class PostCreateView(LoginRequiredMixin,CreateView):
    """Create a new post; requires login, author is set to the current user."""
    model=Post
    fields=['title','content']
    def form_valid(self,form):
        # Stamp the logged-in user as the author before saving.
        form.instance.author=self.request.user
        return super().form_valid(form)
# added this to redirct directly to home page--success_url= reverse_lazy('blog-home')
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit an existing post; only the post's author may update it."""
    model = Post
    fields = ['title', 'content']

    def form_valid(self, form):
        """Stamp the logged-in user as the author before saving."""
        form.instance.author = self.request.user
        return super().form_valid(form)

    def test_func(self):
        """UserPassesTestMixin hook: allow only the post's author."""
        # Simplified from `if cond: return True / return False` to a direct boolean.
        return self.request.user == self.get_object().author
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a post; only the post's author may delete it. Redirects home on success."""
    model = Post
    success_url = '/'

    def test_func(self):
        """UserPassesTestMixin hook: allow only the post's author."""
        # Simplified from `if cond: return True / return False` to a direct boolean.
        return self.request.user == self.get_object().author
def about(request):
    """Render the static about page."""
    return render(request,'blog/about.html',{'title':'about'})
990,732 | a8e82cd5f159cb2de63e31f03432738224e4a208 | #!/usr/bin/env python
from unittest import TestCase, TestLoader, TestSuite, TextTestRunner
from features.test_mysql_datatype import TestMySQLDataType
from features.test_mysql_function import TestMySQLFunction
from tables.test_mysql_table_join import TestMySQLTableJoin
from tables.test_mysql_table_constraint import TestMySQLTableConstraint
from tables.test_mysql_table_delete import TestMySQLTableDelete
from tables.test_mysql_table_select import TestMySQLTableSelect
from tables.test_mysql_table_select_group_by import TestMySQLTableSelectGroupBy
from tables.test_mysql_table_select_order_by import TestMySQLTableSelectOrderBy
from tables.test_mysql_table_trigger import TestMySQLTableTrigger
from tables.test_mysql_table_update import TestMySQLTableUpdate
from use_case.test_mysql_relationship_model import TestMySQLRelationshipModel
def my_suite():
    """Assemble the full MySQL test suite from every imported TestCase class."""
    loader = TestLoader()
    suite = TestSuite()
    test_cases = (
        TestMySQLDataType,
        TestMySQLFunction,
        TestMySQLRelationshipModel,
        TestMySQLTableConstraint,
        TestMySQLTableDelete,
        TestMySQLTableJoin,
        TestMySQLTableSelect,
        TestMySQLTableSelectGroupBy,
        TestMySQLTableSelectOrderBy,
        TestMySQLTableTrigger,
        TestMySQLTableUpdate,
    )
    for case in test_cases:
        suite.addTest(loader.loadTestsFromTestCase(case))
    return suite

if __name__ == '__main__':
    runner = TextTestRunner(verbosity=2)
    runner.run(my_suite())
|
990,733 | 704783e7085f9e962a62229107cc8d2a67b287f4 | first_index = urlstr.find('http://')
if first_index!=-1:
first_index+=7
urlstr = urlstr[first_index:]
first_index = urlstr.find('https://')
if first_index!=-1:
first_index+=8
urlstr = urlstr[first_index:]
first_index = urlstr.find('www.')
if first_index!=-1:
first_index+=4
urlstr = urlstr[first_index:]
lastIndex = urlstr.rfind('.')
if(lastIndex!=-1):
urlstr = urlstr[0:lastIndex]
return urlstr |
990,734 | b139e1ffb107536772d9895fd1d60d07ca9170d7 | import os
os.system("node bot.js")
text = "What is my name?"
|
990,735 | 882d4166a6b70dcf64642a81c535a481f5938ced | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from webapp.forms import signupform
from django.http import HttpResponseRedirect
# Create your views here.
def homeview(request):
    """Render the site home page (no login required)."""
    return render(request,'myapp/home.html')
@login_required
def javaview(request):
    """Render the Java course page; login required.

    Decorator normalized from `@login_required()` to bare `@login_required`
    for consistency with pythonview/aptview below (both forms are valid).
    """
    return render(request, 'myapp/java.html')
@login_required
def pythonview(request):
    """Render the Python course page; login required."""
    return render (request,'myapp/python.html')
@login_required
def aptview(request):
    """Render the aptitude page; login required."""
    return render (request,'myapp/apt.html')
def logout(request):
    """Render the logout confirmation page."""
    return render(request,'myapp/logout.html')
def Studentformview(request):
    """Handle student signup: show the form on GET, create the user on valid POST.

    BUG FIX: the original called form.save() without checking
    form.is_valid(), so any invalid submission crashed (or saved bad data)
    instead of re-rendering the form with errors.
    """
    form = signupform()
    if request.method == 'POST':
        form = signupform(request.POST)
        if form.is_valid():
            user = form.save()
            # Hash the raw password before persisting (save() stored it raw).
            user.set_password(user.password)
            user.save()
            return HttpResponseRedirect('/accounts/login')
    # GET, or invalid POST: render the (bound) form so errors are shown.
    return render(request, 'myapp/signout.html', {'form': form})
990,736 | 13ee29462aa50b86682529d6fb5444c211cba70b | from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from flashcards_api.database import Base
class Fact(Base):
    """SQLAlchemy model for a flashcard fact, stored in the `items` table."""
    __tablename__ = "items"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, index=True)
    # Foreign key to templates.id.
    template = Column(Integer, ForeignKey("templates.id"))
    # NOTE(review): back_populates="facts" requires a matching `facts`
    # relationship on the Resources model — confirm it exists.
    resources = relationship("Resources", back_populates="facts")
    cards = relationship("Card")
990,737 | e64e59ff4583a31961dfe884510bb783d4a92ca0 | from django.contrib import admin
from .models import Creator, Dish, Recipe, Comments
# Register each content model with the default admin site, in the same
# order the originals were registered.
for model in (Creator, Dish, Recipe, Comments):
    admin.site.register(model)
990,738 | 8d462b9135b6ea5a1fe3d4ec3d749f77780b5e90 | #----------------------------------------------------
# Lab 3: Connect Four class
# Purpose of class: Create a connect4 Game
#
# Author: Penelope Chen
# Collaborators/references:
#----------------------------------------------------
import copy
class Connect4:
    """A Connect Four game.

    The board is a list of COLS columns; each column is a list of chip
    strings ('Y' or 'R') stacked bottom-to-top, holding at most ROWS chips.
    """

    def __init__(self):
        '''
        Initializes an empty Connect Four board.
        Inputs: none
        Returns: None
        '''
        self.board = []   # list of columns; each column is a list of chips
        self.COLS = 7     # number of columns on board
        self.ROWS = 6     # maximum number of chips that can fit in each column
        # initialize board with 7 empty columns
        for _ in range(self.COLS):
            self.board.append([])

    def locationIsEmpty(self, col, row):
        '''
        Checks if a given location is empty, or if it contains a chip.
        Inputs:
            col (int) - column index of location to check
            row (int) - row index of location to check
        Returns: True if location is empty; False otherwise
        '''
        # Chips stack from the bottom, so (col, row) is occupied exactly
        # when the column already holds more than `row` chips.
        return len(self.board[col]) <= row

    def drawBoard(self):
        '''
        Displays the current state of the board, formatted with column and row
        indices shown along the top and the left side.
        Inputs: none
        Returns: None
        '''
        print(' ', ' 0 1 2 3 4 5 6')
        # Pad a copy of each column with '*' so every column has ROWS entries.
        grid = copy.deepcopy(self.board)
        for column in grid:
            column.extend(['*'] * (self.ROWS - len(column)))
        # Print rows from the top (ROWS-1) down to the bottom (0).
        for row in range(self.ROWS - 1, -1, -1):
            chips = [grid[col][row] for col in range(self.COLS)]
            print(row, '', ' '.join(chips))

    def update(self, col, chip):
        '''
        Drops the chip into the indicated column, col, as long as there is still
        room in the column.
        Inputs:
            col (int) - column index to place chip in
            chip (str) - colour of chip ('Y' or 'R')
        Returns: True if attempted update was successful; False otherwise
        '''
        column = self.board[col]
        # Bug fix: the original returned an unbound local when the column
        # was already full; reject the move explicitly instead.
        if len(column) >= self.ROWS:
            return False
        if chip in ('Y', 'R'):
            column.append(chip)
            return True
        return False

    def boardFull(self):
        '''
        Checks if the board has any remaining empty locations.
        Inputs: none
        Returns: True if the board has no empty locations (full); False otherwise
        '''
        # Bug fix: the original tested len(col) > ROWS (never true once
        # update() guards the column height) and reported full when ANY
        # column matched.  The board is full when EVERY column holds
        # ROWS chips.
        return all(len(column) >= self.ROWS for column in self.board)

    def isWinner(self, chip):
        '''
        Checks whether the given player (indicated by the chip) has a line of
        4 identically coloured chips. That line can be horizontal, vertical,
        or diagonal.
        Inputs:
            chip (str) - colour of chip
        Returns: True if that player currently has a winning line;
            False otherwise
        '''
        chip = str(chip)

        def at(x, y):
            # Chip at (col x, row y), or None if off-board / empty.
            # Bug fix: the original allowed negative indices (e.g. x-1
            # with x == 0) which wrap around in Python and produced
            # false left-diagonal wins.
            if 0 <= x < self.COLS and 0 <= y < len(self.board[x]):
                return self.board[x][y]
            return None

        # Direction vectors: horizontal, vertical, and both diagonals.
        # Bug fix: the original's final check omitted the right-diagonal
        # flag (it tested ldwin twice), so right-diagonal wins were
        # never reported.
        directions = ((1, 0), (0, 1), (1, 1), (1, -1))
        for x in range(self.COLS):
            for y in range(self.ROWS):
                for dx, dy in directions:
                    if all(at(x + k * dx, y + k * dy) == chip for k in range(4)):
                        return True
        return False
if __name__ == "__main__":
# TEST EACH METHOD THOROUGHLY HERE
# a few initial tests are provided to get you started, but more tests are required
print('**********************')
print('TESTING Connect4 CLASS')
print('**********************')
BOARD_COLUMNS = 7
BOARD_ROWS = 6
# Test 1:
# start by creating empty board and checking the contents of the board attribute
myGame = Connect4()
print('The initial state of the game board is:')
print(myGame.board)
# Test 2:
# are all of the locations on the board empty?
for column in range(BOARD_COLUMNS):
for row in range(BOARD_ROWS):
if not(myGame.locationIsEmpty(column, row)):
print('\nSomething is wrong with the locationIsEmpty method')
print('Column', column, 'and row', row, 'should be empty.')
# Test 3:
# does the empty board display properly?
myGame.drawBoard()
# is there a winner when no one has played?
print('There is a winner when no one has played', myGame.isWinner('Y'))
print('\nThere is a winner : Yellow', myGame.isWinner('Y'))
print('\nThere is a winner : Red', myGame.isWinner('R'))
# TO DO: write your own tests to verify that all of the methods work correctly
#Test update
print('is game full:',myGame.boardFull())
if not myGame.boardFull():
myGame.update(4,'Y')
myGame.drawBoard()
print('\nThere is a winner : Yellow', myGame.isWinner('Y'))
print('\nThere is a winner : Red', myGame.isWinner('R'))
myGame.update(3,'R')
myGame.update(2,'R')
myGame.update(1,'R')
myGame.update(0,'R')
myGame.drawBoard()
myGame.update(3,'Y')
myGame.update(2,'R')
myGame.update(2, 'Y')
myGame.update(1, 'R')
myGame.update(1, 'R')
myGame.update(1, 'Y')
myGame.drawBoard()
print('\nThere is a winner : Yellow', myGame.isWinner('Y'))
print('\nThere is a winner : Red', myGame.isWinner('R'))
|
990,739 | dfbb42fd106c1fe995fe1126db17705b446d4ed4 | from django.urls import path, include
from . import views
# URL routes for the shopping-list app; the `name` values are used by
# {% url %} tags and reverse() lookups elsewhere in the project.
urlpatterns = [
    path('', views.index, name='shopping_list-index'),
    path('add/', views.add_new_item, name='shopping_list-add'),
    # <item_id> is captured as a string and passed to the view.
    path('bought/<item_id>', views.bought_item, name='shopping_list-bought'),
    path('delete_item/', views.delete_item, name='shopping_list-delete'),
    path('delete_all/', views.delete_all, name='delete_all'),
]
990,740 | 84d7cb0579235b1a8744e1e12746b800795edbdd | import os
import numpy as np
from collections import namedtuple
# One record per node in the instance file; `pd_mark` is 0 when the
# time-window start exceeds 130, else 1 (set in Importer._return_node_lists).
# NOTE(review): presumably a pickup/delivery flag -- confirm with consumers.
Customer = namedtuple("Customer", ['index', 'x', 'y', 'demand', 'start', 'end', 'service', 'pd_mark'])
# Project root, resolved from the current working directory.
BASE_DIR = os.path.abspath('.')
class Importer(object):
    """
    Read the meta data from the file.

    Parses a Solomon-format VRPTW instance: the VEHICLE header block
    (fleet size and capacity) and the CUSTOMER table (one node per
    line), then precomputes the Euclidean distance matrix.
    """
    def __init__(self):
        # Raw lines of the instance file.
        self.file_lines = list()
        # Header info: 'NUMBER' (vehicles) and 'CAPACITY'.
        self.info = {}
        # NOTE(review): coordinates/demand_list are never filled by this
        # class -- callers build their own (see init_data); confirm intent.
        self.coordinates = list()
        self.demand_list = list()
        # Pairwise Euclidean distances between all nodes (depot included).
        self.distance_matrix = None
        # Customer namedtuples in file order; index 0 is the depot.
        self.customers = list()

    def import_data(self, filename):
        # Full pipeline: read file, locate/parse header, parse nodes,
        # then build the distance matrix.
        self._read_file(filename)
        self.info, break_lines = self._read_info()
        self._return_node_lists(break_lines)
        self._cal_distance_matrix()

    def _read_file(self, my_filename):
        # Load the whole instance file into memory as a list of lines.
        file_lines = []
        with open(my_filename, "rt") as f:
            file_lines = f.read().splitlines()
        self.file_lines = file_lines

    def _read_info(self):
        """
        The data information vehicle count, capacity ...

        Returns (info, (vehicle_row_index, first_customer_row_index)).
        """
        my_filelines = self.file_lines
        info = dict()
        for i, line in enumerate(my_filelines):
            if line.startswith("VEHICLE"):
                # Vehicle values sit two lines below the VEHICLE banner.
                vehicle_pro_start = i + 2
            elif line.startswith("CUSTOMER"):
                # Customer rows start three lines below the CUSTOMER banner.
                customer_pro_start = i + 3
            elif line.startswith("NUMBER"):
                # Use the header column names (NUMBER ... CAPACITY) as keys.
                splited = line.split(' ')
                info[splited[0]] = 0
                info[splited[-1]] = 0
        return info, (vehicle_pro_start, customer_pro_start)

    def _return_node_lists(self, my_breaklines):
        """
        read the node demand and coordinates information
        """
        my_filelines = self.file_lines
        v_start, c_start = my_breaklines
        for i, line in enumerate(my_filelines):
            if v_start == i:
                # Single vehicle row: "<number> ... <capacity>".
                vehicle_part = line.strip().split(' ')
                self.info['NUMBER'], self.info['CAPACITY'] = int(vehicle_part[0]), int(vehicle_part[-1])
            if c_start <= i:
                # Customer row: id, x, y, demand, ready, due, service.
                c_part = line.strip().split(' ')
                c_store = list()
                for j in c_part:
                    # Runs of spaces yield empty strings; skip anything
                    # that is not an integer field.
                    try:
                        c_store.append(int(j))
                    except ValueError:
                        continue
                if c_store != []:
                    # pd_mark = 0 when the ready time (field 4) exceeds 130,
                    # else 1.  NOTE(review): the threshold 130 is a magic
                    # number -- confirm its meaning with the model using it.
                    if c_store[4] > 130:
                        self.customers.append(
                            Customer(c_store[0], c_store[1], c_store[2], c_store[3], c_store[4], c_store[5], c_store[6], 0))
                    else:
                        self.customers.append(
                            Customer(c_store[0], c_store[1], c_store[2], c_store[3], c_store[4], c_store[5], c_store[6], 1))

    def _cal_distance_matrix(self):
        """
        distance matrix

        Fills self.distance_matrix with Euclidean distances between every
        pair of nodes; the diagonal stays 0.
        """
        customer_count = len(self.customers)
        self.distance_matrix = np.zeros([customer_count, customer_count])
        for i in range(customer_count):
            for j in range(customer_count):
                if i == j:
                    continue
                else:
                    distance = np.sqrt(np.square(self.customers[i].x - self.customers[j].x) +
                                       np.square(self.customers[i].y - self.customers[j].y))
                    self.distance_matrix[i][j] = distance
def init_data(filename):
    """Load a Solomon instance file and unpack the pieces a solver needs.

    Returns (customers, depot, demand_list, vehicle_capacity,
    coordinates, distance_matrix).  `customers` excludes the depot,
    while demand_list, coordinates and distance_matrix include it.
    """
    importer = Importer()
    importer.import_data(filename)
    # Node 0 is the depot; every other node is a customer.
    depot = importer.customers[0]
    customers = importer.customers[1:]
    # Per-node demand and (x, y) position, depot included.
    demand_list = [node.demand for node in importer.customers]
    coordinates = [(node.x, node.y) for node in importer.customers]
    vehicle_capacity = int(importer.info["CAPACITY"])
    distance_matrix = importer.distance_matrix
    return customers, depot, demand_list, vehicle_capacity, coordinates, distance_matrix
# Script entry point: load the bundled 25-customer C101 instance.
if __name__ == '__main__':
    # Bug fix: the original embedded backslashes in one string
    # ('data\Solomon_25\C101.25.txt'), which is Windows-only and contains
    # the invalid escape sequences '\S' and '\C'; build the path portably.
    file_name = os.path.join(BASE_DIR, 'data', 'Solomon_25', 'C101.25.txt')
    customers, depot, demand_list, vehicle_capacity, coordinates, distance_matrix = init_data(file_name)
|
990,741 | d1a0f71b586e59bc455222f94ed3c02c6fa3d2dc | import numpy as np
from sklearn.metrics import silhouette_score
from sklearn import datasets
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
# Generate two well-separated synthetic clusters, fit k-means with the
# matching cluster count, and report the silhouette score of the
# resulting labelling (values near 1 indicate well-separated clusters).
features, _ = make_blobs(
    n_samples=1000,
    n_features=10,
    centers=2,
    cluster_std=0.5,
    shuffle=True,
    random_state=1,
)
clusterer = KMeans(n_clusters=2, random_state=1)
clusterer.fit(features)
target_predicted = clusterer.labels_
print(silhouette_score(features, target_predicted))
|
990,742 | 01339de9ace321012457b5d46240dcc50aa14d82 | jon = len(raw_input().rstrip())
# Python 2 script: compare the lengths of two stdin lines (`jon` is read
# the same way on the preceding line); trailing whitespace is stripped.
doctor = len(raw_input().rstrip())
if jon >= doctor:
    print("go")
else:
    print("no")
|
990,743 | 5077e3d9af0f7b8ae69872df7444a6040a0da7d1 | lw,up=input().split()
# Print the Armstrong (narcissistic) numbers in [lw, up), space-separated
# on one line; lw and up are read/split on the preceding input() line.
# NOTE(review): range(lw, up) excludes `up` -- confirm whether the upper
# bound should be inclusive.
lw = int(lw)
up = int(up)
lst = []
for nm in range(lw, up):
    temp = nm                    # keep the original value for comparison
    sum = 0                      # NOTE(review): shadows the builtin sum()
    order = len(str(nm))         # digit count = the power to raise each digit to
    # Accumulate digit**order over all digits of nm (nm is consumed).
    while nm > 0:
        dg = nm % 10
        sum += dg**order
        nm = nm//10
    if(temp == sum):
        lst.append(temp)
# Emit space separators between values but none after the last one.
for i in range(0, len(lst)):
    if i < len(lst)-1:
        k = ' '
    else:
        k = ''
    print(lst[i], end=k)
|
990,744 | cd885407d81f77dfaac316d2478f5bea0ae5ee32 | from django import forms
from models import Video
class VideoForm(forms.ModelForm):
    """Upload/edit form for Video with Bootstrap-styled widgets."""
    class Meta:
        model = Video
        fields = ['video', 'title', 'description', 'categories', 'tags']
        # Widget overrides attach element ids, Bootstrap classes and
        # placeholders to the auto-generated fields.
        widgets = {
            'title': forms.TextInput(attrs={
                'id': 'up_vid_title',
                'class': 'form-control',
                'placeholder': 'Title',
            }),
            'description': forms.Textarea(attrs={
                'id': 'up_vid_des',
                'class': 'form-control',
                'placeholder': 'Description',
                'wrap': 'hard',
                'rows': 3,
            }),
            'categories': forms.SelectMultiple(attrs={
                'id': 'up_vid_cat',
                'class': 'form-control',
            }),
            'tags': forms.TextInput(attrs={
                'id': 'tags_panel',
                'class': 'form-control',
                'placeholder': 'Tags',
            }),
            # Restrict the file picker to video MIME types.
            'video': forms.FileInput(attrs={
                'accept': 'video/*',
                'class': 'file_input'
            }),
        }
|
990,745 | 06619d566b54ff9270937ebc9635dc30c0a455c3 | # ! file wraob.py
##############################################################
################# University of L'Aquila #################
################# PST ABruzzo #################
################# MM5 python interface V 0.1 #################
##############################################################
" Output routine for a raob sounding profile. "
import mm5_class
import mm5_proj
from math import log, exp, hypot, atan2, sin, cos, fmod
from string import split, atoi
def crh(t, q, prs):
    """Relative humidity: mixing ratio q over the saturation mixing
    ratio at temperature t (K) and pressure prs (Pa)."""
    # Saturation vapour pressure in hPa: Bolton-style fit over water
    # at or above freezing, an ice fit below.
    if t >= 273.15:
        sat_vap = 6.112 * exp(17.67 * ((t - 273.15) / (t - 29.65)))
    else:
        sat_vap = 6.11 * exp(22.514 - (6150. / t))
    sat_vap = sat_vap * 100.0           # hPa -> Pa
    sat_mix = 0.622 * sat_vap / (prs - sat_vap)
    return q / sat_mix
def tc(t):
    """Convert a temperature from kelvin to degrees Celsius."""
    return t - 273.15
def cprs(sigma, ptop, pp, ps):
    """Pressure at a sigma level: reference pressure (ps*sigma + ptop)
    plus the perturbation pp."""
    reference = ps * sigma + ptop
    return reference + pp
def hgt(sigma, ptop, bslp, bslt, blr, ter):
    """Geopotential height (relative to the terrain height `ter`) of a
    sigma level in the MM5 base-state reference atmosphere."""
    g = 9.81      # gravity (m s^-2)
    r = 287.04    # dry-air gas constant (J kg^-1 K^-1)
    # Reference surface pressure from the base-state sounding at `ter`.
    ps0 = bslp * exp((-1. * bslt / blr) +
                     ((((bslt / blr) ** 2.) - (2. * g * (ter / (blr * r)))) ** 0.5))
    ps0 = ps0 - ptop
    # Hydrostatic pressure at this sigma level.
    phydro = ps0 * sigma + ptop
    # Invert the base-state p(z) relation to get height above sea level.
    log_ratio = log(phydro / bslp)
    z = -1. * (((r * blr / 2. / g) * (log_ratio ** 2)) +
               ((r * bslt / g) * log_ratio))
    return z - ter
def write(input, start, nstep, lat, lon):
    "Transforms mm5 input to raob sound"
    # Vertical coordinate description: name, level count, level values.
    levels = input.get_vertcoord()
    levval = levels['values']
    proj = mm5_proj.projection(input)
    (i,j) = proj.latlon_to_ij(lat,lon)
    # Horizontal interpolation scheme (bilinear); the alternatives are
    # kept here commented out for reference.
    # interpol = proj.nearval
    interpol = proj.blinval
    # interpol = proj.lwval
    # interpol = proj.cubconval
    terrain = input.get_field('terrain', 0)
    if (terrain == -1):
        return -1
    # Terrain height interpolated to the sounding location.
    terr = interpol(lat,lon,terrain['values'][0],1.0)
    # Build the output file name from the field date (YYYY MM DD HH -> .asc).
    mydat = split(terrain['date'], ':')
    hour = split(mydat[2], ' ')
    outdat = mydat[0] + mydat[1] + hour[0] + hour[1] + '.asc'
    del terrain
    try:
        fout = open(outdat, "w")
    except Exception, e:
        print "Cannot open output file: ", e
        return -1
    icount = 1
    for timestep in xrange(start,start+nstep):
        nlevs = levels['nlevs']
        t = input.get_field('t', timestep)
        if (t == -1):
            return -1
        # Temperature profile (K) interpolated to the sounding point.
        rt = []
        for k in xrange(nlevs):
            rt.append(interpol(lat,lon,t['values'][k],1.0))
        mydat = split(t['date'], ':')
        hour = split(mydat[2], ' ')
        if (levels['name'] == 'sigma'):
            # Sigma-coordinate input: derive pressure, RH and height from
            # the base state plus the pressure perturbation field.
            ptop = input.get_val('ptop')
            if (input.version == 2): ptop = ptop * 100.0
            bslp = input.get_val('basestateslp')
            bslt = input.get_val('basestateslt')
            blr = input.get_val('basestatelapserate')
            ps = input.get_field('pstarcrs',timestep)
            if (ps == -1):
                return -1
            rps = interpol(lat,lon,ps['values'][0],1.0)
            if (input.version == 2): rps = rps * 1000.0
            pp = input.get_field('pp', timestep)
            if (pp == -1):
                return -1
            q = input.get_field('q',timestep)
            if (q == -1):
                return -1
            rprs = []
            rrh = []
            rhg = []
            # Walk levels top-down so the output lists run bottom-up;
            # heights are converted to km (* 0.001).
            for k in xrange(nlevs-1,-1,-1):
                rpp = interpol(lat,lon,pp['values'][k],1.0)
                rq = interpol(lat,lon,q['values'][k],1.0)
                xp = cprs(levval[k],ptop,rpp,rps)
                rprs.append(xp)
                rrh.append(crh(rt[k],rq,xp))
                rhg.append(hgt(levval[k],ptop,bslp,bslt,blr,terr) * 0.001)
            del pp
            del q
            # Temperatures converted to Celsius, same bottom-up order.
            rrt = []
            for k in xrange(nlevs-1,-1,-1):
                rrt.append(tc(rt[k]))
            # Pa -> hPa.
            for k in xrange(nlevs):
                rprs[k] = rprs[k] * 0.01
        else:
            # Pressure-coordinate input: RH and height fields are
            # available directly; RH is given in percent (* 0.01).
            rh = input.get_field('rh', timestep)
            if (rh == -1):
                return -1
            hg = input.get_field('h', timestep)
            if (hg == -1):
                return -1
            rprs = []
            rrh = []
            rhg = []
            rrt = []
            for k in xrange(nlevs):
                rprs.append(levval[k] * 0.01)
                rrh.append(interpol(lat,lon,rh['values'][k],1.0) * 0.01)
                rhg.append(interpol(lat,lon,hg['values'][k],1.0) * 0.001)
                rrt.append(tc(rt[k]))
            del rh
            del hg
        # Drop the lowest level, then any further levels that lie below it
        # (i.e. below the surface height htp).
        htp = rhg.pop(0); xx = rrh.pop(0);
        xx = rprs.pop(0); xx = rrt.pop(0);
        nlevs = nlevs - 1;
        while (rhg[0] < htp):
            xx = rhg.pop(0); xx = rrh.pop(0);
            xx = rprs.pop(0); xx = rrt.pop(0);
            nlevs = nlevs - 1;
        # Flag cloud if any remaining level is near saturation (RH > 0.9).
        icloud = 0
        for k in xrange(nlevs):
            if (rrh[k] > 0.9): icloud = 1
        # Two-digit year relative to 1900 (and wrapped past 2000).
        year = atoi(mydat[0]) - 1900
        if (year >= 100): year = year - 100
        month = atoi(mydat[1])
        day = atoi(hour[0])
        hour = atoi(hour[1])
        istat = 242   # NOTE(review): hard-coded station id -- confirm meaning
        irain = 0
        # Fixed-width sounding header: date, level count, flags, position.
        header = ('%2d%2d%2d%2d%3d %3d%2d%2d%7d%6.2f%6.2f\n' %
            (year,month,day,hour,nlevs,istat,icloud,irain,icount,lat,lon))
        icount = icount + 1
        fout.write(header)
        # One data line per level: pressure (hPa), height (km), T (C), RH.
        for k in xrange(nlevs):
            valstr = ('%6.1f %6.3f %6.1f %5.3f\n' % \
                (rprs[k],rhg[k],rrt[k],rrh[k]))
            fout.write(valstr)
    fout.close()
    del levels
    del header
    return 0
__path__ = ''
|
990,746 | 47031630dbbb3301b1fc3a80381023e78662f9db | import logging,os,tempfile,functools
if __debug__:  # Debug (i.e. normal) mode; under optimized mode (-O on the command line) no logging is configured.
    logger = logging.getLogger('Logger')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(os.path.join(tempfile.gettempdir(), 'logged.log'))
    print('Note!:creat logfile at'+tempfile.gettempdir())  # tempfile.gettempdir() returns the current temp-file directory
    logger.addHandler(handler)
    def logged(function):
        "Decorator that logs the name, arguments and result (or exception) of any function it wraps; for usage see the grepword_thread file."
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            # Render the call as "called:name(arg1,...,kw=val,...)".
            log = 'called:' + function.__name__ + '('
            log += ','.join(['{0!r}'.format(a) for a in args] +
                            ['{0!s}={1!r}'.format(k, v) for k, v in kwargs.items()])
            result = exception = None
            try:
                result = function(*args, **kwargs)
                return result
            except Exception as err:
                exception = err
            finally:
                # Append either the return value or the exception, then
                # emit the record and re-raise any captured exception.
                log += ((") ->" + str(result)) if exception is None
                        else "){0}:{1}".format(type(exception), exception))
                logger.debug(log)
                if exception is not None:
                    raise exception
        return wrapper
else:
    # Optimized mode: logged is a no-op pass-through decorator.
    def logged(function):
        return function
'''
@logged
def discount_price(price,percentage,make_in=False):
result=price+percentage
return result
f=discount_price(2,3)
print(f)
'''
990,747 | 6abe213cbe7c8204a3d28e47638a27f4a3be4e74 | # Generated by Django 3.1.3 on 2020-11-22 06:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Contact table for contact-form entries."""

    dependencies = [
        ('kidneycare', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('email', models.EmailField(max_length=255)),
                ('subject', models.CharField(max_length=255, null=True)),
                ('mobileno', models.CharField(max_length=13, null=True)),
                ('messages', models.CharField(max_length=255)),
                # NOTE(review): nullable and not auto_now_add -- callers
                # must set the creation timestamp themselves.
                ('createdon', models.DateTimeField(null=True)),
            ],
        ),
    ]
|
990,748 | b7a262350f512dba5219ef2786113ff11ab05f4f | import numpy as np
import os
import sys
import pdb
#import ase_factorization
#import ase_factorization_via_stan_vb
#import ase_factorization_via_pymc3_lmm_vb
#import ase_factorization_via_pymc3_lmm_vb
#import ase_factorization_via_pymc3_double_lmm_vb
#import ase_factorization_via_pymc3_binomial_double_lmm_vb
#import ase_factorization_via_pymc3_binomial_lmm_vb
#import ase_factorization_via_pymc3_lmm_mb_vb
#import ase_factorization_via_pymc3_lmm_vb
#import ase_factorization_via_pymc3_lmm_dirichlet_vb
#import ase_factorization_via_pymc3_lmm_exponential_vb
#import ase_factorization_via_pymc3_lmm_horseshoe_vb
#import ase_factorization_via_pca
#import ase_factorization_via_pca_regress_out_cell_line
#import ase_factorization_via_als_fixed_conc
#import ase_factorization_via_pymc3_lmm_mixture_vb
#import ase_factorization_via_pymc3_lmm_cell_intercept_vb
#import ase_factorization_via_pymc3_lmm_vb
#import ase_factorization_via_pymc3_lmm_ard_vb
#import ase_factorization_via_pymc3_lmm_mixture_ard_vb
#import ase_factorization_via_pymc3_lmm_mixture_cell_intercept_vb
#import ase_factorization_via_als
#import ase_factorization_via_als_folded_binomial
import ase_factorization_via_fast_em_als_folded_beta_binomial
def load_in_ase_data(ase_file):
    """Parse a tab-delimited ASE matrix of 'ref/total' strings.

    The file has one header row and one id column (both discarded).
    'NA' cells become NaN; every other cell keeps the minor-allele
    count min(ref, total - ref).  Returns (allelic, total), each
    transposed to samples x sites.
    """
    raw = np.loadtxt(ase_file, dtype=str, delimiter='\t', comments='*')
    counts = raw[1:, 1:]
    minor = np.full(counts.shape, np.nan)
    totals = np.full(counts.shape, np.nan)
    for idx, entry in np.ndenumerate(counts):
        if entry == 'NA':
            continue
        parts = entry.split('/')
        ref, tot = int(parts[0]), int(parts[1])
        minor[idx] = min(ref, tot - ref)
        totals[idx] = tot
    return minor.T, totals.T
def load_in_non_min_ase_data(ase_file):
    """Parse a tab-delimited ASE matrix of 'ref/total' strings.

    Unlike load_in_ase_data, the raw reference count is kept (no
    minor-allele folding).  'NA' cells become NaN.  Returns
    (ref_counts, total_counts), each transposed to samples x sites.
    """
    raw = np.loadtxt(ase_file, dtype=str, delimiter='\t', comments='*')
    counts = raw[1:, 1:]      # drop header row and site-id column
    n_sites, n_samples = counts.shape
    refs = np.empty((n_sites, n_samples))
    totals = np.empty((n_sites, n_samples))
    for r in range(n_sites):
        for c in range(n_samples):
            cell = counts[r, c]
            if cell == 'NA':
                refs[r, c] = np.nan
                totals[r, c] = np.nan
            else:
                pieces = cell.split('/')
                refs[r, c] = int(pieces[0])
                totals[r, c] = int(pieces[1])
    return np.transpose(refs), np.transpose(totals)
def load_in_non_min_ase_data_min_thresh(ase_file, thresh):
    """Parse a tab-delimited ASE matrix, masking low-coverage sites.

    Like load_in_non_min_ase_data, but any cell whose total count is
    below `thresh` is set to NaN along with its reference count.
    Returns (ref_counts, total_counts), transposed to samples x sites.
    """
    raw = np.loadtxt(ase_file, dtype=str, delimiter='\t', comments='*')
    counts = raw[1:, 1:]
    refs = np.full(counts.shape, np.nan)
    totals = np.full(counts.shape, np.nan)
    for idx, entry in np.ndenumerate(counts):
        if entry == 'NA':
            continue
        parts = entry.split('/')
        ref, tot = int(parts[0]), int(parts[1])
        # Mask cells with insufficient total coverage.
        if tot < thresh:
            continue
        refs[idx] = ref
        totals[idx] = tot
    return refs.T, totals.T
def load_in_ase_data_max_counts(ase_file, max_val):
    """Parse a tab-delimited ASE matrix, capping total counts at max_val.

    Cells keep the minor-allele count min(ref, total - ref); when the
    total exceeds max_val, both counts are rescaled so the total equals
    max_val (minor count rounded with np.round).  'NA' cells become NaN.
    Returns (allelic, total), each transposed to samples x sites.
    """
    raw = np.loadtxt(ase_file, dtype=str, delimiter='\t', comments='*')
    counts = raw[1:, 1:]
    minor = np.full(counts.shape, np.nan)
    totals = np.full(counts.shape, np.nan)
    for idx, entry in np.ndenumerate(counts):
        if entry == 'NA':
            continue
        parts = entry.split('/')
        ref, tot = int(parts[0]), int(parts[1])
        folded = min(ref, tot - ref)
        if tot > max_val:
            # Downsample proportionally to the cap (np.round preserves
            # the original's banker's-rounding behaviour).
            folded = int(np.round(max_val * (folded / float(tot))))
            tot = max_val
        minor[idx] = folded
        totals[idx] = tot
    return minor.T, totals.T
def load_in_ase_data_min_counts(ase_file, min_val):
    """Parse a tab-delimited ASE matrix, masking low-coverage sites.

    Cells keep the minor-allele count min(ref, total - ref); any cell
    whose total is below min_val becomes NaN, as do 'NA' cells.
    Returns (allelic, total), each transposed to samples x sites.
    """
    raw = np.loadtxt(ase_file, dtype=str, delimiter='\t', comments='*')
    counts = raw[1:, 1:]
    minor = np.full(counts.shape, np.nan)
    totals = np.full(counts.shape, np.nan)
    for idx, entry in np.ndenumerate(counts):
        if entry == 'NA':
            continue
        parts = entry.split('/')
        ref, tot = int(parts[0]), int(parts[1])
        if tot < min_val:
            # Insufficient coverage: leave this cell as NaN.
            continue
        minor[idx] = min(ref, tot - ref)
        totals[idx] = tot
    return minor.T, totals.T
def add_intercept_column_to_matrix(X):
    """Return X with a leading column of ones (an intercept term)."""
    ones = np.ones((X.shape[0], 1))
    return np.hstack((ones, X))
def make_cell_line_vector_into_matrix(Z):
    """One-hot encode cell-line assignments, dropping the last line.

    Z holds one cell-line index (0 .. L-1) per cell.  Returns an
    (n_cells x L-1) indicator matrix where the highest-indexed line is
    the reference level, encoded as an all-zero row.
    """
    n_lines = len(np.unique(Z))
    n_cells = len(Z)
    indicator = np.zeros((n_cells, n_lines - 1))
    for cell, line in enumerate(Z):
        if line != (n_lines - 1):
            indicator[cell, int(line)] = 1.0
    return indicator
def train_ase_factorization_model(ase_file, covariate_file, sample_overlap_file, batch_overlap_file, k, model_name, output_dir):
if model_name == 'ase_factorization_via_pymc3_lmm_mb_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_mb_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_vb_non_min_counts':
miny = 3
allelic_counts, total_counts = load_in_non_min_ase_data_min_thresh(ase_file, miny)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization_thresh_' + str(miny))
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_vb_min_counts':
miny = 10
allelic_counts, total_counts = load_in_ase_data_min_counts(ase_file, miny)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization_min_counts_' + str(miny))
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_ard_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_ard_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_global_af_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 2))
global_af = np.nansum(allelic_counts,axis=1)/np.nansum(total_counts,axis=1)
cov_plus_intercept[:,1] = global_af
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_mixture_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_mixture_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_2_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_mixture_ard_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_mixture_ard_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_mixture_global_af_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 2))
global_af = np.nansum(allelic_counts,axis=1)/np.nansum(total_counts,axis=1)
cov_plus_intercept[:,1] = global_af
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_mixture_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_cell_intercept_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_cell_intercept_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_mixture_cell_intercept_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_mixture_cell_intercept_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_mixture_cell_intercept_and_global_af_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 2))
global_af = np.nansum(allelic_counts,axis=1)/np.nansum(total_counts,axis=1)
cov_plus_intercept[:,1] = global_af
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_mixture_cell_intercept_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_cell_intercept_and_global_af_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 2))
global_af = np.nansum(allelic_counts,axis=1)/np.nansum(total_counts,axis=1)
cov_plus_intercept[:,1] = global_af
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_cell_intercept_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_vb_max_counts':
maxy = 75
allelic_counts, total_counts = load_in_ase_data_max_counts(ase_file, maxy)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization_max_counts_' + str(maxy))
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_double_lmm_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz_sample = np.loadtxt(sample_overlap_file)
zz_batch = np.loadtxt(batch_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_double_lmm_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z_sample=zz_sample, z_batch=zz_batch)
elif model_name == 'ase_factorization_via_pymc3_binomial_double_lmm_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
pdb.set_trace()
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz_sample = np.loadtxt(sample_overlap_file)
zz_batch = np.loadtxt(batch_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_binomial_double_lmm_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z_sample=zz_sample, z_batch=zz_batch)
elif model_name == 'ase_factorization_via_pymc3_binomial_lmm_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz_sample = np.loadtxt(sample_overlap_file)
#zz_batch = np.loadtxt(batch_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_binomial_lmm_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z_sample=zz_sample)
elif model_name == 'ase_factorization_via_pymc3_lmm_dirichlet_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_dirichlet_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_exponential_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_exponential_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_horseshoe_vb':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_horseshoe_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pymc3_lmm_horseshoe_vb_max_counts':
maxy = 100
allelic_counts, total_counts = load_in_ase_data_max_counts(ase_file, maxy)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pymc3_lmm_horseshoe_vb.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pca':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pca.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pca_non_min_counts_regress_out_cell_line':
allelic_counts, total_counts = load_in_non_min_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pca_regress_out_cell_line.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_pca_regress_out_cell_line':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
ase_factorization_obj = ase_factorization_via_pca_regress_out_cell_line.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization')
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=cov_plus_intercept, z=zz)
elif model_name == 'ase_factorization_via_als':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
zz_mat = make_cell_line_vector_into_matrix(zz)
full_cov = np.hstack((cov_plus_intercept, zz_mat))
ase_factorization_obj = ase_factorization_via_als.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization', random_seed=4)
elif model_name == 'ase_factorization_via_em_als_folded_beta_binomial':
allelic_counts, total_counts = load_in_non_min_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
zz_mat = make_cell_line_vector_into_matrix(zz)
full_cov = np.hstack((cov_plus_intercept, zz_mat))
ase_factorization_obj = ase_factorization_via_em_als_folded_beta_binomial.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization', random_seed=4)
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=full_cov, z=zz)
elif model_name == 'ase_factorization_via_fast_em_als_folded_beta_binomial':
allelic_counts, total_counts = load_in_non_min_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
zz_mat = make_cell_line_vector_into_matrix(zz)
full_cov = np.hstack((cov_plus_intercept, zz_mat))
ase_factorization_obj = ase_factorization_via_fast_em_als_folded_beta_binomial.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization', random_seed=4)
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=full_cov, z=zz)
elif model_name == 'ase_factorization_via_als_folded_binomial':
allelic_counts, total_counts = load_in_ase_data_min_counts(ase_file, 2)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
zz_mat = make_cell_line_vector_into_matrix(zz)
full_cov = np.hstack((cov_plus_intercept, zz_mat))
ase_factorization_obj = ase_factorization_via_als_folded_binomial.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization', random_seed=4)
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=full_cov)
elif model_name == 'ase_factorization_via_als_max_counts':
maxy = 100
allelic_counts, total_counts = load_in_ase_data_max_counts(ase_file, maxy)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
zz_mat = make_cell_line_vector_into_matrix(zz)
full_cov = np.hstack((cov_plus_intercept, zz_mat))
ase_factorization_obj = ase_factorization_via_als.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization_max_' + str(maxy), random_seed=2)
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=full_cov)
elif model_name == 'ase_factorization_via_als_fixed_conc':
allelic_counts, total_counts = load_in_ase_data(ase_file)
if covariate_file != 'NA':
cov = np.loadtxt(covariate_file)
cov_plus_intercept = add_intercept_column_to_matrix(cov)
else:
cov_plus_intercept = np.ones((allelic_counts.shape[0], 1))
zz = np.loadtxt(sample_overlap_file)
zz_mat = make_cell_line_vector_into_matrix(zz)
full_cov = np.hstack((cov_plus_intercept, zz_mat))
ase_factorization_obj = ase_factorization_via_als_fixed_conc.ASE_FACTORIZATION(K=k, output_root=output_dir + '_ase_factorization', random_seed=4)
ase_factorization_obj.fit(allelic_counts=allelic_counts, total_counts=total_counts, cov=full_cov)
# --- Command-line entry point ---
# Positional arguments (no flag parsing):
#   1: ASE counts file, 2: covariate file ('NA' to skip), 3: sample-overlap file,
#   4: batch-overlap file, 5: number of latent factors K, 6: model name,
#   7: output directory/prefix.
ase_file = sys.argv[1]
covariate_file = sys.argv[2]
sample_overlap_file = sys.argv[3]
batch_overlap_file = sys.argv[4]
k = int(sys.argv[5])  # number of latent factors
model_name = sys.argv[6]  # selects the elif branch in train_ase_factorization_model
output_dir = sys.argv[7]
train_ase_factorization_model(ase_file, covariate_file, sample_overlap_file, batch_overlap_file, k, model_name, output_dir)
990,749 | 4cfffc4b3fc43ede5c9a6c91da25d4deb5783351 | import numpy as np
import ast
import simpleeval # https://github.com/danthedeckie/simpleeval
"""
$ pip install simpleeval
"""
class UserFuncEval:
    """Safely evaluate a user-supplied math expression over a numpy domain.

    Wraps simpleeval.SimpleEval with numpy-aware operators (so ``*``, ``**``
    and ``%`` broadcast element-wise over the ``x`` array) and a whitelist
    of math functions.
    """

    def __init__(self):
        # Extra functions exposed to user expressions, all numpy-vectorized.
        other_functions = {"sin": np.sin, "cos": np.cos, "tan": np.tan, "abs": abs}
        other_functions.update({"mod": np.mod, "sign": np.sign, "floor": np.floor, "ceil": np.ceil})
        self.s = simpleeval.SimpleEval()
        # Swap scalar operators for numpy equivalents so expressions
        # evaluate element-wise over arrays.
        self.s.operators[ast.Mult] = np.multiply
        self.s.operators[ast.Pow] = np.power
        self.s.operators[ast.Mod] = np.mod
        self.s.functions = simpleeval.DEFAULT_FUNCTIONS.copy()
        # Drop defaults that are non-numeric or non-deterministic.
        del self.s.functions["str"]
        del self.s.functions["rand"]
        del self.s.functions["randint"]
        self.s.functions.update(other_functions)
        # Evaluation domain and constants visible to the expression.
        self.s.names = {"x": np.arange(256), "pi": np.pi}
        self.output = None  # result of the last successful eval

    def update(self, input, var_substitutions=None):
        """Evaluate ``input`` (user expression string); return True on success.

        Returns False on any parse/eval failure (caller may e.g. turn the
        input field red). ``var_substitutions`` optionally adds/overrides
        names visible to the expression.
        """
        if var_substitutions:
            self.s.names.update(var_substitutions)
        try:
            self.output = self.s.eval(input)
        except Exception:
            # bug fix: was a bare ``except:``, which would also swallow
            # KeyboardInterrupt/SystemExit; Exception is broad enough here.
            return False
        return True

    def getOutput(self):
        """Return the result of the most recent successful update(), or None."""
        return self.output

    def getValidOperations(self):
        """Return the set of function names allowed in user expressions."""
        return set(self.s.functions.keys())
|
990,750 | b6a422ab6aee5946f3b36c56a59f2e0d9e15daf2 | # -*- coding: utf-8 -*-
# !/usr/bin/env python
from flask import Blueprint
import requests
import pygal
from pygal.style import LightColorizedStyle as lcs,LightenStyle as ls
github = Blueprint('github',__name__)
@github.route('/getTop30StarPythonProject/')
def getTop30StarPythonProject():
    """Fetch the 30 most-starred Python repos from the GitHub search API
    and render them to an SVG bar chart on disk."""
    # Perform the API call; language:python restricts results to Python repos,
    # sorted by star count (GitHub returns 30 items per page by default).
    url='https://api.github.com/search/repositories?q=language:python&sort=stars'
    r=requests.get(url)
    # 200 indicates the request succeeded
    print(r.status_code)
    # Store the decoded API response
    response_dict=r.json()
    # Two parallel lists: x-axis labels (repo names) and y-axis values (stars)
    names,stars=[],[]
    for i in response_dict['items']:
        print(i['name'])
        print(i['stargazers_count'])
        names.append(i['name'])
        stars.append(i['stargazers_count'])
    # Visualization: light-blue bar chart, rotated x labels, no legend
    my_style=ls('#0eb1ff',base_style=lcs)
    chart=pygal.Bar(style=my_style,x_label_rotation=145,show_legend=False)
    chart.title='GitHub 30个star最多的python项目'
    chart.x_labels=names
    chart.add('',stars)
    chart.render_to_file('Top30StarPythonProject.svg')
    return {"code": 0, "msg": 'GitHub 30个star最多的python项目 Top30StarPythonProject.svg已生成'}
|
990,751 | 9f57735c69fb486f532ff342669ec34c1a09905a | #!/usr/bin/env python
# encoding: utf-8
import json
import random
import datetime
INPUT_DEVICES = "output/devices.json"
OUTPUT_FILENAME = "output/syslog.json"
LINES_TO_MAKE = 1000
def main():
    """Load the device inventory from INPUT_DEVICES and write a fake
    syslog JSON file to OUTPUT_FILENAME."""
    in_str = open(INPUT_DEVICES).read()
    devices = json.loads(in_str)
    syslog = make_syslog(devices=devices)
    with open(OUTPUT_FILENAME, 'w') as outfile:
        json.dump(syslog, outfile, indent=2)
def make_syslog(devices=None, lines_to_make=None):
    """Generate fake syslog lines grouped per device.

    devices: list of dicts, each with a "name" key.
    lines_to_make: total number of lines to generate; defaults to the
        module-level LINES_TO_MAKE constant (parameterized for testability —
        backward compatible, callers passing only `devices` are unaffected).

    Returns a dict mapping device name -> list of syslog line strings with
    monotonically increasing timestamps (starting ~1 day in the past).
    """
    if lines_to_make is None:
        lines_to_make = LINES_TO_MAKE
    syslog = {}
    timestamp = datetime.datetime.now() - datetime.timedelta(days=1)
    for _ in range(lines_to_make):
        # Pick a device uniformly at random for each line.
        router = str(random.choice(devices)["name"])
        syslog.setdefault(router, [])
        # Advance the clock by a random 10..1000 seconds so lines stay ordered.
        timestamp = timestamp + datetime.timedelta(seconds=random.randint(10, 1000))
        line = "%s %s some text" % (timestamp.isoformat(), router.upper())
        syslog[router].append(line)
    return syslog
# Script entry point.
if __name__ == '__main__':
    main()
|
990,752 | 90cea96c61a82b9c21f006d486f483b6e03f24fd | # single inheritance 1
class Rectangle:
    """An axis-aligned rectangle defined by its length and width."""

    def __init__(self, length, width):
        self.length = length
        self.width = width

    def area(self):
        """Return the enclosed area."""
        return self.width * self.length

    def circumstance(self):
        """Return the perimeter (sum of all four sides)."""
        return 2 * (self.length + self.width)
class Square(Rectangle):
    """A Rectangle whose two sides are the same length."""

    def __init__(self, length):
        # Delegate to Rectangle with equal length and width.
        super().__init__(length, length)

    def isSquare(self):
        """Return True when length and width match (always true for Square)."""
        return self.width == self.length
class Cube(Square):
    """A cube; inherits edge-length handling from Square (no own __init__)."""

    def surfaceArea(self):
        """Return the total surface area (six equal square faces)."""
        # Cache the single-face area; super() resolves through Square to
        # Rectangle.area.
        self.face_area = super(Cube, self).area()
        return self.face_area * 6

    def volume(self):
        """Return the cube's volume (edge length cubed).

        Bug fix: this previously read self.face_area, which only exists
        after surfaceArea() has been called; compute the face area directly
        so volume() works regardless of call order.
        """
        return self.length * super(Cube, self).area()
# --- Demo script: prompt for a side length and exercise the classes ---
length = int(input("Please key in the length:"))
square = Square(length)  # instance object
print(square.area())  # dot-notation method call
print(square.circumstance())
print(square.isSquare())
cube = Cube(length)  # instance object for class Cube
print(cube.surfaceArea())  # note: also caches face_area, which volume() reads
print(cube.volume())
print("------------")
print(Cube.__mro__)  # inspect the method resolution order
|
990,753 | 1b5abc43fc099ce8bbeadd8fe52e2b5c9b6c76e9 | import idaapi
# --------------------------------------------------------------------------------
class hidestmt_t:
    """Persist a list of statement addresses in an IDA netnode blob.

    Addresses are packed as fixed-width integers ('Q'/8 bytes for 64-bit
    databases, 'L'/4 bytes otherwise) and optionally stored relative to the
    image base so they survive database rebasing.
    """

    def __init__(self, is64=True, use_relative=True):
        self.n = idaapi.netnode("$ hexrays strikeout-plugin")
        self.c = 'Q' if is64 else 'L'
        self.ptr_size = 8 if is64 else 4
        self.use_relative = use_relative

    def load(self):
        """Return the stored addresses, rebased to the current image base."""
        import struct  # bug fix: 'struct' was never imported at file level
        addresses = []
        blob = self.n.getblob(0, 'I') or []
        imgbase = idaapi.get_imagebase() if self.use_relative else 0
        for offs in range(0, len(blob), self.ptr_size):
            ea = struct.unpack(self.c, blob[offs:offs + self.ptr_size])[0]
            addresses.append(imgbase + ea)
        return addresses

    def kill(self):
        """Delete the backing netnode (and all stored addresses with it)."""
        self.n.kill()

    def save(self, addresses):
        """Pack and store `addresses` (image-base-relative if configured)."""
        import struct  # bug fix: 'struct' was never imported at file level
        imgbase = idaapi.get_imagebase() if self.use_relative else 0
        b = bytearray()
        for addr in addresses:
            b += struct.pack(self.c, addr - imgbase)
        self.n.setblob(bytes(b), 0, 'I')
# --------------------------------------------------------------------------------
def compare_blobs(b1, b2):
    """Compare two blobs: -1 if lengths differ, 1 on any byte mismatch, 0 if equal."""
    if len(b1) != len(b2):
        return -1
    return 1 if any(x != y for x, y in zip(b1, b2)) else 0
# --------------------------------------------------------------------------------
def clean_func_info(func_ea=idaapi.BADADDR):
    """Remove all stored statement addresses that fall inside one function.

    func_ea: any address inside the target function; defaults to the address
    under the cursor. Relies on the module-level `diag` (hidestmt_t) instance
    being initialized beforehand (e.g. by dump()).
    """
    if func_ea == idaapi.BADADDR:
        func_ea = idaapi.get_screen_ea()
    f = idaapi.get_func(func_ea)
    if not f:
        # NOTE(review): this error path returns a (bool, msg) tuple, but the
        # success path below returns None implicitly — confirm callers.
        return (False, 'No function!')
    else:
        func_ea = f.start_ea
    addresses = diag.load()
    print(f'Effective parent function: {f.start_ea:x}..{f.end_ea:x}')
    new_addresses = []
    for addr in addresses:
        f = idaapi.get_func(addr)
        if f and f.start_ea == func_ea:
            # Address belongs to the target function -> drop it.
            print(f'Omitting: {addr:x}')
            continue
        else:
            # print(f'Skipping: {addr:x}')
            pass
        new_addresses.append(addr)
    print(f"Old={len(addresses)} New={len(new_addresses)}")
    # Save only when something was actually removed.
    if len(addresses) != len(new_addresses):
        diag.save(new_addresses)
# --------------------------------------------------------------------------------
def dump():
    """Initialize the module-level `diag` store and print every saved address."""
    global diag
    diag = hidestmt_t()
    addresses = diag.load()
    print('Dumping address\n---------------')
    for ea in addresses:
        print(f"{ea:x} ...")
    print(f'Total {len(addresses)}')
# --------------------------------------------------------------------------------
# Script entry (run inside IDA): clear the output window, then strip the
# current function's saved statement addresses.
# NOTE(review): clean_func_info() reads the global `diag`, which only dump()
# assigns; with dump() commented out this raises NameError — confirm intent.
if __name__=='__main__':
    idaapi.msg_clear()
    # dump()
    clean_func_info()
990,754 | f7c2412c8c71aba59969e8e1d9f298a2ac7ee356 | #!/usr/bin/env python3
# importing message stuff
from std_msgs.msg import Int8MultiArray, Float32
from geometry_msgs.msg import Twist, Vector3, Pose
from nav_msgs.msg import Odometry
from sensor_msgs.msg import Imu, LaserScan
from tf.transformations import euler_from_quaternion
from visualization_msgs.msg import Marker
import rospy
import math
import helper
class PersonFollower:
    """ROS node that steers a Neato toward the closest object seen by lidar."""

    def __init__(self):
        # init node
        rospy.init_node('person_follower')
        # Point of Interest: (distance in meters, direction in radians) in
        # polar coordinates in reference to the base link
        self.POI = (0,0)
        self.twist = Twist(Vector3(0,0,0), Vector3(0,0,0))
        rospy.Subscriber('/scan', LaserScan, self.process_scan)
        self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
        self.marker_pub = rospy.Publisher('/visualization_marker', Marker, queue_size=10)
        # Marker used to visualize the tracked person in rviz.
        self.person_marker = helper.create_marker("base_link", "person_follow", 0, 0, 0.2)

    def process_scan(self, msg):
        """ Gets the closest point in scan's distance and angle and sets it to the POI"""
        lidarPoints = msg.ranges
        minIndex = lidarPoints.index(min(lidarPoints))
        # assumes one scan index == one degree (index converted to radians) —
        # TODO confirm against the lidar's angle_increment
        self.POI = (lidarPoints[minIndex], math.pi*minIndex/180)

    def run(self):
        # Given an angle and a distance from the base_link frame, the neato should aim to
        # move in the right direction and close the gap.
        # The function should allow for mid-run recalibration
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            # Convert POI from polar to cartesian for the rviz marker.
            x = self.POI[0]*math.cos(self.POI[1])
            y = self.POI[0]*math.sin(self.POI[1])
            self.person_marker.pose.position.x = x
            self.person_marker.pose.position.y = y
            self.marker_pub.publish(self.person_marker)
            # Close enough to the person (<= 0.5 m) -> stop.
            if abs(self.POI[0]) <= .5:
                self.twist.linear.x = 0
                self.twist.angular.z = 0
                self.pub.publish(self.twist)
            else:
                # Heading is off by more than ~0.1 rad -> keep turning.
                if abs(self.POI[1]) > .1:
                    # Turn at an angular speed based on the angle left to cover.
                    # A sigmoid scales the motor speed into (0, 0.6).
                    if 0 < self.POI[1] <= math.pi:
                        self.twist.angular.z = helper.sigmoid(self.POI[1]) * 0.6
                    else:
                        # Angle in (pi, 2*pi): turning the other way is shorter.
                        self.twist.angular.z = -helper.sigmoid(self.POI[1]) * 0.6
                    # NOTE(review): linear.x is not reset here, so any previous
                    # forward speed persists while turning — confirm intended.
                else:
                    # Drive straight at a speed proportional to the distance left.
                    self.twist.linear.x = self.POI[0] * 0.5
                    self.twist.angular.z = 0
                self.pub.publish(self.twist)
            r.sleep()
if __name__ == "__main__":
node = PersonFollower()
node.run() |
990,755 | c14edb2fa09bb0966da5db9ef04f9fb8a7798578 | # Generated by Django 3.1.1 on 2020-09-01 13:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.1.1): convert the `stock` model's numeric
    columns (close/high/low/opens/volume) to FloatField.

    Applied migrations should not be edited by hand; add a new migration
    for further schema changes.
    """

    dependencies = [
        ('csvfile', '0004_stock'),
    ]

    operations = [
        migrations.AlterField(
            model_name='stock',
            name='close',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='stock',
            name='high',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='stock',
            name='low',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='stock',
            name='opens',
            field=models.FloatField(),
        ),
        migrations.AlterField(
            model_name='stock',
            name='volume',
            field=models.FloatField(),
        ),
    ]
|
990,756 | d4fd7abe782d41e86f1f70f984cba0d9c1903c7e | #!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
version = "1.0.0"

# `python setup.py tag`: create an annotated git tag for this version and push it.
if sys.argv[-1] == 'tag':
    os.system("git tag -a %s -m 'version %s'" % (version, version))
    os.system("git push --tags")
    sys.exit()

# `python setup.py publish`: build and upload sdist + wheel.
# NOTE(review): `setup.py ... upload` is deprecated/removed in modern
# setuptools; twine is the supported upload path — confirm before release.
if sys.argv[-1] == 'publish':
    os.system("python setup.py sdist upload")
    os.system("python setup.py bdist_wheel upload")
    sys.exit()

# Package metadata.
setup(name="python-dicks",
      version=version,
      description='A Python client for the Dicks API',
      license="MIT",
      install_requires=["simplejson","requests"],
      author="Tobias Schmid",
      author_email="toashd@gmail.com",
      url="http://github.com/toashd/python-dicks",
      packages = find_packages(),
      keywords= "dicks, dicks as a service",
      zip_safe = True)
|
990,757 | eac8d1f9695db5f9326001c7e00fc4ff0467253c | import random
import pygame  # needed: `from pygame import *` does not bind the name `pygame`,
               # but this file calls pygame.init(), pygame.image.load(), etc.
from pygame import *
from pygame.sprite import *
from template import receive
#import modules
#create Box Class for the Box player
class Box(Sprite):
    """The player-controlled red box sprite."""

    # Pixels moved per call for each direction.
    STEP = 2

    def __init__(self):
        Sprite.__init__(self)
        # Load the sprite image and derive the hitbox from it.
        self.image = pygame.image.load("RedBox.png")
        self.rect = self.image.get_rect()
        # Starting position: 60 px from the top-left corner.
        self.rect.left = self.rect.top = 60

    def moveRight(self):
        """Shift the box 2 px to the right."""
        self.rect.left += self.STEP

    def moveLeft(self):
        """Shift the box 2 px to the left."""
        self.rect.left -= self.STEP

    def moveUp(self):
        """Shift the box 2 px toward the top of the screen."""
        self.rect.top -= self.STEP

    def moveDown(self):
        """Shift the box 2 px toward the bottom of the screen."""
        self.rect.top += self.STEP
class Maze(Sprite):
    """Builds the wall sprites for a 19x13 maze grid."""

    def __init__(self, grid):
        # `grid` is a flat row-major list of 0/1 flags (1 = wall block).
        Sprite.__init__(self)
        self.M = 19  # blocks per row
        self.N = 13  # blocks per column
        self.maze = grid

    def create(self, surface, image):
        """Create and return the Group of MazeWall sprites for this grid.

        (`surface` and `image` are accepted for call compatibility but unused.)
        """
        self.mazewall = Group()
        for idx in range(self.M * self.N):
            # Recover row (by) and column (bx) from the flat index.
            by, bx = divmod(idx, self.M)
            if self.maze[idx] == 1:
                # Each block is 50x50 px.
                self.mazewall.add(MazeWall(bx * 50, by * 50))
        return self.mazewall
class Finish(Sprite):
    """The goal sprite, spawned at one of three fixed corner positions."""

    def __init__(self):
        Sprite.__init__(self)
        self.image = pygame.image.load("FinishBox.png").convert()
        self.rect = self.image.get_rect()
        # Candidate (left, top) spawn positions; one is chosen at random.
        spots = [(850, 550), (850, 50), (50, 550)]
        (self.rect.left, self.rect.top) = spots[random.randint(0, 2)]
class MazeWall(Sprite):
    """A single 50x50 yellow wall block at a fixed (x, y) pixel position."""

    def __init__(self, x, y):
        Sprite.__init__(self)
        self.image = pygame.image.load("YellowBox.png").convert()
        self.rect = self.image.get_rect()
        self.rect.left = x
        self.rect.top = y
def text_object(text, font):
    """Render `text` with `font` in black; return (surface, bounding rect)."""
    surface = font.render(text, True, BLACK)
    return surface, surface.get_rect()
# --- Module-level setup: init pygame, create the window, define colors ---
pygame.init()  # initialize all pygame subsystems
display_width = 950
display_height = 650
display = pygame.display.set_mode((display_width,display_height),HWSURFACE,0)  # game window
BLACK = (0,0,0)  # RGB values
WHITE = (255,255,255)
def menu():
    """Show the title screen until the player presses SPACE."""
    pygame.display.set_caption("Welcome to a-MAZE-ing World")  # window caption
    apple = True  # loop flag; cleared when SPACE is pressed
    while apple:
        largeText = pygame.font.Font(None, 80)  # default font, size 80
        textSurf, textRect = text_object("PRESS SPACE TO START", largeText)
        textRect.center = ((display_width/2), (display_height/2))
        display.fill((WHITE))  # clear the frame
        display.blit(textSurf, textRect)
        for events in pygame.event.get():
            keys = key.get_pressed()  # current keyboard state
            if events.type == pygame.QUIT:
                pygame.quit()
            if keys[K_SPACE]:
                apple = False
        pygame.display.flip()
def tryagain():
    """Win screen: C restarts the game, Q quits."""
    apple = True
    while apple:
        largeText = pygame.font.Font(None, 60)
        textSurf, textRect = text_object("You win! Do you want to Continue?", largeText)
        textRect.center = ((display_width/2), (display_height/2))
        display.fill((WHITE))
        display.blit(textSurf, textRect)
        for events in pygame.event.get():
            keys = key.get_pressed()
            if keys[K_q]:
                # bug fix: was `pygame,quit()` (a tuple expression) followed by
                # `quit()` — with `from pygame import *` the name `quit` is
                # pygame.quit, so the process never actually exited.
                pygame.quit()
                raise SystemExit
            if keys[K_c]:
                game()
            if events.type == pygame.QUIT:
                pygame.quit()
        pygame.display.flip()
def lose():
    """Lose screen: play a crash sound; C restarts the game, Q quits."""
    apple = True
    pygame.mixer.music.load("glass.wav")  # losing sound effect
    pygame.mixer.music.play()
    while apple:
        largeText = pygame.font.Font(None, 60)
        textSurf, textRect = text_object("You lose! Do you want to Continue?", largeText)
        textRect.center = ((display_width/2), (display_height/2))
        display.fill((WHITE))
        display.blit(textSurf, textRect)
        for events in pygame.event.get():
            keys = key.get_pressed()
            if keys[K_q]:
                # bug fix: was `pygame,quit()` (a tuple expression) followed by
                # `quit()` (shadowed by pygame.quit via the star import), which
                # never exited the process.
                pygame.quit()
                raise SystemExit
            if keys[K_c]:
                game()
            if events.type == pygame.QUIT:
                pygame.quit()
        pygame.display.flip()
def game():
    """Main game loop: build a random maze, then move the player until win/lose."""
    pygame.mixer.music.load("Solution.wav")  # background music
    pygame.mixer.music.play(-1)  # loop forever
    pygame.mixer.music.set_volume(1)
    running = True
    x = random.randint(0,4)  # pick one of the 5 maze layouts
    y = receive()  # list of maze grids from template.py
    z = y[x]
    player = Box()
    maze = Maze(z)
    finish = Finish()
    # NOTE(review): `image` here resolves to pygame's `image` submodule via the
    # star import; Maze.create() ignores both arguments anyway — confirm intent.
    mazewallgroup = maze.create(display,image)
    sprites = Group(player)  # player sprite group
    sprite = Group(finish)   # finish sprite group
    while running:
        keys = pygame.key.get_pressed()
        # Each direction: move, then lose on any wall collision.
        if keys[K_RIGHT]:
            player.moveRight()
            if spritecollideany(player,mazewallgroup):
                lose()
        if keys[K_LEFT]:
            player.moveLeft()
            if spritecollideany(player,mazewallgroup):
                lose()
        if keys[K_UP]:
            player.moveUp()
            if spritecollideany(player,mazewallgroup):
                lose()
        if keys[K_DOWN]:
            player.moveDown()
            if spritecollideany(player,mazewallgroup):
                lose()
        if keys[K_ESCAPE]:
            running = False
        if spritecollideany(player,sprite):
            tryagain()  # reached the finish box
        for event in pygame.event.get():
            # bug fix: was `event == pygame.QUIT` (comparing the event object
            # to a constant, always False); compare event.type as menu() does.
            if event.type == pygame.QUIT:
                pygame.quit()
        pygame.event.pump()  # keep the event queue alive
        display.fill(BLACK)
        sprites.draw(display)
        sprite.draw(display)
        mazewallgroup.draw(display)
        pygame.display.flip()
menu()  # title screen first...
game()  # ...then start the main game loop
|
990,758 | f7fbda4c7e903d95552bdbf87874dbddb02ab0fc | from commands.command_helpers import telegram_command, ChatBotException
@telegram_command("ema", pass_args=True)
def ema(args):
    """
    /EMA <symbol>
    returns the EMA values for any given coin symbol
    """
    # NOTE(review): assert is stripped under `python -O`; an explicit check
    # returning a user-facing error message would be safer for validation.
    assert len(args[0])
    # from utils.symbols import get_symbol
    # from indicators.price import get_current_price_humanized
    try:
        # symbol = get_symbol(args[0])
        pass
    except ChatBotException as e:
        # Known, user-presentable failure (e.g. unknown symbol).
        # logger.debug(e.developer_message)
        return e.user_message
    except Exception as e:
        # logger.warning(str(e))
        return "unexpected error"
    # todo: get latest ema data from datastore
    ema6, ema12, ema24 = 1, 2, 3  # placeholder values until datastore wiring lands
    # print("returning EMA for" + symbol) # logger.debug("returning EMA for"+symbol)
    return "\n".join([
        # symbol + " " + get_current_price_humanized(symbol) + " EMA analysis",
        "EMA-6: {:,}".format(ema6),
        "EMA-12: {:,}".format(ema12),
        "EMA-24: {:,}".format(ema24),
    ])
|
990,759 | 4ceb2dcb1a02241ab23ea4205c1d8c231892e404 | file = open("input.in")
# `file` (opened on the previous line) holds input.in; read and strip all lines.
lines = [line.strip() for line in file]
file.close()
numberOfTests = int(lines[0])
currentLine = 1  # cursor into `lines`
for testNumber in range(numberOfTests):
    # Each test case: a chosen row number plus a 4x4 grid, given twice
    # (before and after the cards are rearranged).
    firstAnswer = int(lines[currentLine])
    currentLine += 1
    firstGrid = []
    for i in range(4):
        firstGrid.append(lines[currentLine].split(" "))
        currentLine += 1
    secondAnswer = int(lines[currentLine])
    currentLine += 1
    secondGrid = []
    for j in range(4):
        secondGrid.append(lines[currentLine].split(" "))
        currentLine += 1
    # Candidate cards = intersection of the two selected rows.
    possibleCards = []
    possibleCards1 = []
    possibleCards1.extend(firstGrid[firstAnswer-1])
    possibleCards2 = []
    possibleCards2.extend(secondGrid[secondAnswer-1])
    for card in possibleCards1:
        if card in possibleCards2:
            possibleCards.append(card)
    # Exactly one common card -> that's the answer; none -> the volunteer
    # lied; several -> the trick cannot determine the card.
    if len(possibleCards) == 1:
        print("Case #"+str(testNumber+1)+": "+possibleCards[0])
    elif len(possibleCards) == 0:
        print("Case #"+str(testNumber+1)+": Volunteer cheated!")
    elif len(possibleCards) > 1:
        print("Case #"+str(testNumber+1)+": Bad magician!")
990,760 | 877779b20ee777bd577d547ef3b11b7c68a7f08e |
door = "closed"
locked = True
code = 1234
while door == "closed":
command = input(">> ")
commandParts = command.split(" ")
command = commandParts[0]
if command == "open":
if len(commandParts) == 1:
print("Open what?")
continue
object = commandParts[1]
if object == "door":
if locked:
print("you can't open the door! It's locked")
else:
print("you open the door")
door = "open"
else:
print("You don't know how to open that.")
elif command == "unlock":
if len(commandParts) <3:
print("Unlock what with what?")
continue
object = commandParts[1]
code = commandParts[-1]
if object == "door":
if code == str(code):
print("That's the correct code! The door unlocks!")
locked = False
else:
print("That's the wrong code!")
else:
print("You don't know how to unlock that.")
else:
print("you don't know how to do that.")
print("congrautlations! You escaped") |
990,761 | d266d88e6b21cb5985f1c6055823e4ada2c21175 | # -*- coding: utf-8 -*-
from osv import osv,fields
class pelicula(osv.Model):
    """Movie catalog record (gidsoft.peliculas.pelicula) for OpenERP."""
    _name = 'gidsoft.peliculas.pelicula'
    _rec_name = 'nombre_pelicula'  # display movies by their name
    _columns = {
        'cod_pelicula': fields.char('Codigo Pelicula', required=True, size=4),
        'nombre_pelicula': fields.char('Nombre Pelicula', size=42),
        'sinopsis': fields.text('Sinopsis de la Pelicula'),
        'director_id': fields.many2one('gidsoft.peliculas.director', 'Director'),
        'autor_id': fields.many2one('gidsoft.peliculas.autor', 'Autor'),
        'genero_id': fields.many2one('gidsoft.peliculas.genero', 'Genero'),
        'ano_estreno': fields.date('Año de Estreno'),
        # bug fix: keyword was misspelled `filtars`, so the image file-picker
        # filter was silently ignored by the binary field.
        'imagen': fields.binary('Imagen', filters='*.png, *.gif'),
    }
990,762 | 9f0518fb9533ed9fee1c861b034e5df66a850423 | from django.shortcuts import render
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib import auth
# Create your views here.
# def say_hello(request):
# name = request.GET.get("name","")
# if name == "":
# return HttpResponse("请输入name参数")
# else:
# #return HttpResponse("hello "+ name)
# return render(request,"index.html",{"name":name})
def index(request):
    """Login view: GET renders the form; POST authenticates and logs in.

    Failure messages are passed back to the template via the `errmsg`
    context variable.
    """
    if request.method == "GET":
        return render(request, "index.html")

    username = request.POST.get("username", "")
    password = request.POST.get("password", "")
    if username == "" or password == "":
        return render(request, "index.html", {"errmsg": "用户名或密码为空"})

    user = auth.authenticate(username=username, password=password)
    if user is None:  # idiom fix: compare to None with `is`, not `==`
        return render(request, "index.html", {"errmsg": "用户名或密码错误"})

    auth.login(request, user)  # persist the login state in the session
    return HttpResponse("恭喜你,登入成功")
|
990,763 | 609f38fcc62408022669f520216bf0ce06d3ec66 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 14:34:35 2016
@author: ibackus
"""
class ConvergenceTest():
    """
    ConvergenceTest(method='ftol', xtol=None, ftol=1e-4, lookback=1, \
    minSteps=2)

    A simple class for handling convergence tests.

    Parameters
    ----------
    method : str
        convergence test method to use. Currently implemented are 'ftol'
        (follow a scalar quantity like the loss)
    xtol : float
        (not implemented)
    ftol : float
        tolerance for fractional change in function (loss) tolerance
    lookback : int
        Compare current value (loss, argument vals, etc) to max of the previous
        lookback number of values
    minSteps : int
        Minimum number of steps before checking convergence

    Examples
    --------
    >>> conv = ConvergenceTest(ftol=1e-3, lookback=3)
    >>> maxIter = 20
    >>> while not conv.converged and conv.nSteps < maxIter:
    >>>     # Calculate losses
    >>>     # ...
    >>>     conv.checkConvergence(loss)
    """
    def __init__(self, method='ftol', xtol=None, ftol=1e-4, lookback=1,
                 minSteps=2):
        # Bug fixes:
        #  * `method not in ('ftol')` tested membership in the *string*
        #    'ftol' (so e.g. 'tol' or 'f' were accepted); use a real tuple.
        #  * The error message read self.method before it was assigned
        #    (AttributeError); format the local `method` instead.
        #  * Python-2-only `raise ValueError, msg` replaced with a form
        #    valid on both Python 2 and 3.
        if method not in ('ftol',):
            raise ValueError('Unrecognized convergence test method {0}'
                             .format(method))
        self.method = method
        self.xtol = xtol
        self.ftol = ftol
        self.lookback = lookback
        self.minSteps = minSteps
        self.reset()

    def addStep(self, x):
        """
        Append a loss (without checking for convergence) or a function argument

        If method = 'ftol', append x as a loss
        """
        if self.method == 'ftol':
            self.loss.append(x)
        self.nSteps += 1

    def checkConvergence(self, loss):
        """
        Append a loss and check for convergence
        """
        self.addStep(loss)
        # Too few steps yet to judge convergence.
        if (self.nSteps <= self.minSteps) or (self.nSteps <= self.lookback):
            return
        if self.method == 'ftol':
            self._ftolCheck()
        if self.converged:
            print('Converged')  # py2 `print` statement modernized compatibly

    def reset(self):
        """
        Reset to step 0...delete losses etc
        """
        self.loss = []
        self.funcargs = []
        self.nSteps = 0
        self.converged = False

    def _ftolCheck(self):
        """
        Mark converged when the fractional change in loss drops below ftol.
        """
        oldLoss = biggestRecentLoss(self.loss, self.lookback)
        newLoss = float(self.loss[-1])
        # Symmetric relative difference between old and new loss.
        fracDiff = 2 * (oldLoss - newLoss) / (oldLoss + newLoss)
        if fracDiff < self.ftol:
            self.converged = True
def biggestRecentLoss(losses, memory=3):
    """
    Of the last 'memory' losses, calculate the largest.

    Note: the window actually spans the last memory + 1 entries (the current
    loss plus the previous `memory` ones), matching the original behavior.
    """
    window = min(len(losses), memory + 1)
    return max(losses[-window:])
990,764 | 85a6dfdbc61a4965036ebb89bb8b608599626ede | '''
Implement atoi to convert a string to an integer.
https://leetcode.com/problems/string-to-integer-atoi/description/
'''
def atoi(str):
    """
    Convert a string to an integer, LeetCode-atoi style.

    Leading/trailing whitespace is ignored; an optional single leading
    '+'/'-' sign is honoured only when immediately followed by a
    digit; parsing stops at the first non-digit.  Returns 0 when no
    digits can be parsed.
    """
    # NOTE: the parameter shadows the builtin str(); name kept for
    # backward compatibility with existing callers.
    str = str.strip()
    if str == '':
        return 0
    index = 0
    result = 0
    sign = False
    if str[0] in '-+':
        # A bare sign, or a sign not followed by a digit, is invalid.
        if len(str) == 1 or (len(str) > 1 and not str[1].isdigit()):
            return 0
        if str[0] == '-':
            sign = True
        index += 1
    # BUG FIX: removed the leftover debug print() calls that polluted
    # stdout on every invocation.
    while index < len(str):
        if str[index].isdigit():
            result = result * 10 + int(str[index])
            index += 1
        else:
            # Stop at the first non-digit character.
            break
    if sign:
        result = -result
    return result
# f = open("atoi_test_cases.txt", 'r')
# for x in f:
# x = x.strip()
# print('%10s : %10s' % (x, str(atoi(x))))
# Ad-hoc smoke test: print each sample input next to its parsed value.
cases = ["", "123", "-123", "1-23", "bc-123",
         "123b", "123 45b", "123ab12", "abc",
         "+1", "+", " 010"]
for case in cases:
    print('%10s : %10d' %(case, atoi(case)))
|
990,765 | cd69085f6352853d0d212199072ba578438f808b | import MatrixCaculation as M
import sys
class Edge(object):
    """One polygon edge for active-edge-table scan conversion.

    Carries the edge's vertical span plus per-scanline interpolation
    state for x, depth (z-buffer), the vertex normal and the texture
    coordinate.
    """
    def __init__(self, ymin, ymax, x_ymin, z_ymin, z_ymax, m,
                 vnor_ymin, vnor_ymax, t_ymin, t_ymax):
        # Vertical extent of the edge.
        self.ymin, self.ymax = ymin, ymax
        # x at ymin plus inverse slope m, used to step x per scanline.
        self.x_ymin, self.m = x_ymin, m
        # Depth endpoints and the current interpolated depth.
        self.z_ymin, self.z_ymax = z_ymin, z_ymax
        self.z = z_ymin
        # Vertex-normal endpoints and the current interpolated normal.
        self.vnor_ymin, self.vnor_ymax = vnor_ymin, vnor_ymax
        self.v_normal = vnor_ymin
        # Texture-coordinate endpoints and current interpolated value.
        self.t_ymin, self.t_ymax = t_ymin, t_ymax
        self.t = t_ymin
def ScanConversion(singleobject):
    """Scan-convert every polygon of singleobject into per-scanline spans.

    For each polygon an edge table is built from the device-space
    vertices, then an active-edge-table (AET) sweep produces, for each
    scanline, a record [y, [x1, z1, normal1, tex1], ...] of edge
    intersections with interpolated depth, vertex normal and texture
    coordinate.  Returns one list of such records per polygon.

    NOTE(review): polygon.pop(0) and the start/end.append calls mutate
    singleobject's data, so running this twice on the same object will
    misbehave -- confirm upstream usage.
    """
    results=[]
    for polygon in singleobject.Polygons:
        result=[]
        edges=[]
        # First entry of the polygon record is its vertex count; the
        # remaining entries are 1-based vertex indices.
        num=polygon.pop(0)
        for i in range(num-1):
            edges.append([polygon[i]-1,polygon[i+1]-1])
        edges.append([polygon[num-1]-1,polygon[0]-1])
        y=sys.maxsize
        edgetable=[]
        for edge in edges:
            start=singleobject.devPoints[edge[0]]
            end=singleobject.devPoints[edge[1]]
            # vertex normals
            start.append(singleobject.v_normals[edge[0]])
            end.append(singleobject.v_normals[edge[1]])
            # vertex texture
            start.append(singleobject.texture[edge[0]])
            end.append(singleobject.texture[edge[1]])
            #horizon line doesn't count
            if start[1]==end[1]:
                continue
            # Orient the edge so start is the lower-y endpoint.
            if start[1] > end[1]:
                start, end= end, start
            # Inverse slope used to step x between scanlines; vertical
            # edges get m == 0 so x never moves.
            if end[0]==start[0]:
                m=0
            else:
                m=(end[1]-start[1])/(end[0]-start[0])
            #shorten one y_max
            edgetable.append(Edge(start[1],end[1]-1,start[0],start[2],end[2],m,start[3],end[3],start[4],end[4]))
            y=min(start[1],y)
        #initialize et & y
        ate=[]
        y=int(y)
        while ate or edgetable:
            # Move edges whose span has started into the active table.
            for i in range(len(edgetable)-1,-1,-1):
                if edgetable[i].ymin <y:
                    ate.append(edgetable[i])
                    del edgetable[i]
            ate=sorted(ate,key= lambda x:x.x_ymin)
            # 3.2 Fill in desired pixel values on scan line y by using pairs of x coordinates from the AET
            intersects=[]
            for edge in ate:
                # Skip duplicate x positions (shared vertices).
                if edge.x_ymin not in [i[0] for i in intersects]:
                    intersects.append([edge.x_ymin,edge.z,edge.v_normal,edge.t])
            # single y line format: [y, [x1,z1],[x2,z2]]
            if len(intersects)>1:
                intersects.insert(0,y)
                result.append(intersects)
            # Retire finished edges; step x/z/normal/texture on the rest.
            for i in range(len(ate)-1,-1,-1):
                if ate[i].ymax<y:
                    del ate[i]
                elif ate[i].m!=0:
                    # NOTE(review): vertical edges (m == 0) never update
                    # z/normal/texture here -- confirm that is intended.
                    ate[i].x_ymin+=1/ate[i].m
                    y1=ate[i].ymax
                    y2=ate[i].ymin
                    z1=ate[i].z_ymax
                    z2=ate[i].z_ymin
                    l1=ate[i].vnor_ymax
                    l2=ate[i].vnor_ymin
                    t1=ate[i].t_ymax
                    t2=ate[i].t_ymin
                    ate[i].z+=(z1-z2)/(y1-y2)
                    ate[i].v_normal=M.add(M.multiple(l1,(y-y2)/(y1-y2)),M.multiple(l2,(y1-y)/(y1-y2)))
                    ate[i].t=[(t1[0]*(y-y2)+t2[0]*(y1-y))/(y1-y2),(t1[1]*(y-y2)+t2[1]*(y1-y))/(y1-y2)]
            # 3.6 Increment y by 1 (to the coordinate of the next scan line)
            y+=1
        results.append(result)
    return results
def Z_buffer(Polygons):
    """Rasterise scan-converted polygon spans into 1200x1000 buffers.

    Depth is interpolated along x across each span; a pixel wins when
    it is closer than the value already in the z-buffer, and its
    interpolated normal and texture coordinate are stored in i_buffer
    and t_buffer.  Returns (i_buffer, t_buffer).
    """
    # Depth initialised to "infinitely far"; -1 marks empty pixels.
    z_buffer=[[sys.maxsize]*1200 for i in range(1000)]
    i_buffer=[[-1]*1200 for i in range(1000)]
    t_buffer=[[-1]*1200 for i in range(1000)]
    for onepolygon in Polygons:
        for line in onepolygon:
            # Span format (from ScanConversion):
            # [y, [x, z, normal, tex], ..., [x, z, normal, tex]]
            y=line.pop(0)
            start=line[0]
            end=line[len(line)-1]
            xa=start[0]
            xb=end[0]
            za=start[1]
            zb=end[1]
            la=start[2]
            lb=end[2]
            ta=start[3]
            tb=end[3]
            xp=xa
            zp=za
            while xp<xb:
                # For every x in y get z
                zp+=(zb-za)/(xb-xa)
                # Visible
                if zp< z_buffer[y][int(xp)]:
                    z_buffer[y][int(xp)]=zp
                    i_buffer[y][int(xp)]=M.add(M.multiple(la,(xb-xp)/(xb-xa)),M.multiple(lb,(xp-xa)/(xb-xa)))
                    t_buffer[y][int(xp)]=[(ta[0]*(xb-xp)+tb[0]*(xp-xa))/(xb-xa),(ta[1]*(xb-xp)+tb[1]*(xp-xa))/(xb-xa)]
                xp+=1
    return i_buffer,t_buffer
|
990,766 | 005d84fac00a0e97587192daa6e7cc4814e13346 | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from argparse import ArgumentParser
import mmcv
from mmflow.apis import inference_model, init_model
from mmflow.datasets import visualize_flow, write_flow
def parse_args():
    """Build and parse the command-line arguments for flow inference."""
    ap = ArgumentParser()
    # Positional arguments -- declaration order defines the CLI order.
    ap.add_argument('img1', help='Image1 file')
    ap.add_argument('img2', help='Image2 file')
    ap.add_argument(
        '--valid',
        help='Valid file. If the predicted flow is'
        'sparse, valid mask will filter the output flow map.')
    ap.add_argument('config', help='Config file')
    ap.add_argument('checkpoint', help='Checkpoint file')
    ap.add_argument(
        'out_dir', help='Path of directory to save flow map and flow file')
    ap.add_argument(
        '--out_prefix',
        help='The prefix for the output results '
        'including flow file and visualized flow map',
        default='flow')
    ap.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    return ap.parse_args()
def main(args):
    """Run optical-flow inference on one image pair and save the results
    (a visualised PNG and a raw .flo file) under args.out_dir."""
    # build the model from a config file and a checkpoint file
    model = init_model(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_model(model, args.img1, args.img2, valids=args.valid)
    # save the results
    mmcv.mkdir_or_exist(args.out_dir)
    visualize_flow(result, osp.join(args.out_dir, f'{args.out_prefix}.png'))
    write_flow(result, osp.join(args.out_dir, f'{args.out_prefix}.flo'))
if __name__ == '__main__':
args = parse_args()
main(args)
|
990,767 | f0eb399710d584cffb097b72d47a29630d30e4ee | import os
import sys
# resolves import conflicts between modules
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../app/src')))
import pytest
import json
import logging
import dynamodb_api as db
@pytest.fixture
def prepare_db():
    """
    Create the TestRules table and load the mock rules before the
    test, then drop the table afterwards.

    Anything before yield executed before the test
    Anything after yield executed after the test
    """
    logging.info("Create table and load data")
    db.create_rules_table("TestRules")
    with open("test/mock_data/rules.json", 'rb') as f:
        fake_rules = json.load(f)
    db.load_rules(fake_rules, "TestRules")
    yield
    logging.info("Delete table")
    db.delete_table("TestRules")
@pytest.fixture
def clean_db():
    """
    Yield immediately so the test runs first, then drop the TestRules
    table that the test is expected to have created.
    """
    yield
    logging.info("Delete table")
    db.delete_table("TestRules")
def test_create_table(clean_db):
table = "TestRules"
db.create_rules_table(table)
assert table in db.list_tables()
def test_delete_table():
table = "TestRules"
db.create_rules_table(table)
db.delete_table(table)
assert table not in db.list_tables()
def test_list_tables():
assert type(db.list_tables()) == list
def test_get_rules(prepare_db):
table = "TestRules"
rules = db.get_rules(rule_id=1, table=table)
with open("test/mock_data/rules.json", 'r') as f:
expected_rules = json.load(f)[0]
assert rules["RuleId"] == 1
assert expected_rules == rules
|
990,768 | 2cb1bb42b3870afcafe6682ca8d649b1ded48e3b | import sys
sys.path.append('../')
from utils import util
from utils import plotter
import matplotlib.pyplot as plt
#import numpy as np
import autograd.numpy as np
import scipy as sp
from scipy.optimize import minimize
from numpy.random import RandomState
import pandas as pd
from autograd import grad
RS = RandomState(1213)
class FA(object):
    """Factor-analysis model fitted by maximising the marginal likelihood.

    Generates synthetic data (n observations of dimension dimx driven
    by dimz latent factors) via utils.util and recovers the loading
    matrix W by gradient descent on the negative marginal likelihood.
    """
    def __init__(self, n, dimz = 2, dimx = 3):
        self.n = n
        # Observation-noise variance (near-deterministic mapping).
        self.sigx = 0.000001
        #sigw = 1#RS.normal(0,1)
        # BUG FIX: the original read `self.W = self.W = ...`, a
        # redundant double assignment.
        self.W = RS.normal(0, 1, size=(dimx, dimz))
        self.dimz = dimz
        self.dimx = dimx
        data = util.generate_data(n, self.W, self.sigx, dimx, dimz)
        self.observed = data[0]
        self.latent = data[1]
    def get_mu(self, x, W):
        """Posterior mean of the latent factors for observation x:
        (W^T W)^{-1} W^T x (pseudo-inverse projection)."""
        temp = np.dot(W.transpose(), W)
        temp = np.linalg.inv(temp)
        temp = np.dot(temp, W.transpose())
        return np.dot(temp, x)
    def marginal_likelihood(self, W0):
        """Minimise the negative marginal log-likelihood starting from
        W0 and return the fitted loading matrix; also plots the
        learning curve as a side effect."""
        a = self.sigx*np.identity(self.dimx)
        win = lambda w: np.dot(w, w.transpose()) + a
        const = lambda w: -(self.n/2.0)*np.log( np.linalg.det(win(w)) )
        pdin = lambda w: np.linalg.inv( win(w) )
        pd = lambda w,i: np.dot(np.dot(self.observed[i].transpose(), pdin(w)), self.observed[i])
        final = lambda w: sum(pd(w, i) for i in range(self.n))
        evidence = lambda w: - const(w) + 0.5*final(w)
        # FIX: removed the unused `gradient = grad(evidence)` and no
        # longer rebind `a` (which the lambdas above capture) with the
        # gradient-descent history.
        ans, history = util.gradient_descent(evidence, W0)
        #plot learning curve
        plt.plot(history)
        plt.show()
        return ans
    def MLE_EP(self, random_init):
        """Fit W by maximum likelihood and return (mus, sig): the
        posterior latent means for every observation and the shared
        posterior covariance derived from the true W."""
        w_init = RS.normal(0, 1, (self.dimx, self.dimz))
        if random_init is False:
            w_init = self.W
        w = self.marginal_likelihood(w_init)
        # BUG FIX: `mus` was initialised twice in the original.
        mus = np.array([])
        for i in range(self.n):
            mu = self.get_mu(self.observed[i], w)
            mus = np.hstack((mus, mu))
        # Generalised: reshape by dimz instead of the hard-coded 2.
        mus = mus.reshape((self.n, self.dimz))
        sig = np.dot(self.W.transpose(), self.W)
        sig = sig/self.sigx
        sig = np.linalg.inv(sig)
        return mus, sig
|
990,769 | 40e430d8c424bd0e381766e222a8b3e67a3fa65e | from evaluator import Evaluator
import numpy as np
def get_com_pos(positions, gms):
    """Mass-weighted mean position (center of mass) over the body axis."""
    weighted = gms[:, None] * positions
    return weighted.sum(axis=-2) / gms.sum()
def get_com_vel(velocities, gms):
    """Mass-weighted mean velocity (center-of-mass velocity)."""
    weighted = gms[:, None] * velocities
    return weighted.sum(axis=-2) / gms.sum()
# Center of mass evaluator
class COM_Evaluator(Evaluator):
    # Evaluates conservation of the center of mass: compares the
    # integrated COM state at `time` against the COM propagated
    # analytically from the initial conditions.
    def evaluate(self, scenario, positions, velocities, time):
        com_vel_0 = get_com_vel(scenario.get_velocities(), scenario.get_gms())
        com_vel_t = get_com_vel(velocities, scenario.get_gms())
        com_pos_0 = get_com_pos(scenario.get_positions(), scenario.get_gms())
        com_pos_t = get_com_pos(positions, scenario.get_gms())
        # With no external forces the COM drifts at constant velocity.
        com_pos_0_t = com_pos_0 + com_vel_0 * time
        return com_vel_0, com_vel_t, com_pos_0, com_pos_t, com_pos_0_t
990,770 | 97d648077431511c8494fb463fc405d888e07fe6 | import math
import copy
"""
Day 6: Memory Reallocation
"""
def reallocateNumber(data, index):
    """Redistribute the blocks held at data[index] across the banks.

    The source cell is emptied and its contents handed out to the
    following cells (wrapping around) in chunks of
    ceil(blocks / len(data)); the last cell receives whatever
    remainder is left.  Mutates data in place.
    """
    remaining = data[index]
    chunk = math.ceil(remaining / len(data))
    data[index] = 0
    pos = index
    while remaining > 0:
        pos = (pos + 1) % len(data)
        give = chunk if remaining >= chunk else remaining
        data[pos] += give
        remaining -= give
def listsAreEqual(list1, list2):
    """Return True when both sequences have the same length and equal
    elements in the same order."""
    # Sequence == already performs the length check plus element-wise
    # comparison; the list() wrapping keeps the original behaviour of
    # treating e.g. a tuple and a list with equal items as equal.
    return list(list1) == list(list2)
def wasMetPreviously(list1, previousLists):
    """Return (True, index) of the first occurrence of list1 inside
    previousLists, or (False, None) when it never appeared."""
    if list1 in previousLists:
        return True, previousLists.index(list1)
    return False, None
def main():
    """Solve Advent of Code 2017 day 6: count redistribution cycles
    until a memory-bank configuration repeats, and the length of the
    resulting loop.  Reads input.txt from the working directory.
    """
    data = open('input.txt', 'r').read().split()
    # data = '0 2 7 0'.split()
    data = list(map(int, data))
    previous = []
    times = 0
    areEqual = False
    while not areEqual:
        # Snapshot the configuration before mutating it in place.
        previous.append(copy.deepcopy(data))
        max_ind = data.index(max(data))
        reallocateNumber(data, max_ind)
        times += 1
        areEqual, equalInd = wasMetPreviously(data, previous)
    print("times: ", times)
    print("cycles: ", len(previous) - equalInd)
if __name__ == '__main__':
main()
|
990,771 | 828b8f46a0fe63dbb4c05ff0fe08c4b0f682b480 | from django.apps import AppConfig
class RegistryappConfig(AppConfig):
    """Django application configuration for the RegistryApp app."""
    name = 'RegistryApp'
990,772 | dbb0ca118303a901d86c7196ea8842269c3c412d | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
titanic = pd.read_csv('./titanic.csv')
#print titanic
print titanic.head()[['pclass', 'survived', 'age', 'embarked', 'boat', 'sex']]
from sklearn import feature_extraction
def one_hot_dataframe(data, cols, replace=False):
vec = feature_extraction.DictVectorizer |
990,773 | 31e4c4dd4ca99f856ef1b4617c5a34cadcdc22af | Mein neuer Code
Neue Code-Zeile ...
|
990,774 | 13a300d908a4fa50c5ee3ed80d93d400ce68a3a3 | class StringTest:
def __init__(self):
self.str=""
def getString(self):
self.str=input("Enter String : ")
def printString(self):
print(self.str.upper())
obj=StringTest()
obj.getString()
obj.printString() |
990,775 | d1f83b8bb25243898cebfd2d535ad9ba743ae420 | import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# Load the MNIST digit dataset and normalize pixel values per sample.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_test, axis=1)
# Simple dense classifier: flatten 28x28 -> two hidden layers -> softmax.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=(28, 28)))
#Neurons
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(128, activation='relu'))
#The 10 digits
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer='RMSprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=3)
model.save('handwritten.model')
loss, accuracy = model.evaluate(x_test, y_test)
print(loss)
print(accuracy)
# Reload the saved model and try it on hand-drawn sample images.
model = tf.keras.models.load_model('handwritten.model')
image_number = 0
correct_prediction_check = 0
# NOTE(review): the accuracy count below assumes the digit drawn in
# anynumberN.png equals N -- confirm the image naming convention.
while os.path.isfile(f"My numbers/anynumber{image_number}.png"):
    try:
        # Take one colour channel and invert: white-on-black like MNIST.
        img = cv2.imread(f"My numbers/anynumber{image_number}.png")[:,:,0]
        img = np.invert(np.array([img]))
        prediction = model.predict(img)
        print(f"the number may be a {np.argmax(prediction)}")
        plt.imshow(img[0], cmap=plt.cm.binary)
        plt.show()
        if (image_number == np.argmax(prediction)):
            correct_prediction_check = correct_prediction_check + 1
    except:
        # NOTE(review): bare except hides real failures (bad path,
        # malformed image) -- consider narrowing.
        print("Error")
    finally:
        image_number = image_number + 1
print(f"The got {correct_prediction_check} out of {image_number} images correct")
990,776 | 74abeef341ae402d46549349459b979819d1b37c | the=["G","P","P","G","P"]
k=1
def brute_grab (arr,k):
    """Brute-force taxi matching: collect every (grabber, passenger)
    pair within distance k, enumerate all conflict-free pair subsets
    via combinations(), and print the maximum passenger count plus the
    number of subsets achieving it.

    Pairs are labelled like "G3"/"P2" (letter plus index).
    NOTE(review): the local name `all` shadows the builtin all().
    """
    i=0
    all=[]
    print (arr)
    while i < len(arr):
        if arr[i]=='G':
            G = arr[i]+str(i)
            # Candidate passengers within k to the left of this grabber.
            for j in range(k+1):
                if (i-j)>-1 and arr[i-j]=='P':
                    #print(G,arr[i-j]+str(i-j))
                    all.append([G,arr[i-j]+str(i-j)])
            # Candidate passengers within k to the right.
            for z in range(k+1):
                if (i+z)<len(arr) and arr[i+z]=='P' :
                    #print(G,arr[i+z]+str(i+z))
                    all.append([G,arr[i+z]+str(i+z)])
        i=i+1
        # print(all,"\n","-----------------")
    all_combi=combinations([],all)
    result=[]
    max_len=len(all_combi[0])
    for i in all_combi:
        max_len=max([max_len,len(i)])
    # Keep only the subsets that carry the maximum number of passengers.
    for i in all_combi:
        if (len(i)==max_len):
            result.append(i)
    print ("max passenger : "+str(max_len))
    print ("way to pick: "+str(len(result)))
def combinations(target, data, res=None):
    """Enumerate all conflict-free subsets of candidate (grabber,
    passenger) pairs.

    target : pairs already chosen on the current branch
    data   : remaining candidate pairs to extend with
    res    : accumulator; created fresh for each top-level call

    A subset is valid when no grabber or passenger label appears in
    more than one of its pairs.  Returns the list of all valid
    non-empty subsets.
    """
    # BUG FIX: the original used a mutable default (res=[]), so results
    # from separate top-level calls accumulated in the same shared
    # list.  Use the None-sentinel idiom instead.
    if res is None:
        res = []
    for i in range(len(data)):
        new_target = target.copy()
        new_target.append(data[i])
        new_data = data[i+1:]
        # Valid only while every grabber/passenger label stays unique.
        seen = []
        valid = True
        for pair in new_target:
            if pair[0] not in seen and pair[1] not in seen:
                seen.append(pair[0])
                seen.append(pair[1])
            else:
                valid = False
        if valid:
            res.append(new_target)
            # A superset of an invalid subset is invalid too, so only
            # valid branches need extending.  (Also dropped the unused
            # `mac` local from the original.)
            combinations(new_target, new_data, res)
    return res
def greed_grab(lst, k):
    """Greedy taxi matching: for each grabber 'G' in lst, claim the
    first unclaimed passenger 'P' within distance k, preferring the
    farthest reachable passenger on the left, then the nearest on the
    right.  Prints a summary and returns the list of "G: i, P:j"
    pairings.
    """
    temp_lst = lst.copy()
    path = []
    for i in range(len(temp_lst)):
        if (temp_lst[i] == "G"):
            found = False
            # Look left, farthest reachable seat first.
            for j in range(k, 0, -1):
                # BUG FIX: the original tested i-j>0 and therefore
                # could never pick up a passenger sitting at index 0.
                if (i-j >= 0):
                    if (temp_lst[i-j] == "P"):
                        path.append("G: "+str(i)+", P:"+str(int(i-j)))
                        temp_lst[i-j] = "emp"
                        found = True
                        break
            if (found):
                continue
            # Look right, nearest seat first.
            for l in range(1, k+1):
                if (l+i <= len(lst)-1):
                    if (temp_lst[l+i] == "P"):
                        path.append("G: "+str(i)+", P:"+str(int(l+i)))
                        temp_lst[l+i] = "emp"
                        found = True
                        break
    print ("path\t: "+str(path))
    # BUG FIX: the original printed the path list itself here instead
    # of the passenger count.
    print ("maximum passenger: "+str(len(path)))
    return path
def read_prob(filename):
    """Parse a problem file.

    The first line is the seat row (one character per cell); each
    remaining line is read as the integer reach k (the last such line
    wins).  Returns (cells, k).
    """
    cells = []
    k = 0
    with open(filename, "r") as fh:
        lines = fh.readlines()
    if lines:
        cells = [ch for ch in lines[0].strip()]
        for raw in lines[1:]:
            k = int(raw)
    return (cells, k)
# Load the sample problem (expects 3.1.3.txt in the working directory)
# and solve it with both strategies for comparison.
arr,k=read_prob("3.1.3.txt")
brute_grab(arr,k)
greed_grab(arr,k)
|
990,777 | 85114ce90e97efa6182bdebefddd9bf416a6e576 | import csv
from io import BytesIO, StringIO
import pytest
from pytest_django.asserts import assertContains, assertRedirects
from opencodelists.tests import factories as opencodelists_factories
from . import factories
pytestmark = pytest.mark.freeze_time("2020-07-23")
@pytest.fixture()
def logged_in_client(client, django_user_model):
"""A Django test client logged in a user."""
user = opencodelists_factories.create_user()
client.force_login(user)
return client
def test_create_codelist(logged_in_client):
p = factories.create_project()
csv_data = "code,description\n1067731000000107,Injury whilst swimming (disorder)"
data = {
"name": "Test Codelist",
"coding_system_id": "snomedct",
"description": "This is a test",
"methodology": "This is how we did it",
"csv_data": _build_file_for_upload(csv_data),
}
rsp = logged_in_client.post(f"/codelist/{p.slug}/", data, follow=True)
assertRedirects(rsp, f"/codelist/{p.slug}/test-codelist/2020-07-23-draft/")
def test_create_codelist_when_not_logged_in(client):
p = factories.create_project()
csv_data = "code,description\n1067731000000107,Injury whilst swimming (disorder)"
data = {
"name": "Test Codelist",
"coding_system_id": "snomedct",
"description": "This is a test",
"methodology": "This is how we did it",
"csv_data": _build_file_for_upload(csv_data),
}
rsp = client.post(f"/codelist/{p.slug}/", data, follow=True)
assertRedirects(rsp, f"/accounts/login/?next=%2Fcodelist%2F{p.slug}%2F")
def test_codelist(client):
clv = factories.create_published_version()
cl = clv.codelist
rsp = client.get(f"/codelist/{cl.project.slug}/{cl.slug}/", follow=True)
assertRedirects(rsp, f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}/")
assertContains(rsp, cl.name)
def test_version(client):
clv = factories.create_published_version()
cl = clv.codelist
rsp = client.get(f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}/")
assertContains(rsp, cl.name)
assertContains(rsp, cl.description)
assertContains(rsp, cl.methodology)
def test_version_redirects(client):
clv = factories.create_published_version()
cl = clv.codelist
rsp = client.get(
f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}-draft/", follow=True
)
assertRedirects(rsp, f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}/")
assertContains(rsp, cl.name)
assertContains(rsp, cl.description)
assertContains(rsp, cl.methodology)
def test_draft_version(client):
clv = factories.create_draft_version()
cl = clv.codelist
rsp = client.get(f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}-draft/")
assertContains(rsp, cl.name)
assertContains(rsp, cl.description)
assertContains(rsp, cl.methodology)
def test_draft_version_redirects(client):
clv = factories.create_draft_version()
cl = clv.codelist
rsp = client.get(
f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}/", follow=True
)
assertRedirects(
rsp, f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}-draft/"
)
assertContains(rsp, cl.name)
assertContains(rsp, cl.description)
assertContains(rsp, cl.methodology)
def test_download(client):
clv = factories.create_published_version()
cl = clv.codelist
rsp = client.get(
f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}/download.csv"
)
reader = csv.reader(StringIO(rsp.content.decode("utf8")))
data = list(reader)
assert data[0] == ["code", "description"]
assert data[1] == ["1067731000000107", "Injury whilst swimming (disorder)"]
def test_download_does_not_redirect(client):
clv = factories.create_published_version()
cl = clv.codelist
rsp = client.get(
f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}-draft/download.csv"
)
assert rsp.status_code == 404
def test_draft_download(client):
clv = factories.create_draft_version()
cl = clv.codelist
rsp = client.get(
f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}-draft/download.csv"
)
reader = csv.reader(StringIO(rsp.content.decode("utf8")))
data = list(reader)
assert data[0] == ["code", "description"]
assert data[1] == ["1067731000000107", "Injury whilst swimming (disorder)"]
def test_draft_download_does_not_redirect(client):
clv = factories.create_draft_version()
cl = clv.codelist
rsp = client.get(
f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}/download.csv"
)
assert rsp.status_code == 404
def test_create_version(logged_in_client):
clv = factories.create_published_version()
cl = clv.codelist
csv_data = "code,description\n1068181000000106, Injury whilst synchronised swimming (disorder)"
data = {
"csv_data": _build_file_for_upload(csv_data),
}
rsp = logged_in_client.post(
f"/codelist/{cl.project.slug}/{cl.slug}/", data, follow=True
)
assertRedirects(rsp, f"/codelist/{cl.project.slug}/{cl.slug}/2020-07-23-a-draft/")
def test_create_version_when_not_logged_in(client):
clv = factories.create_published_version()
cl = clv.codelist
csv_data = "code,description\n1068181000000106, Injury whilst synchronised swimming (disorder)"
data = {
"csv_data": _build_file_for_upload(csv_data),
}
rsp = client.post(f"/codelist/{cl.project.slug}/{cl.slug}/", data, follow=True)
assertRedirects(
rsp, f"/accounts/login/?next=%2Fcodelist%2F{cl.project.slug}%2F{cl.slug}%2F"
)
def test_update_version(logged_in_client):
clv = factories.create_draft_version()
cl = clv.codelist
csv_data = "code,description\n1068181000000106, Injury whilst synchronised swimming (disorder)"
data = {
"csv_data": _build_file_for_upload(csv_data),
}
rsp = logged_in_client.post(
f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}-draft/",
data,
follow=True,
)
assertRedirects(
rsp, f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}-draft/"
)
def test_update_version_when_not_logged_in(client):
clv = factories.create_draft_version()
cl = clv.codelist
csv_data = "code,description\n1068181000000106, Injury whilst synchronised swimming (disorder)"
data = {
"csv_data": _build_file_for_upload(csv_data),
}
rsp = client.post(
f"/codelist/{cl.project.slug}/{cl.slug}/{clv.version_str}-draft/",
data,
follow=True,
)
assertRedirects(
rsp,
f"/accounts/login/?next=%2Fcodelist%2F{cl.project.slug}%2F{cl.slug}%2F{clv.version_str}-draft%2F",
)
def _build_file_for_upload(contents):
    """Return a binary file-like object, positioned at the start,
    holding *contents* encoded as UTF-8 -- suitable for form upload."""
    # BytesIO with initial bytes starts at position 0, matching the
    # write-then-seek(0) sequence of the previous implementation.
    return BytesIO(contents.encode("utf8"))
|
990,778 | 66dbdb45b3878593cf1c46afc85e1a01fc5fef89 | >>> from math import sqrt
>>> sqrt(16)
4.0
>>> import math
>>> math.pi
3.141592653589793
>>> dir(math)
['__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2',
'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp', 'expm1', 'fabs', 'factorial', 'floor', 'fmod'
, 'frexp', 'fsum', 'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10',
'log1p', 'log2', 'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'tau', 'trunc']
# The math module contains more advanced numeric tools as functions, while the ran dom module performs random-number generation
# and random selections
>>> import random
>>> random.random()
0.7082048489415967
>>> random.choice([1, 2, 3, 4])
1
#numbers, strings, and tuples are immutable; lists, dictionaries, and sets are not
#List Operations
>>> L = [123, 'spam', 1.23]
>>> len(L)
3
>>> L*2
[123, 'spam', 1.23, 123, 'spam', 1.23]
>>> L[:]
[123, 'spam', 1.23]
>>> L[2:]
[1.23]
>>> L[:-1]
[123, 'spam']
>>> L.append(23)
>>> L
[123, 'spam', 1.23, 23]
>>> L.pop(2)
1.23
>>> L
[123, 'spam', 23]
>>> list = [1,23,4,56,33,656,564]
>>> list.sort()
>>> list
[1, 4, 23, 33, 56, 564, 656]
#selecting a partcular column from a 2D list
>>> list2D = [[1,2,3],[4,5,6],[7,8,9]]
>>> list2D[1][2]
6
>>> col2 = [row[1] for row in list2D] #Give me row[1] (2nd element) for each row in matrix M, in a new list.
>>> col2
[2, 5, 8]
>>> M
['bb', 'aa', 'cc']
>>> M.sort()
>>> M
['aa', 'bb', 'cc']
>>> [row[1] for row in M if row[1] % 2 == 0] #Filter out odd items
[2, 8]
#diagonal matrix
>>> diag = [M[i][i] for i in [0, 1, 2]]
>>> diag
[1, 5, 9]
# Repeat characters in a string
>>> doubles = [c * 2 for c in 'spam']
>>> doubles
['ss', 'pp', 'aa', 'mm']
>>> list(range(4))
[0, 1, 2, 3]
>>> a = list(range(-6,7,2))
>>> a
[-6, -4, -2, 0, 2, 4, 6]
>>> [[x ** 2, x **3] for x in range(4)]
[[0, 0], [1, 1], [4, 8], [9, 27]]
>>> [[x, x / 2, x * 2] for x in range(-6, 7, 2) if x > 0]
[[2, 1.0, 4], [4, 2.0, 8], [6, 3.0, 12]]
>>> [[x, int(x / 2), x * 2] for x in range(-6, 7, 2) if x > 0]
[[2, 1, 4], [4, 2, 8], [6, 3, 12]]
>>> G = (sum(row) for row in M)
>>> G
<generator object <genexpr> at 0x105b29408>
>>> next(G)
6
>>> next(G)
15
>>> next(G)
24
'''Dictionaries :: Dictionaries, the only mapping type (not a sequence) in Python’s core objects set, are also mutable '''
>>> D = {}
>>> type(D)
<class 'dict'>
>>> D = {'food': 'Spam', 'quantity': 4, 'color': 'pink'}
>>> D
{'food': 'Spam', 'quantity': 4, 'color': 'pink'}
#using dict to define a dictionary
>>> bob1 = dict(name='Bob', job='dev', age=40)
>>> bob1
{'age': 40, 'name': 'Bob', 'job': 'dev'}
#zipping way to define dictionary
>>> bob2 = dict(zip(['name', 'job', 'age'], ['Bob', 'dev', 40]))
>>> bob2
{'name': 'Bob', 'job': 'dev', 'age': 40}
#Complex nesting of different types in python - one of the advantage of using python, complex nesting is easy to implement
>>> rec = {'name': {'first': 'Bob', 'last': 'Smith'}, 'jobs': ['dev', 'mgr'], 'age': 40.5}
>>> rec['jobs'][1]
'mgr'
>>> rec['name']['last']
'Smith'
>>> rec['jobs'].append('support')
>>> rec
{'name': {'first': 'Bob', 'last': 'Smith'}, 'jobs': ['dev', 'mgr', 'support'], 'age': 40.5}
#In Python, when we lose the last reference to the object—by assigning its variable to something else
>>> rec = 0
#Python has a feature known as garbage collection that cleans up unused memory as your program runs and frees you from having to manage such details in your code.
>>> D = {'a': 1, 'b': 2, 'c': 3}
#so now, what ".get" does is it will select the data with the key 'x' in dictionary D, if it doesnyt find it, it will return 0
>>> value = D.get('x', 0)
>>> value
0
#Sorting Keys: for Loops
>>> sorted(D)
['a', 'b', 'c']
>>> Ks = list(D.keys())
>>> Ks
['a', 'c', 'b']
>>> Ks.sort()
>>> Ks
['a', 'b', 'c']
#Tuples :: tuples are sequences, like lists, but they are immutable. Functionally, they’re used to represent fixed collections of items.
>>> T = (1, 2, 3, 4, 5)
>>> len(T)
5
>>> T + (5,6)
(1, 2, 3, 4, 5, 5, 6)
>>> T
(1, 2, 3, 4, 5)
>>> T[0]
1
>>> T.index(4)
3
>>> T.count(4)
1
#tuples provide a sort of integrity constraint
'''Set :: Sets are neither mappings nor sequences; rather, they are unordered collections of unique and immutable objects
they support the usual mathematical set operations
'''
>>> X = set('spam')
>>> X
{'a', 's', 'p', 'm'}
#Set operations
>>> X, Y
({'a', 's', 'p', 'm'}, {'t', 'u', 'a', 's', 'p', 'l'})
>>> X - Y #difference
{'m'}
>>> Y - X #difference
{'t', 'l', 'u'}
>>> X & Y #Intersection
{'a', 's', 'p'}
>>> X | Y #Union
{'t', 'm', 'u', 'a', 's', 'p', 'l'}
#checking superset
>>> X > Y
False
>>> set('spam') == set('asmp')
True
>>> set('spam') - set('ham')
{'p', 's'}
'''Decimal : fixed-precision floating-point numbers, and fraction numbers, which are rational numbers with
both a numerator and a denominator.
'''
>>> import decimal
>>> a = (2/3) + (1/2)
>>> a
1.1666666666666665
>>> d = decimal.Decimal(a)
>>> d
Decimal('1.166666666666666518636930049979127943515777587890625')
#Fraction
>>> from fractions import Fraction
>>> f = Fraction(2, 3)
>>> f
Fraction(2, 3)
>>> f + 1
Fraction(5, 3)
>>> f + Fraction(1,3)
Fraction(1, 1)
|
990,779 | 9602bd089b44e0a57c6b404b6880b2054f606b43 | from RLAgent_DeepQNetwork import DeepQNetwork
import matplotlib.pyplot as plt
import gym
import numpy as np
import gc
# # 設定環境
#
# ## Observation
# | Num | Observation | Min | Max |
# |-----|-------------|-------|------|
# | 0 | position | -1.2 | 0.6 |
# | 1 | velocity | -0.07 | 0.07 |
#
# ## Action
# | Num | Action |
# |-----|------------|
# | 0 | push left |
# | 1 | no push |
# | 2 | push right |
# 創建環境
env = gym.make('MountainCar-v0')
gc.enable()
# gc.set_debug(gc.DEBUG_STATS|gc.DEBUG_LEAK)
# EpsilonFunction
# https://www.desmos.com/calculator/qgg3tdayyt
memory_size = 2000
Agent = DeepQNetwork(
env.observation_space.shape[0],
env.action_space.n,
learningRate=1e-3,
gamma=0.95,
decayRate=5e-5,
# decayRate=0.0002,
batchSize=128,
memorySize=memory_size,
targetReplaceIter=100,
IsOutputGraph=True
)
# # 開始訓練
# 主要有兩個步驟:
# 1. 產生 random 資料,塞滿 memorySize
# 2. 開始按照 explore or exploit 的策略下去 Try
# ## Helper Function
def GenerateRandomData():
    """Seed the agent's replay memory with memory_size random
    transitions, restarting the episode whenever it terminates."""
    state = env.reset()
    for i in range(memory_size):
        action = env.action_space.sample()
        nextState, reward, IsDone, _ = env.step(action)
        Agent.storeMemory(state, action, reward, nextState)
        # BUG FIX: the original unconditionally overwrote `state` with
        # nextState, discarding the freshly reset state after an
        # episode ended and storing a cross-episode transition on the
        # next iteration.
        if IsDone:
            state = env.reset()
        else:
            state = nextState
# Training Part
# TotalReward = []
def TrainModel(EpochNumber = 300):
    """Train the DQN agent on MountainCar-v0 for EpochNumber episodes.

    Uses a shaped reward (distance of the cart from the valley bottom
    at x = -0.5) to speed up convergence, checkpoints the model every
    100 episodes and saves the final model on exit.
    """
    env.seed(3)
    for i in range(EpochNumber):
        # Reset episode state.
        state = env.reset()
        totalReward = 0
        # Run one episode.
        while True:
            # Render the frame.
            # if(i > EpochNumber * 0.75):
            env.render()
            # Pick an action (epsilon-greedy while training).
            actionValue = Agent.chooseAction(state, IsTrainning=True)
            # Apply the chosen action.
            nextState, reward, IsDone, Info = env.step(actionValue)
            # Reshape the reward: distance from the valley bottom at
            # x = -0.5 (speeds up convergence).
            position, velocity = nextState
            reward = abs(position - (-0.5))
            totalReward += reward
            # Store the transition in replay memory.
            Agent.storeMemory(
                state=state,
                action=actionValue,
                reward=reward,
                nextState=nextState
            )
            # One learning step.
            Agent.learn()
            if IsDone:
                print("Epoch:",(i+1)," TotalReward:", totalReward, " P:", Agent._EpsilonFunction())
                # TotalReward.append(totalReward)
                # Periodic checkpoint.
                if i % 100 == 0:
                    Agent.model.save("MountainCarV0." + str(i) + ".h5")
                gc.collect()
                break
            state = nextState
            # Early-stop check (disabled).
            # if np.mean(TotalReward[-10:]) > 50:
            # break
    # Save the final model.
    Agent.model.save("MountainCar-v0.h5")
    env.close()
# Main
GenerateRandomData()
TrainModel(10000)
exit() |
990,780 | a795f3a55bb90234cc7c45422e55620a21dcda6a | from django.core.management.base import BaseCommand
from query.base_models import ModelDelegator
from scannerpy import Database, DeviceType, Job
from scannerpy.stdlib import readers, pipelines
import os
import cv2
import math
import random
DATASET = os.environ.get('DATASET')
models = ModelDelegator(DATASET)
models.import_all(globals())
class Command(BaseCommand):
    """Django management command: run face detection with Scanner on
    the listed videos and persist the boxes as Person/Face rows."""
    help = 'Detect faces in videos'
    def add_arguments(self, parser):
        # path: a file containing one video path per line.
        parser.add_argument('path')
        # Optional labeler name; 'dummy' triggers jittered test labels.
        parser.add_argument('bbox_labeler', nargs='?', default='tinyfaces')
    def handle(self, *args, **options):
        with open(options['path']) as f:
            paths = [s.strip() for s in f.readlines()]
        with Database() as db:
            filtered = paths
            labeler, _ = Labeler.objects.get_or_create(name=options['bbox_labeler'])
            # Keep only known videos with no faces for this labeler yet.
            filtered = []
            for path in paths:
                try:
                    video = Video.objects.get(path=path)
                except Video.DoesNotExist:
                    continue
                if len(Face.objects.filter(person__frame__video=video, labeler=labeler)) > 0:
                    continue
                filtered.append(path)
            # Detect on every stride-th frame only.
            stride = 24
            # Run the detector via Scanner
            faces_c = pipelines.detect_faces(db,
                [db.table(path).column('frame') for path in filtered],
                db.sampler.strided(stride), 'tmp_faces')
            for path, video_faces_table in zip(filtered, faces_c):
                video = Video.objects.filter(path=path).get()
                table = db.table(path)
                imgs = table.load(['frame'], rows=list(range(0, table.num_rows(), stride)))
                video_faces = video_faces_table.load(
                    ['bboxes'], lambda lst, db: readers.bboxes(lst[0], db.protobufs))
                # i counts sampled frames, so the real frame number is
                # i * stride.
                for (i, frame_faces), (_, img) in zip(video_faces, imgs):
                    frame = Frame.objects.get(video=video, number=i * stride)
                    for bbox in frame_faces:
                        if labeler.name == 'dummy' and random.randint(0, 10) == 1:
                            # generate dummy labels, sometimes
                            # TODO: add boundary checks, shouldn't matter much thouhg.
                            bbox.x1 += 50
                            bbox.x2 += 50
                            bbox.y1 += 50
                            bbox.y2 += 50
                        p = Person(frame=frame)
                        p.save()
                        # Store box coordinates normalised by frame size.
                        f = Face(person=p)
                        f.bbox_x1 = bbox.x1 / video.width
                        f.bbox_x2 = bbox.x2 / video.width
                        f.bbox_y1 = bbox.y1 / video.height
                        f.bbox_y2 = bbox.y2 / video.height
                        f.bbox_score = bbox.score
                        f.labeler = labeler
                        f.save()
990,781 | 86e12fadcce29f7c4d619312bb6087b5b945d7f4 | {
'name': 'Fart Scroll Odoo',
'version': '0.1',
'summary': 'Fart Scroll Odoo',
'author': 'nicolas@blouk.com',
'category': 'Theme/Environment',
'description':
"""
""",
'depends': ['web'],
'data': [
'views/theme.xml',
],
'installable': True,
'auto_install': False,
}
|
990,782 | e6b4093869ecba779d5439a4586ae078c0b32d7a | import unittest
from theatre import Entity
class EntityTest(unittest.TestCase):
    """Unit tests for theatre.Entity construction, groups and components."""
    def test_constructor(self):
        e = Entity('scene')
        # FIX: assertEquals is a deprecated alias of assertEqual and
        # was removed in Python 3.12.
        self.assertEqual(e.scene, 'scene')
    def test_add_groups(self):
        e = Entity('scene', groups = ['test'])
        self.assertTrue('test' in e._groups)
        e.add_groups(['test2'])
        self.assertTrue('test2' in e._groups)
    def test_add_components(self):
        # Components are exposed both as attributes named after their
        # type and via index access.
        e = Entity('scene', components = ['this is a component'])
        self.assertEqual(e.str, 'this is a component')
        self.assertEqual(e['str'], 'this is a component')
        e.add_components([12])
        self.assertEqual(e.int, 12)
        self.assertEqual(e['int'], 12)
if __name__ == '__main__':
unittest.main()
|
990,783 | a39f2c6a74c30b0f25aa5d6ae864f23c599f228e | import glob
import imageio
import matplotlib.pyplot as plt
import tensorflow as tf
def generate_gif(anim_file='app/saves/img/dcgan.gif',
                 pattern='app/saves/img/image*.png'):
    """Assemble the per-epoch snapshot PNGs into an animated GIF.

    Parameters
    ----------
    anim_file : str
        Output path for the GIF. Default preserves the original
        hard-coded behaviour.
    pattern : str
        Glob pattern matching the source frames. Frames are appended in
        sorted (lexicographic) order, which matches the zero-padded
        ``image_at_epoch_{:04d}.png`` naming used by
        ``generate_and_save_images``.
    """
    with imageio.get_writer(anim_file, mode='I') as writer:
        for filename in sorted(glob.glob(pattern)):
            writer.append_data(imageio.imread(filename))
def generate_and_save_images(model, epoch, test_input, rgb=False,
                             out_template='app/saves/img/image_at_epoch_{:04d}.png'):
    """Run the generator on ``test_input`` and save a 4x4 grid of outputs.

    Parameters
    ----------
    model : callable
        The generator; called as ``model(test_input, training=False)``.
    epoch : int
        Current epoch number, interpolated into the output filename.
    test_input : tensor
        Fixed latent vectors fed to the generator.
        NOTE(review): the 4x4 grid assumes at most 16 samples in
        ``test_input`` -- confirm against the caller.
    rgb : bool
        If True, render 3-channel output; otherwise plot channel 0 in
        grayscale.
    out_template : str
        Format template for the saved file; default preserves the
        original hard-coded path.
    """
    predictions = model(test_input, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        # De-normalise from roughly [-1, 1] back to [0, 255]
        # (presumably a tanh-output generator -- TODO confirm).
        if rgb:
            plt.imshow((predictions[i, :, :, :] * 127.5 + 127.5).numpy().astype('uint8'))
        else:
            plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')
    plt.savefig(out_template.format(epoch))
    # Close the figure so repeated calls don't accumulate open figures
    # (matplotlib warns and leaks memory after ~20 open figures).
    plt.close(fig)
990,784 | c5d7fdd3b52a35f9be59fa906e78d2e787770995 | import cv2
import rospy
import sys
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
def talker():
    """Publish a blurred, resized test image on ``DisplayingImage`` at ~1 Hz.

    Fixes over the original:
    - the loop now checks ``rospy.is_shutdown()`` instead of ``while 1``,
      so the node exits cleanly on shutdown;
    - the image read/resize/blur and the ``CvBridge`` construction are
      loop-invariant and are hoisted out of the loop;
    - a missing input image now raises a clear error instead of a
      cryptic cv2 failure inside ``resize``.
    """
    DisImg = rospy.Publisher('DisplayingImage', Image, queue_size=1)
    rospy.init_node('SendingImage', anonymous=True)
    img = cv2.imread('rod2.png')
    if img is None:
        # cv2.imread returns None (no exception) when the file is absent.
        raise IOError("could not read image file 'rod2.png'")
    img_re = cv2.resize(img, (600, 600))
    blur = cv2.GaussianBlur(img_re, (5, 5), cv2.BORDER_DEFAULT)
    msg_image = CvBridge().cv2_to_imgmsg(blur, "bgr8")
    while not rospy.is_shutdown():
        DisImg.publish(msg_image)
        rospy.sleep(1)
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        # rospy.sleep() raises ROSInterruptException when the node is
        # shut down (e.g. Ctrl-C); exiting quietly is the normal path.
        pass
|
990,785 | 827a98602bc6b9a8e8b2d023a1fa10bdf66d114c | """ Contains Ceres HelloWorld Example in Python
This file contains the Ceres HelloWorld Example except it uses Python Bindings.
"""
import os

# Locate the directory containing the compiled PyCeres bindings.
pyceres_location="" # Folder where the PyCeres lib is created
if os.getenv('PYCERES_LOCATION'):
    pyceres_location=os.getenv('PYCERES_LOCATION')
else:
    pyceres_location="../../build/lib" # If the environment variable is not set
    # then it will assume this directory. Only will work if built with Ceres and
    # through the normal mkdir build, cd build, cmake .. procedure
import sys
sys.path.insert(0, pyceres_location)
import PyCeres # Import the Python Bindings
import numpy as np

# The variable to solve for with its initial value.
initial_x=5.0
# NOTE: Solve() updates this array in place, so it holds the result below.
x=np.array([initial_x])
# Build the Problem
problem=PyCeres.Problem()
# Set up the only cost function (also known as residual). This uses a helper
# function written in C++ as Autodiff cant be used in Python. It returns a
# CostFunction*
cost_function=PyCeres.CreateHelloWorldCostFunction()
problem.AddResidualBlock(cost_function,None,x)
options=PyCeres.SolverOptions()
options.linear_solver_type=PyCeres.LinearSolverType.DENSE_QR # Ceres enums live in PyCeres and require the enum Type
options.minimizer_progress_to_stdout=True
summary=PyCeres.Summary()
PyCeres.Solve(options,problem,summary)
# Report the result: initial value vs the optimised array.
print(summary.BriefReport() + " \n")
print( "x : " + str(initial_x) + " -> " + str(x) + "\n")
|
990,786 | 6b3dad57b30f0f2221fb56fe2b831047261f1040 | # coding: utf-8
"""Model para tipos de infrações"""
from django.db import models
from detransapp.manager import TipoInfracaoManager
from detransapp.models.lei import Lei
class TipoInfracao(models.Model):
    """Model for traffic-infraction types (tipos de infrações)."""
    # Catalogue code of the infraction, used directly as primary key.
    codigo = models.CharField(primary_key=True, max_length=20)
    # Human-readable description, also used as the string representation.
    descricao = models.CharField(max_length=200)
    # NOTE(review): ForeignKey without on_delete only works on Django < 2.0;
    # newer versions require an explicit on_delete argument -- confirm the
    # project's Django version before upgrading.
    lei = models.ForeignKey(Lei)
    # Whether identifying the driver is mandatory for this infraction type.
    is_condutor_obrigatorio = models.BooleanField(default=False)
    data = models.DateTimeField(auto_now_add=True)  # creation timestamp
    data_alterado = models.DateTimeField(auto_now=True)  # last-modified timestamp
    # presumably an active/soft-delete flag -- verify against the manager
    ativo = models.BooleanField(default=True)
    objects = TipoInfracaoManager()

    def __unicode__(self):
        # Python 2 style string representation.
        return self.descricao

    class Meta:
        app_label = "detransapp"
|
990,787 | 02b4e94c3a232e64103eac44a07cd02a026f5447 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Module holding the classes for different FFT operators."""
import numpy as np
import pyopencl as cl
import pyopencl.array as clarray
from gpyfft.fft import FFT
from pkg_resources import resource_filename
from pyqmri._helper_fun._calckbkernel import calckbkernel
from pyqmri._helper_fun import CLProgram as Program
class PyOpenCLnuFFT():
    """Base class for FFT calculation.

    This class serves as the base class for all FFT objects used in
    the various optimization algorithms. It provides a factory method
    to generate a FFT object based on the input.

    Parameters
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    fft_dim : tuple of int
        The dimensions to take the fft over.
    DTYPE : Numpy.dtype
        The complex precision type. Currently complex64 is used.
    DTYPE_real : Numpy.dtype
        The real precision type. Currently float32 is used.

    Attributes
    ----------
    DTYPE : Numpy.dtype
        The complex precision type. Currently complex64 is used.
    DTYPE_real : Numpy.dtype
        The real precision type. Currently float32 is used.
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    prg : PyOpenCL.Program
        The PyOpenCL Program Object containing the compiled kernels.
        Initialized to None; the factory compiles and assigns it.
    fft_dim : tuple of int
        The dimensions to take the fft over.
    """

    def __init__(self, ctx, queue, fft_dim, DTYPE, DTYPE_real):
        self.DTYPE = DTYPE
        self.DTYPE_real = DTYPE_real
        self.ctx = ctx
        self.queue = queue
        # Compiled kernel program; filled in by create() after the
        # concrete subclass has been instantiated.
        self.prg = None
        self.fft_dim = fft_dim

    @staticmethod
    def _compile_kernels(obj, kernel_file):
        """Compile the packaged OpenCL kernel source into ``obj.prg``.

        Parameters
        ----------
        obj : PyOpenCLnuFFT
            The freshly created FFT object to attach the program to.
        kernel_file : str
            Path of the kernel source relative to the pyqmri package.

        The ``with`` block guarantees the source file handle is closed
        on every path; the previous implementation only reached
        ``file.close()`` in the streamed branch and leaked the handle
        otherwise.
        """
        with open(resource_filename('pyqmri', kernel_file)) as f:
            obj.prg = Program(obj.ctx, f.read())

    @staticmethod
    def create(ctx,
               queue,
               par,
               kwidth=5,
               klength=1000,
               DTYPE=np.complex64,
               DTYPE_real=np.float32,
               radial=False,
               SMS=False,
               streamed=False):
        """FFT factory method.

        Based on the inputs this method decides which FFT object should be
        returned.

        Parameters
        ----------
        ctx : PyOpenCL.Context
            The context for the PyOpenCL computations.
        queue : PyOpenCL.Queue
            The computation Queue for the PyOpenCL kernels.
        par : dict
            A python dict containing the necessary information to setup the
            object. Needs to contain the number of slices (NSlice), number of
            scans (NScan), image dimensions (dimX, dimY), number of coils (NC),
            sampling points (N) and read outs (NProj) a PyOpenCL queue (queue)
            and the complex coil sensitivities (C).
        kwidth : int, 5
            The width of the sampling kernel for regridding of non-uniform
            kspace samples.
        klength : int, 1000
            The length of the kernel lookup table which samples the contineous
            gridding kernel.
        DTYPE : Numpy.dtype, numpy.complex64
            The complex precision type. Currently complex64 is used.
        DTYPE_real : Numpy.dtype, numpy.float32
            The real precision type. Currently float32 is used.
        radial : bool, False
            Switch for Cartesian (False) and non-Cartesian (True) FFT.
        SMS : bool, False
            Switch between Simultaneous Multi Slice reconstruction (True) and
            simple slice by slice reconstruction.
        streamed : bool, False
            Switch between normal reconstruction in one big block versus
            streamed reconstruction of smaller blocks.

        Returns
        -------
        PyOpenCLnuFFT object:
            The setup FFT object.

        Raises
        ------
        AssertionError:
            If the combination of passed flags to choose the FFT aren't
            compatible with each other, i.e. radial and SMS both True.
        NotImplementedError:
            If a streamed 3D non-Cartesian FFT is requested.
        """
        # Radial and SMS are mutually exclusive acquisition models.
        if radial and SMS:
            raise AssertionError("Combination of Radial "
                                 "and SMS not allowed")
        if radial:
            if par["is3D"]:
                if streamed:
                    raise NotImplementedError(
                        "3D non-cartesian and streamed not implemented")
                obj = PyOpenCL3DRadialNUFFT(
                    ctx,
                    queue,
                    par,
                    kwidth=kwidth,
                    klength=klength,
                    DTYPE=DTYPE,
                    DTYPE_real=DTYPE_real)
            else:
                obj = PyOpenCLRadialNUFFT(
                    ctx,
                    queue,
                    par,
                    kwidth=kwidth,
                    klength=klength,
                    DTYPE=DTYPE,
                    DTYPE_real=DTYPE_real,
                    streamed=streamed)
        elif SMS:
            obj = PyOpenCLSMSNUFFT(
                ctx,
                queue,
                par,
                DTYPE=DTYPE,
                DTYPE_real=DTYPE_real,
                streamed=streamed)
        else:
            obj = PyOpenCLCartNUFFT(
                ctx,
                queue,
                par,
                DTYPE=DTYPE,
                DTYPE_real=DTYPE_real,
                streamed=streamed)

        # Pick the kernel source matching precision and memory layout,
        # then compile it onto the freshly created object.
        if DTYPE == np.complex128:
            precision = 'double'
            print('Using double precision')
        else:
            precision = 'single'
            print('Using single precision')
        if streamed:
            kernel_file = ('kernels/OpenCL_gridding_slicefirst_'
                           + precision + '.c')
        else:
            kernel_file = 'kernels/OpenCL_gridding_' + precision + '.c'
        PyOpenCLnuFFT._compile_kernels(obj, kernel_file)
        return obj
class PyOpenCLRadialNUFFT(PyOpenCLnuFFT):
"""Non-uniform FFT object.
This class performs the non-uniform FFT (NUFFT) operation. Linear
interpolation of a sampled gridding kernel is used to regrid points
from the non-cartesian grid back on the cartesian grid.
Parameters
----------
ctx : PyOpenCL.Context
The context for the PyOpenCL computations.
queue : PyOpenCL.Queue
The computation Queue for the PyOpenCL kernels.
par : dict
A python dict containing the necessary information to
setup the object. Needs to contain the number of slices (NSlice),
number of scans (NScan), image dimensions (dimX, dimY), number of
coils (NC), sampling points (N) and read outs (NProj)
a PyOpenCL queue (queue) and the complex coil
sensitivities (C).
kwidth : int
The width of the sampling kernel for regridding of non-uniform
kspace samples.
klength : int
The length of the kernel lookup table which samples the contineous
gridding kernel.
DTYPE : Numpy.dtype
The comlex precision type. Currently complex64 is used.
DTYPE_real : Numpy.dtype
The real precision type. Currently float32 is used.
Attributes
----------
traj : PyOpenCL.Array
The comlex sampling trajectory
dcf : PyOpenCL.Array
The densitiy compenation function
ogf (float):
The overgriddingfactor for non-cartesian k-spaces.
fft_shape : tuple of ints
3 dimensional tuple. Dim 0 containts all Scans, Coils and Slices.
Dim 1 and 2 the overgridded image dimensions.
fft_scale : float32
The scaling factor to achieve a good adjointness of the forward and
backward FFT.
cl_kerneltable (PyOpenCL.Buffer):
The gridding lookup table as read only Buffer
cl_deapo (PyOpenCL.Buffer):
The deapodization lookup table as read only Buffer
par_fft : int
The number of parallel fft calls. Typically it iterates over the
Scans.
fft : gpyfft.fft.FFT
The fft object created from gpyfft (A wrapper for clFFT). The object
is created only once an reused in each iterations, iterationg over
all scans to keep the memory footprint low.
prg : PyOpenCL.Program
The PyOpenCL.Program object containing the necessary kernels to
execute the linear Operator. This will be determined by the
factory and set after the object is created.
"""
def __init__(
self,
ctx,
queue,
par,
kwidth=5,
klength=200,
DTYPE=np.complex64,
DTYPE_real=np.float32,
streamed=False):
super().__init__(ctx, queue, par["fft_dim"], DTYPE, DTYPE_real)
self.ogf = par["ogf"]
if streamed:
self.fft_shape = (
par["NScan"] *
par["NC"] *
(par["par_slices"] + par["overlap"]),
int(round(par["dimY"]*self.ogf)),
int(round(par["dimX"]*self.ogf)))
else:
self.fft_shape = (
par["NScan"] *
par["NC"] *
par["NSlice"],
int(round(par["dimY"]*self.ogf)),
int(round(par["dimX"]*self.ogf)))
self.fft_scale = DTYPE_real(
np.sqrt(np.prod(self.fft_shape[self.fft_dim[0]:])))
(kerneltable, kerneltable_FT) = calckbkernel(
kwidth, self.ogf, int(self.ogf*par["dimX"]), klength)
deapo = 1 / kerneltable_FT.astype(DTYPE_real)
self.cl_kerneltable = cl.Buffer(
self.ctx,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=kerneltable.astype(DTYPE_real).data)
self.cl_deapo = cl.Buffer(
self.ctx,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=deapo.data)
self.dcf = clarray.to_device(self.queue, par["dcf"])
self.traj = clarray.to_device(self.queue, par["traj"])
self._tmp_fft_array = (
clarray.zeros(
self.queue,
(self.fft_shape),
dtype=DTYPE))
if par["use_GPU"]:
self.par_fft = int(
self.fft_shape[0] / par["NScan"])
else:
self.par_fft = self.fft_shape[0]
self.iternumber = int(self.fft_shape[0]/self.par_fft)
self.fft = FFT(ctx, queue, self._tmp_fft_array[
0:self.par_fft, ...],
out_array=self._tmp_fft_array[
0:self.par_fft, ...],
axes=self.fft_dim)
self._kernelpoints = kerneltable.size
self._kwidth = kwidth / 2
self._check = np.ones(self.fft_shape[-1], dtype=DTYPE_real)
self._check[1::2] = -1
self._check = clarray.to_device(self.queue, self._check)
self._gridsize = self.fft_shape[-1]
def __del__(self):
    """Release references to the OpenCL objects held by this operator.

    NOTE(review): ``del`` on an attribute only drops this object's
    reference; the underlying OpenCL resources are released when their
    refcount reaches zero, and ``__del__`` timing is nondeterministic.
    Callers needing prompt release should drop the operator explicitly.
    """
    del self.traj
    del self.dcf
    del self._tmp_fft_array
    del self.cl_kerneltable
    del self.cl_deapo
    del self._check
    del self.queue
    del self.ctx
    del self.prg
    del self.fft
def FFTH(self, sg, s, wait_for=None, scan_offset=0):
"""Perform the inverse (adjoint) NUFFT operation.
Parameters
----------
sg : PyOpenCL.Array
The complex image data.
s : PyOpenCL.Array
The non-uniformly gridded k-space
wait_for : list of PyopenCL.Event, None
A List of PyOpenCL events to wait for.
scan_offset : int, 0
Offset compared to the first acquired scan.
Returns
-------
PyOpenCL.Event: A PyOpenCL event to wait for.
"""
if wait_for is None:
wait_for = []
# Zero tmp arrays
self._tmp_fft_array.add_event(
self.prg.zero_tmp(
self.queue,
(self._tmp_fft_array.size,
),
None,
self._tmp_fft_array.data,
wait_for=self._tmp_fft_array.events))
# Grid k-space
self._tmp_fft_array.add_event(
self.prg.grid_lut(
self.queue,
(s.shape[0], s.shape[1] * s.shape[2],
s.shape[-2] * s.shape[-1]),
None,
self._tmp_fft_array.data,
s.data,
self.traj.data,
np.int32(self._gridsize),
np.int32(sg.shape[2]),
self.DTYPE_real(self._kwidth),
self.dcf.data,
self.cl_kerneltable,
np.int32(self._kernelpoints),
np.int32(scan_offset),
wait_for=(wait_for +
s.events + self._tmp_fft_array.events)))
# FFT
self._tmp_fft_array.add_event(
self.prg.fftshift(
self.queue,
(self.fft_shape[0],
self.fft_shape[1],
self.fft_shape[2]),
None,
self._tmp_fft_array.data,
self._check.data,
wait_for=self._tmp_fft_array.events))
cl.wait_for_events(self._tmp_fft_array.events)
fft_events = []
for j in range(self.iternumber):
fft_events.append(self.fft.enqueue_arrays(
data=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
result=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
forward=False)[0])
self._tmp_fft_array.add_event(
self.prg.fftshift(
self.queue,
(self.fft_shape[0],
self.fft_shape[1],
self.fft_shape[2]),
None,
self._tmp_fft_array.data,
self._check.data,
wait_for=fft_events))
return self.prg.deapo_adj(
self.queue,
(sg.shape[0] * sg.shape[1] *
sg.shape[2], sg.shape[3], sg.shape[4]),
None,
sg.data,
self._tmp_fft_array.data,
self.cl_deapo,
np.int32(self._tmp_fft_array.shape[-1]),
self.DTYPE_real(self.fft_scale),
self.DTYPE_real(self.ogf),
wait_for=(wait_for + sg.events +
self._tmp_fft_array.events))
def FFT(self, s, sg, wait_for=None, scan_offset=0):
"""Perform the forward NUFFT operation.
Parameters
----------
s : PyOpenCL.Array
The non-uniformly gridded k-space.
sg : PyOpenCL.Array
The complex image data.
wait_for : list of PyopenCL.Event, None
A List of PyOpenCL events to wait for.
scan_offset : int, 0
Offset compared to the first acquired scan.
Returns
-------
PyOpenCL.Event: A PyOpenCL event to wait for.
"""
if wait_for is None:
wait_for = []
# Zero tmp arrays
self._tmp_fft_array.add_event(
self.prg.zero_tmp(
self.queue,
(self._tmp_fft_array.size,
),
None,
self._tmp_fft_array.data,
wait_for=
self._tmp_fft_array.events))
# Deapodization and Scaling
self._tmp_fft_array.add_event(
self.prg.deapo_fwd(
self.queue,
(sg.shape[0] * sg.shape[1] * sg.shape[2],
sg.shape[3], sg.shape[4]),
None,
self._tmp_fft_array.data,
sg.data,
self.cl_deapo,
np.int32(self._tmp_fft_array.shape[-1]),
self.DTYPE_real(1 / self.fft_scale),
self.DTYPE_real(self.ogf),
wait_for=wait_for + sg.events + self._tmp_fft_array.events))
# FFT
self._tmp_fft_array.add_event(
self.prg.fftshift(
self.queue,
(self.fft_shape[0],
self.fft_shape[1],
self.fft_shape[2]),
None,
self._tmp_fft_array.data,
self._check.data,
wait_for=self._tmp_fft_array.events))
cl.wait_for_events(self._tmp_fft_array.events)
fft_events = []
for j in range(self.iternumber):
fft_events.append(self.fft.enqueue_arrays(
data=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
result=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
forward=True)[0])
self._tmp_fft_array.add_event(
self.prg.fftshift(
self.queue,
(self.fft_shape[0],
self.fft_shape[1],
self.fft_shape[2]),
None,
self._tmp_fft_array.data,
self._check.data,
wait_for=fft_events))
# Resample on Spoke
return self.prg.invgrid_lut(
self.queue,
(s.shape[0], s.shape[1] * s.shape[2], s.shape[-2] *
s.shape[-1]),
None,
s.data,
self._tmp_fft_array.data,
self.traj.data,
np.int32(self._gridsize),
np.int32(s.shape[2]),
self.DTYPE_real(self._kwidth),
self.dcf.data,
self.cl_kerneltable,
np.int32(self._kernelpoints),
np.int32(scan_offset),
wait_for=s.events + wait_for + self._tmp_fft_array.events)
class PyOpenCL3DRadialNUFFT(PyOpenCLnuFFT):
"""Non-uniform FFT object.
This class performs the 3D non-uniform FFT (NUFFT) operation. Linear
interpolation of a sampled gridding kernel is used to regrid points
from the non-cartesian grid back on the cartesian grid.
Parameters
----------
ctx : PyOpenCL.Context
The context for the PyOpenCL computations.
queue : PyOpenCL.Queue
The computation Queue for the PyOpenCL kernels.
par : dict
A python dict containing the necessary information to
setup the object. Needs to contain the number of slices (NSlice),
number of scans (NScan), image dimensions (dimX, dimY), number of
coils (NC), sampling points (N) and read outs (NProj)
a PyOpenCL queue (queue) and the complex coil
sensitivities (C).
kwidth : int
The width of the sampling kernel for regridding of non-uniform
kspace samples.
klength : int
The length of the kernel lookup table which samples the contineous
gridding kernel.
DTYPE : Numpy.dtype
The comlex precision type. Currently complex64 is used.
DTYPE_real : Numpy.dtype
The real precision type. Currently float32 is used.
Attributes
----------
traj : PyOpenCL.Array
The comlex sampling trajectory
dcf : PyOpenCL.Array
The densitiy compenation function
ogf (float):
The overgriddingfactor for non-cartesian k-spaces.
fft_shape : tuple of ints
3 dimensional tuple. Dim 0 containts all Scans, Coils and Slices.
Dim 1 and 2 the overgridded image dimensions.
fft_scale : float32
The scaling factor to achieve a good adjointness of the forward and
backward FFT.
cl_kerneltable (PyOpenCL.Buffer):
The gridding lookup table as read only Buffer
cl_deapo (PyOpenCL.Buffer):
The deapodization lookup table as read only Buffer
par_fft : int
The number of parallel fft calls. Typically it iterates over the
Scans.
fft : gpyfft.fft.FFT
The fft object created from gpyfft (A wrapper for clFFT). The object
is created only once an reused in each iterations, iterationg over
all scans to keep the memory footprint low.
prg : PyOpenCL.Program
The PyOpenCL.Program object containing the necessary kernels to
execute the linear Operator. This will be determined by the
factory and set after the object is created.
"""
def __init__(
self,
ctx,
queue,
par,
kwidth=5,
klength=200,
DTYPE=np.complex64,
DTYPE_real=np.float32,
streamed=False):
super().__init__(ctx, queue, par["fft_dim"], DTYPE, DTYPE_real)
# self.ogf = par["N"]/par["dimX"]
self.ogf = par["ogf"]
self.fft_shape = (
par["NScan"] *
par["NC"],
int(round(par["NSlice"]*self.ogf)),
int(round(par["dimY"]*self.ogf)),
int(round(par["dimX"]*self.ogf)))
self.fft_scale = DTYPE_real(
np.sqrt(np.prod(self.fft_shape[-3:])))
(kerneltable, kerneltable_FT) = calckbkernel(
kwidth, self.ogf, par["N"], klength)
deapo = 1 / kerneltable_FT.astype(DTYPE_real)
self.cl_kerneltable = cl.Buffer(
self.ctx,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=kerneltable.astype(DTYPE_real).data)
self.cl_deapo = cl.Buffer(
self.ctx,
cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
hostbuf=deapo.data)
self.dcf = clarray.to_device(self.queue, par["dcf"])
self.traj = clarray.to_device(self.queue, par["traj"])
self._tmp_fft_array = (
clarray.zeros(
self.queue,
(self.fft_shape),
dtype=DTYPE))
if par["use_GPU"]:
self.par_fft = int(
self.fft_shape[0] / par["NScan"])
else:
self.par_fft = self.fft_shape[0]
self.iternumber = int(self.fft_shape[0]/self.par_fft)
self.fft = FFT(ctx, queue, self._tmp_fft_array[
0:self.par_fft, ...],
out_array=self._tmp_fft_array[
0:self.par_fft, ...],
axes=self.fft_dim)
self._kernelpoints = kerneltable.size
self._kwidth = kwidth / 2
self._check = np.ones(self.fft_shape[-1], dtype=DTYPE_real)
self._check[1::2] = -1
self._check = clarray.to_device(self.queue, self._check)
self._gridsize = self.fft_shape[-1]
def __del__(self):
    """Release references to the OpenCL objects held by this operator.

    NOTE(review): ``del`` on an attribute only drops this object's
    reference; the underlying OpenCL resources are released when their
    refcount reaches zero, and ``__del__`` timing is nondeterministic.
    """
    del self.traj
    del self.dcf
    del self._tmp_fft_array
    del self.cl_kerneltable
    del self.cl_deapo
    del self._check
    del self.queue
    del self.ctx
    del self.prg
    del self.fft
def FFTH(self, sg, s, wait_for=None, scan_offset=0):
"""Perform the inverse (adjoint) NUFFT operation.
Parameters
----------
sg : PyOpenCL.Array
The complex image data.
s : PyOpenCL.Array
The non-uniformly gridded k-space
wait_for : list of PyopenCL.Event, None
A List of PyOpenCL events to wait for.
scan_offset : int, 0
Offset compared to the first acquired scan.
Returns
-------
PyOpenCL.Event: A PyOpenCL event to wait for.
"""
if wait_for is None:
wait_for = []
# Zero tmp arrays
self._tmp_fft_array.add_event(
self.prg.zero_tmp(
self.queue,
(self._tmp_fft_array.size,
),
None,
self._tmp_fft_array.data,
wait_for=self._tmp_fft_array.events))
# Grid k-space
self._tmp_fft_array.add_event(
self.prg.grid_lut3D(
self.queue,
(s.shape[0], s.shape[1],
s.shape[-2] * self._gridsize),
None,
self._tmp_fft_array.data,
s.data,
self.traj.data,
np.int32(self._gridsize),
np.int32(sg.shape[2]),
self.DTYPE_real(self._kwidth),
self.dcf.data,
self.cl_kerneltable,
np.int32(self._kernelpoints),
np.int32(scan_offset),
wait_for=(wait_for +
s.events + self._tmp_fft_array.events)))
# FFT
self._tmp_fft_array.add_event(
self.prg.fftshift3D(
self.queue,
(np.prod(self.fft_shape[:2]),
self.fft_shape[2],
self.fft_shape[3]),
None,
self._tmp_fft_array.data,
self._check.data,
wait_for=self._tmp_fft_array.events))
cl.wait_for_events(self._tmp_fft_array.events)
fft_events = []
for j in range(self.iternumber):
fft_events.append(self.fft.enqueue_arrays(
data=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
result=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
forward=False)[0])
self._tmp_fft_array.add_event(
self.prg.fftshift3D(
self.queue,
(np.prod(self.fft_shape[:2]),
self.fft_shape[2],
self.fft_shape[3]),
None,
self._tmp_fft_array.data,
self._check.data,
wait_for=fft_events))
return self.prg.deapo_adj3D(
self.queue,
(sg.shape[0] * sg.shape[1] *
sg.shape[2], sg.shape[3], sg.shape[4]),
None,
sg.data,
self._tmp_fft_array.data,
self.cl_deapo,
np.int32(self._tmp_fft_array.shape[-1]),
self.DTYPE_real(self.fft_scale),
self.DTYPE_real(self.ogf),
wait_for=(wait_for + sg.events +
self._tmp_fft_array.events))
def FFT(self, s, sg, wait_for=None, scan_offset=0):
"""Perform the forward NUFFT operation.
Parameters
----------
s : PyOpenCL.Array
The non-uniformly gridded k-space.
sg : PyOpenCL.Array
The complex image data.
wait_for : list of PyopenCL.Event, None
A List of PyOpenCL events to wait for.
scan_offset : int, 0
Offset compared to the first acquired scan.
Returns
-------
PyOpenCL.Event: A PyOpenCL event to wait for.
"""
if wait_for is None:
wait_for = []
# Zero tmp arrays
self._tmp_fft_array.add_event(
self.prg.zero_tmp(
self.queue,
(self._tmp_fft_array.size,
),
None,
self._tmp_fft_array.data,
wait_for=
self._tmp_fft_array.events))
# Deapodization and Scaling
self._tmp_fft_array.add_event(
self.prg.deapo_fwd3D(
self.queue,
(sg.shape[0] * sg.shape[1] * sg.shape[2],
sg.shape[3], sg.shape[4]),
None,
self._tmp_fft_array.data,
sg.data,
self.cl_deapo,
np.int32(self._tmp_fft_array.shape[-1]),
self.DTYPE_real(1 / self.fft_scale),
self.DTYPE_real(self.ogf),
wait_for=wait_for + sg.events + self._tmp_fft_array.events))
# FFT
self._tmp_fft_array.add_event(
self.prg.fftshift3D(
self.queue,
(np.prod(self.fft_shape[:2]),
self.fft_shape[2],
self.fft_shape[3]),
None,
self._tmp_fft_array.data,
self._check.data,
wait_for=self._tmp_fft_array.events))
cl.wait_for_events(self._tmp_fft_array.events)
fft_events = []
for j in range(self.iternumber):
fft_events.append(self.fft.enqueue_arrays(
data=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
result=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
forward=True)[0])
self._tmp_fft_array.add_event(
self.prg.fftshift3D(
self.queue,
(np.prod(self.fft_shape[:2]),
self.fft_shape[2],
self.fft_shape[3]),
None,
self._tmp_fft_array.data,
self._check.data,
wait_for=fft_events))
# Resample on Spoke
return self.prg.invgrid_lut3D(
self.queue,
(s.shape[0], s.shape[1], s.shape[-2] *
self._gridsize),
None,
s.data,
self._tmp_fft_array.data,
self.traj.data,
np.int32(self._gridsize),
np.int32(s.shape[2]),
self.DTYPE_real(self._kwidth),
self.dcf.data,
self.cl_kerneltable,
np.int32(self._kernelpoints),
np.int32(scan_offset),
wait_for=s.events + wait_for + self._tmp_fft_array.events)
class PyOpenCLCartNUFFT(PyOpenCLnuFFT):
"""Cartesian FFT object.
This class performs the FFT operation.
Parameters
----------
ctx : PyOpenCL.Context
The context for the PyOpenCL computations.
queue : PyOpenCL.Queue
The computation Queue for the PyOpenCL kernels.
par : dict A python dict containing the necessary information to
setup the object. Needs to contain the number of slices (NSlice),
number of scans (NScan), image dimensions (dimX, dimY), number of
coils (NC), sampling points (N) and read outs (NProj)
a PyOpenCL queue (queue) and the complex coil
sensitivities (C).
DTYPE : Numpy.dtype
The comlex precision type. Currently complex64 is used.
DTYPE_real : Numpy.dtype
The real precision type. Currently float32 is used.
Attributes
----------
fft_shape : tuple of ints
3 dimensional tuple. Dim 0 containts all Scans, Coils and Slices.
Dim 1 and 2 the overgridded image dimensions.
fft_scale : float32
The scaling factor to achieve a good adjointness of the forward and
backward FFT.
par_fft : int
The number of parallel fft calls. Typically it iterates over the
Scans.
fft : gpyfft.fft.FFT
The fft object created from gpyfft (A wrapper for clFFT). The object
is created only once an reused in each iterations, iterationg over
all scans to keep the memory footprint low.
mask : PyOpenCL.Array
The undersampling mask for the Cartesian grid.
prg : PyOpenCL.Program
The PyOpenCL.Program object containing the necessary kernels to
execute the linear Operator. This will be determined by the
factory and set after the object is created.
"""
def __init__(
self,
ctx,
queue,
par,
DTYPE=np.complex64,
DTYPE_real=np.float32,
streamed=False):
super().__init__(ctx, queue, par["fft_dim"], DTYPE, DTYPE_real)
if streamed:
self.fft_shape = (
par["NScan"] *
par["NC"] *
(par["par_slices"] + par["overlap"]),
par["dimY"],
par["dimX"])
else:
if par["is3D"]:
self.fft_shape = (
par["NScan"] *
par["NC"],
par["NSlice"],
par["dimY"],
par["dimX"])
else:
self.fft_shape = (
par["NScan"] *
par["NC"] *
par["NSlice"],
par["dimY"],
par["dimX"])
if par["fft_dim"] is not None:
self.fft_scale = DTYPE_real(
np.sqrt(np.prod(self.fft_shape[self.fft_dim[0]:])))
self._tmp_fft_array = (
clarray.zeros(
self.queue,
self.fft_shape,
dtype=DTYPE))
if par["use_GPU"]:
self.par_fft = int(
self.fft_shape[0] / par["NScan"])
else:
self.par_fft = self.fft_shape[0]
self.iternumber = int(self.fft_shape[0]/self.par_fft)
self.mask = clarray.to_device(self.queue, par["mask"])
self.fft = FFT(ctx, queue, self._tmp_fft_array[
0:self.par_fft, ...],
out_array=self._tmp_fft_array[
0:self.par_fft, ...],
axes=self.fft_dim)
def __del__(self):
    """Release references to the OpenCL objects held by this operator.

    The FFT-specific attributes only exist when an FFT dimension was
    set in ``__init__`` (``par["fft_dim"] is not None``), so the
    deletes are guarded by the same condition.
    """
    if self.fft_dim is not None:
        del self._tmp_fft_array
        del self.fft
        del self.mask
    del self.queue
    del self.ctx
    del self.prg
def FFTH(self, sg, s, wait_for=None, scan_offset=0):
"""Perform the inverse (adjoint) FFT operation.
Parameters
----------
sg : PyOpenCL.Array
The complex image data.
s : PyOpenCL.Array
The uniformly gridded k-space
wait_for : list of PyopenCL.Event, None
A List of PyOpenCL events to wait for.
scan_offset : int, 0
Offset compared to the first acquired scan.
Returns
-------
PyOpenCL.Event: A PyOpenCL event to wait for.
"""
if wait_for is None:
wait_for = []
if self.fft_dim is not None:
self._tmp_fft_array.add_event(
self.prg.maskingcpy(
self.queue,
(self._tmp_fft_array.shape[0],
np.prod(self._tmp_fft_array.shape[1:])),
None,
self._tmp_fft_array.data,
s.data,
self.mask.data,
wait_for=s.events+self._tmp_fft_array.events+wait_for))
cl.wait_for_events(self._tmp_fft_array.events)
fft_events = []
for j in range(self.iternumber):
fft_events.append(self.fft.enqueue_arrays(
data=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
result=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
forward=False)[0])
return (
self.prg.copy(
self.queue,
(sg.size,
),
None,
sg.data,
self._tmp_fft_array.data,
self.DTYPE_real(
self.fft_scale),
wait_for=sg.events+fft_events))
return self.prg.copy(
self.queue,
(sg.size,
),
None,
sg.data,
s.data,
self.DTYPE_real(1),
wait_for=s.events+sg.events+wait_for)
def FFT(self, s, sg, wait_for=None, scan_offset=0):
"""Perform the forward FFT operation.
Parameters
----------
s : PyOpenCL.Array
The uniformly gridded k-space.
sg : PyOpenCL.Array
The complex image data.
wait_for : list of PyopenCL.Event, None
A List of PyOpenCL events to wait for.
scan_offset : int, 0
Offset compared to the first acquired scan.
Returns
-------
PyOpenCL.Event: A PyOpenCL event to wait for.
"""
if wait_for is None:
wait_for = []
if self.fft_dim is not None:
self._tmp_fft_array.add_event(
self.prg.copy(
self.queue,
(sg.size,
),
None,
self._tmp_fft_array.data,
sg.data,
self.DTYPE_real(
1 /
self.fft_scale),
wait_for=sg.events+self._tmp_fft_array.events+wait_for))
cl.wait_for_events(self._tmp_fft_array.events)
fft_events = []
for j in range(self.iternumber):
fft_events.append(self.fft.enqueue_arrays(
data=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
result=self._tmp_fft_array[
j * self.par_fft:(j + 1) * self.par_fft, ...],
forward=True)[0])
return (
self.prg.maskingcpy(
self.queue,
(self._tmp_fft_array.shape[0],
np.prod(self._tmp_fft_array.shape[1:])),
None,
s.data,
self._tmp_fft_array.data,
self.mask.data,
wait_for=s.events+fft_events))
return self.prg.copy(
self.queue,
(sg.size,
),
None,
s.data,
sg.data,
self.DTYPE_real(1),
wait_for=s.events+sg.events+wait_for)
class PyOpenCLSMSNUFFT(PyOpenCLnuFFT):
    """Cartesian FFT-SMS object.

    This class performs the FFT operation assuming a SMS
    (simultaneous multi-slice) acquisition.

    Parameters
    ----------
    ctx : PyOpenCL.Context
        The context for the PyOpenCL computations.
    queue : PyOpenCL.Queue
        The computation Queue for the PyOpenCL kernels.
    par : dict
        A python dict containing the necessary information to
        setup the object. Needs to contain the number of slices (NSlice),
        number of scans (NScan), image dimensions (dimX, dimY), number of
        coils (NC), sampling points (N) and read outs (NProj)
        a PyOpenCL queue (queue) and the complex coil
        sensitivities (C).
    DTYPE : Numpy.dtype
        The complex precision type. Currently complex64 is used.
    DTYPE_real : Numpy.dtype
        The real precision type. Currently float32 is used.

    Attributes
    ----------
    fft_shape : tuple of ints
        3 dimensional tuple. Dim 0 contains all Scans, Coils and Slices.
        Dim 1 and 2 the overgridded image dimensions.
    fft_scale : float32
        The scaling factor to achieve a good adjointness of the forward and
        backward FFT.
    par_fft : int
        The number of parallel fft calls. Typically it iterates over the
        Scans.
    fft : gpyfft.fft.FFT
        The fft object created from gpyfft (A wrapper for clFFT). The object
        is created only once and reused in each iteration, iterating over
        all scans to keep the memory footprint low.
    mask : PyOpenCL.Array
        The undersampling mask for the Cartesian grid.
    packs : int
        The distance between the slices
    MB : int
        The multiband factor
    shift : PyOpenCL.Array
        The vector pixel shifts used in the fft computation.
    prg : PyOpenCL.Program
        The PyOpenCL.Program object containing the necessary kernels to
        execute the linear Operator. This will be determined by the
        factory and set after the object is created.
    """

    def __init__(
            self,
            ctx,
            queue,
            par,
            DTYPE=np.complex64,
            DTYPE_real=np.float32,
            streamed=False):
        super().__init__(ctx, queue, par["fft_dim"], DTYPE, DTYPE_real)
        # In streamed mode the scan dimension is handled outside this object,
        # so the batch dimension only covers coils and slices.
        if streamed:
            self.fft_shape = (
                par["NC"] *
                par["NSlice"],
                par["dimY"],
                par["dimX"])
        else:
            self.fft_shape = (
                par["NScan"] *
                par["NC"] *
                par["NSlice"],
                par["dimY"],
                par["dimX"])
        self.packs = int(par["packs"])
        self.MB = int(par["MB"])
        # Per-slice pixel shifts applied by the SMS (de)composition kernels.
        self.shift = clarray.to_device(
            self.queue, par["shift"].astype(DTYPE_real))
        if par["fft_dim"] is not None:
            # Symmetric normalization so forward/adjoint FFT are adjoint.
            self.fft_scale = DTYPE_real(
                np.sqrt(np.prod(self.fft_shape[self.fft_dim[0]:])))
            self._tmp_fft_array = (
                clarray.zeros(
                    self.queue,
                    self.fft_shape,
                    dtype=DTYPE))
            # On GPU the FFT is batched per scan to limit memory use;
            # otherwise the whole array is transformed in one call.
            if par["use_GPU"] and not streamed:
                self.par_fft = int(
                    self.fft_shape[0] / par["NScan"])
            else:
                self.par_fft = self.fft_shape[0]
            # Number of sequential FFT invocations needed to cover dim 0.
            self.iternumber = int(self.fft_shape[0]/self.par_fft)
            self.mask = clarray.to_device(self.queue, par["mask"])
            # The clFFT plan is created once on the first batch slice and
            # reused (in place) for every batch.
            self.fft = FFT(ctx, queue, self._tmp_fft_array[
                               0:self.par_fft, ...],
                           out_array=self._tmp_fft_array[
                               0:self.par_fft, ...],
                           axes=self.fft_dim)

    def __del__(self):
        """Explicitly delete OpenCL Objets."""
        # The temporary array, plan and mask only exist when an FFT dim
        # was configured; see __init__.
        if self.fft_dim is not None:
            del self._tmp_fft_array
            del self.fft
            del self.mask
        del self.queue
        del self.ctx
        del self.prg

    def FFTH(self, sg, s, wait_for=None, scan_offset=0):
        """Perform the inverse (adjoint) FFT operation.

        Parameters
        ----------
        sg : PyOpenCL.Array
            The complex image data.
        s : PyOpenCL.Array
            The uniformly gridded k-space compressed by the MB factor.
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        if self.fft_dim is not None:
            # Decode the MB-compressed k-space into the full slice stack.
            self._tmp_fft_array.add_event(
                self.prg.copy_SMS_adjkspace(
                    self.queue,
                    (sg.shape[0] * sg.shape[1],
                     sg.shape[-2],
                     sg.shape[-1]),
                    None,
                    self._tmp_fft_array.data,
                    s.data,
                    self.shift.data,
                    self.mask.data,
                    np.int32(self.packs),
                    np.int32(self.MB),
                    self.DTYPE_real(self.fft_scale),
                    np.int32(sg.shape[2]/self.packs/self.MB),
                    wait_for=s.events+wait_for+self._tmp_fft_array.events))
            # clFFT plans are enqueued per batch; synchronize the copy first.
            cl.wait_for_events(self._tmp_fft_array.events)
            fft_events = []
            for j in range(self.iternumber):
                fft_events.append(self.fft.enqueue_arrays(
                    data=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    result=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    forward=False)[0])
            # Scale-copy the transformed data into the image array.
            return (self.prg.copy(self.queue,
                                  (sg.size,),
                                  None,
                                  sg.data,
                                  self._tmp_fft_array.data,
                                  self.DTYPE_real(self.fft_scale),
                                  wait_for=sg.events+fft_events))
        # No FFT dimension configured: SMS-decode directly without transform.
        return self.prg.copy_SMS_adj(
            self.queue,
            (sg.shape[0] * sg.shape[1],
             sg.shape[-2],
             sg.shape[-1]),
            None,
            sg.data,
            s.data,
            self.shift.data,
            self.mask.data,
            np.int32(self.packs),
            np.int32(self.MB),
            self.DTYPE_real(1),
            np.int32(sg.shape[2]/self.packs/self.MB),
            wait_for=s.events+sg.events+wait_for)

    def FFT(self, s, sg, wait_for=None, scan_offset=0):
        """Perform the forward FFT operation.

        Parameters
        ----------
        s : PyOpenCL.Array
            The uniformly gridded k-space compressed by the MB factor.
        sg : PyOpenCL.Array
            The complex image data.
        wait_for : list of PyopenCL.Event, None
            A List of PyOpenCL events to wait for.
        scan_offset : int, 0
            Offset compared to the first acquired scan.

        Returns
        -------
        PyOpenCL.Event: A PyOpenCL event to wait for.
        """
        if wait_for is None:
            wait_for = []
        if self.fft_dim is not None:
            # Pre-scale the image into the temporary FFT buffer.
            self._tmp_fft_array.add_event(
                self.prg.copy(
                    self.queue,
                    (sg.size,),
                    None,
                    self._tmp_fft_array.data,
                    sg.data,
                    self.DTYPE_real(1 / self.fft_scale),
                    wait_for=self._tmp_fft_array.events+sg.events+wait_for))
            # clFFT plans are enqueued per batch; synchronize the copy first.
            cl.wait_for_events(self._tmp_fft_array.events)
            fft_events = []
            for j in range(self.iternumber):
                fft_events.append(self.fft.enqueue_arrays(
                    data=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    result=self._tmp_fft_array[
                        j * self.par_fft:(j + 1) * self.par_fft, ...],
                    forward=True)[0])
            # Compress the transformed slice stack into MB-packed k-space.
            return (
                self.prg.copy_SMS_fwdkspace(
                    self.queue,
                    (s.shape[0] * s.shape[1], s.shape[-2], s.shape[-1]),
                    None,
                    s.data,
                    self._tmp_fft_array.data,
                    self.shift.data,
                    self.mask.data,
                    np.int32(self.packs),
                    np.int32(self.MB),
                    self.DTYPE_real(self.fft_scale),
                    np.int32(sg.shape[2]/self.packs/self.MB),
                    wait_for=s.events+fft_events+wait_for))
        # No FFT dimension configured: SMS-compress directly without transform.
        return (
            self.prg.copy_SMS_fwd(
                self.queue,
                (s.shape[0] * s.shape[1], s.shape[-2], s.shape[-1]),
                None,
                s.data,
                sg.data,
                self.shift.data,
                self.mask.data,
                np.int32(self.packs),
                np.int32(self.MB),
                self.DTYPE_real(1),
                np.int32(sg.shape[2]/self.packs/self.MB),
                wait_for=s.events+sg.events+wait_for))
|
990,788 | 1618ba5dcc34ae9e33ed319498e753f9553a062d | from openpyxl import load_workbook
import pprint
import re
pp = pprint.PrettyPrinter(indent=4)
# from project.api.constants import DAY_TO_ISO
# Role codes used in the staff dicts produced below.
TEACHER = 1
TECHNICIAN = 2
# Maps three-letter day names to ISO weekday numbers (Mon=1 .. Fri=5).
DAY_TO_ISO = {"Mon": 1, "Tue": 2, "Wed": 3, "Thu": 4, "Fri": 5}
def extract_users(filename):
    """Parse the staff workbook into (workbook, list of staff dicts).

    Returns (None, error message) when the file cannot be opened, when the
    TEACHERS/TECHNICIANS sheets are missing, or when the template headers
    were altered. Each staff dict has name, email, role_code and staff_code.
    """
    try:
        wb_staff = load_workbook(filename, data_only=True)
    except IOError as e:
        return None, str(e)
    try:
        ws_teachers = wb_staff["TEACHERS"]
        ws_technicians = wb_staff["TECHNICIANS"]
    except Exception as e:
        return None, str(e)
    # check that the headers of the template are retained in uploaded file
    headers = ['Name', 'Email', 'Staff Code']
    for i in range(1, 4):
        if ws_teachers.cell(row=1, column=i).value != headers[i-1]:
            return None, 'Please ensure you use the template provided.'
    # Technicians have no 'Staff Code' column, so only Name/Email are checked.
    for i in range(1, 3):
        if ws_technicians.cell(row=1, column=i).value != headers[i-1]:
            return None, 'Please ensure you use the template provided.'
    staff = []
    teacher_rows = tuple(ws_teachers.rows)
    tech_rows = tuple(ws_technicians.rows)
    # Data rows start at row 2; row 1 holds the headers.
    for i in range(2, 1 + len(teacher_rows)):
        teacher = {
            "name": ws_teachers.cell(row=i, column=1).value,
            "email": ws_teachers.cell(row=i, column=2).value,
            "role_code": TEACHER,
            "staff_code": ws_teachers.cell(row=i, column=3).value
        }
        staff.append(teacher)
    for i in range(2, 1 + len(tech_rows)):
        technician = {
            "name": ws_technicians.cell(row=i, column=1).value,
            "email": ws_technicians.cell(row=i, column=2).value,
            "role_code": TECHNICIAN,
            # Technicians are not assigned staff codes.
            "staff_code": None
        }
        staff.append(technician)
    return wb_staff, staff
def extract_lessons(filename):
    """Parse the timetable workbook into a flat list of lesson dicts.

    Row 5 of the active sheet is assumed to hold staff codes and column 1
    the period labels ('Mon:3', or '1Mon:3' on a two-week timetable); each
    body cell is assumed to be '<class> <room>' -- confirm against template.
    Raises ValueError when a period's day cannot be parsed.
    """
    wb_tt = load_workbook(filename, data_only=True)
    ws = wb_tt.active
    # staff code -> {period label -> cell value}
    users_timetables = {}
    columns = tuple(ws.columns)
    rows = tuple(ws.rows)
    # get staff codes
    for i in range(2, len(columns)):
        if ws.cell(row=5, column=i).value is not None:
            users_timetables[ws.cell(row=5, column=i).value] = {}
            for j in range(6, len(rows)):
                if ws.cell(row=j, column=i).value is not None:
                    """ if lesson data is only text or full-stops, most likely
                    NOT to be a lesson... skip. """
                    if re.match('[a-z .A-Z]', ws.cell(row=j, column=i).value):
                        continue
                    users_timetables[ws.cell(
                        row=5,
                        column=i
                    ).value][ws.cell(
                        row=j,
                        column=1
                    ).value] = ws.cell(row=j, column=i).value
    lessons = []
    for staff_code in users_timetables:
        for ttlesson in users_timetables[staff_code]:
            period_full = ttlesson.split(":")
            ''' if 1 week TT, expect period format to be eg Mon:3.
            if 2 Week TT, expect period format to be eg 1Mon:3
            Test by casting first char to int. '''
            try:
                int(period_full[0][0])
                week = period_full[0][0]
                day_txt = period_full[0][1:4]
            except ValueError:
                # One-week timetable: default to week 1.
                week = 1
                day_txt = period_full[0][0:3]
            if day_txt not in ['Mon', 'Tue', 'Wed', 'Thu', 'Fri']:
                raise ValueError('Could not parse periods.')
            # if period number is not an integer - 'reg', 'asm', 'pmr' etc,
            # skip this lesson
            try:
                period = int(period_full[1])
            except ValueError:
                continue
            # Cell value holds '<class> <room>'.
            class_room = users_timetables[staff_code][ttlesson].split(' ')
            lesson = {
                "staff_code": staff_code,
                # NOTE(review): week is a str for two-week timetables but the
                # int 1 otherwise -- confirm consumers handle both.
                "week": week,
                "period": period,
                "day_txt": day_txt,
                "day": DAY_TO_ISO[day_txt],
                "class": class_room[0],
                "room": class_room[1]
            }
            lessons.append(lesson)
    return lessons
|
990,789 | 5163a39a3ba3038af6cae509580ddedbda2cbc1a | from biodig.base.exceptions import BadRequestException
from rest_framework.views import APIView
from rest_framework.response import Response
from biodig.rest.v2.TagGroups.forms import MultiGetForm, PostForm, PutForm, DeleteForm, SingleGetForm
class TagGroupList(APIView):
    '''
    View for listing/searching the TagGroups of an image and for
    creating new TagGroups on it.
    '''
    def get(self, request, image_id):
        '''
        List TagGroups for the given image, optionally filtered by the
        request's query parameters.
        '''
        params = {key: val for key, val in request.QUERY_PARAMS.iteritems()}
        params['image_id'] = image_id

        form = MultiGetForm(params)
        if not form.is_valid():
            raise BadRequestException()

        return Response(form.submit(request))

    def post(self, request, image_id):
        '''
        Create a new TagGroup on the given image.
        '''
        params = {key: val for key, val in request.DATA.iteritems()}
        params.update(request.QUERY_PARAMS)
        params['image_id'] = image_id

        form = PostForm(params)
        if not form.is_valid():
            raise BadRequestException()

        return Response(form.submit(request))
class TagGroupSingle(APIView):
    '''
    View for retrieving, updating and deleting a single TagGroup.
    '''
    def get(self, request, image_id, tag_group_id):
        '''
        Retrieve one TagGroup identified by image and tag group id.
        '''
        params = {key: val for key, val in request.QUERY_PARAMS.iteritems()}
        params['image_id'] = image_id
        params['tag_group_id'] = tag_group_id

        form = SingleGetForm(params)
        if not form.is_valid():
            raise BadRequestException()

        return Response(form.submit(request))

    def put(self, request, image_id, tag_group_id):
        '''
        Update one TagGroup's information.
        '''
        params = {key: val for key, val in request.DATA.iteritems()}
        params['image_id'] = image_id
        params['tag_group_id'] = tag_group_id

        form = PutForm(params)
        if not form.is_valid():
            raise BadRequestException()

        return Response(form.submit(request))

    def delete(self, request, image_id, tag_group_id):
        '''
        Delete one TagGroup.
        '''
        params = {key: val for key, val in request.QUERY_PARAMS.iteritems()}
        params['image_id'] = image_id
        params['tag_group_id'] = tag_group_id

        form = DeleteForm(params)
        if not form.is_valid():
            raise BadRequestException()

        return Response(form.submit(request))
|
990,790 | c034cc9dc8a4402aaa724c3cc27c413d7acf17c9 | import numpy as np
import matplotlib.pyplot as plt
import sys
import seaborn as sns
sys.path.append('../../../../scripts/')
from fig_settings import configure_fig_settings
sys.path.append('../../modules_gammak24/')
from plotobservables import PlotObservables
from readparams import ReadParams
# Figure dimensions in inches (square, single-column size).
width = 3.487
height = width
# see user_inputs.md for details on what typically goes in these inputs.
user_input = input("input string of a gamma,k24 pair, "
                   "using comma as delimiter: ")
gamma, k24 = user_input.split(',')
# Scan parameters keyed by the LaTeX parameter names used in data files.
scan = {}
scan['\\gamma_s'] = gamma
scan['k_{24}'] = k24
observable_list = ['E', 'R', 'eta', 'delta', 'surfacetwist']
configure_fig_settings()
# One figure/axes pair per observable.
fig = {}
ax = {}
for observable in observable_list:
    fig[observable], ax[observable] = plt.subplots()
    fig[observable].set_size_inches(width, height)
colors = sns.color_palette()
# Parameter-name suffixes used to build load/save file names.
savesuf = ["K_{33}", "k_{24}", "d_0", "\\gamma_s"]
loadsuf = ["K_{33}", "k_{24}", "d_0", "\\gamma_s"]
rp = ReadParams(scan=scan, loadsuf=loadsuf, savesuf=savesuf)
obs = PlotObservables(["\\Lambda", "\\omega"], rp)
print(obs.observables_fname())
# Plot every observable against the scan variable with a labelled curve.
for j, observable in enumerate(observable_list):
    obs.plot_observable(ax[observable], observable, color=colors[j],
                        label=fr'$\gamma_s,k_{{24}}={float(gamma):.2f},{float(k24):.1f}$')
xlabel = r'$\Lambda=3\omega$'
for observable in observable_list:
    # Axis label: \psi(R) for the surface twist, a Greek letter for
    # multi-character names, and the bare letter otherwise.
    if observable == 'surfacetwist':
        ylabel = r'$\psi(R)$'
    elif len(observable) > 1:
        ylabel = fr'$\{observable}$'
    else:
        ylabel = fr'${observable}$'
    ax[observable].set_xlabel(xlabel)
    ax[observable].set_ylabel(ylabel)
    ax[observable].legend(frameon=False)
    fig[observable].tight_layout()
    fig[observable].savefig(obs.observable_sname(observable))
|
990,791 | f2183e51d9e255ec6b726864accf4bfb02e68ae5 | # for every roll of paper towels, you get $0.25 rebate
# Rebate rules (see header comments):
#   * $0.25 rebate per roll of paper towels
#   * $0.35 per roll instead when buying MORE than 10 rolls
#   * value club members get an extra $2 rebate
# find out if user is a value club member
print("Are you a value club member? Respond yes or no?")
club = raw_input()
# find out how many rolls of paper towels the user bought
print(" How many rolls of paper towels did you buy?")
rolls = int(raw_input())
# Per-roll rate is the same for members and non-members.
# BUG FIX: the original non-member branch used "rolls < 10" for the
# $0.35 rate, inverting the bulk discount for non-members.
if rolls > 10:
    rebate = rolls * .35
else:
    rebate = rolls * .25
# Club members get a flat $2 bonus on top.
if club == "yes":
    rebate = rebate + 2
# print rebate
print(" Your rebate is $" + str(rebate))
|
990,792 | 87aa080f62b69225ab5563351def95b5e357fecb | import heapq
# Read n (count of values) and m (number of halving operations).
n, m = map(int, input().split())
# Store negated values so Python's min-heap acts as a max-heap.
a = list(map(lambda x: int(x)*(-1), input().split()))
heapq.heapify(a)
# Greedy: m times, halve (floor) the current maximum and push it back.
for _ in range(m):
    val = heapq.heappop(a) * (-1)
    heapq.heappush(a, (val // 2) * (-1))
# Values are stored negated, so negate the sum for the answer.
print(-sum(a))
|
990,793 | e92b116c7625e4bfaffb2c0562db4699e0b39d7f | # Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Command line tool for creating and extracting ar files."""
from __future__ import print_function
import argparse
import io
import os
import shutil
import stat
import sys
import time
# pylint: disable=relative-import
import arfile
class ProgressReporter(object):
  """Reports files/second statistics to stderr every *every* files."""

  def __init__(self, every):
    self.every = int(every)
    self.start = time.time()
    self.filecount = 0
    self.lastreport = 0

  def inc(self):
    """Count one more processed file, reporting each *every* files."""
    self.filecount += 1
    unreported = self.filecount - self.lastreport
    if unreported >= self.every:
      self.report()

  def report(self):
    """Emit throughput statistics (printing is skipped when every == 0)."""
    if self.every:
      elapsed = time.time() - self.start
      rate = self.filecount / elapsed
      print(u'Took %f for %i files == %f files/second' % (
          elapsed, self.filecount, rate), file=sys.stderr)
    self.lastreport = self.filecount

  def __del__(self):
    # Emit a final report when the reporter goes away.
    self.report()
def create_cmd(
    filename, dirs, progress, read_ahead, verbose, dont_use_defaults):
  """Create an ar archive at *filename* from the given directories.

  Directories are walked in sorted order so archives are reproducible.
  With dont_use_defaults the ArInfo is built via frompath (presumably
  capturing real file metadata -- confirm in arfile); otherwise default
  metadata is used and small files are read up-front to skip a stat().
  """
  afw = arfile.ArFileWriter(filename)
  try:
    for path in dirs:
      for dirpath, child_dirs, filenames in os.walk(path):
        # In-place sort the child_dirs so we walk in lexicographical order
        child_dirs.sort()
        filenames.sort()
        for fn in filenames:
          fp = os.path.join(dirpath, fn)
          if verbose:
            print(fp, file=sys.stderr)
          progress.inc()
          with open(fp, 'rb') as f:
            if dont_use_defaults:
              # Archive member names are relative to the walked root.
              afw.addfile(
                  arfile.ArInfo.frompath(fp[len(path)+1:], cwd=path),
                  f)
              continue
            # If a file is small, it is cheaper to just read the file rather
            # than doing a stat
            data = f.read(read_ahead)
            if len(data) < read_ahead:
              afw.addfile(arfile.ArInfo.fromdefault(
                  fp[len(path)+1:], len(data)), io.BytesIO(data))
            else:
              size = os.stat(fp).st_size
              f.seek(0)
              afw.addfile(arfile.ArInfo.fromdefault(
                  fp[len(path)+1:], size), f)
  finally:
    afw.close()
def list_cmd(filename, progress):
  """Print the name of every member of the ar file to stdout."""
  reader = arfile.ArFileReader(filename, fullparse=False)
  for info, _ in reader:
    print(info.name)
    progress.inc()
def extract_cmd(
    filename, progress, verbose, dont_use_defaults, blocksize=1024*64):
  """Extract an ar archive into the current directory.

  Member data is copied in *blocksize* chunks to bound memory use.
  Absolute member names are rejected (they would escape the destination).
  """
  afr = arfile.ArFileReader(filename, fullparse=dont_use_defaults)
  for ai, ifd in afr:
    assert not ai.name.startswith('/')
    if verbose:
      print(ai.name, file=sys.stderr)
    try:
      os.makedirs(os.path.dirname(ai.name))
    except OSError:
      # Parent directory already exists.
      pass
    with open(ai.name, 'wb') as ofd:
      # Stream exactly ai.size bytes from the archive into the file.
      written = 0
      while written < ai.size:
        readsize = min(blocksize, ai.size-written)
        ofd.write(ifd.read(readsize))
        written += readsize
    progress.inc()
def main(name, args):
  """Parse the command line and dispatch to the matching <mode>_cmd."""
  parser = argparse.ArgumentParser(
      prog=name,
      description=sys.modules[__name__].__doc__)
  subparsers = parser.add_subparsers(
      dest='mode', help='sub-command help')
  # Create command
  parser_create = subparsers.add_parser(
      'create', help='Create a new ar file')
  parser_create.add_argument(
      '-r', '--read-ahead',
      type=int, default=1024*64,
      help='Amount of data to read-ahead before doing a stat.')
  # NOTE(review): default is the text-mode sys.stdout although the type is
  # 'wb'; binary output likely needs sys.stdout.buffer on Python 3 -- confirm.
  parser_create.add_argument(
      '-f', '--filename',
      type=argparse.FileType('wb'), default=sys.stdout,
      help='ar file to use')
  parser_create.add_argument(
      'dirs', nargs='+', help='Directory or file to add to the ar file')
  # List command
  parser_list = subparsers.add_parser('list', help='List a new ar file')
  # Extract command
  parser_extract = subparsers.add_parser(
      'extract', help='Extract an existing ar file to current directory')
  # Add to output commands
  for p in parser_list, parser_extract:
    p.add_argument(
        '-f', '--filename',
        type=argparse.FileType('rb'), default=sys.stdin,
        help='ar file to use')
  for p in parser_create, parser_extract:
    p.add_argument(
        '--dont-use-defaults',
        action='store_true', default=False,
        help='Don\'t use default value for file information.')
    p.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='Output file names to stderr while running.')
  # Add to all commands
  for p in parser_create, parser_list, parser_extract:
    p.add_argument(
        '-p', '--progress',
        type=ProgressReporter, default='10000',
        help='Output progress information every N files.')
  args = parser.parse_args(args)
  # Resolve e.g. mode == 'create' to create_cmd in this module and call it
  # with the remaining parsed options as keyword arguments.
  mode = getattr(sys.modules[__name__], args.mode + '_cmd')
  del args.mode
  return mode(**args.__dict__)
if __name__ == '__main__':
  # Decode argv bytes to text before dispatching (Python 2 compatibility).
  sys.exit(main('artool', (a.decode('utf-8') for a in sys.argv[1:])))
|
990,794 | c6a6db4d5c61a8463beaba9becaf026a1cbf746c | from invariant_point_attention.invariant_point_attention import InvariantPointAttention, IPABlock, IPATransformer
|
990,795 | ad770488273f841cc5b952292b8563007d19002d | # Master 继承 object; School继承Master;Prentice继承School
# This is single (linear) inheritance: Master -> School -> Prentice.
class Master(object):
    """The master craftsman; holds the original pancake recipe."""

    def __init__(self):
        # Runtime string kept verbatim: "the ancient recipe".
        self.kungfu = '古老的配方'

    def make_cake(self):
        # Bake one pancake following this master's own recipe.
        print('按照%s的方法制作了一份煎饼果子' % self.kungfu)
class School(Master):
    """The school; bakes with its teaching method, then defers to Master."""

    def __init__(self):
        # Runtime string kept verbatim: "the teaching method".
        self.kungfu = '教学方法'

    def make_cake(self):
        # First bake with the school's own method...
        print('按照%s制作了一份煎饼果子' % self.kungfu)
        # ...then re-run the parent initializer so Master.make_cake()
        # sees the ancient recipe, and bake once more.
        super().__init__()
        super().make_cake()
class Prentice(School):
    """The apprentice; has an own recipe and can replay every ancestor's."""

    def __init__(self):
        # Runtime string kept verbatim: "the Mao family recipe".
        self.kungfu = '猫氏的配方'

    def make_cake(self):
        print('按照%s制作了一份煎饼果子' % self.kungfu)

    def make_all_cake(self):
        """Bake one pancake with every recipe along the MRO.

        Own recipe first; then super() walks up to School, whose
        make_cake() in turn chains up to Master, so each ancestor
        method runs exactly once.
        """
        self.make_cake()
        super().__init__()
        super().make_cake()
damao = Prentice()
# Bakes with every recipe along the MRO (Prentice, School, Master) in turn.
damao.make_all_cake()
# Notes on chained inheritance:
# - If a subclass called each parent class explicitly, renaming a parent
#   would force edits in every subclass.
# - Calling several parents explicitly also means repetitive, bloated code.
# - super(): invokes the parent class's implementation.
# - With super() every ancestor method is called exactly once,
#   following the order of the __mro__ class attribute
#   (Prentice.__mro__ yields the full parent sequence).
|
990,796 | c0cda72e0cd369d9649866963e027148881dbbef | # This is my first program in python .
# A simple first Python program: prints "Hello World" :-) .
greeting = 'hello world'.title()  # title() upper-cases each word's first letter
print(greeting)
# title() function used to convert the first character in each word to Uppercase :) . |
990,797 | 85137c89cad92835b9ec3477c5c0fcacd58def25 | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.http import Http404
from django.views import generic
from django.urls import reverse
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
import json
from braces.views import SelectRelatedMixin
# from . import forms
from . import models
from django.contrib.auth import get_user_model
CustomUser = get_user_model()
# Create your views here.
class ArticleListView(SelectRelatedMixin, generic.ListView):
    # Generic listing of Articles; select_related avoids extra queries for
    # each article's author and category.
    model = models.Article
    select_related = ("customuser", "category")
class ArticleDetailView(SelectRelatedMixin, generic.DetailView):
    """Detail page for one Article, with the requesting user's vote, the
    vote total, the article's top soapbox and the list of sources."""
    model = models.Article
    select_related = ("customuser", "category")
    template_name = "Articles/article_detail_2.html"

    def get_queryset(self):
        # Restrict to the article addressed by the URL's primary key.
        queryset = super().get_queryset()
        return queryset.filter(
            id__iexact=self.kwargs.get("pk")
        )

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        try:
            # The requesting user's existing vote, if any; .get() raises
            # when there is none (or the user is anonymous).
            context['user_vote'] = models.Vote.objects.filter(
                article=self.object, customuser=self.request.user).get()
        except Exception:
            # No vote -- the template treats the missing key as "not voted".
            pass
        context['total_votes'] = self.object.get_total_votes()
        try:
            context['art_top_soapbox'] = [self.object.art_soapboxes.first()]
            # BUG FIX: compare identity with None ("is None", not "== None").
            if context['art_top_soapbox'][0] is None:
                context['art_top_soapbox'] = None
        except Exception:
            print('Article Top Soapbox Problem')
        try:
            context['sources'] = models.Source.objects.all()
        except Exception:
            context['sources'] = None
        return context
class CreateArticleView(LoginRequiredMixin, generic.CreateView):
    """Create an Article owned by the logged-in user."""
    fields = ('question', 'sub_header', 'message', 'category')
    model = models.Article

    def form_valid(self, form):
        # Attach the requesting user as the author before saving.
        article = form.save(commit=False)
        article.customuser = self.request.user
        article.save()
        self.object = article
        return super().form_valid(form)
class DeleteArticleView(LoginRequiredMixin, SelectRelatedMixin, generic.DeleteView):
    # Lets an authenticated user delete one of their own articles.
    model = models.Article
    select_related = ("customuser", "category")
    success_url = reverse_lazy("Articles:articles-list")

    def get_queryset(self):
        # Restrict deletable articles to those owned by the requester.
        queryset = super().get_queryset()
        return queryset.filter(customuser__id=self.request.user.id)

    def delete(self, *args, **kwargs):
        # Flash a confirmation message, then delegate the actual delete.
        messages.success(self.request, "Post Deleted")
        return super().delete(*args, **kwargs)
class VoteRedirectView(LoginRequiredMixin, generic.RedirectView):
    """Redirects back to the article's question anchor after voting."""

    def get_redirect_url(self, *args, **kwargs):
        return reverse("Articles:article-detail", kwargs={"pk": self.kwargs.get("article_id")}) + "#QUESTION"

    # Vote can only be accessed if user is logged in and hasn't voted due to how article_detail page is setup with template tags
    # def get(self, request, *args, **kwargs):
    #     if self.kwargs.get('user_id'):
    #         try:
    #             choice = get_object_or_404(models.Choice, pk=self.kwargs.get("choice_id"))
    #             models.Vote.objects.create(article=choice.article, customuser=self.request.user, choice=choice)
    #             choice.votes += 1
    #             choice.save()
    #         except Exception:
    #             print("Error in VoteRedirectView")
    #
    #     return super().get(request, *args, **kwargs)
@login_required
def VoteView(request):
    """AJAX endpoint recording the requesting user's vote for a choice.

    Returns JSON with the per-choice percentages, the chosen choice's text
    and the article's new vote total; generic error strings on failure.
    NOTE(review): the vote is created on GET, which is not idempotent --
    consider requiring POST.
    """
    data = {
        'new_total': 'New Total Error',
    }
    if request.GET:
        print("Voting")
        try:
            choice_id = request.GET.get('choice_id')
            choice = get_object_or_404(models.Choice, pk=choice_id)
            # Record the vote and bump the denormalised counter on the choice.
            vote = models.Vote.objects.create(article=choice.article, customuser=request.user, choice=choice)
            choice.votes += 1
            choice.save()
            data['percs'] = choice.all_vote_percentages()
            data['msg'] = choice.choice_text
            data['new_total'] = str(choice.article.get_total_votes())
        except Exception:
            # Any failure is reported to the client as generic error strings.
            data['msg'] = 'Voting Error'
            data['percs'] = ["Percentage Error",]
        print("Voted")
    return JsonResponse(data)
|
990,798 | e2b77d2eec334b9a6960d439353f2672497b391d | from django.contrib import admin
from .models import EmergencyContact
# Register your models here.
admin.site.register(EmergencyContact) |
990,799 | fa1ea135e4d2d78ed98cb5ecc31b2a0e018677f0 | # Copyright (c) 2014 SRI International
# Developed under DARPA contract N66001-11-C-4022.
# Authors:
# Hasnain Lakhani (HL)
"""
Certification New Node with Authority Reboot: Tests whether certification works
correctly when a new node joins the network, and an authority is down
but brought back up later.
The test uses a simple 4 node configuration, with ALICE as the authority node.
ALICE authorizes all nodes for certification.
ALICE, BOB, and EVE are booted up, and allowed to exchange certificates.
BOB publishes a data object, and it should successfully be received at EVE.
EVE publishes a data object, and it should successfully be received at BOB.
ALICE is shut down.
MALLORY is booted up.
MALLORY publishes a data object, it should not be received at BOB due to missing certificates.
ALICE is booted up.
MALLORY should make certificate signature requests, and receive signed certificates.
MALLORY publishes a data object, and it should successfully be received at BOB and EVE.
EVE publishes a data object, and it should successfully be received at MALLORY.
"""
# Test-harness categories this scenario belongs to.
CATEGORIES=['certification']

def runTest(env, nodes, results, Console):
    """Run the certification scenario described in the module docstring."""
    ALICE, BOB, EVE, MALLORY = env.createNodes('ALICE', 'BOB', 'EVE', 'MALLORY')
    env.calculateHaggleNodeIDsExternally()
    # ALICE is the authority; every other node shares a secret with it
    # and is authorized for certification.
    ALICE.addNodeSharedSecret('BOB')
    ALICE.addNodeSharedSecret('EVE')
    ALICE.addNodeSharedSecret('MALLORY')
    ALICE.setAuthority()
    BOB.addAuthorities('ALICE')
    EVE.addAuthorities('ALICE')
    MALLORY.addAuthorities('ALICE')
    ALICE.authorizeNodesForCertification('BOB', 'EVE', 'MALLORY')
    ALICE.createConfig(securityLevel='HIGH')
    BOB.createConfig(securityLevel='HIGH')
    EVE.createConfig(securityLevel='HIGH')
    MALLORY.createConfig(securityLevel='HIGH')
    # Phase 1: boot all but MALLORY and let certificates propagate.
    ALICE.start()
    BOB.start()
    EVE.start()
    env.sleep('Letting nodes exchange certificates', env.config.exchangeDelay)
    # Certified nodes can exchange objects in both directions.
    BOB.publishItem('object1', '')
    results.expect('Subscribing to object1 at EVE', True, EVE.subscribeItem('object1'))
    EVE.publishItem('object2', '')
    results.expect('Subscribing to object2 at BOB', True, BOB.subscribeItem('object2'))
    # Phase 2: authority goes down; the new node cannot get certified.
    env.stopNode('ALICE')
    MALLORY.start()
    env.sleep('Letting MALLORY boot', env.config.exchangeDelay)
    MALLORY.publishItem('object3', '')
    results.expect('Subscribing to object3 at BOB', False, BOB.subscribeItem('object3'))
    # Phase 3: authority returns; MALLORY gets certified and can exchange.
    ALICE.start()
    env.sleep('Letting ALICE boot and MALLORY receive certificates', env.config.exchangeDelay)
    MALLORY.publishItem('object4', '')
    results.expect('Subscribing to object4 at EVE', True, EVE.subscribeItem('object4'))
    results.expect('Subscribing to object4 at BOB', True, BOB.subscribeItem('object4'))
    EVE.publishItem('object5', '')
    results.expect('Subscribing to object5 at MALLORY', True, MALLORY.subscribeItem('object5'))
    env.stopAllNodes()
    # Log verification: each node received a signed certificate, and ALICE
    # signed one for each node, at least once.
    predicate = lambda c: c >= 1
    for node in [BOB, EVE, MALLORY]:
        results.expect('Checking whether signed certificate was received at %s.' % node.name, predicate, node.countMatchingLinesInLog(
            '{SecurityHelper::handleSecurityDataResponse}: Saved signed certificate issued by %s' % ALICE.haggleNodeID))
        results.expect('Checking whether ALICE signed certificate for %s.' % node.name, predicate, ALICE.countMatchingLinesInLog(
            '{SecurityHelper::signCertificate}: Signing certificate for id=%s' % node.haggleNodeID))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.