id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3254302 | import requests
import json
import isodate
import datetime
from bs4 import BeautifulSoup
# This is the URL that we're pulling the video list from.
VIDEO_PAGE = "https://labs.metafilter.com/recent-youtube-posts"
# Set your Youtube API key here. Get one: https://developers.google.com/youtube/v3/getting-started
YT_CREDS = None
if YT_CREDS is None:
raise ValueError('Enter your YouTube API credential into the script first.')
# Set our custom header so people know who wuz here
headers = {
'User-Agent': 'blubox 0.1 alpha extreme turbo',
}
print("Fetching {}...".format(VIDEO_PAGE))
# Try and get the page
r = requests.get(VIDEO_PAGE, headers=headers)
if r.status_code == 200:
# What time is it anyway?
last_updated = datetime.datetime.isoformat(datetime.datetime.utcnow())
# Create the dict that holds our file data
file_data = {'updated_at': last_updated, 'videos': []}
print(last_updated)
print("Loaded the URL.")
html_doc = r.text
print("Souping it up.")
soup = BeautifulSoup(html_doc, 'html.parser')
# print(soup)
for link in soup.find_all("div", class_="copy"):
try:
print("Found link {} {}".format(
link.a.get('title'), link.a.get('href')))
video_id = link.a.get('href')[31:]
searchUrl = "https://www.googleapis.com/youtube/v3/videos?id=" + \
video_id + "&key=" + YT_CREDS + "&part=contentDetails"
video_detail_request = requests.get(searchUrl)
if video_detail_request.status_code == 200:
video_detail = video_detail_request.json()
all_data = video_detail['items']
contentDetails = all_data[0]['contentDetails']
duration = isodate.parse_duration(contentDetails['duration'])
print(duration.total_seconds())
new_video = {'title': link.a.get(
'title'), 'duration': duration.total_seconds(), 'id': video_id}
file_data['videos'].append(new_video)
else:
pass
except Exception as e:
print(e)
print("Writing out file...")
with open("videos.json", "wt") as out_file:
out_file.write(json.dumps(file_data))
print("Wrote the file.")
else:
print("Status Code: {}".format(r.status_code))
print("*sad beep*")
print("*satisfied beep*")
| StarcoderdataPython |
1777093 | <gh_stars>0
import os.path
import yaml
from flask import render_template, request, Blueprint, flash, redirect, url_for
main = Blueprint('main', __name__)
@main.route("/", methods=['GET', 'POST'])
@main.route("/home", methods=['GET', 'POST'])
def home():
    """Render the home page with the messages loaded from messages.yml."""
    messages_path = os.path.join(os.path.dirname(__file__), 'messages.yml')
    with open(messages_path) as stream:
        loaded_messages = yaml.full_load(stream)
    return render_template('home.html', messages=loaded_messages)
| StarcoderdataPython |
3214501 | <filename>tests/test_currentthreadscheduler.py
from datetime import datetime, timedelta
from rx.concurrency import Scheduler, CurrentThreadScheduler
def test_currentthread_now():
    """Scheduler.now() should track datetime.utcnow() to within a second."""
    drift = Scheduler.now() - datetime.utcnow()
    assert drift < timedelta(milliseconds=1000)
def test_currentthread_scheduleaction():
    """A scheduled action runs synchronously on the current thread."""
    scheduler = CurrentThreadScheduler()
    executed = False

    def mark_executed(scheduler, state=None):
        nonlocal executed
        executed = True

    scheduler.schedule(mark_executed)
    assert executed
def test_currentthread_scheduleactionerror():
    """An exception raised inside a scheduled action propagates to the caller."""
    scheduler = CurrentThreadScheduler()

    class MyException(Exception):
        pass

    def raising_action(scheduler, state=None):
        raise MyException()

    try:
        return scheduler.schedule(raising_action)
    except MyException:
        assert True
def test_currentthread_scheduleactionnested():
    """An action may schedule a nested action; both run to completion."""
    scheduler = CurrentThreadScheduler()
    executed = False

    def outer(scheduler, state=None):
        def inner(scheduler, state=None):
            nonlocal executed
            executed = True
        return scheduler.schedule(inner)

    scheduler.schedule(outer)
    assert executed
def test_currentthread_ensuretrampoline():
    """Both actions scheduled from the outer action run via the trampoline."""
    scheduler = CurrentThreadScheduler()
    ran1, ran2 = False, False

    # NOTE(review): the parameter name 'scheduer' is misspelled, so the
    # schedule() calls below resolve to the outer 'scheduler' via closure.
    # Same object here, but confirm the typo is intentional upstream.
    def outer_action(scheduer, state=None):
        def action1(scheduler, state=None):
            nonlocal ran1
            ran1 = True
        scheduler.schedule(action1)

        def action2(scheduler, state=None):
            nonlocal ran2
            ran2 = True
        return scheduler.schedule(action2)

    scheduler.ensure_trampoline(outer_action)
    assert ran1 == True
    assert ran2 == True
def test_currentthread_ensuretrampoline_nested():
    """ensure_trampoline may be re-entered from inside a trampolined action."""
    scheduler = CurrentThreadScheduler()
    ran1, ran2 = False, False

    def outer_action(scheduler, state):
        # 'scheduler' here shadows the module-level one; both inner actions
        # are trampolined on the scheduler handed to this action.
        def inner_action1(scheduler, state):
            nonlocal ran1
            ran1 = True
        scheduler.ensure_trampoline(inner_action1)

        def inner_action2(scheduler, state):
            nonlocal ran2
            ran2 = True
        return scheduler.ensure_trampoline(inner_action2)

    scheduler.ensure_trampoline(outer_action)
    assert ran1 == True
    assert ran2 == True
def test_currentthread_ensuretrampoline_and_cancel():
    """Disposing a scheduled item before it runs prevents its action."""
    scheduler = CurrentThreadScheduler()
    ran1, ran2 = False, False

    # NOTE(review): indentation reconstructed — inner_action2 is nested
    # inside inner_action1 (matching the upstream RxPY tests); confirm.
    def outer_action(scheduler, state):
        def inner_action1(scheduler, state):
            nonlocal ran1
            ran1 = True

            def inner_action2(scheduler, state):
                nonlocal ran2
                ran2 = True

            # Schedule then immediately cancel; inner_action2 must not run.
            d = scheduler.schedule(inner_action2)
            d.dispose()

        return scheduler.schedule(inner_action1)

    scheduler.ensure_trampoline(outer_action)
    assert ran1 == True
    assert ran2 == False
def test_currentthread_ensuretrampoline_and_canceltimed():
    """Disposing a relative-time scheduled item prevents its action."""
    scheduler = CurrentThreadScheduler()
    ran1, ran2 = False, False

    # NOTE(review): indentation reconstructed — inner_action2 is nested
    # inside inner_action1 (matching the upstream RxPY tests); confirm.
    def outer_action(scheduler, state):
        def inner_action1(scheduler, state):
            nonlocal ran1
            ran1 = True

            def inner_action2(scheduler, state):
                nonlocal ran2
                ran2 = True

            # Schedule 500 ms out, then cancel before the trampoline drains.
            d = scheduler.schedule_relative(timedelta(milliseconds=500), inner_action2)
            d.dispose()

        return scheduler.schedule(inner_action1)

    scheduler.ensure_trampoline(outer_action)
    assert ran1 == True
    assert ran2 == False
| StarcoderdataPython |
1712182 | #!/usr/bin/env python2
import re
import copy
from collections import namedtuple, defaultdict
from parsec import *
from utils import *
import fwsynthesizer
################################################################################
# TYPES
Rule = namedtuple('Rule', ['number', 'action', 'protocol',
'src', 'dst', 'options', 'action_target'])
NatDef = namedtuple('NatDef', ['number', 'options'])
################################################################################
# UTILS / GLOBALS
PROTOCOLS = protocols()
PORTS = services()
ACCEPT_CMDS = ['allow', 'accept', 'pass', 'permit']
DROP_CMDS = ['deny', 'drop', 'reset']
KEYWORDS = ACCEPT_CMDS + DROP_CMDS + ['from', 'to', 'in', 'out', 'log', 'any',
'via', 'setup', 'keep-state']
def protocol_number(proto):
    """Return the numeric IP protocol for *proto*.

    *proto* may already be a number (or numeric string); otherwise it is
    looked up by name in the PROTOCOLS table (e.g. 'tcp' -> 6).
    """
    try:
        return int(proto)
    # Narrowed from a bare `except:` — only "not numeric" should fall
    # through to the name lookup; anything else should surface.
    except (ValueError, TypeError):
        return int(PROTOCOLS[proto])
def port_from_name(name):
    """Return a Port for *name*, a port number or a service name.

    Numeric strings are used directly; otherwise the service name is
    resolved through the PORTS table (e.g. 'http' -> 80).
    """
    try:
        return Port(int(name))
    # Narrowed from a bare `except:` — only "not numeric" should fall
    # through to the service-name lookup.
    except (ValueError, TypeError):
        return Port(int(PORTS[name]))
################################################################################
# PARSERS
identifier = not_in(regex('[a-zA-Z0-9\-\_\/]+'), KEYWORDS)
ipfw_negate = (lambda p: (optional(regex("not\s+")) + p)
.parsecmap(lambda (n, s): Negate(s) if n else s))
ipfw_cmd = symbol("ipfw") >> optional(alternative("-q", "-f"))
addr_spec = ip_subnet ^ ip_addr
# IMPORTANT: Tables are not supported
addresses = ipfw_negate(symbol("any") ^ symbol("me") ^ sepBy1(token(ip_subnet ^ ip_addr), symbol(",")))
port_spec = token(port) ^ identifier.parsecmap(port_from_name)
ports = ipfw_negate(sepBy1(port_spec + optional(symbol("-") >> port_spec), symbol(",")))
skip_opt = spaces >> until(' \n') >> spaces.result(None)
@generate
def ipfw_options():
    """Parse trailing rule options into a defaultdict (missing keys -> None).

    Recognized: 'established', direction 'in'/'out', and an interface
    option ('via'|'recv'|'xmit', name).  Anything else is consumed by
    skip_opt and dropped.
    """
    established = symbol("established").result(("established", True))
    direction = alternative("in", "out").parsecmap(lambda r: ("direction", r))
    interface = ((alternative("via", "recv", "xmit") + (identifier << spaces))
                 .parsecmap(lambda t: ("interface", t)))
    opts = yield many(interface ^ established ^ direction ^ skip_opt)
    # Other options are ignored (skip_opt yields None; filter them out).
    preturn ( defaultdict(lambda: None, filter(lambda r: r is not None, opts)) )
# IMPORTANT: or-block deprecated syntax is not supported
@generate
def ipfw_rule():
_ = yield ipfw_cmd >> symbol("add")
rule_numb = yield token(number)
action = yield alternative(*(ACCEPT_CMDS + DROP_CMDS
+ ['check-state', 'count', 'skipto', 'call', 'return', 'nat']))
action_target = None
if action in ['call', 'skipto', 'nat']:
action_target = yield token(number)
protocol = None
src = None
dst = None
if action != 'check-state':
_ = yield optional(switch("log") << optional(symbol("logamount") << until(" ") << spaces))
protocol = yield optional(ipfw_negate(number | identifier))
src = yield symbol("from") >> addresses + optional(ports)
dst = yield symbol("to") >> addresses + optional(ports)
options = yield ipfw_options
preturn ( Rule(rule_numb, action, protocol, src, dst, options, action_target) )
# IMPORTANT: LSNAT not supported
@generate
def ipfw_nat():
pair = lambda s: lambda x: (s, x)
@generate
def redirect_port():
_ = yield symbol("redirect_port")
proto = yield token(number | identifier)
addr_p = yield token((addr_spec << string(":")) + port_spec)
port = yield port_spec
preturn ( ('redirect_port', (proto, addr_p, port)) )
redirect_addr = (symbol('redirect_addr') >> token(addr_spec) + token(addr_spec)).parsecmap(pair("redirect_addr"))
ip = symbol("ip") >> ip_addr.parsecmap(pair("ip"))
interface = symbol("if") >> identifier.parsecmap(pair("interface"))
# NAT declaration
_ = yield ipfw_cmd >> symbol("nat")
num = yield token(number)
_ = yield symbol("config")
opts = yield many(redirect_port ^ redirect_addr ^ interface ^ ip ^ skip_opt)
options = defaultdict(list)
for k in opts:
if k: options[k[0]].append(k[1])
options = defaultdict(lambda: None, options)
preturn ( NatDef(num, options) )
ipfw_conf = (optional((comment << space_endls) ^ endl_comments)
>> many1((ipfw_nat ^ ipfw_rule) << endl_comments))
################################################################################
# CONVERTER
def make_nat_table(rules):
    """Index NatDef entries by their nat instance number."""
    return {entry.number: entry.options
            for entry in rules if isinstance(entry, NatDef)}
def convert_rule(rule, interfaces, nat_table, prefix):
conditions = []
def format_ports(pair):
xs = map(lambda x: "{}".format(x.value), filter(lambda x: x is not None, pair))
return "-".join(xs)
def append_condition(variable, operator, value, mapper=None):
cs = []
negate = isinstance(value, Negate)
if negate: value = value.value
if not isinstance(value, list): value = [value]
for v in filter(lambda x: x, value):
if isinstance(v, Negate):
cs.append("not ({} {} {})".format(variable, operator, mapper(v.value) if mapper else v.value))
else:
cs.append("{} {} {}".format(variable, operator, mapper(v) if mapper else v))
if cs:
if negate:
conditions.append("not ({})".format(" || ".join(cs)))
else:
conditions.append("({})".format(" || ".join(cs))
if len(cs) > 1 else cs[0])
if rule.src:
srcip, srcport = rule.src
if srcip == "me": srcip = [ local for _, (_, local) in interfaces.items()]
if srcip != "any":
append_condition("srcIp", "==", srcip)
append_condition("srcPort", "==", srcport, mapper=format_ports)
if rule.dst:
dstip, dstport = rule.dst
if dstip == "me": dstip = [ local for _, (_, local) in interfaces.items()]
if dstip != "any":
append_condition("dstIp", "==", dstip)
append_condition("dstPort", "==", dstport, mapper=format_ports)
if rule.protocol not in ['all', 'ip']:
append_condition("protocol", "==", rule.protocol, mapper=protocol_number)
# Supported options: established, in, out, recv, xmit, via
if rule.options['established']:
append_condition("state", "==", 1)
ext_constraint = lambda var, constraints: "not ({})".format(
" || ".join("{} == {}".format(var, addr) for addr in constraints))
# Interface
# the addresses are constrained using the subnet of the specified interface
# source address in case of recv
# destination in case of xmit
# both in case of via but with logical disjunction
# If interface subnet is 0.0.0.0/0 constrain ip not to be in all others and interface local
if rule.options['interface']:
if_direction, interface = rule.options['interface']
if if_direction == 'recv':
conditions.append(fwsynthesizer.constrain_interface(interfaces, "srcIp", interface))
if if_direction == 'xmit':
conditions.append(fwsynthesizer.constrain_interface(interfaces, "dstIp", interface))
if if_direction == 'via':
raise RuntimeError("Invalid option 'via': rule was not preprocessed!")
rules = []
target = None
if rule.action in ACCEPT_CMDS: target = "ACCEPT"
if rule.action in DROP_CMDS: target = "DROP"
if rule.action == "return": target = "RETURN"
if rule.action == "call": target = "CALL(R_{}_{})".format(prefix, rule.action_target)
if rule.action == "skipto": target = "GOTO(R_{}_{})".format(prefix, rule.action_target)
if target is not None:
rules.append((conditions, target))
if rule.action == "check-state":
rules.append((conditions + ['state == 1'], "CHECK-STATE(<->)"))
if rule.action == "nat":
direction = rule.options['direction']
isinput = direction is None or direction == 'in'
isoutput = direction is None or direction == 'out'
nat = nat_table[rule.action_target]
for opt in nat:
if opt == 'redirect_port' and isinput:
for option in nat[opt]:
proto, (addr, port), port1 = option
target = "NAT({}:{}, Id)".format(addr, port.value)
rules.append((conditions + ['protocol == {}'.format(protocol_number(proto)),
'dstPort == {}'.format(port1.value)], target))
elif opt == 'redirect_addr' and isinput:
for option in nat[opt]:
addr, addr1 = option
target = "NAT({}, Id)".format(addr)
rules.append((conditions + ['dstIp == {}'.format(addr1)], target))
elif opt == 'interface' and isoutput:
for option in nat[opt]:
ifaddr = interfaces[option][1]
target = "NAT(Id, {})".format(ifaddr)
rules.append((conditions, target))
elif opt == 'ip' and isoutput:
for option in nat[opt]:
target = "NAT(Id, {})".format(option)
rules.append((conditions, target))
return ["({}, {})".format('true' if len(conditions) == 0 else ' && '.join(conditions),
target)
for conditions, target in rules]
def preprocess_rules(rules, interfaces):
    """Split 'via IFACE' rules into explicit recv/xmit variants.

    'via' matches both directions.  A via-rule that already has a
    direction becomes a recv (in) or xmit (out) rule in place; a
    directionless one is duplicated into rules numbered n+0.1 (in/recv)
    and n+0.2 (out/xmit) so relative ordering is preserved.
    """
    new_rules = []
    for rule in rules:
        if rule.options['interface'] and rule.options['interface'][0] == 'via':
            if_name = rule.options['interface'][1]
            if rule.options['direction'] and rule.options['direction'] == 'in':
                rule.options['interface'] = ('recv', if_name)
                new_rules.append(rule)
            elif rule.options['direction'] and rule.options['direction'] == 'out':
                rule.options['interface'] = ('xmit', if_name)
                new_rules.append(rule)
            else:
                # No direction: duplicate into an inbound and an outbound
                # rule; options are copied so the two don't share state.
                in_rule = rule._replace(number=rule.number + 0.1, options=copy.copy(rule.options))
                out_rule = rule._replace(number=rule.number + 0.2, options=copy.copy(rule.options))
                in_rule.options['direction'] = 'in'
                in_rule.options['interface'] = ('recv', if_name)
                out_rule.options['direction'] = 'out'
                out_rule.options['interface'] = ('xmit', if_name)
                new_rules.extend([in_rule, out_rule])
        else:
            new_rules.append(rule)
    return new_rules
## ip_input ip_output
def convert_rules(rules, interfaces):
ip_input = []
ip_output = []
nat_table = make_nat_table(rules)
rules = sorted(filter(lambda x: isinstance(x, Rule) and x.action not in ["count"], rules), key=lambda x: x.number)
rules = preprocess_rules(rules, interfaces)
for rule in rules:
direction = rule.options['direction']
numbered = rule.number, rule
ip_input.append((rule.number, rule if direction is None or direction == 'in' else None))
ip_output.append((rule.number, rule if direction is None or direction == 'out' else None))
output = ""
output += "CHAIN ip_input DROP:\n(true, GOTO(R_in_{}))\n\n".format(ip_input[0][0])
output += "CHAIN ip_output DROP:\n(true, GOTO(R_out_{}))\n\n".format(ip_output[0][0])
for prefix, chain in [("in", ip_input), ("out", ip_output)]:
for i in range(len(chain)):
number, rule = chain[i]
rules = convert_rule(rule, interfaces, nat_table, prefix) if rule else []
output += "CHAIN R_{}_{}:\n".format(prefix, number)
output += "\n".join(rules) + "\n" if rules else ""
if i == len(chain)-1:
output += "(true, ACCEPT)"
else:
output += "(true, GOTO(R_{}_{}))".format(prefix, chain[i+1][0])
output += "\n\n"
return output
################################################################################
# QUERY FUNCTIONS
def get_lines(contents):
    """Return stripped, non-comment lines that look like 'ipfw [...] add' rules."""
    rule_pattern = re.compile("ipfw (-q |-f )*add.*")
    selected = []
    for raw_line in contents.split("\n"):
        stripped = raw_line.strip()
        if raw_line.startswith("#") or not stripped:
            continue
        if rule_pattern.match(raw_line):
            selected.append(stripped)
    return selected
def delete_rule(rules, rule_number):
    """Remove the rule at positional index *rule_number* (0-based, counting
    Rule entries only; NatDef entries are kept) and repair call/skipto
    targets that pointed at the removed rule.

    Redirected targets point at the number of the entry following the
    referring rule in the new list.
    """
    new_rules = []
    target = None
    # Remove rule: count down through Rule entries until the index hits -1.
    for rule in rules:
        if isinstance(rule, Rule):
            rule_number -= 1
            if rule_number == -1:
                target = rule.number
                continue
            else:
                new_rules.append(rule)
        else:
            new_rules.append(rule)
    # Reconnect GOTOS
    for i in range(len(new_rules)):
        rule = new_rules[i]
        if (isinstance(rule, Rule)
                and rule.action not in ['nat', 'rdr']
                and rule.action_target == target):
            # NOTE(review): assumes an entry exists after position i; a
            # dangling reference from the last rule would raise IndexError.
            new_rules[i] = rule._replace(action_target=new_rules[i + 1].number)
    return new_rules
| StarcoderdataPython |
67267 | <reponame>taoyan/python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-08-22 10:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: point Video.screen_shot at 'screen_shot/'
    uploads with a default thumbnail."""

    dependencies = [
        ('videos', '0002_video_content_template'),
    ]

    operations = [
        migrations.AlterField(
            model_name='video',
            name='screen_shot',
            field=models.FileField(default='/screen_shot/default.png', upload_to='screen_shot/', verbose_name='缩略图'),
        ),
    ]
| StarcoderdataPython |
3323860 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 28 10:15:29 2017
@author: Kjell
"""
import time
import math
from AirSimClient import *
# connect to the AirSim simulator
client = MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
client.armDisarm(True)
def straight(duration, speed):
    """Command a forward flight at `speed` for `duration` s at altitude z=-6.

    Returns (start_time, duration) so the caller can poll for completion.
    """
    _, _, yaw = client.getPitchRollYaw()
    velocity_x = math.cos(yaw) * speed
    velocity_y = math.sin(yaw) * speed
    client.moveByVelocityZ(velocity_x, velocity_y, -6, duration,
                           DrivetrainType.ForwardOnly)
    return time.time(), duration
def take_action():
    """Fly straight for up to 5 s; return True if a collision occurred.

    Busy-polls the collision state until the commanded duration elapses;
    on collision the drone is stopped with a zero-velocity command.
    """
    start = time.time()
    duration = 0
    collided = False
    start, duration = straight(5, 4)  # for 5 sec with "speed" 4 or until it collides
    while duration > time.time() - start:
        if client.getCollisionInfo().has_collided == True:
            client.moveByVelocity(0, 0, 0, 1)
            return True
    # No collision within the flight window.
    return collided
def reset():
    """Reset the simulation, re-enable API control, arm, and climb to z=-6."""
    client.reset()
    client.enableApiControl(True)
    client.armDisarm(True)
    client.moveToZ(-6, 3)
    # presumably moveToZ is asynchronous — the sleep gives the climb time
    # to finish; confirm against the AirSim API in use.
    time.sleep(3)
if __name__ == "__main__":
    # Run up to 250k straight-flight episodes, resetting after each collision.
    reset()
    for idx in range(250000):  # 250k
        collided = take_action()
        if collided == True:
            reset()
        print("%d" % idx)
    # that's enough fun for now. let's quit cleanly
    client.enableApiControl(False)
| StarcoderdataPython |
1664617 | <filename>deepthought/bricks/data_dict.py
import logging
log = logging.getLogger(__name__)
def generate_data_dict(dataset, source, name='dict', verbose=False):
    """Load one source of a dataset fully into a theano shared variable.

    Parameters
    ----------
    dataset : object with open()/get_data()/close(), `sources` and
        `num_examples` (fuel-style dataset) — TODO confirm exact type
    source : name of the source to extract from the dataset
    name : name given to the resulting theano shared variable
    verbose : log shape/type information when True
    """
    import numpy as np
    import theano
    dtype = theano.config.floatX
    # get data into a dict, need to use the full dataset (no subset!)
    state = dataset.open()
    request = slice(0, dataset.num_examples)
    data_dict = dataset.get_data(request=request)[dataset.sources.index(source)]
    dataset.close(state)
    # FIXME: move this to original dataset generator code
    #data_dict = np.rollaxis(data_dict, 3, 1) # convert b01c format into bc01 format
    shape = data_dict.shape
    data_dict = theano.shared(theano._asarray(data_dict, dtype=dtype),  # for GPU usage
                              name=name, borrow=False)
    if verbose:
        log.debug('generated data dict "{}", shape={}, type={}'
                  .format(data_dict, shape, data_dict.type))
    return data_dict
| StarcoderdataPython |
176707 | import asyncio
import csv
import os
import time
from datetime import datetime, timedelta
import aiohttp
import psycopg2
from six.moves import urllib_parse
CONCURRENCY = 10
HUB_OUTPUT_FILE = "hub_babyswitches.csv"
RAPIDPRO_OUTPUT_FILE = "rapidpro_babyswitch_updates.csv"
LIMIT = 10_000_000
RAPIDPRO_URL = "https://rapidpro.prd.momconnect.co.za/"
RAPIDPRO_TOKEN = os.environ["RAPIDPRO_TOKEN"]
HUB_DB_PASSWORD = os.environ["HUB_PASS"]
total = 0
excluded = 0
start, d_print = time.time(), time.time()
async def get_rapidpro_contact(session, contact_id):
    """Fetch a single RapidPro contact by UUID.

    Returns the first matching contact dict, or None when no contact with
    that UUID exists.
    """
    url = urllib_parse.urljoin(RAPIDPRO_URL, f"/api/v2/contacts.json?uuid={contact_id}")
    headers = {
        "Authorization": f"TOKEN {RAPIDPRO_TOKEN}",
        "Content-Type": "application/json",
        "Connection": "Keep-Alive",
    }
    async with session.get(url, headers=headers) as response:
        response_body = await response.json()
        if response_body["results"]:
            return response_body["results"][0]
    return None
def in_postbirth_group(contact):
    """True if any of the contact's group names contains 'post'."""
    return any("post" in group["name"].lower()
               for group in contact["groups"])
def get_contact_msisdn(contact):
    """Return the msisdn ('+<number>') from the contact's whatsapp URN, or None."""
    for urn in contact["urns"]:
        if "whatsapp" not in urn:
            continue
        return "+" + urn.split(":")[1]
def get_baby_dob_field(fields):
    """Return the first empty baby_dob{1..3} field name, or None if all are set."""
    for slot in ("baby_dob1", "baby_dob2", "baby_dob3"):
        if not fields[slot]:
            return slot
def get_babyswitches(conn):
babyswitches = {}
cursor = conn.cursor("baby_switches")
print("Fetching Baby Switches...")
cursor.execute(
f"""
select contact_id, timestamp
from eventstore_babyswitch
order by timestamp asc
limit {LIMIT}
"""
) # 158680
total = 0
start, d_print = time.time(), time.time()
for (contact_id, timestamp) in cursor:
babyswitches[contact_id] = timestamp
if time.time() - d_print > 1:
print(
f"\rFetched {total} babyswitches at "
f"{total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
total += 1
print(f"\nFetched {total} babyswitches in {time.time() - start:.0f}s")
print("-------------------------------------------")
return babyswitches
def get_optouts(conn):
optouts = {}
print("Fetching Optouts...")
cursor = conn.cursor("optouts")
cursor.execute(
f"""
select contact_id, timestamp
from eventstore_optout
order by timestamp asc
limit {LIMIT}
"""
) # 255855
total = 0
start, d_print = time.time(), time.time()
for (contact_id, timestamp) in cursor:
optouts[contact_id] = timestamp
if time.time() - d_print > 1:
print(
f"\rFetched {total} optouts at " f"{total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
total += 1
print(f"\nFetched {total} optouts in {time.time() - start:.0f}s")
print("-------------------------------------------")
return optouts
def get_registrations(conn, babyswitches, optouts):
registrations = []
print("Fetching Prebirth Registrations...")
cursor = conn.cursor("prebirth_registrations")
cursor.execute(
f"""
select contact_id, timestamp
from eventstore_prebirthregistration
where edd < '2021-04-20'
order by timestamp asc
limit {LIMIT}
"""
) # 216808
total = 0
start, d_print = time.time(), time.time()
for (contact_id, timestamp) in cursor:
if contact_id in babyswitches and timestamp < babyswitches[contact_id]:
continue
if contact_id in optouts and timestamp < optouts[contact_id]:
continue
registrations.append(contact_id)
if time.time() - d_print > 1:
print(
f"\rFetched {total} registrations at "
f"{total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
total += 1
print(f"\nFetched {total} registrations in {time.time() - start:.0f}s")
print("-------------------------------------------")
return registrations
async def process_registration(session, contact_id, hub_writer, rp_writer):
global total
global excluded
global d_print
global start
total += 1
contact = await get_rapidpro_contact(session, contact_id)
if contact:
msisdn = get_contact_msisdn(contact)
in_group = in_postbirth_group(contact)
if (
in_group
or not msisdn
or contact["fields"].get("preferred_channel") != "WhatsApp"
):
excluded += 1
else:
baby_dob_field = get_baby_dob_field(contact["fields"])
edd = str(contact["fields"]["edd"]).replace("Z", "")
try:
dob = datetime.fromisoformat(edd) + timedelta(days=14)
except (TypeError, ValueError):
excluded += 1
return
rp_writer.writerow(
{
"contact_id": contact_id,
"baby_dob_field": baby_dob_field,
"baby_dob": dob.isoformat(),
}
)
# write to csv for jembi and hub
hub_writer.writerow(
{
"contact_id": contact_id,
"msisdn": msisdn,
"timestamp": datetime.now().isoformat(),
}
)
if time.time() - d_print > 1:
print(
f"\rProcessed {total}({excluded}) registrations at "
f"{total/(time.time() - start):.0f}/s",
end="",
)
d_print = time.time()
async def bounded_process_registration(session, contact_id, hub_writer, rp_writer, sem):
    """process_registration wrapped in a semaphore to cap concurrency."""
    async with sem:
        await process_registration(session, contact_id, hub_writer, rp_writer)
async def process_registrations(registrations):
global total
global start
sema = asyncio.Semaphore(CONCURRENCY)
print("Processing Registrations...")
with open(HUB_OUTPUT_FILE, "w", newline="") as hub_target, open(
RAPIDPRO_OUTPUT_FILE, "w", newline=""
) as rp_target:
hub_writer = csv.DictWriter(
hub_target, fieldnames=["contact_id", "msisdn", "timestamp"]
)
hub_writer.writeheader()
rp_writer = csv.DictWriter(
rp_target, fieldnames=["contact_id", "baby_dob_field", "baby_dob"]
)
rp_writer.writeheader()
connector = aiohttp.TCPConnector(limit=CONCURRENCY)
async with aiohttp.ClientSession(connector=connector) as session:
tasks = []
for contact_id in registrations:
tasks.append(
bounded_process_registration(
session, contact_id, hub_writer, rp_writer, sema
)
)
await asyncio.gather(*tasks)
print(f"\nProcessed {total} registrations in {time.time() - start:.0f}s")
if __name__ == "__main__":
    # NOTE: the password literal was redacted to "<PASSWORD>" in the source
    # (a syntax error); restored to the HUB_DB_PASSWORD constant defined
    # above from the HUB_PASS environment variable.
    conn = psycopg2.connect(
        dbname="hub",
        user="hub",
        password=HUB_DB_PASSWORD,
        host="localhost",
        port=7000,
    )
    babyswitches = get_babyswitches(conn)
    optouts = get_optouts(conn)
    registrations = get_registrations(conn, babyswitches, optouts)
    asyncio.run(process_registrations(registrations))
| StarcoderdataPython |
3316158 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from typing import *
from uuid import uuid4
from peewee import ForeignKeyField, DateTimeField, FixedCharField
from api.models.base import BaseModel
from api.models.user import User
class AccessToken(BaseModel):
    """Opaque bearer token owned by a User, valid until `expired_on`."""

    class Meta:
        table_name = "access_tokens"

    # Deleting the owner cascades to their tokens.
    owner = ForeignKeyField(User, backref="access_tokens", on_delete="CASCADE")
    expired_on = DateTimeField()
    # 32 hex chars: a uuid4 with the dashes stripped.
    token = FixedCharField(max_length=32)

    def is_valid(self) -> bool:
        """True while the expiry timestamp has not passed."""
        return self.expired_on >= datetime.now()

    @staticmethod
    def create_token(owner: User, lifespan_hours: int):
        """Create and persist a new token for *owner*, valid for *lifespan_hours*."""
        return AccessToken.create(
            owner=owner,
            token=str(uuid4()).replace("-", ""),
            expired_on=datetime.now() + timedelta(hours=lifespan_hours))
| StarcoderdataPython |
3204160 | from sys import stdin
def primos(p, q):
    """Print, for each of the first *p* values of *q*, whether it is prime.

    Prints "Si" for primes and "No" otherwise, one line per number.

    Fixes over the original: the stray `break` no longer stops after the
    first number, "Si" is only printed after *all* candidate divisors have
    been checked (via for/else), values below 2 are reported as non-prime,
    and the "Si /n" typo is gone.
    """
    for i in range(p):
        value = q[i]
        if value < 2:
            # 0, 1 and negatives are not prime.
            print("No")
            continue
        for j in range(2, value):
            if value % j == 0:
                print("No")
                break
        else:
            # No divisor found: prime.
            print("Si")
def main():
    """Read a count p and a line of p space-separated integers from stdin,
    then report the primality of each via primos()."""
    p = int(stdin.readline().strip())
    q = list(map(int, stdin.readline().strip().split(' ')))
    primos(p, q)


main()
| StarcoderdataPython |
1734568 | from year2021.python.day1.day1_func import *
# Load the puzzle input: one integer reading per line.
debts = [int(debt) for debt in open('../../data/day1_data.txt')]

# Part 1: count of increases between consecutive single readings.
sonarSingle = SonarSingle()
singleDebt = sonarSingle.GetDebtCount(debts)
print(f"Part 1: {singleDebt}")

# Part 2: count of increases between sliding-window sums.
sonarWindow = SonarWindow()
windowDebt = sonarWindow.GetDebtCount(debts)
print(f"Part 2: {windowDebt}")
| StarcoderdataPython |
1710373 | <reponame>gurcani/pyhw
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 13:40:11 2018
@author: ogurcan
"""
import numpy as np
import h5py as h5
import pyfftw as pyfw
import os
import subprocess as sbp
tmpdir='pyhw_tempdir'
eps=1e-20
def get_spec(i, ntav):
    """Compute shell-averaged spectra (En, Fn) from `ntav` snapshots at index `i`.

    En bins |phi_k|^2 * k^2 and Fn bins |n_k|^2 over the wavenumber shells
    `kn`.  Relies on the module-level FFT plan and arrays set up by spec().
    """
    global fftw_objf, phik0, nk0, kx, ky, k, N, kn, dkx, dky, Nx, Ny, phires, nres
    phi0 = phires[i:i+ntav, :, :]
    nf0 = nres[i:i+ntav, :, :]
    phik0 = fftw_objf(phi0, phik0)
    nk0 = fftw_objf(nf0, nk0)
    # Time-average the 2D spectra, fftshift along axis 0 to match `k`.
    Ek0 = np.fft.fftshift(np.mean(np.abs(phik0)**2, 0)*(kx**2+ky**2), 0)
    Fk0 = np.fft.fftshift(np.mean(np.abs(nk0)**2, 0), 0)
    En = np.zeros(N)
    Fn = np.zeros(N)
    for l in range(N-1):
        # Sum each shell kn[l] <= |k| < kn[l+1], normalized by grid size.
        En[l] = np.sum(Ek0[(k >= kn[l]) & (k < kn[l+1])])*dkx*dky/Nx**2/Ny**2
        Fn[l] = np.sum(Fk0[(k >= kn[l]) & (k < kn[l+1])])*dkx*dky/Nx**2/Ny**2
    return En, Fn
def spec(flname,ntav=2,nt0=-1):
global fl,fftw_objf,phik0,nk0,kx,ky,k,N,kn,dkx,dky,Nx,Ny,Nt,phires,nres
if (':' in flname):
flname=sync_rf(flname)
fl=h5.File(flname,"r")
phires=fl["fields/phi"]
nres=fl["fields/n"]
kx,ky=fl["fields/kx"][:],fl["fields/ky"][:]
dkx=kx[1,0]-kx[0,0]
dky=ky[0,1]-ky[0,0]
Nt=phires.shape[0]
Nx=phires.shape[1]
Ny=phires.shape[2]
k=np.sqrt(np.fft.fftshift(kx,0)**2+np.fft.fftshift(ky,0)**2)
kmin=0.0
kmax=np.max(kx)+1.0
N=300
dk=(kmax-kmin)/N
kn=np.arange(kmin,kmax,dk)
if (nt0<0):
nt0=phires.shape[0]-ntav+nt0+1
phi0=phires[nt0:nt0+ntav,:,:]
# nf0=nres[nt0:nt0+ntav,:,:]
phik0=pyfw.empty_aligned((ntav,Nx,int(Ny/2+1)),'complex');
nk0=pyfw.empty_aligned((ntav,Nx,int(Ny/2+1)),'complex');
fftw_objf = pyfw.FFTW(phi0, phik0, axes=(1, 2))
En,Fn=get_spec(nt0,ntav)
return En,Fn,kn
def sync_rf(flname):
    """Rsync a remote 'host:path' file into the local temp dir; return the local path."""
    if not os.path.exists(tmpdir):
        os.mkdir(tmpdir)
    sbp.call(['rsync', '-havuP', flname, './' + tmpdir + '/'])
    return tmpdir + '/' + os.path.basename(flname)
def do_plot(En,Fn,kn,fkn1,fkn2,lab1,lab2,ax):
qd=ax.loglog(kn[En>eps],En[En>eps],'x-',kn[Fn>eps],Fn[Fn>eps],'+-')
kr=np.arange(3,50)
ax.loglog(kn[kr],fkn1(kn[kr]),'k--')
ax.loglog(kn[kr],fkn2(kn[kr]),'k--')
ax.legend([lab1,lab2],fontsize=14)
ax.text(kn[kr[-10]],fkn1(kn[kr[-10]])/2,'$k^{-3}$',fontsize=14)
ax.text(kn[kr[-10]],fkn2(kn[kr[-10]])/2,'$k^{-1}$',fontsize=14)
return qd
def plot_spec(flname,ntav=2,nt0=-1,fkn1 = lambda k : 1e-4*k**(-3), fkn2 = lambda k : 1e-3*k**(-1), lab1='$E(k)$', lab2='$F(k)$'):
global fl
import matplotlib as mpl
mpl.use('Qt5Agg')
import matplotlib.pylab as plt
if (':' in flname):
flname=sync_rf(flname)
En,Fn,kn=spec(flname,ntav,nt0)
do_plot(En,Fn,kn,fkn1,fkn2,lab1,lab2,ax=plt.gca())
kr=np.arange(3,50)
plt.loglog(kn[En>eps],En[En>eps],'x-',kn[Fn>eps],Fn[Fn>eps],'+-')
plt.loglog(kn[kr],fkn1(kn[kr]),'k--')
plt.loglog(kn[kr],fkn2(kn[kr]),'k--')
plt.legend([lab1,lab2],fontsize=14)
plt.text(kn[kr[-10]],fkn1(kn[kr[-10]])/2,'$k^{-3}$',fontsize=14)
plt.text(kn[kr[-10]],fkn2(kn[kr[-10]])/2,'$k^{-1}$',fontsize=14)
fl.close()
def update_spec_anim(j, qd, phi, n, ntav):
    """Animation callback: recompute the spectra at frame j and update the lines."""
    print(j)
    En, Fn = get_spec(j, ntav)
    # The eps filter drops empty bins so log-log axes stay well-defined.
    qd[0].set_xdata(kn[En > eps])
    qd[0].set_ydata(En[En > eps])
    qd[1].set_xdata(kn[Fn > eps])
    qd[1].set_ydata(Fn[Fn > eps])
    return qd
def spec_anim(flname,outfl,vmin=1e-8,vmax=1e-2,ntav=2):
global fl,Nt
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pylab as plt
import matplotlib.animation as anim
if (':' in flname):
flname=sync_rf(flname)
En,Fn,kn=spec(flname,ntav)
w, h = plt.figaspect(0.8)
fig,ax=plt.subplots(1,1,sharey=True,figsize=(w,h))
qd=ax.loglog(kn[En>eps],En[En>eps],'x-',kn[Fn>eps],Fn[Fn>eps],'+-')
kr=np.arange(3,50)
ax.loglog(kn[kr],1e-4*kn[kr]**(-3),'k--')
ax.loglog(kn[kr],1e-3*kn[kr]**(-1),'k--')
ax.legend(['$E(k)$','$F(k)$'],fontsize=14)
ax.text(kn[kr[-10]],3e-4*kn[kr[-10]]**(-3),'$k^{-3}$',fontsize=14)
ax.text(kn[kr[-10]],3e-3*kn[kr[-10]]**(-1),'$k^{-1}$',fontsize=14)
ax.axis([kn[1]-eps,kn[-1],vmin,vmax])
ani = anim.FuncAnimation(fig, update_spec_anim, interval=0, frames=Nt-ntav, blit=True, fargs=(qd,phires,nres,ntav))
ani.save(outfl,dpi=200,fps=25)
fl.close()
sbp.call(['vlc',outfl])
def update_anim(j, qd, phi, n):
    """Animation callback: push frame j of phi and n into the two pcolormeshes."""
    print(j)
    # set_array wants a flat array; transpose to match the initial plot.
    qd[0].set_array(np.real(phi[j,]).T.ravel())
    qd[1].set_array(np.real(n[j,]).T.ravel())
    return qd
def anim(flname,outfl,vm=1.0,vmn=1.0,ntav=2):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pylab as plt
import matplotlib.animation as anim
if (':' in flname):
flname=sync_rf(flname)
fl=h5.File(flname,"r")
phi=fl['fields/phi']
n=fl['fields/n']
w, h = plt.figaspect(0.5)
fig,ax=plt.subplots(1,2,sharey=True,figsize=(w,h))
qd0 = ax[0].pcolormesh(np.real(phi[0,].T),shading='flat',vmin=-vm,vmax=vm,cmap='seismic',rasterized=True)
qd1 = ax[1].pcolormesh(np.real(n[0,].T),shading='flat',vmin=-vmn,vmax=vmn,cmap='seismic',rasterized=True)
fig.tight_layout()
ax[0].axis('square')
ax[1].axis('square')
Nt=phi.shape[0]
ani = anim.FuncAnimation(fig, update_anim, interval=0, frames=Nt, blit=True, fargs=((qd0,qd1),phi,n))
ani.save(outfl,dpi=200,fps=25)
sbp.call(['vlc',outfl])
fl.close() | StarcoderdataPython |
2593 | # coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sparse Fine-tuning the library models for question answering.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
from nn_pruning.sparse_trainer import SparseTrainer
from .qa_train import QATrainer
# SparseTrainer should appear first in the base classes, as its functions must override QATrainer and its base classes (Trainer)
class QASparseTrainer(SparseTrainer, QATrainer):
    """Question-answering trainer with sparse (pruning-aware) training.

    SparseTrainer is deliberately listed first so that, per Python's MRO,
    its method overrides take precedence over QATrainer and the underlying
    Trainer.
    """

    def __init__(self, sparse_args, *args, **kwargs):
        # Initialize the full QA trainer first, then layer the sparse
        # training state (sparse_args) on top of it. The two bases are
        # initialized explicitly, in this order, rather than via super().
        QATrainer.__init__(self, *args, **kwargs)
        SparseTrainer.__init__(self, sparse_args)
| StarcoderdataPython |
3318502 | <reponame>zhexiao/kweets<filename>tweets/scripts/tw_streaming.py<gh_stars>0
from gevent import monkey; monkey.patch_all()
from gevent.pool import Pool
from pprint import pprint
from TwitterAPI import TwitterAPI
from datetime import datetime
import gevent, sys, os, redis, MySQLdb
import ujson as json
from config_import import *
# streaming class
class Streaming:
    """Consume the Twitter statuses/filter stream and republish matching
    tweets on Redis pub/sub channels (one channel per tracked term).

    Tracked terms come from the `twitter_tracks` MySQL table and are of
    the form '@screen_name' or '#hashtag'.
    """

    def __init__(self):
        # Twitter API credentials (imported from config_import)
        self.TW_CONSUMER_KEY = TW_CONSUMER_KEY
        self.TW_CONSUMER_SECRET = TW_CONSUMER_SECRET
        self.TW_TOKEN = TW_TOKEN
        self.TW_TOKEN_SECRET = TW_TOKEN_SECRET
        # bounded greenlet pool for the redis publishes
        self.THREAD_POOL_SIZE = REDIS_POLL_SIZE
        self.pool = Pool(self.THREAD_POOL_SIZE)
        # open the redis connection up front; MySQL connections are opened
        # per query instead
        self.connect_db_redis()

    def connect_db_mysql(self):
        """Open and return a new MySQL connection."""
        return MySQLdb.connect(host=DB_HOST, user=DB_USER, passwd=DB_PASSWORD,
                               db=DB_NAME, charset="utf8")

    def connect_db_redis(self):
        """Create the redis connection used for publishing."""
        self.REDIS_CONF = {
            'host': REDIS_DB_HOST,
            'port': REDIS_PORT,
            'db': REDIS_DB_NUMBER,
        }
        self.redis_conn = redis.StrictRedis(**self.REDIS_CONF)

    def error_print(self, message):
        """Print the current exception together with the file name and
        line number it was raised from (must be called from an except)."""
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, message, fname, exc_tb.tb_lineno)

    def twitter_track(self, track_text):
        """Stream statuses matching the *track_text* terms and hand each
        original (non-retweet) tweet to redis_pub via the greenlet pool."""
        self.track_text = track_text
        try:
            api = TwitterAPI(self.TW_CONSUMER_KEY, self.TW_CONSUMER_SECRET,
                             self.TW_TOKEN, self.TW_TOKEN_SECRET)
            tracks_data = {'track': "{0}".format(",".join(self.track_text))}
            stream_res = api.request('statuses/filter', tracks_data).get_iterator()
            for item in stream_res:
                # omit retweets
                if 'retweeted_status' in item:
                    continue
                if 'text' in item:
                    self.pool.spawn(self.redis_pub, item)
                    # throttle publishing; also yields to the pool greenlets
                    gevent.sleep(1)
        except Exception as e:
            self.error_print(e)

    def redis_pub(self, tweet):
        """Publish *tweet* on the channel of every tracked @mention it
        contains; only if no mention matched, publish it on every tracked
        #hashtag channel instead."""
        try:
            published = False
            # BUG FIX: dropped the dead `if not published:` guard that
            # immediately followed `published = False` (always true)
            for mention in tweet['entities']['user_mentions']:
                channel = '@' + mention['screen_name']
                if channel in self.track_text:
                    published = True
                    self.redis_conn.publish(channel, json.dumps(tweet))
            if not published:
                for hashtag in tweet['entities']['hashtags']:
                    channel = '#' + hashtag['text']
                    if channel in self.track_text:
                        self.redis_conn.publish(channel, json.dumps(tweet))
        except Exception as e:
            self.error_print(e)

    def get_tw_tracks(self):
        """Return the list of track strings from twitter_tracks (newest
        first); empty list when the table is empty, None on error."""
        try:
            db = self.connect_db_mysql()
            # BUG FIX: close the connection even if the query raises,
            # instead of leaking it
            try:
                cursor = db.cursor(MySQLdb.cursors.DictCursor)
                cursor.execute('''SELECT * FROM twitter_tracks order by id desc''')
                rows = cursor.fetchall()
                cursor.close()
            finally:
                db.close()
            return [t_d['text'] for t_d in rows]
        except Exception as e:
            self.error_print(e)

    def get_tw_last_track_id(self):
        """Return the id of the most recently inserted track row, or None
        when the table is empty or on error."""
        try:
            db = self.connect_db_mysql()
            try:
                cursor = db.cursor(MySQLdb.cursors.DictCursor)
                cursor.execute('''SELECT id FROM twitter_tracks order by id desc limit 1''')
                row = cursor.fetchone()
                cursor.close()
            finally:
                db.close()
            return row['id'] if row else None
        except Exception as e:
            self.error_print(e)
# initialize the streaming worker (opens the redis connection)
stream = Streaming()
# initial snapshot of the tracked terms and of the newest track row id
tw_tracks_data = stream.get_tw_tracks()
latest_track_id = stream.get_tw_last_track_id()
# the stream greenlet is not running yet
streaming_stopped = True
# supervisor loop: keep the streaming greenlet alive, and restart it with a
# fresh track list whenever a new row appears in the twitter_tracks table
while True:
    try:
        # poll the database for the newest track row id
        last_insert_track_id = stream.get_tw_last_track_id()
        # (re)start the stream if it is not currently running
        if streaming_stopped:
            # spawn the blocking twitter stream in its own greenlet
            thread_m = gevent.spawn(stream.twitter_track, tw_tracks_data)
            # once the greenlet has started, mark the stream as running
            streaming_stopped = not thread_m.started
        # a changed id means the track list was updated in the database
        if last_insert_track_id != latest_track_id:
            # kill the greenlet and reload the track list; the next loop
            # iteration respawns the stream with the fresh terms
            thread_m.kill()
            streaming_stopped = True
            tw_tracks_data = stream.get_tw_tracks()
            latest_track_id = last_insert_track_id
        # re-check the database every 10 seconds
        gevent.sleep(10)
    except Exception as e:
        print(e)
| StarcoderdataPython |
3220592 | <reponame>SBRG/sbaas<filename>sbaas/analysis/analysis_stage01_resequencing/stage01_resequencing_execute.py
'''resequencing class'''
from sbaas.analysis.analysis_base import *
from .stage01_resequencing_query import *
from .stage01_resequencing_io import *
class stage01_resequencing_execute():
'''class for resequencing analysis'''
def __init__(self, session_I=None):
    """Bind a SQLAlchemy session (a new one when none is supplied) and
    construct the query and calculation helpers used by the analysis
    methods."""
    self.session = session_I if session_I else Session()
    self.stage01_resequencing_query = stage01_resequencing_query(self.session)
    self.calculate = base_calculate()
#analysis
def execute_filterMutations_population(self,experiment_id,p_value_criteria=0.01,quality_criteria=6.0,frequency_criteria=0.1,sample_names_I=None):
    '''Filter mutations that do not meet the desired criteria.

    For each sample of the experiment (or the explicit sample_names_I
    list), keep only the mutations for which filtered evidence exists and
    write them to data_stage01_resequencing_mutationsFiltered.

    NOTE(review): p_value_criteria / quality_criteria / frequency_criteria
    are currently unused here -- the actual filtering happens inside the
    evidence query; the parameters are kept for interface compatibility.
    '''
    print('Executing filterMutations_population...')
    # query sample names from the experiment
    if sample_names_I:
        sample_names = sample_names_I
    else:
        sample_names = self.stage01_resequencing_query.get_sampleNames_experimentID_dataStage01ResequencingMetadata(experiment_id,8)
    for sn in sample_names:
        print('Filtering mutations for sample_name ' + sn)
        # query mutation data filtered by frequency
        data_mutations_list = self.stage01_resequencing_query.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutations(experiment_id,sn)
        for data_mutations in data_mutations_list:
            print('Filtering mutations for mutation id ' + str(data_mutations['mutation_id']))
            # query data filtered by evidence-specific criteria
            data_evidence_list = []
            for pid in data_mutations['parent_ids']:
                print('Filtering mutations for parent id ' + str(pid))
                data_evidence_dict = self.stage01_resequencing_query.get_evidence_experimentIDAndSampleNameAndParentID_dataStage01ResequencingEvidence(experiment_id,sn,pid)
                data_evidence_list.append(data_evidence_dict)
            # BUG FIX: guard against mutations with no parent ids, which
            # previously raised IndexError on data_evidence_list[0]
            if data_evidence_list and data_evidence_list[0]:  # filtered evidence was found
                # queue the surviving mutation for insertion
                row = data_stage01_resequencing_mutationsFiltered(
                    data_mutations['experiment_id'],
                    data_mutations['sample_name'],
                    data_mutations['mutation_id'],
                    data_mutations['parent_ids'],
                    data_mutations['mutation_data'])
                self.session.add(row)
    # commit once after all samples have been processed
    self.session.commit()
def execute_annotateFilteredMutations(self,experiment_id,sample_names_I=[],
            ref_genome_I='data/U00096.2.gb'):
    '''Annotate the filtered mutations of an experiment.

    Each mutation position is mapped onto the reference genome (genbank
    file ref_genome_I) to recover gene names, locations and products,
    plus an EcoGene link per locus tag, and the annotated rows are
    written to data_stage01_resequencing_mutationsAnnotated.
    '''
    # NOTE: unused `from Bio import Entrez` and the dead accumulators
    # genotype_phenotype_O / mutation_data_O were removed
    from Bio import SeqIO
    record = SeqIO.read(ref_genome_I,'genbank')
    print('Executing annotation of filtered mutations...')
    # query sample names
    if sample_names_I:
        sample_names = sample_names_I
    else:
        sample_names = self.stage01_resequencing_query.get_sampleNames_experimentID_dataStage01ResequencingMutationsFiltered(experiment_id)
    for sn in sample_names:
        print('analyzing sample_name ' + sn)
        # query mutation data:
        mutations = self.stage01_resequencing_query.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutationsFiltered(experiment_id,sn)
        for end_cnt,mutation in enumerate(mutations):
            print('analyzing mutations')
            data_tmp = {}
            # annotate each mutation based on the position
            annotation = self.find_genesFromMutationPosition(mutation['mutation_data']['position'],record)
            data_tmp['mutation_genes'] = annotation['gene']
            data_tmp['mutation_locations'] = annotation['location']
            data_tmp['mutation_annotations'] = annotation['product']
            # generate a link to ecogene for the genes
            data_tmp['mutation_links'] = []
            for bnumber in annotation['locus_tag']:
                if bnumber:
                    ecogenes = self.stage01_resequencing_query.get_ecogeneAccessionNumber_biologicalmaterialIDAndOrderedLocusName_biologicalMaterialGeneReferences('MG1655',bnumber)
                    if ecogenes:
                        ecogene = ecogenes[0]
                        ecogene_link = self.generate_httplink2gene_ecogene(ecogene['ecogene_accession_number'])
                        data_tmp['mutation_links'].append(ecogene_link)
                    else: print('no ecogene_accession_number found for ordered_locus_location ' + bnumber)
            data_tmp['experiment_id'] = mutation['experiment_id']
            data_tmp['sample_name'] = mutation['sample_name']
            # 'frequency' may be absent from mutation_data; treat a missing
            # value as a fully fixed mutation (frequency 1.0)
            data_tmp['mutation_frequency'] = mutation['mutation_data'].get('frequency',1.0)
            data_tmp['mutation_position'] = mutation['mutation_data']['position']
            data_tmp['mutation_type'] = mutation['mutation_data']['type']
            data_tmp['mutation_data'] = mutation['mutation_data']
            # add data to the database
            row = data_stage01_resequencing_mutationsAnnotated(data_tmp['experiment_id'],
                data_tmp['sample_name'],
                data_tmp['mutation_frequency'],
                data_tmp['mutation_type'],
                data_tmp['mutation_position'],
                data_tmp['mutation_data'],
                data_tmp['mutation_annotations'],
                data_tmp['mutation_genes'],
                data_tmp['mutation_locations'],
                data_tmp['mutation_links'],
                True,
                None)
            self.session.add(row)
    self.session.commit()
def execute_analyzeLineage_population(self,experiment_id,strain_lineage):
    '''Analyze a strain lineage to identify the following:
    1. conserved mutations
    2. changes in frequency of mutations
    3. hitch-hiker mutations

    Input:
       experiment_id = experiment id
       strain_lineage = {"lineage_name":{0:sample_name,1:sample_name,...,n:sample_name}}
           where n is the end-point strain
    Output:
       rows written to data_stage01_resequencing_lineage
    '''
    #TODO: drive from analysis table
    #TODO: convert time-point to lineage
    # type-specific fields that must all agree for two mutations to be
    # considered the same event
    match_fields = {
        'SNP': ('position','new_seq'),
        'SUB': ('position','size','new_seq'),
        'DEL': ('position','size'),
        'INS': ('position','new_seq'),
        # BUG FIX: 'position' was previously omitted for MOB here, unlike
        # the endpoint-replicate analysis; insertions of the same mobile
        # element at different loci were wrongly treated as the same event
        'MOB': ('position','repeat_name','strand','duplication_size'),
        'AMP': ('position','size','new_copy_number'),
        'CON': ('position','size','region'),
        'INV': ('position','size'),
    }

    def _is_match(end_data,inter_data):
        # same type and all type-specific fields equal
        fields = match_fields.get(end_data['type'])
        if fields is None:
            print('unknown mutation type')
            return False
        if end_data['type'] != inter_data['type']:
            return False
        return all(end_data[f] == inter_data[f] for f in fields)

    def _lineage_row(mutation,intermediate,lineage_name):
        # flatten one mutation record for the lineage table; a missing
        # 'frequency' means the mutation is fixed (1.0)
        d = mutation['mutation_data']
        return {
            'experiment_id': mutation['experiment_id'],
            'sample_name': mutation['sample_name'],
            'intermediate': intermediate,
            'mutation_frequency': d.get('frequency',1.0),
            'mutation_position': d['position'],
            'mutation_type': d['type'],
            'lineage_name': lineage_name,
            'mutation_data': d,
        }

    print('Executing analyzeLineage_population...')
    data_O = []
    for lineage_name,strain in strain_lineage.items():
        print('analyzing lineage ' + lineage_name)
        lineage = list(strain.keys())
        end_point = max(lineage)
        # query end-point data:
        end_mutations = self.stage01_resequencing_query.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutationsFiltered(experiment_id,strain[end_point])
        intermediates = [i for i in lineage if i!=end_point]
        for intermediate in intermediates:
            print('analyzing intermediate ' + str(intermediate))
            # query intermediate data:
            intermediate_mutations = self.stage01_resequencing_query.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutationsFiltered(experiment_id,strain[intermediate])
            for end_cnt,end_mutation in enumerate(end_mutations):
                print('end mutation type/position ' + end_mutation['mutation_data']['type'] + '/' + str(end_mutation['mutation_data']['position']))
                for inter_cnt,intermediate_mutation in enumerate(intermediate_mutations):
                    print('intermediate mutation type/position ' + intermediate_mutation['mutation_data']['type'] + '/' + str(intermediate_mutation['mutation_data']['position']))
                    if intermediate == 0 and inter_cnt == 0:
                        # copy end_point data (only once per strain lineage)
                        data_O.append(_lineage_row(end_mutation,end_point,lineage_name))
                    # find the end-point mutation in the intermediates,
                    # filtered by mutation-type-specific criteria
                    if _is_match(end_mutation['mutation_data'],intermediate_mutation['mutation_data']):
                        data_O.append(_lineage_row(intermediate_mutation,intermediate,lineage_name))
    for d in data_O:
        row = data_stage01_resequencing_lineage(d['experiment_id'],
            d['lineage_name'],
            d['sample_name'],
            d['intermediate'],
            d['mutation_frequency'],
            d['mutation_type'],
            d['mutation_position'],
            d['mutation_data'],
            None,None,None,None,None)
        self.session.add(row)
    self.session.commit()
def execute_analyzeEndpointReplicates_population(self,experiment_id,end_points):
    '''Analyze a endpoint replicates to identify the following:
    1. conserved mutations among replicates
    2. unique mutations among replicates'''
    #Input:
    #   experiment_id = experiment id
    #   end_points = {analysis_id: [sample_name_1,sample_name_2,sample_name_3,...]}
    #Output:
    #   one data_stage01_resequencing_endpoints row per mutation, flagged
    #   isUnique=False (found in another replicate; recorded once) or
    #   isUnique=True (found in exactly one replicate)
    #TODO: drive from analysis table
    print('Executing analyzeEndpointReplicates_population...')
    data_O = [];
    for analysis_id,strains in end_points.items():
        print('analyzing endpoint ' + analysis_id);
        analyzed_strain1 = []; # strain1s that have been analyzed (NOTE: currently unused afterwards)
        analyzed_mutation_pairs = []; # mutation pairs that have been analyzed (NOTE: currently unused)
        # (strain1, mutation type, position) -> number of times the mutation has
        # already been recorded as conserved; ensures each strain1 mutation is
        # written at most once across all strain2 comparisons
        matched_mutations = {};
        for strain1 in strains:
            # query strain 1 data:
            strain1_mutations = [];
            strain1_mutations = self.stage01_resequencing_query.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutationsFiltered(experiment_id,strain1);
            analyzed_strain1.append(strain1);
            analyzed_strain1_mutations = []; # (type, position) of mutations from strain 1 that have been analyzed
            analyzed_strain2_mutations_all = []; # one (type, position) list per compared strain 2
            strain2_cnt = 0;
            for strain2 in strains:
                if strain2 == strain1: continue; # do not compare the same strain to itself
                print('comparing ' + strain1 + ' to ' + strain2);
                # query strain 2 data:
                strain2_mutations = [];
                strain2_mutations = self.stage01_resequencing_query.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutationsFiltered(experiment_id,strain2);
                analyzed_strain2_mutations = []; # mutations from strain 2 that have been analyzed
                for strain1_mutation_cnt,strain1_mutation in enumerate(strain1_mutations):
                    print('strain1 mutation type/position ' + strain1_mutation['mutation_data']['type'] + '/' + str(strain1_mutation['mutation_data']['position']));
                    if strain2_cnt == 0: # record strain 1 mutations only once for all strain 2 mutations
                        analyzed_strain1_mutations.append((strain1_mutation['mutation_data']['type'],strain1_mutation['mutation_data']['position']));
                    for strain2_mutation_cnt,strain2_mutation in enumerate(strain2_mutations):
                        print('strain2 mutation type/position ' + strain2_mutation['mutation_data']['type'] + '/' + str(strain2_mutation['mutation_data']['position']));
                        # initialize the conserved-mutation counter the first time
                        # this strain1 mutation is compared against a strain2 list
                        if strain2_mutation_cnt == 0 and \
                            (strain1,strain1_mutation['mutation_data']['type'],strain1_mutation['mutation_data']['position']) not in matched_mutations:
                            matched_mutations[(strain1,strain1_mutation['mutation_data']['type'],strain1_mutation['mutation_data']['position'])] = 0;
                        # find the mutations that are common to strain1 and strain2
                        # filter by mutation type-specific criteria
                        match = {};
                        if strain1_mutation['mutation_data']['type'] == 'SNP':
                            if strain1_mutation['mutation_data']['type']==strain2_mutation['mutation_data']['type'] and \
                                strain1_mutation['mutation_data']['position']==strain2_mutation['mutation_data']['position'] and \
                                strain1_mutation['mutation_data']['new_seq']==strain2_mutation['mutation_data']['new_seq']:
                                match = strain1_mutation;
                        elif strain1_mutation['mutation_data']['type'] == 'SUB':
                            if strain1_mutation['mutation_data']['type']==strain2_mutation['mutation_data']['type'] and \
                                strain1_mutation['mutation_data']['position']==strain2_mutation['mutation_data']['position'] and \
                                strain1_mutation['mutation_data']['size']==strain2_mutation['mutation_data']['size'] and \
                                strain1_mutation['mutation_data']['new_seq']==strain2_mutation['mutation_data']['new_seq']:
                                match = strain1_mutation;
                        elif strain1_mutation['mutation_data']['type'] == 'DEL':
                            if strain1_mutation['mutation_data']['type']==strain2_mutation['mutation_data']['type'] and \
                                strain1_mutation['mutation_data']['position']==strain2_mutation['mutation_data']['position'] and \
                                strain1_mutation['mutation_data']['size']==strain2_mutation['mutation_data']['size']:
                                match = strain1_mutation;
                        elif strain1_mutation['mutation_data']['type'] == 'INS':
                            if strain1_mutation['mutation_data']['type']==strain2_mutation['mutation_data']['type'] and \
                                strain1_mutation['mutation_data']['position']==strain2_mutation['mutation_data']['position'] and \
                                strain1_mutation['mutation_data']['new_seq']==strain2_mutation['mutation_data']['new_seq']:
                                match = strain1_mutation;
                        elif strain1_mutation['mutation_data']['type'] == 'MOB':
                            if strain1_mutation['mutation_data']['type']==strain2_mutation['mutation_data']['type'] and \
                                strain1_mutation['mutation_data']['position']==strain2_mutation['mutation_data']['position'] and \
                                strain1_mutation['mutation_data']['repeat_name']==strain2_mutation['mutation_data']['repeat_name'] and \
                                strain1_mutation['mutation_data']['strand']==strain2_mutation['mutation_data']['strand'] and \
                                strain1_mutation['mutation_data']['duplication_size']==strain2_mutation['mutation_data']['duplication_size']:
                                match = strain1_mutation;
                        elif strain1_mutation['mutation_data']['type'] == 'AMP':
                            if strain1_mutation['mutation_data']['type']==strain2_mutation['mutation_data']['type'] and \
                                strain1_mutation['mutation_data']['position']==strain2_mutation['mutation_data']['position'] and \
                                strain1_mutation['mutation_data']['size']==strain2_mutation['mutation_data']['size'] and \
                                strain1_mutation['mutation_data']['new_copy_number']==strain2_mutation['mutation_data']['new_copy_number']:
                                match = strain1_mutation;
                        elif strain1_mutation['mutation_data']['type'] == 'CON':
                            if strain1_mutation['mutation_data']['type']==strain2_mutation['mutation_data']['type'] and \
                                strain1_mutation['mutation_data']['position']==strain2_mutation['mutation_data']['position'] and \
                                strain1_mutation['mutation_data']['size']==strain2_mutation['mutation_data']['size'] and \
                                strain1_mutation['mutation_data']['region']==strain2_mutation['mutation_data']['region']:
                                match = strain1_mutation;
                        elif strain1_mutation['mutation_data']['type'] == 'INV':
                            if strain1_mutation['mutation_data']['type']==strain2_mutation['mutation_data']['type'] and \
                                strain1_mutation['mutation_data']['position']==strain2_mutation['mutation_data']['position'] and \
                                strain1_mutation['mutation_data']['size']==strain2_mutation['mutation_data']['size']:
                                match = strain1_mutation;
                        else:
                            print('unknown mutation type');
                        if match and \
                            matched_mutations[(strain1,strain1_mutation['mutation_data']['type'],strain1_mutation['mutation_data']['position'])] == 0:
                            # check that the mutation combination and pairs of strains have not already been analyzed
                            data_tmp = {};
                            data_tmp['experiment_id'] = match['experiment_id'];
                            data_tmp['sample_name'] = match['sample_name'];
                            frequency = 1.0;
                            # 'frequency' may be absent; default to 1.0 (fixed mutation)
                            if 'frequency' in match['mutation_data']: frequency = match['mutation_data']['frequency'];
                            data_tmp['mutation_frequency'] = frequency
                            data_tmp['mutation_position'] = match['mutation_data']['position']
                            data_tmp['mutation_type'] = match['mutation_data']['type']
                            data_tmp['analysis_id'] = analysis_id;
                            data_tmp['mutation_data'] = match['mutation_data'];
                            data_tmp['isUnique'] = False;
                            data_O.append(data_tmp);
                            matched_mutations[(strain1,strain1_mutation['mutation_data']['type'],strain1_mutation['mutation_data']['position'])] += 1;
                        if strain1_mutation_cnt == 0: # record strain 2 mutations only once for all strain 1 mutations
                            analyzed_strain2_mutations.append((strain2_mutation['mutation_data']['type'],strain2_mutation['mutation_data']['position']));
                analyzed_strain2_mutations_all.append(analyzed_strain2_mutations);
                strain2_cnt += 1;
            # check for unique mutations and for conserved mutations:
            # a strain1 mutation absent (by type and position) from every other
            # replicate is emitted flagged isUnique=True
            for analyzed_strain1_mutation in analyzed_strain1_mutations:
                isUnique_bool = True;
                isConserved_cnt = 0; # NOTE: tallied but not used downstream
                for analyzed_strain2_mutations_cnt,analyzed_strain2_mutations in enumerate(analyzed_strain2_mutations_all):
                    for analyzed_strain2_mutation in analyzed_strain2_mutations:
                        if analyzed_strain1_mutation == analyzed_strain2_mutation:
                            isUnique_bool = False;
                            isConserved_cnt += 1;
                if isUnique_bool:
                    # locate the full record for the unique (type, position) pair
                    for strain1_mutation_cnt,strain1_mutation in enumerate(strain1_mutations):
                        if (strain1_mutation['mutation_data']['type'],strain1_mutation['mutation_data']['position'])==analyzed_strain1_mutation:
                            data_tmp = {};
                            data_tmp['experiment_id'] = strain1_mutation['experiment_id'];
                            data_tmp['sample_name'] = strain1_mutation['sample_name'];
                            frequency = 1.0;
                            if 'frequency' in strain1_mutation['mutation_data']: frequency = strain1_mutation['mutation_data']['frequency'];
                            data_tmp['mutation_frequency'] = frequency
                            data_tmp['mutation_position'] = strain1_mutation['mutation_data']['position']
                            data_tmp['mutation_type'] = strain1_mutation['mutation_data']['type']
                            data_tmp['analysis_id'] = analysis_id;
                            data_tmp['mutation_data'] = strain1_mutation['mutation_data'];
                            data_tmp['isUnique'] = True;
                            data_O.append(data_tmp);
    # write all accumulated rows and commit once
    for d in data_O:
        row = [];
        row = data_stage01_resequencing_endpoints(d['experiment_id'],
            d['analysis_id'],
            d['sample_name'],
            d['mutation_frequency'],
            d['mutation_type'],
            d['mutation_position'],
            d['mutation_data'],
            d['isUnique'],
            None,None,None,None,None);
        self.session.add(row);
    self.session.commit();
def execute_annotateMutations_lineage(self,experiment_id,sample_names_I=[],ref_genome_I='data/U00096.2.gb'):
    '''Annotate mutations for date_stage01_resequencing_lineage
    based on position, reference genome, and reference genome biologicalmaterial_id'''
    from Bio import SeqIO
    from Bio import Entrez
    record = SeqIO.read(ref_genome_I,'genbank')
    print('Executing annotateMutations_lineage...')
    # resolve which samples to process
    if sample_names_I:
        sample_names = sample_names_I
    else:
        sample_names = self.stage01_resequencing_query.get_sampleNames_experimentID_dataStage01ResequencingLineage(experiment_id)
    annotated_rows = []
    for sample_name in sample_names:
        print('annotating mutation for sample_name ' + sample_name)
        # annotate every lineage row recorded for this sample
        for lineage_row in self.stage01_resequencing_query.get_row_experimentIDAndSampleName_dataStage01ResequencingLineage(experiment_id,sample_name):
            # map the mutation position onto reference-genome features
            feature_info = self.find_genesFromMutationPosition(lineage_row['mutation_position'],record)
            lineage_row['mutation_genes'] = feature_info['gene']
            lineage_row['mutation_locations'] = feature_info['location']
            lineage_row['mutation_annotations'] = feature_info['product']
            # build one EcoGene link per annotated locus tag
            links = []
            for bnumber in feature_info['locus_tag']:
                if not bnumber:
                    continue
                ecogenes = self.stage01_resequencing_query.get_ecogeneAccessionNumber_biologicalmaterialIDAndOrderedLocusName_biologicalMaterialGeneReferences('MG1655',bnumber)
                if ecogenes:
                    links.append(self.generate_httplink2gene_ecogene(ecogenes[0]['ecogene_accession_number']))
                else:
                    print('no ecogene_accession_number found for ordered_locus_location ' + bnumber)
            lineage_row['mutation_links'] = links
            annotated_rows.append(lineage_row)
    # update rows in the database
    io = stage01_resequencing_io()
    io.update_dataStage01ResequencingLineage(annotated_rows)
def execute_annotateMutations_endpoints(self,experiment_id,sample_names_I=[],ref_genome_I='data/U00096.2.gb'):
    '''Annotate mutations for date_stage01_resequencing_endpoints
    based on position, reference genome, and reference genome biologicalmaterial_id'''
    from Bio import SeqIO
    from Bio import Entrez
    record = SeqIO.read(ref_genome_I,'genbank')
    print('Executing annotateMutations_endpoints...')
    # resolve which samples to process
    if sample_names_I:
        sample_names = sample_names_I
    else:
        sample_names = self.stage01_resequencing_query.get_sampleNames_experimentID_dataStage01ResequencingEndpoints(experiment_id)
    annotated_rows = []
    for sample_name in sample_names:
        print('annotating mutation for sample_name ' + sample_name)
        # annotate every endpoint row recorded for this sample
        for endpoint_row in self.stage01_resequencing_query.get_row_experimentIDAndSampleName_dataStage01ResequencingEndpoints(experiment_id,sample_name):
            # map the mutation position onto reference-genome features
            feature_info = self.find_genesFromMutationPosition(endpoint_row['mutation_position'],record)
            endpoint_row['mutation_genes'] = feature_info['gene']
            endpoint_row['mutation_locations'] = feature_info['location']
            endpoint_row['mutation_annotations'] = feature_info['product']
            # build one EcoGene link per annotated locus tag
            links = []
            for bnumber in feature_info['locus_tag']:
                if not bnumber:
                    continue
                ecogenes = self.stage01_resequencing_query.get_ecogeneAccessionNumber_biologicalmaterialIDAndOrderedLocusName_biologicalMaterialGeneReferences('MG1655',bnumber)
                if ecogenes:
                    links.append(self.generate_httplink2gene_ecogene(ecogenes[0]['ecogene_accession_number']))
                else:
                    print('no ecogene_accession_number found for ordered_locus_location ' + bnumber)
            endpoint_row['mutation_links'] = links
            annotated_rows.append(endpoint_row)
    # update rows in the database
    io = stage01_resequencing_io()
    io.update_dataStage01ResequencingEndpoints(annotated_rows)
#helper functions
def find_genesFromMutationPosition(self,mutation_position_I,record_I):
'''find genes at the position or closes to the position given the reference genome'''
#input:
# mutation_position_I = mutation position [int]
# record = genbank record [SeqRecord]
snp_records = {};
snp_records['gene'] = []
snp_records['db_xref'] = []
snp_records['locus_tag'] = []
snp_records['EC_number'] = []
snp_records['product'] = []
snp_records['location'] = []
# find features in the coding region of the genome that bracket the mutation position
for feature_cnt,feature in enumerate(record_I.features):
if mutation_position_I in feature and feature.type == 'gene':
snp_records['gene'] = feature.qualifiers.get('gene')
snp_records['db_xref'] = feature.qualifiers.get('db_xref')
snp_records['locus_tag'] = feature.qualifiers.get('locus_tag')
elif mutation_position_I in feature and feature.type == 'CDS':
if feature.qualifiers.get('EC_number'):snp_records['EC_number'] = feature.qualifiers.get('EC_number')
else:snp_records['EC_number'] = [None];
if feature.qualifiers.get('product'):snp_records['product'] = feature.qualifiers.get('product')
else:snp_records['product'] = [None];
snp_records['location'] = ['coding'];
elif mutation_position_I in feature and feature.type == 'repeat_region':
snp_records['location'] = feature.qualifiers.get('note')
elif mutation_position_I in feature and feature.type == 'mobile_element':
snp_records['location'] = feature.qualifiers.get('mobile_element_type')
elif mutation_position_I in feature and feature.type == 'misc_feature':
snp_records['location'] = feature.qualifiers.get('note')
elif mutation_position_I in feature and feature.type == 'mat_peptide':
snp_records['gene'] = feature.qualifiers.get('gene')
snp_records['locus_tag'] = feature.qualifiers.get('locus_tag')
#snp_records['location'] = feature.qualifiers.get('note')
if feature.qualifiers.get('EC_number'):snp_records['EC_number'] = feature.qualifiers.get('EC_number')
else:snp_records['EC_number'] = [None];
if feature.qualifiers.get('product'):snp_records['product'] = feature.qualifiers.get('product')
else:snp_records['product'] = [None];
snp_records['location'] = ['coding'];
elif mutation_position_I in feature and feature.type == 'tRNA':
snp_records['gene'] = feature.qualifiers.get('gene')
snp_records['locus_tag'] = feature.qualifiers.get('locus_tag')
#snp_records['location'] = feature.qualifiers.get('note')
if feature.qualifiers.get('EC_number'):snp_records['EC_number'] = feature.qualifiers.get('EC_number')
else:snp_records['EC_number'] = [None];
if feature.qualifiers.get('product'):snp_records['product'] = feature.qualifiers.get('product')
else:snp_records['product'] = [None];
snp_records['location'] = ['coding'];
elif mutation_position_I in feature and feature.type == 'rRNA':
snp_records['gene'] = feature.qualifiers.get('gene')
snp_records['db_xref'] = feature.qualifiers.get('db_xref')
snp_records['locus_tag'] = feature.qualifiers.get('locus_tag')
#snp_records['location'] = feature.qualifiers.get('note')
if feature.qualifiers.get('EC_number'):snp_records['EC_number'] = feature.qualifiers.get('EC_number')
else:snp_records['EC_number'] = [None];
if feature.qualifiers.get('product'):snp_records['product'] = feature.qualifiers.get('product')
else:snp_records['product'] = [None];
snp_records['location'] = ['coding'];
elif mutation_position_I in feature and feature.type == 'ncRNA':
snp_records['gene'] = feature.qualifiers.get('gene')
snp_records['db_xref'] = feature.qualifiers.get('db_xref')
snp_records['locus_tag'] = feature.qualifiers.get('locus_tag')
#snp_records['location'] = feature.qualifiers.get('note')
if feature.qualifiers.get('EC_number'):snp_records['EC_number'] = feature.qualifiers.get('EC_number')
else:snp_records['EC_number'] = [None];
if feature.qualifiers.get('product'):snp_records['product'] = feature.qualifiers.get('product')
else:snp_records['product'] = [None];
snp_records['location'] = ['coding'];
elif mutation_position_I in feature and feature.type != 'source':
print(feature)
if not snp_records['location']:
# no features in the coding region were found that bracket the mutation
# find features before and after the mutation position
start_prev = 0;
stop_prev = 0;
inter1_start = None;
inter1_stop = None;
inter2_start = None;
inter2_stop = None;
# pass 1: locate the start and stop positions of the features before and after the mutation
for feature_cnt,feature in enumerate(record_I.features):
start = feature.location.start.position
stop = feature.location.end.position
if mutation_position_I > stop_prev and mutation_position_I < start:
inter1_start = start_prev;
inter1_stop = stop_prev;
inter2_start = start;
inter2_stop = stop
break;
start_prev = start;
stop_prev = stop;
if not inter1_start:
# the end of the genome was reached without finding features both before and after the mutation
# record the last entry
inter1_start = start_prev;
inter1_stop = stop_prev;
inter2_start = start;
inter2_stop = stop
# pass 2: record features before and after the mutation
for feature_cnt,feature in enumerate(record_I.features):
start = feature.location.start.position
stop = feature.location.end.position
if (inter1_start == start and inter1_stop == stop) or (inter2_start == start and inter2_stop == stop):
if feature.type == 'gene':
snp_records['gene'] += feature.qualifiers.get('gene')
snp_records['db_xref'] += feature.qualifiers.get('db_xref')
snp_records['locus_tag'] += feature.qualifiers.get('locus_tag')
if feature.type == 'CDS':
if feature.qualifiers.get('EC_number'):snp_records['EC_number'] += feature.qualifiers.get('EC_number')
else:snp_records['EC_number'] += [None]
if feature.qualifiers.get('product'):snp_records['product'] += feature.qualifiers.get('product')
else:snp_records['product'] += [None]
for gene in snp_records['gene']:
snp_records['location'] += ['intergenic']
return snp_records;
def generate_httplink2gene_ecogene(self, ecogene_I):
    """Build the EcoCyc gene-page URL for an EcoGene accession number.

    Args:
        ecogene_I: EcoGene accession string (e.g. 'EG10001').

    Returns:
        The EcoCyc NEW-IMAGE gene URL for that accession.
    """
    base_url = 'http://ecocyc.org/ECOLI/NEW-IMAGE?type=GENE&object='
    return base_url + ecogene_I
#table initializations:
def drop_dataStage01(self):
    """Drop every stage01 resequencing table, reporting SQL errors to stdout."""
    # Drop order matches the historical one; each .drop(engine, True) only
    # drops the table if it exists (checkfirst).
    stage01_tables = (
        data_stage01_resequencing_evidence,
        data_stage01_resequencing_mutations,
        data_stage01_resequencing_metadata,
        data_stage01_resequencing_validation,
        data_stage01_resequencing_mutationsFiltered,
        data_stage01_resequencing_lineage,
        data_stage01_resequencing_endpoints,
        data_stage01_resequencing_mutationsAnnotated,
        data_stage01_resequencing_analysis,
        data_stage01_resequencing_heatmap,
        data_stage01_resequencing_dendrogram,
    )
    try:
        for table in stage01_tables:
            table.__table__.drop(engine, True)
    except SQLAlchemyError as e:
        print(e)
def reset_dataStage01(self, experiment_id_I=None, analysis_id_I=None):
    """Delete stage01 resequencing rows, scoped by experiment or analysis id.

    With experiment_id_I: clear the experiment-keyed tables for that id.
    With only analysis_id_I: clear the analysis-keyed tables for that id.
    With neither: clear every stage01 resequencing data table.
    """
    experiment_tables = (
        data_stage01_resequencing_metadata,
        data_stage01_resequencing_mutations,
        data_stage01_resequencing_evidence,
        data_stage01_resequencing_validation,
        data_stage01_resequencing_mutationsFiltered,
        data_stage01_resequencing_lineage,
        # endpoints and mutationsAnnotated are intentionally not cleared
        # per-experiment (matches the previous behavior).
    )
    analysis_tables = (
        data_stage01_resequencing_endpoints,
        data_stage01_resequencing_analysis,
    )
    all_tables = experiment_tables + (
        data_stage01_resequencing_endpoints,
        data_stage01_resequencing_mutationsAnnotated,
        data_stage01_resequencing_analysis,
    )
    try:
        if experiment_id_I:
            for table in experiment_tables:
                self.session.query(table).filter(
                    table.experiment_id.like(experiment_id_I)).delete(
                        synchronize_session=False)
        elif analysis_id_I:
            for table in analysis_tables:
                self.session.query(table).filter(
                    table.analysis_id.like(analysis_id_I)).delete(
                        synchronize_session=False)
        else:
            for table in all_tables:
                self.session.query(table).delete(synchronize_session=False)
        self.session.commit()
    except SQLAlchemyError as e:
        print(e)
def reset_dataStage01_resequencing_heatmap(self, analysis_id_I=None):
    """Delete heatmap rows, optionally restricted to one analysis id."""
    try:
        query = self.session.query(data_stage01_resequencing_heatmap)
        if analysis_id_I:
            query = query.filter(
                data_stage01_resequencing_heatmap.analysis_id.like(analysis_id_I))
        query.delete(synchronize_session=False)
        self.session.commit()
    except SQLAlchemyError as e:
        print(e)
def reset_dataStage01_resequencing_dendrogram(self, analysis_id_I=None):
    """Delete dendrogram rows, optionally restricted to one analysis id."""
    try:
        query = self.session.query(data_stage01_resequencing_dendrogram)
        if analysis_id_I:
            query = query.filter(
                data_stage01_resequencing_dendrogram.analysis_id.like(analysis_id_I))
        query.delete(synchronize_session=False)
        self.session.commit()
    except SQLAlchemyError as e:
        print(e)
def initialize_dataStage01(self):
    """Create every stage01 resequencing table, reporting SQL errors to stdout."""
    # Create order matches the historical one; .create(engine, True) is a
    # no-op for tables that already exist (checkfirst).
    stage01_tables = (
        data_stage01_resequencing_metadata,
        data_stage01_resequencing_mutations,
        data_stage01_resequencing_evidence,
        data_stage01_resequencing_validation,
        data_stage01_resequencing_mutationsFiltered,
        data_stage01_resequencing_lineage,
        data_stage01_resequencing_endpoints,
        data_stage01_resequencing_mutationsAnnotated,
        data_stage01_resequencing_analysis,
        data_stage01_resequencing_heatmap,
        data_stage01_resequencing_dendrogram,
    )
    try:
        for table in stage01_tables:
            table.__table__.create(engine, True)
    except SQLAlchemyError as e:
        print(e)
def reset_dataStage01_filtered(self, experiment_id_I=None):
    """Delete filtered-mutation rows, optionally restricted to one experiment."""
    try:
        query = self.session.query(data_stage01_resequencing_mutationsFiltered)
        if experiment_id_I:
            query = query.filter(
                data_stage01_resequencing_mutationsFiltered.experiment_id.like(experiment_id_I))
        query.delete(synchronize_session=False)
        self.session.commit()
    except SQLAlchemyError as e:
        print(e)
def reset_dataStage01_lineage(self, analysis_id_I=None):
    """Delete lineage rows, optionally restricted to one analysis id."""
    try:
        query = self.session.query(data_stage01_resequencing_lineage)
        if analysis_id_I:
            query = query.filter(
                data_stage01_resequencing_lineage.analysis_id.like(analysis_id_I))
        query.delete(synchronize_session=False)
        self.session.commit()
    except SQLAlchemyError as e:
        print(e)
def reset_dataStage01_endpoints(self, analysis_id_I=None):
    """Delete endpoint rows, optionally restricted to one analysis id."""
    try:
        query = self.session.query(data_stage01_resequencing_endpoints)
        if analysis_id_I:
            query = query.filter(
                data_stage01_resequencing_endpoints.analysis_id.like(analysis_id_I))
        query.delete(synchronize_session=False)
        self.session.commit()
    except SQLAlchemyError as e:
        print(e)
#TODO:
def execute_heatmap_lineage(self, analysis_id_I,
        row_pdist_metric_I='euclidean', row_linkage_method_I='complete',
        col_pdist_metric_I='euclidean', col_linkage_method_I='complete',
        mutation_id_exclusion_list=None):
    """Hierarchically cluster lineage mutation frequencies and persist the result.

    Builds a (sample x mutation) frequency matrix from all lineages of the
    analysis, clusters rows and columns, and writes the heatmap cells plus the
    column/row dendrograms to the database.

    Args:
        analysis_id_I: analysis id whose lineages are clustered.
        row_pdist_metric_I / row_linkage_method_I: row clustering parameters.
        col_pdist_metric_I / col_linkage_method_I: column clustering parameters.
        mutation_id_exclusion_list: mutation ids to drop from the matrix
            (default: none). Fixed from a mutable-default argument.
    """
    # Fix: avoid a shared mutable default argument.
    if mutation_id_exclusion_list is None:
        mutation_id_exclusion_list = []
    print('executing heatmap...')
    # get the analysis information
    experiment_ids, lineage_names = self.stage01_resequencing_query.get_experimentIDAndLineageName_analysisID_dataStage01ResequencingAnalysis(analysis_id_I)
    # partition into variables:
    intermediates_lineage = []
    mutation_data_lineage_all = []
    rows_lineage = []
    cnt_sample_names = 0
    for lineage_name_cnt, lineage_name in enumerate(lineage_names):
        # get ALL intermediates by experiment_id and lineage name
        intermediates = self.stage01_resequencing_query.get_intermediates_experimentIDAndLineageName_dataStage01ResequencingLineage(experiment_ids[lineage_name_cnt], lineage_name)
        intermediates_lineage.append(intermediates)
        cnt_sample_names += len(intermediates)
        # get ALL mutation data by experiment_id and lineage name
        mutation_data = self.stage01_resequencing_query.get_mutationData_experimentIDAndLineageName_dataStage01ResequencingLineage(experiment_ids[lineage_name_cnt], lineage_name)
        mutation_data_lineage_all.extend(mutation_data)
        # get ALL mutation frequencies by experiment_id and lineage name
        rows = self.stage01_resequencing_query.get_row_experimentIDAndLineageName_dataStage01ResequencingLineage(experiment_ids[lineage_name_cnt], lineage_name)
        rows_lineage.extend(rows)
    # unique mutations, minus any explicitly excluded ones
    mutation_data_lineage_unique = list(set(mutation_data_lineage_all))
    mutation_data_lineage = [x for x in mutation_data_lineage_unique
                             if x not in mutation_id_exclusion_list]
    # generate the frequency matrix data structure (mutation x intermediate)
    data_O = numpy.zeros((cnt_sample_names, len(mutation_data_lineage)))
    lineages = []
    col_cnt = 0
    # order 2: groups each lineage by mutation (intermediate x mutation)
    for lineage_name_cnt, lineage_name in enumerate(lineage_names):  # all lineages for intermediate j / mutation i
        for intermediate_cnt, intermediate in enumerate(intermediates_lineage[lineage_name_cnt]):
            # NOTE(review): these compare the enumerate index against the
            # min/max *value* of the intermediates list; looks like it should
            # compare `intermediate` instead -- preserved as-is, confirm intent.
            if intermediate_cnt == min(intermediates_lineage[lineage_name_cnt]):
                lineages.append(lineage_name + ": " + "start")  # label for hierarchical clustering
            elif intermediate_cnt == max(intermediates_lineage[lineage_name_cnt]):
                lineages.append(lineage_name + ": " + "end")  # label for hierarchical clustering
            else:
                lineages.append(lineage_name + ": " + str(intermediate))  # label for hierarchical clustering
            for mutation_cnt, mutation in enumerate(mutation_data_lineage):  # all mutations i for intermediate j
                for row in rows_lineage:
                    if row['mutation_id'] == mutation and row['intermediate'] == intermediate and row['lineage_name'] == lineage_name:
                        data_O[col_cnt, mutation_cnt] = row['mutation_frequency']
            col_cnt += 1
    # generate the clustering for the heatmap
    heatmap_O, dendrogram_col_O, dendrogram_row_O = self.calculate.heatmap(
        data_O, lineages, mutation_data_lineage,
        row_pdist_metric_I=row_pdist_metric_I, row_linkage_method_I=row_linkage_method_I,
        col_pdist_metric_I=col_pdist_metric_I, col_linkage_method_I=col_linkage_method_I)
    # add data to the database for the heatmap
    for d in heatmap_O:
        row = data_stage01_resequencing_heatmap(
            analysis_id_I,
            d['col_index'],
            d['row_index'],
            d['value'],
            d['col_leaves'],
            d['row_leaves'],
            d['col_label'],
            d['row_label'],
            d['col_pdist_metric'],
            d['row_pdist_metric'],
            d['col_linkage_method'],
            d['row_linkage_method'],
            'frequency', True, None)
        self.session.add(row)
    # add data to the database for the dendrograms
    # NOTE(review): pdist_metric is passed twice; the second slot may have
    # been meant for the linkage method -- preserved as-is, confirm schema.
    row = data_stage01_resequencing_dendrogram(
        analysis_id_I,
        dendrogram_col_O['leaves'],
        dendrogram_col_O['icoord'],
        dendrogram_col_O['dcoord'],
        dendrogram_col_O['ivl'],
        dendrogram_col_O['colors'],
        dendrogram_col_O['pdist_metric'],
        dendrogram_col_O['pdist_metric'],
        'frequency', True, None)
    self.session.add(row)
    row = data_stage01_resequencing_dendrogram(
        analysis_id_I,
        dendrogram_row_O['leaves'],
        dendrogram_row_O['icoord'],
        dendrogram_row_O['dcoord'],
        dendrogram_row_O['ivl'],
        dendrogram_row_O['colors'],
        dendrogram_row_O['pdist_metric'],
        dendrogram_row_O['pdist_metric'],
        'frequency', True, None)
    self.session.add(row)
    self.session.commit()
def execute_heatmap(self, analysis_id_I, mutation_id_exclusion_list=None, frequency_threshold=0.1,
        row_pdist_metric_I='euclidean', row_linkage_method_I='complete',
        col_pdist_metric_I='euclidean', col_linkage_method_I='complete'):
    """Hierarchically cluster per-sample mutation frequencies and persist the result.

    Builds a (sample x mutation) frequency matrix from the annotated mutations
    of each sample of the analysis, clusters rows and columns, and writes the
    heatmap cells plus the column/row dendrograms to the database.

    Args:
        analysis_id_I: analysis id whose samples are clustered.
        mutation_id_exclusion_list: mutation ids to drop from the matrix
            (default: none). Fixed from a mutable-default argument.
        frequency_threshold: mutations below this frequency are ignored.
        row_pdist_metric_I / row_linkage_method_I: row clustering parameters.
        col_pdist_metric_I / col_linkage_method_I: column clustering parameters.
    """
    # Fix: avoid a shared mutable default argument.
    if mutation_id_exclusion_list is None:
        mutation_id_exclusion_list = []
    print('executing heatmap...')
    # get the analysis information
    experiment_ids, sample_names = self.stage01_resequencing_query.get_experimentIDAndSampleName_analysisID_dataStage01ResequencingAnalysis(analysis_id_I)
    # partition into variables:
    mutation_data_O = []
    mutation_ids_all = []
    for sample_name_cnt, sample_name in enumerate(sample_names):
        # query mutation data:
        mutations = self.stage01_resequencing_query.get_mutations_experimentIDAndSampleName_dataStage01ResequencingMutationsAnnotated(experiment_ids[sample_name_cnt], sample_name)
        for end_cnt, mutation in enumerate(mutations):
            if mutation['mutation_position'] > 4000000:  # ignore positions greater than 4000000
                continue
            if mutation['mutation_frequency'] < frequency_threshold:
                continue
            # synthesize a mutation id: type_gene1-/-gene2_position
            mutation_genes_str = ''
            for gene in mutation['mutation_genes']:
                mutation_genes_str = mutation_genes_str + gene + '-/-'
            mutation_genes_str = mutation_genes_str[:-3]  # strip trailing '-/-'
            mutation_id = mutation['mutation_type'] + '_' + mutation_genes_str + '_' + str(mutation['mutation_position'])
            tmp = {}
            tmp.update(mutation)
            tmp.update({'mutation_id': mutation_id})
            mutation_data_O.append(tmp)
            mutation_ids_all.append(mutation_id)
    # unique mutation ids, minus any explicitly excluded ones
    mutation_ids_all_unique = list(set(mutation_ids_all))
    mutation_ids = [x for x in mutation_ids_all_unique
                    if x not in mutation_id_exclusion_list]
    # generate the frequency matrix data structure (mutation x intermediate)
    data_O = numpy.zeros((len(sample_names), len(mutation_ids)))
    samples = []
    # order 2: groups each sample by mutation (intermediate x mutation)
    for sample_name_cnt, sample_name in enumerate(sample_names):  # all samples for intermediate j / mutation i
        samples.append(sample_name)  # corresponding label from hierarchical clustering
        for mutation_cnt, mutation in enumerate(mutation_ids):  # all mutations i for intermediate j
            for row in mutation_data_O:
                if row['mutation_id'] == mutation and row['sample_name'] == sample_name:
                    data_O[sample_name_cnt, mutation_cnt] = row['mutation_frequency']
    # generate the clustering for the heatmap
    heatmap_O, dendrogram_col_O, dendrogram_row_O = self.calculate.heatmap(
        data_O, samples, mutation_ids,
        row_pdist_metric_I=row_pdist_metric_I, row_linkage_method_I=row_linkage_method_I,
        col_pdist_metric_I=col_pdist_metric_I, col_linkage_method_I=col_linkage_method_I)
    # add data to the database for the heatmap
    for d in heatmap_O:
        row = data_stage01_resequencing_heatmap(
            analysis_id_I,
            d['col_index'],
            d['row_index'],
            d['value'],
            d['col_leaves'],
            d['row_leaves'],
            d['col_label'],
            d['row_label'],
            d['col_pdist_metric'],
            d['row_pdist_metric'],
            d['col_linkage_method'],
            d['row_linkage_method'],
            'frequency', True, None)
        self.session.add(row)
    # add data to the database for the dendrograms
    # NOTE(review): pdist_metric is passed twice; the second slot may have
    # been meant for the linkage method -- preserved as-is, confirm schema.
    row = data_stage01_resequencing_dendrogram(
        analysis_id_I,
        dendrogram_col_O['leaves'],
        dendrogram_col_O['icoord'],
        dendrogram_col_O['dcoord'],
        dendrogram_col_O['ivl'],
        dendrogram_col_O['colors'],
        dendrogram_col_O['pdist_metric'],
        dendrogram_col_O['pdist_metric'],
        'frequency', True, None)
    self.session.add(row)
    row = data_stage01_resequencing_dendrogram(
        analysis_id_I,
        dendrogram_row_O['leaves'],
        dendrogram_row_O['icoord'],
        dendrogram_row_O['dcoord'],
        dendrogram_row_O['ivl'],
        dendrogram_row_O['colors'],
        dendrogram_row_O['pdist_metric'],
        dendrogram_row_O['pdist_metric'],
        'frequency', True, None)
    self.session.add(row)
    self.session.commit()
| StarcoderdataPython |
3226790 | from Constant import Constant
from Moment import Moment
from Team import Team
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib.patches import Circle, Rectangle, Arc
import numpy as np
class Event:
    """A class for handling and showing events.

    Wraps one tracking-data event: its sequence of Moments plus a lookup table
    from player id to (name, jersey number). ``event`` is assumed to be a dict
    with 'moments', 'home' and 'visitor' entries -- TODO confirm against the
    loader that produces it.
    """
    def __init__(self, event):
        # Wrap each raw moment in a Moment helper object.
        moments = event['moments']
        self.moments = [Moment(moment) for moment in moments]
        home_players = event['home']['players']
        guest_players = event['visitor']['players']
        players = home_players + guest_players
        player_ids = [player['playerid'] for player in players]
        player_names = [" ".join([player['firstname'],
                        player['lastname']]) for player in players]
        player_jerseys = [player['jersey'] for player in players]
        values = list(zip(player_names, player_jerseys))
        # Example: 101108: ['<NAME>', '3']
        self.player_ids_dict = dict(zip(player_ids, values))
    def get_traj(self):
        """Extract cleaned (15, 11, 2) position trajectories from the moments.

        Each trajectory spans 150 consecutive moments sampled every 10th frame
        (15 frames of 10 players + ball, x/y each). Candidates are discarded
        when fewer than 10 players are present, when the player roster changes
        within the window, when the game clock does not advance consistently,
        when motion is implausibly fast, or when the ball leaves the court.
        Returns an (N, 15, 11, 2) float32 numpy array.
        """
        moment_length = len(self.moments)
        traj_num = moment_length // 150 # 50 + 100, past 50(0,04s, 2s), future 100 (0.04s, 4s)
        all_all_player_locations = [] #(N,15,11,2)
        for i in range(traj_num):
            all_player_locations = [] # (15,11,2)
            # check if is 10 people
            flag = True
            for j in range(15):
                time_stamp = 150 * i + 10 * j
                cur_moment = self.moments[time_stamp]
                if len(cur_moment.players) < 10:
                    flag = False
            if not flag:
                continue
            # check if the same people
            flag = True
            time_stamp1 = 150 * i
            time_stamp2 = 150 * i + 140
            cur_moment1 = self.moments[time_stamp1]
            cur_moment2 = self.moments[time_stamp2]
            for j in range(10):
                if cur_moment1.players[j].id != cur_moment2.players[j].id:
                    flag = False
            if not flag:
                continue
            # check the game clock advanced by ~5.6s across the window
            # (140 frames at 0.04s); otherwise the segment is discontinuous
            time_stamp1 = 150 * i
            time_stamp2 = 150 * i + 140
            if self.moments[time_stamp2].game_clock - self.moments[time_stamp1].game_clock < -5.7 or self.moments[time_stamp2].game_clock - self.moments[time_stamp1].game_clock > -5.5:
                continue
            # collect the 15 sampled frames: 10 player positions plus the ball
            for j in range(15):
                time_stamp = 150 * i + 10 * j
                cur_moment = self.moments[time_stamp]
                player_locations = [] #(11,2)
                for k in range(10):
                    player_locations.append([cur_moment.players[k].x, cur_moment.players[k].y])
                player_locations.append([cur_moment.ball.x, cur_moment.ball.y])
                all_player_locations.append(player_locations)
            all_all_player_locations.append(all_player_locations)
        all_all_player_locations = np.array(all_all_player_locations, dtype=np.float32)
        del_list = []
        # check if the traj contiguous
        for i in range(len(all_all_player_locations)):
            seq_data = all_all_player_locations[i] #(15,11,2)
            # per-frame velocity and acceleration of the 10 players (ball excluded)
            diff_v = seq_data[1:, :-1, :] - seq_data[:-1, :-1, :]
            diff_a = diff_v[1:, :, :] - diff_v[:-1, :, :]
            diff_v = np.linalg.norm(diff_v, ord=2, axis=2)
            diff_a = np.linalg.norm(diff_a, ord=2, axis=2)
            if np.max(diff_v) >= 9 or np.max(diff_a) >= 5: # people cannot move that fast
                del_list.append(i)
        all_all_player_locations = np.delete(all_all_player_locations, del_list, axis=0)
        # check if the ball out of court
        del_list = []
        for i in range(len(all_all_player_locations)):
            seq_data = all_all_player_locations[i] #(15,11,2)
            ball_x = seq_data[:, -1, 0]
            ball_y = seq_data[:, -1, 1]
            if np.max(ball_x) > Constant.X_MAX - Constant.DIFF or np.min(ball_x) < 0 or np.max(ball_y) > Constant.Y_MAX or np.min(ball_y) < 0:
                del_list.append(i)
        all_all_player_locations = np.delete(all_all_player_locations, del_list, axis=0)
        return all_all_player_locations
    def update_radius(self, i, player_circles, ball_circle, annotations, clock_info):
        """Animation callback: move circles/labels to the positions of moment *i*."""
        moment = self.moments[i]
        for j, circle in enumerate(player_circles):
            circle.center = moment.players[j].x, moment.players[j].y
            annotations[j].set_position(circle.center)
        # quarter, minutes:seconds on the game clock, and the shot clock
        clock_test = 'Quarter {:d}\n {:02d}:{:02d}\n {:03.1f}'.format(
            moment.quarter,
            int(moment.game_clock) % 3600 // 60,
            int(moment.game_clock) % 60,
            moment.shot_clock)
        clock_info.set_text(clock_test)
        ball_circle.center = moment.ball.x, moment.ball.y
        # ball circle size tracks the raw radius (scaled down for display)
        ball_circle.radius = moment.ball.radius / Constant.NORMALIZATION_COEF
        return player_circles, ball_circle
    def show(self):
        """Animate the event on a court image and save it as 'example.gif'."""
        # Leave some space for inbound passes
        ax = plt.axes(xlim=(Constant.X_MIN,
                            Constant.X_MAX),
                      ylim=(Constant.Y_MIN,
                            Constant.Y_MAX))
        ax.axis('off')
        fig = plt.gcf()
        ax.grid(False) # Remove grid
        start_moment = self.moments[0]
        player_dict = self.player_ids_dict
        # empty annotation in the middle of the court; updated with the clock text
        clock_info = ax.annotate('', xy=[Constant.X_CENTER, Constant.Y_CENTER],
                                 color='black', horizontalalignment='center',
                                 verticalalignment='center')
        # jersey-number labels, one per player
        annotations = [ax.annotate(self.player_ids_dict[player.id][1], xy=[0, 0], color='w',
                                   horizontalalignment='center',
                                   verticalalignment='center', fontweight='bold')
                       for player in start_moment.players]
        # Prepare table
        sorted_players = sorted(start_moment.players, key=lambda player: player.team.id)
        home_player = sorted_players[0]
        guest_player = sorted_players[5]
        column_labels = tuple([home_player.team.name, guest_player.team.name])
        column_colours = tuple([home_player.team.color, guest_player.team.color])
        cell_colours = [column_colours for _ in range(5)]
        # roster entries formatted as "Name #jersey"
        home_players = [' #'.join([player_dict[player.id][0], player_dict[player.id][1]]) for player in sorted_players[:5]]
        guest_players = [' #'.join([player_dict[player.id][0], player_dict[player.id][1]]) for player in sorted_players[5:]]
        players_data = list(zip(home_players, guest_players))
        table = plt.table(cellText=players_data,
                          colLabels=column_labels,
                          colColours=column_colours,
                          colWidths=[Constant.COL_WIDTH, Constant.COL_WIDTH],
                          loc='bottom',
                          cellColours=cell_colours,
                          fontsize=Constant.FONTSIZE,
                          cellLoc='center')
        table.scale(1, Constant.SCALE)
        # table_cells = table.properties()['child_artists']
        # for cell in table_cells:
        #     cell._text.set_color('white')
        player_circles = [plt.Circle((0, 0), Constant.PLAYER_CIRCLE_SIZE, color=player.color)
                          for player in start_moment.players]
        ball_circle = plt.Circle((0, 0), Constant.PLAYER_CIRCLE_SIZE,
                                 color=start_moment.ball.color)
        for circle in player_circles:
            ax.add_patch(circle)
        ax.add_patch(ball_circle)
        anim = animation.FuncAnimation(
            fig, self.update_radius,
            fargs=(player_circles, ball_circle, annotations, clock_info),
            frames=len(self.moments), interval=Constant.INTERVAL)
        court = plt.imread("court.png")
        plt.imshow(court, zorder=0, extent=[Constant.X_MIN, Constant.X_MAX - Constant.DIFF,
                                            Constant.Y_MAX, Constant.Y_MIN])
        anim.save('example.gif', writer='imagemagick')
        # plt.show()
| StarcoderdataPython |
3271836 | """Module score_bars."""
__author__ = '<NAME> (japinol)'
from codemaster.utils.colors import Color
from codemaster.utils import utils_graphics as libg_jp
from codemaster.resources import Resource
from codemaster.config.settings import Settings
from codemaster.models.actors.actor_types import ActorType
class ScoreBar:
    """Represents a score bar: draws titles, stat values and health/power bars."""

    def __init__(self, game, screen):
        self.game = game
        self.player = game.player
        self.screen = screen
        self.level_no = None      # current level number; set on update()
        self.level_no_old = None  # previous level number; set on update()

    def draw_chars_render_text(self, text, x, y, color=Color.YELLOW):
        """Render *text* at (x, y) on the score-bar screen."""
        libg_jp.draw_text_rendered(text, x, y, self.screen, color)

    def render_stats_if_necessary(self, x, y, stats_name):
        """Draw the player stat *stats_name* at (x, y) and sync its cached old value."""
        # player stats
        libg_jp.draw_text_rendered(text=f'{self.player.stats[stats_name]}',
                                   x=x, y=y, screen=self.screen, color=Color.GREEN)
        if self.player.stats[stats_name] != self.player.stats_old[stats_name]:
            self.player.stats_old[stats_name] = self.player.stats[stats_name]

    def draw_stats(self):
        """Draw all score-bar titles and the current player/level stat values."""
        # Draw player score titles
        self.screen.blit(*Resource.txt_surfaces['sb_level_title'])
        self.screen.blit(Resource.images['sb_lives_title1'],
                         (Settings.score_pos_lives1[0], Settings.score_pos_lives_y))
        self.screen.blit(Resource.images['sb_batteries_title'],
                         (Settings.score_pos_batteries1[0], Settings.score_pos_batteries1_y))
        self.screen.blit(Resource.images['sb_apples_title'],
                         (Settings.score_pos_apples1[0], Settings.score_pos_apples_y - 5))
        self.screen.blit(*Resource.txt_surfaces['sb_score_title1'])
        self.screen.blit(Resource.images['sb_potions_health_title'],
                         (Settings.score_pos_potions_health[0], Settings.score_pos_potions_health_y - 2))
        self.screen.blit(Resource.images['sb_potions_power_title'],
                         (Settings.score_pos_potions_power[0], Settings.score_pos_potions_power_y - 2))
        self.screen.blit(Resource.images['sb_door_keys_title'],
                         (Settings.score_pos_apples1[0] + 301, Settings.score_pos_apples_y - 5))
        # bullet-type title icons, laid out left to right
        bullet_pos_x = 150
        bullets_titles = ['sb_bullets_t1', 'sb_bullets_t2', 'sb_bullets_t3', 'sb_bullets_t4']
        for bullet_title in bullets_titles:
            self.screen.blit(Resource.images[bullet_title],
                             (bullet_pos_x * Settings.font_pos_factor,
                              int(Settings.score_pos_bullets_size[1]
                                  + Settings.score_pos_bullets_y)))
            bullet_pos_x += 85
        # files-disk title icons, laid out left to right
        # (fixed: loop variable previously shadowed the list it iterated)
        f_disk_pos_x = 745
        f_disks_titles = ['sb_f_disks_t1', 'sb_f_disks_t2', 'sb_f_disks_t3', 'sb_f_disks_t4']
        for f_disk_title in f_disks_titles:
            self.screen.blit(Resource.images[f_disk_title],
                             (f_disk_pos_x * Settings.font_pos_factor,
                              Settings.score_pos_f_disks_y))
            f_disk_pos_x += 112
        if self.game.level.completed:
            self.screen.blit(Resource.images['sb_level_completed'],
                             (Settings.score_pos_apples1[0] - 130, Settings.score_pos_apples_y - 6))
        if self.game.is_magic_on:
            self.screen.blit(Resource.images['sb_magic_activated'],
                             (Settings.score_pos_apples1[0] + 410, Settings.score_pos_apples_y - 6))
        self.screen.blit(*Resource.txt_surfaces['sb_pc_level_title'])
        # Draw score stats and render them if needed
        self.render_stats_if_necessary(Settings.score_pos_lives1[1],
                                       Settings.screen_bar_near_top, 'lives')
        self.render_stats_if_necessary(Settings.score_pos_batteries1[1],
                                       Settings.screen_bar_near_top, 'batteries')
        self.render_stats_if_necessary(Settings.score_pos_score1[1],
                                       Settings.screen_bar_near_top, 'score')
        self.render_stats_if_necessary(Settings.score_pos_apples1[1],
                                       (Settings.score_pos_apples_y - 2) * Settings.font_pos_factor, 'apples')
        self.render_stats_if_necessary(Settings.score_pos_potions_health[1],
                                       Settings.score_pos_potions_health_y * Settings.font_pos_factor,
                                       ActorType.POTION_HEALTH.name)
        self.render_stats_if_necessary(Settings.score_pos_potions_power[1],
                                       Settings.score_pos_potions_power_y * Settings.font_pos_factor,
                                       ActorType.POTION_POWER.name)
        # fixed: the door_keys stat was previously rendered twice in a row
        self.render_stats_if_necessary(Settings.score_pos_door_keys[1],
                                       (Settings.score_pos_apples_y - 2) * Settings.font_pos_factor, 'door_keys')
        self.render_stats_if_necessary(Settings.score_pos_pc_level[1],
                                       Settings.screen_bar_near_top, 'level')
        # bullet-type counters under their icons
        bullet_pos_x = 179
        bullets_stats = ['bullets_t01', 'bullets_t02', 'bullets_t03', 'bullets_t04']
        for bullet_stats in bullets_stats:
            self.render_stats_if_necessary(
                bullet_pos_x * Settings.font_pos_factor,
                Settings.score_pos_bullets_y + 10 * Settings.font_pos_factor,
                bullet_stats)
            bullet_pos_x += 83
        # files-disk counters under their icons
        f_disk_pos_x = 790
        f_disks_stats = [ActorType.FILES_DISK_D.name, ActorType.FILES_DISK_C.name,
                         ActorType.FILES_DISK_B.name, ActorType.FILES_DISK_A.name]
        for f_disk_stats in f_disks_stats:
            self.render_stats_if_necessary(
                f_disk_pos_x * Settings.font_pos_factor,
                Settings.score_pos_f_disks_y + 5 * Settings.font_pos_factor,
                f_disk_stats)
            f_disk_pos_x += 112
        # current level number (1-based for display)
        libg_jp.draw_text_rendered(
            text=f'{self.level_no + 1}',
            x=Settings.score_pos_level[1],
            y=Settings.screen_bar_near_top,
            screen=self.screen, color=Color.CYAN)

    def update(self, level_no, level_no_old):
        """Redraw the health/power bars and all stats for the current frame."""
        self.level_no = level_no
        self.level_no_old = level_no_old
        # health bar
        libg_jp.draw_bar_graphic(
            self.screen, amount_pct=self.player.stats['health'] / self.player.health_total,
            x=Settings.score_pos_health1_xy[0], y=Settings.score_pos_health1_xy[1],
            bar_width=Settings.score_pos_health_size[0],
            bar_height=Settings.score_pos_health_size[1])
        # power bar, drawn just below the health bar
        libg_jp.draw_bar_graphic(
            self.screen, amount_pct=self.player.stats['power'] / self.player.power_total,
            x=Settings.score_pos_health1_xy[0],
            y=Settings.score_pos_health1_xy[1] + 39 * Settings.font_pos_factor,
            bar_width=Settings.score_pos_power_size[0],
            bar_height=Settings.score_pos_power_size[1],
            color_max=Color.BLUE, color_med=Color.BLUE_VIOLET, color_min=Color.CYAN)
        self.draw_stats()
| StarcoderdataPython |
168166 | import unittest
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn
from pyoptmat import ode, models, flowrules, hardening, utility, damage
from pyoptmat.temperature import ConstantParameter as CP
torch.set_default_tensor_type(torch.DoubleTensor)
torch.autograd.set_detect_anomaly(True)
def differ(mfn, p0, eps=1.0e-6):
v0 = mfn(p0).numpy()
puse = p0.numpy()
result = np.zeros(puse.shape)
for ind, val in np.ndenumerate(puse):
dp = np.abs(val) * eps
if dp < eps:
dp = eps
pcurr = np.copy(puse)
pcurr[ind] += dp
v1 = mfn(torch.tensor(pcurr)).numpy()
result[ind] = (v1 - v0) / dp
return result
def simple_diff(fn, p0):
    """Numeric gradient of *fn* with respect to each parameter tensor in *p0*.

    Args:
        fn: callable taking the full parameter list and returning a scalar tensor.
        p0: list of torch parameter tensors.

    Returns:
        List of numpy arrays, one per parameter, via forward differences.
    """
    gradients = []
    for index in range(len(p0)):
        # Vary only parameter *index*, holding the others at their p0 values.
        def partial_fn(candidate, index=index):
            substituted = list(p0)
            substituted[index] = candidate
            return fn(substituted)
        gradients.append(differ(partial_fn, p0[index]))
    return gradients
class CommonGradient:
    """Mixin of gradient checks: autograd vs. finite differences.

    Host TestCase classes must define in setUp: self.p (parameter tensors),
    self.model_fn (parameters -> model), self.extract_grad (model -> list of
    gradient arrays), and the self.times / self.strains / self.stresses /
    self.temperatures histories.
    """
    def test_gradient_strain(self):
        """Gradients of a strain-controlled solve match numeric derivatives."""
        bmodel = self.model_fn([Variable(pi, requires_grad=True) for pi in self.p])
        res = torch.norm(
            bmodel.solve_strain(self.times, self.strains, self.temperatures)
        )
        res.backward()
        grad = self.extract_grad(bmodel)
        # finite-difference reference gradient, one entry per parameter
        ngrad = simple_diff(
            lambda p: torch.norm(
                self.model_fn(p).solve_strain(
                    self.times, self.strains, self.temperatures
                )
            ),
            self.p,
        )
        for i, (p1, p2) in enumerate(zip(grad, ngrad)):
            print(i, p1, p2)
            self.assertTrue(np.allclose(p1, p2, rtol=1e-4))
    def test_gradient_stress(self):
        """Gradients of a stress-controlled solve match numeric derivatives."""
        bmodel = self.model_fn([Variable(pi, requires_grad=True) for pi in self.p])
        res = torch.norm(
            bmodel.solve_stress(self.times, self.stresses, self.temperatures)
        )
        res.backward()
        grad = self.extract_grad(bmodel)
        # finite-difference reference gradient, one entry per parameter
        ngrad = simple_diff(
            lambda p: torch.norm(
                self.model_fn(p).solve_stress(
                    self.times, self.stresses, self.temperatures
                )
            ),
            self.p,
        )
        # Skipping the first step helps with noise issues
        for i, (p1, p2) in enumerate(zip(grad[1:], ngrad[1:])):
            print(i, p1, p2)
            self.assertTrue(np.allclose(p1, p2, rtol=1e-4, atol=1e-7))
class TestPerfectViscoplasticity(unittest.TestCase, CommonGradient):
    """Gradient checks (via CommonGradient) for the perfect-viscoplasticity flow rule."""
    def setUp(self):
        # batch/time discretization used for all solves
        self.ntime = 10
        self.nbatch = 10
        # model parameters: elastic modulus plus the two flow-rule constants
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.p = [self.E, self.n, self.eta]
        # factory: parameter list -> fresh integrator (adjoint disabled so
        # plain autograd is exercised)
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]), flowrules.PerfectViscoplasticity(CP(p[1]), CP(p[2]))
            ),
            use_adjoint=False,
        )
        # pulls the accumulated .grad of each parameter back out of a model,
        # in the same order as self.p
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
        ]
        # (ntime, nbatch) histories: identical linear ramps in every batch
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 100.0, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
class TestIsotropicOnly(unittest.TestCase, CommonGradient):
    """Gradient check for viscoplasticity with Voce isotropic hardening only."""

    def setUp(self):
        # Problem size: 10 time steps for each of 10 batched experiments
        self.ntime = 10
        self.nbatch = 10
        # Material parameters: elasticity, flow rule, and Voce hardening
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.R = torch.tensor(100.0)
        self.d = torch.tensor(5.1)
        self.s0 = torch.tensor(10.0)
        self.p = [self.E, self.n, self.eta, self.s0, self.R, self.d]
        # Voce isotropic hardening with no kinematic contribution
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]),
                flowrules.IsoKinViscoplasticity(
                    CP(p[1]),
                    CP(p[2]),
                    CP(p[3]),
                    hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
                    hardening.NoKinematicHardeningModel(),
                ),
            ),
            use_adjoint=False,
        )
        # Gradients reported in the same order as self.p
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
            m.model.flowrule.s0.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
        ]
        # Time/strain/stress histories, transposed to shape (ntime, nbatch)
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 200.0, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
class TestHardeningViscoplasticity(unittest.TestCase, CommonGradient):
    """Gradient check with both Voce isotropic and FA kinematic hardening."""

    def setUp(self):
        # Problem size: 10 time steps for each of 10 batched experiments
        self.ntime = 10
        self.nbatch = 10
        # Material parameters: elasticity, flow rule, Voce (R, d), FA (C, g)
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.R = torch.tensor(100.0)
        self.d = torch.tensor(5.1)
        self.C = torch.tensor(1000.0)
        self.g = torch.tensor(10.0)
        self.s0 = torch.tensor(10.0)
        self.p = [self.E, self.n, self.eta, self.s0, self.R, self.d, self.C, self.g]
        # Combined isotropic + Frederick-Armstrong kinematic hardening
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]),
                flowrules.IsoKinViscoplasticity(
                    CP(p[1]),
                    CP(p[2]),
                    CP(p[3]),
                    hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
                    hardening.FAKinematicHardeningModel(CP(p[6]), CP(p[7])),
                ),
            ),
            use_adjoint=False,
        )
        # Gradients reported in the same order as self.p
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
            m.model.flowrule.s0.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.C.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.g.pvalue.grad.numpy(),
        ]
        # Time/strain/stress histories, transposed to shape (ntime, nbatch)
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 200.0, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
class TestHardeningViscoplasticityDamage(unittest.TestCase, CommonGradient):
    """Gradient check for hardening viscoplasticity coupled with damage."""

    def setUp(self):
        # Problem size: 10 time steps for each of 10 batched experiments
        self.ntime = 10
        self.nbatch = 10
        # Material parameters: elasticity, flow rule, hardening, and the
        # Hayhurst-Leckie damage triple (A, xi, phi)
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.R = torch.tensor(100.0)
        self.d = torch.tensor(5.1)
        self.C = torch.tensor(1000.0)
        self.g = torch.tensor(10.0)
        self.s0 = torch.tensor(10.0)
        self.A = torch.tensor(2000.0)
        self.xi = torch.tensor(6.5)
        self.phi = torch.tensor(1.7)
        self.p = [
            self.E,
            self.n,
            self.eta,
            self.s0,
            self.R,
            self.d,
            self.C,
            self.g,
            self.A,
            self.xi,
            self.phi,
        ]
        # Same base model as TestHardeningViscoplasticity plus a damage model
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]),
                flowrules.IsoKinViscoplasticity(
                    CP(p[1]),
                    CP(p[2]),
                    CP(p[3]),
                    hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
                    hardening.FAKinematicHardeningModel(CP(p[6]), CP(p[7])),
                ),
                dmodel=damage.HayhurstLeckie(CP(p[8]), CP(p[9]), CP(p[10])),
            ),
            use_adjoint=False,
        )
        # Gradients reported in the same order as self.p
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
            m.model.flowrule.s0.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.C.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.g.pvalue.grad.numpy(),
            m.model.dmodel.A.pvalue.grad.numpy(),
            m.model.dmodel.xi.pvalue.grad.numpy(),
            m.model.dmodel.phi.pvalue.grad.numpy(),
        ]
        # Time/strain/stress histories, transposed to shape (ntime, nbatch);
        # strains run to 0.03 (not 0.003) so that damage accumulates
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 0.03, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 200, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
class TestChabocheViscoplasticity(unittest.TestCase, CommonGradient):
    """Gradient check with multi-backstress Chaboche kinematic hardening."""

    def setUp(self):
        # Problem size: 10 time steps for each of 4 batched experiments
        self.ntime = 10
        self.nbatch = 4
        # Material parameters; C and g are vectors, one entry per backstress
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.R = torch.tensor(100.0)
        self.d = torch.tensor(5.1)
        self.C = torch.tensor([1000.0, 750.0, 100.0])
        self.g = torch.tensor([10.0, 1.2, 8.6])
        self.s0 = torch.tensor(10.0)
        self.p = [self.E, self.n, self.eta, self.s0, self.R, self.d, self.C, self.g]
        # Voce isotropic hardening plus three-term Chaboche kinematic model
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]),
                flowrules.IsoKinViscoplasticity(
                    CP(p[1]),
                    CP(p[2]),
                    CP(p[3]),
                    hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
                    hardening.ChabocheHardeningModel(CP(p[6]), CP(p[7])),
                ),
            ),
            use_adjoint=False,
        )
        # Gradients reported in the same order as self.p
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
            m.model.flowrule.s0.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.C.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.g.pvalue.grad.numpy(),
        ]
        # Time/strain/stress histories, transposed to shape (ntime, nbatch)
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 200.0, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
| StarcoderdataPython |
1660579 | <reponame>ned21/aquilon<gh_stars>1-10
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2009,2010,2011,2012,2013,2015 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Script generating bash completion code from input.xml.
'''
import os
import sys
import optparse
import xml.etree.ElementTree as ET
# Inside environments that provide ms.version, Cheetah must be requested
# through it; elsewhere (ImportError) fall through to the system install.
try:
    import ms.version
except ImportError:
    pass
else:
    ms.version.addpkg('Cheetah', '2.4.4')
from Cheetah.Template import Template

# optparse usage banner; %prog is replaced by the script name at runtime.
usage = """%prog [options] template1 [template2 ...]
or: %prog [options] --all"""
if __name__ == "__main__":
    # Build the command line interface for the completion generator.
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-o", "--outputdir", type="string", dest="output_dir",
                      help="the directory to put generated files in",
                      metavar="DIRECTORY")
    parser.add_option("-t", "--templatedir", type="string", dest="template_dir",
                      help="the directory to search for templates",
                      metavar="DIRECTORY")
    parser.add_option("-i", "--input", type="string", dest="input_filename",
                      help="name of the input XML file", metavar="FILE")
    parser.set_defaults(generate_all=False)
    parser.add_option("-a", "--all", action="store_true", dest="generate_all",
                      help="generate output for all available templates")
    # Default locations are resolved relative to this script's parent directory.
    base_dir = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "..")
    parser.set_defaults(output_dir=".",
                        template_dir=os.path.join(base_dir, "etc", "templates"),
                        input_filename=os.path.join(base_dir, "etc", "input.xml"))
    opts, templates = parser.parse_args()

    if opts.generate_all:
        # --all is mutually exclusive with an explicit template list.
        if templates:
            parser.print_help()
            sys.exit(os.EX_USAGE)
        templates.extend(entry[0:-5] for entry in os.listdir(opts.template_dir)
                         if entry.endswith('.tmpl'))

    if not templates:
        parser.print_help()
        sys.exit(os.EX_USAGE)

    tree = ET.parse(opts.input_filename)

    for template_name in templates:
        # Render each Cheetah template against the parsed input.xml tree.
        sheet = Template(file=os.path.join(opts.template_dir,
                                           template_name + ".tmpl"))
        sheet.tree = tree
        target = os.path.join(opts.output_dir,
                              "aq_" + template_name + "_completion.sh")
        with open(target, "w") as handle:
            handle.write(str(sheet))
| StarcoderdataPython |
1640708 | <reponame>mabrains/ALIGN-public<filename>tests/gdsconv/test_gds_txt.py<gh_stars>100-1000
import os
import sys
import filecmp
import pathlib
import pytest
# Directory containing this test module (and the GDS/text fixture files).
mydir = str(pathlib.Path(__file__).resolve().parent)

@pytest.fixture
def binary_dir():
    """Directory holding the gds2txt/txt2gds executables (next to python)."""
    return os.path.dirname(sys.executable)
def test_gds_txt_roundtrip (binary_dir):
    """GDS -> text -> GDS must reproduce the original binary file exactly.

    BUG FIX: the os.system() exit codes were previously ignored, so a failed
    converter invocation left stale output files behind and the filecmp check
    could pass or fail misleadingly. Assert each command succeeds first.
    """
    assert os.system (f"{binary_dir}/gds2txt {mydir}/file.gds > {mydir}/fromgds5.txt") == 0 #nosec
    assert os.system (f"{binary_dir}/txt2gds {mydir}/fromgds5.txt -o {mydir}/fromtxt5.gds") == 0 #nosec
    assert (filecmp.cmp (f"{mydir}/file.gds", f"{mydir}/fromtxt5.gds"))
def test_txt_gds_roundtrip (binary_dir):
    """Text -> GDS -> text must reproduce the original text file exactly.

    BUG FIX: the os.system() exit codes were previously ignored, so a failed
    converter invocation left stale output files behind and the filecmp check
    could pass or fail misleadingly. Assert each command succeeds first.
    """
    assert os.system (f"{binary_dir}/txt2gds {mydir}/file.txt -o {mydir}/fromtxt6.gds") == 0 #nosec
    assert os.system (f"{binary_dir}/gds2txt {mydir}/fromtxt6.gds > {mydir}/fromgds6.txt") == 0 #nosec
    assert (filecmp.cmp (f"{mydir}/file.txt", f"{mydir}/fromgds6.txt"))
| StarcoderdataPython |
1676230 | # =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DescribeResourceGroupItemsAction(BaseAction):
    """CLI action wrapping the DescribeResourceGroupItems IaaS API call."""

    # API action name, CLI sub-command, and usage banner shown in help output
    action = 'DescribeResourceGroupItems'
    command = 'describe-resource-group-items'
    usage = '%(prog)s [-r <resource_groups> ...] [-f <conf_file>]'

    @classmethod
    def add_ext_arguments(cls, parser):
        """Register this action's extra command-line options on *parser*."""
        parser.add_argument("-g", "--resource-groups", dest="resource_groups",
                            action="store", type=str, default=None,
                            help="an array including IDs of resource groups.")
        parser.add_argument("-r", "--resources", dest="resources",
                            action="store", type=str, default=None,
                            help="an array including IDs of resources, used to query all resource groups for the resource.")
        parser.add_argument("-v", "--verbose", dest="verbose",
                            action="store", type=int, default=1,
                            help="Whether to return redundant message.if it is 1, return the details of the instance related other resources.")
        parser.add_argument("-k", "--sort-key", dest="sort_key",
                            action="store", type=str, default=None,
                            help="the sort key, which defaults be create_time.")
        parser.add_argument("-R", "--reverse", dest="reverse",
                            action="store", type=int, default=0,
                            help="0 for Ascending order, 1 for Descending order.")

    @classmethod
    def build_directive(cls, options):
        """Translate parsed CLI *options* into the API request dictionary.

        Comma-separated ID strings are exploded into lists; limit/offset come
        from the options shared by all list-style actions.
        """
        directive = {
            "resource_groups": explode_array(options.resource_groups),
            "resources": explode_array(options.resources),
            "verbose": options.verbose,
            "sort_key": options.sort_key,
            "reverse": options.reverse,
            "limit": options.limit,
            "offset": options.offset
        }
        return directive
| StarcoderdataPython |
1643989 | <filename>home_taks/home_task4_0.py
# Raise each element of the list to the power of itself and show the result.
list_for_chenging = [1, 2, 3, 4, 5, 6, 7, 8]
new_list = list(map(lambda value: value ** value, list_for_chenging))
print(new_list)
3289115 | <filename>export.py<gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cgi
from modules.gitdox_sql import *
from modules.ether import ether_to_sgml, get_socialcalc, build_meta_tag, ether_to_csv
from modules.logintools import login
import zipfile
from StringIO import StringIO
from shutil import copyfileobj
import sys, tempfile
from collections import defaultdict
def create_zip(content_name_pairs):
    """Bundle (content, archive_name) pairs into an in-memory zip archive.

    Args:
        content_name_pairs: iterable of (content, name) tuples; content may be
            a byte string or unicode text (unicode is retried as UTF-8).

    Returns:
        The StringIO object holding the finished archive; read it with
        .getvalue().
    """
    io_file = StringIO()
    zf = zipfile.ZipFile(io_file, mode='w', compression=zipfile.ZIP_DEFLATED)
    try:
        for content, name in content_name_pairs:
            try:
                zf.writestr(name, content)
            except Exception as e:
                # Unicode content: retry with an explicit UTF-8 encode. If that
                # also fails, emit an HTTP header first so the CGI error page
                # renders, then re-raise the ORIGINAL exception.
                try:
                    zf.writestr(name, content.encode("utf8"))
                except Exception:
                    print("Content-type:text/html\r\n\r\n")
                    raise e
    finally:
        # BUG FIX: the archive was never close()d, so the zip central
        # directory was never written and consumers received a truncated,
        # unreadable file. close() finalizes the archive in the buffer.
        zf.close()
    return io_file
def export_all_docs(config=None, corpus_filter=None, status=None, extension="sgml"):
    """Export every matching document (plus corpus metadata) as a zip download.

    Writes HTTP download headers and the zip bytes to stdout (CGI response).

    Args:
        config: export stylesheet name, or the "[CSV]" sentinel for CSV output
        corpus_filter: restrict the export to one corpus (None = all corpora)
        status: restrict to documents with this status (None = any)
        extension: file extension used for stylesheet-converted spreadsheets
    """
    docs = get_all_docs(corpus_filter,status)
    files = []
    all_corpus_meta = defaultdict(dict)
    for doc in docs:
        doc_id, docname, corpus, mode, content = doc
        # Collect each corpus' metadata once, from its first document
        if corpus not in all_corpus_meta:
            corpus_meta = get_doc_meta(doc_id, corpus=True)
            for md in corpus_meta:
                # md columns: presumably (id, doc_id, key, value) — key/value
                # are taken from positions 2 and 3
                key, val = md[2], md[3]
                all_corpus_meta[corpus][key] = val
        if corpus_filter is None:  # All documents exported, use corpus prefix to avoid name clashes
            filename = corpus + "_" + docname
        else:  # Only exporting one user specified corpus, name documents without prefix
            filename = docname
        if mode == "xml" and config!="[CSV]":
            # XML documents get a <meta> wrapper built from their metadata
            content = build_meta_tag(doc_id) + content.strip() + "\n</meta>\n"
            files.append((content,filename + ".xml"))
        elif mode == "ether":
            # Spreadsheet documents live in EtherCalc under a gd_<corpus>_<doc> name
            ether_name = "_".join(["gd", corpus, docname])
            if config=="[CSV]":
                csv = ether_to_csv(ether_url,ether_name)
                files.append((csv, filename + ".csv"))
            else:
                sgml = ether_to_sgml(get_socialcalc(ether_url, ether_name),doc_id,config=config)
                files.append((sgml, filename + "." + extension))
    # One tab-separated metadata file per corpus
    for corp in all_corpus_meta:
        serialized_meta = ""
        for key in all_corpus_meta[corp]:
            serialized_meta += key + "\t" + all_corpus_meta[corp][key] + "\n"
        files.append((serialized_meta.encode("utf8"), "_meta_" + corp + ".tab"))
    # Spool the archive through a named temp file, then stream it to stdout
    zip_io = create_zip(files)
    temp = tempfile.NamedTemporaryFile(delete=False, mode='w+b')
    temp.write(zip_io.getvalue())
    temp.close()
    if corpus_filter is not None:
        zipname = corpus_filter + ".zip"
    else:
        zipname = "export.zip"
    print("Content-type: application/download")
    print("Content-Disposition: attachment; filename=" + zipname)
    print("")
    sys.stdout.flush()
    with open(temp.name,'rb') as z:
        copyfileobj(z, sys.stdout)
    os.remove(temp.name)
def export_doc(doc_id, stylesheet=None):
    """Export a single spreadsheet document as an SGML attachment on stdout.

    Args:
        doc_id: database ID of the document to export
        stylesheet: export configuration name passed through to ether_to_sgml
    """
    docname, corpus, filename, status, assignee_username, mode, schema = get_doc_info(doc_id)
    # Spreadsheets are stored in EtherCalc under a gd_<corpus>_<doc> name
    ether_name = "_".join(["gd", corpus, docname])
    sgml = ether_to_sgml(get_socialcalc(ether_url, ether_name), doc_id, config=stylesheet)
    cpout = ""
    cpout += "Content-Type: application/download\n"
    cpout += "Content-Disposition: attachment; filename=" + corpus + "_" + docname + ".sgml\n\n"
    # NOTE(review): `unicode` only exists on Python 2 — this branch would raise
    # NameError on Python 3; confirm the deployment interpreter.
    if isinstance(cpout,unicode):
        cpout = str(cpout.encode("utf8"))
    cpout += sgml
    print(cpout)
if __name__ == "__main__":
    # CGI entry point: authenticate the user, read the form parameters, then
    # dispatch to the single-document or all-documents exporter.
    #print("Content-type:text/html\r\n\r\n")
    import cgitb
    #cgitb.enable()
    from paths import ether_url
    thisscript = os.environ.get('SCRIPT_NAME', '')
    action = None
    theform = cgi.FieldStorage()
    # The user database lives in users/ next to this script
    scriptpath = os.path.dirname(os.path.realpath(__file__)) + os.sep
    userdir = scriptpath + "users" + os.sep
    action, userconfig = login(theform, userdir, thisscript, action)
    user = userconfig["username"]
    admin = userconfig["admin"]
    export_stylesheet = "--default--"
    # Output extension for converted spreadsheets (defaults to "sgml")
    if "extension" in theform:
        extension = theform.getvalue("extension")
    else:
        extension = "sgml"
    # NOTE(review): the "--default--" initial value above is always
    # overwritten here (stylesheet name or None) — confirm it is dead.
    if "stylesheet" in theform:
        export_stylesheet = theform.getvalue("stylesheet")
    else:
        export_stylesheet = None
    if "status" in theform:
        status = theform.getvalue("status")
    else:
        status = None
    if "corpus" in theform:
        corpus = theform.getvalue("corpus")
    else:
        corpus = None
    # "--ALL--" sentinels from the UI mean "no filter"
    if corpus == "--ALL--":
        corpus = None
    if status == "--ALL--":
        status = None
    if "docs" in theform:
        docs = theform.getvalue("docs")
        # "%all%" exports every matching document; otherwise docs is one doc ID
        if docs == "%all%":
            export_all_docs(export_stylesheet,corpus_filter=corpus,extension=extension,status=status)
        else:
            export_doc(docs, export_stylesheet)
1767419 | <filename>setup.py<gh_stars>0
# Copyright 2021 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
from pathlib import Path
from setuptools import setup, find_packages
# Package meta-data.
NAME = "FLORIS"
DESCRIPTION = "A controls-oriented engineering wake model."
URL = "https://github.com/NREL/FLORIS"
EMAIL = "<EMAIL>"
AUTHOR = "NREL National Wind Technology Center"
REQUIRES_PYTHON = ">=3.8.0"

# What packages are required for this module to be executed?
REQUIRED = [
    # simulation
    "attrs",
    "pyyaml",
    "numexpr",
    "numpy>=1.20",
    "scipy>=1.1",
    # tools
    "matplotlib>=3",
    "pandas",
    "shapely",
    # utilities
    "coloredlogs>=10.0",
]

# What packages are optional?
EXTRAS = {
    "docs": {"readthedocs-sphinx-ext", "Sphinx", "sphinxcontrib-napoleon"},
    "develop": {"pytest", "coverage[toml]", "pre-commit", "black", "isort"},
}

# The version string is kept as the sole (stripped) content of
# floris/version.py so it has a single source of truth.
ROOT = Path(__file__).parent
with open(ROOT / "floris" / "version.py") as version_file:
    VERSION = version_file.read().strip()

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=DESCRIPTION,
    long_description_content_type="text/markdown",
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    # Ship all packages except the test tree; bundle the turbine YAML library
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
    package_data={'floris': ['turbine_library/*.yaml']},
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license="Apache-2.0",
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy"
    ],
)
| StarcoderdataPython |
1703271 | import os.path
def write_to_file(filename: str, text: str) -> None:
    """Write *text* to <parent-of-cwd>/output/<filename>.tex as UTF-8.

    Args:
        filename: base name of the output file, without the ".tex" suffix.
        text: full file contents to write.
    """
    # BUG FIX: the original called os.chdir("..") on every invocation, which
    # permanently mutated the process working directory — repeated calls
    # climbed the directory tree. Resolve the parent path without chdir.
    parent_dir = os.path.abspath(os.path.join(os.curdir, os.pardir))
    out_dir = os.path.join(parent_dir, "output")
    # Robustness: create the output directory if missing instead of raising.
    os.makedirs(out_dir, exist_ok=True)
    complete_name = os.path.join(out_dir, filename + ".tex")
    # "with" guarantees the handle is flushed and closed even on write errors.
    with open(complete_name, "w", encoding="utf-8") as file:
        file.write(text)
| StarcoderdataPython |
18200 | <filename>cracking_the_coding_interview_qs/8.7-8.8/get_all_permutations_of_string_test.py
import unittest
from get_all_permutations_of_string import get_all_permutations_of_string, get_all_permutations_of_string_with_dups
class Test_Case_Get_All_Permutations_Of_String(unittest.TestCase):
    """Unit tests for the string-permutation helpers.

    NOTE(review): both expected lists assume a specific generation order —
    confirm the implementation guarantees that order, otherwise compare sets.
    """

    def test_get_all_permutations_of_string(self):
        # "tea" has 3! = 6 distinct permutations
        self.assertListEqual(get_all_permutations_of_string("tea"), ['tea', 'eta', 'ate', 'tae', 'eat', 'aet'])

    def test_get_all_permutations_of_string_with_dups(self):
        # Duplicate characters must not yield duplicate permutations
        self.assertListEqual(get_all_permutations_of_string_with_dups("aaa"), ['aaa'])
        self.assertListEqual(get_all_permutations_of_string_with_dups("teat"), ['ttea', 'ttae', 'teta', 'teat', 'tate', 'taet', 'etta', 'etat', 'eatt', 'atte', 'atet', 'aett'])
1791526 | <gh_stars>100-1000
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
NUM_CLASSES = 1000
def dense_block(x, blocks, name):
    """Apply `blocks` consecutive densely-connected conv blocks to tensor x."""
    for block_id in range(1, blocks + 1):
        # Each conv block adds 32 feature maps (the growth rate)
        x = conv_block(x, 32, name='{}_block{}'.format(name, block_id))
    return x
def transition_block(x, reduction, name):
    """Downsampling transition: BN -> ReLU -> 1x1 conv (channel squeeze) -> avg pool."""
    if backend.image_data_format() == 'channels_last':
        channel_axis = 3
    else:
        channel_axis = 1
    out = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=name + '_bn')(x)
    out = layers.Activation('relu', name=name + '_relu')(out)
    # Shrink the channel count by `reduction` using a 1x1 convolution
    squeezed_channels = int(backend.int_shape(out)[channel_axis] * reduction)
    out = layers.Conv2D(
        squeezed_channels,
        1,
        use_bias=False,
        name=name + '_conv')(out)
    out = layers.AveragePooling2D(2, strides=2, name=name + '_pool')(out)
    return out
def conv_block(x, growth_rate, name):
    """DenseNet conv block: BN-ReLU-1x1 bottleneck, BN-ReLU-3x3, then concat."""
    if backend.image_data_format() == 'channels_last':
        channel_axis = 3
    else:
        channel_axis = 1
    # Bottleneck: 1x1 conv producing 4x the growth rate channels
    branch = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=name + '_0_bn')(x)
    branch = layers.Activation('relu', name=name + '_0_relu')(branch)
    branch = layers.Conv2D(
        4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(branch)
    # 3x3 conv producing exactly growth_rate new feature maps
    branch = layers.BatchNormalization(
        axis=channel_axis, epsilon=1.001e-5, name=name + '_1_bn')(branch)
    branch = layers.Activation('relu', name=name + '_1_relu')(branch)
    branch = layers.Conv2D(
        growth_rate, 3, padding='same', use_bias=False,
        name=name + '_2_conv')(branch)
    # Dense connectivity: append the new maps to the incoming features
    return layers.Concatenate(axis=channel_axis, name=name + '_concat')([x, branch])
def DenseNet121(input_shape=None):
    """Build a DenseNet-121 ImageNet classifier (1000-way softmax head).

    Args:
        input_shape: optional input shape tuple; validated and normalized by
            imagenet_utils.obtain_input_shape (default 224x224, min 32).

    Returns:
        A keras Model mapping image batches to class probabilities.
    """
    input_shape = imagenet_utils.obtain_input_shape(
        input_shape,
        default_size=224,
        min_size=32,
        data_format=backend.image_data_format(),
        require_flatten=True)
    img_input = layers.Input(shape=input_shape)
    # Channel axis position depends on the backend image data format
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    # Stem: padded 7x7/2 convolution followed by a padded 3x3/2 max pool
    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)
    # Four dense blocks (6, 12, 24, 16 conv blocks — the "121" layout),
    # each followed by a 0.5x channel-reducing transition except the last
    x = dense_block(x, 6, name='conv2')
    x = transition_block(x, 0.5, name='pool2')
    x = dense_block(x, 12, name='conv3')
    x = transition_block(x, 0.5, name='pool3')
    x = dense_block(x, 24, name='conv4')
    x = transition_block(x, 0.5, name='pool4')
    x = dense_block(x, 16, name='conv5')
    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)
    # Classification head: global average pool then softmax dense layer
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    imagenet_utils.validate_activation('softmax', None)
    x = layers.Dense(NUM_CLASSES, activation='softmax',
                     name='predictions')(x)
    # Create model.
    model = training.Model(img_input, x, name='densenet121')
    return model
| StarcoderdataPython |
1619203 | from pincushion import reddit
# Fetch any newly saved and newly upvoted posts for the configured account.
r = reddit.Reddit()
r.get_new_saved_posts()
r.get_new_upvoted_posts()
| StarcoderdataPython |
3396524 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 12:59:05 2020
@author: <NAME>
Explanation: To change from categorical to ordinal variable
"""
import pandas as pd
df_train = pd.read_csv("train.csv")

# Pull the honorific out of the passenger name, e.g. "Braund, Mr. Owen" -> "Mr"
# (raw string avoids the invalid-escape warning for "\.")
df_train['Title'] = df_train.Name.str.extract(r' ([A-Za-z]+)\.', expand = False)

# BUG FIX: the original normalized titles with a per-row loop assigning via
# chained indexing (df_train.Title[i] = ...), which raises pandas'
# SettingWithCopyWarning and is not guaranteed to write back to the frame.
# The vectorized replace/.loc form below is equivalent and safe:
# Mlle/Ms -> Miss, Mme -> Mrs, anything else uncommon -> Rare.
df_train['Title'] = df_train['Title'].replace({'Mlle': 'Miss', 'Ms': 'Miss', 'Mme': 'Mrs'})
common_titles = ['Mr', 'Mrs', 'Miss', 'Master']
df_train.loc[~df_train['Title'].isin(common_titles), 'Title'] = 'Rare'

from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# Ordinal variable: map each title string to an integer code
df_train['Title'] = le.fit_transform(df_train['Title'])
# Invertir Ordinal variable: recover the original title strings
df_train['Title'] = le.inverse_transform(df_train['Title'])
| StarcoderdataPython |
3229501 | <reponame>charlesfu4/MT
import numpy as np
import matplotlib.lines as mlines
from matplotlib import pyplot as plt
def set_size(width, fraction=1):
    """Compute matplotlib figure dimensions (inches) for a LaTeX text width.

    Parameters
    ----------
    width: float
        Document textwidth or columnwidth in pts
    fraction: float, optional
        Fraction of the width which you wish the figure to occupy

    Returns
    -------
    fig_dim: tuple
        (width, height) of the figure in inches
    """
    # LaTeX points to inches conversion factor
    points_to_inches = 1 / 72.27
    # Golden ratio gives an aesthetically pleasing height/width
    # https://disq.us/p/2940ij3
    golden_ratio = (5**.5 - 1) / 2
    inches_wide = width * fraction * points_to_inches
    return (inches_wide, inches_wide * golden_ratio)
def set_size_mul(width, fraction=1, subplots=(1, 1)):
    """Compute matplotlib figure dimensions (inches) for subplot grids.

    Parameters
    ----------
    width: float or string
        Document width in points, or a predefined document type
        ('thesis' or 'beamer')
    fraction: float, optional
        Fraction of the width which you wish the figure to occupy
    subplots: array-like, optional
        The number of (rows, columns) of subplots.

    Returns
    -------
    fig_dim: tuple
        (width, height) of the figure in inches
    """
    # Known document presets; any other value is taken as a width in points
    presets = {'thesis': 426.79135, 'beamer': 307.28987}
    width_pt = presets.get(width, width)
    golden_ratio = (5**.5 - 1) / 2  # https://disq.us/p/2940ij3
    fig_width_in = width_pt * fraction * (1 / 72.27)
    rows, cols = subplots[0], subplots[1]
    # Scale the height with the subplot grid aspect ratio
    fig_height_in = fig_width_in * golden_ratio * (rows / cols)
    return (fig_width_in, fig_height_in)
## plot dataframe creation
def plot_df(arr, name):
    """Build a DataFrame with one inserted column per row of *arr*.

    NOTE(review): every column is inserted under the SAME label
    ("{}".format(name)), so the second insert raises ValueError
    ("cannot insert ..., already exists") — the format probably should
    include the index, e.g. "{}_{}".format(name, i). Also `pd` (pandas)
    is never imported in this module; confirm where this is called from.
    """
    plot_df = pd.DataFrame()
    i = 0
    for row in arr:
        plot_df.insert(i, "{}".format(name), row, True)
        i += 1
    return plot_df
def plot_ci_forest(n, ci_term, estimator, features, targets):
    """Confidence-interval plot for a forest estimator's n-th prediction.

    Parameters
    ----------
    n: int
        Index of the test sample whose prediction horizon is plotted.
    ci_term: float
        Lower quantile of the interval (e.g. 0.05 for a 90 % band).
    estimator:
        Fitted sklearn-style forest exposing an `estimators_` attribute.
    features, targets:
        Test features and ground-truth targets (pandas frames, row-indexed).
    """
    # One forecast per tree -> empirical predictive distribution
    per_tree = []
    for est in estimator.estimators_:
        per_tree.append(est.predict(features.iloc[n,:].to_numpy().reshape(1,-1)))
    predictions = np.array(per_tree)
    prediction_list = predictions.reshape(predictions.shape[0], predictions.shape[2])
    # BUG FIX: `font` was referenced below but never defined in this function
    # (NameError at runtime); define it the same way the sibling plotting
    # helpers (plot_conf_dynamic / plot_conf_static) do.
    font = {'family' : 'Lucida Grande',
            'weight' : 'bold',
            'size'   : 15}
    fig = plt.figure(figsize=(16,7))
    plt.plot(np.quantile(prediction_list, 0.5, axis = 0), 'gx-', label='Prediction')
    plt.plot(np.quantile(prediction_list, ci_term, axis = 0), 'g--', label='{} % lower bond'.format(ci_term*100))
    plt.plot(np.quantile(prediction_list, 1-ci_term, axis = 0), 'g--', label='{} % upper bond'.format(100-ci_term*100))
    plt.plot(targets.iloc[n,:].to_numpy(), 'ro', label='Ground truth')
    plt.xlabel('hours', **font)
    plt.ylabel('KWh', **font)
    plt.legend(loc='upper left', fontsize = 15)
    plt.show()
## confidence interval check function for forest estimator
def verf_ci_qunatile_forest(quantile, estimator, ttest, pftest, n_samples):
    """Empirically verify per-sample quantile coverage of a forest ensemble.

    For each of the first *n_samples* test rows, compute the ensemble's
    lower/median/upper quantile forecasts, count how many ground-truth values
    fall outside the [quantile, 1-quantile] band, and plot the out-of-bound
    fraction per sample.

    Parameters
    ----------
    quantile: float
        Lower tail probability (e.g. 0.05 checks a 90 % interval).
    estimator:
        Fitted sklearn-style forest exposing `estimators_`.
    ttest:
        Ground-truth targets, one row per sample (pandas frame).
    pftest:
        Feature rows matching ttest (pandas frame).
    n_samples: int
        Number of leading test rows to evaluate.
    """
    q_ub = []
    q_lb = []
    q_m = []
    for idx in range(n_samples):
        # One forecast per tree -> empirical predictive distribution
        predictions = []
        for est in estimator.estimators_:
            predictions.append(est.predict(pftest.iloc[idx,:].to_numpy().reshape(1,-1)))
        predictions = np.array(predictions)
        prediction_list = predictions.reshape(predictions.shape[0], predictions.shape[2])
        q_ub.append(np.quantile(prediction_list, 1 - quantile, axis = 0))
        q_lb.append(np.quantile(prediction_list, quantile, axis = 0))
        q_m.append(np.quantile(prediction_list, 0.5, axis = 0))
    q_ub = np.array(q_ub)
    q_lb = np.array(q_lb)
    q_m = np.array(q_m)
    precentage_list = []
    err_count = 0
    for i in range(n_samples):
        count = 0
        for j in range(ttest.shape[1]):
            # NOTE(review): boundary values count as out-of-bound (>=, <=) —
            # confirm this is intended rather than strict inequality.
            if ttest.iloc[i,j] >= q_ub[i,j] or ttest.iloc[i,j] <= q_lb[i,j]:
                count += 1
        # A sample "fails" when its miss rate exceeds the nominal 2*quantile
        if count/ttest.shape[1] > quantile*2:
            err_count += 1
        precentage_list.append(count/ttest.shape[1])
    print("out_of_bound_pecentage", err_count/n_samples)
    fig = plt.figure(figsize = (16,7))
    font = {'family' : 'Lucida Grande',
            'weight' : 'bold',
            'size'   : 15}
    plt.rc('font', **font)
    plt.xlabel('Number of testing sets', **font)
    plt.ylabel('Out_of_bound_error', **font)
    plt.plot(precentage_list)
## confidence interval plot of n's prediction
def plot_conf_dynamic(predicted_error, test_y, ypred_t, n, ci_term):
    """Plot sample n's prediction with a Gaussian band from predicted variance.

    Parameters
    ----------
    predicted_error:
        Per-point predicted variance; its sqrt is used as the band's std.
    test_y:
        Ground-truth targets (pandas frame, row-indexed).
    ypred_t:
        Point predictions, numpy array of shape (samples, horizon).
    n: int
        Index of the sample to plot.
    ci_term: float
        Gaussian z-value: 1.96, 1.645, or 1.28 (95/90/80 % intervals).
    """
    # confidence interval
    # NOTE(review): `alpha` stays unbound (NameError below) when ci_term is
    # not exactly one of the three supported z-values — confirm callers.
    if(ci_term == 1.96):
        alpha = 5
    elif(ci_term == 1.645):
        alpha = 10
    elif(ci_term == 1.28):
        alpha = 20
    std = np.sqrt(predicted_error)
    ypred_t_ub = ypred_t + ci_term*std
    ypred_t_lb = ypred_t - ci_term*std
    # plot
    fig = plt.figure(figsize=(16,7))
    font = {'family' : 'Lucida Grande',
            'weight' : 'bold',
            'size'   : 15}
    plt.rc('font', **font)
    plt.style.use('seaborn')
    plt.plot(ypred_t[n, :].reshape(-1,1), 'gx-',label='Prediction')
    plt.plot(ypred_t_ub[n, :].reshape(-1,1), 'g--', label='{} % upper bond'.format(100-alpha*0.5))
    plt.plot(ypred_t_lb[n, :].reshape(-1,1), 'g--', label='{} % lower bond'.format(alpha*0.5))
    plt.plot(test_y.iloc[n, :].to_numpy().reshape(-1,1), 'ro', label='Ground truth')
    #plt.fill(np.concatenate([xx, xx[::-1]]),
    #         np.concatenate([y_upper, y_lower[::-1]]),
    #         alpha=.5, fc='b', ec='None', label='90% prediction interval')
    plt.xlabel('hours', **font)
    plt.ylabel('KWh', **font)
    plt.legend(loc='upper left', fontsize = 15)
    plt.show()
def plot_conf_static(val_y, val_y_pred, test_y, test_y_pred, n, ci_term):
    """Plot sample n's prediction with a band from validation residual std.

    Unlike plot_conf_dynamic, the band width is a single global std computed
    from the validation residuals (val_y - val_y_pred).

    Parameters
    ----------
    val_y, val_y_pred:
        Validation targets and predictions used to estimate residual std.
    test_y, test_y_pred:
        Test targets (pandas frame) and predictions (numpy array).
    n: int
        Index of the sample to plot.
    ci_term: float
        Gaussian z-value: 1.96, 1.645, or 1.28 (95/90/80 % intervals).
    """
    # confidence interval
    # NOTE(review): `alpha` stays unbound (NameError below) when ci_term is
    # not exactly one of the three supported z-values — confirm callers.
    if(ci_term == 1.96):
        alpha = 5
    elif(ci_term == 1.645):
        alpha = 10
    elif(ci_term == 1.28):
        alpha = 20
    std = np.std(val_y - val_y_pred).to_numpy()
    ypred_t_ub = test_y_pred + ci_term*std
    ypred_t_lb = test_y_pred - ci_term*std
    # plot
    fig = plt.figure(figsize=(16,7))
    font = {'family' : 'Lucida Grande',
            'weight' : 'bold',
            'size'   : 15}
    plt.rc('font', **font)
    plt.style.use('seaborn')
    plt.plot(test_y_pred[n, :].reshape(-1,1), 'gx-',label='Prediction')
    plt.plot(ypred_t_ub[n, :].reshape(-1,1), 'g--', label='{} % upper bond'.format(100-alpha*0.5))
    plt.plot(ypred_t_lb[n, :].reshape(-1,1), 'g--', label='{} % lower bond'.format(alpha*0.5))
    plt.plot(test_y.iloc[n, :].to_numpy().reshape(-1,1), 'ro', label='Ground truth')
    #plt.fill(np.concatenate([xx, xx[::-1]]),
    #         np.concatenate([y_upper, y_lower[::-1]]),
    #         alpha=.5, fc='b', ec='None', label='90% prediction interval')
    plt.xlabel('hours', **font)
    plt.ylabel('KWh', **font)
    plt.legend(loc='upper left', fontsize = 15)
    plt.show()
## Distribution plot funciton
def distri_plot(df):
    """Draw a seaborn distribution plot for every column of ``df`` on a grid."""
    column_count = df.shape[1]
    # Three plots per row; the extra row absorbs any remainder.
    _, axes = plt.subplots(column_count // 3 + 1, 3, figsize=(15, 11), sharex=False)
    for position, column in enumerate(df.columns):
        sns.distplot(df[column], ax=axes[position // 3, position % 3])
    plt.tight_layout()
## Scatter plot function
def scatter_plot(df):
    """Draw energy-vs-feature scatter plots for every column of ``df`` on a grid."""
    column_count = df.shape[1]
    # Two plots per row; the extra row absorbs any remainder.
    _, axes = plt.subplots(column_count // 2 + 1, 2, figsize=(15, 11), sharex=False)
    for position, column in enumerate(df.columns):
        sns.scatterplot(x=column, y="energy", data=df, ax=axes[position // 2, position % 2])
    plt.tight_layout()
## plot dataframe creation
def plot_df(arr, name):
    """
    Build a DataFrame whose i-th column holds the i-th row of ``arr``.

    Every column carries the same label ``name``; duplicate labels are
    deliberately allowed because downstream plotting only uses the label
    for display.

    :param arr: iterable of equal-length rows (e.g. a 2-D array).
    :param name: label applied to every inserted column.
    :return: pandas.DataFrame with one column per row of ``arr``.
    """
    frame = pd.DataFrame()  # renamed: the original shadowed the function name
    for position, row in enumerate(arr):
        # allow_duplicates must be passed by keyword: it is keyword-only in
        # current pandas, so the original positional `True` breaks there.
        frame.insert(position, "{}".format(name), row, allow_duplicates=True)
    return frame
def calibration_plot(observation, prediction, name, index = 0):
    """
    Plot achieved coverage of ensemble prediction intervals against their
    nominal confidence level, and save the figure.

    Parameters
    ----------
    observation: pandas.DataFrame
        The real values observed.
    prediction: list
        The predictions of the different estimators in the ensemble,
        stacked so that quantiles can be taken along axis 0.
    name: str
        The name for the plot: "calibration_{name}_{index}"
    index: int, default = 0
        Row (sample) index to be checked.
    """
    obs = observation.to_numpy()
    # Nominal confidence levels: coarse steps from 10 % to 90 %, then a
    # finer sweep above 90 % where calibration usually degrades.
    ci_range1 = np.arange(0.1, 1., 0.1)
    ci_range2 = np.arange(0.9, 1., 0.025)
    ci_range = np.concatenate([ci_range1, ci_range2], axis=0)

    fraction = []  # fraction of observations inside each nominal CI
    for ci in ci_range:
        # Empirical central interval of the ensemble at confidence `ci`.
        ub = np.quantile(prediction, 1 - (1 - ci) / 2, axis=0)[index, :]
        lb = np.quantile(prediction, (1 - ci) / 2, axis=0)[index, :]
        # Vectorized replacement of the original per-column Python loop.
        inside = (obs[index, :] >= lb) & (obs[index, :] <= ub)
        fraction.append(np.count_nonzero(inside) / observation.shape[1])

    # Plot the calibration plot.
    plt.style.use('seaborn')
    fig, ax = plt.subplots(figsize=set_size(398))
    tex_fonts = {
        # Use LaTeX to write all text
        "text.usetex": True,
        "font.family": "serif",
        # Use 10pt font in plots, to match 11pt font in document
        "axes.labelsize": 11,
        "font.size": 11,
        # Make the legend/label fonts a little smaller
        "legend.fontsize": 11,
        "xtick.labelsize": 11,
        "ytick.labelsize": 11
    }
    plt.rcParams.update(tex_fonts)

    # Calibration curve: achieved coverage (x) vs nominal confidence (y).
    plt.plot(fraction, ci_range, marker='o', linewidth=1)
    # Perfect-calibration reference diagonal.
    line = mlines.Line2D([0, 1], [0, 1], color='black')
    ax.add_line(line)
    fig.suptitle('Calibration plot for Confidence Interval')
    ax.set_xlabel('Contain percentage yielded')
    # Fixed typo in the original user-facing label ("Expacted").
    ax.set_ylabel('Expected contain percentage')
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.show()
    fig.savefig('../figures/london/calibration_{}_{}.png'.format(name, index), dpi=300,
                format='png', bbox_inches='tight')
| StarcoderdataPython |
70564 | import os
import warnings
from typing import Optional, Tuple, Union, List
import joblib
import numpy as np
from ConfigSpace import Configuration
from sklearn import clone
from sklearn.base import is_classifier
from sklearn.model_selection import check_cv
from sklearn.model_selection._validation import _fit_and_predict, _check_is_permutation
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
from dswizard.components.base import EstimatorComponent
from dswizard.core.config_cache import ConfigCache
from dswizard.core.logger import ProcessLogger
from dswizard.core.model import CandidateId, ConfigKey, Dataset
from dswizard.core.worker import Worker
from dswizard.pipeline.pipeline import FlexiblePipeline
from dswizard.util import util
from dswizard.util.util import model_file
warnings.filterwarnings("ignore", category=UserWarning)
class SklearnWorker(Worker):
    """Worker that fits and evaluates sklearn-based dswizard pipelines."""

    def compute(self,
                ds: Dataset,
                cid: CandidateId,
                config: Optional[Configuration],
                cfg_cache: Optional[ConfigCache],
                cfg_keys: Optional[List[ConfigKey]],
                pipeline: FlexiblePipeline,
                process_logger: ProcessLogger) -> List[float]:
        """Evaluate *pipeline* on *ds* and return its cross-validated scores.

        When *config* is None a concrete configuration is first derived by
        fitting a clone on the complete data set (hyperparameters are drawn
        via *cfg_cache*/*cfg_keys* and recorded by *process_logger*).  The
        per-fold fitted models are persisted under *cid*.
        """
        if config is None:
            # Derive configuration on complete data set. Test performance via CV
            cloned_pipeline = clone(pipeline)
            cloned_pipeline.cfg_cache = cfg_cache
            cloned_pipeline.cfg_keys = cfg_keys
            cloned_pipeline.fit(ds.X, ds.y, logger=process_logger)
            config = process_logger.get_config(cloned_pipeline)

        # Score a fresh clone configured with the selected hyperparameters.
        cloned_pipeline = clone(pipeline)
        cloned_pipeline.set_hyperparameters(config.get_dictionary())
        score, _, _, models = self._score(ds, cloned_pipeline)
        self._store_models(cid, models)
        return score

    def transform_dataset(self, ds: Dataset, cid: CandidateId, component: EstimatorComponent,
                          config: Configuration) -> Tuple[np.ndarray, Optional[float]]:
        """Apply *component*, configured by *config*, to *ds*.

        Classifiers augment ``ds.X`` with the predicted class probabilities
        plus the predicted label and also yield a score; plain transformers
        replace ``X`` and yield ``None`` as score.  Fitted models are
        persisted under *cid*.
        """
        component.set_hyperparameters(config.get_dictionary())

        if is_classifier(component):
            score, y_pred, y_prob, models = self._score(ds, component)
            try:
                y_pred = y_pred.astype(float)
            except ValueError:
                # Non-numeric class labels stay as-is in the feature matrix.
                pass
            X = np.hstack((ds.X, y_prob, np.reshape(y_pred, (-1, 1))))
        else:
            models = [component.fit(ds.X, ds.y)]
            X = models[0].transform(ds.X)
            score = None

        self._store_models(cid, models)
        return X, score

    def _score(self, ds: Dataset, estimator: Union[EstimatorComponent, FlexiblePipeline], n_folds: int = 4):
        """Cross-validate *estimator* on *ds*.

        :return: ``([ds.metric score, f1 score], y_pred, y_prob, fold models)``
        """
        y = ds.y
        y_pred, y_prob, models = self._cross_val_predict(estimator, ds.X, y, cv=n_folds)
        # Meta-learning only considers f1. Calculate f1 score for structure search
        score = [util.score(y, y_prob, y_pred, ds.metric), util.score(y, y_prob, y_pred, 'f1')]
        return score, y_pred, y_prob, models

    @staticmethod
    def _cross_val_predict(pipeline, X, y=None, cv=None):
        """Variant of sklearn's ``cross_val_predict`` that also returns the
        per-fold fitted pipelines.

        Relies on the private ``sklearn.model_selection._validation
        ._fit_and_predict`` returning ``(probabilities, test_indices)``
        tuples -- this is tied to a specific sklearn version; verify on
        upgrade.
        """
        X, y, groups = indexable(X, y, None)
        cv = check_cv(cv, y, classifier=is_classifier(pipeline))

        prediction_blocks = []
        probability_blocks = []
        fitted_pipelines = []
        for train, test in cv.split(X, y, groups):
            cloned_pipeline = clone(pipeline)
            probability_blocks.append(_fit_and_predict(cloned_pipeline, X, y, train, test, 0, {}, 'predict_proba'))
            # NOTE(review): `predict` runs on the *full* X per fold, while
            # the reordering below assumes test-fold-sized blocks -- confirm
            # this is intended.
            prediction_blocks.append(cloned_pipeline.predict(X))
            fitted_pipelines.append(cloned_pipeline)

        # Concatenate the predictions
        probabilities = [prob_block_i for prob_block_i, _ in probability_blocks]
        predictions = [pred_block_i for pred_block_i in prediction_blocks]
        test_indices = np.concatenate([indices_i for _, indices_i in probability_blocks])

        if not _check_is_permutation(test_indices, _num_samples(X)):
            raise ValueError('cross_val_predict only works for partitions')

        # Invert the fold ordering so results line up with the input rows.
        inv_test_indices = np.empty(len(test_indices), dtype=int)
        inv_test_indices[test_indices] = np.arange(len(test_indices))

        probabilities = np.concatenate(probabilities)
        predictions = np.concatenate(predictions)

        if isinstance(predictions, list):
            return [p[inv_test_indices] for p in predictions], [p[inv_test_indices] for p in
                                                                probabilities], fitted_pipelines
        else:
            return predictions[inv_test_indices], probabilities[inv_test_indices], fitted_pipelines

    def _store_models(self, cid: CandidateId, models: List[EstimatorComponent]):
        """Serialize the fitted fold *models* to ``<workdir>/<model_file(cid)>``."""
        name = model_file(cid)
        file = os.path.join(self.workdir, name)
        with open(file, 'wb') as f:
            joblib.dump(models, f)
| StarcoderdataPython |
1767001 | """Logger module for setting up a logger."""
import logging
import logging.handlers
def setup_custom_logger(name: str, propagate: bool = False) -> logging.Logger:
    """Sets up a custom logger.

    Parameters
    ----------
    name : str
        Name of the file where this function is called.
    propagate : bool, optional
        Whether to propagate logger messages or not, by default False

    Returns
    -------
    logging.Logger
        The logger.

    Examples
    ---------
    >>> logger = setup_custom_logger(__name__)

    Detailed information, typically of interest only when diagnosing
    problems: ``logger.debug("")``.  Confirmation that things work as
    expected: ``logger.info("")``.  Something unexpected happened:
    ``logger.warning("")``.  A function could not be performed:
    ``logger.error("")``.  The program may be unable to continue:
    ``logger.critical("")``.
    """
    log_format = "%(asctime)s [%(levelname)s]: %(filename)s(%(funcName)s:%(lineno)s) >> %(message)s"
    log_filemode = "w"  # w: overwrite; a: append (only effective if a filename is ever configured)

    # Configure the root logger once; basicConfig is a no-op when the root
    # logger already has handlers.
    logging.basicConfig(format=log_format, filemode=log_filemode, level=logging.INFO)

    logger = logging.getLogger(name)
    logger.propagate = propagate

    # Bug fix: the original attached a fresh StreamHandler on *every* call
    # for the same name, so repeated setup duplicated every console line.
    if not logger.handlers:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(logging.Formatter(log_format))
        logger.addHandler(console_handler)

    return logger
| StarcoderdataPython |
3298815 | # Generated by Django 3.1.6 on 2021-09-04 06:16
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the snapshot-only columns from the historical job table."""

    dependencies = [
        ('jobs', '0023_auto_20210903_0736'),
    ]

    # Fields stripped from the `historicaljob` snapshot model.
    _fields_to_remove = (
        'alert_on_failure_email',
        'alert_on_start_email',
        'alert_on_success_email',
        'description',
        'name',
        'notebook_task',
        'retries',
        'template_task',
        'time_out',
        'wait_time_between_retry',
    )

    operations = [
        migrations.RemoveField(model_name='historicaljob', name=field_name)
        for field_name in _fields_to_remove
    ]
| StarcoderdataPython |
1689609 | import cv2
import numpy as np
# Motion detection by frame differencing: read consecutive frame pairs,
# threshold their absolute difference, and box the moving blobs.
cap = cv2.VideoCapture('data/vtest.avi')
ret, frame1 = cap.read()
ret, frame2 = cap.read()
while cap.isOpened():
    # Pixels that changed between the two frames are the motion candidates.
    diff = cv2.absdiff(frame1, frame2)
    # Bug fix: the original converted `frame1` here instead of `diff`, so the
    # computed difference image was never used and thresholding at 20 flagged
    # most of the raw scene as "movement".
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    # Dilation closes small holes so each mover forms a single contour.
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(frame1, contours, -1, (255, 0, 0), 2)
    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        if cv2.contourArea(contour) < 10:
            continue  # ignore tiny specks of noise
        # Bug fix: boundingRect spans (x, y)-(x + w, y + h); the original used
        # (x + 2*w, y + 2*h), drawing boxes twice the contour's size.
        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame1, "status: MOVEMENT", (10, 20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 3)
    cv2.imshow('frame', frame1)
    # Slide the frame window forward by one.
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv2.waitKey(40) == 27:  # Esc quits
        break
cv2.destroyAllWindows()
cap.release()
3306111 | import numpy as np
import pandas as pd
import copy
def agent_add_liq(params, substep, state_history, prev_state, policy_input):
    """
    Applies a single-asset liquidity add to the acting agent's local state.

    The agent hands over ``ri_deposit`` of risk asset ``asset_id`` and is
    credited the pro-rata amount of pool shares.  With symmetric adds
    enabled, the matching base-asset amount is moved from the agent's
    holdings as well.
    """
    asset_id = policy_input['asset_id']
    agent_id = policy_input['agent_id']
    # Work on a copy so prev_state stays untouched.
    agents = copy.deepcopy(prev_state['uni_agents'])
    acting_agent = agents[agents['m'] == agent_id]

    pool_shares = prev_state['UNI_S' + asset_id]
    base_reserve = prev_state['UNI_Q' + asset_id]
    risk_reserve = prev_state['UNI_R' + asset_id]
    deposit = policy_input['ri_deposit']

    # Shares minted pro-rata to the deposit's fraction of the risk reserve.
    minted = deposit * pool_shares // risk_reserve

    agents.at[agent_id, 'r_' + asset_id + '_out'] = acting_agent['r_' + asset_id + '_out'].values - deposit
    agents.at[agent_id, 'r_' + asset_id + '_in'] = acting_agent['r_' + asset_id + '_in'].values + deposit
    agents.at[agent_id, 's_' + asset_id] = acting_agent['s_' + asset_id].values + minted

    if params['ENABLE_SYMMETRIC_LIQ']:
        # Matching base-asset deposit keeps the agent's position symmetric.
        alpha = deposit / prev_state['UNI_R' + asset_id]
        base_delta = (1 + alpha) * base_reserve - base_reserve
        agents.at[agent_id, 'h'] = acting_agent['h'].values - base_delta
        agents.at[agent_id, 'q_' + asset_id] = acting_agent['q_' + asset_id].values + base_delta

    return ('uni_agents', agents)
def agent_remove_liq(params, substep, state_history, prev_state, policy_input):
    """
    Applies a single-asset liquidity removal to the acting agent's state.

    The agent burns ``UNI_burn`` pool shares and receives the pro-rata
    amount of risk asset ``asset_id``; with symmetric removal enabled, the
    pro-rata base-asset amount is credited back as well.
    """
    asset_id = policy_input['asset_id']
    agent_id = policy_input['agent_id']
    # Work on a copy so prev_state stays untouched.
    agents = copy.deepcopy(prev_state['uni_agents'])
    acting_agent = agents[agents['m'] == agent_id]

    pool_shares = prev_state['UNI_S' + asset_id]
    shares_burned = policy_input['UNI_burn']
    base_reserve = prev_state['UNI_Q' + asset_id]
    risk_reserve = prev_state['UNI_R' + asset_id]

    # Pro-rata redemption; floor division keeps token amounts integral.
    risk_returned = shares_burned * risk_reserve // pool_shares
    base_returned = shares_burned * base_reserve // pool_shares

    agents.at[agent_id, 'r_' + asset_id + '_out'] = acting_agent['r_' + asset_id + '_out'].values + risk_returned
    agents.at[agent_id, 'r_' + asset_id + '_in'] = acting_agent['r_' + asset_id + '_in'].values - risk_returned

    if params['ENABLE_SYMMETRIC_LIQ']:
        agents.at[agent_id, 'h'] = acting_agent['h'].values + base_returned
        agents.at[agent_id, 'q_' + asset_id] = acting_agent['q_' + asset_id].values - base_returned

    agents.at[agent_id, 's_' + asset_id] = acting_agent['s_' + asset_id].values - shares_burned
    return ('uni_agents', agents)
def getInputPrice(input_amount, input_reserve, output_reserve, params):
    """
    Constant-product swap output for a given input, net of the trading fee.

    The fee is expressed as ``params['fee_numerator'] /
    params['fee_denominator']``; floor division keeps the result in whole
    token units.
    """
    fee_num = params['fee_numerator']
    fee_den = params['fee_denominator']
    effective_in = input_amount * fee_num
    return (effective_in * output_reserve) // (input_reserve * fee_den + effective_in)
def agent_q_to_r_trade(params, substep, state_history, prev_state, policy_input):
    """
    Updates the acting agent's state for a base-asset -> risk-asset trade.

    The agent spends ``q_sold`` of the base asset and receives the
    constant-product output of risk asset ``asset_id``, net of fees.
    """
    asset_id = policy_input['asset_id']
    agent_id = policy_input['agent_id']
    # Work on a copy so prev_state stays untouched.
    agents = copy.deepcopy(prev_state['uni_agents'])
    acting_agent = agents[agents['m'] == agent_id]

    base_spent = policy_input['q_sold']  # amount of Q being sold by the user
    base_reserve = prev_state['UNI_Q' + asset_id]
    risk_reserve = prev_state['UNI_R' + asset_id]

    risk_received = getInputPrice(base_spent, base_reserve, risk_reserve, params)
    agents.at[agent_id, 'h'] = acting_agent['h'].values - base_spent
    agents.at[agent_id, 'r_' + asset_id + '_out'] = acting_agent['r_' + asset_id + '_out'].values + risk_received
    return ('uni_agents', agents)
def agent_r_to_q_trade(params, substep, state_history, prev_state, policy_input):
    """
    Updates the acting agent's state for a risk-asset -> base-asset trade.

    The agent spends ``ri_sold`` of risk asset ``asset_id`` and receives the
    constant-product output of the base asset, net of fees.
    """
    asset_id = policy_input['asset_id']
    agent_id = policy_input['agent_id']
    # Work on a copy so prev_state stays untouched.
    agents = copy.deepcopy(prev_state['uni_agents'])
    acting_agent = agents[agents['m'] == agent_id]

    risk_spent = policy_input['ri_sold']  # amount of Ri being sold by the user
    base_reserve = prev_state['UNI_Q' + asset_id]
    risk_reserve = prev_state['UNI_R' + asset_id]
    base_received = getInputPrice(risk_spent, risk_reserve, base_reserve, params)

    agents.at[agent_id, 'h'] = acting_agent['h'].values + base_received
    agents.at[agent_id, 'r_' + asset_id + '_out'] = acting_agent['r_' + asset_id + '_out'].values - risk_spent
    return ('uni_agents', agents)
def agent_r_to_r_swap(params, substep, state_history, prev_state, policy_input):
    """
    Updates agent local state for a swap between two risk assets.

    The agent sells ``ri_sold`` of ``asset_id`` and receives the
    constant-product output of ``purchased_asset_id``, net of fees.
    """
    asset_id = policy_input['asset_id']
    purchased_asset_id = policy_input['purchased_asset_id']
    agent_id = policy_input['agent_id']
    # Bug fix: the original mutated prev_state['uni_agents'] in place; every
    # sibling updater works on a deepcopy, so do the same here.
    U_agents = copy.deepcopy(prev_state['uni_agents'])
    chosen_agent = U_agents[U_agents['m'] == agent_id]
    delta_Ri = policy_input['ri_sold']  # amount of Ri being sold by the user
    if delta_Ri == 0:
        return ('uni_agents', U_agents)
    Q = prev_state['UNI_' + purchased_asset_id + asset_id]
    Ri = prev_state['UNI_' + asset_id + purchased_asset_id]
    delta_Rk = getInputPrice(delta_Ri, Ri, Q, params)
    # Bug fix: use .values so scalars (not pandas Series) are written via
    # .at, matching the sibling updaters.
    U_agents.at[agent_id, 'r_' + asset_id + '_out'] = chosen_agent['r_' + asset_id + '_out'].values - delta_Ri
    U_agents.at[agent_id, 'r_' + purchased_asset_id + '_out'] = chosen_agent['r_' + purchased_asset_id + '_out'].values + delta_Rk
    # NOTE(review): the original flagged the sign/direction of the *_in
    # updates as uncertain; preserved as written -- confirm against the
    # pool-side model.
    U_agents.at[agent_id, 'r_' + asset_id + '_in'] = chosen_agent['r_' + asset_id + '_in'].values + delta_Rk
    U_agents.at[agent_id, 'r_' + purchased_asset_id + '_in'] = chosen_agent['r_' + purchased_asset_id + '_in'].values - delta_Ri
    return ('uni_agents', U_agents)
8085 | import collections
import nltk
import os
from sklearn import (
datasets, model_selection, feature_extraction, linear_model, naive_bayes,
ensemble
)
def extract_features(corpus):
    '''Vectorize *corpus* into bigram TF-IDF features.'''
    # Words that might invert a sentence's meaning -- kept out of the
    # stop-word list so sentiment cues survive.
    keep_words = [
        'what', 'but', 'if', 'because', 'as', 'until', 'against',
        'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again',
        'further', 'then', 'once', 'here', 'there', 'why', 'how', 'all', 'any',
        'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own',
        'same', 'so', 'than', 'too', 'can', 'will', 'just', 'don', 'should']
    # Standard NLTK stop words minus the whitelist above.
    stop_words = [
        word for word in nltk.corpus.stopwords.words("english")
        if word not in keep_words
    ]
    # Turn the raw text into token counts (unigrams + bigrams, seen >= twice).
    vectorizer = feature_extraction.text.CountVectorizer(
        lowercase=True,
        tokenizer=nltk.word_tokenize,
        min_df=2,
        ngram_range=(1, 2),
        stop_words=stop_words,
    )
    counts = vectorizer.fit_transform(corpus)
    # Re-weight the counts by inverse document frequency.
    return feature_extraction.text.TfidfTransformer().fit_transform(counts)
# Load the labelled movie-review corpus; sub-directory names become classes.
data_directory = 'movie_reviews'
movie_sentiment_data = datasets.load_files(data_directory, shuffle=True)
print('{} files loaded.'.format(len(movie_sentiment_data.data)))
print('They contain the following classes: {}.'.format(
    movie_sentiment_data.target_names))

# TF-IDF features, then a 70/30 train/test split (fixed seed for repeatability).
movie_tfidf = extract_features(movie_sentiment_data.data)

X_train, X_test, y_train, y_test = model_selection.train_test_split(
    movie_tfidf, movie_sentiment_data.target, test_size=0.30, random_state=42)

# Train four independent classifiers and report held-out accuracy for each.
# similar to nltk.NaiveBayesClassifier.train()
clf1 = linear_model.LogisticRegression()
clf1.fit(X_train, y_train)
print('Logistic Regression performance: {}'.format(clf1.score(X_test, y_test)))

clf2 = linear_model.SGDClassifier()
clf2.fit(X_train, y_train)
print('SGDClassifier performance: {}'.format(clf2.score(X_test, y_test)))

clf3 = naive_bayes.MultinomialNB()
clf3.fit(X_train, y_train)
print('MultinomialNB performance: {}'.format(clf3.score(X_test, y_test)))

clf4 = naive_bayes.BernoulliNB()
clf4.fit(X_train, y_train)
print('BernoulliNB performance: {}'.format(clf4.score(X_test, y_test)))

# Hard-vote (majority) ensemble over the four classifiers above.
voting_model = ensemble.VotingClassifier(
    estimators=[('lr', clf1), ('sgd', clf2), ('mnb', clf3), ('bnb', clf4)],
    voting='hard')
voting_model.fit(X_train, y_train)
print('Voting classifier performance: {}'.format(
    voting_model.score(X_test, y_test)))
1686037 | #!/usr/bin/env python3
"""Simple multiprocess HTTP server written using an event loop."""
import argparse
import os
import socket
import signal
import time
import asyncio
import aiohttp
import aiohttp.server
from aiohttp import websocket
# Command-line interface: bind host/port and the number of worker
# processes to fork.
ARGS = argparse.ArgumentParser(description="Run simple HTTP server.")
ARGS.add_argument(
    '--host', action="store", dest='host',
    default='127.0.0.1', help='Host name')
ARGS.add_argument(
    '--port', action="store", dest='port',
    default=8080, type=int, help='Port number')
ARGS.add_argument(
    '--workers', action="store", dest='workers',
    default=2, type=int, help='Number of workers.')
class HttpRequestHandler(aiohttp.server.ServerHttpProtocol):
    """Static-file handler: serves the current working directory, rendering
    directory listings as HTML (legacy, pre-async/await aiohttp API)."""

    @asyncio.coroutine
    def handle_request(self, message, payload):
        """Serve one request: 404 for unknown/unsafe paths, 302 for
        directories missing a trailing slash, an HTML listing for
        directories, chunked file contents otherwise."""
        print('{}: method = {!r}; path = {!r}; version = {!r}'.format(
            os.getpid(), message.method, message.path, message.version))

        path = message.path
        # Reject non-printable paths, '/.'-style traversal, and anything not
        # rooted at '/'; map the rest into the current directory.
        if (not (path.isprintable() and path.startswith('/')) or '/.' in path):
            path = None
        else:
            path = '.' + path
            if not os.path.exists(path):
                path = None
            else:
                isdir = os.path.isdir(path)

        if not path:
            raise aiohttp.HttpProcessingError(code=404)

        # Redirect directory URLs to their canonical trailing-slash form.
        if isdir and not path.endswith('/'):
            path = path + '/'
            raise aiohttp.HttpProcessingError(
                code=302, headers=(('URI', path), ('Location', path)))

        response = aiohttp.Response(
            self.writer, 200, http_version=message.version)
        response.add_header('Transfer-Encoding', 'chunked')

        # content encoding: honour the client's Accept-Encoding preference
        accept_encoding = message.headers.get('accept-encoding', '').lower()
        if 'deflate' in accept_encoding:
            response.add_header('Content-Encoding', 'deflate')
            response.add_compression_filter('deflate')
        elif 'gzip' in accept_encoding:
            response.add_header('Content-Encoding', 'gzip')
            response.add_compression_filter('gzip')

        response.add_chunking_filter(1025)

        if isdir:
            # Directory listing as a minimal HTML <ul>; hidden entries skipped.
            response.add_header('Content-type', 'text/html')
            response.send_headers()

            response.write(b'<ul>\r\n')
            for name in sorted(os.listdir(path)):
                if name.isprintable() and not name.startswith('.'):
                    try:
                        bname = name.encode('ascii')
                    except UnicodeError:
                        # Skip entries whose names aren't pure ASCII.
                        pass
                    else:
                        if os.path.isdir(os.path.join(path, name)):
                            response.write(b'<li><a href="' + bname +
                                           b'/">' + bname + b'/</a></li>\r\n')
                        else:
                            response.write(b'<li><a href="' + bname +
                                           b'">' + bname + b'</a></li>\r\n')
            response.write(b'</ul>')
        else:
            # Stream the file contents in 8 KiB chunks.
            response.add_header('Content-type', 'text/plain')
            response.send_headers()

            try:
                with open(path, 'rb') as fp:
                    chunk = fp.read(8192)
                    while chunk:
                        response.write(chunk)
                        chunk = fp.read(8192)
            except OSError:
                response.write(b'Cannot open')

        yield from response.write_eof()
        if response.keep_alive():
            self.keep_alive(True)
class ChildProcess:
    """Forked worker: serves HTTP on the shared socket and answers the
    supervisor's heartbeats over a pipe pair."""

    def __init__(self, up_read, down_write, args, sock):
        # Pipe fds to/from the supervisor plus the pre-bound listen socket.
        self.up_read = up_read
        self.down_write = down_write
        self.args = args
        self.sock = sock

    def start(self):
        """Run the HTTP server event loop; never returns (exits the process)."""
        # start server
        self.loop = loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        def stop():
            self.loop.stop()
            os._exit(0)
        loop.add_signal_handler(signal.SIGINT, stop)

        # Serve on the socket inherited from the supervisor.
        f = loop.create_server(
            lambda: HttpRequestHandler(debug=True, keep_alive=75),
            sock=self.sock)
        srv = loop.run_until_complete(f)
        x = srv.sockets[0]
        print('Starting srv worker process {} on {}'.format(
            os.getpid(), x.getsockname()))

        # heartbeat
        # NOTE: legacy scheduling API; `async` is a keyword in Python >= 3.7.
        asyncio.async(self.heartbeat())

        asyncio.get_event_loop().run_forever()
        os._exit(0)

    @asyncio.coroutine
    def heartbeat(self):
        """Answer supervisor PINGs with PONGs over the websocket-framed
        pipes; stop the loop when the supervisor side dies."""
        # setup pipes
        read_transport, read_proto = yield from self.loop.connect_read_pipe(
            aiohttp.StreamProtocol, os.fdopen(self.up_read, 'rb'))
        write_transport, _ = yield from self.loop.connect_write_pipe(
            aiohttp.StreamProtocol, os.fdopen(self.down_write, 'wb'))

        # Websocket framing is reused here as a simple message protocol.
        reader = read_proto.reader.set_parser(websocket.WebSocketParser)
        writer = websocket.WebSocketWriter(write_transport)

        while True:
            try:
                msg = yield from reader.read()
            except:
                # A failed read means the supervisor process went away.
                print('Supervisor is dead, {} stopping...'.format(os.getpid()))
                self.loop.stop()
                break
            if msg.tp == websocket.MSG_PING:
                writer.pong()
            elif msg.tp == websocket.MSG_CLOSE:
                break

        read_transport.close()
        write_transport.close()
class Worker:
    """Supervisor-side handle for one forked child process.

    Forks the child, then monitors it over a pipe pair using
    websocket-framed PING/PONG heartbeats; an unresponsive child is killed
    and restarted.
    """

    _started = False  # class-level default; flipped per instance in start()

    def __init__(self, loop, args, sock):
        self.loop = loop
        self.args = args
        self.sock = sock
        self.start()

    def start(self):
        """Fork a child; the parent registers monitoring, the child serves HTTP."""
        assert not self._started
        self._started = True

        # One pipe per direction: `up` carries supervisor->child PINGs
        # (child reads up_read), `down` carries child->supervisor PONGs.
        up_read, up_write = os.pipe()
        down_read, down_write = os.pipe()
        args, sock = self.args, self.sock

        pid = os.fork()
        if pid:
            # parent: keep only its own pipe ends and start monitoring.
            os.close(up_read)
            os.close(down_write)
            # NOTE: legacy API; `async` is a keyword in Python >= 3.7.
            asyncio.async(self.connect(pid, up_write, down_read))
        else:
            # child: keep only its own pipe ends.
            os.close(up_write)
            os.close(down_read)

            # cleanup after fork
            asyncio.set_event_loop(None)

            # setup process
            process = ChildProcess(up_read, down_write, args, sock)
            process.start()

    @asyncio.coroutine
    def heartbeat(self, writer):
        """Ping the child every 15 s; restart it after 30 s without a PONG."""
        while True:
            yield from asyncio.sleep(15)
            if (time.monotonic() - self.ping) < 30:
                writer.ping()
            else:
                print('Restart unresponsive worker process: {}'.format(
                    self.pid))
                self.kill()
                self.start()
                return

    @asyncio.coroutine
    def chat(self, reader):
        """Consume messages from the child; PONGs refresh the liveness stamp."""
        while True:
            try:
                msg = yield from reader.read()
            except:
                # Pipe breakage counts as an unresponsive child.
                print('Restart unresponsive worker process: {}'.format(
                    self.pid))
                self.kill()
                self.start()
                return
            if msg.tp == websocket.MSG_PONG:
                self.ping = time.monotonic()

    @asyncio.coroutine
    def connect(self, pid, up_write, down_read):
        """Wrap the pipe fds in websocket-framed transports and start the
        chat/heartbeat monitoring tasks for child *pid*."""
        # setup pipes
        read_transport, proto = yield from self.loop.connect_read_pipe(
            aiohttp.StreamProtocol, os.fdopen(down_read, 'rb'))
        write_transport, _ = yield from self.loop.connect_write_pipe(
            aiohttp.StreamProtocol, os.fdopen(up_write, 'wb'))

        # websocket protocol
        reader = proto.reader.set_parser(websocket.WebSocketParser)
        writer = websocket.WebSocketWriter(write_transport)

        # store info
        self.pid = pid
        self.ping = time.monotonic()
        self.rtransport = read_transport
        self.wtransport = write_transport
        self.chat_task = asyncio.Task(self.chat(reader))
        self.heartbeat_task = asyncio.Task(self.heartbeat(writer))

    def kill(self):
        """Tear down monitoring tasks/transports and SIGTERM the child."""
        self._started = False
        self.chat_task.cancel()
        self.heartbeat_task.cancel()
        self.rtransport.close()
        self.wtransport.close()
        os.kill(self.pid, signal.SIGTERM)
class Supervisor:
    """Parent process: owns the listening socket and keeps the workers alive."""

    def __init__(self, args):
        self.loop = asyncio.get_event_loop()
        self.args = args
        self.workers = []

    def start(self):
        """Bind the shared listening socket, fork the workers, and run forever."""
        # The socket is created once here and inherited by every forked worker.
        listener = self.sock = socket.socket()
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind((self.args.host, self.args.port))
        listener.listen(1024)
        listener.setblocking(False)

        # Each Worker forks one child serving on the shared socket.
        self.workers = [Worker(self.loop, self.args, listener)
                        for _ in range(self.args.workers)]

        self.loop.add_signal_handler(signal.SIGINT, lambda: self.loop.stop())
        self.loop.run_forever()
def main():
    """Parse CLI args and run the supervisor (requires os.fork support)."""
    if getattr(os, "fork", None) is None:
        print("os.fork isn't supported by your OS")
        return
    args = ARGS.parse_args()
    # Allow `--host name:port` as shorthand that overrides --port.
    if ':' in args.host:
        host_part, _, port_part = args.host.partition(':')
        args.host = host_part
        args.port = int(port_part)
    Supervisor(args).start()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4810303 | <reponame>MKLab-ITI/news-popularity-prediction
__author__ = '<NAME> (<EMAIL>)'
import os
from news_popularity_prediction.datautil.common import load_pickle, store_pickle
def get_within_dataset_user_anonymization(output_file,
                                          document_gen,
                                          comment_generator,
                                          extract_user_name):
    """
    Returns a user-name -> anonymized integer id map, caching it on disk.

    The map is loaded from *output_file* when that file already exists;
    otherwise it is computed from the dataset and stored there for reuse.

    :param output_file: The file path where the map will be stored.
    :param document_gen: A generator that yields dictionaries containing online discussions.
    :param comment_generator: Returns a generator that yields the comments from an online discussion.
    :param extract_user_name: Extracts the user name from a comment.
    :return: The map from raw data user names to anonymized integer ids.
    """
    if os.path.exists(output_file):
        return load_dataset_user_anonymizer(output_file)

    _, anonymizer = calculate_within_dataset_user_anonymization(document_gen,
                                                                comment_generator,
                                                                extract_user_name)
    store_dataset_user_anonymizer(output_file, anonymizer)
    return anonymizer
def calculate_within_dataset_user_anonymization(document_gen,
                                                comment_generator,
                                                extract_user_name):
    """
    Calculates a dictionary anonymizer from user name to anonymized integer id.

    :param document_gen: A generator that yields dictionaries containing online discussions.
    :param comment_generator: Returns a generator that yields the comments from an online discussion.
    :param extract_user_name: Extracts the user name from a comment.
    :return: user_name_set: A set of raw data user names.
             within_dataset_user_anonymize: The map from raw data user names to anonymized integer ids.
    """
    # A set gives O(1) membership directly; the original accumulated a list
    # and manually de-duplicated (and rebound its append method) every
    # 10000 documents, which was both fragile and O(n) in memory churn.
    user_name_set = set()

    # Iterate over all dataset documents.
    document_counter = 0
    for document in document_gen:
        document_counter += 1
        if document_counter % 10000 == 0:
            # Progress indicator for large datasets.
            print(document_counter)

        comment_gen = comment_generator(document)

        # First comment is the initial post.
        initial_post = next(comment_gen)
        user_name_set.add(extract_user_name(initial_post))

        # If others exist, they are the actual comments.
        for comment in comment_gen:
            user_name_set.add(extract_user_name(comment))

    # If anonymous users can post, the Anonymous name can be included here.
    print("Number of distinct users in dataset:", len(user_name_set))

    # Within dataset anonymization.
    within_dataset_user_anonymize = dict(zip(user_name_set, range(len(user_name_set))))

    return user_name_set, within_dataset_user_anonymize
def store_dataset_user_anonymizer(output_file,
                                  within_dataset_user_anonymize):
    """
    Stores the dictionary anonymizer from user name to anonymized integer id.

    Thin wrapper around the project's ``store_pickle`` helper.

    :param output_file: The file path where the map will be stored.
    :param within_dataset_user_anonymize: The map from raw data user names to anonymized integer ids.
    :return: None
    """
    store_pickle(output_file, within_dataset_user_anonymize)
def load_dataset_user_anonymizer(output_file):
    """
    Loads the dictionary anonymizer from user name to anonymized integer id.

    Thin wrapper around the project's ``load_pickle`` helper.

    :param output_file: The file path where the map is stored.
    :return: within_dataset_user_anonymize: The map from raw data user names to anonymized integer ids.
    """
    within_dataset_user_anonymize = load_pickle(output_file)

    return within_dataset_user_anonymize
| StarcoderdataPython |
1743609 | <gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
webcamera server
for opencv 3.0
クライアントからデータを受け取る
画像データはAES暗号がかけられている
通信はsslで行う
現状はMySQLに日付と画像を格納
NoSQLを使ってみたいという願望がある
settingファイルからポート番号と
"""
import SocketServer
import cv2
import numpy
import socket
import sys
import datetime
import ConfigParser
import readSetting as RS
import TCPServer as tcpserver
import TCPServerWithSSL as sslserver
#for OpenCV3.0 python interface
#picturePath = ''
if __name__ == "__main__":
#read setting file
res = RS.getSettings([["settings","host"],["settings","port"]])
if res != [None]:
HOST,PORT = res
else:
print "fail to get setting for host,port."
exit()
res = RS.getSettings([["settings","cert_path"],["settings","key_path"]])
if res != [None]:
cert_path,key_path = res
else:
print "fail to get setting for cert_path,key_path."
exit()
print 'starting server : port %d'%PORT
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
"""
server = SocketServer.TCPServer((HOST,PORT),
tcpserver.testHandler)
print "listening", server.socket.getsockname()
server.serve_forever()
"""
sslserver.MySSL_ThreadingTCPServer((HOST,PORT),sslserver.testHandler,
cert_path,
key_path).serve_forever()
| StarcoderdataPython |
161588 | <gh_stars>100-1000
import traceback
from rdflib.namespace import Namespace
from owmeta_core.dataobject import ObjectProperty
from owmeta_core.datasource import GenericTranslation
from owmeta_core.data_trans.csv_ds import CSVDataSource, CSVDataTranslator
from .. import CONTEXT
from ..network import Network
from ..worm import Worm
from ..website import Website
from ..evidence import Evidence
from .data_with_evidence_ds import DataWithEvidenceDataSource
from .common_data import DTMixin, DSMixin
class WormAtlasCellListDataSource(DSMixin, CSVDataSource):
    """WormAtlas cell-list export: one row per cell with its lineage name
    and description.  Tab-delimited despite the CSV base class."""

    class_context = CONTEXT

    # Expected column layout of the export file.
    csv_header = ['Cell', 'Lineage Name', 'Description']
    # The file uses tabs, not commas.
    csv_field_delimiter = '\t'
class WormAtlasCellListDataTranslation(GenericTranslation):
    """Translation record that also tracks the neurons source used."""

    class_context = CONTEXT

    neurons_source = ObjectProperty()

    def defined_augment(self):
        base_defined = super(WormAtlasCellListDataTranslation, self).defined_augment()
        return base_defined and self.neurons_source.has_defined_value()

    def identifier_augment(self):
        base_identifier = super(WormAtlasCellListDataTranslation, self).identifier_augment()
        neurons_identifier = self.neurons_source.onedef().identifier
        return self.make_identifier(base_identifier.n3() + neurons_identifier.n3())
class WormAtlasCellListDataTranslator(DTMixin, CSVDataTranslator):
    """Translate the WormAtlas cell list plus an existing neurons data
    source into a DataWithEvidenceDataSource whose neurons carry lineage
    names and descriptions, under evidence referencing the WormAtlas page."""

    class_context = CONTEXT

    input_type = (WormAtlasCellListDataSource, DataWithEvidenceDataSource)
    output_type = DataWithEvidenceDataSource
    translation_type = WormAtlasCellListDataTranslation

    def make_translation(self, sources):
        # NOTE(review): the superclass method is called without `sources`;
        # presumably the base make_translation accepts zero arguments --
        # confirm against CSVDataTranslator/GenericTranslation.
        res = super(WormAtlasCellListDataTranslator, self).make_translation()
        res.source(sources[0])
        res.neurons_source(sources[1])
        return res

    def translate(self, data_source, neurons_source):
        """Attach lineage names and descriptions from the cell list to every
        neuron found in `neurons_source` and return the new output source."""
        res = self.make_new_output(sources=(data_source, neurons_source))
        net_q = neurons_source.data_context.stored(Network).query()
        # NOTE(review): falls back to the (unloaded) query object when no
        # Network loads, so later `net.neurons()` would fail -- confirm this
        # is the intended failure mode.
        net = next(net_q.load(), net_q)

        # TODO: Improve this evidence marker
        doc = res.evidence_context(Website)(url="http://www.wormatlas.org/celllist.htm")
        ev = res.evidence_context(Evidence)(reference=doc)
        doc_ctx = res.data_context_for(document=doc)
        ev.supports(doc_ctx.rdf_object)
        w = doc_ctx(Worm)()
        with self.make_reader(data_source, skipinitialspace=True, skipheader=True) as csvreader:
            # Counts how often a cell name has been seen so duplicates can be
            # disambiguated as "name(1)", "name(2)", ...
            cell_name_counters = dict()
            data = dict()
            for j in csvreader:
                name = j[0]
                lineageName = j[1]
                desc = j[2]
                # XXX: These renaming choices are arbitrary; may be inappropriate
                if name == "DB1/3":
                    name = "DB1"
                elif name == "DB3/1":
                    name = "DB3"
                elif name == "AVFL/R":
                    # Split the shared AVFL/R row by its lineage prefix.
                    if lineageName[0] == "W":
                        name = "AVFL"
                    elif lineageName[0] == "P":
                        name = "AVFR"

                if name in cell_name_counters:
                    basename = name
                    while name in cell_name_counters:
                        cell_name_counters[basename] += 1
                        name = basename + "(" + str(cell_name_counters[basename]) + ")"
                else:
                    cell_name_counters[name] = 0

                data[name] = {"lineageName": lineageName, "desc": desc}

        for n in net.neurons():
            # Get the name of the neuron in its original context
            name = n.name.one()
            cell_data = data[str(name)]

            # Make statements in the result context
            nn = doc_ctx(n)
            nn.lineageName(cell_data['lineageName'])
            nn.description(cell_data['desc'])
            w.cell(nn)
        # TODO: Add data for other cells here. Requires relating named
        # muscle cells to their counterparts in the cell list (e.g. mu_bod(#))
        print("uploaded lineage and descriptions")
        return res
| StarcoderdataPython |
1694657 | # Song-to-playlist classifier utils.
from __future__ import print_function
from __future__ import division
from utils.evaluation import compute_metrics, summarize_metrics
from sklearn.utils import check_random_state, shuffle
from tqdm import tqdm
import theano.tensor as T
import theano
import lasagne as lg
import numpy as np
import cPickle
import time
import os
import sys
EVERY = 10
def select_model(model_path):
    """Load a model specification module given its file path.

    Parameters
    ----------
    model_path: string
        Path of the form '<cfg_dir>/<model_dir>/<model_file>.py'; the file
        is imported as the module '<cfg_dir>.<model_dir>.<model_file>'.

    Returns
    -------
    model: module
        The imported model specification module, with a `name` attribute
        set to the file name (without extension).
    """
    import importlib

    # The path is assumed to contain exactly two directory levels.
    cfg_dir, model_dir = os.path.dirname(model_path).split('/')
    model_file = os.path.basename(model_path).split('.py')[0]
    # importlib replaces the previous `exec` on a string built from the
    # path: it is safe against odd path contents and, unlike `exec`, it
    # also works under Python 3, where exec cannot rebind a function-local
    # variable (the old code left `model` as False there).
    model = importlib.import_module('{}.{}.{}'.format(cfg_dir, model_dir, model_file))
    model.name = model_file
    return model
def show_design(model):
    """ Print details contained in a specification file. """
    # Single print call; the tab-indented template mirrors the grouping of
    # options in the model specification files. Keep the placeholder order
    # in sync with the attribute list passed to format().
    print(
        '\tStructure\n'
        '\tn_layers = {}\n'
        '\tn_hidden = {}\n'
        '\thid_nl = {}\n'
        '\tout_nl = {}\n\n'
        '\tTraining options\n'
        '\tbatch_size = {}\n'
        '\tlearning_rate = {}\n'
        '\tmax_epochs = {}\n'
        '\tmomentum = {}\n\n'
        '\tEarly-stop options\n'
        '\tpatience = {}\n'
        '\trefinement = {}\n'
        '\tfactor_lr = {}\n'
        '\tmax_epochs_increase = {}\n'
        '\tsignificance_level = {}\n\n'
        '\tRegularization\n'
        '\tinput_dropout = {}\n'
        '\thidden_dropout = {}\n'
        '\tpositive_weight = {}\n'
        '\tnonpositive_weight = {}\n'
        '\tl1_weight = {}\n'
        '\tl2_weight = {}\n\n'
        '\tFeatures\n'
        '\tfeature = {}\n'
        '\tstandardize = {}\n'
        '\tnormalize = {}'.format(
            model.n_layers, model.n_hidden, model.hid_nl, model.out_nl,
            model.batch_size, model.learning_rate, model.max_epochs,
            model.momentum, model.patience, model.refinement, model.factor_lr,
            model.max_epochs_increase, model.significance_level,
            model.input_dropout, model.hidden_dropout, model.positive_weight,
            model.nonpositive_weight, model.l1_weight, model.l2_weight,
            model.feature, model.standardize, model.normalize)
    )
def build_model(feature_size, n_classes, model, verbose=True):
    """
    Build a feed forward neural net.

    Parameters
    ----------
    feature_size: int
        Dimensionality of the input features.
    n_classes: int
        Number of classes we want to classify into.
    model: model specification file
        Contains the model config.
    verbose: bool
        Print info if True.

    Returns
    -------
    input_layer: Lasagne layer
        Input layer.
    output_layer: Lasagne layer
        Output layer.
    """
    if verbose:
        print('\tBuilding model...', end='')

    # input layer
    input_layer = lg.layers.InputLayer(shape=(None, feature_size))

    # dropout input units (rescale by default)
    input_layer_drop = lg.layers.DropoutLayer(
        incoming=input_layer,
        p=model.input_dropout
    )

    # first hidden layer; batch_norm wraps the dense layer so activations
    # are normalized before the nonlinearity
    hidden_layer = lg.layers.batch_norm(
        lg.layers.DenseLayer(
            incoming=input_layer_drop,
            num_units=model.n_hidden,
            nonlinearity=getattr(lg.nonlinearities, model.hid_nl)
        )
    )

    # dropout hidden units (rescale by default)
    hidden_layer = lg.layers.DropoutLayer(
        incoming=hidden_layer,
        p=model.hidden_dropout
    )

    # stack n_layers - 1 more hidden layers (each: dense+batch_norm+dropout)
    for l in range(model.n_layers - 1):
        hidden_layer = lg.layers.batch_norm(
            lg.layers.DenseLayer(
                incoming=hidden_layer,
                num_units=model.n_hidden,
                nonlinearity=getattr(lg.nonlinearities, model.hid_nl)
            )
        )

        # dropout hidden units (rescale by default)
        hidden_layer = lg.layers.DropoutLayer(
            incoming=hidden_layer,
            p=model.hidden_dropout
        )

    # output layer
    output_layer = lg.layers.batch_norm(
        lg.layers.DenseLayer(
            incoming=hidden_layer,
            num_units=n_classes,
            nonlinearity=getattr(lg.nonlinearities, model.out_nl)
        )
    )

    # inform about the network size
    num_params = lg.layers.count_params(output_layer)
    if verbose:
        print(' [{} parameters]'.format(num_params))

    return input_layer, output_layer
def define_cost(output_layer, target, model, determ):
    """
    Define Theano tensor for the cost as a function of the network output.
    The network output is also returned for convenience.

    Parameters
    ----------
    output_layer: Lasagne layer
        Output layer.
    target: Theano tensor
        Prediction target.
    model: model specification file
        Contains the model config.
    determ: bool
        Deterministic pass if True, else enable dropout.

    Returns
    -------
    output: Theano tensor
        Network output.
    cost: Theano tensor
        Cost as a function of output and target.
    """
    # Get network output
    output = lg.layers.get_output(output_layer, deterministic=determ)

    if model.out_nl == 'sigmoid':
        # Weighted BCE lets us put different trust in positive vs negative
        # observations (similar to weightedMF). The following holds if we
        # code t=1 for positive and t=0 for negative/not-known examples:
        # llh(example) = - w+ * t * log(p) - w- * (1 - t) * log(1 - p)
        # NOTE(review): `output` is not clipped before T.log, so a sigmoid
        # saturating to exactly 0 or 1 in float32 yields inf/NaN costs --
        # confirm whether clipping is needed in practice.
        cost = -1. * T.mean(
            model.positive_weight * target * T.log(output) +
            model.nonpositive_weight * (1. - target) * T.log(1. - output)
        )
    else:
        # categorical cross-entropy
        cost = T.mean(T.nnet.categorical_crossentropy(output, target))

    # regularize (terms are added only when their weight is positive)
    if model.l1_weight > 0:
        l1_reg = lg.regularization.regularize_network_params(output_layer, lg.regularization.l1)
        cost += model.l1_weight * l1_reg
    if model.l2_weight > 0:
        l2_reg = lg.regularization.regularize_network_params(output_layer, lg.regularization.l2)
        cost += model.l2_weight * l2_reg

    return output, cost
def declare_theano_variables(output_layer, model, verbose=True):
    """
    Define target, network output, cost and learning rate.

    Parameters
    ----------
    output_layer: Lasagne layer
        Output layer.
    model: model specification file
        Contains the model config.
    verbose: bool
        Print info if True.

    Returns
    -------
    target: Theano tensor
        Prediction target.
    stochastic_out: tuple
        Theano tensors for stochastic output and cost.
    deterministic_out: tuple
        Theano tensors for deterministic output and cost.
    learning_rate: Theano shared variable
        Learning rate for the optimizers.
    """
    if verbose:
        print('\tDeclaring theano variables...')

    # scale learning rate by a factor of 0.9 if momentum is applied,
    # to counteract the larger update steps that momentum yields
    lr = model.learning_rate - 0.9 * model.learning_rate * model.momentum
    learning_rate = theano.shared(np.asarray(lr, dtype=theano.config.floatX))

    # define target placeholder for the cost functions
    # NOTE(review): bmatrix declares an int8 ('byte') matrix, so targets fed
    # in must be int8 -- confirm callers cast accordingly.
    target = T.bmatrix('target')

    # stochastic cost expression (dropout active)
    stochastic_out = define_cost(output_layer, target, model, determ=False)

    # deterministic cost expression (dropout disabled)
    deterministic_out = define_cost(output_layer, target, model, determ=True)

    return target, stochastic_out, deterministic_out, learning_rate
def compile_theano_functions(input_layer, output_layer, target, stochastic_out,
                             deterministic_out, learning_rate, model, verbose=True):
    """
    Compile Theano functions for training, test and prediction.

    Parameters
    ----------
    input_layer: Lasagne layer
        Input layer.
    output_layer: Lasagne layer
        Output layer.
    target: Theano tensor
        Prediction target.
    stochastic_out: tuple
        Theano tensors for stochastic output and cost.
    deterministic_out: tuple
        Theano tensors for deterministic output and cost.
    learning_rate: Theano shared variable
        Learning rate for the optimizers.
    model: model specification file
        Contains the model config.
    verbose: bool
        Print info if True.

    Returns
    -------
    train_model: Theano function
        Stochastic cost and output (with updates).
    test_model: Theano function
        Deterministic cost and output (without updates).
    predict_model: Theano function
        Deterministic output (without updates).
    """
    if verbose:
        print('\tCompiling theano functions...')

    # retrieve all parameters from the network
    all_params = lg.layers.get_all_params(output_layer, trainable=True)

    # define updates and adapt if momentum is applied (*_out[1] is the cost)
    updates = lg.updates.adagrad(loss_or_grads=stochastic_out[1],
                                 params=all_params,
                                 learning_rate=learning_rate)
    if model.momentum:
        updates = lg.updates.apply_nesterov_momentum(updates)

    # compute stochastic cost and output, and update params
    train_model = theano.function(inputs=[input_layer.input_var, target],
                                  outputs=stochastic_out,
                                  updates=updates)

    # compute deterministic cost and output, and don't update
    test_model = theano.function(inputs=[input_layer.input_var, target],
                                 outputs=deterministic_out)

    # compute deterministic output, don't update (no target needed, so the
    # output expression is rebuilt here instead of reusing deterministic_out)
    output = lg.layers.get_output(output_layer, deterministic=True)
    predict_model = theano.function(inputs=[input_layer.input_var],
                                    outputs=output)

    return train_model, test_model, predict_model
def iter_minibatches(X, Y, batch_size):
    """Yield successive (X, Y) row mini-batches of `batch_size` rows; the
    final batch holds the remainder and may be smaller."""
    assert X.shape[0] == Y.shape[0]
    n_rows = X.shape[0]
    n_full = n_rows // batch_size
    # full-size batches first
    for b in range(n_full):
        window = slice(b * batch_size, (b + 1) * batch_size)
        yield X[window], Y[window]
    # trailing partial batch, if any rows remain
    if n_rows % batch_size:
        tail = slice(n_full * batch_size, None)
        yield X[tail], Y[tail]
def train(model, train_input, train_target, valid_input, valid_target, out_dir,
          random_state):
    """
    Train the hybrid classifier to a training dataset of song-features and
    song-playlist examples. Monitoring on a validation dataset. Return nothing.

    Parameters
    ----------
    model: model file
        Model specification.
    train_input: numpy array, shape (num songs, feature size)
        Input array of song features for training.
    train_target: numpy array, shape (num songs, num playlists)
        Target array of playlists the songs belong to for training.
    valid_input: numpy array, shape (num songs, feature size)
        Input array of song features for validation.
    valid_target: numpy array, shape (num songs, num playlists)
        Target array of playlists the songs belong to for validation.
    out_dir: string
        Path to the params and logging directory
    random_state: None, int or numpy RandomState
        Used to shuffle.
    """
    # set random behavior
    rng = check_random_state(random_state)

    print('\nSetting up training...')

    # identify dimensions
    feat_size = train_input.shape[1]
    n_classes = train_target.shape[1]

    # build network
    input_layer, output_layer = build_model(feat_size, n_classes, model)

    # define theano variables
    theano_vars = declare_theano_variables(output_layer, model)
    target, stochastic_metrics, deterministic_metrics, learning_rate = theano_vars

    # define theano functions
    train_model, test_model, predict_model = compile_theano_functions(
        input_layer, output_layer, target, stochastic_metrics,
        deterministic_metrics, learning_rate, model
    )

    # set up metrics monitoring
    metrics = ['cost', 'med_rank', 'mrr', 'map', 'mean_rec10', 'mean_rec30', 'mean_rec100']
    train_log = {metric: [] for metric in metrics}
    valid_log = {metric: [] for metric in metrics}
    file_log = '{}_log_train.pkl'.format(model.name)

    # initialize best epoch info
    best_valid_cost = np.inf
    best_epoch = 1
    # NOTE(review): presumably get_all_param_values returns copies, so this
    # snapshot is not mutated by later updates -- confirm against Lasagne.
    best_params = lg.layers.get_all_param_values(output_layer)
    best_file = '{}_best.pkl'.format(model.name)
    with open(os.path.join(out_dir, best_file), 'wb') as f:
        cPickle.dump((best_valid_cost, best_epoch, best_params), f)

    # initialize early stop and learning rate schedule
    early_stop = False
    epoch = 1
    max_epochs = model.max_epochs
    patience = model.patience
    refinement = model.refinement

    # train the classifier
    print('\nTraining...')
    while epoch <= max_epochs and not early_stop:

        # keep track of time
        start_time = time.time()

        # shuffle training data before each pass
        train_input, train_target = shuffle(train_input, train_target, random_state=rng)

        # training on mini-batches
        train_cost = 0.
        num_batches = 0
        if epoch % EVERY != 0:
            # do not compute ranking metrics
            for batch in iter_minibatches(train_input, train_target, model.batch_size):
                batch_input, batch_target = batch
                _, batch_cost = train_model(batch_input, batch_target.toarray())
                train_cost += np.asscalar(batch_cost)  # theano returns an array
                num_batches += 1

            # put together batches
            train_log['cost'].append(train_cost / num_batches)

        else:
            # compute ranking metrics
            # NOTE(review): these outputs come from the stochastic
            # (dropout-active) training passes, so the train ranking metrics
            # are noisier than the validation ones.
            output_list = []
            for batch in iter_minibatches(train_input, train_target, model.batch_size):
                batch_input, batch_target = batch
                batch_output, batch_cost = train_model(batch_input, batch_target.toarray())
                train_cost += np.asscalar(batch_cost)  # theano returns an array
                num_batches += 1
                output_list.append(batch_output)

            # put together batches
            train_log['cost'].append(train_cost / num_batches)
            train_output = np.vstack(output_list)

            # compute training metrics (transpose to have playlists as rows)
            train_metrics = compute_metrics(train_output.T, train_target.T.tocsr(), k_list=[10, 30, 100], verbose=False)
            train_metrics = summarize_metrics(*train_metrics, k_list=[10, 30, 100], ci=False, pivotal=False, verbose=False)

        # validation on single batch
        valid_output, valid_cost = test_model(valid_input, valid_target.toarray())
        valid_cost = np.asscalar(valid_cost)  # theano returns an array
        valid_log['cost'].append(valid_cost)
        if epoch % EVERY == 0:
            # compute validation metrics (transpose to have playlists as rows)
            valid_metrics = compute_metrics(valid_output.T, valid_target.T.tocsr(), k_list=[10, 30, 100], verbose=False)
            valid_metrics = summarize_metrics(*valid_metrics, k_list=[10, 30, 100], ci=False, pivotal=False, verbose=False)

            print(('\n\t\t' + '{:<13}' + '{:<13}' * 6).format('split', *metrics[1:]))
            print(('\t\t' + '{:<13}' + '{:<13.1f}' * 1 + '{:<13.2%}' * 5).format('train', *train_metrics))
            print(('\t\t' + '{:<13}' + '{:<13.1f}' * 1 + '{:<13.2%}' * 5).format('valid', *valid_metrics))
            print('')

            for m, tm, vm in zip(metrics[1:], train_metrics, valid_metrics):
                train_log[m].append(tm)
                valid_log[m].append(vm)

        print('\tEpoch {} of {} took {:.3f}s'.format(epoch, max_epochs, time.time() - start_time))

        # revisit best epoch details
        if valid_cost < best_valid_cost:
            if valid_cost < best_valid_cost * model.significance_level:
                # extend max_epochs if the improvement is significant
                if max_epochs < int(epoch * model.max_epochs_increase):
                    max_epochs = int(epoch * model.max_epochs_increase)
                    print('\n\tSet max_epochs to {}.\n'.format(max_epochs))

            # update best setting
            best_valid_cost = valid_cost
            best_epoch = epoch
            best_params = lg.layers.get_all_param_values(output_layer)

        else:
            # decrease patience
            patience -= 1
            print('\n\tDecrease patience. Currently patience={}, refinement={}.'.format(patience, refinement))
            if patience == 0:
                # roll back to the best parameters seen so far
                print('\n\tPatience exhausted: restoring best model...')
                lg.layers.set_all_param_values(output_layer, best_params)
                if refinement > 0:
                    # decrease refinement
                    refinement -= 1
                    print('\n\tDecrease refinement. Currently patience={}, refinement={}.'.format(patience, refinement))

                    # update learning rate
                    old_lr = learning_rate.get_value()
                    new_lr = np.asarray(old_lr * model.factor_lr, dtype=theano.config.floatX)
                    learning_rate.set_value(new_lr)
                    print('\n\tUpdate learning rate to {}.'.format(new_lr))

                    # restore patience
                    patience = model.patience
                    print('\n\tRestore patience. Currently patience={}, refinement={}.'.format(patience, refinement))
                else:
                    print('\n\tPatience and refinement steps exhausted. '
                          'Early stopping!')
                    early_stop = True

            elif epoch == max_epochs:
                print('\n\tReached max_epochs without improvement.')

        epoch += 1

    print('\nBest valid cost was {:.6f} at epoch {}.'.format(best_valid_cost, best_epoch))

    # save metrics and best setting
    with open(os.path.join(out_dir, file_log), 'wb') as f:
        cPickle.dump((train_log, valid_log), f)
    with open(os.path.join(out_dir, best_file), 'wb') as f:
        cPickle.dump((best_valid_cost, best_epoch, best_params), f)
def fit(model, fit_input, fit_target, out_dir, random_state):
    """
    Fit the hybrid classifier to a training dataset of song-features and
    song-playlist examples. Return nothing.

    Parameters
    ----------
    model: model file
        Model specification.
    fit_input: numpy array, shape (num songs, feature size)
        Input array of song features.
    fit_target: numpy array, shape (num songs, num playlists)
        Target array of playlists the songs belong to.
    out_dir: string
        Path to the params and logging directory
    random_state: None, int or numpy RandomState
        Used to shuffle.
    """
    # set random behavior
    rng = check_random_state(random_state)

    print('\nSetting up fit...')

    # identify dimensions
    feat_size = fit_input.shape[1]
    n_classes = fit_target.shape[1]

    # build network
    input_layer, output_layer = build_model(feat_size, n_classes, model)

    # define theano variables
    theano_vars = declare_theano_variables(output_layer, model)
    target, stochastic_metrics, deterministic_metrics, learning_rate = theano_vars

    # define theano functions (only the training function is needed here)
    train_model, _, _ = compile_theano_functions(
        input_layer, output_layer, target, stochastic_metrics,
        deterministic_metrics, learning_rate, model
    )

    # set up metrics monitoring and params file
    metrics = ['cost', 'med_rank', 'mrr', 'map', 'mean_rec10', 'mean_rec30', 'mean_rec100']
    log = {metric: [] for metric in metrics}
    log_file = '{}_log_fit.pkl'.format(model.name)
    params_file = '{}_params.pkl'.format(model.name)

    # fit the classifier
    print('\nFitting...')
    start = time.time()
    for epoch in tqdm(xrange(1, model.max_epochs + 1)):

        # shuffle training data before every pass
        fit_input, fit_target = shuffle(fit_input, fit_target, random_state=rng)

        # fitting on mini-batches
        fit_cost = 0.
        num_batches = 0
        if epoch % EVERY != 0:
            # do not compute ranking metrics
            for batch in iter_minibatches(fit_input, fit_target, model.batch_size):
                b_input, b_target = batch
                _, b_cost = train_model(b_input, b_target.toarray())
                fit_cost += np.asscalar(b_cost)  # theano returns an array
                num_batches += 1

            # put together batches
            log['cost'].append(fit_cost / num_batches)

        else:
            # compute ranking metrics from the stochastic training outputs
            output_list = []
            for batch in iter_minibatches(fit_input, fit_target, model.batch_size):
                b_input, b_target = batch
                b_output, b_cost = train_model(b_input, b_target.toarray())
                fit_cost += np.asscalar(b_cost)  # theano returns an array
                num_batches += 1
                output_list.append(b_output)

            # put together batches
            log['cost'].append(fit_cost / num_batches)
            fit_output = np.vstack(output_list)

            # compute training metrics (transpose to have playlists as rows)
            fit_metrics = compute_metrics(fit_output.T, fit_target.T.tocsr(), k_list=[10, 30, 100], verbose=False)
            fit_metrics = summarize_metrics(*fit_metrics, k_list=[10, 30, 100], ci=False, pivotal=False, verbose=False)

            tqdm.write(('\n\t\t' + '{:<13}' + '{:<13}' * 6).format('split', *metrics[1:]))
            tqdm.write(('\t\t' + '{:<13}' + '{:<13.1f}' * 1 + '{:<13.2%}' * 5).format('train', *fit_metrics))
            tqdm.write('')

            for m, fm in zip(metrics[1:], fit_metrics):
                log[m].append(fm)

    print('\nTime fitting: {:.4f} sec.'.format(time.time() - start))

    # save metrics
    with open(os.path.join(out_dir, log_file), 'wb') as f:
        cPickle.dump(log, f)

    # save fit model
    print('\nSaving fit model weights...')
    params = lg.layers.get_all_param_values(output_layer)
    # Open in binary mode: pickle data is binary, text mode corrupts it on
    # some platforms, and compute_scores() reads this file back with 'rb'.
    # (Previously this was opened with 'w', unlike every other dump here.)
    with open(os.path.join(out_dir, params_file), 'wb') as f:
        cPickle.dump(params, f)
def compute_scores(model, params_dir, cont_input, cont_target):
    """
    Compute the song-playlist scores.

    Parameters
    ----------
    model: model file
        Model specification.
    params_dir: string
        Path to the directory with previously fit parameters.
    cont_input: numpy array, shape (num songs, feature size)
        Input array of song features.
    cont_target: numpy array, shape (num songs, num playlists)
        Matrix of song-playlist co-occurrences at the continuation split.

    Returns
    -------
    cont_output: numpy array, shape (num songs, num playlists)
        Predicted song-playlist scores.
    """
    # identify dimensions (cont_target is only used here to size the
    # network's output layer)
    feat_size = cont_input.shape[1]
    n_classes = cont_target.shape[1]

    # build network
    input_layer, output_layer = build_model(feat_size, n_classes, model)

    # define theano variables
    theano_vars = declare_theano_variables(output_layer, model)
    target, stochastic_metrics, deterministic_metrics, learning_rate = theano_vars

    # define theano functions (only the prediction function is needed here)
    _, _, predict_model = compile_theano_functions(
        input_layer, output_layer, target, stochastic_metrics,
        deterministic_metrics, learning_rate, model
    )

    # load previously fit hybrid classifier weights
    print('\nLoading fit weights to the model...')
    params_file = '{}_params.pkl'.format(model.name)
    if os.path.isfile(os.path.join(params_dir, params_file)):
        with open(os.path.join(params_dir, params_file), 'rb') as f:
            params = cPickle.load(f)
    else:
        sys.exit('\tThe file {} does not exist yet. You need to fit the model '
                 'first.'.format(os.path.join(params_dir, params_file)))

    # load the weights on the defined model
    lg.layers.set_all_param_values(output_layer, params)

    # use the classifier to populate a matrix of song-playlist scores
    print('\nPredicting song-playlist scores...')
    start = time.time()
    cont_output = predict_model(cont_input)
    print('\nTime predicting: {} sec.'.format(round(time.time() - start, 4)))

    return cont_output
| StarcoderdataPython |
3295445 | <reponame>tanthanadon/senior<filename>src/churn.py
from pathlib import Path
import os
import pandas as pd
from tqdm import trange, tqdm
import time
import matplotlib
def saveText(PATH_SAMPLE, PATH_TEXT):
    """Dump `git log --numstat` for every project under PATH_SAMPLE to a
    <project>.txt file inside PATH_TEXT.

    Parameters
    ----------
    PATH_SAMPLE: pathlib.Path
        Directory containing one cloned repository per sub-directory.
    PATH_TEXT: pathlib.Path
        Destination directory for the per-project .txt dumps.

    Notes
    -----
    Changes the process-wide working directory (os.chdir) so that `git log`
    runs inside each repository.
    """
    for repo in PATH_SAMPLE.iterdir():
        # Get into the directory of the target project
        os.chdir(str(repo))
        # Use '&&' so the move only runs after git has finished writing the
        # file; the previous '|' pipe started `mv` concurrently with git,
        # racing against (and potentially moving a truncated) output file.
        command = "git log --numstat --no-merges --pretty= > {0}.txt && mv {0}.txt {1}".format(repo.name, PATH_TEXT)
        print(command)
        os.system(command)
    print("########### saveText() Finished ############")
def convert(str):
    """Return a git-numstat count field as an int, mapping the '-' marker
    (used for binary files) to 0."""
    return 0 if str == "-" else int(str)
def prepareData(PATH_TEXT, PATH_CSV):
    """Parse the per-project git-numstat text dumps into per-project CSVs.

    Parameters
    ----------
    PATH_TEXT: pathlib.Path
        Directory of <project_id>.txt files produced by saveText(), one
        tab-separated line per changed file: added, deleted, pathname.
    PATH_CSV: pathlib.Path
        Destination directory; one <project_id>.csv is written per input,
        keeping only rows describing Python files.
    """
    for text_file in PATH_TEXT.iterdir():
        # Project id is the file name without its extension.
        project_id = text_file.stem
        rows = []
        for line in text_file.read_text().split("\n"):
            fields = line.split("\t")
            # Keep only well-formed numstat lines that describe a Python
            # file (a length guard also skips empty/malformed lines, which
            # previously could raise IndexError).
            if len(fields) >= 3 and fields[2].endswith(".py"):
                rows.append({
                    'project_id': project_id,
                    'add': convert(fields[0]),
                    'delete': convert(fields[1]),
                    'pathname': fields[2],
                })
        df = pd.DataFrame.from_dict(rows)
        df.to_csv("{0}/{1}.csv".format(PATH_CSV, project_id), index=False)
    print("########### prepareData() Finished ############")
def mergeChurn(PATH_CHURN_CSV, PATH_CSV):
    """Combine all per-project churn CSVs and aggregate churn per project.

    Parameters
    ----------
    PATH_CHURN_CSV: pathlib.Path
        Directory of per-project CSVs produced by prepareData().
    PATH_CSV: pathlib.Path
        Destination directory; writes churn_original.csv (per-file rows with
        a churn column) and churn_final.csv (one aggregated row per project).
    """
    # Read everything first and concatenate once: repeated DataFrame.append
    # was quadratic and the method is removed in pandas >= 2.0.
    frames = [pd.read_csv(str(csv)) for csv in PATH_CHURN_CSV.iterdir()]
    df = pd.concat(frames, ignore_index=True)

    # churn = lines added + lines deleted, per file entry
    churn = df['add'] + df['delete']
    df.insert(3, "churn", churn)

    # Export the unaggregated, per-file table.
    df.to_csv("{0}/churn_original.csv".format(PATH_CSV), index=False)

    # Aggregate to one row per project.
    df = df.groupby('project_id').sum()
    df.reset_index(inplace=True)

    # Export final version of csv files in project level
    df.to_csv("{0}/churn_final.csv".format(PATH_CSV), index=False)
    print("########### mergeChurn() Finished ############")
def main():
    """Drive the churn pipeline: create the working directories and merge
    the per-project churn CSVs into project-level totals (the earlier
    collection/parsing stages are currently commented out)."""
    # Static paths
    PATH_SAMPLE = Path("../Sample_Projects/").resolve()
    # Create the main directory for cloning projects
    PATH_SAMPLE.mkdir(parents=True, exist_ok=True)

    PATH_CSV = Path("../csv").resolve()
    # Create the main directory for storing csv projects
    PATH_CSV.mkdir(parents=True, exist_ok=True)

    # Create the directory to store churn values for each project
    PATH_CHURN = Path("../all_churn/").resolve()
    PATH_CHURN.mkdir(parents=True, exist_ok=True)

    PATH_CHURN_TEXT = Path("../all_churn/text/").resolve()
    PATH_CHURN_TEXT.mkdir(parents=True, exist_ok=True)

    PATH_CHURN_CSV = Path("../all_churn/csv/").resolve()
    PATH_CHURN_CSV.mkdir(parents=True, exist_ok=True)

    # Collect the number of added and deleted lines of code from git log for each project
    # And save it as project_id.txt file
    #saveText(PATH_SAMPLE, PATH_CHURN_TEXT)

    # Prepare .csv files from .txt files created for each project
    #prepareData(PATH_CHURN_TEXT, PATH_CHURN_CSV)

    # Merge all .csv file and calculate churn for each project
    mergeChurn(PATH_CHURN_CSV, PATH_CSV)
# NOTE(review): these statements run at import time; wrapping them in an
# `if __name__ == "__main__":` guard would make the module importable
# without side effects.
start_time = time.time()
main()
print("--- %s seconds ---" % (time.time() - start_time))
1708658 | <gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import torch
# Known global maximizer and maximum of the 8d cosine mixture function
# (kept as module constants for reference by benchmarking code).
GLOBAL_MAXIMIZER = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
GLOBAL_MAXIMUM = 0.8
def cosine8(X):
    r"""8d Cosine Mixture test function.

    Evaluates `f(x) = 0.1 \sum_{i=1}^8 cos(5 \pi x_i) - \sum_{i=1}^8 x_i^2`
    over the last dimension of `X` (usually on `[-1, 1]^8`). The single
    global maximizer is the origin, with `f(0) = 0.8`.

    Args:
        X: A Tensor of size `8` or `k x 8` (`k` batch evaluations).

    Returns:
        `f(X)`, the value of the 8d Cosine Mixture function.
    """
    single = X.ndimension() == 1
    Xb = X.unsqueeze(0) if single else X
    cos_part = 0.1 * torch.cos(5.0 * math.pi * Xb).sum(dim=-1)
    quad_part = Xb.pow(2).sum(dim=-1)
    value = cos_part - quad_part
    return value.squeeze(0) if single else value
| StarcoderdataPython |
77507 | <reponame>rgerkin/pyrfume
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import pyrfume
from pyrfume import odorants
# Load the table of CIDs by sources
file_path = pyrfume.DATA_DIR / 'odorants' / 'all_cids.csv'
all_cids = pd.read_csv(file_path).set_index('CID')

# +
# Get info from PubChem (network call; can be slow for many CIDs)
info = odorants.from_cids(all_cids.index)

# Turn the info into a dataframe
info = pd.DataFrame.from_records(info).set_index('CID')
info.head()
# -

# Join the CIDs and sources with the PubChem info
df = info.join(all_cids)
df = df.rename(columns={'MolecularWeight': 'MW',
                        'IsomericSMILES': 'SMILES',
                        'name': 'Name'})
# Put the main descriptive columns first, keeping remaining columns after.
df = df[['Name', 'MW', 'SMILES', 'IUPACName'] + list(df.columns[4:])]
df.head()

# Write the combined table next to the input file.
file_path = pyrfume.DATA_DIR / 'odorants' / 'all_cids_properties.csv'
df.to_csv(file_path)
| StarcoderdataPython |
70242 | <gh_stars>0
# coding: utf-8
# # DS3 Data Handling
# ## <NAME>
# ### NIAID Bioinformatics and Computational Biosciences Branch (BCBB)
# ---
# # Outline:
# - Intro to Python
# - Learn python in Y minutes
#
# - Importing Data
# - csv import
# - Excel import
# - Database import
# - Web import
#
# - Pandas
# - Importing Data
# - Removing missing values
# - Fun with Columns
# - Filtering
# - Grouping
# - Plotting
# - Getting data out
# - Reading and writing to Excel
# ---
# ### Learn Python in Y Minutes
# See Learn Python in Y Minutes IPython Notebook
# ---
# # An Introduction to Pandas
#
# ** Presentation originally developed by <NAME>, modified slightly by <NAME> **
#
# **pandas** is a Python package providing fast, flexible, and expressive data structures designed to work with *relational* or *labeled* data both. It is a fundamental high-level building block for doing practical, real world data analysis in Python.
#
# pandas is well suited for:
#
# - Tabular data with heterogeneously-typed columns, as in an SQL table or Excel spreadsheet
# - Ordered and unordered (not necessarily fixed-frequency) time series data.
# - Arbitrary matrix data (homogeneously typed or heterogeneous) with row and column labels
# - Any other form of observational / statistical data sets. The data actually need not be labeled at all to be placed into a pandas data structure
#
#
# Key features:
#
# - Easy handling of **missing data**
# - **Size mutability**: columns can be inserted and deleted from DataFrame and higher dimensional objects
# - Automatic and explicit **data alignment**: objects can be explicitly aligned to a set of labels, or the data can be aligned automatically
# - Powerful, flexible **group by functionality** to perform split-apply-combine operations on data sets
# - Intelligent label-based **slicing, fancy indexing, and subsetting** of large data sets
# - Intuitive **merging and joining** data sets
# - Flexible **reshaping and pivoting** of data sets
# - **Hierarchical labeling** of axes
# - Robust **IO tools** for loading data from flat files, Excel files, databases, and HDF5
# - **Time series functionality**: date range generation and frequency conversion, moving window statistics, moving window linear regressions, date shifting and lagging, etc.
# In[4]:
import pandas
# ## Data Import
# ### Import CVS
# Next, let's read in [our data](data/weather_year.csv).
# Because it's in a CSV file, we can use pandas' `read_csv` function to pull it directly into a [DataFrame](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe).
# In[7]:
help(pandas.read_csv, sep="\t")
# In[2]:
data = pandas.read_csv("data/weather_year.csv")
# We can get a summary of the DataFrame by asking for some information:
# In[3]:
data.info()
# ### Import Excel
# Using `len` on a DataFrame will give you the number of rows. You can get the column names using the `columns` property.
#
# The read_excel() method can read Excel 2003 (.xls) and Excel 2007 (.xlsx) files using the xlrd Python module and use the same parsing code as the above to convert tabular data into a DataFrame. See the cookbook for some advanced strategies
#
# Besides read_excel you can also read Excel files using the ExcelFile class.
# In[8]:
data = pandas.read_excel("data/weather_year.xlsx")
data.info()
# In[ ]:
data = pandas.read_excel("/Users/squiresrb/Dropbox/NIEHS/")
# In[ ]:
# using the ExcelFile class
xls = pandas.ExcelFile('data/weather_year.xlsx')
xls.parse('Sheet1', index_col=None, na_values=['NA'])
# In[9]:
# using the read_excel function
data = pandas.read_excel('data/weather_year.xlsx', 'Sheet1', index_col=None, na_values=['NA'])
# In[10]:
#Using the sheet index:
data = pandas.read_excel('data/weather_year.xlsx', 0, index_col=None, na_values=['NA'])
# In[11]:
#Using all default values:
data = pandas.read_excel('data/weather_year.xlsx')
# New in version 0.16.
#
# read_excel can read more than one sheet, by setting sheetname to either a list of sheet names, a list of sheet positions, or None to read all sheets.
# ### Import Form a Database (SQL)
# First we are going to create a sqlite3 database using the columns and data from the csv file above. Then we will connect to the database and read from it.
# In[12]:
import pandas as pd
import sqlite3
weather_df = pd.read_csv("data/weather_year.csv")
con = sqlite3.connect("data/test_db.sqlite")
con.execute("DROP TABLE IF EXISTS weather_year")
pd.io.sql.to_sql(weather_df, "weather_year", con)
# In[13]:
con = sqlite3.connect("data/test_db.sqlite")
data = pandas.read_sql("SELECT * from weather_year", con)
data.info()
# ### Additional Formats
# JSON: JavaScript Object Notation) is a lightweight data-interchange format
# HDF: high performance HDF5 format using the excellent
# ---
# ## Getting Started
# OK, let's get started by importing the pandas library.
# In[14]:
import pandas
# Next, let's read in our data.
# Because it's in a CSV file, we can use pandas' `read_csv` function to pull it directly into a [DataFrame](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe).
# In[15]:
data = pandas.read_csv("data/weather_year.csv")
# We can get a summary of the DataFrame by asking for some information:
# In[16]:
data.info()
# This gives us a lot of insight. First, we can see that there are 819 rows (entries).
#
# Each column is printed along with however many "non-null" values are present.
#
# Lastly, the data types (dtypes) of the columns are printed at the very bottom. We can see that there are 10 `float64`, 3 `int64`, and 5 `object` columns.
# In[17]:
len(data)
# Using `len` on a DataFrame will give you the number of rows. You can get the column names using the `columns` property.
# In[18]:
data.columns
# Columns can be accessed in two ways. The first is using the DataFrame like a dictionary with string keys:
# In[19]:
data["EDT"]
# You can get multiple columns out at the same time by passing in a list of strings.
# In[22]:
data[['EDT', 'Mean TemperatureF']]
# The second way to access columns is using the dot syntax. This only works if your column name could also be a Python variable name (i.e., no spaces), and if it doesn't collide with another DataFrame property or function name (e.g., count, sum).
# In[ ]:
# data.<column-name>  # incomplete notebook cell: the trailing dot was left for
# IPython tab-completion of column names; commented out so the file parses.
# We'll be mostly using the dot syntax here because you can auto-complete the names in IPython. The first pandas function we'll learn about is `head()`. This gives us the first 5 items in a column (or the first 5 rows in the DataFrame).
# In[23]:
data.EDT.head()
# Passing in a number `n` gives us the first `n` items in the column. There is also a corresponding `tail()` method that gives the *last* `n` items or rows.
# In[24]:
data.EDT.head(10)
# This also works with the dictionary syntax.
# In[25]:
data["EDT"].head()
# In[26]:
data["EDT"].describe()
# ## Fun with Columns
# The column names in `data` are a little unwieldy, so we're going to rename them. This is as easy as assigning a new list of column names to the `columns` property of the DataFrame.
#
# aeid = assay endpoint id (unique id)
# assay_component_endpoint_name = name of assay endpoint
# analysis_direction = the analyzed positive (upward) or negative (downward) direction
# signal_direction = the direction observed of the detected signal
# normalized_data_type = fold induction or percent positive control
# key_positive_control = positive control used to normalize data
# zprm.mdn = z-prime median across all plates (where applicable)
# zprm.mad = z prime median absolute deviation (mad)
# ssmd.mdn = strictly standardized mean difference median across all plates
# ssmd.mad = strictly standardized mean difference mad across all plates
# cv.mdn = coefficient of variation median across all plates
# cv.mad = coefficient of variation mad across all plates
# sn.mdn = signal-to-noise median across all plates
# sn.mad = signal-to-noise mad across all plates
# sb.mdn = signal-to-background median across all plates
# sb.mad = signal-to-background mad across all plates
# In[27]:
data.columns = ["date", "max_temp", "mean_temp", "min_temp", "max_dew",
"mean_dew", "min_dew", "max_humidity", "mean_humidity",
"min_humidity", "max_pressure", "mean_pressure",
"min_pressure", "max_visibilty", "mean_visibility",
"min_visibility", "max_wind", "mean_wind", "min_wind",
"precipitation", "cloud_cover", "events", "wind_dir"]
# These should be in the same order as the original columns. Let's take another look at our DataFrame summary. We can see that the first column is now entitled 'date' instead of 'EDT'.
# In[28]:
data.info()
# In[ ]:
# data.pre<TAB>  # incomplete notebook cell: partial attribute left for IPython
# tab-completion (e.g. data.precipitation); commented out so the file parses.
# Now our columns can all be accessed using the dot syntax!
# In[ ]:
data.mean_temp.head()
# There are lots useful methods on columns, such as `std()` to get the standard deviation. Most of pandas' methods will happily ignore missing values like `NaN`.
# In[ ]:
data.mean_temp.std()
# If you want to add labels and save the plot as a `png` file that is sized 800 pixels by 600 pixels:
# By the way, many of the column-specific methods also work on the entire DataFrame. Instead of a single number, you'll get a result for each column.
# In[ ]:
data.std()
# In[ ]:
data.mean_temp.max()
# ## Data Transformations
# Methods like `sum()` and `std()` work on entire columns. We can run our own functions across all values in a column (or row) using `apply()`.
#
# To give you an idea of how this works, let's consider the "date" column in our DataFrame (formally "EDT").
# In[29]:
data.date.head()
# We can use the `values` property of the column to get a list of values for the column. Inspecting the first value reveals that these are strings with a particular format.
# In[30]:
first_date = data.date.values[0]
first_date
# The `strptime` function from the `datetime` module will make quick work of this date string. There are many [more shortcuts available](http://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior) for `strptime`.
# In[31]:
# Import the datetime class from the datetime module
from datetime import datetime
# Convert date string to datetime object
datetime.strptime(first_date, "%Y-%m-%d")
# Using the `apply()` method, which takes a function (**without** the parentheses), we can apply `strptime` to each value in the column. We'll overwrite the string date values with their Python `datetime` equivalents.
# In[32]:
# Define a function to convert strings to dates
def string_to_date(date_string):
    """Parse a 'YYYY-MM-DD' string into a ``datetime`` object."""
    fmt = "%Y-%m-%d"
    return datetime.strptime(date_string, fmt)
# Run the function on every date string and overwrite the column
data.date = data.date.apply(string_to_date)
data.date.head()
# Let's go one step futher. Each row in our DataFrame represents the weather from a single day. Each row in a DataFrame is associated with an *index*, which is a label that uniquely identifies a row.
#
# Our row indices up to now have been auto-generated by pandas, and are simply integers from 0 to 365. If we use dates instead of integers for our index, we will get some extra benefits from pandas when plotting later on. Overwriting the index is as easy as assigning to the `index` property of the DataFrame.
# In[33]:
data.index = data.date
data.info()
# Now we can quickly look up a row by its date with the `loc[]` property \[[see docs](http://pandas.pydata.org/pandas-docs/stable/indexing.html)], which locates records by label.
# In[34]:
data.loc[datetime(2012, 8, 19)]
# We can also access a row (or range of rows) with the `iloc[]` property, which locates records by integer index.
# In[35]:
data.max_temp.iloc[7:15]
# With all of the dates in the index now, we no longer need the "date" column. Let's drop it.
# In[36]:
# The dates now live in the index, so the "date" column is redundant.
# Fix: the original dropped "CREATE_DATE", which does not exist after the
# rename at In[27] and would raise a KeyError.  (Note: the duplicate cell a
# few lines below repeats this drop and will raise once the column is gone.)
data = data.drop("date", axis=1)
data.columns
# Note that we need to pass in `axis=1` in order to drop a column. For more details, check out the [documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.drop.html) for `drop`. The index values can now be accessed as `data.index.values`.
# In[37]:
data = data.drop("date", axis=1)
data.columns
# ## Handing Missing Values
# Pandas considers values like `NaN` and `None` to represent missing data. The `count()` function [[see docs](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.count.html)] can be used to tell whether values are missing. We use the parameter `axis=0` to indicate that we want to perform the count by rows, rather than columns.
# In[38]:
data.count(axis=0)
# It is pretty obvious that there are a lot of `NaN` entrys for the `events` column; 204 to be exact. Let's take a look at a few values from the `events` column:
# In[39]:
data.events.head(10)
# This isn't exactly what we want. One option is to drop all rows in the DataFrame with missing "events" values using the `dropna()` function \[[see docs](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.dropna.html)].
# In[40]:
data.dropna(subset=["events"]).info()
# Note that this didn't affect `data`; we're just looking at a copy.
#
# Instead of dropping rows with missing values, let's fill them with empty strings (you'll see why in a moment). This is easily done with the `fillna()` function. We'll go ahead and overwrite the "events" column with empty string missing values instead of `NaN`.
# In[41]:
data.events = data.events.fillna("")
data.events.head(10)
# Now we repeat the `count` function for the `events` column:
# In[42]:
data.events.count()
# As desired, there are no longer any empty entries in the `events` column. Why did we not need the `axis=0` parameter this time?
# ## Iteratively Accessing Rows
# You can iterate over each row in the DataFrame with `iterrows()`. Note that this function returns **both** the index and the row. Also, you must access columns in the row you get back from `iterrows()` with the dictionary syntax.
# In[43]:
num_rain = 0
for idx, row in data.iterrows():
if "Rain" in row["events"]:
num_rain += 1
"Days with rain: {0}".format(num_rain)
# ## Filtering
# Most of your time using pandas will likely be devoted to selecting rows of interest from a DataFrame. In addition to strings, the dictionary syntax accepts requests like:
# In[45]:
freezing_days = data[data.max_temp <= 32]
freezing_days.info()
# We get back another DataFrame with fewer rows (21 in this case). This DataFrame can be filtered down even more by adding a constrain that the temperature be greater than 20 degrees, in addition to being below freezing.
# In[ ]:
cold_days = freezing_days[freezing_days.min_temp >= 20]
cold_days.info()
# To see the high and low temperatures for the selected days:
# In[ ]:
cold_days[["max_temp","min_temp"]]
# Using boolean operations, we could have chosen to apply both filters to the original DataFrame at the same time.
# In[46]:
data[(data.max_temp <= 32) & (data.min_temp >= 20)]
# It's important to understand what's really going on underneath with filtering. Let's look at what kind of object we actually get back when creating a filter.
# In[ ]:
temp_max = data.max_temp <= 32
type(temp_max)
# This is a pandas `Series` object, which is the one-dimensional equivalent of a DataFrame. Because our DataFrame uses datetime objects for the index, we have a specialized `TimeSeries` object.
#
# What's inside the filter?
# In[ ]:
temp_max
# Our filter is nothing more than a `Series` with a *boolean value for every item in the index*. When we "run the filter" as so:
# In[ ]:
data[temp_max].info()
# pandas lines up the rows of the DataFrame and the filter using the index, and then keeps the rows with a `True` filter value. That's it.
#
# Let's create another filter.
# In[ ]:
temp_min = data.min_temp >= 20
temp_min
# Now we can see what the boolean operations are doing. Something like `&` (**not** `and`)...
# In[ ]:
temp_min & temp_max
# ...is just lining up the two filters using the index, performing a boolean AND operation, and returning the result as another `Series`.
#
# We can do other boolean operations too, like OR:
# In[ ]:
temp_min | temp_max
# Because the result is just another `Series`, we have all of the regular pandas functions at our disposal. The `any()` function returns `True` if any value in the `Series` is `True`.
# In[ ]:
temp_both = temp_min & temp_max
temp_both.any()
# Sometimes filters aren't so intuitive. This (sadly) doesn't work:
# In[ ]:
try:
data["Rain" in data.events]
except:
pass # "KeyError: no item named False"
# We can wrap it up in an `apply()` call fairly easily, though:
# In[ ]:
data[data.events.apply(lambda e: "Rain" in e)].info()
# We'll replace "T" with a very small number, and convert the rest of the strings to floats:
# In[47]:
# Convert precipitation to floating point number
# "T" means "trace of precipitation"
def precipitation_to_float(precip_str):
    """Convert a precipitation reading to a float.

    "T" denotes a trace of precipitation; it maps to a tiny positive
    value so it remains distinguishable from a true zero.
    """
    trace_amount = 1e-10
    if precip_str == "T":
        return trace_amount
    return float(precip_str)
data.precipitation = data.precipitation.apply(precipitation_to_float)
data.precipitation.head()
# ---
# ## Ordering: Sorting data
# Sort by the events column, ascending
# In[62]:
# DataFrame.sort was deprecated in pandas 0.17 and removed in 0.20;
# sort_values is the supported replacement for sorting by column values.
get_ipython().magic('pinfo data.sort_values')
# In[64]:
data.sort_values(['max_temp', 'mean_temp'])
# In[ ]:
# ---
# # Data Transformation
# ---
# ## Grouping
# Besides `apply()`, another great DataFrame function is `groupby()`.
# It will group a DataFrame by one or more columns, and let you iterate through each group.
#
# As an example, let's group our DataFrame by the "cloud_cover" column (a value ranging from 0 to 8).
# In[49]:
cover_temps = {}
for cover, cover_data in data.groupby("cloud_cover"):
cover_temps[cover] = cover_data.mean_temp.mean() # The mean mean temp!
cover_temps
# When you iterate through the result of `groupby()`, you will get a tuple.
# The first item is the column value, and the second item is a filtered DataFrame (where the column equals the first tuple value).
#
# You can group by more than one column as well.
# In this case, the first tuple item returned by `groupby()` will itself be a tuple with the value of each column.
# In[50]:
for (cover, events), group_data in data.groupby(["cloud_cover", "events"]):
print("Cover: {0}, Events: {1}, Count: {2}".format(cover, events, len(group_data)))
# ## Reshaping: Creating New Columns
# Weather events in our DataFrame are stored in strings like "Rain-Thunderstorm" to represent that it rained and there was a thunderstorm that day. Let's split them out into boolean "rain", "thunderstorm", etc. columns.
#
# First, let's discover the different kinds of weather events we have with `unique()`.
# In[85]:
a= 1
print(a)
print(a+1)
# In[51]:
data.events.unique()
# Looks like we have "Rain", "Thunderstorm", "Fog", and "Snow" events. Creating a new column for each of these event kinds is a piece of cake with the dictionary syntax.
# In[52]:
for event_kind in ["Rain", "Thunderstorm", "Fog", "Snow"]:
col_name = event_kind.lower() # Turn "Rain" into "rain", etc.
data[col_name] = data.events.apply(lambda e: event_kind in e)
data.info()
# Our new columns show up at the bottom. We can access them now with the dot syntax.
# In[53]:
data.rain
# We can also do cool things like find out how many `True` values there are (i.e., how many days had rain)...
# In[54]:
data.rain.sum()
# ...and get all the days that had both rain and snow!
# In[55]:
data[data.rain & data.snow].info()
# ## Getting Data Out
# Writing data out in pandas is as easy as getting data in. To save our DataFrame out to a new csv file, we can just do this:
# In[76]:
data.to_csv("data/weather-mod.csv")
# Want to make that tab separated instead? No problem.
# In[77]:
data.to_csv("data/weather-mod.tsv", sep="\t")
# There's also support for [reading and writing Excel files](http://pandas.pydata.org/pandas-docs/stable/io.html#excel-files), if you need it.
# ## Updating a Cell
# In[78]:
for idx, row in data.iterrows():
    # Assign through .loc on the frame itself.  The original chained form
    # (data.max_temp.loc[idx] = 0) writes into a possibly-temporary Series
    # (SettingWithCopy) and is not guaranteed to update `data`.
    data.loc[idx, "max_temp"] = 0
any(data.max_temp != 0) # Any rows with max_temp not equal to zero?
# Resources
#
# - [Learn Pandas](https://bitbucket.org/hrojas/learn-pandas)
# - [Compute](http://nbviewer.ipython.org/urls/bitbucket.org/hrojas/learn-pandas/raw/master/lessons/Cookbook%20-%20Compute.ipynb)
# - [Merge](http://nbviewer.ipython.org/urls/bitbucket.org/hrojas/learn-pandas/raw/master/lessons/Cookbook%20-%20Merge.ipynb)
# - [Select](http://nbviewer.ipython.org/urls/bitbucket.org/hrojas/learn-pandas/raw/master/lessons/Cookbook%20-%20Select.ipynb)
# - [Sort](http://nbviewer.ipython.org/urls/bitbucket.org/hrojas/learn-pandas/raw/master/lessons/Cookbook%20-%20Sort.ipynb)
#
#
# - [Intro to Pandas](https://bitbucket.org/hrojas/learn-pandas)
# - [Timeseries](http://nbviewer.ipython.org/github/changhiskhan/talks/blob/master/pydata2012/pandas_timeseries.ipynb)
# - [Statistics in Python](http://www.randalolson.com/2012/08/06/statistical-analysis-made-easy-in-python/)
#
# http://datacommunitydc.org/blog/2013/07/python-for-data-analysis-the-landscape-of-tutorials/
# # Exploratory Data Analysis
# In[ ]:
# ## Plotting
# Some methods, like `plot()` and `hist()` produce plots using [matplotlib](http://matplotlib.org/).
#
# To make plots using Matplotlib, you must first enable IPython's matplotlib mode. To do this, run the `%matplotlib inline` magic command to enable plotting in the current Notebook. \[If that doesn't work (because you have an older version of IPython), try `%pylab inline`. You may also have to restart the IPython kernel.\]
#
# We'll go over plotting in more detail later.
# In[56]:
get_ipython().magic('matplotlib inline')
data.mean_temp.hist()
# In[57]:
ax = data.mean_temp.hist() # get plot axes object
ax.set_xlabel('Daily Mean Temperature (F)')
ax.set_ylabel('# of Occurances')
ax.set_title('Mean Temperature Histogram')
fig = ax.get_figure() # get plot figure object
fig.set_size_inches(8,6) # set plot size
fig.savefig('MeanTempHistogram.png', dpi=100)
# We've already seen how the `hist()` function makes generating histograms a snap. Let's look at the `plot()` function now.
# In[58]:
data.max_temp.plot()
# That one line of code did a **lot** for us. First, it created a nice looking line plot using the maximum temperature column from our DataFrame. Second, because we used `datetime` objects in our index, pandas labeled the x-axis appropriately.
#
# Pandas is smart too. If we're only looking at a couple of days, the x-axis looks different:
# In[59]:
data.max_temp.tail().plot()
# The `plot()` function returns a matplotlib `AxesSubPlot` object. You can pass this object into subsequent calls to `plot()` in order to compose plots.
#
# Although `plot()` takes a variety of parameters to customize your plot, users familiar with matplotlib will feel right at home with the `AxesSubPlot` object.
# Prefer a bar plot? Pandas has got your covered.
# In[60]:
data.max_temp.tail().plot(kind="bar", rot=10)
# In[61]:
ax = data.max_temp.plot(title="Min and Max Temperatures")
data.min_temp.plot(style="red", ax=ax)
ax.set_ylabel("Temperature (F)")
# In[ ]:
# | StarcoderdataPython |  (dataset concatenation marker; commented out so the file parses)
# 3290126 | <gh_stars>0
# Generated by Django 3.0.7 on 2020-06-23 12:12
import apps.users.models
import django.db.models.deletion
import easy_thumbnails.fields
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django schema migration (Django 3.0.7).

    Alters two existing fields (OTP.type choices, User.photo thumbnail
    upload path) and introduces the NotificationSetting and Notification
    models for the users app.
    """
    # Must run after the previous users-app migration.
    dependencies = [
        ('users', '0008_auto_20200623_1017'),
    ]
    operations = [
        migrations.AlterField(
            model_name='otp',
            name='type',
            field=models.CharField(choices=[('E', 'Email'), ('P', 'Phone')], max_length=2, verbose_name='Type'),
        ),
        migrations.AlterField(
            model_name='user',
            name='photo',
            field=easy_thumbnails.fields.ThumbnailerImageField(blank=True, null=True,
                                                               upload_to=apps.users.models.User.get_path),
        ),
        # Per-device push-notification preferences, one row per user.
        migrations.CreateModel(
            name='NotificationSetting',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creation_time', models.DateTimeField(auto_now_add=True, null=True)),
                ('last_update_time', models.DateTimeField(auto_now=True, null=True)),
                ('device_id', models.CharField(max_length=255, verbose_name='Device Id')),
                ('enabled', models.BooleanField(default=True, null=True, verbose_name='Enabled')),
                ('book_added', models.BooleanField(default=True, null=True, verbose_name='Book Added')),
                ('book_approved', models.BooleanField(default=True, null=True, verbose_name='Book Approved')),
                ('audio_approved', models.BooleanField(default=True, null=True, verbose_name='Audio Approved')),
                ('user',
                 models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='notification_setting',
                                      to=settings.AUTH_USER_MODEL, verbose_name='Notification Setting')),
            ],
        ),
        # Individual notification messages; unread ones sort first, newest first.
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creation_time', models.DateTimeField(auto_now_add=True, null=True)),
                ('last_update_time', models.DateTimeField(auto_now=True, null=True)),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('type', models.CharField(choices=[('book_added', 'Book Added'), ('book_approved', 'Book Approved'),
                                                   ('audio_approved', 'Audio Approved')], max_length=20,
                                          verbose_name='Type')),
                ('message', models.CharField(max_length=1000, verbose_name='Message')),
                ('read', models.BooleanField(default=False, null=True, verbose_name='Read')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notifications',
                                           to=settings.AUTH_USER_MODEL, verbose_name='Notifications')),
            ],
            options={
                'ordering': ['-read', '-creation_time'],
            },
        ),
    ]
# | StarcoderdataPython |  (dataset concatenation marker; commented out so the file parses)
# 4821741 | <gh_stars>0
#!/usr/bin/env ipython2
import numpy as np
import scipy.special as ss
import scipy.interpolate as sint
from statepoint import StatePoint
from matplotlib import pyplot as plt
from uncertainties import ufloat
from gen_mgxs import mgxs
import pickle
from bisect import bisect
import os
import sys
def Pn_solve(sigtn, sigsn, Qn, deriv_term):
    """Solve the order-n moment balance for a single group.

    The moment-n equation is deriv_term + sigtn*psin = source, where the
    caller has already folded the scattering contribution into Qn (see
    fixedsrc), so the update reduces to psin = (Qn - deriv_term) / sigtn.
    `sigsn` is kept in the signature for interface stability but is not
    used in this formulation.
    """
    net_source = Qn - deriv_term
    psin = net_source / sigtn
    return psin
def solve1g(N, sigtns1g, sigsns1g, Qnsg, psinsg, x, invdx, n_ratios):
    """One within-group sweep over all Legendre orders 0..N.

    For each order n the streaming (derivative) term is built from the
    neighbouring moments n-1 and n+1 via Krogh polynomial interpolation,
    then Pn_solve updates psinsg[n] in place.

    Arguments (all for a single energy group):
      N         -- highest Legendre order (orders 0..N are solved)
      sigtns1g  -- total cross sections, indexed [n][cell]
      sigsns1g  -- scattering cross sections, indexed [n][...] (passed
                   through to Pn_solve)
      Qnsg      -- source moments, indexed [n][cell]
      psinsg    -- flux moments, indexed [n][cell]; mutated and returned
      x         -- mesh-cell center coordinates
      invdx     -- 1/dx per interval (currently unused here)
      n_ratios  -- [(n/(2n+1), (n+1)/(2n+1)) for n in range(N+1)]
    """
    for n in xrange(N+1):
        # N+1 so we get an n==N in this loop
        # Interpolate the adjacent moments; out-of-range moments (n-1
        # below 0, n+1 above N) are treated as identically zero via a
        # two-point zero interpolant.
        if n > 0:
            nm1_interp = sint.KroghInterpolator(x, psinsg[n - 1])
        else:
            nm1_interp = sint.KroghInterpolator([x[0],x[-1]],[0.0, 0.0])
        if n < N:
            np1_interp = sint.KroghInterpolator(x, psinsg[n + 1])
        else:
            np1_interp = sint.KroghInterpolator([x[0],x[-1]],[0.0, 0.0])
        # .derivative(x) evaluates the first derivative at the mesh points.
        deriv_term = n_ratios[n][0] * nm1_interp.derivative(x) + \
            n_ratios[n][1] * np1_interp.derivative(x)
        # Reflective boundary conditions: no streaming at either edge,
        # so the derivative term is zeroed there.
        deriv_term[0] = 0.0
        deriv_term[-1] = 0.0
        # Now we can pass this to Pn_solve to get our new psin values
        psinsg[n,:] = Pn_solve(sigtns1g[n], sigsns1g[n], Qnsg[n], deriv_term)
    return psinsg[:,:]
def fixedsrc(N, G, sigtns, sigsns, Qns, psins, x, invdx, n_ratios, eps_psi, max_inner):
    """Multigroup fixed-source iteration (work in progress).

    Repeatedly rebuilds the scattering source from the current flux
    moments and re-solves each group with solve1g, then plots the
    order-0 flux of each group against the OpenMC reference.

    NOTE(review): `eps` is initialized but never updated (the update line
    below is commented out), so the loop always runs max_inner+1 passes
    regardless of eps_psi -- confirm intended.
    NOTE(review): the source-update loop uses range(N), skipping the
    highest moment n == N that solve1g does update -- confirm intended.
    NOTE(review): the plotting block reads `plt` and `omcflux` from the
    enclosing module scope (set up in the __main__ block).
    """
    eps = 1.0E4
    iter = 0
    newQns = np.zeros_like(Qns[:,0,:])
    # import pdb; pdb.set_trace()
    while ((eps > eps_psi) and (iter <= max_inner)):
        # Develop scattering source: fixed source plus in-scatter from
        # all groups into (g, n), per spatial cell.
        for g in range(G):
            for n in range(N):
                for ix in range(len(invdx) + 1):
                    newQns[n,ix] = Qns[g,n,ix] + \
                        np.dot(sigsns[:,n,g,ix], psins[:,n,ix])
            # Run fixed src solver for this group
            psins[g,:,:] = solve1g(N, sigtns[g,:,:], sigsns[g,:,:,:], newQns,
                                   psins[g,:,:], x, invdx, n_ratios)
        # eps =   (convergence test never implemented -- see docstring)
        iter += 1
    # Compare the converged scalar flux to the OpenMC tally, one plot per group.
    for g in xrange(G):
        plt.plot(x,psins[g,0,:],label='Pn')
        plt.plot(x,omcflux[g,0,:],label='OMC')
        plt.legend(loc='best')
        plt.show()
        plt.close()
    print "Inner Iterations = " + str(iter)
def init(x, G, N, flux_guess):
    """Precompute mesh spacing, moment ratios, and a normalized initial flux.

    Returns:
      invdx    -- 1/dx for each of the len(x)-1 mesh intervals
      n_ratios -- [(n/(2n+1), (n+1)/(2n+1)) for n in 0..N]
      psins    -- flux moments, shape (G, N+1, len(x)); each (g, n) row
                  of flux_guess is normalized to unit sum
    """
    invdx = np.zeros(len(x) - 1)
    for ix in xrange(len(invdx)):
        invdx[ix] = 1.0 / (x[ix + 1] - x[ix])
    n_ratios = [(float(n)/float(2 * n + 1), float(n + 1)/float(2 * n + 1))
                for n in range(N + 1)]
    psins = np.ones(shape=(G, N + 1, len(x)))
    for g in xrange(G):
        for n in xrange(N + 1):
            # Normalize each moment profile so its entries sum to one.
            psins[g,n,:] = flux_guess[g,n,:] / np.sum(flux_guess[g,n,:])
    return invdx, n_ratios, psins
def get_openmc_mesh(spFile, tid, sid, G, N, extent):
    """Read a (group, mesh-cell) tally from an OpenMC statepoint file.

    Parameters:
      spFile -- path to the statepoint binary
      tid    -- tally id within the statepoint
      sid    -- score id within the tally
      G      -- number of energy groups
      N      -- number of spatial mesh cells
      extent -- physical length spanned by the mesh

    Returns (x, data, keff): cell-center coordinates, a (G, N) array of
    tally means with the group index reversed relative to the statepoint
    ordering, and the combined k-effective as a ufloat (mean, stdev).
    """
    sp = StatePoint(spFile)
    sp.read_results()
    sp.generate_stdev()
    keff = ufloat(sp.k_combined[0], sp.k_combined[1])
    GN = [[0.0 for n in xrange(N)] for g in xrange(G)]
    data = np.array(GN[:][:])
    # Cell-center coordinates of a uniform mesh over [0, extent].
    dx = extent / float(N)
    x = [(float(i) + 0.5) * dx for i in xrange(N)]
    for g in xrange(G):
        # Flip the group index relative to the statepoint's 'energyin' order.
        myg = G - g - 1
        for n in xrange(N):
            # m = tally mean, u = uncertainty (uncertainty is discarded here)
            m, u = sp.get_value(tid, [('mesh',(1,1,n+1)),('energyin',g)], sid)
            data[myg,n] = m
    return x, data[:,:], keff
def get_openmc_mesh_matrix(spFile, tid, sid, G, N, extent):
    """Read a group-to-group (G, G, N) matrix tally from an OpenMC statepoint.

    Same as get_openmc_mesh but for tallies filtered on both incoming
    ('energyin') and outgoing ('energyout') energy groups; both group
    indices are reversed relative to the statepoint ordering.

    Returns (x, data, keff): cell-center coordinates, the (G, G, N)
    array of tally means, and combined k-effective as a ufloat.
    """
    sp = StatePoint(spFile)
    sp.read_results()
    sp.generate_stdev()
    keff = ufloat(sp.k_combined[0], sp.k_combined[1])
    GGN = [[[0.0 for n in xrange(N)] for go in xrange(G)] for g in xrange(G)]
    data = np.array(GGN[:][:][:])
    # Cell-center coordinates of a uniform mesh over [0, extent].
    dx = extent / float(N)
    x = [(float(i) + 0.5) * dx for i in xrange(N)]
    for g in xrange(G):
        myg = G - g - 1
        for go in xrange(G):
            mygo = G - go - 1
            for n in xrange(N):
                m, u = sp.get_value(tid, [('mesh',(1,1,n+1)),('energyin',g),
                                          ('energyout',go)], sid)
                data[myg,mygo,n] = m
    return x, data[:,:,:], keff
def get_omc_mgxs(sp, mesh_tids, mesh_sids, order, G, Nmesh, extent, xstype):
    """Assemble multigroup cross sections from OpenMC tallies.

    Reads flux-yN, scatter-pN, scatter-yN, total-yN and nu-fission
    tallies (tally/score ids supplied in mesh_tids / mesh_sids), builds
    an isotropic, normalized fission source Qns, flux-normalizes the
    cross sections, and selects the total/scattering sets according to
    xstype ('iso', 'consP' or 'yN').

    Returns (omck, fluxyn, x, sigtns, sigsns, nusigfns, Qns) with axes
    0 and 1 swapped so the energy group is the leading index.
    """
    # Get flux-yN moments, one score per Legendre/spherical-harmonic order
    fluxyn = np.zeros(shape=(order, G, Nmesh))
    for l in range(order):
        tid = mesh_tids[0]
        sid = mesh_sids[0][l]
        x, fluxyn[l,:,:], omck = get_openmc_mesh(sp,tid,sid,G,Nmesh,extent)
    # Get scatt-pN (Legendre-moment-weighted scattering matrices)
    scattpn = np.zeros(shape=(order, G, G, Nmesh))
    for l in range(order):
        tid = mesh_tids[1]
        sid = mesh_sids[1][l]
        x, scattpn[l,:, :, :], omck = get_openmc_mesh_matrix(sp,tid,sid,G,Nmesh,extent)
    # Get scatt-yN
    scattyn = np.zeros(shape=(order, G, G, Nmesh))
    for l in range(order):
        tid = mesh_tids[2]
        sid = mesh_sids[2][l]
        x, scattyn[l,:,:,:], omck = get_openmc_mesh_matrix(sp,tid,sid,G,Nmesh,extent)
    # Get total-yN
    totalyn = np.zeros(shape=(order, G, Nmesh))
    for l in range(order):
        tid = mesh_tids[3]
        sid = mesh_sids[3][l]
        x, totalyn[l,:,:], omck = get_openmc_mesh(sp,tid,sid,G,Nmesh,extent)
    # Get nu-fission (right now only doing iso weighting)
    nusigfns = np.zeros(shape=(order, G, G, Nmesh))
    tid = mesh_tids[4]
    sid = mesh_sids[4][0]
    # Now only doing iso weighting so l=0
    x, nusigfns[0,:,:,:], omck = get_openmc_mesh_matrix(sp,tid,sid,G,Nmesh,extent)
    Qns = np.zeros(shape=(order, G, Nmesh))
    # Build the fission source: sum the nu-fission production over incoming
    # groups, keep it isotropic (order 0 only), and normalize to unit total.
    l = 0
    Qsum = 0.0
    for go in range(G):
        for n in range(Nmesh):
            Qns[l,go,n] = 0.0
            for g in range(G):
                Qns[l,go,n] += nusigfns[0,g,go,n]
            Qsum += Qns[l,go,n]
    Qns[l,:,:] /= Qsum
    for l in range(1,order):
        for g in range(G):
            for n in range(Nmesh):
                Qns[l,g,n] = 0.0
    # NOTE(review): totaliso is a *view* of totalyn[0], so dividing it below
    # also rescales totalyn[0]; combined with the `totalyn[l,g,n] /= flux`
    # branch at l == 0, totalyn[0] appears to be divided twice -- confirm.
    totaliso = totalyn[0,:,:]
    for l in range(order):
        for g in range(G):
            for n in range(Nmesh):
                # Normalize by the flux moment of matching order
                flux = fluxyn[l,g,n]
                flux0 = fluxyn[0,g,n]
                if flux0 != 0.0:
                    for go in range(G):
                        scattpn[l,g,go,n] /= flux0
                    if l == 0:
                        totaliso[g,n] /= flux0
                if flux != 0.0:
                    for go in range(G):
                        scattyn[l,g,go,n] /= flux
                        nusigfns[l,g,go,n] /= flux
                    totalyn[l,g,n] /= flux
                    # Apply the consistent-P correction to the scattering
                    # matrix so total stays isotropic
                    if xstype == 'consP':
                        corr = totaliso[g,n] - totalyn[l,g,n]
                        for go in range(G):
                            scattyn[l,g,go,n] += corr
    # Select the (total, scattering) pair the caller asked for.
    if xstype == 'iso':
        sigtns = [totaliso for l in range(order)]
        sigsns = scattpn[:]
    elif xstype == 'consP':
        sigtns = [totaliso for l in range(order)]
        sigsns = scattyn[:]
    elif xstype == 'yN':
        sigtns = totalyn[:]
        sigsns = scattyn[:]
    return omck, np.swapaxes(fluxyn,0,1), x, np.swapaxes(sigtns,0,1), \
        np.swapaxes(sigsns,0,1), np.swapaxes(nusigfns,0,1), np.swapaxes(Qns,0,1)
if __name__ == "__main__":
    # Python 2 driver script: read OpenMC tallies, run the Pn fixed-source
    # sweep, and compare the resulting k-estimate to OpenMC's.
    # NOTE(review): .copy is not called here -- rcdef holds the bound method,
    # not a snapshot of the rc parameters.
    rcdef = plt.rcParams.copy
    newparams = {'savefig.dpi': 100, 'figure.figsize': (24, 13.5),
                 'font.size': 16}
    plt.rcParams.update(newparams)
    # Command line: <xstype in [consP, iso, yN]> <runtype in [FS, k]>
    if len(sys.argv) != 3:
        raise ValueError("Must Provide Cross-Section Type [consP, iso, yN] & " +
                         "Run Type [FS, k]!")
    else:
        xstype = sys.argv[1]
        if xstype not in ["consP", "iso", "yN"]:
            raise ValueError("Invalid Cross-Section Type!")
        runtype = sys.argv[2]
        if runtype not in ["FS", "k"]:
            raise ValueError("Invalid Run Type!")
    # Problem parameters: groups, highest moment order, mesh cells, slab width.
    show = False
    save = True
    G = 4
    N = 1
    Nmesh = 16
    extent = 0.64
    sp = './statepoint.08000.binary'
    eps_psi = 1.0E-6
    max_inner = 2
    # First get the mgxs data and create x/s
    if xstype == 'iso':
        momWgt = False
        trcorr = None
    elif xstype == 'consP':
        momWgt = True
        trcorr = 'consP'
    elif xstype == 'yN':
        momWgt = True
        trcorr = None
    # Tally/score ids inside the statepoint for flux-yN, scatt-pN, scatt-yN,
    # total-yN, and nu-fission respectively (see get_omc_mgxs).
    mesh_tids = [0, 1, 1, 0, 2]
    mesh_sids = [[0,2,6,12], [0,1,2,3], [4,6,10,16], [16,18,22,27], [0]]
    omck, omcflux, x, sigtns, sigsns, nusigfns, Qns = \
        get_omc_mgxs(sp, mesh_tids, mesh_sids, N+1, G, Nmesh, extent, xstype)
    print 'OpenMC k_eff=' + "{:12.5E}".format(omck)
    # Set up some of our data we will use during the sweep
    invdx, n_ratios, psins = init(x, G, N, omcflux)
    if runtype == 'FS':
        fixedsrc(N, G, sigtns, sigsns, Qns, psins, x, invdx, n_ratios, eps_psi, max_inner)
        # Estimate k to compare with the openMC k
        pnk = 0.0
        for g in xrange(G):
            for ix in xrange(Nmesh):
                if Qns[g,0,ix] > 0.0:
                    pnk += np.sum(nusigfns[g,0,:,ix])*psins[g,0,ix] / Qns[g,0,ix]
    else:
        print "k-eigenvalue solver not yet implemented!"
    # NOTE(review): pnk is only assigned on the 'FS' path, so the 'k' path
    # raises NameError on the next line -- confirm intended.
    pcm = 1.0E5*(pnk-omck)/omck
    print "Pn k_eff = " + "{:12.5E}".format(pnk)
    print "pcm = " + "{:12.5E}".format(pcm)
# | StarcoderdataPython |  (dataset concatenation marker; commented out so the file parses)
import sys
import matplotlib.pyplot as plt
import numpy
# Per-subplot series for the grouped bar chart.  Each dict maps a series
# name (byte size per compression scheme) to the list of values, plus the
# x-axis labels.  Rows from the CSV are routed into PLOT1 (before the
# "json" row), PLOT2 (the "json" row itself), or PLOT3 (after it) by the
# parsing loop below.
PLOT1 = {
    'labels': [],
    'uncompressed': [],
    'gzip': [],
    'lz4': [],
    'lzma': [],
}
PLOT2 = {
    'labels': [],
    'uncompressed': [],
    'gzip': [],
    'lz4': [],
    'lzma': [],
}
PLOT3 = {
    'labels': [],
    'uncompressed': [],
    'gzip': [],
    'lz4': [],
    'lzma': [],
}
def unquote(string):
    """Strip the first and last characters (the surrounding quotes)."""
    trimmed = string[1:-1]
    return trimmed
# Parse the benchmark CSV given as argv[1].  Column layout (see `headers`):
# id, format kind, label, uncompressed, gzip, lz4, lzma sizes.
# NOTE(review): prefer `with open(...)` over manual open/close; also the loop
# variables `id` and `gzip` shadow the builtin / stdlib-module names.
fd = open(sys.argv[1], 'r')
lines = fd.readlines()
headers = list(map(unquote, lines[0].strip().split(',')))
# Sentinel: until the "json" row is seen, every row's id is < json_id,
# so it lands in PLOT1; rows after the json row land in PLOT3.
json_id = len(lines)
for line in lines[1:]:
    columns = line.strip().split(',')
    id = int(columns[0])
    # Labels may contain literal "\n" sequences for multi-line tick labels.
    label = unquote(columns[2].replace('\\n', '\n'))
    uncompressed = int(columns[3])
    gzip = int(columns[4])
    lz4 = int(columns[5])
    lzma = int(columns[6])
    if columns[1] == 'json':
        json_id = id
        PLOT2['labels'].append(label)
        PLOT2['uncompressed'].append(uncompressed)
        PLOT2['gzip'].append(gzip)
        PLOT2['lz4'].append(lz4)
        PLOT2['lzma'].append(lzma)
        continue
    if id < json_id:
        PLOT1['labels'].append(label)
        PLOT1['uncompressed'].append(uncompressed)
        PLOT1['gzip'].append(gzip)
        PLOT1['lz4'].append(lz4)
        PLOT1['lzma'].append(lzma)
    else:
        PLOT3['labels'].append(label)
        PLOT3['uncompressed'].append(uncompressed)
        PLOT3['gzip'].append(gzip)
        PLOT3['lz4'].append(lz4)
        PLOT3['lzma'].append(lzma)
fd.close()
# Three side-by-side subplots sharing the y axis; each subplot's width is
# proportional to how many bar groups it holds (the json subplot gets a
# little extra room).
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, gridspec_kw={
    'width_ratios': [ len(PLOT1['labels']), len(PLOT2['labels']) + 0.5, len(PLOT3['labels']) ]
})
ax2.set_xlim(-0.8,0.8)
x1 = numpy.arange(len(PLOT1['labels']))
x2 = numpy.arange(len(PLOT2['labels']))
x3 = numpy.arange(len(PLOT3['labels']))
# Four bars per group (uncompressed / gzip / lz4 / lzma), offset around
# each integer tick; legend labels come from the CSV header row.
width = 0.21
plot1_rects1 = ax1.bar(x1 - width * 1.5, PLOT1['uncompressed'], width, label=headers[3], edgecolor='#763EB2', color='#AB63FA', hatch="oo")
plot1_rects2 = ax1.bar(x1 - width * 0.5, PLOT1['gzip'], width, label=headers[4], edgecolor='#2C8B9B', color='#15D3F3', hatch="//")
plot1_rects3 = ax1.bar(x1 + width * 0.5, PLOT1['lz4'], width, label=headers[5], edgecolor='#984C3F', color='#EF553B', hatch="..")
plot1_rects4 = ax1.bar(x1 + width * 1.5, PLOT1['lzma'], width, label=headers[6], edgecolor='#20896D', color='#00CC96', hatch="---")
plot2_rects1 = ax2.bar(x2 - width * 1.5, PLOT2['uncompressed'], width, label=headers[3], edgecolor='#763EB2', color='#AB63FA', hatch="oo")
plot2_rects2 = ax2.bar(x2 - width * 0.5, PLOT2['gzip'], width, label=headers[4], edgecolor='#2C8B9B', color='#15D3F3', hatch="//")
plot2_rects3 = ax2.bar(x2 + width * 0.5, PLOT2['lz4'], width, label=headers[5], edgecolor='#984C3F', color='#EF553B', hatch="..")
plot2_rects4 = ax2.bar(x2 + width * 1.5, PLOT2['lzma'], width, label=headers[6], edgecolor='#20896D', color='#00CC96', hatch="---")
plot3_rects1 = ax3.bar(x3 - width * 1.5, PLOT3['uncompressed'], width, label=headers[3], edgecolor='#763EB2', color='#AB63FA', hatch="oo")
plot3_rects2 = ax3.bar(x3 - width * 0.5, PLOT3['gzip'], width, label=headers[4], edgecolor='#2C8B9B', color='#15D3F3', hatch="//")
plot3_rects3 = ax3.bar(x3 + width * 0.5, PLOT3['lz4'], width, label=headers[5], edgecolor='#984C3F', color='#EF553B', hatch="..")
plot3_rects4 = ax3.bar(x3 + width * 1.5, PLOT3['lzma'], width, label=headers[6], edgecolor='#20896D', color='#00CC96', hatch="---")
ax1.grid(b=True, axis='both', linewidth=0.1)
ax2.grid(b=True, axis='both', linewidth=0.1)
ax3.grid(b=True, axis='both', linewidth=0.1)
# Subplot captions are pushed below the axis (negative y).
subplot_title_font_size = 10
title_y = -0.97
ax1.set_title('Schema-driven', fontsize=subplot_title_font_size, y=title_y)
ax3.set_title('Schema-less', fontsize=subplot_title_font_size, y=title_y)
# Figure title/subtitle come from argv[2]/argv[3]; spaces are escaped for
# the mathtext bold markup used in suptitle.
title = sys.argv[2].replace(' ', '\\ ')
subtitle = sys.argv[3]
ax1.set_ylabel('Byte Size')
fig.suptitle('$\\bf{' + title + '}$' + '\n' + subtitle, y=0.95)
ax1.set_xticks(x1)
ax2.set_xticks(x2)
ax3.set_xticks(x3)
ax1.set_xticklabels(PLOT1['labels'], ha='center')
ax2.set_xticklabels(PLOT2['labels'], ha='center', fontweight='bold')
ax3.set_xticklabels(PLOT3['labels'], ha='center')
ax1.tick_params(axis="x", rotation=90)
ax2.tick_params(axis="x", rotation=90)
ax3.tick_params(axis="x", rotation=90)
ax2.tick_params(axis="y", left=False, labelleft=False)
ax3.tick_params(axis="y", left=False, labelleft=False)
# One shared legend for the whole figure, taken from the first subplot.
handles, legend_labels = ax1.get_legend_handles_labels()
fig.legend(handles, legend_labels, loc='upper center', ncol=4, bbox_to_anchor=(0.5, 0.88))
# Numeric value labels on top of every bar.
fontsize = 3
padding = 3
ax1.bar_label(plot1_rects1, padding=padding, fontsize=fontsize)
ax1.bar_label(plot1_rects2, padding=padding, fontsize=fontsize)
ax1.bar_label(plot1_rects3, padding=padding, fontsize=fontsize)
ax1.bar_label(plot1_rects4, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects1, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects2, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects3, padding=padding, fontsize=fontsize)
ax2.bar_label(plot2_rects4, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects1, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects2, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects3, padding=padding, fontsize=fontsize)
ax3.bar_label(plot3_rects4, padding=padding, fontsize=fontsize)
# Dashed inner borders between the (zero-gap) subplots.
dash_spacing = 4
ax1.spines['right'].set_linestyle((0,(dash_spacing,dash_spacing)))
ax2.spines['left'].set_linestyle((0,(dash_spacing,dash_spacing)))
ax2.spines['right'].set_linestyle((0,(dash_spacing,dash_spacing)))
ax3.spines['left'].set_linestyle((0,(dash_spacing,dash_spacing)))
fig.tight_layout()
fig.subplots_adjust(wspace=0)
fig.set_figheight(5)
fig.set_figwidth(10)
plt.subplots_adjust(top=0.79, bottom=0.40, left=0.07, right=0.97)
# Output image path comes from argv[4].
plt.savefig(sys.argv[4], dpi=500)
# | StarcoderdataPython |  (dataset concatenation marker; commented out so the file parses)
import os, shutil
from conans import ConanFile, tools
class PcctsConan(ConanFile):
    """Conan recipe that builds the PCCTS (ANTLR 1.x / DLG / Sorcerer)
    toolkit from the upstream source archives and packages its headers
    and command-line tools."""
    name = "pccts"
    version = "1.33MR33"
    # Build-tool package: settings describe the machine doing the build.
    settings = "os_build", "compiler", "arch_build"
    generators = "gcc"
    description = "PCCTS toolkit"
    license = "public domain"
    url = "https://github.com/db4/conan-pccts"

    def build(self):
        # Fetch the platform-appropriate source archive (sha1-verified).
        if self.settings.os_build == "Windows":
            tools.get("http://www.polhode.com/win32.zip", sha1="db910f4397b2f77a58980e9ab3ba2603c42ba50e")
        else:
            tools.get("http://www.polhode.com/pccts133mr.zip", sha1="5b3417efd5f537434b568114bcda853b4975d851")
        if tools.cross_building(self.settings):
            # Cross builds need antlr/dlg binaries that run on the build
            # host: build a native copy first, then point the sorcerer
            # makefile at those host tools.
            shutil.copytree("pccts", "pccts-host")
            self.run("cd pccts-host && make COPT=-DPCCTS_USE_STDARG")
            tools.replace_in_file("pccts/sorcerer/makefile", "$(BIN)/antlr", "../../pccts-host/bin/antlr")
            tools.replace_in_file("pccts/sorcerer/makefile", "$(BIN)/dlg", "../../pccts-host/bin/dlg")
        # Make genmk honor the extra compile flags passed via COPT.
        tools.replace_in_file("pccts/support/genmk/makefile", "$(CC) -o", "$(CC) $(COPT) -o")
        # Inject Conan's generated gcc flags file via gcc's @file syntax.
        self.run("cd pccts && make CC=\"gcc @{0}\" COPT=-DPCCTS_USE_STDARG".format(
            os.path.join(self.build_folder, "conanbuildinfo.gcc")))

    def package(self):
        # PCCTS ships no standalone LICENSE; extract the "SOFTWARE RIGHTS"
        # notice embedded in antlr.h and package it as LICENSE.
        tmp = tools.load("pccts/h/antlr.h")
        license_contents = tmp[tmp.find(" * SOFTWARE RIGHTS"):tmp.find("*/")]
        tools.save("LICENSE", license_contents)
        self.copy("LICENSE")
        self.copy("*.h", dst="include", src="pccts/h")
        self.copy("*.cpp", dst="include", src="pccts/h")
        self.copy("*", dst="bin", src="pccts/bin", excludes="*.txt")

    def package_info(self):
        # Expose the packaged antlr/dlg/sorcerer tools on consumers' PATH.
        self.env_info.path.append(os.path.join(self.package_folder, "bin"))

    def package_id(self):
        self.info.include_build_settings()
        # On Windows arch/compiler are dropped from the package id —
        # presumably the win32 archive is arch/compiler independent; confirm.
        if self.info.settings.os_build == "Windows":
            del self.info.settings.arch_build
            del self.info.settings.compiler
| StarcoderdataPython |
53142 | <gh_stars>10-100
'''A class for managing 3DNet objects.'''
# python
import os
# scipy
from numpy.random import rand, randint
class ThreeDNet:
    """Catalog of 3DNet mesh models (.ply) with per-class physical scales.

    Meshes live under ``self.dir``, one sub-folder per object class.  3DNet
    meshes are normalized to a height of 1 m, so the per-class scale values
    give the real-world object size in meters.
    """

    def __init__(self):
        """Initialize the database location and per-class scale tables."""
        self.dir = "/home/mgualti/Data/3DNet/Cat10_ModelDatabase"
        # 3D Net objects all have height of 1m.
        self.classes = ["bottle", "bottle_test", "bottle_train", "mug", "mug_test", "mug_train", "tetra_pak", "tetra_pak_test", "tetra_pak_train"]
        # Index-aligned with `classes`: fixed scale, and [min, max] range
        # used when a random scale is requested (meters).
        # (`maxSales` spelling kept for backward compatibility.)
        self.scales    = [0.20, 0.20, 0.20, 0.12, 0.12, 0.12, 0.15, 0.15, 0.15]
        self.minScales = [0.10, 0.10, 0.10, 0.06, 0.06, 0.06, 0.10, 0.10, 0.10]
        self.maxSales  = [0.20, 0.20, 0.20, 0.12, 0.12, 0.12, 0.20, 0.20, 0.20]

    def GetObjectByName(self, objectClass, objectName):
        '''Gets the full object file name and scale given the short name and
        object class.  Raises Exception for unknown class or missing file.'''
        if objectClass not in self.classes:
            raise Exception("Unrecognized category.")
        classIndex = self.classes.index(objectClass)
        fullObjectName = self.dir + "/" + objectClass + "/" + objectName + ".ply"
        if not os.path.isfile(fullObjectName):
            raise Exception("Unrecognized object name, {}.".format(fullObjectName))
        return fullObjectName, self.scales[classIndex]

    def GetObjectNames(self, objectClass):
        '''Gets the short names (no .ply extension) of all objects in the
        specified category.'''
        if objectClass not in self.classes:
            raise Exception("Unrecognized category.")
        dirFileNames = os.listdir(self.dir + "/" + objectClass)
        meshFileNames = []
        for name in dirFileNames:
            if len(name) > 3 and name[-4:] == ".ply":
                meshFileNames.append(name[:-4])
        return meshFileNames

    def GetRandomObjectSet(self, objectClass, nObjects, randomScale):
        '''Gets a list of nObjects mesh file paths (with repetition) and
        corresponding scales from the requested object class.'''
        if objectClass not in self.classes:
            raise Exception("Unrecognized category.")
        classIndex = self.classes.index(objectClass)
        dirFileNames = os.listdir(self.dir + "/" + objectClass)
        allMeshFileNames = []
        for name in dirFileNames:
            if len(name) > 3 and name[-4:] == ".ply":
                allMeshFileNames.append(name)
        meshFileNames = []
        meshScales = []
        # BUGFIX: `xrange` is Python 2 (NameError here), and numpy's
        # randint upper bound is exclusive, so randint(len(...)-1) could
        # never select the last mesh and crashed for single-mesh folders.
        for obj in range(nObjects):
            meshIdx = randint(len(allMeshFileNames))
            meshFileNames.append(self.dir + "/" + objectClass + "/" + allMeshFileNames[meshIdx])
            if randomScale:
                minScale = self.minScales[classIndex]
                maxScale = self.maxSales[classIndex]
                meshScales.append(minScale + (maxScale - minScale) * rand())
            else:
                meshScales.append(self.scales[classIndex])
        return meshFileNames, meshScales

    def GetRandomObjectFromClass(self, objectClass, randomScale):
        '''Gets the full file name and scale of a random object in the specified category.

        - Input objectClass: The name of the 3D Net class folder to choose objects from.
        - Input randomScale: If True, scale is selected uniformly at random from the
          pre-specified range for the object class. If False, scale is fixed for the class.
        - Returns meshFileName: Full file name of the object randomly chosen.
        - Returns scale: Size of the object.
        '''
        if objectClass not in self.classes:
            raise Exception("Unrecognized category.")
        classIndex = self.classes.index(objectClass)
        dirFileNames = os.listdir(self.dir + "/" + objectClass)
        meshFileNames = []
        for name in dirFileNames:
            if len(name) > 3 and name[-4:] == ".ply":
                meshFileNames.append(name)
        # BUGFIX: randint's upper bound is exclusive — use len(...), not len(...)-1.
        meshIdx = randint(len(meshFileNames))
        meshFileName = self.dir + "/" + objectClass + "/" + meshFileNames[meshIdx]
        if randomScale:
            minScale = self.minScales[classIndex]
            maxScale = self.maxSales[classIndex]
            scale = minScale + (maxScale - minScale) * rand()
        else:
            scale = self.scales[classIndex]
        return meshFileName, scale
33746 | <gh_stars>0
#!/usr/bin/env python
#
# Script to generate a cap module and subroutines
# from a scheme xml file.
#
from __future__ import print_function
import os
import sys
import getopt
import xml.etree.ElementTree as ET
#################### Main program routine
def main():
    """Entry point: parse CLI options, read the scheme XML, emit the cap."""
    options = parse_args()
    scheme_data = parse_scheme(options['scheme'])
    writer = Cap()
    writer.filename = options['output']
    writer.write(scheme_data)
#################### Parse the command line arguments
def parse_args():
    """Parse command-line options.

    Returns a dict with 'scheme' (input file path), 'output' (path or
    sys.stdout), and optionally 'verbose'.
    """
    parsed = {}
    opts, positional = getopt.getopt(
        sys.argv[1:],
        'hvo:',
        ['help', 'verbose', 'output='])
    for flag, value in opts:
        if flag in ('-h', '--help'):
            lusage()
        elif flag in ('-v', '--verbose'):
            parsed['verbose'] = True
        elif flag in ('-o', '--output'):
            parsed['output'] = value
        else:
            usage()
    # The one positional argument is the scheme XML file; it must exist.
    if not positional:
        eprint("Must specify an input scheme file")
        usage()
    if os.path.isfile(positional[0]):
        parsed['scheme'] = positional[0]
    else:
        eprint("Unable to read input scheme file: {0}".format(positional[0]))
        usage()
    # Default the output destination to stdout.
    if 'output' not in parsed:
        parsed['output'] = sys.stdout
    return parsed
#################### Parse the scheme xml file into a data dictionary
def parse_scheme(filename):
    """Parse a scheme XML file into a data dictionary.

    Shape: {'module': name, 'subs': {sub_name: {'vars': [Var, ...]}}}.
    """
    root = ET.parse(filename).getroot()
    scheme = {'module': root.attrib.get('module'), 'subs': {}}
    for sub_node in root.findall('subroutine'):
        sub_name = sub_node.attrib.get('name')
        variables = []
        for var_node in sub_node.findall('var'):
            var = Var()
            var.standard_name = var_node.find('standard_name').text
            #var.long_name = var_node.find('long_name').text
            var.units = var_node.find('units').text
            var.local_name = var_node.find('local_name').text
            var.type = var_node.find('type').text
            # Var's rank setter converts the integer to a Fortran dimension string.
            var.rank = int(var_node.find('rank').text)
            variables.append(var)
        scheme['subs'][sub_name] = {'vars': variables}
    return scheme
#################### Print a usage statement
def usage():
    """Print a one-line usage message to stderr and exit with status 1."""
    script = os.path.basename(__file__)
    eprint("Usage {0}: [-h] [-v] [-o output.f90] scheme.xml".format(script))
    sys.exit(1)
#################### Print a long usage statement
def lusage():
    # Stub: the long/help usage text has not been written yet, so -h/--help
    # currently prints nothing and option processing simply continues.
    pass
#################### Print a message to STDERR
def eprint(*args, **kwargs):
    # Like print(), but writes to stderr.  NOTE(review): passing file= in
    # kwargs would raise TypeError (duplicate keyword) — confirm no caller does.
    print(*args, file=sys.stderr, **kwargs)
###############################################################################
class Var(object):
    """Metadata for one subroutine argument from a scheme XML <var> element.

    Stores CCPP variable metadata (standard name, units, type, rank, ...)
    and renders the Fortran declaration / ccpp_field_get retrieval lines
    used by the cap generator.
    """

    def __init__(self, **kwargs):
        self._standard_name = None
        self._long_name = None
        self._units = None
        self._local_name = None
        self._type = None
        self._rank = None
        self._container = None
        # Keyword arguments write the backing fields directly, bypassing
        # the property setters (and therefore their validation/conversion).
        for key, value in kwargs.items():
            setattr(self, "_"+key, value)

    @property
    def standard_name(self):
        '''Get the standard (CCPP) name of the variable.'''
        return self._standard_name

    @standard_name.setter
    def standard_name(self, value):
        self._standard_name = value

    @property
    def long_name(self):
        '''Get the long (descriptive) name of the variable.'''
        return self._long_name

    @long_name.setter
    def long_name(self, value):
        self._long_name = value

    @property
    def units(self):
        '''Get the units of the variable.'''
        return self._units

    @units.setter
    def units(self, value):
        self._units = value

    @property
    def local_name(self):
        '''Get the local variable name of the variable.'''
        return self._local_name

    @local_name.setter
    def local_name(self, value):
        self._local_name = value

    @property
    def type(self):
        '''Get the (Fortran) type of the variable.'''
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    @property
    def rank(self):
        '''Get the rank of the variable as a Fortran dimension string.'''
        return self._rank

    @rank.setter
    def rank(self, value):
        # Stores the rank as a Fortran dimension string:
        # 0 -> '' (scalar), 2 -> '(:,:)', etc.
        if not isinstance(value, int):
            raise TypeError('Invalid type for variable property rank, must be integer')
        if (value == 0):
            self._rank = ''
        else:
            self._rank = '('+ ','.join([':'] * value) +')'

    @property
    def intent(self):
        '''Get the intent of the variable.'''
        return self._intent

    @intent.setter
    def intent(self, value):
        if not value in ['none', 'in', 'out', 'inout']:
            raise ValueError('Invalid value {0} for variable property intent'.format(value))
        self._intent = value

    @property
    def optional(self):
        '''Get the optional flag ('T'/'F') of the variable.'''
        return self._optional

    @optional.setter
    def optional(self, value):
        if not value in ['T', 'F']:
            raise ValueError('Invalid value {0} for variable property optional'.format(value))
        self._optional = value

    @property
    def container(self):
        '''Get the container of the variable.'''
        return self._container

    @container.setter
    def container(self, value):
        self._container = value

    def compatible(self, other):
        '''Return True if `other` agrees on every field that must match for
        CCPP compatibility (standard/long name, units, type, rank).'''
        return self.standard_name == other.standard_name \
            and self.long_name == other.long_name \
            and self.units == other.units \
            and self.type == other.type \
            and self.rank == other.rank

    def print_def(self):
        '''Return the Fortran pointer declaration line for the variable.'''
        str = "{s.type}, pointer :: {s.local_name}{s.rank}"
        return str.format(s=self)

    def print_get(self):
        '''Return the ccpp_field_get retrieval code for the variable.'''
        str='''
        call ccpp_field_get(cdata, '{s.standard_name}', {s.local_name}, ierr)
        if (ierr /= 0) then
            call ccpp_error('Unable to retrieve {s.standard_name}')
            return
        end if'''
        return str.format(s=self)

    def print_debug(self):
        '''Return a human-readable dump of the variable metadata.'''
        str='''Contents of {s} (* = mandatory for compatibility):
        standard_name = {s.standard_name} *
        long_name     = {s.long_name} *
        units         = {s.units} *
        local_name    = {s.local_name}
        type          = {s.type} *
        rank          = {s.rank} *
        intent        = {s.intent}
        optional      = {s.optional}
        container     = {s.container}'''
        return str.format(s=self)

    @classmethod
    def from_table(cls, columns, data):
        '''Build a Var from one row of a metadata table.'''
        # DH* - workaround to use the existing table headers
        standard_name = data[columns.index('longname')]
        #standard_name = data[columns.index('standard_name')]
        long_name = data[columns.index('description')]
        #long_name = data[columns.index('long_name')]
        units = data[columns.index('units')]
        local_name = data[columns.index('local var name')]
        #local_name = data[columns.index('local_name')]
        type = data[columns.index('type')]
        rank = data[columns.index('rank')]
        intent = data[columns.index('intent')]
        optional = data[columns.index('optional')]
        # *DH
        return cls(standard_name = standard_name,
                   long_name     = long_name,
                   units         = units,
                   local_name    = local_name,
                   type          = type,
                   rank          = rank,
                   intent        = intent,
                   optional      = optional,
                   )

    def to_xml(self, element):
        '''Populate `element` with this variable's metadata and return it.'''
        element.set('name', self._standard_name)
        sub_element = ET.SubElement(element, 'standard_name')
        sub_element.text = self._standard_name
        sub_element = ET.SubElement(element, 'long_name')
        sub_element.text = self._long_name
        sub_element = ET.SubElement(element, 'units')
        sub_element.text = self._units
        sub_element = ET.SubElement(element, 'local_name')
        sub_element.text = self._local_name
        sub_element = ET.SubElement(element, 'type')
        sub_element.text = self._type
        sub_element = ET.SubElement(element, 'rank')
        sub_element.text = self._rank
        sub_element = ET.SubElement(element, 'intent')
        sub_element.text = self._intent
        sub_element = ET.SubElement(element, 'optional')
        sub_element.text = self._optional
        sub_element = ET.SubElement(element, 'container')
        sub_element.text = self._container
        return element
###############################################################################
class Cap(object):
    """Renders the auto-generated Fortran cap module for a scheme.

    `header` and `sub` are str.format templates; `write` fills them from
    the dict produced by parse_scheme and writes the result to `filename`.
    """

    # Module template. Placeholders: {module}, {subroutines}, {subroutine_caps}.
    header='''
!
! This work (Common Community Physics Package), identified by NOAA, NCAR,
! CU/CIRES, is free of known copyright restrictions and is placed in the
! public domain.
!
! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
! THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
! IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
! CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
!

!>
!! @brief Auto-generated cap module for the {module} scheme
!!
!
module {module}_cap

    use, intrinsic :: iso_c_binding, &
                      only: c_f_pointer, c_ptr
    use :: ccpp_types, &
           only: ccpp_t
    use :: ccpp_fields, &
           only: ccpp_field_get
    use :: ccpp_errors, &
           only: ccpp_error
    use :: {module}, &
           only: {subroutines}
    implicit none

    private
    public :: {subroutine_caps}

    contains
'''

    # Per-subroutine template. Placeholders: {subroutine}, {var_defs},
    # {var_gets}, {args}.
    sub='''
    subroutine {subroutine}_cap(ptr) bind(c)

        type(c_ptr), intent(inout) :: ptr
        type(ccpp_t), pointer      :: cdata
        integer                    :: ierr
{var_defs}

        call c_f_pointer(ptr, cdata)
{var_gets}

        call {subroutine}({args})
    end subroutine {subroutine}_cap
'''

    def __init__(self, **kwargs):
        # Any keyword becomes a private attribute (e.g. filename=...).
        for key, value in kwargs.items():
            setattr(self, "_"+key, value)

    def write(self, data):
        """Render `data` (from parse_scheme) and write the cap source file."""
        # `filename` may be sys.stdout; only real file paths are opened/closed.
        if (self.filename is not sys.stdout):
            f = open(self.filename, 'w')
        else:
            f = sys.stdout
        subs = ','.join(["{0}".format(s) for s in data['subs']])
        sub_caps = ','.join(["{0}_cap".format(s) for s in data['subs']])
        f.write(Cap.header.format(module          = data['module'],
                                  subroutines     = subs,
                                  subroutine_caps = sub_caps))
        # One cap subroutine per scheme subroutine: pointer declarations,
        # ccpp_field_get retrievals, then the call through to the scheme.
        for (k, v) in data['subs'].items():
            var_defs = "\n".join([" "*8 + x.print_def() for x in v['vars']])
            var_gets = "\n".join([x.print_get() for x in v['vars']])
            args = ','.join(["{0}={0}".format(x.local_name) for x in v['vars']])
            f.write(Cap.sub.format(subroutine=k,
                                   var_defs=var_defs,
                                   var_gets=var_gets,
                                   args=args))
        f.write("end module {module}_cap\n".format(module = data['module']))
        if (f is not sys.stdout):
            f.close()

    @property
    def filename(self):
        '''Get the filename to write the output to.'''
        return self._filename

    @filename.setter
    def filename(self, value):
        self._filename = value
###############################################################################
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
124901 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME>, <EMAIL>
# <NAME>, <EMAIL>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""This sample application demonstrates the app class way to use configman."""
# there are two ways to invoke this app:
# .../generic_app.py --admin.application=demo3.Demo3App
# .../demo3.py
# this demo differs from demo2.py in the manner in which it works with
# configman. Rather than being a linear procedure, this app defines a app
# class with five features:
# 1) the app class derives from 'RequiredConfig'. This instruments the class
# with the mechanism for discovery of required configuration parameters.
# 2) closely aligned with point 1, this class defines a class level constant
# called 'required_config' that sets up Namespaces and Options to define
# the configuration requirements.
# 3) the app class defines three class level constants that identify the app.
# 'app_name', 'app_version', 'app_description'
# 4) the app class defines a constructor that accepts a DotDict derivative
# of configuration values.
# 5) the app class defines a parameterless 'main' function that executes the
# business logic of the application
from configman import RequiredConfig, Namespace
# the following class embodies the business logic of the application.
# NOTE: this module is Python 2 (print statements below).
class Demo3App(RequiredConfig):
    """Demo app: applies a selected text transformation (echo, backwards,
    upper) to a configured text value."""
    # Identification constants used by configman's generic_app runner.
    app_name = 'demo3_app'
    app_version = '0.1'
    app_description = __doc__

    # create the definitions for the parameters that are to come from
    # the command line or config file.
    required_config = Namespace()
    required_config.add_option('text', 'Socorro Forever', 'the input value',
                               short_form='t')

    def __init__(self, config):
        # `config` is the configman value mapping; resolve the action name
        # to its static method once, at construction time.
        self.text = config.text
        self.action_fn = Demo3App.action_converter(config.action)

    def main(self):
        # Business logic: run the chosen action on the configured text.
        self.action_fn(self.text)

    @staticmethod
    def echo_action(x):
        print x

    @staticmethod
    def backwards_action(x):
        print x[::-1]

    @staticmethod
    def upper_action(x):
        print x.upper()

    @staticmethod
    def action_converter(action):
        # Map 'echo' -> echo_action, etc.; unknown names raise.
        try:
            return getattr(Demo3App, "%s_action" % action)
        except AttributeError:
            raise Exception("'%s' is not a valid action" % action)
# normally, all the parameters are defined within the class, but
# the methods of this class itself are used in the configuration parameters.
# Python doesn't allow reference to class members until the class is entirely
# defined.  This tag along code injects the final config parameter after
# the class has been fully defined
# [:-7] strips the '_action' suffix to get the user-facing action names.
list_of_actions = [x[:-7] for x in dir(Demo3App) if x.endswith('_action')]
doc_string = 'the action to take [%s]' % ', '.join(list_of_actions)
Demo3App.required_config.add_option('action', 'echo', doc_string,
                                    short_form='a')

# if you'd rather invoke the app directly with its source file, this will
# allow it.
if __name__ == "__main__":
    import generic_app
    generic_app.main(Demo3App)
| StarcoderdataPython |
16816 | # Created by Hansi at 3/16/2020
import os
from algo.data_process.data_preprocessor import data_cleaning_flow
from algo.utils.file_utils import delete_create_folder
def extract_gt_tokens(text):
    """
    Given GT string, extract the GT labels.
    GT string should be formatted as Twitter-Event-Data-2019.

    parameters
    -----------
    :param text: str
    :return: list
        One entry per '|'-separated duplicate definition of the event; each
        entry is a list of labels, each label a list of string elements.
    """
    parsed = []
    for definition in text.split("|"):
        # '[' opens each label; strip newlines and ']' and split the elements.
        label_groups = [
            chunk.replace("\n", "").replace("]", "").split(",")
            for chunk in definition.split("[")
            if chunk
        ]
        parsed.append(label_groups)
    return parsed
def load_gt(folder_path):
    """
    Read GT data into a dictionary formatted as {time-window: labels}.

    parameters
    -----------
    :param folder_path: str
        Path to folder which contains GT data files
    :return: dict
        {file name without extension: list of parsed events (one per line)}
    """
    gt = dict()
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            file_name = os.path.splitext(file)[0]
            # BUGFIX: join with `root` (not folder_path) so files found by
            # os.walk inside sub-folders resolve to their real path; use a
            # context manager so the handle is closed even on parse errors.
            with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
                events = [extract_gt_tokens(line) for line in f]
            gt[file_name] = events
    return gt
def generate_gt_string(tokens):
    """
    Given a list of GT labels corresponding to a single event, convert them
    to a string in the Twitter-Event-Data-2019 GT format, e.g. "[a,b][c]|[d]".

    parameters
    -----------
    :param tokens: list
        Duplicate definitions; each duplicate is a list of labels, each
        label a list of string elements.
    :return: str
    """
    # Rewritten with a parts list + join: the original shadowed the builtin
    # `str` and built the result with quadratic string concatenation.
    parts = []
    for duplicate in tokens:
        # '|' separates duplicate definitions; only emitted after a closed label.
        if parts and parts[-1].endswith("]"):
            parts.append("|")
        for label in duplicate:
            parts.append("[" + ",".join(label) + "]")
    return "".join(parts)
def get_combined_gt(gt):
    """
    Combine the GT labels of multiple events available at a time frame into
    a single event representation (cross-product over duplicate definitions).

    parameters
    -----------
    :param gt: dict
        Dictionary of GT returned by load_gt
    :return: dict
        Dictionary of combined GT
    """
    combined = dict()
    for window, window_events in gt.items():
        # Start from the first event's duplicates and fold in each further
        # event: every combined duplicate is extended by every duplicate of
        # the next event.
        accumulated = window_events[0]
        for next_event in window_events[1:]:
            accumulated = [
                merged + duplicate
                for duplicate in next_event
                for merged in accumulated
            ]
        # Wrapped in a single-element list to stay consistent with the
        # general multi-event evaluation methods.
        combined[window] = [accumulated]
    return combined
def preprocess_gt(input_filepath, output_filepath):
    """
    Preprocess ground truth data in input_file and append results to the
    output_file.

    parameters
    -----------
    :param input_filepath: str (.txt file path)
        Ground truth file formatted as Twitter-Event-Data-2019
    :param output_filepath: str (.txt file path)
    :return:
    """
    # Context managers replace the original unclosed/leaked file handles.
    with open(input_filepath, 'r') as input_file:
        events = [extract_gt_tokens(line) for line in input_file]

    # Clean every element of every label with the shared cleaning pipeline,
    # preserving the events -> duplicates -> labels -> elements nesting.
    cleaned_events = [
        [[[data_cleaning_flow(element) for element in label]
          for label in duplicate]
         for duplicate in event]
        for event in events
    ]

    # Append mode preserved: bulk runs clear the output folder beforehand.
    with open(output_filepath, 'a', encoding='utf-8') as output_file:
        for event in cleaned_events:
            output_file.write(generate_gt_string(event))
            output_file.write("\n")
def preprocess_gt_bulk(input_folder_path, output_folder_path):
    """
    Preprocess ground truth data in all files in input_folder and save to
    the output_folder.

    parameters
    -----------
    :param input_folder_path: str
        Path to folder which contains GT data files
    :param output_folder_path: str
        Path to folder to save preprocessed GT data
    :return:
    """
    # Start from a fresh (empty) output folder.
    delete_create_folder(output_folder_path)
    for root, dirs, files in os.walk(input_folder_path):
        for filename in files:
            preprocess_gt(os.path.join(input_folder_path, filename),
                          os.path.join(output_folder_path, filename))
1729390 | from contextlib import suppress
import json
from io import BytesIO
import re
from sys import argv
import appex
from bs4 import BeautifulSoup
import clipboard
import photos
import PIL.Image
from requests import Session
class Page:
    """An image-containing page of saatchiart.com.

    Raises:
        ValueError: If the url is invalid (invalid url format or doesn't
            point to saatchiart.com).
    """
    # http(s), optional www, saatchiart.com, then any path/query characters.
    _valid_url_regex = re.compile(r'https?:\/\/(www\.)?saatchiart\.com[/?]([-a-zA-Z0-9()@:%_\+.~#?&/\\=]*)')

    @classmethod
    def create_from_env(cls):
        """Create a page from the environment.

        A page will be created from (in order; invalid urls will not be
        accepted):
        1. Sharing the url via the 'share' button (appex).
        2. System argument (first only).
        3. The clipboard.
        4. Asking the user in the console.
        """
        with suppress(ValueError):
            return Page(appex.get_url())
        with suppress(ValueError, IndexError):
            return Page(argv[1])
        with suppress(ValueError):
            return Page(clipboard.get())
        url = input('Enter url:\n> ')
        while True:
            with suppress(ValueError):
                return Page(url)
            # BUGFIX: the re-entered url was read but discarded, so one bad
            # value looped forever; keep the new input for the next attempt.
            url = input('[error] Invalid url\n\n> ')

    @classmethod
    def _is_valid_url(cls, url: str) -> bool:
        """Return a truthy value (the match) if the url is valid, falsy otherwise."""
        return url is not None and Page._valid_url_regex.match(url)

    def __init__(self, url: str, session: Session = None):
        if not Page._is_valid_url(url):
            raise ValueError('invalid url')
        self.url = url
        self.session = Session() if session is None else session

    def _fetch_content(self) -> str:
        """Fetch the (html) content of this page."""
        return self.session.get(self.url).text

    def fetch_image_url(self) -> str:
        """Fetch the url of the artwork image of this page."""
        soup = BeautifulSoup(self._fetch_content(), 'html5lib')
        # this is a script element that purely contains a json dictionary
        json_ = json.loads(soup.find(id='__NEXT_DATA__').text)
        # the image url lives deep inside the page's nested state json
        return (json_['props']['pageProps']['initialState']['page']['data']
                ['artwork']['artworkImage']['imageUrl'])

    def fetch_image(self) -> bytes:
        """Fetch the artwork image of this page."""
        return self.session.get(self.fetch_image_url()).content
def save_image(image: bytes):
    """Save an image (in bytes) to the camera roll."""
    # Decode the raw bytes through PIL first, since photos.save_image is
    # called with a PIL image rather than the byte string.
    photos.save_image(PIL.Image.open(BytesIO(image)))
def main():
    # Resolve the page url from the environment (share sheet / argv /
    # clipboard / prompt), then download its artwork and save it.
    page = Page.create_from_env()
    save_image(page.fetch_image())
if __name__ == '__main__':
    main()
    # Tell the Pythonista share extension we are done (no-op otherwise).
    # NOTE(review): original indentation was lost; this check is assumed to
    # sit inside the __main__ guard — confirm against the source script.
    if appex.is_running_extension():
        appex.finish()
| StarcoderdataPython |
3325460 | <reponame>KamilKamilK/Clothes-sharing
# Generated by Django 3.1.1 on 2020-09-06 11:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see header): narrows Institution.type to a
    # fixed choice set. Generated migrations should not be hand-edited
    # beyond comments.

    dependencies = [
        ('charity', '0003_auto_20200906_1142'),
    ]

    operations = [
        migrations.AlterField(
            model_name='institution',
            name='type',
            field=models.TextField(choices=[('fundacja', 'Fundacja'), ('organizacja', 'Organizacja pozarządowa'), ('zbiórka', 'Zbiórka lokalna')]),
        ),
    ]
| StarcoderdataPython |
3312044 | <reponame>FlaskTeam/FlaskAXF
from flask_script import Manager
from App import create_app
# Build the Flask app via its factory and wrap it in a Flask-Script manager.
app = create_app()
manager = Manager(app)

if __name__ == '__main__':  # BUGFIX: was misspelled '__nain__', so the guard never matched
    manager.run()  # BUGFIX: the method was only referenced, never called
1666993 | #!/usr/local/bin/managed_python3
"""
CLI Application to streamline the creation of PKGInfo files
for printer deployment in Munki.
Created by <NAME> for Syracuse University, 2014 - <EMAIL>
Bug squashing assistance from <NAME>
Much code reused from Printer PKG deploy scripts by:
<NAME>, SUNY Purchase, 2010
<NAME>, 2013
"""
import os, sys, subprocess, shlex, string, re, plistlib
from optparse import Option
# Run relative to the script's own directory.
dirname,filename = os.path.split(os.path.abspath(__file__))
os.chdir(dirname)

## Modify the following to match your environment.
## These are used to set driver dependencies.
## The dictionary is configured as 'Human readable':'munki pkgname'
driverCollection = {'Hewlett Packard':'HewlettPackardPrinterDrivers',\
                    'RICOH':'RicohPrinterDrivers',\
                    'Xerox':'XeroxPrinterDrivers',\
                    'Canon':'CanonPrinterDrivers'}
## Enter an example of your house naming convention
nameExample = "p1-loc.build.lafayette"
## defining variables so fnPrintCurrentState doesn't bark at me
## before they are populated.
printers = []
selectedPrinter = ""
DeviceURI = ""
SelectedPPD = ""
PrinterDriver = ""
OptionList = []
PkgInfoName = ""
def fnPrintCurrentState():
    """prints current state of script to user - showing
    discovered and selected values."""
    # Reads the module-level globals populated by the other fn* steps.
    # NOTE(review): PkgInfoVersion is not initialized at module load —
    # confirm it is always set before PkgInfoName is.
    #os.system('clear')
    print("=============================\n")
    print("Selected Printer :", Printer)
    if (DeviceURI):
        print("Printer URI :", DeviceURI)
        print("Printer Display Name :", PrinterDisplayName)
        print("Printer Make & Model :", PrinterMakeModel)
        print("Printer Location :", PrinterLocation)
        # smb:// URIs are server-hosted queues; anything else is direct.
        if (DeviceURI[:6] == "smb://"):
            print("\nPrinter Connection : Active Directory Queue")
            print("Print Server :", PrintServer)
            print("Printer Queue :", PrinterQueue)
        else:
            print("\nPrinter Connection : Direct")
    if (SelectedPPD):
        print("\nPPD Selected :", SelectedPPD)
    if (PrinterDriver):
        print("Selected Drivers :", PrinterDriver)
    if (OptionList):
        # `x` tracks whether the first option has been printed; later
        # options get the " :" continuation prefix.  NOTE(review): the
        # trailing comma below is a Python-2 line-continuation leftover and
        # has no effect in a Python 3 print() call.
        x = False
        print("\nSelected Options :",)
        for eachoption in OptionList:
            if (x):
                print(" :", eachoption)
            else:
                print(eachoption)
                x = True
    if (PkgInfoName):
        print("\nDeployment Name :", PkgInfoName)
        print("Deployment Version :", PkgInfoVersion)
    print("\n=============================\n")
def fnGetConfiguredPrinter():
    """lists currently installed and configured printers on the system
    where the script is running."""
    # Rebuild the module-level `printers` list from scratch on every call.
    if (len(printers) > 0):
        del printers[:]
    # `lpstat -p` emits lines like "printer <name> ..."; keep the name field.
    printersList = subprocess.run(['/usr/bin/lpstat', '-p'], stdout=subprocess.PIPE).stdout.decode('utf-8').splitlines()
    for printerLine in printersList:
        if printerLine.count(' ') > 1:
            printerElements = printerLine.split()
            if printerElements[0] == 'printer':
                printers.append(printerElements.pop(1))
    # Hand off to the interactive selection menu.
    fnChooseConfiguredPrinter(printers)
def fnChooseConfiguredPrinter(printers):
    """ creates enumerated list of printers for user to select for deployment."""
    #os.system('clear')
    print("\tPlease select the printer you wish to deploy.\n")
    for prnIndex, printer in enumerate(printers):
        print('\t[', prnIndex+1, ']', printer) #enumerate starting with 1
    printerSelection = (int(input('\n\n\tChoice: '))-1) #subtract 1 from response
    ### check input here - TBD ###
    # NOTE(review): non-numeric or out-of-range input raises here (see TBD).
    #os.system('clear')
    fnPrnSelVerify(printers[int(printerSelection)])
def fnPrnSelVerify(selectedPrinter):
    """verify correct printer selection prior to continuing, and
    give user option to reselect. """
    print('\n\tYou selected: ', selectedPrinter, "\n\n")
    x = input("\tIs this correct? [y or n]: ")
    if str(x) == "n":
        # Back to the selection menu.
        fnChooseConfiguredPrinter(printers)
    elif str(x) == "y":
        # Publish the confirmed choice for the rest of the script.
        global Printer
        Printer = selectedPrinter
    else:
        # Anything other than y/n: re-prompt for the same printer.
        #os.system('clear')
        print("I'm sorry, I didn't understand that.")
        fnPrnSelVerify(selectedPrinter)
def fnGetDeviceInformation(SelPrinter):
    """Query CUPS (`lpoptions -p`) for the selected printer and populate the
    module-level globals describing it: DeviceURI, PrinterDisplayName,
    PrinterMakeModel, PrinterLocation, and for smb:// queues also
    PrintServer and PrinterQueue."""
    optionsRawList = subprocess.run(['/usr/bin/lpoptions', '-p', SelPrinter], stdout=subprocess.PIPE).stdout.decode('utf-8').split()
    OptionsList = {}
    # NOTE(review): splitting on whitespace breaks option values that
    # contain spaces (e.g. a multi-word printer-info) — confirm acceptable.
    for ov in optionsRawList:
        if "=" in ov:
            ovDictLoad = ov.split("=")
            OptionsList[ovDictLoad[0]] = str(ovDictLoad[1])
    global DeviceURI
    DeviceURI = OptionsList['device-uri']
    global PrinterDisplayName
    PrinterDisplayName = OptionsList['printer-info']
    global PrinterMakeModel
    PrinterMakeModel = OptionsList['printer-make-and-model']
    global PrinterLocation
    try:
        PrinterLocation = OptionsList['printer-location']
    except KeyError:  # BUGFIX: was a bare except; only a missing key is expected
        PrinterLocation = ""
    if (DeviceURI[:6] == "smb://"):
        # Server-hosted queue: split the URI into print server and share name.
        global PrintServer
        global PrinterQueue
        matched = re.match(r"(smb:\/\/[\w\-\.]+)\/(.+)", DeviceURI)
        PrintServer = matched.group(1)
        PrinterQueue = matched.group(2)
def fnChoosePPD():
    """prompts for search term, and shows matching entries from the CUPS
    driver list (`lpinfo -m`) for PPD selection; stores the choice in the
    module-level global SelectedPPD."""
    fnPrintCurrentState()
    print("What PPD would you like to use with this printer?\n")
    print("Enter a search term for the PPD. Usually, a model number works well when ")
    print("attempting to select a PPD, so if you have an HP M401dne, try 'M401', or ")
    print("for a Canon ImageRunner Advance 6075 copier, try simply '6075'.")
    ppdSearchTerm = input('Search Term: ')
    if (len(ppdSearchTerm) < 1):
        # BUGFIX: return after the recursive retry; previously execution
        # fell through and continued with an empty search term.
        return fnChoosePPD()
    ppdListRaw = subprocess.run(['/usr/sbin/lpinfo', '-m'], stdout=subprocess.PIPE).stdout.decode('utf-8').splitlines()
    print(ppdListRaw)
    # Keep both driver-style ("drv:...") entries and gzipped PPD file paths.
    ppdList = []
    for ppd in ppdListRaw:
        if ppd.startswith('drv'):
            ppdList.append(ppd.split(' ', 1)[0])
        if ppd.startswith('Library'):
            ppdList.append('/' + ppd.split('gz ', 1)[0] + 'gz')
    foundPPDs = []
    for ppd in ppdList:
        if str(ppdSearchTerm) in ppd:
            foundPPDs.append(ppd)
    fnPrintCurrentState()
    if (len(foundPPDs) < 1):
        print("I'm sorry - I couldn't find anything.")
        print("Do you have the drivers installed on this system?")
        junk = input("Press [Enter] to retry.")
        fnChoosePPD()
    else:
        print("I found the following PPDs that might work - enter the number")
        print("of the one you would like to use, or '9999' to search again.")
        for ppdIndex, ppdSuggest in enumerate(foundPPDs):
            print("[", ppdIndex+1, "] -", ppdSuggest)
        print("[ 9999 ] - Search Again\n")
        print("# of found PPDs:", len(foundPPDs))
        ppdSelectIndex = (int(input('Selection: '))-1)
        # BUGFIX: ppdSelectIndex is an int; comparing to the string "9998"
        # could never match, so 'Search Again' always hit the error branch.
        if ppdSelectIndex == 9998:
            print("OK - restarting search")
            fnChoosePPD()
        elif (ppdSelectIndex >= 0) & (ppdSelectIndex < int(len(foundPPDs))):
            global SelectedPPD
            SelectedPPD = foundPPDs[int(ppdSelectIndex)]
            print("You selected ", SelectedPPD)
        else:
            print("!!! ERROR, <NAME> - I don't have that in my list !!!\n\n")
            fnChoosePPD()
def fnSetPackageDependancy(driverCollection):
    """Choose which munki driver set this printer deployment depends on.

    driverCollection maps a display name to a munki package name.  Stores
    the user's choice in the global PrinterDriver ('' means the drivers
    will be installed by hand).
    """
    print("These are the driver sets available in the Munki repository.")
    print("Please select which set is required by this printer, or if")
    print("you will install the drivers by hand.\n")
    printerStyles = sorted(driverCollection)
    driverSets = []
    for listIndex, printerStyle in enumerate(printerStyles):
        driverSets.append(driverCollection[printerStyle])
        print('[', listIndex + 1, '] -', printerStyle)
    print("[9999] - No Dependency, will install by hand.")
    try:
        driverSelect = int(input('Selection: ')) - 1
    except ValueError:
        # Non-numeric entry: force the retry branch below.
        driverSelect = -2
    global PrinterDriver
    if driverSelect == 9998:
        PrinterDriver = ''
    elif 0 <= driverSelect < len(driverSets):
        PrinterDriver = driverSets[driverSelect]
    else:
        print("I'm sorry, I didn't understand that input. Please try again")
        # BUG FIX: the retry call was missing its required argument, which
        # raised a TypeError instead of re-prompting.
        fnSetPackageDependancy(driverCollection)
def fnSetPrinterOptions():
    """Collect lpoptions for the printer and let the user pick which to keep.

    Parses `lpoptions -p <Printer> -l` into 'Option=*Value' strings (the
    value captured is the one the line marks with a leading '*'), lists
    them numbered, and appends the comma-separated selections to the
    global OptionList.  SMB destinations get extra policy options.
    """
    cmdGetOpts = ['lpoptions', '-p', Printer, '-l']
    resultGetOpts = subprocess.run(cmdGetOpts, stdout=subprocess.PIPE).stdout.decode('utf-8')
    resultLinesGetOpts = resultGetOpts.splitlines()
    global OptionList
    OptionList = []
    printerOptionsDict = {}
    printerOptions = []
    # FIX: raw string (avoids invalid escape-sequence warnings) and the
    # pattern is compiled once instead of per line.
    starredValue = re.compile(r'\*\w+')
    for option in resultLinesGetOpts:
        if len(option) > 3:
            optionSet = option.split(':')
            # Key looks like "Name/Description"; keep only the name part.
            oK = optionSet[0].split("/")[0]
            optResult = starredValue.findall(optionSet[1])
            # FIX: skip lines with no '*'-marked value instead of raising
            # IndexError on optResult[0].
            if not optResult:
                continue
            printerOptionsDict[oK] = optResult[0]
    print(printerOptionsDict)
    for printerOption in printerOptionsDict:
        print(printerOption + ' : ', printerOptionsDict[printerOption])
        printerOptions.append(printerOption + "=" + printerOptionsDict[printerOption])
    for number, option in enumerate(printerOptions):
        print("[", number + 1, "] ", option)
    optionSelect = str(input('Please enter the options you would like to include, separated by commas. : '))
    if len(optionSelect) > 0:
        for s in optionSelect.split(','):
            selection = int(s) - 1
            OptionList.append(printerOptions[selection])
    if DeviceURI[:6] == "smb://":
        # SMB queues: require authentication, keep the queue unshared, and
        # abort rather than retry failed jobs.
        OptionList.append('printer-op-policy=authenticated')
        OptionList.append('printer-is-shared=False')
        OptionList.append('printer-error-policy=abort-job')
def fnVerifySelections(retry):
    """Confirm the collected settings, then gather deployment metadata.

    'y' prompts for the PkgInfoName/Description/Version globals (with
    nameExample as the suggested convention); 'n' restarts from printer
    selection; anything else redraws and re-asks with retry=True.
    """
    if retry:
        print("\tI'm sorry, I didn't understand that response.\n\tPlease enter 'y' or 'n'.")
    verified = str(input('\tAre these settings correct? [y/n]: '))
    if verified == 'y':
        # Start prompting for printer name, version and description.
        fnPrintCurrentState()
        global PkgInfoName
        global PkgInfoDescription
        global PkgInfoVersion
        PkgInfoName = str(input('\tPlease enter the deployment name.\n\tExample: ' + nameExample + '\n\t>>> '))
        PkgInfoDescription = str(input('\n\tPlease enter a printer description.\n\t>>> '))
        PkgInfoVersion = str(input('\n\tPlease enter the deployment version: '))
    elif verified == 'n':
        printerSelection = fnGetConfiguredPrinter()
    else:
        fnPrintCurrentState()
        fnVerifySelections(True)
def fnBuildInstallCommand():
    """Assemble the lpadmin printer-creation command from all selections.

    Stores the finished command string in the global InstallCommand.
    """
    global InstallCommand
    quotedDisplayName = '"%s"' % (PrinterDisplayName)
    quotedLocation = '"%s"' % (PrinterLocation)
    quotedPPD = '"%s"' % (SelectedPPD)
    parts = ['/usr/sbin/lpadmin', '-E', '-p', Printer,
             '-L', quotedLocation, '-D', quotedDisplayName]
    # Slightly different options depending on type of PPD:
    # -P takes an installed (gzipped) PPD file, -m a built-in model.
    if SelectedPPD.endswith('.gz'):
        parts += ['-P', quotedPPD]
    if SelectedPPD.endswith('.ppd'):
        parts += ['-m', quotedPPD]
    parts += ['-v', DeviceURI]
    # Append each user-selected lpoptions value as a -o pair.
    for opt in OptionList:
        parts += ['-o', opt]
    # Collapse everything into one command string.
    InstallCommand = ' '.join(parts)
def fnModifyScripts():
    """Render the installcheck/postinstall/uninstall script templates.

    Reads each template from supportFiles/, substitutes the <version>,
    <printername> and <installcommand> tags, writes the result beside this
    script, hands the files to fnMakePkgInfo(), then deletes them.
    """
    tag_map = {
        'installcheck_script.sh': [("<version>", PkgInfoVersion),
                                   ("<printername>", Printer)],
        'postinstall_script.sh': [("<version>", PkgInfoVersion),
                                  ("<printername>", Printer),
                                  ("<installcommand>", InstallCommand)],
        'uninstall_script.sh': [("<printername>", Printer)],
    }
    for script_name, tags in tag_map.items():
        with open(script_name, "wt") as fout:
            with open("supportFiles/" + script_name, "rt") as fin:
                for line in fin:
                    for tag, value in tags:
                        line = line.replace(tag, value)
                    fout.write(line)
    fnMakePkgInfo()  # builds the pkginfo from the scripts just written
    # Delete the temporary script files.
    subprocess.call(['rm', 'installcheck_script.sh',
                     'postinstall_script.sh', 'uninstall_script.sh'])
def fnMakePkgInfo():
    """Build and execute the makepkginfo command, then post-process the plist.

    Uses the scripts generated by fnModifyScripts(), writes the resulting
    pkginfo to <PkgInfoName>-<PkgInfoVersion>.plist, and re-opens the plist
    to add the 'uninstallable' key.
    """
    pkgVers = '--pkgvers=' + PkgInfoVersion
    printerDisplayName = '--displayname=' + PrinterMakeModel + ', ' + PrinterLocation
    printerDescription = '--description=' + PkgInfoDescription
    pkgInfoFileName = PkgInfoName + '-' + PkgInfoVersion + '.plist'
    makePkgInfoCMD = ['/usr/local/munki/makepkginfo', '--unattended_install',
                      '--uninstall_method=uninstall_script',
                      '--name=' + PkgInfoName, printerDisplayName, printerDescription,
                      '--nopkg', '--installcheck_script=installcheck_script.sh',
                      '--postinstall_script=postinstall_script.sh',
                      '--uninstall_script=uninstall_script.sh',
                      '--minimum_os_version=10.6.8', pkgVers,
                      "--category=Printers"]
    # Only add the 'requires' key if PrinterDriver has a value
    if PrinterDriver != '':
        makePkgInfoCMD.append('-r')
        makePkgInfoCMD.append(PrinterDriver)
    pkginfoResult = subprocess.run(makePkgInfoCMD, stdout=subprocess.PIPE).stdout.decode('utf-8')
    # FIX: write the captured output in one call -- the original iterated the
    # string and wrote it one character at a time.
    with open(pkgInfoFileName, "wt") as pkgout:
        pkgout.write(pkginfoResult)
    ### Now we add the uninstallable key
    with open(pkgInfoFileName, 'rb') as fp:
        plistInput = plistlib.load(fp)
    plistInput["uninstallable"] = True
    with open(pkgInfoFileName, 'wb') as fp:
        plistlib.dump(plistInput, fp)
    print("PkgInfo printer deployment file has been created as " + pkgInfoFileName)
###
# Kick the whole damn thing off
###
# Interactive main flow: each step collects one piece of global state
# (printer, device URI, PPD, driver dependency, lpoptions, deployment
# names), with fnPrintCurrentState() redrawing the summary between steps.
printerSelection = fnGetConfiguredPrinter()
fnPrintCurrentState()
fnGetDeviceInformation(Printer)
fnPrintCurrentState()
fnChoosePPD()
fnPrintCurrentState()
fnSetPackageDependancy(driverCollection)
fnPrintCurrentState()
fnSetPrinterOptions()
fnPrintCurrentState()
fnVerifySelections(False)
fnPrintCurrentState()
fnBuildInstallCommand()
# FIX: removed a dataset-extraction artifact that was fused onto this line.
fnModifyScripts()
import fileinput
import functools
import os
import random
import re
import subprocess
import sys
import tempfile
import time
import unittest
from distutils.version import LooseVersion
from threading import Thread
import assertions
from cassandra import ConsistencyLevel
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import SimpleStatement
from ccmlib.node import Node
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_in, assert_true, assert_is_instance
from dtest import CASSANDRA_DIR, DISABLE_VNODES, IGNORE_REQUIRE, debug
class RerunTestException(Exception):
    """
    Raised to signal a likely harmless test problem.  If fixing the test is
    reasonable, that should be preferred.

    Intended for use with the 'flaky' decorator so the test is automatically
    re-run and may pass on a later attempt:

        @flaky(rerun_filter=requires_rerun)  # see requires_rerun below
        def some_flaky_test():
            try:
                # code that occasionally fails for routine/predictable
                # reasons (e.g. timeout)
            except SomeNarrowException:
                raise RerunTestException

    Raise it carefully: avoid overly broad try/except blocks, otherwise real
    (intermittent) bugs could be masked.
    """


def requires_rerun(err, *args):
    """
    Rerun filter for the flaky decorator's rerun_filter argument (see
    RerunTestException).  True iff the failure data records that exception.
    """
    failure_type = err[0]  # err[0] holds the type of the error that occurred
    return failure_type == RerunTestException
def rows_to_list(rows):
    """Convert an iterable of driver rows into a list of plain lists."""
    return [list(row) for row in rows]
def create_c1c2_table(tester, session, read_repair=None):
    """Create the standard two-text-column 'cf' table used by the c1/c2 helpers."""
    columns = {'c1': 'text', 'c2': 'text'}
    tester.create_cf(session, 'cf', columns=columns, read_repair=read_repair)
def insert_c1c2(session, keys=None, n=None, consistency=ConsistencyLevel.QUORUM):
    """Insert the standard c1/c2 row for each key (or for range(n)).

    Exactly one of *keys* / *n* must be provided; raises ValueError otherwise.
    """
    if (keys is None and n is None) or (keys is not None and n is not None):
        raise ValueError("Expected exactly one of 'keys' or 'n' arguments to not be None; "
                         "got keys={keys}, n={n}".format(keys=keys, n=n))
    # BUG FIX: test identity, not truthiness -- with n=0 the original left
    # keys as None and crashed below instead of inserting nothing.
    if n is not None:
        keys = list(range(n))
    statement = session.prepare("INSERT INTO cf (key, c1, c2) VALUES (?, 'value1', 'value2')")
    statement.consistency_level = consistency
    execute_concurrent_with_args(session, statement, [['k{}'.format(k)] for k in keys])
def query_c1c2(session, key, consistency=ConsistencyLevel.QUORUM, tolerate_missing=False, must_be_missing=False):
    """Read back the c1/c2 row for key k<key> and sanity-check its contents.

    tolerate_missing skips the single-row/value check; must_be_missing
    instead asserts the row is absent.
    """
    stmt = SimpleStatement('SELECT c1, c2 FROM cf WHERE key=\'k%d\'' % key,
                           consistency_level=consistency)
    rows = list(session.execute(stmt))
    if not tolerate_missing:
        assertions.assert_length_equal(rows, 1)
        res = rows[0]
        assert_true(len(res) == 2 and res[0] == 'value1' and res[1] == 'value2', res)
    if must_be_missing:
        assertions.assert_length_equal(rows, 0)
# work for cluster started by populate
def new_node(cluster, bootstrap=True, token=None, remote_debug_port='0', data_center=None):
    """Create node<i> on 127.0.0.<i> with the standard port layout and add
    it to the cluster (marked initialized unless bootstrap=True)."""
    i = len(cluster.nodes) + 1
    ip = '127.0.0.%s' % i
    node = Node('node%s' % i,
                cluster,
                bootstrap,
                (ip, 9160),
                (ip, 7000),
                str(7000 + i * 100),
                remote_debug_port,
                token,
                binary_interface=(ip, 9042))
    cluster.add(node, not bootstrap, data_center=data_center)
    return node
def insert_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):
    """Batch-insert columns_count columns for k<key>, starting at page *offset*."""
    start = offset * columns_count
    stop = columns_count * (offset + 1)
    upds = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%06d\'" % (i, key, i)
            for i in xrange(start, stop)]
    batch = 'BEGIN BATCH %s; APPLY BATCH' % '; '.join(upds)
    session.execute(SimpleStatement(batch, consistency_level=consistency))
def query_columns(tester, session, key, columns_count, consistency=ConsistencyLevel.QUORUM, offset=0):
    """Read columns_count columns of k<key> from *offset* and verify each value."""
    cql = ('SELECT c, v FROM cf WHERE key=\'k%s\' AND c >= \'c%06d\' AND c <= \'c%06d\''
           % (key, offset, columns_count + offset - 1))
    res = list(session.execute(SimpleStatement(cql, consistency_level=consistency)))
    assertions.assert_length_equal(res, columns_count)
    for i in xrange(0, columns_count):
        assert_equal(res[i][1], 'value{}'.format(i + offset))
def retry_till_success(fun, *args, **kwargs):
    """Call fun(*args, **kwargs) repeatedly until it succeeds or time runs out.

    Extra keyword arguments (popped before the call):
      timeout            -- seconds to keep retrying (default 60)
      bypassed_exception -- exception type(s) to swallow and retry on
                            (default Exception)
    Re-raises the last exception once the deadline has passed.
    """
    timeout = kwargs.pop('timeout', 60)
    bypassed_exception = kwargs.pop('bypassed_exception', Exception)
    end_time = time.time() + timeout
    while True:
        try:
            return fun(*args, **kwargs)
        except bypassed_exception:
            remaining = end_time - time.time()
            if remaining < 0:
                raise
            # brief pause before the next attempt
            time.sleep(0.25)
# Simple puts and get (on one row), testing both reads by names and by slice,
# with overwrites and flushes between inserts to make sure we hit multiple
# sstables on reads
def putget(cluster, session, cl=ConsistencyLevel.QUORUM):
    """Write one key with overwrite/flush passes, then validate a slice read.

    Reads by name are not exercised: proper IN queries are unsupported here.
    """
    _put_with_overwrite(cluster, session, 1, cl)
    # slice read of the whole row
    stmt = SimpleStatement('SELECT * FROM cf WHERE key=\'k0\'', consistency_level=cl)
    rows = list(session.execute(stmt))
    _validate_row(cluster, rows)
def _put_with_overwrite(cluster, session, nb_keys, cl=ConsistencyLevel.QUORUM):
    """Write nb_keys rows in three overwrite passes with a flush after each.

    Pass 1 writes columns c00..c99 with value i; pass 2 overwrites every
    2nd column with value i*4 at c(i*2); pass 3 overwrites every 5th column
    with value i*20 at c(i*5).  Flushing between passes spreads the row
    across several sstables.
    """
    for count, stride, factor in ((100, 1, 1), (50, 2, 4), (20, 5, 20)):
        for k in xrange(0, nb_keys):
            kvs = ["UPDATE cf SET v=\'value%d\' WHERE key=\'k%s\' AND c=\'c%02d\'"
                   % (i * factor, k, i * stride) for i in xrange(0, count)]
            stmt = SimpleStatement('BEGIN BATCH %s APPLY BATCH' % '; '.join(kvs),
                                   consistency_level=cl)
            session.execute(stmt)
            time.sleep(.01)
        cluster.flush()
def _validate_row(cluster, res):
    """Validate the 100 columns of one key after _put_with_overwrite's passes.

    Every 5th column ends at value i*4, remaining even columns at i*2, and
    the rest keep their original value i.
    """
    assertions.assert_length_equal(res, 100)
    for i in xrange(0, 100):
        if i % 5 == 0:
            expected = i * 4
        elif i % 2 == 0:
            expected = i * 2
        else:
            expected = i
        assert_equal(res[i][2], 'value{}'.format(expected),
                     'for {}, expecting value{}, got {}'.format(i, expected, res[i][2]))
# Simple puts and range gets, with overwrites and flushes between inserts to
# make sure we hit multiple sstables on reads
def range_putget(cluster, session, cl=ConsistencyLevel.QUORUM):
    """Write 100 keys with overwrite passes, then validate all via one range read."""
    keys = 100
    _put_with_overwrite(cluster, session, keys, cl)
    paged_results = session.execute('SELECT * FROM cf LIMIT 10000000')
    rows = list(paged_results)
    assertions.assert_length_equal(rows, keys * 100)
    # Results come back grouped per key, 100 columns each.
    for k in xrange(0, keys):
        chunk, rows = rows[:100], rows[100:]
        _validate_row(cluster, chunk)
def replace_in_file(filepath, search_replacements):
    """
    In-place file search and replace.

    filepath            -- the path of the file to edit
    search_replacements -- list of (regex, replacement) tuples, all applied
                           to every line in order

    Note: this does not work with multi-line regexes.
    """
    for line in fileinput.input(filepath, inplace=True):
        edited = line
        for pattern, substitute in search_replacements:
            edited = re.sub(pattern, substitute, edited)
        sys.stdout.write(edited)
def generate_ssl_stores(base_dir, passphrase='cassandra'):
    """
    Util for generating ssl stores using java keytool -- nondestructive: if the
    keystore already exists this method is a no-op.

    @param base_dir (str) directory where keystore.jks, truststore.jks and ccm_node.cer will be placed
    @param passphrase (Optional[str]) ccm expects 'cassandra' so that is the default, but it can be
    overridden for failure testing
    @return None
    @throws CalledProcessError If the keytool fails during any step

    BUG FIX: the original contained unexpanded <PASSWORD> placeholder tokens
    (syntax errors); every store password now uses the passphrase argument.
    The 'cassandra' default follows the docstring's stated ccm expectation
    (TODO confirm against ccm).
    """
    if os.path.exists(os.path.join(base_dir, 'keystore.jks')):
        debug("keystores already exists - skipping generation of ssl keystores")
        return
    debug("generating keystore.jks in [{0}]".format(base_dir))
    subprocess.check_call(['keytool', '-genkeypair', '-alias', 'ccm_node', '-keyalg', 'RSA', '-validity', '365',
                           '-keystore', os.path.join(base_dir, 'keystore.jks'), '-storepass', passphrase,
                           '-dname', 'cn=Cassandra Node,ou=CCMnode,o=DataStax,c=US', '-keypass', passphrase])
    debug("exporting cert from keystore.jks in [{0}]".format(base_dir))
    subprocess.check_call(['keytool', '-export', '-rfc', '-alias', 'ccm_node',
                           '-keystore', os.path.join(base_dir, 'keystore.jks'),
                           '-file', os.path.join(base_dir, 'ccm_node.cer'), '-storepass', passphrase])
    debug("importing cert into truststore.jks in [{0}]".format(base_dir))
    subprocess.check_call(['keytool', '-import', '-file', os.path.join(base_dir, 'ccm_node.cer'),
                           '-alias', 'ccm_node', '-keystore', os.path.join(base_dir, 'truststore.jks'),
                           '-storepass', passphrase, '-noprompt'])
class since(object):
    """Decorator that skips a test (or test class) outside a version window.

    Compares the running cluster's version against cass_version (lower
    bound) and the optional max_version (upper bound); outside the window
    the test is skipped with a message describing the failed comparison.
    """

    def __init__(self, cass_version, max_version=None):
        self.cass_version = LooseVersion(cass_version)
        self.max_version = LooseVersion(max_version) if max_version is not None else None

    def _skip_msg(self, version):
        # Returns a skip reason, or None when the version is inside the window.
        if version < self.cass_version:
            return "%s < %s" % (version, self.cass_version)
        if self.max_version and version > self.max_version:
            return "%s > %s" % (version, self.max_version)

    def _wrap_setUp(self, cls):
        orig_setUp = cls.setUp

        @functools.wraps(cls.setUp)
        def wrapped_setUp(obj, *args, **kwargs):
            orig_setUp(obj, *args, **kwargs)
            msg = self._skip_msg(LooseVersion(obj.cluster.version()))
            if msg:
                obj.skip(msg)

        cls.setUp = wrapped_setUp
        return cls

    def _wrap_function(self, f):
        @functools.wraps(f)
        def wrapped(obj):
            msg = self._skip_msg(LooseVersion(obj.cluster.version()))
            if msg:
                obj.skip(msg)
            f(obj)
        return wrapped

    def __call__(self, skippable):
        # Classes get their setUp wrapped; plain functions are wrapped directly.
        if isinstance(skippable, type):
            return self._wrap_setUp(skippable)
        return self._wrap_function(skippable)
def no_vnodes():
    """
    Return a decorator skipping the test or test class when vnodes are in use.
    """
    vnodes_in_use = not DISABLE_VNODES
    return unittest.skipIf(vnodes_in_use, 'Test disabled for vnodes')
def require(require_pattern, broken_in=None):
    """
    Skips the decorated class or method, unless the argument
    'require_pattern' is a case-insensitive regex match for the name of the git
    branch in the directory from which Cassandra is running, e.g.

        @require('compaction-fixes')
        def compaction_test(self):
            ...

    runs when the current branch is named 'compaction-fixes' (and
    '.*compaction-fixes.*' would match any branch containing it).

    To accommodate current branch-naming conventions, it also runs if the
    branch matches 'CASSANDRA-{require_pattern}', so `@require(4200)` runs
    on branches named 'CASSANDRA-4200'.

    If neither pattern matches, the test is skipped with unittest.skip
    (or, when broken_in is given, skipped for versions up to broken_in via
    `since`).

    To run decorated methods as if they were not decorated with @require, set
    the environment variable IGNORE_REQUIRE to 'yes' or 'true'. To only run
    methods decorated with require, set IGNORE_REQUIRE to 'yes' or 'true' and
    run `nosetests` with `-a required` (the built-in `attrib` plugin).
    """
    tagging_decorator = attr('required')
    if IGNORE_REQUIRE:
        return tagging_decorator
    require_pattern = str(require_pattern)
    # FIX: removed a dead `git_branch = ''` assignment that was immediately
    # overwritten; cassandra_git_branch() may return None when git is
    # unavailable, which falls through to the plain tag below.
    git_branch = cassandra_git_branch()
    if git_branch:
        git_branch = git_branch.lower()
        run_on_branch_patterns = (require_pattern, 'cassandra-{b}'.format(b=require_pattern))
        # always run the test if the git branch name matches
        if any(re.match(p, git_branch, re.IGNORECASE) for p in run_on_branch_patterns):
            return tagging_decorator
        # if skipping a buggy/flapping test, use since
        elif broken_in:
            def tag_and_skip_after_version(decorated):
                return since('0', broken_in)(tagging_decorator(decorated))
            return tag_and_skip_after_version
        # otherwise, skip with a message
        else:
            def tag_and_skip(decorated):
                return unittest.skip('require ' + str(require_pattern))(tagging_decorator(decorated))
            return tag_and_skip
    else:
        return tagging_decorator
def known_failure(failure_source, jira_url, flaky=False, notes=''):
    """
    Tag a test as a known failure, associating it with a JIRA ticket URL and
    marking it flaky or not.

    Valid failure_source values: 'cassandra', 'test', 'driver', 'systemic'.

    Run selections via the nosetests attrib plugin, e.g.:
        $ nosetests -a known_failure                # only known failures
        $ nosetests -a !known_failure               # everything else
        $ nosetests -a known_failure=cassandra      # cassandra-caused only
        $ nosetests -a known_failure=cassandra -a !known_flaky

    Known limitation: a test may only be tagged once; stacking the decorator
    leaves the outermost instance's known_failure attribute in effect.
    """
    valid_failure_sources = ('cassandra', 'test', 'systemic', 'driver')

    def wrapper(f):
        assert_in(failure_source, valid_failure_sources)
        assert_is_instance(flaky, bool)
        tagged = attr(known_failure=failure_source, jira_url=jira_url)(f)
        if flaky:
            tagged = attr('known_flaky')(tagged)
        return attr(failure_notes=notes)(tagged)

    return wrapper
def cassandra_git_branch(cdir=None):
    '''Return the checked-out git branch name at CASSANDRA_DIR (or cdir).

    Returns None when git cannot be executed; raises RuntimeError when git
    itself reports an error.
    '''
    cdir = CASSANDRA_DIR if cdir is None else cdir
    try:
        p = subprocess.Popen(['git', 'branch'], cwd=cdir,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as e:
        # e.g. git isn't available: give up and return None
        debug('shelling out to git failed: {}'.format(e))
        return
    out, err = p.communicate()
    if p.returncode != 0:
        raise RuntimeError('Git printed error: {err}'.format(err=err))
    # `git branch` marks the current branch with a leading '*'.
    starred = [line for line in out.splitlines() if line.startswith('*')]
    [current_branch_line] = starred
    return current_branch_line[1:].strip()
def safe_mkdtemp():
    """Create a temp directory, returning its path with forward slashes only.

    Backslashes on Windows act as escape characters downstream, so they are
    normalized away.
    """
    return tempfile.mkdtemp().replace('\\', '/')
class InterruptBootstrap(Thread):
    """Thread that kills a node as soon as its log reports 'Prepare completed'."""

    def __init__(self, node):
        super(InterruptBootstrap, self).__init__()
        self.node = node

    def run(self):
        self.node.watch_log_for("Prepare completed")
        self.node.stop(gently=False)
class InterruptCompaction(Thread):
    """
    Kill a node once "Compacting ... <tablename>" appears in its log.

    Requires debug-level logging in 2.1+ and expects debug output in a file
    called "debug.log" unless another name is passed in.  An optional random
    delay of up to *delay* seconds is applied before the kill.
    """

    def __init__(self, node, tablename, filename='debug.log', delay=0):
        super(InterruptCompaction, self).__init__()
        self.node = node
        self.tablename = tablename
        self.filename = filename
        self.delay = delay
        # Mark the log now so only compactions starting after this point match.
        self.mark = node.mark_log(filename=self.filename)

    def run(self):
        pattern = "Compacting(.*)%s" % (self.tablename,)
        self.node.watch_log_for(pattern, from_mark=self.mark, filename=self.filename)
        if self.delay > 0:
            pause = random.uniform(0, self.delay)
            debug("Sleeping for {} seconds".format(pause))
            time.sleep(pause)
        debug("Killing node {}".format(self.node.address()))
        self.node.stop(gently=False)
class KillOnBootstrap(Thread):
    """Thread that kills a node the moment it starts bootstrapping."""

    def __init__(self, node):
        super(KillOnBootstrap, self).__init__()
        self.node = node

    def run(self):
        self.node.watch_log_for("JOINING: Starting to bootstrap")
        self.node.stop(gently=False)
def get_keyspace_metadata(session, keyspace_name):
    """Refresh and return the driver metadata for one keyspace."""
    session.cluster.refresh_keyspace_metadata(keyspace_name)
    return session.cluster.metadata.keyspaces[keyspace_name]
def get_schema_metadata(session):
    """Refresh and return the full cluster schema metadata."""
    session.cluster.refresh_schema_metadata()
    return session.cluster.metadata
def get_table_metadata(session, keyspace_name, table_name):
    """Refresh and return the driver metadata for one table."""
    session.cluster.refresh_table_metadata(keyspace_name, table_name)
    return session.cluster.metadata.keyspaces[keyspace_name].tables[table_name]
| StarcoderdataPython |
31656 | <filename>src/octopus/dispatcher/model/pool.py
####################################################################################################
# @file pool.py
# @package
# @author
# @date 2008/10/29
# @version 0.1
#
# @mainpage
#
####################################################################################################
from weakref import WeakKeyDictionary
from . import models
class PoolShareCreationException(Exception):
    '''Raised when a poolshare submission fails (e.g. a share for the same
    pool/node pair already exists).'''
## A portion of a pool bound to a dispatchTree node
#
class PoolShare(models.Model):

    pool = models.ModelField(False, 'name')
    node = models.ModelField()
    allocatedRN = models.IntegerField()
    maxRN = models.IntegerField()
    userDefinedMaxRN = models.BooleanField()

    # Use PoolShare.UNBOUND as maxRN value to allow full pool usage
    UNBOUND = -1

    ## Constructs a new pool share.
    #
    # @param id the pool share unique identifier. Use None for auto-allocation by the DispatchTree.
    # @param pool the pool from which to draw render nodes
    # @param node the node where the poolshare is affected
    # @param maxRN the max number of render nodes this share is allowed to draw from the pool. Use PoolShare.UNBOUND for unlimited access to the pool.
    #
    def __init__(self, id, pool, node, maxRN):
        self.id = int(id) if id else None
        self.pool = pool
        self.node = node
        self.allocatedRN = 0
        self.maxRN = int(maxRN)
        # Keep track of previous poolshares on the node's "additionnalPoolShares"
        for ps in self.node.poolShares.values():
            self.node.additionnalPoolShares[ps.pool] = ps
        # Check if we already have a poolShare with this pool and node.
        if node in pool.poolShares:
            # Reassign the existing share to the node and undo the bookkeeping
            # above before rejecting the duplicate.
            self.node.poolShares = WeakKeyDictionary()
            self.node.poolShares[self.pool] = self.pool.poolShares[self.node]
            # Remove existing ref of the pool assigned
            del(self.node.additionnalPoolShares[self.pool])
            # BUG FIX: the original passed the format args as extra exception
            # arguments, so the message was never interpolated.
            raise PoolShareCreationException(
                "PoolShare on node %s already exists for pool %s" % (node.name, pool.name))
        # registration
        self.pool.poolShares[self.node] = self
        # remove any previous poolshare on this node
        self.node.poolShares = WeakKeyDictionary()
        self.node.poolShares[self.pool] = self
        # CONSISTENCY FIX: compare against the named UNBOUND constant instead
        # of the magic number -1 (same value).  Any other value at creation
        # time means maxRN was user defined.
        self.userDefinedMaxRN = self.maxRN != self.UNBOUND

    def hasRenderNodesAvailable(self):
        # A capped share that already used all its render nodes has none left.
        if self.maxRN > 0 and self.allocatedRN >= self.maxRN:
            return False
        # Otherwise available iff at least one render node in the pool is free.
        return any(rn.isAvailable() for rn in self.pool.renderNodes)

    def __repr__(self):
        return "PoolShare(id=%r, pool.name=%r, node=%r, maxRN=%r, allocatedRN=%r)" % (
            self.id, self.pool.name if self.pool else None,
            self.node.name, self.maxRN, self.allocatedRN)
## This class represents a Pool.
#
class Pool(models.Model):

    name = models.StringField()
    renderNodes = models.ModelListField(indexField='name')
    poolShares = models.ModelDictField()

    ## Constructs a new Pool.
    # @param id the pool's unique identifier (None allowed)
    # @param name the pool's name
    #
    def __init__(self, id, name):
        self.id = int(id) if id else None
        self.name = name if name else ""
        self.renderNodes = []
        self.poolShares = WeakKeyDictionary()

    def archive(self):
        self.fireDestructionEvent(self)

    ## Adds a render node to the pool (maintains both sides of the association).
    # @param rendernode the rendernode to add
    #
    def addRenderNode(self, rendernode):
        if self not in rendernode.pools:
            rendernode.pools.append(self)
        if rendernode not in self.renderNodes:
            self.renderNodes.append(rendernode)
        self.fireChangeEvent(self, "renderNodes", [], self.renderNodes)

    ## Removes a render node from the pool (maintains both sides of the association).
    # @param rendernode the rendernode to remove
    #
    def removeRenderNode(self, rendernode):
        if self in rendernode.pools:
            rendernode.pools.remove(self)
        if rendernode in self.renderNodes:
            self.renderNodes.remove(rendernode)
        self.fireChangeEvent(self, "renderNodes", [], self.renderNodes)

    ## Replaces the pool's render nodes with the given list.
    # @param renderNodes the list of rendernodes to associate to the pool
    def setRenderNodes(self, renderNodes):
        # Iterate over a copy: removeRenderNode mutates self.renderNodes.
        for existing in self.renderNodes[:]:
            self.removeRenderNode(existing)
        for newcomer in renderNodes:
            self.addRenderNode(newcomer)

    ## Returns a human readable representation of the pool.
    #
    def __str__(self):
        return u"Pool(id=%s, name=%s)" % (repr(self.id), repr(self.name))

    # repr and str were byte-identical in the original; alias one to the other.
    __repr__ = __str__
| StarcoderdataPython |
3374169 | import time
def main(request, response):
    """wptserve handler emitting a chunked response of ten small chunks.

    With ?use_broken_body the chunk delimiters use bare LF instead of CRLF,
    letting clients be tested against malformed chunked encoding.
    """
    use_broken_body = 'use_broken_body' in request.GET
    # Write status/headers manually so we control the chunked framing ourselves.
    response.add_required_headers = False
    response.writer.write_status(200)
    response.writer.write_header("Content-type", "text/html; charset=UTF-8")
    response.writer.write_header("Transfer-encoding", "chunked")
    response.writer.end_headers()
    line_ending = "\n" if use_broken_body else "\r\n"
    for idx in range(10):
        chunk = str(idx)
        response.writer.write("%s%s%s%s" % (len(chunk), line_ending, chunk, line_ending))
        response.writer.flush()
        time.sleep(0.001)
    # A zero-length chunk terminates the body.
    response.writer.write("0\r\n\r\n")
| StarcoderdataPython |
1615704 | <gh_stars>1-10
#!/usr/bin/env python3
import sys
import boto3
import botocore
import subprocess
from typing import List, Tuple
# nginx upstream template: {application} becomes the upstream name and
# {servers} is replaced with one "server ..." line per load balancer.  The
# [::1]:9090 entry is marked `backup`, so nginx only routes to it when the
# listed servers are unavailable.
CONFIG_STR = """upstream {application} {{
{servers}
server [::1]:9090 backup;
}}"""
# Path of the generated upstream include file (written and compared below).
UPSTREAM_LOCATION = "/etc/sgtcodfish/upstream.conf"
def load_bucket_name() -> str:
    """Read the S3 bucket name from its well-known config file."""
    with open("/etc/sgtcodfish/lb-bucket-name", "r") as f:
        return f.read()
def load_applications() -> List[str]:
    """Read the comma-separated application list from its config file."""
    with open("/etc/sgtcodfish/lb-applications", "r") as f:
        contents = f.read()
    return contents.split(",")
def fetch_application_lbs(s3: "botocore.client.BaseClient",
                          bucket_name: str,
                          application: str) -> List[Tuple[str, str]]:
    """Fetch the load-balancer list for *application* from S3.

    The object body is expected to hold one "host port" pair per line.
    Returns (host, port) tuples, sorted by line content.  Re-raises the
    boto ClientError after logging when the fetch fails.
    """
    try:
        response = s3.get_object(Bucket=bucket_name, Key=application)
    except botocore.exceptions.ClientError:
        print("Failed to fetch", application, "from S3")
        raise
    # BUG FIX: StreamingBody.read() returns bytes -- decode before splitting.
    # Blank lines (e.g. from a trailing newline) are skipped; the original
    # raised IndexError on them.
    body = response["Body"].read().decode("utf-8")
    lines = [l.split(" ") for l in sorted(body.split("\n")) if l.strip()]
    return [(lb[0], lb[1]) for lb in lines]
def current_upstream() -> str:
    """Return the currently deployed upstream config, stripped of whitespace."""
    with open(UPSTREAM_LOCATION, "r") as f:
        return f.read().strip()
def write_new_config(config: str) -> None:
    """Write *config* to the nginx upstream include file.

    BUG FIX: the original wrote the UPSTREAM_LOCATION path string into the
    file instead of the generated config.
    """
    with open(UPSTREAM_LOCATION, "w") as f:
        f.write(config)
def reload_nginx():
    """Ask systemd to reload nginx so it picks up the new upstream config."""
    reload_cmd = ["systemctl", "reload", "nginx"]
    subprocess.run(reload_cmd)
def main() -> None:
    """Regenerate the nginx upstream config from S3, write + reload on change."""
    s3 = boto3.client("s3")
    bucket_name = load_bucket_name()
    applications = load_applications()
    total_config = ""
    for application in applications:
        lbs = fetch_application_lbs(s3, bucket_name, application)
        # BUG FIX: the original joined a generator of single-element *lists*
        # (misplaced brackets), which raises TypeError; join must receive one
        # string per server.
        server_lines = "\n".join(
            " server [{}]:{};".format(host, port) for host, port in lbs)
        nginx_config = CONFIG_STR.format(application=application,
                                         servers=server_lines)
        total_config += nginx_config + "\n"
    total_config = total_config.strip()
    upstream = current_upstream()
    if upstream == total_config:
        print("Upstream matches generated config, nothing to do")
        sys.exit(0)
    write_new_config(total_config)
    reload_nginx()
    print("Wrote new config")
if __name__ == "__main__":
    # Top-level guard: report any unhandled failure and exit non-zero so the
    # caller (cron/systemd) notices the run failed.
    try:
        main()
    except Exception as e:
        print("Fatal error:", e)
        sys.exit(1)
| StarcoderdataPython |
4833036 | <reponame>hmn21/positionchange
from __future__ import print_function
import paramiko
from datetime import datetime, timedelta
import functools
import pandas as pd
class AllowAnythingPolicy(paramiko.MissingHostKeyPolicy):
    """Host-key policy that accepts any server key without verification.

    SECURITY NOTE(review): this disables host-key (MITM) protection; only
    acceptable on a trusted network.
    """

    def missing_host_key(self, client, hostname, key):
        # Accept silently.
        return None
# Connection settings for the data backup host.
# NOTE(review): the password is a redacted placeholder from the dataset;
# real credentials must be supplied out of band.
hostname = "192.168.127.12"
password = "<PASSWORD>"
username = "databak"
port = 209
client = paramiko.SSHClient()
client.set_missing_host_key_policy(AllowAnythingPolicy())
client.connect(hostname, port=port, username=username, password=password)
sftp = client.open_sftp()
sftp.chdir('/databak/all/EsunnyData/run')
# Yesterday's date, formatted to match the remote file-name prefix (YYYYMMDD).
date = (datetime.now()- timedelta(1)).strftime("%Y%m%d")
# Manual override example for reprocessing a specific day:
#date = "20160520"
def my_callback(filename, bytes_so_far, bytes_total):
    """Progress hook for sftp.get: print how much of *filename* has transferred."""
    percent = 100. * bytes_so_far / bytes_total
    message = 'Transfer of %r is at %d/%d bytes (%.1f%%)' % (
        filename, bytes_so_far, bytes_total, percent)
    print(message)
# Download yesterday's day-session (08:xx) and night-session (20:xx) tick
# files, remembering their names for the processing steps below.
# NOTE(review): if no matching file exists, `day`/`night` stay unbound and the
# later pd.read_csv calls raise NameError.
for filename in sorted(sftp.listdir()):
    if filename.startswith(date+'_08'):
        callback_for_filename = functools.partial(my_callback, filename)
        sftp.get(filename, filename, callback=callback_for_filename)
        day = filename
    if filename.startswith(date+'_20'):
        callback_for_filename = functools.partial(my_callback, filename)
        sftp.get(filename, filename, callback=callback_for_filename)
        night = filename
# --- Day-session file: label raw columns, duplicate the top-of-book columns
# (bbz1/bb1/ba1/baz1) as a1..a4, reorder them to the front, then relabel.
# NOTE(review): the final .columns assignment deliberately (?) creates
# duplicate column labels (bbz1, bb1, ba1, baz1 each appear twice) — pandas
# allows this for direct .columns assignment. Also note ' volumeAcc' carries
# a leading space throughout; kept as-is since the consumer may expect it.
df = pd.read_csv(day, header=None)
df.columns = ['utcReceiveTime', 'lastPrice', 'volume', ' volumeAcc', 'openInterest', 'bbz10', 'bbz9', 'bbz8', 'bbz7', 'bbz6', 'bbz5', 'bbz4', 'bbz3', 'bbz2', 'bbz1', 'bb10', 'bb9', 'bb8', 'bb7', 'bb6', 'bb5', 'bb4', 'bb3', 'bb2', 'bb1', 'ba1', 'ba2', 'ba3', 'ba4', 'ba5', 'ba6', 'ba7', 'ba8', 'ba9', 'ba10', 'baz1', 'baz2', 'baz3', 'baz4', 'baz5', 'baz6', 'baz7', 'baz8', 'baz9', 'baz10', 'utcQuoteTime']
df = df.assign(a1 = df.bbz1)
df = df.assign(a2 = df.bb1)
df = df.assign(a3 = df.ba1)
df = df.assign(a4 = df.baz1)
df = df[['utcReceiveTime', 'lastPrice', 'volume', ' volumeAcc', 'openInterest', 'a1', 'a2', 'a3', 'a4', 'bbz10', 'bbz9', 'bbz8', 'bbz7', 'bbz6', 'bbz5', 'bbz4', 'bbz3', 'bbz2', 'bbz1', 'bb10', 'bb9', 'bb8', 'bb7', 'bb6', 'bb5', 'bb4', 'bb3', 'bb2', 'bb1', 'ba1', 'ba2', 'ba3', 'ba4', 'ba5', 'ba6', 'ba7', 'ba8', 'ba9', 'ba10', 'baz1', 'baz2', 'baz3', 'baz4', 'baz5', 'baz6', 'baz7', 'baz8', 'baz9', 'baz10', 'utcQuoteTime']]
df.columns = ['utcReceiveTime', 'lastPrice', 'volume', ' volumeAcc', 'openInterest', 'bbz1', 'bb1', 'ba1', 'baz1', 'bbz10', 'bbz9', 'bbz8', 'bbz7', 'bbz6', 'bbz5', 'bbz4', 'bbz3', 'bbz2', 'bbz1', 'bb10', 'bb9', 'bb8', 'bb7', 'bb6', 'bb5', 'bb4', 'bb3', 'bb2', 'bb1', 'ba1', 'ba2', 'ba3', 'ba4', 'ba5', 'ba6', 'ba7', 'ba8', 'ba9', 'ba10', 'baz1', 'baz2', 'baz3', 'baz4', 'baz5', 'baz6', 'baz7', 'baz8', 'baz9', 'baz10', 'utcQuoteTime']
# Write the day-session result to the Windows share (UNC path, hence the
# doubled backslashes). "-D" marks the day session.
path = '\\\\172.30.50.120\\temp\\CME_COMEX\\GC1606\\' + 'GC1606-'+date+'-D.tick'
df.to_csv(path, index=False)
# --- Night-session file: identical processing, written with the "-N" suffix.
df = pd.read_csv(night, header=None)
df.columns = ['utcReceiveTime', 'lastPrice', 'volume', ' volumeAcc', 'openInterest', 'bbz10', 'bbz9', 'bbz8', 'bbz7', 'bbz6', 'bbz5', 'bbz4', 'bbz3', 'bbz2', 'bbz1', 'bb10', 'bb9', 'bb8', 'bb7', 'bb6', 'bb5', 'bb4', 'bb3', 'bb2', 'bb1', 'ba1', 'ba2', 'ba3', 'ba4', 'ba5', 'ba6', 'ba7', 'ba8', 'ba9', 'ba10', 'baz1', 'baz2', 'baz3', 'baz4', 'baz5', 'baz6', 'baz7', 'baz8', 'baz9', 'baz10', 'utcQuoteTime']
df = df.assign(a1 = df.bbz1)
df = df.assign(a2 = df.bb1)
df = df.assign(a3 = df.ba1)
df = df.assign(a4 = df.baz1)
df = df[['utcReceiveTime', 'lastPrice', 'volume', ' volumeAcc', 'openInterest', 'a1', 'a2', 'a3', 'a4', 'bbz10', 'bbz9', 'bbz8', 'bbz7', 'bbz6', 'bbz5', 'bbz4', 'bbz3', 'bbz2', 'bbz1', 'bb10', 'bb9', 'bb8', 'bb7', 'bb6', 'bb5', 'bb4', 'bb3', 'bb2', 'bb1', 'ba1', 'ba2', 'ba3', 'ba4', 'ba5', 'ba6', 'ba7', 'ba8', 'ba9', 'ba10', 'baz1', 'baz2', 'baz3', 'baz4', 'baz5', 'baz6', 'baz7', 'baz8', 'baz9', 'baz10', 'utcQuoteTime']]
df.columns = ['utcReceiveTime', 'lastPrice', 'volume', ' volumeAcc', 'openInterest', 'bbz1', 'bb1', 'ba1', 'baz1', 'bbz10', 'bbz9', 'bbz8', 'bbz7', 'bbz6', 'bbz5', 'bbz4', 'bbz3', 'bbz2', 'bbz1', 'bb10', 'bb9', 'bb8', 'bb7', 'bb6', 'bb5', 'bb4', 'bb3', 'bb2', 'bb1', 'ba1', 'ba2', 'ba3', 'ba4', 'ba5', 'ba6', 'ba7', 'ba8', 'ba9', 'ba10', 'baz1', 'baz2', 'baz3', 'baz4', 'baz5', 'baz6', 'baz7', 'baz8', 'baz9', 'baz10', 'utcQuoteTime']
path = '\\\\172.30.50.120\\temp\\CME_COMEX\\GC1606\\' + 'GC1606-'+date+'-N.tick'
df.to_csv(path, index=False)
4804523 | <reponame>3ll3d00d/pypolarmap
from PyQt5.QtWidgets import QDialog, QDialogButtonBox
from model.preferences import DISPLAY_DB_RANGE, DISPLAY_COLOUR_MAP, DISPLAY_POLAR_360
from ui.display import Ui_displayControlsDialog
class DisplayModel:
    '''
    Parameters to feed into how a chart should be displayed.

    Holds the user-facing display options (decibel range, colour map,
    normalisation, polar range), pushes changes out to the registered result
    charts, and persists the options via the preferences store.
    '''

    def __init__(self, preferences):
        self.__preferences = preferences
        self.__db_range = self.__preferences.get(DISPLAY_DB_RANGE)
        self.__normalised = False
        self.__normalisation_angle = 0
        self.__visible_chart = None
        self.__colour_map = self.__preferences.get(DISPLAY_COLOUR_MAP)
        self.__locked = False
        self.__full_polar_range = self.__preferences.get(DISPLAY_POLAR_360)
        # Charts registered by the owning window; updated in accept().
        self.results_charts = []
        # Set externally once the measurement model exists.
        self.measurement_model = None

    def __repr__(self):
        return self.__class__.__name__

    @property
    def colour_map(self):
        return self.__colour_map

    def accept(self, colour_map, db_range, is_normalised, normalisation_angle, full_polar_range):
        '''
        Applies the new display options as one locked batch so the visible
        chart is redrawn at most once, then persists the changed values.
        '''
        self.lock()
        should_refresh = False
        norm_change = False
        if self.__colour_map != colour_map:
            self.__colour_map = colour_map
            for chart in self.results_charts:
                if hasattr(chart, 'update_colour_map'):
                    chart.update_colour_map(self.__colour_map, draw=False)
            self.__preferences.set(DISPLAY_COLOUR_MAP, colour_map)
            should_refresh = True
        if self.__db_range != db_range:
            self.__db_range = db_range
            for chart in self.results_charts:
                chart.update_decibel_range(draw=False)
            self.__preferences.set(DISPLAY_DB_RANGE, db_range)
            should_refresh = True
        if self.__normalised != is_normalised:
            self.__normalised = is_normalised
            should_refresh = True
            norm_change = True
        if full_polar_range != self.__full_polar_range:
            self.__full_polar_range = full_polar_range
            # Consistency fix: persist this option like the colour map and db
            # range above; previously it was read at startup but never saved.
            self.__preferences.set(DISPLAY_POLAR_360, full_polar_range)
            should_refresh = True
        if normalisation_angle != self.__normalisation_angle:
            self.__normalisation_angle = normalisation_angle
            # An angle change only matters for normalisation if it is enabled.
            if self.__normalised:
                norm_change = True
            should_refresh = True
        if norm_change:
            self.measurement_model.normalisation_changed()
        self.unlock(should_refresh)

    @property
    def db_range(self):
        return self.__db_range

    @property
    def normalised(self):
        return self.__normalised

    @property
    def normalisation_angle(self):
        return self.__normalisation_angle

    @property
    def full_polar_range(self):
        return self.__full_polar_range

    @property
    def visible_chart(self):
        return self.__visible_chart

    @visible_chart.setter
    def visible_chart(self, visible_chart):
        # Hide the previously-visible chart (if it supports hiding) before
        # switching to, and drawing, the new one.
        if self.__visible_chart is not None and getattr(self.__visible_chart, 'hide', None) is not None:
            self.__visible_chart.hide()
        self.__visible_chart = visible_chart
        self.redraw_visible()

    def redraw_visible(self):
        # No-op while locked so a batch of changes triggers one redraw only.
        if self.__visible_chart is not None and self.__locked is not True:
            display = getattr(self.__visible_chart, 'display', None)
            if display is not None and callable(display):
                display()

    def lock(self):
        ''' flags the model as locked so changes do not result in a redraw '''
        self.__locked = True

    def unlock(self, should_redraw):
        ''' flags the model as unlocked and redraws '''
        self.__locked = False
        if should_redraw:
            self.redraw_visible()
class DisplayControlDialog(QDialog, Ui_displayControlsDialog):
    '''
    Display Parameters dialog.

    Presents the DisplayModel options (db range, normalisation, colour map,
    polar range) and pushes changes back into the model when Apply is clicked.
    '''
    def __init__(self, parent, display_model, measurement_model):
        super(DisplayControlDialog, self).__init__(parent)
        self.setupUi(self)
        self.__display_model = display_model
        self.__measurement_model = measurement_model
        # Seed the widgets from the current model state.
        self.yAxisRange.setValue(self.__display_model.db_range)
        self.normaliseCheckBox.setChecked(self.__display_model.normalised)
        # One selectable normalisation angle per measurement (m.h).
        for m in self.__measurement_model:
            self.normalisationAngle.addItem(str(m.h))
        self.__select_combo(self.normalisationAngle, str(self.__display_model.normalisation_angle))
        stored_idx = 0
        # Imported here (not at module level) to avoid a circular import with app.
        from app import cms_by_name
        for idx, (name, cm) in enumerate(cms_by_name.items()):
            self.colourMapSelector.addItem(name)
            if name == self.__display_model.colour_map:
                stored_idx = idx
        self.colourMapSelector.setCurrentIndex(stored_idx)
        self.buttonBox.button(QDialogButtonBox.Apply).clicked.connect(self.apply)
    @staticmethod
    def __select_combo(combo, value):
        # Select *value* in the combo if present; returns the index selected
        # or None when the value is None or not found.
        if value is not None:
            idx = combo.findText(value)
            if idx != -1:
                combo.setCurrentIndex(idx)
                return idx
        return None
    def apply(self):
        ''' Updates the parameters and reanalyses the model. '''
        # Local import to avoid a circular dependency on app.
        from app import wait_cursor
        with wait_cursor():
            self.__display_model.accept(self.colourMapSelector.currentText(),
                                        self.yAxisRange.value(),
                                        self.normaliseCheckBox.isChecked(),
                                        self.normalisationAngle.currentText(),
                                        self.polarRange.isChecked())
| StarcoderdataPython |
1721029 | <reponame>melon-yellow/py-misc
##########################################################################################################################
# Imports
import inspect
# Modules
from .safe import Safe
from .resolvable import Resolvable
from .methods import getcallable
##########################################################################################################################
# Safe Class
class Caller(Resolvable):
    """Resolvable that wraps a callable plus captured arguments.

    The wrapped function is invoked through a configurable "caller" hook
    which receives this object and decides how to apply args/kwargs.
    """
    # Init Safe
    def __init__(self, function, log=True):
        # Check Parameters
        # NOTE(review): rebinding the local name `self` has no effect — this
        # does not abort construction; the instance is still created, just
        # left uninitialised.
        if (not callable(function) or
            not isinstance(log, bool)):
            self = False
            return None
        # Init Resolvable
        function = Safe(function, log)
        super().__init__(function, log)
        # Set Default Caller: apply the stored args/kwargs to the wrapped callable.
        self.call(lambda obj: obj.__callable__(*obj.args, **obj.kwargs))
        # Set Bypass to False
        self.__pass__ = False
        # Set Arguments
        # args/kwargs: bound via setargs(); iargs/ikwargs: raw call-time input.
        self.args = list()
        self.iargs = list()
        self.kwargs = dict()
        self.ikwargs = dict()
    # Set Caller Arguments
    def setargs(self, *args, **kwargs):
        """Bind *args/**kwargs against the wrapped callable's signature.

        Keyword args matching parameter names are kept; positional args are
        kept only while unfilled positional slots remain.
        Returns the (args_list, kwargs_dict) pair that was stored.
        """
        _params = (list(), dict())
        _call = getcallable(self.__callable__)
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # inspect.getfullargspec is the drop-in replacement — TODO confirm
        # the supported interpreter range before changing.
        params = inspect.getargspec(_call)[0]
        # Set Keyword Arguments
        for key in kwargs:
            if key in params:
                _params[1][key] = kwargs[key]
                params.remove(key)
        # Set Var Arguments
        # NOTE(review): args.index(arg) finds the *first* equal element, so
        # duplicate argument values may be mis-counted here.
        for arg in args:
            if args.index(arg) < len(params):
                _params[0].append(arg)
        # Set Arguments
        self.args = _params[0]
        self.kwargs = _params[1]
        return _params
    # Set Caller Function
    def call(self, function):
        """Install *function* as the caller hook.

        The hook must be a callable of exactly one parameter (this object).
        Returns the Safe-wrapped hook, or False if *function* is unsuitable.
        """
        if not callable(function): return False
        params = inspect.getargspec(function)[0]
        if not len(params) == 1: return False
        function = Safe(function, self.__logging__)
        self.__caller__ = function
        return function
    # Call Method
    def __call__(self, *args, **kwargs):
        # Set Input Arguments (exposed to the caller hook as iargs/ikwargs).
        self.iargs = args
        self.ikwargs = kwargs
        # Execute Caller
        return self.__caller__(self)
##########################################################################################################################
| StarcoderdataPython |
70765 | # Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'data_cube_ui.settings')
# Celery application instance; broker/backend come from Django settings.
app = Celery('data_cube_ui',
             broker=settings.CELERY_BROKER_URL,
             backend=settings.CELERY_RESULT_BACKEND)
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings', namespace='CELERY')
# Discover tasks.py modules in every installed Django app.
app.autodiscover_tasks(settings.INSTALLED_APPS)
3804 | <gh_stars>1-10
#!/usr/bin/env python
'''
Author : <NAME>
Email : <EMAIL>
Description : shellfind.py is a Python command line utility which lets you look for shells on a site that the hacker must have uploaded. It considers all the shells available and tries all possibilities via dictionary match.
'''
import socket
import sys
import httplib
from urlparse import urlparse
import time as t
import urllib2
from urllib2 import Request, urlopen, URLError
# ANSI colour escape codes used to colour terminal output.
negative = '\033[91m'  # red: failures / not-found
positive = '\033[32m'  # green: hits
wait = '\033[95m'      # magenta: progress messages
final = '\033[93m'     # yellow: summary
# Module-level counters updated from main().
total_scanned_global=0
found_scanned_global=0
def OpenLog(log_file_name):
    """Return the contents of the log file, or an error message if unreadable.

    Bug fixes: the original placed `f.close()` after `return`, so it never
    ran and the handle leaked — `with` closes it deterministically; the
    error message was also missing spaces around the file name.
    """
    try:
        with open(log_file_name, 'r') as f:
            return f.read()
    except IOError:
        return "File " + log_file_name + " does not exist."
def main():
    """Scan a site for uploaded web shells by trying every dictionary path.

    Python 2 code (print statements, raw_input, urllib2). Results are
    appended to LOG/<hostname>.log and summarised at the end.
    """
    socket.setdefaulttimeout(10)
    print wait+"\n## ------ Welcome to Shell Finder Utility - Developed by <NAME> (http://bhavyanshu.github.io) | Apache License V2.0 | Project Source (https://github.com/bhavyanshu/Shell-Finder) ------ ##"
    website_url = raw_input("\n\nEnter URL to scan ([eg, http://sitename.com or https://sitename.com/subdir ] | Do not add slash at the end of URL) : ")
    parse_url=urlparse(website_url)
    # One log file per target host, e.g. LOG/sitename.com.log
    log_file_name = "LOG/"+parse_url.netloc+".log"
    global total_scanned_global
    global found_scanned_global
    try:
        try:
            # Truncate/create the log file up front.
            # NOTE(review): the handle `create` is never closed or used again.
            create=open(log_file_name,"w")
        except:
            print negative+"\nError generating log file. Please check directory access permissions."
        print wait+"\nCreating a persistent connection to site "+website_url
        # NOTE(review): `conn` is built but never used; the bare urlopen below
        # is what actually checks reachability.
        conn = urllib2.Request(website_url)
        urllib2.urlopen(website_url)
        print positive+"Connected! Begining to scan for shells.."
    except (urllib2.HTTPError) as Exit:
        print negative+"\nEither the server is down or you are not connected to the internet."
        exit()
    try:
        dictionary = open("dictionary","r")
    except(IOError):
        print negative+"Dictionary file not found_scanned_global. Please download the latest dictionary from github link"
        exit()
    keywords = dictionary.readlines()
    # Try every candidate shell path from the dictionary against the site.
    for keys in keywords:
        keys=keys.replace("\n","") #To replace newline with empty
        New_URL = website_url+"/"+keys
        print wait+">>>> "+New_URL
        req=Request(New_URL)
        try:
            response = urlopen(req)
        except URLError, e:
            if hasattr(e,'reason'):
                print negative+"Not found"
                total_scanned_global = total_scanned_global+1
            elif hasattr(e,'code'):
                print negative+"Not found "
                total_scanned_global = total_scanned_global+1
        else:
            # Request succeeded: record the URL as a possible shell.
            try:
                log_file=open(log_file_name,"a+") #Appending to it
            except(IOError):
                print negative+"Failed to create log file. Check dir permissions."
            found_scanned_url=New_URL
            print positive+"Possible shell found at ",found_scanned_url
            log_file.writelines(found_scanned_url+"\n")
            found_scanned_global=found_scanned_global+1
            total_scanned_global=total_scanned_global+1
            log_file.close()
    # Summary: counts plus the collected log contents.
    print "\nTotal tries : ", total_scanned_global
    print positive+"\nPossible shells: ",found_scanned_global
    print final+"\nFollowing are the links to possible shells "
    print OpenLog(log_file_name)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1724979 | <gh_stars>1-10
from datetime import datetime, timedelta, date
from Database import *;
from CommonFunctions import *;
# Ending semicolons intentionally left out because the sanitize function removes all semicolons
# SQL templates filled in with %-interpolation at the call sites.
# NOTE(review): values are interpolated directly into the SQL text (no
# parameterized queries); safety relies entirely on the input validation and
# Sanitize() calls in the functions below.
SELECT_QUERY = "SELECT %s FROM %s WHERE %s"
SELECT_ALL_QUERY = "SELECT %s FROM %s"
NEW_EMPLOYEE_INSERT = "INSERT INTO EMPLOYEE (ssn, Fname, Minit, Lname, startDate, supervisor) VALUES (%s)"
SELECT_EMPL_ROUTE_QUERY = "SELECT %s FROM %s WHERE %s IN (SELECT %s FROM %s WHERE %s)"
# NOTE(review): "WHERE % (" below looks like a typo (bare % and missing
# operator); this template is unused in the visible code — confirm before use.
SELECT_EMPL_SCHEDULE = "SELECT %s FROM %s WHERE % (SELECT %s FROM %s WHERE %s)"
# Upsert: insert the address row or update it if one exists for the employee.
ADDRESS_INSERT_UPDATE = "INSERT INTO ADDRESS (E_ssn, street, city, state, zip) VALUES (%s, '%s', '%s', '%s', '%s') " \
                        "ON DUPLICATE KEY UPDATE street = '%s', city = '%s', state = '%s', zip = '%s';"
EMPLOYEE_INFORMATION_QUERY = "SELECT * FROM EMPLOYEE AS E LEFT JOIN ADDRESS AS A ON E.ssn = A.E_ssn WHERE E.ssn = %s";
# Semicolon left out intentionally
# I have an employee on a bus and they want to know what their stop order is
VISIT_QUERY = "SELECT v.arrivalTime, v.S_stopID FROM VISITS v, ROUTE r WHERE v.R_routeID = r.routeID AND r.routeID = %s" \
              " AND r.routeName = '%s' AND v.typeOfDay = '%s' ORDER BY v.arrivalTime ASC";
# Gets all stops on a bus's route on a given type of day
# Use Case: I want to know at what time the 535 Lynnwood will be at each of its stops on March 15 2020
def CheckSchedule():
    """Prompt for route number, route name and day, then print the stop times.

    Returns True on success, False on input/query failure. Depends on the
    interactive helpers (GetDay, EndProgram) and DB helpers (SubmitQuery,
    DisplayClean) defined elsewhere in this module.
    """
    SeparatingLine();
    # Dictionary (map) to receive multiple inputs from user
    reqDict = {"Route Number (3 digits)": "",
               "Route Name": "",
               "Day": ""
               };
    # Day is collected first via its own helper, so its loop below is skipped.
    reqDict["Day"] = GetDay();
    for key in reqDict:
        while (reqDict[key] == ""):
            print("Please enter the " + key + " \n" +
                  "Or enter 'X' at any time to exit the program.");
            entry = input(key + ": ");
            entry = entry.strip();
            if (entry.upper() == "X"):
                EndProgram();
            # Ensure Route number is valid
            if (key == "Route Number (3 digits)"):
                if (len(entry) == 3):
                    try:
                        int(entry);
                    except ValueError:
                        entry = "";
                        print("Invalid Input: Route Number must be exactly 3 numbers long.");
                else:
                    entry = "";
            elif (key == "Route Name"):
                # NOTE(review): the route name is interpolated into SQL below
                # without Sanitize() — potential injection vector.
                if (len(entry) == 0 or len(entry) > 20):
                    entry = "";
            reqDict[key] = entry;
    # Relies on dict insertion order: [route number, route name, day].
    allValues = [];
    for val in reqDict.values():
        allValues.append(val);
    print(allValues);
    if (len(allValues) != 3):
        print("Something went wrong with inputting data.");
        return False;
    query = VISIT_QUERY % (allValues[0], allValues[1], allValues[2]);
    print(query);
    result = SubmitQuery(query);
    # global variable accessible anywhere to get Employee's name
    if (result is False):
        print("Error Submitting Query.");
        return False;
    # If the result is empty that means the system returned an empty set
    elif (len(result) == 0):
        print("Empty Set: No values exist for request.")
        return False;
    else:
        print("Arrival Time | Stop ID");
        for line in result:
            print(DisplayClean(line));
        return True;
# Validates an employee is valid
def ValidateEmployee():
    """Prompt for a 9-digit SSN and look the employee up in the database.

    Returns an (ssn, name) tuple on success, or None when no single matching
    employee exists.
    """
    ssn = "";
    valid = False;
    while (not valid):
        print("Please enter the Social Security Number (9 digits) of the Employee you want to access. \n"
              "Or enter X to exit the program.");
        ssn = input("Enter SSN: ");
        if (ssn.upper() == "X"):
            EndProgram();
            break;
        elif (len(ssn) != 9):
            print("SSN must be 9 digits long.");
            continue;
        try:
            int(ssn);
            valid = True;
        except ValueError:
            print("SSN must be a number.");
            continue;
    query = (SELECT_QUERY % ("Fname, Lname", "EMPLOYEE", "ssn = " + ssn));
    result = SubmitQuery(query);
    # global variable accessible anywhere to get Employee's name
    if (result is None or len(result) != 1):
        print("Could not locate employee with given SSN");
        return None;
    else:
        # Concatenate the single result row into the employee's display name.
        # NOTE(review): assumes SubmitQuery yields string pieces here —
        # confirm the row format returned by the Database helper.
        name = ""
        for n in result:
            name += n;
        emp = (ssn, name)
        return emp;
# Sets the current employee
def SetCurrentEmployee():
    """Validate an employee and store their ssn/name in module globals.

    Returns True when both globals were populated, False otherwise.
    """
    SeparatingLine();
    emp = ValidateEmployee();
    if (emp is None or len(emp) < 2):
        return False;
    # The current employee is shared with other menu actions via globals.
    global ssn;
    ssn = DisplayClean(emp[0]);
    global name;
    name = DisplayClean(emp[1]);
    if (ssn == "" or name == ""):
        print("Error: SSN or Name is empty.")
        return False;
    return True;
# Options for the employee
def EmployeeQueries():
    """Menu of read-only queries (info/buses/routes) for one employee.

    Returns True on success (including an empty result set), None on a query
    error. Uses the module-global `ssn` set by SetCurrentEmployee().
    """
    if (not SetCurrentEmployee()):
        print("Error trying to get employee. Please contact your system administrator.");
    SeparatingLine();
    choice = "";
    while (choice == ""):
        print("Get Employee's information: I \n"
              "Get the buses that the Employee is assigned to: B \n"
              "Get the routes and times that the Employee is driving: R \n"
              "End program: X");
        choice = input("Please enter a command: ").upper();
    query = "";
    if (choice == "I"):
        # Join the employee table and the address table to get the full employee's information
        # print(ssn + "\n");
        # query = (SELECT_QUERY % ("*", "EMPLOYEE AS E LEFT JOIN ADDRESS AS A ON E.ssn = A.E_ssn", "E_ssn = " + ssn));
        query = EMPLOYEE_INFORMATION_QUERY % ssn;
        # query = SELECT_ALL_QUERY % ("*", "EMPLOYEE");
        # employeeAddressQuery = "SELECT * FROM EMPLOYEE AS E, ADDRESS AS A WHERE E.ssn = " + ssn + " AND A.E_ssn = " + ssn;
        # addressQuery = SELECT_QUERY % ("*", "ADDRESS", "E_ssn = " + ssn);
    elif (choice == "B"):
        # Gets all busses that the employee is assigned to
        query = SELECT_QUERY % ("busID", "BUS", "E_driver = " + ssn);
    elif (choice == "R"):
        # Gets all routes that the employee is assigned to
        query = SELECT_EMPL_ROUTE_QUERY % (
            "R_routeID, R_routeName, timeStart, timeEnd", "SCHEDULED", "B_busID", "busID", "BUS", "E_driver = " + ssn);
    elif (choice == "X"):
        EndProgram();
    # No valid input, restart the prompts
    else:
        EmployeeQueries();
    result = SubmitQuery(query);
    if (result is None):
        print("Error Submitting Query.");
        return None;
    # If the result is empty that means the system returned an empty set
    elif (len(result) == 0):
        print("Empty Set: No values exist for request.")
        return True;
    else:
        for line in result:
            print(DisplayClean(line));
        return True;
# Sets a current employee's address. Employee must exist beforehand.
def SetAddress(E_ssn):
    """Prompt for an address and upsert it for employee *E_ssn*.

    Returns True on success, False when the insert fails.
    """
    SeparatingLine();
    US_States = set(["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA", "HI", "ID", "IL", "IN", "IA", "KS",
                     "KY", "LA", "ME", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", "NM", "NY",
                     "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV",
                     "WI", "WY"]);
    # Dictionary (map) for employee's address
    empDict = {"Street": "",
               "City": "",
               "State": "",
               "Zip code": ""}
    for key in empDict:
        while (empDict.get(key) == ""):
            print("Please fill out the Employee's address. \n"
                  "Enter 'X' at any time to exit the program.");
            entry = "";
            entry = input("Please enter employee's " + key + ": ").upper();
            entry = entry.strip();
            if (entry.upper() == "X"):
                EndProgram();
            if (entry == ""):
                print(key + " cannot be NULL\n");
            # NOTE(review): street/city/state/zip are captured here *before*
            # Sanitize() runs below, and the SQL query is built from these
            # locals rather than empDict — so the sanitization is bypassed.
            # Potential SQL-injection vector; the query should use the
            # sanitized empDict values instead.
            if (key == "Street"):
                street = entry;
            if (key == "City"):
                city = entry;
            if (key == "State"):
                if entry not in US_States:
                    print("Error: Invalid U.S State");
                    entry = "";
                    continue;
                else:
                    state = entry;
            if (key == "Zip code"):
                if (len(entry) == 5):
                    try:
                        int(entry);
                        zip = entry;
                    except ValueError:
                        print("Zip code must be a valid 5-digit number");
                        entry = "";
                        continue;
                else:
                    print(key + " must be a valid 5-digit number");
                    entry = "";
                    continue;
            entry = Sanitize(entry);
            empDict[key] = entry;
    query = ADDRESS_INSERT_UPDATE % (E_ssn, street, city, state, zip, street, city, state, zip);
    result = SubmitInsert(query);
    if (result is False):
        print("Error Submitting Insert.");
        return False;
    print("Employee address Successfully added!");
    return True;
# Creates a new employee and optionally, an address for that employee
def NewEmployee():
    """Interactively collect a new employee's fields and insert the row.

    Returns True on success, False when the insert fails. Optionally chains
    into SetAddress() for the newly-created employee.
    """
    SeparatingLine();
    # Dictionary (map) for employee's information
    empDict = {"Social Security Number": "",
               "First Name": "",
               "Middle Initial": "",
               "Last Name": "",
               "Start Date (YYYY-MM-DD)": "",
               "Supervisor SSN": ""}
    # Iterate over the map and enter values
    for key in empDict:
        # While the inserted value isn't valid keep prompting user
        while (empDict.get(key) == ""):
            print("Please fill out the Employee's information. If the value is NULL please write 'NULL'. \n"
                  "Enter 'X' at any time to exit the program.");
            entry = input("Please enter the new Employee's " + key + " :");
            entry = entry.strip();
            # Exit program
            if (entry.upper() == "X"):
                EndProgram();
            # Ensure the start date follows proper date-time format
            if (key == "Start Date (YYYY-MM-DD)" and entry != ""):
                try:
                    dateObj = datetime.strptime(entry, '%Y-%m-%d').date();
                except ValueError:
                    print("Error: Date must be in YYYY-MM-DD format.");
                    entry = "";
                    continue;
                # Entered start date has to be less than 31 days from today (either past or future)
                if (dateObj > date.today() + timedelta(days=31) or
                    dateObj < date.today() - timedelta(days=31)):
                    print("Date must be within 31 days from today");
                    entry = "";
            # Make sure the SSN and Super-SSN can be integers.
            # If expecting SSN and SuperSSN and not null
            if (key == "Social Security Number" or (key == "Supervisor SSN" and entry.upper() != "NULL")):
                # Make sure SSN is 9 long
                if (len(entry) == 9):
                    # Make sure SSN can be int
                    try:
                        int(entry);
                        if (key == "Social Security Number"):
                            ssn = entry;
                    except ValueError:
                        entry = "";
                    # Supervisor SSN can not be same as Employee SSN
                    if (entry == empDict["Social Security Number"]):
                        print("Supervisor SSN can not be the same as the new Employee's SSN. \n"
                              "If there is no Supervisor please enter: NULL");
                        entry = "";
                else:
                    entry = "";
            # If the middle initial is more than 1 character and it's not NULL
            if (key == "Middle Initial" and len(entry) > 1):
                if (entry.upper() == "NULL"):
                    print("Middle initial null.");
                    entry = "NULL";
                else:
                    print("Middle Initial invalid.");
                    entry = ""
            # NOTE(review): this elif repeats the exact condition above and is
            # therefore unreachable dead code.
            elif (key == "Middle Initial" and len(entry) > 1):
                print("Middle initial null.");
                entry = "NULL";
            # NOTE(review): "<NAME>" is a dataset redaction artifact —
            # almost certainly "Last Name" originally; as written the Last
            # Name field skips this validation entirely. Restore the literal
            # before shipping.
            if ((key == "First Name" or key == "<NAME>")):
                if (entry.upper() == "NULL" or entry.upper() == "" or len(entry) <= 1):
                    print("Invalid value. Cannot be NULL and more than 1")
                    entry = "";
            entry = Sanitize(entry);
            empDict[key] = entry;
    # Populate a string with the new query
    newValues = "";
    for key in empDict:
        # If SSN or NULL don't put single quote around it
        if (key == "Social Security Number" or key == "Supervisor SSN" or empDict[key] == "NULL"):
            newValues += empDict[key] + ", ";
        # If it's null don't add to string
        elif (empDict[key] != ""):
            newValues += "'" + empDict[key] + "', ";
    # Remove trailing comma
    if (newValues.endswith(', ')):
        newValues = newValues[:-2];
    # Move all data from map to list in order to insert them into
    query = (NEW_EMPLOYEE_INSERT % (newValues));
    result = SubmitInsert(query);
    # global variable accessible anywhere to get Employee's name
    if (result is False):
        print("Error Submitting Query.");
        return False;
    print("New Employee Successfully added!");
    nextChoice = input("Do you want to add address? (Y/N) ");
    if (nextChoice.upper() == "Y"):
        SetAddress(ssn);
    return True;
# Update an employee's address: creates it if missing, otherwise updates it.
def UpdateAddress():
    """Prompt for a 9-digit employee SSN, then hand off to SetAddress()."""
    ssn = ""
    while True:
        SeparatingLine()
        print("Please enter the Social Security Number (9 digits) of the Employee you want to access. \n"
              "Or enter X to exit the program.")
        ssn = input("Enter SSN: ").strip()
        if ssn.upper() == "X":
            EndProgram()
            break
        if len(ssn) != 9:
            print("SSN must be 9 digits long.")
            continue
        try:
            int(ssn)
        except ValueError:
            print("SSN must be a number.")
            continue
        break
    SetAddress(ssn)
# Actions for employees
def EmployeeInterface():
    """Top-level menu loop dispatching to the employee actions.

    Loops until the user enters X, then calls EndProgram().
    """
    SeparatingLine();
    selection = "";
    while (selection != "X"):
        SeparatingLine();
        print("Please select from one of the following options: ")
        print("Add a new Employee: N \n"
              "Access an Employee's information: I \n"
              "Check route schedule for a given day: C \n"
              "Update Employee address: A \n"
              "Exit Program: X");
        selection = input("Please enter a command: ")
        selection = selection.upper();
        if (selection == "N"):
            NewEmployee();
        elif (selection == "I"):
            EmployeeQueries();
        elif (selection == "C"):
            CheckSchedule();
        elif (selection == "A"):
            UpdateAddress();
    EndProgram();
| StarcoderdataPython |
1646281 | """Urls for Zinnia random entries"""
from django.conf.urls import url
from django.conf.urls import patterns
from zinnia.views.random import EntryRandom
# Single URL: the random-entry redirect view.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; on modern Django this should be a plain list of url()/path().
urlpatterns = patterns(
    '',
    url(r'^$',
        EntryRandom.as_view(),
        name='zinnia_entry_random'),
)
| StarcoderdataPython |
1688076 | import re
import datetime
# Propositional connectives (used as the `char` of compound formulas).
ET_CHAR = "∧"
OR_CHAR = "∨"
IF_CHAR = "→"
NOT_CHAR = "¬"
# First order Logic
ALL_CHAR = "∀"
EXISTS_CHAR = "∃"
# Modal Logic
NECESSARY_CHAR = "◻"
POSSIBLE_CHAR = "◇"
# Results
THEOREM_CHAR = "⊢"
NOT_THEOREM_CHAR = "⊬"
def get_all(l):
    "Returns all elements in nested lists"
    result = []
    for item in l:
        # Falsy entries (None, 0, "", empty lists) are deliberately dropped.
        if not item:
            continue
        if isinstance(item, list):
            result.extend(get_all(item))
        else:
            result.append(item)
    return result
def p_new(params):
    "Return a fresh parameter name, one past the highest existing pN"
    if not params:
        return "p1"
    numbers = [int(re.search(r"(?<=p)\d+", name).group()) for name in params]
    return "p{}".format(max(numbers) + 1)
def max_p(params):
    "Return the largest numeric index among the parameter names"
    return max(int(re.search(r"\d+", name).group()) for name in params)
class FBF:
    # The Well Formulated Formula, eventually annotated with a sign
    # Base class for formulas: subclasses set `args` (subformulas or a raw
    # atom string), `char` (the connective symbol, "" for atoms) and `sign`
    # (tableaux annotation such as "T"/"F"/"Tc"/"Fc", or None).
    def __str__(self):
        # Render as "<sign><formula>", inserting brackets around compound
        # subformulas so the connective structure stays unambiguous.
        txt = self.sign if self.sign else ""
        if len(self.args) > 1 and self.char:
            # Binary connective: bracket each side if it is itself compound.
            fbf1 = "({})".format(self.args[0].str_wo_sign()) \
                if self.args[0].char \
                else self.args[0].str_wo_sign()
            if len(self.args[0].args) > 1 and fbf1[0] != "(":
                fbf1 = "(" + fbf1 + ")"
            fbf2 = "({})".format(self.args[1].str_wo_sign()) \
                if self.args[1].char \
                else self.args[1].str_wo_sign()
            if len(self.args[1].args) > 1 and fbf2[0] != "(":
                fbf2 = "(" + fbf2 + ")"
            txt += "{} {} {}".format(fbf1, self.char, fbf2)
        elif self.char:
            # Unary connective (e.g. negation, quantifier, modality).
            fbf = "({})".format(self.args[0].str_wo_sign()) \
                if self.args[0].char \
                else self.args[0].str_wo_sign()
            if len(self.args[0].args) > 1 and fbf[0] != "(":
                fbf = "(" + fbf + ")"
            txt += "{}{}".format(self.char, fbf)
        else:
            # Atom: args is the raw string.
            txt += self.args
        return txt
    def str_wo_sign(self):
        # Same rendering as __str__ but without the leading sign annotation.
        if len(self.args) > 1 and self.char:
            fbf1 = "({})".format(self.args[0].str_wo_sign()) \
                if self.args[0].char \
                else self.args[0].str_wo_sign()
            if len(self.args[0].args) > 1 and fbf1[0] != "(":
                fbf1 = "(" + fbf1 + ")"
            fbf2 = "({})".format(self.args[1].str_wo_sign()) \
                if self.args[1].char \
                else self.args[1].str_wo_sign()
            if len(self.args[1].args) > 1 and fbf2[0] != "(":
                fbf2 = "(" + fbf2 + ")"
            return "{} {} {}".format(fbf1, self.char, fbf2)
        elif self.char:
            fbf = "({})".format(self.args[0].str_wo_sign()) \
                if self.args[0].char \
                else self.args[0].str_wo_sign()
            if len(self.args[0].args) > 1 and fbf[0] != "(":
                fbf = "(" + fbf + ")"
            return "{}{}".format(self.char, fbf)
        else:
            return self.args
    def get_param(self):
        "Get assigned parameteres of a formula"
        # Recurses into subformulas; get_all flattens and drops the Nones
        # returned by unparameterized atoms.
        return get_all(map(lambda f: f.get_param(),
                           self.args))
    def get_all_param(self):
        "Get all parameters of a formula"
        return get_all(map(lambda f: f.get_all_param(),
                           self.args))
    def __eq__(self, other):
        # Two formulas are equal only when variables and function are the same
        # (the sign annotation is deliberately ignored).
        return self.args == other.args and self.char == other.char
    def closed(self, other):
        "Check if the formula is closed by the argument"
        # Closure: same formula carrying opposite (true vs false) signs.
        T = {"T", "Tc"}
        F = {"F", "Fc"}
        return self == other and \
            ((self.sign in T and other.sign in F) or
             (self.sign in F and other.sign in T))
    def __hash__(self):
        # NOTE(review): hash includes the sign (via str) while __eq__ ignores
        # it, so equal formulas with different signs hash differently —
        # intentional for tableaux sets, but a standard-contract violation.
        return hash(str(self))
class Atom(FBF):
    # Leaf formula: `args` is the raw atom string, e.g. "P(x)" or "p".
    def __init__(self, value, sign=None):
        "The atomic element"
        self.args = value
        self.sign = sign
        self.char = ""
    def get_all_param(self):
        "Get all parameters"
        # The (single) name between parentheses, e.g. "x" in "P(x)";
        # returns [] when the atom has no argument.
        param = re.search(r"(?<=\()\w+(?=\))", self.args)
        return param.group() if param else []
    def get_param(self):
        "Return the parameter if assigned"
        # Only parameters of the generated form pN count as "assigned";
        # the len > 1 check rejects a bare "p" with no digits... which the
        # \d+ in the pattern already guarantees, so it is effectively always true.
        param = re.search(r"(?<=\()p\d+(?=\))", self.args)
        if not param:
            return None
        return param.group() if len(param.group()) > 1 else None
    def set_param(self, val):
        "Set the parameter for first order rules"
        # Returns a new Atom with the parenthesized argument replaced by val;
        # the original atom is left untouched.
        return Atom(re.sub(r"(?<=\()\w+(?=\))",
                           val,
                           self.args),
                    sign=self.sign)
class Et(FBF):
    # Conjunction node; plain strings among args are promoted to Atoms.
    def __init__(self, *args, sign=None):
        "The logical et connector"
        self.char = ET_CHAR
        self.sign = sign
        self.args = list(map(lambda a: Atom(a) if isinstance(a, str) else a,
                             args))
        self.solver = "CL"
        if len(self.args) < 2:
            raise Exception("Object is not a valid formula")
        elif len(self.args) > 2:
            # Fold n-ary input left-associatively into nested binary Ets by
            # re-running __init__ on self (the `self =` rebinding is a no-op;
            # the re-initialization is what takes effect).
            self = self.__init__(Et(*args[0:2]), *args[2:])
    def set_param(self, param):
        "Set all parameters of a formula"
        return Et(self.args[0].set_param(param),
                  self.args[1].set_param(param),
                  sign=self.sign)
    def solve(self, solver, params):
        # Tableaux expansion: yields (set of signed subformulas, flag) pairs,
        # one yield per branch. NOTE(review): mutates the subformulas' signs
        # in place, so shared subformula objects are affected.
        # just check a tableaux table
        fbf1, fbf2 = self.args
        if solver == "CL":
            # Classical rules: T(A∧B) → one branch {TA, TB};
            # F(A∧B) → two branches {FA} | {FB}.
            fbf1.sign = self.sign
            fbf2.sign = self.sign
            if self.sign == "T":
                yield {fbf1, fbf2}, False
            elif self.sign == "F":
                yield {fbf1}, False
                yield {fbf2}, False
        elif solver == "INT":
            # Intuitionistic rules: same shape, but the false case also
            # covers the certainly-false sign "Fc".
            fbf1.sign = self.sign
            fbf2.sign = self.sign
            if self.sign == "T":
                yield {fbf1, fbf2}, False
            elif self.sign in {"F", "Fc"}:
                yield {fbf1}, False
                yield {fbf2}, False
        else:
            # Unknown solver: return the formula unexpanded.
            yield {self}, False
class Or(FBF):
    # Disjunction node; plain strings among args are promoted to Atoms.
    def __init__(self, *args, sign=None):
        "The logical Or connector"
        self.char = OR_CHAR
        self.sign = sign
        self.args = list(map(lambda a: Atom(a) if isinstance(a, str) else a,
                             args))
        if len(self.args) < 2:
            raise Exception("Object is not a valid formula")
        elif len(self.args) > 2:
            # Fold n-ary input into nested binary Ors (see Et for the same
            # re-__init__ idiom; the `self =` rebinding itself is a no-op).
            self = self.__init__(Or(*args[0:2]), *args[2:])
    def set_param(self, param):
        "Set all parameters of a formula"
        return Or(self.args[0].set_param(param),
                  self.args[1].set_param(param),
                  sign=self.sign)
    def solve(self, solver, params):
        # Tableaux expansion, dual to Et: T(A∨B) branches, F(A∨B) does not.
        # NOTE(review): mutates the subformulas' signs in place.
        fbf1, fbf2 = self.args
        if solver == "CL":
            fbf1.sign = self.sign
            fbf2.sign = self.sign
            if self.sign == "T":
                yield {fbf1}, False
                yield {fbf2}, False
            elif self.sign == "F":
                yield {fbf1, fbf2}, False
        elif solver == "INT":
            # Intuitionistic variant also covers the "Fc" sign.
            fbf1.sign = self.sign
            fbf2.sign = self.sign
            if self.sign == "T":
                yield {fbf1}, False
                yield {fbf2}, False
            elif self.sign in {"F", "Fc"}:
                yield {fbf1, fbf2}, False
        else:
            # Unknown solver: return the formula unexpanded.
            yield {self}, False
class If(FBF):
    def __init__(self, *args, sign=None):
        "The logical if (implication) connector"
        self.char = IF_CHAR
        self.sign = sign
        # Promote bare strings to Atom instances.
        self.args = list(map(lambda a: Atom(a) if isinstance(a, str) else a,
                             args))
        if len(self.args) < 2:
            raise Exception("Object is not a valid formula")
        elif len(self.args) > 2:
            # Fold an n-ary implication into nested binary ones
            # (the `self =` assignment is a no-op: __init__ returns None).
            self = self.__init__(If(*args[0:2]), *args[2:])

    def set_param(self, param):
        "Set all parameters of a formula"
        return If(self.args[0].set_param(param),
                  self.args[1].set_param(param),
                  sign=self.sign)

    def solve(self, solver, params):
        "Signed tableaux expansion rules for implication."
        fbf1, fbf2 = self.args
        if solver == "CL":
            if self.sign == "T":
                fbf1.sign = "F"
                fbf2.sign = "T"
                yield {fbf1}, False
                yield {fbf2}, False
            else:
                fbf1.sign = "T"
                fbf2.sign = "F"
                yield {fbf1, fbf2}, False
        elif solver == "INT":
            if self.sign == "T":
                # T(A -> B): the intuitionistic rule dispatches on the
                # shape of the antecedent A.
                if not fbf1.char:
                    fbf1.sign = "F"
                    fbf2.sign = "T"
                    yield {fbf1}, False
                    yield {fbf2}, False
                elif fbf1.char == ET_CHAR:
                    # T((A & B) -> C)  ==>  T(A -> (B -> C))
                    fbf = If(fbf1.args[0],
                             If(fbf1.args[1], fbf2),
                             sign="T")
                    yield {fbf}, False
                elif fbf1.char == OR_CHAR:
                    # T((A | B) -> C)  ==>  T(A -> C), T(B -> C)
                    fbf1_new = If(fbf1.args[0], fbf2, sign="T")
                    fbf2_new = If(fbf1.args[1], fbf2, sign="T")
                    yield {fbf1_new, fbf2_new}, False
                elif fbf1.char == IF_CHAR:
                    fbf1_new = If(fbf1.args[0], fbf2, sign="T")
                    fbf2_new = If(fbf1.args[1], fbf2, sign="F")
                    fbf2.sign = "T"
                    yield {fbf1_new, fbf2_new}, False
                    yield {fbf2}, False
                elif fbf1.char == ALL_CHAR:
                    # BUG FIX: the original assigned "F"/"T" to `.char`,
                    # clobbering the connector symbol; every sibling
                    # branch sets `.sign` here.
                    fbf1.sign = "F"
                    fbf2.sign = "T"
                    yield {fbf1, self}, False
                    yield {fbf2}, False
                elif fbf1.char == EXISTS_CHAR:
                    fbf_new = If(All(fbf1.args[0]), self.args[1], sign="T")
                    yield {fbf_new}, False
                else:
                    fbf1.sign = "F"
                    fbf2.sign = "T"
                    yield {fbf1, self}, False
                    yield {fbf2}, False
            elif self.sign == "F":
                fbf1.sign = "T"
                fbf2.sign = "F"
                yield {fbf1, fbf2}, True
            elif self.sign == "Fc":
                fbf1.sign = "T"
                fbf2.sign = "Fc"
                yield {fbf1, fbf2}, True
        else:
            yield {self}, False
class Not(FBF):
    def __init__(self, *args, sign=None):
        "The logical not operator"
        self.char = NOT_CHAR
        self.sign = sign
        # Promote bare strings to Atom instances.
        self.args = list(map(lambda a: Atom(a) if isinstance(a, str) else a,
                             args))
        if len(self.args) != 1:
            raise Exception("Object is not a valid formula")

    def set_param(self, param):
        "Set all parameters of a formula"
        return Not(self.args[0].set_param(param),
                   sign=self.sign)

    def solve(self, solver, params):
        # Signed tableaux rules for negation.
        fbf = self.args[0]
        if solver == "CL":
            # Classically, negation simply flips the sign.
            fbf.sign = "F" if self.sign == "T" else "T"
            yield {fbf}, False
        elif solver == "INT":
            if self.sign == "T":
                # Intuitionistically T(not A) makes A "certainly false".
                fbf.sign = "Fc"
                yield {fbf}, False
            elif self.sign in {"F", "Fc"}:
                fbf.sign = "T"
                # The True flag marks a non-invertible rule: the caller
                # keeps only the certain subset Sc -- confirm against the
                # calculus definition.
                yield {fbf}, True
        else:
            yield {self}, False
class All(FBF):
    def __init__(self, *args, sign=None):
        "The logical quantifier all (universal) operator"
        self.char = ALL_CHAR
        self.sign = sign
        # Promote bare strings to Atom instances.
        self.args = list(map(lambda a: Atom(a) if isinstance(a, str) else a,
                             args))
        if len(self.args) != 1:
            raise Exception("Object is not a valid formula")

    def solve(self, solver, params):
        # Signed tableaux rules for the universal quantifier.  `p_new`
        # (defined elsewhere in this file) presumably returns a fresh
        # parameter name not in `params` -- confirm.
        fbf = self.args[0]
        if not params:
            params = {"p1"}
        if solver == "CL":
            fbf.sign = self.sign
            if self.sign == "T":
                # T(forall x A): instantiate with every known parameter
                # and keep the quantified formula for later re-use.
                for p in params:
                    yield {fbf.set_param(p), self}, False
            elif self.sign == "F":
                # F(forall x A): instantiate with one fresh parameter.
                yield {fbf.set_param(p_new(params))}, False
        elif solver == "INT":
            if self.sign == "T":
                fbf.sign = self.sign
                for p in params:
                    yield {fbf.set_param(p), self}, False
            elif self.sign == "F":
                fbf.sign = "F"
                yield {fbf.set_param(p_new(params))}, True
            elif self.sign == "Fc":
                fbf.sign = "F"
                yield {fbf.set_param(p_new(params)), self}, True
        else:
            yield {self}, False
class Exists(FBF):
    def __init__(self, *args, sign=None):
        "The logical existential quantifier operator"
        self.char = EXISTS_CHAR
        self.sign = sign
        # Promote bare strings to Atom instances.
        self.args = list(map(lambda a: Atom(a) if isinstance(a, str) else a,
                             args))
        if len(self.args) != 1:
            raise Exception("Object is not a valid formula")

    def solve(self, solver, params):
        # Signed tableaux rules for the existential quantifier (dual of
        # All): a true existential gets one fresh witness, a false one is
        # instantiated with every known parameter.
        fbf = self.args[0]
        if not params:
            params = {"p1"}
        if solver == "CL":
            fbf.sign = self.sign
            if self.sign == "T":
                yield {fbf.set_param(p_new(params))}, False
            elif self.sign == "F":
                for p in params:
                    yield {fbf.set_param(p)}, False
        elif solver == "INT":
            fbf.sign = self.sign
            if self.sign == "T":
                yield {fbf.set_param(p_new(params))}, False
            elif self.sign == "F":
                for p in params:
                    yield {fbf.set_param(p)}, False
            elif self.sign == "Fc":
                # Fc keeps the quantified formula around for re-use.
                for p in params:
                    yield {fbf.set_param(p), self}, False
        else:
            yield {self}, False
class Tableaux:
    """Signed tableaux prover for classical ("CL") and intuitionistic
    ("INT") logic."""

    def __init__(self, solver, S, old_S=None, max_p=0, first_run=False):
        """Build a tableaux node.

        solver    -- "CL" or "INT"
        S         -- list of signed formulas; on first_run S[0] is signed "F"
        old_S     -- formula sets already visited (loop detection)
        max_p     -- cap on the number of instantiated parameters
        first_run -- True only for the root call (see is_theorem)
        """
        self.solver = solver
        if first_run:
            # To prove validity, try (and fail) to refute the formula.
            S[0].sign = "F"
        self.S = set(S)
        # to check if the recoursive algorithm enters in a loop
        # FIX: the original used the mutable default `old_S=list()`;
        # a None sentinel plus a fresh list avoids the shared-default trap.
        self.old_S = list() if old_S is None else old_S
        self.max_p = max_p if max_p else len(S[0].get_all_param())
        self.params = set(get_all(map(lambda f: f.get_param(),
                                      self.S)))
        if solver == "CL":
            certain = {"T", "F"}
        elif solver == "INT":
            certain = {"T", "Fc"}
        else:
            raise Exception("Not a valid logic")
        # Sc: formulas whose sign is "certain"; survives non-invertible rules.
        self.Sc = set(filter(lambda f: f.sign in certain,
                             self.S))
        # print(self.solver + " - " + ", ".join(list(map(str, self.S))))

    def is_closed(self):
        "True when some formula in S is `closed` against another one."
        return any([any(map(lambda f: fbf.closed(f), self.S - {fbf}))
                    for fbf in self.S])

    def expand_solution(self, fbf):
        # if a sequence is correct, this is the solution
        sol = fbf.solve(self.solver, self.params)
        if fbf.char in {ALL_CHAR, EXISTS_CHAR}:
            # Quantifier rules: ANY instantiation may close the tableaux.
            return any((Tableaux(self.solver,
                                 list(((self.Sc if c else
                                        self.S) - {fbf}) | s),
                                 old_S=self.old_S + [self.S],
                                 max_p=self.max_p).solve()
                        for s, c in sol))
        # Connective rules: ALL resulting branches must close.
        return all((Tableaux(self.solver,
                             list(((self.Sc if c else
                                    self.S) - {fbf}) | s),
                             old_S=self.old_S + [self.S],
                             max_p=self.max_p).solve()
                    for s, c in sol))

    def solve(self):
        "Return True when the tableaux closes."
        # check if the tableaux is closed
        if self.is_closed():
            return True
        # get all formulas which a rule can be applied to
        not_atomic = set(filter(lambda f: f.char, self.S))
        # if none, or we are looping, or too many parameters: open tableaux
        if self.S in self.old_S or \
           not not_atomic or \
           (self.params and max_p(self.params) > self.max_p):
            return False
        # check if exists a sequence of formulas that closed the tableaux
        # with an exploratory algorithm: it checks all possible sequences
        return any((self.expand_solution(f) for f in not_atomic))
def is_theorem(formula, logic):
    "Return True when *formula* is a theorem of *logic* ('CL' or 'INT')."
    root = Tableaux(logic, [formula], first_run=True)
    return root.solve()
def solve(formula, logic=("CL", "INT")):
    """Check *formula* in each given logic, print a report and return
    a dict mapping logic name -> bool (theorem or not).

    FIX: the default was the mutable list ["CL", "INT"]; an immutable
    tuple is equivalent for callers and avoids the shared-default trap.
    """
    txt = str(formula)
    print(txt)
    results = dict()
    for l in logic:
        start = datetime.datetime.now()
        t = is_theorem(formula, l)
        time = datetime.datetime.now() - start
        print(time, end=" "*3)
        print(l + " "*(5 - len(l)) + (THEOREM_CHAR if t
                                      else NOT_THEOREM_CHAR) + " " + txt)
        results[l] = t
    print("")
    return results
if __name__ == "__main__":
    # Propositional examples
    solve(If("A", "B", "A", "A", "B", "B"))
    solve(Or("A", Not("A")))
    solve(Not(Not(Or("A", Not("A")))))
    solve(Et("A", Not("A")))
    solve(Not(Et("A", Not("A"))))
    solve(If(Et(Or("P", "Q"),
                If("P", "R"),
                Et(Not("P"),
                   If("Q", "R"))),
             "R"))
    solve(If(Or("A", Not("A")),
             "B", Not(Not("B"))))
    solve(If("A", Not(Not("A"))))
    solve(If(Not(Not("A")), "A"))
    solve(If(Not(Not((Not("A")))), Not("A")))
    # first order
    # solve(If(All(Or("A(x)",
    #                 "B(x)")),
    #          Or(All("A(x)"),
    #             All("B(x)"))))
    solve(Not(If(All(Or("A(x)",
                        "B(x)")),
                 Or(All("A(x)"),
                    All("B(x)")))))
    solve(If(All("A(x)"),
             Not(Exists(Not("A(x)")))))
    solve(If(Not(Exists(Not("A(x)"))),
             All("A(x)")))
    solve(If(All(If("A", "B(x)")),
             If("A",
                All("B(x)"))))
    solve(If(If("A",
                All("B(x)")),
             All(If("A",
                    "B(x)"))))
    solve(If(Exists(If("A(x)",
                       "B(x)")),
             If(All("A(x)"),
                Exists("B(x)"))))
    solve(If(If(All("A(x)"),
                Exists("B(x)")),
             Exists(If("A(x)",
                       "B(x)"))))
    solve(All(Or("A(x)", Not("A(x)"))))
    solve(Not(Not(All(Or("A(x)",
                         Not("A(x)"))))))
    solve(Exists(Or("A(x)",
                    Not("A(x)"))))
    solve(Not(Not(Exists(Or("A(x)",
                            Not("A(x)"))))))
| StarcoderdataPython |
3203569 | from math import sqrt, atan2
class Vector2D:
    """A simple mutable 2-D vector backed by an [x, y] list of floats."""

    def __init__(self, x=0, y=0):
        self.point = [float(x), float(y)]

    def __hash__(self):
        # Equal vectors hash equal (consistent with __eq__); note the
        # instance is mutable via __setitem__, so do not mutate keys.
        return hash(tuple(self.point))

    def __str__(self):
        return str(self.point)

    def __repr__(self):
        return str(self.point)

    def __eq__(self, other):
        # FIX: return NotImplemented for non-vectors instead of raising
        # AttributeError when `other` has no `.point`.
        if not isinstance(other, Vector2D):
            return NotImplemented
        return self.point == other.point

    def __abs__(self):
        return sqrt((self.x)**2 + (self.y)**2)  # L-2 norm

    def __add__(self, other):
        return Vector2D(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return Vector2D(self.x - other.x, self.y - other.y)

    def __mul__(self, scalar):
        return Vector2D(self.x * scalar, self.y * scalar)

    # Generalization: allow `scalar * vector` as well as `vector * scalar`.
    __rmul__ = __mul__

    def __getitem__(self, idx):
        return self.point[idx]

    def __setitem__(self, idx, value):
        self.point[idx] = value

    @property
    def x(self):
        return self.point[0]

    @property
    def y(self):
        return self.point[1]

    @property
    def angle(self):
        # Angle from the positive x-axis, in radians (-pi, pi].
        return atan2(self.y, self.x)
| StarcoderdataPython |
def creategraph():
    """Return a hard-coded search graph and its goal node.

    Nodes are ('name', heuristic_value) tuples; the adjacency mapping
    lists each node's successors.
    """
    goal = ('u', 0)
    graph = {
        ('h', 120): [('g', 100), ('s', 70), ('b', 80)],
        ('s', 70): [('po', 110), ('rs', 20)],
        ('g', 100): [('rs', 20)],
        ('b', 80): [('ps', 26)],
        ('rs', 20): [('u', 0)],
        ('ps', 26): [('u', 0)],
    }
    return graph, goal
def sort(openlist):
    "Return the frontier ordered by ascending heuristic value."
    def heuristic(node):
        return node[1]
    return sorted(openlist, key=heuristic)
def bestFirstSearch(graph, goal):
    """Greedy best-first search over *graph*, starting from its first key.

    Returns (closelist, goal) where closelist is the sequence of expanded
    nodes ending with the goal.

    FIXES vs the original: raises ValueError instead of crashing with
    IndexError when the frontier empties before the goal is reached, and
    the unreachable code after the `while True` loop was removed.
    """
    closelist = [list(graph.keys())[0]]
    openlist = list(list(graph.values())[0])
    while closelist[-1] != goal:
        if not openlist:
            raise ValueError("goal %r is unreachable" % (goal,))
        # Expand the frontier node with the smallest heuristic value.
        openlist.sort(key=lambda node: node[1])
        closelist.append(openlist.pop(0))
        successors = graph.get(closelist[-1])
        if successors is not None:
            openlist.extend(successors)
    return closelist, goal
def main():
    "Build the demo graph, run best-first search and report the path."
    graph, goal = creategraph()
    path, goal = bestFirstSearch(graph, goal)
    print("For the goal '{}' the path found is {}".format(goal[0], path))


main()
| StarcoderdataPython |
1759322 | from __future__ import annotations
from typing import Any, List, Optional
from pydantic import BaseModel, HttpUrl
from ikea_api.wrappers import types
from ikea_api.wrappers._parsers.item_base import ItemCode
__all__ = ["main"]
class Catalog(BaseModel):
    # A single catalog/category entry: display name plus its web URL.
    name: str
    url: HttpUrl
class CatalogRef(BaseModel):
    # A group of catalog entries as returned by the PIP endpoint.
    elements: List[Catalog]
class CatalogRefs(BaseModel):
    # `products` may be missing from the response entirely.
    products: Optional[CatalogRef]
class ResponsePipItem(BaseModel):
    # Subset of the PIP item response fields that the wrapper consumes.
    id: ItemCode
    priceNumeral: int
    pipUrl: HttpUrl
    catalogRefs: CatalogRefs
def get_category_name_and_url(catalog_refs: "CatalogRefs"):
    """Return (name, url) of the first product catalog, or (None, None)
    when the response carries no product catalogs."""
    products = catalog_refs.products
    if not products:
        return None, None
    first = products.elements[0]
    return first.name, first.url
def main(response: dict[str, Any]):
    """Parse a raw PIP item response into a types.PipItem.

    Returns None for an empty/falsy response.
    """
    if not response:
        return
    item = ResponsePipItem(**response)
    name, url = get_category_name_and_url(item.catalogRefs)
    return types.PipItem(
        item_code=item.id,
        price=item.priceNumeral,
        url=item.pipUrl,
        category_name=name,
        category_url=url,
    )
| StarcoderdataPython |
1631496 | import discord
from discord.ext import commands
from discord.utils import get
import datetime
from discord import Member
class Joinlog(commands.Cog):
    """Cog that logs member joins to the guild's configured log channel."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_member_join(self, member):
        # FIX: the original did not even parse (`Noneg`, unbalanced parens
        # in set_author, `if used invite:`, a stray `), + 'ago'`), used the
        # nonexistent `embed.add_footer`, referenced an undefined `guild`,
        # and defined unused `channel`/`vars` locals.
        used_invite = None  # invite tracking is not implemented yet
        # NOTE(review): the original query had a `?` placeholder but passed
        # no parameters -- presumably the guild id; confirm the db API.
        logch = self.bot.db.execute(
            "SELECT logging_join FROM guild_settings WHERE id = ?",
            (member.guild.id,))
        if logch:
            embed = discord.Embed(
                title="Member Joined",
                url="https://tenor.com/view/penguin-hello-hi-hey-there-cutie-gif-3950966")
            embed.set_author(
                name=f"{member}",
                icon_url=str(member.avatar_url_as(static_format='png', size=2048)))
            account_age = datetime.datetime.utcnow() - member.created_at
            embed.add_field(name='Account Created',
                            value=f"{account_age} ago", inline=False)
            if used_invite:
                embed.add_field(name="Invite used:", value=used_invite,
                                inline=False)
            # discord.Embed has no add_footer(); set_footer takes `text`.
            embed.set_footer(text=f"Member Count: {member.guild.member_count}")
            try:
                await logch.send(embed=embed)
            except Exception:
                # Best-effort logging: never let a log failure break the event.
                pass
def setup(bot):
    """Entry point used by discord.py's load_extension."""
    # FIX: the original registered undefined `Greetmsg`; this module's cog
    # is Joinlog.
    bot.add_cog(Joinlog(bot))
    # NOTE(review): `bot.logging` is unusual -- confirm the bot object
    # really exposes a logger under this attribute.
    bot.logging.info("Event Loaded Joinlog!")
58034 | <reponame>bitcraft/pyglet<gh_stars>10-100
import pyglet
# Cocoa implementation:
if pyglet.options['darwin_cocoa']:
from .cocoapy import *
| StarcoderdataPython |
13591 | # extdiff.py - external diff program support for mercurial
#
# Copyright 2006 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to allow external programs to compare revisions
The extdiff Mercurial extension allows you to use external programs
to compare revisions, or revision with working directory. The external
diff programs are called with a configurable set of options and two
non-option arguments: paths to directories containing snapshots of
files to compare.
The extdiff extension also allows you to configure new diff commands, so
you do not need to type :hg:`extdiff -p kdiff3` always. ::
[extdiff]
# add new command that runs GNU diff(1) in 'context diff' mode
cdiff = gdiff -Nprc5
## or the old way:
#cmd.cdiff = gdiff
#opts.cdiff = -Nprc5
# add new command called vdiff, runs kdiff3
vdiff = kdiff3
# add new command called meld, runs meld (no need to name twice)
meld =
# add new command called vimdiff, runs gvimdiff with DirDiff plugin
# (see http://www.vim.org/scripts/script.php?script_id=102) Non
# English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
# your .vimrc
vimdiff = gvim -f "+next" \\
"+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
Tool arguments can include variables that are expanded at runtime::
$parent1, $plabel1 - filename, descriptive label of first parent
$child, $clabel - filename, descriptive label of child revision
$parent2, $plabel2 - filename, descriptive label of second parent
$root - repository root
$parent is an alias for $parent1.
The extdiff extension will look in your [diff-tools] and [merge-tools]
sections for diff tool arguments, when none are specified in [extdiff].
::
[extdiff]
kdiff3 =
[diff-tools]
kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
You can use -I/-X and list of file or directory names like normal
:hg:`diff` command. The extdiff extension makes snapshots of only
needed files, so running the external diff program will actually be
pretty fast (at least faster than having to compare the entire tree).
'''
from mercurial.i18n import _
from mercurial.node import short, nullid
from mercurial import scmutil, util, commands, encoding
import os, shlex, shutil, tempfile, re
def snapshot(ui, repo, files, node, tmproot):
    '''snapshot files as of some revision
    if not using snapshot, -I/-X does not work and recursive diff
    in tools like kdiff3 and meld displays too many files.'''
    # Name the snapshot directory after the repository (plus the short
    # revision hash when snapshotting a committed revision).
    dirname = os.path.basename(repo.root)
    if dirname == "":
        dirname = "root"
    if node is not None:
        dirname = '%s.%s' % (dirname, short(node))
    base = os.path.join(tmproot, dirname)
    os.mkdir(base)
    if node is not None:
        ui.note(_('making snapshot of %d files from rev %s\n') %
                (len(files), short(node)))
    else:
        ui.note(_('making snapshot of %d files from working directory\n') %
                (len(files)))
    wopener = scmutil.opener(base)
    fns_and_mtime = []
    ctx = repo[node]
    for fn in files:
        wfn = util.pconvert(fn)
        if not wfn in ctx:
            # File doesn't exist; could be a bogus modify
            continue
        ui.note(' %s\n' % wfn)
        dest = os.path.join(base, wfn)
        fctx = ctx[wfn]
        data = repo.wwritedata(wfn, fctx.data())
        if 'l' in fctx.flags():
            # 'l' flag: materialize symlinks as symlinks in the snapshot.
            wopener.symlink(data, wfn)
        else:
            wopener.write(wfn, data)
        if 'x' in fctx.flags():
            # 'x' flag: restore the executable bit.
            util.setflags(dest, False, True)
        if node is None:
            # Working-dir snapshot: remember mtimes so the caller can copy
            # back any file the external diff tool modified.
            fns_and_mtime.append((dest, repo.wjoin(fn),
                                  os.lstat(dest).st_mtime))
    return dirname, fns_and_mtime
def dodiff(ui, repo, diffcmd, diffopts, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''
    revs = opts.get('rev')
    change = opts.get('change')
    args = ' '.join(diffopts)
    do3way = '$parent2' in args
    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # --change R: compare R against its parent(s).
        node2 = scmutil.revsingle(repo, change, None).node()
        node1a, node1b = repo.changelog.parents(node2)
    else:
        node1a, node2 = scmutil.revpair(repo, revs)
        if not revs:
            node1b = repo.dirstate.p2()
        else:
            node1b = nullid
    # Disable 3-way merge if there is only one parent
    if do3way:
        if node1b == nullid:
            do3way = False
    matcher = scmutil.match(repo[node2], pats, opts)
    mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher)[:3])
    if do3way:
        mod_b, add_b, rem_b = map(set, repo.status(node1b, node2, matcher)[:3])
    else:
        mod_b, add_b, rem_b = set(), set(), set()
    modadd = mod_a | add_a | mod_b | add_b
    common = modadd | rem_a | rem_b
    if not common:
        # Nothing changed: nothing to diff.
        return 0
    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    try:
        # Always make a copy of node1a (and node1b, if applicable)
        dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
        dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot)[0]
        rev1a = '@%d' % repo[node1a].rev()
        if do3way:
            dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
            dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot)[0]
            rev1b = '@%d' % repo[node1b].rev()
        else:
            dir1b = None
            rev1b = ''
        fns_and_mtime = []
        # If node2 in not the wc or there is >1 change, copy it
        dir2root = ''
        rev2 = ''
        if node2:
            dir2 = snapshot(ui, repo, modadd, node2, tmproot)[0]
            rev2 = '@%d' % repo[node2].rev()
        elif len(common) > 1:
            #we only actually need to get the files to copy back to
            #the working dir in this case (because the other cases
            #are: diffing 2 revisions or single file -- in which case
            #the file is already directly passed to the diff tool).
            dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot)
        else:
            # This lets the diff tool open the changed file directly
            dir2 = ''
            dir2root = repo.root
        label1a = rev1a
        label1b = rev1b
        label2 = rev2
        # If only one change, diff the files instead of the directories
        # Handle bogus modifies correctly by checking if the files exist
        if len(common) == 1:
            common_file = util.localpath(common.pop())
            dir1a = os.path.join(tmproot, dir1a, common_file)
            label1a = common_file + rev1a
            if not os.path.isfile(dir1a):
                dir1a = os.devnull
            if do3way:
                dir1b = os.path.join(tmproot, dir1b, common_file)
                label1b = common_file + rev1b
                if not os.path.isfile(dir1b):
                    dir1b = os.devnull
            dir2 = os.path.join(dir2root, dir2, common_file)
            label2 = common_file + rev2
        # Function to quote file/dir names in the argument string.
        # When not operating in 3-way mode, an empty string is
        # returned for parent2
        replace = dict(parent=dir1a, parent1=dir1a, parent2=dir1b,
                       plabel1=label1a, plabel2=label1b,
                       clabel=label2, child=dir2,
                       root=repo.root)
        def quote(match):
            key = match.group()[1:]
            if not do3way and key == 'parent2':
                return ''
            return util.shellquote(replace[key])
        # Match parent2 first, so 'parent1?' will match both parent1 and parent
        regex = '\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)'
        if not do3way and not re.search(regex, args):
            args += ' $parent1 $child'
        args = re.sub(regex, quote, args)
        cmdline = util.shellquote(diffcmd) + ' ' + args
        ui.debug('running %r in %s\n' % (cmdline, tmproot))
        util.system(cmdline, cwd=tmproot, out=ui.fout)
        # Copy back any working-dir snapshot file the external tool modified.
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.lstat(copy_fn).st_mtime != mtime:
                ui.debug('file changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)
        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
def extdiff(ui, repo, *pats, **opts):
    '''use external program to diff repository (or selected files)

    Show differences between revisions for the specified files, using
    an external program. The default program used is diff, with
    default options "-Npru".

    To select a different program, use the -p/--program option. The
    program will be passed the names of two directories to compare. To
    pass additional options to the program, use -o/--option. These
    will be passed before the names of the directories to compare.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.'''
    program = opts.get('program')
    option = opts.get('option')
    if not program:
        # No explicit tool: fall back to GNU diff with its default options.
        program, option = 'diff', option or ['-Npru']
    return dodiff(ui, repo, program, option, pats, opts)
# Mercurial command table: maps the command name to
# (implementation, option list, synopsis string).
cmdtable = {
    "extdiff":
        (extdiff,
         [('p', 'program', '',
           _('comparison program to run'), _('CMD')),
          ('o', 'option', [],
           _('pass option to comparison program'), _('OPT')),
          ('r', 'rev', [],
           _('revision'), _('REV')),
          ('c', 'change', '',
           _('change made by revision'), _('REV')),
         ] + commands.walkopts,
         _('hg extdiff [OPT]... [FILE]...')),
}
def uisetup(ui):
    # Register one synthetic hg command per [extdiff] configuration entry.
    for cmd, path in ui.configitems('extdiff'):
        if cmd.startswith('cmd.'):
            # old style:  cmd.NAME = PATH  /  opts.NAME = OPTS
            cmd = cmd[4:]
            if not path:
                path = cmd
            diffopts = ui.config('extdiff', 'opts.' + cmd, '')
            diffopts = diffopts and [diffopts] or []
        elif cmd.startswith('opts.'):
            # handled together with the matching cmd. entry above
            continue
        else:
            # command = path opts
            if path:
                diffopts = shlex.split(path)
                path = diffopts.pop(0)
            else:
                path, diffopts = cmd, []
        # look for diff arguments in [diff-tools] then [merge-tools]
        if diffopts == []:
            args = ui.config('diff-tools', cmd+'.diffargs') or \
                   ui.config('merge-tools', cmd+'.diffargs')
            if args:
                diffopts = shlex.split(args)
        def save(cmd, path, diffopts):
            '''use closure to save diff command to use'''
            def mydiff(ui, repo, *pats, **opts):
                return dodiff(ui, repo, path, diffopts + opts['option'],
                              pats, opts)
            doc = _('''\
use %(path)s to diff repository (or selected files)

    Show differences between revisions for the specified files, using
    the %(path)s program.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.\
''') % dict(path=util.uirepr(path))
            # We must translate the docstring right away since it is
            # used as a format string. The string will unfortunately
            # be translated again in commands.helpcmd and this will
            # fail when the docstring contains non-ASCII characters.
            # Decoding the string to a Unicode string here (using the
            # right encoding) prevents that.
            mydiff.__doc__ = doc.decode(encoding.encoding)
            return mydiff
        cmdtable[cmd] = (save(cmd, path, diffopts),
                         cmdtable['extdiff'][1][1:],
                         _('hg %s [OPTION]... [FILE]...') % cmd)
| StarcoderdataPython |
from django.shortcuts import render
from rest_framework import generics, permissions, viewsets, renderers
from rest_framework.views import APIView
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from .service import CleaningFilter
from collections import Counter
from django.db.models import Count, Avg, Sum
from .models import Floor, RoomType, Room, Resident, Servant, Cleaning
from .serializers import FloorSerializer, RoomTypeSerializer, RoomSerializer, ResidentSerializer,\
    ResidentCreateSerializer, ServantSerializer, CleaningSerializer, CleaningCreateSerializer
class FloorViewSet(viewsets.ModelViewSet):
    """ViewSet for the Floor model."""
    queryset = Floor.objects.all()
    serializer_class = FloorSerializer
class RoomTypeViewSet(viewsets.ModelViewSet):
    """ViewSet for the RoomType model."""
    queryset = RoomType.objects.all()
    serializer_class = RoomTypeSerializer
class RoomViewSet(viewsets.ModelViewSet):
    """ViewSet for the Room model."""
    queryset = Room.objects.all()
    serializer_class = RoomSerializer
class ResidentViewSet(viewsets.ModelViewSet):
    """ViewSet for the Resident model."""
    queryset = Resident.objects.all()

    def get_serializer_class(self):
        # The create action needs the write serializer; every other action
        # (list/retrieve/update/...) uses the read serializer.  The original
        # redundant `elif action != 'create'` could in principle fall
        # through and return None.
        if self.action == 'create':
            return ResidentCreateSerializer
        return ResidentSerializer
class ServantViewSet(viewsets.ModelViewSet):
    """ViewSet for the Servant (staff) model."""
    queryset = Servant.objects.all()
    serializer_class = ServantSerializer
class CleaningViewSet(viewsets.ModelViewSet):
    """ViewSet for the Cleaning model, filterable via CleaningFilter."""
    queryset = Cleaning.objects.all()
    filter_backends = (DjangoFilterBackend,
                       )
    filterset_class = CleaningFilter

    def get_serializer_class(self):
        # Same pattern as ResidentViewSet: write serializer for create only
        # (the original redundant `elif` could fall through to None).
        if self.action == 'create':
            return CleaningCreateSerializer
        return CleaningSerializer
"""Запросы к курсовой работе"""
class Query1(APIView):
    """Clients who lived in the given room."""
    def get(self, request):
        # Room number comes from the query string, e.g. ?room_number=1.
        room = request.GET.get('room_number')
        #room = '1'
        resident_list = Resident.objects.filter(room=room)
        serializer = ResidentSerializer(resident_list, many=True)
        return Response({'result': serializer.data})
class Query3(APIView):
    """Which servant cleaned the given client's room on a given weekday."""
    def get(self, request):
        # `resident` actually carries the room number -- see the filter below.
        room = request.GET.get('resident')
        day = request.GET.get('day')
        #room = '1'
        #day = 'Понедельник'
        # NOTE(review): [0] raises IndexError when no matching room or
        # cleaning record exists -- confirm whether a 404 is expected here.
        floor1 = Room.objects.filter(room_number=room)[0].floor
        servant1 = Cleaning.objects.filter(floor=floor1, day=day)[0].servant
        result = str(servant1)
        return Response({'result': result})
class Query2(APIView):
    """Number of clients that arrived from a given city."""
    def get(self, request):
        # NOTE(review): despite the docstring, this groups by home town
        # and returns the count for EVERY town -- confirm the intent.
        results = Resident.objects.values('from_town').order_by('from_town').annotate(Count('fio'))
        return Response({'result': results})
class Query4(APIView):
    """How many free rooms the hotel has."""
    def get(self, request):
        # Generalization: sum capacity over *all* floors.  The original
        # hard-coded floors 1 and 2, breaking for any other floor layout.
        capacity = Floor.objects.aggregate(total=Sum('number_of_rooms'))['total'] or 0
        occupied = Room.objects.count()
        return Response({'result': capacity - occupied})
# Create your views here.
| StarcoderdataPython |
4800180 | <filename>setup.py
from setuptools import setup, find_packages
import platform
from pathlib import Path
import subprocess
import sys
import warnings
# This installer only ships Windows binaries (ViGEmBus .msi packages).
assert platform.system() == 'Windows', "Sorry, this module is only compatible with Windows so far."

# Pick the installer architecture from the machine string, falling back to
# pointer width when the machine string is unrecognised.
archstr = platform.machine()
if archstr.endswith('64'):
    arch = "x64"
elif archstr.endswith('86'):
    arch = "x86"
else:
    if platform.architecture()[0] == "64bit":
        arch = "x64"
    else:
        arch = "x86"
    warnings.warn(f"vgamepad could not determine your system architecture: \
the vigembus installer will default to {arch}. If this is not your machine architecture, \
please cancel the upcoming vigembus installation and install vigembus manually from \
https://github.com/ViGEm/ViGEmBus/releases/tag/setup-v1.17.333")

pathMsi = Path(__file__).parent.absolute() / "vgamepad" / "win" / "vigem" / "install" / arch / ("ViGEmBusSetup_" + arch + ".msi")

# Prompt installation of the ViGEmBus driver (blocking call).
# FIX: guard against IndexError when setup.py is invoked with no command
# (the original accessed sys.argv[1] unconditionally).
if len(sys.argv) > 1 and sys.argv[1] != 'egg_info' and sys.argv[1] != 'sdist':
    subprocess.call('msiexec /i %s' % str(pathMsi), shell=True)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='vgamepad',
packages=[package for package in find_packages()],
version='0.0.5',
license='MIT',
description='Virtual XBox360 and DualShock4 gamepads in python',
long_description=long_description,
long_description_content_type="text/markdown",
author='<NAME>',
url='https://github.com/yannbouteiller/vgamepad',
download_url='https://github.com/yannbouteiller/vgamepad/archive/refs/tags/v0.0.5.tar.gz',
keywords=['virtual', 'gamepad', 'python', 'xbox', 'dualshock', 'controller', 'emulator'],
install_requires=[],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Topic :: Software Development :: Build Tools',
'Topic :: Games/Entertainment',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
package_data={'vgamepad': [
'win/vigem/client/x64/ViGEmClient.dll',
'win/vigem/client/x86/ViGEmClient.dll',
'win/vigem/install/x64/ViGEmBusSetup_x64.msi',
'win/vigem/install/x86/ViGEmBusSetup_x86.msi',
]}
)
| StarcoderdataPython |
1619925 | <gh_stars>0
from zufang_flask.models import HouseItem
__author__ = 'GavinLiu'
__date__ = '2018/7/21 13:05'
from flask import Blueprint, jsonify, render_template
house = Blueprint('house', __name__)
@house.route('/index.html')
def index():
    """Render the landing page."""
    return render_template('index.html')
@house.route('/get_houselist', methods=['GET'])
def get_houselist():
    """Return every HouseItem as a JSON list under the `data` key."""
    house_list = [house_info.to_dict() for house_info in HouseItem.query.all()]
    return jsonify(data=house_list)
| StarcoderdataPython |
1646069 | <gh_stars>100-1000
import mock
from couchdbkit import ResourceNotFound
def mock_report_configurations(report_configurations_by_id):
    # Patch ReportModule.reports so report configs resolve from the given
    # in-memory mapping instead of the database.
    return mock.patch('corehq.apps.app_manager.models.ReportModule.reports', property(
        lambda self: [report_configurations_by_id[r.report_id]
                      for r in self.report_configs]))
def mock_report_configuration_get(report_configurations_by_id):
    # Patch get_report_config so lookups hit the in-memory mapping; unknown
    # ids raise ResourceNotFound just like the couchdbkit-backed original.
    def get_report_config(_id, domain):
        try:
            # NOTE(review): the False in the tuple presumably mirrors the
            # real get_report_config's second return value -- confirm.
            return (report_configurations_by_id[_id], False)
        except KeyError:
            raise ResourceNotFound
    return mock.patch(
        'corehq.apps.app_manager.fixtures.mobile_ucr.get_report_config',
        get_report_config
    )
def mock_report_data(data):
    # Make every ConfigurableReportDataSource return the canned rows.
    return mock.patch(
        'corehq.apps.userreports.reports.data_source.ConfigurableReportDataSource.get_data',
        lambda self: data)
| StarcoderdataPython |
3229078 | <gh_stars>1-10
#cat alladdress.txt | python3 address_to_hash160.py > alladdress160.txt
import sys
from bit.base58 import b58decode_check
from bit.utils import bytes_to_hex
def address_to_hash160(address):
    """Decode a base58check address and return its hash160 as hex
    (the leading version byte -- two hex chars -- is stripped)."""
    decoded = b58decode_check(address)
    return bytes_to_hex(decoded)[2:]
# Convert every base58 address read from stdin to its hash160 hex form.
for line in sys.stdin:
    print (address_to_hash160(line.rstrip()))
# Read two floats from one line and print their quotient with 2 decimals.
numerator, denominator = map(float, input().split())
print("%.2lf" % (numerator / denominator))
| StarcoderdataPython |
1705894 | <filename>run_reg.py
"""
Author: <NAME>
Date: May 2020
调用训练好的模型
"""
import argparse
import numpy as np
import os
import torch
import logging
from tqdm import tqdm
import matplotlib
from pathlib import Path
import sys
import importlib
import cv2
from openni import openni2
from openni import _openni2 as c_api
from displayPoint import displayPoint1,displayPoint2
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))
def parse_args():
    '''Build the argument parser and return the parsed namespace.'''
    parser = argparse.ArgumentParser('')
    # model parameters
    parser.add_argument('--num_joint', default=6, type=int, help='number of joint in hand [default: 36*3]')
    parser.add_argument('--model', default='pointnet2_reg_msg', help='model name [default: pointnet_cls]')
    parser.add_argument('--gpu', type=str, default='0', help='specify gpu device [default: 0]')
    parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]')
    parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
    # camera parameters
    parser.add_argument('--width', type=int, default=640, help='resolutionX')
    parser.add_argument('--height', type=int, default=400, help='resolutionY')
    parser.add_argument('--fps', type=int, default=30, help='frame per second')
    # NOTE(review): default is True although the help text says False -- confirm.
    parser.add_argument('--mirroring', default=True, help='mirroring [default: False]')
    parser.add_argument('--compression', default=True, help='compress or not, when saving the video [default: True]')
    return parser.parse_args()
def pc_normalize(pc):
    """Center a point cloud at its centroid and scale it into the unit sphere.

    Returns (normalized_points, scale, centroid) so the transform can be
    inverted later.
    """
    center = np.mean(pc, axis=0)
    shifted = pc - center
    radius = np.max(np.sqrt(np.sum(shifted ** 2, axis=1)))
    return shifted / radius, radius, center
def getOrbbec():
    """Initialize OpenNI2 and open the first available Orbbec device.

    Returns the opened device, or None when either the OpenNI2 runtime or
    the device cannot be opened.
    """
    # Load the platform-specific OpenNI2 runtime.
    try:
        if sys.platform == "win32":
            libpath = "lib/Windows"
        else:
            libpath = "lib/Linux"
        print("library path is: ", os.path.join(os.path.dirname(__file__), libpath))
        openni2.initialize(os.path.join(os.path.dirname(__file__), libpath))
        print("OpenNI2 initialized \n")
    except Exception as ex:
        print("ERROR OpenNI2 not initialized", ex, " check library path..\n")
        return
    # Open the Orbbec camera (any attached device).
    try:
        device = openni2.Device.open_any()
        return device
    except Exception as ex:
        print("ERROR Unable to open the device: ", ex, " device disconnected? \n")
        return
def depth2uvd(depth_array):
    """Back-project a depth image into an (N, 3) point cloud in camera space.

    Uses hard-coded camera intrinsics (cx=309.9648, cy=244.1680,
    fx=515.8994, fy=516.2843 — presumably from a prior calibration; confirm
    against the camera in use), downsamples by 3x in both axes, and drops
    points whose depth is 0 (invalid) or greater than 2000 mm.

    NOTE(review): reads width/height from the module-level ``args``.
    """
    u = np.tile(np.linspace(1, args.width, args.width), (args.height, 1)).astype(np.float32)
    v = np.tile(np.linspace(1, args.height, args.height), (args.width, 1)).astype(np.float32).transpose(1, 0)
    z = depth_array
    x = ((u - 309.9648) * z) / 515.8994 + (244.1680 - v) * 1.3982e-06 * z
    y = (v - 244.1680) * z / 516.2843
    # downsample by a factor of 3 in each direction
    x, y, z = (plane[0::3, 0::3] for plane in (x, y, z))
    # flatten into the standard (N, 3) layout
    cloud = np.stack((x.ravel(), y.ravel(), z.ravel()), axis=1)
    # remove invalid (zero-depth) points, then points farther than 2 m
    cloud = cloud[cloud[:, 2] != 0]
    cloud = cloud[cloud[:, 2] <= 2000]
    return cloud
def main(args):
    '''HYPER PARAMETER

    Run the hand-joint regression demo.

    Loads the trained PointNet2 regression model, then tries a live loop
    reading depth/color frames from an Orbbec camera; if the camera path
    fails for any reason, falls back to predicting on a depth image read
    from disk ('test_depth_1.tif') and visualizing the result.

    args attributes used: gpu, model, num_joint, normal, mirroring,
    width, height, fps.
    '''
    # pin the process to the requested GPU before torch touches CUDA
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    '''MODEL LOADING 加载模型'''
    MODEL = importlib.import_module(args.model) # import the module that defines the model
    # model outputs num_joint * 3 values (x, y, z per joint)
    classifier = MODEL.get_model(args.num_joint * 3, normal_channel=args.normal).cuda()
    experiment_dir = Path('./log/regression/pointnet2_reg_msg')
    checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
    classifier.load_state_dict(checkpoint['model_state_dict'])
    classifier.eval()
    try:
        ''' 加载摄像机 '''
        # live path: read depth + color streams from the Orbbec camera
        device = getOrbbec()
        # create the depth stream
        depth_stream = device.create_depth_stream()
        depth_stream.set_mirroring_enabled(args.mirroring)
        depth_stream.set_video_mode(c_api.OniVideoMode(resolutionX=args.width, resolutionY=args.height, fps=args.fps,
                                                                pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM))
        # grab the UVC color camera
        cap = cv2.VideoCapture(0)
        # enable depth-to-color registration and frame synchronization
        device.set_image_registration_mode(True)
        device.set_depth_color_sync_enabled(True)
        depth_stream.start()
        while True:
            # read one depth frame
            frame_depth = depth_stream.read_frame()
            frame_depth_data = frame_depth.get_buffer_as_uint16()
            # raw depth as a numpy array, also usable for downstream processing
            depth_array = np.ndarray((frame_depth.height, frame_depth.width), dtype=np.uint16, buffer=frame_depth_data)
            # rescale for display with opencv (clamped to [0, 1])
            depth_uint8 = 1 - 250 / (depth_array)
            depth_uint8[depth_uint8 > 1] = 1
            depth_uint8[depth_uint8 < 0] = 0
            cv2.imshow('depth', depth_uint8)
            # read the color image
            _, color_array = cap.read()
            cv2.imshow('color', color_array)
            # (hook) process the color image color_array here
            # (hook) process the depth image depth_array here
            # keyboard listener
            if cv2.waitKey(1) == ord('q'):
                # close windows and release the cameras
                depth_stream.stop()
                cap.release()
                cv2.destroyAllWindows()
                break
            # check whether the device closed ("not very useful" per original author)
            # NOTE(review): this try block sits INSIDE the frame loop, so
            # openni2.unload() is attempted every iteration — confirm whether
            # it was meant to run once after the loop instead.
            try:
                openni2.unload()
                print("Device unloaded \n")
            except Exception as ex:
                print("Device not unloaded: ", ex, "\n")
    except:
        # offline fallback: run the model on a depth image read from disk
        # read depth
        depth_array = matplotlib.image.imread('test_depth_1.tif').astype(np.float32)
        # depth to UVD
        cloud_point = depth2uvd(depth_array)
        # normalize the point cloud
        cloud_point_normal, scale, centroid = pc_normalize(cloud_point)
        cloud_point_normal = np.reshape(cloud_point_normal,(1,-1,3))
        cloud_point_normal = cloud_point_normal.transpose(0, 2, 1)
        # predict on the normalized point cloud
        cloud_point_normal = torch.from_numpy(cloud_point_normal).cuda()
        pred, _ = classifier(cloud_point_normal)
        # undo the normalization on the prediction (scale back, re-add centroid)
        pred_reduction = pred.cpu().data.numpy()
        pred_reduction = pred_reduction * np.tile(scale, (args.num_joint * 3, 1)).transpose(1, 0)
        pred_reduction = pred_reduction + np.tile(centroid, (1, args.num_joint))
        pred_reduction = np.reshape(pred_reduction,(-1,3))
        displayPoint2(cloud_point, pred_reduction, 'bear')
        # with torch.no_grad():
        # classifier.eval()
        # instance_acc, class_acc = test(classifier.eval(), testDataLoader, vote_num=args.num_votes)
# Script entry point: parse CLI arguments and run the live/offline pipeline.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| StarcoderdataPython |
180490 | """
Full CI based on determinants rather than on CSFs.
The approach is the one introduced by
Olsen J Chem Phys 89 2185 (1988)
It is also described in the book
"Molecular Electronic-Structure Theory"
by Helgaker, Jorgensen, and Olsen.
There it is called 'Minimal operator count (MOC) method'
written by <NAME>
Notation:
Book of Helgaker.
"""
import itertools
import numpy as np
import scipy.sparse as spspa
import scipy.sparse.linalg as spspalin
import scipy.linalg as splin
from scipy.special import binom
try: from PyQuante.cints import ijkl2intindex
except:
print "cints import failed in CI.py"
from PyQuante.pyints import ijkl2intindex
from PyQuante.CI import TransformInts
def single_excitations(n):
    """Return every ordered index pair (p, q) for n orbitals, i.e. the
    index combinations of all single-excitation operators E_pq."""
    return list(itertools.product(range(n), repeat=2))
def double_excitations(n):
    """Return every ordered index quadruple (p, q, r, s) for n orbitals,
    i.e. the index combinations of all double excitations E_pq E_rs."""
    return list(itertools.product(range(n), repeat=4))
def transform_one_ints(h, orbs):
    """AO -> MO transform of the one-electron Hamiltonian: orbs.T * h * orbs.

    ``orbs`` is the coefficient matrix with rows indexing orbitals and
    columns indexing the basis-function coefficients; see
    http://vergil.chemistry.gatech.edu/resources/programming/mp2-transform-project.pdf
    for details. For very large basis sizes this might need to be computed
    on the fly instead of stored.
    """
    half_transformed = np.dot(h, orbs)
    return np.dot(orbs.T, half_transformed)
def e_pq_on_string(p, q, string):
    """Apply the excitation operator a^+_p a_q to an occupation string
    (a sorted list of occupied orbital indices).

    Returns (phase, new_string) with phase = +/-1, or (0, 0) when the
    operator annihilates the state: q is unoccupied, or p is already
    occupied and was not the orbital just emptied (p != q).
    The caller is expected to have ensured ``string`` is sorted.
    """
    if q not in string:
        # a_q acting on an unoccupied orbital annihilates the state
        return 0, 0
    if p in string and p != q:
        # a^+_p would doubly occupy an orbital that was not emptied
        return 0, 0
    new_string = list(string)
    # phase picked up by anticommuting a_q into position
    sign = (-1) ** new_string.index(q)
    new_string.remove(q)
    # apply the creator p and restore sorted order
    new_string.append(p)
    new_string.sort()
    # phase picked up by anticommuting a^+_p into position
    sign *= (-1) ** new_string.index(p)
    return sign, new_string
class FCISolver(object):
""" Interface to the scipy.sparse.linalg.eigs eigenvalue solver"""
def __init__(self, h, ERI, enuke, orbs, n_elec, multiplicity, m_s, k=4, sigma_eigs=None, which='SA', v0=None, maxiter=None, tol=0, return_eigenvectors=True ):
"""
Parameters:
h : one-electron integrals over basis functions
ERI : electron repulsion integrals over basis functions
enuke : The nuclear attraction energy, Molecule.get_enuke()
orbs : coefficient matrix from HF calculation giving the
orbitals in rows and the bfs coeffs in columns
n_elec : total number of electron
multiplicity: 2*S+1
m_s : M_s component of total spin.
keyword parameters passed to eigs solver (see scipy docs):
k : number of eigenvalues computed
sigma_eigs : number to which eigenvalues are close
(should be set to increase performance)
which : if set to 'SR' calculate k smalles real part egenvalues
v0 : initial vector to start from
perhaps HF vector (1,0,0...)
maxiter : maximum number of Arnoldi updates allowed
tol : tolerance in calculation of eigenvalues
0 means machine precision
return_eigenvectors: return eigenvector in addition to eigenvalues
if set to True
"""
self.enuke = enuke
self.k = k
self.sigma_eigs = sigma_eigs
self.which = which
self.v0 = None
self.maxiter=maxiter
self.tol=tol
self.return_eigenvectors = return_eigenvectors
# number of alpha electrons
self.n_alpha = 0.5*n_elec + m_s
# number of beta electrons
self.n_beta = 0.5*n_elec - m_s
# number of orbitals
self.n_orbs = orbs.shape[0]
# number of alpha strings
self.len_alpha = int(binom(self.n_orbs,self.n_alpha))
# number of beta strings
self.len_beta = int(binom(self.n_orbs,self.n_beta))
assert self.n_alpha +self.n_beta == n_elec
# Instantiate Sigma class
self.SigmaInst = Sigma(np.eye(self.len_alpha, self.len_beta), h, ERI, orbs, n_elec, multiplicity, m_s)
# shape of the H matrix
self.H_mat_shape = (self.len_alpha*self.len_beta , self.len_alpha*self.len_beta)
# shape of the coefficient matrix in Sigma class
self.c_mat_shape = (self.len_alpha , self.len_beta)
# shape of the corresponding vector passed to eigs
self.c_vec_shape = self.len_alpha*self.len_beta
# Linear operator passed to eigensolver
self.LinOp = spspalin.LinearOperator(self.H_mat_shape, self.matvec, dtype=np.float64)
def matvec(self, vec):
""" The reshaped matrix vector step needed for the iterations
in eigs solver.
The steps are:
1. reshape vec to matrix
2. get sigma
3. reshape back and return
"""
vec_mat = vec.reshape( self.c_mat_shape )
self.SigmaInst.c_mat = vec_mat
new_vec_mat = self.SigmaInst.get_sigma()
return new_vec_mat.reshape(self.c_vec_shape)
def iterate(self):
eva, eve = spspalin.eigsh(self.LinOp,k=self.k, sigma = self.sigma_eigs, which = self.which, v0 = self.v0, maxiter= self.maxiter, tol=self.tol, return_eigenvectors = self.return_eigenvectors)
print "diagonalization sucessful"
self.eva, self.eve = self.sort_and_add_enuke(eva,eve)
return self.eva, self.eve
def sort_and_add_enuke(self, eva, eve):
""" sort the eva end eve and add the nuclear attraction energy
to eva. """
# sort
indx = eva.argsort()
eva = eva[indx]
eve = eve[:,indx]
# add enuke
eva += self.enuke
return eva, eve
class FCIExactSolver(object):
"""
In contrast to FCISolver, this method build the full CI
Hamiltonian matrix explicitly, and then diagonalizes it exactly.
It is only suitable for small CI spaces and is more intendend for
debugging purposes.
"""
def __init__(self, h, ERI, enuke, orbs, n_elec, multiplicity, m_s):
"""
Parameters:
h : one-electron integrals over basis functions
ERI : electron repulsion integrals over basis functions
orbs : coefficient matrix from HF calculation giving the
orbitals in rows and the bfs coeffs in columns
n_elec : total number of electron
multiplicity: 2*S+1
m_s : M_s component of total spin.
"""
# Instantiate FCISolver class to access necessarry structures.
self.FCISolverInst = FCISolver(h, ERI, enuke, orbs, n_elec, multiplicity, m_s)
def get_H_mat(self):
""" build the Hamiltonian matrix in the I_c = I_alpha I_beta space.
The principle is as follows:
With the Sigma class we have a (hopefully efficient) method to
calculate priducts of the Hamiltonian matrix (tensor) with a
coefficient vector (matrix).
The, e.g., 1st column of a matrix A is obtained by the
multiplication of A with the vector (1,0,0...,0).
This principle is applied for each of the len_alpha*len_beta
components of the coefficient vector.
The reshaping of the coeffitient vector to an coefficient
matrix is handled by the matvec method of FCISolver class.
"""
self.H_mat = np.zeros((self.FCISolverInst.len_alpha*self.FCISolverInst.len_beta,self.FCISolverInst.len_alpha*self.FCISolverInst.len_beta))
for col in xrange(self.FCISolverInst.len_alpha*self.FCISolverInst.len_beta):
""" loop over c_mat vector """
vec = np.zeros((self.FCISolverInst.len_alpha*self.FCISolverInst.len_beta))
vec[col] = 1.
self.H_mat[:,col] = self.FCISolverInst.matvec(vec)
print "build of H_mat successful."
def diagonalize(self):
""" diagonalize the Hamiltonian matrix """
try: self.H_mat
except: self.get_H_mat()
eva, eve = splin.eigh(self.H_mat)
self.eva, self.eve = self.FCISolverInst.sort_and_add_enuke(eva,eve)
print "diagonalization successful"""
return self.eva, self.eve
class Graph(object):
    """ Vertex- and arc-weight graph for addressing occupation strings in
    reverse lexical ordering; see Helgaker section 11.8.2.

    Differences to Helgaker: orbital numbering starts at 0, and so do the
    string addresses.
    """
    def __init__(self, n_orbs, n_electron):
        """
        n_orbs     : number of orbitals
        n_electron : number of electrons
        """
        self.n_orbs = n_orbs
        self.n_electron = n_electron
        self.get_vertex_weights()
        self.get_arc_weights()
        self.get_occupations()
        assert int(binom(self.n_orbs, self.n_electron)) == len(self.occupations)
    def get_vertex_weights(self):
        """ Fill the (n_orbs+1) x (n_electron+1) vertex-weight table.

        Rows index orbitals, columns index electron counts; each entry
        counts the paths from the graph head to that vertex.
        """
        self.vert_weights = np.zeros((self.n_orbs + 1, self.n_electron + 1), dtype=np.int32)
        self.vert_weights[0, 0] = 1
        for orb in range(1, self.n_orbs + 1):
            for nel in range(self.n_electron + 1):
                if nel > orb:
                    # upper triangle: more electrons than orbitals so far
                    continue
                if orb > nel + self.n_orbs - self.n_electron:
                    # lower region: too few orbitals left for the electrons
                    continue
                if nel == 0:
                    self.vert_weights[orb, nel] = self.vert_weights[orb - 1, nel]
                else:
                    self.vert_weights[orb, nel] = (self.vert_weights[orb - 1, nel]
                                                   + self.vert_weights[orb - 1, nel - 1])
    def get_arc_weights(self):
        """ Fill the arc weights for the vertical arcs, represented as an
        (n_orbs, n_electron) array derived from the vertex weights. """
        self.arc_weights = np.zeros((self.n_orbs, self.n_electron), dtype=np.int32)
        for orb in range(self.n_orbs):
            for nel in range(self.n_electron):
                # skip vertices outside the allowed band of the graph
                if nel > orb or orb > nel + self.n_orbs - self.n_electron:
                    continue
                self.arc_weights[orb, nel] = self.vert_weights[orb, nel + 1]
    def address(self, occupation):
        """ Address of a string given as a SORTED list of occupied
        orbitals, e.g. (0, 2, 3) for the string a^+_0 a^+_2 a^+_3.

        Orbital numbering and addresses both start at 0.
        """
        return sum(self.arc_weights[occupation[i], i]
                   for i in range(self.n_electron))
    def get_occupations(self):
        """ Build the list of all occupations (tuples) in reverse lexical
        order, i.e. sorted by ascending address. """
        combos = itertools.combinations(range(self.n_orbs), self.n_electron)
        self.occupations = sorted(combos, key=self.address)
class Sigma(object):
    """ Compute the sigma matrix (the Hamiltonian applied to a CI
    coefficient matrix) for a given coefficient matrix c_mat.

    The one- and two-electron contributions are assembled separately
    (alpha, beta, alpha-alpha, beta-beta, alpha-beta) following the
    minimal-operator-count scheme of Olsen, J Chem Phys 89, 2185 (1988),
    with equation numbers referring to Helgaker's book.
    """
    def __init__(self, c_mat, h, ERI, orbs, n_elec, multiplicity, m_s):
        """
        Parameters:
        c_mat  : the coefficient matrix indexed with alpha (rows) and
                 beta (columns) string addresses
        h      : one-electron integrals over basis functions
        ERI    : electron repulsion integrals over basis functions
        orbs   : coefficient matrix from the HF calculation giving the
                 orbitals in rows and the bfs coeffs in columns
        n_elec : total number of electrons
        multiplicity: 2*S+1
        m_s    : M_s component of the total spin
        """
        self.c_mat = c_mat
        self.h = h
        self.ERI = ERI
        self.orbs = orbs
        self.n_elec = n_elec
        self.multiplicity = multiplicity
        self.m_s = m_s
        # see (11.6.1)
        self.n_alpha = int(0.5*self.n_elec + self.m_s)
        self.n_beta = int(0.5*self.n_elec - self.m_s)
        assert self.n_alpha+self.n_beta == self.n_elec
        self.n_orbs = self.orbs.shape[0]
        # all single (p,q) and double (p,q,r,s) excitation index tuples
        self.singles = single_excitations(self.n_orbs)
        self.doubles = double_excitations(self.n_orbs)
        # addressing graphs for the alpha and beta occupation strings
        self.AlphaStrings = Graph(self.n_orbs, self.n_alpha)
        self.BetaStrings = Graph(self.n_orbs, self.n_beta)
    def get_sigma(self):
        """ Add all sigma components together:
            one-electron terms: sigma_alpha, sigma_beta (11.8.20)
            two-electron terms: sigma_alpha_alpha, sigma_beta_beta,
                                sigma_alpha_beta (11.8.29)
        """
        # if self.m_s == 0:
        if False:
            """ In case of 0 spin projection, i.e. as many spin up
            (alpha) as spin down(beta) electrons, sigma_beta can be
            obtained from sigma_alpha and sigma_alpha_beta can be
            obtained from sigma_alpha_alpha. See
            http://vergil.chemistry.gatech.edu/notes/ci/ci.pdf
            and
            J Chem Phys 89 2185 (1988)
            for details.
            ATTENTION. Disable this option for the moment. It gives me
            incorrect results. Gives correct results if second line is
            sigma_beta = ...* sigma_alpha
            i.e. no transpose of sigma_alpha.
            As long as I don't understand this, it should be disabled!
            """
            sigma_alpha = self.get_sigma_alpha() + self.get_sigma_alpha_alpha()
            # (-1)**S * sigma_alpha.T (Eq.15 in J Chem Phys paper)
            sigma_beta = (-1)**(0.5*(self.multiplicity - 1)) * sigma_alpha.T
            return sigma_alpha + sigma_beta + self.get_sigma_alpha_beta()
        else:
            """ M_s != 0 """
            return self.get_sigma_alpha() + self.get_sigma_beta() + self.get_sigma_alpha_alpha() + self.get_sigma_beta_beta() + self.get_sigma_alpha_beta()
    # one electron part
    def get_sigma_alpha(self):
        """ One-electron alpha contribution: K^alpha * C (11.8.26). """
        # build K^alpha lazily on first use
        try: self.k_alpha
        except: self.get_k_alpha()
        # dot product of sparse matrix k_alpha and dense matrix c_mat
        return self.k_alpha*self.c_mat
    def get_sigma_beta(self):
        """ One-electron beta contribution: C * (K^beta).T (11.8.27),
        beware the transpose!!! """
        try: self.k_beta
        except: self.get_k_beta()
        # dot product of sparse matrix k_beta and dense matrix c_mat
        return self.c_mat*self.k_beta.transpose()
    def get_k_alpha(self):
        """ Build the sparse K^alpha matrix (11.8.24). """
        self.k_alpha = self.get_k_gamma("alpha")
        print " build of k_alpha successful"
    def get_k_beta(self):
        """ Build the sparse K^beta matrix (11.8.25). """
        self.k_beta = self.get_k_gamma("beta")
        print " build of k_beta successful"
    def get_k_gamma(self, alpha_beta):
        """ Build the sparse K^gamma matrix (11.8.24/25); gamma is
        "alpha" or "beta". Only nonzero elements are stored.
        """
        # effective one-electron matrix k_pq, built lazily
        try: self.k_mat
        except: self.get_k_mat()
        if alpha_beta == "alpha":
            Strings = self.AlphaStrings
        elif alpha_beta == "beta":
            Strings = self.BetaStrings
        else:
            raise ValueError, 'argument alpha_beta must be alpha or beta'
        row_index = []
        column_index = []
        data = []
        for i_string in Strings.occupations:
            for j_string in Strings.occupations:
                row = Strings.address(i_string)
                column = Strings.address(j_string)
                elem = 0
                if row == column:
                    """ strings are equal """
                    # diagonal element: sum of k_mat over occupied orbitals
                    elem = 0
                    for occ in i_string:
                        elem += self.k_mat[occ,occ]
                    row_index.append(row)
                    column_index.append(column)
                    data.append(elem)
                    continue
                for p,q in self.singles:
                    """ loop over excitations """
                    # apply excitation operator on string:
                    phase, e_pq_j = e_pq_on_string(p,q,j_string)
                    if e_pq_j == 0:
                        """ tried to annihilate vaccum or to create
                        doubly """
                        continue
                    if row != Strings.address(e_pq_j):
                        """ strings differed by more than the pair p q """
                        continue
                    else:
                        elem += phase*self.k_mat[p,q]
                        # row_index.append(row)
                        # column_index.append(column)
                        # data.append(elem)
                        # # there will not be another pq that satisfies,
                        # # exit pq loop
                        # break
                # keep only numerically nonzero elements in the sparse matrix
                if abs(elem) > 1e-14:
                    row_index.append(row)
                    column_index.append(column)
                    data.append(elem)
        return spspa.csr_matrix( (np.array(data),(np.array(row_index),np.array(column_index))), shape=(len(Strings.occupations),len(Strings.occupations)) )
    def get_k_mat(self):
        """ build k_pq from (11.8.8) """
        # lazily transform one-electron ints and ERIs to the MO basis
        try: self.h_mat
        except: self.h_mat = transform_one_ints(self.h,self.orbs)
        try: self.MOInts
        except: self.MOInts = TransformInts(self.ERI,self.orbs)
        # except: self.MOInts = TransformInts_test(self.ERI,self.orbs)
        self.k_mat = np.zeros((self.n_orbs,self.n_orbs))
        for p,q in self.singles:
            for r in xrange(self.n_orbs):
                self.k_mat[p,q] -= 0.5*self.MOInts[ijkl2intindex(p,r,r,q)]
                # self.k_mat[p,q] -= 0.5*self.MOInts[p,r,r,q]
            self.k_mat[p,q] += self.h_mat[p,q]
        print "build of k_mat successful"
    # two-electron part
    def get_sigma_alpha_alpha(self):
        """ Two-electron alpha-alpha contribution: G^alpha * C (11.8.35). """
        try: self.G_alpha
        except: self.get_G_alpha()
        # dot product of sparse matrix G_alpha and dense matrix c_mat
        return self.G_alpha*self.c_mat
    def get_sigma_beta_beta(self):
        """ Two-electron beta-beta contribution: C * (G^beta).T (11.8.36),
        beware the transpose!!! """
        try: self.G_beta
        except: self.get_G_beta()
        # dot product of sparse matrix G_beta and dense matrix c_mat
        return self.c_mat* self.G_beta.transpose()
    def get_G_alpha(self):
        """ Build the sparse G^alpha matrix (11.8.33). """
        self.G_alpha = self.get_G_gamma("alpha")
        print "build of G_alpha successful"
    def get_G_beta(self):
        """ Build the sparse G^beta matrix (11.8.34). """
        self.G_beta = self.get_G_gamma("beta")
        print "build of G_beta successful"
    def get_G_gamma(self, alpha_beta):
        """ Build the sparse G^gamma matrix (11.8.33/34); gamma is
        "alpha" or "beta". Only nonzero elements are stored.
        """
        # MO-basis two-electron integrals, built lazily
        try: self.MOInts
        except: self.MOInts = TransformInts(self.ERI,self.orbs)
        # except: self.MOInts = TransformInts_test(self.ERI,self.orbs)
        if alpha_beta == "alpha":
            Strings = self.AlphaStrings
        elif alpha_beta == "beta":
            Strings = self.BetaStrings
        else:
            raise ValueError, 'argument alpha_beta must be alpha or beta'
        row_index = []
        column_index = []
        data = []
        for i_string in Strings.occupations:
            for j_string in Strings.occupations:
                row = Strings.address(i_string)
                column = Strings.address(j_string)
                elem = 0
                for p,q,r,s in self.doubles:
                    """ loop over excitations """
                    # apply excitation E_rs operator on string:
                    phase_rs, e_rs_j = e_pq_on_string(r,s,j_string)
                    if e_rs_j == 0:
                        """ tried to annihilate vaccum or to create
                        doubly """
                        continue
                    # apply excitation E_pq operator on string:
                    phase_pq, e_pqrs_j = e_pq_on_string(p,q,e_rs_j)
                    if e_pqrs_j == 0:
                        """ tried to annihilate vaccum or to create
                        doubly """
                        continue
                    if row != Strings.address(e_pqrs_j):
                        """ strings differed by more than two pairs p q r s """
                        continue
                    else:
                        elem += 0.5 *phase_pq *phase_rs *self.MOInts[ijkl2intindex(p,q,r,s)]
                        # elem += 0.5 *phase_pq *phase_rs *self.MOInts[p,q,r,s]
                    ### Need to think when can exit the loop. For sure if p!=q!=r!=s
                    # if p!=q and q!=r and r!=s:
                    #     """ there will not be another pqrs that
                    #     satisfies, exit pqrs loop """
                    #     row_index.append(row)
                    #     column_index.append(column)
                    #     data.append(elem)
                    #     break
                # keep only numerically nonzero elements in the sparse matrix
                if abs(elem) > 1e-14:
                    row_index.append(row)
                    column_index.append(column)
                    data.append(elem)
        return spspa.csr_matrix( (np.array(data),(np.array(row_index),np.array(column_index))), shape=(len(Strings.occupations),len(Strings.occupations)) )
    def get_sigma_alpha_beta(self):
        """ Two-electron mixed-spin contribution (11.8.39): sum over all
        (p, q) of the per-excitation terms from get_sigma_alpha_beta_pq. """
        sigma_alpha_beta = np.zeros( (len(self.AlphaStrings.occupations),len(self.BetaStrings.occupations)) )
        for p,q in self.singles:
            """ Matrix summation """
            sigma_alpha_beta += self.get_sigma_alpha_beta_pq(p,q)
        return sigma_alpha_beta
    def get_sigma_alpha_beta_pq(self,p,q):
        """ One (p, q) term of (11.8.43).

        Dot product of a dense matrix with a sparse matrix: temp is the
        matrix multiplication of <I_a|E_pq|J_a> * C_Ia,Ja from (11.8.41).
        """
        # both operator lists are built lazily on first use
        try: self.D_alpha_ia_jb_pq_list
        except: self.get_D_alpha_ia_jb_pq_list()
        try: self.G_beta_ib_jb_pq_list
        except : self.get_G_beta_ib_jb_pq_list()
        temp = self.D_alpha_ia_jb_pq_list[p][q] * self.c_mat
        return temp* self.G_beta_ib_jb_pq_list[p][q].transpose()
    def get_D_alpha_ia_jb_pq_list(self):
        """ Create a list of lists (indexed [p][q]) containing the sparse
        matrices D_alpha_ia_jb_pq(p, q) from (11.8.41), without the
        product with C_JaJb. """
        self.D_alpha_ia_jb_pq_list = []
        for p in xrange(self.n_orbs):
            """ loop manually over excitations """
            row=[]
            for q in xrange(self.n_orbs):
                row.append(self.get_D_alpha_ia_jb_pq(p,q))
            self.D_alpha_ia_jb_pq_list.append(row)
        print "build of D_alpha_ia_jb_pq_list successful"
    def get_D_alpha_ia_jb_pq(self,p,q):
        """ (11.8.41) but matrix product with c_mat is pulled into
        (11.8.39) """
        row_index=[]
        column_index=[]
        data=[]
        for ia_string in self.AlphaStrings.occupations:
            """
            set up matrix <I_alpha|e^alpha_pq|J_alpha>
            apply <I_alpha| E_pq^alpha = <I_alpha| a^+_p a_q
            = a^+_q a_p |I_alpha>.
            If this vanishes, the row I_alpha is zero, since
            D_IJ^alpha [pq] is zero in (11.8.41).
            This is done to get minimal operator count.
            """
            phase, e_qp_ia = e_pq_on_string(q,p,ia_string)
            if e_qp_ia == 0:
                continue
            for ja_string in self.AlphaStrings.occupations:
                row = self.AlphaStrings.address(ia_string)
                # NOTE(review): ja_string is an ALPHA string but is addressed
                # with BetaStrings here; this only coincides with
                # AlphaStrings.address when n_alpha == n_beta — confirm.
                column = self.BetaStrings.address(ja_string)
                if column == self.AlphaStrings.address(e_qp_ia):
                    """ nonzero element in <I_alpha|e^alpha_pq|J_alpha>"""
                    row_index.append(row)
                    column_index.append(column)
                    data.append(phase)
        return spspa.csr_matrix( (np.array(data),(np.array(row_index),np.array(column_index))), shape=(len(self.AlphaStrings.occupations),len(self.AlphaStrings.occupations)) )
    def get_G_beta_ib_jb_pq_list(self):
        """ Create a list of lists (indexed [p][q]) containing the sparse
        matrices G_beta_ib_jb_pq(p, q) from (11.8.42). """
        self.G_beta_ib_jb_pq_list = []
        for p in xrange(self.n_orbs):
            """ loop manually over excitations """
            row=[]
            for q in xrange(self.n_orbs):
                row.append(self.get_G_beta_ib_jb_pq(p,q))
            self.G_beta_ib_jb_pq_list.append(row)
        print "build of G_beta_ib_jb_pq_list successful"
    def get_G_beta_ib_jb_pq(self,p,q):
        """ Build one sparse G_beta matrix for fixed (p, q) per (11.8.42). """
        row_index=[]
        column_index=[]
        data=[]
        for ib_string in self.BetaStrings.occupations:
            """
            """
            for jb_string in self.BetaStrings.occupations:
                row = self.BetaStrings.address(ib_string)
                column = self.BetaStrings.address(jb_string)
                elem = 0
                for r,s in self.singles:
                    """ loop over excitations """
                    # apply excitation operator on string:
                    phase, e_rs_jb = e_pq_on_string(r,s,jb_string)
                    if e_rs_jb == 0:
                        """ tried to annihilate vaccum or to create
                        doubly """
                        continue
                    if row != self.BetaStrings.address(e_rs_jb):
                        """ strings differed by more than the pair p q """
                        continue
                    else:
                        elem += phase*self.MOInts[ijkl2intindex(p,q,r,s)]
                        # elem += phase*self.MOInts[p,q,r,s]
                # keep only numerically nonzero elements in the sparse matrix
                if abs(elem) > 1e-14:
                    row_index.append(row)
                    column_index.append(column)
                    data.append(elem)
        return spspa.csr_matrix( (np.array(data),(np.array(row_index),np.array(column_index))), shape=(len(self.BetaStrings.occupations),len(self.BetaStrings.occupations)) )
def test_graph():
    """ Smoke test: print the addresses of a few example strings and the
    full reverse-lexically-ordered occupation list for Graph(5, 3). """
    graph = Graph(5, 3)
    print("address 0,1,2 = %i" % graph.address([0, 1, 2]))
    print("address 0,1,3 = %i" % graph.address([0, 1, 3]))
    print("address 1,2,3 = %i" % graph.address([1, 2, 3]))
    print("address 0,3,4 = %i" % graph.address([0, 3, 4]))
    print("address 2,3,4 = %i" % graph.address([2, 3, 4]))
    print("")
    print("All occupations in reverse lexical ordering:")
    print(graph.occupations)
def test_e_pq_on_string():
    """ Print e_pq_on_string results for a few hand-checkable cases:
    annihilating (E_11, E_21), diagonal (E_22), and off-diagonal
    (E_12, E_02) excitations. """
    print "E_11 on [0,2] = ", e_pq_on_string(1,1,[0,2])
    print "E_22 on [0,2] = ", e_pq_on_string(2,2,[0,2])
    print "E_12 on [0,2] = ", e_pq_on_string(1,2,[0,2])
    print "E_21 on [0,2] = ", e_pq_on_string(2,1,[0,2])
    print "E_02 on [1,2] = ", e_pq_on_string(0,2,[1,2])
def test_fci():
    """ End-to-end FCI smoke test on H2 at a bond length of 1.4 a.u.

    Runs a Hartree-Fock calculation with PyQuante, then builds and
    diagonalizes the full CI Hamiltonian exactly and prints the resulting
    correlation energy for comparison with the literature value.
    """
    from Molecule import Molecule
    from PyQuante import SCF
    # singlet H2: 2 electrons, multiplicity 1, M_s = 0
    nel = 2
    mult = 1
    m_s = 0
    k=10
    # h2 = Molecule('h2',[(1,(0,0,0)),(1,(1.4,0,0))], multiplicity = mult)
    h2 = Molecule('h2',[(1,(0,0,0)),(1,(1.4,0,0))])
    Solver = SCF(h2,method = "HF")
    Solver.iterate()
    print "orbital energies ",Solver.solver.orbe
    print "HF energy = ",Solver.energy
    # iterative solver left disabled; the exact solver below is used instead
    # FCIInst = FCISolver(Solver.h, Solver.ERI, Solver.solver.orbs, nel, mult, m_s, k=k, sigma_eigs=None)
    # eva, eve = FCIInst.iterate()
    FCIInst = FCIExactSolver(Solver.h, Solver.ERI, h2.get_enuke(), Solver.solver.orbs, nel, mult, m_s)
    eva,eve = FCIInst.diagonalize()
    print "eva = ", eva
    # print "eve = ",eve
    print "correlation energy = ", eva[0] - Solver.energy
    print "correlation energy should be (with 6-31g**) -0.03387 a.u."
# Entry point: run the H2 FCI smoke test (other tests left commented out).
if __name__ == "__main__":
    # test_graph()
    test_fci()
    # test_e_pq_on_string()
| StarcoderdataPython |
3214478 | <reponame>gjeunen/reference_database_creator
#! /usr/bin/env python3
## import modules
import argparse
from Bio import Entrez
import time
from urllib.error import HTTPError
import http.client
# Force urllib-based requests down to HTTP/1.0 (disables chunked transfer
# and keep-alive); presumably a workaround for truncated/IncompleteRead
# responses from the NCBI servers — TODO confirm it is still needed.
http.client.HTTPConnection._http_vsn = 10
http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'
import subprocess as sp
import shutil
import re
import pandas as pd
from tqdm import tqdm
from Bio.Seq import Seq
from Bio import SeqIO
import os
import matplotlib
import matplotlib.pyplot as plt
from Bio import AlignIO
from Bio import Phylo
from Bio.Align.Applications import MuscleCommandline
from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceMatrix
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
## function: download sequencing data from NCBI
def ncbi_download(args):
DB = args.database
QUERY = args.query
OUTPUT = args.output_filename
EMAIL = args.email
Entrez.email = EMAIL
print('\nlooking up the number of sequences that match the query\n')
first_handle = Entrez.esearch(db=DB, term=QUERY, rettype='fasta')
first_record = Entrez.read(first_handle)
first_handle.close()
count = int(first_record['Count'])
second_handle = Entrez.esearch(db=DB, term=QUERY, retmax=count, rettype='fasta', usehistory = 'y')
second_record = Entrez.read(second_handle)
second_handle.close()
id_list = second_record['IdList']
count = int(second_record['Count'])
assert(count == len(id_list))
webenv = second_record['WebEnv']
query_key = second_record['QueryKey']
print('found {} matching sequences'.format(second_record['Count']))
print('\nstarting the download\n')
batch_size = 5000
out_handle = open(OUTPUT, 'w')
for start in tqdm(range(0, count, batch_size)):
attempt = 1
success = False
while attempt <= 3 and not success:
attempt += 1
try:
fetch_handle = Entrez.efetch(db=DB, rettype='fasta',
retstart=start, retmax=batch_size,
webenv=webenv, query_key=query_key)
success = True
except HTTPError as err:
if 500 <= err.code <= 599:
print(f"Received error from server {err}")
print("Attempt {attempt} of 3")
time.sleep(15)
else:
raise
data = fetch_handle.read()
fetch_handle.close()
out_handle.write(data)
out_handle.close()
## function: in silico PCR
def in_silico_pcr(args):
## user input
FWD = args.fwd
REV = args.rev
ASSAY = args.assay
INPUT = args.input
## reverse complement reverse primer sequence
REV_DNA = Seq(REV)
REV_CORRECT = str(REV_DNA.reverse_complement())
## setting variable names using the info from user input
TRIMMED_INIT = 'init_trimmed_' + ASSAY + '_' + INPUT
UNTRIMMED_INIT = 'init_untrimmed_' + ASSAY + '_' + INPUT
REVCOMP_UNTRIMMED_INIT = 'revcomp_' + UNTRIMMED_INIT
TRIMMED_REVCOMP = 'revcomp_' + TRIMMED_INIT
UNTRIMMED_REVCOMP = 'untrimmed_' + REVCOMP_UNTRIMMED_INIT
FINAL_TRIMMED = 'final_trimmed_' + ASSAY + '_' + INPUT
OVERLAP = str(min([len(FWD), len(REV_CORRECT)]))
#ERROR = str(round(min([3/len(FWD), 3/len(REV_CORRECT)]), 2))
#print(ERROR)
ERROR = str(4.5)
ADAPTER = FWD + '...' + REV_CORRECT
## run cutadapt on downloaded fasta file
count_init = len(list(SeqIO.parse(INPUT, 'fasta')))
print('\nrunning in silico PCR on fasta file containing {} sequences'.format(count_init))
#cmnd_cutadapt_1 = ['cutadapt', '-g', ADAPTER, '-o', TRIMMED_INIT, INPUT, '--untrimmed-output', UNTRIMMED_INIT, '--no-indels', '-e', ERROR, '--overlap', OVERLAP, '--quiet']
cmnd_cutadapt_1 = ['cutadapt', '-g', ADAPTER, '-o', TRIMMED_INIT, INPUT, '--untrimmed-output', UNTRIMMED_INIT, '--no-indels', '-e', ERROR, '--overlap', OVERLAP]
sp.call(cmnd_cutadapt_1)
count_trimmed_init = len(list(SeqIO.parse(TRIMMED_INIT, 'fasta')))
print('\nfound primers in {} sequences'.format(count_trimmed_init))
## run vsearch to reverse complement untrimmed sequences
count_untrimmed_init = len(list(SeqIO.parse(UNTRIMMED_INIT, 'fasta')))
print('\nreverse complementing {} untrimmed sequences'.format(count_untrimmed_init))
cmnd_vsearch_revcomp = ['vsearch', '--fastx_revcomp', UNTRIMMED_INIT, '--fastaout', REVCOMP_UNTRIMMED_INIT, '--quiet']
sp.call(cmnd_vsearch_revcomp)
## run cutadapt on reverse complemented untrimmed sequences
print('\nrunning in silico PCR on {} reverse complemented untrimmed sequences'.format(count_untrimmed_init))
cmnd_cutadapt_2 = ['cutadapt', '-g', ADAPTER, '-o', TRIMMED_REVCOMP, REVCOMP_UNTRIMMED_INIT, '--untrimmed-output', UNTRIMMED_REVCOMP, '--no-indels', '-e', ERROR, '--overlap', OVERLAP, '--quiet']
sp.call(cmnd_cutadapt_2)
count_trimmed_second = len(list(SeqIO.parse(TRIMMED_REVCOMP, 'fasta')))
print('\nfound primers in {} sequences'.format(count_trimmed_second))
## concatenate both trimmed files
with open(FINAL_TRIMMED, 'wb') as wfd:
for f in [TRIMMED_INIT, TRIMMED_REVCOMP]:
with open(f, 'rb') as fd:
shutil.copyfileobj(fd, wfd)
## remove intermediary files
files = [TRIMMED_INIT, UNTRIMMED_INIT, REVCOMP_UNTRIMMED_INIT, TRIMMED_REVCOMP, UNTRIMMED_REVCOMP]
for file in files:
os.remove(file)
## function: creating reference database with taxonomy
def _efetch_with_retry(max_attempts = 3, **efetch_kwargs):
    """Call Entrez.efetch and parse the response, retrying on 5xx errors.

    Retries up to *max_attempts* times with a 15 s pause between attempts
    and re-raises the error once they are exhausted.  (The previous inline
    loops mis-numbered the attempts — printing "Attempt 4 of 3" — and
    silently dropped a whole batch after three failed attempts.)
    """
    for attempt in range(1, max_attempts + 1):
        try:
            handle = Entrez.efetch(**efetch_kwargs)
            return Entrez.read(handle)
        except HTTPError as err:
            # only retry transient server-side (5xx) errors
            if 500 <= err.code <= 599 and attempt < max_attempts:
                print(f"Received error from server {err}")
                print(f"Attempt {attempt} of {max_attempts}")
                time.sleep(15)
            else:
                raise
def ref_database(args):
    """Create a sintax-formatted reference database with taxonomic lineages.

    Reads the curated fasta file ``args.input``, downloads the taxonomic ID
    and lineage for every accession from NCBI (``args.email`` identifies the
    client to the Entrez servers), and writes alternating
    ``>accession;tax=d:...,p:...,...,s:...`` / sequence lines to
    ``args.output``.
    """
    INPUT = args.input
    OUTPUT = args.output
    EMAIL = args.email
    ## retrieve accession numbers from fasta file and store in list
    Entrez.email = EMAIL
    accessions = []       # unique accessions, in order of first appearance
    seen = set()          # O(1) membership test (the old list scan was O(n^2))
    sequence_number = []  # one entry per fasta header, duplicates included
    with open(INPUT) as myfile:
        for line in myfile:
            if line.startswith('>'):
                # accession is the header up to the first '.' (version suffix)
                pattern = line.lstrip('>').split('.')[0]
                sequence_number.append(pattern)
                if pattern not in seen:
                    seen.add(pattern)
                    accessions.append(pattern)
    ## remove wrongly formatted lines (not accession numbers)
    mistakes = ['@', '#', '$', '%', '&', '(', ')', '!', '<', '?', '|', ',', '.', '+', '=', '`', '~']
    correct_accessions = [item for item in accessions if not any(mistake in item for mistake in mistakes)]
    print('\nfound {} accessions in input file'.format(len(sequence_number)))
    print('\nfound {} unique accessions in input file'.format(len(accessions)))
    if len(accessions) - len(correct_accessions) == 0:
        print('\nfound no incorrect formatting in accession numbers')
    else:
        print('\nremoved {} accessions due to incorrect formatting'.format(len(accessions) - len(correct_accessions)))
    ## find taxids for all correct accession numbers, batched to stay within
    ## NCBI request limits
    batch_size = 5000
    NCBI_list = []
    accession_taxid = []
    taxids = []
    print("\ndownloading {} taxonomic ID's from NCBI".format(len(correct_accessions)))
    for start in tqdm(range(0, len(correct_accessions), batch_size)):
        group = correct_accessions[start : start + batch_size]
        NCBI_list.append(_efetch_with_retry(db = 'nuccore', id = ",".join(group), retmode = 'xml', rettype = 'fasta'))
    ## format data into two lists
    for record in NCBI_list:
        for entry in record:
            acc = entry['TSeq_accver']
            taxid = entry['TSeq_taxid']
            accession_taxid.append(str(acc) + ' ' + str(taxid))
            taxids.append(str(taxid))
    uniq_taxid = list(set(taxids))
    print("\nfound {} unique taxonomic ID's".format(len(uniq_taxid)))
    ## retrieve taxonomic lineage for the unique taxids, batched as above
    lineage_batch = 5000
    lineage_list = []
    print("\ndownloading taxonomic lineage for {} taxonomic ID's".format(len(uniq_taxid)))
    for start in tqdm(range(0, len(uniq_taxid), lineage_batch)):
        lineage_group = uniq_taxid[start : start + lineage_batch]
        lineage_list.append(_efetch_with_retry(db = 'taxonomy', retmode = 'xml', id = ','.join(lineage_group)))
    ## format downloaded info into one row of lineage ranks per taxid
    ranks = ['superkingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species']
    lineage_info = []
    for record_set in lineage_list:
        for entry in record_set:
            lineage = {d['Rank']: d['ScientificName'] for d in entry['LineageEx'] if d['Rank'] in ranks}
            # the record's own ScientificName wins over any 'species' rank in LineageEx
            lineage['species'] = entry['ScientificName']
            lineage['taxid'] = entry['TaxId']
            lineage_info.append(lineage)
    tax_list = pd.DataFrame(lineage_info)
    ## combine dataframe with accession list and fasta sequence file
    accession_and_taxid = pd.DataFrame(accession_taxid)
    accession_and_taxid = accession_and_taxid[0].str.split(' ', expand = True)
    accession_and_taxid['accession'] = accession_and_taxid[0].str.split('.').str[0]
    accession_and_taxid.columns = ['acc_name', 'taxid', 'accession']
    # assumes the input fasta is strictly two lines per record — TODO confirm
    sequence = pd.DataFrame(pd.read_csv(INPUT, sep = '\t', header = None).values.reshape(-1, 2))
    sequence['accession'] = sequence[0].str[1:].str.split('.').str[0]
    sequence.columns = ['name', 'sequence', 'accession']
    accession_and_taxid = accession_and_taxid.astype('str')
    tax_list = tax_list.astype('str')
    sequence = sequence.astype('str')
    df = accession_and_taxid.merge(tax_list, how = 'left', on = 'taxid')
    df = df.merge(sequence, on = 'accession')
    ## format the dataframe to final output
    df['species'] = df['species'].str.replace(' ', '_')
    df['sintax'] = '>' + df['accession'] + ';tax=d:' + df['superkingdom'] + ',p:' + df['phylum'] + ',c:' + df['class'] + ',o:' + df['order'] + ',f:' + df['family'] + ',g:' + df['genus'] + ',s:' + df['species']
    datafr = df[['sintax', 'sequence']]
    # sep='\n' writes header and sequence on alternating lines (fasta-like)
    datafr.to_csv(OUTPUT, index = None, header = None, sep = '\n')
## function: dereplicating the database
def dereplicate(args):
    """Collapse identical sequences that share the same taxonomy string.

    Reads the sintax-formatted reference database ``args.input``, keeps one
    representative per (taxonomy, sequence) pair, and writes the
    dereplicated database to ``args.output``.
    """
    INPUT = args.input
    OUTPUT = args.output
    ## subfunctions to be called
    def fasta_to_dict_wDesc(fasta_file):
        """Parse a fasta file into {id: {'sequence': ..., 'description': ...}}.

        Spaces in headers are replaced with underscores so the full header
        (including the ';tax=' suffix) survives as the record id.
        """
        seq_dict = {}
        for record in SeqIO.parse(fasta_file, 'fasta'):
            record.description = record.description.replace(' ', '_')
            record.id = record.description
            seq_dict.setdefault(record.id, {})['sequence'] = str(record.seq)
            seq_dict.setdefault(record.id, {})['description'] = record.description
        return seq_dict
    def derep(seqdict):
        """Group identical sequences; the representative is the first id seen."""
        rep_dict = {}
        derep_dict = {}
        for k, v in seqdict.items():
            rep_dict.setdefault(v, []).append(k)
        for key, value in rep_dict.items():
            derep_dict[value[0]] = {'seq': key, 'size': len(value), 'readlist': value}
        return derep_dict
    def derep_to_seq(derep_dict, size = 'no'):
        """Flatten derep() output to ({id: seq}, {id: readlist}).

        When size != 'no', ';size=N' is appended to each id (usearch-style).
        """
        new_dict = {}
        read_dict = {}
        for k, v in derep_dict.items():
            base_id = k if size == 'no' else k + ';size=' + str(v['size'])
            read_dict[base_id] = v['readlist']
            new_dict[base_id] = v['seq']
        return (new_dict, read_dict)
    ## split sequence file into two dictionaries and define which species need dereplication
    seqs = fasta_to_dict_wDesc(INPUT)
    print('\nfound {} sequences in input file'.format(len(seqs)))
    seq_just_id = {}   # plain sequence id -> sequence
    taxonly = {}       # taxonomy string -> [sequence ids]
    for k, v in seqs.items():
        parts = v['description'].split(';tax=')
        seq_id = parts[0]
        tax = parts[1]
        seq_just_id[seq_id] = v['sequence']
        taxonly.setdefault(tax, []).append(seq_id)
    print('\ndatabase is comprised of {} unique taxa'.format(len(taxonly)))
    need_derep = []    # taxa represented more than once
    singletons = {}    # sequence id -> taxonomy, for taxa that occur once
    for k, v in taxonly.items():
        if len(v) > 1:
            need_derep.append(k)
        else:
            singletons[v[0]] = k
    print('\n{} taxa only occur once in the database'.format(len(singletons)))
    print('\n{} taxa occur multiple times in the database'.format(len(need_derep)))
    # map each sequence id of a multi-member taxon back to its taxonomy
    tax_index = {}
    for k, v in taxonly.items():
        if len(v) > 1:
            for seqid in v:
                tax_index[seqid] = k
    ## dereplicate sequences for species represented more than once in the database
    all_dereps = {}
    for d in need_derep:
        temp_seq_dict = {seqid: seq_just_id[seqid] for seqid in taxonly[d]}
        derep_seq = derep_to_seq(derep(temp_seq_dict), size = 'no')[0]
        for k, v in derep_seq.items():
            all_dereps[k + ';tax=' + tax_index[k]] = v
    ## combine species present only once in the database with the dereplicated dataset
    all_new_seqs = {}
    for k, v in singletons.items():
        all_new_seqs[k + ';tax=' + v] = seq_just_id[k]
    all_new_seqs.update(all_dereps)
    print('\n{} sequences left after dereplication\n'.format(len(all_new_seqs)))
    ## save the dereplicated database (context manager guarantees the handle is closed)
    with open(OUTPUT, 'w') as seqout:
        for k, v in all_new_seqs.items():
            seqout.write('>' + k + '\n' + v + '\n')
## function: phylogenetic tree builder
def _draw_tree(directory, spec_int, level):
    """Build and save a phylogenetic tree for one species at one level.

    Reads ``{directory}/{spec_int}_{level}.fasta`` (*level* is 'genus' or
    'family'), shortens each header to 'accession;species' so it fits on the
    figure, aligns with muscle, builds an identity-distance neighbour-joining
    tree and saves it as ``{spec_int}_{level}_align_tree.pdf``.  This pipeline
    was previously duplicated verbatim for both levels.
    """
    select = []
    for record in SeqIO.parse(f'{directory}/{spec_int}_{level}.fasta', 'fasta'):
        record.description = record.description.replace(';', ',')
        record.id = record.description
        # keep only the accession and the species name from the sintax header
        record.id = record.id.split(',')[0] + ';' + record.id.split(',')[7].split(':')[1]
        record.description = record.id
        select.append(record)
    with open(f'{directory}/{spec_int}_{level}_align.fasta', 'w') as handle:
        SeqIO.write(select, handle, 'fasta')
    # muscle with fast settings: diagonal optimisation, a single iteration
    muscle_cline = MuscleCommandline(input = f'{directory}/{spec_int}_{level}_align.fasta',
                                     out = f'{directory}/{spec_int}_{level}_align.clw',
                                     diags = True,
                                     maxiters = 1,
                                     log = f'{directory}/{spec_int}_{level}_align_log.txt',
                                     clw = True)
    muscle_cline()
    with open(f'{directory}/{spec_int}_{level}_align.clw', 'r') as aln:
        alignment = AlignIO.read(aln, 'clustal')
    # neighbour-joining tree from identity distances
    calculator = DistanceCalculator('identity')
    constructor = DistanceTreeConstructor(calculator, 'nj')
    tree = constructor.build_tree(alignment)
    fig = plt.figure(figsize = (25, 15), dpi = 100)
    matplotlib.rc('font', size=12)
    matplotlib.rc('xtick', labelsize=10)
    matplotlib.rc('ytick', labelsize=10)
    axes = fig.add_subplot(1, 1, 1)
    Phylo.draw(tree, axes=axes, do_show = False)
    fig.savefig(f'{spec_int}_{level}_align_tree.pdf')
def phylo(args):
    """Generate phylogenetic trees and a summary table for species of interest.

    Reads species names from ``args.species``, looks up their lineages on
    NCBI, extracts all database entries (``args.database``) sharing a family,
    genus or species with each species of interest, writes a per-species
    summary table to ``args.output`` and draws one tree per species (genus
    level when the family has more than 50 entries, family level otherwise).
    """
    SPECIES = args.species
    DATABASE = args.database
    EMAIL = args.email
    OUTPUT = args.output
    Entrez.email = EMAIL
    directory = 'temp'
    try:
        os.makedirs(directory, exist_ok = True)
    except OSError as error:
        print("Directory '%s' can not be created" % directory)
    ## read in the text file with species names
    species = []
    with open(SPECIES) as species_list:
        for spec in species_list:
            species.append(spec.rstrip('\n'))
    print('\nfound ' + str(len(species)) + ' species of interest: ' + str(species) + '\n')
    ## retrieve the lineage information for each species
    ## first: uniq ID from species name
    ## second: tax ID from uniq ID
    ## third: taxonomic information from tax ID
    ## fourth: format similar to database
    print('retrieving the taxonomic information from NCBI for ' + str(len(species)) + ' species of interest\n')
    uid = []
    for item in species:
        handle = Entrez.esearch(db = 'nucleotide', term = item, retmode = 'xml', rettype = 'fasta')
        record = Entrez.read(handle)
        # take the first hit for the species name — TODO confirm this is unambiguous
        uid.append(record['IdList'][0])
    taxids = []
    for id in uid:
        handle = Entrez.efetch(db = 'nuccore', id = id, retmode = 'xml', rettype = 'fasta')
        record = Entrez.read(handle)
        taxids.append(str(record[0]['TSeq_taxid']))
    lineage_list = []
    for taxid in taxids:
        lineage_search = Entrez.efetch(db = 'taxonomy', retmode = 'xml', id = taxid)
        lineage_record = Entrez.read(lineage_search)
        lineage_list.append(lineage_record)
    lineage_info = []
    for key in lineage_list:
        lineage = {d['Rank']: d['ScientificName'] for d in key[0]['LineageEx'] if d['Rank'] in ['superkingdom', 'phylum', 'class',
                                                                                               'order', 'family', 'genus', 'species']}
        lineage['species'] = key[0]['ScientificName']
        lineage['taxid'] = key[0]['TaxId']
        lineage_info.append(lineage)
    df = pd.DataFrame(lineage_info)
    df['species'] = df['species'].str.replace(' ', '_')
    # same 'd:...,p:...,...,s:...' layout as the reference database headers
    df['sintax'] = 'd:' + df['superkingdom'] + ',p:' + df['phylum'] + ',c:' + df['class'] + ',o:' + df['order'] + ',f:' + df['family'] + ',g:' + df['genus'] + ',s:' + df['species']
    species_interest = df['sintax'].values.tolist()
    ## extract all entries from the database that share a family status with the species of interest
    # NOTE(review): per-species files are opened in append mode and the temp
    # directory is not cleared between runs, so re-running can duplicate
    # records — confirm this is intended.
    for record in SeqIO.parse(DATABASE, 'fasta'):
        family_rec = record.id.split(',')[4]
        genus_rec = record.id.split(',')[5]
        species_rec = record.id.split(',')[6]
        for species in species_interest:
            family_int = species.split(',')[4]
            genus_int = species.split(',')[5]
            species_int = species.split(',')[6]
            spec_int = species.split(',')[6].split(':')[1]
            if family_int == family_rec:
                with open(f'{directory}/{spec_int}_family.fasta', 'a') as f:
                    SeqIO.write(record, f, 'fasta')
            if genus_int == genus_rec:
                with open(f'{directory}/{spec_int}_genus.fasta', 'a') as f:
                    SeqIO.write(record, f, 'fasta')
            if species_int == species_rec:
                with open(f'{directory}/{spec_int}_species.fasta', 'a') as f:
                    SeqIO.write(record, f, 'fasta')
    ## extract information for data table from newly generated files
    newdict = {}
    for species in species_interest:
        spec_int = species.split(',')[6].split(':')[1]
        # a missing per-level file simply means no database entries at that level
        try:
            spec_num = len(list(SeqIO.parse(f'{directory}/{spec_int}_species.fasta', 'fasta')))
        except FileNotFoundError:
            spec_num = 0
        try:
            gen_number = list(SeqIO.parse(f'{directory}/{spec_int}_genus.fasta', 'fasta'))
            gen_num = len(gen_number)
            gen_list = []
            for record in gen_number:
                gen = record.id.split(',')[6].split(':')[1]
                if gen not in gen_list:
                    gen_list.append(gen)
        except FileNotFoundError:
            gen_num = 0
            gen_list = ['NA']
        try:
            fam_number = list(SeqIO.parse(f'{directory}/{spec_int}_family.fasta', 'fasta'))
            fam_num = len(fam_number)
            fam_list = []
            for record in fam_number:
                fam = record.id.split(',')[6].split(':')[1]
                if fam not in fam_list:
                    fam_list.append(fam)
        except FileNotFoundError:
            fam_num = 0
            fam_list = ['NA']
        newdict[spec_int] = {'species': spec_int, 'species_occur': spec_num, 'species_gen': gen_list, 'gen_entries': gen_num, 'species_fam': fam_list, 'fam_entries': fam_num}
    ## print information on which species are present in the database
    for species in species_interest:
        spec_int = species.split(',')[6].split(':')[1]
        if newdict[spec_int]['species_occur'] == 0:
            print(str(newdict[spec_int]['species']) + ': not present in the reference database\n')
        else:
            print(str(newdict[spec_int]['species']) + ': ' + str(newdict[spec_int]['species_occur']) + ' entries in the database\n')
    ## output data table on species of interest
    df = pd.DataFrame.from_dict(newdict, orient = 'index')
    df = df[['species', 'species_occur', 'gen_entries', 'fam_entries', 'species_gen', 'species_fam']]
    df.to_csv(OUTPUT, sep = '\t', index = None)
    ## generate phylogenetic trees: genus level when the family is too large
    ## (> 50 entries) to draw legibly, family level otherwise
    for species in species_interest:
        spec_int = species.split(',')[6].split(':')[1]
        if newdict[spec_int]['fam_entries'] > 50:
            print(str(newdict[spec_int]['species']) + ': ' + str(newdict[spec_int]['fam_entries']) + ' family entries too large. Generating phylogenetic tree on genus level with ' + str(newdict[spec_int]['gen_entries']) + ' entries\n')
            _draw_tree(directory, spec_int, 'genus')
        else:
            print(str(newdict[spec_int]['species']) + ': ' + str(newdict[spec_int]['fam_entries']) + ' family entries. Generating phylogenetic tree on family level\n')
            _draw_tree(directory, spec_int, 'family')
## function: argparse parser
def main():
    """Command-line entry point: dispatch to one of the five pipeline steps."""
    parser = argparse.ArgumentParser(description = 'creating a curated reference database')
    subparser = parser.add_subparsers()
    # step 1: download raw sequences from NCBI
    download = subparser.add_parser('ncbi_download', description = 'downloading fasta sequence file from NCBI based on text query')
    download.set_defaults(func = ncbi_download)
    download.add_argument('--database', help = 'database used to download sequences. Example: "nucleotide"', dest = 'database', type = str, required = True)
    download.add_argument('--query', help = 'query search to limit portion of database to be downloaded. Example: "18S[All Fields] NOT "uncultured"[All Fields] AND is_nuccore[filter] AND ("1"[SLEN] : "50000"[SLEN])"', dest = 'query', type = str, required = True)
    download.add_argument('--output', help = 'output filename. Example: "18S_fasta_NCBI_trial.fasta"', dest = 'output_filename', type = str, required = True)
    download.add_argument('--email', help = 'email address to connect to NCBI servers', dest = 'email', type = str, required = True)
    # step 2: curate the download with an in silico PCR
    pcr = subparser.add_parser('in_silico_pcr', description = 'curating the downloaded reference sequences with an in silico PCR')
    pcr.set_defaults(func = in_silico_pcr)
    pcr.add_argument('--fwd', help = 'forward primer sequence in 5-3 direction', dest = 'fwd', type = str, required = True)
    pcr.add_argument('--rev', help = 'reverse primer sequence in 5-3 direction', dest = 'rev', type = str, required = True)
    pcr.add_argument('--assay', help = 'name of primer assay', dest = 'assay', type = str, required = True)
    pcr.add_argument('--input', help = 'input filename', dest = 'input', type = str, required = True)
    # step 3: attach taxonomy and build the reference database
    refdb = subparser.add_parser('ref_database', description = 'creating the reference database with taxonomic information')
    refdb.set_defaults(func = ref_database)
    refdb.add_argument('--input', help = 'input file containing the curated fasta sequences after in silico PCR', dest = 'input', type = str, required = True)
    refdb.add_argument('--output', help = 'curated reference database output file', dest = 'output', type = str, required = True)
    refdb.add_argument('--email', help = 'email address to connect to NCBI servers', dest = 'email', type = str, required = True)
    # step 4: dereplicate the database
    dedupe = subparser.add_parser('dereplicate', description = 'dereplicating the database')
    dedupe.set_defaults(func = dereplicate)
    dedupe.add_argument('--input', help = 'filename of the curated reference database', dest = 'input', type = str, required = True)
    dedupe.add_argument('--output', help = 'filename of the dereplicated curated reference database', dest = 'output', type = str, required = True)
    # step 5: phylogenetic trees for species of interest
    trees = subparser.add_parser('phylo_build', description = 'generating phylogenetic trees for species of interest')
    trees.set_defaults(func = phylo)
    trees.add_argument('--species', help = 'text file containing list of species separated by newlines', dest = 'species', type = str, required = True)
    trees.add_argument('--database', help = 'curated reference database', dest = 'database', type = str, required = True)
    trees.add_argument('--email', help = 'email address to connect to NCBI servers', dest = 'email', type = str, required = True)
    trees.add_argument('--output', help = 'filename for output table', dest = 'output', type = str, required = True)
    args = parser.parse_args()
    args.func(args)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
import numpy as np
import glob
import shutil
import os
import cv2
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
# Input dataset locations (VITON-HD training split) and the output root
# where main() writes the grayscale-clothes ("blackened") copies.
clothes_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/cloth'
clothes_mask_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/cloth-mask'
image_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/image'
image_parse_dir = '/home/ssai1/dhgwag/VITON/VITON-HD/datasets/train/image-parse'
result_dir = '/home/ssai1/yjcho/blackened_datasets'
def load_one_image(image_path):
    """Load *image_path* and return its pixels as an RGB numpy array."""
    return np.array(Image.open(image_path).convert('RGB'))
def load_one_image_parse(image_parse_path):
    """Load a segmentation map and return it as a numpy array.

    The image is NOT converted to RGB, so palette/label values are kept as-is.
    """
    parse_img = Image.open(image_parse_path)
    return np.array(parse_img)
def get_parse_clothes(img_parse):
    """Return a float32 mask of the upper-clothes region of *img_parse*.

    The mask is 1.0 where the parse label is 5, 6 or 7 and 0.0 elsewhere.
    (The labels are disjoint, so np.isin is equivalent to the previous sum
    of three indicator arrays, in a single vectorized pass.)
    """
    return np.isin(img_parse, (5, 6, 7)).astype(np.float32)
def parse2mask(parse):
    """Return a float32 binary mask: 1.0 where *parse* > 0, else 0.0.

    Bug fix: the previous implementation used a chained assignment
    (``upper_mask = parse[...] = 1.0``) that mutated *parse* in place,
    bound ``upper_mask`` to the scalar 1.0, and returned None.  The input
    is now left untouched and the mask is returned.
    """
    return (np.asarray(parse) > 0.0).astype(np.float32)
def clothes_darkenizer(img, mask):
    """Return a 3-channel grayscale copy of *img* restricted to *mask*.

    Pixels where mask == 0 are blacked out, the remainder is converted to
    grayscale via PIL (L = 0.299R + 0.587G + 0.114B rounding preserved),
    and the single channel is stacked back to 3 channels.

    Bug fix: removed the three leftover debug ``.save()`` calls that wrote
    'np_clothes.jpg', 'PIL_clothes.jpg' and 'gray_PIL.jpg' into the current
    directory on every invocation (once per dataset image).
    """
    np_clothes = np.copy(img)
    np_clothes[np.where(mask == 0.0)] = 0.0  # only clothes will survive
    PIL_gray_clothes = ImageOps.grayscale(Image.fromarray(np.uint8(np_clothes)).convert('RGB'))
    np_gray_clothes = np.array(PIL_gray_clothes)
    # stack the single gray channel three times to get an RGB-shaped array
    return np.stack([np_gray_clothes, np_gray_clothes, np_gray_clothes], axis=-1)
def merge_images(img1, img2, img2_mask):
    """Overlay *img2* onto a copy of *img1* wherever *img2_mask* is non-zero.

    img1: base image array; img2: overlay array of the same shape;
    img2_mask: array whose non-zero entries select the overlay pixels.
    Neither input is modified.
    """
    selected = img2_mask != 0
    merged = np.copy(img1)
    merged[selected] = img2[selected]
    return merged
def main():
    """Rebuild *result_dir* with grayscale-clothes versions of the dataset.

    Recreates the output directory tree from scratch, then writes a copy of
    every person image (upper clothes grayed out via the parse map) and every
    cloth image (grayed out via its mask); parse maps and cloth masks are
    copied through unchanged.
    """
    # start from an empty output tree (plain statements instead of the
    # previous conditional-expression-for-side-effect anti-idiom)
    if os.path.exists(result_dir):
        shutil.rmtree(result_dir)
    os.mkdir(result_dir)
    result_cloth_dir = os.path.join(result_dir, 'cloth')
    result_cloth_mask_dir = os.path.join(result_dir, 'cloth-mask')
    result_image_dir = os.path.join(result_dir, 'image')
    result_image_parse_dir = os.path.join(result_dir, 'image-parse')
    os.mkdir(result_cloth_dir)
    os.mkdir(result_cloth_mask_dir)
    os.mkdir(result_image_dir)
    os.mkdir(result_image_parse_dir)
    # human image processing: gray out the upper-clothes region of each person image
    for img_path in glob.glob(os.path.join(image_dir, '*.jpg')):
        # parse maps share the image basename but use the .png extension
        img_parse_path = os.path.join(image_parse_dir, os.path.basename(img_path)).replace('.jpg', '.png')
        img = load_one_image(img_path)
        img_parse = load_one_image_parse(img_parse_path)
        parse_upper = get_parse_clothes(img_parse)
        np_gray_clothes = clothes_darkenizer(img, parse_upper)
        result_img = merge_images(img, np_gray_clothes, parse_upper)
        Image.fromarray(result_img).save(os.path.join(result_image_dir, os.path.basename(img_path)))
        # parse maps are written through unchanged
        Image.fromarray(img_parse).save(os.path.join(result_image_parse_dir, os.path.basename(img_parse_path)))
    # clothes image processing: same treatment for the standalone cloth images
    for clothes_path in glob.glob(os.path.join(clothes_dir, '*.jpg')):
        clothes_mask_path = os.path.join(clothes_mask_dir, os.path.basename(clothes_path))
        clothes = load_one_image(clothes_path)
        clothes_mask = load_one_image(clothes_mask_path)
        np_gray_clothes = clothes_darkenizer(clothes, clothes_mask)
        result_img = merge_images(clothes, np_gray_clothes, clothes_mask)
        Image.fromarray(result_img).save(os.path.join(result_cloth_dir, os.path.basename(clothes_path)))
        # masks are written through unchanged
        Image.fromarray(clothes_mask).save(os.path.join(result_cloth_mask_dir, os.path.basename(clothes_mask_path)))
if __name__ == '__main__':
    main()
1671851 | <gh_stars>1-10
#
# 1438. Longest Continuous Subarray With Absolute Diff Less Than or Equal to Limit
#
# Q: https://leetcode.com/problems/longest-continuous-subarray-with-absolute-diff-less-than-or-equal-to-limit/
# A: https://leetcode.com/problems/longest-continuous-subarray-with-absolute-diff-less-than-or-equal-to-limit/discuss/751204/Javascript-Python3-C%2B%2B-Map-for-MinMax
#
class Solution:
    """LeetCode 1438: longest subarray with max-min difference <= limit."""

    def longestSubarray(self, A: List[int], T: int, best = 0) -> int:
        """Return the length of the longest contiguous subarray of *A*
        whose maximum and minimum differ by at most *T*.

        Maintains a sliding window A[i:j] (j exclusive) with a Counter of
        the values currently inside it; the window grows while valid and
        shrinks from the left while invalid.
        """
        m = collections.Counter()
        N, i, j = len(A), 0, 1  # A[i..j) => i inclusive to j non-inclusive
        m[A[i]] = 1
        while True:
            # min()/max() over the Counter's keys give the window extremes
            # directly; the previous sorted(m.items()) workaround paid
            # O(k log k) per iteration for the same answer.
            minimum = min(m)
            maximum = max(m)
            if maximum - minimum <= T:
                best = max(best, j - i)  # 🎯 best, ie. max length from i inclusive to j non-inclusive
                if j == N:
                    break
                m[A[j]] += 1  # "grow" window
                j += 1
            else:
                m[A[i]] -= 1  # "shrink" window
                if not m[A[i]]:
                    del m[A[i]]
                i += 1
        return best
| StarcoderdataPython |
3255064 | <reponame>andypymont/adventofcode<gh_stars>0
"""
2021 Day 11
https://adventofcode.com/2021/day/11
"""
from collections import deque
from itertools import count
from typing import Dict, Iterator, Set
import aocd # type: ignore
def read_octopuses(text: str) -> Dict[complex, int]:
    """Parse a grid of digits into {complex(x, y): energy level}."""
    return {
        complex(x, y): int(char)
        for y, row in enumerate(text.split("\n"))
        for x, char in enumerate(row)
    }
# The eight offsets (orthogonal + diagonal) from a cell to its neighbours.
DIRECTIONS: Set[complex] = {
    complex(dx, dy)
    for dx in (-1, 0, 1)
    for dy in (-1, 0, 1)
    if (dx, dy) != (0, 0)
}


def neighbours(point: complex) -> Iterator[complex]:
    """Yield the eight coordinates surrounding *point* (no bounds check)."""
    for offset in DIRECTIONS:
        yield point + offset


def step(octopuses: Dict[complex, int]) -> Dict[complex, int]:
    """Advance the grid one step.

    Every octopus gains one energy; any octopus above 9 flashes (at most
    once), adding one energy to each in-grid neighbour, which may cascade.
    Every octopus that flashed ends the step at energy 0.
    """
    grid = {pos: level + 1 for pos, level in octopuses.items()}
    flashed: Set[complex] = set()
    pending = [pos for pos, level in grid.items() if level > 9]
    while pending:
        pos = pending.pop()
        if pos in flashed:
            continue  # each octopus flashes at most once per step
        flashed.add(pos)
        for nb in neighbours(pos):
            if nb not in grid:
                continue  # off-grid neighbour
            grid[nb] += 1
            if grid[nb] > 9 and nb not in flashed:
                pending.append(nb)
    return {pos: (0 if level > 9 else level) for pos, level in grid.items()}
def flashes(octopuses: Dict[complex, int], steps: int) -> int:
    """Run *steps* steps and return the total number of flashes observed.

    A flash shows up as an energy level of 0 immediately after a step.
    """
    total = 0
    for _ in range(steps):
        octopuses = step(octopuses)
        total += list(octopuses.values()).count(0)
    return total
def all_flash(octopuses: Dict[complex, int]) -> int:
    """Return the first step number at which every octopus flashes at once."""
    step_no = 0
    while True:
        step_no += 1
        octopuses = step(octopuses)
        if all(level == 0 for level in octopuses.values()):
            return step_no
def test_part1() -> None:
    """
    Examples for Part 1.

    The expected grids after one and two steps come straight from the puzzle
    text; parsing them with read_octopuses replaces the previous ~300 lines
    of hand-written literal dicts (read_octopuses itself is spot-checked
    against hand-computed cells first).
    """
    octopuses = read_octopuses(
        "\n".join(
            (
                "5483143223",
                "2745854711",
                "5264556173",
                "6141336146",
                "6357385478",
                "4167524645",
                "2176841721",
                "6882881134",
                "4846848554",
                "5283751526",
            )
        )
    )
    # spot-check the parsed grid (corners and size) before relying on it
    assert len(octopuses) == 100
    assert octopuses[complex(0, 0)] == 5
    assert octopuses[complex(9, 0)] == 3
    assert octopuses[complex(0, 9)] == 5
    assert octopuses[complex(9, 9)] == 6
    step1 = read_octopuses(
        "\n".join(
            (
                "6594254334",
                "3856965822",
                "6375667284",
                "7252447257",
                "7468496589",
                "5278635756",
                "3287952832",
                "7993992245",
                "5957959665",
                "6394862637",
            )
        )
    )
    assert step(octopuses) == step1
    step2 = read_octopuses(
        "\n".join(
            (
                "8807476555",
                "5089087054",
                "8597889608",
                "8485769600",
                "8700908800",
                "6600088989",
                "6800005943",
                "0000007456",
                "9000000876",
                "8700006848",
            )
        )
    )
    assert step(step1) == step2
    assert flashes(octopuses, 1) == 0
    assert flashes(octopuses, 2) == 35
    assert flashes(octopuses, 3) == 80
    assert flashes(octopuses, 10) == 204
def test_part2() -> None:
    """
    Examples for Part 2.

    Uses read_octopuses on the puzzle's example grid instead of the previous
    hand-written 100-entry literal dict (parsing is verified in test_part1).
    """
    octopuses = read_octopuses(
        "\n".join(
            (
                "5483143223",
                "2745854711",
                "5264556173",
                "6141336146",
                "6357385478",
                "4167524645",
                "2176841721",
                "6882881134",
                "4846848554",
                "5283751526",
            )
        )
    )
    assert all_flash(octopuses) == 195
def main() -> None:
    """
    Fetch the real puzzle input and print the answers to both parts.
    """
    puzzle = aocd.get_data(year=2021, day=11)
    grid = read_octopuses(puzzle)
    print(f"Part 1: {flashes(grid, 100)}")
    print(f"Part 2: {all_flash(grid)}")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
import pandas as pd
import numpy
import matplotlib
import sklearn_crfsuite
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn_crfsuite import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import RandomizedSearchCV
from sklearn_crfsuite import scorers
from sklearn.externals import joblib
from glob import glob
import scipy.stats
import operator
import sys
import os
# FBcols = ["FB%d" % d for d in range(4096)]
# GGcols = ["GG%d" % d for d in range(512)]
# elmocols = ["ELMO%d" % d for d in range(1024)]
# Feature columns: 4096 "GS" embedding dimensions plus three scalar features.
features = ["GS%d" % d for d in range(4096)] + ['wordCount','chartStart','charEnd']
# labelNames = ['semanticType','Symptom','PMH','MEDS','ALLG','FAMHx','SOCHx','pysch','lifestyle','substanceUse','PE','FORM','supportProvision','transition']
# One CRF is trained per label column; currently only 'supportProvision'.
labelNames = ['supportProvision']
files = glob("/Users/karanjani/Desktop/csvWithVecs/TrainCSV_Updated/*.csv")
#MAYBE CREATE A LIST FOR featurelabels so you can add what you wish to the FB vectors?
for name in labelNames:
    # One sequence (list of per-row feature dicts / label strings) per CSV file.
    featureMaster = []
    labelMaster = []
    # NOTE(review): only the first 10 files are used — presumably a quick
    # experiment; confirm before a full training run.
    for file in files[:10]:
        df = pd.read_csv(file)
        df = df.dropna(axis=0, how='any')
        # keep only rows spoken by the doctor
        df = df[df.speakerID == 'doctor']
        #DROP ALL LABELS + ANY FEATURES YOU DON'T WANT TO INCLUDE
        dfX = df[features]
        # dfX = df.drop(['labelType','stringList','transition'], axis=1)
        #CREATE LIST OF LIST OF DICTS OF FEATURES
        list_of_FeatureDicts = dfX.to_dict(orient='records')
        featureMaster += [list_of_FeatureDicts]
        #CREATE LIST OF LIST OF STRINGS OF LABELS
        labels = df[name].values.tolist()
        labelMaster += [labels]
    # 80/20 split at the sequence (file) level
    X_train, X_valid, Y_train, Y_valid = train_test_split(featureMaster, labelMaster, test_size = 0.2)
    crf = sklearn_crfsuite.CRF(
        algorithm='lbfgs',
        max_iterations=100,
        all_possible_transitions=True)
    # randomized search over the L1/L2 regularization coefficients
    params_space = {'c1': scipy.stats.expon(scale=0.5),'c2': scipy.stats.expon(scale=0.05)}
    # NOTE(review): labels=numpy.unique(name) passes the label *column name*
    # (a string), not the observed label values — verify this is the intended
    # label restriction for the weighted F1 scorer.
    f1_scorer = make_scorer(metrics.flat_f1_score,average='weighted', labels=numpy.unique(name))
    rs = RandomizedSearchCV(crf, params_space,
        cv=2,
        verbose=1,
        n_jobs=-1,
        n_iter=10,
        scoring=f1_scorer)
    rs.fit(X_train, Y_train)
    print('best params:', rs.best_params_)
    print('best CV score:', rs.best_score_)
    print('model size: {:0.2f}M'.format(rs.best_estimator_.size_ / 1000000))
| StarcoderdataPython |
3220579 | <reponame>domdinicola/django-admin-extra-urls
# -*- coding: utf-8 -*-
import logging
from django.contrib.admin import site
from admin_extra_urls.extras import reverse
from admin_extra_urls.mixins import _confirm_action
from demo.models import DemoModel1
logger = logging.getLogger(__name__)
def test_confirm(django_app, admin_user):
    """Clicking 'Confirm' on the changelist shows the confirmation page,
    and submitting the form runs the action and reports success."""
    url = reverse('admin:demo_demomodel1_changelist')
    res = django_app.get(url, user=admin_user)
    res = res.click('Confirm')
    # BUG FIX: str.find() returns -1 when the substring is absent, and -1
    # is truthy, so `assert str(res.content).find(...)` could never fail.
    # Use a membership test so the assertion actually checks the page.
    assert "Confirm action" in str(res.content)
    res = res.form.submit().follow()
    assert str(res.context['messages']._loaded_messages[0].message) == 'Successfully executed'
def test_confirm_action(rf, staff_user):
    """_confirm_action with an always-true callback completes without raising."""
    request = rf.get('/customer/details')
    request.user = staff_user
    model_admin = site._registry[DemoModel1]
    _confirm_action(
        model_admin,
        request,
        lambda r: True,
        "Confirm action",
        "Successfully executed",
        description="",
        pk=None,
        extra_context={'a': 1},
    )
| StarcoderdataPython |
1691338 | import base64
import json
import requests
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
def paytm_oauth(request):
    """Handle the redirect leg of the Paytm OAuth authorization-code flow.

    Exchanges the ``code`` query parameter for an access token, fetches the
    user profile with that token, creates or updates the matching Django
    user, logs them in, and redirects to ``LOGIN_REDIRECT_URL`` on success
    or to ``/`` on any failure.
    """
    code = request.GET.get('code', None)
    url = settings.PAYTMOAUTH_PROVIDER_URL + settings.PAYTMOAUTH_AUTHENTICATION_ENDPOINT
    header_value = settings.PAYTMOAUTH_CLIENT_ID + ':' + settings.PAYTMOAUTH_CLIENT_SECRET
    # BUG FIX: base64.b64encode() returns *bytes*; the original wrapped it in
    # str(), producing "b'...'" and therefore an invalid header value
    # ("Basic b'xxx'").  Decode to ASCII to get the plain base64 text.
    authorization_header = base64.b64encode(header_value.encode('ascii')).decode('ascii')
    if code:
        headers = {
            "Authorization": "Basic " + authorization_header,
            "Content-Type": "application/x-www-form-urlencoded"
        }
        payload = {
            "grant_type": "authorization_code",
            "code": code,
            "client_id": settings.PAYTMOAUTH_CLIENT_ID,
            "scope": settings.PAYTMOAUTH_SCOPE
        }
        try:
            response = requests.post(url, headers=headers, data=payload)
        except Exception as e:
            print('Error : Request for retriving access token failed', e)
            response = None
        if response and response.status_code == 200:
            partial_response = json.loads(response.text)
            # (sic: "PAYTMOUATH" matches the existing settings attribute name)
            url = settings.PAYTMOAUTH_PROVIDER_URL + settings.PAYTMOUATH_RESOURCE_ACCESS_ENDPOINT
            headers = {
                'session_token': partial_response.get('access_token')
            }
            try:
                authentication_response = requests.get(url, headers=headers)
            except Exception as e:
                print('Error : Request for retriving authentication response failed', e)
                authentication_response = None
            else:
                user_detail = json.loads(authentication_response.text)
                username = user_detail.get('id')
                email = user_detail.get('email')
                first_name = user_detail.get('firstName', None)
                last_name = user_detail.get('lastName', None)
                if User.objects.filter(username=username).exists():
                    user = User.objects.get(username=username)
                    user.email = email
                    # first and last name may change
                    if first_name:
                        user.first_name = first_name
                    if last_name:
                        user.last_name = last_name
                    # hack : the proper django way is to use an
                    # authentication backend
                    if not hasattr(user, 'backend'):
                        user.backend = 'django.contrib.auth.backends.ModelBackend'
                    user.save()
                    login(request, user)
                else:
                    new_user = User(username=username, email=email,
                                    first_name=first_name, last_name=last_name)
                    # hack : the proper django way is to use an
                    # authentication backend
                    if not hasattr(new_user, 'backend'):
                        new_user.backend = 'django.contrib.auth.backends.ModelBackend'
                    new_user.save()
                    login(request, new_user)
                return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
    return HttpResponseRedirect('/')
1755724 | <reponame>hubert-he/FATE<gh_stars>1000+
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.framework.homo.blocks.base import HomoTransferBase
from federatedml.secureprotol.diffie_hellman import DiffieHellman
from federatedml.util import consts
class DHTransVar(HomoTransferBase):
    """Transfer variables for the Diffie-Hellman key exchange between the
    arbiter (server) and the guest/host parties (clients)."""
    def __init__(self, server=(consts.ARBITER,), clients=(consts.GUEST, consts.HOST), prefix=None):
        super().__init__(server=server, clients=clients, prefix=prefix)
        # client -> server: each client's (uuid, g^r mod p) public share
        self.p_power_r = self.create_client_to_server_variable(name="p_power_r")
        # server -> clients: broadcast of all collected public shares
        self.p_power_r_bc = self.create_server_to_client_variable(name="p_power_r_bc")
        # server -> clients: the shared group parameters (p, g)
        self.pubkey = self.create_server_to_client_variable(name="pubkey")
class Server(object):
    """Arbiter side of the Diffie-Hellman exchange: generates the group
    parameters, then relays every client's public share to all clients."""

    def __init__(self, trans_var: DHTransVar = None):
        if trans_var is None:
            trans_var = DHTransVar()
        self._p_power_r = trans_var.p_power_r
        self._p_power_r_bc = trans_var.p_power_r_bc
        self._pubkey = trans_var.pubkey
        self._client_parties = trans_var.client_parties

    def key_exchange(self):
        """Broadcast (p, g), collect each client's share, broadcast them back."""
        prime, generator = DiffieHellman.key_pair()
        self._pubkey.remote_parties(obj=(int(prime), int(generator)),
                                    parties=self._client_parties)
        shares = dict(self._p_power_r.get_parties(parties=self._client_parties))
        self._p_power_r_bc.remote_parties(obj=shares, parties=self._client_parties)
class Client(object):
    """Guest/host side of the Diffie-Hellman exchange."""

    def __init__(self, trans_var: DHTransVar = None):
        if trans_var is None:
            trans_var = DHTransVar()
        self._p_power_r = trans_var.p_power_r
        self._p_power_r_bc = trans_var.p_power_r_bc
        self._pubkey = trans_var.pubkey
        self._server_parties = trans_var.server_parties

    def key_exchange(self, uuid: str):
        """Run the client half of the exchange.

        Returns a dict mapping every *other* client's uuid to the shared
        secret computed from that client's broadcast public share.
        """
        prime, generator = self._pubkey.get_parties(parties=self._server_parties)[0]
        secret = DiffieHellman.generate_secret(prime)
        public_share = DiffieHellman.encrypt(generator, secret, prime)
        self._p_power_r.remote_parties(obj=(uuid, public_share), parties=self._server_parties)
        all_shares = self._p_power_r_bc.get_parties(parties=self._server_parties)[0]
        return {
            other_uid: DiffieHellman.decrypt(share, secret, prime)
            for other_uid, share in all_shares.items()
            if other_uid != uuid
        }
| StarcoderdataPython |
3366592 | <filename>ErrorDistribution/error_distribution.py
# coding: utf-8
# In[17]:
from train import *
import pandas as pd
import numpy as np
# In[63]:
# `params` aliases the shared PARAMS dict imported from train, so these
# assignments mutate the module-level configuration in place.
params = PARAMS
params['filename'] = "model1.csv"
params['max_steps'] = 1000000
params['learning_rate'] = 0.01
params['layers'] = [100, 200, 100]
params['dropout'] = 0.05
params['training_set_size'] = 90000
# In[64]:
all_data = tf.contrib.learn.datasets.base.load_csv_without_header(
    filename="micro_data_train_valid.csv",
    target_dtype=np.float32,
    features_dtype=np.float32)
# Keep the first 15 feature columns; scale targets by the shared constant.
X = all_data.data[:,:15]
y = all_data.target / PARAMS['div_const']
# Z-score normalize features column-wise.
X = (X - np.mean(X, axis=0, keepdims=True))/np.std(X, axis=0, keepdims=True)
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=PARAMS['test_ratio'],
                                                    random_state=42)
def pred(classifier, X_test, y_test):
    """Materialize y_test.size predictions from the estimator's lazy
    predict() iterable into a list."""
    prediction_stream = classifier.predict(input_fn=lambda: predict_input_fn(X_test))
    return [next(prediction_stream) for _ in range(y_test.size)]
def train(X_train, y_train, X_test, y_test, params):
    """Train a DNNRegressor with the given hyper-parameters.

    Copies the entries of *params* into the shared PARAMS dict, rebuilds the
    model directory, fits the estimator, and returns (y_test, predictions).
    """
    # input_fn/predict_input_fn (imported from train) read these globals
    # to drive batching, so they must be reset before each fit.
    global batch_step
    global training_set_size
    PARAMS['max_steps'] = params['max_steps']
    PARAMS['learning_rate'] = params['learning_rate']
    PARAMS['layers'] = params['layers']
    PARAMS['dropout'] = params['dropout']
    PARAMS['training_set_size'] = params['training_set_size']
    # NOTE(review): `id` here is the *builtin* function, so MODEL_DIR becomes
    # "model_<built-in function id>"; a run-identifier variable was
    # presumably intended — confirm before relying on the directory name.
    MODEL_DIR = "model_" + str(id)
    if os.path.isdir(MODEL_DIR):
        print("Removing old model dir...")
        shutil.rmtree(MODEL_DIR)
    # Specify that all features have real-value data
    feature_columns = [tf.contrib.layers.real_valued_column(
        "", dimension=X_train.shape[1])]
    batch_step = 0
    training_set_size = PARAMS['training_set_size']
    config = run_config.RunConfig(log_device_placement=False, save_checkpoints_secs=5)
    classifier = tf.contrib.learn.DNNRegressor( # activation_fn: tf.nn.relu by default
        feature_columns=feature_columns,
        hidden_units=PARAMS['layers'],
        model_dir=MODEL_DIR,
        optimizer=tf.train.AdamOptimizer(learning_rate=PARAMS['learning_rate'], epsilon=0.8),
        dropout=PARAMS['dropout'],
        config=config)
    # Evaluation monitor runs on the held-out split during fitting.
    monitor = RegressionMonitor(x=X_test, y=y_test)
    classifier.fit(input_fn = lambda: input_fn(X_train,y_train),
                   steps=PARAMS['max_steps'], monitors=[monitor])
    return y_test, pred(classifier, X_test, y_test)
# In[66]:
# NOTE(review): this rebinding of `pred` shadows the pred() helper defined
# above; any later call to the function would fail.
y, pred = train(X_train, y_train, X_test, y_test, params)
# In[71]:
# Columns: target, prediction, absolute error (rescaled back by div_const),
# and relative error.
errors = zip(y,
             pred,
             np.subtract(y, pred) * PARAMS['div_const'],
             np.subtract(y, pred) / y)
df = pd.DataFrame(np.array(list(errors)),
                  columns = ["y", "pred", "absolute_error", "relative_error"])
# In[72]:
df.to_csv(params["filename"], index=False)
1639413 | <reponame>panicmarvin/OpenRAM
import design
import debug
import utils
from tech import GDS,layer
class replica_bitcell(design.design):
    """
    A single bit cell (6T, 8T, etc.)
    This module implements the single memory cell used in the design. It
    is a hand-made cell, so the layout and netlist should be available in
    the technology library. """
    # Pin order: bitline, complementary bitline, wordline, power, ground.
    pin_names = ["BL", "BR", "WL", "vdd", "gnd"]
    # These lookups run once at class-creation time and read the
    # "replica_cell_6t" layout from the technology GDS library.
    (width,height) = utils.get_libcell_size("replica_cell_6t", GDS["unit"], layer["boundary"])
    pin_map = utils.get_libcell_pins(pin_names, "replica_cell_6t", GDS["unit"], layer["boundary"])
    def __init__(self):
        design.design.__init__(self, "replica_cell_6t")
        debug.info(2, "Create replica bitcell object")
        # Copy the class-level geometry onto the instance, as the design
        # base class works with instance attributes.
        self.width = replica_bitcell.width
        self.height = replica_bitcell.height
        self.pin_map = replica_bitcell.pin_map
| StarcoderdataPython |
1791224 | <filename>Python/Exercise/Exercise_2018/Translate/googleTranslate.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests # pip install requests
import json
import execjs # pip install PyExecJS
import urllib3 # pip install urllib3
'''
author by Benji
date at 2018.12.07
实现: 模拟浏览器中Google翻译的url请求
不同于Baidu直接给出API, Google翻译需要调用其封装的lib
参考: https://www.jianshu.com/p/95cf6e73d6ee
https://cloud.google.com/translate/docs/apis
'''
class PyJsParams():
    """Evaluates Google's obfuscated token-generation JavaScript (TL/RL)
    via PyExecJS to produce the `tk` request parameter."""
    def __init__(self):
        # The JS below is copied from translate.google.com's client code;
        # it must be kept verbatim for the tk values to match.
        self.ctx = execjs.compile("""
        function TL(a) {
        var k = "";
        var b = 406644;
        var b1 = 3293161072;
        var jd = ".";
        var $b = "+-a^+6";
        var Zb = "+-3^+b+-f";
        for (var e = [], f = 0, g = 0; g < a.length; g++) {
            var m = a.charCodeAt(g);
            128 > m ? e[f++] = m : (2048 > m ? e[f++] = m >> 6 | 192 : (55296 == (m & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (m = 65536 + ((m & 1023) << 10) + (a.charCodeAt(++g) & 1023),
            e[f++] = m >> 18 | 240,
            e[f++] = m >> 12 & 63 | 128) : e[f++] = m >> 12 | 224,
            e[f++] = m >> 6 & 63 | 128),
            e[f++] = m & 63 | 128)
        }
        a = b;
        for (f = 0; f < e.length; f++) a += e[f],
        a = RL(a, $b);
        a = RL(a, Zb);
        a ^= b1 || 0;
        0 > a && (a = (a & 2147483647) + 2147483648);
        a %= 1E6;
        return a.toString() + jd + (a ^ b)
    };
    function RL(a, b) {
        var t = "a";
        var Yb = "+";
        for (var c = 0; c < b.length - 2; c += 3) {
            var d = b.charAt(c + 2),
            d = d >= t ? d.charCodeAt(0) - 87 : Number(d),
            d = b.charAt(c + 1) == Yb ? a >>> d: a << d;
            a = b.charAt(c) == Yb ? a + d & 4294967295 : a ^ d
        }
        return a
    }
    """)
    def getTk(self, text):
        """Return the `tk` token string for *text*, e.g. "123456.789012"."""
        return self.ctx.call("TL", text)
def buildUrl(text, tk, tl=None):
    """Assemble the translate_a/single request URL.

    :param text: the text to translate (appended as the ``q`` parameter).
    :param tk: the anti-bot token computed by PyJsParams.getTk().
    :param tl: target language code; defaults to the module-level ``toLang``
               so existing two-argument callers behave exactly as before.
    """
    if tl is None:
        tl = toLang
    baseUrl = 'https://translate.google.com/translate_a/single?client=webapp&'
    baseUrl += '&sl=auto&tl=' + tl
    baseUrl += '&hl=en&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&source=btn&ssel=0&tsel=0&kc=0&'
    baseUrl += 'tk=' + str(tk) + '&'
    baseUrl += 'q=' + text
    return baseUrl
def translate(text, jsParas):
    """Request a translation of *text* and return the first candidate
    string, or '' if the request or parsing fails."""
    url = buildUrl(text, jsParas.getTk(text))
    try:
        # Set request headers to mimic a real browser.
        headers = requests.utils.default_headers()
        headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
        # https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
        urllib3.disable_warnings()
        # solve: SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate
        r = requests.get(url, headers=headers, verify=False)
        result = json.loads(r.text)
        # The JSON payload nests the translated text at [0][0][0].
        res = str(result[0][0][0])
    except Exception as e:
        res = ''
        print("翻译"+text+"失败")
        print("错误信息:")
        print(e)
    finally:
        # NOTE: returning from `finally` swallows any exception that might
        # otherwise propagate (deliberate best-effort behavior here).
        return res
# Default target-language code consumed by buildUrl().
toLang = 'en'
if __name__ == '__main__':
    # Smoke test: translate a Chinese sentence to English and print it.
    jsParas = PyJsParams()
    res = translate('小顺子给春宫娘娘请安了', jsParas)
    print(res)
'''
output
Xiaoshun gave the Spring Palace girl an appointment.
'''
| StarcoderdataPython |
4826252 | <gh_stars>1-10
import numpy as np
class RidgeRegressor:
    """
    Regression weights of kernel Ridge regression
    Parameters
    ----------
    kernel : {'gaussian'}
        Name of the kernel to use.
    sigma : float, optional
        Bandwidth parameter for various kernel: standard deviation for Gaussian kernel.
    lambd: float, optional
        Tikhonov regularization parameter
    Examples
    --------
    >>> import numpy as np
    >>> krr = RidgeRegressor('Gaussian', sigma=3, lambd=1e-3)
    >>> x_support = np.random.randn(50, 10)
    >>> krr.set_support(x_support)
    >>> x = np.random.randn(30, 10)
    >>> alpha = krr(x)
    """
    def __init__(self, kernel, lambd=None, **kwargs):
        self.kernel = Kernel(kernel, **kwargs)
        self.lambd = lambd
    def set_support(self, x_train):
        """Specified input training data
        There should be a call to update the EVD of K and lambda after.
        """
        self.n_train = len(x_train)
        self.kernel.set_support(x_train)
    def update_sigma(self, sigma=None):
        """Setting bandwith parameter
        Useful to try several regularization parameter.
        There should be a call to update lambda after setting the bandwith.
        """
        if sigma is not None:
            # Rebuild the kernel with the new bandwidth, keeping the same
            # kernel type, then re-register the current support points.
            self.kernel.__init__(self.kernel.kernel, sigma=sigma)
            self.kernel.set_support(self.kernel.x)
        # Cache the eigendecomposition of the support kernel matrix so that
        # several lambdas can be tried without refactorizing.
        K = self.kernel.get_k()
        self.w_0, self.v = np.linalg.eigh(K)
    def update_lambda(self, lambd=None):
        """Setting Tikhonov regularization parameter
        Useful to try several regularization parameter.
        """
        if lambd is None:
            if self.lambd is None:
                raise ValueError('No specification of regularization parameter')
            lambd = self.lambd
        if not hasattr(self, 'w_0'):
            self.update_sigma()
        # (K + n*lambda*I)^{-1} computed from the cached EVD: V diag(1/(w+n*lambda)) V^T.
        w = self.w_0 + self.n_train * lambd
        w **= -1
        self.K_inv = (self.v * w) @ self.v.T
    def __call__(self, x_test):
        """Weighting scheme computation.
        Parameters
        ----------
        x_test : ndarray
            Points to compute kernel ridge regression weights, of shape (nb_points, input_dim).
        Returns
        -------
        out : ndarray
            Similarity matrix of size (nb_points, n_train) given by kernel ridge regression.
        """
        if not hasattr(self, 'K_inv'):
            self.train()
        K_x = self.kernel(x_test)
        return K_x.T @ self.K_inv
    def train(self, sigma=None, lambd=None):
        # Convenience: refresh both the EVD and the regularized inverse.
        self.update_sigma(sigma)
        self.update_lambda(lambd)
    def set_phi(self, phi):
        # Precompute regression coefficients for a fixed set of targets phi.
        self.c_beta = self.K_inv @ phi
    def call_with_phi(self, x):
        # Evaluate the fitted regression at new points x.
        K_x = self.kernel(x)
        return K_x.T @ self.c_beta
class Kernel:
    """
    Computation of classical kernels
    Parameters
    ----------
    kernel : {'gaussian', 'laplacian', 'linear'}
        Name of the kernel to use (case-insensitive).
    sigma : int, optional
        Parameter for various kernel: standard deviation for Gaussian kernel,
        bandwidth for the Laplacian kernel.
    Examples
    --------
    >>> import numpy as np
    >>> x_support = np.random.randn(50, 10)
    >>> kernel_computer = Kernel('Gaussian', sigma=3)
    >>> kernel_computer.set_support(x_support)
    >>> x = np.random.randn(30, 10)
    >>> k = kernel_computer(x)
    """
    def __init__(self, kernel, **kwargs):
        self.kernel = kernel.lower()
        if self.kernel == "gaussian":
            # Denominator of the Gaussian exponent: 2 * sigma^2.
            self.sigma2 = 2 * (kwargs['sigma'] ** 2)
        if self.kernel == "laplacian":
            self.sigma = kwargs['sigma']
        # Dispatch to gaussian_kernel / laplacian_kernel / linear_kernel.
        self._call_method = getattr(self, self.kernel + '_kernel')
    def set_support(self, x):
        """Set train support for kernel method.
        Parameters
        ----------
        x : ndarray
            Training set given as a design matrix, of shape (nb_points, input_dim).
        """
        self.reset()
        self.x = x
    def __call__(self, x):
        """Kernel computation.
        Parameters
        ----------
        x : ndarray
            Points to compute kernel, of shape (nb_points, input_dim).
        Returns
        -------
        out : ndarray
            kernel matrix k(x_support, x) of shape (nb_support, nb_points).
        """
        return self._call_method(x)
    def get_k(self):
        """Kernel computations.
        Get kernel matrix on support points.
        """
        return self(self.x)
    def _support_sq_norms(self):
        """Squared norms of the support points, cached as a column vector.

        BUG FIX: the original checked ``hasattr(self, "_attr_1")`` but
        assigned ``self._attr1`` (and ``self._attri_1`` in the Laplacian),
        so the cache never took effect and reset() deleted a name that was
        never set.  A single consistently-named attribute fixes both.
        """
        if not hasattr(self, "_sq_norms"):
            self._sq_norms = np.sum(self.x ** 2, axis=1)[:, np.newaxis]
        return self._sq_norms
    def gaussian_kernel(self, x):
        """Gaussian kernel.
        Implement k(x, y) = exp(-norm{x - y}^2 / (2 * sigma^2)).
        """
        # ||a-b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed without loops.
        K = self.x @ x.T
        K *= 2
        K -= self._support_sq_norms()
        K -= np.sum(x ** 2, axis=1)
        K /= self.sigma2
        np.exp(K, out=K)
        return K
    def laplacian_kernel(self, x):
        """Laplacian kernel
        return exp(-norm{x - y} / (sigma))
        sigma = kernel_parameter
        """
        K = self.x @ x.T
        K *= -2
        K += self._support_sq_norms()
        K += np.sum(x ** 2, axis=1)
        # Guard against tiny negative values from floating-point round-off
        # before taking the square root.
        K[K < 0] = 0
        np.sqrt(K, out=K)
        K /= - self.sigma
        np.exp(K, out=K)
        return K
    def linear_kernel(self, x):
        """Linear kernel.
        Implement k(x, y) = x^T y.
        """
        return self.x @ x.T
    def reset(self):
        """Invalidate cached support statistics (called on new support)."""
        if hasattr(self, "_sq_norms"):
            delattr(self, "_sq_norms")
if __name__=="__main__":
    # Smoke test: regression weights for 30 query points against a support
    # set of 50 points in 10 dimensions should form a (30, 50) matrix.
    krr = RidgeRegressor('Gaussian', sigma=3, lambd=1e-3)
    x_support = np.random.randn(50, 10)
    krr.set_support(x_support)
    x = np.random.randn(30, 10)
    alpha = krr(x)
    assert(alpha.shape==(30,50))
| StarcoderdataPython |
3387752 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
"""HTTP/2 Error Code"""
from aenum import IntEnum, extend_enum
__all__ = ['ErrorCode']
class ErrorCode(IntEnum):
    """[ErrorCode] HTTP/2 Error Code"""
    #: NO_ERROR, Graceful shutdown [RFC-ietf-httpbis-http2bis-07, Section 7]
    NO_ERROR = 0x00000000
    #: PROTOCOL_ERROR, Protocol error detected [RFC-ietf-httpbis-http2bis-07,
    #: Section 7]
    PROTOCOL_ERROR = 0x00000001
    #: INTERNAL_ERROR, Implementation fault [RFC-ietf-httpbis-http2bis-07, Section
    #: 7]
    INTERNAL_ERROR = 0x00000002
    #: FLOW_CONTROL_ERROR, Flow-control limits exceeded [RFC-ietf-httpbis-
    #: http2bis-07, Section 7]
    FLOW_CONTROL_ERROR = 0x00000003
    #: SETTINGS_TIMEOUT, Settings not acknowledged [RFC-ietf-httpbis-http2bis-07,
    #: Section 7]
    SETTINGS_TIMEOUT = 0x00000004
    #: STREAM_CLOSED, Frame received for closed stream [RFC-ietf-httpbis-
    #: http2bis-07, Section 7]
    STREAM_CLOSED = 0x00000005
    #: FRAME_SIZE_ERROR, Frame size incorrect [RFC-ietf-httpbis-http2bis-07,
    #: Section 7]
    FRAME_SIZE_ERROR = 0x00000006
    #: REFUSED_STREAM, Stream not processed [RFC-ietf-httpbis-http2bis-07, Section
    #: 7]
    REFUSED_STREAM = 0x00000007
    #: CANCEL, Stream cancelled [RFC-ietf-httpbis-http2bis-07, Section 7]
    CANCEL = 0x00000008
    #: COMPRESSION_ERROR, Compression state not updated [RFC-ietf-httpbis-
    #: http2bis-07, Section 7]
    COMPRESSION_ERROR = 0x00000009
    #: CONNECT_ERROR, TCP connection error for CONNECT method [RFC-ietf-httpbis-
    #: http2bis-07, Section 7]
    CONNECT_ERROR = 0x0000000A
    #: ENHANCE_YOUR_CALM, Processing capacity exceeded [RFC-ietf-httpbis-
    #: http2bis-07, Section 7]
    ENHANCE_YOUR_CALM = 0x0000000B
    #: INADEQUATE_SECURITY, Negotiated TLS parameters not acceptable [RFC-ietf-
    #: httpbis-http2bis-07, Section 7]
    INADEQUATE_SECURITY = 0x0000000C
    #: HTTP_1_1_REQUIRED, Use HTTP/1.1 for the request [RFC-ietf-httpbis-
    #: http2bis-07, Section 7]
    HTTP_1_1_REQUIRED = 0x0000000D
    @staticmethod
    def get(key: 'int | str', default: 'int' = -1) -> 'ErrorCode':
        """Backport support for original codes.

        Integer keys go through normal value lookup; unknown string keys
        are registered on the fly (via aenum.extend_enum) with *default*.
        """
        if isinstance(key, int):
            return ErrorCode(key)
        if key not in ErrorCode._member_map_:  # pylint: disable=no-member
            extend_enum(ErrorCode, key, default)
        return ErrorCode[key]  # type: ignore[misc]
    @classmethod
    def _missing_(cls, value: 'int') -> 'ErrorCode':
        """Lookup function used when value is not found.

        Values inside the RFC's unassigned range are registered as
        ``Unassigned_0xXXXX_XXXX`` members; anything outside 32 bits raises.
        """
        if not (isinstance(value, int) and 0x00000000 <= value <= 0xFFFFFFFF):
            raise ValueError('%r is not a valid %s' % (value, cls.__name__))
        if 0x0000000E <= value <= 0xFFFFFFFF:
            #: Unassigned
            temp = hex(value)[2:].upper().zfill(8)
            extend_enum(cls, 'Unassigned_0x%s' % (temp[:4]+'_'+temp[4:]), value)
            return cls(value)
        return super()._missing_(value)
| StarcoderdataPython |
48628 | <reponame>ATSM-Bot/rickroll-lang
from sys import stdout
from random import choice
# Keywords
KW_print = 'i_just_wanna_tell_u_how_im_feeling'
KW_if = 'and_if_u_ask_me_how_im_feeling'
KW_let = 'give_u_up'
KW_import1 = 'we_know_the'
KW_import2 = "and_we're_gonna_play_it"
KW_def1 = 'never_knew'
KW_def2 = 'could_feel_this_way'
KW_return1 = 'when_i_give_my'
KW_return2 = 'it_will_be_completely'
KW_main = 'take_me_to_ur_heart'
KW_end = 'say_good_bye'
KW_break = 'desert_u'
KW_continue = 'run_around'
KW_endless_loop = 'together_forever_and_never_to_part'
KW_while_loop = 'together_forever_with'
keywords = {
KW_print,
KW_if,
KW_let,
KW_import1,
KW_import2,
KW_def1,
KW_def2,
KW_return1,
KW_return2,
KW_main,
KW_end,
KW_break,
KW_continue,
KW_endless_loop,
KW_while_loop
}
# Tokens that the interpreter will totally ignore
ignore_tokens = {'~', "'"}
# Characters in numbers
digits = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.'}
# Separators are used in tokenization
separators = {
'(', ')', '[', ']', '{', '}', ',', '\n', ' ', '+', '-', '*', '/', '%', '^', '='
}
# Operators
OP_arithmetic = {'+', '-', '*', '/', '%', '^'}
OP_relational = {'is', 'is_not', 'is_greater_than', 'is_less_than', 'and', 'or'}
OP_logical = {'and', 'or'}
OP_assignment = {'='}
OP_other = {'[', ']', '(', ')', '{', '}', ','}
OP_build_in_functions = {'to_string', 'to_int', 'to_float', 'length'}
error_lyrics = [
'"If you knew what Im feeling, you would not say no~"',
'"You know the rules, and so do I~"',
'"'+"There ain't no mistaking, is true love we are making~"+'"'
]
def join_list(l):
    """Join the items of *l* into one string, each item followed by a space.

    join_list(['a', 'b']) -> 'a b ' and join_list([]) -> ''; the trailing
    space of the original implementation is deliberately preserved.
    """
    # ''.join over a generator is linear, unlike the original repeated
    # string concatenation which can be quadratic.
    return ''.join(f'{i} ' for i in l)
def error(error_msg):
    """Report an interpreter error and terminate the process.

    Writes *error_msg* to stdout, then exits; exit()'s string argument (a
    divider plus a random lyric from error_lyrics) is printed to stderr and
    the process ends with a non-zero status.
    """
    stdout.write(error_msg)
    exit('------'*10 + '\n' + choice(error_lyrics))
| StarcoderdataPython |
# %% [231. Power of Two](https://leetcode.com/problems/power-of-two/)
class Solution:
    def isPowerOfTwo(self, n: int) -> bool:
        """Return True iff n is a positive integer power of two.

        A power of two has exactly one set bit, so n & (n - 1) clears it to
        zero; this replaces building n's binary string with
        bin(n).count("1"), turning an O(log n) check into O(1) bit math.
        """
        return n > 0 and n & (n - 1) == 0
| StarcoderdataPython |
23431 | """
Name: modules.py
Desc: This script defines some base module for building networks.
"""
from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
class UNet_down_block(nn.Module):
    """Three conv3x3 + GroupNorm + ReLU stages, optionally followed by a
    2x2 max-pool when ``down_size`` is True."""

    def __init__(self, input_channel, output_channel, down_size=True):
        super(UNet_down_block, self).__init__()
        self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, output_channel)
        self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, output_channel)
        self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, output_channel)
        self.max_pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()
        self.down_size = down_size

    def forward(self, x):
        # Apply the three conv/norm/ReLU stages in order.
        for conv, norm in ((self.conv1, self.bn1),
                           (self.conv2, self.bn2),
                           (self.conv3, self.bn3)):
            x = self.relu(norm(conv(x)))
        if self.down_size:
            x = self.max_pool(x)
        return x
class UNet_up_block(nn.Module):
    """Bilinear 2x upsample (optional), channel-concat with the skip
    feature map, then three conv3x3 + GroupNorm + ReLU stages."""

    def __init__(self, prev_channel, input_channel, output_channel, up_sample=True):
        super(UNet_up_block, self).__init__()
        self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        # conv1 consumes the concatenated (skip + upsampled) channels.
        self.conv1 = nn.Conv2d(prev_channel + input_channel, output_channel, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, output_channel)
        self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, output_channel)
        self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, output_channel)
        self.relu = torch.nn.ReLU()
        self.up_sample = up_sample

    def forward(self, prev_feature_map, x):
        if self.up_sample:
            x = self.up_sampling(x)
        x = torch.cat((x, prev_feature_map), dim=1)
        for conv, norm in ((self.conv1, self.bn1),
                           (self.conv2, self.bn2),
                           (self.conv3, self.bn3)):
            x = self.relu(norm(conv(x)))
        return x
class UNet(nn.Module):
    """U-Net with a configurable number of pooling stages.

    Channel widths run from 16 at full resolution up to 2**(4 + downsample)
    at the bottleneck, with skip connections between mirrored stages.  The
    final 1x1 convolution is followed by a ReLU, so outputs are
    non-negative.
    """

    def __init__(self, downsample=6, in_channels=3, out_channels=3):
        super(UNet, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.downsample = downsample
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True)
             for i in range(downsample)]
        )
        width = 2 ** (4 + downsample)
        self.mid_conv1 = nn.Conv2d(width, width, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, width)
        self.mid_conv2 = nn.Conv2d(width, width, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, width)
        self.mid_conv3 = torch.nn.Conv2d(width, width, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, width)
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i))
             for i in range(downsample)]
        )
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.down1(x)
        skips = [x]
        for block in self.down_blocks:
            x = block(x)
            skips.append(x)
        # Bottleneck refinement.
        for conv, norm in ((self.mid_conv1, self.bn1),
                           (self.mid_conv2, self.bn2),
                           (self.mid_conv3, self.bn3)):
            x = self.relu(norm(conv(x)))
        # Decode with skip connections, deepest stage first.
        for i in reversed(range(self.downsample)):
            x = self.up_blocks[i](skips[i], x)
        x = self.relu(self.last_bn(self.last_conv1(x)))
        return self.relu(self.last_conv2(x))
'''
class UNetDepth(nn.Module):
def __init__(self):
super(UNetDepth, self).__init__()
self.down_block1 = UNet_down_block(3, 16, False)
self.down_block2 = UNet_down_block(16, 32, True)
self.down_block3 = UNet_down_block(32, 64, True)
self.down_block4 = UNet_down_block(64, 128, True)
self.down_block5 = UNet_down_block(128, 256, True)
self.down_block6 = UNet_down_block(256, 512, True)
self.down_block7 = UNet_down_block(512, 1024, False)
self.mid_conv1 = nn.Conv2d(1024, 1024, 3, padding=1)
self.bn1 = nn.GroupNorm(8, 1024)
self.mid_conv2 = nn.Conv2d(1024, 1024, 3, padding=1)
self.bn2 = nn.GroupNorm(8, 1024)
self.mid_conv3 = torch.nn.Conv2d(1024, 1024, 3, padding=1)
self.bn3 = torch.nn.GroupNorm(8, 1024)
self.up_block1 = UNet_up_block(512, 1024, 512, False)
self.up_block2 = UNet_up_block(256, 512, 256, True)
self.up_block3 = UNet_up_block(128, 256, 128, True)
self.up_block4 = UNet_up_block(64, 128, 64, True)
self.up_block5 = UNet_up_block(32, 64, 32, True)
self.up_block6 = UNet_up_block(16, 32, 16, True)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, 1, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.x1 = self.down_block1(x)
x = self.x2 = self.down_block2(self.x1)
x = self.x3 = self.down_block3(self.x2)
x = self.x4 = self.down_block4(self.x3)
x = self.x5 = self.down_block5(self.x4)
x = self.x6 = self.down_block6(self.x5)
x = self.x7 = self.down_block7(self.x6)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
x = self.up_block1(self.x6, x)
x = self.up_block2(self.x5, x)
x = self.up_block3(self.x4, x)
x = self.up_block4(self.x3, x)
x = self.up_block5(self.x2, x)
x = self.up_block6(self.x1, x)
x = self.relu(self.last_bn(self.last_conv1(x)))
x = self.last_conv2(x)
return x
'''
class UNetDepth(nn.Module):
    """Fixed-architecture U-Net for single-channel (depth) prediction:
    five explicit down blocks (16..512 channels), a 512-wide bottleneck,
    and five up blocks back to 16 channels with a 1-channel 1x1 head."""
    def __init__(self):
        super(UNetDepth, self).__init__()
        self.down_block1 = UNet_down_block(3, 16, False)
        self.down_block2 = UNet_down_block(16, 32, True)
        self.down_block3 = UNet_down_block(32, 64, True)
        self.down_block4 = UNet_down_block(64, 128, True)
        self.down_block5 = UNet_down_block(128, 256, True)
        self.down_block6 = UNet_down_block(256, 512, False)
        self.mid_conv1 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, 512)
        self.mid_conv2 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, 512)
        self.mid_conv3 = torch.nn.Conv2d(512, 512, 3, padding=1)
        self.bn3 = torch.nn.GroupNorm(8, 512)
        self.up_block1 = UNet_up_block(256, 512, 256, False)
        self.up_block2 = UNet_up_block(128, 256, 128, True)
        self.up_block3 = UNet_up_block(64, 128, 64, True)
        self.up_block4 = UNet_up_block(32, 64, 32, True)
        self.up_block5 = UNet_up_block(16, 32, 16, True)
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, 1, 1, padding=0)
        self.relu = nn.ReLU()
    def forward(self, x):
        # NOTE(review): the intermediate activations are stored on self
        # (self.x1..self.x6), which keeps them (and their autograd graph)
        # alive after forward() returns — intentional feature exposure or a
        # memory leak; confirm before relying on either.
        x = self.x1 = self.down_block1(x)
        x = self.x2 = self.down_block2(self.x1)
        x = self.x3 = self.down_block3(self.x2)
        x = self.x4 = self.down_block4(self.x3)
        x = self.x5 = self.down_block5(self.x4)
        x = self.x6 = self.down_block6(self.x5)
        x = self.relu(self.bn1(self.mid_conv1(x)))
        x = self.relu(self.bn2(self.mid_conv2(x)))
        x = self.relu(self.bn3(self.mid_conv3(x)))
        x = self.up_block1(self.x5, x)
        x = self.up_block2(self.x4, x)
        x = self.up_block3(self.x3, x)
        x = self.up_block4(self.x2, x)
        x = self.up_block5(self.x1, x)
        x = self.relu(self.last_bn(self.last_conv1(x)))
        # No ReLU on the final 1x1 conv: depth logits may be negative.
        x = self.last_conv2(x)
        return x
class UNet_sim(nn.Module):
    """U-Net variant whose stem is a ConvBlock producing 64 channels.

    Channel widths run from 64 up to 2**(6 + downsample) at the bottleneck.
    Unlike UNet, no ReLU follows the two final convolutions.
    """

    def __init__(self, downsample=4, in_channels=3, out_channels=3):
        super(UNet_sim, self).__init__()
        self.downsample = downsample
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.conv = ConvBlock(in_channels, 64)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (6 + i), 2 ** (7 + i), True)
             for i in range(downsample)]
        )
        width = 2 ** (6 + downsample)
        self.mid_conv1 = nn.Conv2d(width, width, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, width)
        self.mid_conv2 = nn.Conv2d(width, width, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, width)
        self.mid_conv3 = torch.nn.Conv2d(width, width, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, width)
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (6 + i), 2 ** (7 + i), 2 ** (6 + i))
             for i in range(downsample)]
        )
        self.last_conv1 = nn.Conv2d(64, 64, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 64)
        self.last_conv2 = nn.Conv2d(64, out_channels, 1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.conv(x)
        skips = [x]
        for block in self.down_blocks:
            x = block(x)
            skips.append(x)
        for conv, norm in ((self.mid_conv1, self.bn1),
                           (self.mid_conv2, self.bn2),
                           (self.mid_conv3, self.bn3)):
            x = self.relu(norm(conv(x)))
        for i in reversed(range(self.downsample)):
            x = self.up_blocks[i](skips[i], x)
        x = self.last_bn(self.last_conv1(x))
        return self.last_conv2(x)
class Encoder(nn.Module):
    """Contracting half of a U-Net; forward() returns (skip_features,
    bottleneck) so a Decoder can mirror the stages."""

    def __init__(self, downsample=6, in_channels=3):
        """:downsample the number of down blocks
        :in_channels the channel of input tensor
        """
        super(Encoder, self).__init__()
        self.in_channels = in_channels
        self.downsample = downsample
        self.down1 = UNet_down_block(in_channels, 16, False)
        self.down_blocks = nn.ModuleList(
            [UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True)
             for i in range(downsample)]
        )
        width = 2 ** (4 + downsample)
        self.mid_conv1 = nn.Conv2d(width, width, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, width)
        self.mid_conv2 = nn.Conv2d(width, width, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, width)
        self.mid_conv3 = torch.nn.Conv2d(width, width, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, width)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.down1(x)
        skips = [x]
        for block in self.down_blocks:
            x = block(x)
            skips.append(x)
        for conv, norm in ((self.mid_conv1, self.bn1),
                           (self.mid_conv2, self.bn2),
                           (self.mid_conv3, self.bn3)):
            x = self.relu(norm(conv(x)))
        return skips, x
class Decoder(nn.Module):
    """U-Net style decoder over the 16 .. 2**(4+downsample) channel scales.

    :param downsample: number of up blocks (must match the paired encoder)
    :param out_channels: channels of the final prediction map
    :param combine_num: number of shallowest decoder feature maps to collect,
        upsample to a shared resolution, and channel-concatenate as a
        secondary output (0 disables collection).
    """

    def __init__(self, downsample, out_channels, combine_num=0):
        super(Decoder, self).__init__()
        self.out_channels, self.downsample = out_channels, downsample
        self.combine_num = combine_num
        self.up_blocks = nn.ModuleList(
            [UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, self.downsample)])
        # Prediction head: 16 -> 16 -> out_channels.
        self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 16)
        self.last_conv2 = nn.Conv2d(16, self.out_channels, 1, padding=0)
        self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.relu = nn.ReLU()

    def forward(self, xvals, x):
        """Decode bottleneck `x` using skip features `xvals`.

        Returns (prediction, combined_features); combined_features is the
        concat of the `combine_num` shallowest decoder outputs, or the final
        decoder features when nothing was collected.
        """
        devals = []
        for i in reversed(range(self.downsample)):
            x = self.up_blocks[i](xvals[i], x)
            if i < self.combine_num:
                devals.append(x)
        y = self.last_bn(self.last_conv1(x))
        # BUG FIX: this previously read `self.last_conv2(x)`, silently
        # discarding the last_conv1 + last_bn result above. Every sibling
        # decoder in this file (Decoder_sim, AlbedoDecoder_sim, AlbedoDecoder)
        # feeds `y` here.
        y = self.last_conv2(y)
        if len(devals) > 0:
            # Deeper maps are smaller; upsample each collected map until all
            # match the resolution of the shallowest one, then concat.
            for j, decode in enumerate(devals):
                for _ in range(len(devals) - 1 - j):
                    decode = self.up_sampling(decode)
                devals[j] = decode
            combine_x = torch.cat(devals[::-1], dim=1)
            return y, combine_x
        else:
            return y, x
class Encoder_sim(nn.Module):
    """Simplified encoder: a ConvBlock stem to 64 channels, `downsample`
    halving down-blocks, then a three-conv GroupNorm bottleneck.
    """

    def __init__(self, downsample=4, in_channels=3):
        super(Encoder_sim, self).__init__()
        self.downsample = downsample
        self.conv = ConvBlock(in_channels, 64)
        # Scale i maps 2**(6+i) -> 2**(7+i) channels.
        self.down_blocks = nn.ModuleList(
            UNet_down_block(2 ** (6 + i), 2 ** (7 + i), True)
            for i in range(downsample)
        )
        width = 2 ** (6 + self.downsample)  # bottleneck channel count
        self.mid_conv1 = nn.Conv2d(width, width, 3, padding=1)
        self.bn1 = nn.GroupNorm(8, width)
        self.mid_conv2 = nn.Conv2d(width, width, 3, padding=1)
        self.bn2 = nn.GroupNorm(8, width)
        self.mid_conv3 = torch.nn.Conv2d(width, width, 3, padding=1)
        self.bn3 = nn.GroupNorm(8, width)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Return (skip_features, bottleneck_output)."""
        x = self.conv(x)
        skips = [x]
        for block in self.down_blocks:
            x = block(x)
            skips.append(x)
        for conv, norm in ((self.mid_conv1, self.bn1),
                           (self.mid_conv2, self.bn2),
                           (self.mid_conv3, self.bn3)):
            x = self.relu(norm(conv(x)))
        return skips, x
class Decoder_sim(nn.Module):
    """Simplified decoder paired with Encoder_sim: `downsample` up blocks over
    the 64 .. 2**(6+downsample) channel scales plus a 64 -> out_channels head.
    """

    def __init__(self, downsample, out_channels):
        super(Decoder_sim, self).__init__()
        self.downsample, self.out_channels = downsample, out_channels
        self.up_blocks = nn.ModuleList(
            UNet_up_block(2 ** (6 + i), 2 ** (7 + i), 2 ** (6 + i))
            for i in range(downsample)
        )
        # Prediction head: 64 -> 64 -> out_channels.
        self.last_conv1 = nn.Conv2d(64, 64, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 64)
        self.last_conv2 = nn.Conv2d(64, self.out_channels, 1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, xvals, x):
        """Decode bottleneck `x` with skips `xvals`; return (prediction, features)."""
        for level in reversed(range(self.downsample)):
            x = self.up_blocks[level](xvals[level], x)
        y = self.last_conv2(self.last_bn(self.last_conv1(x)))
        return y, x
class ThreeD2NorDepth(nn.Module):
    """Predicts surface normals (3 channels) and depth (1 channel) from a
    3-channel input using one shared encoder and two task-specific decoders.

    :param use_simple: pick the Encoder_sim/Decoder_sim pair instead of the
        full Encoder/Decoder pair.
    """

    def __init__(self, downsample=3, use_simple=True):
        super(ThreeD2NorDepth, self).__init__()
        if use_simple:
            encoder_cls, decoder_cls, extra = Encoder_sim, Decoder_sim, {}
        else:
            encoder_cls, decoder_cls, extra = Encoder, Decoder, {'combine_num': 0}
        self.threeD_encoder = encoder_cls(downsample=downsample, in_channels=3)
        self.normal_decoder = decoder_cls(downsample=downsample, out_channels=3, **extra)
        self.depth_decoder = decoder_cls(downsample=downsample, out_channels=1, **extra)

    def forward(self, x):
        """Return (normals, depth), both decoded from the shared bottleneck."""
        skips, bottleneck = self.threeD_encoder(x)
        normals, _ = self.normal_decoder(skips, bottleneck)
        depth, _ = self.depth_decoder(skips, bottleneck)
        return normals, depth
class AlbedoDecoder_sim(nn.Module):
    """Albedo decoder over the 128 .. 2**(7+downsample) channel scales with a
    128 -> 64 -> out_channels prediction head.
    """

    def __init__(self, downsample=6, out_channels=1):
        super(AlbedoDecoder_sim, self).__init__()
        self.out_channels, self.downsample = out_channels, downsample
        self.up_blocks = nn.ModuleList(
            UNet_up_block(2 ** (7 + i), 2 ** (8 + i), 2 ** (7 + i))
            for i in range(downsample)
        )
        # Prediction head: 128 -> 64 -> out_channels.
        self.last_conv1 = nn.Conv2d(128, 64, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 64)
        self.last_conv2 = nn.Conv2d(64, self.out_channels, 1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, xvals, x):
        """Decode bottleneck `x` with skips `xvals`; return (prediction, features)."""
        for level in reversed(range(self.downsample)):
            x = self.up_blocks[level](xvals[level], x)
        y = self.last_conv2(self.last_bn(self.last_conv1(x)))
        return y, x
class AlbedoDecoder(nn.Module):
    """Albedo decoder over the 32 .. 2**(5+downsample) channel scales with a
    32 -> 32 -> out_channels prediction head.
    """

    def __init__(self, downsample=6, out_channels=1):
        super(AlbedoDecoder, self).__init__()
        self.out_channels, self.downsample = out_channels, downsample
        self.up_blocks = nn.ModuleList(
            UNet_up_block(2 ** (5 + i), 2 ** (6 + i), 2 ** (5 + i))
            for i in range(downsample)
        )
        # Prediction head: 32 -> 32 -> out_channels.
        self.last_conv1 = nn.Conv2d(32, 32, 3, padding=1)
        self.last_bn = nn.GroupNorm(8, 32)
        self.last_conv2 = nn.Conv2d(32, self.out_channels, 1, padding=0)
        self.relu = nn.ReLU()

    def forward(self, xvals, x):
        """Decode bottleneck `x` with skips `xvals`; return (prediction, features)."""
        for level in reversed(range(self.downsample)):
            x = self.up_blocks[level](xvals[level], x)
        y = self.last_conv2(self.last_bn(self.last_conv1(x)))
        return y, x
class ConvBlock(nn.Module):
    """Norm -> (optional 2x transposed-conv upsample) -> 3x3 conv, with ReLU
    after each convolution.

    :param f1: input channel count
    :param f2: output channel count
    :param use_groupnorm: use GroupNorm(groups, f1) instead of BatchNorm2d(f1)
    :param transpose: if True, a stride-2 ConvTranspose2d (f1 -> f1) runs
        before the main conv, doubling spatial resolution
    """

    def __init__(self, f1, f2, kernel_size=3, padding=1, use_groupnorm=False, groups=8, dilation=1, transpose=False):
        super(ConvBlock, self).__init__()
        self.transpose = transpose
        # padding*dilation keeps spatial size constant for the default 3x3 kernel.
        self.conv = nn.Conv2d(f1, f2, (kernel_size, kernel_size), dilation=dilation, padding=padding * dilation)
        if self.transpose:
            # output_padding=1 makes the output exactly 2x the input size.
            self.convt = nn.ConvTranspose2d(
                f1, f1, (3, 3), dilation=dilation, stride=2, padding=dilation, output_padding=1
            )
        if use_groupnorm:
            self.bn = nn.GroupNorm(groups, f1)
        else:
            self.bn = nn.BatchNorm2d(f1)

    def forward(self, x):
        # x = F.dropout(x, 0.04, self.training)
        x = self.bn(x)
        if self.transpose:
            # x = F.upsample(x, scale_factor=2, mode='bilinear')
            x = F.relu(self.convt(x))
            # x = x[:, :, :-1, :-1]
        x = F.relu(self.conv(x))
        # NOTE: removed a stray dataset artifact ("| StarcoderdataPython |")
        # that had been fused onto this return line and broke the syntax.
        return x
from django.urls import path

from . import views

# URL routes for this app; each route name mirrors its view function.
# NOTE: removed dataset-concatenation artifacts ("161306 | " prefix and a
# trailing "| StarcoderdataPython |") that made this module unparseable.
urlpatterns = [
    path('', views.index, name='index'),
    path('send', views.send, name='send'),
    path('recv', views.recv, name='recv'),
    path('send_action', views.send_action, name='send_action'),
    path('recv_action', views.recv_action, name='recv_action'),
    path('delete_action', views.delete_action, name='delete_action'),
]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.